1#[allow(unused_imports)]
2use crate::Instruction;
3use crate::{
4 encode_vec, BlockType, Catch, Encode, Handle, HeapType, Lane, MemArg, Ordering, RefType,
5 ValType,
6};
7use alloc::vec::Vec;
8
/// An encoder that appends the binary encoding of one WebAssembly
/// instruction at a time to a caller-provided byte buffer.
///
/// Every instruction method returns `&mut Self` so calls can be chained.
#[derive(Debug)]
pub struct InstructionSink<'a> {
    /// Output buffer that encoded instruction bytes are appended to.
    sink: &'a mut Vec<u8>,
}
14
15impl<'a> InstructionSink<'a> {
    /// Creates a new instruction sink that appends encodings to `sink`.
    pub fn new(sink: &'a mut Vec<u8>) -> Self {
        Self { sink }
    }
20
    // Control instructions.

    /// Encode `unreachable`.
    pub fn unreachable(&mut self) -> &mut Self {
        self.sink.push(0x00);
        self
    }

    /// Encode `nop`.
    pub fn nop(&mut self) -> &mut Self {
        self.sink.push(0x01);
        self
    }

    /// Encode `block` with block type `bt`.
    pub fn block(&mut self, bt: BlockType) -> &mut Self {
        self.sink.push(0x02);
        bt.encode(self.sink);
        self
    }

    /// Encode `loop` with block type `bt`.
    pub fn loop_(&mut self, bt: BlockType) -> &mut Self {
        self.sink.push(0x03);
        bt.encode(self.sink);
        self
    }

    /// Encode `if` with block type `bt`.
    pub fn if_(&mut self, bt: BlockType) -> &mut Self {
        self.sink.push(0x04);
        bt.encode(self.sink);
        self
    }

    /// Encode `else`.
    pub fn else_(&mut self) -> &mut Self {
        self.sink.push(0x05);
        self
    }

    /// Encode `end`, closing the innermost open block, loop, if, or function body.
    pub fn end(&mut self) -> &mut Self {
        self.sink.push(0x0B);
        self
    }

    /// Encode `br` targeting relative label depth `l`.
    pub fn br(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x0C);
        l.encode(self.sink);
        self
    }

    /// Encode `br_if` targeting relative label depth `l`.
    pub fn br_if(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x0D);
        l.encode(self.sink);
        self
    }

    /// Encode `br_table` with branch targets `ls` and default target `l`.
    pub fn br_table<V: IntoIterator<Item = u32>>(&mut self, ls: V, l: u32) -> &mut Self
    where
        V::IntoIter: ExactSizeIterator,
    {
        self.sink.push(0x0E);
        // The target vector is length-prefixed; the default label follows it.
        encode_vec(ls, self.sink);
        l.encode(self.sink);
        self
    }

    /// Encode `br_on_null` targeting relative label depth `l`.
    pub fn br_on_null(&mut self, l: u32) -> &mut Self {
        self.sink.push(0xD5);
        l.encode(self.sink);
        self
    }

    /// Encode `br_on_non_null` targeting relative label depth `l`.
    pub fn br_on_non_null(&mut self, l: u32) -> &mut Self {
        self.sink.push(0xD6);
        l.encode(self.sink);
        self
    }

    /// Encode `return`.
    pub fn return_(&mut self) -> &mut Self {
        self.sink.push(0x0F);
        self
    }
112
    /// Encode `call` of function index `f`.
    pub fn call(&mut self, f: u32) -> &mut Self {
        self.sink.push(0x10);
        f.encode(self.sink);
        self
    }

    /// Encode `call_ref` with type index `ty`.
    pub fn call_ref(&mut self, ty: u32) -> &mut Self {
        self.sink.push(0x14);
        ty.encode(self.sink);
        self
    }

    /// Encode `call_indirect`.
    ///
    /// Note: the binary format stores the type index *before* the table
    /// index, which is the reverse of this method's parameter order.
    pub fn call_indirect(&mut self, table_index: u32, type_index: u32) -> &mut Self {
        self.sink.push(0x11);
        type_index.encode(self.sink);
        table_index.encode(self.sink);
        self
    }

    /// Encode `return_call_ref` with type index `ty`.
    pub fn return_call_ref(&mut self, ty: u32) -> &mut Self {
        self.sink.push(0x15);
        ty.encode(self.sink);
        self
    }

    /// Encode `return_call` of function index `f`.
    pub fn return_call(&mut self, f: u32) -> &mut Self {
        self.sink.push(0x12);
        f.encode(self.sink);
        self
    }

    /// Encode `return_call_indirect`; as with `call_indirect`, the type
    /// index is encoded before the table index.
    pub fn return_call_indirect(&mut self, table_index: u32, type_index: u32) -> &mut Self {
        self.sink.push(0x13);
        type_index.encode(self.sink);
        table_index.encode(self.sink);
        self
    }
156
    // Exception-handling instructions.

    /// Encode `try_table` with block type `ty` and catch clauses `catches`.
    pub fn try_table<V: IntoIterator<Item = Catch>>(
        &mut self,
        ty: BlockType,
        catches: V,
    ) -> &mut Self
    where
        V::IntoIter: ExactSizeIterator,
    {
        self.sink.push(0x1f);
        ty.encode(self.sink);
        encode_vec(catches, self.sink);
        self
    }

    /// Encode `throw` of exception tag `t`.
    pub fn throw(&mut self, t: u32) -> &mut Self {
        self.sink.push(0x08);
        t.encode(self.sink);
        self
    }

    /// Encode `throw_ref`.
    pub fn throw_ref(&mut self) -> &mut Self {
        self.sink.push(0x0A);
        self
    }

    /// Encode `try` (legacy exception handling) with block type `bt`.
    pub fn try_(&mut self, bt: BlockType) -> &mut Self {
        self.sink.push(0x06);
        bt.encode(self.sink);
        self
    }

    /// Encode `delegate` (legacy exception handling) to label depth `l`.
    pub fn delegate(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x18);
        l.encode(self.sink);
        self
    }

    /// Encode `catch` (legacy exception handling) for tag `t`.
    pub fn catch(&mut self, t: u32) -> &mut Self {
        self.sink.push(0x07);
        t.encode(self.sink);
        self
    }

    /// Encode `catch_all` (legacy exception handling).
    pub fn catch_all(&mut self) -> &mut Self {
        self.sink.push(0x19);
        self
    }

    /// Encode `rethrow` (legacy exception handling) for label depth `l`.
    pub fn rethrow(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x09);
        l.encode(self.sink);
        self
    }
220
    // Parametric and variable instructions.

    /// Encode `drop`.
    pub fn drop(&mut self) -> &mut Self {
        self.sink.push(0x1A);
        self
    }

    /// Encode an untyped `select`.
    pub fn select(&mut self) -> &mut Self {
        self.sink.push(0x1B);
        self
    }

    /// Encode `local.get` of local index `l`.
    pub fn local_get(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x20);
        l.encode(self.sink);
        self
    }

    /// Encode `local.set` of local index `l`.
    pub fn local_set(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x21);
        l.encode(self.sink);
        self
    }

    /// Encode `local.tee` of local index `l`.
    pub fn local_tee(&mut self, l: u32) -> &mut Self {
        self.sink.push(0x22);
        l.encode(self.sink);
        self
    }

    /// Encode `global.get` of global index `g`.
    pub fn global_get(&mut self, g: u32) -> &mut Self {
        self.sink.push(0x23);
        g.encode(self.sink);
        self
    }

    /// Encode `global.set` of global index `g`.
    pub fn global_set(&mut self, g: u32) -> &mut Self {
        self.sink.push(0x24);
        g.encode(self.sink);
        self
    }
271
    // Memory load instructions; each takes a memory immediate `m`.

    /// Encode `i32.load` with memory immediate `m`.
    pub fn i32_load(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x28);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load` with memory immediate `m`.
    pub fn i64_load(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x29);
        m.encode(self.sink);
        self
    }

    /// Encode `f32.load` with memory immediate `m`.
    pub fn f32_load(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x2A);
        m.encode(self.sink);
        self
    }

    /// Encode `f64.load` with memory immediate `m`.
    pub fn f64_load(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x2B);
        m.encode(self.sink);
        self
    }

    /// Encode `i32.load8_s` with memory immediate `m`.
    pub fn i32_load8_s(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x2C);
        m.encode(self.sink);
        self
    }

    /// Encode `i32.load8_u` with memory immediate `m`.
    pub fn i32_load8_u(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x2D);
        m.encode(self.sink);
        self
    }

    /// Encode `i32.load16_s` with memory immediate `m`.
    pub fn i32_load16_s(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x2E);
        m.encode(self.sink);
        self
    }

    /// Encode `i32.load16_u` with memory immediate `m`.
    pub fn i32_load16_u(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x2F);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load8_s` with memory immediate `m`.
    pub fn i64_load8_s(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x30);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load8_u` with memory immediate `m`.
    pub fn i64_load8_u(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x31);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load16_s` with memory immediate `m`.
    pub fn i64_load16_s(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x32);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load16_u` with memory immediate `m`.
    pub fn i64_load16_u(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x33);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load32_s` with memory immediate `m`.
    pub fn i64_load32_s(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x34);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.load32_u` with memory immediate `m`.
    pub fn i64_load32_u(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x35);
        m.encode(self.sink);
        self
    }
371
    // Memory store instructions; each takes a memory immediate `m`.

    /// Encode `i32.store` with memory immediate `m`.
    pub fn i32_store(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x36);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.store` with memory immediate `m`.
    pub fn i64_store(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x37);
        m.encode(self.sink);
        self
    }

    /// Encode `f32.store` with memory immediate `m`.
    pub fn f32_store(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x38);
        m.encode(self.sink);
        self
    }

    /// Encode `f64.store` with memory immediate `m`.
    pub fn f64_store(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x39);
        m.encode(self.sink);
        self
    }

    /// Encode `i32.store8` with memory immediate `m`.
    pub fn i32_store8(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x3A);
        m.encode(self.sink);
        self
    }

    /// Encode `i32.store16` with memory immediate `m`.
    pub fn i32_store16(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x3B);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.store8` with memory immediate `m`.
    pub fn i64_store8(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x3C);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.store16` with memory immediate `m`.
    pub fn i64_store16(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x3D);
        m.encode(self.sink);
        self
    }

    /// Encode `i64.store32` with memory immediate `m`.
    pub fn i64_store32(&mut self, m: MemArg) -> &mut Self {
        self.sink.push(0x3E);
        m.encode(self.sink);
        self
    }
434
    // Memory management and bulk-memory instructions.

    /// Encode `memory.size` for memory index `i`.
    pub fn memory_size(&mut self, i: u32) -> &mut Self {
        self.sink.push(0x3F);
        i.encode(self.sink);
        self
    }

    /// Encode `memory.grow` for memory index `i`.
    pub fn memory_grow(&mut self, i: u32) -> &mut Self {
        self.sink.push(0x40);
        i.encode(self.sink);
        self
    }

    /// Encode `memory.init` for memory `mem` from data segment `data_index`.
    ///
    /// The data segment index is encoded before the memory index.
    pub fn memory_init(&mut self, mem: u32, data_index: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x08);
        data_index.encode(self.sink);
        mem.encode(self.sink);
        self
    }

    /// Encode `data.drop` of data segment `data`.
    pub fn data_drop(&mut self, data: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x09);
        data.encode(self.sink);
        self
    }

    /// Encode `memory.copy` from `src_mem` to `dst_mem`.
    ///
    /// The destination memory index is encoded before the source.
    pub fn memory_copy(&mut self, dst_mem: u32, src_mem: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x0a);
        dst_mem.encode(self.sink);
        src_mem.encode(self.sink);
        self
    }

    /// Encode `memory.fill` for memory index `mem`.
    pub fn memory_fill(&mut self, mem: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x0b);
        mem.encode(self.sink);
        self
    }

    /// Encode `memory.discard` for memory index `mem`.
    pub fn memory_discard(&mut self, mem: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x12);
        mem.encode(self.sink);
        self
    }
490
    // Constant instructions.

    /// Encode `i32.const` with immediate `x`.
    pub fn i32_const(&mut self, x: i32) -> &mut Self {
        self.sink.push(0x41);
        x.encode(self.sink);
        self
    }

    /// Encode `i64.const` with immediate `x`.
    pub fn i64_const(&mut self, x: i64) -> &mut Self {
        self.sink.push(0x42);
        x.encode(self.sink);
        self
    }
506
507 pub fn f32_const(&mut self, x: f32) -> &mut Self {
509 self.sink.push(0x43);
510 let x = x.to_bits();
511 self.sink.extend(x.to_le_bytes().iter().copied());
512 self
513 }
514
515 pub fn f64_const(&mut self, x: f64) -> &mut Self {
517 self.sink.push(0x44);
518 let x = x.to_bits();
519 self.sink.extend(x.to_le_bytes().iter().copied());
520 self
521 }
522
    // i32 test and comparison operators.

    /// Encode `i32.eqz`.
    pub fn i32_eqz(&mut self) -> &mut Self {
        self.sink.push(0x45);
        self
    }

    /// Encode `i32.eq`.
    pub fn i32_eq(&mut self) -> &mut Self {
        self.sink.push(0x46);
        self
    }

    /// Encode `i32.ne`.
    pub fn i32_ne(&mut self) -> &mut Self {
        self.sink.push(0x47);
        self
    }

    /// Encode `i32.lt_s`.
    pub fn i32_lt_s(&mut self) -> &mut Self {
        self.sink.push(0x48);
        self
    }

    /// Encode `i32.lt_u`.
    pub fn i32_lt_u(&mut self) -> &mut Self {
        self.sink.push(0x49);
        self
    }

    /// Encode `i32.gt_s`.
    pub fn i32_gt_s(&mut self) -> &mut Self {
        self.sink.push(0x4A);
        self
    }

    /// Encode `i32.gt_u`.
    pub fn i32_gt_u(&mut self) -> &mut Self {
        self.sink.push(0x4B);
        self
    }

    /// Encode `i32.le_s`.
    pub fn i32_le_s(&mut self) -> &mut Self {
        self.sink.push(0x4C);
        self
    }

    /// Encode `i32.le_u`.
    pub fn i32_le_u(&mut self) -> &mut Self {
        self.sink.push(0x4D);
        self
    }

    /// Encode `i32.ge_s`.
    pub fn i32_ge_s(&mut self) -> &mut Self {
        self.sink.push(0x4E);
        self
    }

    /// Encode `i32.ge_u`.
    pub fn i32_ge_u(&mut self) -> &mut Self {
        self.sink.push(0x4F);
        self
    }
588
    // i64 test and comparison operators.

    /// Encode `i64.eqz`.
    pub fn i64_eqz(&mut self) -> &mut Self {
        self.sink.push(0x50);
        self
    }

    /// Encode `i64.eq`.
    pub fn i64_eq(&mut self) -> &mut Self {
        self.sink.push(0x51);
        self
    }

    /// Encode `i64.ne`.
    pub fn i64_ne(&mut self) -> &mut Self {
        self.sink.push(0x52);
        self
    }

    /// Encode `i64.lt_s`.
    pub fn i64_lt_s(&mut self) -> &mut Self {
        self.sink.push(0x53);
        self
    }

    /// Encode `i64.lt_u`.
    pub fn i64_lt_u(&mut self) -> &mut Self {
        self.sink.push(0x54);
        self
    }

    /// Encode `i64.gt_s`.
    pub fn i64_gt_s(&mut self) -> &mut Self {
        self.sink.push(0x55);
        self
    }

    /// Encode `i64.gt_u`.
    pub fn i64_gt_u(&mut self) -> &mut Self {
        self.sink.push(0x56);
        self
    }

    /// Encode `i64.le_s`.
    pub fn i64_le_s(&mut self) -> &mut Self {
        self.sink.push(0x57);
        self
    }

    /// Encode `i64.le_u`.
    pub fn i64_le_u(&mut self) -> &mut Self {
        self.sink.push(0x58);
        self
    }

    /// Encode `i64.ge_s`.
    pub fn i64_ge_s(&mut self) -> &mut Self {
        self.sink.push(0x59);
        self
    }

    /// Encode `i64.ge_u`.
    pub fn i64_ge_u(&mut self) -> &mut Self {
        self.sink.push(0x5A);
        self
    }
654
    // Floating-point comparison operators.

    /// Encode `f32.eq`.
    pub fn f32_eq(&mut self) -> &mut Self {
        self.sink.push(0x5B);
        self
    }

    /// Encode `f32.ne`.
    pub fn f32_ne(&mut self) -> &mut Self {
        self.sink.push(0x5C);
        self
    }

    /// Encode `f32.lt`.
    pub fn f32_lt(&mut self) -> &mut Self {
        self.sink.push(0x5D);
        self
    }

    /// Encode `f32.gt`.
    pub fn f32_gt(&mut self) -> &mut Self {
        self.sink.push(0x5E);
        self
    }

    /// Encode `f32.le`.
    pub fn f32_le(&mut self) -> &mut Self {
        self.sink.push(0x5F);
        self
    }

    /// Encode `f32.ge`.
    pub fn f32_ge(&mut self) -> &mut Self {
        self.sink.push(0x60);
        self
    }

    /// Encode `f64.eq`.
    pub fn f64_eq(&mut self) -> &mut Self {
        self.sink.push(0x61);
        self
    }

    /// Encode `f64.ne`.
    pub fn f64_ne(&mut self) -> &mut Self {
        self.sink.push(0x62);
        self
    }

    /// Encode `f64.lt`.
    pub fn f64_lt(&mut self) -> &mut Self {
        self.sink.push(0x63);
        self
    }

    /// Encode `f64.gt`.
    pub fn f64_gt(&mut self) -> &mut Self {
        self.sink.push(0x64);
        self
    }

    /// Encode `f64.le`.
    pub fn f64_le(&mut self) -> &mut Self {
        self.sink.push(0x65);
        self
    }

    /// Encode `f64.ge`.
    pub fn f64_ge(&mut self) -> &mut Self {
        self.sink.push(0x66);
        self
    }
726
    // i32 arithmetic and bitwise operators.

    /// Encode `i32.clz`.
    pub fn i32_clz(&mut self) -> &mut Self {
        self.sink.push(0x67);
        self
    }

    /// Encode `i32.ctz`.
    pub fn i32_ctz(&mut self) -> &mut Self {
        self.sink.push(0x68);
        self
    }

    /// Encode `i32.popcnt`.
    pub fn i32_popcnt(&mut self) -> &mut Self {
        self.sink.push(0x69);
        self
    }

    /// Encode `i32.add`.
    pub fn i32_add(&mut self) -> &mut Self {
        self.sink.push(0x6A);
        self
    }

    /// Encode `i32.sub`.
    pub fn i32_sub(&mut self) -> &mut Self {
        self.sink.push(0x6B);
        self
    }

    /// Encode `i32.mul`.
    pub fn i32_mul(&mut self) -> &mut Self {
        self.sink.push(0x6C);
        self
    }

    /// Encode `i32.div_s`.
    pub fn i32_div_s(&mut self) -> &mut Self {
        self.sink.push(0x6D);
        self
    }

    /// Encode `i32.div_u`.
    pub fn i32_div_u(&mut self) -> &mut Self {
        self.sink.push(0x6E);
        self
    }

    /// Encode `i32.rem_s`.
    pub fn i32_rem_s(&mut self) -> &mut Self {
        self.sink.push(0x6F);
        self
    }

    /// Encode `i32.rem_u`.
    pub fn i32_rem_u(&mut self) -> &mut Self {
        self.sink.push(0x70);
        self
    }

    /// Encode `i32.and`.
    pub fn i32_and(&mut self) -> &mut Self {
        self.sink.push(0x71);
        self
    }

    /// Encode `i32.or`.
    pub fn i32_or(&mut self) -> &mut Self {
        self.sink.push(0x72);
        self
    }

    /// Encode `i32.xor`.
    pub fn i32_xor(&mut self) -> &mut Self {
        self.sink.push(0x73);
        self
    }

    /// Encode `i32.shl`.
    pub fn i32_shl(&mut self) -> &mut Self {
        self.sink.push(0x74);
        self
    }

    /// Encode `i32.shr_s`.
    pub fn i32_shr_s(&mut self) -> &mut Self {
        self.sink.push(0x75);
        self
    }

    /// Encode `i32.shr_u`.
    pub fn i32_shr_u(&mut self) -> &mut Self {
        self.sink.push(0x76);
        self
    }

    /// Encode `i32.rotl`.
    pub fn i32_rotl(&mut self) -> &mut Self {
        self.sink.push(0x77);
        self
    }

    /// Encode `i32.rotr`.
    pub fn i32_rotr(&mut self) -> &mut Self {
        self.sink.push(0x78);
        self
    }
834
    // i64 arithmetic and bitwise operators.

    /// Encode `i64.clz`.
    pub fn i64_clz(&mut self) -> &mut Self {
        self.sink.push(0x79);
        self
    }

    /// Encode `i64.ctz`.
    pub fn i64_ctz(&mut self) -> &mut Self {
        self.sink.push(0x7A);
        self
    }

    /// Encode `i64.popcnt`.
    pub fn i64_popcnt(&mut self) -> &mut Self {
        self.sink.push(0x7B);
        self
    }

    /// Encode `i64.add`.
    pub fn i64_add(&mut self) -> &mut Self {
        self.sink.push(0x7C);
        self
    }

    /// Encode `i64.sub`.
    pub fn i64_sub(&mut self) -> &mut Self {
        self.sink.push(0x7D);
        self
    }

    /// Encode `i64.mul`.
    pub fn i64_mul(&mut self) -> &mut Self {
        self.sink.push(0x7E);
        self
    }

    /// Encode `i64.div_s`.
    pub fn i64_div_s(&mut self) -> &mut Self {
        self.sink.push(0x7F);
        self
    }

    /// Encode `i64.div_u`.
    pub fn i64_div_u(&mut self) -> &mut Self {
        self.sink.push(0x80);
        self
    }

    /// Encode `i64.rem_s`.
    pub fn i64_rem_s(&mut self) -> &mut Self {
        self.sink.push(0x81);
        self
    }

    /// Encode `i64.rem_u`.
    pub fn i64_rem_u(&mut self) -> &mut Self {
        self.sink.push(0x82);
        self
    }

    /// Encode `i64.and`.
    pub fn i64_and(&mut self) -> &mut Self {
        self.sink.push(0x83);
        self
    }

    /// Encode `i64.or`.
    pub fn i64_or(&mut self) -> &mut Self {
        self.sink.push(0x84);
        self
    }

    /// Encode `i64.xor`.
    pub fn i64_xor(&mut self) -> &mut Self {
        self.sink.push(0x85);
        self
    }

    /// Encode `i64.shl`.
    pub fn i64_shl(&mut self) -> &mut Self {
        self.sink.push(0x86);
        self
    }

    /// Encode `i64.shr_s`.
    pub fn i64_shr_s(&mut self) -> &mut Self {
        self.sink.push(0x87);
        self
    }

    /// Encode `i64.shr_u`.
    pub fn i64_shr_u(&mut self) -> &mut Self {
        self.sink.push(0x88);
        self
    }

    /// Encode `i64.rotl`.
    pub fn i64_rotl(&mut self) -> &mut Self {
        self.sink.push(0x89);
        self
    }

    /// Encode `i64.rotr`.
    pub fn i64_rotr(&mut self) -> &mut Self {
        self.sink.push(0x8A);
        self
    }
942
    // f32 arithmetic operators.

    /// Encode `f32.abs`.
    pub fn f32_abs(&mut self) -> &mut Self {
        self.sink.push(0x8B);
        self
    }

    /// Encode `f32.neg`.
    pub fn f32_neg(&mut self) -> &mut Self {
        self.sink.push(0x8C);
        self
    }

    /// Encode `f32.ceil`.
    pub fn f32_ceil(&mut self) -> &mut Self {
        self.sink.push(0x8D);
        self
    }

    /// Encode `f32.floor`.
    pub fn f32_floor(&mut self) -> &mut Self {
        self.sink.push(0x8E);
        self
    }

    /// Encode `f32.trunc`.
    pub fn f32_trunc(&mut self) -> &mut Self {
        self.sink.push(0x8F);
        self
    }

    /// Encode `f32.nearest`.
    pub fn f32_nearest(&mut self) -> &mut Self {
        self.sink.push(0x90);
        self
    }

    /// Encode `f32.sqrt`.
    pub fn f32_sqrt(&mut self) -> &mut Self {
        self.sink.push(0x91);
        self
    }

    /// Encode `f32.add`.
    pub fn f32_add(&mut self) -> &mut Self {
        self.sink.push(0x92);
        self
    }

    /// Encode `f32.sub`.
    pub fn f32_sub(&mut self) -> &mut Self {
        self.sink.push(0x93);
        self
    }

    /// Encode `f32.mul`.
    pub fn f32_mul(&mut self) -> &mut Self {
        self.sink.push(0x94);
        self
    }

    /// Encode `f32.div`.
    pub fn f32_div(&mut self) -> &mut Self {
        self.sink.push(0x95);
        self
    }

    /// Encode `f32.min`.
    pub fn f32_min(&mut self) -> &mut Self {
        self.sink.push(0x96);
        self
    }

    /// Encode `f32.max`.
    pub fn f32_max(&mut self) -> &mut Self {
        self.sink.push(0x97);
        self
    }

    /// Encode `f32.copysign`.
    pub fn f32_copysign(&mut self) -> &mut Self {
        self.sink.push(0x98);
        self
    }
1026
    // f64 arithmetic operators.

    /// Encode `f64.abs`.
    pub fn f64_abs(&mut self) -> &mut Self {
        self.sink.push(0x99);
        self
    }

    /// Encode `f64.neg`.
    pub fn f64_neg(&mut self) -> &mut Self {
        self.sink.push(0x9A);
        self
    }

    /// Encode `f64.ceil`.
    pub fn f64_ceil(&mut self) -> &mut Self {
        self.sink.push(0x9B);
        self
    }

    /// Encode `f64.floor`.
    pub fn f64_floor(&mut self) -> &mut Self {
        self.sink.push(0x9C);
        self
    }

    /// Encode `f64.trunc`.
    pub fn f64_trunc(&mut self) -> &mut Self {
        self.sink.push(0x9D);
        self
    }

    /// Encode `f64.nearest`.
    pub fn f64_nearest(&mut self) -> &mut Self {
        self.sink.push(0x9E);
        self
    }

    /// Encode `f64.sqrt`.
    pub fn f64_sqrt(&mut self) -> &mut Self {
        self.sink.push(0x9F);
        self
    }

    /// Encode `f64.add`.
    pub fn f64_add(&mut self) -> &mut Self {
        self.sink.push(0xA0);
        self
    }

    /// Encode `f64.sub`.
    pub fn f64_sub(&mut self) -> &mut Self {
        self.sink.push(0xA1);
        self
    }

    /// Encode `f64.mul`.
    pub fn f64_mul(&mut self) -> &mut Self {
        self.sink.push(0xA2);
        self
    }

    /// Encode `f64.div`.
    pub fn f64_div(&mut self) -> &mut Self {
        self.sink.push(0xA3);
        self
    }

    /// Encode `f64.min`.
    pub fn f64_min(&mut self) -> &mut Self {
        self.sink.push(0xA4);
        self
    }

    /// Encode `f64.max`.
    pub fn f64_max(&mut self) -> &mut Self {
        self.sink.push(0xA5);
        self
    }

    /// Encode `f64.copysign`.
    pub fn f64_copysign(&mut self) -> &mut Self {
        self.sink.push(0xA6);
        self
    }
1110
    // Conversion, reinterpretation, and sign-extension operators.

    /// Encode `i32.wrap_i64`.
    pub fn i32_wrap_i64(&mut self) -> &mut Self {
        self.sink.push(0xA7);
        self
    }

    /// Encode `i32.trunc_f32_s`.
    pub fn i32_trunc_f32_s(&mut self) -> &mut Self {
        self.sink.push(0xA8);
        self
    }

    /// Encode `i32.trunc_f32_u`.
    pub fn i32_trunc_f32_u(&mut self) -> &mut Self {
        self.sink.push(0xA9);
        self
    }

    /// Encode `i32.trunc_f64_s`.
    pub fn i32_trunc_f64_s(&mut self) -> &mut Self {
        self.sink.push(0xAA);
        self
    }

    /// Encode `i32.trunc_f64_u`.
    pub fn i32_trunc_f64_u(&mut self) -> &mut Self {
        self.sink.push(0xAB);
        self
    }

    /// Encode `i64.extend_i32_s`.
    pub fn i64_extend_i32_s(&mut self) -> &mut Self {
        self.sink.push(0xAC);
        self
    }

    /// Encode `i64.extend_i32_u`.
    pub fn i64_extend_i32_u(&mut self) -> &mut Self {
        self.sink.push(0xAD);
        self
    }

    /// Encode `i64.trunc_f32_s`.
    pub fn i64_trunc_f32_s(&mut self) -> &mut Self {
        self.sink.push(0xAE);
        self
    }

    /// Encode `i64.trunc_f32_u`.
    pub fn i64_trunc_f32_u(&mut self) -> &mut Self {
        self.sink.push(0xAF);
        self
    }

    /// Encode `i64.trunc_f64_s`.
    pub fn i64_trunc_f64_s(&mut self) -> &mut Self {
        self.sink.push(0xB0);
        self
    }

    /// Encode `i64.trunc_f64_u`.
    pub fn i64_trunc_f64_u(&mut self) -> &mut Self {
        self.sink.push(0xB1);
        self
    }

    /// Encode `f32.convert_i32_s`.
    pub fn f32_convert_i32_s(&mut self) -> &mut Self {
        self.sink.push(0xB2);
        self
    }

    /// Encode `f32.convert_i32_u`.
    pub fn f32_convert_i32_u(&mut self) -> &mut Self {
        self.sink.push(0xB3);
        self
    }

    /// Encode `f32.convert_i64_s`.
    pub fn f32_convert_i64_s(&mut self) -> &mut Self {
        self.sink.push(0xB4);
        self
    }

    /// Encode `f32.convert_i64_u`.
    pub fn f32_convert_i64_u(&mut self) -> &mut Self {
        self.sink.push(0xB5);
        self
    }

    /// Encode `f32.demote_f64`.
    pub fn f32_demote_f64(&mut self) -> &mut Self {
        self.sink.push(0xB6);
        self
    }

    /// Encode `f64.convert_i32_s`.
    pub fn f64_convert_i32_s(&mut self) -> &mut Self {
        self.sink.push(0xB7);
        self
    }

    /// Encode `f64.convert_i32_u`.
    pub fn f64_convert_i32_u(&mut self) -> &mut Self {
        self.sink.push(0xB8);
        self
    }

    /// Encode `f64.convert_i64_s`.
    pub fn f64_convert_i64_s(&mut self) -> &mut Self {
        self.sink.push(0xB9);
        self
    }

    /// Encode `f64.convert_i64_u`.
    pub fn f64_convert_i64_u(&mut self) -> &mut Self {
        self.sink.push(0xBA);
        self
    }

    /// Encode `f64.promote_f32`.
    pub fn f64_promote_f32(&mut self) -> &mut Self {
        self.sink.push(0xBB);
        self
    }

    /// Encode `i32.reinterpret_f32`.
    pub fn i32_reinterpret_f32(&mut self) -> &mut Self {
        self.sink.push(0xBC);
        self
    }

    /// Encode `i64.reinterpret_f64`.
    pub fn i64_reinterpret_f64(&mut self) -> &mut Self {
        self.sink.push(0xBD);
        self
    }

    /// Encode `f32.reinterpret_i32`.
    pub fn f32_reinterpret_i32(&mut self) -> &mut Self {
        self.sink.push(0xBE);
        self
    }

    /// Encode `f64.reinterpret_i64`.
    pub fn f64_reinterpret_i64(&mut self) -> &mut Self {
        self.sink.push(0xBF);
        self
    }

    /// Encode `i32.extend8_s`.
    pub fn i32_extend8_s(&mut self) -> &mut Self {
        self.sink.push(0xC0);
        self
    }

    /// Encode `i32.extend16_s`.
    pub fn i32_extend16_s(&mut self) -> &mut Self {
        self.sink.push(0xC1);
        self
    }

    /// Encode `i64.extend8_s`.
    pub fn i64_extend8_s(&mut self) -> &mut Self {
        self.sink.push(0xC2);
        self
    }

    /// Encode `i64.extend16_s`.
    pub fn i64_extend16_s(&mut self) -> &mut Self {
        self.sink.push(0xC3);
        self
    }

    /// Encode `i64.extend32_s`.
    pub fn i64_extend32_s(&mut self) -> &mut Self {
        self.sink.push(0xC4);
        self
    }
1290
    // Saturating truncation operators (0xFC-prefixed sub-opcodes).

    /// Encode `i32.trunc_sat_f32_s`.
    pub fn i32_trunc_sat_f32_s(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        self.sink.push(0x00);
        self
    }

    /// Encode `i32.trunc_sat_f32_u`.
    pub fn i32_trunc_sat_f32_u(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        self.sink.push(0x01);
        self
    }

    /// Encode `i32.trunc_sat_f64_s`.
    pub fn i32_trunc_sat_f64_s(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        self.sink.push(0x02);
        self
    }

    /// Encode `i32.trunc_sat_f64_u`.
    pub fn i32_trunc_sat_f64_u(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        self.sink.push(0x03);
        self
    }

    /// Encode `i64.trunc_sat_f32_s`.
    pub fn i64_trunc_sat_f32_s(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        self.sink.push(0x04);
        self
    }

    /// Encode `i64.trunc_sat_f32_u`.
    pub fn i64_trunc_sat_f32_u(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        self.sink.push(0x05);
        self
    }

    /// Encode `i64.trunc_sat_f64_s`.
    pub fn i64_trunc_sat_f64_s(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        self.sink.push(0x06);
        self
    }

    /// Encode `i64.trunc_sat_f64_u`.
    pub fn i64_trunc_sat_f64_u(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        self.sink.push(0x07);
        self
    }
1346
    // Typed select and reference instructions.

    /// Encode a typed `select` with explicit result type `ty`.
    ///
    /// The type immediate is encoded as a one-element vector (hence the
    /// single-element array).
    pub fn typed_select(&mut self, ty: ValType) -> &mut Self {
        self.sink.push(0x1c);
        [ty].encode(self.sink);
        self
    }

    /// Encode `ref.null` with heap type `ty`.
    pub fn ref_null(&mut self, ty: HeapType) -> &mut Self {
        self.sink.push(0xd0);
        ty.encode(self.sink);
        self
    }

    /// Encode `ref.is_null`.
    pub fn ref_is_null(&mut self) -> &mut Self {
        self.sink.push(0xd1);
        self
    }

    /// Encode `ref.func` of function index `f`.
    pub fn ref_func(&mut self, f: u32) -> &mut Self {
        self.sink.push(0xd2);
        f.encode(self.sink);
        self
    }

    /// Encode `ref.eq`.
    pub fn ref_eq(&mut self) -> &mut Self {
        self.sink.push(0xd3);
        self
    }

    /// Encode `ref.as_non_null`.
    pub fn ref_as_non_null(&mut self) -> &mut Self {
        self.sink.push(0xd4);
        self
    }
1387
    // GC struct and array instructions (0xFB-prefixed sub-opcodes).

    /// Encode `struct.new` for struct type `type_index`.
    pub fn struct_new(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x00);
        type_index.encode(self.sink);
        self
    }

    /// Encode `struct.new_default` for struct type `type_index`.
    pub fn struct_new_default(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x01);
        type_index.encode(self.sink);
        self
    }

    /// Encode `struct.get` of field `field_index` from struct type
    /// `struct_type_index`.
    pub fn struct_get(&mut self, struct_type_index: u32, field_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x02);
        struct_type_index.encode(self.sink);
        field_index.encode(self.sink);
        self
    }

    /// Encode `struct.get_s` (signed packed-field read).
    pub fn struct_get_s(&mut self, struct_type_index: u32, field_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x03);
        struct_type_index.encode(self.sink);
        field_index.encode(self.sink);
        self
    }

    /// Encode `struct.get_u` (unsigned packed-field read).
    pub fn struct_get_u(&mut self, struct_type_index: u32, field_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x04);
        struct_type_index.encode(self.sink);
        field_index.encode(self.sink);
        self
    }

    /// Encode `struct.set` of field `field_index` on struct type
    /// `struct_type_index`.
    pub fn struct_set(&mut self, struct_type_index: u32, field_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x05);
        struct_type_index.encode(self.sink);
        field_index.encode(self.sink);
        self
    }

    /// Encode `array.new` for array type `type_index`.
    pub fn array_new(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x06);
        type_index.encode(self.sink);
        self
    }

    /// Encode `array.new_default` for array type `type_index`.
    pub fn array_new_default(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x07);
        type_index.encode(self.sink);
        self
    }

    /// Encode `array.new_fixed` for array type `array_type_index` with
    /// `array_size` operand elements.
    pub fn array_new_fixed(&mut self, array_type_index: u32, array_size: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x08);
        array_type_index.encode(self.sink);
        array_size.encode(self.sink);
        self
    }

    /// Encode `array.new_data` from data segment `array_data_index`.
    pub fn array_new_data(&mut self, array_type_index: u32, array_data_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x09);
        array_type_index.encode(self.sink);
        array_data_index.encode(self.sink);
        self
    }

    /// Encode `array.new_elem` from element segment `array_elem_index`.
    pub fn array_new_elem(&mut self, array_type_index: u32, array_elem_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x0a);
        array_type_index.encode(self.sink);
        array_elem_index.encode(self.sink);
        self
    }

    /// Encode `array.get` for array type `type_index`.
    pub fn array_get(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x0b);
        type_index.encode(self.sink);
        self
    }

    /// Encode `array.get_s` (signed packed-element read).
    pub fn array_get_s(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x0c);
        type_index.encode(self.sink);
        self
    }

    /// Encode `array.get_u` (unsigned packed-element read).
    pub fn array_get_u(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x0d);
        type_index.encode(self.sink);
        self
    }

    /// Encode `array.set` for array type `type_index`.
    pub fn array_set(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x0e);
        type_index.encode(self.sink);
        self
    }

    /// Encode `array.len`.
    pub fn array_len(&mut self) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x0f);
        self
    }

    /// Encode `array.fill` for array type `type_index`.
    pub fn array_fill(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x10);
        type_index.encode(self.sink);
        self
    }

    /// Encode `array.copy`; the destination array type index is encoded
    /// before the source.
    pub fn array_copy(
        &mut self,
        array_type_index_dst: u32,
        array_type_index_src: u32,
    ) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x11);
        array_type_index_dst.encode(self.sink);
        array_type_index_src.encode(self.sink);
        self
    }

    /// Encode `array.init_data` from data segment `array_data_index`.
    pub fn array_init_data(&mut self, array_type_index: u32, array_data_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x12);
        array_type_index.encode(self.sink);
        array_data_index.encode(self.sink);
        self
    }

    /// Encode `array.init_elem` from element segment `array_elem_index`.
    pub fn array_init_elem(&mut self, array_type_index: u32, array_elem_index: u32) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x13);
        array_type_index.encode(self.sink);
        array_elem_index.encode(self.sink);
        self
    }
1562
    // Type-test, cast, and branch-on-cast instructions.

    /// Encode `ref.test` against a non-nullable reference to `heap_type`.
    pub fn ref_test_non_null(&mut self, heap_type: HeapType) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x14);
        heap_type.encode(self.sink);
        self
    }

    /// Encode `ref.test` against a nullable reference to `heap_type`.
    pub fn ref_test_nullable(&mut self, heap_type: HeapType) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x15);
        heap_type.encode(self.sink);
        self
    }

    /// Encode `ref.cast` to a non-nullable reference to `heap_type`.
    pub fn ref_cast_non_null(&mut self, heap_type: HeapType) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x16);
        heap_type.encode(self.sink);
        self
    }

    /// Encode `ref.cast` to a nullable reference to `heap_type`.
    pub fn ref_cast_nullable(&mut self, heap_type: HeapType) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x17);
        heap_type.encode(self.sink);
        self
    }

    /// Encode `br_on_cast` to label depth `relative_depth`, casting from
    /// `from_ref_type` to `to_ref_type`.
    pub fn br_on_cast(
        &mut self,
        relative_depth: u32,
        from_ref_type: RefType,
        to_ref_type: RefType,
    ) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x18);
        // Flag byte: bit 0 = source nullability, bit 1 = target nullability.
        let cast_flags = (from_ref_type.nullable as u8) | ((to_ref_type.nullable as u8) << 1);
        self.sink.push(cast_flags);
        relative_depth.encode(self.sink);
        from_ref_type.heap_type.encode(self.sink);
        to_ref_type.heap_type.encode(self.sink);
        self
    }

    /// Encode `br_on_cast_fail` to label depth `relative_depth`, casting
    /// from `from_ref_type` to `to_ref_type`.
    pub fn br_on_cast_fail(
        &mut self,
        relative_depth: u32,
        from_ref_type: RefType,
        to_ref_type: RefType,
    ) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x19);
        // Flag byte: bit 0 = source nullability, bit 1 = target nullability.
        let cast_flags = (from_ref_type.nullable as u8) | ((to_ref_type.nullable as u8) << 1);
        self.sink.push(cast_flags);
        relative_depth.encode(self.sink);
        from_ref_type.heap_type.encode(self.sink);
        to_ref_type.heap_type.encode(self.sink);
        self
    }
1628
    // Extern/any conversions and i31 instructions.

    /// Encode `any.convert_extern`.
    pub fn any_convert_extern(&mut self) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x1a);
        self
    }

    /// Encode `extern.convert_any`.
    pub fn extern_convert_any(&mut self) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x1b);
        self
    }

    /// Encode `ref.i31`.
    pub fn ref_i31(&mut self) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x1c);
        self
    }

    /// Encode `i31.get_s`.
    pub fn i31_get_s(&mut self) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x1d);
        self
    }

    /// Encode `i31.get_u`.
    pub fn i31_get_u(&mut self) -> &mut Self {
        self.sink.push(0xfb);
        self.sink.push(0x1e);
        self
    }
1663
    // Table instructions.

    /// Encode `table.init` for table `table` from element segment
    /// `elem_index`; the segment index is encoded before the table index.
    pub fn table_init(&mut self, table: u32, elem_index: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x0c);
        elem_index.encode(self.sink);
        table.encode(self.sink);
        self
    }

    /// Encode `elem.drop` of element segment `segment`.
    pub fn elem_drop(&mut self, segment: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x0d);
        segment.encode(self.sink);
        self
    }

    /// Encode `table.fill` for table index `table`.
    pub fn table_fill(&mut self, table: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x11);
        table.encode(self.sink);
        self
    }

    /// Encode `table.set` for table index `table`.
    pub fn table_set(&mut self, table: u32) -> &mut Self {
        self.sink.push(0x26);
        table.encode(self.sink);
        self
    }

    /// Encode `table.get` for table index `table`.
    pub fn table_get(&mut self, table: u32) -> &mut Self {
        self.sink.push(0x25);
        table.encode(self.sink);
        self
    }

    /// Encode `table.grow` for table index `table`.
    pub fn table_grow(&mut self, table: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x0f);
        table.encode(self.sink);
        self
    }

    /// Encode `table.size` for table index `table`.
    pub fn table_size(&mut self, table: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x10);
        table.encode(self.sink);
        self
    }

    /// Encode `table.copy` from `src_table` to `dst_table`; the destination
    /// table index is encoded before the source.
    pub fn table_copy(&mut self, dst_table: u32, src_table: u32) -> &mut Self {
        self.sink.push(0xfc);
        self.sink.push(0x0e);
        dst_table.encode(self.sink);
        src_table.encode(self.sink);
        self
    }
1729
    // SIMD load/store instructions: 0xFD prefix followed by a
    // LEB128-encoded sub-opcode (hence the `u32` encode calls).

    /// Encode `v128.load` with memory immediate `memarg`.
    pub fn v128_load(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x00u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode `v128.load8x8_s` with memory immediate `memarg`.
    pub fn v128_load8x8_s(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x01u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode `v128.load8x8_u` with memory immediate `memarg`.
    pub fn v128_load8x8_u(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x02u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode `v128.load16x4_s` with memory immediate `memarg`.
    pub fn v128_load16x4_s(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x03u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode `v128.load16x4_u` with memory immediate `memarg`.
    pub fn v128_load16x4_u(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x04u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode `v128.load32x2_s` with memory immediate `memarg`.
    pub fn v128_load32x2_s(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x05u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode `v128.load32x2_u` with memory immediate `memarg`.
    pub fn v128_load32x2_u(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x06u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode `v128.load8_splat` with memory immediate `memarg`.
    pub fn v128_load8_splat(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x07u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode `v128.load16_splat` with memory immediate `memarg`.
    pub fn v128_load16_splat(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x08u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode `v128.load32_splat` with memory immediate `memarg`.
    pub fn v128_load32_splat(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x09u32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode `v128.load64_splat` with memory immediate `memarg`.
    pub fn v128_load64_splat(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x0Au32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode `v128.load32_zero` with memory immediate `memarg`.
    pub fn v128_load32_zero(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x5Cu32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode `v128.load64_zero` with memory immediate `memarg`.
    pub fn v128_load64_zero(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x5Du32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }

    /// Encode `v128.store` with memory immediate `memarg`.
    pub fn v128_store(&mut self, memarg: MemArg) -> &mut Self {
        self.sink.push(0xFD);
        0x0Bu32.encode(self.sink);
        memarg.encode(self.sink);
        self
    }
1843
    /// Encode the `v128.load8_lane` instruction.
    ///
    /// # Panics
    ///
    /// Panics if `lane` is not less than 16.
    pub fn v128_load8_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x54u32.encode(self.sink);
        memarg.encode(self.sink);
        assert!(lane < 16);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.load16_lane` instruction.
    ///
    /// # Panics
    ///
    /// Panics if `lane` is not less than 8.
    pub fn v128_load16_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x55u32.encode(self.sink);
        memarg.encode(self.sink);
        assert!(lane < 8);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.load32_lane` instruction.
    ///
    /// # Panics
    ///
    /// Panics if `lane` is not less than 4.
    pub fn v128_load32_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x56u32.encode(self.sink);
        memarg.encode(self.sink);
        assert!(lane < 4);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.load64_lane` instruction.
    ///
    /// # Panics
    ///
    /// Panics if `lane` is not less than 2.
    pub fn v128_load64_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x57u32.encode(self.sink);
        memarg.encode(self.sink);
        assert!(lane < 2);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.store8_lane` instruction.
    ///
    /// # Panics
    ///
    /// Panics if `lane` is not less than 16.
    pub fn v128_store8_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x58u32.encode(self.sink);
        memarg.encode(self.sink);
        assert!(lane < 16);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.store16_lane` instruction.
    ///
    /// # Panics
    ///
    /// Panics if `lane` is not less than 8.
    pub fn v128_store16_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x59u32.encode(self.sink);
        memarg.encode(self.sink);
        assert!(lane < 8);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.store32_lane` instruction.
    ///
    /// # Panics
    ///
    /// Panics if `lane` is not less than 4.
    pub fn v128_store32_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x5Au32.encode(self.sink);
        memarg.encode(self.sink);
        assert!(lane < 4);
        self.sink.push(lane);
        self
    }

    /// Encode the `v128.store64_lane` instruction.
    ///
    /// # Panics
    ///
    /// Panics if `lane` is not less than 2.
    pub fn v128_store64_lane(&mut self, memarg: MemArg, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x5Bu32.encode(self.sink);
        memarg.encode(self.sink);
        assert!(lane < 2);
        self.sink.push(lane);
        self
    }
1923
    /// Encode the `v128.const` instruction; `x` is emitted as 16
    /// little-endian immediate bytes.
    pub fn v128_const(&mut self, x: i128) -> &mut Self {
        self.sink.push(0xFD);
        0x0Cu32.encode(self.sink);
        self.sink.extend(x.to_le_bytes().iter().copied());
        self
    }

    /// Encode the `i8x16.shuffle` instruction.
    ///
    /// # Panics
    ///
    /// Panics if any lane index is not less than 32 (lanes select from
    /// the 32 bytes of the two input vectors).
    pub fn i8x16_shuffle(&mut self, lanes: [Lane; 16]) -> &mut Self {
        self.sink.push(0xFD);
        0x0Du32.encode(self.sink);
        assert!(lanes.iter().all(|l: &u8| *l < 32));
        self.sink.extend(lanes.iter().copied());
        self
    }
1940
    /// Encode the `i8x16.extract_lane_s` instruction; panics if `lane >= 16`.
    pub fn i8x16_extract_lane_s(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x15u32.encode(self.sink);
        assert!(lane < 16);
        self.sink.push(lane);
        self
    }

    /// Encode the `i8x16.extract_lane_u` instruction; panics if `lane >= 16`.
    pub fn i8x16_extract_lane_u(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x16u32.encode(self.sink);
        assert!(lane < 16);
        self.sink.push(lane);
        self
    }

    /// Encode the `i8x16.replace_lane` instruction; panics if `lane >= 16`.
    pub fn i8x16_replace_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x17u32.encode(self.sink);
        assert!(lane < 16);
        self.sink.push(lane);
        self
    }

    /// Encode the `i16x8.extract_lane_s` instruction; panics if `lane >= 8`.
    pub fn i16x8_extract_lane_s(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x18u32.encode(self.sink);
        assert!(lane < 8);
        self.sink.push(lane);
        self
    }

    /// Encode the `i16x8.extract_lane_u` instruction; panics if `lane >= 8`.
    pub fn i16x8_extract_lane_u(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x19u32.encode(self.sink);
        assert!(lane < 8);
        self.sink.push(lane);
        self
    }

    /// Encode the `i16x8.replace_lane` instruction; panics if `lane >= 8`.
    pub fn i16x8_replace_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x1Au32.encode(self.sink);
        assert!(lane < 8);
        self.sink.push(lane);
        self
    }

    /// Encode the `i32x4.extract_lane` instruction; panics if `lane >= 4`.
    pub fn i32x4_extract_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x1Bu32.encode(self.sink);
        assert!(lane < 4);
        self.sink.push(lane);
        self
    }

    /// Encode the `i32x4.replace_lane` instruction; panics if `lane >= 4`.
    pub fn i32x4_replace_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x1Cu32.encode(self.sink);
        assert!(lane < 4);
        self.sink.push(lane);
        self
    }

    /// Encode the `i64x2.extract_lane` instruction; panics if `lane >= 2`.
    pub fn i64x2_extract_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x1Du32.encode(self.sink);
        assert!(lane < 2);
        self.sink.push(lane);
        self
    }

    /// Encode the `i64x2.replace_lane` instruction; panics if `lane >= 2`.
    pub fn i64x2_replace_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x1Eu32.encode(self.sink);
        assert!(lane < 2);
        self.sink.push(lane);
        self
    }

    /// Encode the `f32x4.extract_lane` instruction; panics if `lane >= 4`.
    pub fn f32x4_extract_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x1Fu32.encode(self.sink);
        assert!(lane < 4);
        self.sink.push(lane);
        self
    }

    /// Encode the `f32x4.replace_lane` instruction; panics if `lane >= 4`.
    pub fn f32x4_replace_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x20u32.encode(self.sink);
        assert!(lane < 4);
        self.sink.push(lane);
        self
    }

    /// Encode the `f64x2.extract_lane` instruction; panics if `lane >= 2`.
    pub fn f64x2_extract_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x21u32.encode(self.sink);
        assert!(lane < 2);
        self.sink.push(lane);
        self
    }

    /// Encode the `f64x2.replace_lane` instruction; panics if `lane >= 2`.
    pub fn f64x2_replace_lane(&mut self, lane: Lane) -> &mut Self {
        self.sink.push(0xFD);
        0x22u32.encode(self.sink);
        assert!(lane < 2);
        self.sink.push(lane);
        self
    }
2066
    /// Encode the `i8x16.swizzle` instruction.
    pub fn i8x16_swizzle(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x0Eu32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.splat` instruction.
    pub fn i8x16_splat(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x0Fu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.splat` instruction.
    pub fn i16x8_splat(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.splat` instruction.
    pub fn i32x4_splat(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x11u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.splat` instruction.
    pub fn i64x2_splat(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x12u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.splat` instruction.
    pub fn f32x4_splat(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x13u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.splat` instruction.
    pub fn f64x2_splat(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x14u32.encode(self.sink);
        self
    }
2115
    /// Encode the `i8x16.eq` instruction.
    pub fn i8x16_eq(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x23u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.ne` instruction.
    pub fn i8x16_ne(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x24u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.lt_s` instruction.
    pub fn i8x16_lt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x25u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.lt_u` instruction.
    pub fn i8x16_lt_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x26u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.gt_s` instruction.
    pub fn i8x16_gt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x27u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.gt_u` instruction.
    pub fn i8x16_gt_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x28u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.le_s` instruction.
    pub fn i8x16_le_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x29u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.le_u` instruction.
    pub fn i8x16_le_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x2Au32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.ge_s` instruction.
    pub fn i8x16_ge_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x2Bu32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.ge_u` instruction.
    pub fn i8x16_ge_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x2Cu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.eq` instruction.
    pub fn i16x8_eq(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x2Du32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.ne` instruction.
    pub fn i16x8_ne(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x2Eu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.lt_s` instruction.
    pub fn i16x8_lt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x2Fu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.lt_u` instruction.
    pub fn i16x8_lt_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x30u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.gt_s` instruction.
    pub fn i16x8_gt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x31u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.gt_u` instruction.
    pub fn i16x8_gt_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x32u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.le_s` instruction.
    pub fn i16x8_le_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x33u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.le_u` instruction.
    pub fn i16x8_le_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x34u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.ge_s` instruction.
    pub fn i16x8_ge_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x35u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.ge_u` instruction.
    pub fn i16x8_ge_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x36u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.eq` instruction.
    pub fn i32x4_eq(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x37u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.ne` instruction.
    pub fn i32x4_ne(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x38u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.lt_s` instruction.
    pub fn i32x4_lt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x39u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.lt_u` instruction.
    pub fn i32x4_lt_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x3Au32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.gt_s` instruction.
    pub fn i32x4_gt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x3Bu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.gt_u` instruction.
    pub fn i32x4_gt_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x3Cu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.le_s` instruction.
    pub fn i32x4_le_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x3Du32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.le_u` instruction.
    pub fn i32x4_le_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x3Eu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.ge_s` instruction.
    pub fn i32x4_ge_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x3Fu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.ge_u` instruction.
    pub fn i32x4_ge_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x40u32.encode(self.sink);
        self
    }
2325
    /// Encode the `i64x2.eq` instruction.
    pub fn i64x2_eq(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xD6u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.ne` instruction.
    pub fn i64x2_ne(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xD7u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.lt_s` instruction.
    pub fn i64x2_lt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xD8u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.gt_s` instruction.
    pub fn i64x2_gt_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xD9u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.le_s` instruction.
    pub fn i64x2_le_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xDAu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.ge_s` instruction.
    pub fn i64x2_ge_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xDBu32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.eq` instruction.
    pub fn f32x4_eq(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x41u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.ne` instruction.
    pub fn f32x4_ne(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x42u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.lt` instruction.
    pub fn f32x4_lt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x43u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.gt` instruction.
    pub fn f32x4_gt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x44u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.le` instruction.
    pub fn f32x4_le(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x45u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.ge` instruction.
    pub fn f32x4_ge(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x46u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.eq` instruction.
    pub fn f64x2_eq(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x47u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.ne` instruction.
    pub fn f64x2_ne(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x48u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.lt` instruction.
    pub fn f64x2_lt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x49u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.gt` instruction.
    pub fn f64x2_gt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x4Au32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.le` instruction.
    pub fn f64x2_le(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x4Bu32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.ge` instruction.
    pub fn f64x2_ge(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x4Cu32.encode(self.sink);
        self
    }
2451
2452 pub fn v128_not(&mut self) -> &mut Self {
2454 self.sink.push(0xFD);
2455 0x4Du32.encode(self.sink);
2456 self
2457 }
2458
2459 pub fn v128_and(&mut self) -> &mut Self {
2461 self.sink.push(0xFD);
2462 0x4Eu32.encode(self.sink);
2463 self
2464 }
2465
2466 pub fn v128_andnot(&mut self) -> &mut Self {
2468 self.sink.push(0xFD);
2469 0x4Fu32.encode(self.sink);
2470 self
2471 }
2472
2473 pub fn v128_or(&mut self) -> &mut Self {
2475 self.sink.push(0xFD);
2476 0x50u32.encode(self.sink);
2477 self
2478 }
2479
2480 pub fn v128_xor(&mut self) -> &mut Self {
2482 self.sink.push(0xFD);
2483 0x51u32.encode(self.sink);
2484 self
2485 }
2486
2487 pub fn v128_bitselect(&mut self) -> &mut Self {
2489 self.sink.push(0xFD);
2490 0x52u32.encode(self.sink);
2491 self
2492 }
2493
2494 pub fn v128_any_true(&mut self) -> &mut Self {
2496 self.sink.push(0xFD);
2497 0x53u32.encode(self.sink);
2498 self
2499 }
2500
    /// Encode the `i8x16.abs` instruction.
    pub fn i8x16_abs(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x60u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.neg` instruction.
    pub fn i8x16_neg(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x61u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.popcnt` instruction.
    pub fn i8x16_popcnt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x62u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.all_true` instruction.
    pub fn i8x16_all_true(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x63u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.bitmask` instruction.
    pub fn i8x16_bitmask(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x64u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.narrow_i16x8_s` instruction.
    pub fn i8x16_narrow_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x65u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.narrow_i16x8_u` instruction.
    pub fn i8x16_narrow_i16x8_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x66u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.shl` instruction.
    pub fn i8x16_shl(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x6bu32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.shr_s` instruction.
    pub fn i8x16_shr_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x6cu32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.shr_u` instruction.
    pub fn i8x16_shr_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x6du32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.add` instruction.
    pub fn i8x16_add(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x6eu32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.add_sat_s` instruction.
    pub fn i8x16_add_sat_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x6fu32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.add_sat_u` instruction.
    pub fn i8x16_add_sat_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x70u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.sub` instruction.
    pub fn i8x16_sub(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x71u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.sub_sat_s` instruction.
    pub fn i8x16_sub_sat_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x72u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.sub_sat_u` instruction.
    pub fn i8x16_sub_sat_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x73u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.min_s` instruction.
    pub fn i8x16_min_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x76u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.min_u` instruction.
    pub fn i8x16_min_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x77u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.max_s` instruction.
    pub fn i8x16_max_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x78u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.max_u` instruction.
    pub fn i8x16_max_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x79u32.encode(self.sink);
        self
    }

    /// Encode the `i8x16.avgr_u` instruction.
    pub fn i8x16_avgr_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x7Bu32.encode(self.sink);
        self
    }
2647
    /// Encode the `i16x8.extadd_pairwise_i8x16_s` instruction.
    pub fn i16x8_extadd_pairwise_i8x16_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x7Cu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extadd_pairwise_i8x16_u` instruction.
    pub fn i16x8_extadd_pairwise_i8x16_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x7Du32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.abs` instruction.
    pub fn i16x8_abs(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x80u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.neg` instruction.
    pub fn i16x8_neg(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x81u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.q15mulr_sat_s` instruction.
    pub fn i16x8_q15mulr_sat_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x82u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.all_true` instruction.
    pub fn i16x8_all_true(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x83u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.bitmask` instruction.
    pub fn i16x8_bitmask(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x84u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.narrow_i32x4_s` instruction.
    pub fn i16x8_narrow_i32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x85u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.narrow_i32x4_u` instruction.
    pub fn i16x8_narrow_i32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x86u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extend_low_i8x16_s` instruction.
    pub fn i16x8_extend_low_i8x16_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x87u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extend_high_i8x16_s` instruction.
    pub fn i16x8_extend_high_i8x16_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x88u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extend_low_i8x16_u` instruction.
    pub fn i16x8_extend_low_i8x16_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x89u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extend_high_i8x16_u` instruction.
    pub fn i16x8_extend_high_i8x16_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x8Au32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.shl` instruction.
    pub fn i16x8_shl(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x8Bu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.shr_s` instruction.
    pub fn i16x8_shr_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x8Cu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.shr_u` instruction.
    pub fn i16x8_shr_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x8Du32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.add` instruction.
    pub fn i16x8_add(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x8Eu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.add_sat_s` instruction.
    pub fn i16x8_add_sat_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x8Fu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.add_sat_u` instruction.
    pub fn i16x8_add_sat_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x90u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.sub` instruction.
    pub fn i16x8_sub(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x91u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.sub_sat_s` instruction.
    pub fn i16x8_sub_sat_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x92u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.sub_sat_u` instruction.
    pub fn i16x8_sub_sat_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x93u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.mul` instruction.
    pub fn i16x8_mul(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x95u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.min_s` instruction.
    pub fn i16x8_min_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x96u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.min_u` instruction.
    pub fn i16x8_min_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x97u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.max_s` instruction.
    pub fn i16x8_max_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x98u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.max_u` instruction.
    pub fn i16x8_max_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x99u32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.avgr_u` instruction.
    pub fn i16x8_avgr_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x9Bu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extmul_low_i8x16_s` instruction.
    pub fn i16x8_extmul_low_i8x16_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x9Cu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extmul_high_i8x16_s` instruction.
    pub fn i16x8_extmul_high_i8x16_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x9Du32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extmul_low_i8x16_u` instruction.
    pub fn i16x8_extmul_low_i8x16_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x9Eu32.encode(self.sink);
        self
    }

    /// Encode the `i16x8.extmul_high_i8x16_u` instruction.
    pub fn i16x8_extmul_high_i8x16_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x9Fu32.encode(self.sink);
        self
    }
2871
    /// Encode the `i32x4.extadd_pairwise_i16x8_s` instruction.
    pub fn i32x4_extadd_pairwise_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x7Eu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extadd_pairwise_i16x8_u` instruction.
    pub fn i32x4_extadd_pairwise_i16x8_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x7Fu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.abs` instruction.
    pub fn i32x4_abs(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA0u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.neg` instruction.
    pub fn i32x4_neg(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA1u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.all_true` instruction.
    pub fn i32x4_all_true(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA3u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.bitmask` instruction.
    pub fn i32x4_bitmask(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA4u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extend_low_i16x8_s` instruction.
    pub fn i32x4_extend_low_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA7u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extend_high_i16x8_s` instruction.
    pub fn i32x4_extend_high_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA8u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extend_low_i16x8_u` instruction.
    pub fn i32x4_extend_low_i16x8_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xA9u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extend_high_i16x8_u` instruction.
    pub fn i32x4_extend_high_i16x8_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xAAu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.shl` instruction.
    pub fn i32x4_shl(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xABu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.shr_s` instruction.
    pub fn i32x4_shr_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xACu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.shr_u` instruction.
    pub fn i32x4_shr_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xADu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.add` instruction.
    pub fn i32x4_add(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xAEu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.sub` instruction.
    pub fn i32x4_sub(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xB1u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.mul` instruction.
    pub fn i32x4_mul(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xB5u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.min_s` instruction.
    pub fn i32x4_min_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xB6u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.min_u` instruction.
    pub fn i32x4_min_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xB7u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.max_s` instruction.
    pub fn i32x4_max_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xB8u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.max_u` instruction.
    pub fn i32x4_max_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xB9u32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.dot_i16x8_s` instruction.
    pub fn i32x4_dot_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xBAu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extmul_low_i16x8_s` instruction.
    pub fn i32x4_extmul_low_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xBCu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extmul_high_i16x8_s` instruction.
    pub fn i32x4_extmul_high_i16x8_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xBDu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extmul_low_i16x8_u` instruction.
    pub fn i32x4_extmul_low_i16x8_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xBEu32.encode(self.sink);
        self
    }

    /// Encode the `i32x4.extmul_high_i16x8_u` instruction.
    pub fn i32x4_extmul_high_i16x8_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xBFu32.encode(self.sink);
        self
    }
3046
    /// Encode the `i64x2.abs` instruction.
    pub fn i64x2_abs(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC0u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.neg` instruction.
    pub fn i64x2_neg(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC1u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.all_true` instruction.
    pub fn i64x2_all_true(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC3u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.bitmask` instruction.
    pub fn i64x2_bitmask(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC4u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extend_low_i32x4_s` instruction.
    pub fn i64x2_extend_low_i32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC7u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extend_high_i32x4_s` instruction.
    pub fn i64x2_extend_high_i32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC8u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extend_low_i32x4_u` instruction.
    pub fn i64x2_extend_low_i32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xC9u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extend_high_i32x4_u` instruction.
    pub fn i64x2_extend_high_i32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xCAu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.shl` instruction.
    pub fn i64x2_shl(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xCBu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.shr_s` instruction.
    pub fn i64x2_shr_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xCCu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.shr_u` instruction.
    pub fn i64x2_shr_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xCDu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.add` instruction.
    pub fn i64x2_add(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xCEu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.sub` instruction.
    pub fn i64x2_sub(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xD1u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.mul` instruction.
    pub fn i64x2_mul(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xD5u32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extmul_low_i32x4_s` instruction.
    pub fn i64x2_extmul_low_i32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xDCu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extmul_high_i32x4_s` instruction.
    pub fn i64x2_extmul_high_i32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xDDu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extmul_low_i32x4_u` instruction.
    pub fn i64x2_extmul_low_i32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xDEu32.encode(self.sink);
        self
    }

    /// Encode the `i64x2.extmul_high_i32x4_u` instruction.
    pub fn i64x2_extmul_high_i32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xDFu32.encode(self.sink);
        self
    }
3172
    /// Encode the `f32x4.ceil` instruction.
    pub fn f32x4_ceil(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x67u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.floor` instruction.
    pub fn f32x4_floor(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x68u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.trunc` instruction.
    pub fn f32x4_trunc(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x69u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.nearest` instruction.
    pub fn f32x4_nearest(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x6Au32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.abs` instruction.
    pub fn f32x4_abs(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE0u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.neg` instruction.
    pub fn f32x4_neg(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE1u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.sqrt` instruction.
    pub fn f32x4_sqrt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE3u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.add` instruction.
    pub fn f32x4_add(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE4u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.sub` instruction.
    pub fn f32x4_sub(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE5u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.mul` instruction.
    pub fn f32x4_mul(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE6u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.div` instruction.
    pub fn f32x4_div(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE7u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.min` instruction.
    pub fn f32x4_min(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE8u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.max` instruction.
    pub fn f32x4_max(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xE9u32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.pmin` instruction.
    pub fn f32x4_pmin(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xEAu32.encode(self.sink);
        self
    }

    /// Encode the `f32x4.pmax` instruction.
    pub fn f32x4_pmax(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xEBu32.encode(self.sink);
        self
    }
3277
    /// Encode the `f64x2.ceil` instruction.
    pub fn f64x2_ceil(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x74u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.floor` instruction.
    pub fn f64x2_floor(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x75u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.trunc` instruction.
    pub fn f64x2_trunc(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x7Au32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.nearest` instruction.
    pub fn f64x2_nearest(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x94u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.abs` instruction.
    pub fn f64x2_abs(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xECu32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.neg` instruction.
    pub fn f64x2_neg(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xEDu32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.sqrt` instruction.
    pub fn f64x2_sqrt(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xEFu32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.add` instruction.
    pub fn f64x2_add(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF0u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.sub` instruction.
    pub fn f64x2_sub(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF1u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.mul` instruction.
    pub fn f64x2_mul(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF2u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.div` instruction.
    pub fn f64x2_div(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF3u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.min` instruction.
    pub fn f64x2_min(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF4u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.max` instruction.
    pub fn f64x2_max(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF5u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.pmin` instruction.
    pub fn f64x2_pmin(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF6u32.encode(self.sink);
        self
    }

    /// Encode the `f64x2.pmax` instruction.
    pub fn f64x2_pmax(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0xF7u32.encode(self.sink);
        self
    }
3382
3383 pub fn i32x4_trunc_sat_f32x4_s(&mut self) -> &mut Self {
3385 self.sink.push(0xFD);
3386 0xF8u32.encode(self.sink);
3387 self
3388 }
3389
3390 pub fn i32x4_trunc_sat_f32x4_u(&mut self) -> &mut Self {
3392 self.sink.push(0xFD);
3393 0xF9u32.encode(self.sink);
3394 self
3395 }
3396
3397 pub fn f32x4_convert_i32x4_s(&mut self) -> &mut Self {
3399 self.sink.push(0xFD);
3400 0xFAu32.encode(self.sink);
3401 self
3402 }
3403
3404 pub fn f32x4_convert_i32x4_u(&mut self) -> &mut Self {
3406 self.sink.push(0xFD);
3407 0xFBu32.encode(self.sink);
3408 self
3409 }
3410
3411 pub fn i32x4_trunc_sat_f64x2_s_zero(&mut self) -> &mut Self {
3413 self.sink.push(0xFD);
3414 0xFCu32.encode(self.sink);
3415 self
3416 }
3417
3418 pub fn i32x4_trunc_sat_f64x2_u_zero(&mut self) -> &mut Self {
3420 self.sink.push(0xFD);
3421 0xFDu32.encode(self.sink);
3422 self
3423 }
3424
3425 pub fn f64x2_convert_low_i32x4_s(&mut self) -> &mut Self {
3427 self.sink.push(0xFD);
3428 0xFEu32.encode(self.sink);
3429 self
3430 }
3431
3432 pub fn f64x2_convert_low_i32x4_u(&mut self) -> &mut Self {
3434 self.sink.push(0xFD);
3435 0xFFu32.encode(self.sink);
3436 self
3437 }
3438
3439 pub fn f32x4_demote_f64x2_zero(&mut self) -> &mut Self {
3441 self.sink.push(0xFD);
3442 0x5Eu32.encode(self.sink);
3443 self
3444 }
3445
3446 pub fn f64x2_promote_low_f32x4(&mut self) -> &mut Self {
3448 self.sink.push(0xFD);
3449 0x5Fu32.encode(self.sink);
3450 self
3451 }
3452
    /// Encode [`Instruction::I8x16RelaxedSwizzle`].
    pub fn i8x16_relaxed_swizzle(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x100u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I32x4RelaxedTruncF32x4S`].
    pub fn i32x4_relaxed_trunc_f32x4_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x101u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I32x4RelaxedTruncF32x4U`].
    pub fn i32x4_relaxed_trunc_f32x4_u(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x102u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I32x4RelaxedTruncF64x2SZero`].
    pub fn i32x4_relaxed_trunc_f64x2_s_zero(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x103u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I32x4RelaxedTruncF64x2UZero`].
    pub fn i32x4_relaxed_trunc_f64x2_u_zero(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x104u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::F32x4RelaxedMadd`].
    pub fn f32x4_relaxed_madd(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x105u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::F32x4RelaxedNmadd`].
    pub fn f32x4_relaxed_nmadd(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x106u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::F64x2RelaxedMadd`].
    pub fn f64x2_relaxed_madd(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x107u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::F64x2RelaxedNmadd`].
    pub fn f64x2_relaxed_nmadd(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x108u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I8x16RelaxedLaneselect`].
    pub fn i8x16_relaxed_laneselect(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x109u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I16x8RelaxedLaneselect`].
    pub fn i16x8_relaxed_laneselect(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10Au32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I32x4RelaxedLaneselect`].
    pub fn i32x4_relaxed_laneselect(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10Bu32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I64x2RelaxedLaneselect`].
    pub fn i64x2_relaxed_laneselect(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10Cu32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::F32x4RelaxedMin`].
    pub fn f32x4_relaxed_min(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10Du32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::F32x4RelaxedMax`].
    pub fn f32x4_relaxed_max(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10Eu32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::F64x2RelaxedMin`].
    pub fn f64x2_relaxed_min(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x10Fu32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::F64x2RelaxedMax`].
    pub fn f64x2_relaxed_max(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x110u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I16x8RelaxedQ15mulrS`].
    pub fn i16x8_relaxed_q15mulr_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x111u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I16x8RelaxedDotI8x16I7x16S`].
    pub fn i16x8_relaxed_dot_i8x16_i7x16_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x112u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I32x4RelaxedDotI8x16I7x16AddS`].
    pub fn i32x4_relaxed_dot_i8x16_i7x16_add_s(&mut self) -> &mut Self {
        self.sink.push(0xFD);
        0x113u32.encode(self.sink);
        self
    }
3594
3595 pub fn memory_atomic_notify(&mut self, memarg: MemArg) -> &mut Self {
3599 self.sink.push(0xFE);
3600 self.sink.push(0x00);
3601 memarg.encode(self.sink);
3602 self
3603 }
3604
3605 pub fn memory_atomic_wait32(&mut self, memarg: MemArg) -> &mut Self {
3607 self.sink.push(0xFE);
3608 self.sink.push(0x01);
3609 memarg.encode(self.sink);
3610 self
3611 }
3612
3613 pub fn memory_atomic_wait64(&mut self, memarg: MemArg) -> &mut Self {
3615 self.sink.push(0xFE);
3616 self.sink.push(0x02);
3617 memarg.encode(self.sink);
3618 self
3619 }
3620
3621 pub fn atomic_fence(&mut self) -> &mut Self {
3623 self.sink.push(0xFE);
3624 self.sink.push(0x03);
3625 self.sink.push(0x00);
3626 self
3627 }
3628
3629 pub fn i32_atomic_load(&mut self, memarg: MemArg) -> &mut Self {
3631 self.sink.push(0xFE);
3632 self.sink.push(0x10);
3633 memarg.encode(self.sink);
3634 self
3635 }
3636
3637 pub fn i64_atomic_load(&mut self, memarg: MemArg) -> &mut Self {
3639 self.sink.push(0xFE);
3640 self.sink.push(0x11);
3641 memarg.encode(self.sink);
3642 self
3643 }
3644
3645 pub fn i32_atomic_load8_u(&mut self, memarg: MemArg) -> &mut Self {
3647 self.sink.push(0xFE);
3648 self.sink.push(0x12);
3649 memarg.encode(self.sink);
3650 self
3651 }
3652
3653 pub fn i32_atomic_load16_u(&mut self, memarg: MemArg) -> &mut Self {
3655 self.sink.push(0xFE);
3656 self.sink.push(0x13);
3657 memarg.encode(self.sink);
3658 self
3659 }
3660
3661 pub fn i64_atomic_load8_u(&mut self, memarg: MemArg) -> &mut Self {
3663 self.sink.push(0xFE);
3664 self.sink.push(0x14);
3665 memarg.encode(self.sink);
3666 self
3667 }
3668
3669 pub fn i64_atomic_load16_u(&mut self, memarg: MemArg) -> &mut Self {
3671 self.sink.push(0xFE);
3672 self.sink.push(0x15);
3673 memarg.encode(self.sink);
3674 self
3675 }
3676
3677 pub fn i64_atomic_load32_u(&mut self, memarg: MemArg) -> &mut Self {
3679 self.sink.push(0xFE);
3680 self.sink.push(0x16);
3681 memarg.encode(self.sink);
3682 self
3683 }
3684
3685 pub fn i32_atomic_store(&mut self, memarg: MemArg) -> &mut Self {
3687 self.sink.push(0xFE);
3688 self.sink.push(0x17);
3689 memarg.encode(self.sink);
3690 self
3691 }
3692
3693 pub fn i64_atomic_store(&mut self, memarg: MemArg) -> &mut Self {
3695 self.sink.push(0xFE);
3696 self.sink.push(0x18);
3697 memarg.encode(self.sink);
3698 self
3699 }
3700
3701 pub fn i32_atomic_store8(&mut self, memarg: MemArg) -> &mut Self {
3703 self.sink.push(0xFE);
3704 self.sink.push(0x19);
3705 memarg.encode(self.sink);
3706 self
3707 }
3708
3709 pub fn i32_atomic_store16(&mut self, memarg: MemArg) -> &mut Self {
3711 self.sink.push(0xFE);
3712 self.sink.push(0x1A);
3713 memarg.encode(self.sink);
3714 self
3715 }
3716
3717 pub fn i64_atomic_store8(&mut self, memarg: MemArg) -> &mut Self {
3719 self.sink.push(0xFE);
3720 self.sink.push(0x1B);
3721 memarg.encode(self.sink);
3722 self
3723 }
3724
3725 pub fn i64_atomic_store16(&mut self, memarg: MemArg) -> &mut Self {
3727 self.sink.push(0xFE);
3728 self.sink.push(0x1C);
3729 memarg.encode(self.sink);
3730 self
3731 }
3732
3733 pub fn i64_atomic_store32(&mut self, memarg: MemArg) -> &mut Self {
3735 self.sink.push(0xFE);
3736 self.sink.push(0x1D);
3737 memarg.encode(self.sink);
3738 self
3739 }
3740
3741 pub fn i32_atomic_rmw_add(&mut self, memarg: MemArg) -> &mut Self {
3743 self.sink.push(0xFE);
3744 self.sink.push(0x1E);
3745 memarg.encode(self.sink);
3746 self
3747 }
3748
3749 pub fn i64_atomic_rmw_add(&mut self, memarg: MemArg) -> &mut Self {
3751 self.sink.push(0xFE);
3752 self.sink.push(0x1F);
3753 memarg.encode(self.sink);
3754 self
3755 }
3756
3757 pub fn i32_atomic_rmw8_add_u(&mut self, memarg: MemArg) -> &mut Self {
3759 self.sink.push(0xFE);
3760 self.sink.push(0x20);
3761 memarg.encode(self.sink);
3762 self
3763 }
3764
3765 pub fn i32_atomic_rmw16_add_u(&mut self, memarg: MemArg) -> &mut Self {
3767 self.sink.push(0xFE);
3768 self.sink.push(0x21);
3769 memarg.encode(self.sink);
3770 self
3771 }
3772
3773 pub fn i64_atomic_rmw8_add_u(&mut self, memarg: MemArg) -> &mut Self {
3775 self.sink.push(0xFE);
3776 self.sink.push(0x22);
3777 memarg.encode(self.sink);
3778 self
3779 }
3780
3781 pub fn i64_atomic_rmw16_add_u(&mut self, memarg: MemArg) -> &mut Self {
3783 self.sink.push(0xFE);
3784 self.sink.push(0x23);
3785 memarg.encode(self.sink);
3786 self
3787 }
3788
3789 pub fn i64_atomic_rmw32_add_u(&mut self, memarg: MemArg) -> &mut Self {
3791 self.sink.push(0xFE);
3792 self.sink.push(0x24);
3793 memarg.encode(self.sink);
3794 self
3795 }
3796
3797 pub fn i32_atomic_rmw_sub(&mut self, memarg: MemArg) -> &mut Self {
3799 self.sink.push(0xFE);
3800 self.sink.push(0x25);
3801 memarg.encode(self.sink);
3802 self
3803 }
3804
3805 pub fn i64_atomic_rmw_sub(&mut self, memarg: MemArg) -> &mut Self {
3807 self.sink.push(0xFE);
3808 self.sink.push(0x26);
3809 memarg.encode(self.sink);
3810 self
3811 }
3812
3813 pub fn i32_atomic_rmw8_sub_u(&mut self, memarg: MemArg) -> &mut Self {
3815 self.sink.push(0xFE);
3816 self.sink.push(0x27);
3817 memarg.encode(self.sink);
3818 self
3819 }
3820
3821 pub fn i32_atomic_rmw16_sub_u(&mut self, memarg: MemArg) -> &mut Self {
3823 self.sink.push(0xFE);
3824 self.sink.push(0x28);
3825 memarg.encode(self.sink);
3826 self
3827 }
3828
3829 pub fn i64_atomic_rmw8_sub_u(&mut self, memarg: MemArg) -> &mut Self {
3831 self.sink.push(0xFE);
3832 self.sink.push(0x29);
3833 memarg.encode(self.sink);
3834 self
3835 }
3836
3837 pub fn i64_atomic_rmw16_sub_u(&mut self, memarg: MemArg) -> &mut Self {
3839 self.sink.push(0xFE);
3840 self.sink.push(0x2A);
3841 memarg.encode(self.sink);
3842 self
3843 }
3844
3845 pub fn i64_atomic_rmw32_sub_u(&mut self, memarg: MemArg) -> &mut Self {
3847 self.sink.push(0xFE);
3848 self.sink.push(0x2B);
3849 memarg.encode(self.sink);
3850 self
3851 }
3852
3853 pub fn i32_atomic_rmw_and(&mut self, memarg: MemArg) -> &mut Self {
3855 self.sink.push(0xFE);
3856 self.sink.push(0x2C);
3857 memarg.encode(self.sink);
3858 self
3859 }
3860
3861 pub fn i64_atomic_rmw_and(&mut self, memarg: MemArg) -> &mut Self {
3863 self.sink.push(0xFE);
3864 self.sink.push(0x2D);
3865 memarg.encode(self.sink);
3866 self
3867 }
3868
3869 pub fn i32_atomic_rmw8_and_u(&mut self, memarg: MemArg) -> &mut Self {
3871 self.sink.push(0xFE);
3872 self.sink.push(0x2E);
3873 memarg.encode(self.sink);
3874 self
3875 }
3876
3877 pub fn i32_atomic_rmw16_and_u(&mut self, memarg: MemArg) -> &mut Self {
3879 self.sink.push(0xFE);
3880 self.sink.push(0x2F);
3881 memarg.encode(self.sink);
3882 self
3883 }
3884
3885 pub fn i64_atomic_rmw8_and_u(&mut self, memarg: MemArg) -> &mut Self {
3887 self.sink.push(0xFE);
3888 self.sink.push(0x30);
3889 memarg.encode(self.sink);
3890 self
3891 }
3892
3893 pub fn i64_atomic_rmw16_and_u(&mut self, memarg: MemArg) -> &mut Self {
3895 self.sink.push(0xFE);
3896 self.sink.push(0x31);
3897 memarg.encode(self.sink);
3898 self
3899 }
3900
3901 pub fn i64_atomic_rmw32_and_u(&mut self, memarg: MemArg) -> &mut Self {
3903 self.sink.push(0xFE);
3904 self.sink.push(0x32);
3905 memarg.encode(self.sink);
3906 self
3907 }
3908
3909 pub fn i32_atomic_rmw_or(&mut self, memarg: MemArg) -> &mut Self {
3911 self.sink.push(0xFE);
3912 self.sink.push(0x33);
3913 memarg.encode(self.sink);
3914 self
3915 }
3916
3917 pub fn i64_atomic_rmw_or(&mut self, memarg: MemArg) -> &mut Self {
3919 self.sink.push(0xFE);
3920 self.sink.push(0x34);
3921 memarg.encode(self.sink);
3922 self
3923 }
3924
3925 pub fn i32_atomic_rmw8_or_u(&mut self, memarg: MemArg) -> &mut Self {
3927 self.sink.push(0xFE);
3928 self.sink.push(0x35);
3929 memarg.encode(self.sink);
3930 self
3931 }
3932
3933 pub fn i32_atomic_rmw16_or_u(&mut self, memarg: MemArg) -> &mut Self {
3935 self.sink.push(0xFE);
3936 self.sink.push(0x36);
3937 memarg.encode(self.sink);
3938 self
3939 }
3940
3941 pub fn i64_atomic_rmw8_or_u(&mut self, memarg: MemArg) -> &mut Self {
3943 self.sink.push(0xFE);
3944 self.sink.push(0x37);
3945 memarg.encode(self.sink);
3946 self
3947 }
3948
3949 pub fn i64_atomic_rmw16_or_u(&mut self, memarg: MemArg) -> &mut Self {
3951 self.sink.push(0xFE);
3952 self.sink.push(0x38);
3953 memarg.encode(self.sink);
3954 self
3955 }
3956
3957 pub fn i64_atomic_rmw32_or_u(&mut self, memarg: MemArg) -> &mut Self {
3959 self.sink.push(0xFE);
3960 self.sink.push(0x39);
3961 memarg.encode(self.sink);
3962 self
3963 }
3964
3965 pub fn i32_atomic_rmw_xor(&mut self, memarg: MemArg) -> &mut Self {
3967 self.sink.push(0xFE);
3968 self.sink.push(0x3A);
3969 memarg.encode(self.sink);
3970 self
3971 }
3972
3973 pub fn i64_atomic_rmw_xor(&mut self, memarg: MemArg) -> &mut Self {
3975 self.sink.push(0xFE);
3976 self.sink.push(0x3B);
3977 memarg.encode(self.sink);
3978 self
3979 }
3980
3981 pub fn i32_atomic_rmw8_xor_u(&mut self, memarg: MemArg) -> &mut Self {
3983 self.sink.push(0xFE);
3984 self.sink.push(0x3C);
3985 memarg.encode(self.sink);
3986 self
3987 }
3988
3989 pub fn i32_atomic_rmw16_xor_u(&mut self, memarg: MemArg) -> &mut Self {
3991 self.sink.push(0xFE);
3992 self.sink.push(0x3D);
3993 memarg.encode(self.sink);
3994 self
3995 }
3996
3997 pub fn i64_atomic_rmw8_xor_u(&mut self, memarg: MemArg) -> &mut Self {
3999 self.sink.push(0xFE);
4000 self.sink.push(0x3E);
4001 memarg.encode(self.sink);
4002 self
4003 }
4004
4005 pub fn i64_atomic_rmw16_xor_u(&mut self, memarg: MemArg) -> &mut Self {
4007 self.sink.push(0xFE);
4008 self.sink.push(0x3F);
4009 memarg.encode(self.sink);
4010 self
4011 }
4012
4013 pub fn i64_atomic_rmw32_xor_u(&mut self, memarg: MemArg) -> &mut Self {
4015 self.sink.push(0xFE);
4016 self.sink.push(0x40);
4017 memarg.encode(self.sink);
4018 self
4019 }
4020
4021 pub fn i32_atomic_rmw_xchg(&mut self, memarg: MemArg) -> &mut Self {
4023 self.sink.push(0xFE);
4024 self.sink.push(0x41);
4025 memarg.encode(self.sink);
4026 self
4027 }
4028
4029 pub fn i64_atomic_rmw_xchg(&mut self, memarg: MemArg) -> &mut Self {
4031 self.sink.push(0xFE);
4032 self.sink.push(0x42);
4033 memarg.encode(self.sink);
4034 self
4035 }
4036
4037 pub fn i32_atomic_rmw8_xchg_u(&mut self, memarg: MemArg) -> &mut Self {
4039 self.sink.push(0xFE);
4040 self.sink.push(0x43);
4041 memarg.encode(self.sink);
4042 self
4043 }
4044
4045 pub fn i32_atomic_rmw16_xchg_u(&mut self, memarg: MemArg) -> &mut Self {
4047 self.sink.push(0xFE);
4048 self.sink.push(0x44);
4049 memarg.encode(self.sink);
4050 self
4051 }
4052
4053 pub fn i64_atomic_rmw8_xchg_u(&mut self, memarg: MemArg) -> &mut Self {
4055 self.sink.push(0xFE);
4056 self.sink.push(0x45);
4057 memarg.encode(self.sink);
4058 self
4059 }
4060
4061 pub fn i64_atomic_rmw16_xchg_u(&mut self, memarg: MemArg) -> &mut Self {
4063 self.sink.push(0xFE);
4064 self.sink.push(0x46);
4065 memarg.encode(self.sink);
4066 self
4067 }
4068
4069 pub fn i64_atomic_rmw32_xchg_u(&mut self, memarg: MemArg) -> &mut Self {
4071 self.sink.push(0xFE);
4072 self.sink.push(0x47);
4073 memarg.encode(self.sink);
4074 self
4075 }
4076
4077 pub fn i32_atomic_rmw_cmpxchg(&mut self, memarg: MemArg) -> &mut Self {
4079 self.sink.push(0xFE);
4080 self.sink.push(0x48);
4081 memarg.encode(self.sink);
4082 self
4083 }
4084
4085 pub fn i64_atomic_rmw_cmpxchg(&mut self, memarg: MemArg) -> &mut Self {
4087 self.sink.push(0xFE);
4088 self.sink.push(0x49);
4089 memarg.encode(self.sink);
4090 self
4091 }
4092
4093 pub fn i32_atomic_rmw8_cmpxchg_u(&mut self, memarg: MemArg) -> &mut Self {
4095 self.sink.push(0xFE);
4096 self.sink.push(0x4A);
4097 memarg.encode(self.sink);
4098 self
4099 }
4100
4101 pub fn i32_atomic_rmw16_cmpxchg_u(&mut self, memarg: MemArg) -> &mut Self {
4103 self.sink.push(0xFE);
4104 self.sink.push(0x4B);
4105 memarg.encode(self.sink);
4106 self
4107 }
4108
4109 pub fn i64_atomic_rmw8_cmpxchg_u(&mut self, memarg: MemArg) -> &mut Self {
4111 self.sink.push(0xFE);
4112 self.sink.push(0x4C);
4113 memarg.encode(self.sink);
4114 self
4115 }
4116
4117 pub fn i64_atomic_rmw16_cmpxchg_u(&mut self, memarg: MemArg) -> &mut Self {
4119 self.sink.push(0xFE);
4120 self.sink.push(0x4D);
4121 memarg.encode(self.sink);
4122 self
4123 }
4124
4125 pub fn i64_atomic_rmw32_cmpxchg_u(&mut self, memarg: MemArg) -> &mut Self {
4127 self.sink.push(0xFE);
4128 self.sink.push(0x4E);
4129 memarg.encode(self.sink);
4130 self
4131 }
4132
4133 pub fn global_atomic_get(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4137 self.sink.push(0xFE);
4138 self.sink.push(0x4F);
4139 ordering.encode(self.sink);
4140 global_index.encode(self.sink);
4141 self
4142 }
4143
4144 pub fn global_atomic_set(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4146 self.sink.push(0xFE);
4147 self.sink.push(0x50);
4148 ordering.encode(self.sink);
4149 global_index.encode(self.sink);
4150 self
4151 }
4152
4153 pub fn global_atomic_rmw_add(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4155 self.sink.push(0xFE);
4156 self.sink.push(0x51);
4157 ordering.encode(self.sink);
4158 global_index.encode(self.sink);
4159 self
4160 }
4161
4162 pub fn global_atomic_rmw_sub(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4164 self.sink.push(0xFE);
4165 self.sink.push(0x52);
4166 ordering.encode(self.sink);
4167 global_index.encode(self.sink);
4168 self
4169 }
4170
4171 pub fn global_atomic_rmw_and(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4173 self.sink.push(0xFE);
4174 self.sink.push(0x53);
4175 ordering.encode(self.sink);
4176 global_index.encode(self.sink);
4177 self
4178 }
4179
4180 pub fn global_atomic_rmw_or(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4182 self.sink.push(0xFE);
4183 self.sink.push(0x54);
4184 ordering.encode(self.sink);
4185 global_index.encode(self.sink);
4186 self
4187 }
4188
4189 pub fn global_atomic_rmw_xor(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4191 self.sink.push(0xFE);
4192 self.sink.push(0x55);
4193 ordering.encode(self.sink);
4194 global_index.encode(self.sink);
4195 self
4196 }
4197
4198 pub fn global_atomic_rmw_xchg(&mut self, ordering: Ordering, global_index: u32) -> &mut Self {
4200 self.sink.push(0xFE);
4201 self.sink.push(0x56);
4202 ordering.encode(self.sink);
4203 global_index.encode(self.sink);
4204 self
4205 }
4206
4207 pub fn global_atomic_rmw_cmpxchg(
4209 &mut self,
4210 ordering: Ordering,
4211 global_index: u32,
4212 ) -> &mut Self {
4213 self.sink.push(0xFE);
4214 self.sink.push(0x57);
4215 ordering.encode(self.sink);
4216 global_index.encode(self.sink);
4217 self
4218 }
4219
4220 pub fn table_atomic_get(&mut self, ordering: Ordering, table_index: u32) -> &mut Self {
4222 self.sink.push(0xFE);
4223 self.sink.push(0x58);
4224 ordering.encode(self.sink);
4225 table_index.encode(self.sink);
4226 self
4227 }
4228
4229 pub fn table_atomic_set(&mut self, ordering: Ordering, table_index: u32) -> &mut Self {
4231 self.sink.push(0xFE);
4232 self.sink.push(0x59);
4233 ordering.encode(self.sink);
4234 table_index.encode(self.sink);
4235 self
4236 }
4237
4238 pub fn table_atomic_rmw_xchg(&mut self, ordering: Ordering, table_index: u32) -> &mut Self {
4240 self.sink.push(0xFE);
4241 self.sink.push(0x5A);
4242 ordering.encode(self.sink);
4243 table_index.encode(self.sink);
4244 self
4245 }
4246
4247 pub fn table_atomic_rmw_cmpxchg(&mut self, ordering: Ordering, table_index: u32) -> &mut Self {
4249 self.sink.push(0xFE);
4250 self.sink.push(0x5B);
4251 ordering.encode(self.sink);
4252 table_index.encode(self.sink);
4253 self
4254 }
4255
4256 pub fn struct_atomic_get(
4258 &mut self,
4259 ordering: Ordering,
4260 struct_type_index: u32,
4261 field_index: u32,
4262 ) -> &mut Self {
4263 self.sink.push(0xFE);
4264 self.sink.push(0x5C);
4265 ordering.encode(self.sink);
4266 struct_type_index.encode(self.sink);
4267 field_index.encode(self.sink);
4268 self
4269 }
4270
4271 pub fn struct_atomic_get_s(
4273 &mut self,
4274 ordering: Ordering,
4275 struct_type_index: u32,
4276 field_index: u32,
4277 ) -> &mut Self {
4278 self.sink.push(0xFE);
4279 self.sink.push(0x5D);
4280 ordering.encode(self.sink);
4281 struct_type_index.encode(self.sink);
4282 field_index.encode(self.sink);
4283 self
4284 }
4285
4286 pub fn struct_atomic_get_u(
4288 &mut self,
4289 ordering: Ordering,
4290 struct_type_index: u32,
4291 field_index: u32,
4292 ) -> &mut Self {
4293 self.sink.push(0xFE);
4294 self.sink.push(0x5E);
4295 ordering.encode(self.sink);
4296 struct_type_index.encode(self.sink);
4297 field_index.encode(self.sink);
4298 self
4299 }
4300
4301 pub fn struct_atomic_set(
4303 &mut self,
4304 ordering: Ordering,
4305 struct_type_index: u32,
4306 field_index: u32,
4307 ) -> &mut Self {
4308 self.sink.push(0xFE);
4309 self.sink.push(0x5F);
4310 ordering.encode(self.sink);
4311 struct_type_index.encode(self.sink);
4312 field_index.encode(self.sink);
4313 self
4314 }
4315
4316 pub fn struct_atomic_rmw_add(
4318 &mut self,
4319 ordering: Ordering,
4320 struct_type_index: u32,
4321 field_index: u32,
4322 ) -> &mut Self {
4323 self.sink.push(0xFE);
4324 self.sink.push(0x60);
4325 ordering.encode(self.sink);
4326 struct_type_index.encode(self.sink);
4327 field_index.encode(self.sink);
4328 self
4329 }
4330
4331 pub fn struct_atomic_rmw_sub(
4333 &mut self,
4334 ordering: Ordering,
4335 struct_type_index: u32,
4336 field_index: u32,
4337 ) -> &mut Self {
4338 self.sink.push(0xFE);
4339 self.sink.push(0x61);
4340 ordering.encode(self.sink);
4341 struct_type_index.encode(self.sink);
4342 field_index.encode(self.sink);
4343 self
4344 }
4345
4346 pub fn struct_atomic_rmw_and(
4348 &mut self,
4349 ordering: Ordering,
4350 struct_type_index: u32,
4351 field_index: u32,
4352 ) -> &mut Self {
4353 self.sink.push(0xFE);
4354 self.sink.push(0x62);
4355 ordering.encode(self.sink);
4356 struct_type_index.encode(self.sink);
4357 field_index.encode(self.sink);
4358 self
4359 }
4360
4361 pub fn struct_atomic_rmw_or(
4363 &mut self,
4364 ordering: Ordering,
4365 struct_type_index: u32,
4366 field_index: u32,
4367 ) -> &mut Self {
4368 self.sink.push(0xFE);
4369 self.sink.push(0x63);
4370 ordering.encode(self.sink);
4371 struct_type_index.encode(self.sink);
4372 field_index.encode(self.sink);
4373 self
4374 }
4375
4376 pub fn struct_atomic_rmw_xor(
4378 &mut self,
4379 ordering: Ordering,
4380 struct_type_index: u32,
4381 field_index: u32,
4382 ) -> &mut Self {
4383 self.sink.push(0xFE);
4384 self.sink.push(0x64);
4385 ordering.encode(self.sink);
4386 struct_type_index.encode(self.sink);
4387 field_index.encode(self.sink);
4388 self
4389 }
4390
4391 pub fn struct_atomic_rmw_xchg(
4393 &mut self,
4394 ordering: Ordering,
4395 struct_type_index: u32,
4396 field_index: u32,
4397 ) -> &mut Self {
4398 self.sink.push(0xFE);
4399 self.sink.push(0x65);
4400 ordering.encode(self.sink);
4401 struct_type_index.encode(self.sink);
4402 field_index.encode(self.sink);
4403 self
4404 }
4405
4406 pub fn struct_atomic_rmw_cmpxchg(
4408 &mut self,
4409 ordering: Ordering,
4410 struct_type_index: u32,
4411 field_index: u32,
4412 ) -> &mut Self {
4413 self.sink.push(0xFE);
4414 self.sink.push(0x66);
4415 ordering.encode(self.sink);
4416 struct_type_index.encode(self.sink);
4417 field_index.encode(self.sink);
4418 self
4419 }
4420
4421 pub fn array_atomic_get(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4423 self.sink.push(0xFE);
4424 self.sink.push(0x67);
4425 ordering.encode(self.sink);
4426 array_type_index.encode(self.sink);
4427 self
4428 }
4429
4430 pub fn array_atomic_get_s(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4432 self.sink.push(0xFE);
4433 self.sink.push(0x68);
4434 ordering.encode(self.sink);
4435 array_type_index.encode(self.sink);
4436 self
4437 }
4438
4439 pub fn array_atomic_get_u(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4441 self.sink.push(0xFE);
4442 self.sink.push(0x69);
4443 ordering.encode(self.sink);
4444 array_type_index.encode(self.sink);
4445 self
4446 }
4447
4448 pub fn array_atomic_set(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4450 self.sink.push(0xFE);
4451 self.sink.push(0x6A);
4452 ordering.encode(self.sink);
4453 array_type_index.encode(self.sink);
4454 self
4455 }
4456
4457 pub fn array_atomic_rmw_add(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4459 self.sink.push(0xFE);
4460 self.sink.push(0x6B);
4461 ordering.encode(self.sink);
4462 array_type_index.encode(self.sink);
4463 self
4464 }
4465
4466 pub fn array_atomic_rmw_sub(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4468 self.sink.push(0xFE);
4469 self.sink.push(0x6C);
4470 ordering.encode(self.sink);
4471 array_type_index.encode(self.sink);
4472 self
4473 }
4474
4475 pub fn array_atomic_rmw_and(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4477 self.sink.push(0xFE);
4478 self.sink.push(0x6D);
4479 ordering.encode(self.sink);
4480 array_type_index.encode(self.sink);
4481 self
4482 }
4483
4484 pub fn array_atomic_rmw_or(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4486 self.sink.push(0xFE);
4487 self.sink.push(0x6E);
4488 ordering.encode(self.sink);
4489 array_type_index.encode(self.sink);
4490 self
4491 }
4492
4493 pub fn array_atomic_rmw_xor(&mut self, ordering: Ordering, array_type_index: u32) -> &mut Self {
4495 self.sink.push(0xFE);
4496 self.sink.push(0x6F);
4497 ordering.encode(self.sink);
4498 array_type_index.encode(self.sink);
4499 self
4500 }
4501
4502 pub fn array_atomic_rmw_xchg(
4504 &mut self,
4505 ordering: Ordering,
4506 array_type_index: u32,
4507 ) -> &mut Self {
4508 self.sink.push(0xFE);
4509 self.sink.push(0x70);
4510 ordering.encode(self.sink);
4511 array_type_index.encode(self.sink);
4512 self
4513 }
4514
4515 pub fn array_atomic_rmw_cmpxchg(
4517 &mut self,
4518 ordering: Ordering,
4519 array_type_index: u32,
4520 ) -> &mut Self {
4521 self.sink.push(0xFE);
4522 self.sink.push(0x71);
4523 ordering.encode(self.sink);
4524 array_type_index.encode(self.sink);
4525 self
4526 }
4527
4528 pub fn ref_i31_shared(&mut self) -> &mut Self {
4530 self.sink.push(0xFE);
4531 self.sink.push(0x72);
4532 self
4533 }
4534
    /// Encode [`Instruction::ContNew`].
    pub fn cont_new(&mut self, type_index: u32) -> &mut Self {
        self.sink.push(0xE0);
        type_index.encode(self.sink);
        self
    }

    /// Encode [`Instruction::ContBind`].
    pub fn cont_bind(&mut self, argument_index: u32, result_index: u32) -> &mut Self {
        self.sink.push(0xE1);
        argument_index.encode(self.sink);
        result_index.encode(self.sink);
        self
    }

    /// Encode [`Instruction::Suspend`].
    pub fn suspend(&mut self, tag_index: u32) -> &mut Self {
        self.sink.push(0xE2);
        tag_index.encode(self.sink);
        self
    }

    /// Encode [`Instruction::Resume`].
    pub fn resume<V: IntoIterator<Item = Handle>>(
        &mut self,
        cont_type_index: u32,
        resume_table: V,
    ) -> &mut Self
    where
        V::IntoIter: ExactSizeIterator,
    {
        self.sink.push(0xE3);
        cont_type_index.encode(self.sink);
        encode_vec(resume_table, self.sink);
        self
    }

    /// Encode [`Instruction::ResumeThrow`].
    pub fn resume_throw<V: IntoIterator<Item = Handle>>(
        &mut self,
        cont_type_index: u32,
        tag_index: u32,
        resume_table: V,
    ) -> &mut Self
    where
        V::IntoIter: ExactSizeIterator,
    {
        self.sink.push(0xE4);
        cont_type_index.encode(self.sink);
        tag_index.encode(self.sink);
        encode_vec(resume_table, self.sink);
        self
    }

    /// Encode [`Instruction::Switch`].
    pub fn switch(&mut self, cont_type_index: u32, tag_index: u32) -> &mut Self {
        self.sink.push(0xE5);
        cont_type_index.encode(self.sink);
        tag_index.encode(self.sink);
        self
    }
4598
    /// Encode [`Instruction::I64Add128`].
    pub fn i64_add128(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        19u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I64Sub128`].
    pub fn i64_sub128(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        20u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I64MulWideS`].
    pub fn i64_mul_wide_s(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        21u32.encode(self.sink);
        self
    }

    /// Encode [`Instruction::I64MulWideU`].
    pub fn i64_mul_wide_u(&mut self) -> &mut Self {
        self.sink.push(0xFC);
        22u32.encode(self.sink);
        self
    }
4628}