// wasmer_compiler_singlepass/machine.rs

1use crate::{
2    common_decl::*,
3    location::{Location, Reg},
4    machine_arm64::MachineARM64,
5    machine_x64::MachineX86_64,
6    unwind::UnwindInstructions,
7};
8use dynasmrt::{AssemblyOffset, DynamicLabel};
9use std::{collections::BTreeMap, fmt::Debug};
10use wasmer_compiler::{
11    types::{
12        address_map::InstructionAddressMap,
13        function::FunctionBody,
14        relocation::{Relocation, RelocationTarget},
15        section::CustomSection,
16        target::{Architecture, CallingConvention, Target},
17    },
18    wasmparser::{MemArg, ValType as WpType},
19};
20use wasmer_types::{
21    CompileError, FunctionIndex, FunctionType, TrapCode, TrapInformation, VMOffsets,
22};
/// Dynamic assembler label (alias for `dynasmrt::DynamicLabel`).
pub type Label = DynamicLabel;
/// Byte offset into the emitted assembly (alias for `dynasmrt::AssemblyOffset`).
pub type Offset = AssemblyOffset;
25
/// A constant scalar value carried through codegen (e.g. as an immediate
/// operand). Variants cover the integer widths and float types used by the
/// single-pass backend.
///
/// `Debug` is derived so diagnostics can print values; `Copy` is derived
/// because every variant holds a plain machine scalar, making moves free.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Value {
    /// 8-bit signed integer.
    I8(i8),
    /// 32-bit signed integer.
    I32(i32),
    /// 64-bit signed integer.
    I64(i64),
    /// 32-bit IEEE-754 float.
    F32(f32),
    /// 64-bit IEEE-754 float.
    F64(f64),
}
35
/// Early-returns a `CompileError::Codegen` built from `format!`-style
/// arguments. Expands to a bare `return Err(...)`, so it may only be used
/// inside a function returning `Result<_, CompileError>`, and `CompileError`
/// must be in scope at the call site (the expansion is unqualified).
#[macro_export]
macro_rules! codegen_error {
    ($($arg:tt)*) => {return Err(CompileError::Codegen(format!($($arg)*)))}
}
40
41#[allow(unused)]
42pub trait MaybeImmediate {
43    fn imm_value(&self) -> Option<Value>;
44    fn is_imm(&self) -> bool {
45        self.imm_value().is_some()
46    }
47}
48
/// A trap table for a `RunnableModuleInfo`.
///
/// Lets the runtime translate a faulting machine-code address back into the
/// WebAssembly trap that should be reported.
#[derive(Clone, Debug, Default)]
pub struct TrapTable {
    /// Mappings from offsets in generated machine code to the corresponding trap code.
    pub offset_to_code: BTreeMap<usize, TrapCode>,
}
55
/// Native page size in bytes. All supported targets appear to use 4 KiB
/// pages, so this is a single constant rather than a per-architecture value
/// for now.
pub const NATIVE_PAGE_SIZE: usize = 4096;
58
59pub struct MachineStackOffset(pub usize);
60
61#[allow(unused)]
62pub trait Machine {
63    type GPR: Copy + Eq + Debug + Reg;
64    type SIMD: Copy + Eq + Debug + Reg;
65    /// Get current assembler offset
66    fn assembler_get_offset(&self) -> Offset;
67    /// Convert from a GPR register to index register
68    fn index_from_gpr(&self, x: Self::GPR) -> RegisterIndex;
69    /// Convert from an SIMD register
70    fn index_from_simd(&self, x: Self::SIMD) -> RegisterIndex;
71    /// Get the GPR that hold vmctx
72    fn get_vmctx_reg(&self) -> Self::GPR;
73    /// Picks an unused general purpose register for local/stack/argument use.
74    ///
75    /// This method does not mark the register as used
76    fn pick_gpr(&self) -> Option<Self::GPR>;
77    /// Picks an unused general purpose register for internal temporary use.
78    ///
79    /// This method does not mark the register as used
80    fn pick_temp_gpr(&self) -> Option<Self::GPR>;
81    /// Get all used GPR
82    fn get_used_gprs(&self) -> Vec<Self::GPR>;
83    /// Get all used SIMD regs
84    fn get_used_simd(&self) -> Vec<Self::SIMD>;
85    /// Picks an unused general pupose register and mark it as used
86    fn acquire_temp_gpr(&mut self) -> Option<Self::GPR>;
87    /// Releases a temporary GPR.
88    fn release_gpr(&mut self, gpr: Self::GPR);
89    /// Specify that a given register is in use.
90    fn reserve_unused_temp_gpr(&mut self, gpr: Self::GPR) -> Self::GPR;
91    /// reserve a GPR
92    fn reserve_gpr(&mut self, gpr: Self::GPR);
93    /// Push used gpr to the stack. Return the bytes taken on the stack
94    fn push_used_gpr(&mut self, grps: &[Self::GPR]) -> Result<usize, CompileError>;
95    /// Pop used gpr to the stack
96    fn pop_used_gpr(&mut self, grps: &[Self::GPR]) -> Result<(), CompileError>;
97    /// Picks an unused SIMD register.
98    ///
99    /// This method does not mark the register as used
100    fn pick_simd(&self) -> Option<Self::SIMD>;
101    /// Picks an unused SIMD register for internal temporary use.
102    ///
103    /// This method does not mark the register as used
104    fn pick_temp_simd(&self) -> Option<Self::SIMD>;
105    /// Acquires a temporary XMM register.
106    fn acquire_temp_simd(&mut self) -> Option<Self::SIMD>;
107    /// reserve a SIMD register
108    fn reserve_simd(&mut self, simd: Self::SIMD);
109    /// Releases a temporary XMM register.
110    fn release_simd(&mut self, simd: Self::SIMD);
111    /// Push used simd regs to the stack. Return bytes taken on the stack
112    fn push_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<usize, CompileError>;
113    /// Pop used simd regs to the stack
114    fn pop_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<(), CompileError>;
115    /// Return a rounded stack adjustement value (must be multiple of 16bytes on ARM64 for example)
116    fn round_stack_adjust(&self, value: usize) -> usize;
117    /// Set the source location of the Wasm to the given offset.
118    fn set_srcloc(&mut self, offset: u32);
119    /// Marks each address in the code range emitted by `f` with the trap code `code`.
120    fn mark_address_range_with_trap_code(&mut self, code: TrapCode, begin: usize, end: usize);
121    /// Marks one address as trappable with trap code `code`.
122    fn mark_address_with_trap_code(&mut self, code: TrapCode);
123    /// Marks the instruction as trappable with trap code `code`. return "begin" offset
124    fn mark_instruction_with_trap_code(&mut self, code: TrapCode) -> usize;
125    /// Pushes the instruction to the address map, calculating the offset from a
126    /// provided beginning address.
127    fn mark_instruction_address_end(&mut self, begin: usize);
128    /// Insert a StackOverflow (at offset 0)
129    fn insert_stackoverflow(&mut self);
130    /// Get all current TrapInformation
131    fn collect_trap_information(&self) -> Vec<TrapInformation>;
132    // Get all intructions address map
133    fn instructions_address_map(&self) -> Vec<InstructionAddressMap>;
134    /// Memory location for a local on the stack
135    /// Like Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)) for x86_64
136    fn local_on_stack(&mut self, stack_offset: i32) -> Location<Self::GPR, Self::SIMD>;
137    /// Adjust stack for locals
138    /// Like assembler.emit_sub(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP))
139    fn adjust_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
140    /// restore stack
141    /// Like assembler.emit_add(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP))
142    fn restore_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
143    /// Pop stack of locals
144    /// Like assembler.emit_add(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP))
145    fn pop_stack_locals(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
146    /// Zero a location taht is 32bits
147    fn zero_location(
148        &mut self,
149        size: Size,
150        location: Location<Self::GPR, Self::SIMD>,
151    ) -> Result<(), CompileError>;
152    /// GPR Reg used for local pointer on the stack
153    fn local_pointer(&self) -> Self::GPR;
154    /// push a value on the stack for a native call
155    fn move_location_for_native(
156        &mut self,
157        size: Size,
158        loc: Location<Self::GPR, Self::SIMD>,
159        dest: Location<Self::GPR, Self::SIMD>,
160    ) -> Result<(), CompileError>;
161    /// Determine whether a local should be allocated on the stack.
162    fn is_local_on_stack(&self, idx: usize) -> bool;
163    /// Determine a local's location.
164    fn get_local_location(
165        &self,
166        idx: usize,
167        callee_saved_regs_size: usize,
168    ) -> Location<Self::GPR, Self::SIMD>;
169    /// Move a local to the stack
170    /// Like emit_mov(Size::S64, location, Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)));
171    fn move_local(
172        &mut self,
173        stack_offset: i32,
174        location: Location<Self::GPR, Self::SIMD>,
175    ) -> Result<(), CompileError>;
176    /// List of register to save, depending on the CallingConvention
177    fn list_to_save(
178        &self,
179        calling_convention: CallingConvention,
180    ) -> Vec<Location<Self::GPR, Self::SIMD>>;
181    /// Get param location (to build a call, using SP for stack args)
182    fn get_param_location(
183        &self,
184        idx: usize,
185        sz: Size,
186        stack_offset: &mut usize,
187        calling_convention: CallingConvention,
188    ) -> Location<Self::GPR, Self::SIMD>;
189    /// Get call param location (from a call, using FP for stack args)
190    fn get_call_param_location(
191        &self,
192        idx: usize,
193        sz: Size,
194        stack_offset: &mut usize,
195        calling_convention: CallingConvention,
196    ) -> Location<Self::GPR, Self::SIMD>;
197    /// Get simple param location
198    fn get_simple_param_location(
199        &self,
200        idx: usize,
201        calling_convention: CallingConvention,
202    ) -> Location<Self::GPR, Self::SIMD>;
203    /// move a location to another
204    fn move_location(
205        &mut self,
206        size: Size,
207        source: Location<Self::GPR, Self::SIMD>,
208        dest: Location<Self::GPR, Self::SIMD>,
209    ) -> Result<(), CompileError>;
210    /// move a location to another, with zero or sign extension
211    fn move_location_extend(
212        &mut self,
213        size_val: Size,
214        signed: bool,
215        source: Location<Self::GPR, Self::SIMD>,
216        size_op: Size,
217        dest: Location<Self::GPR, Self::SIMD>,
218    ) -> Result<(), CompileError>;
219    /// Load a memory value to a register, zero extending to 64bits.
220    /// Panic if gpr is not a Location::GPR or if mem is not a Memory(2)
221    fn load_address(
222        &mut self,
223        size: Size,
224        gpr: Location<Self::GPR, Self::SIMD>,
225        mem: Location<Self::GPR, Self::SIMD>,
226    ) -> Result<(), CompileError>;
227    /// Init the stack loc counter
228    fn init_stack_loc(
229        &mut self,
230        init_stack_loc_cnt: u64,
231        last_stack_loc: Location<Self::GPR, Self::SIMD>,
232    ) -> Result<(), CompileError>;
233    /// Restore save_area
234    fn restore_saved_area(&mut self, saved_area_offset: i32) -> Result<(), CompileError>;
235    /// Pop a location
236    fn pop_location(
237        &mut self,
238        location: Location<Self::GPR, Self::SIMD>,
239    ) -> Result<(), CompileError>;
240    /// Create a new `MachineState` with default values.
241    fn new_machine_state(&self) -> MachineState;
242
243    /// Finalize the assembler
244    fn assembler_finalize(self) -> Result<Vec<u8>, CompileError>;
245
246    /// get_offset of Assembler
247    fn get_offset(&self) -> Offset;
248
249    /// finalize a function
250    fn finalize_function(&mut self) -> Result<(), CompileError>;
251
252    /// emit native function prolog (depending on the calling Convention, like "PUSH RBP / MOV RSP, RBP")
253    fn emit_function_prolog(&mut self) -> Result<(), CompileError>;
254    /// emit native function epilog (depending on the calling Convention, like "MOV RBP, RSP / POP RBP")
255    fn emit_function_epilog(&mut self) -> Result<(), CompileError>;
256    /// handle return value, with optionnal cannonicalization if wanted
257    fn emit_function_return_value(
258        &mut self,
259        ty: WpType,
260        cannonicalize: bool,
261        loc: Location<Self::GPR, Self::SIMD>,
262    ) -> Result<(), CompileError>;
263    /// Handle copy to SIMD register from ret value (if needed by the arch/calling convention)
264    fn emit_function_return_float(&mut self) -> Result<(), CompileError>;
265    /// Is NaN canonicalization supported
266    fn arch_supports_canonicalize_nan(&self) -> bool;
267    /// Cannonicalize a NaN (or panic if not supported)
268    fn canonicalize_nan(
269        &mut self,
270        sz: Size,
271        input: Location<Self::GPR, Self::SIMD>,
272        output: Location<Self::GPR, Self::SIMD>,
273    ) -> Result<(), CompileError>;
274
275    /// emit an Illegal Opcode, associated with a trapcode
276    fn emit_illegal_op(&mut self, trp: TrapCode) -> Result<(), CompileError>;
277    /// create a new label
278    fn get_label(&mut self) -> Label;
279    /// emit a label
280    fn emit_label(&mut self, label: Label) -> Result<(), CompileError>;
281
282    /// get the gpr use for call. like RAX on x86_64
283    fn get_grp_for_call(&self) -> Self::GPR;
284    /// Emit a call using the value in register
285    fn emit_call_register(&mut self, register: Self::GPR) -> Result<(), CompileError>;
286    /// Emit a call to a label
287    fn emit_call_label(&mut self, label: Label) -> Result<(), CompileError>;
288    /// Does an trampoline is neededfor indirect call
289    fn arch_requires_indirect_call_trampoline(&self) -> bool;
290    /// indirect call with trampoline
291    fn arch_emit_indirect_call_with_trampoline(
292        &mut self,
293        location: Location<Self::GPR, Self::SIMD>,
294    ) -> Result<(), CompileError>;
295    /// emit a call to a location
296    fn emit_call_location(
297        &mut self,
298        location: Location<Self::GPR, Self::SIMD>,
299    ) -> Result<(), CompileError>;
300    /// get the gpr for the return of generic values
301    fn get_gpr_for_ret(&self) -> Self::GPR;
302    /// get the simd for the return of float/double values
303    fn get_simd_for_ret(&self) -> Self::SIMD;
304
305    /// Emit a debug breakpoint
306    fn emit_debug_breakpoint(&mut self) -> Result<(), CompileError>;
307
308    /// load the address of a memory location (will panic if src is not a memory)
309    /// like LEA opcode on x86_64
310    fn location_address(
311        &mut self,
312        size: Size,
313        source: Location<Self::GPR, Self::SIMD>,
314        dest: Location<Self::GPR, Self::SIMD>,
315    ) -> Result<(), CompileError>;
316
317    /// And src & dst -> dst (with or without flags)
318    fn location_and(
319        &mut self,
320        size: Size,
321        source: Location<Self::GPR, Self::SIMD>,
322        dest: Location<Self::GPR, Self::SIMD>,
323        flags: bool,
324    ) -> Result<(), CompileError>;
325    /// Xor src & dst -> dst (with or without flags)
326    fn location_xor(
327        &mut self,
328        size: Size,
329        source: Location<Self::GPR, Self::SIMD>,
330        dest: Location<Self::GPR, Self::SIMD>,
331        flags: bool,
332    ) -> Result<(), CompileError>;
333    /// Or src & dst -> dst (with or without flags)
334    fn location_or(
335        &mut self,
336        size: Size,
337        source: Location<Self::GPR, Self::SIMD>,
338        dest: Location<Self::GPR, Self::SIMD>,
339        flags: bool,
340    ) -> Result<(), CompileError>;
341
342    /// Add src+dst -> dst (with or without flags)
343    fn location_add(
344        &mut self,
345        size: Size,
346        source: Location<Self::GPR, Self::SIMD>,
347        dest: Location<Self::GPR, Self::SIMD>,
348        flags: bool,
349    ) -> Result<(), CompileError>;
350    /// Sub dst-src -> dst (with or without flags)
351    fn location_sub(
352        &mut self,
353        size: Size,
354        source: Location<Self::GPR, Self::SIMD>,
355        dest: Location<Self::GPR, Self::SIMD>,
356        flags: bool,
357    ) -> Result<(), CompileError>;
358    /// -src -> dst
359    fn location_neg(
360        &mut self,
361        size_val: Size, // size of src
362        signed: bool,
363        source: Location<Self::GPR, Self::SIMD>,
364        size_op: Size,
365        dest: Location<Self::GPR, Self::SIMD>,
366    ) -> Result<(), CompileError>;
367
368    /// Cmp src - dst and set flags
369    fn location_cmp(
370        &mut self,
371        size: Size,
372        source: Location<Self::GPR, Self::SIMD>,
373        dest: Location<Self::GPR, Self::SIMD>,
374    ) -> Result<(), CompileError>;
375    /// Test src & dst and set flags
376    fn location_test(
377        &mut self,
378        size: Size,
379        source: Location<Self::GPR, Self::SIMD>,
380        dest: Location<Self::GPR, Self::SIMD>,
381    ) -> Result<(), CompileError>;
382
383    /// jmp without condidtion
384    fn jmp_unconditionnal(&mut self, label: Label) -> Result<(), CompileError>;
385    /// jmp on equal (src==dst)
386    /// like Equal set on x86_64
387    fn jmp_on_equal(&mut self, label: Label) -> Result<(), CompileError>;
388    /// jmp on different (src!=dst)
389    /// like NotEqual set on x86_64
390    fn jmp_on_different(&mut self, label: Label) -> Result<(), CompileError>;
391    /// jmp on above (src>dst)
392    /// like Above set on x86_64
393    fn jmp_on_above(&mut self, label: Label) -> Result<(), CompileError>;
394    /// jmp on above (src>=dst)
395    /// like Above or Equal set on x86_64
396    fn jmp_on_aboveequal(&mut self, label: Label) -> Result<(), CompileError>;
397    /// jmp on above (src<=dst)
398    /// like Below or Equal set on x86_64
399    fn jmp_on_belowequal(&mut self, label: Label) -> Result<(), CompileError>;
400    /// jmp on overflow
401    /// like Carry set on x86_64
402    fn jmp_on_overflow(&mut self, label: Label) -> Result<(), CompileError>;
403
404    /// jmp using a jump table at lable with cond as the indice
405    fn emit_jmp_to_jumptable(
406        &mut self,
407        label: Label,
408        cond: Location<Self::GPR, Self::SIMD>,
409    ) -> Result<(), CompileError>;
410
411    /// Align for Loop (may do nothing, depending on the arch)
412    fn align_for_loop(&mut self) -> Result<(), CompileError>;
413
414    /// ret (from a Call)
415    fn emit_ret(&mut self) -> Result<(), CompileError>;
416
417    /// Stack push of a location
418    fn emit_push(
419        &mut self,
420        size: Size,
421        loc: Location<Self::GPR, Self::SIMD>,
422    ) -> Result<(), CompileError>;
423    /// Stack pop of a location
424    fn emit_pop(
425        &mut self,
426        size: Size,
427        loc: Location<Self::GPR, Self::SIMD>,
428    ) -> Result<(), CompileError>;
429    /// relaxed mov: move from anywhere to anywhere
430    fn emit_relaxed_mov(
431        &mut self,
432        sz: Size,
433        src: Location<Self::GPR, Self::SIMD>,
434        dst: Location<Self::GPR, Self::SIMD>,
435    ) -> Result<(), CompileError>;
436    /// relaxed cmp: compare from anywhere and anywhere
437    fn emit_relaxed_cmp(
438        &mut self,
439        sz: Size,
440        src: Location<Self::GPR, Self::SIMD>,
441        dst: Location<Self::GPR, Self::SIMD>,
442    ) -> Result<(), CompileError>;
443    /// Emit a memory fence. Can be nothing for x86_64 or a DMB on ARM64 for example
444    fn emit_memory_fence(&mut self) -> Result<(), CompileError>;
445    /// relaxed move with zero extension
446    fn emit_relaxed_zero_extension(
447        &mut self,
448        sz_src: Size,
449        src: Location<Self::GPR, Self::SIMD>,
450        sz_dst: Size,
451        dst: Location<Self::GPR, Self::SIMD>,
452    ) -> Result<(), CompileError>;
453    /// relaxed move with sign extension
454    fn emit_relaxed_sign_extension(
455        &mut self,
456        sz_src: Size,
457        src: Location<Self::GPR, Self::SIMD>,
458        sz_dst: Size,
459        dst: Location<Self::GPR, Self::SIMD>,
460    ) -> Result<(), CompileError>;
461    /// Multiply location with immediate
462    fn emit_imul_imm32(
463        &mut self,
464        size: Size,
465        imm32: u32,
466        gpr: Self::GPR,
467    ) -> Result<(), CompileError>;
468    /// Add with location directly from the stack
469    fn emit_binop_add32(
470        &mut self,
471        loc_a: Location<Self::GPR, Self::SIMD>,
472        loc_b: Location<Self::GPR, Self::SIMD>,
473        ret: Location<Self::GPR, Self::SIMD>,
474    ) -> Result<(), CompileError>;
475    /// Sub with location directly from the stack
476    fn emit_binop_sub32(
477        &mut self,
478        loc_a: Location<Self::GPR, Self::SIMD>,
479        loc_b: Location<Self::GPR, Self::SIMD>,
480        ret: Location<Self::GPR, Self::SIMD>,
481    ) -> Result<(), CompileError>;
482    /// Multiply with location directly from the stack
483    fn emit_binop_mul32(
484        &mut self,
485        loc_a: Location<Self::GPR, Self::SIMD>,
486        loc_b: Location<Self::GPR, Self::SIMD>,
487        ret: Location<Self::GPR, Self::SIMD>,
488    ) -> Result<(), CompileError>;
489    /// Unsigned Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
490    fn emit_binop_udiv32(
491        &mut self,
492        loc_a: Location<Self::GPR, Self::SIMD>,
493        loc_b: Location<Self::GPR, Self::SIMD>,
494        ret: Location<Self::GPR, Self::SIMD>,
495        integer_division_by_zero: Label,
496        integer_overflow: Label,
497    ) -> Result<usize, CompileError>;
498    /// Signed Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
499    fn emit_binop_sdiv32(
500        &mut self,
501        loc_a: Location<Self::GPR, Self::SIMD>,
502        loc_b: Location<Self::GPR, Self::SIMD>,
503        ret: Location<Self::GPR, Self::SIMD>,
504        integer_division_by_zero: Label,
505        integer_overflow: Label,
506    ) -> Result<usize, CompileError>;
507    /// Unsigned Reminder (of a division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
508    fn emit_binop_urem32(
509        &mut self,
510        loc_a: Location<Self::GPR, Self::SIMD>,
511        loc_b: Location<Self::GPR, Self::SIMD>,
512        ret: Location<Self::GPR, Self::SIMD>,
513        integer_division_by_zero: Label,
514        integer_overflow: Label,
515    ) -> Result<usize, CompileError>;
516    /// Signed Reminder (of a Division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
517    fn emit_binop_srem32(
518        &mut self,
519        loc_a: Location<Self::GPR, Self::SIMD>,
520        loc_b: Location<Self::GPR, Self::SIMD>,
521        ret: Location<Self::GPR, Self::SIMD>,
522        integer_division_by_zero: Label,
523        integer_overflow: Label,
524    ) -> Result<usize, CompileError>;
525    /// And with location directly from the stack
526    fn emit_binop_and32(
527        &mut self,
528        loc_a: Location<Self::GPR, Self::SIMD>,
529        loc_b: Location<Self::GPR, Self::SIMD>,
530        ret: Location<Self::GPR, Self::SIMD>,
531    ) -> Result<(), CompileError>;
532    /// Or with location directly from the stack
533    fn emit_binop_or32(
534        &mut self,
535        loc_a: Location<Self::GPR, Self::SIMD>,
536        loc_b: Location<Self::GPR, Self::SIMD>,
537        ret: Location<Self::GPR, Self::SIMD>,
538    ) -> Result<(), CompileError>;
539    /// Xor with location directly from the stack
540    fn emit_binop_xor32(
541        &mut self,
542        loc_a: Location<Self::GPR, Self::SIMD>,
543        loc_b: Location<Self::GPR, Self::SIMD>,
544        ret: Location<Self::GPR, Self::SIMD>,
545    ) -> Result<(), CompileError>;
546    /// Signed Greater of Equal Compare 2 i32, result in a GPR
547    fn i32_cmp_ge_s(
548        &mut self,
549        loc_a: Location<Self::GPR, Self::SIMD>,
550        loc_b: Location<Self::GPR, Self::SIMD>,
551        ret: Location<Self::GPR, Self::SIMD>,
552    ) -> Result<(), CompileError>;
553    /// Signed Greater Than Compare 2 i32, result in a GPR
554    fn i32_cmp_gt_s(
555        &mut self,
556        loc_a: Location<Self::GPR, Self::SIMD>,
557        loc_b: Location<Self::GPR, Self::SIMD>,
558        ret: Location<Self::GPR, Self::SIMD>,
559    ) -> Result<(), CompileError>;
560    /// Signed Less of Equal Compare 2 i32, result in a GPR
561    fn i32_cmp_le_s(
562        &mut self,
563        loc_a: Location<Self::GPR, Self::SIMD>,
564        loc_b: Location<Self::GPR, Self::SIMD>,
565        ret: Location<Self::GPR, Self::SIMD>,
566    ) -> Result<(), CompileError>;
567    /// Signed Less Than Compare 2 i32, result in a GPR
568    fn i32_cmp_lt_s(
569        &mut self,
570        loc_a: Location<Self::GPR, Self::SIMD>,
571        loc_b: Location<Self::GPR, Self::SIMD>,
572        ret: Location<Self::GPR, Self::SIMD>,
573    ) -> Result<(), CompileError>;
574    /// Unsigned Greater of Equal Compare 2 i32, result in a GPR
575    fn i32_cmp_ge_u(
576        &mut self,
577        loc_a: Location<Self::GPR, Self::SIMD>,
578        loc_b: Location<Self::GPR, Self::SIMD>,
579        ret: Location<Self::GPR, Self::SIMD>,
580    ) -> Result<(), CompileError>;
581    /// Unsigned Greater Than Compare 2 i32, result in a GPR
582    fn i32_cmp_gt_u(
583        &mut self,
584        loc_a: Location<Self::GPR, Self::SIMD>,
585        loc_b: Location<Self::GPR, Self::SIMD>,
586        ret: Location<Self::GPR, Self::SIMD>,
587    ) -> Result<(), CompileError>;
588    /// Unsigned Less of Equal Compare 2 i32, result in a GPR
589    fn i32_cmp_le_u(
590        &mut self,
591        loc_a: Location<Self::GPR, Self::SIMD>,
592        loc_b: Location<Self::GPR, Self::SIMD>,
593        ret: Location<Self::GPR, Self::SIMD>,
594    ) -> Result<(), CompileError>;
595    /// Unsigned Less Than Compare 2 i32, result in a GPR
596    fn i32_cmp_lt_u(
597        &mut self,
598        loc_a: Location<Self::GPR, Self::SIMD>,
599        loc_b: Location<Self::GPR, Self::SIMD>,
600        ret: Location<Self::GPR, Self::SIMD>,
601    ) -> Result<(), CompileError>;
602    /// Not Equal Compare 2 i32, result in a GPR
603    fn i32_cmp_ne(
604        &mut self,
605        loc_a: Location<Self::GPR, Self::SIMD>,
606        loc_b: Location<Self::GPR, Self::SIMD>,
607        ret: Location<Self::GPR, Self::SIMD>,
608    ) -> Result<(), CompileError>;
609    /// Equal Compare 2 i32, result in a GPR
610    fn i32_cmp_eq(
611        &mut self,
612        loc_a: Location<Self::GPR, Self::SIMD>,
613        loc_b: Location<Self::GPR, Self::SIMD>,
614        ret: Location<Self::GPR, Self::SIMD>,
615    ) -> Result<(), CompileError>;
616    /// Count Leading 0 bit of an i32
617    fn i32_clz(
618        &mut self,
619        loc: Location<Self::GPR, Self::SIMD>,
620        ret: Location<Self::GPR, Self::SIMD>,
621    ) -> Result<(), CompileError>;
622    /// Count Trailling 0 bit of an i32
623    fn i32_ctz(
624        &mut self,
625        loc: Location<Self::GPR, Self::SIMD>,
626        ret: Location<Self::GPR, Self::SIMD>,
627    ) -> Result<(), CompileError>;
628    /// Count the number of 1 bit of an i32
629    fn i32_popcnt(
630        &mut self,
631        loc: Location<Self::GPR, Self::SIMD>,
632        ret: Location<Self::GPR, Self::SIMD>,
633    ) -> Result<(), CompileError>;
634    /// i32 Logical Shift Left
635    fn i32_shl(
636        &mut self,
637        loc_a: Location<Self::GPR, Self::SIMD>,
638        loc_b: Location<Self::GPR, Self::SIMD>,
639        ret: Location<Self::GPR, Self::SIMD>,
640    ) -> Result<(), CompileError>;
641    /// i32 Logical Shift Right
642    fn i32_shr(
643        &mut self,
644        loc_a: Location<Self::GPR, Self::SIMD>,
645        loc_b: Location<Self::GPR, Self::SIMD>,
646        ret: Location<Self::GPR, Self::SIMD>,
647    ) -> Result<(), CompileError>;
648    /// i32 Arithmetic Shift Right
649    fn i32_sar(
650        &mut self,
651        loc_a: Location<Self::GPR, Self::SIMD>,
652        loc_b: Location<Self::GPR, Self::SIMD>,
653        ret: Location<Self::GPR, Self::SIMD>,
654    ) -> Result<(), CompileError>;
655    /// i32 Roll Left
656    fn i32_rol(
657        &mut self,
658        loc_a: Location<Self::GPR, Self::SIMD>,
659        loc_b: Location<Self::GPR, Self::SIMD>,
660        ret: Location<Self::GPR, Self::SIMD>,
661    ) -> Result<(), CompileError>;
662    /// i32 Roll Right
663    fn i32_ror(
664        &mut self,
665        loc_a: Location<Self::GPR, Self::SIMD>,
666        loc_b: Location<Self::GPR, Self::SIMD>,
667        ret: Location<Self::GPR, Self::SIMD>,
668    ) -> Result<(), CompileError>;
669    /// i32 load
670    #[allow(clippy::too_many_arguments)]
671    fn i32_load(
672        &mut self,
673        addr: Location<Self::GPR, Self::SIMD>,
674        memarg: &MemArg,
675        ret: Location<Self::GPR, Self::SIMD>,
676        need_check: bool,
677        imported_memories: bool,
678        offset: i32,
679        heap_access_oob: Label,
680        unaligned_atomic: Label,
681    ) -> Result<(), CompileError>;
682    /// i32 load of an unsigned 8bits
683    #[allow(clippy::too_many_arguments)]
684    fn i32_load_8u(
685        &mut self,
686        addr: Location<Self::GPR, Self::SIMD>,
687        memarg: &MemArg,
688        ret: Location<Self::GPR, Self::SIMD>,
689        need_check: bool,
690        imported_memories: bool,
691        offset: i32,
692        heap_access_oob: Label,
693        unaligned_atomic: Label,
694    ) -> Result<(), CompileError>;
695    /// i32 load of an signed 8bits
696    #[allow(clippy::too_many_arguments)]
697    fn i32_load_8s(
698        &mut self,
699        addr: Location<Self::GPR, Self::SIMD>,
700        memarg: &MemArg,
701        ret: Location<Self::GPR, Self::SIMD>,
702        need_check: bool,
703        imported_memories: bool,
704        offset: i32,
705        heap_access_oob: Label,
706        unaligned_atomic: Label,
707    ) -> Result<(), CompileError>;
708    /// i32 load of an unsigned 16bits
709    #[allow(clippy::too_many_arguments)]
710    fn i32_load_16u(
711        &mut self,
712        addr: Location<Self::GPR, Self::SIMD>,
713        memarg: &MemArg,
714        ret: Location<Self::GPR, Self::SIMD>,
715        need_check: bool,
716        imported_memories: bool,
717        offset: i32,
718        heap_access_oob: Label,
719        unaligned_atomic: Label,
720    ) -> Result<(), CompileError>;
721    /// i32 load of an signed 16bits
722    #[allow(clippy::too_many_arguments)]
723    fn i32_load_16s(
724        &mut self,
725        addr: Location<Self::GPR, Self::SIMD>,
726        memarg: &MemArg,
727        ret: Location<Self::GPR, Self::SIMD>,
728        need_check: bool,
729        imported_memories: bool,
730        offset: i32,
731        heap_access_oob: Label,
732        unaligned_atomic: Label,
733    ) -> Result<(), CompileError>;
734    /// i32 atomic load
735    #[allow(clippy::too_many_arguments)]
736    fn i32_atomic_load(
737        &mut self,
738        addr: Location<Self::GPR, Self::SIMD>,
739        memarg: &MemArg,
740        ret: Location<Self::GPR, Self::SIMD>,
741        need_check: bool,
742        imported_memories: bool,
743        offset: i32,
744        heap_access_oob: Label,
745        unaligned_atomic: Label,
746    ) -> Result<(), CompileError>;
747    /// i32 atomic load of an unsigned 8bits
748    #[allow(clippy::too_many_arguments)]
749    fn i32_atomic_load_8u(
750        &mut self,
751        addr: Location<Self::GPR, Self::SIMD>,
752        memarg: &MemArg,
753        ret: Location<Self::GPR, Self::SIMD>,
754        need_check: bool,
755        imported_memories: bool,
756        offset: i32,
757        heap_access_oob: Label,
758        unaligned_atomic: Label,
759    ) -> Result<(), CompileError>;
760    /// i32 atomic load of an unsigned 16bits
761    #[allow(clippy::too_many_arguments)]
762    fn i32_atomic_load_16u(
763        &mut self,
764        addr: Location<Self::GPR, Self::SIMD>,
765        memarg: &MemArg,
766        ret: Location<Self::GPR, Self::SIMD>,
767        need_check: bool,
768        imported_memories: bool,
769        offset: i32,
770        heap_access_oob: Label,
771        unaligned_atomic: Label,
772    ) -> Result<(), CompileError>;
773    /// i32 save
774    #[allow(clippy::too_many_arguments)]
775    fn i32_save(
776        &mut self,
777        value: Location<Self::GPR, Self::SIMD>,
778        memarg: &MemArg,
779        addr: Location<Self::GPR, Self::SIMD>,
780        need_check: bool,
781        imported_memories: bool,
782        offset: i32,
783        heap_access_oob: Label,
784        unaligned_atomic: Label,
785    ) -> Result<(), CompileError>;
786    /// i32 save of the lower 8bits
787    #[allow(clippy::too_many_arguments)]
788    fn i32_save_8(
789        &mut self,
790        value: Location<Self::GPR, Self::SIMD>,
791        memarg: &MemArg,
792        addr: Location<Self::GPR, Self::SIMD>,
793        need_check: bool,
794        imported_memories: bool,
795        offset: i32,
796        heap_access_oob: Label,
797        unaligned_atomic: Label,
798    ) -> Result<(), CompileError>;
799    /// i32 save of the lower 16bits
800    #[allow(clippy::too_many_arguments)]
801    fn i32_save_16(
802        &mut self,
803        value: Location<Self::GPR, Self::SIMD>,
804        memarg: &MemArg,
805        addr: Location<Self::GPR, Self::SIMD>,
806        need_check: bool,
807        imported_memories: bool,
808        offset: i32,
809        heap_access_oob: Label,
810        unaligned_atomic: Label,
811    ) -> Result<(), CompileError>;
812    /// i32 atomic save
813    #[allow(clippy::too_many_arguments)]
814    fn i32_atomic_save(
815        &mut self,
816        value: Location<Self::GPR, Self::SIMD>,
817        memarg: &MemArg,
818        addr: Location<Self::GPR, Self::SIMD>,
819        need_check: bool,
820        imported_memories: bool,
821        offset: i32,
822        heap_access_oob: Label,
823        unaligned_atomic: Label,
824    ) -> Result<(), CompileError>;
    /// i32 atomic save of the lower 8bits
826    #[allow(clippy::too_many_arguments)]
827    fn i32_atomic_save_8(
828        &mut self,
829        value: Location<Self::GPR, Self::SIMD>,
830        memarg: &MemArg,
831        addr: Location<Self::GPR, Self::SIMD>,
832        need_check: bool,
833        imported_memories: bool,
834        offset: i32,
835        heap_access_oob: Label,
836        unaligned_atomic: Label,
837    ) -> Result<(), CompileError>;
    /// i32 atomic save of the lower 16bits
839    #[allow(clippy::too_many_arguments)]
840    fn i32_atomic_save_16(
841        &mut self,
842        value: Location<Self::GPR, Self::SIMD>,
843        memarg: &MemArg,
844        addr: Location<Self::GPR, Self::SIMD>,
845        need_check: bool,
846        imported_memories: bool,
847        offset: i32,
848        heap_access_oob: Label,
849        unaligned_atomic: Label,
850    ) -> Result<(), CompileError>;
851    /// i32 atomic Add with i32
852    #[allow(clippy::too_many_arguments)]
853    fn i32_atomic_add(
854        &mut self,
855        loc: Location<Self::GPR, Self::SIMD>,
856        target: Location<Self::GPR, Self::SIMD>,
857        memarg: &MemArg,
858        ret: Location<Self::GPR, Self::SIMD>,
859        need_check: bool,
860        imported_memories: bool,
861        offset: i32,
862        heap_access_oob: Label,
863        unaligned_atomic: Label,
864    ) -> Result<(), CompileError>;
865    /// i32 atomic Add with unsigned 8bits
866    #[allow(clippy::too_many_arguments)]
867    fn i32_atomic_add_8u(
868        &mut self,
869        loc: Location<Self::GPR, Self::SIMD>,
870        target: Location<Self::GPR, Self::SIMD>,
871        memarg: &MemArg,
872        ret: Location<Self::GPR, Self::SIMD>,
873        need_check: bool,
874        imported_memories: bool,
875        offset: i32,
876        heap_access_oob: Label,
877        unaligned_atomic: Label,
878    ) -> Result<(), CompileError>;
879    /// i32 atomic Add with unsigned 16bits
880    #[allow(clippy::too_many_arguments)]
881    fn i32_atomic_add_16u(
882        &mut self,
883        loc: Location<Self::GPR, Self::SIMD>,
884        target: Location<Self::GPR, Self::SIMD>,
885        memarg: &MemArg,
886        ret: Location<Self::GPR, Self::SIMD>,
887        need_check: bool,
888        imported_memories: bool,
889        offset: i32,
890        heap_access_oob: Label,
891        unaligned_atomic: Label,
892    ) -> Result<(), CompileError>;
893    /// i32 atomic Sub with i32
894    #[allow(clippy::too_many_arguments)]
895    fn i32_atomic_sub(
896        &mut self,
897        loc: Location<Self::GPR, Self::SIMD>,
898        target: Location<Self::GPR, Self::SIMD>,
899        memarg: &MemArg,
900        ret: Location<Self::GPR, Self::SIMD>,
901        need_check: bool,
902        imported_memories: bool,
903        offset: i32,
904        heap_access_oob: Label,
905        unaligned_atomic: Label,
906    ) -> Result<(), CompileError>;
907    /// i32 atomic Sub with unsigned 8bits
908    #[allow(clippy::too_many_arguments)]
909    fn i32_atomic_sub_8u(
910        &mut self,
911        loc: Location<Self::GPR, Self::SIMD>,
912        target: Location<Self::GPR, Self::SIMD>,
913        memarg: &MemArg,
914        ret: Location<Self::GPR, Self::SIMD>,
915        need_check: bool,
916        imported_memories: bool,
917        offset: i32,
918        heap_access_oob: Label,
919        unaligned_atomic: Label,
920    ) -> Result<(), CompileError>;
921    /// i32 atomic Sub with unsigned 16bits
922    #[allow(clippy::too_many_arguments)]
923    fn i32_atomic_sub_16u(
924        &mut self,
925        loc: Location<Self::GPR, Self::SIMD>,
926        target: Location<Self::GPR, Self::SIMD>,
927        memarg: &MemArg,
928        ret: Location<Self::GPR, Self::SIMD>,
929        need_check: bool,
930        imported_memories: bool,
931        offset: i32,
932        heap_access_oob: Label,
933        unaligned_atomic: Label,
934    ) -> Result<(), CompileError>;
935    /// i32 atomic And with i32
936    #[allow(clippy::too_many_arguments)]
937    fn i32_atomic_and(
938        &mut self,
939        loc: Location<Self::GPR, Self::SIMD>,
940        target: Location<Self::GPR, Self::SIMD>,
941        memarg: &MemArg,
942        ret: Location<Self::GPR, Self::SIMD>,
943        need_check: bool,
944        imported_memories: bool,
945        offset: i32,
946        heap_access_oob: Label,
947        unaligned_atomic: Label,
948    ) -> Result<(), CompileError>;
949    /// i32 atomic And with unsigned 8bits
950    #[allow(clippy::too_many_arguments)]
951    fn i32_atomic_and_8u(
952        &mut self,
953        loc: Location<Self::GPR, Self::SIMD>,
954        target: Location<Self::GPR, Self::SIMD>,
955        memarg: &MemArg,
956        ret: Location<Self::GPR, Self::SIMD>,
957        need_check: bool,
958        imported_memories: bool,
959        offset: i32,
960        heap_access_oob: Label,
961        unaligned_atomic: Label,
962    ) -> Result<(), CompileError>;
963    /// i32 atomic And with unsigned 16bits
964    #[allow(clippy::too_many_arguments)]
965    fn i32_atomic_and_16u(
966        &mut self,
967        loc: Location<Self::GPR, Self::SIMD>,
968        target: Location<Self::GPR, Self::SIMD>,
969        memarg: &MemArg,
970        ret: Location<Self::GPR, Self::SIMD>,
971        need_check: bool,
972        imported_memories: bool,
973        offset: i32,
974        heap_access_oob: Label,
975        unaligned_atomic: Label,
976    ) -> Result<(), CompileError>;
977    /// i32 atomic Or with i32
978    #[allow(clippy::too_many_arguments)]
979    fn i32_atomic_or(
980        &mut self,
981        loc: Location<Self::GPR, Self::SIMD>,
982        target: Location<Self::GPR, Self::SIMD>,
983        memarg: &MemArg,
984        ret: Location<Self::GPR, Self::SIMD>,
985        need_check: bool,
986        imported_memories: bool,
987        offset: i32,
988        heap_access_oob: Label,
989        unaligned_atomic: Label,
990    ) -> Result<(), CompileError>;
991    /// i32 atomic Or with unsigned 8bits
992    #[allow(clippy::too_many_arguments)]
993    fn i32_atomic_or_8u(
994        &mut self,
995        loc: Location<Self::GPR, Self::SIMD>,
996        target: Location<Self::GPR, Self::SIMD>,
997        memarg: &MemArg,
998        ret: Location<Self::GPR, Self::SIMD>,
999        need_check: bool,
1000        imported_memories: bool,
1001        offset: i32,
1002        heap_access_oob: Label,
1003        unaligned_atomic: Label,
1004    ) -> Result<(), CompileError>;
1005    /// i32 atomic Or with unsigned 16bits
1006    #[allow(clippy::too_many_arguments)]
1007    fn i32_atomic_or_16u(
1008        &mut self,
1009        loc: Location<Self::GPR, Self::SIMD>,
1010        target: Location<Self::GPR, Self::SIMD>,
1011        memarg: &MemArg,
1012        ret: Location<Self::GPR, Self::SIMD>,
1013        need_check: bool,
1014        imported_memories: bool,
1015        offset: i32,
1016        heap_access_oob: Label,
1017        unaligned_atomic: Label,
1018    ) -> Result<(), CompileError>;
1019    /// i32 atomic Xor with i32
1020    #[allow(clippy::too_many_arguments)]
1021    fn i32_atomic_xor(
1022        &mut self,
1023        loc: Location<Self::GPR, Self::SIMD>,
1024        target: Location<Self::GPR, Self::SIMD>,
1025        memarg: &MemArg,
1026        ret: Location<Self::GPR, Self::SIMD>,
1027        need_check: bool,
1028        imported_memories: bool,
1029        offset: i32,
1030        heap_access_oob: Label,
1031        unaligned_atomic: Label,
1032    ) -> Result<(), CompileError>;
1033    /// i32 atomic Xor with unsigned 8bits
1034    #[allow(clippy::too_many_arguments)]
1035    fn i32_atomic_xor_8u(
1036        &mut self,
1037        loc: Location<Self::GPR, Self::SIMD>,
1038        target: Location<Self::GPR, Self::SIMD>,
1039        memarg: &MemArg,
1040        ret: Location<Self::GPR, Self::SIMD>,
1041        need_check: bool,
1042        imported_memories: bool,
1043        offset: i32,
1044        heap_access_oob: Label,
1045        unaligned_atomic: Label,
1046    ) -> Result<(), CompileError>;
1047    /// i32 atomic Xor with unsigned 16bits
1048    #[allow(clippy::too_many_arguments)]
1049    fn i32_atomic_xor_16u(
1050        &mut self,
1051        loc: Location<Self::GPR, Self::SIMD>,
1052        target: Location<Self::GPR, Self::SIMD>,
1053        memarg: &MemArg,
1054        ret: Location<Self::GPR, Self::SIMD>,
1055        need_check: bool,
1056        imported_memories: bool,
1057        offset: i32,
1058        heap_access_oob: Label,
1059        unaligned_atomic: Label,
1060    ) -> Result<(), CompileError>;
1061    /// i32 atomic Exchange with i32
1062    #[allow(clippy::too_many_arguments)]
1063    fn i32_atomic_xchg(
1064        &mut self,
1065        loc: Location<Self::GPR, Self::SIMD>,
1066        target: Location<Self::GPR, Self::SIMD>,
1067        memarg: &MemArg,
1068        ret: Location<Self::GPR, Self::SIMD>,
1069        need_check: bool,
1070        imported_memories: bool,
1071        offset: i32,
1072        heap_access_oob: Label,
1073        unaligned_atomic: Label,
1074    ) -> Result<(), CompileError>;
1075    /// i32 atomic Exchange with u8
1076    #[allow(clippy::too_many_arguments)]
1077    fn i32_atomic_xchg_8u(
1078        &mut self,
1079        loc: Location<Self::GPR, Self::SIMD>,
1080        target: Location<Self::GPR, Self::SIMD>,
1081        memarg: &MemArg,
1082        ret: Location<Self::GPR, Self::SIMD>,
1083        need_check: bool,
1084        imported_memories: bool,
1085        offset: i32,
1086        heap_access_oob: Label,
1087        unaligned_atomic: Label,
1088    ) -> Result<(), CompileError>;
1089    /// i32 atomic Exchange with u16
1090    #[allow(clippy::too_many_arguments)]
1091    fn i32_atomic_xchg_16u(
1092        &mut self,
1093        loc: Location<Self::GPR, Self::SIMD>,
1094        target: Location<Self::GPR, Self::SIMD>,
1095        memarg: &MemArg,
1096        ret: Location<Self::GPR, Self::SIMD>,
1097        need_check: bool,
1098        imported_memories: bool,
1099        offset: i32,
1100        heap_access_oob: Label,
1101        unaligned_atomic: Label,
1102    ) -> Result<(), CompileError>;
1103    /// i32 atomic Compare and Exchange with i32
1104    #[allow(clippy::too_many_arguments)]
1105    fn i32_atomic_cmpxchg(
1106        &mut self,
1107        new: Location<Self::GPR, Self::SIMD>,
1108        cmp: Location<Self::GPR, Self::SIMD>,
1109        target: Location<Self::GPR, Self::SIMD>,
1110        memarg: &MemArg,
1111        ret: Location<Self::GPR, Self::SIMD>,
1112        need_check: bool,
1113        imported_memories: bool,
1114        offset: i32,
1115        heap_access_oob: Label,
1116        unaligned_atomic: Label,
1117    ) -> Result<(), CompileError>;
1118    /// i32 atomic Compare and Exchange with u8
1119    #[allow(clippy::too_many_arguments)]
1120    fn i32_atomic_cmpxchg_8u(
1121        &mut self,
1122        new: Location<Self::GPR, Self::SIMD>,
1123        cmp: Location<Self::GPR, Self::SIMD>,
1124        target: Location<Self::GPR, Self::SIMD>,
1125        memarg: &MemArg,
1126        ret: Location<Self::GPR, Self::SIMD>,
1127        need_check: bool,
1128        imported_memories: bool,
1129        offset: i32,
1130        heap_access_oob: Label,
1131        unaligned_atomic: Label,
1132    ) -> Result<(), CompileError>;
1133    /// i32 atomic Compare and Exchange with u16
1134    #[allow(clippy::too_many_arguments)]
1135    fn i32_atomic_cmpxchg_16u(
1136        &mut self,
1137        new: Location<Self::GPR, Self::SIMD>,
1138        cmp: Location<Self::GPR, Self::SIMD>,
1139        target: Location<Self::GPR, Self::SIMD>,
1140        memarg: &MemArg,
1141        ret: Location<Self::GPR, Self::SIMD>,
1142        need_check: bool,
1143        imported_memories: bool,
1144        offset: i32,
1145        heap_access_oob: Label,
1146        unaligned_atomic: Label,
1147    ) -> Result<(), CompileError>;
1148
    /// Emit a move of a function address into a GPR ready for call, using the appropriate relocation
1150    fn emit_call_with_reloc(
1151        &mut self,
1152        calling_convention: CallingConvention,
1153        reloc_target: RelocationTarget,
1154    ) -> Result<Vec<Relocation>, CompileError>;
1155    /// Add with location directly from the stack
1156    fn emit_binop_add64(
1157        &mut self,
1158        loc_a: Location<Self::GPR, Self::SIMD>,
1159        loc_b: Location<Self::GPR, Self::SIMD>,
1160        ret: Location<Self::GPR, Self::SIMD>,
1161    ) -> Result<(), CompileError>;
1162    /// Sub with location directly from the stack
1163    fn emit_binop_sub64(
1164        &mut self,
1165        loc_a: Location<Self::GPR, Self::SIMD>,
1166        loc_b: Location<Self::GPR, Self::SIMD>,
1167        ret: Location<Self::GPR, Self::SIMD>,
1168    ) -> Result<(), CompileError>;
1169    /// Multiply with location directly from the stack
1170    fn emit_binop_mul64(
1171        &mut self,
1172        loc_a: Location<Self::GPR, Self::SIMD>,
1173        loc_b: Location<Self::GPR, Self::SIMD>,
1174        ret: Location<Self::GPR, Self::SIMD>,
1175    ) -> Result<(), CompileError>;
1176    /// Unsigned Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
1177    fn emit_binop_udiv64(
1178        &mut self,
1179        loc_a: Location<Self::GPR, Self::SIMD>,
1180        loc_b: Location<Self::GPR, Self::SIMD>,
1181        ret: Location<Self::GPR, Self::SIMD>,
1182        integer_division_by_zero: Label,
1183        integer_overflow: Label,
1184    ) -> Result<usize, CompileError>;
1185    /// Signed Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
1186    fn emit_binop_sdiv64(
1187        &mut self,
1188        loc_a: Location<Self::GPR, Self::SIMD>,
1189        loc_b: Location<Self::GPR, Self::SIMD>,
1190        ret: Location<Self::GPR, Self::SIMD>,
1191        integer_division_by_zero: Label,
1192        integer_overflow: Label,
1193    ) -> Result<usize, CompileError>;
    /// Unsigned Remainder (of a division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
1195    fn emit_binop_urem64(
1196        &mut self,
1197        loc_a: Location<Self::GPR, Self::SIMD>,
1198        loc_b: Location<Self::GPR, Self::SIMD>,
1199        ret: Location<Self::GPR, Self::SIMD>,
1200        integer_division_by_zero: Label,
1201        integer_overflow: Label,
1202    ) -> Result<usize, CompileError>;
    /// Signed Remainder (of a Division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
1204    fn emit_binop_srem64(
1205        &mut self,
1206        loc_a: Location<Self::GPR, Self::SIMD>,
1207        loc_b: Location<Self::GPR, Self::SIMD>,
1208        ret: Location<Self::GPR, Self::SIMD>,
1209        integer_division_by_zero: Label,
1210        integer_overflow: Label,
1211    ) -> Result<usize, CompileError>;
1212    /// And with location directly from the stack
1213    fn emit_binop_and64(
1214        &mut self,
1215        loc_a: Location<Self::GPR, Self::SIMD>,
1216        loc_b: Location<Self::GPR, Self::SIMD>,
1217        ret: Location<Self::GPR, Self::SIMD>,
1218    ) -> Result<(), CompileError>;
1219    /// Or with location directly from the stack
1220    fn emit_binop_or64(
1221        &mut self,
1222        loc_a: Location<Self::GPR, Self::SIMD>,
1223        loc_b: Location<Self::GPR, Self::SIMD>,
1224        ret: Location<Self::GPR, Self::SIMD>,
1225    ) -> Result<(), CompileError>;
1226    /// Xor with location directly from the stack
1227    fn emit_binop_xor64(
1228        &mut self,
1229        loc_a: Location<Self::GPR, Self::SIMD>,
1230        loc_b: Location<Self::GPR, Self::SIMD>,
1231        ret: Location<Self::GPR, Self::SIMD>,
1232    ) -> Result<(), CompileError>;
    /// Signed Greater or Equal Compare 2 i64, result in a GPR
1234    fn i64_cmp_ge_s(
1235        &mut self,
1236        loc_a: Location<Self::GPR, Self::SIMD>,
1237        loc_b: Location<Self::GPR, Self::SIMD>,
1238        ret: Location<Self::GPR, Self::SIMD>,
1239    ) -> Result<(), CompileError>;
1240    /// Signed Greater Than Compare 2 i64, result in a GPR
1241    fn i64_cmp_gt_s(
1242        &mut self,
1243        loc_a: Location<Self::GPR, Self::SIMD>,
1244        loc_b: Location<Self::GPR, Self::SIMD>,
1245        ret: Location<Self::GPR, Self::SIMD>,
1246    ) -> Result<(), CompileError>;
    /// Signed Less or Equal Compare 2 i64, result in a GPR
1248    fn i64_cmp_le_s(
1249        &mut self,
1250        loc_a: Location<Self::GPR, Self::SIMD>,
1251        loc_b: Location<Self::GPR, Self::SIMD>,
1252        ret: Location<Self::GPR, Self::SIMD>,
1253    ) -> Result<(), CompileError>;
1254    /// Signed Less Than Compare 2 i64, result in a GPR
1255    fn i64_cmp_lt_s(
1256        &mut self,
1257        loc_a: Location<Self::GPR, Self::SIMD>,
1258        loc_b: Location<Self::GPR, Self::SIMD>,
1259        ret: Location<Self::GPR, Self::SIMD>,
1260    ) -> Result<(), CompileError>;
    /// Unsigned Greater or Equal Compare 2 i64, result in a GPR
1262    fn i64_cmp_ge_u(
1263        &mut self,
1264        loc_a: Location<Self::GPR, Self::SIMD>,
1265        loc_b: Location<Self::GPR, Self::SIMD>,
1266        ret: Location<Self::GPR, Self::SIMD>,
1267    ) -> Result<(), CompileError>;
1268    /// Unsigned Greater Than Compare 2 i64, result in a GPR
1269    fn i64_cmp_gt_u(
1270        &mut self,
1271        loc_a: Location<Self::GPR, Self::SIMD>,
1272        loc_b: Location<Self::GPR, Self::SIMD>,
1273        ret: Location<Self::GPR, Self::SIMD>,
1274    ) -> Result<(), CompileError>;
    /// Unsigned Less or Equal Compare 2 i64, result in a GPR
1276    fn i64_cmp_le_u(
1277        &mut self,
1278        loc_a: Location<Self::GPR, Self::SIMD>,
1279        loc_b: Location<Self::GPR, Self::SIMD>,
1280        ret: Location<Self::GPR, Self::SIMD>,
1281    ) -> Result<(), CompileError>;
1282    /// Unsigned Less Than Compare 2 i64, result in a GPR
1283    fn i64_cmp_lt_u(
1284        &mut self,
1285        loc_a: Location<Self::GPR, Self::SIMD>,
1286        loc_b: Location<Self::GPR, Self::SIMD>,
1287        ret: Location<Self::GPR, Self::SIMD>,
1288    ) -> Result<(), CompileError>;
1289    /// Not Equal Compare 2 i64, result in a GPR
1290    fn i64_cmp_ne(
1291        &mut self,
1292        loc_a: Location<Self::GPR, Self::SIMD>,
1293        loc_b: Location<Self::GPR, Self::SIMD>,
1294        ret: Location<Self::GPR, Self::SIMD>,
1295    ) -> Result<(), CompileError>;
1296    /// Equal Compare 2 i64, result in a GPR
1297    fn i64_cmp_eq(
1298        &mut self,
1299        loc_a: Location<Self::GPR, Self::SIMD>,
1300        loc_b: Location<Self::GPR, Self::SIMD>,
1301        ret: Location<Self::GPR, Self::SIMD>,
1302    ) -> Result<(), CompileError>;
1303    /// Count Leading 0 bit of an i64
1304    fn i64_clz(
1305        &mut self,
1306        loc: Location<Self::GPR, Self::SIMD>,
1307        ret: Location<Self::GPR, Self::SIMD>,
1308    ) -> Result<(), CompileError>;
    /// Count Trailing 0 bit of an i64
1310    fn i64_ctz(
1311        &mut self,
1312        loc: Location<Self::GPR, Self::SIMD>,
1313        ret: Location<Self::GPR, Self::SIMD>,
1314    ) -> Result<(), CompileError>;
1315    /// Count the number of 1 bit of an i64
1316    fn i64_popcnt(
1317        &mut self,
1318        loc: Location<Self::GPR, Self::SIMD>,
1319        ret: Location<Self::GPR, Self::SIMD>,
1320    ) -> Result<(), CompileError>;
1321    /// i64 Logical Shift Left
1322    fn i64_shl(
1323        &mut self,
1324        loc_a: Location<Self::GPR, Self::SIMD>,
1325        loc_b: Location<Self::GPR, Self::SIMD>,
1326        ret: Location<Self::GPR, Self::SIMD>,
1327    ) -> Result<(), CompileError>;
1328    /// i64 Logical Shift Right
1329    fn i64_shr(
1330        &mut self,
1331        loc_a: Location<Self::GPR, Self::SIMD>,
1332        loc_b: Location<Self::GPR, Self::SIMD>,
1333        ret: Location<Self::GPR, Self::SIMD>,
1334    ) -> Result<(), CompileError>;
1335    /// i64 Arithmetic Shift Right
1336    fn i64_sar(
1337        &mut self,
1338        loc_a: Location<Self::GPR, Self::SIMD>,
1339        loc_b: Location<Self::GPR, Self::SIMD>,
1340        ret: Location<Self::GPR, Self::SIMD>,
1341    ) -> Result<(), CompileError>;
1342    /// i64 Roll Left
1343    fn i64_rol(
1344        &mut self,
1345        loc_a: Location<Self::GPR, Self::SIMD>,
1346        loc_b: Location<Self::GPR, Self::SIMD>,
1347        ret: Location<Self::GPR, Self::SIMD>,
1348    ) -> Result<(), CompileError>;
1349    /// i64 Roll Right
1350    fn i64_ror(
1351        &mut self,
1352        loc_a: Location<Self::GPR, Self::SIMD>,
1353        loc_b: Location<Self::GPR, Self::SIMD>,
1354        ret: Location<Self::GPR, Self::SIMD>,
1355    ) -> Result<(), CompileError>;
1356    /// i64 load
1357    #[allow(clippy::too_many_arguments)]
1358    fn i64_load(
1359        &mut self,
1360        addr: Location<Self::GPR, Self::SIMD>,
1361        memarg: &MemArg,
1362        ret: Location<Self::GPR, Self::SIMD>,
1363        need_check: bool,
1364        imported_memories: bool,
1365        offset: i32,
1366        heap_access_oob: Label,
1367        unaligned_atomic: Label,
1368    ) -> Result<(), CompileError>;
1369    /// i64 load of an unsigned 8bits
1370    #[allow(clippy::too_many_arguments)]
1371    fn i64_load_8u(
1372        &mut self,
1373        addr: Location<Self::GPR, Self::SIMD>,
1374        memarg: &MemArg,
1375        ret: Location<Self::GPR, Self::SIMD>,
1376        need_check: bool,
1377        imported_memories: bool,
1378        offset: i32,
1379        heap_access_oob: Label,
1380        unaligned_atomic: Label,
1381    ) -> Result<(), CompileError>;
    /// i64 load of a signed 8bits
1383    #[allow(clippy::too_many_arguments)]
1384    fn i64_load_8s(
1385        &mut self,
1386        addr: Location<Self::GPR, Self::SIMD>,
1387        memarg: &MemArg,
1388        ret: Location<Self::GPR, Self::SIMD>,
1389        need_check: bool,
1390        imported_memories: bool,
1391        offset: i32,
1392        heap_access_oob: Label,
1393        unaligned_atomic: Label,
1394    ) -> Result<(), CompileError>;
1395    /// i64 load of an unsigned 32bits
1396    #[allow(clippy::too_many_arguments)]
1397    fn i64_load_32u(
1398        &mut self,
1399        addr: Location<Self::GPR, Self::SIMD>,
1400        memarg: &MemArg,
1401        ret: Location<Self::GPR, Self::SIMD>,
1402        need_check: bool,
1403        imported_memories: bool,
1404        offset: i32,
1405        heap_access_oob: Label,
1406        unaligned_atomic: Label,
1407    ) -> Result<(), CompileError>;
    /// i64 load of a signed 32bits
1409    #[allow(clippy::too_many_arguments)]
1410    fn i64_load_32s(
1411        &mut self,
1412        addr: Location<Self::GPR, Self::SIMD>,
1413        memarg: &MemArg,
1414        ret: Location<Self::GPR, Self::SIMD>,
1415        need_check: bool,
1416        imported_memories: bool,
1417        offset: i32,
1418        heap_access_oob: Label,
1419        unaligned_atomic: Label,
1420    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 16bits
1422    #[allow(clippy::too_many_arguments)]
1423    fn i64_load_16u(
1424        &mut self,
1425        addr: Location<Self::GPR, Self::SIMD>,
1426        memarg: &MemArg,
1427        ret: Location<Self::GPR, Self::SIMD>,
1428        need_check: bool,
1429        imported_memories: bool,
1430        offset: i32,
1431        heap_access_oob: Label,
1432        unaligned_atomic: Label,
1433    ) -> Result<(), CompileError>;
    /// i64 load of a signed 16bits
1435    #[allow(clippy::too_many_arguments)]
1436    fn i64_load_16s(
1437        &mut self,
1438        addr: Location<Self::GPR, Self::SIMD>,
1439        memarg: &MemArg,
1440        ret: Location<Self::GPR, Self::SIMD>,
1441        need_check: bool,
1442        imported_memories: bool,
1443        offset: i32,
1444        heap_access_oob: Label,
1445        unaligned_atomic: Label,
1446    ) -> Result<(), CompileError>;
1447    /// i64 atomic load
1448    #[allow(clippy::too_many_arguments)]
1449    fn i64_atomic_load(
1450        &mut self,
1451        addr: Location<Self::GPR, Self::SIMD>,
1452        memarg: &MemArg,
1453        ret: Location<Self::GPR, Self::SIMD>,
1454        need_check: bool,
1455        imported_memories: bool,
1456        offset: i32,
1457        heap_access_oob: Label,
1458        unaligned_atomic: Label,
1459    ) -> Result<(), CompileError>;
1460    /// i64 atomic load from unsigned 8bits
1461    #[allow(clippy::too_many_arguments)]
1462    fn i64_atomic_load_8u(
1463        &mut self,
1464        addr: Location<Self::GPR, Self::SIMD>,
1465        memarg: &MemArg,
1466        ret: Location<Self::GPR, Self::SIMD>,
1467        need_check: bool,
1468        imported_memories: bool,
1469        offset: i32,
1470        heap_access_oob: Label,
1471        unaligned_atomic: Label,
1472    ) -> Result<(), CompileError>;
1473    /// i64 atomic load from unsigned 16bits
1474    #[allow(clippy::too_many_arguments)]
1475    fn i64_atomic_load_16u(
1476        &mut self,
1477        addr: Location<Self::GPR, Self::SIMD>,
1478        memarg: &MemArg,
1479        ret: Location<Self::GPR, Self::SIMD>,
1480        need_check: bool,
1481        imported_memories: bool,
1482        offset: i32,
1483        heap_access_oob: Label,
1484        unaligned_atomic: Label,
1485    ) -> Result<(), CompileError>;
1486    /// i64 atomic load from unsigned 32bits
1487    #[allow(clippy::too_many_arguments)]
1488    fn i64_atomic_load_32u(
1489        &mut self,
1490        addr: Location<Self::GPR, Self::SIMD>,
1491        memarg: &MemArg,
1492        ret: Location<Self::GPR, Self::SIMD>,
1493        need_check: bool,
1494        imported_memories: bool,
1495        offset: i32,
1496        heap_access_oob: Label,
1497        unaligned_atomic: Label,
1498    ) -> Result<(), CompileError>;
1499    /// i64 save
1500    #[allow(clippy::too_many_arguments)]
1501    fn i64_save(
1502        &mut self,
1503        value: Location<Self::GPR, Self::SIMD>,
1504        memarg: &MemArg,
1505        addr: Location<Self::GPR, Self::SIMD>,
1506        need_check: bool,
1507        imported_memories: bool,
1508        offset: i32,
1509        heap_access_oob: Label,
1510        unaligned_atomic: Label,
1511    ) -> Result<(), CompileError>;
1512    /// i64 save of the lower 8bits
1513    #[allow(clippy::too_many_arguments)]
1514    fn i64_save_8(
1515        &mut self,
1516        value: Location<Self::GPR, Self::SIMD>,
1517        memarg: &MemArg,
1518        addr: Location<Self::GPR, Self::SIMD>,
1519        need_check: bool,
1520        imported_memories: bool,
1521        offset: i32,
1522        heap_access_oob: Label,
1523        unaligned_atomic: Label,
1524    ) -> Result<(), CompileError>;
1525    /// i64 save of the lower 16bits
1526    #[allow(clippy::too_many_arguments)]
1527    fn i64_save_16(
1528        &mut self,
1529        value: Location<Self::GPR, Self::SIMD>,
1530        memarg: &MemArg,
1531        addr: Location<Self::GPR, Self::SIMD>,
1532        need_check: bool,
1533        imported_memories: bool,
1534        offset: i32,
1535        heap_access_oob: Label,
1536        unaligned_atomic: Label,
1537    ) -> Result<(), CompileError>;
1538    /// i64 save of the lower 32bits
1539    #[allow(clippy::too_many_arguments)]
1540    fn i64_save_32(
1541        &mut self,
1542        value: Location<Self::GPR, Self::SIMD>,
1543        memarg: &MemArg,
1544        addr: Location<Self::GPR, Self::SIMD>,
1545        need_check: bool,
1546        imported_memories: bool,
1547        offset: i32,
1548        heap_access_oob: Label,
1549        unaligned_atomic: Label,
1550    ) -> Result<(), CompileError>;
1551    /// i64 atomic save
1552    #[allow(clippy::too_many_arguments)]
1553    fn i64_atomic_save(
1554        &mut self,
1555        value: Location<Self::GPR, Self::SIMD>,
1556        memarg: &MemArg,
1557        addr: Location<Self::GPR, Self::SIMD>,
1558        need_check: bool,
1559        imported_memories: bool,
1560        offset: i32,
1561        heap_access_oob: Label,
1562        unaligned_atomic: Label,
1563    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 8bits
1565    #[allow(clippy::too_many_arguments)]
1566    fn i64_atomic_save_8(
1567        &mut self,
1568        value: Location<Self::GPR, Self::SIMD>,
1569        memarg: &MemArg,
1570        addr: Location<Self::GPR, Self::SIMD>,
1571        need_check: bool,
1572        imported_memories: bool,
1573        offset: i32,
1574        heap_access_oob: Label,
1575        unaligned_atomic: Label,
1576    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 16bits
1578    #[allow(clippy::too_many_arguments)]
1579    fn i64_atomic_save_16(
1580        &mut self,
1581        value: Location<Self::GPR, Self::SIMD>,
1582        memarg: &MemArg,
1583        addr: Location<Self::GPR, Self::SIMD>,
1584        need_check: bool,
1585        imported_memories: bool,
1586        offset: i32,
1587        heap_access_oob: Label,
1588        unaligned_atomic: Label,
1589    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 32bits
1591    #[allow(clippy::too_many_arguments)]
1592    fn i64_atomic_save_32(
1593        &mut self,
1594        value: Location<Self::GPR, Self::SIMD>,
1595        memarg: &MemArg,
1596        addr: Location<Self::GPR, Self::SIMD>,
1597        need_check: bool,
1598        imported_memories: bool,
1599        offset: i32,
1600        heap_access_oob: Label,
1601        unaligned_atomic: Label,
1602    ) -> Result<(), CompileError>;
1603    /// i64 atomic Add with i64
1604    #[allow(clippy::too_many_arguments)]
1605    fn i64_atomic_add(
1606        &mut self,
1607        loc: Location<Self::GPR, Self::SIMD>,
1608        target: Location<Self::GPR, Self::SIMD>,
1609        memarg: &MemArg,
1610        ret: Location<Self::GPR, Self::SIMD>,
1611        need_check: bool,
1612        imported_memories: bool,
1613        offset: i32,
1614        heap_access_oob: Label,
1615        unaligned_atomic: Label,
1616    ) -> Result<(), CompileError>;
1617    /// i64 atomic Add with unsigned 8bits
1618    #[allow(clippy::too_many_arguments)]
1619    fn i64_atomic_add_8u(
1620        &mut self,
1621        loc: Location<Self::GPR, Self::SIMD>,
1622        target: Location<Self::GPR, Self::SIMD>,
1623        memarg: &MemArg,
1624        ret: Location<Self::GPR, Self::SIMD>,
1625        need_check: bool,
1626        imported_memories: bool,
1627        offset: i32,
1628        heap_access_oob: Label,
1629        unaligned_atomic: Label,
1630    ) -> Result<(), CompileError>;
1631    /// i64 atomic Add with unsigned 16bits
1632    #[allow(clippy::too_many_arguments)]
1633    fn i64_atomic_add_16u(
1634        &mut self,
1635        loc: Location<Self::GPR, Self::SIMD>,
1636        target: Location<Self::GPR, Self::SIMD>,
1637        memarg: &MemArg,
1638        ret: Location<Self::GPR, Self::SIMD>,
1639        need_check: bool,
1640        imported_memories: bool,
1641        offset: i32,
1642        heap_access_oob: Label,
1643        unaligned_atomic: Label,
1644    ) -> Result<(), CompileError>;
1645    /// i64 atomic Add with unsigned 32bits
1646    #[allow(clippy::too_many_arguments)]
1647    fn i64_atomic_add_32u(
1648        &mut self,
1649        loc: Location<Self::GPR, Self::SIMD>,
1650        target: Location<Self::GPR, Self::SIMD>,
1651        memarg: &MemArg,
1652        ret: Location<Self::GPR, Self::SIMD>,
1653        need_check: bool,
1654        imported_memories: bool,
1655        offset: i32,
1656        heap_access_oob: Label,
1657        unaligned_atomic: Label,
1658    ) -> Result<(), CompileError>;
1659    /// i64 atomic Sub with i64
1660    #[allow(clippy::too_many_arguments)]
1661    fn i64_atomic_sub(
1662        &mut self,
1663        loc: Location<Self::GPR, Self::SIMD>,
1664        target: Location<Self::GPR, Self::SIMD>,
1665        memarg: &MemArg,
1666        ret: Location<Self::GPR, Self::SIMD>,
1667        need_check: bool,
1668        imported_memories: bool,
1669        offset: i32,
1670        heap_access_oob: Label,
1671        unaligned_atomic: Label,
1672    ) -> Result<(), CompileError>;
1673    /// i64 atomic Sub with unsigned 8bits
1674    #[allow(clippy::too_many_arguments)]
1675    fn i64_atomic_sub_8u(
1676        &mut self,
1677        loc: Location<Self::GPR, Self::SIMD>,
1678        target: Location<Self::GPR, Self::SIMD>,
1679        memarg: &MemArg,
1680        ret: Location<Self::GPR, Self::SIMD>,
1681        need_check: bool,
1682        imported_memories: bool,
1683        offset: i32,
1684        heap_access_oob: Label,
1685        unaligned_atomic: Label,
1686    ) -> Result<(), CompileError>;
1687    /// i64 atomic Sub with unsigned 16bits
1688    #[allow(clippy::too_many_arguments)]
1689    fn i64_atomic_sub_16u(
1690        &mut self,
1691        loc: Location<Self::GPR, Self::SIMD>,
1692        target: Location<Self::GPR, Self::SIMD>,
1693        memarg: &MemArg,
1694        ret: Location<Self::GPR, Self::SIMD>,
1695        need_check: bool,
1696        imported_memories: bool,
1697        offset: i32,
1698        heap_access_oob: Label,
1699        unaligned_atomic: Label,
1700    ) -> Result<(), CompileError>;
1701    /// i64 atomic Sub with unsigned 32bits
1702    #[allow(clippy::too_many_arguments)]
1703    fn i64_atomic_sub_32u(
1704        &mut self,
1705        loc: Location<Self::GPR, Self::SIMD>,
1706        target: Location<Self::GPR, Self::SIMD>,
1707        memarg: &MemArg,
1708        ret: Location<Self::GPR, Self::SIMD>,
1709        need_check: bool,
1710        imported_memories: bool,
1711        offset: i32,
1712        heap_access_oob: Label,
1713        unaligned_atomic: Label,
1714    ) -> Result<(), CompileError>;
1715    /// i64 atomic And with i64
1716    #[allow(clippy::too_many_arguments)]
1717    fn i64_atomic_and(
1718        &mut self,
1719        loc: Location<Self::GPR, Self::SIMD>,
1720        target: Location<Self::GPR, Self::SIMD>,
1721        memarg: &MemArg,
1722        ret: Location<Self::GPR, Self::SIMD>,
1723        need_check: bool,
1724        imported_memories: bool,
1725        offset: i32,
1726        heap_access_oob: Label,
1727        unaligned_atomic: Label,
1728    ) -> Result<(), CompileError>;
1729    /// i64 atomic And with unsigned 8bits
1730    #[allow(clippy::too_many_arguments)]
1731    fn i64_atomic_and_8u(
1732        &mut self,
1733        loc: Location<Self::GPR, Self::SIMD>,
1734        target: Location<Self::GPR, Self::SIMD>,
1735        memarg: &MemArg,
1736        ret: Location<Self::GPR, Self::SIMD>,
1737        need_check: bool,
1738        imported_memories: bool,
1739        offset: i32,
1740        heap_access_oob: Label,
1741        unaligned_atomic: Label,
1742    ) -> Result<(), CompileError>;
1743    /// i64 atomic And with unsigned 16bits
1744    #[allow(clippy::too_many_arguments)]
1745    fn i64_atomic_and_16u(
1746        &mut self,
1747        loc: Location<Self::GPR, Self::SIMD>,
1748        target: Location<Self::GPR, Self::SIMD>,
1749        memarg: &MemArg,
1750        ret: Location<Self::GPR, Self::SIMD>,
1751        need_check: bool,
1752        imported_memories: bool,
1753        offset: i32,
1754        heap_access_oob: Label,
1755        unaligned_atomic: Label,
1756    ) -> Result<(), CompileError>;
1757    /// i64 atomic And with unsigned 32bits
1758    #[allow(clippy::too_many_arguments)]
1759    fn i64_atomic_and_32u(
1760        &mut self,
1761        loc: Location<Self::GPR, Self::SIMD>,
1762        target: Location<Self::GPR, Self::SIMD>,
1763        memarg: &MemArg,
1764        ret: Location<Self::GPR, Self::SIMD>,
1765        need_check: bool,
1766        imported_memories: bool,
1767        offset: i32,
1768        heap_access_oob: Label,
1769        unaligned_atomic: Label,
1770    ) -> Result<(), CompileError>;
1771    /// i64 atomic Or with i64
1772    #[allow(clippy::too_many_arguments)]
1773    fn i64_atomic_or(
1774        &mut self,
1775        loc: Location<Self::GPR, Self::SIMD>,
1776        target: Location<Self::GPR, Self::SIMD>,
1777        memarg: &MemArg,
1778        ret: Location<Self::GPR, Self::SIMD>,
1779        need_check: bool,
1780        imported_memories: bool,
1781        offset: i32,
1782        heap_access_oob: Label,
1783        unaligned_atomic: Label,
1784    ) -> Result<(), CompileError>;
1785    /// i64 atomic Or with unsigned 8bits
1786    #[allow(clippy::too_many_arguments)]
1787    fn i64_atomic_or_8u(
1788        &mut self,
1789        loc: Location<Self::GPR, Self::SIMD>,
1790        target: Location<Self::GPR, Self::SIMD>,
1791        memarg: &MemArg,
1792        ret: Location<Self::GPR, Self::SIMD>,
1793        need_check: bool,
1794        imported_memories: bool,
1795        offset: i32,
1796        heap_access_oob: Label,
1797        unaligned_atomic: Label,
1798    ) -> Result<(), CompileError>;
1799    /// i64 atomic Or with unsigned 16bits
1800    #[allow(clippy::too_many_arguments)]
1801    fn i64_atomic_or_16u(
1802        &mut self,
1803        loc: Location<Self::GPR, Self::SIMD>,
1804        target: Location<Self::GPR, Self::SIMD>,
1805        memarg: &MemArg,
1806        ret: Location<Self::GPR, Self::SIMD>,
1807        need_check: bool,
1808        imported_memories: bool,
1809        offset: i32,
1810        heap_access_oob: Label,
1811        unaligned_atomic: Label,
1812    ) -> Result<(), CompileError>;
1813    /// i64 atomic Or with unsigned 32bits
1814    #[allow(clippy::too_many_arguments)]
1815    fn i64_atomic_or_32u(
1816        &mut self,
1817        loc: Location<Self::GPR, Self::SIMD>,
1818        target: Location<Self::GPR, Self::SIMD>,
1819        memarg: &MemArg,
1820        ret: Location<Self::GPR, Self::SIMD>,
1821        need_check: bool,
1822        imported_memories: bool,
1823        offset: i32,
1824        heap_access_oob: Label,
1825        unaligned_atomic: Label,
1826    ) -> Result<(), CompileError>;
1827    /// i64 atomic Xor with i64
1828    #[allow(clippy::too_many_arguments)]
1829    fn i64_atomic_xor(
1830        &mut self,
1831        loc: Location<Self::GPR, Self::SIMD>,
1832        target: Location<Self::GPR, Self::SIMD>,
1833        memarg: &MemArg,
1834        ret: Location<Self::GPR, Self::SIMD>,
1835        need_check: bool,
1836        imported_memories: bool,
1837        offset: i32,
1838        heap_access_oob: Label,
1839        unaligned_atomic: Label,
1840    ) -> Result<(), CompileError>;
1841    /// i64 atomic Xor with unsigned 8bits
1842    #[allow(clippy::too_many_arguments)]
1843    fn i64_atomic_xor_8u(
1844        &mut self,
1845        loc: Location<Self::GPR, Self::SIMD>,
1846        target: Location<Self::GPR, Self::SIMD>,
1847        memarg: &MemArg,
1848        ret: Location<Self::GPR, Self::SIMD>,
1849        need_check: bool,
1850        imported_memories: bool,
1851        offset: i32,
1852        heap_access_oob: Label,
1853        unaligned_atomic: Label,
1854    ) -> Result<(), CompileError>;
1855    /// i64 atomic Xor with unsigned 16bits
1856    #[allow(clippy::too_many_arguments)]
1857    fn i64_atomic_xor_16u(
1858        &mut self,
1859        loc: Location<Self::GPR, Self::SIMD>,
1860        target: Location<Self::GPR, Self::SIMD>,
1861        memarg: &MemArg,
1862        ret: Location<Self::GPR, Self::SIMD>,
1863        need_check: bool,
1864        imported_memories: bool,
1865        offset: i32,
1866        heap_access_oob: Label,
1867        unaligned_atomic: Label,
1868    ) -> Result<(), CompileError>;
1869    /// i64 atomic Xor with unsigned 32bits
1870    #[allow(clippy::too_many_arguments)]
1871    fn i64_atomic_xor_32u(
1872        &mut self,
1873        loc: Location<Self::GPR, Self::SIMD>,
1874        target: Location<Self::GPR, Self::SIMD>,
1875        memarg: &MemArg,
1876        ret: Location<Self::GPR, Self::SIMD>,
1877        need_check: bool,
1878        imported_memories: bool,
1879        offset: i32,
1880        heap_access_oob: Label,
1881        unaligned_atomic: Label,
1882    ) -> Result<(), CompileError>;
1883    /// i64 atomic Exchange with i64
1884    #[allow(clippy::too_many_arguments)]
1885    fn i64_atomic_xchg(
1886        &mut self,
1887        loc: Location<Self::GPR, Self::SIMD>,
1888        target: Location<Self::GPR, Self::SIMD>,
1889        memarg: &MemArg,
1890        ret: Location<Self::GPR, Self::SIMD>,
1891        need_check: bool,
1892        imported_memories: bool,
1893        offset: i32,
1894        heap_access_oob: Label,
1895        unaligned_atomic: Label,
1896    ) -> Result<(), CompileError>;
1897    /// i64 atomic Exchange with u8
1898    #[allow(clippy::too_many_arguments)]
1899    fn i64_atomic_xchg_8u(
1900        &mut self,
1901        loc: Location<Self::GPR, Self::SIMD>,
1902        target: Location<Self::GPR, Self::SIMD>,
1903        memarg: &MemArg,
1904        ret: Location<Self::GPR, Self::SIMD>,
1905        need_check: bool,
1906        imported_memories: bool,
1907        offset: i32,
1908        heap_access_oob: Label,
1909        unaligned_atomic: Label,
1910    ) -> Result<(), CompileError>;
1911    /// i64 atomic Exchange with u16
1912    #[allow(clippy::too_many_arguments)]
1913    fn i64_atomic_xchg_16u(
1914        &mut self,
1915        loc: Location<Self::GPR, Self::SIMD>,
1916        target: Location<Self::GPR, Self::SIMD>,
1917        memarg: &MemArg,
1918        ret: Location<Self::GPR, Self::SIMD>,
1919        need_check: bool,
1920        imported_memories: bool,
1921        offset: i32,
1922        heap_access_oob: Label,
1923        unaligned_atomic: Label,
1924    ) -> Result<(), CompileError>;
1925    /// i64 atomic Exchange with u32
1926    #[allow(clippy::too_many_arguments)]
1927    fn i64_atomic_xchg_32u(
1928        &mut self,
1929        loc: Location<Self::GPR, Self::SIMD>,
1930        target: Location<Self::GPR, Self::SIMD>,
1931        memarg: &MemArg,
1932        ret: Location<Self::GPR, Self::SIMD>,
1933        need_check: bool,
1934        imported_memories: bool,
1935        offset: i32,
1936        heap_access_oob: Label,
1937        unaligned_atomic: Label,
1938    ) -> Result<(), CompileError>;
1939    /// i64 atomic Compare and Exchange with i32
1940    #[allow(clippy::too_many_arguments)]
1941    fn i64_atomic_cmpxchg(
1942        &mut self,
1943        new: Location<Self::GPR, Self::SIMD>,
1944        cmp: Location<Self::GPR, Self::SIMD>,
1945        target: Location<Self::GPR, Self::SIMD>,
1946        memarg: &MemArg,
1947        ret: Location<Self::GPR, Self::SIMD>,
1948        need_check: bool,
1949        imported_memories: bool,
1950        offset: i32,
1951        heap_access_oob: Label,
1952        unaligned_atomic: Label,
1953    ) -> Result<(), CompileError>;
1954    /// i64 atomic Compare and Exchange with u8
1955    #[allow(clippy::too_many_arguments)]
1956    fn i64_atomic_cmpxchg_8u(
1957        &mut self,
1958        new: Location<Self::GPR, Self::SIMD>,
1959        cmp: Location<Self::GPR, Self::SIMD>,
1960        target: Location<Self::GPR, Self::SIMD>,
1961        memarg: &MemArg,
1962        ret: Location<Self::GPR, Self::SIMD>,
1963        need_check: bool,
1964        imported_memories: bool,
1965        offset: i32,
1966        heap_access_oob: Label,
1967        unaligned_atomic: Label,
1968    ) -> Result<(), CompileError>;
1969    /// i64 atomic Compare and Exchange with u16
1970    #[allow(clippy::too_many_arguments)]
1971    fn i64_atomic_cmpxchg_16u(
1972        &mut self,
1973        new: Location<Self::GPR, Self::SIMD>,
1974        cmp: Location<Self::GPR, Self::SIMD>,
1975        target: Location<Self::GPR, Self::SIMD>,
1976        memarg: &MemArg,
1977        ret: Location<Self::GPR, Self::SIMD>,
1978        need_check: bool,
1979        imported_memories: bool,
1980        offset: i32,
1981        heap_access_oob: Label,
1982        unaligned_atomic: Label,
1983    ) -> Result<(), CompileError>;
1984    /// i64 atomic Compare and Exchange with u32
1985    #[allow(clippy::too_many_arguments)]
1986    fn i64_atomic_cmpxchg_32u(
1987        &mut self,
1988        new: Location<Self::GPR, Self::SIMD>,
1989        cmp: Location<Self::GPR, Self::SIMD>,
1990        target: Location<Self::GPR, Self::SIMD>,
1991        memarg: &MemArg,
1992        ret: Location<Self::GPR, Self::SIMD>,
1993        need_check: bool,
1994        imported_memories: bool,
1995        offset: i32,
1996        heap_access_oob: Label,
1997        unaligned_atomic: Label,
1998    ) -> Result<(), CompileError>;
1999
    /// load an F32 from memory
    #[allow(clippy::too_many_arguments)]
    fn f32_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// f32 save to memory, with optional canonicalization
    #[allow(clippy::too_many_arguments)]
    fn f32_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        canonicalize: bool,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// load an F64 from memory
    #[allow(clippy::too_many_arguments)]
    fn f64_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// f64 save to memory, with optional canonicalization
    #[allow(clippy::too_many_arguments)]
    fn f64_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        canonicalize: bool,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// Convert a F64 from I64, signed or unsigned
    fn convert_f64_i64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F64 from I32, signed or unsigned
    fn convert_f64_i32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F32 from I64, signed or unsigned
    fn convert_f32_i64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F32 from I32, signed or unsigned
    fn convert_f32_i32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F64 to I64, signed or unsigned, with or without saturation
    fn convert_i64_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F64 to I32, signed or unsigned, with or without saturation
    fn convert_i32_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to I64, signed or unsigned, with or without saturation
    fn convert_i64_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to I32, signed or unsigned, with or without saturation
    fn convert_i32_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to F64
    fn convert_f64_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F64 to F32
    fn convert_f32_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Negate an F64
    fn f64_neg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Get the Absolute Value of an F64
    fn f64_abs(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Copy sign from tmp1 Self::GPR to tmp2 Self::GPR
    fn emit_i64_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
    /// Get the Square Root of an F64
    fn f64_sqrt(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Trunc of an F64
    fn f64_trunc(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Ceil of an F64
    fn f64_ceil(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Floor of an F64
    fn f64_floor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Round at nearest int of an F64
    fn f64_nearest(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater or Equal Compare 2 F64, result in a GPR
    fn f64_cmp_ge(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater Than Compare 2 F64, result in a GPR
    fn f64_cmp_gt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less or Equal Compare 2 F64, result in a GPR
    fn f64_cmp_le(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less Than Compare 2 F64, result in a GPR
    fn f64_cmp_lt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Not Equal Compare 2 F64, result in a GPR
    fn f64_cmp_ne(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Equal Compare 2 F64, result in a GPR
    fn f64_cmp_eq(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Min for 2 F64 values
    fn f64_min(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Max for 2 F64 values
    fn f64_max(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Add 2 F64 values
    fn f64_add(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Sub 2 F64 values
    fn f64_sub(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Multiply 2 F64 values
    fn f64_mul(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Divide 2 F64 values
    fn f64_div(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Negate an F32
    fn f32_neg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Get the Absolute Value of an F32
    fn f32_abs(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Copy sign from tmp1 Self::GPR to tmp2 Self::GPR
    fn emit_i32_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
    /// Get the Square Root of an F32
    fn f32_sqrt(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Trunc of an F32
    fn f32_trunc(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Ceil of an F32
    fn f32_ceil(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Floor of an F32
    fn f32_floor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Round at nearest int of an F32
    fn f32_nearest(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater or Equal Compare 2 F32, result in a GPR
    fn f32_cmp_ge(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater Than Compare 2 F32, result in a GPR
    fn f32_cmp_gt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less or Equal Compare 2 F32, result in a GPR
    fn f32_cmp_le(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less Than Compare 2 F32, result in a GPR
    fn f32_cmp_lt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Not Equal Compare 2 F32, result in a GPR
    fn f32_cmp_ne(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Equal Compare 2 F32, result in a GPR
    fn f32_cmp_eq(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Min for 2 F32 values
    fn f32_min(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Max for 2 F32 values
    fn f32_max(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Add 2 F32 values
    fn f32_add(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Sub 2 F32 values
    fn f32_sub(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Multiply 2 F32 values
    fn f32_mul(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Divide 2 F32 values
    fn f32_div(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
2382
    /// Standard function Trampoline generation
    fn gen_std_trampoline(
        &self,
        sig: &FunctionType,
        calling_convention: CallingConvention,
    ) -> Result<FunctionBody, CompileError>;
    /// Generates dynamic import function call trampoline for a function type.
    fn gen_std_dynamic_import_trampoline(
        &self,
        vmoffsets: &VMOffsets,
        sig: &FunctionType,
        calling_convention: CallingConvention,
    ) -> Result<FunctionBody, CompileError>;
    /// Singlepass calls import functions through a trampoline.
    fn gen_import_call_trampoline(
        &self,
        vmoffsets: &VMOffsets,
        index: FunctionIndex,
        sig: &FunctionType,
        calling_convention: CallingConvention,
    ) -> Result<CustomSection, CompileError>;
    /// Generate eh_frame unwind instructions (or `None` if not possible / supported)
    fn gen_dwarf_unwind_info(&mut self, code_len: usize) -> Option<UnwindInstructions>;
    /// Generate Windows unwind instructions (or `None` if not possible / supported)
    fn gen_windows_unwind_info(&mut self, code_len: usize) -> Option<Vec<u8>>;
2408}
2409
2410/// Standard entry trampoline generation
2411pub fn gen_std_trampoline(
2412    sig: &FunctionType,
2413    target: &Target,
2414    calling_convention: CallingConvention,
2415) -> Result<FunctionBody, CompileError> {
2416    match target.triple().architecture {
2417        Architecture::X86_64 => {
2418            let machine = MachineX86_64::new(Some(target.clone()))?;
2419            machine.gen_std_trampoline(sig, calling_convention)
2420        }
2421        Architecture::Aarch64(_) => {
2422            let machine = MachineARM64::new(Some(target.clone()));
2423            machine.gen_std_trampoline(sig, calling_convention)
2424        }
2425        _ => Err(CompileError::UnsupportedTarget(
2426            "singlepass unimplemented arch for gen_std_trampoline".to_owned(),
2427        )),
2428    }
2429}
2430
2431/// Generates dynamic import function call trampoline for a function type.
2432pub fn gen_std_dynamic_import_trampoline(
2433    vmoffsets: &VMOffsets,
2434    sig: &FunctionType,
2435    target: &Target,
2436    calling_convention: CallingConvention,
2437) -> Result<FunctionBody, CompileError> {
2438    match target.triple().architecture {
2439        Architecture::X86_64 => {
2440            let machine = MachineX86_64::new(Some(target.clone()))?;
2441            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2442        }
2443        Architecture::Aarch64(_) => {
2444            let machine = MachineARM64::new(Some(target.clone()));
2445            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2446        }
2447        _ => Err(CompileError::UnsupportedTarget(
2448            "singlepass unimplemented arch for gen_std_dynamic_import_trampoline".to_owned(),
2449        )),
2450    }
2451}
2452/// Singlepass calls import functions through a trampoline.
2453pub fn gen_import_call_trampoline(
2454    vmoffsets: &VMOffsets,
2455    index: FunctionIndex,
2456    sig: &FunctionType,
2457    target: &Target,
2458    calling_convention: CallingConvention,
2459) -> Result<CustomSection, CompileError> {
2460    match target.triple().architecture {
2461        Architecture::X86_64 => {
2462            let machine = MachineX86_64::new(Some(target.clone()))?;
2463            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2464        }
2465        Architecture::Aarch64(_) => {
2466            let machine = MachineARM64::new(Some(target.clone()));
2467            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2468        }
2469        _ => Err(CompileError::UnsupportedTarget(
2470            "singlepass unimplemented arch for gen_import_call_trampoline".to_owned(),
2471        )),
2472    }
2473}
2474
// Constants for the bounds of truncation operations. These are the least or
// greatest exact floats in either f32 or f64 representation less-than (for
// least) or greater-than (for greatest) the i32 or i64 or u32 or u64
// min (for least) or max (for greatest), when rounding towards zero.
//
// In other words: an input strictly between the GE... and LE... bound for a
// given integer type truncates to a representable value of that type; an
// input at or beyond either bound is out of range.
// NOTE(review): presumably consumed by the float-to-int truncation emitters
// (wasm `trunc` family) implemented elsewhere in this crate — verify there.
//
// Derivation reminders used below: the f32 ulp (spacing between adjacent
// representable values) at magnitude 2^31 is 2^(31-23) = 256 and at 2^63 is
// 2^40; the f64 ulp at 2^63 is 2^(63-52) = 2048. Powers of two up to 2^64
// are exactly representable in both formats.

/// Greatest Exact Float (32 bits) less-than i32::MIN when rounding towards zero.
/// -(2^31 + 256): the next representable f32 below -2^31 (= i32::MIN).
pub const GEF32_LT_I32_MIN: f32 = -2147483904.0;
/// Least Exact Float (32 bits) greater-than i32::MAX when rounding towards zero.
/// 2^31 exactly; i32::MAX = 2^31 - 1 is not representable in f32, and 2^31 is
/// the least f32 above it.
pub const LEF32_GT_I32_MAX: f32 = 2147483648.0;
/// Greatest Exact Float (32 bits) less-than i64::MIN when rounding towards zero.
/// -(2^63 + 2^40): the next representable f32 below -2^63 (= i64::MIN).
pub const GEF32_LT_I64_MIN: f32 = -9223373136366403584.0;
/// Least Exact Float (32 bits) greater-than i64::MAX when rounding towards zero.
/// 2^63 exactly, the least f32 above i64::MAX = 2^63 - 1.
pub const LEF32_GT_I64_MAX: f32 = 9223372036854775808.0;
/// Greatest Exact Float (32 bits) less-than u32::MIN when rounding towards zero.
/// Any float in (-1, 0) truncates to 0 (= u32::MIN), so -1.0 is the bound.
pub const GEF32_LT_U32_MIN: f32 = -1.0;
/// Least Exact Float (32 bits) greater-than u32::MAX when rounding towards zero.
/// 2^32 exactly, the least f32 above u32::MAX = 2^32 - 1.
pub const LEF32_GT_U32_MAX: f32 = 4294967296.0;
/// Greatest Exact Float (32 bits) less-than u64::MIN when rounding towards zero.
/// Same reasoning as the u32 case: (-1, 0) truncates to 0.
pub const GEF32_LT_U64_MIN: f32 = -1.0;
/// Least Exact Float (32 bits) greater-than u64::MAX when rounding towards zero.
/// 2^64 exactly, the least f32 above u64::MAX = 2^64 - 1.
pub const LEF32_GT_U64_MAX: f32 = 18446744073709551616.0;

/// Greatest Exact Float (64 bits) less-than i32::MIN when rounding towards zero.
/// -(2^31 + 1): exactly representable in f64 (53-bit significand).
pub const GEF64_LT_I32_MIN: f64 = -2147483649.0;
/// Least Exact Float (64 bits) greater-than i32::MAX when rounding towards zero.
/// 2^31 exactly (i32::MAX + 1, exact in f64).
pub const LEF64_GT_I32_MAX: f64 = 2147483648.0;
/// Greatest Exact Float (64 bits) less-than i64::MIN when rounding towards zero.
/// -(2^63 + 2048): the next representable f64 below -2^63 (= i64::MIN).
pub const GEF64_LT_I64_MIN: f64 = -9223372036854777856.0;
/// Least Exact Float (64 bits) greater-than i64::MAX when rounding towards zero.
/// 2^63 exactly, the least f64 above i64::MAX = 2^63 - 1.
pub const LEF64_GT_I64_MAX: f64 = 9223372036854775808.0;
/// Greatest Exact Float (64 bits) less-than u32::MIN when rounding towards zero.
/// (-1, 0) truncates to 0 (= u32::MIN), so -1.0 is the bound.
pub const GEF64_LT_U32_MIN: f64 = -1.0;
/// Least Exact Float (64 bits) greater-than u32::MAX when rounding towards zero.
/// 2^32 exactly (u32::MAX + 1, exact in f64).
pub const LEF64_GT_U32_MAX: f64 = 4294967296.0;
/// Greatest Exact Float (64 bits) less-than u64::MIN when rounding towards zero.
/// Same reasoning as the u32 case.
pub const GEF64_LT_U64_MIN: f64 = -1.0;
/// Least Exact Float (64 bits) greater-than u64::MAX when rounding towards zero.
/// 2^64 exactly, the least f64 above u64::MAX = 2^64 - 1.
pub const LEF64_GT_U64_MAX: f64 = 18446744073709551616.0;