use crate::common_decl::*;
use crate::location::{Location, Reg};
use crate::machine_arm64::MachineARM64;
use crate::machine_x64::MachineX86_64;
use crate::unwind::UnwindInstructions;
use dynasmrt::{AssemblyOffset, DynamicLabel};
use std::collections::BTreeMap;
use std::fmt::Debug;
pub use wasmer_compiler::wasmparser::MemArg;
use wasmer_compiler::wasmparser::ValType as WpType;
use wasmer_types::{
Architecture, CallingConvention, CompileError, CustomSection, FunctionBody, FunctionIndex,
FunctionType, InstructionAddressMap, Relocation, RelocationTarget, Target, TrapCode,
TrapInformation, VMOffsets,
};
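/// Assembler label used as a branch/jump target while emitting code.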
pub type Label = DynamicLabel;
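/// Byte offset into the code buffer being assembled.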
pub type Offset = AssemblyOffset;
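/// A constant value that an operand may carry as an immediate (see [`MaybeImmediate`]).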
#[allow(dead_code)]
#[derive(Clone, PartialEq)]
pub enum Value {
I8(i8),
I32(i32),
I64(i64),
F32(f32),
F64(f64),
}
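/// Bail out of code generation with a [`CompileError::Codegen`] built from a
/// format string, e.g. `codegen_error!("singlepass cannot emit {}", op)`.
/// The macro expands to an early `return Err(...)`, so it can only be used in
/// functions returning `Result<_, CompileError>`, and it expects
/// `CompileError` to be in scope at the call site.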
#[macro_export]
macro_rules! codegen_error {
($($arg:tt)*) => {return Err(CompileError::Codegen(format!($($arg)*)))}
}
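/// Operands that may be compile-time immediates, letting emitters special-case
/// constant values instead of materializing them in a register.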
#[allow(unused)]
pub trait MaybeImmediate {
fn imm_value(&self) -> Option<Value>;
fn is_imm(&self) -> bool {
self.imm_value().is_some()
}
}
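/// Map from offsets in the emitted code to the trap code reported when
/// execution faults at that offset.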
#[derive(Clone, Debug, Default)]
pub struct TrapTable {
pub offset_to_code: BTreeMap<usize, TrapCode>,
}
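/// Page size assumed for the native target, in bytes.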
pub const NATIVE_PAGE_SIZE: usize = 4096;
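/// Byte offset of a value inside the machine stack frame.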
pub struct MachineStackOffset(pub usize);
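/// Abstraction over a target architecture used by the single-pass code
/// generator. Implementations (currently [`MachineX86_64`] and
/// [`MachineARM64`]) own the underlying assembler and expose register
/// allocation, stack management, and per-opcode emission helpers so the
/// generic codegen never has to know which instruction set it is targeting.
///
/// A minimal sketch of how architecture-independent code can drive a
/// `Machine`; the helper itself is illustrative and not part of this crate:
///
/// ```ignore
/// fn return_i32_sum<M: Machine>(
///     m: &mut M,
///     a: Location<M::GPR, M::SIMD>,
///     b: Location<M::GPR, M::SIMD>,
/// ) -> Result<(), CompileError> {
///     // Add `a` and `b`, leave the result in the ABI return register,
///     // then tear the frame down and return.
///     let ret = Location::GPR(m.get_gpr_for_ret());
///     m.emit_binop_add32(a, b, ret)?;
///     m.emit_function_epilog()?;
///     m.emit_ret()
/// }
/// ```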
#[allow(unused)]
pub trait Machine {
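/// General-purpose register type of the target architecture.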
type GPR: Copy + Eq + Debug + Reg;
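/// SIMD / floating-point register type of the target architecture.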
type SIMD: Copy + Eq + Debug + Reg;
fn assembler_get_offset(&self) -> Offset;
fn index_from_gpr(&self, x: Self::GPR) -> RegisterIndex;
fn index_from_simd(&self, x: Self::SIMD) -> RegisterIndex;
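/// GPR that is pinned to the `vmctx` pointer for the whole function.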
fn get_vmctx_reg(&self) -> Self::GPR;
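/// Pick an unused GPR for local/stack/argument use without marking it as
/// used; `pick_temp_gpr` does the same for scratch registers, while the
/// `reserve_*`, `acquire_*` and `release_*` methods below actually claim and
/// return registers.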
fn pick_gpr(&self) -> Option<Self::GPR>;
fn pick_temp_gpr(&self) -> Option<Self::GPR>;
fn get_used_gprs(&self) -> Vec<Self::GPR>;
fn get_used_simd(&self) -> Vec<Self::SIMD>;
fn acquire_temp_gpr(&mut self) -> Option<Self::GPR>;
fn release_gpr(&mut self, gpr: Self::GPR);
fn reserve_unused_temp_gpr(&mut self, gpr: Self::GPR) -> Self::GPR;
fn reserve_gpr(&mut self, gpr: Self::GPR);
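/// Spill the given GPRs to the stack (for example around a call), returning
/// the stack space used in bytes; `pop_used_gpr` restores them.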
fn push_used_gpr(&mut self, gprs: &[Self::GPR]) -> Result<usize, CompileError>;
fn pop_used_gpr(&mut self, gprs: &[Self::GPR]) -> Result<(), CompileError>;
fn pick_simd(&self) -> Option<Self::SIMD>;
fn pick_temp_simd(&self) -> Option<Self::SIMD>;
fn acquire_temp_simd(&mut self) -> Option<Self::SIMD>;
fn reserve_simd(&mut self, simd: Self::SIMD);
fn release_simd(&mut self, simd: Self::SIMD);
fn push_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<usize, CompileError>;
fn pop_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<(), CompileError>;
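/// Round a stack-size adjustment up to the alignment the target requires
/// (for instance, 16 bytes on AArch64).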
fn round_stack_adjust(&self, value: usize) -> usize;
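/// Record the current WebAssembly source offset so subsequently emitted
/// instructions and traps can be mapped back to it.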
fn set_srcloc(&mut self, offset: u32);
fn mark_address_range_with_trap_code(&mut self, code: TrapCode, begin: usize, end: usize);
fn mark_address_with_trap_code(&mut self, code: TrapCode);
fn mark_instruction_with_trap_code(&mut self, code: TrapCode) -> usize;
fn mark_instruction_address_end(&mut self, begin: usize);
fn insert_stackoverflow(&mut self);
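/// Return the per-offset trap information collected while the function body
/// was emitted.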
fn collect_trap_information(&self) -> Vec<TrapInformation>;
fn instructions_address_map(&self) -> Vec<InstructionAddressMap>;
fn local_on_stack(&mut self, stack_offset: i32) -> Location<Self::GPR, Self::SIMD>;
fn adjust_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
fn restore_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
fn pop_stack_locals(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
fn zero_location(
&mut self,
size: Size,
location: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn local_pointer(&self) -> Self::GPR;
fn move_location_for_native(
&mut self,
size: Size,
loc: Location<Self::GPR, Self::SIMD>,
dest: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn is_local_on_stack(&self, idx: usize) -> bool;
fn get_local_location(
&self,
idx: usize,
callee_saved_regs_size: usize,
) -> Location<Self::GPR, Self::SIMD>;
fn move_local(
&mut self,
stack_offset: i32,
location: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn list_to_save(
&self,
calling_convention: CallingConvention,
) -> Vec<Location<Self::GPR, Self::SIMD>>;
fn get_param_location(
&self,
idx: usize,
sz: Size,
stack_offset: &mut usize,
calling_convention: CallingConvention,
) -> Location<Self::GPR, Self::SIMD>;
fn get_call_param_location(
&self,
idx: usize,
sz: Size,
stack_offset: &mut usize,
calling_convention: CallingConvention,
) -> Location<Self::GPR, Self::SIMD>;
fn get_simple_param_location(
&self,
idx: usize,
calling_convention: CallingConvention,
) -> Location<Self::GPR, Self::SIMD>;
fn move_location(
&mut self,
size: Size,
source: Location<Self::GPR, Self::SIMD>,
dest: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn move_location_extend(
&mut self,
size_val: Size,
signed: bool,
source: Location<Self::GPR, Self::SIMD>,
size_op: Size,
dest: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn load_address(
&mut self,
size: Size,
gpr: Location<Self::GPR, Self::SIMD>,
mem: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn init_stack_loc(
&mut self,
init_stack_loc_cnt: u64,
last_stack_loc: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn restore_saved_area(&mut self, saved_area_offset: i32) -> Result<(), CompileError>;
fn pop_location(
&mut self,
location: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn new_machine_state(&self) -> MachineState;
fn assembler_finalize(self) -> Result<Vec<u8>, CompileError>;
fn get_offset(&self) -> Offset;
fn finalize_function(&mut self) -> Result<(), CompileError>;
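/// Emit the function prologue (save callee state and set up the frame);
/// mirrored by `emit_function_epilog`.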
fn emit_function_prolog(&mut self) -> Result<(), CompileError>;
fn emit_function_epilog(&mut self) -> Result<(), CompileError>;
fn emit_function_return_value(
&mut self,
ty: WpType,
canonicalize: bool,
loc: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_function_return_float(&mut self) -> Result<(), CompileError>;
fn arch_supports_canonicalize_nan(&self) -> bool;
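/// Rewrite a NaN in `input` to the canonical NaN bit pattern and store it in
/// `output`; used when deterministic NaN canonicalization is requested.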
fn canonicalize_nan(
&mut self,
sz: Size,
input: Location<Self::GPR, Self::SIMD>,
output: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_illegal_op(&mut self, trp: TrapCode) -> Result<(), CompileError>;
fn get_label(&mut self) -> Label;
fn emit_label(&mut self, label: Label) -> Result<(), CompileError>;
fn get_grp_for_call(&self) -> Self::GPR;
fn emit_call_register(&mut self, register: Self::GPR) -> Result<(), CompileError>;
fn emit_call_label(&mut self, label: Label) -> Result<(), CompileError>;
fn arch_requires_indirect_call_trampoline(&self) -> bool;
fn arch_emit_indirect_call_with_trampoline(
&mut self,
location: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_call_location(
&mut self,
location: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn get_gpr_for_ret(&self) -> Self::GPR;
fn get_simd_for_ret(&self) -> Self::SIMD;
fn emit_debug_breakpoint(&mut self) -> Result<(), CompileError>;
fn location_address(
&mut self,
size: Size,
source: Location<Self::GPR, Self::SIMD>,
dest: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn location_and(
&mut self,
size: Size,
source: Location<Self::GPR, Self::SIMD>,
dest: Location<Self::GPR, Self::SIMD>,
flags: bool,
) -> Result<(), CompileError>;
fn location_xor(
&mut self,
size: Size,
source: Location<Self::GPR, Self::SIMD>,
dest: Location<Self::GPR, Self::SIMD>,
flags: bool,
) -> Result<(), CompileError>;
fn location_or(
&mut self,
size: Size,
source: Location<Self::GPR, Self::SIMD>,
dest: Location<Self::GPR, Self::SIMD>,
flags: bool,
) -> Result<(), CompileError>;
fn location_add(
&mut self,
size: Size,
source: Location<Self::GPR, Self::SIMD>,
dest: Location<Self::GPR, Self::SIMD>,
flags: bool,
) -> Result<(), CompileError>;
fn location_sub(
&mut self,
size: Size,
source: Location<Self::GPR, Self::SIMD>,
dest: Location<Self::GPR, Self::SIMD>,
flags: bool,
) -> Result<(), CompileError>;
fn location_neg(
&mut self,
size_val: Size,
signed: bool,
source: Location<Self::GPR, Self::SIMD>,
size_op: Size,
dest: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn location_cmp(
&mut self,
size: Size,
source: Location<Self::GPR, Self::SIMD>,
dest: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn location_test(
&mut self,
size: Size,
source: Location<Self::GPR, Self::SIMD>,
dest: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn jmp_unconditionnal(&mut self, label: Label) -> Result<(), CompileError>;
fn jmp_on_equal(&mut self, label: Label) -> Result<(), CompileError>;
fn jmp_on_different(&mut self, label: Label) -> Result<(), CompileError>;
fn jmp_on_above(&mut self, label: Label) -> Result<(), CompileError>;
fn jmp_on_aboveequal(&mut self, label: Label) -> Result<(), CompileError>;
fn jmp_on_belowequal(&mut self, label: Label) -> Result<(), CompileError>;
fn jmp_on_overflow(&mut self, label: Label) -> Result<(), CompileError>;
fn emit_jmp_to_jumptable(
&mut self,
label: Label,
cond: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn align_for_loop(&mut self) -> Result<(), CompileError>;
fn emit_ret(&mut self) -> Result<(), CompileError>;
fn emit_push(
&mut self,
size: Size,
loc: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_pop(
&mut self,
size: Size,
loc: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
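/// "Relaxed" emitters accept any source/destination `Location` combination
/// (register, memory, immediate) and go through scratch registers whenever
/// the architecture has no single instruction for that combination.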
fn emit_relaxed_mov(
&mut self,
sz: Size,
src: Location<Self::GPR, Self::SIMD>,
dst: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_relaxed_cmp(
&mut self,
sz: Size,
src: Location<Self::GPR, Self::SIMD>,
dst: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_memory_fence(&mut self) -> Result<(), CompileError>;
fn emit_relaxed_zero_extension(
&mut self,
sz_src: Size,
src: Location<Self::GPR, Self::SIMD>,
sz_dst: Size,
dst: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_relaxed_sign_extension(
&mut self,
sz_src: Size,
src: Location<Self::GPR, Self::SIMD>,
sz_dst: Size,
dst: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_imul_imm32(
&mut self,
size: Size,
imm32: u32,
gpr: Self::GPR,
) -> Result<(), CompileError>;
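/// Emit a 32-bit integer addition. The sibling `emit_binop_*32` emitters
/// follow the same shape; the division and remainder ones additionally take
/// the branch targets for the division-by-zero and overflow traps and return
/// an assembler offset used for trap bookkeeping.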
fn emit_binop_add32(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_binop_sub32(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_binop_mul32(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_binop_udiv32(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
integer_division_by_zero: Label,
integer_overflow: Label,
) -> Result<usize, CompileError>;
fn emit_binop_sdiv32(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
integer_division_by_zero: Label,
integer_overflow: Label,
) -> Result<usize, CompileError>;
fn emit_binop_urem32(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
integer_division_by_zero: Label,
integer_overflow: Label,
) -> Result<usize, CompileError>;
fn emit_binop_srem32(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
integer_division_by_zero: Label,
integer_overflow: Label,
) -> Result<usize, CompileError>;
fn emit_binop_and32(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_binop_or32(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_binop_xor32(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_cmp_ge_s(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_cmp_gt_s(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_cmp_le_s(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_cmp_lt_s(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_cmp_ge_u(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_cmp_gt_u(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_cmp_le_u(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_cmp_lt_u(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_cmp_ne(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_cmp_eq(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_clz(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_ctz(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_popcnt(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_shl(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_shr(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_sar(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_rol(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i32_ror(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
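/// Emit an `i32.load` from linear memory. Like every memory-access emitter in
/// this trait it takes the instruction's `memarg` (static offset and
/// alignment), whether an explicit bounds check is required (`need_check`),
/// whether the memory is imported (`imported_memories`), an `offset` used
/// when locating the memory's definition through the `VMContext`, and the
/// labels to branch to on an out-of-bounds access (`heap_access_oob`) or a
/// misaligned atomic access (`unaligned_atomic`).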
#[allow(clippy::too_many_arguments)]
fn i32_load(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_load_8u(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_load_8s(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_load_16u(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_load_16s(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_load(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_load_8u(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_load_16u(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_save(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_save_8(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_save_16(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_save(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_save_8(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_save_16(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_add(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_add_8u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_add_16u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_sub(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_sub_8u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_sub_16u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_and(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_and_8u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_and_16u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_or(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_or_8u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_or_16u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_xor(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_xor_8u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_xor_16u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_xchg(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_xchg_8u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_xchg_16u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_cmpxchg(
&mut self,
new: Location<Self::GPR, Self::SIMD>,
cmp: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_cmpxchg_8u(
&mut self,
new: Location<Self::GPR, Self::SIMD>,
cmp: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i32_atomic_cmpxchg_16u(
&mut self,
new: Location<Self::GPR, Self::SIMD>,
cmp: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
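/// Emit a call to `reloc_target` whose final address is not yet known,
/// returning the relocations the engine must apply to patch it.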
fn emit_call_with_reloc(
&mut self,
calling_convention: CallingConvention,
reloc_target: RelocationTarget,
) -> Result<Vec<Relocation>, CompileError>;
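/// Emit a 64-bit integer addition; the `*64` emitters below mirror the
/// 32-bit family above.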
fn emit_binop_add64(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_binop_sub64(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_binop_mul64(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_binop_udiv64(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
integer_division_by_zero: Label,
integer_overflow: Label,
) -> Result<usize, CompileError>;
fn emit_binop_sdiv64(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
integer_division_by_zero: Label,
integer_overflow: Label,
) -> Result<usize, CompileError>;
fn emit_binop_urem64(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
integer_division_by_zero: Label,
integer_overflow: Label,
) -> Result<usize, CompileError>;
fn emit_binop_srem64(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
integer_division_by_zero: Label,
integer_overflow: Label,
) -> Result<usize, CompileError>;
fn emit_binop_and64(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_binop_or64(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_binop_xor64(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_cmp_ge_s(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_cmp_gt_s(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_cmp_le_s(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_cmp_lt_s(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_cmp_ge_u(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_cmp_gt_u(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_cmp_le_u(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_cmp_lt_u(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_cmp_ne(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_cmp_eq(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_clz(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_ctz(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_popcnt(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_shl(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_shr(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_sar(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_rol(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn i64_ror(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_load(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_load_8u(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_load_8s(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_load_32u(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_load_32s(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_load_16u(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_load_16s(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_load(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_load_8u(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_load_16u(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_load_32u(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_save(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_save_8(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_save_16(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_save_32(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_save(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_save_8(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_save_16(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_save_32(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_add(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_add_8u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_add_16u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_add_32u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_sub(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_sub_8u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_sub_16u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_sub_32u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_and(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_and_8u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_and_16u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_and_32u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_or(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_or_8u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_or_16u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_or_32u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_xor(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_xor_8u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_xor_16u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_xor_32u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_xchg(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_xchg_8u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_xchg_16u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_xchg_32u(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_cmpxchg(
&mut self,
new: Location<Self::GPR, Self::SIMD>,
cmp: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_cmpxchg_8u(
&mut self,
new: Location<Self::GPR, Self::SIMD>,
cmp: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_cmpxchg_16u(
&mut self,
new: Location<Self::GPR, Self::SIMD>,
cmp: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn i64_atomic_cmpxchg_32u(
&mut self,
new: Location<Self::GPR, Self::SIMD>,
cmp: Location<Self::GPR, Self::SIMD>,
target: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn f32_load(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn f32_save(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
canonicalize: bool,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn f64_load(
&mut self,
addr: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
ret: Location<Self::GPR, Self::SIMD>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
#[allow(clippy::too_many_arguments)]
fn f64_save(
&mut self,
value: Location<Self::GPR, Self::SIMD>,
memarg: &MemArg,
addr: Location<Self::GPR, Self::SIMD>,
canonicalize: bool,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: Label,
unaligned_atomic: Label,
) -> Result<(), CompileError>;
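/// Numeric conversions. `convert_<dst>_<src>` names the destination type
/// first, so this converts an i64 to an f64. The integer/float conversions
/// take a `signed` flag, and the float-to-integer variants additionally take
/// `sat` for the saturating `trunc_sat` forms.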
fn convert_f64_i64(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
signed: bool,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn convert_f64_i32(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
signed: bool,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn convert_f32_i64(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
signed: bool,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn convert_f32_i32(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
signed: bool,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn convert_i64_f64(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
signed: bool,
sat: bool,
) -> Result<(), CompileError>;
fn convert_i32_f64(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
signed: bool,
sat: bool,
) -> Result<(), CompileError>;
fn convert_i64_f32(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
signed: bool,
sat: bool,
) -> Result<(), CompileError>;
fn convert_i32_f32(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
signed: bool,
sat: bool,
) -> Result<(), CompileError>;
fn convert_f64_f32(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn convert_f32_f64(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_neg(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_abs(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_i64_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
fn f64_sqrt(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_trunc(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_ceil(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_floor(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_nearest(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_cmp_ge(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_cmp_gt(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_cmp_le(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_cmp_lt(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_cmp_ne(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_cmp_eq(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_min(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_max(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_add(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_sub(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_mul(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f64_div(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_neg(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_abs(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn emit_i32_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
fn f32_sqrt(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_trunc(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_ceil(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_floor(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_nearest(
&mut self,
loc: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_cmp_ge(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_cmp_gt(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_cmp_le(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_cmp_lt(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_cmp_ne(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_cmp_eq(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_min(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_max(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_add(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_sub(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_mul(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
fn f32_div(
&mut self,
loc_a: Location<Self::GPR, Self::SIMD>,
loc_b: Location<Self::GPR, Self::SIMD>,
ret: Location<Self::GPR, Self::SIMD>,
) -> Result<(), CompileError>;
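/// Generate the standard host-to-Wasm trampoline for signature `sig`, which
/// lets the runtime call a compiled function through a uniform entry point.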
fn gen_std_trampoline(
&self,
sig: &FunctionType,
calling_convention: CallingConvention,
) -> Result<FunctionBody, CompileError>;
fn gen_std_dynamic_import_trampoline(
&self,
vmoffsets: &VMOffsets,
sig: &FunctionType,
calling_convention: CallingConvention,
) -> Result<FunctionBody, CompileError>;
fn gen_import_call_trampoline(
&self,
vmoffsets: &VMOffsets,
index: FunctionIndex,
sig: &FunctionType,
calling_convention: CallingConvention,
) -> Result<CustomSection, CompileError>;
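/// Produce DWARF CFI unwind instructions covering the `code_len` bytes just
/// emitted, if the backend supports them; `gen_windows_unwind_info` is the
/// Windows x64 counterpart.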
fn gen_dwarf_unwind_info(&mut self, code_len: usize) -> Option<UnwindInstructions>;
fn gen_windows_unwind_info(&mut self, code_len: usize) -> Option<Vec<u8>>;
}
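/// Build the standard trampoline for `sig` by dispatching to the
/// architecture-specific [`Machine`] implementation selected by `target`.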
pub fn gen_std_trampoline(
sig: &FunctionType,
target: &Target,
calling_convention: CallingConvention,
) -> Result<FunctionBody, CompileError> {
match target.triple().architecture {
Architecture::X86_64 => {
let machine = MachineX86_64::new(Some(target.clone()))?;
machine.gen_std_trampoline(sig, calling_convention)
}
Architecture::Aarch64(_) => {
let machine = MachineARM64::new(Some(target.clone()));
machine.gen_std_trampoline(sig, calling_convention)
}
_ => Err(CompileError::UnsupportedTarget(
"singlepass unimplemented arch for gen_std_trampoline".to_owned(),
)),
}
}
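/// Build the dynamic-import trampoline for `sig`, dispatching on `target`'s
/// architecture.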
pub fn gen_std_dynamic_import_trampoline(
vmoffsets: &VMOffsets,
sig: &FunctionType,
target: &Target,
calling_convention: CallingConvention,
) -> Result<FunctionBody, CompileError> {
match target.triple().architecture {
Architecture::X86_64 => {
let machine = MachineX86_64::new(Some(target.clone()))?;
machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
}
Architecture::Aarch64(_) => {
let machine = MachineARM64::new(Some(target.clone()));
machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
}
_ => Err(CompileError::UnsupportedTarget(
"singlepass unimplemented arch for gen_std_dynamic_import_trampoline".to_owned(),
)),
}
}
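/// Build the import-call trampoline for function `index` with signature
/// `sig`, dispatching on `target`'s architecture.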
pub fn gen_import_call_trampoline(
vmoffsets: &VMOffsets,
index: FunctionIndex,
sig: &FunctionType,
target: &Target,
calling_convention: CallingConvention,
) -> Result<CustomSection, CompileError> {
match target.triple().architecture {
Architecture::X86_64 => {
let machine = MachineX86_64::new(Some(target.clone()))?;
machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
}
Architecture::Aarch64(_) => {
let machine = MachineARM64::new(Some(target.clone()));
machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
}
_ => Err(CompileError::UnsupportedTarget(
"singlepass unimplemented arch for gen_import_call_trampoline".to_owned(),
)),
}
}
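// Boundary constants used when range-checking float-to-integer truncations
// (`i32.trunc_f32_s` and friends). For each float/integer pair,
// `GE…_LT_…_MIN` is the greatest float whose truncation still falls below the
// integer type's minimum and `LE…_GT_…_MAX` is the least float whose
// truncation exceeds its maximum; a value strictly between the two bounds
// truncates without trapping, anything at or beyond them must trap.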
pub const GEF32_LT_I32_MIN: f32 = -2147483904.0;
pub const LEF32_GT_I32_MAX: f32 = 2147483648.0;
pub const GEF32_LT_I64_MIN: f32 = -9223373136366403584.0;
pub const LEF32_GT_I64_MAX: f32 = 9223372036854775808.0;
pub const GEF32_LT_U32_MIN: f32 = -1.0;
pub const LEF32_GT_U32_MAX: f32 = 4294967296.0;
pub const GEF32_LT_U64_MIN: f32 = -1.0;
pub const LEF32_GT_U64_MAX: f32 = 18446744073709551616.0;
pub const GEF64_LT_I32_MIN: f64 = -2147483649.0;
pub const LEF64_GT_I32_MAX: f64 = 2147483648.0;
pub const GEF64_LT_I64_MIN: f64 = -9223372036854777856.0;
pub const LEF64_GT_I64_MAX: f64 = 9223372036854775808.0;
pub const GEF64_LT_U32_MIN: f64 = -1.0;
pub const LEF64_GT_U32_MAX: f64 = 4294967296.0;
pub const GEF64_LT_U64_MIN: f64 = -1.0;
pub const LEF64_GT_U64_MAX: f64 = 18446744073709551616.0;