// wasmer_vm/vmcontext.rs

1// This file contains code from external sources.
2// Attributions: https://github.com/wasmerio/wasmer/blob/main/docs/ATTRIBUTIONS.md
3
4//! This file declares `VMContext` and several related structs which contain
5//! fields that compiled wasm code accesses directly.
6
use crate::global::VMGlobal;
use crate::instance::Instance;
use crate::memory::VMMemory;
use crate::store::InternalStoreHandle;
use crate::trap::{Trap, TrapCode};
use crate::VMTable;
use crate::{VMBuiltinFunctionIndex, VMFunction};
use crate::{VMFunctionBody, VMTag};
use std::convert::TryFrom;
use std::ptr::{self, NonNull};
use std::sync::atomic::{AtomicPtr, AtomicU32, AtomicU64, Ordering};
use wasmer_types::RawValue;
19
/// Union representing the first parameter passed when calling a function.
///
/// It may either be a pointer to the [`VMContext`] if it's a Wasm function
/// or a pointer to arbitrary data controlled by the host if it's a host function.
///
/// Both variants are raw pointers of the same size and layout, so the
/// address bits can be read through either variant interchangeably (the
/// `Debug`/`PartialEq`/`Hash` impls below rely on this).
#[derive(Copy, Clone, Eq)]
#[repr(C)]
pub union VMFunctionContext {
    /// Wasm functions take a pointer to [`VMContext`].
    pub vmctx: *mut VMContext,
    /// Host functions can have custom environments.
    pub host_env: *mut std::ffi::c_void,
}
32
33impl VMFunctionContext {
34    /// Check whether the pointer stored is null or not.
35    pub fn is_null(&self) -> bool {
36        unsafe { self.host_env.is_null() }
37    }
38}
39
40impl std::fmt::Debug for VMFunctionContext {
41    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
42        f.debug_struct("VMFunctionContext")
43            .field("vmctx_or_hostenv", unsafe { &self.host_env })
44            .finish()
45    }
46}
47
48impl std::cmp::PartialEq for VMFunctionContext {
49    fn eq(&self, rhs: &Self) -> bool {
50        unsafe { self.host_env as usize == rhs.host_env as usize }
51    }
52}
53
54impl std::hash::Hash for VMFunctionContext {
55    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
56        unsafe {
57            self.vmctx.hash(state);
58        }
59    }
60}
61
/// An imported function.
///
/// This is the data compiled code reads to call a function imported from
/// another instance or from the host: the raw code pointer plus the
/// context to pass as the function's first argument.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// A pointer to the imported function body.
    pub body: *const VMFunctionBody,

    /// A pointer to the `VMContext` that owns the function or host env data.
    pub environment: VMFunctionContext,

    /// Handle to the `VMFunction` in the context.
    pub handle: InternalStoreHandle<VMFunction>,
}
75
#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;
    use wasmer_types::VMOffsets;

    /// Verifies the Rust layout of `VMFunctionImport` (size and field
    /// offsets) matches the offsets `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmfunction_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMFunctionImport>(),
            usize::from(offsets.size_of_vmfunction_import())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, body),
            usize::from(offsets.vmfunction_import_body())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, environment),
            usize::from(offsets.vmfunction_import_vmctx())
        );
    }
}
102
/// The `VMDynamicFunctionContext` is the context that dynamic
/// functions will receive when called (rather than `vmctx`).
/// A dynamic function is a function for which we don't know the signature
/// until runtime.
///
/// As such, we need to expose the dynamic function `context`
/// containing the relevant context for running the function indicated
/// in `address`.
#[repr(C)]
pub struct VMDynamicFunctionContext<T> {
    /// The address of the inner dynamic function.
    ///
    /// Note: The function must be on the form of
    /// `(*mut T, SignatureIndex, *mut i128)`.
    pub address: *const VMFunctionBody,

    /// The context that the inner dynamic function will receive.
    /// Laid out after `address` (`repr(C)`), so compiled code can reach it
    /// at a fixed offset.
    pub ctx: T,
}
122
// SAFETY: the `ctx` itself must be `Send`; the raw `address` pointer can be
// passed between threads because all usage is `unsafe` and synchronized.
unsafe impl<T: Sized + Send + Sync> Send for VMDynamicFunctionContext<T> {}
// SAFETY: the `ctx` itself must be `Sync`; the raw `address` pointer can be
// shared between threads because all usage is `unsafe` and synchronized.
unsafe impl<T: Sized + Send + Sync> Sync for VMDynamicFunctionContext<T> {}
129
130impl<T: Sized + Clone + Send + Sync> Clone for VMDynamicFunctionContext<T> {
131    fn clone(&self) -> Self {
132        Self {
133            address: self.address,
134            ctx: self.ctx.clone(),
135        }
136    }
137}
138
#[cfg(test)]
mod test_vmdynamicfunction_import_context {
    use super::VMDynamicFunctionContext;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Verifies the Rust layout of `VMDynamicFunctionContext` matches the
    /// offsets `VMOffsets` computes (checked with `T = usize`).
    #[test]
    fn check_vmdynamicfunction_import_context_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMDynamicFunctionContext<usize>>(),
            usize::from(offsets.size_of_vmdynamicfunction_import_context())
        );
        assert_eq!(
            offset_of!(VMDynamicFunctionContext<usize>, address),
            usize::from(offsets.vmdynamicfunction_import_context_address())
        );
        assert_eq!(
            offset_of!(VMDynamicFunctionContext<usize>, ctx),
            usize::from(offsets.vmdynamicfunction_import_context_ctx())
        );
    }
}
165
/// A function kind is a calling convention into and out of wasm code.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(C)]
pub enum VMFunctionKind {
    /// A static function has the native signature:
    /// `extern "C" (vmctx, arg1, arg2...) -> (result1, result2, ...)`.
    ///
    /// This is the default for functions that are defined:
    /// 1. In the Host, natively
    /// 2. In the WebAssembly file
    Static,

    /// A dynamic function has the native signature:
    /// `extern "C" (ctx, &[Value]) -> Vec<Value>`,
    /// i.e. arguments are marshalled through a value slice at runtime.
    ///
    /// This is the default for functions that are defined:
    /// 1. In the Host, dynamically
    Dynamic,
}
185
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub definition: NonNull<VMTableDefinition>,

    /// Handle to the `VMTable` in the context.
    pub handle: InternalStoreHandle<VMTable>,
}
197
#[cfg(test)]
mod test_vmtable_import {
    use super::VMTableImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Verifies the Rust layout of `VMTableImport` matches the offsets
    /// `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmtable_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offsets.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, definition),
            usize::from(offsets.vmtable_import_definition())
        );
    }
}
220
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub definition: NonNull<VMMemoryDefinition>,

    /// A handle to the `Memory` that owns the memory description.
    pub handle: InternalStoreHandle<VMMemory>,
}
232
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Verifies the Rust layout of `VMMemoryImport` (size and both field
    /// offsets) matches what `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmmemory_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offsets.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, definition),
            usize::from(offsets.vmmemory_import_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, handle),
            usize::from(offsets.vmmemory_import_handle())
        );
    }
}
259
/// The fields compiled code needs to access to utilize a WebAssembly tag
/// variable imported from another instance.
///
/// Unlike the other import structs, this only carries a store handle and
/// no raw definition pointer.
#[derive(Clone)]
#[repr(C)]
pub struct VMTagImport {
    /// A handle to the `Tag` that owns the tag description.
    pub handle: InternalStoreHandle<VMTag>,
}
268
/// # Safety
/// This data is safe to share between threads because it's plain data (a
/// store handle) that it is the user's responsibility to synchronize.
/// NOTE(review): an earlier version of this comment referenced a `from`
/// field that `VMTagImport` does not have — presumably copied from a
/// sibling impl; confirm the intended invariant against [`VMTag`].
unsafe impl Send for VMTagImport {}
/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize. And because it's `Clone`, there's
/// really no difference between passing it by reference or by value as far as
/// correctness in a multi-threaded context is concerned.
unsafe impl Sync for VMTagImport {}
280
281//#[cfg(test)]
282//mod test_vmtag_import {
283//    use super::VMTagImport;
284//    use crate::VMOffsets;
285//    use memoffset::offset_of;
286//    use std::mem::size_of;
287//    use wasmer_types::ModuleInfo;
288//
289//    #[test]
290//    fn check_vmtag_import_offsets() {
291//        let module = ModuleInfo::new();
292//        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
293//        assert_eq!(
294//            size_of::<VMTagImport>(),
295//            usize::from(offsets.size_of_vmtag_import())
296//        );
297//        assert_eq!(
298//            offset_of!(VMTagImport, handle),
299//            usize::from(offsets.vmtag_import_definition())
300//        );
301//    }
302//}
303
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
#[derive(Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub definition: NonNull<VMGlobalDefinition>,

    /// A handle to the `Global` that owns the global description.
    pub handle: InternalStoreHandle<VMGlobal>,
}
315
/// # Safety
/// This data is safe to share between threads because it's plain data
/// (a raw definition pointer plus a store handle) that it is the user's
/// responsibility to synchronize.
/// NOTE(review): an earlier version of this comment referenced a `from`
/// field that `VMGlobalImport` does not have; confirm the intended
/// invariant against [`VMGlobal`]'s own synchronization.
unsafe impl Send for VMGlobalImport {}
/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize. And because it's `Clone`, there's
/// really no difference between passing it by reference or by value as far as
/// correctness in a multi-threaded context is concerned.
unsafe impl Sync for VMGlobalImport {}
327
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Verifies the Rust layout of `VMGlobalImport` matches the offsets
    /// `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmglobal_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offsets.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, definition),
            usize::from(offsets.vmglobal_import_definition())
        );
    }
}
350
351/// Do an unsynchronized, non-atomic `memory.copy` for the memory.
352///
353/// # Errors
354///
355/// Returns a `Trap` error when the source or destination ranges are out of
356/// bounds.
357///
358/// # Safety
359/// The memory is not copied atomically and is not synchronized: it's the
360/// caller's responsibility to synchronize.
361pub(crate) unsafe fn memory_copy(
362    mem: &VMMemoryDefinition,
363    dst: u32,
364    src: u32,
365    len: u32,
366) -> Result<(), Trap> {
367    // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy
368    if src
369        .checked_add(len)
370        .map_or(true, |n| usize::try_from(n).unwrap() > mem.current_length)
371        || dst
372            .checked_add(len)
373            .map_or(true, |m| usize::try_from(m).unwrap() > mem.current_length)
374    {
375        return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
376    }
377
378    let dst = usize::try_from(dst).unwrap();
379    let src = usize::try_from(src).unwrap();
380
381    // Bounds and casts are checked above, by this point we know that
382    // everything is safe.
383    let dst = mem.base.add(dst);
384    let src = mem.base.add(src);
385    ptr::copy(src, dst, len as usize);
386
387    Ok(())
388}
389
390/// Perform the `memory.fill` operation for the memory in an unsynchronized,
391/// non-atomic way.
392///
393/// # Errors
394///
395/// Returns a `Trap` error if the memory range is out of bounds.
396///
397/// # Safety
398/// The memory is not filled atomically and is not synchronized: it's the
399/// caller's responsibility to synchronize.
400pub(crate) unsafe fn memory_fill(
401    mem: &VMMemoryDefinition,
402    dst: u32,
403    val: u32,
404    len: u32,
405) -> Result<(), Trap> {
406    if dst
407        .checked_add(len)
408        .map_or(true, |m| usize::try_from(m).unwrap() > mem.current_length)
409    {
410        return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
411    }
412
413    let dst = isize::try_from(dst).unwrap();
414    let val = val as u8;
415
416    // Bounds and casts are checked above, by this point we know that
417    // everything is safe.
418    let dst = mem.base.offset(dst);
419    ptr::write_bytes(dst, val, len as usize);
420
421    Ok(())
422}
423
424/// Perform the `memory32.atomic.check32` operation for the memory. Return 0 if same, 1 if different
425///
426/// # Errors
427///
428/// Returns a `Trap` error if the memory range is out of bounds or 32bits unligned.
429///
430/// # Safety
431/// memory access is unsafe
432pub(crate) unsafe fn memory32_atomic_check32(
433    mem: &VMMemoryDefinition,
434    dst: u32,
435    val: u32,
436) -> Result<u32, Trap> {
437    if usize::try_from(dst).unwrap() > mem.current_length {
438        return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
439    }
440
441    let dst = isize::try_from(dst).unwrap();
442    if dst & 0b11 != 0 {
443        return Err(Trap::lib(TrapCode::UnalignedAtomic));
444    }
445
446    // Bounds and casts are checked above, by this point we know that
447    // everything is safe.
448    let dst = mem.base.offset(dst) as *mut u32;
449    let atomic_dst = AtomicPtr::new(dst);
450    let read_val = *atomic_dst.load(Ordering::Acquire);
451    let ret = if read_val == val { 0 } else { 1 };
452    Ok(ret)
453}
454
455/// Perform the `memory32.atomic.check64` operation for the memory. Return 0 if same, 1 if different
456///
457/// # Errors
458///
459/// Returns a `Trap` error if the memory range is out of bounds or 64bits unaligned.
460///
461/// # Safety
462/// memory access is unsafe
463pub(crate) unsafe fn memory32_atomic_check64(
464    mem: &VMMemoryDefinition,
465    dst: u32,
466    val: u64,
467) -> Result<u32, Trap> {
468    if usize::try_from(dst).unwrap() > mem.current_length {
469        return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
470    }
471
472    let dst = isize::try_from(dst).unwrap();
473    if dst & 0b111 != 0 {
474        return Err(Trap::lib(TrapCode::UnalignedAtomic));
475    }
476
477    // Bounds and casts are checked above, by this point we know that
478    // everything is safe.
479    let dst = mem.base.offset(dst) as *mut u64;
480    let atomic_dst = AtomicPtr::new(dst);
481    let read_val = *atomic_dst.load(Ordering::Acquire);
482    let ret = if read_val == val { 0 } else { 1 };
483    Ok(ret)
484}
485
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: *mut u8,

    /// The current number of elements in the table.
    pub current_elements: u32,
}
497
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Verifies the Rust layout of `VMTableDefinition` (size and both field
    /// offsets) matches what `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmtable_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offsets.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offsets.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offsets.vmtable_definition_current_elements())
        );
    }
}
524
/// The storage for a WebAssembly global defined within the instance.
///
/// Aligned to 16 bytes so any wasm value type (including v128) can be
/// stored in the `RawValue` slot.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug, Clone)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    /// Raw value of the global.
    pub val: RawValue,
}
535
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use crate::{VMFuncRef, VMOffsets};
    use more_asserts::assert_ge;
    use std::mem::{align_of, size_of};
    use wasmer_types::ModuleInfo;

    /// A global slot must be at least as aligned as every value type that
    /// can be stored in it.
    #[test]
    fn check_vmglobal_definition_alignment() {
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<i32>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<i64>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<f32>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<f64>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<VMFuncRef>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<[u8; 16]>());
    }

    /// Locally-defined globals are stored behind a pointer; the offset
    /// bookkeeping sizes them as pointers.
    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<*const VMGlobalDefinition>(),
            usize::from(offsets.size_of_vmglobal_local())
        );
    }

    /// The globals region inside the vmctx must start 16-byte aligned to
    /// match the struct's `align(16)`.
    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }
}
571
572impl VMGlobalDefinition {
573    /// Construct a `VMGlobalDefinition`.
574    pub fn new() -> Self {
575        Self {
576            val: Default::default(),
577        }
578    }
579}
580
/// An index into the shared signature registry, usable for checking signatures
/// at indirect calls.
#[repr(C)]
#[cfg_attr(feature = "artifact-size", derive(loupe::MemoryUsage))]
#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)]
pub struct VMSharedSignatureIndex(u32);
587
#[cfg(test)]
mod test_vmshared_signature_index {
    use super::VMSharedSignatureIndex;
    use std::mem::size_of;
    use wasmer_types::{ModuleInfo, TargetSharedSignatureIndex, VMOffsets};

    /// Verifies the size of `VMSharedSignatureIndex` matches what
    /// `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmshared_signature_index() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMSharedSignatureIndex>(),
            usize::from(offsets.size_of_vmshared_signature_index())
        );
    }

    /// The host-side and target-side signature index types must agree in
    /// size since they are exchanged across the compiled-code boundary.
    #[test]
    fn check_target_shared_signature_index() {
        assert_eq!(
            size_of::<VMSharedSignatureIndex>(),
            size_of::<TargetSharedSignatureIndex>()
        );
    }
}
612
613impl VMSharedSignatureIndex {
614    /// Create a new `VMSharedSignatureIndex`.
615    pub fn new(value: u32) -> Self {
616        Self(value)
617    }
618}
619
impl Default for VMSharedSignatureIndex {
    /// The default is `u32::MAX` — presumably a sentinel matching no
    /// registered signature (TODO confirm against the signature registry).
    fn default() -> Self {
        Self::new(u32::MAX)
    }
}
625
/// The VM caller-checked "anyfunc" record, for caller-side signature checking.
/// It consists of the actual function pointer and a signature id to be checked
/// by the caller.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
#[repr(C)]
pub struct VMCallerCheckedAnyfunc {
    /// Function body.
    pub func_ptr: *const VMFunctionBody,
    /// Function signature id, compared against the caller's expected id
    /// before an indirect call is allowed.
    pub type_index: VMSharedSignatureIndex,
    /// Function `VMContext` or host env.
    pub vmctx: VMFunctionContext,
    /// Address of the function call trampoline to invoke this function using
    /// a dynamic argument list.
    pub call_trampoline: VMTrampoline,
    // If more elements are added here, remember to add offset_of tests below!
}
643
#[cfg(test)]
mod test_vmcaller_checked_anyfunc {
    use super::VMCallerCheckedAnyfunc;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Verifies the Rust layout of `VMCallerCheckedAnyfunc` matches the
    /// offsets `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmcaller_checked_anyfunc_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMCallerCheckedAnyfunc>(),
            usize::from(offsets.size_of_vmcaller_checked_anyfunc())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, func_ptr),
            usize::from(offsets.vmcaller_checked_anyfunc_func_ptr())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, type_index),
            usize::from(offsets.vmcaller_checked_anyfunc_type_index())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, vmctx),
            usize::from(offsets.vmcaller_checked_anyfunc_vmctx())
        );
    }
}
674
/// An array that stores addresses of builtin functions. We translate code
/// to use indirect calls. This way, we don't have to patch the code.
#[repr(C)]
pub struct VMBuiltinFunctionsArray {
    // One entry per `VMBuiltinFunctionIndex`; each entry is the address of
    // the corresponding libcall, stored as a plain `usize`.
    ptrs: [usize; Self::len()],
}
681
impl VMBuiltinFunctionsArray {
    /// Number of entries in the array: one per builtin function index.
    pub const fn len() -> usize {
        VMBuiltinFunctionIndex::builtin_functions_total_number() as usize
    }

    /// Build the array with every slot filled with the address of the
    /// matching libcall from `crate::libcalls`. The slot for each libcall
    /// is fixed by `VMBuiltinFunctionIndex`, which is what compiled code
    /// uses to select a builtin.
    pub fn initialized() -> Self {
        use crate::libcalls::*;

        let mut ptrs = [0; Self::len()];

        // Memory grow/size.
        ptrs[VMBuiltinFunctionIndex::get_memory32_grow_index().index() as usize] =
            wasmer_vm_memory32_grow as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory32_grow_index().index() as usize] =
            wasmer_vm_imported_memory32_grow as usize;

        ptrs[VMBuiltinFunctionIndex::get_memory32_size_index().index() as usize] =
            wasmer_vm_memory32_size as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory32_size_index().index() as usize] =
            wasmer_vm_imported_memory32_size as usize;

        // Table and element-segment operations.
        ptrs[VMBuiltinFunctionIndex::get_table_copy_index().index() as usize] =
            wasmer_vm_table_copy as usize;

        ptrs[VMBuiltinFunctionIndex::get_table_init_index().index() as usize] =
            wasmer_vm_table_init as usize;
        ptrs[VMBuiltinFunctionIndex::get_elem_drop_index().index() as usize] =
            wasmer_vm_elem_drop as usize;

        // Bulk-memory operations and trap raising.
        ptrs[VMBuiltinFunctionIndex::get_memory_copy_index().index() as usize] =
            wasmer_vm_memory32_copy as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_copy_index().index() as usize] =
            wasmer_vm_imported_memory32_copy as usize;
        ptrs[VMBuiltinFunctionIndex::get_memory_fill_index().index() as usize] =
            wasmer_vm_memory32_fill as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_fill_index().index() as usize] =
            wasmer_vm_imported_memory32_fill as usize;
        ptrs[VMBuiltinFunctionIndex::get_memory_init_index().index() as usize] =
            wasmer_vm_memory32_init as usize;
        ptrs[VMBuiltinFunctionIndex::get_data_drop_index().index() as usize] =
            wasmer_vm_data_drop as usize;
        ptrs[VMBuiltinFunctionIndex::get_raise_trap_index().index() as usize] =
            wasmer_vm_raise_trap as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_size_index().index() as usize] =
            wasmer_vm_table_size as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_table_size_index().index() as usize] =
            wasmer_vm_imported_table_size as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_grow_index().index() as usize] =
            wasmer_vm_table_grow as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_table_grow_index().index() as usize] =
            wasmer_vm_imported_table_grow as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_get_index().index() as usize] =
            wasmer_vm_table_get as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_table_get_index().index() as usize] =
            wasmer_vm_imported_table_get as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_set_index().index() as usize] =
            wasmer_vm_table_set as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_table_set_index().index() as usize] =
            wasmer_vm_imported_table_set as usize;
        ptrs[VMBuiltinFunctionIndex::get_func_ref_index().index() as usize] =
            wasmer_vm_func_ref as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_fill_index().index() as usize] =
            wasmer_vm_table_fill as usize;

        // Atomics (threads proposal): wait/notify.
        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_wait32_index().index() as usize] =
            wasmer_vm_memory32_atomic_wait32 as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_wait32_index().index() as usize] =
            wasmer_vm_imported_memory32_atomic_wait32 as usize;
        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_wait64_index().index() as usize] =
            wasmer_vm_memory32_atomic_wait64 as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_wait64_index().index() as usize] =
            wasmer_vm_imported_memory32_atomic_wait64 as usize;
        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_notify_index().index() as usize] =
            wasmer_vm_memory32_atomic_notify as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_notify_index().index() as usize] =
            wasmer_vm_imported_memory32_atomic_notify as usize;

        // Exception handling.
        ptrs[VMBuiltinFunctionIndex::get_imported_throw_index().index() as usize] =
            wasmer_vm_throw as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_rethrow_index().index() as usize] =
            wasmer_vm_rethrow as usize;

        ptrs[VMBuiltinFunctionIndex::get_imported_alloc_exception_index().index() as usize] =
            wasmer_vm_alloc_exception as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_delete_exception_index().index() as usize] =
            wasmer_vm_delete_exception as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_read_exception_index().index() as usize] =
            wasmer_vm_read_exception as usize;

        // Debugging helper.
        ptrs[VMBuiltinFunctionIndex::get_imported_debug_usize_index().index() as usize] =
            wasmer_vm_dbg_usize as usize;

        // Every builtin index must have been filled in above; a zero entry
        // means a libcall assignment was forgotten.
        debug_assert!(ptrs.iter().cloned().all(|p| p != 0));

        Self { ptrs }
    }
}
777
/// The VM "context", which is pointed to by the `vmctx` arg in the compiler.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
///
/// TODO: We could move the globals into the `vmctx` allocation too.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {}
790
impl VMContext {
    /// Return a shared reference to the associated `Instance`.
    ///
    /// # Safety
    /// This is unsafe because it doesn't work on just any `VMContext`, it must
    /// be a `VMContext` allocated as part of an `Instance`, so that the
    /// `Instance` starts exactly `Instance::vmctx_offset()` bytes before `self`.
    #[allow(clippy::cast_ptr_alignment)]
    #[inline]
    pub(crate) unsafe fn instance(&self) -> &Instance {
        // Walk backwards from the vmctx pointer to the start of the owning
        // `Instance` allocation.
        &*((self as *const Self as *mut u8).offset(-Instance::vmctx_offset()) as *const Instance)
    }

    /// Return a mutable reference to the associated `Instance`.
    ///
    /// # Safety
    /// Same contract as [`VMContext::instance`]; additionally the caller must
    /// guarantee no other references to the `Instance` are live.
    #[inline]
    pub(crate) unsafe fn instance_mut(&mut self) -> &mut Instance {
        &mut *((self as *const Self as *mut u8).offset(-Instance::vmctx_offset()) as *mut Instance)
    }
}
808
/// The type for trampolines in the VM: a trampoline invokes the target
/// function body with arguments and results marshalled through a single
/// `RawValue` buffer.
pub type VMTrampoline = unsafe extern "C" fn(
    *mut VMContext,        // callee vmctx
    *const VMFunctionBody, // function we're actually calling
    *mut RawValue,         // space for arguments and return values
);
815
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address which is always valid, even if the memory grows.
    pub base: *mut u8,

    /// The current logical size of this linear memory in bytes.
    pub current_length: usize,
}
828
/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize.
unsafe impl Send for VMMemoryDefinition {}
/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize. And it's `Copy` so there's
/// really no difference between passing it by reference or by value as far as
/// correctness in a multi-threaded context is concerned.
unsafe impl Sync for VMMemoryDefinition {}
839
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    /// Verifies the Rust layout of `VMMemoryDefinition` (size and both
    /// field offsets) matches what `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.vmmemory_definition_current_length())
        );
    }
}