// wasmer_vm/vmcontext.rs

1// This file contains code from external sources.
2// Attributions: https://github.com/wasmerio/wasmer/blob/main/docs/ATTRIBUTIONS.md
3
4//! This file declares `VMContext` and several related structs which contain
5//! fields that compiled wasm code accesses directly.
6
use crate::global::VMGlobal;
use crate::instance::Instance;
use crate::memory::VMMemory;
use crate::store::InternalStoreHandle;
use crate::trap::{Trap, TrapCode};
use crate::VMFunctionBody;
use crate::VMTable;
use crate::{VMBuiltinFunctionIndex, VMFunction};
use std::convert::TryFrom;
use std::ptr::{self, NonNull};
use std::sync::atomic::{AtomicPtr, AtomicU32, AtomicU64, Ordering};
use wasmer_types::RawValue;
19
/// Union representing the first parameter passed when calling a function.
///
/// It may either be a pointer to the [`VMContext`] if it's a Wasm function
/// or a pointer to arbitrary data controlled by the host if it's a host function.
///
/// Both variants are thin raw pointers with identical size and layout, so the
/// `repr(C)` union occupies exactly one pointer; the impls below rely on this
/// to read either field regardless of which variant was stored.
#[derive(Copy, Clone, Eq)]
#[repr(C)]
pub union VMFunctionContext {
    /// Wasm functions take a pointer to [`VMContext`].
    pub vmctx: *mut VMContext,
    /// Host functions can have custom environments.
    pub host_env: *mut std::ffi::c_void,
}
32
33impl VMFunctionContext {
34    /// Check whether the pointer stored is null or not.
35    pub fn is_null(&self) -> bool {
36        unsafe { self.host_env.is_null() }
37    }
38}
39
40impl std::fmt::Debug for VMFunctionContext {
41    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
42        f.debug_struct("VMFunctionContext")
43            .field("vmctx_or_hostenv", unsafe { &self.host_env })
44            .finish()
45    }
46}
47
48impl std::cmp::PartialEq for VMFunctionContext {
49    fn eq(&self, rhs: &Self) -> bool {
50        unsafe { self.host_env as usize == rhs.host_env as usize }
51    }
52}
53
54impl std::hash::Hash for VMFunctionContext {
55    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
56        unsafe {
57            self.vmctx.hash(state);
58        }
59    }
60}
61
/// An imported function.
///
/// Compiled code reads `body` and `environment` directly at fixed offsets;
/// the layout is validated against `VMOffsets` in the test module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// A pointer to the imported function body.
    pub body: *const VMFunctionBody,

    /// A pointer to the `VMContext` that owns the function or host env data.
    pub environment: VMFunctionContext,

    /// Handle to the `VMFunction` in the context.
    pub handle: InternalStoreHandle<VMFunction>,
}
75
#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;
    use wasmer_types::VMOffsets;

    // Ensures the Rust layout of `VMFunctionImport` matches the offsets the
    // compiler backend (`VMOffsets`) assumes when emitting code.
    #[test]
    fn check_vmfunction_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMFunctionImport>(),
            usize::from(offsets.size_of_vmfunction_import())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, body),
            usize::from(offsets.vmfunction_import_body())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, environment),
            usize::from(offsets.vmfunction_import_vmctx())
        );
    }
}
102
/// The `VMDynamicFunctionContext` is the context that dynamic
/// functions will receive when called (rather than `vmctx`).
/// A dynamic function is a function for which we don't know the signature
/// until runtime.
///
/// As such, we need to expose the dynamic function `context`
/// containing the relevant context for running the function indicated
/// in `address`.
#[repr(C)]
pub struct VMDynamicFunctionContext<T> {
    /// The address of the inner dynamic function.
    ///
    /// Note: The function must be on the form of
    /// `(*mut T, SignatureIndex, *mut i128)`.
    pub address: *const VMFunctionBody,

    /// The context that the inner dynamic function will receive.
    /// Stored inline (not behind a pointer) so it lives directly after
    /// `address` in this `repr(C)` layout.
    pub ctx: T,
}
122
/// # Safety
/// The `ctx` itself must be `Send`; `address` can be passed between
/// threads because all usage is `unsafe` and synchronized.
unsafe impl<T: Sized + Send + Sync> Send for VMDynamicFunctionContext<T> {}
/// # Safety
/// The `ctx` itself must be `Sync`; `address` can be shared between
/// threads because all usage is `unsafe` and synchronized.
unsafe impl<T: Sized + Send + Sync> Sync for VMDynamicFunctionContext<T> {}
129
130impl<T: Sized + Clone + Send + Sync> Clone for VMDynamicFunctionContext<T> {
131    fn clone(&self) -> Self {
132        Self {
133            address: self.address,
134            ctx: self.ctx.clone(),
135        }
136    }
137}
138
#[cfg(test)]
mod test_vmdynamicfunction_import_context {
    use super::VMDynamicFunctionContext;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Ensures the layout of `VMDynamicFunctionContext` matches what the
    // compiler backend (`VMOffsets`) emits for generated trampolines.
    #[test]
    fn check_vmdynamicfunction_import_context_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMDynamicFunctionContext<usize>>(),
            usize::from(offsets.size_of_vmdynamicfunction_import_context())
        );
        assert_eq!(
            offset_of!(VMDynamicFunctionContext<usize>, address),
            usize::from(offsets.vmdynamicfunction_import_context_address())
        );
        assert_eq!(
            offset_of!(VMDynamicFunctionContext<usize>, ctx),
            usize::from(offsets.vmdynamicfunction_import_context_ctx())
        );
    }
}
165
/// A function kind is a calling convention into and out of wasm code.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(C)]
pub enum VMFunctionKind {
    /// A static function has the native signature:
    /// `extern "C" (vmctx, arg1, arg2...) -> (result1, result2, ...)`.
    ///
    /// This is the default for functions that are defined:
    /// 1. In the Host, natively
    /// 2. In the WebAssembly file
    Static,

    /// A dynamic function has the native signature:
    /// `extern "C" (ctx, &[Value]) -> Vec<Value>`.
    ///
    /// This is the default for functions that are defined:
    /// 1. In the Host, dynamically
    Dynamic,
}
185
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub definition: NonNull<VMTableDefinition>,

    /// Handle to the `VMTable` in the context.
    pub handle: InternalStoreHandle<VMTable>,
}
197
#[cfg(test)]
mod test_vmtable_import {
    use super::VMTableImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Ensures the layout of `VMTableImport` matches the offsets the compiler
    // backend (`VMOffsets`) assumes.
    #[test]
    fn check_vmtable_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offsets.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, definition),
            usize::from(offsets.vmtable_import_definition())
        );
    }
}
220
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub definition: NonNull<VMMemoryDefinition>,

    /// A handle to the `Memory` that owns the memory description.
    pub handle: InternalStoreHandle<VMMemory>,
}
232
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Ensures the layout of `VMMemoryImport` matches the offsets the compiler
    // backend (`VMOffsets`) assumes.
    #[test]
    fn check_vmmemory_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offsets.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, definition),
            usize::from(offsets.vmmemory_import_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, handle),
            usize::from(offsets.vmmemory_import_handle())
        );
    }
}
259
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
#[derive(Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub definition: NonNull<VMGlobalDefinition>,

    /// A handle to the `Global` that owns the global description.
    pub handle: InternalStoreHandle<VMGlobal>,
}
271
/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize. Additionally, all operations
/// on the underlying [`VMGlobal`] are thread-safe through the use of an
/// internal mutex.
unsafe impl Send for VMGlobalImport {}
/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize. And because it's `Clone`, there's
/// really no difference between passing it by reference or by value as far as
/// correctness in a multi-threaded context is concerned.
unsafe impl Sync for VMGlobalImport {}
283
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Ensures the layout of `VMGlobalImport` matches the offsets the compiler
    // backend (`VMOffsets`) assumes.
    #[test]
    fn check_vmglobal_import_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offsets.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, definition),
            usize::from(offsets.vmglobal_import_definition())
        );
    }
}
306
307/// Do an unsynchronized, non-atomic `memory.copy` for the memory.
308///
309/// # Errors
310///
311/// Returns a `Trap` error when the source or destination ranges are out of
312/// bounds.
313///
314/// # Safety
315/// The memory is not copied atomically and is not synchronized: it's the
316/// caller's responsibility to synchronize.
317pub(crate) unsafe fn memory_copy(
318    mem: &VMMemoryDefinition,
319    dst: u32,
320    src: u32,
321    len: u32,
322) -> Result<(), Trap> {
323    // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy
324    if src
325        .checked_add(len)
326        .map_or(true, |n| usize::try_from(n).unwrap() > mem.current_length)
327        || dst
328            .checked_add(len)
329            .map_or(true, |m| usize::try_from(m).unwrap() > mem.current_length)
330    {
331        return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
332    }
333
334    let dst = usize::try_from(dst).unwrap();
335    let src = usize::try_from(src).unwrap();
336
337    // Bounds and casts are checked above, by this point we know that
338    // everything is safe.
339    let dst = mem.base.add(dst);
340    let src = mem.base.add(src);
341    ptr::copy(src, dst, len as usize);
342
343    Ok(())
344}
345
346/// Perform the `memory.fill` operation for the memory in an unsynchronized,
347/// non-atomic way.
348///
349/// # Errors
350///
351/// Returns a `Trap` error if the memory range is out of bounds.
352///
353/// # Safety
354/// The memory is not filled atomically and is not synchronized: it's the
355/// caller's responsibility to synchronize.
356pub(crate) unsafe fn memory_fill(
357    mem: &VMMemoryDefinition,
358    dst: u32,
359    val: u32,
360    len: u32,
361) -> Result<(), Trap> {
362    if dst
363        .checked_add(len)
364        .map_or(true, |m| usize::try_from(m).unwrap() > mem.current_length)
365    {
366        return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
367    }
368
369    let dst = isize::try_from(dst).unwrap();
370    let val = val as u8;
371
372    // Bounds and casts are checked above, by this point we know that
373    // everything is safe.
374    let dst = mem.base.offset(dst);
375    ptr::write_bytes(dst, val, len as usize);
376
377    Ok(())
378}
379
380/// Perform the `memory32.atomic.check32` operation for the memory. Return 0 if same, 1 if different
381///
382/// # Errors
383///
384/// Returns a `Trap` error if the memory range is out of bounds or 32bits unligned.
385///
386/// # Safety
387/// memory access is unsafe
388pub(crate) unsafe fn memory32_atomic_check32(
389    mem: &VMMemoryDefinition,
390    dst: u32,
391    val: u32,
392) -> Result<u32, Trap> {
393    if usize::try_from(dst).unwrap() > mem.current_length {
394        return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
395    }
396
397    let dst = isize::try_from(dst).unwrap();
398    if dst & 0b11 != 0 {
399        return Err(Trap::lib(TrapCode::UnalignedAtomic));
400    }
401
402    // Bounds and casts are checked above, by this point we know that
403    // everything is safe.
404    let dst = mem.base.offset(dst) as *mut u32;
405    let atomic_dst = AtomicPtr::new(dst);
406    let read_val = *atomic_dst.load(Ordering::Acquire);
407    let ret = if read_val == val { 0 } else { 1 };
408    Ok(ret)
409}
410
411/// Perform the `memory32.atomic.check64` operation for the memory. Return 0 if same, 1 if different
412///
413/// # Errors
414///
415/// Returns a `Trap` error if the memory range is out of bounds or 64bits unaligned.
416///
417/// # Safety
418/// memory access is unsafe
419pub(crate) unsafe fn memory32_atomic_check64(
420    mem: &VMMemoryDefinition,
421    dst: u32,
422    val: u64,
423) -> Result<u32, Trap> {
424    if usize::try_from(dst).unwrap() > mem.current_length {
425        return Err(Trap::lib(TrapCode::HeapAccessOutOfBounds));
426    }
427
428    let dst = isize::try_from(dst).unwrap();
429    if dst & 0b111 != 0 {
430        return Err(Trap::lib(TrapCode::UnalignedAtomic));
431    }
432
433    // Bounds and casts are checked above, by this point we know that
434    // everything is safe.
435    let dst = mem.base.offset(dst) as *mut u64;
436    let atomic_dst = AtomicPtr::new(dst);
437    let read_val = *atomic_dst.load(Ordering::Acquire);
438    let ret = if read_val == val { 0 } else { 1 };
439    Ok(ret)
440}
441
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: *mut u8,

    /// The current number of elements in the table.
    pub current_elements: u32,
}
453
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Ensures the layout of `VMTableDefinition` matches the offsets the
    // compiler backend (`VMOffsets`) assumes.
    #[test]
    fn check_vmtable_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offsets.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offsets.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offsets.vmtable_definition_current_elements())
        );
    }
}
480
/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
///
/// The 16-byte alignment lets a single slot hold any value up to a v128;
/// the alignment tests below verify this covers every global type.
#[derive(Debug, Clone)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    /// Raw value of the global.
    pub val: RawValue,
}
491
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use crate::{VMFuncRef, VMOffsets};
    use more_asserts::assert_ge;
    use std::mem::{align_of, size_of};
    use wasmer_types::ModuleInfo;

    // A global slot must be at least as aligned as every value type it can
    // store (i32/i64/f32/f64/funcref/v128).
    #[test]
    fn check_vmglobal_definition_alignment() {
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<i32>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<i64>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<f32>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<f64>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<VMFuncRef>());
        assert_ge!(align_of::<VMGlobalDefinition>(), align_of::<[u8; 16]>());
    }

    // Locals store globals behind a pointer, so the size recorded in
    // `VMOffsets` is the pointer size, not the definition size.
    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<*const VMGlobalDefinition>(),
            usize::from(offsets.size_of_vmglobal_local())
        );
    }

    // The globals area inside the vmctx must start 16-byte aligned to honor
    // the `align(16)` on `VMGlobalDefinition`.
    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }
}
527
528impl VMGlobalDefinition {
529    /// Construct a `VMGlobalDefinition`.
530    pub fn new() -> Self {
531        Self {
532            val: Default::default(),
533        }
534    }
535}
536
/// An index into the shared signature registry, usable for checking signatures
/// at indirect calls.
#[repr(C)]
#[cfg_attr(feature = "artifact-size", derive(loupe::MemoryUsage))]
#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)]
pub struct VMSharedSignatureIndex(u32);
543
#[cfg(test)]
mod test_vmshared_signature_index {
    use super::VMSharedSignatureIndex;
    use std::mem::size_of;
    use wasmer_types::{ModuleInfo, TargetSharedSignatureIndex, VMOffsets};

    // Size must agree with what the compiler backend (`VMOffsets`) emits.
    #[test]
    fn check_vmshared_signature_index() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMSharedSignatureIndex>(),
            usize::from(offsets.size_of_vmshared_signature_index())
        );
    }

    // The runtime index must be interchangeable with the compiler-side
    // `TargetSharedSignatureIndex` representation.
    #[test]
    fn check_target_shared_signature_index() {
        assert_eq!(
            size_of::<VMSharedSignatureIndex>(),
            size_of::<TargetSharedSignatureIndex>()
        );
    }
}
568
569impl VMSharedSignatureIndex {
570    /// Create a new `VMSharedSignatureIndex`.
571    pub fn new(value: u32) -> Self {
572        Self(value)
573    }
574}
575
576impl Default for VMSharedSignatureIndex {
577    fn default() -> Self {
578        Self::new(u32::MAX)
579    }
580}
581
/// The VM caller-checked "anyfunc" record, for caller-side signature checking.
/// It consists of the actual function pointer and a signature id to be checked
/// by the caller.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
#[repr(C)]
pub struct VMCallerCheckedAnyfunc {
    /// Function body.
    pub func_ptr: *const VMFunctionBody,
    /// Function signature id.
    pub type_index: VMSharedSignatureIndex,
    /// Function `VMContext` or host env.
    pub vmctx: VMFunctionContext,
    /// Address of the function call trampoline to invoke this function using
    /// a dynamic argument list.
    pub call_trampoline: VMTrampoline,
    // If more elements are added here, remember to add offset_of tests below!
}
599
#[cfg(test)]
mod test_vmcaller_checked_anyfunc {
    use super::VMCallerCheckedAnyfunc;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Ensures the layout of `VMCallerCheckedAnyfunc` matches the offsets the
    // compiler backend (`VMOffsets`) assumes.
    // NOTE(review): `call_trampoline` has no offset assertion here — confirm
    // whether `VMOffsets` exposes one and add it if so.
    #[test]
    fn check_vmcaller_checked_anyfunc_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMCallerCheckedAnyfunc>(),
            usize::from(offsets.size_of_vmcaller_checked_anyfunc())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, func_ptr),
            usize::from(offsets.vmcaller_checked_anyfunc_func_ptr())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, type_index),
            usize::from(offsets.vmcaller_checked_anyfunc_type_index())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, vmctx),
            usize::from(offsets.vmcaller_checked_anyfunc_vmctx())
        );
    }
}
630
/// An array that stores addresses of builtin functions. We translate code
/// to use indirect calls. This way, we don't have to patch the code.
///
/// Each slot holds the address of one libcall, indexed by
/// [`VMBuiltinFunctionIndex`]; see `initialized()` for the mapping.
#[repr(C)]
pub struct VMBuiltinFunctionsArray {
    ptrs: [usize; Self::len()],
}
637
impl VMBuiltinFunctionsArray {
    /// Total number of builtin-function slots in the array.
    pub const fn len() -> usize {
        VMBuiltinFunctionIndex::builtin_functions_total_number() as usize
    }

    /// Build the array with every slot populated: each
    /// `VMBuiltinFunctionIndex` is mapped to the address of the
    /// corresponding `wasmer_vm_*` libcall.
    pub fn initialized() -> Self {
        use crate::libcalls::*;

        let mut ptrs = [0; Self::len()];

        ptrs[VMBuiltinFunctionIndex::get_memory32_grow_index().index() as usize] =
            wasmer_vm_memory32_grow as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory32_grow_index().index() as usize] =
            wasmer_vm_imported_memory32_grow as usize;

        ptrs[VMBuiltinFunctionIndex::get_memory32_size_index().index() as usize] =
            wasmer_vm_memory32_size as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory32_size_index().index() as usize] =
            wasmer_vm_imported_memory32_size as usize;

        ptrs[VMBuiltinFunctionIndex::get_table_copy_index().index() as usize] =
            wasmer_vm_table_copy as usize;

        ptrs[VMBuiltinFunctionIndex::get_table_init_index().index() as usize] =
            wasmer_vm_table_init as usize;
        ptrs[VMBuiltinFunctionIndex::get_elem_drop_index().index() as usize] =
            wasmer_vm_elem_drop as usize;

        ptrs[VMBuiltinFunctionIndex::get_memory_copy_index().index() as usize] =
            wasmer_vm_memory32_copy as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_copy_index().index() as usize] =
            wasmer_vm_imported_memory32_copy as usize;
        ptrs[VMBuiltinFunctionIndex::get_memory_fill_index().index() as usize] =
            wasmer_vm_memory32_fill as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_fill_index().index() as usize] =
            wasmer_vm_imported_memory32_fill as usize;
        ptrs[VMBuiltinFunctionIndex::get_memory_init_index().index() as usize] =
            wasmer_vm_memory32_init as usize;
        ptrs[VMBuiltinFunctionIndex::get_data_drop_index().index() as usize] =
            wasmer_vm_data_drop as usize;
        ptrs[VMBuiltinFunctionIndex::get_raise_trap_index().index() as usize] =
            wasmer_vm_raise_trap as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_size_index().index() as usize] =
            wasmer_vm_table_size as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_table_size_index().index() as usize] =
            wasmer_vm_imported_table_size as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_grow_index().index() as usize] =
            wasmer_vm_table_grow as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_table_grow_index().index() as usize] =
            wasmer_vm_imported_table_grow as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_get_index().index() as usize] =
            wasmer_vm_table_get as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_table_get_index().index() as usize] =
            wasmer_vm_imported_table_get as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_set_index().index() as usize] =
            wasmer_vm_table_set as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_table_set_index().index() as usize] =
            wasmer_vm_imported_table_set as usize;
        ptrs[VMBuiltinFunctionIndex::get_func_ref_index().index() as usize] =
            wasmer_vm_func_ref as usize;
        ptrs[VMBuiltinFunctionIndex::get_table_fill_index().index() as usize] =
            wasmer_vm_table_fill as usize;

        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_wait32_index().index() as usize] =
            wasmer_vm_memory32_atomic_wait32 as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_wait32_index().index() as usize] =
            wasmer_vm_imported_memory32_atomic_wait32 as usize;
        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_wait64_index().index() as usize] =
            wasmer_vm_memory32_atomic_wait64 as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_wait64_index().index() as usize] =
            wasmer_vm_imported_memory32_atomic_wait64 as usize;
        ptrs[VMBuiltinFunctionIndex::get_memory_atomic_notify_index().index() as usize] =
            wasmer_vm_memory32_atomic_notify as usize;
        ptrs[VMBuiltinFunctionIndex::get_imported_memory_atomic_notify_index().index() as usize] =
            wasmer_vm_imported_memory32_atomic_notify as usize;

        // Every index must have been assigned; a zero entry means a builtin
        // was added without a matching libcall mapping above.
        debug_assert!(ptrs.iter().cloned().all(|p| p != 0));

        Self { ptrs }
    }
}
719
/// The VM "context", which is pointed to by the `vmctx` arg in the compiler.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
///
/// TODO: We could move the globals into the `vmctx` allocation too.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {}
732
impl VMContext {
    /// Return a mutable reference to the associated `Instance`.
    ///
    /// # Safety
    /// This is unsafe because it doesn't work on just any `VMContext`, it must
    /// be a `VMContext` allocated as part of an `Instance`.
    #[allow(clippy::cast_ptr_alignment)]
    #[inline]
    pub(crate) unsafe fn instance(&self) -> &Instance {
        // The `VMContext` lives at a fixed offset inside its owning
        // `Instance`, so walking backwards by that offset recovers it.
        &*((self as *const Self as *mut u8).offset(-Instance::vmctx_offset()) as *const Instance)
    }

    /// Mutable counterpart of [`VMContext::instance`].
    ///
    /// # Safety
    /// Same contract as [`VMContext::instance`]: `self` must be the
    /// `VMContext` embedded in an `Instance` allocation.
    #[inline]
    pub(crate) unsafe fn instance_mut(&mut self) -> &mut Instance {
        &mut *((self as *const Self as *mut u8).offset(-Instance::vmctx_offset()) as *mut Instance)
    }
}
750
/// The type for trampolines in the VM.
pub type VMTrampoline = unsafe extern "C" fn(
    *mut VMContext,        // callee vmctx
    *const VMFunctionBody, // function we're actually calling
    *mut RawValue,         // space for arguments and return values
);
757
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address which is always valid, even if the memory grows.
    pub base: *mut u8,

    /// The current logical size of this linear memory in bytes.
    pub current_length: usize,
}
770
/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize.
unsafe impl Send for VMMemoryDefinition {}
/// # Safety
/// This data is safe to share between threads because it's plain data that
/// is the user's responsibility to synchronize. And it's `Copy` so there's
/// really no difference between passing it by reference or by value as far as
/// correctness in a multi-threaded context is concerned.
unsafe impl Sync for VMMemoryDefinition {}
781
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use crate::VMOffsets;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmer_types::ModuleInfo;

    // Ensures the layout of `VMMemoryDefinition` matches the offsets the
    // compiler backend (`VMOffsets`) assumes.
    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = ModuleInfo::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.vmmemory_definition_current_length())
        );
    }
}