wasmtime_runtime/instance.rs

//! An `Instance` contains all the runtime state used by execution of a
//! wasm module (except its callstack and register state). An
//! `InstanceHandle` is a reference-counting handle for an `Instance`.

use crate::export::Export;
use crate::memory::{Memory, RuntimeMemoryCreator};
use crate::table::{Table, TableElement, TableElementType};
use crate::vmcontext::{
    VMBuiltinFunctionsArray, VMContext, VMFuncRef, VMFunctionImport, VMGlobalDefinition,
    VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMOpaqueContext, VMRuntimeLimits,
    VMTableDefinition, VMTableImport,
};
use crate::{
    ExportFunction, ExportGlobal, ExportMemory, ExportTable, GcStore, Imports, ModuleRuntimeInfo,
    SendSyncPtr, Store, VMFunctionBody, VMGcRef, VMSharedTypeIndex, WasmFault, I31,
};
use anyhow::Error;
use anyhow::Result;
use sptr::Strict;
use std::alloc::{self, Layout};
use std::any::Any;
use std::ops::Range;
use std::ptr::NonNull;
use std::sync::atomic::AtomicU64;
use std::sync::Arc;
use std::{mem, ptr};
use wasmtime_environ::ModuleInternedTypeIndex;
use wasmtime_environ::{
    packed_option::ReservedValue, DataIndex, DefinedGlobalIndex, DefinedMemoryIndex,
    DefinedTableIndex, ElemIndex, EntityIndex, EntityRef, EntitySet, FuncIndex, GlobalIndex,
    GlobalInit, HostPtr, MemoryIndex, MemoryPlan, Module, PrimaryMap, TableElementExpression,
    TableIndex, TableInitialValue, TableSegmentElements, Trap, VMOffsets, WasmRefType, WasmValType,
    VMCONTEXT_MAGIC,
};
#[cfg(feature = "wmemcheck")]
use wasmtime_wmemcheck::Wmemcheck;

mod allocator;

pub use allocator::*;

/// A type that roughly corresponds to a WebAssembly instance, but is also used
/// for host-defined objects.
///
/// This structure is never allocated directly but is instead managed through
/// an `InstanceHandle`. This structure ends with a `VMContext` which has a
/// dynamic size corresponding to the `module` configured within. Memory
/// management of this structure is always externalized.
///
/// Instances here can correspond to actual instantiated modules, but they are
/// also used ubiquitously for host-defined objects. For example, creating a
/// host-defined memory will have a `module` that looks like it exports a single
/// memory (and similarly for other constructs).
///
/// This `Instance` type is used as a ubiquitous representation for WebAssembly
/// values, whether they were created on the host or through a module.
#[repr(C)] // ensure that the vmctx field is last.
pub struct Instance {
    /// The runtime info (corresponding to the "compiled module"
    /// abstraction in higher layers) that is retained and needed for
    /// lazy initialization. This provides access to the underlying
    /// Wasm module entities, the compiled JIT code, metadata about
    /// functions, lazy initialization state, etc.
    runtime_info: Arc<dyn ModuleRuntimeInfo>,

    /// WebAssembly linear memory data.
    ///
    /// This is where all runtime information about defined linear memories in
    /// this module lives.
    ///
    /// The `MemoryAllocationIndex` was given from our `InstanceAllocator` and
    /// must be given back to the instance allocator when deallocating each
    /// memory.
    memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,

    /// WebAssembly table data.
    ///
    /// Like memories, this is only for defined tables in the module and
    /// contains all of their runtime state.
    ///
    /// The `TableAllocationIndex` was given from our `InstanceAllocator` and
    /// must be given back to the instance allocator when deallocating each
    /// table.
    tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,

    /// Stores the dropped passive element segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_elements: EntitySet<ElemIndex>,

    /// Stores the dropped passive data segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_data: EntitySet<DataIndex>,

    /// Hosts can store arbitrary per-instance information here.
    ///
    /// Most of the time within Wasmtime this is `Box::new(())`, a no-op
    /// allocation, but some host-defined objects will store their state here.
    host_state: Box<dyn Any + Send + Sync>,

    /// A pointer to the `vmctx` field at the end of the `Instance`.
    ///
    /// If you're looking at this, a reasonable question would be "why do we
    /// need a pointer to ourselves?" because after all the pointer's value is
    /// trivially derivable from any `&Instance` pointer. The rationale for this
    /// field's existence is subtle, but it's required for correctness. The
    /// short version is "this makes miri happy".
    ///
    /// The long version of why this field exists is that the rules that MIRI
    /// uses to ensure pointers are used correctly have various conditions on
    /// them that depend on how pointers are used. More specifically, if
    /// `*mut T` is derived from `&mut T`, then that invalidates all prior
    /// pointers derived from the `&mut T`. This means that while we liberally
    /// want to re-acquire a `*mut VMContext` throughout the implementation of
    /// `Instance`, the trivial way, a function
    /// `fn vmctx(&mut Instance) -> *mut VMContext`, would effectively
    /// invalidate all prior `*mut VMContext` pointers acquired. The purpose of
    /// this field is to serve as a sort of source-of-truth for where
    /// `*mut VMContext` pointers come from.
    ///
    /// This field is initialized when the `Instance` is created with the
    /// original allocation's pointer. That means that the provenance of this
    /// pointer contains the entire allocation (both instance and `VMContext`).
    /// This provenance bit is then "carried through" where `fn vmctx` will base
    /// all returned pointers on this pointer itself. This provides the means of
    /// never invalidating this pointer throughout MIRI and additionally being
    /// able to still temporarily have `&mut Instance` methods and such.
    ///
    /// It's important to note, though, that this is not here purely for MIRI.
    /// The careful construction of the `fn vmctx` method has ramifications on
    /// the LLVM IR generated, for example. A historical CVE on Wasmtime,
    /// GHSA-ch89-5g45-qwc7, was caused by relying on undefined behavior. By
    /// deriving VMContext pointers from this pointer it specifically hints to
    /// LLVM that trickery is afoot and it properly informs `noalias` and
    /// similar annotations and analyses. More-or-less this pointer is actually
    /// loaded in LLVM IR which helps defeat otherwise present aliasing
    /// optimizations, which we want, since writes to this should basically
    /// never be optimized out.
    ///
    /// As a final note it's worth pointing out that the machine code generated
    /// for accessing `fn vmctx` is still as one would expect. This member isn't
    /// actually ever loaded at runtime (or at least shouldn't be). Perhaps in
    /// the future if the memory consumption of this field is a problem we could
    /// shrink it slightly, but for now one extra pointer per wasm instance
    /// seems not too bad.
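    ///
    /// A minimal sketch of the provenance flow (not literal code from this
    /// module, just an illustration of how the pieces below fit together):
    ///
    /// ```ignore
    /// // At allocation time the self-reference is derived from the raw
    /// // allocation pointer, so its provenance covers the whole allocation.
    /// vmctx_self_reference = (base_ptr as *mut Instance).add(1).cast::<VMContext>();
    ///
    /// // Later, `fn vmctx` takes only the *address* of `self.vmctx` but the
    /// // *provenance* of the self-reference:
    /// let addr = std::ptr::addr_of!(self.vmctx);
    /// Strict::with_addr(self.vmctx_self_reference.as_ptr(), Strict::addr(addr))
    /// ```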
    vmctx_self_reference: SendSyncPtr<VMContext>,

    // TODO: add support for multiple memories; `wmemcheck_state` corresponds to
    // memory 0.
    #[cfg(feature = "wmemcheck")]
    pub(crate) wmemcheck_state: Option<Wmemcheck>,

    /// Additional context used by compiled wasm code. This field is last, and
    /// represents a dynamically-sized array that extends beyond the nominal
    /// end of the struct (similar to a flexible array member).
    vmctx: VMContext,
}

impl Instance {
    /// Create a new `Instance`, allocating its backing memory here.
    ///
    /// The allocation is made with the layout returned by
    /// `Self::alloc_layout`, i.e. properly aligned and large enough for the
    /// `Instance` header plus its trailing `VMContext`.
    unsafe fn new(
        req: InstanceAllocationRequest,
        memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
        tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
        memory_plans: &PrimaryMap<MemoryIndex, MemoryPlan>,
    ) -> InstanceHandle {
        // The allocation must be *at least* the size required of `Instance`.
        let layout = Self::alloc_layout(req.runtime_info.offsets());
        let ptr = alloc::alloc(layout);
        if ptr.is_null() {
            alloc::handle_alloc_error(layout);
        }
        let ptr = ptr.cast::<Instance>();

        let module = req.runtime_info.module();
        let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
        let dropped_data = EntitySet::with_capacity(module.passive_data_map.len());

        #[cfg(not(feature = "wmemcheck"))]
        let _ = memory_plans;

        ptr::write(
            ptr,
            Instance {
                runtime_info: req.runtime_info.clone(),
                memories,
                tables,
                dropped_elements,
                dropped_data,
                host_state: req.host_state,
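                // `ptr.add(1)` points just past the `Instance` header, which
                // is exactly where the trailing `VMContext` begins; recording
                // it here preserves the allocation's full provenance.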
                vmctx_self_reference: SendSyncPtr::new(NonNull::new(ptr.add(1).cast()).unwrap()),
                vmctx: VMContext {
                    _marker: std::marker::PhantomPinned,
                },
                #[cfg(feature = "wmemcheck")]
                wmemcheck_state: {
                    if req.wmemcheck {
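                        // Initial size of memory 0 in bytes: the declared
                        // minimum is in wasm pages of 64 KiB each.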
                        let size = memory_plans
                            .iter()
                            .next()
                            .map(|plan| plan.1.memory.minimum)
                            .unwrap_or(0)
                            * 64
                            * 1024;
                        Some(Wmemcheck::new(size as usize))
                    } else {
                        None
                    }
                },
            },
        );

        (*ptr).initialize_vmctx(module, req.runtime_info.offsets(), req.store, req.imports);
        InstanceHandle {
            instance: Some(SendSyncPtr::new(NonNull::new(ptr).unwrap())),
        }
    }

    /// Converts the provided `*mut VMContext` to an `Instance` pointer and runs
    /// the provided closure with the instance.
    ///
    /// This method will move the `vmctx` pointer backwards to point to the
    /// original `Instance` that precedes it. The closure is given a temporary
    /// `&mut Instance` whose lifetime is constrained to the closure itself to
    /// ensure the pointer doesn't accidentally escape.
    ///
    /// # Unsafety
    ///
    /// Callers must validate that the `vmctx` pointer is a valid allocation
    /// and that it's valid to acquire `&mut Instance` at this time. For example
    /// this can't be called twice on the same `VMContext` to get two active
    /// pointers to the same `Instance`.
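    ///
    /// A minimal sketch of the intended usage, with a hypothetical libcall
    /// shim (the name and body are illustrative only):
    ///
    /// ```ignore
    /// unsafe extern "C" fn example_libcall(vmctx: *mut VMContext) -> usize {
    ///     // Re-derive the owning `Instance` for the duration of the closure.
    ///     Instance::from_vmctx(vmctx, |instance| {
    ///         // `instance: &mut Instance` must not escape this closure.
    ///         instance.module().memory_plans.len()
    ///     })
    /// }
    /// ```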
    #[inline]
    pub unsafe fn from_vmctx<R>(vmctx: *mut VMContext, f: impl FnOnce(&mut Instance) -> R) -> R {
        assert!(!vmctx.is_null());
        let ptr = vmctx
            .byte_sub(mem::size_of::<Instance>())
            .cast::<Instance>();
        f(&mut *ptr)
    }

    /// Helper function to access various locations offset from our `*mut
    /// VMContext` object.
    ///
    /// # Safety
    ///
    /// This method is unsafe because the `offset` must be within bounds of the
    /// `VMContext` object trailing this instance.
    unsafe fn vmctx_plus_offset<T>(&self, offset: u32) -> *const T {
        self.vmctx()
            .byte_add(usize::try_from(offset).unwrap())
            .cast()
    }

    /// Dual of `vmctx_plus_offset`, but for mutability.
    unsafe fn vmctx_plus_offset_mut<T>(&mut self, offset: u32) -> *mut T {
        self.vmctx()
            .byte_add(usize::try_from(offset).unwrap())
            .cast()
    }

    pub(crate) fn module(&self) -> &Arc<Module> {
        self.runtime_info.module()
    }

    /// Translate a module-level interned type index into an engine-level
    /// interned type index.
    pub fn engine_type_index(&self, module_index: ModuleInternedTypeIndex) -> VMSharedTypeIndex {
        self.runtime_info.engine_type_index(module_index)
    }

    #[inline]
    fn offsets(&self) -> &VMOffsets<HostPtr> {
        self.runtime_info.offsets()
    }

    /// Return the indexed `VMFunctionImport`.
    fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
        unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
    }

    /// Return the indexed `VMTableImport`.
    fn imported_table(&self, index: TableIndex) -> &VMTableImport {
        unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
    }

    /// Return the indexed `VMMemoryImport`.
    fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
        unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
    }

    /// Return the indexed `VMGlobalImport`.
    fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
        unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
    }

    /// Return the indexed `VMTableDefinition`.
    #[allow(dead_code)]
    fn table(&mut self, index: DefinedTableIndex) -> VMTableDefinition {
        unsafe { *self.table_ptr(index) }
    }

    /// Set the indexed `VMTableDefinition` to the given value.
    fn set_table(&mut self, index: DefinedTableIndex, table: VMTableDefinition) {
        unsafe {
            *self.table_ptr(index) = table;
        }
    }

    /// Return a pointer to the indexed `VMTableDefinition`.
    fn table_ptr(&mut self, index: DefinedTableIndex) -> *mut VMTableDefinition {
        unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_vmtable_definition(index)) }
    }

    /// Get a locally defined or imported memory.
    pub(crate) fn get_memory(&self, index: MemoryIndex) -> VMMemoryDefinition {
        if let Some(defined_index) = self.module().defined_memory_index(index) {
            self.memory(defined_index)
        } else {
            let import = self.imported_memory(index);
            unsafe { VMMemoryDefinition::load(import.from) }
        }
    }

    /// Get a locally defined or imported memory.
    #[cfg(feature = "threads")]
    pub(crate) fn get_runtime_memory(&mut self, index: MemoryIndex) -> &mut Memory {
        if let Some(defined_index) = self.module().defined_memory_index(index) {
            unsafe { &mut *self.get_defined_memory(defined_index) }
        } else {
            let import = self.imported_memory(index);
            unsafe {
                let ptr =
                    Instance::from_vmctx(import.vmctx, |i| i.get_defined_memory(import.index));
                &mut *ptr
            }
        }
    }

    /// Return the indexed `VMMemoryDefinition`.
    fn memory(&self, index: DefinedMemoryIndex) -> VMMemoryDefinition {
        unsafe { VMMemoryDefinition::load(self.memory_ptr(index)) }
    }

    /// Set the indexed `VMMemoryDefinition` to the given value.
    fn set_memory(&self, index: DefinedMemoryIndex, mem: VMMemoryDefinition) {
        unsafe {
            *self.memory_ptr(index) = mem;
        }
    }

    /// Return a pointer to the indexed `VMMemoryDefinition`.
    fn memory_ptr(&self, index: DefinedMemoryIndex) -> *mut VMMemoryDefinition {
        unsafe { *self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_pointer(index)) }
    }

    /// Return the memories defined within this instance (not imported).
    pub fn defined_memories<'a>(
        &'a self,
    ) -> impl ExactSizeIterator<Item = (DefinedMemoryIndex, &'a Memory)> + 'a {
        self.memories
            .iter()
            .map(|(index, (_alloc_index, memory))| (index, memory))
    }

    /// Return the indexed `VMGlobalDefinition`.
    fn global(&mut self, index: DefinedGlobalIndex) -> &VMGlobalDefinition {
        unsafe { &*self.global_ptr(index) }
    }

    /// Return a pointer to the indexed `VMGlobalDefinition`.
    fn global_ptr(&mut self, index: DefinedGlobalIndex) -> *mut VMGlobalDefinition {
        unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_vmglobal_definition(index)) }
    }

    /// Get a raw pointer to the global at the given index regardless of
    /// whether it is defined locally or imported from another module.
    ///
    /// Panics if the index is out of bounds or is the reserved value.
    pub(crate) fn defined_or_imported_global_ptr(
        &mut self,
        index: GlobalIndex,
    ) -> *mut VMGlobalDefinition {
        if let Some(index) = self.module().defined_global_index(index) {
            self.global_ptr(index)
        } else {
            self.imported_global(index).from
        }
    }

    /// Get all globals within this instance.
    ///
    /// Returns both imported and defined globals.
    ///
    /// Returns both exported and non-exported globals.
    ///
    /// Gives access to the full globals space.
    pub fn all_globals<'a>(
        &'a mut self,
    ) -> impl ExactSizeIterator<Item = (GlobalIndex, ExportGlobal)> + 'a {
        let module = self.module().clone();
        module.globals.keys().map(move |idx| {
            (
                idx,
                ExportGlobal {
                    definition: self.defined_or_imported_global_ptr(idx),
                    vmctx: self.vmctx(),
                    global: self.module().globals[idx],
                },
            )
        })
    }

    /// Get the globals defined in this instance (not imported).
    pub fn defined_globals<'a>(
        &'a mut self,
    ) -> impl ExactSizeIterator<Item = (DefinedGlobalIndex, ExportGlobal)> + 'a {
        let module = self.module().clone();
        module
            .globals
            .keys()
            .skip(module.num_imported_globals)
            .map(move |global_idx| {
                let def_idx = module.defined_global_index(global_idx).unwrap();
                let global = ExportGlobal {
                    definition: self.global_ptr(def_idx),
                    vmctx: self.vmctx(),
                    global: self.module().globals[global_idx],
                };
                (def_idx, global)
            })
    }

    /// Return a pointer to the `VMRuntimeLimits` pointer stored in this
    /// instance's `VMContext`.
    #[inline]
    pub fn runtime_limits(&mut self) -> *mut *const VMRuntimeLimits {
        unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_runtime_limits()) }
    }

    /// Return a pointer to the global epoch counter used by this instance.
    pub fn epoch_ptr(&mut self) -> *mut *const AtomicU64 {
        unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_epoch_ptr()) }
    }

    /// Return a pointer to the GC heap base pointer.
    pub fn gc_heap_base(&mut self) -> *mut *mut u8 {
        unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_gc_heap_base()) }
    }

    /// Return a pointer to the GC heap bound.
    pub fn gc_heap_bound(&mut self) -> *mut usize {
        unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_gc_heap_bound()) }
    }

    /// Return a pointer to the collector-specific heap data.
    pub fn gc_heap_data(&mut self) -> *mut *mut u8 {
        unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_gc_heap_data()) }
    }

    /// Gets a pointer to this instance's `Store` which was originally
    /// configured on creation.
    ///
    /// # Panics
    ///
    /// This will panic if the originally configured store was `None`. That can
    /// happen for host functions, which can't be queried for their original
    /// `Store` since it's just retained as null (host functions are shared
    /// amongst threads and don't all share the same store).
    #[inline]
    pub fn store(&self) -> *mut dyn Store {
        let ptr =
            unsafe { *self.vmctx_plus_offset::<*mut dyn Store>(self.offsets().vmctx_store()) };
        assert!(!ptr.is_null());
        ptr
    }

    pub(crate) unsafe fn set_store(&mut self, store: Option<*mut dyn Store>) {
        if let Some(store) = store {
            *self.vmctx_plus_offset_mut(self.offsets().vmctx_store()) = store;
            *self.runtime_limits() = (*store).vmruntime_limits();
            *self.epoch_ptr() = (*store).epoch_ptr();
            self.set_gc_heap((*store).maybe_gc_store());
        } else {
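            // There is no null `*mut dyn Store` literal to write directly (it
            // is a fat pointer), so instead zero both pointer-sized words,
            // relying on the size assertion just below.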
            assert_eq!(
                mem::size_of::<*mut dyn Store>(),
                mem::size_of::<[*mut (); 2]>()
            );
            *self.vmctx_plus_offset_mut::<[*mut (); 2]>(self.offsets().vmctx_store()) =
                [ptr::null_mut(), ptr::null_mut()];
            *self.runtime_limits() = ptr::null_mut();
            *self.epoch_ptr() = ptr::null_mut();
            self.set_gc_heap(None);
        }
    }

    unsafe fn set_gc_heap(&mut self, gc_store: Option<&mut GcStore>) {
        if let Some(gc_store) = gc_store {
            *self.gc_heap_base() = gc_store.gc_heap.vmctx_gc_heap_base();
            *self.gc_heap_bound() = gc_store.gc_heap.vmctx_gc_heap_bound();
            *self.gc_heap_data() = gc_store.gc_heap.vmctx_gc_heap_data();
        } else {
            *self.gc_heap_base() = ptr::null_mut();
            *self.gc_heap_bound() = 0;
            *self.gc_heap_data() = ptr::null_mut();
        }
    }

    pub(crate) unsafe fn set_callee(&mut self, callee: Option<NonNull<VMFunctionBody>>) {
        *self.vmctx_plus_offset_mut(self.offsets().vmctx_callee()) =
            callee.map_or(ptr::null_mut(), |c| c.as_ptr());
    }

    /// Return a raw pointer to the vmctx used by compiled wasm code.
    #[inline]
    pub fn vmctx(&self) -> *mut VMContext {
        // The definition of this method is subtle but intentional. The goal
        // here is that effectively this should return `&mut self.vmctx`, but
        // it's not quite so simple. Some more documentation is available on the
        // `vmctx_self_reference` field, but the general idea is that we're
        // creating a pointer to return with proper provenance. Provenance is
        // still in the works in Rust at the time of this writing but the load
        // of the `self.vmctx_self_reference` field is important here as it
        // affects how LLVM thinks about aliasing with respect to the returned
        // pointer.
        //
        // The intention of this method is to codegen to machine code as `&mut
        // self.vmctx`, however. While it doesn't show up like this in LLVM IR
        // (there's an actual load of the field) it does look like that by the
        // time the backend runs. (that's magic to me, the backend removing
        // loads...)
        //
        // As a final minor note, strict provenance APIs are not stable on Rust
        // today so the `sptr` crate is used. This crate provides the extension
        // trait `Strict` but the method names conflict with the nightly methods
        // so a different syntax is used to invoke methods here.
        let addr = std::ptr::addr_of!(self.vmctx);
        Strict::with_addr(self.vmctx_self_reference.as_ptr(), Strict::addr(addr))
    }

    fn get_exported_func(&mut self, index: FuncIndex) -> ExportFunction {
        let func_ref = self.get_func_ref(index).unwrap();
        let func_ref = NonNull::new(func_ref as *const VMFuncRef as *mut _).unwrap();
        ExportFunction { func_ref }
    }

    fn get_exported_table(&mut self, index: TableIndex) -> ExportTable {
        let (definition, vmctx) = if let Some(def_index) = self.module().defined_table_index(index)
        {
            (self.table_ptr(def_index), self.vmctx())
        } else {
            let import = self.imported_table(index);
            (import.from, import.vmctx)
        };
        ExportTable {
            definition,
            vmctx,
            table: self.module().table_plans[index].clone(),
        }
    }

    fn get_exported_memory(&mut self, index: MemoryIndex) -> ExportMemory {
        let (definition, vmctx, def_index) =
            if let Some(def_index) = self.module().defined_memory_index(index) {
                (self.memory_ptr(def_index), self.vmctx(), def_index)
            } else {
                let import = self.imported_memory(index);
                (import.from, import.vmctx, import.index)
            };
        ExportMemory {
            definition,
            vmctx,
            memory: self.module().memory_plans[index].clone(),
            index: def_index,
        }
    }

    fn get_exported_global(&mut self, index: GlobalIndex) -> ExportGlobal {
        ExportGlobal {
            definition: if let Some(def_index) = self.module().defined_global_index(index) {
                self.global_ptr(def_index)
            } else {
                self.imported_global(index).from
            },
            vmctx: self.vmctx(),
            global: self.module().globals[index],
        }
    }

    /// Return an iterator over the exports of this instance.
    ///
    /// Specifically, it provides access to the key-value pairs, where the keys
    /// are export names, and the values are export declarations which can be
    /// resolved with `lookup_by_declaration`.
    pub fn exports(&self) -> indexmap::map::Iter<String, EntityIndex> {
        self.module().exports.iter()
    }

    /// Return a reference to the custom state attached to this instance.
    #[inline]
    pub fn host_state(&self) -> &dyn Any {
        &*self.host_state
    }

    /// Return the table index for the given `VMTableDefinition`.
    pub unsafe fn table_index(&mut self, table: &VMTableDefinition) -> DefinedTableIndex {
        let index = DefinedTableIndex::new(
            usize::try_from(
                (table as *const VMTableDefinition)
                    .offset_from(self.table_ptr(DefinedTableIndex::new(0))),
            )
            .unwrap(),
        );
        assert!(index.index() < self.tables.len());
        index
    }

    /// Grow memory by the specified number of pages.
    ///
    /// Returns `None` if memory can't be grown by the specified number of
    /// pages. Returns `Some` with the old size in bytes if growth was
    /// successful.
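    ///
    /// For example, growing a memory whose current size is one 64 KiB page by
    /// `delta = 2` pages returns `Ok(Some(65536))`, the old size in bytes,
    /// assuming the growth fits within the memory's declared limits.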
    pub(crate) fn memory_grow(
        &mut self,
        index: MemoryIndex,
        delta: u64,
    ) -> Result<Option<usize>, Error> {
        match self.module().defined_memory_index(index) {
            Some(idx) => self.defined_memory_grow(idx, delta),
            None => {
                let import = self.imported_memory(index);
                unsafe {
                    Instance::from_vmctx(import.vmctx, |i| {
                        i.defined_memory_grow(import.index, delta)
                    })
                }
            }
        }
    }

    fn defined_memory_grow(
        &mut self,
        idx: DefinedMemoryIndex,
        delta: u64,
    ) -> Result<Option<usize>, Error> {
        let store = unsafe { &mut *self.store() };
        let memory = &mut self.memories[idx].1;

        let result = unsafe { memory.grow(delta, Some(store)) };

        // Update the state used by a non-shared Wasm memory in case the base
        // pointer and/or the length changed.
        if memory.as_shared_memory().is_none() {
            let vmmemory = memory.vmmemory();
            self.set_memory(idx, vmmemory);
        }

        result
    }

    pub(crate) fn table_element_type(&mut self, table_index: TableIndex) -> TableElementType {
        unsafe { (*self.get_table(table_index)).element_type() }
    }

    /// Grow table by the specified number of elements, filling them with
    /// `init_value`.
    ///
    /// Returns `None` if the table can't be grown by the specified number of
    /// elements, or if `init_value` is the wrong type of table element.
    pub(crate) fn table_grow(
        &mut self,
        table_index: TableIndex,
        delta: u32,
        init_value: TableElement,
    ) -> Result<Option<u32>, Error> {
        self.with_defined_table_index_and_instance(table_index, |i, instance| {
            instance.defined_table_grow(i, delta, init_value)
        })
    }

    fn defined_table_grow(
        &mut self,
        table_index: DefinedTableIndex,
        delta: u32,
        init_value: TableElement,
    ) -> Result<Option<u32>, Error> {
        let store = unsafe { &mut *self.store() };
        let table = &mut self
            .tables
            .get_mut(table_index)
            .unwrap_or_else(|| panic!("no table for index {}", table_index.index()))
            .1;

        let result = unsafe { table.grow(delta, init_value, store) };

        // Keep the `VMContext` pointers used by compiled Wasm code up to
        // date.
        let element = self.tables[table_index].1.vmtable();
        self.set_table(table_index, element);

        result
    }

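    /// Compute the allocation layout for an `Instance`: the fixed-size
    /// `Instance` header followed immediately by the dynamically-sized
    /// `VMContext` described by `offsets`.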
    fn alloc_layout(offsets: &VMOffsets<HostPtr>) -> Layout {
        let size = mem::size_of::<Self>()
            .checked_add(usize::try_from(offsets.size_of_vmctx()).unwrap())
            .unwrap();
        let align = mem::align_of::<Self>();
        Layout::from_size_align(size, align).unwrap()
    }

    /// Construct a new `VMFuncRef` for the given function
    /// (imported or defined in this module) and store it into the given
    /// location. Used during lazy initialization.
    ///
    /// Note that our current lazy-init scheme actually calls this every
    /// time the funcref pointer is fetched; this turns out to be better
    /// than tracking state related to whether it's been initialized
    /// before, because resetting that state on (re)instantiation is
    /// very expensive if there are many funcrefs.
    fn construct_func_ref(
        &mut self,
        index: FuncIndex,
        sig: ModuleInternedTypeIndex,
        into: *mut VMFuncRef,
    ) {
        let type_index = unsafe {
            let base: *const VMSharedTypeIndex =
                *self.vmctx_plus_offset_mut(self.offsets().vmctx_type_ids_array());
            *base.add(sig.index())
        };

        let func_ref = if let Some(def_index) = self.module().defined_func_index(index) {
            VMFuncRef {
                native_call: self
                    .runtime_info
                    .native_to_wasm_trampoline(def_index)
                    .expect("should have native-to-Wasm trampoline for escaping function"),
                array_call: self
                    .runtime_info
                    .array_to_wasm_trampoline(def_index)
                    .expect("should have array-to-Wasm trampoline for escaping function"),
                wasm_call: Some(self.runtime_info.function(def_index)),
                vmctx: VMOpaqueContext::from_vmcontext(self.vmctx()),
                type_index,
            }
        } else {
            let import = self.imported_function(index);
            VMFuncRef {
                native_call: import.native_call,
                array_call: import.array_call,
                wasm_call: Some(import.wasm_call),
                vmctx: import.vmctx,
                type_index,
            }
        };

        // Safety: we have a `&mut self`, so we have exclusive access
        // to this Instance.
        unsafe {
            std::ptr::write(into, func_ref);
        }
    }

    /// Get a `&VMFuncRef` for the given `FuncIndex`.
    ///
    /// Returns `None` if the index is the reserved index value.
    ///
    /// The returned reference is a stable reference that won't be moved and can
    /// be passed into JIT code.
    pub(crate) fn get_func_ref(&mut self, index: FuncIndex) -> Option<*mut VMFuncRef> {
        if index == FuncIndex::reserved_value() {
            return None;
        }

        // Safety: we have a `&mut self`, so we have exclusive access
        // to this Instance.
        unsafe {
            // For now, we eagerly initialize a funcref struct in-place
            // whenever asked for a reference to it. This is mostly
            // fine, because in practice each funcref is unlikely to be
            // requested more than a few times: once-ish for funcref
            // tables used for call_indirect (the usual compilation
            // strategy places each function in the table at most once),
            // and once or a few times when fetching exports via API.
            // Note that for any case driven by table accesses, the lazy
            // table init behaves like a higher-level cache layer that
            // protects this initialization from happening multiple
            // times, via that particular table at least.
            //
            // When `ref.func` becomes more commonly used or if we
            // otherwise see a use-case where this becomes a hotpath,
            // we can reconsider by using some state to track
            // "uninitialized" explicitly, for example by zeroing the
            // funcrefs (perhaps together with other
            // zeroed-at-instantiate-time state) or using a separate
            // is-initialized bitmap.
            //
            // We arrived at this design because zeroing memory is
            // expensive, so it's better for instantiation performance
            // if we don't have to track "is-initialized" state at
            // all!
            let func = &self.module().functions[index];
            let sig = func.signature;
            let func_ref: *mut VMFuncRef = self
                .vmctx_plus_offset_mut::<VMFuncRef>(self.offsets().vmctx_func_ref(func.func_ref));
            self.construct_func_ref(index, sig, func_ref);

            Some(func_ref)
        }
    }

    /// The `table.init` operation: initializes a portion of a table with a
    /// passive element.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error when the range within the table is out of bounds
    /// or the range within the passive element is out of bounds.
    pub(crate) fn table_init(
        &mut self,
        table_index: TableIndex,
        elem_index: ElemIndex,
        dst: u32,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        // TODO: this `clone()` shouldn't be necessary but is used for now to
        // inform `rustc` that the lifetime of the elements here is
        // disconnected from the lifetime of `self`.
        let module = self.module().clone();

        // NB: fall back to an expressions-based list of elements which doesn't
        // have static type information (as opposed to `Functions`) since we
        // don't know just yet what type the table has. The type will be
        // inferred in the next step within `table_init_segment`.
        let empty = TableSegmentElements::Expressions(Box::new([]));

        let elements = match module.passive_elements_map.get(&elem_index) {
            Some(index) if !self.dropped_elements.contains(elem_index) => {
                &module.passive_elements[*index]
            }
            _ => &empty,
        };
        self.table_init_segment(table_index, elements, dst, src, len)
    }

    pub(crate) fn table_init_segment(
        &mut self,
        table_index: TableIndex,
        elements: &TableSegmentElements,
        dst: u32,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init

        let table = unsafe { &mut *self.get_table(table_index) };
        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

        match elements {
            TableSegmentElements::Functions(funcs) => {
                let elements = funcs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or(Trap::TableOutOfBounds)?;
                table.init_func(
                    dst,
                    elements
                        .iter()
                        .map(|idx| self.get_func_ref(*idx).unwrap_or(std::ptr::null_mut())),
                )?;
            }
            TableSegmentElements::Expressions(exprs) => {
                let ty = table.element_type();
                let exprs = exprs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or(Trap::TableOutOfBounds)?;
                match ty {
                    TableElementType::Func => {
                        table.init_func(
                            dst,
                            exprs.iter().map(|expr| match expr {
                                TableElementExpression::Null => std::ptr::null_mut(),
                                TableElementExpression::Function(idx) => {
                                    self.get_func_ref(*idx).unwrap()
                                }
                                TableElementExpression::GlobalGet(idx) => {
                                    let global = self.defined_or_imported_global_ptr(*idx);
                                    unsafe { (*global).as_func_ref() }
                                }
                            }),
                        )?;
                    }
                    TableElementType::GcRef => {
                        table.init_gc_refs(
                            dst,
                            exprs.iter().map(|expr| match expr {
                                TableElementExpression::Null => None,
                                TableElementExpression::Function(_) => unreachable!(),
                                TableElementExpression::GlobalGet(idx) => {
                                    let global = self.defined_or_imported_global_ptr(*idx);
                                    let gc_ref = unsafe { (*global).as_gc_ref() };
                                    gc_ref.map(|r| {
                                        let store = unsafe { &mut *self.store() };
                                        store.gc_store().clone_gc_ref(r)
                                    })
                                }
                            }),
                        )?;
                    }
                }
            }
        }

        Ok(())
    }

    /// Drop an element.
    pub(crate) fn elem_drop(&mut self, elem_index: ElemIndex) {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop

        self.dropped_elements.insert(elem_index);

        // Note that we don't check that we actually removed a segment because
        // dropping a non-passive segment is a no-op (not a trap).
    }

    /// Get a locally-defined memory.
    pub fn get_defined_memory(&mut self, index: DefinedMemoryIndex) -> *mut Memory {
        ptr::addr_of_mut!(self.memories[index].1)
    }

    /// Do a `memory.copy`.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error when the source or destination ranges are out of
    /// bounds.
    pub(crate) fn memory_copy(
        &mut self,
        dst_index: MemoryIndex,
        dst: u64,
        src_index: MemoryIndex,
        src: u64,
        len: u64,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy

        let src_mem = self.get_memory(src_index);
        let dst_mem = self.get_memory(dst_index);

        let src = self.validate_inbounds(src_mem.current_length(), src, len)?;
        let dst = self.validate_inbounds(dst_mem.current_length(), dst, len)?;

        // Bounds and casts are checked above; by this point we know that
        // everything is safe.
        unsafe {
            let dst = dst_mem.base.add(dst);
            let src = src_mem.base.add(src);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::copy(src, dst, len as usize);
        }

        Ok(())
    }
    fn validate_inbounds(&self, max: usize, ptr: u64, len: u64) -> Result<usize, Trap> {
        let oob = || Trap::MemoryOutOfBounds;
        let end = ptr
            .checked_add(len)
            .and_then(|i| usize::try_from(i).ok())
            .ok_or_else(oob)?;
        if end > max {
            Err(oob())
        } else {
            Ok(ptr as usize)
        }
    }

    /// Perform the `memory.fill` operation on a locally defined memory.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error if the memory range is out of bounds.
    pub(crate) fn memory_fill(
        &mut self,
        memory_index: MemoryIndex,
        dst: u64,
        val: u8,
        len: u64,
    ) -> Result<(), Trap> {
        let memory = self.get_memory(memory_index);
        let dst = self.validate_inbounds(memory.current_length(), dst, len)?;

        // Bounds and casts are checked above; by this point we know that
        // everything is safe.
        unsafe {
            let dst = memory.base.add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::write_bytes(dst, val, len as usize);
        }

        Ok(())
    }

    /// Performs the `memory.init` operation.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error if the destination range is out of this module's
    /// memory's bounds or if the source range is outside the data segment's
    /// bounds.
    pub(crate) fn memory_init(
        &mut self,
        memory_index: MemoryIndex,
        data_index: DataIndex,
        dst: u64,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        let range = match self.module().passive_data_map.get(&data_index).cloned() {
            Some(range) if !self.dropped_data.contains(data_index) => range,
            _ => 0..0,
        };
        self.memory_init_segment(memory_index, range, dst, src, len)
    }

    pub(crate) fn wasm_data(&self, range: Range<u32>) -> &[u8] {
        &self.runtime_info.wasm_data()[range.start as usize..range.end as usize]
    }

    pub(crate) fn memory_init_segment(
        &mut self,
        memory_index: MemoryIndex,
        range: Range<u32>,
        dst: u64,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init

        let memory = self.get_memory(memory_index);
        let data = self.wasm_data(range);
        let dst = self.validate_inbounds(memory.current_length(), dst, len.into())?;
        let src = self.validate_inbounds(data.len(), src.into(), len.into())?;
        let len = len as usize;

        unsafe {
            let src_start = data.as_ptr().add(src);
            let dst_start = memory.base.add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::copy_nonoverlapping(src_start, dst_start, len);
        }

        Ok(())
    }

    /// Drop the given data segment, truncating its length to zero.
    pub(crate) fn data_drop(&mut self, data_index: DataIndex) {
        self.dropped_data.insert(data_index);

        // Note that we don't check that we actually removed a segment because
        // dropping a non-passive segment is a no-op (not a trap).
    }

    /// Get a table by index regardless of whether it is locally-defined
    /// or an imported, foreign table. Ensure that the given range of
    /// elements in the table is lazily initialized. We define this
    /// operation all-in-one for safety, to ensure the lazy-init
    /// happens.
    ///
    /// Takes an `Iterator` for the index-range to lazy-initialize,
    /// for flexibility. This can be a range, single item, or empty
    /// sequence, for example. The iterator should return indices in
    /// increasing order, so that the break-at-out-of-bounds behavior
    /// works correctly.
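    ///
    /// A sketch of how a caller might use this (illustrative only; the real
    /// call sites live elsewhere in the runtime):
    ///
    /// ```ignore
    /// // Lazily initialize a single element before reading it:
    /// let table = instance.get_table_with_lazy_init(table_index, std::iter::once(i));
    /// // Or a whole range ahead of a bulk operation:
    /// let table = instance.get_table_with_lazy_init(table_index, dst..dst + len);
    /// ```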
    pub(crate) fn get_table_with_lazy_init(
        &mut self,
        table_index: TableIndex,
        range: impl Iterator<Item = u32>,
    ) -> *mut Table {
        self.with_defined_table_index_and_instance(table_index, |idx, instance| {
            instance.get_defined_table_with_lazy_init(idx, range)
        })
    }

    /// Gets the raw runtime table data structure owned by this instance
    /// given the provided `idx`.
    ///
    /// The `range` specified is eagerly initialized for funcref tables.
    pub fn get_defined_table_with_lazy_init(
        &mut self,
        idx: DefinedTableIndex,
        range: impl Iterator<Item = u32>,
    ) -> *mut Table {
        let elt_ty = self.tables[idx].1.element_type();

        if elt_ty == TableElementType::Func {
            for i in range {
                let gc_store = unsafe { (*self.store()).gc_store() };
                let value = match self.tables[idx].1.get(gc_store, i) {
                    Some(value) => value,
                    None => {
                        // Out-of-bounds; caller will handle by likely
                        // throwing a trap. No work to do to lazy-init
                        // beyond the end.
                        break;
                    }
                };

                if !value.is_uninit() {
                    continue;
                }

                // The table element `i` is uninitialized and is now being
                // initialized. This must imply that a precomputed list of
                // function indices is available for this table. The precomputed
                // list is extracted and then it is consulted with `i` to
                // determine the function that is going to be initialized. Note
                // that `i` may be outside the limits of the static
                // initialization so it's a fallible `get` instead of an index.
                let module = self.module();
                let precomputed = match &module.table_initialization.initial_values[idx] {
                    TableInitialValue::Null { precomputed } => precomputed,
                    TableInitialValue::FuncRef(_)
                    | TableInitialValue::GlobalGet(_)
                    | TableInitialValue::I31Ref(_) => {
                        unreachable!()
                    }
                };
                let func_index = precomputed.get(i as usize).cloned();
                let func_ref = func_index
                    .and_then(|func_index| self.get_func_ref(func_index))
                    .unwrap_or(std::ptr::null_mut());
                self.tables[idx]
                    .1
                    .set(i, TableElement::FuncRef(func_ref))
                    .expect("Table type should match and index should be in-bounds");
            }
        }

        ptr::addr_of_mut!(self.tables[idx].1)
    }

    /// Get a table by index regardless of whether it is locally-defined or an
    /// imported, foreign table.
    pub(crate) fn get_table(&mut self, table_index: TableIndex) -> *mut Table {
        self.with_defined_table_index_and_instance(table_index, |idx, instance| {
            ptr::addr_of_mut!(instance.tables[idx].1)
        })
    }

    /// Get a locally-defined table.
    pub(crate) fn get_defined_table(&mut self, index: DefinedTableIndex) -> *mut Table {
        ptr::addr_of_mut!(self.tables[index].1)
    }

    pub(crate) fn with_defined_table_index_and_instance<R>(
        &mut self,
        index: TableIndex,
        f: impl FnOnce(DefinedTableIndex, &mut Instance) -> R,
    ) -> R {
        if let Some(defined_table_index) = self.module().defined_table_index(index) {
            f(defined_table_index, self)
        } else {
            let import = self.imported_table(index);
            unsafe {
                Instance::from_vmctx(import.vmctx, |foreign_instance| {
                    let foreign_table_def = import.from;
                    let foreign_table_index = foreign_instance.table_index(&*foreign_table_def);
                    f(foreign_table_index, foreign_instance)
                })
            }
        }
    }

    /// Initialize the VMContext data associated with this Instance.
    ///
    /// The `VMContext` memory is assumed to be uninitialized; any field
    /// that we need in a certain state will be explicitly written by this
    /// function.
    unsafe fn initialize_vmctx(
        &mut self,
        module: &Module,
        offsets: &VMOffsets<HostPtr>,
        store: StorePtr,
        imports: Imports,
    ) {
        assert!(std::ptr::eq(module, self.module().as_ref()));

        *self.vmctx_plus_offset_mut(offsets.vmctx_magic()) = VMCONTEXT_MAGIC;
        self.set_callee(None);
        self.set_store(store.as_raw());

        // Initialize shared types
        let types = self.runtime_info.type_ids();
        *self.vmctx_plus_offset_mut(offsets.vmctx_type_ids_array()) = types.as_ptr();

        // Initialize the built-in functions
        *self.vmctx_plus_offset_mut(offsets.vmctx_builtin_functions()) =
            &VMBuiltinFunctionsArray::INIT;

        // Initialize the imports
        debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
        ptr::copy_nonoverlapping(
            imports.functions.as_ptr(),
            self.vmctx_plus_offset_mut(offsets.vmctx_imported_functions_begin()),
            imports.functions.len(),
        );
        debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
        ptr::copy_nonoverlapping(
            imports.tables.as_ptr(),
            self.vmctx_plus_offset_mut(offsets.vmctx_imported_tables_begin()),
            imports.tables.len(),
        );
        debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
        ptr::copy_nonoverlapping(
            imports.memories.as_ptr(),
            self.vmctx_plus_offset_mut(offsets.vmctx_imported_memories_begin()),
            imports.memories.len(),
        );
        debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
        ptr::copy_nonoverlapping(
            imports.globals.as_ptr(),
            self.vmctx_plus_offset_mut(offsets.vmctx_imported_globals_begin()),
            imports.globals.len(),
        );

        // N.B.: there is no need to initialize the funcrefs array because we
        // eagerly construct each element in it whenever asked for a reference
        // to that element. In other words, there is no state needed to track
        // the lazy-init, so we don't need to initialize any state now.

        // Initialize the defined tables
        let mut ptr = self.vmctx_plus_offset_mut(offsets.vmctx_tables_begin());
        for i in 0..module.table_plans.len() - module.num_imported_tables {
            ptr::write(ptr, self.tables[DefinedTableIndex::new(i)].1.vmtable());
            ptr = ptr.add(1);
        }

        // Initialize the defined memories. This fills in both the
        // `defined_memories` table and the `owned_memories` table at the same
        // time. Entries in `defined_memories` hold a pointer to a definition
        // (all memories) whereas the `owned_memories` hold the actual
        // definitions of memories owned (not shared) in the module.
        let mut ptr = self.vmctx_plus_offset_mut(offsets.vmctx_memories_begin());
        let mut owned_ptr = self.vmctx_plus_offset_mut(offsets.vmctx_owned_memories_begin());
        for i in 0..module.memory_plans.len() - module.num_imported_memories {
            let defined_memory_index = DefinedMemoryIndex::new(i);
            let memory_index = module.memory_index(defined_memory_index);
            if module.memory_plans[memory_index].memory.shared {
                let def_ptr = self.memories[defined_memory_index]
                    .1
                    .as_shared_memory()
                    .unwrap()
                    .vmmemory_ptr();
                ptr::write(ptr, def_ptr.cast_mut());
            } else {
                ptr::write(owned_ptr, self.memories[defined_memory_index].1.vmmemory());
                ptr::write(ptr, owned_ptr);
                owned_ptr = owned_ptr.add(1);
            }
            ptr = ptr.add(1);
        }

        // Initialize the defined globals
        self.initialize_vmctx_globals(module);
    }
1276
1277    unsafe fn initialize_vmctx_globals(&mut self, module: &Module) {
1278        for (index, init) in module.global_initializers.iter() {
1279            let to = self.global_ptr(index);
1280            let wasm_ty = module.globals[module.global_index(index)].wasm_ty;
1281
1282            // Initialize the global before writing to it
1283            ptr::write(to, VMGlobalDefinition::new());
1284
1285            match *init {
1286                GlobalInit::I32Const(x) => {
1287                    let index = module.global_index(index);
1288                    if index.index() == 0 {
                        #[cfg(feature = "wmemcheck")]
                        {
                            if let Some(wmemcheck) = &mut self.wmemcheck_state {
                                wmemcheck.set_stack_size(x as usize);
                            }
                        }
                    }
                    *(*to).as_i32_mut() = x;
                }
                GlobalInit::I64Const(x) => *(*to).as_i64_mut() = x,
                GlobalInit::F32Const(x) => *(*to).as_f32_bits_mut() = x,
                GlobalInit::F64Const(x) => *(*to).as_f64_bits_mut() = x,
                GlobalInit::V128Const(x) => *(*to).as_u128_mut() = x,
                GlobalInit::GetGlobal(x) => {
                    let from = if let Some(def_x) = module.defined_global_index(x) {
                        self.global(def_x)
                    } else {
                        &*self.imported_global(x).from
                    };

                    // GC-managed globals may need to invoke GC barriers;
                    // everything else is just copyable bits.
                    if wasm_ty.is_gc_heap_type() {
                        let gc_ref = (*from)
                            .as_gc_ref()
                            .map(|r| r.unchecked_copy())
                            .map(|r| (*self.store()).gc_store().clone_gc_ref(&r));
                        (*to).init_gc_ref(gc_ref);
                    } else {
                        ptr::copy_nonoverlapping(from, to, 1);
                    }
                }
                GlobalInit::RefFunc(f) => {
                    *(*to).as_func_ref_mut() = self.get_func_ref(f).unwrap();
                }
                GlobalInit::RefNullConst => match wasm_ty {
                    // `VMGlobalDefinition::new()` already zeroed out the bits
                    WasmValType::Ref(WasmRefType { nullable: true, .. }) => {}
                    ty => panic!("unsupported reference type for global: {:?}", ty),
                },
                GlobalInit::RefI31Const(x) => {
                    let gc_ref = VMGcRef::from_i31(I31::wrapping_i32(x));
                    (*to).init_gc_ref(Some(gc_ref));
                }
            }
        }
    }

    fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
        let mut fault = None;
        for (_, (_, memory)) in self.memories.iter() {
            let accessible = memory.wasm_accessible();
            if accessible.start <= addr && addr < accessible.end {
                // All linear memories should be disjoint, so assert that no
                // prior fault has been found.
                assert!(fault.is_none());
                fault = Some(WasmFault {
                    memory_size: memory.byte_size(),
                    wasm_address: u64::try_from(addr - accessible.start).unwrap(),
                });
            }
        }
        fault
    }
}

/// A handle holding an `Instance` of a WebAssembly module.
#[derive(Debug)]
pub struct InstanceHandle {
    instance: Option<SendSyncPtr<Instance>>,
}

impl InstanceHandle {
    /// Creates an "empty" instance handle whose internal pointer to an
    /// instance is null.
    pub fn null() -> InstanceHandle {
        InstanceHandle { instance: None }
    }

    /// Return a raw pointer to the vmctx used by compiled wasm code.
    #[inline]
    pub fn vmctx(&self) -> *mut VMContext {
        self.instance().vmctx()
    }

    /// Return a reference to this instance's module.
    pub fn module(&self) -> &Arc<Module> {
        self.instance().module()
    }

    /// Look up a function by index.
    pub fn get_exported_func(&mut self, export: FuncIndex) -> ExportFunction {
        self.instance_mut().get_exported_func(export)
    }

    /// Look up a global by index.
    pub fn get_exported_global(&mut self, export: GlobalIndex) -> ExportGlobal {
        self.instance_mut().get_exported_global(export)
    }

    /// Look up a memory by index.
    pub fn get_exported_memory(&mut self, export: MemoryIndex) -> ExportMemory {
        self.instance_mut().get_exported_memory(export)
    }

    /// Look up a table by index.
    pub fn get_exported_table(&mut self, export: TableIndex) -> ExportTable {
        self.instance_mut().get_exported_table(export)
    }

    /// Look up an item with the given index.
    pub fn get_export_by_index(&mut self, export: EntityIndex) -> Export {
        match export {
            EntityIndex::Function(i) => Export::Function(self.get_exported_func(i)),
            EntityIndex::Global(i) => Export::Global(self.get_exported_global(i)),
            EntityIndex::Table(i) => Export::Table(self.get_exported_table(i)),
            EntityIndex::Memory(i) => Export::Memory(self.get_exported_memory(i)),
        }
    }

    /// Return an iterator over the exports of this instance.
    ///
    /// Specifically, it provides access to the key-value pairs, where the keys
    /// are export names, and the values are export declarations which can be
    /// resolved with `get_export_by_index`.
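    ///
    /// For example (an illustrative, non-compiled sketch where `handle` is
    /// assumed to be a valid `InstanceHandle`):
    ///
    /// ```ignore
    /// let entries: Vec<(String, EntityIndex)> = handle
    ///     .exports()
    ///     .map(|(name, index)| (name.clone(), *index))
    ///     .collect();
    /// for (name, index) in entries {
    ///     let export = handle.get_export_by_index(index);
    ///     // ... use `name` and `export` ...
    /// }
    /// ```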
    pub fn exports(&self) -> indexmap::map::Iter<String, EntityIndex> {
        self.instance().exports()
    }

    /// Return a reference to the custom state attached to this instance.
    pub fn host_state(&self) -> &dyn Any {
        self.instance().host_state()
    }

    /// Get a table defined locally within this module.
    pub fn get_defined_table(&mut self, index: DefinedTableIndex) -> *mut Table {
        self.instance_mut().get_defined_table(index)
    }

    /// Get a table defined locally within this module, lazily
    /// initializing the given range first.
    pub fn get_defined_table_with_lazy_init(
        &mut self,
        index: DefinedTableIndex,
        range: impl Iterator<Item = u32>,
    ) -> *mut Table {
        let index = self.instance().module().table_index(index);
        self.instance_mut().get_table_with_lazy_init(index, range)
    }

    /// Get all tables within this instance.
    ///
    /// Returns both imported and defined tables.
    ///
    /// Returns both exported and non-exported tables.
    ///
    /// Gives access to the full table index space.
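    ///
    /// For example (an illustrative, non-compiled sketch where `handle` is
    /// assumed to be a valid `InstanceHandle`):
    ///
    /// ```ignore
    /// for (table_index, table) in handle.all_tables() {
    ///     // `table_index` indexes the full table index space (imports
    ///     // first); `table` is the corresponding `ExportTable`.
    /// }
    /// ```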
    pub fn all_tables<'a>(
        &'a mut self,
    ) -> impl ExactSizeIterator<Item = (TableIndex, ExportTable)> + 'a {
        let indices = (0..self.module().table_plans.len())
            .map(|i| TableIndex::new(i))
            .collect::<Vec<_>>();
        indices.into_iter().map(|i| (i, self.get_exported_table(i)))
    }

    /// Return the tables defined in this instance (not imported).
    pub fn defined_tables<'a>(&'a mut self) -> impl ExactSizeIterator<Item = ExportTable> + 'a {
        let num_imported = self.module().num_imported_tables;
        self.all_tables()
            .skip(num_imported)
            .map(|(_i, table)| table)
    }

    /// Get all memories within this instance.
    ///
    /// Returns both imported and defined memories.
    ///
    /// Returns both exported and non-exported memories.
    ///
    /// Gives access to the full memory index space.
    pub fn all_memories<'a>(
        &'a mut self,
    ) -> impl ExactSizeIterator<Item = (MemoryIndex, ExportMemory)> + 'a {
        let indices = (0..self.module().memory_plans.len())
            .map(|i| MemoryIndex::new(i))
            .collect::<Vec<_>>();
        indices
            .into_iter()
            .map(|i| (i, self.get_exported_memory(i)))
    }

    /// Return the memories defined in this instance (not imported).
    pub fn defined_memories<'a>(&'a mut self) -> impl ExactSizeIterator<Item = ExportMemory> + 'a {
        let num_imported = self.module().num_imported_memories;
        self.all_memories()
            .skip(num_imported)
            .map(|(_i, memory)| memory)
    }

    /// Get all globals within this instance.
    ///
    /// Returns both imported and defined globals.
    ///
    /// Returns both exported and non-exported globals.
    ///
    /// Gives access to the full global index space.
    pub fn all_globals<'a>(
        &'a mut self,
    ) -> impl ExactSizeIterator<Item = (GlobalIndex, ExportGlobal)> + 'a {
        self.instance_mut().all_globals()
    }

    /// Get the globals defined in this instance (not imported).
    pub fn defined_globals<'a>(
        &'a mut self,
    ) -> impl ExactSizeIterator<Item = (DefinedGlobalIndex, ExportGlobal)> + 'a {
        self.instance_mut().defined_globals()
    }

    /// Return a reference to the contained `Instance`.
    #[inline]
    pub(crate) fn instance(&self) -> &Instance {
        unsafe { &*self.instance.unwrap().as_ptr() }
    }

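    /// Return a mutable reference to the contained `Instance`.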
    pub(crate) fn instance_mut(&mut self) -> &mut Instance {
        unsafe { &mut *self.instance.unwrap().as_ptr() }
    }

    /// Returns the `Store` pointer that was stored on creation.
    #[inline]
    pub fn store(&self) -> *mut dyn Store {
        self.instance().store()
    }

    /// Configure the `*mut dyn Store` internal pointer after-the-fact.
    ///
    /// This is provided for the original `Store` itself to configure the first
    /// self-pointer after the original `Box` has been initialized.
    pub unsafe fn set_store(&mut self, store: *mut dyn Store) {
        self.instance_mut().set_store(Some(store));
    }

    /// Returns a clone of this instance.
    ///
    /// This is unsafe because the returned handle here is just a cheap clone
    /// of the internals; there's no lifetime tracking around its validity.
    /// You'll need to ensure that the returned handles all go out of scope at
    /// the same time.
    #[inline]
    pub unsafe fn clone(&self) -> InstanceHandle {
        InstanceHandle {
            instance: self.instance,
        }
    }

    /// Performs post-initialization of an instance after its handle has been
    /// created and registered with a store.
    ///
    /// If this function fails, the instance must still be kept alive within
    /// the store: initialization may have partially completed, and some of its
    /// state may already be referenced by other instances.
    pub fn initialize(&mut self, module: &Module, is_bulk_memory: bool) -> Result<()> {
        allocator::initialize_instance(self.instance_mut(), module, is_bulk_memory)
    }

    /// Attempts to convert the host address `addr` to a WebAssembly-based
    /// address recorded in a `WasmFault`.
    ///
    /// This method checks all linear memories that this instance contains to
    /// see if any of them contain `addr`. If one does, then `Some` is returned
    /// with metadata about the wasm fault. Otherwise `None` is returned and
    /// `addr` doesn't belong to this instance.
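    ///
    /// For example (an illustrative, non-compiled sketch where `handle` is a
    /// valid `InstanceHandle` and `faulting_addr` comes from a trap or signal
    /// handler):
    ///
    /// ```ignore
    /// if let Some(fault) = handle.wasm_fault(faulting_addr) {
    ///     // `fault.wasm_address` is the offset within the linear memory and
    ///     // `fault.memory_size` is that memory's current byte size.
    /// }
    /// ```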
    pub fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
        self.instance().wasm_fault(addr)
    }
}