wasmtime_runtime/vmcontext.rs
//! This file declares `VMContext` and several related structs which contain
//! fields that compiled wasm code accesses directly.

mod vm_host_func_context;

use crate::{GcStore, VMGcRef};
use sptr::Strict;
use std::cell::UnsafeCell;
use std::ffi::c_void;
use std::marker;
use std::mem;
use std::ptr::{self, NonNull};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::u32;
pub use vm_host_func_context::{VMArrayCallHostFuncContext, VMNativeCallHostFuncContext};
use wasmtime_environ::{BuiltinFunctionIndex, DefinedMemoryIndex, Unsigned, VMCONTEXT_MAGIC};

/// A function pointer that exposes the array calling convention.
///
/// Regardless of the underlying Wasm function type, all functions using the
/// array calling convention have the same Rust signature.
///
/// Arguments:
///
/// * Callee `vmctx` for the function itself.
///
/// * Caller's `vmctx` (so that host functions can access the linear memory of
///   their Wasm callers).
///
/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
///   this function, and where results are returned from this function.
///
/// * The capacity of the `ValRaw` buffer. Must always be at least
///   `max(len(wasm_params), len(wasm_results))`.
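///
/// As a sketch (hypothetical; not an API defined in this crate), a host
/// function with this signature that adds its two `i32` arguments might look
/// like:
///
/// ```ignore
/// unsafe extern "C" fn host_add(
///     _callee_vmctx: *mut VMOpaqueContext,
///     _caller_vmctx: *mut VMOpaqueContext,
///     args_and_results: *mut ValRaw,
///     _capacity: usize,
/// ) {
///     // Arguments are read out of the buffer...
///     let a = (*args_and_results.add(0)).get_i32();
///     let b = (*args_and_results.add(1)).get_i32();
///     // ...and results are written back into the same buffer.
///     *args_and_results.add(0) = ValRaw::i32(a.wrapping_add(b));
/// }
/// ```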
pub type VMArrayCallFunction =
    unsafe extern "C" fn(*mut VMOpaqueContext, *mut VMOpaqueContext, *mut ValRaw, usize);

/// A function pointer that exposes the native calling convention.
///
/// Different Wasm function types end up mapping to different Rust function
/// types, so this isn't simply a type alias the way that `VMArrayCallFunction`
/// is.
///
/// This is the default calling convention for the target (e.g. System-V or
/// fast-call) except multiple return values are handled by returning the first
/// return value in a register and everything else through a return-pointer.
#[repr(transparent)]
pub struct VMNativeCallFunction(VMFunctionBody);

/// A function pointer that exposes the Wasm calling convention.
///
/// In practice, different Wasm function types end up mapping to different Rust
/// function types, so this isn't simply a type alias the way that
/// `VMArrayCallFunction` is. However, the exact details of the calling
/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
/// code never does anything with these function pointers except shuffle them
/// around and pass them back to Wasm.
#[repr(transparent)]
pub struct VMWasmCallFunction(VMFunctionBody);

/// An imported function.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// Function pointer to use when calling this imported function from Wasm.
    pub wasm_call: NonNull<VMWasmCallFunction>,

    /// Function pointer to use when calling this imported function from native code.
    pub native_call: NonNull<VMNativeCallFunction>,

    /// Function pointer to use when calling this imported function with the
    /// "array" calling convention that `Func::new` et al use.
    pub array_call: VMArrayCallFunction,

    /// The VM state associated with this function.
    ///
    /// For Wasm functions defined by core wasm instances this will be `*mut
    /// VMContext`, but for lifted/lowered component model functions this will
    /// be a `VMComponentContext`, and for a host function it will be a
    /// `VMHostFuncContext`, etc.
    pub vmctx: *mut VMOpaqueContext,
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMFunctionImport` to uphold this guarantee.
unsafe impl Send for VMFunctionImport {}
unsafe impl Sync for VMFunctionImport {}

#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    #[test]
    fn check_vmfunction_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMFunctionImport>(),
            usize::from(offsets.size_of_vmfunction_import())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, wasm_call),
            usize::from(offsets.vmfunction_import_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, native_call),
            usize::from(offsets.vmfunction_import_native_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, array_call),
            usize::from(offsets.vmfunction_import_array_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, vmctx),
            usize::from(offsets.vmfunction_import_vmctx())
        );
    }
}

/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
#[repr(C)]
pub struct VMFunctionBody(u8);

#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use std::mem::size_of;

    #[test]
    fn check_vmfunction_body_offsets() {
        assert_eq!(size_of::<VMFunctionBody>(), 1);
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub from: *mut VMTableDefinition,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: *mut VMContext,
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMTableImport` to uphold this guarantee.
unsafe impl Send for VMTableImport {}
unsafe impl Sync for VMTableImport {}

#[cfg(test)]
mod test_vmtable_import {
    use super::VMTableImport;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    #[test]
    fn check_vmtable_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offsets.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, from),
            usize::from(offsets.vmtable_import_from())
        );
        assert_eq!(
            offset_of!(VMTableImport, vmctx),
            usize::from(offsets.vmtable_import_vmctx())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: *mut VMMemoryDefinition,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: *mut VMContext,

    /// The index of the memory in the containing `vmctx`.
    pub index: DefinedMemoryIndex,
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMMemoryImport` to uphold this guarantee.
unsafe impl Send for VMMemoryImport {}
unsafe impl Sync for VMMemoryImport {}

#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    #[test]
    fn check_vmmemory_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offsets.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, from),
            usize::from(offsets.vmmemory_import_from())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, vmctx),
            usize::from(offsets.vmmemory_import_vmctx())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike with functions, tables, and memories, `VMGlobalImport`
/// doesn't include a `vmctx` pointer. Globals are never resized, and don't
/// require a `vmctx` pointer to access.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: *mut VMGlobalDefinition,
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMGlobalImport` to uphold this guarantee.
unsafe impl Send for VMGlobalImport {}
unsafe impl Sync for VMGlobalImport {}

#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    #[test]
    fn check_vmglobal_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offsets.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, from),
            usize::from(offsets.vmglobal_import_from())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: *mut u8,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}

impl VMMemoryDefinition {
    /// Return the current length of the [`VMMemoryDefinition`] by performing a
    /// relaxed load; do not use this function for situations in which a precise
    /// length is needed. Owned memories (i.e., non-shared) will always return a
    /// precise result (since no concurrent modification is possible) but shared
    /// memories may see an imprecise value--a `current_length` potentially
    /// smaller than what some other thread observes. Since Wasm memory only
    /// grows, this under-estimation may be acceptable in certain cases.
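    ///
    /// As a sketch (not Wasmtime's actual bounds-check code), a conservative
    /// bounds check can safely use the relaxed value, since an under-estimate
    /// only rejects accesses and never admits an out-of-bounds one:
    ///
    /// ```ignore
    /// fn in_bounds(def: &VMMemoryDefinition, addr: usize, len: usize) -> bool {
    ///     addr.checked_add(len)
    ///         .map_or(false, |end| end <= def.current_length())
    /// }
    /// ```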
    pub fn current_length(&self) -> usize {
        self.current_length.load(Ordering::Relaxed)
    }

    /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
    /// `current_length`; see [`VMMemoryDefinition::current_length()`].
    pub unsafe fn load(ptr: *mut Self) -> Self {
        let other = &*ptr;
        VMMemoryDefinition {
            base: other.base,
            current_length: other.current_length().into(),
        }
    }
}

#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.ptr.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.ptr.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.ptr.vmmemory_definition_current_length())
        );
        /* TODO: Assert that the size of `current_length` matches.
        assert_eq!(
            size_of::<VMMemoryDefinition::current_length>(),
            usize::from(offsets.size_of_vmmemory_definition_current_length())
        );
        */
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: *mut u8,

    /// The current number of elements in the table.
    pub current_elements: u32,
}

#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    #[test]
    fn check_vmtable_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offsets.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offsets.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offsets.vmtable_definition_current_elements())
        );
    }
}

/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}

#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    #[test]
    fn check_vmglobal_definition_alignment() {
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[u8; 16]>());
    }

    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMGlobalDefinition>(),
            usize::from(offsets.ptr.size_of_vmglobal_definition())
        );
    }

    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }

    #[test]
    #[cfg(feature = "gc")]
    fn check_vmglobal_can_contain_gc_ref() {
        assert!(size_of::<crate::VMGcRef>() <= size_of::<VMGlobalDefinition>());
    }
}

impl VMGlobalDefinition {
    /// Construct a `VMGlobalDefinition`.
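    ///
    /// A sketch of typical usage; the accessors below are `unsafe` because
    /// they reinterpret the untyped storage, so the caller must know the
    /// global's actual type:
    ///
    /// ```ignore
    /// let mut global = VMGlobalDefinition::new();
    /// unsafe {
    ///     *global.as_i32_mut() = 42;
    ///     assert_eq!(*global.as_i32(), 42);
    /// }
    /// ```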
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Return a reference to the value as an i32.
    pub unsafe fn as_i32(&self) -> &i32 {
        &*(self.storage.as_ref().as_ptr().cast::<i32>())
    }

    /// Return a mutable reference to the value as an i32.
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>())
    }

    /// Return a reference to the value as a u32.
    pub unsafe fn as_u32(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as a u32.
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an i64.
    pub unsafe fn as_i64(&self) -> &i64 {
        &*(self.storage.as_ref().as_ptr().cast::<i64>())
    }

    /// Return a mutable reference to the value as an i64.
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>())
    }

    /// Return a reference to the value as a u64.
    pub unsafe fn as_u64(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as a u64.
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Return a reference to the value as an f32.
    pub unsafe fn as_f32(&self) -> &f32 {
        &*(self.storage.as_ref().as_ptr().cast::<f32>())
    }

    /// Return a mutable reference to the value as an f32.
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>())
    }

    /// Return a reference to the value as f32 bits.
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as f32 bits.
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an f64.
    pub unsafe fn as_f64(&self) -> &f64 {
        &*(self.storage.as_ref().as_ptr().cast::<f64>())
    }

    /// Return a mutable reference to the value as an f64.
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>())
    }

    /// Return a reference to the value as f64 bits.
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as f64 bits.
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Return a reference to the value as a u128.
    pub unsafe fn as_u128(&self) -> &u128 {
        &*(self.storage.as_ref().as_ptr().cast::<u128>())
    }

    /// Return a mutable reference to the value as a u128.
    pub unsafe fn as_u128_mut(&mut self) -> &mut u128 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u128>())
    }

    /// Return a reference to the value as u128 bits.
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>())
    }

    /// Return a mutable reference to the value as u128 bits.
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>())
    }

    /// Return a reference to the global value as a borrowed GC reference.
    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
        let ret = (*raw_ptr).as_ref();
        assert!(cfg!(feature = "gc") || ret.is_none());
        ret
    }

    /// Initialize a global to the given GC reference.
    pub unsafe fn init_gc_ref(&mut self, gc_ref: Option<VMGcRef>) {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());
        let raw_ptr = self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>();
        ptr::write(raw_ptr, gc_ref);
    }

    /// Write a GC reference into this global value.
    pub unsafe fn write_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        let dest = &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>());
        assert!(cfg!(feature = "gc") || dest.is_none());

        gc_store.write_gc_ref(dest, gc_ref)
    }

    /// Return the value as a raw `VMFuncRef` pointer.
    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
        *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>())
    }

    /// Return a mutable reference to the value as a raw `VMFuncRef` pointer.
    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>())
    }
}

/// An index into the shared type registry, usable for checking signatures
/// at indirect calls.
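///
/// A small sketch of the reserved-value behavior documented on
/// [`VMSharedTypeIndex::new`] and the `Default` impl below:
///
/// ```ignore
/// assert_eq!(VMSharedTypeIndex::new(42).bits(), 42);
/// // `u32::MAX` is reserved as the default and is never a real index.
/// assert_eq!(VMSharedTypeIndex::default().bits(), u32::MAX);
/// ```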
#[repr(C)]
#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)]
pub struct VMSharedTypeIndex(u32);

#[cfg(test)]
mod test_vmshared_type_index {
    use super::VMSharedTypeIndex;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    #[test]
    fn check_vmshared_type_index() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMSharedTypeIndex>(),
            usize::from(offsets.size_of_vmshared_type_index())
        );
    }
}

impl VMSharedTypeIndex {
    /// Create a new `VMSharedTypeIndex`.
    #[inline]
    pub fn new(value: u32) -> Self {
        assert_ne!(
            value,
            u32::MAX,
            "u32::MAX is reserved for the default value"
        );
        Self(value)
    }

    /// Returns the underlying bits of the index.
    #[inline]
    pub fn bits(&self) -> u32 {
        self.0
    }
}

impl Default for VMSharedTypeIndex {
    #[inline]
    fn default() -> Self {
        Self(u32::MAX)
    }
}

/// The VM caller-checked "funcref" record, for caller-side signature checking.
///
/// It consists of function pointer(s), a type id to be checked by the
/// caller, and the vmctx closure associated with this function.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMFuncRef {
    /// Function pointer for this funcref if being called via the native calling
    /// convention.
    pub native_call: NonNull<VMNativeCallFunction>,

    /// Function pointer for this funcref if being called via the "array"
    /// calling convention that `Func::new` et al use.
    pub array_call: VMArrayCallFunction,

    /// Function pointer for this funcref if being called via the calling
    /// convention we use when compiling Wasm.
    ///
    /// Most functions come with a function pointer that we can use when they
    /// are called from Wasm. The notable exception is when we `Func::wrap` a
    /// host function, and we don't have a Wasm compiler on hand to compile a
    /// Wasm-to-native trampoline for the function. In this case, we leave
    /// `wasm_call` empty until the function is passed as an import to Wasm (or
    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
    /// a Wasm-to-native trampoline for the function in the Wasm module's
    /// compiled code and use that to fill in `VMFunctionImport::wasm_call`.
    /// **However** there is no guarantee that the Wasm module has a trampoline
    /// for this function's signature. The Wasm module only has trampolines for
    /// its own types, and if this function isn't of one of those types, then
    /// the Wasm module will not have a trampoline for it. This is actually
    /// okay, because it means that the Wasm cannot actually call this function.
    /// But it does mean that this field needs to be an `Option` even though it
    /// is non-null the vast majority of the time.
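    ///
    /// A sketch of the lazy fill-in described above, using hypothetical helper
    /// names (the real lookup lives elsewhere in the runtime):
    ///
    /// ```ignore
    /// if import.wasm_call.is_none() {
    ///     // Look up a trampoline of this signature in the importing
    ///     // module's compiled code, if it has one.
    ///     import.wasm_call = module.wasm_to_native_trampoline(func_ref.type_index);
    /// }
    /// ```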
    pub wasm_call: Option<NonNull<VMWasmCallFunction>>,

    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,

    /// The VM state associated with this function.
    ///
    /// The actual definition of what this pointer points to depends on the
    /// function being referenced: for core Wasm functions, this is a `*mut
    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
    /// component functions it is a `*mut VMComponentContext`.
    pub vmctx: *mut VMOpaqueContext,
    // If more elements are added here, remember to add offset_of tests below!
}

unsafe impl Send for VMFuncRef {}
unsafe impl Sync for VMFuncRef {}

#[cfg(test)]
mod test_vm_func_ref {
    use super::VMFuncRef;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    #[test]
    fn check_vm_func_ref_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMFuncRef>(),
            usize::from(offsets.ptr.size_of_vm_func_ref())
        );
        assert_eq!(
            offset_of!(VMFuncRef, native_call),
            usize::from(offsets.ptr.vm_func_ref_native_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, array_call),
            usize::from(offsets.ptr.vm_func_ref_array_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, wasm_call),
            usize::from(offsets.ptr.vm_func_ref_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, type_index),
            usize::from(offsets.ptr.vm_func_ref_type_index())
        );
        assert_eq!(
            offset_of!(VMFuncRef, vmctx),
            usize::from(offsets.ptr.vm_func_ref_vmctx())
        );
    }
}

macro_rules! define_builtin_array {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        #[repr(C)]
        pub struct VMBuiltinFunctionsArray {
            $(
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            #[allow(unused_doc_comments)]
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $(
                    $name: crate::libcalls::raw::$name,
                )*
            };
        }
    };

    (@ty i32) => (u32);
    (@ty i64) => (u64);
    (@ty reference) => (*mut u8);
    (@ty pointer) => (*mut u8);
    (@ty vmctx) => (*mut VMContext);
}

wasmtime_environ::foreach_builtin_function!(define_builtin_array);
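
// As an illustration of the expansion (the actual builtin list lives in
// `wasmtime_environ::foreach_builtin_function!`), a builtin declared as
// `memory32_grow(vmctx: vmctx, delta: i64, index: i32) -> pointer` would
// become a field of type:
//
//     unsafe extern "C" fn(*mut VMContext, u64, u32) -> *mut u8
//
// initialized to `crate::libcalls::raw::memory32_grow` in `INIT`.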

const _: () = {
    assert!(
        mem::size_of::<VMBuiltinFunctionsArray>()
            == mem::size_of::<usize>()
                * (BuiltinFunctionIndex::builtin_functions_total_number() as usize)
    )
};

/// The storage for a WebAssembly invocation argument
///
/// TODO: These could be packed more densely, rather than using the same size for every type.
#[derive(Debug, Copy, Clone)]
#[repr(C, align(16))]
pub struct VMInvokeArgument([u8; 16]);

#[cfg(test)]
mod test_vm_invoke_argument {
    use super::VMInvokeArgument;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    #[test]
    fn check_vm_invoke_argument_alignment() {
        assert_eq!(align_of::<VMInvokeArgument>(), 16);
    }

    #[test]
    fn check_vm_invoke_argument_size() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMInvokeArgument>(),
            usize::from(offsets.ptr.size_of_vmglobal_definition())
        );
    }
}

impl VMInvokeArgument {
    /// Create a new invocation argument filled with zeroes
    pub fn new() -> Self {
        Self([0; 16])
    }
}

/// Structure used to control interrupting wasm code.
#[derive(Debug)]
#[repr(C)]
pub struct VMRuntimeLimits {
    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
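    ///
    /// A sketch of the convention (hypothetical embedder code, not an API in
    /// this crate):
    ///
    /// ```ignore
    /// // Grant `n` units of fuel; consumption counts upward toward zero.
    /// unsafe { *limits.fuel_consumed.get() = -(n as i64) };
    /// // Compiled code then effectively performs, per unit of cost:
    /// //     fuel_consumed += cost;
    /// //     if fuel_consumed > 0 { trap() }
    /// ```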
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// The value of the frame pointer register when we last called from Wasm to
    /// the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_exit_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// When a host function is wrapped into a `wasmtime::Func`, and is then
    /// called from the host, then this member has the sentinel value of `-1 as
    /// usize`, meaning that this contiguous sequence of Wasm frames is the
    /// empty sequence, and it is not safe to dereference the
    /// `last_wasm_exit_fp`.
    ///
    /// Used to find the end of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_entry_sp: UnsafeCell<usize>,
}
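
// A simplified sketch of how a stack walker consumes the `last_wasm_*` fields
// above (frame-pointer walking; the real logic lives elsewhere in the runtime
// and varies by architecture):
//
//     let mut pc = *limits.last_wasm_exit_pc.get();
//     let mut fp = *limits.last_wasm_exit_fp.get();
//     let entry_sp = *limits.last_wasm_entry_sp.get();
//     while fp != entry_sp {
//         visit_frame(pc);
//         pc = *(fp as *const usize).add(1); // saved return address
//         fp = *(fp as *const usize);        // caller's frame pointer
//     }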

// The `VMRuntimeLimits` type is a pod-type with no destructor, and we don't
// access any fields from other threads, so add in these trait impls which are
// otherwise not available due to the `fuel_consumed` and `epoch_deadline`
// variables in `VMRuntimeLimits`.
unsafe impl Send for VMRuntimeLimits {}
unsafe impl Sync for VMRuntimeLimits {}

impl Default for VMRuntimeLimits {
    fn default() -> VMRuntimeLimits {
        VMRuntimeLimits {
            stack_limit: UnsafeCell::new(usize::max_value()),
            fuel_consumed: UnsafeCell::new(0),
            epoch_deadline: UnsafeCell::new(0),
            last_wasm_exit_fp: UnsafeCell::new(0),
            last_wasm_exit_pc: UnsafeCell::new(0),
            last_wasm_entry_sp: UnsafeCell::new(0),
        }
    }
}

#[cfg(test)]
mod test_vmruntime_limits {
    use super::VMRuntimeLimits;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    #[test]
    fn vmctx_runtime_limits_offset() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            offsets.vmctx_runtime_limits(),
            offsets.ptr.vmcontext_runtime_limits().into()
        );
    }

    #[test]
    fn field_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            offset_of!(VMRuntimeLimits, stack_limit),
            usize::from(offsets.ptr.vmruntime_limits_stack_limit())
        );
        assert_eq!(
            offset_of!(VMRuntimeLimits, fuel_consumed),
            usize::from(offsets.ptr.vmruntime_limits_fuel_consumed())
        );
        assert_eq!(
            offset_of!(VMRuntimeLimits, epoch_deadline),
            usize::from(offsets.ptr.vmruntime_limits_epoch_deadline())
        );
        assert_eq!(
            offset_of!(VMRuntimeLimits, last_wasm_exit_fp),
            usize::from(offsets.ptr.vmruntime_limits_last_wasm_exit_fp())
        );
        assert_eq!(
            offset_of!(VMRuntimeLimits, last_wasm_exit_pc),
            usize::from(offsets.ptr.vmruntime_limits_last_wasm_exit_pc())
        );
        assert_eq!(
            offset_of!(VMRuntimeLimits, last_wasm_entry_sp),
            usize::from(offsets.ptr.vmruntime_limits_last_wasm_entry_sp())
        );
    }
}

/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {
    /// There's some more discussion about this within `wasmtime/src/lib.rs` but
    /// the idea is that we want to tell the compiler that this contains
    /// pointers which transitively refer to this `VMContext` itself, to
    /// suppress some optimizations that might otherwise assume this doesn't
    /// exist.
    ///
    /// The self-referential pointer we care about is the `*mut Store` pointer
    /// early on in this context, which, if you follow through enough levels of
    /// nesting, eventually refers back to this `VMContext`.
    pub _marker: marker::PhantomPinned,
}

impl VMContext {
    /// Helper function to cast between context types using a debug assertion to
    /// protect against some mistakes.
    #[inline]
    pub unsafe fn from_opaque(opaque: *mut VMOpaqueContext) -> *mut VMContext {
        // Note that in general the offset of the "magic" field is stored in
        // `VMOffsets::vmctx_magic`. Given though that this is a sanity check
        // about converting this pointer to another type we ideally don't want
        // to read the offset from potentially corrupt memory. Instead it would
        // be better to catch errors here as soon as possible.
        //
        // To accomplish this the `VMContext` structure is laid out with the
        // magic field at a statically known offset (here it's 0 for now). This
        // static offset is asserted in `VMOffsets::from` and needs to be kept
        // in sync with this line for this debug assertion to work.
        //
        // Also note that this magic is only ever invalid in the presence of
        // bugs, meaning we don't actually read the magic and act differently
        // at runtime depending what it is, so this is a debug assertion as
        // opposed to a regular assertion.
        debug_assert_eq!((*opaque).magic, VMCONTEXT_MAGIC);
        opaque.cast()
    }
}

/// A "raw" and unsafe representation of a WebAssembly value.
///
/// This is provided for use with the `Func::new_unchecked` and
/// `Func::call_unchecked` APIs. In general you're unlikely to need this from
/// Rust; prefer APIs like `Func::wrap` and `TypedFunc::call`.
///
/// This is notably an "unsafe" way to work with `Val` and it's recommended to
/// instead use `Val` where possible. An important note about this union is that
/// fields are all stored in little-endian format, regardless of the endianness
/// of the host system.
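///
/// A round-trip sketch using the constructors and accessors defined below:
///
/// ```ignore
/// let v = ValRaw::i32(-1);
/// assert_eq!(v.get_i32(), -1);
/// // Constructors zero-extend into the full lower 64 bits (see the comment
/// // in `ValRaw::i32`), so the unsigned 64-bit view observes `0xffff_ffff`.
/// assert_eq!(v.get_u64(), 0xffff_ffff);
/// ```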
#[allow(missing_docs)]
#[repr(C)]
#[derive(Copy, Clone)]
pub union ValRaw {
    /// A WebAssembly `i32` value.
    ///
    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i32: i32,

    /// A WebAssembly `i64` value.
    ///
    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i64: i64,

    /// A WebAssembly `f32` value.
    ///
    /// Note that the payload here is a Rust `u32`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u32` value is the return value of `f32::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f32: u32,

    /// A WebAssembly `f64` value.
    ///
    /// Note that the payload here is a Rust `u64`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u64` value is the return value of `f64::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f64: u64,

    /// A WebAssembly `v128` value.
    ///
    /// The payload here is a Rust `[u8; 16]` which has the same number of bits
    /// but note that `v128` in WebAssembly is often considered a vector type
    /// such as `i32x4` or `f64x2`. This means that the actual interpretation
    /// of the underlying bits is left up to the instructions which consume
    /// this value.
    ///
    /// This value is always stored in a little-endian format.
    v128: [u8; 16],

    /// A WebAssembly `funcref` value (or one of its subtypes).
    ///
    /// The payload here is a pointer which is runtime-defined. This is one of
    /// the main points of unsafety about the `ValRaw` type as the validity of
    /// the pointer here is not easily verified and must be preserved by
    /// carefully calling the correct functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    funcref: *mut c_void,

    /// A WebAssembly `externref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    externref: u32,

    /// A WebAssembly `anyref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    anyref: u32,
}

// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API so these
// are some simple assertions about the shape of the type which are additionally
// matched in C.
const _: () = {
    assert!(std::mem::size_of::<ValRaw>() == 16);
    assert!(std::mem::align_of::<ValRaw>() == 8);
};

// This type is just a bag-of-bits so it's up to the caller to figure out how
// to safely deal with threading concerns and safely access interior bits.
unsafe impl Send for ValRaw {}
unsafe impl Sync for ValRaw {}

impl std::fmt::Debug for ValRaw {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        struct Hex<T>(T);
        impl<T: std::fmt::LowerHex> std::fmt::Debug for Hex<T> {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                let bytes = std::mem::size_of::<T>();
                let hex_digits_per_byte = 2;
                let hex_digits = bytes * hex_digits_per_byte;
                write!(f, "0x{:0width$x}", self.0, width = hex_digits)
            }
        }

        unsafe {
            f.debug_struct("ValRaw")
                .field("i32", &Hex(self.i32))
                .field("i64", &Hex(self.i64))
                .field("f32", &Hex(self.f32))
                .field("f64", &Hex(self.f64))
                .field("v128", &Hex(u128::from_le_bytes(self.v128)))
                .field("funcref", &self.funcref)
                .field("externref", &Hex(self.externref))
                .field("anyref", &Hex(self.anyref))
                .finish()
        }
    }
}

impl ValRaw {
    /// Creates a WebAssembly `i32` value
    #[inline]
    pub fn i32(i: i32) -> ValRaw {
        // Note that this is intentionally not setting the `i32` field, instead
        // setting the `i64` field with a zero-extended version of `i`. For more
        // information on this see the comments on `Lower for Result` in the
        // `wasmtime` crate. All other `ValRaw` constructors are likewise
        // constrained to guarantee that the initial 64 bits are always
        // initialized.
        ValRaw::u64(i.unsigned().into())
    }

    /// Creates a WebAssembly `i64` value
    #[inline]
    pub fn i64(i: i64) -> ValRaw {
        ValRaw { i64: i.to_le() }
    }

    /// Creates a WebAssembly `i32` value
    #[inline]
    pub fn u32(i: u32) -> ValRaw {
        // See comments in `ValRaw::i32` for why this is setting the upper
        // 32-bits as well.
        ValRaw::u64(i.into())
    }

    /// Creates a WebAssembly `i64` value
    #[inline]
    pub fn u64(i: u64) -> ValRaw {
        ValRaw::i64(i as i64)
    }

    /// Creates a WebAssembly `f32` value
    #[inline]
    pub fn f32(i: u32) -> ValRaw {
        // See comments in `ValRaw::i32` for why this is setting the upper
        // 32-bits as well.
        ValRaw::u64(i.into())
    }

    /// Creates a WebAssembly `f64` value
    #[inline]
    pub fn f64(i: u64) -> ValRaw {
        ValRaw { f64: i.to_le() }
    }

    /// Creates a WebAssembly `v128` value
    #[inline]
    pub fn v128(i: u128) -> ValRaw {
        ValRaw {
            v128: i.to_le_bytes(),
        }
    }

    /// Creates a WebAssembly `funcref` value
    #[inline]
    pub fn funcref(i: *mut c_void) -> ValRaw {
        ValRaw {
            funcref: Strict::map_addr(i, |i| i.to_le()),
        }
    }

    /// Creates a WebAssembly `externref` value
    #[inline]
    pub fn externref(e: u32) -> ValRaw {
        assert!(cfg!(feature = "gc") || e == 0);
        ValRaw {
            externref: e.to_le(),
        }
    }

    /// Creates a WebAssembly `anyref` value
    #[inline]
    pub fn anyref(r: u32) -> ValRaw {
        assert!(cfg!(feature = "gc") || r == 0);
        ValRaw { anyref: r.to_le() }
    }

    /// Gets the WebAssembly `i32` value
    #[inline]
    pub fn get_i32(&self) -> i32 {
        unsafe { i32::from_le(self.i32) }
    }

    /// Gets the WebAssembly `i64` value
    #[inline]
    pub fn get_i64(&self) -> i64 {
        unsafe { i64::from_le(self.i64) }
    }

    /// Gets the WebAssembly `i32` value
    #[inline]
    pub fn get_u32(&self) -> u32 {
        self.get_i32().unsigned()
    }

    /// Gets the WebAssembly `i64` value
    #[inline]
    pub fn get_u64(&self) -> u64 {
        self.get_i64().unsigned()
    }

    /// Gets the WebAssembly `f32` value
    #[inline]
    pub fn get_f32(&self) -> u32 {
        unsafe { u32::from_le(self.f32) }
    }

    /// Gets the WebAssembly `f64` value
    #[inline]
    pub fn get_f64(&self) -> u64 {
        unsafe { u64::from_le(self.f64) }
    }

    /// Gets the WebAssembly `v128` value
    #[inline]
    pub fn get_v128(&self) -> u128 {
        unsafe { u128::from_le_bytes(self.v128) }
    }

    /// Gets the WebAssembly `funcref` value
    #[inline]
    pub fn get_funcref(&self) -> *mut c_void {
        unsafe { Strict::map_addr(self.funcref, |i| usize::from_le(i)) }
    }

    /// Gets the WebAssembly `externref` value
    #[inline]
    pub fn get_externref(&self) -> u32 {
        let externref = u32::from_le(unsafe { self.externref });
        assert!(cfg!(feature = "gc") || externref == 0);
        externref
    }

    /// Gets the WebAssembly `anyref` value
    #[inline]
    pub fn get_anyref(&self) -> u32 {
        let anyref = u32::from_le(unsafe { self.anyref });
        assert!(cfg!(feature = "gc") || anyref == 0);
        anyref
    }
}

/// An "opaque" version of `VMContext` which must be explicitly cast to a
/// target context.
///
/// This context is used to represent that contexts specified in
/// `VMFuncRef` can have any type and don't have an implicit
/// structure. Neither wasmtime nor cranelift-generated code can rely on the
/// structure of an opaque context in general and only the code which configured
/// the context is able to rely on a particular structure. This is because the
/// context pointer configured for `VMFuncRef` is guaranteed to be
/// the first parameter passed.
///
/// Note that Wasmtime currently has a layout where all contexts that are cast
/// to an opaque context start with a 32-bit "magic" which can be used in debug
/// mode to debug-assert that the casts here are correct and have at least a
/// little protection against incorrect casts.
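///
/// A sketch of the intended round trip; the `debug_assert!` in
/// [`VMContext::from_opaque`] checks the magic on the way back:
///
/// ```ignore
/// // Assume `vmctx: *mut VMContext` is in scope.
/// let opaque = VMOpaqueContext::from_vmcontext(vmctx);
/// let roundtripped = unsafe { VMContext::from_opaque(opaque) };
/// ```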
pub struct VMOpaqueContext {
    pub(crate) magic: u32,
    _marker: marker::PhantomPinned,
}

impl VMOpaqueContext {
    /// Helper function to clearly indicate that casts are desired.
    #[inline]
    pub fn from_vmcontext(ptr: *mut VMContext) -> *mut VMOpaqueContext {
        ptr.cast()
    }

    /// Helper function to clearly indicate that casts are desired.
    #[inline]
    pub fn from_vm_array_call_host_func_context(
        ptr: *mut VMArrayCallHostFuncContext,
    ) -> *mut VMOpaqueContext {
        ptr.cast()
    }

    /// Helper function to clearly indicate that casts are desired.
    #[inline]
    pub fn from_vm_native_call_host_func_context(
        ptr: *mut VMNativeCallHostFuncContext,
    ) -> *mut VMOpaqueContext {
        ptr.cast()
    }
}
1293}