wasmtime_runtime/memory.rs
//! Memory management for linear memories.
//!
//! `RuntimeLinearMemory` is to WebAssembly linear memories what `Table` is to WebAssembly tables.
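//!
//! A sketch of how these pieces fit together (hypothetical embedder code;
//! `plan` and `store` are assumed to already exist):
//!
//! ```ignore
//! // A `RuntimeMemoryCreator` produces `RuntimeLinearMemory` allocations,
//! // which `Memory` wraps for use by the rest of the runtime.
//! let mut memory = Memory::new_dynamic(&plan, &DefaultMemoryCreator, store, None)?;
//! let definition = memory.vmmemory();
//! ```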

use crate::mmap::Mmap;
use crate::vmcontext::VMMemoryDefinition;
use crate::{MemoryImage, MemoryImageSlot, SendSyncPtr, SharedMemory, Store, WaitResult};
use anyhow::{bail, format_err, Error, Result};
use std::ops::Range;
use std::ptr::NonNull;
use std::sync::Arc;
use std::time::Instant;
use wasmtime_environ::{MemoryPlan, MemoryStyle, Trap, WASM32_MAX_PAGES, WASM64_MAX_PAGES};

const WASM_PAGE_SIZE: usize = wasmtime_environ::WASM_PAGE_SIZE as usize;
const WASM_PAGE_SIZE_U64: u64 = wasmtime_environ::WASM_PAGE_SIZE as u64;

/// A memory allocator for linear memories.
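///
/// A sketch of a custom creator that simply delegates to the default
/// mmap-based implementation (hypothetical type; shown only to illustrate the
/// trait's contract):
///
/// ```ignore
/// struct TracingMemoryCreator;
///
/// impl RuntimeMemoryCreator for TracingMemoryCreator {
///     fn new_memory(
///         &self,
///         plan: &MemoryPlan,
///         minimum: usize,
///         maximum: Option<usize>,
///         memory_image: Option<&Arc<MemoryImage>>,
///     ) -> Result<Box<dyn RuntimeLinearMemory>> {
///         // Hypothetical instrumentation before deferring to `MmapMemory`.
///         eprintln!("allocating a linear memory of {minimum} bytes");
///         Ok(Box::new(MmapMemory::new(plan, minimum, maximum, memory_image)?))
///     }
/// }
/// ```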
pub trait RuntimeMemoryCreator: Send + Sync {
    /// Create a new `RuntimeLinearMemory`.
    fn new_memory(
        &self,
        plan: &MemoryPlan,
        minimum: usize,
        maximum: Option<usize>,
        // Optionally, a memory image for CoW backing.
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<Box<dyn RuntimeLinearMemory>>;
}

/// The default memory allocator used by Wasmtime.
pub struct DefaultMemoryCreator;

impl RuntimeMemoryCreator for DefaultMemoryCreator {
    /// Create a new `MmapMemory`.
    fn new_memory(
        &self,
        plan: &MemoryPlan,
        minimum: usize,
        maximum: Option<usize>,
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<Box<dyn RuntimeLinearMemory>> {
        Ok(Box::new(MmapMemory::new(
            plan,
            minimum,
            maximum,
            memory_image,
        )?))
    }
}

/// A linear memory.
pub trait RuntimeLinearMemory: Send + Sync {
    /// Returns the number of allocated bytes.
    fn byte_size(&self) -> usize;

    /// Returns the maximum number of bytes the memory can grow to.
    /// Returns `None` if the memory is unbounded.
    fn maximum_byte_size(&self) -> Option<usize>;

    /// Grows a memory by `delta_pages`.
    ///
    /// This performs the necessary checks on the growth before delegating to
    /// the underlying `grow_to` implementation. A default implementation of
    /// this function is provided here since the logic is assumed to be the
    /// same for most kinds of memory; one exception is shared memory, which
    /// must perform all the steps of the default implementation *plus* the
    /// required locking.
    ///
    /// The `store` is used only for error reporting.
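    ///
    /// A worked sketch of the clamping arithmetic used below (assumes the
    /// standard 64 KiB wasm page size and a 64-bit host):
    ///
    /// ```ignore
    /// let wasm_page_size = 64 * 1024usize;
    /// let absolute_max = 0usize.wrapping_sub(wasm_page_size);
    /// // Growing a one-page memory by `u64::MAX` pages saturates instead of
    /// // overflowing, and the result is then clamped down to `absolute_max`.
    /// let new_byte_size = usize::try_from(u64::MAX)
    ///     .unwrap_or(usize::MAX)
    ///     .saturating_mul(wasm_page_size)
    ///     .saturating_add(wasm_page_size);
    /// assert_eq!(new_byte_size.min(absolute_max), absolute_max);
    /// ```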
    fn grow(
        &mut self,
        delta_pages: u64,
        mut store: Option<&mut dyn Store>,
    ) -> Result<Option<(usize, usize)>, Error> {
        let old_byte_size = self.byte_size();

        // Wasm spec: when growing by 0 pages, always return the current size.
        if delta_pages == 0 {
            return Ok(Some((old_byte_size, old_byte_size)));
        }

        // This is the largest wasm-page-aligned region of memory that it is
        // possible to represent in a `usize`. It will be impossible for the
        // system to actually allocate this much.
        let absolute_max = 0usize.wrapping_sub(WASM_PAGE_SIZE);

        // Calculate the byte size of the new allocation. Let it overflow up to
        // `usize::MAX`, then clamp it down to `absolute_max`.
        let new_byte_size = usize::try_from(delta_pages)
            .unwrap_or(usize::MAX)
            .saturating_mul(WASM_PAGE_SIZE)
            .saturating_add(old_byte_size);
        let new_byte_size = if new_byte_size > absolute_max {
            absolute_max
        } else {
            new_byte_size
        };

        let maximum = self.maximum_byte_size();
        // The store's limiter gets the first chance to reject the growth.
        if let Some(store) = &mut store {
            if !store.memory_growing(old_byte_size, new_byte_size, maximum)? {
                return Ok(None);
            }
        }

        // Never exceed the memory's maximum, even if the limiter permitted the
        // growth.
        if let Some(max) = maximum {
            if new_byte_size > max {
                if let Some(store) = store {
                    // FIXME: shared memories may not have an associated store
                    // to report the growth failure to but the error should not
                    // be dropped
                    // (https://github.com/bytecodealliance/wasmtime/issues/4240).
                    store.memory_grow_failed(format_err!("Memory maximum size exceeded"))?;
                }
                return Ok(None);
            }
        }

        match self.grow_to(new_byte_size) {
            Ok(_) => Ok(Some((old_byte_size, new_byte_size))),
            Err(e) => {
                // FIXME: shared memories may not have an associated store to
                // report the growth failure to but the error should not be
                // dropped
                // (https://github.com/bytecodealliance/wasmtime/issues/4240).
                if let Some(store) = store {
                    store.memory_grow_failed(e)?;
                }
                Ok(None)
            }
        }
    }

    /// Grow memory to the specified number of bytes.
    ///
    /// Returns an error if memory can't be grown by the specified number of
    /// bytes.
    fn grow_to(&mut self, size: usize) -> Result<()>;

    /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm
    /// code.
    fn vmmemory(&mut self) -> VMMemoryDefinition;

    /// Does this memory need initialization? It may not if it already
    /// has initial contents courtesy of the `MemoryImage` passed to
    /// `RuntimeMemoryCreator::new_memory()`.
    fn needs_init(&self) -> bool;

    /// Used for optional dynamic downcasting.
    fn as_any_mut(&mut self) -> &mut dyn std::any::Any;

    /// Returns the range of addresses that may be reached by WebAssembly.
    ///
    /// This starts at the base of linear memory and ends at the end of the
    /// guard pages, if any.
    fn wasm_accessible(&self) -> Range<usize>;
}

/// A linear memory instance backed by a memory-mapped allocation (`Mmap`).
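///
/// Schematically, the virtual-memory reservation managed here is laid out as
/// follows (a sketch; each region's size is configuration-dependent and may be
/// zero):
///
/// ```text
/// +-----------+-------------------+---------------------+--------------+
/// | pre guard | accessible memory | reserved for growth | offset guard |
/// +-----------+-------------------+---------------------+--------------+
/// ^ mmap base  ^ wasm base (`vmmemory().base`)
/// ```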
#[derive(Debug)]
pub struct MmapMemory {
    // The underlying allocation.
    mmap: Mmap,

    // The number of bytes that are accessible in `mmap` and available for
    // reading and writing.
    //
    // This region starts at `pre_guard_size` offset from the base of `mmap`.
    accessible: usize,

    // The optional maximum accessible size, in bytes, for this linear memory.
    //
    // Note that this maximum does not factor in guard pages, so this isn't the
    // maximum size of the linear address space reservation for this memory.
    maximum: Option<usize>,

    // The amount of extra bytes to reserve whenever memory grows. This is
    // specified so that the cost of repeated growth is amortized.
    extra_to_reserve_on_growth: usize,

    // Size in bytes of extra guard pages before the start and after the end to
    // optimize loads and stores with constant offsets.
    pre_guard_size: usize,
    offset_guard_size: usize,

    // An optional CoW mapping that provides the initial content of this
    // MmapMemory, if mapped.
    memory_image: Option<MemoryImageSlot>,
}

impl MmapMemory {
    /// Create a new linear memory instance with specified minimum and maximum
    /// number of wasm pages.
    pub fn new(
        plan: &MemoryPlan,
        minimum: usize,
        mut maximum: Option<usize>,
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<Self> {
        // It's a programmer error for these two configuration values to exceed
        // the host's available address space, so panic if such a configuration
        // is found (mostly an issue for hypothetical 32-bit hosts).
        let offset_guard_bytes = usize::try_from(plan.offset_guard_size).unwrap();
        let pre_guard_bytes = usize::try_from(plan.pre_guard_size).unwrap();

        let (alloc_bytes, extra_to_reserve_on_growth) = match plan.style {
            // Dynamic memories start with the minimum size plus the `reserve`
            // amount specified to grow into.
            MemoryStyle::Dynamic { reserve } => (minimum, usize::try_from(reserve).unwrap()),

            // Static memories will never move in memory and consequently get
            // their entire allocation up-front with no extra room to grow into.
            // Note that `maximum` is adjusted here to the smaller of the two:
            // the `maximum` given or the `bound` specified for this memory.
            MemoryStyle::Static { bound } => {
                assert!(bound >= plan.memory.minimum);
                let bound_bytes =
                    usize::try_from(bound.checked_mul(WASM_PAGE_SIZE_U64).unwrap()).unwrap();
                maximum = Some(bound_bytes.min(maximum.unwrap_or(usize::MAX)));
                (bound_bytes, 0)
            }
        };

        let request_bytes = pre_guard_bytes
            .checked_add(alloc_bytes)
            .and_then(|i| i.checked_add(extra_to_reserve_on_growth))
            .and_then(|i| i.checked_add(offset_guard_bytes))
            .ok_or_else(|| format_err!("cannot allocate {} with guard regions", minimum))?;
        let mut mmap = Mmap::accessible_reserved(0, request_bytes)?;

        if minimum > 0 {
            mmap.make_accessible(pre_guard_bytes, minimum)?;
        }

        // If a memory image was specified, try to create the MemoryImageSlot on
        // top of our mmap.
        let memory_image = match memory_image {
            Some(image) => {
                let base = unsafe { mmap.as_mut_ptr().add(pre_guard_bytes) };
                let mut slot = MemoryImageSlot::create(
                    base.cast(),
                    minimum,
                    alloc_bytes + extra_to_reserve_on_growth,
                );
                slot.instantiate(minimum, Some(image), plan)?;
                // On drop, we will unmap our mmap'd range that this slot was
                // mapped on top of, so there is no need for the slot to wipe
                // it with an anonymous mapping first.
                slot.no_clear_on_drop();
                Some(slot)
            }
            None => None,
        };

        Ok(Self {
            mmap,
            accessible: minimum,
            maximum,
            pre_guard_size: pre_guard_bytes,
            offset_guard_size: offset_guard_bytes,
            extra_to_reserve_on_growth,
            memory_image,
        })
    }
}

impl RuntimeLinearMemory for MmapMemory {
    fn byte_size(&self) -> usize {
        self.accessible
    }

    fn maximum_byte_size(&self) -> Option<usize> {
        self.maximum
    }

    fn grow_to(&mut self, new_size: usize) -> Result<()> {
        if new_size > self.mmap.len() - self.offset_guard_size - self.pre_guard_size {
            // If the new size of this heap exceeds the current size of the
            // allocation we have, then this must be a dynamic heap. Use
            // `new_size` to calculate a new size of an allocation, allocate it,
            // and then copy over the memory from before.
            let request_bytes = self
                .pre_guard_size
                .checked_add(new_size)
                .and_then(|s| s.checked_add(self.extra_to_reserve_on_growth))
                .and_then(|s| s.checked_add(self.offset_guard_size))
                .ok_or_else(|| format_err!("overflow calculating size of memory allocation"))?;

            let mut new_mmap = Mmap::accessible_reserved(0, request_bytes)?;
            new_mmap.make_accessible(self.pre_guard_size, new_size)?;

            // This method has an exclusive reference to `self.mmap` and just
            // created `new_mmap` so it should be safe to acquire references
            // into both of them and copy between them.
            unsafe {
                let range = self.pre_guard_size..self.pre_guard_size + self.accessible;
                let src = self.mmap.slice(range.clone());
                let dst = new_mmap.slice_mut(range);
                dst.copy_from_slice(src);
            }

            // Now drop the MemoryImageSlot, if any. We've lost the CoW
            // advantages by explicitly copying all data, but we have
            // preserved all of its content; so we no longer need the
            // mapping. We need to do this before we (implicitly) drop the
            // `mmap` field by overwriting it below.
            drop(self.memory_image.take());

            self.mmap = new_mmap;
        } else if let Some(image) = self.memory_image.as_mut() {
            // MemoryImageSlot has its own growth mechanisms; defer to its
            // implementation.
            image.set_heap_limit(new_size)?;
        } else {
            // If the new size of this heap fits within the existing allocation
            // then all we need to do is to make the new pages accessible. This
            // can happen either for "static" heaps which always hit this case,
            // or "dynamic" heaps which have some space reserved after the
            // initial allocation to grow into before the heap is moved in
            // memory.
            assert!(new_size > self.accessible);
            self.mmap.make_accessible(
                self.pre_guard_size + self.accessible,
                new_size - self.accessible,
            )?;
        }

        self.accessible = new_size;

        Ok(())
    }

    fn vmmemory(&mut self) -> VMMemoryDefinition {
        VMMemoryDefinition {
            base: unsafe { self.mmap.as_mut_ptr().add(self.pre_guard_size) },
            current_length: self.accessible.into(),
        }
    }

    fn needs_init(&self) -> bool {
        // If we're using a CoW mapping, then no initialization is needed.
        self.memory_image.is_none()
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }

    fn wasm_accessible(&self) -> Range<usize> {
        let base = self.mmap.as_ptr() as usize + self.pre_guard_size;
        let end = base + (self.mmap.len() - self.pre_guard_size);
        base..end
    }
}

/// A "static" memory where the lifetime of the backing memory is managed
/// elsewhere. Currently used with the pooling allocator.
struct StaticMemory {
    /// The base pointer of this static memory, wrapped up in a send/sync
    /// wrapper.
    base: SendSyncPtr<u8>,

    /// The byte capacity of the `base` pointer.
    capacity: usize,

    /// The current size, in bytes, of this memory.
    size: usize,

    /// The size, in bytes, of the virtual address allocation starting at `base`
    /// and going to the end of the guard pages at the end of the linear memory.
    memory_and_guard_size: usize,

    /// The image management, if any, for this memory. Owned here and
    /// returned to the pooling allocator when termination occurs.
    memory_image: MemoryImageSlot,
}

impl StaticMemory {
    fn new(
        base_ptr: *mut u8,
        base_capacity: usize,
        initial_size: usize,
        maximum_size: Option<usize>,
        memory_image: MemoryImageSlot,
        memory_and_guard_size: usize,
    ) -> Result<Self> {
        if base_capacity < initial_size {
            bail!(
                "initial memory size of {} exceeds the pooling allocator's \
                 configured maximum memory size of {} bytes",
                initial_size,
                base_capacity,
            );
        }

        // Only use the part of the slice that is necessary.
        let base_capacity = match maximum_size {
            Some(max) if max < base_capacity => max,
            _ => base_capacity,
        };

        Ok(Self {
            base: SendSyncPtr::new(NonNull::new(base_ptr).unwrap()),
            capacity: base_capacity,
            size: initial_size,
            memory_image,
            memory_and_guard_size,
        })
    }
}

impl RuntimeLinearMemory for StaticMemory {
    fn byte_size(&self) -> usize {
        self.size
    }

    fn maximum_byte_size(&self) -> Option<usize> {
        Some(self.capacity)
    }

    fn grow_to(&mut self, new_byte_size: usize) -> Result<()> {
        // Never exceed the static memory size; this check should have been
        // made prior to arriving here.
        assert!(new_byte_size <= self.capacity);

        self.memory_image.set_heap_limit(new_byte_size)?;

        // Update our accounting of the available size.
        self.size = new_byte_size;
        Ok(())
    }

    fn vmmemory(&mut self) -> VMMemoryDefinition {
        VMMemoryDefinition {
            base: self.base.as_ptr(),
            current_length: self.size.into(),
        }
    }

    fn needs_init(&self) -> bool {
        !self.memory_image.has_image()
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }

    fn wasm_accessible(&self) -> Range<usize> {
        let base = self.base.as_ptr() as usize;
        let end = base + self.memory_and_guard_size;
        base..end
    }
}

/// Representation of a runtime wasm linear memory.
pub struct Memory(pub(crate) Box<dyn RuntimeLinearMemory>);

impl Memory {
    /// Create a new dynamic (movable) memory instance for the specified plan.
    pub fn new_dynamic(
        plan: &MemoryPlan,
        creator: &dyn RuntimeMemoryCreator,
        store: &mut dyn Store,
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<Self> {
        let (minimum, maximum) = Self::limit_new(plan, Some(store))?;
        let allocation = creator.new_memory(plan, minimum, maximum, memory_image)?;
        let allocation = if plan.memory.shared {
            Box::new(SharedMemory::wrap(plan, allocation, plan.memory)?)
        } else {
            allocation
        };
        Ok(Memory(allocation))
    }

    /// Create a new static (immovable) memory instance for the specified plan.
    pub fn new_static(
        plan: &MemoryPlan,
        base_ptr: *mut u8,
        base_capacity: usize,
        memory_image: MemoryImageSlot,
        memory_and_guard_size: usize,
        store: &mut dyn Store,
    ) -> Result<Self> {
        let (minimum, maximum) = Self::limit_new(plan, Some(store))?;
        let pooled_memory = StaticMemory::new(
            base_ptr,
            base_capacity,
            minimum,
            maximum,
            memory_image,
            memory_and_guard_size,
        )?;
        let allocation = Box::new(pooled_memory);
        let allocation: Box<dyn RuntimeLinearMemory> = if plan.memory.shared {
            // FIXME: since the pooling allocator owns the memory allocation
            // (which is torn down with the instance), the current shared memory
            // implementation will cause problems; see
            // https://github.com/bytecodealliance/wasmtime/issues/4244.
            todo!("using shared memory with the pooling allocator is a work in progress");
        } else {
            allocation
        };
        Ok(Memory(allocation))
    }

    /// Calls the `store`'s limiter to optionally prevent a memory from being
    /// allocated.
    ///
    /// Returns the minimum size and optional maximum size of the memory, in
    /// bytes.
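    ///
    /// For example (a sketch on a 64-bit host): a 32-bit memory declaring no
    /// maximum is given an effective maximum of `1 << 32` bytes, and a
    /// declared maximum whose byte size would overflow `usize` is silently
    /// clamped to `absolute_max`:
    ///
    /// ```ignore
    /// // Hypothetical 3-page minimum: sizes are converted from wasm pages
    /// // to bytes before consulting the store's limiter.
    /// let minimum_bytes = 3u64.checked_mul(WASM_PAGE_SIZE_U64); // Some(196_608)
    /// ```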
    pub(crate) fn limit_new(
        plan: &MemoryPlan,
        store: Option<&mut dyn Store>,
    ) -> Result<(usize, Option<usize>)> {
        // Sanity-check what should already be true from wasm module validation.
        let absolute_max = if plan.memory.memory64 {
            WASM64_MAX_PAGES
        } else {
            WASM32_MAX_PAGES
        };
        assert!(plan.memory.minimum <= absolute_max);
        assert!(plan.memory.maximum.is_none() || plan.memory.maximum.unwrap() <= absolute_max);

        // This is the absolute maximum that the module can try to allocate:
        // our entire address space minus one wasm page. An allocation of this
        // size should never actually succeed because presumably the kernel
        // wants *something* for itself, but the value is passed to the
        // `store`'s limiter as a requested size to approximate the scale of
        // the request that the wasm module is making. This is necessary
        // because the limiter works on `usize` bytes whereas we're working
        // with possibly-overflowing `u64` calculations here. To faithfully
        // represent the byte requests of modules we'd have to use `u128`,
        // which is overkill for this purpose.
        let absolute_max = 0usize.wrapping_sub(WASM_PAGE_SIZE);

        // If the minimum memory size overflows the size of our own address
        // space, then we can't satisfy this request, but defer the error to
        // later so the `store` can be informed that an effective oom is
        // happening.
        let minimum = plan
            .memory
            .minimum
            .checked_mul(WASM_PAGE_SIZE_U64)
            .and_then(|m| usize::try_from(m).ok());

        // The plan stores the maximum size in units of wasm pages, but we
        // use units of bytes. Unlike the `minimum` size, we silently clamp
        // the effective maximum size to `absolute_max` if the maximum is too
        // large. This should be ok since, as a wasm runtime, we get to
        // arbitrarily decide the actual maximum size of memory, regardless of
        // what's actually listed on the memory itself.
        let mut maximum = plan.memory.maximum.map(|max| {
            usize::try_from(max)
                .ok()
                .and_then(|m| m.checked_mul(WASM_PAGE_SIZE))
                .unwrap_or(absolute_max)
        });

        // If this is a 32-bit memory and no maximum is otherwise listed then
        // we still need to specify a maximum size of 4GB. If the host platform
        // is 32-bit then there's no need to limit the maximum this way since
        // no allocation of 4GB can succeed, but for 64-bit platforms this is
        // required to limit memories to 4GB.
        if !plan.memory.memory64 && maximum.is_none() {
            maximum = usize::try_from(1u64 << 32).ok();
        }

        // Inform the store's limiter what's about to happen. This lets the
        // limiter reject anything if necessary, and it also guarantees that
        // we call the limiter for all requested memories, even if our
        // `minimum` calculation overflowed. This means the `minimum` we're
        // informing the limiter of is lossy and may not be 100% accurate, but
        // for now the expected uses of the limiter mean that's ok.
        if let Some(store) = store {
            // We ignore the store limits for shared memories since they are
            // technically not created within a store (though, trickily, they
            // may be associated with one in order to get a `vmctx`).
            if !plan.memory.shared {
                if !store.memory_growing(0, minimum.unwrap_or(absolute_max), maximum)? {
                    bail!(
                        "memory minimum size of {} pages exceeds memory limits",
                        plan.memory.minimum
                    );
                }
            }
        }

        // At this point we need to actually handle overflows, so bail out with
        // an error if the `minimum` calculation overflowed.
        let minimum = minimum.ok_or_else(|| {
            format_err!(
                "memory minimum size of {} pages exceeds memory limits",
                plan.memory.minimum
            )
        })?;

        Ok((minimum, maximum))
    }

    /// Returns the number of allocated bytes.
    pub fn byte_size(&self) -> usize {
        self.0.byte_size()
    }

    /// Returns the maximum number of bytes the memory can grow to at runtime.
    ///
    /// Returns `None` if the memory is unbounded.
    ///
    /// The runtime maximum may not be equal to the maximum from the linear
    /// memory's Wasm type when it is being constrained by an instance
    /// allocator.
    pub fn maximum_byte_size(&self) -> Option<usize> {
        self.0.maximum_byte_size()
    }

    /// Returns whether or not this memory needs initialization. It
    /// may not if it already has initial content thanks to a CoW
    /// mechanism.
    pub(crate) fn needs_init(&self) -> bool {
        self.0.needs_init()
    }

    /// Grow memory by the specified number of wasm pages.
    ///
    /// Returns `None` if memory can't be grown by the specified number of
    /// wasm pages. Returns `Some` with the old size of memory, in bytes, on
    /// successful growth.
    ///
    /// # Safety
    ///
    /// Resizing the memory can reallocate the memory buffer for dynamic memories.
    /// An instance's `VMContext` may have pointers to the memory's base and will
    /// need to be fixed up after growing the memory.
    ///
    /// Generally, prefer using `InstanceHandle::memory_grow`, which encapsulates
    /// this unsafety.
    ///
    /// Ensure that the provided `Store` is not used to get access to any
    /// `Memory` which lives inside it.
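    ///
    /// A sketch of typical usage (hypothetical `memory` and `store` values):
    ///
    /// ```ignore
    /// // Grow by one wasm page; `Ok(None)` means the request was denied by
    /// // the limiter or the memory's maximum, while `Ok(Some(old))` reports
    /// // the old size in bytes.
    /// let old_byte_size = unsafe { memory.grow(1, Some(store))? };
    /// ```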
    pub unsafe fn grow(
        &mut self,
        delta_pages: u64,
        store: Option<&mut dyn Store>,
    ) -> Result<Option<usize>, Error> {
        self.0
            .grow(delta_pages, store)
            .map(|opt| opt.map(|(old, _new)| old))
    }

    /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
    pub fn vmmemory(&mut self) -> VMMemoryDefinition {
        self.0.vmmemory()
    }

    /// Consume the memory, returning its [`MemoryImageSlot`] if any is present.
    /// The image should only be present for a subset of memories created with
    /// [`Memory::new_static()`].
    #[cfg(feature = "pooling-allocator")]
    pub fn unwrap_static_image(mut self) -> MemoryImageSlot {
        let mem = self.0.as_any_mut().downcast_mut::<StaticMemory>().unwrap();
        std::mem::replace(&mut mem.memory_image, MemoryImageSlot::dummy())
    }

    /// If the [`Memory`] is a [`SharedMemory`], unwrap it and return a mutable
    /// reference to that shared memory.
    pub fn as_shared_memory(&mut self) -> Option<&mut SharedMemory> {
        self.0.as_any_mut().downcast_mut::<SharedMemory>()
    }

    /// Implementation of `memory.atomic.notify` for all memories.
    pub fn atomic_notify(&mut self, addr: u64, count: u32) -> Result<u32, Trap> {
        match self.0.as_any_mut().downcast_mut::<SharedMemory>() {
            Some(m) => m.atomic_notify(addr, count),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 4, 4)?;
                Ok(0)
            }
        }
    }

    /// Implementation of `memory.atomic.wait32` for all memories.
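    ///
    /// For non-shared memories this validates the address and then traps with
    /// `Trap::AtomicWaitNonSharedMemory`, since only shared memories can
    /// actually block a thread.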
    pub fn atomic_wait32(
        &mut self,
        addr: u64,
        expected: u32,
        deadline: Option<Instant>,
    ) -> Result<WaitResult, Trap> {
        match self.0.as_any_mut().downcast_mut::<SharedMemory>() {
            Some(m) => m.atomic_wait32(addr, expected, deadline),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 4, 4)?;
                Err(Trap::AtomicWaitNonSharedMemory)
            }
        }
    }

    /// Implementation of `memory.atomic.wait64` for all memories.
    pub fn atomic_wait64(
        &mut self,
        addr: u64,
        expected: u64,
        deadline: Option<Instant>,
    ) -> Result<WaitResult, Trap> {
        match self.0.as_any_mut().downcast_mut::<SharedMemory>() {
            Some(m) => m.atomic_wait64(addr, expected, deadline),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 8, 8)?;
                Err(Trap::AtomicWaitNonSharedMemory)
            }
        }
    }

    /// Returns the range of bytes that WebAssembly should be able to address in
    /// this linear memory. Note that this includes guard pages which wasm can
    /// hit.
    pub fn wasm_accessible(&self) -> Range<usize> {
        self.0.wasm_accessible()
    }
}

/// In configurations where bounds checks are elided in JIT code (because we
/// are using static memories with virtual memory guard pages) this manual
/// check is here so we don't segfault from Rust. For other configurations,
/// these checks are required anyway.
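///
/// A minimal sketch of the checks performed (hypothetical `def` whose
/// `current_length()` is far below `1 << 40`):
///
/// ```ignore
/// // A 4-byte, 4-aligned atomic access at address 3 is misaligned.
/// assert!(matches!(
///     validate_atomic_addr(&def, 3, 4, 4),
///     Err(Trap::HeapMisaligned)
/// ));
/// // An aligned access past the end of memory is out of bounds.
/// assert!(matches!(
///     validate_atomic_addr(&def, 1 << 40, 4, 4),
///     Err(Trap::MemoryOutOfBounds)
/// ));
/// ```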
pub fn validate_atomic_addr(
    def: &VMMemoryDefinition,
    addr: u64,
    access_size: u64,
    access_alignment: u64,
) -> Result<*mut u8, Trap> {
    debug_assert!(access_alignment.is_power_of_two());
    if addr % access_alignment != 0 {
        return Err(Trap::HeapMisaligned);
    }

    let length = u64::try_from(def.current_length()).unwrap();
    if addr.saturating_add(access_size) >= length {
        return Err(Trap::MemoryOutOfBounds);
    }

    Ok(def.base.wrapping_add(addr as usize))
}