wasmtime_runtime/cow.rs
1//! Copy-on-write initialization support: creation of backing images for
2//! modules, and logic to support mapping these backing images into memory.
3
4use crate::sys::vm::{self, MemoryImageSource};
5use crate::{MmapVec, SendSyncPtr};
6use anyhow::Result;
7use std::ffi::c_void;
8use std::ops::Range;
9use std::ptr::NonNull;
10use std::sync::Arc;
11use wasmtime_environ::{
12 DefinedMemoryIndex, MemoryInitialization, MemoryPlan, MemoryStyle, Module, PrimaryMap,
13};
14
15/// Backing images for memories in a module.
16///
17/// This is meant to be built once, when a module is first loaded/constructed,
18/// and then used many times for instantiation.
19pub struct ModuleMemoryImages {
20 memories: PrimaryMap<DefinedMemoryIndex, Option<Arc<MemoryImage>>>,
21}
22
23impl ModuleMemoryImages {
24 /// Get the MemoryImage for a given memory.
25 pub fn get_memory_image(&self, defined_index: DefinedMemoryIndex) -> Option<&Arc<MemoryImage>> {
26 self.memories[defined_index].as_ref()
27 }
28}
29
30/// One backing image for one memory.
31#[derive(Debug, PartialEq)]
32pub struct MemoryImage {
33 /// The platform-specific source of this image.
34 ///
35 /// This might be a mapped `*.cwasm` file or on Unix it could also be a
36 /// `Memfd` as an anonymous file in memory on Linux. In either case this is
37 /// used as the backing-source for the CoW image.
38 source: MemoryImageSource,
39
40 /// Length of image, in bytes.
41 ///
42 /// Note that initial memory size may be larger; leading and trailing zeroes
43 /// are truncated (handled by backing fd).
44 ///
45 /// Must be a multiple of the system page size.
46 len: usize,
47
48 /// Image starts this many bytes into `source`.
49 ///
50 /// This is 0 for anonymous-backed memfd files and is the offset of the
51 /// data section in a `*.cwasm` file for `*.cwasm`-backed images.
52 ///
53 /// Must be a multiple of the system page size.
54 source_offset: u64,
55
56 /// Image starts this many bytes into heap space.
57 ///
58 /// Must be a multiple of the system page size.
59 linear_memory_offset: usize,
60}
61
62impl MemoryImage {
63 fn new(
64 page_size: u32,
65 offset: u64,
66 data: &[u8],
67 mmap: Option<&MmapVec>,
68 ) -> Result<Option<MemoryImage>> {
69 // Sanity-check that various parameters are page-aligned.
70 let len = data.len();
71 assert_eq!(offset % u64::from(page_size), 0);
72 assert_eq!((len as u32) % page_size, 0);
73 let linear_memory_offset = match usize::try_from(offset) {
74 Ok(offset) => offset,
75 Err(_) => return Ok(None),
76 };
77
78 // If a backing `mmap` is present then `data` should be a sub-slice of
79 // the `mmap`. The sanity-checks here double-check that. Additionally
80 // compilation should have ensured that the `data` section is
81 // page-aligned within `mmap`, so that's also all double-checked here.
82 //
83 // Finally if the `mmap` itself comes from a backing file on disk, such
84 // as a `*.cwasm` file, then that's a valid source of data for the
85 // memory image so we simply return referencing that.
86 //
87 // Note that this path is platform-agnostic in the sense of all
88 // platforms we support support memory mapping copy-on-write data from
89 // files, but for now this is still a Linux-specific region of Wasmtime.
90 // Some work will be needed to get this file compiling for macOS and
91 // Windows.
92 if let Some(mmap) = mmap {
93 let start = mmap.as_ptr() as usize;
94 let end = start + mmap.len();
95 let data_start = data.as_ptr() as usize;
96 let data_end = data_start + data.len();
97 assert!(start <= data_start && data_end <= end);
98 assert_eq!((start as u32) % page_size, 0);
99 assert_eq!((data_start as u32) % page_size, 0);
100 assert_eq!((data_end as u32) % page_size, 0);
101 assert_eq!((mmap.original_offset() as u32) % page_size, 0);
102
103 if let Some(file) = mmap.original_file() {
104 if let Some(source) = MemoryImageSource::from_file(file) {
105 return Ok(Some(MemoryImage {
106 source,
107 source_offset: u64::try_from(mmap.original_offset() + (data_start - start))
108 .unwrap(),
109 linear_memory_offset,
110 len,
111 }));
112 }
113 }
114 }
115
116 // If `mmap` doesn't come from a file then platform-specific mechanisms
117 // may be used to place the data in a form that's amenable to an mmap.
118 if let Some(source) = MemoryImageSource::from_data(data)? {
119 return Ok(Some(MemoryImage {
120 source,
121 source_offset: 0,
122 linear_memory_offset,
123 len,
124 }));
125 }
126
127 Ok(None)
128 }
129
130 unsafe fn map_at(&self, base: *mut u8) -> Result<()> {
131 self.source.map_at(
132 base.add(self.linear_memory_offset),
133 self.len,
134 self.source_offset,
135 )?;
136 Ok(())
137 }
138
139 unsafe fn remap_as_zeros_at(&self, base: *mut u8) -> Result<()> {
140 self.source
141 .remap_as_zeros_at(base.add(self.linear_memory_offset), self.len)?;
142 Ok(())
143 }
144}
145
146impl ModuleMemoryImages {
147 /// Create a new `ModuleMemoryImages` for the given module. This can be
148 /// passed in as part of a `InstanceAllocationRequest` to speed up
149 /// instantiation and execution by using copy-on-write-backed memories.
150 pub fn new(
151 module: &Module,
152 wasm_data: &[u8],
153 mmap: Option<&MmapVec>,
154 ) -> Result<Option<ModuleMemoryImages>> {
155 let map = match &module.memory_initialization {
156 MemoryInitialization::Static { map } => map,
157 _ => return Ok(None),
158 };
159 let mut memories = PrimaryMap::with_capacity(map.len());
160 let page_size = crate::page_size() as u32;
161 for (memory_index, init) in map {
162 // mmap-based-initialization only works for defined memories with a
163 // known starting point of all zeros, so bail out if the mmeory is
164 // imported.
165 let defined_memory = match module.defined_memory_index(memory_index) {
166 Some(idx) => idx,
167 None => return Ok(None),
168 };
169
170 // If there's no initialization for this memory known then we don't
171 // need an image for the memory so push `None` and move on.
172 let init = match init {
173 Some(init) => init,
174 None => {
175 memories.push(None);
176 continue;
177 }
178 };
179
180 // Get the image for this wasm module as a subslice of `wasm_data`,
181 // and then use that to try to create the `MemoryImage`. If this
182 // creation files then we fail creating `ModuleMemoryImages` since this
183 // memory couldn't be represented.
184 let data = &wasm_data[init.data.start as usize..init.data.end as usize];
185 let image = match MemoryImage::new(page_size, init.offset, data, mmap)? {
186 Some(image) => image,
187 None => return Ok(None),
188 };
189
190 let idx = memories.push(Some(Arc::new(image)));
191 assert_eq!(idx, defined_memory);
192 }
193
194 Ok(Some(ModuleMemoryImages { memories }))
195 }
196}
197
198/// Slot management of a copy-on-write image which can be reused for the pooling
199/// allocator.
200///
201/// This data structure manages a slot of linear memory, primarily in the
202/// pooling allocator, which optionally has a contiguous memory image in the
203/// middle of it. Pictorially this data structure manages a virtual memory
204/// region that looks like:
205///
206/// ```text
207/// +--------------------+-------------------+--------------+--------------+
208/// | anonymous | optional | anonymous | PROT_NONE |
209/// | zero | memory | zero | memory |
210/// | memory | image | memory | |
211/// +--------------------+-------------------+--------------+--------------+
212/// | <------+---------->
213/// |<-----+------------> \
214/// | \ image.len
215/// | \
216/// | image.linear_memory_offset
217/// |
218/// \
219/// self.base is this virtual address
220///
221/// <------------------+------------------------------------------------>
222/// \
223/// static_size
224///
225/// <------------------+---------------------------------->
226/// \
227/// accessible
228/// ```
229///
230/// When a `MemoryImageSlot` is created it's told what the `static_size` and
231/// `accessible` limits are. Initially there is assumed to be no image in linear
232/// memory.
233///
234/// When `MemoryImageSlot::instantiate` is called then the method will perform
235/// a "synchronization" to take the image from its prior state to the new state
236/// for the image specified. The first instantiation for example will mmap the
237/// heap image into place. Upon reuse of a slot nothing happens except possibly
238/// shrinking `self.accessible`. When a new image is used then the old image is
239/// mapped to anonymous zero memory and then the new image is mapped in place.
240///
241/// A `MemoryImageSlot` is either `dirty` or it isn't. When a `MemoryImageSlot`
242/// is dirty then it is assumed that any memory beneath `self.accessible` could
243/// have any value. Instantiation cannot happen into a `dirty` slot, however, so
244/// the `MemoryImageSlot::clear_and_remain_ready` returns this memory back to
245/// its original state to mark `dirty = false`. This is done by resetting all
246/// anonymous memory back to zero and the image itself back to its initial
247/// contents.
248///
249/// On Linux this is achieved with the `madvise(MADV_DONTNEED)` syscall. This
250/// syscall will release the physical pages back to the OS but retain the
251/// original mappings, effectively resetting everything back to its initial
252/// state. Non-linux platforms will replace all memory below `self.accessible`
253/// with a fresh zero'd mmap, meaning that reuse is effectively not supported.
254#[derive(Debug)]
255pub struct MemoryImageSlot {
256 /// The base address in virtual memory of the actual heap memory.
257 ///
258 /// Bytes at this address are what is seen by the Wasm guest code.
259 base: SendSyncPtr<u8>,
260
261 /// The maximum static memory size which `self.accessible` can grow to.
262 static_size: usize,
263
264 /// An optional image that is currently being used in this linear memory.
265 ///
266 /// This can be `None` in which case memory is originally all zeros. When
267 /// `Some` the image describes where it's located within the image.
268 image: Option<Arc<MemoryImage>>,
269
270 /// The size of the heap that is readable and writable.
271 ///
272 /// Note that this may extend beyond the actual linear memory heap size in
273 /// the case of dynamic memories in use. Memory accesses to memory below
274 /// `self.accessible` may still page fault as pages are lazily brought in
275 /// but the faults will always be resolved by the kernel.
276 accessible: usize,
277
278 /// Whether this slot may have "dirty" pages (pages written by an
279 /// instantiation). Set by `instantiate()` and cleared by
280 /// `clear_and_remain_ready()`, and used in assertions to ensure
281 /// those methods are called properly.
282 ///
283 /// Invariant: if !dirty, then this memory slot contains a clean
284 /// CoW mapping of `image`, if `Some(..)`, and anonymous-zero
285 /// memory beyond the image up to `static_size`. The addresses
286 /// from offset 0 to `self.accessible` are R+W and set to zero or the
287 /// initial image content, as appropriate. Everything between
288 /// `self.accessible` and `self.static_size` is inaccessible.
289 dirty: bool,
290
291 /// Whether this MemoryImageSlot is responsible for mapping anonymous
292 /// memory (to hold the reservation while overwriting mappings
293 /// specific to this slot) in place when it is dropped. Default
294 /// on, unless the caller knows what they are doing.
295 clear_on_drop: bool,
296}
297
298impl MemoryImageSlot {
299 /// Create a new MemoryImageSlot. Assumes that there is an anonymous
300 /// mmap backing in the given range to start.
301 ///
302 /// The `accessible` parameter descibes how much of linear memory is
303 /// already mapped as R/W with all zero-bytes. The `static_size` value is
304 /// the maximum size of this image which `accessible` cannot grow beyond,
305 /// and all memory from `accessible` from `static_size` should be mapped as
306 /// `PROT_NONE` backed by zero-bytes.
307 pub(crate) fn create(base_addr: *mut c_void, accessible: usize, static_size: usize) -> Self {
308 MemoryImageSlot {
309 base: NonNull::new(base_addr.cast()).unwrap().into(),
310 static_size,
311 accessible,
312 image: None,
313 dirty: false,
314 clear_on_drop: true,
315 }
316 }
317
318 #[cfg(feature = "pooling-allocator")]
319 pub(crate) fn dummy() -> MemoryImageSlot {
320 MemoryImageSlot {
321 // This pointer isn't ever actually used so its value doesn't
322 // matter but we need to satisfy `NonNull` requirement so create a
323 // `dangling` pointer as a sentinel that should cause problems if
324 // it's actually used.
325 base: NonNull::dangling().into(),
326 static_size: 0,
327 image: None,
328 accessible: 0,
329 dirty: false,
330 clear_on_drop: false,
331 }
332 }
333
334 /// Inform the MemoryImageSlot that it should *not* clear the underlying
335 /// address space when dropped. This should be used only when the
336 /// caller will clear or reuse the address space in some other
337 /// way.
338 pub(crate) fn no_clear_on_drop(&mut self) {
339 self.clear_on_drop = false;
340 }
341
342 pub(crate) fn set_heap_limit(&mut self, size_bytes: usize) -> Result<()> {
343 assert!(size_bytes <= self.static_size);
344
345 // If the heap limit already addresses accessible bytes then no syscalls
346 // are necessary since the data is already mapped into the process and
347 // waiting to go.
348 //
349 // This is used for "dynamic" memories where memory is not always
350 // decommitted during recycling (but it's still always reset).
351 if size_bytes <= self.accessible {
352 return Ok(());
353 }
354
355 // Otherwise use `mprotect` to make the new pages read/write.
356 self.set_protection(self.accessible..size_bytes, true)?;
357 self.accessible = size_bytes;
358
359 Ok(())
360 }
361
362 /// Prepares this slot for the instantiation of a new instance with the
363 /// provided linear memory image.
364 ///
365 /// The `initial_size_bytes` parameter indicates the required initial size
366 /// of the heap for the instance. The `maybe_image` is an optional initial
367 /// image for linear memory to contains. The `style` is the way compiled
368 /// code will be accessing this memory.
369 ///
370 /// The purpose of this method is to take a previously pristine slot
371 /// (`!self.dirty`) and transform its prior state into state necessary for
372 /// the given parameters. This could include, for example:
373 ///
374 /// * More memory may be made read/write if `initial_size_bytes` is larger
375 /// than `self.accessible`.
376 /// * For `MemoryStyle::Static` linear memory may be made `PROT_NONE` if
377 /// `self.accessible` is larger than `initial_size_bytes`.
378 /// * If no image was previously in place or if the wrong image was
379 /// previously in place then `mmap` may be used to setup the initial
380 /// image.
381 pub(crate) fn instantiate(
382 &mut self,
383 initial_size_bytes: usize,
384 maybe_image: Option<&Arc<MemoryImage>>,
385 plan: &MemoryPlan,
386 ) -> Result<()> {
387 assert!(!self.dirty);
388 assert!(initial_size_bytes <= self.static_size);
389
390 // First order of business is to blow away the previous linear memory
391 // image if it doesn't match the image specified here. If one is
392 // detected then it's reset with anonymous memory which means that all
393 // of memory up to `self.accessible` will now be read/write and zero.
394 //
395 // Note that this intentionally a "small mmap" which only covers the
396 // extent of the prior initialization image in order to preserve
397 // resident memory that might come before or after the image.
398 if self.image.as_ref() != maybe_image {
399 self.remove_image()?;
400 }
401
402 // The next order of business is to ensure that `self.accessible` is
403 // appropriate. First up is to grow the read/write portion of memory if
404 // it's not large enough to accommodate `initial_size_bytes`.
405 if self.accessible < initial_size_bytes {
406 self.set_protection(self.accessible..initial_size_bytes, true)?;
407 self.accessible = initial_size_bytes;
408 }
409
410 // If (1) the accessible region is not in its initial state, and (2) the
411 // memory relies on virtual memory at all (i.e. has offset guard pages
412 // and/or is static), then we need to reset memory protections. Put
413 // another way, the only time it is safe to not reset protections is
414 // when we are using dynamic memory without any guard pages.
415 if initial_size_bytes < self.accessible
416 && (plan.offset_guard_size > 0 || matches!(plan.style, MemoryStyle::Static { .. }))
417 {
418 self.set_protection(initial_size_bytes..self.accessible, false)?;
419 self.accessible = initial_size_bytes;
420 }
421
422 // Now that memory is sized appropriately the final operation is to
423 // place the new image into linear memory. Note that this operation is
424 // skipped if `self.image` matches `maybe_image`.
425 assert!(initial_size_bytes <= self.accessible);
426 if self.image.as_ref() != maybe_image {
427 if let Some(image) = maybe_image.as_ref() {
428 assert!(
429 image.linear_memory_offset.checked_add(image.len).unwrap()
430 <= initial_size_bytes
431 );
432 if image.len > 0 {
433 unsafe {
434 image.map_at(self.base.as_ptr())?;
435 }
436 }
437 }
438 self.image = maybe_image.cloned();
439 }
440
441 // Flag ourselves as `dirty` which means that the next operation on this
442 // slot is required to be `clear_and_remain_ready`.
443 self.dirty = true;
444
445 Ok(())
446 }
447
448 pub(crate) fn remove_image(&mut self) -> Result<()> {
449 if let Some(image) = &self.image {
450 unsafe {
451 image.remap_as_zeros_at(self.base.as_ptr())?;
452 }
453 self.image = None;
454 }
455 Ok(())
456 }
457
458 /// Resets this linear memory slot back to a "pristine state".
459 ///
460 /// This will reset the memory back to its original contents on Linux or
461 /// reset the contents back to zero on other platforms. The `keep_resident`
462 /// argument is the maximum amount of memory to keep resident in this
463 /// process's memory on Linux. Up to that much memory will be `memset` to
464 /// zero where the rest of it will be reset or released with `madvise`.
465 #[allow(dead_code)] // ignore warnings as this is only used in some cfgs
466 pub(crate) fn clear_and_remain_ready(&mut self, keep_resident: usize) -> Result<()> {
467 assert!(self.dirty);
468
469 unsafe {
470 self.reset_all_memory_contents(keep_resident)?;
471 }
472
473 self.dirty = false;
474 Ok(())
475 }
476
477 #[allow(dead_code)] // ignore warnings as this is only used in some cfgs
478 unsafe fn reset_all_memory_contents(&mut self, keep_resident: usize) -> Result<()> {
479 if !vm::supports_madvise_dontneed() {
480 // If we're not on Linux then there's no generic platform way to
481 // reset memory back to its original state, so instead reset memory
482 // back to entirely zeros with an anonymous backing.
483 //
484 // Additionally the previous image, if any, is dropped here
485 // since it's no longer applicable to this mapping.
486 return self.reset_with_anon_memory();
487 }
488
489 match &self.image {
490 Some(image) => {
491 assert!(self.accessible >= image.linear_memory_offset + image.len);
492 if image.linear_memory_offset < keep_resident {
493 // If the image starts below the `keep_resident` then
494 // memory looks something like this:
495 //
496 // up to `keep_resident` bytes
497 // |
498 // +--------------------------+ remaining_memset
499 // | | /
500 // <--------------> <------->
501 //
502 // image_end
503 // 0 linear_memory_offset | accessible
504 // | | | |
505 // +----------------+--------------+---------+--------+
506 // | dirty memory | image | dirty memory |
507 // +----------------+--------------+---------+--------+
508 //
509 // <------+-------> <-----+-----> <---+---> <--+--->
510 // | | | |
511 // | | | |
512 // memset (1) / | madvise (4)
513 // mmadvise (2) /
514 // /
515 // memset (3)
516 //
517 //
518 // In this situation there are two disjoint regions that are
519 // `memset` manually to zero. Note that `memset (3)` may be
520 // zero bytes large. Furthermore `madvise (4)` may also be
521 // zero bytes large.
522
523 let image_end = image.linear_memory_offset + image.len;
524 let mem_after_image = self.accessible - image_end;
525 let remaining_memset =
526 (keep_resident - image.linear_memory_offset).min(mem_after_image);
527
528 // This is memset (1)
529 std::ptr::write_bytes(self.base.as_ptr(), 0u8, image.linear_memory_offset);
530
531 // This is madvise (2)
532 self.madvise_reset(image.linear_memory_offset, image.len)?;
533
534 // This is memset (3)
535 std::ptr::write_bytes(self.base.as_ptr().add(image_end), 0u8, remaining_memset);
536
537 // This is madvise (4)
538 self.madvise_reset(
539 image_end + remaining_memset,
540 mem_after_image - remaining_memset,
541 )?;
542 } else {
543 // If the image starts after the `keep_resident` threshold
544 // then we memset the start of linear memory and then use
545 // madvise below for the rest of it, including the image.
546 //
547 // 0 keep_resident accessible
548 // | | |
549 // +----------------+---+----------+------------------+
550 // | dirty memory | image | dirty memory |
551 // +----------------+---+----------+------------------+
552 //
553 // <------+-------> <-------------+----------------->
554 // | |
555 // | |
556 // memset (1) madvise (2)
557 //
558 // Here only a single memset is necessary since the image
559 // started after the threshold which we're keeping resident.
560 // Note that the memset may be zero bytes here.
561
562 // This is memset (1)
563 std::ptr::write_bytes(self.base.as_ptr(), 0u8, keep_resident);
564
565 // This is madvise (2)
566 self.madvise_reset(keep_resident, self.accessible - keep_resident)?;
567 }
568 }
569
570 // If there's no memory image for this slot then memset the first
571 // bytes in the memory back to zero while using `madvise` to purge
572 // the rest.
573 None => {
574 let size_to_memset = keep_resident.min(self.accessible);
575 std::ptr::write_bytes(self.base.as_ptr(), 0u8, size_to_memset);
576 self.madvise_reset(size_to_memset, self.accessible - size_to_memset)?;
577 }
578 }
579
580 Ok(())
581 }
582
583 #[allow(dead_code)] // ignore warnings as this is only used in some cfgs
584 unsafe fn madvise_reset(&self, base: usize, len: usize) -> Result<()> {
585 assert!(base + len <= self.accessible);
586 if len == 0 {
587 return Ok(());
588 }
589 vm::madvise_dontneed(self.base.as_ptr().add(base), len)?;
590 Ok(())
591 }
592
593 fn set_protection(&self, range: Range<usize>, readwrite: bool) -> Result<()> {
594 assert!(range.start <= range.end);
595 assert!(range.end <= self.static_size);
596 if range.len() == 0 {
597 return Ok(());
598 }
599
600 unsafe {
601 let start = self.base.as_ptr().add(range.start);
602 if readwrite {
603 vm::expose_existing_mapping(start, range.len())?;
604 } else {
605 vm::hide_existing_mapping(start, range.len())?;
606 }
607 }
608
609 Ok(())
610 }
611
612 pub(crate) fn has_image(&self) -> bool {
613 self.image.is_some()
614 }
615
616 #[allow(dead_code)] // ignore warnings as this is only used in some cfgs
617 pub(crate) fn is_dirty(&self) -> bool {
618 self.dirty
619 }
620
621 /// Map anonymous zeroed memory across the whole slot,
622 /// inaccessible. Used both during instantiate and during drop.
623 fn reset_with_anon_memory(&mut self) -> Result<()> {
624 if self.static_size == 0 {
625 assert!(self.image.is_none());
626 assert_eq!(self.accessible, 0);
627 return Ok(());
628 }
629
630 unsafe {
631 vm::erase_existing_mapping(self.base.as_ptr(), self.static_size)?;
632 }
633
634 self.image = None;
635 self.accessible = 0;
636
637 Ok(())
638 }
639}
640
641impl Drop for MemoryImageSlot {
642 fn drop(&mut self) {
643 // The MemoryImageSlot may be dropped if there is an error during
644 // instantiation: for example, if a memory-growth limiter
645 // disallows a guest from having a memory of a certain size,
646 // after we've already initialized the MemoryImageSlot.
647 //
648 // We need to return this region of the large pool mmap to a
649 // safe state (with no module-specific mappings). The
650 // MemoryImageSlot will not be returned to the MemoryPool, so a new
651 // MemoryImageSlot will be created and overwrite the mappings anyway
652 // on the slot's next use; but for safety and to avoid
653 // resource leaks it's better not to have stale mappings to a
654 // possibly-otherwise-dead module's image.
655 //
656 // To "wipe the slate clean", let's do a mmap of anonymous
657 // memory over the whole region, with PROT_NONE. Note that we
658 // *can't* simply munmap, because that leaves a hole in the
659 // middle of the pooling allocator's big memory area that some
660 // other random mmap may swoop in and take, to be trampled
661 // over by the next MemoryImageSlot later.
662 //
663 // Since we're in drop(), we can't sanely return an error if
664 // this mmap fails. Instead though the result is unwrapped here to
665 // trigger a panic if something goes wrong. Otherwise if this
666 // reset-the-mapping fails then on reuse it might be possible, depending
667 // on precisely where errors happened, that stale memory could get
668 // leaked through.
669 //
670 // The exception to all of this is if the `clear_on_drop` flag
671 // (which is set by default) is false. If so, the owner of
672 // this MemoryImageSlot has indicated that it will clean up in some
673 // other way.
674 if self.clear_on_drop {
675 self.reset_with_anon_memory().unwrap();
676 }
677 }
678}
679
680#[cfg(all(test, target_os = "linux", not(miri)))]
681mod test {
682 use std::sync::Arc;
683
684 use super::{MemoryImage, MemoryImageSlot, MemoryImageSource, MemoryPlan, MemoryStyle};
685 use crate::mmap::Mmap;
686 use anyhow::Result;
687 use wasmtime_environ::Memory;
688
689 fn create_memfd_with_data(offset: usize, data: &[u8]) -> Result<MemoryImage> {
690 // Offset must be page-aligned.
691 let page_size = crate::page_size();
692 assert_eq!(offset & (page_size - 1), 0);
693
694 // The image length is rounded up to the nearest page size
695 let image_len = (data.len() + page_size - 1) & !(page_size - 1);
696
697 Ok(MemoryImage {
698 source: MemoryImageSource::from_data(data)?.unwrap(),
699 len: image_len,
700 source_offset: 0,
701 linear_memory_offset: offset,
702 })
703 }
704
705 fn dummy_memory_plan(style: MemoryStyle) -> MemoryPlan {
706 MemoryPlan {
707 style,
708 memory: Memory {
709 minimum: 0,
710 maximum: None,
711 shared: false,
712 memory64: false,
713 },
714 pre_guard_size: 0,
715 offset_guard_size: 0,
716 }
717 }
718
719 #[test]
720 fn instantiate_no_image() {
721 let plan = dummy_memory_plan(MemoryStyle::Static { bound: 4 << 30 });
722 // 4 MiB mmap'd area, not accessible
723 let mut mmap = Mmap::accessible_reserved(0, 4 << 20).unwrap();
724 // Create a MemoryImageSlot on top of it
725 let mut memfd = MemoryImageSlot::create(mmap.as_mut_ptr() as *mut _, 0, 4 << 20);
726 memfd.no_clear_on_drop();
727 assert!(!memfd.is_dirty());
728 // instantiate with 64 KiB initial size
729 memfd.instantiate(64 << 10, None, &plan).unwrap();
730 assert!(memfd.is_dirty());
731 // We should be able to access this 64 KiB (try both ends) and
732 // it should consist of zeroes.
733 let slice = unsafe { mmap.slice_mut(0..65536) };
734 assert_eq!(0, slice[0]);
735 assert_eq!(0, slice[65535]);
736 slice[1024] = 42;
737 assert_eq!(42, slice[1024]);
738 // grow the heap
739 memfd.set_heap_limit(128 << 10).unwrap();
740 let slice = unsafe { mmap.slice(0..1 << 20) };
741 assert_eq!(42, slice[1024]);
742 assert_eq!(0, slice[131071]);
743 // instantiate again; we should see zeroes, even as the
744 // reuse-anon-mmap-opt kicks in
745 memfd.clear_and_remain_ready(0).unwrap();
746 assert!(!memfd.is_dirty());
747 memfd.instantiate(64 << 10, None, &plan).unwrap();
748 let slice = unsafe { mmap.slice(0..65536) };
749 assert_eq!(0, slice[1024]);
750 }
751
752 #[test]
753 fn instantiate_image() {
754 let plan = dummy_memory_plan(MemoryStyle::Static { bound: 4 << 30 });
755 // 4 MiB mmap'd area, not accessible
756 let mut mmap = Mmap::accessible_reserved(0, 4 << 20).unwrap();
757 // Create a MemoryImageSlot on top of it
758 let mut memfd = MemoryImageSlot::create(mmap.as_mut_ptr() as *mut _, 0, 4 << 20);
759 memfd.no_clear_on_drop();
760 // Create an image with some data.
761 let image = Arc::new(create_memfd_with_data(4096, &[1, 2, 3, 4]).unwrap());
762 // Instantiate with this image
763 memfd.instantiate(64 << 10, Some(&image), &plan).unwrap();
764 assert!(memfd.has_image());
765 let slice = unsafe { mmap.slice_mut(0..65536) };
766 assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
767 slice[4096] = 5;
768 // Clear and re-instantiate same image
769 memfd.clear_and_remain_ready(0).unwrap();
770 memfd.instantiate(64 << 10, Some(&image), &plan).unwrap();
771 let slice = unsafe { mmap.slice_mut(0..65536) };
772 // Should not see mutation from above
773 assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
774 // Clear and re-instantiate no image
775 memfd.clear_and_remain_ready(0).unwrap();
776 memfd.instantiate(64 << 10, None, &plan).unwrap();
777 assert!(!memfd.has_image());
778 let slice = unsafe { mmap.slice_mut(0..65536) };
779 assert_eq!(&[0, 0, 0, 0], &slice[4096..4100]);
780 // Clear and re-instantiate image again
781 memfd.clear_and_remain_ready(0).unwrap();
782 memfd.instantiate(64 << 10, Some(&image), &plan).unwrap();
783 let slice = unsafe { mmap.slice_mut(0..65536) };
784 assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
785 // Create another image with different data.
786 let image2 = Arc::new(create_memfd_with_data(4096, &[10, 11, 12, 13]).unwrap());
787 memfd.clear_and_remain_ready(0).unwrap();
788 memfd.instantiate(128 << 10, Some(&image2), &plan).unwrap();
789 let slice = unsafe { mmap.slice_mut(0..65536) };
790 assert_eq!(&[10, 11, 12, 13], &slice[4096..4100]);
791 // Instantiate the original image again; we should notice it's
792 // a different image and not reuse the mappings.
793 memfd.clear_and_remain_ready(0).unwrap();
794 memfd.instantiate(64 << 10, Some(&image), &plan).unwrap();
795 let slice = unsafe { mmap.slice_mut(0..65536) };
796 assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
797 }
798
799 #[test]
800 #[cfg(target_os = "linux")]
801 fn memset_instead_of_madvise() {
802 let plan = dummy_memory_plan(MemoryStyle::Static { bound: 100 });
803 let mut mmap = Mmap::accessible_reserved(0, 4 << 20).unwrap();
804 let mut memfd = MemoryImageSlot::create(mmap.as_mut_ptr() as *mut _, 0, 4 << 20);
805 memfd.no_clear_on_drop();
806
807 // Test basics with the image
808 for image_off in [0, 4096, 8 << 10] {
809 let image = Arc::new(create_memfd_with_data(image_off, &[1, 2, 3, 4]).unwrap());
810 for amt_to_memset in [0, 4096, 10 << 12, 1 << 20, 10 << 20] {
811 memfd.instantiate(64 << 10, Some(&image), &plan).unwrap();
812 assert!(memfd.has_image());
813 let slice = unsafe { mmap.slice_mut(0..64 << 10) };
814 if image_off > 0 {
815 assert_eq!(slice[image_off - 1], 0);
816 }
817 assert_eq!(slice[image_off + 5], 0);
818 assert_eq!(&[1, 2, 3, 4], &slice[image_off..][..4]);
819 slice[image_off] = 5;
820 assert_eq!(&[5, 2, 3, 4], &slice[image_off..][..4]);
821 memfd.clear_and_remain_ready(amt_to_memset).unwrap();
822 }
823 }
824
825 // Test without an image
826 for amt_to_memset in [0, 4096, 10 << 12, 1 << 20, 10 << 20] {
827 memfd.instantiate(64 << 10, None, &plan).unwrap();
828 let mem = unsafe { mmap.slice_mut(0..64 << 10) };
829 for chunk in mem.chunks_mut(1024) {
830 assert_eq!(chunk[0], 0);
831 chunk[0] = 5;
832 }
833 memfd.clear_and_remain_ready(amt_to_memset).unwrap();
834 }
835 }
836
837 #[test]
838 #[cfg(target_os = "linux")]
839 fn dynamic() {
840 let plan = dummy_memory_plan(MemoryStyle::Dynamic { reserve: 200 });
841
842 let mut mmap = Mmap::accessible_reserved(0, 4 << 20).unwrap();
843 let mut memfd = MemoryImageSlot::create(mmap.as_mut_ptr() as *mut _, 0, 4 << 20);
844 memfd.no_clear_on_drop();
845 let image = Arc::new(create_memfd_with_data(4096, &[1, 2, 3, 4]).unwrap());
846 let initial = 64 << 10;
847
848 // Instantiate the image and test that memory remains accessible after
849 // it's cleared.
850 memfd.instantiate(initial, Some(&image), &plan).unwrap();
851 assert!(memfd.has_image());
852 let slice = unsafe { mmap.slice_mut(0..(64 << 10) + 4096) };
853 assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
854 slice[4096] = 5;
855 assert_eq!(&[5, 2, 3, 4], &slice[4096..4100]);
856 memfd.clear_and_remain_ready(0).unwrap();
857 assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
858
859 // Re-instantiate make sure it preserves memory. Grow a bit and set data
860 // beyond the initial size.
861 memfd.instantiate(initial, Some(&image), &plan).unwrap();
862 assert_eq!(&[1, 2, 3, 4], &slice[4096..4100]);
863 memfd.set_heap_limit(initial * 2).unwrap();
864 assert_eq!(&[0, 0], &slice[initial..initial + 2]);
865 slice[initial] = 100;
866 assert_eq!(&[100, 0], &slice[initial..initial + 2]);
867 memfd.clear_and_remain_ready(0).unwrap();
868
869 // Test that memory is still accessible, but it's been reset
870 assert_eq!(&[0, 0], &slice[initial..initial + 2]);
871
872 // Instantiate again, and again memory beyond the initial size should
873 // still be accessible. Grow into it again and make sure it works.
874 memfd.instantiate(initial, Some(&image), &plan).unwrap();
875 assert_eq!(&[0, 0], &slice[initial..initial + 2]);
876 memfd.set_heap_limit(initial * 2).unwrap();
877 assert_eq!(&[0, 0], &slice[initial..initial + 2]);
878 slice[initial] = 100;
879 assert_eq!(&[100, 0], &slice[initial..initial + 2]);
880 memfd.clear_and_remain_ready(0).unwrap();
881
882 // Reset the image to none and double-check everything is back to zero
883 memfd.instantiate(64 << 10, None, &plan).unwrap();
884 assert!(!memfd.has_image());
885 assert_eq!(&[0, 0, 0, 0], &slice[4096..4100]);
886 assert_eq!(&[0, 0], &slice[initial..initial + 2]);
887 }
888}