wasmer_vm/
mmap.rs

// This file contains code from external sources.
// Attributions: https://github.com/wasmerio/wasmer/blob/main/docs/ATTRIBUTIONS.md

//! Low-level abstraction for allocating and managing zero-filled pages
//! of memory.

use more_asserts::assert_le;
use std::io;
use std::ptr;
use std::slice;

/// Round `size` up to the nearest multiple of `page_size`.
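/// For example, with 4096-byte pages, 1 rounds up to 4096 and 4097 rounds up
/// to 8192; adding `page_size - 1` and then masking off the low bits avoids a
/// division (`page_size` must be a power of two).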
fn round_up_to_page_size(size: usize, page_size: usize) -> usize {
    (size + (page_size - 1)) & !(page_size - 1)
}

/// A simple struct consisting of a page-aligned pointer to page-aligned
/// and initially-zeroed memory and a length.
#[derive(Debug)]
pub struct Mmap {
    // Note that this is stored as a `usize` instead of a `*const` or `*mut`
    // pointer to allow this structure to be natively `Send` and `Sync` without
    // `unsafe impl`. This type is sendable across threads and shareable since
    // the coordination all happens at the OS layer.
    ptr: usize,
    total_size: usize,
    accessible_size: usize,
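    // When set, the mapping is flushed back to its backing file with `msync`
    // before it is unmapped in `Drop`. Only set for file-backed `Shared`
    // mappings (see `accessible_reserved` on Unix).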
    sync_on_drop: bool,
}

/// The type of mmap to create
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum MmapType {
    /// The memory is private to the process and not shared with other processes.
    Private,
    /// The memory is shared with other processes. This is only supported on Unix.
    /// When the memory is flushed it will update the file data.
    Shared,
}

impl Mmap {
    /// Construct a new empty instance of `Mmap`.
    pub fn new() -> Self {
        // Rust's slices require non-null pointers, even when empty. `Vec`
        // contains code to create a non-null dangling pointer value when
        // constructed empty, so we reuse that here.
        let empty = Vec::<u8>::new();
        Self {
            ptr: empty.as_ptr() as usize,
            total_size: 0,
            accessible_size: 0,
            sync_on_drop: false,
        }
    }

    /// Create a new `Mmap` pointing to at least `size` bytes of page-aligned accessible memory.
    pub fn with_at_least(size: usize) -> Result<Self, String> {
        let page_size = region::page::size();
        let rounded_size = round_up_to_page_size(size, page_size);
        Self::accessible_reserved(rounded_size, rounded_size, None, MmapType::Private)
    }

    /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
    /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
    /// must be native page-size multiples.
    #[cfg(not(target_os = "windows"))]
    pub fn accessible_reserved(
        mut accessible_size: usize,
        mapping_size: usize,
        mut backing_file: Option<std::path::PathBuf>,
        memory_type: MmapType,
    ) -> Result<Self, String> {
        use std::os::fd::IntoRawFd;

        let page_size = region::page::size();
        assert_le!(accessible_size, mapping_size);
        assert_eq!(mapping_size & (page_size - 1), 0);
        assert_eq!(accessible_size & (page_size - 1), 0);

        // Mmap may return EINVAL if the size is zero, so just
        // special-case that.
        if mapping_size == 0 {
            return Ok(Self::new());
        }

        // If there is a backing file, resize the file so that it is at least
        // `mapping_size` bytes.
        let mut memory_fd = -1;
        if let Some(backing_file_path) = &mut backing_file {
            let file = std::fs::OpenOptions::new()
                .read(true)
                .write(true)
                .open(&backing_file_path)
                .map_err(|e| e.to_string())?;

            let mut backing_file_accessible = backing_file_path.clone();
            backing_file_accessible.set_extension("accessible");

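            // The `.accessible` sidecar file records how many bytes of the
            // backing file held data before it was grown to `mapping_size`,
            // so that a later re-open can restore the correct accessible size.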
            let len = file.metadata().map_err(|e| e.to_string())?.len() as usize;
            if len < mapping_size {
                std::fs::write(&backing_file_accessible, format!("{len}").as_bytes()).ok();

                file.set_len(mapping_size as u64)
                    .map_err(|e| e.to_string())?;
            }

            if backing_file_accessible.exists() {
                let accessible = std::fs::read_to_string(&backing_file_accessible)
                    .map_err(|e| e.to_string())?
                    .parse::<usize>()
                    .map_err(|e| e.to_string())?;
                accessible_size = accessible_size.max(accessible);
            } else {
                accessible_size = accessible_size.max(len);
            }

            accessible_size = accessible_size.min(mapping_size);
            memory_fd = file.into_raw_fd();
        }

        // Compute the flags
        let mut flags = match memory_fd {
            fd if fd < 0 => libc::MAP_ANON,
            _ => libc::MAP_FILE,
        };
        flags |= match memory_type {
            MmapType::Private => libc::MAP_PRIVATE,
            MmapType::Shared => libc::MAP_SHARED,
        };

        Ok(if accessible_size == mapping_size {
            // Allocate a single read-write region at once.
            let ptr = unsafe {
                libc::mmap(
                    ptr::null_mut(),
                    mapping_size,
                    libc::PROT_READ | libc::PROT_WRITE,
                    flags,
                    memory_fd,
                    0,
                )
            };
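            // `mmap` signals failure by returning `MAP_FAILED` (a pointer with
            // all bits set), not a null pointer.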
            if ptr as isize == -1_isize {
                return Err(io::Error::last_os_error().to_string());
            }

            Self {
                ptr: ptr as usize,
                total_size: mapping_size,
                accessible_size,
                sync_on_drop: memory_fd != -1 && memory_type == MmapType::Shared,
            }
        } else {
            // Reserve the mapping size.
            let ptr = unsafe {
                libc::mmap(
                    ptr::null_mut(),
                    mapping_size,
                    libc::PROT_NONE,
                    flags,
                    memory_fd,
                    0,
                )
            };
            if ptr as isize == -1_isize {
                return Err(io::Error::last_os_error().to_string());
            }

            let mut result = Self {
                ptr: ptr as usize,
                total_size: mapping_size,
                accessible_size,
                sync_on_drop: memory_fd != -1 && memory_type == MmapType::Shared,
            };

            if accessible_size != 0 {
                // Commit the accessible size.
                result.make_accessible(0, accessible_size)?;
            }

            result
        })
    }

    /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
    /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
    /// must be native page-size multiples.
    #[cfg(target_os = "windows")]
    pub fn accessible_reserved(
        accessible_size: usize,
        mapping_size: usize,
        _backing_file: Option<std::path::PathBuf>,
        _memory_type: MmapType,
    ) -> Result<Self, String> {
        use windows_sys::Win32::System::Memory::{
            VirtualAlloc, MEM_COMMIT, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE,
        };

        let page_size = region::page::size();
        assert_le!(accessible_size, mapping_size);
        assert_eq!(mapping_size & (page_size - 1), 0);
        assert_eq!(accessible_size & (page_size - 1), 0);

        // VirtualAlloc may return ERROR_INVALID_PARAMETER if the size is zero,
        // so just special-case that.
        if mapping_size == 0 {
            return Ok(Self::new());
        }

        Ok(if accessible_size == mapping_size {
            // Allocate a single read-write region at once.
            let ptr = unsafe {
                VirtualAlloc(
                    ptr::null_mut(),
                    mapping_size,
                    MEM_RESERVE | MEM_COMMIT,
                    PAGE_READWRITE,
                )
            };
            if ptr.is_null() {
                return Err(io::Error::last_os_error().to_string());
            }

            Self {
                ptr: ptr as usize,
                total_size: mapping_size,
                accessible_size,
                sync_on_drop: false,
            }
        } else {
            // Reserve the mapping size.
            let ptr =
                unsafe { VirtualAlloc(ptr::null_mut(), mapping_size, MEM_RESERVE, PAGE_NOACCESS) };
            if ptr.is_null() {
                return Err(io::Error::last_os_error().to_string());
            }

            let mut result = Self {
                ptr: ptr as usize,
                total_size: mapping_size,
                accessible_size,
                sync_on_drop: false,
            };

            if accessible_size != 0 {
                // Commit the accessible size.
                result.make_accessible(0, accessible_size)?;
            }

            result
        })
    }

    /// Make the memory starting at `start` and extending for `len` bytes accessible.
    /// `start` and `len` must be native page-size multiples and describe a range within
    /// `self`'s reserved memory.
    #[cfg(not(target_os = "windows"))]
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
        let page_size = region::page::size();
        assert_eq!(start & (page_size - 1), 0);
        assert_eq!(len & (page_size - 1), 0);
        assert_le!(len, self.total_size);
        assert_le!(start, self.total_size - len);

        // Commit the accessible size.
        let ptr = self.ptr as *const u8;
        unsafe { region::protect(ptr.add(start), len, region::Protection::READ_WRITE) }
            .map_err(|e| e.to_string())
    }

    /// Make the memory starting at `start` and extending for `len` bytes accessible.
    /// `start` and `len` must be native page-size multiples and describe a range within
    /// `self`'s reserved memory.
    #[cfg(target_os = "windows")]
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
        use std::ffi::c_void;
        use windows_sys::Win32::System::Memory::{VirtualAlloc, MEM_COMMIT, PAGE_READWRITE};
        let page_size = region::page::size();
        assert_eq!(start & (page_size - 1), 0);
        assert_eq!(len & (page_size - 1), 0);
        assert_le!(len, self.len());
        assert_le!(start, self.len() - len);

        // Commit the accessible size.
        let ptr = self.ptr as *const u8;
        if unsafe {
            VirtualAlloc(
                ptr.add(start) as *mut c_void,
                len,
                MEM_COMMIT,
                PAGE_READWRITE,
            )
        }
        .is_null()
        {
            return Err(io::Error::last_os_error().to_string());
        }

        Ok(())
    }

    /// Return the allocated memory as a slice of u8.
    pub fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr as *const u8, self.total_size) }
    }

    /// Return the accessible part of the allocated memory as a slice of u8.
    pub fn as_slice_accessible(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr as *const u8, self.accessible_size) }
    }

    /// Return the first `size` bytes of the allocated memory, clamped to the
    /// total size, as a slice of u8.
    pub fn as_slice_arbitary(&self, size: usize) -> &[u8] {
        let size = usize::min(size, self.total_size);
        unsafe { slice::from_raw_parts(self.ptr as *const u8, size) }
    }

    /// Return the allocated memory as a mutable slice of u8.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr as *mut u8, self.total_size) }
    }

    /// Return the accessible part of the allocated memory as a mutable slice of u8.
    pub fn as_mut_slice_accessible(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr as *mut u8, self.accessible_size) }
    }

    /// Return the first `size` bytes of the allocated memory, clamped to the
    /// total size, as a mutable slice of u8.
    pub fn as_mut_slice_arbitary(&mut self, size: usize) -> &mut [u8] {
        let size = usize::min(size, self.total_size);
        unsafe { slice::from_raw_parts_mut(self.ptr as *mut u8, size) }
    }

    /// Return the allocated memory as a pointer to u8.
    pub fn as_ptr(&self) -> *const u8 {
        self.ptr as *const u8
    }

    /// Return the allocated memory as a mutable pointer to u8.
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.ptr as *mut u8
    }

    /// Return the length of the allocated memory.
    pub fn len(&self) -> usize {
        self.total_size
    }

    /// Return whether no memory has been allocated.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Duplicate in a new memory mapping.
    #[deprecated = "use `copy` instead"]
    pub fn duplicate(&mut self, size_hint: Option<usize>) -> Result<Self, String> {
        self.copy(size_hint)
    }

    /// Duplicate in a new memory mapping.
    pub fn copy(&mut self, size_hint: Option<usize>) -> Result<Self, String> {
        // NOTE: `accessible_size` is not the same as the used size, as the value
        //       is not automatically updated when the pre-provisioned space is used.
        let mut copy_size = self.accessible_size;
        if let Some(size_hint) = size_hint {
            copy_size = usize::max(copy_size, size_hint);
        }

        let mut new =
            Self::accessible_reserved(copy_size, self.total_size, None, MmapType::Private)?;
        new.as_mut_slice_arbitary(copy_size)
            .copy_from_slice(self.as_slice_arbitary(copy_size));
        Ok(new)
    }
}

impl Drop for Mmap {
    #[cfg(not(target_os = "windows"))]
    fn drop(&mut self) {
        if self.total_size != 0 {
            if self.sync_on_drop {
                let r = unsafe {
                    libc::msync(
                        self.ptr as *mut libc::c_void,
                        self.total_size,
                        libc::MS_SYNC | libc::MS_INVALIDATE,
                    )
                };
                assert_eq!(r, 0, "msync failed: {}", io::Error::last_os_error());
            }
            let r = unsafe { libc::munmap(self.ptr as *mut libc::c_void, self.total_size) };
            assert_eq!(r, 0, "munmap failed: {}", io::Error::last_os_error());
        }
    }

    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        if self.len() != 0 {
            use std::ffi::c_void;
            use windows_sys::Win32::System::Memory::{VirtualFree, MEM_RELEASE};
            let r = unsafe { VirtualFree(self.ptr as *mut c_void, 0, MEM_RELEASE) };
            assert_ne!(r, 0);
        }
    }
}

fn _assert() {
    fn _assert_send_sync<T: Send + Sync>() {}
    _assert_send_sync::<Mmap>();
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_round_up_to_page_size() {
        assert_eq!(round_up_to_page_size(0, 4096), 0);
        assert_eq!(round_up_to_page_size(1, 4096), 4096);
        assert_eq!(round_up_to_page_size(4096, 4096), 4096);
        assert_eq!(round_up_to_page_size(4097, 4096), 8192);
    }
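
    // The two tests below are illustrative sketches of how the public `Mmap`
    // API fits together; they assume only that small anonymous mappings
    // succeed on the test host.
    #[test]
    fn test_with_at_least_is_zeroed_and_writable() {
        let mut map = Mmap::with_at_least(1).expect("mmap failed");
        // The requested size is rounded up to a whole number of pages.
        assert!(map.len() >= 1);
        assert_eq!(map.len() % region::page::size(), 0);
        // Freshly mapped pages are zero-filled and writable.
        assert!(map.as_slice().iter().all(|&b| b == 0));
        map.as_mut_slice()[0] = 42;
        assert_eq!(map.as_slice()[0], 42);
    }

    #[test]
    fn test_commit_part_of_a_reservation() {
        let page_size = region::page::size();
        let mapping_size = page_size * 4;
        // Reserve four pages without committing any of them.
        let mut map = Mmap::accessible_reserved(0, mapping_size, None, MmapType::Private)
            .expect("reservation failed");
        assert_eq!(map.len(), mapping_size);
        // Commit only the first page, then write to it through a bounded slice.
        map.make_accessible(0, page_size).expect("commit failed");
        map.as_mut_slice_arbitary(page_size)[page_size - 1] = 7;
        assert_eq!(map.as_slice_arbitary(page_size)[page_size - 1], 7);
    }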
}