multiversx_sc_wasm_adapter/wasm_alloc/
leaking_allocator.rs

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
use super::memory_grow::{memory_grow, PageCount, PAGE_SIZE};
use core::{
    alloc::{GlobalAlloc, Layout},
    cell::UnsafeCell,
};

/// A non-thread safe bump-pointer allocator.
/// Does not free or reuse memory: `dealloc` is a no-op, so every
/// allocation leaks. Efficient for small allocations — each `alloc`
/// is a handful of arithmetic operations, growing wasm linear memory
/// only when the current region is exhausted.
///
/// Largely inspired by lol_alloc:
/// https://github.com/Craig-Macomber/lol_alloc
pub struct LeakingAllocator {
    used: UnsafeCell<usize>, // bytes; bump pointer — next allocation starts at this linear-memory offset
    size: UnsafeCell<usize>, // bytes; end of the memory acquired so far via `memory_grow`
}

/// Single-threaded context only.
///
/// SAFETY: `Sync` is sound here only because wasm smart-contract
/// execution is single-threaded, so the `UnsafeCell` fields are never
/// accessed from two threads. Do not reuse this type in a
/// multi-threaded environment.
unsafe impl Sync for LeakingAllocator {}

impl LeakingAllocator {
    #[allow(clippy::new_without_default)]
    pub const fn new() -> Self {
        LeakingAllocator {
            used: UnsafeCell::new(0),
            size: UnsafeCell::new(0),
        }
    }
}

unsafe impl GlobalAlloc for LeakingAllocator {
    /// Bump-pointer allocation: aligns the current `used` offset, grows wasm
    /// linear memory if the request does not fit in the remaining space, and
    /// returns the pre-bump offset as the allocation's address.
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // SAFETY: single-threaded environment (see the `Sync` impl), so no
        // other reference to these cells can exist while we hold these
        // exclusive borrows.
        let size: &mut usize = &mut *self.size.get();
        let used: &mut usize = &mut *self.used.get();
        // This assumes PAGE_SIZE is always a multiple of the required alignment, which should be true for all practical use.
        // If this is not true, this could go past size.
        let alignment = layout.align();
        let offset = *used % alignment;
        if offset != 0 {
            // Round `used` up to the next multiple of `alignment`.
            *used += alignment - offset;
        }

        let requested_size = layout.size();
        let new_total = *used + requested_size;
        if new_total > *size {
            // Request enough new space for this allocation, even if we have some space left over from the last one incase they end up non-contiguous.
            // Round up to a number of pages
            let requested_pages = requested_size.div_ceil(PAGE_SIZE);
            // NOTE(review): a failed grow is not detected here. If
            // `memory_grow` can fail (rather than abort), this would return a
            // pointer past the end of memory — confirm `memory_grow`'s
            // failure behavior in this environment.
            let previous_page_count = memory_grow(PageCount(requested_pages));

            // `previous_page_count` is the memory size *before* the grow;
            // if it differs from our bookkeeping, someone else grew memory.
            let previous_size = previous_page_count.size_in_bytes();
            if previous_size != *size {
                // New memory is not contiguous with old: something else allocated in-between.
                // TODO: is handling this case necessary? Maybe make it optional behind a feature?
                // This assumes PAGE_SIZE is always a multiple of the required alignment, which should be true for all practical use.
                *used = previous_size;
            }
            *size = previous_size + requested_pages * PAGE_SIZE;
        }

        // The allocation occupies [start, start + requested_size);
        // bump `used` past it and hand out the old offset as the pointer.
        let start = *used;
        *used += requested_size;
        start as *mut u8
    }

    /// Deallocation is a no-op: this allocator never reclaims memory.
    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
}