polkavm_common/
abi.rs

//! Everything in this module affects the ABI of the guest programs, either by affecting
//! their observable behavior (no matter how obscure), or by changing which programs are accepted by the VM.

use crate::utils::{align_to_next_page_u32, align_to_next_page_u64};
use core::ops::Range;

const ADDRESS_SPACE_SIZE: u64 = 0x100000000_u64;

/// The minimum page size of the VM.
pub const VM_MIN_PAGE_SIZE: u32 = 0x1000;

/// The maximum page size of the VM.
pub const VM_MAX_PAGE_SIZE: u32 = 0x10000;

static_assert!(VM_MIN_PAGE_SIZE <= VM_MAX_PAGE_SIZE);

/// The bottom of the accessible address space inside the VM.
const VM_ADDRESS_SPACE_BOTTOM: u32 = VM_MAX_PAGE_SIZE;

/// The top of the accessible address space inside the VM.
const VM_ADDRESS_SPACE_TOP: u32 = (ADDRESS_SPACE_SIZE - VM_MAX_PAGE_SIZE as u64) as u32;

/// The address which, when jumped to, will return to the host.
///
/// There isn't actually anything there; it's just a virtual address.
pub const VM_ADDR_RETURN_TO_HOST: u32 = 0xffff0000;
static_assert!(VM_ADDR_RETURN_TO_HOST & 0b11 == 0);

/// The maximum byte size of the code blob.
pub const VM_MAXIMUM_CODE_SIZE: u32 = 32 * 1024 * 1024;

/// The maximum number of entries in the jump table.
pub const VM_MAXIMUM_JUMP_TABLE_ENTRIES: u32 = 16 * 1024 * 1024;

/// The maximum number of functions the program can import.
pub const VM_MAXIMUM_IMPORT_COUNT: u32 = 1024;

/// The minimum required alignment of runtime code pointers.
pub const VM_CODE_ADDRESS_ALIGNMENT: u32 = 2;

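/// A builder for a [`MemoryMap`].
///
/// A minimal usage sketch (the sizes here are illustrative; requested sizes
/// are rounded up to the page size):
///
/// ```ignore
/// let map = MemoryMapBuilder::new(0x4000)
///     .ro_data_size(1)
///     .rw_data_size(1)
///     .stack_size(1)
///     .build()
///     .unwrap();
/// assert_eq!(map.ro_data_address(), 0x10000);
/// assert_eq!(map.ro_data_size(), 0x4000);
/// ```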
#[derive(Clone)]
pub struct MemoryMapBuilder {
    page_size: u32,
    ro_data_size: u32,
    rw_data_size: u32,
    stack_size: u32,
    aux_data_size: u32,
}

impl MemoryMapBuilder {
    pub fn new(page_size: u32) -> Self {
        MemoryMapBuilder {
            page_size,
            ro_data_size: 0,
            rw_data_size: 0,
            stack_size: 0,
            aux_data_size: 0,
        }
    }

    pub fn ro_data_size(&mut self, value: u32) -> &mut Self {
        self.ro_data_size = value;
        self
    }

    pub fn rw_data_size(&mut self, value: u32) -> &mut Self {
        self.rw_data_size = value;
        self
    }

    pub fn stack_size(&mut self, value: u32) -> &mut Self {
        self.stack_size = value;
        self
    }

    pub fn aux_data_size(&mut self, value: u32) -> &mut Self {
        self.aux_data_size = value;
        self
    }

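    /// Finalizes the builder into a [`MemoryMap`].
    ///
    /// Returns an error if the page size is invalid, if any of the sizes
    /// are too big to be page-aligned, or if the regions don't all fit
    /// within the guest's address space.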
    pub fn build(&self) -> Result<MemoryMap, &'static str> {
        let MemoryMapBuilder {
            page_size,
            ro_data_size,
            rw_data_size,
            stack_size,
            aux_data_size,
        } = *self;

        if page_size < VM_MIN_PAGE_SIZE {
            return Err("invalid page size: page size is too small");
        }

        if page_size > VM_MAX_PAGE_SIZE {
            return Err("invalid page size: page size is too big");
        }

        if !page_size.is_power_of_two() {
            return Err("invalid page size: page size is not a power of two");
        }

        let Some(ro_data_address_space) = align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), u64::from(ro_data_size)) else {
            return Err("the size of read-only data is too big");
        };

        let Some(ro_data_size) = align_to_next_page_u32(page_size, ro_data_size) else {
            return Err("the size of read-only data is too big");
        };

        let Some(rw_data_address_space) = align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), u64::from(rw_data_size)) else {
            return Err("the size of read-write data is too big");
        };

        let original_rw_data_size = rw_data_size;
        let Some(rw_data_size) = align_to_next_page_u32(page_size, rw_data_size) else {
            return Err("the size of read-write data is too big");
        };

        let Some(stack_address_space) = align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), u64::from(stack_size)) else {
            return Err("the size of the stack is too big");
        };

        let Some(stack_size) = align_to_next_page_u32(page_size, stack_size) else {
            return Err("the size of the stack is too big");
        };

        let Some(aux_data_address_space) = align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), u64::from(aux_data_size)) else {
            return Err("the size of the aux data is too big");
        };

        let Some(aux_data_size) = align_to_next_page_u32(page_size, aux_data_size) else {
            return Err("the size of the aux data is too big");
        };

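        // Guard gaps of `VM_MAX_PAGE_SIZE` bytes separate every region of the
        // address space from its neighbours:
        //
        //   [guard] ro_data [guard] rw_data [guard] stack [guard] aux_data [guard]
        //
        // First lay out the low half (the data sections) from the bottom up.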
        let mut address_low: u64 = 0;

        address_low += u64::from(VM_ADDRESS_SPACE_BOTTOM);
        address_low += ro_data_address_space;
        address_low += u64::from(VM_MAX_PAGE_SIZE);

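        // The heap starts right at the end of the (unpadded) read-write data;
        // `heap_slack` is how much of the rw_data padding the heap can grow
        // into without claiming any additional address space.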
        let rw_data_address = address_low as u32;
        let heap_base = address_low + u64::from(original_rw_data_size);
        address_low += rw_data_address_space;
        let heap_slack = address_low - heap_base;
        address_low += u64::from(VM_MAX_PAGE_SIZE);

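        // Lay out the aux data and the stack from the top of the address space down.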
        let mut address_high: i64 = i64::from(VM_ADDRESS_SPACE_TOP);
        address_high -= aux_data_address_space as i64;
        let aux_data_address = address_high as u32;
        address_high -= i64::from(VM_MAX_PAGE_SIZE);
        let stack_address_high = address_high as u32;
        address_high -= stack_address_space as i64;

        if address_low as i64 > address_high {
            return Err("maximum memory size exceeded");
        }

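        // The heap grows contiguously from `heap_base`: first through the slack
        // in the read-write region, then through the free space between the two
        // halves of the layout, stopping a guard gap short of the stack.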
        let max_heap_size = address_high as u64 - address_low + heap_slack;

        Ok(MemoryMap {
            page_size,
            ro_data_size,
            rw_data_address,
            rw_data_size,
            stack_address_high,
            stack_size,
            aux_data_address,
            aux_data_size,
            heap_base: heap_base as u32,
            max_heap_size: max_heap_size as u32,
        })
    }
}

/// The memory map of a given guest program.
#[derive(Clone)]
pub struct MemoryMap {
    page_size: u32,
    ro_data_size: u32,
    rw_data_address: u32,
    rw_data_size: u32,
    stack_address_high: u32,
    stack_size: u32,
    aux_data_address: u32,
    aux_data_size: u32,
    heap_base: u32,
    max_heap_size: u32,
}

impl MemoryMap {
    /// The page size of the program.
    #[inline]
    pub fn page_size(&self) -> u32 {
        self.page_size
    }

    /// The address at which the program's heap starts.
    #[inline]
    pub fn heap_base(&self) -> u32 {
        self.heap_base
    }

    /// The maximum size of the program's heap.
    #[inline]
    pub fn max_heap_size(&self) -> u32 {
        self.max_heap_size
    }

    /// The address at which the program's read-only data starts inside the VM.
    #[inline]
    pub fn ro_data_address(&self) -> u32 {
        VM_ADDRESS_SPACE_BOTTOM
    }

    /// The size of the program's read-only data.
    #[inline]
    pub fn ro_data_size(&self) -> u32 {
        self.ro_data_size
    }

    /// The range of addresses where the program's read-only data resides inside the VM.
    #[inline]
    pub fn ro_data_range(&self) -> Range<u32> {
        self.ro_data_address()..self.ro_data_address() + self.ro_data_size()
    }

    /// The address at which the program's read-write data starts inside the VM.
    #[inline]
    pub fn rw_data_address(&self) -> u32 {
        self.rw_data_address
    }

    /// The size of the program's read-write data.
    #[inline]
    pub fn rw_data_size(&self) -> u32 {
        self.rw_data_size
    }

    /// The range of addresses where the program's read-write data resides inside the VM.
    #[inline]
    pub fn rw_data_range(&self) -> Range<u32> {
        self.rw_data_address()..self.rw_data_address() + self.rw_data_size()
    }

    /// The address at which the program's stack starts inside the VM.
    #[inline]
    pub fn stack_address_low(&self) -> u32 {
        self.stack_address_high() - self.stack_size
    }

    /// The address at which the program's stack ends inside the VM.
    #[inline]
    pub fn stack_address_high(&self) -> u32 {
        self.stack_address_high
    }

    /// The size of the program's stack.
    #[inline]
    pub fn stack_size(&self) -> u32 {
        self.stack_size
    }

    /// The range of addresses where the program's stack resides inside the VM.
    #[inline]
    pub fn stack_range(&self) -> Range<u32> {
        self.stack_address_low()..self.stack_address_high()
    }

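    /// The address at which the program's aux data starts inside the VM.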
    #[inline]
    pub fn aux_data_address(&self) -> u32 {
        self.aux_data_address
    }

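    /// The size of the program's aux data.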
    #[inline]
    pub fn aux_data_size(&self) -> u32 {
        self.aux_data_size
    }

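    /// The range of addresses where the program's aux data resides inside the VM.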
    #[inline]
    pub fn aux_data_range(&self) -> Range<u32> {
        self.aux_data_address()..self.aux_data_address() + self.aux_data_size()
    }
}

#[test]
fn test_memory_map() {
    {
        let map = MemoryMapBuilder::new(0x4000)
            .ro_data_size(1)
            .rw_data_size(1)
            .stack_size(1)
            .build()
            .unwrap();
        assert_eq!(map.ro_data_address(), 0x10000);
        assert_eq!(map.ro_data_size(), 0x4000);
        assert_eq!(map.rw_data_address(), 0x30000);
        assert_eq!(map.rw_data_size(), 0x4000);
        assert_eq!(map.stack_size(), 0x4000);
        assert_eq!(map.stack_address_high(), 0xfffe0000);
        assert_eq!(map.stack_address_low(), 0xfffdc000);

        assert_eq!(map.heap_base(), 0x30001);
        assert_eq!(
            u64::from(map.max_heap_size()),
            ADDRESS_SPACE_SIZE - u64::from(VM_MAX_PAGE_SIZE) * 4 - u64::from(map.heap_base())
        );
    }

    let max_size = (ADDRESS_SPACE_SIZE - u64::from(VM_MAX_PAGE_SIZE) * 5) as u32;

    {
        // Read-only data takes the whole address space.
        let map = MemoryMapBuilder::new(0x4000).ro_data_size(max_size).build().unwrap();
        assert_eq!(map.ro_data_address(), 0x10000);
        assert_eq!(map.ro_data_size(), max_size);
        assert_eq!(map.rw_data_address(), map.ro_data_address() + VM_MAX_PAGE_SIZE + max_size);
        assert_eq!(map.rw_data_size(), 0);
        assert_eq!(map.stack_address_high(), VM_ADDRESS_SPACE_TOP - VM_MAX_PAGE_SIZE);
        assert_eq!(map.stack_address_low(), VM_ADDRESS_SPACE_TOP - VM_MAX_PAGE_SIZE);
        assert_eq!(map.stack_size(), 0);

        assert_eq!(map.heap_base(), map.rw_data_address());
        assert_eq!(map.max_heap_size(), 0);
    }

    assert!(MemoryMapBuilder::new(0x4000).ro_data_size(max_size + 1).build().is_err());
    assert!(MemoryMapBuilder::new(0x4000)
        .ro_data_size(max_size)
        .rw_data_size(1)
        .build()
        .is_err());
    assert!(MemoryMapBuilder::new(0x4000).ro_data_size(max_size).stack_size(1).build().is_err());

    {
        // Read-write data takes the whole address space.
        let map = MemoryMapBuilder::new(0x4000).rw_data_size(max_size).build().unwrap();
        assert_eq!(map.ro_data_address(), VM_MAX_PAGE_SIZE);
        assert_eq!(map.ro_data_size(), 0);
        assert_eq!(map.rw_data_address(), VM_MAX_PAGE_SIZE * 2);
        assert_eq!(map.rw_data_size(), max_size);
        assert_eq!(map.stack_address_high(), VM_ADDRESS_SPACE_TOP - VM_MAX_PAGE_SIZE);
        assert_eq!(map.stack_address_low(), VM_ADDRESS_SPACE_TOP - VM_MAX_PAGE_SIZE);
        assert_eq!(map.stack_size(), 0);

        assert_eq!(map.heap_base(), map.rw_data_address() + map.rw_data_size());
        assert_eq!(map.max_heap_size(), 0);
    }

    {
        // Stack takes the whole address space.
        let map = MemoryMapBuilder::new(0x4000).stack_size(max_size).build().unwrap();
        assert_eq!(map.ro_data_address(), VM_MAX_PAGE_SIZE);
        assert_eq!(map.ro_data_size(), 0);
        assert_eq!(map.rw_data_address(), VM_MAX_PAGE_SIZE * 2);
        assert_eq!(map.rw_data_size(), 0);
        assert_eq!(map.stack_address_high(), VM_ADDRESS_SPACE_TOP - VM_MAX_PAGE_SIZE);
        assert_eq!(map.stack_address_low(), VM_ADDRESS_SPACE_TOP - VM_MAX_PAGE_SIZE - max_size);
        assert_eq!(map.stack_size(), max_size);

        assert_eq!(map.heap_base(), map.rw_data_address());
        assert_eq!(map.max_heap_size(), 0);
    }
}

#[cfg(kani)]
mod kani {
    use super::VM_MAX_PAGE_SIZE;
    use crate::utils::align_to_next_page_u64;

    #[kani::proof]
    fn memory_map() {
        let page_size: u32 = kani::any();
        let ro_data_size: u32 = kani::any();
        let rw_data_size: u32 = kani::any();
        let stack_size: u32 = kani::any();
        let aux_data_size: u32 = kani::any();
        kani::assume(page_size >= super::VM_MIN_PAGE_SIZE);
        kani::assume(page_size <= super::VM_MAX_PAGE_SIZE);
        kani::assume(page_size.is_power_of_two());

        let map = super::MemoryMapBuilder::new(page_size)
            .ro_data_size(ro_data_size)
            .rw_data_size(rw_data_size)
            .stack_size(stack_size)
            .aux_data_size(aux_data_size)
            .build();

        if let Ok(ref map) = map {
            assert_eq!(map.ro_data_address() % VM_MAX_PAGE_SIZE, 0);
            assert_eq!(map.rw_data_address() % VM_MAX_PAGE_SIZE, 0);
            assert_eq!(map.stack_address_high() % VM_MAX_PAGE_SIZE, 0);
            assert_eq!(map.aux_data_address() % VM_MAX_PAGE_SIZE, 0);

            assert_eq!(map.ro_data_address() % page_size, 0);
            assert_eq!(map.ro_data_range().end % page_size, 0);
            assert_eq!(map.rw_data_address() % page_size, 0);
            assert_eq!(map.rw_data_range().end % page_size, 0);
            assert_eq!(map.stack_address_high() % page_size, 0);
            assert_eq!(map.stack_address_low() % page_size, 0);
            assert_eq!(map.aux_data_address() % page_size, 0);
            assert_eq!(map.aux_data_range().end % page_size, 0);

            assert!(map.ro_data_address() < map.rw_data_address());
            assert!(map.rw_data_address() < map.stack_address_low());
            assert!(map.stack_address_low() <= map.stack_address_high());
            assert!(map.stack_address_high() < map.aux_data_address());

            assert!(map.rw_data_address() - map.ro_data_range().end >= VM_MAX_PAGE_SIZE);
            assert!(map.stack_address_low() - map.rw_data_range().end >= VM_MAX_PAGE_SIZE);
            assert!(map.aux_data_address() - map.stack_address_high() >= VM_MAX_PAGE_SIZE);
        }

        let total_size = align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), ro_data_size as u64).unwrap()
            + align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), rw_data_size as u64).unwrap()
            + align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), stack_size as u64).unwrap()
            + align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), aux_data_size as u64).unwrap();

        // [guard] ro_data [guard] rw_data [guard] stack [guard] aux [guard]
        let max_size = 0x100000000 - u64::from(VM_MAX_PAGE_SIZE) * 5;
        assert_eq!(map.is_err(), total_size > max_size);
    }
}