fuel_vm/interpreter/memory.rs

1#![cfg(feature = "alloc")]
2
3use super::{
4    internal::inc_pc,
5    Interpreter,
6};
7use crate::{
8    constraints::reg_key::*,
9    consts::*,
10    error::SimpleResult,
11};
12
13use fuel_asm::{
14    Imm12,
15    Imm24,
16    PanicReason,
17    RegId,
18};
19use fuel_types::{
20    fmt_truncated_hex,
21    RegisterId,
22    Word,
23};
24
25use core::{
26    fmt,
27    ops::Range,
28};
29
30#[cfg(any(test, feature = "test-helpers"))]
31use core::ops::{
32    Index,
33    IndexMut,
34    RangeFrom,
35    RangeTo,
36};
37
38use crate::error::{
39    IoResult,
40    RuntimeError,
41};
42use alloc::vec::Vec;
43use fuel_storage::{
44    Mappable,
45    StorageRead,
46};
47
48#[cfg(test)]
49mod tests;
50
51#[cfg(test)]
52mod impl_tests;
53
54#[cfg(test)]
55mod allocation_tests;
56
57#[cfg(test)]
58mod stack_tests;
59
/// The trait for the memory. Anything that can provide both shared and
/// mutable access to a [`MemoryInstance`] qualifies (see the blanket impl).
pub trait Memory: AsRef<MemoryInstance> + AsMut<MemoryInstance> {}
62
// Blanket implementation: no manual `Memory` impls are needed anywhere.
impl<M> Memory for M where M: AsRef<MemoryInstance> + AsMut<MemoryInstance> {}
64
/// The memory of the VM, represented as stack and heap.
///
/// Both regions live in a single `MEM_SIZE`-byte address space but are backed
/// by separate growable buffers, so untouched memory is never allocated.
#[derive(Clone, Eq)]
pub struct MemoryInstance {
    /// Stack. Grows upwards.
    stack: Vec<u8>,
    /// Heap. Grows downwards from MEM_SIZE.
    heap: Vec<u8>,
    /// Lowest allowed heap address, i.e. hp register value.
    /// This is needed since we can allocate extra heap for performance reasons.
    /// Invariant: `hp >= heap_offset()`, i.e. `hp` always points inside
    /// (or just past) the allocated heap buffer.
    hp: usize,
}
76
77impl Default for MemoryInstance {
78    fn default() -> Self {
79        Self::new()
80    }
81}
82
impl fmt::Debug for MemoryInstance {
    /// Formats both regions as truncated hex dumps. Only the accessible part
    /// of the heap (addresses at or above `hp`) is shown.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Memory {{ stack: ")?;
        fmt_truncated_hex::<16>(&self.stack, f)?;
        write!(f, ", heap: ")?;
        // Translate the global hp address into a heap-buffer index;
        // saturates to 0 if hp were ever below the allocated region.
        let off = self.hp.saturating_sub(self.heap_offset());
        fmt_truncated_hex::<16>(&self.heap[off..], f)?;
        write!(f, ", hp: {} }}", self.hp)
    }
}
93
impl PartialEq for MemoryInstance {
    /// Equality comparison of the accessible memory.
    // Extra (inaccessible) heap capacity retained for buffer reuse is
    // deliberately excluded, so two instances with different allocation
    // strategies but identical accessible contents compare equal.
    #[allow(clippy::arithmetic_side_effects)] // Safety: hp is kept valid everywhere
    fn eq(&self, other: &Self) -> bool {
        self.stack == other.stack && self.hp == other.hp && {
            let self_hs = self.hp - self.heap_offset();
            let other_hs = other.hp - other.heap_offset();
            self.heap[self_hs..] == other.heap[other_hs..]
        }
    }
}
105
// Identity conversion so `MemoryInstance` itself satisfies the `Memory` bound.
impl AsRef<MemoryInstance> for MemoryInstance {
    fn as_ref(&self) -> &MemoryInstance {
        self
    }
}
// Identity conversion so `MemoryInstance` itself satisfies the `Memory` bound.
impl AsMut<MemoryInstance> for MemoryInstance {
    fn as_mut(&mut self) -> &mut MemoryInstance {
        self
    }
}
116
impl MemoryInstance {
    /// Create a new VM memory.
    ///
    /// Both buffers start empty and `hp` starts at `MEM_SIZE`, so no heap
    /// is accessible until [`Self::grow_heap_by`] is called.
    pub fn new() -> Self {
        Self {
            stack: Vec::new(),
            heap: Vec::new(),
            hp: MEM_SIZE,
        }
    }

    /// Resets memory to initial state, keeping the original allocations.
    // The heap buffer is intentionally not cleared here; stale bytes are
    // zeroed lazily by `grow_heap_by` before becoming accessible again.
    pub fn reset(&mut self) {
        self.stack.truncate(0);
        self.hp = MEM_SIZE;
    }

    /// Offset of the heap section, i.e. the global address of `heap[0]`.
    fn heap_offset(&self) -> usize {
        MEM_SIZE.saturating_sub(self.heap.len())
    }

    /// Grows the stack to be at least `new_sp` bytes.
    ///
    /// Newly exposed stack space is zero-initialized. Errors with
    /// `MemoryOverflow` if `new_sp` exceeds `VM_MAX_RAM`, and with
    /// `MemoryGrowthOverlap` if the stack would collide with the heap.
    pub fn grow_stack(&mut self, new_sp: Word) -> Result<(), PanicReason> {
        if new_sp > VM_MAX_RAM {
            return Err(PanicReason::MemoryOverflow);
        }
        #[allow(clippy::cast_possible_truncation)] // Safety: VM_MAX_RAM is usize
        let new_sp = new_sp as usize;

        if new_sp > self.stack.len() {
            if new_sp > self.hp {
                return Err(PanicReason::MemoryGrowthOverlap)
            }

            self.stack.resize(new_sp, 0);
        }
        Ok(())
    }

    /// Grows the heap by `amount` bytes. Updates hp register.
    ///
    /// The accessible heap grows downwards from `MEM_SIZE`. The backing
    /// buffer may be over-allocated for performance, but bytes exposed by
    /// this call are always zeroed first. Errors with `MemoryOverflow` if
    /// `hp` would underflow and `MemoryGrowthOverlap` if the heap would
    /// collide with the stack.
    pub fn grow_heap_by(
        &mut self,
        sp_reg: Reg<SP>,
        mut hp_reg: RegMut<HP>,
        amount: Word,
    ) -> Result<(), PanicReason> {
        debug_assert_eq!(
            self.hp as Word, *hp_reg,
            "HP register changed without memory update"
        );

        let amount = usize::try_from(amount).map_err(|_| PanicReason::MemoryOverflow)?;
        let new_hp = self
            .hp
            .checked_sub(amount)
            .ok_or(PanicReason::MemoryOverflow)?;

        if (new_hp as Word) < *sp_reg {
            return Err(PanicReason::MemoryGrowthOverlap)
        }

        #[allow(clippy::arithmetic_side_effects)] // Safety: self.hp is in heap
        let new_len = MEM_SIZE - new_hp;

        #[allow(clippy::arithmetic_side_effects)] // Safety: self.hp is in heap
        if self.heap.len() >= new_len {
            // No need to reallocate, but we need to zero the new space
            // in case it was used before a memory reset.
            let start = new_hp - self.heap_offset();
            let end = self.hp - self.heap_offset();
            self.heap[start..end].fill(0);
        } else {
            // Reallocation is needed.
            // To reduce frequent reallocations, allocate at least 256 bytes at once.
            // After that, double the allocation every time.
            let cap = new_len.next_power_of_two().clamp(256, MEM_SIZE);
            let old_len = self.heap.len();
            let prefix_zeroes = cap - old_len;
            // The heap grows downwards, so after resizing, the old contents
            // must be shifted to the end of the new buffer and the newly
            // exposed prefix zeroed.
            self.heap.resize(cap, 0);
            self.heap.copy_within(..old_len, prefix_zeroes);
            self.heap[..prefix_zeroes].fill(0);
        }

        self.hp = new_hp;
        *hp_reg = new_hp as Word;

        // If heap enters region where stack has been, truncate the stack
        self.stack.truncate(new_hp);

        Ok(())
    }

    /// Verify that the memory range is accessible and return it as a range.
    ///
    /// The whole range must fit either inside the stack or inside the
    /// accessible heap; ranges crossing the unallocated gap between the two
    /// regions are rejected with `UninitalizedMemoryAccess`.
    pub fn verify<A: ToAddr, B: ToAddr>(
        &self,
        addr: A,
        count: B,
    ) -> Result<MemoryRange, PanicReason> {
        let start = addr.to_addr()?;
        let len = count.to_addr()?;
        let end = start.saturating_add(len);
        if end > MEM_SIZE {
            return Err(PanicReason::MemoryOverflow)
        }

        if end <= self.stack.len() || start >= self.hp {
            Ok(MemoryRange(start..end))
        } else {
            Err(PanicReason::UninitalizedMemoryAccess)
        }
    }

    /// Verify a constant-sized memory range.
    pub fn verify_const<A: ToAddr, const C: usize>(
        &self,
        addr: A,
    ) -> Result<MemoryRange, PanicReason> {
        self.verify(addr, C)
    }

    /// Returns a reference to memory for reading, if possible.
    ///
    /// The returned slice comes either from the stack or the heap buffer,
    /// depending on which region the verified range falls into.
    #[allow(clippy::arithmetic_side_effects)] // Safety: subtractions are checked
    pub fn read<A: ToAddr, C: ToAddr>(
        &self,
        addr: A,
        count: C,
    ) -> Result<&[u8], PanicReason> {
        let range = self.verify(addr, count)?;

        if range.end() <= self.stack.len() {
            Ok(&self.stack[range.usizes()])
        } else if range.start() >= self.heap_offset() {
            // Convert the global addresses into heap-buffer indices.
            let start = range.start() - self.heap_offset();
            let end = range.end() - self.heap_offset();
            Ok(&self.heap[start..end])
        } else {
            unreachable!("Range was verified to be valid")
        }
    }

    /// Reads a constant-sized byte array from memory, if possible.
    pub fn read_bytes<A: ToAddr, const C: usize>(
        &self,
        at: A,
    ) -> Result<[u8; C], PanicReason> {
        let mut result = [0; C];
        result.copy_from_slice(self.read(at, C)?);
        Ok(result)
    }

    /// Gets write access to memory, if possible.
    /// Doesn't perform any ownership checks.
    #[allow(clippy::arithmetic_side_effects)] // Safety: subtractions are checked
    pub fn write_noownerchecks<A: ToAddr, B: ToAddr>(
        &mut self,
        addr: A,
        len: B,
    ) -> Result<&mut [u8], PanicReason> {
        let range = self.verify(addr, len)?;
        if range.end() <= self.stack.len() {
            Ok(&mut self.stack[range.usizes()])
        } else if range.start() >= self.heap_offset() {
            // Convert the global addresses into heap-buffer indices.
            let start = range.start() - self.heap_offset();
            let end = range.end() - self.heap_offset();
            Ok(&mut self.heap[start..end])
        } else {
            unreachable!("Range was verified to be valid")
        }
    }

    /// Writes a constant-sized byte array to memory, if possible.
    /// Doesn't perform any ownership checks.
    pub fn write_bytes_noownerchecks<A: ToAddr, const C: usize>(
        &mut self,
        addr: A,
        data: [u8; C],
    ) -> Result<(), PanicReason> {
        self.write_noownerchecks(addr, C)?.copy_from_slice(&data);
        Ok(())
    }

    /// Checks that memory is writable and returns a mutable slice to it.
    pub fn write<A: ToAddr, C: ToAddr>(
        &mut self,
        owner: OwnershipRegisters,
        addr: A,
        len: C,
    ) -> Result<&mut [u8], PanicReason> {
        let range = self.verify(addr, len)?;
        owner.verify_ownership(&range)?;
        self.write_noownerchecks(range.start(), range.len())
    }

    /// Writes a constant-sized byte array to memory, checking for ownership.
    pub fn write_bytes<A: ToAddr, const C: usize>(
        &mut self,
        owner: OwnershipRegisters,
        addr: A,
        data: [u8; C],
    ) -> Result<(), PanicReason> {
        self.write(owner, addr, data.len())?.copy_from_slice(&data);
        Ok(())
    }

    /// Copies the memory from `src` to `dst` verifying ownership.
    ///
    /// Overlapping ranges are rejected with `MemoryWriteOverlap`; only the
    /// destination needs to be owned by the caller.
    #[inline]
    #[track_caller]
    pub fn memcopy(
        &mut self,
        dst: Word,
        src: Word,
        length: Word,
        owner: OwnershipRegisters,
    ) -> Result<(), PanicReason> {
        let dst_range = self.verify(dst, length)?;
        let src_range = self.verify(src, length)?;

        // Reject any overlap between source and destination; with overlap the
        // copy direction would affect the result.
        if dst_range.start() <= src_range.start() && src_range.start() < dst_range.end()
            || src_range.start() <= dst_range.start()
                && dst_range.start() < src_range.end()
            || dst_range.start() < src_range.end() && src_range.end() <= dst_range.end()
            || src_range.start() < dst_range.end() && dst_range.end() <= src_range.end()
        {
            return Err(PanicReason::MemoryWriteOverlap)
        }

        owner.verify_ownership(&dst_range)?;

        // Dispatch on which region each verified range lives in; `verify`
        // guarantees a range never straddles the stack/heap gap.
        if src_range.end() <= self.stack.len() {
            if dst_range.end() <= self.stack.len() {
                // stack -> stack
                self.stack
                    .copy_within(src_range.usizes(), dst_range.start());
            } else if dst_range.start() >= self.heap_offset() {
                // stack -> heap
                #[allow(clippy::arithmetic_side_effects)]
                // Safety: subtractions are checked above
                let dst_start = dst_range.start() - self.heap_offset();
                #[allow(clippy::arithmetic_side_effects)]
                // Safety: subtractions are checked above
                let dst_end = dst_range.end() - self.heap_offset();

                let src_array = &self.stack[src_range.usizes()];
                let dst_array = &mut self.heap[dst_start..dst_end];
                dst_array.copy_from_slice(src_array);
            } else {
                unreachable!("Range was verified to be valid")
            }
        } else if src_range.start() >= self.heap_offset() {
            #[allow(clippy::arithmetic_side_effects)]
            // Safety: subtractions are checked above
            let src_start = src_range.start() - self.heap_offset();
            #[allow(clippy::arithmetic_side_effects)]
            // Safety: subtractions are checked above
            let src_end = src_range.end() - self.heap_offset();

            if dst_range.end() <= self.stack.len() {
                // heap -> stack
                let src_array = &self.heap[src_start..src_end];

                let dst_array = &mut self.stack[dst_range.usizes()];
                dst_array.copy_from_slice(src_array);
            } else if dst_range.start() >= self.heap_offset() {
                // heap -> heap
                #[allow(clippy::arithmetic_side_effects)]
                // Safety: subtractions are checked above
                let dst_start = dst_range.start() - self.heap_offset();

                self.heap.copy_within(src_start..src_end, dst_start);
            } else {
                unreachable!("Range was verified to be valid")
            }
        } else {
            unreachable!("Range was verified to be valid")
        }

        Ok(())
    }

    /// Memory access to the raw stack buffer.
    /// Note that for efficiency reasons this might not match sp value.
    #[cfg(any(test, feature = "test-helpers"))]
    pub fn stack_raw(&self) -> &[u8] {
        &self.stack
    }

    /// Memory access to the raw heap buffer.
    /// Note that for efficiency reasons this might not match hp value.
    #[cfg(any(test, feature = "test-helpers"))]
    pub fn heap_raw(&self) -> &[u8] {
        &self.heap
    }

    /// Returns a `MemoryRollbackData` that can be used to achieve the state of the
    /// `desired_memory_state` instance.
    ///
    /// Returns `None` when both instances are already equal. Panics if the
    /// desired state has a lower heap pointer than `self`, since rollback
    /// only supports shrinking the heap.
    pub fn collect_rollback_data(
        &self,
        desired_memory_state: &MemoryInstance,
    ) -> Option<MemoryRollbackData> {
        if self == desired_memory_state {
            return None
        }

        let sp = desired_memory_state.stack.len();
        let hp = desired_memory_state.hp;

        assert!(
            hp >= self.hp,
            "We only allow shrinking of the heap during rollback"
        );

        // Only the stack prefix up to the desired sp needs diffing; anything
        // above it is discarded by `rollback` via `resize`.
        let stack_changes =
            get_changes(&self.stack[..sp], &desired_memory_state.stack[..sp], 0);

        // Diff the heap portions accessible in the *desired* state, indexing
        // each buffer relative to its own offset.
        let heap_start = hp
            .checked_sub(self.heap_offset())
            .expect("Memory is invalid, hp is out of bounds");
        let heap = &self.heap[heap_start..];
        let desired_heap_start = hp
            .checked_sub(desired_memory_state.heap_offset())
            .expect("Memory is invalid, hp is out of bounds");
        let desired_heap = &desired_memory_state.heap[desired_heap_start..];

        let heap_changes = get_changes(heap, desired_heap, hp);

        Some(MemoryRollbackData {
            sp,
            hp,
            stack_changes,
            heap_changes,
        })
    }

    /// Rollbacks the memory changes returning the memory to the old state.
    ///
    /// Panics if `data` would grow the heap (i.e. `data.hp < self.hp`) or if
    /// any recorded change falls outside the current buffers.
    pub fn rollback(&mut self, data: &MemoryRollbackData) {
        self.stack.resize(data.sp, 0);
        assert!(
            data.hp >= self.hp,
            "We only allow shrinking of the heap during rollback"
        );
        self.hp = data.hp;

        // Stack changes are stored with global addresses, which equal stack
        // indices directly (the stack starts at address 0).
        for change in &data.stack_changes {
            self.stack[change.global_start
                ..change.global_start.saturating_add(change.data.len())]
                .copy_from_slice(&change.data);
        }

        // Heap changes are global addresses too; translate them into
        // heap-buffer indices before patching.
        let offset = self.heap_offset();
        for change in &data.heap_changes {
            let local_start = change
                .global_start
                .checked_sub(offset)
                .expect("Invalid offset");
            self.heap[local_start..local_start.saturating_add(change.data.len())]
                .copy_from_slice(&change.data);
        }
    }
}
472
473fn get_changes(
474    latest_array: &[u8],
475    desired_array: &[u8],
476    offset: usize,
477) -> Vec<MemorySliceChange> {
478    let mut changes = Vec::new();
479    let mut range = None;
480    for (i, (old, new)) in latest_array.iter().zip(desired_array.iter()).enumerate() {
481        if old != new {
482            range = match range {
483                None => Some((i, 1usize)),
484                Some((start, count)) => Some((start, count.saturating_add(1))),
485            };
486        } else if let Some((start, count)) = range.take() {
487            changes.push(MemorySliceChange {
488                global_start: offset.saturating_add(start),
489                data: desired_array[start..start.saturating_add(count)].to_vec(),
490            });
491        }
492    }
493    if let Some((start, count)) = range.take() {
494        changes.push(MemorySliceChange {
495            global_start: offset.saturating_add(start),
496            data: desired_array[start..start.saturating_add(count)].to_vec(),
497        });
498    }
499    changes
500}
501
/// A contiguous run of bytes that differs between two memory states.
#[derive(Debug, Clone)]
struct MemorySliceChange {
    /// Address (in the global VM address space) where this change begins.
    global_start: usize,
    /// The bytes of the desired (target) state for this range.
    data: Vec<u8>,
}
507
/// The container for the data used to rollback memory changes.
///
/// Produced by [`MemoryInstance::collect_rollback_data`] and consumed by
/// [`MemoryInstance::rollback`].
#[derive(Debug, Clone)]
pub struct MemoryRollbackData {
    /// Desired stack pointer.
    sp: usize,
    /// Desired heap pointer. Desired heap pointer can't be less than the current one.
    hp: usize,
    /// Changes to the stack to achieve the desired state of the stack.
    stack_changes: Vec<MemorySliceChange>,
    /// Changes to the heap to achieve the desired state of the heap.
    heap_changes: Vec<MemorySliceChange>,
}
520
#[cfg(feature = "test-helpers")]
impl From<Vec<u8>> for MemoryInstance {
    /// Builds a fresh memory whose stack is pre-filled with `stack`;
    /// the heap remains empty. Test helper only.
    fn from(stack: Vec<u8>) -> Self {
        let mut memory = Self::new();
        memory.stack = stack;
        memory
    }
}
530
#[cfg(any(test, feature = "test-helpers"))]
impl Index<Range<usize>> for MemoryInstance {
    type Output = [u8];

    /// Test-only indexing; panics if the range is not accessible.
    fn index(&self, index: Range<usize>) -> &Self::Output {
        self.read(index.start, index.len())
            .expect("Memory range out of bounds")
    }
}
540
#[cfg(any(test, feature = "test-helpers"))]
impl Index<RangeFrom<usize>> for MemoryInstance {
    type Output = [u8];

    /// Test-only indexing from `index.start` to the end of VM memory;
    /// panics if the range is not accessible.
    fn index(&self, index: RangeFrom<usize>) -> &Self::Output {
        &self[index.start..MEM_SIZE]
    }
}
549
#[cfg(any(test, feature = "test-helpers"))]
impl Index<RangeTo<usize>> for MemoryInstance {
    type Output = [u8];

    /// Test-only indexing from address 0 up to `index.end`;
    /// panics if the range is not accessible.
    fn index(&self, index: RangeTo<usize>) -> &Self::Output {
        &self[0..index.end]
    }
}
558
#[cfg(any(test, feature = "test-helpers"))]
impl IndexMut<Range<usize>> for MemoryInstance {
    /// Test-only mutable indexing; skips ownership checks and panics if the
    /// range is not accessible.
    fn index_mut(&mut self, index: Range<usize>) -> &mut Self::Output {
        self.write_noownerchecks(index.start, index.len())
            .expect("Memory range out of bounds")
    }
}
566
/// Used to handle `Word` to `usize` conversions for memory addresses,
/// as well as checking that the resulting value is within the VM ram boundaries.
pub trait ToAddr {
    /// Converts a value to `usize` used for memory addresses.
    /// Returns `Err` with `MemoryOverflow` if the resulting value doesn't fit in the VM
    /// memory. This can be used for both addresses and offsets.
    fn to_addr(self) -> Result<usize, PanicReason>;
}
575
576impl ToAddr for usize {
577    fn to_addr(self) -> Result<usize, PanicReason> {
578        if self > MEM_SIZE {
579            return Err(PanicReason::MemoryOverflow)
580        }
581        Ok(self)
582    }
583}
584
585impl ToAddr for Word {
586    fn to_addr(self) -> Result<usize, PanicReason> {
587        let value = usize::try_from(self).map_err(|_| PanicReason::MemoryOverflow)?;
588        value.to_addr()
589    }
590}
591
#[cfg(feature = "test-helpers")]
/// Implemented for `i32` to allow integer literals. Panics on negative values.
impl ToAddr for i32 {
    fn to_addr(self) -> Result<usize, PanicReason> {
        // Test helper: a negative literal is a bug in the test itself.
        assert!(self >= 0, "Negative memory address");
        usize::try_from(self)
            .map_err(|_| PanicReason::MemoryOverflow)?
            .to_addr()
    }
}
603
/// A range of memory. No guarantees are made about validity of access.
///
/// Stored as a half-open `[start, end)` byte range over the VM address space.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct MemoryRange(Range<usize>);
608
609impl MemoryRange {
610    /// Create a new memory range. Cannot panic, but the range may be invalid.
611    pub const fn new(start: usize, len: usize) -> Self {
612        Self(start..start.saturating_add(len))
613    }
614
615    /// Start of the range.
616    pub fn start(&self) -> usize {
617        self.0.start
618    }
619
620    /// End of the range. One past the last byte.
621    pub fn end(&self) -> usize {
622        self.0.end
623    }
624
625    /// Is the range empty?
626    pub fn is_empty(&self) -> bool {
627        self.len() == 0
628    }
629
630    /// Length of the range.
631    pub fn len(&self) -> usize {
632        self.0.len()
633    }
634
635    /// Returns the range as a `usize` range.
636    pub fn usizes(&self) -> Range<usize> {
637        self.0.clone()
638    }
639
640    /// Returns the range as a `Word` range.
641    pub fn words(&self) -> Range<Word> {
642        self.0.start as Word..self.0.end as Word
643    }
644
645    /// Splits range at given relative offset. Panics if offset > range length.
646    pub fn split_at_offset(self, at: usize) -> (Self, Self) {
647        let mid = self.0.start.saturating_add(at);
648        assert!(mid <= self.0.end);
649        (Self(self.0.start..mid), Self(mid..self.0.end))
650    }
651}
652
// Opcode entry points. Each method splits the register file into system and
// program registers and delegates to the corresponding free function below.
impl<M, S, Tx, Ecal> Interpreter<M, S, Tx, Ecal>
where
    M: Memory,
{
    /// Return the registers used to determine ownership.
    pub(crate) fn ownership_registers(&self) -> OwnershipRegisters {
        OwnershipRegisters::new(self)
    }

    /// Applies `f` to the current `$sp` and `v`, then updates the stack
    /// pointer with the result. `f` returns the new value plus an overflow flag.
    pub(crate) fn stack_pointer_overflow<F>(&mut self, f: F, v: Word) -> SimpleResult<()>
    where
        F: FnOnce(Word, Word) -> (Word, bool),
    {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            _,
        ) = split_registers(&mut self.registers);
        stack_pointer_overflow(
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            f,
            v,
            self.memory.as_mut(),
        )
    }

    /// Pushes the bitmask-selected registers of `segment` onto the stack.
    pub(crate) fn push_selected_registers(
        &mut self,
        segment: ProgramRegistersSegment,
        bitmask: Imm24,
    ) -> SimpleResult<()> {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            program_regs,
        ) = split_registers(&mut self.registers);
        push_selected_registers(
            self.memory.as_mut(),
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            &program_regs,
            segment,
            bitmask,
        )
    }

    /// Pops the bitmask-selected registers of `segment` from the stack.
    pub(crate) fn pop_selected_registers(
        &mut self,
        segment: ProgramRegistersSegment,
        bitmask: Imm24,
    ) -> SimpleResult<()> {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            mut program_regs,
        ) = split_registers(&mut self.registers);
        pop_selected_registers(
            self.memory.as_mut(),
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            &mut program_regs,
            segment,
            bitmask,
        )
    }

    /// Loads a single byte from memory address `b + c` into register `ra`.
    pub(crate) fn load_byte(
        &mut self,
        ra: RegisterId,
        b: Word,
        c: Word,
    ) -> SimpleResult<()> {
        let (SystemRegisters { pc, .. }, mut w) = split_registers(&mut self.registers);
        let result = &mut w[WriteRegKey::try_from(ra)?];
        load_byte(self.memory.as_ref(), pc, result, b, c)
    }

    /// Loads a word from memory address `b + c * 8` into register `ra`.
    pub(crate) fn load_word(
        &mut self,
        ra: RegisterId,
        b: Word,
        c: Imm12,
    ) -> SimpleResult<()> {
        let (SystemRegisters { pc, .. }, mut w) = split_registers(&mut self.registers);
        let result = &mut w[WriteRegKey::try_from(ra)?];
        load_word(self.memory.as_ref(), pc, result, b, c)
    }

    /// Stores the lowest byte of `b` at memory address `a + c`.
    pub(crate) fn store_byte(&mut self, a: Word, b: Word, c: Word) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        store_byte(
            self.memory.as_mut(),
            owner,
            self.registers.pc_mut(),
            a,
            b,
            c,
        )
    }

    /// Stores the word `b` at memory address `a + c * 8`.
    pub(crate) fn store_word(&mut self, a: Word, b: Word, c: Imm12) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        store_word(
            self.memory.as_mut(),
            owner,
            self.registers.pc_mut(),
            a,
            b,
            c,
        )
    }

    /// Expand heap by `amount` bytes.
    pub fn allocate(&mut self, amount: Word) -> SimpleResult<()> {
        let (SystemRegisters { hp, sp, .. }, _) = split_registers(&mut self.registers);
        self.memory.as_mut().grow_heap_by(sp.as_ref(), hp, amount)?;
        Ok(())
    }

    /// Like [`Self::allocate`], but also advances `$pc`.
    pub(crate) fn malloc(&mut self, a: Word) -> SimpleResult<()> {
        let (SystemRegisters { hp, sp, pc, .. }, _) =
            split_registers(&mut self.registers);
        malloc(hp, sp.as_ref(), pc, a, self.memory.as_mut())
    }

    /// Zeroes `b` bytes of owned memory starting at address `a`.
    pub(crate) fn memclear(&mut self, a: Word, b: Word) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        memclear(self.memory.as_mut(), owner, self.registers.pc_mut(), a, b)
    }

    /// Copies `c` bytes from address `b` to owned address `a`.
    pub(crate) fn memcopy(&mut self, a: Word, b: Word, c: Word) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        memcopy(
            self.memory.as_mut(),
            owner,
            self.registers.pc_mut(),
            a,
            b,
            c,
        )
    }

    /// Sets `ra` to 1 if the `d`-byte regions at `b` and `c` are equal, else 0.
    pub(crate) fn memeq(
        &mut self,
        ra: RegisterId,
        b: Word,
        c: Word,
        d: Word,
    ) -> SimpleResult<()> {
        let (SystemRegisters { pc, .. }, mut w) = split_registers(&mut self.registers);
        let result = &mut w[WriteRegKey::try_from(ra)?];
        memeq(self.memory.as_mut(), result, pc, b, c, d)
    }
}
817
818/// Update stack pointer, checking for validity first.
819pub(crate) fn try_update_stack_pointer(
820    mut sp: RegMut<SP>,
821    ssp: Reg<SSP>,
822    hp: Reg<HP>,
823    new_sp: Word,
824    memory: &mut MemoryInstance,
825) -> SimpleResult<()> {
826    if new_sp < *ssp {
827        Err(PanicReason::MemoryOverflow.into())
828    } else if new_sp > *hp {
829        Err(PanicReason::MemoryGrowthOverlap.into())
830    } else {
831        *sp = new_sp;
832        memory.grow_stack(new_sp)?;
833        Ok(())
834    }
835}
836
837pub(crate) fn stack_pointer_overflow<F>(
838    sp: RegMut<SP>,
839    ssp: Reg<SSP>,
840    hp: Reg<HP>,
841    pc: RegMut<PC>,
842    f: F,
843    v: Word,
844    memory: &mut MemoryInstance,
845) -> SimpleResult<()>
846where
847    F: FnOnce(Word, Word) -> (Word, bool),
848{
849    let (new_sp, overflow) = f(*sp, v);
850
851    if overflow {
852        return Err(PanicReason::MemoryOverflow.into())
853    }
854
855    try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;
856    Ok(inc_pc(pc)?)
857}
858
/// Pushes the bitmask-selected registers of `segment` onto the stack
/// (big-endian, one word each, in ascending register order), growing the
/// stack as needed, then advances `$pc`.
#[allow(clippy::too_many_arguments)]
pub(crate) fn push_selected_registers(
    memory: &mut MemoryInstance,
    sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    pc: RegMut<PC>,
    program_regs: &ProgramRegisters,
    segment: ProgramRegistersSegment,
    bitmask: Imm24,
) -> SimpleResult<()> {
    let bitmask = bitmask.to_u32();

    // First update the new stack pointer, as that's the only error condition
    let count: u64 = bitmask.count_ones().into();
    let write_size = count
        .checked_mul(WORD_SIZE as u64)
        .expect("Bitmask size times 8 can never oveflow");
    let write_at = *sp;
    // If this would overflow, the stack pointer update below will fail
    let new_sp = write_at.saturating_add(write_size);
    try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;

    // Write the registers to the stack
    let mut it = memory
        .write_noownerchecks(write_at, write_size)?
        .chunks_exact_mut(WORD_SIZE);
    for (i, reg) in program_regs.segment(segment).iter().enumerate() {
        if (bitmask & (1 << i)) != 0 {
            // One word-sized chunk per set bit; counts match by construction.
            let item = it
                .next()
                .expect("Memory range mismatched with register count");
            item.copy_from_slice(&reg.to_be_bytes());
        }
    }

    Ok(inc_pc(pc)?)
}
897
/// Pops the bitmask-selected registers of `segment` from the stack
/// (inverse of `push_selected_registers`), shrinking the stack pointer,
/// then advances `$pc`.
#[allow(clippy::too_many_arguments)]
pub(crate) fn pop_selected_registers(
    memory: &mut MemoryInstance,
    sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    pc: RegMut<PC>,
    program_regs: &mut ProgramRegisters,
    segment: ProgramRegistersSegment,
    bitmask: Imm24,
) -> SimpleResult<()> {
    let bitmask = bitmask.to_u32();

    // First update the stack pointer, as that's the only error condition
    let count: u64 = bitmask.count_ones().into();
    let size_in_stack = count
        .checked_mul(WORD_SIZE as u64)
        .expect("Bitmask size times 8 can never oveflow");
    let new_sp = sp
        .checked_sub(size_in_stack)
        .ok_or(PanicReason::MemoryOverflow)?;
    try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;

    // Restore registers from the stack
    let mut it = memory.read(new_sp, size_in_stack)?.chunks_exact(WORD_SIZE);
    for (i, reg) in program_regs.segment_mut(segment).iter_mut().enumerate() {
        if (bitmask & (1 << i)) != 0 {
            // Registers were stored big-endian, one word per set bit.
            let mut buf = [0u8; WORD_SIZE];
            buf.copy_from_slice(it.next().expect("Count mismatch"));
            *reg = Word::from_be_bytes(buf);
        }
    }

    Ok(inc_pc(pc)?)
}
933
934pub(crate) fn load_byte(
935    memory: &MemoryInstance,
936    pc: RegMut<PC>,
937    result: &mut Word,
938    b: Word,
939    c: Word,
940) -> SimpleResult<()> {
941    let [b] = memory.read_bytes(b.saturating_add(c))?;
942    *result = b as Word;
943    Ok(inc_pc(pc)?)
944}
945
946pub(crate) fn load_word(
947    memory: &MemoryInstance,
948    pc: RegMut<PC>,
949    result: &mut Word,
950    b: Word,
951    c: Imm12,
952) -> SimpleResult<()> {
953    let offset = u64::from(c)
954        .checked_mul(WORD_SIZE as u64)
955        .expect("u12 * 8 cannot overflow a Word");
956    let addr = b.checked_add(offset).ok_or(PanicReason::MemoryOverflow)?;
957    *result = Word::from_be_bytes(memory.read_bytes(addr)?);
958    Ok(inc_pc(pc)?)
959}
960
961#[allow(clippy::cast_possible_truncation)]
962pub(crate) fn store_byte(
963    memory: &mut MemoryInstance,
964    owner: OwnershipRegisters,
965    pc: RegMut<PC>,
966    a: Word,
967    b: Word,
968    c: Word,
969) -> SimpleResult<()> {
970    memory.write_bytes(owner, a.saturating_add(c), [b as u8])?;
971    Ok(inc_pc(pc)?)
972}
973
974pub(crate) fn store_word(
975    memory: &mut MemoryInstance,
976    owner: OwnershipRegisters,
977    pc: RegMut<PC>,
978    a: Word,
979    b: Word,
980    c: Imm12,
981) -> SimpleResult<()> {
982    #[allow(clippy::arithmetic_side_effects)]
983    let offset = u64::from(c)
984        .checked_mul(WORD_SIZE as u64)
985        .expect("12-bits number multiplied by 8 cannot overflow a Word");
986    let addr = a.saturating_add(offset);
987    memory.write_bytes(owner, addr, b.to_be_bytes())?;
988    Ok(inc_pc(pc)?)
989}
990
991pub(crate) fn malloc(
992    hp: RegMut<HP>,
993    sp: Reg<SP>,
994    pc: RegMut<PC>,
995    amount: Word,
996    memory: &mut MemoryInstance,
997) -> SimpleResult<()> {
998    memory.grow_heap_by(sp, hp, amount)?;
999    Ok(inc_pc(pc)?)
1000}
1001
1002pub(crate) fn memclear(
1003    memory: &mut MemoryInstance,
1004    owner: OwnershipRegisters,
1005    pc: RegMut<PC>,
1006    a: Word,
1007    b: Word,
1008) -> SimpleResult<()> {
1009    memory.write(owner, a, b)?.fill(0);
1010    Ok(inc_pc(pc)?)
1011}
1012
1013pub(crate) fn memcopy(
1014    memory: &mut MemoryInstance,
1015    owner: OwnershipRegisters,
1016    pc: RegMut<PC>,
1017    dst: Word,
1018    src: Word,
1019    length: Word,
1020) -> SimpleResult<()> {
1021    memory.memcopy(dst, src, length, owner)?;
1022
1023    Ok(inc_pc(pc)?)
1024}
1025
1026pub(crate) fn memeq(
1027    memory: &mut MemoryInstance,
1028    result: &mut Word,
1029    pc: RegMut<PC>,
1030    b: Word,
1031    c: Word,
1032    d: Word,
1033) -> SimpleResult<()> {
1034    *result = (memory.read(b, d)? == memory.read(c, d)?) as Word;
1035    Ok(inc_pc(pc)?)
1036}
1037
/// Snapshot of the register values needed to decide whether a memory range
/// is writable ("owned") by the current context.
#[derive(Debug, Clone, Copy)]
pub struct OwnershipRegisters {
    /// Stack pointer; exclusive upper bound of the owned stack range.
    pub(crate) sp: u64,
    /// Saved stack pointer; lower bound of the owned stack range.
    pub(crate) ssp: u64,
    /// Heap pointer; lowest address of the owned heap range.
    pub(crate) hp: u64,
    /// Previous heap pointer, used for external contexts.
    /// Otherwise, it's just memory size.
    pub(crate) prev_hp: u64,
}
1047
impl OwnershipRegisters {
    /// Capture the ownership-relevant registers from a running interpreter.
    ///
    /// `prev_hp` comes from the parent call frame's `HP` register when a
    /// frame exists; with no frames it defaults to `VM_MAX_RAM`, i.e. the
    /// heap's upper bound is the end of addressable memory.
    pub(crate) fn new<M, S, Tx, Ecal>(vm: &Interpreter<M, S, Tx, Ecal>) -> Self {
        let prev_hp = vm
            .frames
            .last()
            .map(|frame| frame.registers()[RegId::HP])
            .unwrap_or(VM_MAX_RAM);

        OwnershipRegisters {
            sp: vm.registers[RegId::SP],
            ssp: vm.registers[RegId::SSP],
            hp: vm.registers[RegId::HP],
            prev_hp,
        }
    }

    /// Create an instance that only allows stack writes.
    ///
    /// Setting `prev_hp == hp` makes the heap ownership range empty (see
    /// `has_ownership_heap`), so only the stack segment is writable.
    pub(crate) fn only_allow_stack_write(sp: u64, ssp: u64, hp: u64) -> Self {
        debug_assert!(sp <= VM_MAX_RAM);
        debug_assert!(ssp <= VM_MAX_RAM);
        debug_assert!(hp <= VM_MAX_RAM);
        debug_assert!(ssp <= sp);
        debug_assert!(sp <= hp);
        OwnershipRegisters {
            sp,
            ssp,
            hp,
            prev_hp: hp,
        }
    }

    /// Allows all writes, whole memory is stack.allocated
    #[cfg(test)]
    pub(crate) fn test_full_stack() -> Self {
        OwnershipRegisters {
            sp: VM_MAX_RAM,
            ssp: 0,
            hp: VM_MAX_RAM,
            prev_hp: VM_MAX_RAM,
        }
    }

    /// Check that the given byte range is writable in this context,
    /// returning `PanicReason::MemoryOwnership` when it is not.
    pub(crate) fn verify_ownership(
        &self,
        range: &MemoryRange,
    ) -> Result<(), PanicReason> {
        if self.has_ownership_range(&range.words()) {
            Ok(())
        } else {
            Err(PanicReason::MemoryOwnership)
        }
    }

    /// A range is owned when it lies entirely inside the writable stack
    /// segment or entirely inside the writable heap segment.
    pub fn has_ownership_range(&self, range: &Range<Word>) -> bool {
        self.has_ownership_stack(range) || self.has_ownership_heap(range)
    }

    /// Empty range is owned iff the range.start is owned
    pub(crate) fn has_ownership_stack(&self, range: &Range<Word>) -> bool {
        // A zero-length write at exactly `ssp` is always permitted, even
        // when the writable window `ssp..sp` is itself empty.
        if range.is_empty() && range.start == self.ssp {
            return true
        }

        // Non-empty ranges must begin at or above `ssp` and strictly
        // below `sp`.
        if !(self.ssp..self.sp).contains(&range.start) {
            return false
        }

        // Defensive bound: `sp <= VM_MAX_RAM` should already make this
        // unreachable, but reject out-of-address-space ends explicitly.
        if range.end > VM_MAX_RAM {
            return false
        }

        // The exclusive end may coincide with `sp`, hence the inclusive
        // range here.
        (self.ssp..=self.sp).contains(&range.end)
    }

    /// Empty range is owned iff the range.start is owned
    pub(crate) fn has_ownership_heap(&self, range: &Range<Word>) -> bool {
        // A zero-length write at exactly `hp` is always permitted.
        if range.is_empty() && range.start == self.hp {
            return true
        }

        // Addresses below `hp` are not part of the allocated heap.
        if range.start < self.hp {
            return false
        }

        // `hp == prev_hp` means this context owns no heap at all, so
        // nothing above `hp` is writable; otherwise the range must end
        // within the current context's heap allocation (`prev_hp`).
        self.hp != self.prev_hp && range.end <= self.prev_hp
    }
}
1135
/// Attempt copy from the storage to memory, filling zero bytes when exceeding slice
/// boundaries. Performs overflow and memory range checks, but no ownership checks.
/// Note that if `src_offset` is larger than `src.len()`, the whole range will be
/// zero-filled.
///
/// Returns `no_found_error` when the key `src_id` is absent from storage, and
/// propagates storage-backend errors via `RuntimeError::Storage`.
#[allow(clippy::too_many_arguments)]
pub(crate) fn copy_from_storage_zero_fill<M, S>(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    storage: &S,
    dst_addr: Word,
    dst_len: Word,
    src_id: &M::Key,
    src_offset: u64,
    src_len: usize,
    no_found_error: PanicReason,
) -> IoResult<(), S::Error>
where
    M: Mappable,
    S: StorageRead<M>,
{
    // Bounds-check the destination range and obtain a mutable view of it.
    let write_buffer = memory.write(owner, dst_addr, dst_len)?;
    // Start (within the destination) of the tail that must be zero-filled.
    let mut empty_offset = 0;

    if src_offset < src_len as Word {
        // NOTE(review): offsets above `u32::MAX` are rejected as
        // `MemoryOverflow` even though `read` below takes a `usize` —
        // presumably a deliberate protocol bound; confirm.
        let src_offset =
            u32::try_from(src_offset).map_err(|_| PanicReason::MemoryOverflow)?;

        // Copy only as many bytes as both the source (past the offset)
        // and the destination can accommodate.
        let src_read_length = src_len.saturating_sub(src_offset as usize);
        let src_read_length = src_read_length.min(write_buffer.len());

        let (src_read_buffer, _) = write_buffer.split_at_mut(src_read_length);
        // `read` returning `Ok(None)` means the key was absent: surface
        // the caller-provided panic reason instead.
        storage
            .read(src_id, src_offset as usize, src_read_buffer)
            .transpose()
            .ok_or(no_found_error)?
            .map_err(RuntimeError::Storage)?;

        empty_offset = src_read_length;
    }

    // Zero everything the storage read did not cover.
    write_buffer[empty_offset..].fill(0);

    Ok(())
}