1#![cfg(feature = "alloc")]
2
3use super::{
4 internal::inc_pc,
5 Interpreter,
6};
7use crate::{
8 constraints::reg_key::*,
9 consts::*,
10 error::SimpleResult,
11};
12
13use fuel_asm::{
14 Imm12,
15 Imm24,
16 PanicReason,
17 RegId,
18};
19use fuel_types::{
20 fmt_truncated_hex,
21 Word,
22};
23
24use core::{
25 fmt,
26 ops::Range,
27};
28
29#[cfg(any(test, feature = "test-helpers"))]
30use core::ops::{
31 Index,
32 IndexMut,
33 RangeFrom,
34 RangeTo,
35};
36
37use crate::error::{
38 IoResult,
39 RuntimeError,
40};
41use alloc::vec::Vec;
42use fuel_storage::{
43 Mappable,
44 StorageRead,
45};
46
47#[cfg(test)]
48mod tests;
49
50#[cfg(test)]
51mod impl_tests;
52
53#[cfg(test)]
54mod allocation_tests;
55
56#[cfg(test)]
57mod stack_tests;
58
/// Abstraction over the VM memory backing store, letting the interpreter be
/// generic over owned or borrowed [`MemoryInstance`] values.
pub trait Memory: AsRef<MemoryInstance> + AsMut<MemoryInstance> {}

/// Blanket implementation: anything viewable as a `MemoryInstance` both
/// immutably and mutably can serve as VM memory.
impl<M> Memory for M where M: AsRef<MemoryInstance> + AsMut<MemoryInstance> {}
63
/// The memory of a single VM instance.
///
/// Instead of reserving the full addressable range up front, the stack and
/// heap buffers are grown on demand from opposite ends of the address space.
#[derive(Clone, Eq)]
pub struct MemoryInstance {
    /// Stack memory. Grows upwards from address 0; its length is the
    /// currently accessible stack size.
    stack: Vec<u8>,
    /// Heap memory. The buffer's end is pinned at `MEM_SIZE`, so it covers
    /// global addresses `MEM_SIZE - heap.len() .. MEM_SIZE`.
    heap: Vec<u8>,
    /// Lowest accessible heap address; mirrors the `$hp` register
    /// (see the `debug_assert_eq!` in `grow_heap_by`).
    hp: usize,
}
75
76impl Default for MemoryInstance {
77 fn default() -> Self {
78 Self::new()
79 }
80}
81
impl fmt::Debug for MemoryInstance {
    /// Formats the memory with truncated hex dumps of the stack and of the
    /// accessible part of the heap (addresses at or above `hp`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Memory {{ stack: ")?;
        fmt_truncated_hex::<16>(&self.stack, f)?;
        write!(f, ", heap: ")?;
        // Skip the part of the heap buffer below `hp`, which is not
        // accessible to the VM.
        let off = self.hp.saturating_sub(self.heap_offset());
        fmt_truncated_hex::<16>(&self.heap[off..], f)?;
        write!(f, ", hp: {} }}", self.hp)
    }
}
92
impl PartialEq for MemoryInstance {
    /// Equality compares the stack, the heap pointer, and only the
    /// *accessible* part of the heap (addresses at or above `hp`); any bytes
    /// of the heap buffer below `hp` are not observable by the VM and are
    /// therefore ignored.
    #[allow(clippy::arithmetic_side_effects)] fn eq(&self, other: &Self) -> bool {
        self.stack == other.stack && self.hp == other.hp && {
            // `hp` never drops below the heap buffer start (`grow_heap_by`
            // resizes the buffer first), so these subtractions can't underflow.
            let self_hs = self.hp - self.heap_offset();
            let other_hs = other.hp - other.heap_offset();
            self.heap[self_hs..] == other.heap[other_hs..]
        }
    }
}
104
/// Identity conversion so a bare `MemoryInstance` satisfies the `Memory`
/// trait bounds.
impl AsRef<MemoryInstance> for MemoryInstance {
    fn as_ref(&self) -> &MemoryInstance {
        self
    }
}
/// Identity conversion (mutable counterpart of the `AsRef` impl).
impl AsMut<MemoryInstance> for MemoryInstance {
    fn as_mut(&mut self) -> &mut MemoryInstance {
        self
    }
}
115
impl MemoryInstance {
    /// Creates a fresh memory instance: empty stack, empty heap buffer, and
    /// `hp` at the top of the address space (`MEM_SIZE`).
    pub fn new() -> Self {
        Self {
            stack: Vec::new(),
            heap: Vec::new(),
            hp: MEM_SIZE,
        }
    }

    /// Resets memory to its initial state while keeping the already-allocated
    /// buffers for reuse. The heap bytes are not zeroed here; moving `hp`
    /// back to `MEM_SIZE` makes them inaccessible, and `grow_heap_by` zeroes
    /// bytes again before re-exposing them.
    pub fn reset(&mut self) {
        self.stack.truncate(0);
        self.hp = MEM_SIZE;
    }

    /// Global address that maps to index 0 of the heap buffer. The heap
    /// buffer always covers `heap_offset()..MEM_SIZE`.
    fn heap_offset(&self) -> usize {
        MEM_SIZE.saturating_sub(self.heap.len())
    }

    /// Grows the stack so that addresses `0..new_sp` are valid, zero-filling
    /// newly exposed bytes. Errors if `new_sp` exceeds `VM_MAX_RAM`
    /// (`MemoryOverflow`) or would reach into the heap (`MemoryGrowthOverlap`).
    pub fn grow_stack(&mut self, new_sp: Word) -> Result<(), PanicReason> {
        if new_sp > VM_MAX_RAM {
            return Err(PanicReason::MemoryOverflow);
        }
        // Truncation is safe: new_sp <= VM_MAX_RAM was checked above.
        #[allow(clippy::cast_possible_truncation)] let new_sp = new_sp as usize;

        if new_sp > self.stack.len() {
            if new_sp > self.hp {
                return Err(PanicReason::MemoryGrowthOverlap)
            }

            // `Vec::resize` zero-initializes the newly exposed bytes.
            self.stack.resize(new_sp, 0);
        }
        Ok(())
    }

    /// Grows the heap downwards by `amount` bytes, updating both `self.hp`
    /// and the `$hp` register, and zeroing the newly exposed bytes.
    ///
    /// Errors with `MemoryOverflow` if the heap would grow below address 0,
    /// and with `MemoryGrowthOverlap` if the new `hp` would cross `$sp`.
    pub fn grow_heap_by(
        &mut self,
        sp_reg: Reg<SP>,
        mut hp_reg: RegMut<HP>,
        amount: Word,
    ) -> Result<(), PanicReason> {
        debug_assert_eq!(
            self.hp as Word, *hp_reg,
            "HP register changed without memory update"
        );

        let amount = usize::try_from(amount).map_err(|_| PanicReason::MemoryOverflow)?;
        let new_hp = self
            .hp
            .checked_sub(amount)
            .ok_or(PanicReason::MemoryOverflow)?;

        if (new_hp as Word) < *sp_reg {
            return Err(PanicReason::MemoryGrowthOverlap)
        }

        // Required heap buffer size; new_hp <= MEM_SIZE, so this can't overflow.
        #[allow(clippy::arithmetic_side_effects)] let new_len = MEM_SIZE - new_hp;

        #[allow(clippy::arithmetic_side_effects)] if self.heap.len() >= new_len {
            // Buffer is already large enough: just zero the bytes that become
            // accessible, i.e. global addresses `new_hp..old hp`.
            let start = new_hp - self.heap_offset();
            let end = self.hp - self.heap_offset();
            self.heap[start..end].fill(0);
        } else {
            // Reallocate with power-of-two capacity (clamped to [256, MEM_SIZE])
            // and shift the old contents to the end of the buffer so they keep
            // their global addresses (the buffer's end is pinned at MEM_SIZE).
            let cap = new_len.next_power_of_two().clamp(256, MEM_SIZE);
            let old_len = self.heap.len();
            let prefix_zeroes = cap - old_len;
            self.heap.resize(cap, 0);
            self.heap.copy_within(..old_len, prefix_zeroes);
            // Zero the newly exposed prefix below the old contents.
            self.heap[..prefix_zeroes].fill(0);
        }

        self.hp = new_hp;
        *hp_reg = new_hp as Word;

        // Ensure the stack never overlaps the heap region.
        self.stack.truncate(new_hp);

        Ok(())
    }

    /// Verifies that `[addr, addr + count)` lies fully within accessible
    /// memory (stack or allocated heap) and returns the checked range.
    /// Errors with `MemoryOverflow` past `MEM_SIZE`, or
    /// `UninitalizedMemoryAccess` (variant spelling comes from `fuel_asm`)
    /// if the range touches the gap between stack and heap.
    pub fn verify<A: ToAddr, B: ToAddr>(
        &self,
        addr: A,
        count: B,
    ) -> Result<MemoryRange, PanicReason> {
        let start = addr.to_addr()?;
        let len = count.to_addr()?;
        let end = start.saturating_add(len);
        if end > MEM_SIZE {
            return Err(PanicReason::MemoryOverflow)
        }

        // A valid range lies entirely in the stack or entirely in the heap.
        if end <= self.stack.len() || start >= self.hp {
            Ok(MemoryRange(start..end))
        } else {
            Err(PanicReason::UninitalizedMemoryAccess)
        }
    }

    /// Like `verify`, but with a compile-time-constant length.
    pub fn verify_const<A: ToAddr, const C: usize>(
        &self,
        addr: A,
    ) -> Result<MemoryRange, PanicReason> {
        self.verify(addr, C)
    }

    /// Returns a read-only slice over `[addr, addr + count)` after verifying
    /// the range is accessible.
    #[allow(clippy::arithmetic_side_effects)] pub fn read<A: ToAddr, C: ToAddr>(
        &self,
        addr: A,
        count: C,
    ) -> Result<&[u8], PanicReason> {
        let range = self.verify(addr, count)?;

        if range.end() <= self.stack.len() {
            Ok(&self.stack[range.usizes()])
        } else if range.start() >= self.heap_offset() {
            // Translate global addresses into heap-buffer indices.
            let start = range.start() - self.heap_offset();
            let end = range.end() - self.heap_offset();
            Ok(&self.heap[start..end])
        } else {
            unreachable!("Range was verified to be valid")
        }
    }

    /// Reads a fixed-size byte array starting at `at`.
    pub fn read_bytes<A: ToAddr, const C: usize>(
        &self,
        at: A,
    ) -> Result<[u8; C], PanicReason> {
        let mut result = [0; C];
        result.copy_from_slice(self.read(at, C)?);
        Ok(result)
    }

    /// Returns a mutable slice over `[addr, addr + len)` after bounds
    /// verification, WITHOUT ownership checks; see `write` for the variant
    /// that also enforces ownership.
    #[allow(clippy::arithmetic_side_effects)] pub fn write_noownerchecks<A: ToAddr, B: ToAddr>(
        &mut self,
        addr: A,
        len: B,
    ) -> Result<&mut [u8], PanicReason> {
        let range = self.verify(addr, len)?;
        if range.end() <= self.stack.len() {
            Ok(&mut self.stack[range.usizes()])
        } else if range.start() >= self.heap_offset() {
            // Translate global addresses into heap-buffer indices.
            let start = range.start() - self.heap_offset();
            let end = range.end() - self.heap_offset();
            Ok(&mut self.heap[start..end])
        } else {
            unreachable!("Range was verified to be valid")
        }
    }

    /// Writes a fixed-size byte array at `addr` without ownership checks.
    pub fn write_bytes_noownerchecks<A: ToAddr, const C: usize>(
        &mut self,
        addr: A,
        data: [u8; C],
    ) -> Result<(), PanicReason> {
        self.write_noownerchecks(addr, C)?.copy_from_slice(&data);
        Ok(())
    }

    /// Returns a mutable slice over `[addr, addr + len)` after verifying both
    /// memory bounds and that `owner` may write to the whole range.
    pub fn write<A: ToAddr, C: ToAddr>(
        &mut self,
        owner: OwnershipRegisters,
        addr: A,
        len: C,
    ) -> Result<&mut [u8], PanicReason> {
        let range = self.verify(addr, len)?;
        owner.verify_ownership(&range)?;
        self.write_noownerchecks(range.start(), range.len())
    }

    /// Writes a fixed-size byte array at `addr` with ownership checks.
    pub fn write_bytes<A: ToAddr, const C: usize>(
        &mut self,
        owner: OwnershipRegisters,
        addr: A,
        data: [u8; C],
    ) -> Result<(), PanicReason> {
        self.write(owner, addr, data.len())?.copy_from_slice(&data);
        Ok(())
    }

    /// Copies `length` bytes from `src` to `dst`. Overlapping ranges are
    /// rejected with `MemoryWriteOverlap`, and the destination range must be
    /// writable by `owner`.
    #[inline]
    #[track_caller]
    pub fn memcopy(
        &mut self,
        dst: Word,
        src: Word,
        length: Word,
        owner: OwnershipRegisters,
    ) -> Result<(), PanicReason> {
        let dst_range = self.verify(dst, length)?;
        let src_range = self.verify(src, length)?;

        // Reject any overlap between source and destination.
        if dst_range.start() <= src_range.start() && src_range.start() < dst_range.end()
            || src_range.start() <= dst_range.start()
            && dst_range.start() < src_range.end()
            || dst_range.start() < src_range.end() && src_range.end() <= dst_range.end()
            || src_range.start() < dst_range.end() && dst_range.end() <= src_range.end()
        {
            return Err(PanicReason::MemoryWriteOverlap)
        }

        owner.verify_ownership(&dst_range)?;

        // Dispatch on which region (stack or heap) each range falls into;
        // `verify` guarantees each range is fully within one region.
        if src_range.end() <= self.stack.len() {
            if dst_range.end() <= self.stack.len() {
                // stack -> stack
                self.stack
                    .copy_within(src_range.usizes(), dst_range.start());
            } else if dst_range.start() >= self.heap_offset() {
                // stack -> heap
                #[allow(clippy::arithmetic_side_effects)]
                let dst_start = dst_range.start() - self.heap_offset();
                #[allow(clippy::arithmetic_side_effects)]
                let dst_end = dst_range.end() - self.heap_offset();
                let src_array = &self.stack[src_range.usizes()];
                let dst_array = &mut self.heap[dst_start..dst_end];
                dst_array.copy_from_slice(src_array);
            } else {
                unreachable!("Range was verified to be valid")
            }
        } else if src_range.start() >= self.heap_offset() {
            #[allow(clippy::arithmetic_side_effects)]
            let src_start = src_range.start() - self.heap_offset();
            #[allow(clippy::arithmetic_side_effects)]
            let src_end = src_range.end() - self.heap_offset();
            if dst_range.end() <= self.stack.len() {
                // heap -> stack
                let src_array = &self.heap[src_start..src_end];

                let dst_array = &mut self.stack[dst_range.usizes()];
                dst_array.copy_from_slice(src_array);
            } else if dst_range.start() >= self.heap_offset() {
                // heap -> heap
                #[allow(clippy::arithmetic_side_effects)]
                let dst_start = dst_range.start() - self.heap_offset();
                self.heap.copy_within(src_start..src_end, dst_start);
            } else {
                unreachable!("Range was verified to be valid")
            }
        } else {
            unreachable!("Range was verified to be valid")
        }

        Ok(())
    }

    /// Test helper: raw view of the stack buffer.
    #[cfg(any(test, feature = "test-helpers"))]
    pub fn stack_raw(&self) -> &[u8] {
        &self.stack
    }

    /// Test helper: raw view of the heap buffer (including the inaccessible
    /// prefix below `hp`).
    #[cfg(any(test, feature = "test-helpers"))]
    pub fn heap_raw(&self) -> &[u8] {
        &self.heap
    }

    /// Diffs `self` against `desired_memory_state` and returns the data
    /// needed to roll `self` back to that state, or `None` if they are equal.
    /// Panics unless the desired heap is no larger than the current one
    /// (desired `hp` >= current `hp`) — only heap shrinking is supported.
    pub fn collect_rollback_data(
        &self,
        desired_memory_state: &MemoryInstance,
    ) -> Option<MemoryRollbackData> {
        if self == desired_memory_state {
            return None
        }

        let sp = desired_memory_state.stack.len();
        let hp = desired_memory_state.hp;

        assert!(
            hp >= self.hp,
            "We only allow shrinking of the heap during rollback"
        );

        // NOTE(review): assumes the desired stack is not longer than the
        // current one, otherwise the slicing below panics — confirm callers
        // uphold this, mirroring the heap assertion above.
        let stack_changes =
            get_changes(&self.stack[..sp], &desired_memory_state.stack[..sp], 0);

        // Compare only the part of each heap that is accessible in the
        // desired state (global addresses `hp..MEM_SIZE`); both slices have
        // the same length by construction.
        let heap_start = hp
            .checked_sub(self.heap_offset())
            .expect("Memory is invalid, hp is out of bounds");
        let heap = &self.heap[heap_start..];
        let desired_heap_start = hp
            .checked_sub(desired_memory_state.heap_offset())
            .expect("Memory is invalid, hp is out of bounds");
        let desired_heap = &desired_memory_state.heap[desired_heap_start..];

        let heap_changes = get_changes(heap, desired_heap, hp);

        Some(MemoryRollbackData {
            sp,
            hp,
            stack_changes,
            heap_changes,
        })
    }

    /// Applies rollback data produced by `collect_rollback_data`: restores
    /// the stack length and heap pointer, then overwrites the changed byte
    /// ranges. Panics if the data would grow the heap.
    pub fn rollback(&mut self, data: &MemoryRollbackData) {
        self.stack.resize(data.sp, 0);
        assert!(
            data.hp >= self.hp,
            "We only allow shrinking of the heap during rollback"
        );
        self.hp = data.hp;

        for change in &data.stack_changes {
            self.stack[change.global_start
                ..change.global_start.saturating_add(change.data.len())]
                .copy_from_slice(&change.data);
        }

        // Heap changes are stored with global addresses; convert to
        // heap-buffer indices before writing.
        let offset = self.heap_offset();
        for change in &data.heap_changes {
            let local_start = change
                .global_start
                .checked_sub(offset)
                .expect("Invalid offset");
            self.heap[local_start..local_start.saturating_add(change.data.len())]
                .copy_from_slice(&change.data);
        }
    }
}
471
472fn get_changes(
473 latest_array: &[u8],
474 desired_array: &[u8],
475 offset: usize,
476) -> Vec<MemorySliceChange> {
477 let mut changes = Vec::new();
478 let mut range = None;
479 for (i, (old, new)) in latest_array.iter().zip(desired_array.iter()).enumerate() {
480 if old != new {
481 range = match range {
482 None => Some((i, 1usize)),
483 Some((start, count)) => Some((start, count.saturating_add(1))),
484 };
485 } else if let Some((start, count)) = range.take() {
486 changes.push(MemorySliceChange {
487 global_start: offset.saturating_add(start),
488 data: desired_array[start..start.saturating_add(count)].to_vec(),
489 });
490 }
491 }
492 if let Some((start, count)) = range.take() {
493 changes.push(MemorySliceChange {
494 global_start: offset.saturating_add(start),
495 data: desired_array[start..start.saturating_add(count)].to_vec(),
496 });
497 }
498 changes
499}
500
/// A single contiguous run of changed bytes, addressed globally.
#[derive(Debug, Clone)]
struct MemorySliceChange {
    /// Global start address of the changed range.
    global_start: usize,
    /// Replacement bytes for the range.
    data: Vec<u8>,
}
506
/// The difference between two memory states, produced by
/// `MemoryInstance::collect_rollback_data` and consumed by
/// `MemoryInstance::rollback`.
#[derive(Debug, Clone)]
pub struct MemoryRollbackData {
    /// Target stack length to restore.
    sp: usize,
    /// Target heap pointer to restore; must not be below the current `hp`.
    hp: usize,
    /// Byte ranges of the stack to overwrite.
    stack_changes: Vec<MemorySliceChange>,
    /// Byte ranges of the heap to overwrite, in global addresses.
    heap_changes: Vec<MemorySliceChange>,
}
519
#[cfg(feature = "test-helpers")]
impl From<Vec<u8>> for MemoryInstance {
    /// Test helper: builds a memory instance whose stack is pre-populated
    /// with the given bytes; heap and `hp` keep their initial values.
    fn from(stack: Vec<u8>) -> Self {
        Self {
            stack,
            ..Self::new()
        }
    }
}
529
/// Test-only convenience indexing; panics on inaccessible ranges instead of
/// returning the `PanicReason` error.
#[cfg(any(test, feature = "test-helpers"))]
impl Index<Range<usize>> for MemoryInstance {
    type Output = [u8];

    fn index(&self, index: Range<usize>) -> &Self::Output {
        self.read(index.start, index.len())
            .expect("Memory range out of bounds")
    }
}

/// Test-only convenience indexing from `index.start` to the end of memory.
#[cfg(any(test, feature = "test-helpers"))]
impl Index<RangeFrom<usize>> for MemoryInstance {
    type Output = [u8];

    fn index(&self, index: RangeFrom<usize>) -> &Self::Output {
        &self[index.start..MEM_SIZE]
    }
}

/// Test-only convenience indexing from address 0 up to `index.end`.
#[cfg(any(test, feature = "test-helpers"))]
impl Index<RangeTo<usize>> for MemoryInstance {
    type Output = [u8];

    fn index(&self, index: RangeTo<usize>) -> &Self::Output {
        &self[0..index.end]
    }
}

/// Test-only mutable indexing; bypasses ownership checks and panics on
/// inaccessible ranges.
#[cfg(any(test, feature = "test-helpers"))]
impl IndexMut<Range<usize>> for MemoryInstance {
    fn index_mut(&mut self, index: Range<usize>) -> &mut Self::Output {
        self.write_noownerchecks(index.start, index.len())
            .expect("Memory range out of bounds")
    }
}
565
/// Conversion of a value into a checked VM memory address.
pub trait ToAddr {
    /// Converts to a `usize` address, or errors with `MemoryOverflow` when
    /// the value does not fit into the VM address space.
    fn to_addr(self) -> Result<usize, PanicReason>;
}
574
575impl ToAddr for usize {
576 fn to_addr(self) -> Result<usize, PanicReason> {
577 if self > MEM_SIZE {
578 return Err(PanicReason::MemoryOverflow)
579 }
580 Ok(self)
581 }
582}
583
584impl ToAddr for Word {
585 fn to_addr(self) -> Result<usize, PanicReason> {
586 let value = usize::try_from(self).map_err(|_| PanicReason::MemoryOverflow)?;
587 value.to_addr()
588 }
589}
590
/// Test helper allowing plain integer literals to be used as addresses.
#[cfg(feature = "test-helpers")]
impl ToAddr for i32 {
    /// Panics (rather than erroring) on negative values, so tests fail loudly.
    fn to_addr(self) -> Result<usize, PanicReason> {
        if self < 0 {
            panic!("Negative memory address");
        }
        let value = usize::try_from(self).map_err(|_| PanicReason::MemoryOverflow)?;
        value.to_addr()
    }
}
602
/// A half-open (`start..end`) range of VM memory addresses.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct MemoryRange(Range<usize>);
607
608impl MemoryRange {
609 pub const fn new(start: usize, len: usize) -> Self {
611 Self(start..start.saturating_add(len))
612 }
613
614 pub fn start(&self) -> usize {
616 self.0.start
617 }
618
619 pub fn end(&self) -> usize {
621 self.0.end
622 }
623
624 pub fn is_empty(&self) -> bool {
626 self.len() == 0
627 }
628
629 pub fn len(&self) -> usize {
631 self.0.len()
632 }
633
634 pub fn usizes(&self) -> Range<usize> {
636 self.0.clone()
637 }
638
639 pub fn words(&self) -> Range<Word> {
641 self.0.start as Word..self.0.end as Word
642 }
643
644 pub fn split_at_offset(self, at: usize) -> (Self, Self) {
646 let mid = self.0.start.saturating_add(at);
647 assert!(mid <= self.0.end);
648 (Self(self.0.start..mid), Self(mid..self.0.end))
649 }
650}
651
impl<M, S, Tx, Ecal, V> Interpreter<M, S, Tx, Ecal, V>
where
    M: Memory,
{
    /// Snapshot of the registers used for memory ownership checks.
    pub(crate) fn ownership_registers(&self) -> OwnershipRegisters {
        OwnershipRegisters::new(self)
    }

    /// Applies `f` to the current `$sp` and `v`, then validates and commits
    /// the new stack pointer; delegates to the free `stack_pointer_overflow`.
    pub(crate) fn stack_pointer_overflow<F>(&mut self, f: F, v: Word) -> SimpleResult<()>
    where
        F: FnOnce(Word, Word) -> (Word, bool),
    {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            _,
        ) = split_registers(&mut self.registers);
        stack_pointer_overflow(
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            f,
            v,
            self.memory.as_mut(),
        )
    }

    /// Pushes the registers of `segment` selected by `bitmask` onto the stack.
    pub(crate) fn push_selected_registers(
        &mut self,
        segment: ProgramRegistersSegment,
        bitmask: Imm24,
    ) -> SimpleResult<()> {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            program_regs,
        ) = split_registers(&mut self.registers);
        push_selected_registers(
            self.memory.as_mut(),
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            &program_regs,
            segment,
            bitmask,
        )
    }

    /// Pops the registers of `segment` selected by `bitmask` from the stack.
    pub(crate) fn pop_selected_registers(
        &mut self,
        segment: ProgramRegistersSegment,
        bitmask: Imm24,
    ) -> SimpleResult<()> {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            mut program_regs,
        ) = split_registers(&mut self.registers);
        pop_selected_registers(
            self.memory.as_mut(),
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            &mut program_regs,
            segment,
            bitmask,
        )
    }

    /// Loads a single byte into register `ra`; see the free `load_byte`.
    pub(crate) fn load_byte(&mut self, ra: RegId, b: Word, c: Word) -> SimpleResult<()> {
        let (SystemRegisters { pc, .. }, mut w) = split_registers(&mut self.registers);
        let result = &mut w[WriteRegKey::try_from(ra)?];
        load_byte(self.memory.as_ref(), pc, result, b, c)
    }

    /// Loads a word into register `ra`; see the free `load_word`.
    pub(crate) fn load_word(&mut self, ra: RegId, b: Word, c: Imm12) -> SimpleResult<()> {
        let (SystemRegisters { pc, .. }, mut w) = split_registers(&mut self.registers);
        let result = &mut w[WriteRegKey::try_from(ra)?];
        load_word(self.memory.as_ref(), pc, result, b, c)
    }

    /// Stores a single byte into memory; see the free `store_byte`.
    pub(crate) fn store_byte(&mut self, a: Word, b: Word, c: Word) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        store_byte(
            self.memory.as_mut(),
            owner,
            self.registers.pc_mut(),
            a,
            b,
            c,
        )
    }

    /// Stores a word into memory; see the free `store_word`.
    pub(crate) fn store_word(&mut self, a: Word, b: Word, c: Imm12) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        store_word(
            self.memory.as_mut(),
            owner,
            self.registers.pc_mut(),
            a,
            b,
            c,
        )
    }

    /// Grows the heap by `amount` bytes without advancing `$pc`
    /// (contrast with `malloc`, which does advance it).
    pub fn allocate(&mut self, amount: Word) -> SimpleResult<()> {
        let (SystemRegisters { hp, sp, .. }, _) = split_registers(&mut self.registers);
        self.memory.as_mut().grow_heap_by(sp.as_ref(), hp, amount)?;
        Ok(())
    }

    /// Heap allocation opcode handler; see the free `malloc`.
    pub(crate) fn malloc(&mut self, a: Word) -> SimpleResult<()> {
        let (SystemRegisters { hp, sp, pc, .. }, _) =
            split_registers(&mut self.registers);
        malloc(hp, sp.as_ref(), pc, a, self.memory.as_mut())
    }

    /// Zeroes `b` bytes at `a`; see the free `memclear`.
    pub(crate) fn memclear(&mut self, a: Word, b: Word) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        memclear(self.memory.as_mut(), owner, self.registers.pc_mut(), a, b)
    }

    /// Copies `c` bytes from `b` to `a`; see the free `memcopy`.
    pub(crate) fn memcopy(&mut self, a: Word, b: Word, c: Word) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        memcopy(
            self.memory.as_mut(),
            owner,
            self.registers.pc_mut(),
            a,
            b,
            c,
        )
    }

    /// Compares two memory ranges, writing the result into `ra`;
    /// see the free `memeq`.
    pub(crate) fn memeq(
        &mut self,
        ra: RegId,
        b: Word,
        c: Word,
        d: Word,
    ) -> SimpleResult<()> {
        let (SystemRegisters { pc, .. }, mut w) = split_registers(&mut self.registers);
        let result = &mut w[WriteRegKey::try_from(ra)?];
        memeq(self.memory.as_mut(), result, pc, b, c, d)
    }
}
806
/// Validates a new stack pointer value against `$ssp` (may not shrink below
/// the stack start) and `$hp` (may not grow into the heap), then commits it
/// to the register and grows the backing stack memory.
pub(crate) fn try_update_stack_pointer(
    mut sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    new_sp: Word,
    memory: &mut MemoryInstance,
) -> SimpleResult<()> {
    if new_sp < *ssp {
        Err(PanicReason::MemoryOverflow.into())
    } else if new_sp > *hp {
        Err(PanicReason::MemoryGrowthOverlap.into())
    } else {
        // NOTE(review): the register is written before `grow_stack`; if the
        // growth fails, `$sp` has already changed — confirm this is the
        // intended behavior on the error path.
        *sp = new_sp;
        memory.grow_stack(new_sp)?;
        Ok(())
    }
}
825
826pub(crate) fn stack_pointer_overflow<F>(
827 sp: RegMut<SP>,
828 ssp: Reg<SSP>,
829 hp: Reg<HP>,
830 pc: RegMut<PC>,
831 f: F,
832 v: Word,
833 memory: &mut MemoryInstance,
834) -> SimpleResult<()>
835where
836 F: FnOnce(Word, Word) -> (Word, bool),
837{
838 let (new_sp, overflow) = f(*sp, v);
839
840 if overflow {
841 return Err(PanicReason::MemoryOverflow.into())
842 }
843
844 try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;
845 Ok(inc_pc(pc)?)
846}
847
848#[allow(clippy::too_many_arguments)]
849pub(crate) fn push_selected_registers(
850 memory: &mut MemoryInstance,
851 sp: RegMut<SP>,
852 ssp: Reg<SSP>,
853 hp: Reg<HP>,
854 pc: RegMut<PC>,
855 program_regs: &ProgramRegisters,
856 segment: ProgramRegistersSegment,
857 bitmask: Imm24,
858) -> SimpleResult<()> {
859 let bitmask = bitmask.to_u32();
860
861 let count: u64 = bitmask.count_ones().into();
863 let write_size = count
864 .checked_mul(WORD_SIZE as u64)
865 .expect("Bitmask size times 8 can never oveflow");
866 let write_at = *sp;
867 let new_sp = write_at.saturating_add(write_size);
869 try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;
870
871 let mut it = memory
873 .write_noownerchecks(write_at, write_size)?
874 .chunks_exact_mut(WORD_SIZE);
875 for (i, reg) in program_regs.segment(segment).iter().enumerate() {
876 if (bitmask & (1 << i)) != 0 {
877 let item = it
878 .next()
879 .expect("Memory range mismatched with register count");
880 item.copy_from_slice(®.to_be_bytes());
881 }
882 }
883
884 Ok(inc_pc(pc)?)
885}
886
887#[allow(clippy::too_many_arguments)]
888pub(crate) fn pop_selected_registers(
889 memory: &mut MemoryInstance,
890 sp: RegMut<SP>,
891 ssp: Reg<SSP>,
892 hp: Reg<HP>,
893 pc: RegMut<PC>,
894 program_regs: &mut ProgramRegisters,
895 segment: ProgramRegistersSegment,
896 bitmask: Imm24,
897) -> SimpleResult<()> {
898 let bitmask = bitmask.to_u32();
899
900 let count: u64 = bitmask.count_ones().into();
902 let size_in_stack = count
903 .checked_mul(WORD_SIZE as u64)
904 .expect("Bitmask size times 8 can never oveflow");
905 let new_sp = sp
906 .checked_sub(size_in_stack)
907 .ok_or(PanicReason::MemoryOverflow)?;
908 try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;
909
910 let mut it = memory.read(new_sp, size_in_stack)?.chunks_exact(WORD_SIZE);
912 for (i, reg) in program_regs.segment_mut(segment).iter_mut().enumerate() {
913 if (bitmask & (1 << i)) != 0 {
914 let mut buf = [0u8; WORD_SIZE];
915 buf.copy_from_slice(it.next().expect("Count mismatch"));
916 *reg = Word::from_be_bytes(buf);
917 }
918 }
919
920 Ok(inc_pc(pc)?)
921}
922
923pub(crate) fn load_byte(
924 memory: &MemoryInstance,
925 pc: RegMut<PC>,
926 result: &mut Word,
927 b: Word,
928 c: Word,
929) -> SimpleResult<()> {
930 let [b] = memory.read_bytes(b.saturating_add(c))?;
931 *result = b as Word;
932 Ok(inc_pc(pc)?)
933}
934
/// Loads the word at address `b + c * 8` (`c` is a word-granularity offset)
/// into `result`, then advances `$pc`.
pub(crate) fn load_word(
    memory: &MemoryInstance,
    pc: RegMut<PC>,
    result: &mut Word,
    b: Word,
    c: Imm12,
) -> SimpleResult<()> {
    // `c` is a 12-bit immediate, so `c * 8` always fits in a Word.
    let offset = u64::from(c)
        .checked_mul(WORD_SIZE as u64)
        .expect("u12 * 8 cannot overflow a Word");
    let addr = b.checked_add(offset).ok_or(PanicReason::MemoryOverflow)?;
    *result = Word::from_be_bytes(memory.read_bytes(addr)?);
    Ok(inc_pc(pc)?)
}
949
950#[allow(clippy::cast_possible_truncation)]
951pub(crate) fn store_byte(
952 memory: &mut MemoryInstance,
953 owner: OwnershipRegisters,
954 pc: RegMut<PC>,
955 a: Word,
956 b: Word,
957 c: Word,
958) -> SimpleResult<()> {
959 memory.write_bytes(owner, a.saturating_add(c), [b as u8])?;
960 Ok(inc_pc(pc)?)
961}
962
/// Stores `b` as a big-endian word at address `a + c * 8` (`c` is a
/// word-granularity offset, ownership-checked), then advances `$pc`.
pub(crate) fn store_word(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    pc: RegMut<PC>,
    a: Word,
    b: Word,
    c: Imm12,
) -> SimpleResult<()> {
    // `c` is a 12-bit immediate, so `c * 8` always fits in a Word.
    #[allow(clippy::arithmetic_side_effects)]
    let offset = u64::from(c)
        .checked_mul(WORD_SIZE as u64)
        .expect("12-bits number multiplied by 8 cannot overflow a Word");
    // Saturation is fine: an out-of-range address is rejected by `write_bytes`.
    let addr = a.saturating_add(offset);
    memory.write_bytes(owner, addr, b.to_be_bytes())?;
    Ok(inc_pc(pc)?)
}
979
/// Heap allocation: grows the heap downwards by `amount` bytes (updating
/// `$hp`), then advances `$pc`.
pub(crate) fn malloc(
    hp: RegMut<HP>,
    sp: Reg<SP>,
    pc: RegMut<PC>,
    amount: Word,
    memory: &mut MemoryInstance,
) -> SimpleResult<()> {
    memory.grow_heap_by(sp, hp, amount)?;
    Ok(inc_pc(pc)?)
}
990
/// Zeroes `b` bytes starting at address `a` (ownership-checked), then
/// advances `$pc`.
pub(crate) fn memclear(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    pc: RegMut<PC>,
    a: Word,
    b: Word,
) -> SimpleResult<()> {
    memory.write(owner, a, b)?.fill(0);
    Ok(inc_pc(pc)?)
}
1001
/// Copies `length` bytes from `src` to `dst` (ranges must not overlap and
/// the destination must be owned), then advances `$pc`.
pub(crate) fn memcopy(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    pc: RegMut<PC>,
    dst: Word,
    src: Word,
    length: Word,
) -> SimpleResult<()> {
    memory.memcopy(dst, src, length, owner)?;

    Ok(inc_pc(pc)?)
}
1014
1015pub(crate) fn memeq(
1016 memory: &mut MemoryInstance,
1017 result: &mut Word,
1018 pc: RegMut<PC>,
1019 b: Word,
1020 c: Word,
1021 d: Word,
1022) -> SimpleResult<()> {
1023 *result = (memory.read(b, d)? == memory.read(c, d)?) as Word;
1024 Ok(inc_pc(pc)?)
1025}
1026
/// Snapshot of the registers needed to decide whether the current context is
/// allowed to write a given memory range.
#[derive(Debug, Clone, Copy)]
pub struct OwnershipRegisters {
    /// Current stack pointer (`$sp`).
    pub(crate) sp: u64,
    /// Current stack start pointer (`$ssp`).
    pub(crate) ssp: u64,
    /// Current heap pointer (`$hp`).
    pub(crate) hp: u64,
    /// Heap pointer of the caller's frame, or `VM_MAX_RAM` when there is no
    /// parent frame (see `OwnershipRegisters::new`).
    pub(crate) prev_hp: u64,
}
1036
impl OwnershipRegisters {
    /// Builds the ownership snapshot from the VM's current registers. The
    /// parent frame's `$hp` (or `VM_MAX_RAM` when there is no frame) bounds
    /// the heap region this context owns.
    pub(crate) fn new<M, S, Tx, Ecal, V>(vm: &Interpreter<M, S, Tx, Ecal, V>) -> Self {
        let prev_hp = vm
            .frames
            .last()
            .map(|frame| frame.registers()[RegId::HP])
            .unwrap_or(VM_MAX_RAM);

        OwnershipRegisters {
            sp: vm.registers[RegId::SP],
            ssp: vm.registers[RegId::SSP],
            hp: vm.registers[RegId::HP],
            prev_hp,
        }
    }

    /// Constructs ownership registers that permit writes only to the stack
    /// region `[ssp, sp)`. Setting `prev_hp == hp` makes the heap check in
    /// `has_ownership_heap` fail for all non-empty ranges.
    pub(crate) fn only_allow_stack_write(sp: u64, ssp: u64, hp: u64) -> Self {
        debug_assert!(sp <= VM_MAX_RAM);
        debug_assert!(ssp <= VM_MAX_RAM);
        debug_assert!(hp <= VM_MAX_RAM);
        debug_assert!(ssp <= sp);
        debug_assert!(sp <= hp);
        OwnershipRegisters {
            sp,
            ssp,
            hp,
            prev_hp: hp,
        }
    }

    /// Test helper granting ownership of the entire stack address space.
    #[cfg(test)]
    pub(crate) fn test_full_stack() -> Self {
        OwnershipRegisters {
            sp: VM_MAX_RAM,
            ssp: 0,
            hp: VM_MAX_RAM,
            prev_hp: VM_MAX_RAM,
        }
    }

    /// Errors with `MemoryOwnership` unless the whole `range` is writable in
    /// the current context.
    pub(crate) fn verify_ownership(
        &self,
        range: &MemoryRange,
    ) -> Result<(), PanicReason> {
        if self.has_ownership_range(&range.words()) {
            Ok(())
        } else {
            Err(PanicReason::MemoryOwnership)
        }
    }

    /// A range is owned if it lies fully within the stack region or fully
    /// within the heap region of the current context.
    pub fn has_ownership_range(&self, range: &Range<Word>) -> bool {
        self.has_ownership_stack(range) || self.has_ownership_heap(range)
    }

    /// Checks whether `range` lies within the owned stack region `[ssp, sp)`.
    pub(crate) fn has_ownership_stack(&self, range: &Range<Word>) -> bool {
        // Zero-length writes exactly at `$ssp` are permitted.
        if range.is_empty() && range.start == self.ssp {
            return true
        }

        if !(self.ssp..self.sp).contains(&range.start) {
            return false
        }

        if range.end > VM_MAX_RAM {
            return false
        }

        // The exclusive end may equal `$sp` (write up to the stack top).
        (self.ssp..=self.sp).contains(&range.end)
    }

    /// Checks whether `range` lies within the owned heap region
    /// `[hp, prev_hp]`.
    pub(crate) fn has_ownership_heap(&self, range: &Range<Word>) -> bool {
        // Zero-length writes exactly at `$hp` are permitted.
        if range.is_empty() && range.start == self.hp {
            return true
        }

        if range.start < self.hp {
            return false
        }

        // `hp == prev_hp` means this context owns no heap at all
        // (see `only_allow_stack_write`).
        self.hp != self.prev_hp && range.end <= self.prev_hp
    }
}
1124
/// Copies bytes from a storage value into VM memory, zero-filling any part
/// of the destination that extends past the end of the stored value.
///
/// Fills `[dst_addr, dst_addr + dst_len)` (ownership-checked) from
/// `storage[src_id]` starting at `src_offset` within the value of total
/// length `src_len`. Returns `not_found_error` if the key does not exist.
#[allow(clippy::too_many_arguments)]
pub(crate) fn copy_from_storage_zero_fill<M, S>(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    storage: &S,
    dst_addr: Word,
    dst_len: Word,
    src_id: &M::Key,
    src_offset: u64,
    src_len: usize,
    not_found_error: PanicReason,
) -> IoResult<(), S::Error>
where
    M: Mappable,
    S: StorageRead<M>,
{
    let write_buffer = memory.write(owner, dst_addr, dst_len)?;
    // Everything from this index onwards gets zero-filled at the end.
    let mut empty_offset = 0;

    // If the offset is past the end of the value, nothing is read and the
    // whole destination is zero-filled.
    if src_offset < src_len as Word {
        let src_offset =
            u32::try_from(src_offset).map_err(|_| PanicReason::MemoryOverflow)?;

        // Read at most what remains of the value, and at most what fits in
        // the destination buffer.
        let src_read_length = src_len.saturating_sub(src_offset as usize);
        let src_read_length = src_read_length.min(write_buffer.len());

        let (src_read_buffer, _) = write_buffer.split_at_mut(src_read_length);
        let found = storage
            .read(src_id, src_offset as usize, src_read_buffer)
            .map_err(RuntimeError::Storage)?;
        if !found {
            return Err(not_found_error.into());
        }

        empty_offset = src_read_length;
    }

    write_buffer[empty_offset..].fill(0);

    Ok(())
}