use crate::{
    aligned_memory::Pod,
    ebpf,
    error::{EbpfError, ProgramResult},
    program::SBPFVersion,
    vm::Config,
};
use std::{
    array,
    cell::{Cell, UnsafeCell},
    fmt, mem,
    ops::Range,
    ptr::{self, copy_nonoverlapping},
};

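/// The mutability and ownership state of a memory region.
/// `Cow(u64)` carries an id that is handed to the copy-on-write callback the first
/// time the region is written to.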
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq)]
pub enum MemoryState {
    #[default]
    Readable,
    Writable,
    Cow(u64),
}

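/// Callback invoked when a copy-on-write region is first written to. It receives the
/// region's `Cow` id and returns the host address of the writable copy, or `Err(())`
/// to deny the write.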
pub type MemoryCowCallback = Box<dyn Fn(u64) -> Result<u64, ()>>;

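/// A contiguous slice of host memory mapped into the VM's address space.
///
/// `host_addr` is the start of the backing slice on the host (a `Cell` so copy-on-write
/// can redirect it), `vm_addr..vm_addr_end` is the range the region occupies in the VM,
/// `len` is the length of the backing slice, and `vm_gap_shift` encodes the power-of-two
/// gap size of gapped regions as a shift amount (63 when the region has no gap).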
#[derive(Default, Eq, PartialEq)]
#[repr(C, align(32))]
pub struct MemoryRegion {
    pub host_addr: Cell<u64>,
    pub vm_addr: u64,
    pub vm_addr_end: u64,
    pub len: u64,
    pub vm_gap_shift: u8,
    pub state: Cell<MemoryState>,
}

impl MemoryRegion {
    fn new(slice: &[u8], vm_addr: u64, vm_gap_size: u64, state: MemoryState) -> Self {
        let mut vm_addr_end = vm_addr.saturating_add(slice.len() as u64);
        let mut vm_gap_shift = (std::mem::size_of::<u64>() as u8)
            .saturating_mul(8)
            .saturating_sub(1);
        if vm_gap_size > 0 {
            vm_addr_end = vm_addr_end.saturating_add(slice.len() as u64);
            vm_gap_shift = vm_gap_shift.saturating_sub(vm_gap_size.leading_zeros() as u8);
            debug_assert_eq!(Some(vm_gap_size), 1_u64.checked_shl(vm_gap_shift as u32));
        };
        MemoryRegion {
            host_addr: Cell::new(slice.as_ptr() as u64),
            vm_addr,
            vm_addr_end,
            len: slice.len() as u64,
            vm_gap_shift,
            state: Cell::new(state),
        }
    }

    pub fn new_for_testing(
        slice: &[u8],
        vm_addr: u64,
        vm_gap_size: u64,
        state: MemoryState,
    ) -> Self {
        Self::new(slice, vm_addr, vm_gap_size, state)
    }

    pub fn new_readonly(slice: &[u8], vm_addr: u64) -> Self {
        Self::new(slice, vm_addr, 0, MemoryState::Readable)
    }

    pub fn new_writable(slice: &mut [u8], vm_addr: u64) -> Self {
        Self::new(&*slice, vm_addr, 0, MemoryState::Writable)
    }

    pub fn new_cow(slice: &[u8], vm_addr: u64, cow_id: u64) -> Self {
        Self::new(slice, vm_addr, 0, MemoryState::Cow(cow_id))
    }

    pub fn new_writable_gapped(slice: &mut [u8], vm_addr: u64, vm_gap_size: u64) -> Self {
        Self::new(&*slice, vm_addr, vm_gap_size, MemoryState::Writable)
    }

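    /// Translates the range `vm_addr..vm_addr + len` into a host address.
    ///
    /// Returns an `InvalidVirtualAddress` error if the range lies below the region,
    /// extends past its end, or lands in one of the interleaved gaps of a gapped region.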
    pub fn vm_to_host(&self, vm_addr: u64, len: u64) -> ProgramResult {
        if vm_addr < self.vm_addr {
            return ProgramResult::Err(EbpfError::InvalidVirtualAddress(vm_addr));
        }

        let begin_offset = vm_addr.saturating_sub(self.vm_addr);
        let is_in_gap = (begin_offset
            .checked_shr(self.vm_gap_shift as u32)
            .unwrap_or(0)
            & 1)
            == 1;
        let gap_mask = (-1i64).checked_shl(self.vm_gap_shift as u32).unwrap_or(0) as u64;
        let gapped_offset =
            (begin_offset & gap_mask).checked_shr(1).unwrap_or(0) | (begin_offset & !gap_mask);
        if let Some(end_offset) = gapped_offset.checked_add(len) {
            if end_offset <= self.len && !is_in_gap {
                return ProgramResult::Ok(self.host_addr.get().saturating_add(gapped_offset));
            }
        }
        ProgramResult::Err(EbpfError::InvalidVirtualAddress(vm_addr))
    }
}

impl fmt::Debug for MemoryRegion {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "host_addr: {:#x?}-{:#x?}, vm_addr: {:#x?}-{:#x?}, len: {}",
            self.host_addr,
            self.host_addr.get().saturating_add(self.len),
            self.vm_addr,
            self.vm_addr_end,
            self.len
        )
    }
}
impl std::cmp::PartialOrd for MemoryRegion {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl std::cmp::Ord for MemoryRegion {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.vm_addr.cmp(&other.vm_addr)
    }
}

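/// The type of memory access being performed.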
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum AccessType {
    Load,
    Store,
}

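/// Memory mapping that allows regions of arbitrary size and alignment.
///
/// Regions are stored in Eytzinger (BFS) order in `regions`, with their start addresses
/// mirrored into `region_addresses` for fast lookup, and the most recently used
/// translations are kept in a small `MappingCache`.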
pub struct UnalignedMemoryMapping<'a> {
    regions: Box<[MemoryRegion]>,
    region_addresses: Box<[u64]>,
    cache: UnsafeCell<MappingCache>,
    config: &'a Config,
    sbpf_version: SBPFVersion,
    cow_cb: Option<MemoryCowCallback>,
}

impl fmt::Debug for UnalignedMemoryMapping<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("UnalignedMemoryMapping")
            .field("regions", &self.regions)
            .field("region_addresses", &self.region_addresses)
            .field("cache", &self.cache)
            .field("config", &self.config)
            .field(
                "cow_cb",
                &self
                    .cow_cb
                    .as_ref()
                    .map(|cb| format!("Some({:p})", &cb))
                    .unwrap_or_else(|| "None".to_string()),
            )
            .finish()
    }
}

impl<'a> UnalignedMemoryMapping<'a> {
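    /// Copies the regions, which must already be sorted by `vm_addr`, from
    /// `ascending_regions` into `self.regions` in Eytzinger (BFS) order, mirroring each
    /// start address into `self.region_addresses`. Returns the next input index.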
    fn construct_eytzinger_order(
        &mut self,
        ascending_regions: &mut [MemoryRegion],
        mut in_index: usize,
        out_index: usize,
    ) -> usize {
        if out_index >= self.regions.len() {
            return in_index;
        }
        in_index = self.construct_eytzinger_order(
            ascending_regions,
            in_index,
            out_index.saturating_mul(2).saturating_add(1),
        );
        self.regions[out_index] = mem::take(&mut ascending_regions[in_index]);
        self.region_addresses[out_index] = self.regions[out_index].vm_addr;
        self.construct_eytzinger_order(
            ascending_regions,
            in_index.saturating_add(1),
            out_index.saturating_mul(2).saturating_add(2),
        )
    }

    fn new_internal(
        mut regions: Vec<MemoryRegion>,
        cow_cb: Option<MemoryCowCallback>,
        config: &'a Config,
        sbpf_version: SBPFVersion,
    ) -> Result<Self, EbpfError> {
        regions.sort();
        for index in 1..regions.len() {
            let first = &regions[index.saturating_sub(1)];
            let second = &regions[index];
            if first.vm_addr_end > second.vm_addr {
                return Err(EbpfError::InvalidMemoryRegion(index));
            }
        }

        let mut result = Self {
            regions: (0..regions.len())
                .map(|_| MemoryRegion::default())
                .collect::<Vec<_>>()
                .into_boxed_slice(),
            region_addresses: vec![0; regions.len()].into_boxed_slice(),
            cache: UnsafeCell::new(MappingCache::new()),
            config,
            sbpf_version,
            cow_cb,
        };
        result.construct_eytzinger_order(&mut regions, 0, 0);
        Ok(result)
    }

    pub fn new(
        regions: Vec<MemoryRegion>,
        config: &'a Config,
        sbpf_version: SBPFVersion,
    ) -> Result<Self, EbpfError> {
        Self::new_internal(regions, None, config, sbpf_version)
    }

    pub fn new_with_cow(
        regions: Vec<MemoryRegion>,
        cow_cb: MemoryCowCallback,
        config: &'a Config,
        sbpf_version: SBPFVersion,
    ) -> Result<Self, EbpfError> {
        Self::new_internal(regions, Some(cow_cb), config, sbpf_version)
    }

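    /// Looks up the region whose start address is the closest one at or below `vm_addr`,
    /// first in the translation cache and then by searching the Eytzinger-ordered region
    /// list. Callers still have to bounds-check the address against the returned region.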
    #[allow(clippy::arithmetic_side_effects)]
    fn find_region(&self, cache: &mut MappingCache, vm_addr: u64) -> Option<&MemoryRegion> {
        if let Some(index) = cache.find(vm_addr) {
            Some(unsafe { self.regions.get_unchecked(index - 1) })
        } else {
            let mut index = 1;
            while index <= self.region_addresses.len() {
                index = (index << 1)
                    + unsafe { *self.region_addresses.get_unchecked(index - 1) <= vm_addr }
                        as usize;
            }
            index >>= index.trailing_zeros() + 1;
            if index == 0 {
                return None;
            }
            let region = unsafe { self.regions.get_unchecked(index - 1) };
            cache.insert(region.vm_addr..region.vm_addr_end, index);
            Some(region)
        }
    }

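    /// Given a `vm_addr` and `len`, returns the host address of the mapped range, or an
    /// access violation if the range is unmapped or not writable for a `Store` access.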
    pub fn map(&self, access_type: AccessType, vm_addr: u64, len: u64) -> ProgramResult {
        let cache = unsafe { &mut *self.cache.get() };

        let region = match self.find_region(cache, vm_addr) {
            Some(res) => res,
            None => {
                return generate_access_violation(
                    self.config,
                    self.sbpf_version,
                    access_type,
                    vm_addr,
                    len,
                )
            }
        };

        if access_type == AccessType::Load || ensure_writable_region(region, &self.cow_cb) {
            if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, len) {
                return ProgramResult::Ok(host_addr);
            }
        }

        generate_access_violation(self.config, self.sbpf_version, access_type, vm_addr, len)
    }

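    /// Loads a `T` from `vm_addr`, taking a fast path when the value lies entirely in one
    /// region and otherwise assembling it byte-wise across adjacent regions.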
    #[inline(always)]
    pub fn load<T: Pod + Into<u64>>(&self, mut vm_addr: u64) -> ProgramResult {
        let mut len = mem::size_of::<T>() as u64;
        debug_assert!(len <= mem::size_of::<u64>() as u64);

        let cache = unsafe { &mut *self.cache.get() };

        let mut region = match self.find_region(cache, vm_addr) {
            Some(region) => {
                if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, len) {
                    return ProgramResult::Ok(unsafe {
                        ptr::read_unaligned::<T>(host_addr as *const _).into()
                    });
                }

                region
            }
            None => {
                return generate_access_violation(
                    self.config,
                    self.sbpf_version,
                    AccessType::Load,
                    vm_addr,
                    len,
                )
            }
        };

        let initial_len = len;
        let initial_vm_addr = vm_addr;
        let mut value = 0u64;
        let mut ptr = std::ptr::addr_of_mut!(value).cast::<u8>();

        while len > 0 {
            let load_len = len.min(region.vm_addr_end.saturating_sub(vm_addr));
            if load_len == 0 {
                break;
            }
            if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, load_len) {
                unsafe {
                    copy_nonoverlapping(host_addr as *const _, ptr, load_len as usize);
                    ptr = ptr.add(load_len as usize);
                };
                len = len.saturating_sub(load_len);
                if len == 0 {
                    return ProgramResult::Ok(value);
                }
                vm_addr = vm_addr.saturating_add(load_len);
                region = match self.find_region(cache, vm_addr) {
                    Some(region) => region,
                    None => break,
                };
            } else {
                break;
            }
        }

        generate_access_violation(
            self.config,
            self.sbpf_version,
            AccessType::Load,
            initial_vm_addr,
            initial_len,
        )
    }

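    /// Stores `value` at `vm_addr`, taking a fast path when the value fits in one writable
    /// region and otherwise splitting the write across adjacent regions. Returns the
    /// translated host address on success, or an access violation.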
    #[inline]
    pub fn store<T: Pod>(&self, value: T, mut vm_addr: u64) -> ProgramResult {
        let mut len = mem::size_of::<T>() as u64;

        let cache = unsafe { &mut *self.cache.get() };

        let mut src = std::ptr::addr_of!(value).cast::<u8>();

        let mut region = match self.find_region(cache, vm_addr) {
            Some(region) if ensure_writable_region(region, &self.cow_cb) => {
                if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, len) {
                    unsafe { ptr::write_unaligned(host_addr as *mut _, value) };
                    return ProgramResult::Ok(host_addr);
                }
                region
            }
            _ => {
                return generate_access_violation(
                    self.config,
                    self.sbpf_version,
                    AccessType::Store,
                    vm_addr,
                    len,
                )
            }
        };

        let initial_len = len;
        let initial_vm_addr = vm_addr;

        while len > 0 {
            if !ensure_writable_region(region, &self.cow_cb) {
                break;
            }

            let write_len = len.min(region.vm_addr_end.saturating_sub(vm_addr));
            if write_len == 0 {
                break;
            }
            if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, write_len) {
                unsafe { copy_nonoverlapping(src, host_addr as *mut _, write_len as usize) };
                len = len.saturating_sub(write_len);
                if len == 0 {
                    return ProgramResult::Ok(host_addr);
                }
                src = unsafe { src.add(write_len as usize) };
                vm_addr = vm_addr.saturating_add(write_len);
                region = match self.find_region(cache, vm_addr) {
                    Some(region) => region,
                    None => break,
                };
            } else {
                break;
            }
        }

        generate_access_violation(
            self.config,
            self.sbpf_version,
            AccessType::Store,
            initial_vm_addr,
            initial_len,
        )
    }

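    /// Returns the region containing `vm_addr`, checking writability (and triggering
    /// copy-on-write) for `Store` accesses.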
    pub fn region(
        &self,
        access_type: AccessType,
        vm_addr: u64,
    ) -> Result<&MemoryRegion, EbpfError> {
        let cache = unsafe { &mut *self.cache.get() };
        if let Some(region) = self.find_region(cache, vm_addr) {
            if (region.vm_addr..region.vm_addr_end).contains(&vm_addr)
                && (access_type == AccessType::Load || ensure_writable_region(region, &self.cow_cb))
            {
                return Ok(region);
            }
        }
        Err(
            generate_access_violation(self.config, self.sbpf_version, access_type, vm_addr, 0)
                .unwrap_err(),
        )
    }

    pub fn get_regions(&self) -> &[MemoryRegion] {
        &self.regions
    }

    pub fn replace_region(&mut self, index: usize, region: MemoryRegion) -> Result<(), EbpfError> {
        if index >= self.regions.len() || self.regions[index].vm_addr != region.vm_addr {
            return Err(EbpfError::InvalidMemoryRegion(index));
        }
        self.regions[index] = region;
        self.cache.get_mut().flush();
        Ok(())
    }
}

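/// Memory mapping that requires each region to start at the base address selected by the
/// upper virtual address bits (`vm_addr >> ebpf::VIRTUAL_ADDRESS_BITS` equals the region's
/// index), so lookups need a shift instead of a search.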
pub struct AlignedMemoryMapping<'a> {
    regions: Box<[MemoryRegion]>,
    config: &'a Config,
    sbpf_version: SBPFVersion,
    cow_cb: Option<MemoryCowCallback>,
}

impl fmt::Debug for AlignedMemoryMapping<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("AlignedMemoryMapping")
            .field("regions", &self.regions)
            .field("config", &self.config)
            .field(
                "cow_cb",
                &self
                    .cow_cb
                    .as_ref()
                    .map(|cb| format!("Some({:p})", &cb))
                    .unwrap_or_else(|| "None".to_string()),
            )
            .finish()
    }
}

impl<'a> AlignedMemoryMapping<'a> {
    fn new_internal(
        mut regions: Vec<MemoryRegion>,
        cow_cb: Option<MemoryCowCallback>,
        config: &'a Config,
        sbpf_version: SBPFVersion,
    ) -> Result<Self, EbpfError> {
        regions.insert(0, MemoryRegion::new_readonly(&[], 0));
        regions.sort();
        for (index, region) in regions.iter().enumerate() {
            if region
                .vm_addr
                .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
                .unwrap_or(0)
                != index as u64
            {
                return Err(EbpfError::InvalidMemoryRegion(index));
            }
        }
        Ok(Self {
            regions: regions.into_boxed_slice(),
            config,
            sbpf_version,
            cow_cb,
        })
    }

    pub fn new(
        regions: Vec<MemoryRegion>,
        config: &'a Config,
        sbpf_version: SBPFVersion,
    ) -> Result<Self, EbpfError> {
        Self::new_internal(regions, None, config, sbpf_version)
    }

    pub fn new_with_cow(
        regions: Vec<MemoryRegion>,
        cow_cb: MemoryCowCallback,
        config: &'a Config,
        sbpf_version: SBPFVersion,
    ) -> Result<Self, EbpfError> {
        Self::new_internal(regions, Some(cow_cb), config, sbpf_version)
    }

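    /// Given a `vm_addr` and `len`, returns the host address of the mapped range by
    /// indexing the regions with the upper virtual address bits, or an access violation.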
    pub fn map(&self, access_type: AccessType, vm_addr: u64, len: u64) -> ProgramResult {
        let index = vm_addr
            .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
            .unwrap_or(0) as usize;
        if (1..self.regions.len()).contains(&index) {
            let region = &self.regions[index];
            if access_type == AccessType::Load || ensure_writable_region(region, &self.cow_cb) {
                if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, len) {
                    return ProgramResult::Ok(host_addr);
                }
            }
        }
        generate_access_violation(self.config, self.sbpf_version, access_type, vm_addr, len)
    }

    #[inline]
    pub fn load<T: Pod + Into<u64>>(&self, vm_addr: u64) -> ProgramResult {
        let len = mem::size_of::<T>() as u64;
        match self.map(AccessType::Load, vm_addr, len) {
            ProgramResult::Ok(host_addr) => {
                ProgramResult::Ok(unsafe { ptr::read_unaligned::<T>(host_addr as *const _) }.into())
            }
            err => err,
        }
    }

    #[inline]
    pub fn store<T: Pod>(&self, value: T, vm_addr: u64) -> ProgramResult {
        let len = mem::size_of::<T>() as u64;
        debug_assert!(len <= mem::size_of::<u64>() as u64);

        match self.map(AccessType::Store, vm_addr, len) {
            ProgramResult::Ok(host_addr) => {
                unsafe {
                    ptr::write_unaligned(host_addr as *mut T, value);
                }
                ProgramResult::Ok(host_addr)
            }

            err => err,
        }
    }

    pub fn region(
        &self,
        access_type: AccessType,
        vm_addr: u64,
    ) -> Result<&MemoryRegion, EbpfError> {
        let index = vm_addr
            .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
            .unwrap_or(0) as usize;
        if (1..self.regions.len()).contains(&index) {
            let region = &self.regions[index];
            if (region.vm_addr..region.vm_addr_end).contains(&vm_addr)
                && (access_type == AccessType::Load || ensure_writable_region(region, &self.cow_cb))
            {
                return Ok(region);
            }
        }
        Err(
            generate_access_violation(self.config, self.sbpf_version, access_type, vm_addr, 0)
                .unwrap_err(),
        )
    }

    pub fn get_regions(&self) -> &[MemoryRegion] {
        &self.regions
    }

    pub fn replace_region(&mut self, index: usize, region: MemoryRegion) -> Result<(), EbpfError> {
        if index >= self.regions.len() {
            return Err(EbpfError::InvalidMemoryRegion(index));
        }
        let begin_index = region
            .vm_addr
            .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
            .unwrap_or(0) as usize;
        let end_index = region
            .vm_addr
            .saturating_add(region.len.saturating_sub(1))
            .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
            .unwrap_or(0) as usize;
        if begin_index != index || end_index != index {
            return Err(EbpfError::InvalidMemoryRegion(index));
        }
        self.regions[index] = region;
        Ok(())
    }
}

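/// VM memory mapping. `Identity` performs no translation (vm addresses are used as host
/// addresses directly), while `Aligned` and `Unaligned` dispatch to the corresponding
/// mapping implementation, selected by `Config::aligned_memory_mapping`.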
#[derive(Debug)]
pub enum MemoryMapping<'a> {
    Identity,
    Aligned(AlignedMemoryMapping<'a>),
    Unaligned(UnalignedMemoryMapping<'a>),
}

impl<'a> MemoryMapping<'a> {
    pub(crate) fn new_identity() -> Self {
        MemoryMapping::Identity
    }

    pub fn new(
        regions: Vec<MemoryRegion>,
        config: &'a Config,
        sbpf_version: SBPFVersion,
    ) -> Result<Self, EbpfError> {
        if config.aligned_memory_mapping {
            AlignedMemoryMapping::new(regions, config, sbpf_version).map(MemoryMapping::Aligned)
        } else {
            UnalignedMemoryMapping::new(regions, config, sbpf_version).map(MemoryMapping::Unaligned)
        }
    }

    pub fn new_with_cow(
        regions: Vec<MemoryRegion>,
        cow_cb: MemoryCowCallback,
        config: &'a Config,
        sbpf_version: SBPFVersion,
    ) -> Result<Self, EbpfError> {
        if config.aligned_memory_mapping {
            AlignedMemoryMapping::new_with_cow(regions, cow_cb, config, sbpf_version)
                .map(MemoryMapping::Aligned)
        } else {
            UnalignedMemoryMapping::new_with_cow(regions, cow_cb, config, sbpf_version)
                .map(MemoryMapping::Unaligned)
        }
    }

    pub fn map(&self, access_type: AccessType, vm_addr: u64, len: u64) -> ProgramResult {
        match self {
            MemoryMapping::Identity => ProgramResult::Ok(vm_addr),
            MemoryMapping::Aligned(m) => m.map(access_type, vm_addr, len),
            MemoryMapping::Unaligned(m) => m.map(access_type, vm_addr, len),
        }
    }

    #[inline]
    pub fn load<T: Pod + Into<u64>>(&self, vm_addr: u64) -> ProgramResult {
        match self {
            MemoryMapping::Identity => unsafe {
                ProgramResult::Ok(ptr::read_unaligned(vm_addr as *const T).into())
            },
            MemoryMapping::Aligned(m) => m.load::<T>(vm_addr),
            MemoryMapping::Unaligned(m) => m.load::<T>(vm_addr),
        }
    }

    #[inline]
    pub fn store<T: Pod>(&self, value: T, vm_addr: u64) -> ProgramResult {
        match self {
            MemoryMapping::Identity => unsafe {
                ptr::write_unaligned(vm_addr as *mut T, value);
                ProgramResult::Ok(0)
            },
            MemoryMapping::Aligned(m) => m.store(value, vm_addr),
            MemoryMapping::Unaligned(m) => m.store(value, vm_addr),
        }
    }

    pub fn region(
        &self,
        access_type: AccessType,
        vm_addr: u64,
    ) -> Result<&MemoryRegion, EbpfError> {
        match self {
            MemoryMapping::Identity => Err(EbpfError::InvalidMemoryRegion(0)),
            MemoryMapping::Aligned(m) => m.region(access_type, vm_addr),
            MemoryMapping::Unaligned(m) => m.region(access_type, vm_addr),
        }
    }

    pub fn get_regions(&self) -> &[MemoryRegion] {
        match self {
            MemoryMapping::Identity => &[],
            MemoryMapping::Aligned(m) => m.get_regions(),
            MemoryMapping::Unaligned(m) => m.get_regions(),
        }
    }

    pub fn replace_region(&mut self, index: usize, region: MemoryRegion) -> Result<(), EbpfError> {
        match self {
            MemoryMapping::Identity => Err(EbpfError::InvalidMemoryRegion(index)),
            MemoryMapping::Aligned(m) => m.replace_region(index, region),
            MemoryMapping::Unaligned(m) => m.replace_region(index, region),
        }
    }
}

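/// Returns whether the region may be written to, invoking the copy-on-write callback and
/// switching the region to `Writable` the first time a `Cow` region is stored to.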
fn ensure_writable_region(region: &MemoryRegion, cow_cb: &Option<MemoryCowCallback>) -> bool {
    match (region.state.get(), cow_cb) {
        (MemoryState::Writable, _) => true,
        (MemoryState::Cow(cow_id), Some(cb)) => match cb(cow_id) {
            Ok(host_addr) => {
                region.host_addr.replace(host_addr);
                region.state.replace(MemoryState::Writable);
                true
            }
            Err(_) => false,
        },
        _ => false,
    }
}

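/// Helper for `map()` to build errors: produces a `StackAccessViolation` when the address
/// falls into the stack area of a VM without dynamic stack frames, and a plain
/// `AccessViolation` naming the region otherwise.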
fn generate_access_violation(
    config: &Config,
    sbpf_version: SBPFVersion,
    access_type: AccessType,
    vm_addr: u64,
    len: u64,
) -> ProgramResult {
    let stack_frame = (vm_addr as i64)
        .saturating_sub(ebpf::MM_STACK_START as i64)
        .checked_div(config.stack_frame_size as i64)
        .unwrap_or(0);
    if !sbpf_version.dynamic_stack_frames()
        && (-1..(config.max_call_depth as i64).saturating_add(1)).contains(&stack_frame)
    {
        ProgramResult::Err(EbpfError::StackAccessViolation(
            access_type,
            vm_addr,
            len,
            stack_frame,
        ))
    } else {
        let region_name = match vm_addr & (!ebpf::MM_RODATA_START.saturating_sub(1)) {
            ebpf::MM_RODATA_START => "program",
            ebpf::MM_STACK_START => "stack",
            ebpf::MM_HEAP_START => "heap",
            ebpf::MM_INPUT_START => "input",
            _ => "unknown",
        };
        ProgramResult::Err(EbpfError::AccessViolation(
            access_type,
            vm_addr,
            len,
            region_name,
        ))
    }
}

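/// Fixed-size, most-recently-used cache of vm address range to region index translations
/// used by `UnalignedMemoryMapping::find_region`. Inserts overwrite the oldest slot and
/// lookups scan from the most recently inserted entry.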
#[derive(Debug)]
struct MappingCache {
    entries: [(Range<u64>, usize); MappingCache::SIZE as usize],
    head: isize,
}

impl MappingCache {
    const SIZE: isize = 4;

    fn new() -> MappingCache {
        MappingCache {
            entries: array::from_fn(|_| (0..0, 0)),
            head: 0,
        }
    }

    #[allow(clippy::arithmetic_side_effects)]
    #[inline]
    fn find(&self, vm_addr: u64) -> Option<usize> {
        for i in 0..Self::SIZE {
            let index = (self.head + i) % Self::SIZE;
            let (vm_range, region_index) = unsafe { self.entries.get_unchecked(index as usize) };
            if vm_range.contains(&vm_addr) {
                return Some(*region_index);
            }
        }

        None
    }

    #[allow(clippy::arithmetic_side_effects)]
    #[inline]
    fn insert(&mut self, vm_range: Range<u64>, region_index: usize) {
        self.head = (self.head - 1).rem_euclid(Self::SIZE);
        unsafe { *self.entries.get_unchecked_mut(self.head as usize) = (vm_range, region_index) };
    }

    #[inline]
    fn flush(&mut self) {
        self.entries = array::from_fn(|_| (0..0, 0));
        self.head = 0;
    }
}

#[cfg(test)]
mod test {
    use std::{cell::RefCell, rc::Rc};
    use test_utils::assert_error;

    use super::*;

    #[test]
    fn test_mapping_cache() {
        let mut cache = MappingCache::new();
        assert_eq!(cache.find(0), None);

        let mut ranges = vec![10u64..20, 20..30, 30..40, 40..50];
        for (region, range) in ranges.iter().cloned().enumerate() {
            cache.insert(range, region);
        }
        for (region, range) in ranges.iter().enumerate() {
            if region > 0 {
                assert_eq!(cache.find(range.start - 1), Some(region - 1));
            } else {
                assert_eq!(cache.find(range.start - 1), None);
            }
            assert_eq!(cache.find(range.start), Some(region));
            assert_eq!(cache.find(range.start + 1), Some(region));
            assert_eq!(cache.find(range.end - 1), Some(region));
            if region < 3 {
                assert_eq!(cache.find(range.end), Some(region + 1));
            } else {
                assert_eq!(cache.find(range.end), None);
            }
        }

        cache.insert(50..60, 4);
        ranges.push(50..60);
        for (region, range) in ranges.iter().enumerate() {
            if region == 0 {
                assert_eq!(cache.find(range.start), None);
                continue;
            }
            if region > 1 {
                assert_eq!(cache.find(range.start - 1), Some(region - 1));
            } else {
                assert_eq!(cache.find(range.start - 1), None);
            }
            assert_eq!(cache.find(range.start), Some(region));
            assert_eq!(cache.find(range.start + 1), Some(region));
            assert_eq!(cache.find(range.end - 1), Some(region));
            if region < 4 {
                assert_eq!(cache.find(range.end), Some(region + 1));
            } else {
                assert_eq!(cache.find(range.end), None);
            }
        }
    }

    #[test]
    fn test_mapping_cache_flush() {
        let mut cache = MappingCache::new();
        assert_eq!(cache.find(0), None);
        cache.insert(0..10, 0);
        assert_eq!(cache.find(0), Some(0));
        cache.flush();
        assert_eq!(cache.find(0), None);
    }

    #[test]
    fn test_map_empty() {
        let config = Config::default();
        let m = UnalignedMemoryMapping::new(vec![], &config, SBPFVersion::V3).unwrap();
        assert_error!(
            m.map(AccessType::Load, ebpf::MM_INPUT_START, 8),
            "AccessViolation"
        );

        let m = AlignedMemoryMapping::new(vec![], &config, SBPFVersion::V3).unwrap();
        assert_error!(
            m.map(AccessType::Load, ebpf::MM_INPUT_START, 8),
            "AccessViolation"
        );
    }

    #[test]
    fn test_gapped_map() {
        for aligned_memory_mapping in [false, true] {
            let config = Config {
                aligned_memory_mapping,
                ..Config::default()
            };
            let mut mem1 = vec![0xff; 8];
            let m = MemoryMapping::new(
                vec![
                    MemoryRegion::new_readonly(&[0; 8], ebpf::MM_RODATA_START),
                    MemoryRegion::new_writable_gapped(&mut mem1, ebpf::MM_STACK_START, 2),
                ],
                &config,
                SBPFVersion::V3,
            )
            .unwrap();
            for frame in 0..4 {
                let address = ebpf::MM_STACK_START + frame * 4;
                assert!(m.region(AccessType::Load, address).is_ok());
                assert!(m.map(AccessType::Load, address, 2).is_ok());
                assert_error!(m.map(AccessType::Load, address + 2, 2), "AccessViolation");
                assert_eq!(m.load::<u16>(address).unwrap(), 0xFFFF);
                assert_error!(m.load::<u16>(address + 2), "AccessViolation");
                assert!(m.store::<u16>(0xFFFF, address).is_ok());
                assert_error!(m.store::<u16>(0xFFFF, address + 2), "AccessViolation");
            }
        }
    }

    #[test]
    fn test_unaligned_map_overlap() {
        let config = Config::default();
        let mem1 = [1, 2, 3, 4];
        let mem2 = [5, 6];
        assert_error!(
            UnalignedMemoryMapping::new(
                vec![
                    MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START),
                    MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64 - 1),
                ],
                &config,
                SBPFVersion::V3,
            ),
            "InvalidMemoryRegion(1)"
        );
        assert!(UnalignedMemoryMapping::new(
            vec![
                MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START),
                MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64),
            ],
            &config,
            SBPFVersion::V3,
        )
        .is_ok());
    }

    #[test]
    fn test_unaligned_map() {
        let config = Config::default();
        let mut mem1 = [11];
        let mem2 = [22, 22];
        let mem3 = [33];
        let mem4 = [44, 44];
        let m = UnalignedMemoryMapping::new(
            vec![
                MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START),
                MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64),
                MemoryRegion::new_readonly(
                    &mem3,
                    ebpf::MM_INPUT_START + (mem1.len() + mem2.len()) as u64,
                ),
                MemoryRegion::new_readonly(
                    &mem4,
                    ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len()) as u64,
                ),
            ],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();

        assert_eq!(
            m.map(AccessType::Load, ebpf::MM_INPUT_START, 1).unwrap(),
            mem1.as_ptr() as u64
        );

        assert_eq!(
            m.map(AccessType::Store, ebpf::MM_INPUT_START, 1).unwrap(),
            mem1.as_ptr() as u64
        );

        assert_error!(
            m.map(AccessType::Load, ebpf::MM_INPUT_START, 2),
            "AccessViolation"
        );

        assert_eq!(
            m.map(
                AccessType::Load,
                ebpf::MM_INPUT_START + mem1.len() as u64,
                1,
            )
            .unwrap(),
            mem2.as_ptr() as u64
        );

        assert_eq!(
            m.map(
                AccessType::Load,
                ebpf::MM_INPUT_START + (mem1.len() + mem2.len()) as u64,
                1,
            )
            .unwrap(),
            mem3.as_ptr() as u64
        );

        assert_eq!(
            m.map(
                AccessType::Load,
                ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len()) as u64,
                1,
            )
            .unwrap(),
            mem4.as_ptr() as u64
        );

        assert_error!(
            m.map(
                AccessType::Load,
                ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len() + mem4.len()) as u64,
                1,
            ),
            "AccessViolation"
        );
    }

    #[test]
    fn test_unaligned_region() {
        let config = Config {
            aligned_memory_mapping: false,
            ..Config::default()
        };

        let mut mem1 = vec![0xFF; 4];
        let mem2 = vec![0xDD; 4];
        let m = MemoryMapping::new(
            vec![
                MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START),
                MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + 4),
            ],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();
        assert_error!(
            m.region(AccessType::Load, ebpf::MM_INPUT_START - 1),
            "AccessViolation"
        );
        assert_eq!(
            m.region(AccessType::Load, ebpf::MM_INPUT_START)
                .unwrap()
                .host_addr
                .get(),
            mem1.as_ptr() as u64
        );
        assert_eq!(
            m.region(AccessType::Load, ebpf::MM_INPUT_START + 3)
                .unwrap()
                .host_addr
                .get(),
            mem1.as_ptr() as u64
        );
        assert_error!(
            m.region(AccessType::Store, ebpf::MM_INPUT_START + 4),
            "AccessViolation"
        );
        assert_eq!(
            m.region(AccessType::Load, ebpf::MM_INPUT_START + 4)
                .unwrap()
                .host_addr
                .get(),
            mem2.as_ptr() as u64
        );
        assert_eq!(
            m.region(AccessType::Load, ebpf::MM_INPUT_START + 7)
                .unwrap()
                .host_addr
                .get(),
            mem2.as_ptr() as u64
        );
        assert_error!(
            m.region(AccessType::Load, ebpf::MM_INPUT_START + 8),
            "AccessViolation"
        );
    }

    #[test]
    fn test_aligned_region() {
        let config = Config {
            aligned_memory_mapping: true,
            ..Config::default()
        };

        let mut mem1 = vec![0xFF; 4];
        let mem2 = vec![0xDD; 4];
        let m = MemoryMapping::new(
            vec![
                MemoryRegion::new_writable(&mut mem1, ebpf::MM_RODATA_START),
                MemoryRegion::new_readonly(&mem2, ebpf::MM_STACK_START),
            ],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();
        assert_error!(
            m.region(AccessType::Load, ebpf::MM_RODATA_START - 1),
            "AccessViolation"
        );
        assert_eq!(
            m.region(AccessType::Load, ebpf::MM_RODATA_START)
                .unwrap()
                .host_addr
                .get(),
            mem1.as_ptr() as u64
        );
        assert_eq!(
            m.region(AccessType::Load, ebpf::MM_RODATA_START + 3)
                .unwrap()
                .host_addr
                .get(),
            mem1.as_ptr() as u64
        );
        assert_error!(
            m.region(AccessType::Load, ebpf::MM_RODATA_START + 4),
            "AccessViolation"
        );

        assert_error!(
            m.region(AccessType::Store, ebpf::MM_STACK_START),
            "AccessViolation"
        );
        assert_eq!(
            m.region(AccessType::Load, ebpf::MM_STACK_START)
                .unwrap()
                .host_addr
                .get(),
            mem2.as_ptr() as u64
        );
        assert_eq!(
            m.region(AccessType::Load, ebpf::MM_STACK_START + 3)
                .unwrap()
                .host_addr
                .get(),
            mem2.as_ptr() as u64
        );
        assert_error!(
            m.region(AccessType::Load, ebpf::MM_INPUT_START + 4),
            "AccessViolation"
        );
    }

    #[test]
    fn test_unaligned_map_load() {
        let config = Config {
            aligned_memory_mapping: false,
            ..Config::default()
        };
        let mem1 = [0x11, 0x22];
        let mem2 = [0x33];
        let mem3 = [0x44, 0x55, 0x66];
        let mem4 = [0x77, 0x88, 0x99];
        let m = MemoryMapping::new(
            vec![
                MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START),
                MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64),
                MemoryRegion::new_readonly(
                    &mem3,
                    ebpf::MM_INPUT_START + (mem1.len() + mem2.len()) as u64,
                ),
                MemoryRegion::new_readonly(
                    &mem4,
                    ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len()) as u64,
                ),
            ],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();

        assert_eq!(m.load::<u16>(ebpf::MM_INPUT_START).unwrap(), 0x2211);
        assert_eq!(m.load::<u32>(ebpf::MM_INPUT_START).unwrap(), 0x44332211);
        assert_eq!(
            m.load::<u64>(ebpf::MM_INPUT_START).unwrap(),
            0x8877665544332211
        );
        assert_eq!(m.load::<u16>(ebpf::MM_INPUT_START + 1).unwrap(), 0x3322);
        assert_eq!(m.load::<u32>(ebpf::MM_INPUT_START + 1).unwrap(), 0x55443322);
        assert_eq!(
            m.load::<u64>(ebpf::MM_INPUT_START + 1).unwrap(),
            0x9988776655443322
        );
    }

    #[test]
    fn test_unaligned_map_store() {
        let config = Config {
            aligned_memory_mapping: false,
            ..Config::default()
        };
        let mut mem1 = vec![0xff, 0xff];
        let mut mem2 = vec![0xff];
        let mut mem3 = vec![0xff, 0xff, 0xff];
        let mut mem4 = vec![0xff, 0xff];
        let m = MemoryMapping::new(
            vec![
                MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START),
                MemoryRegion::new_writable(&mut mem2, ebpf::MM_INPUT_START + mem1.len() as u64),
                MemoryRegion::new_writable(
                    &mut mem3,
                    ebpf::MM_INPUT_START + (mem1.len() + mem2.len()) as u64,
                ),
                MemoryRegion::new_writable(
                    &mut mem4,
                    ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len()) as u64,
                ),
            ],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();
        m.store(0x1122u16, ebpf::MM_INPUT_START).unwrap();
        assert_eq!(m.load::<u16>(ebpf::MM_INPUT_START).unwrap(), 0x1122);

        m.store(0x33445566u32, ebpf::MM_INPUT_START).unwrap();
        assert_eq!(m.load::<u32>(ebpf::MM_INPUT_START).unwrap(), 0x33445566);

        m.store(0x778899AABBCCDDEEu64, ebpf::MM_INPUT_START)
            .unwrap();
        assert_eq!(
            m.load::<u64>(ebpf::MM_INPUT_START).unwrap(),
            0x778899AABBCCDDEE
        );
    }

    #[test]
    fn test_unaligned_map_load_store_fast_paths() {
        let config = Config {
            aligned_memory_mapping: false,
            ..Config::default()
        };
        let mut mem1 = vec![0xff; 8];
        let m = MemoryMapping::new(
            vec![MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START)],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();

        m.store(0x1122334455667788u64, ebpf::MM_INPUT_START)
            .unwrap();
        assert_eq!(
            m.load::<u64>(ebpf::MM_INPUT_START).unwrap(),
            0x1122334455667788
        );
        m.store(0x22334455u32, ebpf::MM_INPUT_START).unwrap();
        assert_eq!(m.load::<u32>(ebpf::MM_INPUT_START).unwrap(), 0x22334455);

        m.store(0x3344u16, ebpf::MM_INPUT_START).unwrap();
        assert_eq!(m.load::<u16>(ebpf::MM_INPUT_START).unwrap(), 0x3344);

        m.store(0x55u8, ebpf::MM_INPUT_START).unwrap();
        assert_eq!(m.load::<u8>(ebpf::MM_INPUT_START).unwrap(), 0x55);
    }

    #[test]
    fn test_unaligned_map_load_store_slow_paths() {
        let config = Config {
            aligned_memory_mapping: false,
            ..Config::default()
        };
        let mut mem1 = vec![0xff; 7];
        let mut mem2 = vec![0xff];
        let m = MemoryMapping::new(
            vec![
                MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START),
                MemoryRegion::new_writable(&mut mem2, ebpf::MM_INPUT_START + 7),
            ],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();

        m.store(0x1122334455667788u64, ebpf::MM_INPUT_START)
            .unwrap();
        assert_eq!(
            m.load::<u64>(ebpf::MM_INPUT_START).unwrap(),
            0x1122334455667788
        );
        m.store(0xAABBCCDDu32, ebpf::MM_INPUT_START + 4).unwrap();
        assert_eq!(m.load::<u32>(ebpf::MM_INPUT_START + 4).unwrap(), 0xAABBCCDD);

        m.store(0xEEFFu16, ebpf::MM_INPUT_START + 6).unwrap();
        assert_eq!(m.load::<u16>(ebpf::MM_INPUT_START + 6).unwrap(), 0xEEFF);
    }

    #[test]
    fn test_unaligned_map_store_out_of_bounds() {
        let config = Config {
            aligned_memory_mapping: false,
            ..Config::default()
        };

        let mut mem1 = vec![0xFF];
        let m = MemoryMapping::new(
            vec![MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START)],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();
        m.store(0x11u8, ebpf::MM_INPUT_START).unwrap();
        assert_error!(m.store(0x11u8, ebpf::MM_INPUT_START - 1), "AccessViolation");
        assert_error!(m.store(0x11u8, ebpf::MM_INPUT_START + 1), "AccessViolation");
        assert_error!(m.store(0x11u8, ebpf::MM_INPUT_START + 2), "AccessViolation");

        let mut mem1 = vec![0xFF; 4];
        let mut mem2 = vec![0xDD; 4];
        let m = MemoryMapping::new(
            vec![
                MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START),
                MemoryRegion::new_writable(&mut mem2, ebpf::MM_INPUT_START + 4),
            ],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();
        m.store(0x1122334455667788u64, ebpf::MM_INPUT_START)
            .unwrap();
        assert_eq!(
            m.load::<u64>(ebpf::MM_INPUT_START).unwrap(),
            0x1122334455667788u64
        );
        assert_error!(
            m.store(0x1122334455667788u64, ebpf::MM_INPUT_START + 1),
            "AccessViolation"
        );
    }

    #[test]
    fn test_unaligned_map_load_out_of_bounds() {
        let config = Config {
            aligned_memory_mapping: false,
            ..Config::default()
        };

        let mem1 = vec![0xff];
        let m = MemoryMapping::new(
            vec![MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START)],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();
        assert_eq!(m.load::<u8>(ebpf::MM_INPUT_START).unwrap(), 0xff);
        assert_error!(m.load::<u8>(ebpf::MM_INPUT_START - 1), "AccessViolation");
        assert_error!(m.load::<u8>(ebpf::MM_INPUT_START + 1), "AccessViolation");
        assert_error!(m.load::<u8>(ebpf::MM_INPUT_START + 2), "AccessViolation");

        let mem1 = vec![0xFF; 4];
        let mem2 = vec![0xDD; 4];
        let m = MemoryMapping::new(
            vec![
                MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START),
                MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + 4),
            ],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();
        assert_eq!(
            m.load::<u64>(ebpf::MM_INPUT_START).unwrap(),
            0xDDDDDDDDFFFFFFFF
        );
        assert_error!(m.load::<u64>(ebpf::MM_INPUT_START + 1), "AccessViolation");
    }

    #[test]
    #[should_panic(expected = "AccessViolation")]
    fn test_store_readonly() {
        let config = Config {
            aligned_memory_mapping: false,
            ..Config::default()
        };
        let mut mem1 = vec![0xff, 0xff];
        let mem2 = vec![0xff, 0xff];
        let m = MemoryMapping::new(
            vec![
                MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START),
                MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64),
            ],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();
        m.store(0x11223344, ebpf::MM_INPUT_START).unwrap();
    }

    #[test]
    fn test_unaligned_map_replace_region() {
        let config = Config::default();
        let mem1 = [11];
        let mem2 = [22, 22];
        let mem3 = [33];
        let mut m = UnalignedMemoryMapping::new(
            vec![
                MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START),
                MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64),
            ],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();

        assert_eq!(
            m.map(AccessType::Load, ebpf::MM_INPUT_START, 1).unwrap(),
            mem1.as_ptr() as u64
        );

        assert_eq!(
            m.map(
                AccessType::Load,
                ebpf::MM_INPUT_START + mem1.len() as u64,
                1,
            )
            .unwrap(),
            mem2.as_ptr() as u64
        );

        assert_error!(
            m.replace_region(
                2,
                MemoryRegion::new_readonly(&mem3, ebpf::MM_INPUT_START + mem1.len() as u64)
            ),
            "InvalidMemoryRegion(2)"
        );

        let region_index = m
            .get_regions()
            .iter()
            .position(|mem| mem.vm_addr == ebpf::MM_INPUT_START + mem1.len() as u64)
            .unwrap();

        assert_error!(
            m.replace_region(
                region_index,
                MemoryRegion::new_readonly(&mem3, ebpf::MM_INPUT_START + mem1.len() as u64 + 1)
            ),
            "InvalidMemoryRegion({})",
            region_index
        );

        m.replace_region(
            region_index,
            MemoryRegion::new_readonly(&mem3, ebpf::MM_INPUT_START + mem1.len() as u64),
        )
        .unwrap();

        assert_eq!(
            m.map(
                AccessType::Load,
                ebpf::MM_INPUT_START + mem1.len() as u64,
                1,
            )
            .unwrap(),
            mem3.as_ptr() as u64
        );
    }

    #[test]
    fn test_aligned_map_replace_region() {
        let config = Config::default();
        let mem1 = [11];
        let mem2 = [22, 22];
        let mem3 = [33, 33];
        let mut m = AlignedMemoryMapping::new(
            vec![
                MemoryRegion::new_readonly(&mem1, ebpf::MM_RODATA_START),
                MemoryRegion::new_readonly(&mem2, ebpf::MM_STACK_START),
            ],
            &config,
            SBPFVersion::V3,
        )
        .unwrap();

        assert_eq!(
            m.map(AccessType::Load, ebpf::MM_STACK_START, 1).unwrap(),
            mem2.as_ptr() as u64
        );

        assert_error!(
            m.replace_region(3, MemoryRegion::new_readonly(&mem3, ebpf::MM_STACK_START)),
            "InvalidMemoryRegion(3)"
        );

        assert_error!(
            m.replace_region(2, MemoryRegion::new_readonly(&mem3, ebpf::MM_HEAP_START)),
            "InvalidMemoryRegion(2)"
        );

        assert_error!(
            m.replace_region(
                2,
                MemoryRegion::new_readonly(&mem3, ebpf::MM_HEAP_START - 1)
            ),
            "InvalidMemoryRegion(2)"
        );

        m.replace_region(2, MemoryRegion::new_readonly(&mem3, ebpf::MM_STACK_START))
            .unwrap();

        assert_eq!(
            m.map(AccessType::Load, ebpf::MM_STACK_START, 1).unwrap(),
            mem3.as_ptr() as u64
        );
    }

    #[test]
    fn test_cow_map() {
        for aligned_memory_mapping in [true, false] {
            let config = Config {
                aligned_memory_mapping,
                ..Config::default()
            };
            let original = [11, 22];
            let copied = Rc::new(RefCell::new(Vec::new()));

            let c = Rc::clone(&copied);
            let m = MemoryMapping::new_with_cow(
                vec![MemoryRegion::new_cow(&original, ebpf::MM_RODATA_START, 42)],
                Box::new(move |_| {
                    c.borrow_mut().extend_from_slice(&original);
                    Ok(c.borrow().as_slice().as_ptr() as u64)
                }),
                &config,
                SBPFVersion::V3,
            )
            .unwrap();

            assert_eq!(
                m.map(AccessType::Load, ebpf::MM_RODATA_START, 1).unwrap(),
                original.as_ptr() as u64
            );
            assert_eq!(
                m.map(AccessType::Store, ebpf::MM_RODATA_START, 1).unwrap(),
                copied.borrow().as_ptr() as u64
            );
        }
    }

    #[test]
    fn test_cow_load_store() {
        for aligned_memory_mapping in [true, false] {
            let config = Config {
                aligned_memory_mapping,
                ..Config::default()
            };
            let original = [11, 22];
            let copied = Rc::new(RefCell::new(Vec::new()));

            let c = Rc::clone(&copied);
            let m = MemoryMapping::new_with_cow(
                vec![MemoryRegion::new_cow(&original, ebpf::MM_RODATA_START, 42)],
                Box::new(move |_| {
                    c.borrow_mut().extend_from_slice(&original);
                    Ok(c.borrow().as_slice().as_ptr() as u64)
                }),
                &config,
                SBPFVersion::V3,
            )
            .unwrap();

            assert_eq!(
                m.map(AccessType::Load, ebpf::MM_RODATA_START, 1).unwrap(),
                original.as_ptr() as u64
            );

            assert_eq!(m.load::<u8>(ebpf::MM_RODATA_START).unwrap(), 11);
            assert_eq!(m.load::<u8>(ebpf::MM_RODATA_START + 1).unwrap(), 22);
            assert!(copied.borrow().is_empty());

            m.store(33u8, ebpf::MM_RODATA_START).unwrap();
            assert_eq!(original[0], 11);
            assert_eq!(m.load::<u8>(ebpf::MM_RODATA_START).unwrap(), 33);
            assert_eq!(m.load::<u8>(ebpf::MM_RODATA_START + 1).unwrap(), 22);
        }
    }

    #[test]
    fn test_cow_region_id() {
        for aligned_memory_mapping in [true, false] {
            let config = Config {
                aligned_memory_mapping,
                ..Config::default()
            };
            let original1 = [11, 22];
            let original2 = [33, 44];
            let copied = Rc::new(RefCell::new(Vec::new()));

            let c = Rc::clone(&copied);
            let m = MemoryMapping::new_with_cow(
                vec![
                    MemoryRegion::new_cow(&original1, ebpf::MM_RODATA_START, 42),
                    MemoryRegion::new_cow(&original2, ebpf::MM_RODATA_START + 0x100000000, 24),
                ],
                Box::new(move |id| {
                    assert_eq!(id, 42);
                    c.borrow_mut().extend_from_slice(&original1);
                    Ok(c.borrow().as_slice().as_ptr() as u64)
                }),
                &config,
                SBPFVersion::V3,
            )
            .unwrap();

            m.store(55u8, ebpf::MM_RODATA_START).unwrap();
            assert_eq!(original1[0], 11);
            assert_eq!(m.load::<u8>(ebpf::MM_RODATA_START).unwrap(), 55);
        }
    }

    #[test]
    #[should_panic(expected = "AccessViolation")]
    fn test_map_cow_error() {
        let config = Config::default();
        let original = [11, 22];

        let m = MemoryMapping::new_with_cow(
            vec![MemoryRegion::new_cow(&original, ebpf::MM_RODATA_START, 42)],
            Box::new(|_| Err(())),
            &config,
            SBPFVersion::V3,
        )
        .unwrap();

        m.map(AccessType::Store, ebpf::MM_RODATA_START, 1).unwrap();
    }

    #[test]
    #[should_panic(expected = "AccessViolation")]
    fn test_store_cow_error() {
        let config = Config::default();
        let original = [11, 22];

        let m = MemoryMapping::new_with_cow(
            vec![MemoryRegion::new_cow(&original, ebpf::MM_RODATA_START, 42)],
            Box::new(|_| Err(())),
            &config,
            SBPFVersion::V3,
        )
        .unwrap();

        m.store(33u8, ebpf::MM_RODATA_START).unwrap();
    }
}