use core::{
    alloc::Layout,
    marker::PhantomData,
    mem::size_of,
    ptr::{slice_from_raw_parts, slice_from_raw_parts_mut, NonNull},
};
use log::trace;
use crate::{
err::{PagingError, PagingResult},
iter::TableIter,
page_table_entry::Pte,
Access, MapConfig, PTEArch, PTEGeneric, PTEInfo,
};
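/// A copyable, borrowed view of one page table, identified by its physical
/// address and level, generic over the architecture's PTE format `P`.
///
/// A minimal usage sketch, assuming hypothetical `PTEArch` and [`Access`]
/// implementations `Arch` and `HeapAccess` supplied by the caller:
///
/// ```ignore
/// let mut access = HeapAccess::default();
/// let mut table = PageTableRef::<Arch>::create_empty(&mut access)?;
/// unsafe { table.map_region(cfg, size, true, &mut access)? };
/// table.release(&mut access);
/// ```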
#[derive(Clone, Copy)]
pub struct PageTableRef<'a, P: PTEArch> {
addr: usize,
level: usize,
walk: PageWalk,
_marker: PhantomData<&'a P>,
}
impl<'a, P: PTEArch> PageTableRef<'a, P> {
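    /// Allocates a zeroed root table at the architecture's top level
    /// (`P::level()`) and returns a reference to it.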
pub fn create_empty(access: &mut impl Access) -> PagingResult<Self> {
Self::new_with_level(P::level(), access)
}
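    /// Allocates a zeroed table at the given `level` (must be non-zero).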
pub fn new_with_level(level: usize, access: &mut impl Access) -> PagingResult<Self> {
assert!(level > 0);
let addr = unsafe { Self::alloc_table(access)? };
Ok(PageTableRef::from_addr(addr, level))
}
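    /// Wraps an existing table at physical address `addr` without
    /// allocating; the caller guarantees `addr` points to a valid table of
    /// the given `level`.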
pub fn from_addr(addr: usize, level: usize) -> Self {
let walk = PageWalk::new(P::page_size());
Self {
addr,
level,
walk,
_marker: PhantomData,
}
}
pub fn level(&self) -> usize {
self.level
}
pub fn paddr(&self) -> usize {
self.addr
}
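    /// Maps `[cfg.vaddr, cfg.vaddr + size)` to `[cfg.paddr, cfg.paddr + size)`,
    /// using block entries where `allow_block` and alignment permit, and
    /// invoking `on_page_mapped` with the virtual address of each installed
    /// entry.
    ///
    /// # Safety
    ///
    /// Remapping live memory is inherently unsafe; the caller must ensure
    /// the affected range is safe to modify.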
pub unsafe fn map_region_with_handle(
&mut self,
cfg: MapConfig,
size: usize,
allow_block: bool,
access: &mut impl Access,
on_page_mapped: Option<&impl Fn(*const u8)>,
) -> PagingResult {
let mut vaddr = cfg.vaddr;
let mut paddr = cfg.paddr;
let mut size = size;
trace!(
"map_region: [{:#x}, {:#x}) -> [{:#x}, {:#x}) {:?}",
vaddr as usize,
vaddr as usize + size,
paddr,
paddr + size,
cfg.setting,
);
while size > 0 {
let page_level = if allow_block {
self.detect_page_level(vaddr, size)
} else {
1
};
let page_size = self.walk.level_entry_size(page_level);
trace!("page_size: {page_size:#X}");
            // Per-entry mapping step: build the config for this chunk and
            // verify alignment before installing the entry.
            let step_cfg = MapConfig {
                vaddr,
                paddr,
                setting: cfg.setting,
            };
            let align = self.walk.level_entry_size(page_level);
            assert!(
                step_cfg.vaddr as usize % align == 0,
                "vaddr must be aligned to {align:#X}"
            );
            assert!(
                step_cfg.paddr % P::page_size() == 0,
                "paddr must be aligned to {:#X}",
                P::page_size()
            );
            self.get_entry_or_create(&step_cfg, page_level, access)?;
if let Some(f) = on_page_mapped {
f(vaddr);
}
vaddr = vaddr.add(page_size);
paddr += page_size;
size -= page_size;
}
Ok(())
}
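    /// Picks the largest block level whose entry size divides `vaddr` and
    /// fits in `size`, falling back to level 1 (a single page).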
    fn detect_page_level(&self, vaddr: *const u8, size: usize) -> usize {
        // Search from the largest candidate downward, capped by this
        // table's level; level 0 has no entry size (the shift would
        // underflow), so the search stops at level 1.
        let max_level = self.level;
        for level in (1..max_level).rev() {
            let page_size = self.walk.level_entry_size(level);
            if vaddr as usize % page_size == 0 && size >= page_size {
                return level;
            }
        }
        1
    }
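    /// Like [`Self::map_region_with_handle`], but without a per-page
    /// callback.
    ///
    /// # Safety
    ///
    /// Same contract as [`Self::map_region_with_handle`].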
pub unsafe fn map_region(
&mut self,
config: MapConfig,
size: usize,
allow_block: bool,
access: &mut impl Access,
) -> PagingResult<()> {
self.map_region_with_handle(
config,
size,
allow_block,
access,
None::<fn(*const u8)>.as_ref(),
)
}
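    /// Views the table's entries as a typed slice through the access
    /// layer's virtual-address offset.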
pub fn as_slice(&self, access: &impl Access) -> &'a [Pte<P>] {
unsafe {
&*slice_from_raw_parts(
(self.addr + access.va_offset()) as *const Pte<P>,
self.walk.table_size,
)
}
}
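    /// Returns the sub-table behind entry `idx`, allocating and linking a
    /// fresh one if the slot is not yet valid.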
    unsafe fn sub_table_or_create(
        &mut self,
        idx: usize,
        map_cfg: &MapConfig,
        access: &mut impl Access,
    ) -> PagingResult<PageTableRef<'a, P>> {
        let mut pte = self.get_pte(idx, access);
        let sub_level = self.level - 1;
        if pte.valid() {
            return Ok(Self::from_addr(pte.paddr, sub_level));
        }
        // No entry yet: allocate a sub-table and point this slot at it.
        let table = Self::new_with_level(sub_level, access)?;
        pte.is_valid = true;
        pte.paddr = table.addr;
        pte.is_block = false;
        pte.setting = map_cfg.setting;
        self.as_slice_mut(access)[idx] = P::new_pte(pte);
        Ok(table)
    }
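    /// Walks down from this table toward `level`, creating intermediate
    /// tables as needed, and installs the final entry there.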
unsafe fn get_entry_or_create(
&mut self,
map_cfg: &MapConfig,
level: usize,
access: &mut impl Access,
) -> PagingResult<()> {
let mut table = *self;
while table.level > 0 {
let idx = table.index_of_table(map_cfg.vaddr);
if table.level == level {
table.as_slice_mut(access)[idx] =
P::new_pte(PTEGeneric::new(map_cfg.paddr, level > 1, map_cfg.setting));
return Ok(());
}
table = table.sub_table_or_create(idx, map_cfg, access)?;
}
Err(PagingError::NotAligned)
}
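    /// Frees the memory behind every valid entry reachable from this
    /// table, then deallocates the root table itself.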
    pub fn release(&mut self, access: &mut impl Access) {
        let _ = self._release(core::ptr::null(), access);
        unsafe {
            access.dealloc(
                self.addr.to_virt(access),
                Layout::from_size_align_unchecked(P::page_size(), P::page_size()),
            );
        }
    }
    fn _release(&mut self, start_vaddr: *const u8, access: &mut impl Access) -> Option<()> {
        // Leaf level: nothing below to walk.
        if self.level == 1 {
            return Some(());
        }
        let start_vaddr_usize = start_vaddr as usize;
        let entries = self.as_slice(access);
        for (i, entry) in entries.iter().enumerate() {
            let vaddr_usize = start_vaddr_usize + i * self.entry_size();
            let vaddr = vaddr_usize as _;
            let pte = entry.read();
            if pte.valid() {
                // Descend into sub-tables before freeing the entry's target.
                if !pte.is_block {
                    let mut table_ref = self.next_table(i, access)?;
                    table_ref._release(vaddr, access)?;
                }
unsafe {
access.dealloc(
pte.paddr.to_virt(access),
Layout::from_size_align_unchecked(P::page_size(), P::page_size()),
);
}
}
}
Some(())
}
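    /// Returns the sub-table behind entry `idx`, or `None` if the slot is
    /// invalid or holds a block mapping.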
    fn next_table(&self, idx: usize, access: &impl Access) -> Option<Self> {
        let pte = self.get_pte(idx, access);
        // Block entries and empty slots have no sub-table to descend into.
        if pte.is_block || !pte.valid() {
            return None;
        }
        Some(Self::from_addr(pte.paddr, self.level - 1))
    }
fn index_of_table(&self, vaddr: *const u8) -> usize {
self.walk.index_of_table(self.level, vaddr)
}
pub fn entry_size(&self) -> usize {
self.walk.level_entry_size(self.level)
}
pub fn table_size(&self) -> usize {
self.walk.table_size
}
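    /// Views the table's entries as raw `usize` words for in-place writes.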
fn as_slice_mut(&mut self, access: &impl Access) -> &'a mut [usize] {
unsafe {
&mut *slice_from_raw_parts_mut(
(self.addr + access.va_offset()) as *mut usize,
self.walk.table_size,
)
}
}
fn get_pte(&self, idx: usize, access: &impl Access) -> PTEGeneric {
let s = self.as_slice(access);
s[idx].read()
}
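    /// Allocates one zeroed page to back a table and returns its physical
    /// address.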
unsafe fn alloc_table(access: &mut impl Access) -> PagingResult<usize> {
let page_size = P::page_size();
let layout = Layout::from_size_align_unchecked(page_size, page_size);
if let Some(addr) = access.alloc(layout) {
addr.write_bytes(0, page_size);
Ok(addr.as_ptr() as usize - access.va_offset())
} else {
Err(PagingError::NoMemory)
}
}
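    /// Iterates over the entries reachable from this table as [`PTEInfo`]
    /// items, starting from virtual address zero.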
pub fn iter_all<A: Access>(&self, access: &'a A) -> impl Iterator<Item = PTEInfo> + 'a {
TableIter::new(0 as _, *self, access)
}
}
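/// Floor of the base-2 logarithm; exact for the power-of-two sizes used
/// here.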
const fn log2(value: usize) -> usize {
    assert!(value > 0, "value must be non-zero");
    match value {
        // Fast paths for the common table and page sizes.
        512 => 9,
        4096 => 12,
        _ => {
            let mut v = value;
            let mut result = 0;
            while v > 1 {
                v >>= 1;
                result += 1;
            }
            result
        }
    }
}
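/// Converts a physical address into a virtual pointer by adding the
/// access layer's linear offset.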
pub trait PVConvert {
fn to_virt<T>(&self, access: &impl Access) -> NonNull<T>;
}
impl PVConvert for usize {
fn to_virt<T>(&self, access: &impl Access) -> NonNull<T> {
unsafe { NonNull::new_unchecked((self + access.va_offset()) as *mut u8) }.cast()
}
}
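/// Precomputed shift arithmetic for walking tables with a given page size.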
#[derive(Debug, Clone, Copy)]
pub struct PageWalk {
table_size: usize,
table_size_pow: usize,
page_size_pow: usize,
}
impl PageWalk {
fn new(page_size: usize) -> Self {
let table_size = page_size / size_of::<usize>();
let table_size_pow = log2(table_size);
let page_size_pow = log2(page_size);
Self {
table_size,
table_size_pow,
page_size_pow,
}
}
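    /// Shift such that `1 << shift` equals the bytes covered by one entry
    /// at `level`, i.e. `page_size * table_size^(level - 1)`.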
fn level_entry_size_shift(&self, level: usize) -> usize {
self.page_size_pow + (level - 1) * self.table_size_pow
}
fn index_of_table(&self, level: usize, vaddr: *const u8) -> usize {
(vaddr as usize >> self.level_entry_size_shift(level)) & (self.table_size - 1)
}
fn level_entry_size(&self, level: usize) -> usize {
1 << self.level_entry_size_shift(level)
}
}
#[cfg(test)]
mod test {
extern crate std;
use super::*;
const MB: usize = 1024 * 1024;
const GB: usize = 1024 * MB;
#[test]
fn test_log2() {
assert_eq!(log2(512), 9);
assert_eq!(log2(4096), 12);
}
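    // Sanity-check the fallback loop in `log2` against inputs outside the
    // fast-path match arms.
    #[test]
    fn test_log2_fallback() {
        assert_eq!(log2(1), 0);
        assert_eq!(log2(2), 1);
        assert_eq!(log2(8), 3);
        assert_eq!(log2(1 << 20), 20);
    }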
#[test]
fn test_level_entry_memory_size() {
let w = PageWalk::new(4096);
assert_eq!(w.level_entry_size(1), 4096);
assert_eq!(w.level_entry_size(2), 2 * MB);
assert_eq!(w.level_entry_size(3), GB);
assert_eq!(w.level_entry_size(4), 512 * GB);
}
#[test]
fn test_idx_of_table() {
let w = PageWalk::new(4096);
assert_eq!(w.index_of_table(1, 0 as _), 0);
assert_eq!(w.index_of_table(1, 0x1000 as _), 1);
assert_eq!(w.index_of_table(1, 0x2000 as _), 2);
assert_eq!(w.index_of_table(2, 0 as _), 0);
assert_eq!(w.index_of_table(2, (2 * MB) as _), 1);
assert_eq!(w.index_of_table(3, GB as _), 1);
assert_eq!(w.index_of_table(4, (512 * GB) as _), 1);
}
}