use bitflags::*;
use core::convert::{From, Into};
use core::fmt;
use core::ops;
/// Generates a flag-query method on a page-table entry type.
///
/// Expands to `#[$doc] pub fn $fun(self) -> bool` which reports whether
/// `$flag` is set in `self.flags()`; the `$doc` meta item becomes the doc
/// comment of the generated method.
macro_rules! check_flag {
    ($doc:meta, $fun:ident, $flag:expr) => (
        #[$doc]
        pub fn $fun(self) -> bool {
            self.flags().contains($flag)
        }
    )
}
/// Rounds `addr` down to the nearest multiple of `align`.
///
/// `align` must be a non-zero power of two; for any other value the bit
/// trick below produces a meaningless result, so this is checked in debug
/// builds (release builds keep the original unchecked behavior).
#[inline(always)]
fn align_down(addr: u64, align: u64) -> u64 {
    debug_assert!(align.is_power_of_two(), "`align` must be a power of two");
    // Clearing the low bits of a power-of-two-minus-one mask rounds down.
    addr & !(align - 1)
}
/// Rounds `addr` up to the nearest multiple of `align`.
///
/// `align` must be a non-zero power of two; checked in debug builds only
/// (release builds keep the original unchecked behavior). If the rounded
/// result does not fit in a `u64`, the `+ 1` below overflows (panic in
/// debug, wrap in release) — callers pass page-size alignments well below
/// that range.
#[inline(always)]
fn align_up(addr: u64, align: u64) -> u64 {
    debug_assert!(align.is_power_of_two(), "`align` must be a power of two");
    let align_mask = align - 1;
    if addr & align_mask == 0 {
        // Already aligned; avoid the overflow-prone arithmetic entirely.
        addr
    } else {
        (addr | align_mask) + 1
    }
}
/// A 64-bit physical memory address.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)]
pub struct PAddr(pub u64);

impl PAddr {
    /// Raw numeric value of the address.
    pub fn as_u64(self) -> u64 {
        let PAddr(raw) = self;
        raw
    }

    /// Raw value as `usize` (truncates on targets narrower than 64 bits).
    pub fn as_usize(self) -> usize {
        let PAddr(raw) = self;
        raw as usize
    }

    /// The zero physical address.
    pub const fn zero() -> Self {
        PAddr(0)
    }

    /// True iff this is the zero address.
    pub fn is_zero(self) -> bool {
        self.0 == 0
    }

    /// Round up to the nearest multiple of `align` (a power of two).
    fn align_up<U>(self, align: U) -> Self
    where
        U: Into<u64>,
    {
        let alignment: u64 = align.into();
        PAddr(align_up(self.0, alignment))
    }

    /// Round down to the nearest multiple of `align` (a power of two).
    fn align_down<U>(self, align: U) -> Self
    where
        U: Into<u64>,
    {
        let alignment: u64 = align.into();
        PAddr(align_down(self.0, alignment))
    }

    /// Offset of this address within its 4 KiB page.
    pub fn base_page_offset(self) -> u64 {
        self.0 % BASE_PAGE_SIZE as u64
    }

    /// Offset of this address within its 2 MiB page.
    pub fn large_page_offset(self) -> u64 {
        self.0 % LARGE_PAGE_SIZE as u64
    }

    /// Offset of this address within its 1 GiB page.
    pub fn huge_page_offset(self) -> u64 {
        self.0 % HUGE_PAGE_SIZE as u64
    }

    /// Round down to a 4 KiB page boundary.
    pub fn align_down_to_base_page(self) -> Self {
        self.align_down(BASE_PAGE_SIZE as u64)
    }

    /// Round down to a 2 MiB page boundary.
    pub fn align_down_to_large_page(self) -> Self {
        self.align_down(LARGE_PAGE_SIZE as u64)
    }

    /// Round down to a 1 GiB page boundary.
    pub fn align_down_to_huge_page(self) -> Self {
        self.align_down(HUGE_PAGE_SIZE as u64)
    }

    /// Round up to a 4 KiB page boundary.
    pub fn align_up_to_base_page(self) -> Self {
        self.align_up(BASE_PAGE_SIZE as u64)
    }

    /// Round up to a 2 MiB page boundary.
    pub fn align_up_to_large_page(self) -> Self {
        self.align_up(LARGE_PAGE_SIZE as u64)
    }

    /// Round up to a 1 GiB page boundary.
    pub fn align_up_to_huge_page(self) -> Self {
        self.align_up(HUGE_PAGE_SIZE as u64)
    }

    /// True iff the address lies on a 4 KiB boundary.
    pub fn is_base_page_aligned(self) -> bool {
        self.base_page_offset() == 0
    }

    /// True iff the address lies on a 2 MiB boundary.
    pub fn is_large_page_aligned(self) -> bool {
        self.large_page_offset() == 0
    }

    /// True iff the address lies on a 1 GiB boundary.
    pub fn is_huge_page_aligned(self) -> bool {
        self.huge_page_offset() == 0
    }

    /// True iff `align` is a power of two and the address is a multiple of
    /// it; any non-power-of-two alignment yields `false`.
    pub fn is_aligned<U>(self, align: U) -> bool
    where
        U: Into<u64> + Copy,
    {
        let alignment: u64 = align.into();
        alignment.is_power_of_two() && self.0 % alignment == 0
    }
}
impl From<u64> for PAddr {
    /// Wraps a raw `u64` as a physical address.
    fn from(num: u64) -> Self {
        PAddr(num)
    }
}

impl From<usize> for PAddr {
    /// Wraps a `usize` as a physical address (zero-extended to 64 bits).
    fn from(num: usize) -> Self {
        PAddr(num as u64)
    }
}

impl From<i32> for PAddr {
    /// NOTE(review): `num as u64` sign-extends, so a negative `i32` yields a
    /// huge address — presumably callers only pass non-negative literals;
    /// confirm before relying on this.
    fn from(num: i32) -> Self {
        PAddr(num as u64)
    }
}

// Implemented as `From` rather than a manual `Into`: the standard blanket
// impl then provides `PAddr: Into<u64>` for free, so existing `.into()`
// call sites keep working.
impl From<PAddr> for u64 {
    fn from(addr: PAddr) -> Self {
        addr.0
    }
}

impl From<PAddr> for usize {
    /// Truncates on targets where `usize` is narrower than 64 bits.
    fn from(addr: PAddr) -> Self {
        addr.0 as usize
    }
}
impl ops::Add for PAddr {
    type Output = PAddr;
    /// Adds two physical addresses (panics on overflow in debug builds).
    fn add(self, rhs: PAddr) -> Self::Output {
        PAddr(self.0 + rhs.0)
    }
}

impl ops::Add<u64> for PAddr {
    type Output = PAddr;
    /// Adds a raw byte offset.
    fn add(self, rhs: u64) -> Self::Output {
        PAddr(self.0 + rhs)
    }
}

impl ops::Add<usize> for PAddr {
    type Output = PAddr;
    /// Adds a `usize` byte offset.
    fn add(self, rhs: usize) -> Self::Output {
        PAddr(self.0 + rhs as u64)
    }
}

impl ops::AddAssign for PAddr {
    fn add_assign(&mut self, other: PAddr) {
        *self = PAddr(self.0 + other.0);
    }
}

impl ops::AddAssign<u64> for PAddr {
    fn add_assign(&mut self, offset: u64) {
        *self = PAddr(self.0 + offset);
    }
}

// Added for parity with `VAddr`, which already supports `+= usize`.
impl ops::AddAssign<usize> for PAddr {
    fn add_assign(&mut self, offset: usize) {
        *self = PAddr(self.0 + offset as u64);
    }
}
impl ops::Sub for PAddr {
    type Output = PAddr;
    /// Byte distance below another address (panics on underflow in debug builds).
    fn sub(self, other: PAddr) -> PAddr {
        PAddr(self.0 - other.0)
    }
}

impl ops::Sub<u64> for PAddr {
    type Output = PAddr;
    fn sub(self, offset: u64) -> PAddr {
        PAddr(self.0 - offset)
    }
}

impl ops::Sub<usize> for PAddr {
    type Output = PAddr;
    fn sub(self, offset: usize) -> PAddr {
        PAddr(self.0 - offset as u64)
    }
}

impl ops::Rem for PAddr {
    type Output = PAddr;
    fn rem(self, modulus: PAddr) -> PAddr {
        PAddr(self.0 % modulus.0)
    }
}

impl ops::Rem<u64> for PAddr {
    type Output = u64;
    /// Remainder against a raw value, yielding the raw remainder.
    fn rem(self, modulus: u64) -> u64 {
        self.0 % modulus
    }
}

impl ops::Rem<usize> for PAddr {
    // NOTE(review): yields `u64`, unlike `VAddr % usize` which yields
    // `usize` — kept as-is since callers only compare the result to
    // integer literals.
    type Output = u64;
    fn rem(self, modulus: usize) -> u64 {
        self.0 % modulus as u64
    }
}
impl ops::BitAnd for PAddr {
    type Output = Self;
    /// Bitwise AND of two addresses.
    fn bitand(self, other: Self) -> Self {
        PAddr(self.0 & other.0)
    }
}

impl ops::BitAnd<u64> for PAddr {
    type Output = u64;
    /// Bitwise AND with a raw mask, yielding the raw result.
    fn bitand(self, mask: u64) -> u64 {
        self.0 & mask
    }
}

impl ops::BitOr for PAddr {
    type Output = PAddr;
    fn bitor(self, other: PAddr) -> PAddr {
        PAddr(self.0 | other.0)
    }
}

impl ops::BitOr<u64> for PAddr {
    type Output = u64;
    /// Bitwise OR with raw bits, yielding the raw result.
    fn bitor(self, bits: u64) -> u64 {
        self.0 | bits
    }
}

impl ops::Shr<u64> for PAddr {
    type Output = u64;
    /// Logical right shift of the raw address.
    fn shr(self, shift: u64) -> u64 {
        self.0 >> shift
    }
}
// Formatting: all numeric format traits delegate to the wrapped `u64`;
// `Debug` always prints `{:#x}` form.
impl fmt::Binary for PAddr {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl fmt::Display for PAddr {
    // NOTE(review): prints decimal, while `VAddr`'s `Display` prints
    // `{:#x}` — confirm the asymmetry is intended before changing either.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl fmt::Debug for PAddr {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}", self.0)
    }
}

impl fmt::LowerHex for PAddr {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl fmt::Octal for PAddr {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl fmt::UpperHex for PAddr {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl fmt::Pointer for PAddr {
    // `u64` has no `fmt::Pointer` impl; importing `LowerHex` makes the
    // `self.0.fmt(f)` call resolve to lowercase-hex formatting instead.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use core::fmt::LowerHex;
        self.0.fmt(f)
    }
}
/// A 64-bit virtual (linear) memory address.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)]
pub struct VAddr(pub u64);

impl VAddr {
    /// Wraps a raw `u64` as a virtual address.
    pub const fn from_u64(v: u64) -> Self {
        VAddr(v)
    }

    /// Wraps a `usize` as a virtual address (zero-extended to 64 bits).
    pub const fn from_usize(v: usize) -> Self {
        VAddr(v as u64)
    }

    /// Raw numeric value of the address.
    pub const fn as_u64(self) -> u64 {
        self.0
    }

    /// Raw value as `usize` (truncates on targets narrower than 64 bits).
    pub const fn as_usize(self) -> usize {
        self.0 as usize
    }

    /// Reinterpret the address as a mutable raw pointer.
    pub fn as_mut_ptr<T>(self) -> *mut T {
        self.0 as *mut T
    }

    /// Reinterpret the address as a const raw pointer.
    pub fn as_ptr<T>(self) -> *const T {
        self.0 as *const T
    }

    /// The zero virtual address.
    pub const fn zero() -> Self {
        VAddr(0)
    }

    /// True iff this is the zero address.
    pub fn is_zero(self) -> bool {
        self.0 == 0
    }

    /// Round up to the nearest multiple of `align` (a power of two).
    fn align_up<U>(self, align: U) -> Self
    where
        U: Into<u64>,
    {
        let alignment: u64 = align.into();
        VAddr(align_up(self.0, alignment))
    }

    /// Round down to the nearest multiple of `align` (a power of two).
    fn align_down<U>(self, align: U) -> Self
    where
        U: Into<u64>,
    {
        let alignment: u64 = align.into();
        VAddr(align_down(self.0, alignment))
    }

    /// Offset of this address within its 4 KiB page.
    pub fn base_page_offset(self) -> u64 {
        self.0 % BASE_PAGE_SIZE as u64
    }

    /// Offset of this address within its 2 MiB page.
    pub fn large_page_offset(self) -> u64 {
        self.0 % LARGE_PAGE_SIZE as u64
    }

    /// Offset of this address within its 1 GiB page.
    pub fn huge_page_offset(self) -> u64 {
        self.0 % HUGE_PAGE_SIZE as u64
    }

    /// Round down to a 4 KiB page boundary.
    pub fn align_down_to_base_page(self) -> Self {
        self.align_down(BASE_PAGE_SIZE as u64)
    }

    /// Round down to a 2 MiB page boundary.
    pub fn align_down_to_large_page(self) -> Self {
        self.align_down(LARGE_PAGE_SIZE as u64)
    }

    /// Round down to a 1 GiB page boundary.
    pub fn align_down_to_huge_page(self) -> Self {
        self.align_down(HUGE_PAGE_SIZE as u64)
    }

    /// Round up to a 4 KiB page boundary.
    pub fn align_up_to_base_page(self) -> Self {
        self.align_up(BASE_PAGE_SIZE as u64)
    }

    /// Round up to a 2 MiB page boundary.
    pub fn align_up_to_large_page(self) -> Self {
        self.align_up(LARGE_PAGE_SIZE as u64)
    }

    /// Round up to a 1 GiB page boundary.
    pub fn align_up_to_huge_page(self) -> Self {
        self.align_up(HUGE_PAGE_SIZE as u64)
    }

    /// True iff the address lies on a 4 KiB boundary.
    pub fn is_base_page_aligned(self) -> bool {
        self.base_page_offset() == 0
    }

    /// True iff the address lies on a 2 MiB boundary.
    pub fn is_large_page_aligned(self) -> bool {
        self.large_page_offset() == 0
    }

    /// True iff the address lies on a 1 GiB boundary.
    pub fn is_huge_page_aligned(self) -> bool {
        self.huge_page_offset() == 0
    }

    /// True iff `align` is a power of two and the address is a multiple of
    /// it; any non-power-of-two alignment yields `false`.
    pub fn is_aligned<U>(self, align: U) -> bool
    where
        U: Into<u64> + Copy,
    {
        let alignment: u64 = align.into();
        alignment.is_power_of_two() && self.0 % alignment == 0
    }
}
impl From<u64> for VAddr {
    /// Wraps a raw `u64` as a virtual address.
    fn from(num: u64) -> Self {
        VAddr(num)
    }
}

impl From<i32> for VAddr {
    /// NOTE(review): `num as u64` sign-extends, so a negative `i32` yields a
    /// huge address — presumably callers only pass non-negative literals;
    /// confirm before relying on this.
    fn from(num: i32) -> Self {
        VAddr(num as u64)
    }
}

// Implemented as `From` rather than a manual `Into`: the standard blanket
// impl then provides `VAddr: Into<u64>` for free, so existing `.into()`
// call sites keep working.
impl From<VAddr> for u64 {
    fn from(addr: VAddr) -> Self {
        addr.0
    }
}

impl From<usize> for VAddr {
    /// Wraps a `usize` as a virtual address (zero-extended to 64 bits).
    fn from(num: usize) -> Self {
        VAddr(num as u64)
    }
}

impl From<VAddr> for usize {
    /// Truncates on targets where `usize` is narrower than 64 bits.
    fn from(addr: VAddr) -> Self {
        addr.0 as usize
    }
}
impl ops::Add for VAddr {
    type Output = VAddr;
    /// Adds two virtual addresses (panics on overflow in debug builds).
    fn add(self, other: VAddr) -> VAddr {
        VAddr(self.0 + other.0)
    }
}

impl ops::Add<u64> for VAddr {
    type Output = VAddr;
    fn add(self, offset: u64) -> VAddr {
        VAddr(self.0 + offset)
    }
}

impl ops::Add<usize> for VAddr {
    type Output = VAddr;
    fn add(self, offset: usize) -> VAddr {
        VAddr(self.0 + offset as u64)
    }
}

impl ops::AddAssign for VAddr {
    fn add_assign(&mut self, other: VAddr) {
        *self = VAddr(self.0 + other.0);
    }
}

impl ops::AddAssign<u64> for VAddr {
    fn add_assign(&mut self, offset: u64) {
        *self = VAddr(self.0 + offset);
    }
}

impl ops::AddAssign<usize> for VAddr {
    fn add_assign(&mut self, offset: usize) {
        *self = VAddr(self.0 + offset as u64);
    }
}

impl ops::Sub for VAddr {
    type Output = VAddr;
    /// Byte distance below another address (panics on underflow in debug builds).
    fn sub(self, other: VAddr) -> VAddr {
        VAddr(self.0 - other.0)
    }
}

impl ops::Sub<u64> for VAddr {
    type Output = VAddr;
    fn sub(self, offset: u64) -> VAddr {
        VAddr(self.0 - offset)
    }
}

impl ops::Sub<usize> for VAddr {
    type Output = VAddr;
    fn sub(self, offset: usize) -> VAddr {
        VAddr(self.0 - offset as u64)
    }
}
impl ops::Rem for VAddr {
type Output = VAddr;
fn rem(self, rhs: VAddr) -> Self::Output {
VAddr(self.0 % rhs.0)
}
}
impl ops::Rem<u64> for VAddr {
type Output = u64;
fn rem(self, rhs: Self::Output) -> Self::Output {
self.0 % rhs
}
}
impl ops::Rem<usize> for VAddr {
type Output = usize;
fn rem(self, rhs: Self::Output) -> Self::Output {
self.as_usize() % rhs
}
}
impl ops::BitAnd for VAddr {
    type Output = Self;
    /// Bitwise AND of two addresses.
    fn bitand(self, other: Self) -> Self {
        VAddr(self.0 & other.0)
    }
}

impl ops::BitAnd<u64> for VAddr {
    type Output = VAddr;
    fn bitand(self, mask: u64) -> VAddr {
        VAddr(self.0 & mask)
    }
}

impl ops::BitAnd<usize> for VAddr {
    type Output = VAddr;
    fn bitand(self, mask: usize) -> VAddr {
        VAddr(self.0 & mask as u64)
    }
}

impl ops::BitAnd<i32> for VAddr {
    type Output = VAddr;
    // NOTE(review): a negative `i32` mask sign-extends to 64 bits before
    // the AND — presumably callers only pass non-negative masks; confirm.
    fn bitand(self, mask: i32) -> VAddr {
        VAddr(self.0 & mask as u64)
    }
}

impl ops::BitOr for VAddr {
    type Output = VAddr;
    fn bitor(self, other: VAddr) -> VAddr {
        VAddr(self.0 | other.0)
    }
}

impl ops::BitOr<u64> for VAddr {
    type Output = VAddr;
    fn bitor(self, bits: u64) -> VAddr {
        VAddr(self.0 | bits)
    }
}

impl ops::BitOr<usize> for VAddr {
    type Output = VAddr;
    fn bitor(self, bits: usize) -> VAddr {
        VAddr(self.0 | bits as u64)
    }
}

impl ops::Shr<u64> for VAddr {
    type Output = u64;
    /// Logical right shift of the raw address.
    fn shr(self, shift: u64) -> u64 {
        self.0 >> shift
    }
}

impl ops::Shr<usize> for VAddr {
    type Output = u64;
    fn shr(self, shift: usize) -> u64 {
        self.0 >> shift as u64
    }
}

impl ops::Shr<i32> for VAddr {
    type Output = u64;
    fn shr(self, shift: i32) -> u64 {
        self.0 >> shift as u64
    }
}
// Formatting: numeric format traits delegate to the wrapped `u64`;
// `Display` and `Debug` both print `{:#x}` form.
impl fmt::Binary for VAddr {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl fmt::Display for VAddr {
    // NOTE(review): prints hex, while `PAddr`'s `Display` prints decimal —
    // confirm the asymmetry is intended before changing either.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}", self.0)
    }
}

impl fmt::Debug for VAddr {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}", self.0)
    }
}

impl fmt::LowerHex for VAddr {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl fmt::Octal for VAddr {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl fmt::UpperHex for VAddr {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl fmt::Pointer for VAddr {
    // `u64` has no `fmt::Pointer` impl; importing `LowerHex` makes the
    // `self.0.fmt(f)` call resolve to lowercase-hex formatting instead.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use core::fmt::LowerHex;
        self.0.fmt(f)
    }
}
/// Log2 of `BASE_PAGE_SIZE` (4 KiB ⇒ 12 bits of page offset).
pub const BASE_PAGE_SHIFT: usize = 12;
/// Size of a base page: 4 KiB.
pub const BASE_PAGE_SIZE: usize = 4096;
/// Size of a large page: 2 MiB.
pub const LARGE_PAGE_SIZE: usize = 1024 * 1024 * 2;
/// Size of a huge page: 1 GiB.
pub const HUGE_PAGE_SIZE: usize = 1024 * 1024 * 1024;
/// Bytes covered by one PML4 slot: 512 huge pages = 512 GiB.
#[cfg(target_arch = "x86_64")]
pub const PML4_SLOT_SIZE: usize = HUGE_PAGE_SIZE * 512;
/// Size of a cache line in bytes.
pub const CACHE_LINE_SIZE: usize = 64;
/// A base-page-sized (4 KiB) byte buffer.
pub struct Page([u8; BASE_PAGE_SIZE]);
/// A large-page-sized (2 MiB) byte buffer.
pub struct LargePage([u8; LARGE_PAGE_SIZE]);
/// A huge-page-sized (1 GiB) byte buffer.
pub struct HugePage([u8; HUGE_PAGE_SIZE]);
/// Number of physical-address bits assumed by `ADDRESS_MASK`.
pub const MAXPHYADDR: u64 = 52;
/// Selects bits 12..=51 of an entry — the physical frame address field
/// (low 12 bits are flags / page offset).
const ADDRESS_MASK: u64 = ((1 << MAXPHYADDR) - 1) & !0xfff;
/// Entries per paging-structure table (512 × 8 bytes = one 4 KiB page).
pub const PAGE_SIZE_ENTRIES: usize = 512;
/// A page-map level-4 table.
pub type PML4 = [PML4Entry; PAGE_SIZE_ENTRIES];
/// A page-directory-pointer table.
pub type PDPT = [PDPTEntry; PAGE_SIZE_ENTRIES];
/// A page directory.
pub type PD = [PDEntry; PAGE_SIZE_ENTRIES];
/// A page table.
pub type PT = [PTEntry; PAGE_SIZE_ENTRIES];
/// Index of the PML4 slot mapping `addr` (bits 39..=47, 9 bits ⇒ 0..512).
#[cfg(target_arch = "x86_64")]
#[inline]
pub fn pml4_index(addr: VAddr) -> usize {
    ((addr.as_u64() >> 39) & 0x1ff) as usize
}

/// Index of the PDPT slot mapping `addr` (bits 30..=38).
#[inline]
pub fn pdpt_index(addr: VAddr) -> usize {
    ((addr.as_u64() >> 30) & 0x1ff) as usize
}

/// Index of the page-directory slot mapping `addr` (bits 21..=29).
#[inline]
pub fn pd_index(addr: VAddr) -> usize {
    ((addr.as_u64() >> 21) & 0x1ff) as usize
}

/// Index of the page-table slot mapping `addr` (bits 12..=20).
#[inline]
pub fn pt_index(addr: VAddr) -> usize {
    ((addr.as_u64() >> 12) & 0x1ff) as usize
}
bitflags! {
    /// Flag bits of a PML4 entry (low bits and bit 63 of the raw `u64`).
    #[repr(transparent)]
    pub struct PML4Flags: u64 {
        /// Present.
        const P = bit!(0);
        /// Read/write.
        const RW = bit!(1);
        /// User/supervisor.
        const US = bit!(2);
        /// Page-level write-through.
        const PWT = bit!(3);
        /// Page-level cache disable.
        const PCD = bit!(4);
        /// Accessed.
        const A = bit!(5);
        /// Execute-disable.
        const XD = bit!(63);
    }
}

/// An entry in the page-map level-4 table: a physical address of a PDPT
/// plus `PML4Flags`, packed into one `u64`.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct PML4Entry(pub u64);

impl fmt::Debug for PML4Entry {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "PML4Entry {{ {:#x}, {:?} }}",
            self.address(),
            self.flags()
        )
    }
}

impl PML4Entry {
    /// Creates a new entry pointing at the PDPT at `pml4`.
    ///
    /// Panics if `pml4` has bits outside `ADDRESS_MASK` set or is not
    /// 4 KiB-aligned.
    pub fn new(pml4: PAddr, flags: PML4Flags) -> PML4Entry {
        let pml4_val = pml4 & ADDRESS_MASK; // keep only the frame-address bits
        assert!(pml4_val == pml4.into()); // no bits lost to the mask
        assert!(pml4 % BASE_PAGE_SIZE == 0); // must be page-aligned
        PML4Entry(pml4_val | flags.bits)
    }
    /// Physical address stored in this entry (flag bits cleared).
    pub fn address(self) -> PAddr {
        PAddr::from(self.0 & ADDRESS_MASK)
    }
    /// Flag bits of this entry (non-flag bits dropped).
    pub fn flags(self) -> PML4Flags {
        PML4Flags::from_bits_truncate(self.0)
    }
    check_flag!(doc = "Is page present?", is_present, PML4Flags::P);
    check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 512-GByte region, controlled by this entry (see Section 4.6)",
        is_writeable, PML4Flags::RW);
    check_flag!(doc = "User/supervisor; if 0, user-mode accesses are not allowed to the 512-GByte region controlled by this entry.",
        is_user_mode_allowed, PML4Flags::US);
    check_flag!(doc = "Page-level write-through; indirectly determines the memory type used to access the page-directory-pointer table referenced by this entry.",
        is_page_write_through, PML4Flags::PWT);
    check_flag!(doc = "Page-level cache disable; indirectly determines the memory type used to access the page-directory-pointer table referenced by this entry.",
        is_page_level_cache_disabled, PML4Flags::PCD);
    check_flag!(
        doc =
            "Accessed; indicates whether this entry has been used for linear-address translation.",
        is_accessed,
        PML4Flags::A
    );
    check_flag!(doc = "If IA32_EFER.NXE = 1, execute-disable. If 1, instruction fetches are not allowed from the 512-GByte region.",
        is_instruction_fetching_disabled, PML4Flags::XD);
}
bitflags! {
    /// Flag bits of a PDPT entry.
    #[repr(transparent)]
    pub struct PDPTFlags: u64 {
        /// Present.
        const P = bit!(0);
        /// Read/write.
        const RW = bit!(1);
        /// User/supervisor.
        const US = bit!(2);
        /// Page-level write-through.
        const PWT = bit!(3);
        /// Page-level cache disable.
        const PCD = bit!(4);
        /// Accessed.
        const A = bit!(5);
        /// Dirty.
        const D = bit!(6);
        /// Page size (set ⇒ maps a 1-GiB page).
        const PS = bit!(7);
        /// Global.
        const G = bit!(8);
        /// PAT (only meaningful when PS is set).
        const PAT = bit!(12);
        /// Execute-disable.
        const XD = bit!(63);
    }
}

/// An entry in a page-directory-pointer table: physical address of a page
/// directory (or a 1-GiB page) plus `PDPTFlags`, packed into one `u64`.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct PDPTEntry(pub u64);

impl fmt::Debug for PDPTEntry {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "PDPTEntry {{ {:#x}, {:?} }}",
            self.address(),
            self.flags()
        )
    }
}

impl PDPTEntry {
    /// Creates a new entry pointing at the page directory at `pd`.
    ///
    /// Panics if `pd` has bits outside `ADDRESS_MASK` set or is not
    /// 4 KiB-aligned.
    pub fn new(pd: PAddr, flags: PDPTFlags) -> PDPTEntry {
        let pd_val = pd & ADDRESS_MASK; // keep only the frame-address bits
        assert!(pd_val == pd.into()); // no bits lost to the mask
        assert!(pd % BASE_PAGE_SIZE == 0); // must be page-aligned
        PDPTEntry(pd_val | flags.bits)
    }
    /// Physical address stored in this entry (flag bits cleared).
    pub fn address(self) -> PAddr {
        PAddr::from(self.0 & ADDRESS_MASK)
    }
    /// Flag bits of this entry (non-flag bits dropped).
    pub fn flags(self) -> PDPTFlags {
        PDPTFlags::from_bits_truncate(self.0)
    }
    check_flag!(doc = "Is page present?", is_present, PDPTFlags::P);
    check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 1-GByte region controlled by this entry.",
        is_writeable, PDPTFlags::RW);
    check_flag!(doc = "User/supervisor; user-mode accesses are not allowed to the 1-GByte region controlled by this entry.",
        is_user_mode_allowed, PDPTFlags::US);
    check_flag!(
        doc = "Page-level write-through.",
        is_page_write_through,
        PDPTFlags::PWT
    );
    check_flag!(
        doc = "Page-level cache disable.",
        is_page_level_cache_disabled,
        PDPTFlags::PCD
    );
    check_flag!(
        doc =
            "Accessed; indicates whether this entry has been used for linear-address translation.",
        is_accessed,
        PDPTFlags::A
    );
    check_flag!(doc = "Indirectly determines the memory type used to access the 1-GByte page referenced by this entry. if not PS this is ignored.",
        is_pat, PDPTFlags::PAT);
    check_flag!(doc = "If IA32_EFER.NXE = 1, execute-disable. If 1, instruction fetches are not allowed from the 512-GByte region.",
        is_instruction_fetching_disabled, PDPTFlags::XD);
    check_flag!(doc = "Page size; if set this entry maps a 1-GByte page; otherwise, this entry references a page directory.",
        is_page, PDPTFlags::PS);
}
bitflags! {
    /// Flag bits of a page-directory entry.
    #[repr(transparent)]
    pub struct PDFlags: u64 {
        /// Present.
        const P = bit!(0);
        /// Read/write.
        const RW = bit!(1);
        /// User/supervisor.
        const US = bit!(2);
        /// Page-level write-through.
        const PWT = bit!(3);
        /// Page-level cache disable.
        const PCD = bit!(4);
        /// Accessed.
        const A = bit!(5);
        /// Dirty.
        const D = bit!(6);
        /// Page size (set ⇒ maps a 2-MiB page).
        const PS = bit!(7);
        /// Global.
        const G = bit!(8);
        /// PAT (only meaningful when PS is set).
        const PAT = bit!(12);
        /// Execute-disable.
        const XD = bit!(63);
    }
}

/// An entry in a page directory: physical address of a page table (or a
/// 2-MiB page) plus `PDFlags`, packed into one `u64`.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct PDEntry(pub u64);

impl fmt::Debug for PDEntry {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "PDEntry {{ {:#x}, {:?} }}", self.address(), self.flags())
    }
}

impl PDEntry {
    /// Creates a new entry pointing at the page table at `pt`.
    ///
    /// Panics if `pt` has bits outside `ADDRESS_MASK` set or is not
    /// 4 KiB-aligned.
    pub fn new(pt: PAddr, flags: PDFlags) -> PDEntry {
        let pt_val = pt & ADDRESS_MASK; // keep only the frame-address bits
        assert!(pt_val == pt.into()); // no bits lost to the mask
        assert!(pt % BASE_PAGE_SIZE == 0); // must be page-aligned
        PDEntry(pt_val | flags.bits)
    }
    /// Physical address stored in this entry (flag bits cleared).
    pub fn address(self) -> PAddr {
        PAddr::from(self.0 & ADDRESS_MASK)
    }
    /// Flag bits of this entry (non-flag bits dropped).
    pub fn flags(self) -> PDFlags {
        PDFlags::from_bits_truncate(self.0)
    }
    check_flag!(
        doc = "Present; must be 1 to map a 2-MByte page or reference a page table.",
        is_present,
        PDFlags::P
    );
    check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 2-MByte region controlled by this entry",
        is_writeable, PDFlags::RW);
    check_flag!(doc = "User/supervisor; user-mode accesses are not allowed to the 2-MByte region controlled by this entry.",
        is_user_mode_allowed, PDFlags::US);
    check_flag!(
        doc = "Page-level write-through.",
        is_page_write_through,
        PDFlags::PWT
    );
    check_flag!(
        doc = "Page-level cache disable.",
        is_page_level_cache_disabled,
        PDFlags::PCD
    );
    check_flag!(doc = "Accessed; if PS set indicates whether software has accessed the 2-MByte page else indicates whether this entry has been used for linear-address translation.",
        is_accessed, PDFlags::A);
    check_flag!(doc = "Dirty; if PS set indicates whether software has written to the 2-MByte page referenced by this entry else ignored.",
        is_dirty, PDFlags::D);
    check_flag!(doc = "Page size; if set this entry maps a 2-MByte page; otherwise, this entry references a page directory.",
        is_page, PDFlags::PS);
    check_flag!(doc = "Global; if PS && CR4.PGE = 1, determines whether the translation is global; ignored otherwise if not PS this is ignored.",
        is_global, PDFlags::G);
    check_flag!(doc = "Indirectly determines the memory type used to access the 2-MByte page referenced by this entry. if not PS this is ignored.",
        is_pat, PDFlags::PAT);
    check_flag!(doc = "If IA32_EFER.NXE = 1, execute-disable. If 1, instruction fetches are not allowed from the 2-Mbyte region.",
        is_instruction_fetching_disabled, PDFlags::XD);
}
bitflags! {
    /// Flag bits of a page-table entry.
    #[repr(transparent)]
    pub struct PTFlags: u64 {
        /// Present.
        const P = bit!(0);
        /// Read/write.
        const RW = bit!(1);
        /// User/supervisor.
        const US = bit!(2);
        /// Page-level write-through.
        const PWT = bit!(3);
        /// Page-level cache disable.
        const PCD = bit!(4);
        /// Accessed.
        const A = bit!(5);
        /// Dirty.
        const D = bit!(6);
        /// Global.
        const G = bit!(8);
        /// Execute-disable.
        const XD = bit!(63);
    }
}

/// An entry in a page table: physical address of a 4-KiB page plus
/// `PTFlags`, packed into one `u64`.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct PTEntry(pub u64);

impl fmt::Debug for PTEntry {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "PTEntry {{ {:#x}, {:?} }}", self.address(), self.flags())
    }
}

impl PTEntry {
    /// Creates a new entry mapping the 4-KiB page at `page`.
    ///
    /// Panics if `page` has bits outside `ADDRESS_MASK` set or is not
    /// 4 KiB-aligned.
    pub fn new(page: PAddr, flags: PTFlags) -> PTEntry {
        let page_val = page & ADDRESS_MASK; // keep only the frame-address bits
        assert!(page_val == page.into()); // no bits lost to the mask
        assert!(page % BASE_PAGE_SIZE == 0); // must be page-aligned
        PTEntry(page_val | flags.bits)
    }
    /// Physical address stored in this entry (flag bits cleared).
    pub fn address(self) -> PAddr {
        PAddr::from(self.0 & ADDRESS_MASK)
    }
    /// Flag bits of this entry (non-flag bits dropped).
    pub fn flags(self) -> PTFlags {
        PTFlags::from_bits_truncate(self.0)
    }
    check_flag!(
        doc = "Present; must be 1 to map a 4-KByte page or reference a page table.",
        is_present,
        PTFlags::P
    );
    check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 4-KByte region controlled by this entry",
        is_writeable, PTFlags::RW);
    check_flag!(doc = "User/supervisor; user-mode accesses are not allowed to the 4-KByte region controlled by this entry.",
        is_user_mode_allowed, PTFlags::US);
    check_flag!(
        doc = "Page-level write-through.",
        is_page_write_through,
        PTFlags::PWT
    );
    check_flag!(
        doc = "Page-level cache disable.",
        is_page_level_cache_disabled,
        PTFlags::PCD
    );
    check_flag!(doc = "Accessed; if PS set indicates whether software has accessed the 4-KByte page else indicates whether this entry has been used for linear-address translation.",
        is_accessed, PTFlags::A);
    check_flag!(doc = "Dirty; if PD_PS set indicates whether software has written to the 4-KByte page referenced by this entry else ignored.",
        is_dirty, PTFlags::D);
    check_flag!(doc = "Global; if PS && CR4.PGE = 1, determines whether the translation is global; ignored otherwise if not PS this is ignored.",
        is_global, PTFlags::G);
    check_flag!(doc = "If IA32_EFER.NXE = 1, execute-disable. If 1, instruction fetches are not allowed from the 4-KByte region.",
        is_instruction_fetching_disabled, PTFlags::XD);
}
// Unit tests for the alignment/offset helpers; only built with the
// `utest` feature.
#[cfg(all(test, feature = "utest"))]
mod test {
    use super::*;

    #[test]
    fn paddr_align() {
        // A base-page-aligned physical address.
        let base = PAddr::from(0x1000);
        assert_eq!(base.base_page_offset(), 0x0);
        assert_eq!(base.large_page_offset(), 0x1000);
        assert_eq!(base.huge_page_offset(), 0x1000);
        assert_eq!(base.align_down_to_base_page(), PAddr::from(0x1000));
        assert_eq!(base.align_down_to_large_page(), PAddr::from(0x0));
        assert_eq!(base.align_down_to_huge_page(), PAddr::from(0x0));
        assert_eq!(base.align_up_to_base_page(), PAddr::from(0x1000));
        assert_eq!(base.align_up_to_large_page(), PAddr::from(0x200000));
        assert_eq!(base.align_up_to_huge_page(), PAddr::from(1073741824));
        assert!(base.is_base_page_aligned());
        assert!(!base.is_large_page_aligned());
        assert!(!base.is_huge_page_aligned());
        assert!(base.is_aligned(0x1u64));
        assert!(base.is_aligned(0x2u64));
        assert!(!base.is_aligned(0x3u64));
        assert!(base.is_aligned(0x4u64));
        // One byte past a base-page boundary.
        let base = PAddr::from(0x1001);
        assert_eq!(base.base_page_offset(), 0x1);
        assert_eq!(base.large_page_offset(), 0x1001);
        assert_eq!(base.huge_page_offset(), 0x1001);
        assert_eq!(base.align_down_to_base_page(), PAddr::from(0x1000));
        assert_eq!(base.align_down_to_large_page(), PAddr::from(0x0));
        assert_eq!(base.align_down_to_huge_page(), PAddr::from(0x0));
        assert_eq!(base.align_up_to_base_page(), PAddr::from(0x2000));
        assert_eq!(base.align_up_to_large_page(), PAddr::from(0x200000));
        assert_eq!(base.align_up_to_huge_page(), PAddr::from(1073741824));
        assert!(!base.is_base_page_aligned());
        assert!(!base.is_large_page_aligned());
        assert!(!base.is_huge_page_aligned());
        assert!(base.is_aligned(0x1u64));
        assert!(!base.is_aligned(0x2u64));
        assert!(!base.is_aligned(0x3u64));
        assert!(!base.is_aligned(0x4u64));
        // A large-page-aligned address.
        let base = PAddr::from(0x200000);
        assert_eq!(base.base_page_offset(), 0x0);
        assert_eq!(base.large_page_offset(), 0x0);
        assert_eq!(base.huge_page_offset(), 0x200000);
        assert_eq!(base.align_down_to_base_page(), PAddr::from(0x200000));
        assert_eq!(base.align_down_to_large_page(), PAddr::from(0x200000));
        assert_eq!(base.align_down_to_huge_page(), PAddr::from(0x0));
        assert_eq!(base.align_up_to_base_page(), PAddr::from(0x200000));
        assert_eq!(base.align_up_to_large_page(), PAddr::from(0x200000));
        assert_eq!(base.align_up_to_huge_page(), PAddr::from(1073741824));
        assert!(base.is_base_page_aligned());
        assert!(base.is_large_page_aligned());
        assert!(!base.is_huge_page_aligned());
        assert!(base.is_aligned(0x1u64));
        assert!(base.is_aligned(0x2u64));
        assert!(!base.is_aligned(0x3u64));
        assert!(base.is_aligned(0x4u64));
        // Two bytes past a large-page boundary.
        let base = PAddr::from(0x200002);
        assert_eq!(base.base_page_offset(), 0x2);
        assert_eq!(base.large_page_offset(), 0x2);
        assert_eq!(base.huge_page_offset(), 0x200002);
        assert_eq!(base.align_down_to_base_page(), PAddr::from(0x200000));
        assert_eq!(base.align_down_to_large_page(), PAddr::from(0x200000));
        assert_eq!(base.align_down_to_huge_page(), PAddr::from(0x0));
        assert_eq!(base.align_up_to_base_page(), PAddr::from(0x201000));
        assert_eq!(base.align_up_to_large_page(), PAddr::from(0x400000));
        assert_eq!(base.align_up_to_huge_page(), PAddr::from(1073741824));
        assert!(!base.is_base_page_aligned());
        assert!(!base.is_large_page_aligned());
        assert!(!base.is_huge_page_aligned());
        assert!(base.is_aligned(0x1u64));
        assert!(base.is_aligned(0x2u64));
        assert!(!base.is_aligned(0x3u64));
        assert!(!base.is_aligned(0x4u64));
    }

    // Mirrors `paddr_align` for virtual addresses.
    #[test]
    fn vaddr_align() {
        let base = VAddr::from(0x1000);
        assert_eq!(base.base_page_offset(), 0x0);
        assert_eq!(base.large_page_offset(), 0x1000);
        assert_eq!(base.huge_page_offset(), 0x1000);
        assert_eq!(base.align_down_to_base_page(), VAddr::from(0x1000));
        assert_eq!(base.align_down_to_large_page(), VAddr::from(0x0));
        assert_eq!(base.align_down_to_huge_page(), VAddr::from(0x0));
        assert_eq!(base.align_up_to_base_page(), VAddr::from(0x1000));
        assert_eq!(base.align_up_to_large_page(), VAddr::from(0x200000));
        assert_eq!(base.align_up_to_huge_page(), VAddr::from(1073741824));
        assert!(base.is_base_page_aligned());
        assert!(!base.is_large_page_aligned());
        assert!(!base.is_huge_page_aligned());
        assert!(base.is_aligned(0x1u64));
        assert!(base.is_aligned(0x2u64));
        assert!(!base.is_aligned(0x3u64));
        assert!(base.is_aligned(0x4u64));
        let base = VAddr::from(0x1001);
        assert_eq!(base.base_page_offset(), 0x1);
        assert_eq!(base.large_page_offset(), 0x1001);
        assert_eq!(base.huge_page_offset(), 0x1001);
        assert_eq!(base.align_down_to_base_page(), VAddr::from(0x1000));
        assert_eq!(base.align_down_to_large_page(), VAddr::from(0x0));
        assert_eq!(base.align_down_to_huge_page(), VAddr::from(0x0));
        assert_eq!(base.align_up_to_base_page(), VAddr::from(0x2000));
        assert_eq!(base.align_up_to_large_page(), VAddr::from(0x200000));
        assert_eq!(base.align_up_to_huge_page(), VAddr::from(1073741824));
        assert!(!base.is_base_page_aligned());
        assert!(!base.is_large_page_aligned());
        assert!(!base.is_huge_page_aligned());
        assert!(base.is_aligned(0x1u64));
        assert!(!base.is_aligned(0x2u64));
        assert!(!base.is_aligned(0x3u64));
        assert!(!base.is_aligned(0x4u64));
        let base = VAddr::from(0x200000);
        assert_eq!(base.base_page_offset(), 0x0);
        assert_eq!(base.large_page_offset(), 0x0);
        assert_eq!(base.huge_page_offset(), 0x200000);
        assert_eq!(base.align_down_to_base_page(), VAddr::from(0x200000));
        assert_eq!(base.align_down_to_large_page(), VAddr::from(0x200000));
        assert_eq!(base.align_down_to_huge_page(), VAddr::from(0x0));
        assert_eq!(base.align_up_to_base_page(), VAddr::from(0x200000));
        assert_eq!(base.align_up_to_large_page(), VAddr::from(0x200000));
        assert_eq!(base.align_up_to_huge_page(), VAddr::from(1073741824));
        assert!(base.is_base_page_aligned());
        assert!(base.is_large_page_aligned());
        assert!(!base.is_huge_page_aligned());
        assert!(base.is_aligned(0x1u64));
        assert!(base.is_aligned(0x2u64));
        assert!(!base.is_aligned(0x3u64));
        assert!(base.is_aligned(0x4u64));
        let base = VAddr::from(0x200002);
        assert_eq!(base.base_page_offset(), 0x2);
        assert_eq!(base.large_page_offset(), 0x2);
        assert_eq!(base.huge_page_offset(), 0x200002);
        assert_eq!(base.align_down_to_base_page(), VAddr::from(0x200000));
        assert_eq!(base.align_down_to_large_page(), VAddr::from(0x200000));
        assert_eq!(base.align_down_to_huge_page(), VAddr::from(0x0));
        assert_eq!(base.align_up_to_base_page(), VAddr::from(0x201000));
        assert_eq!(base.align_up_to_large_page(), VAddr::from(0x400000));
        assert_eq!(base.align_up_to_huge_page(), VAddr::from(1073741824));
        assert!(!base.is_base_page_aligned());
        assert!(!base.is_large_page_aligned());
        assert!(!base.is_huge_page_aligned());
        assert!(base.is_aligned(0x1u64));
        assert!(base.is_aligned(0x2u64));
        assert!(!base.is_aligned(0x3u64));
        assert!(!base.is_aligned(0x4u64));
    }
}