Finish basics of new paging module

Philipp Oppermann
2015-12-03 21:17:18 +01:00
parent 562221d725
commit a8df7b2e4d
11 changed files with 122 additions and 698 deletions

View File

@@ -1,7 +1,6 @@
pub use self::area_frame_allocator::AreaFrameAllocator;
pub mod paging;
pub mod paging_new;
mod area_frame_allocator;
pub const PAGE_SIZE: usize = 4096;

View File

@@ -0,0 +1,24 @@
use memory::Frame;
use super::Page;
use super::entry::{EntryFlags, PRESENT};
use memory::FrameAllocator;
use super::table::P4;
pub fn map<A>(page: &Page, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let frame = allocator.allocate_frame().expect("out of memory");
map_to(page, frame, flags, allocator)
}
pub fn map_to<A>(page: &Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let p4 = unsafe { &mut *P4 };
let mut p3 = p4.next_table_create(page.p4_index(), allocator);
let mut p2 = p3.next_table_create(page.p3_index(), allocator);
let mut p1 = p2.next_table_create(page.p2_index(), allocator);
assert!(!p1[page.p1_index()].flags().contains(PRESENT));
p1[page.p1_index()].set(frame, flags | PRESENT);
}
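
For reference, a minimal standalone sketch (not part of this commit) of the index arithmetic that map_to relies on. It assumes a 64-bit target and skips the canonical-address check that Page::containing_address performs:

fn indexes(virtual_address: usize) -> (usize, usize, usize, usize) {
    let page_number = virtual_address / 4096; // drop the 12 offset bits
    ((page_number >> 27) & 0o777, // P4 index, address bits 39..48
     (page_number >> 18) & 0o777, // P3 index, address bits 30..39
     (page_number >> 9) & 0o777,  // P2 index, address bits 21..30
     page_number & 0o777)         // P1 index, address bits 12..21
}

fn main() {
    // the recursively mapped P4 page decomposes to index 511 at every level
    assert_eq!(indexes(0xffffffff_fffff000), (511, 511, 511, 511));
}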

View File

@@ -1,88 +1,38 @@
use memory::{Frame, FrameAllocator};
mod entry;
mod table;
pub mod translate;
pub mod mapping;
pub fn test<A>(frame_allocator: &mut A)
where A: super::FrameAllocator
{
use self::entry::PRESENT;
mapping::map(&Page::containing_address(0xdeadbeaa000),
PRESENT,
frame_allocator);
mapping::map(&Page::containing_address(0xdeadbeab000),
PRESENT,
frame_allocator);
mapping::map(&Page::containing_address(0xdeadbeac000),
PRESENT,
frame_allocator);
mapping::map(&Page::containing_address(0xdeadbead000),
PRESENT,
frame_allocator);
mapping::map(&Page::containing_address(0xcafebeaf000),
PRESENT,
frame_allocator);
mapping::map(&Page::containing_address(0x0),
PRESENT,
frame_allocator);
}
pub const PAGE_SIZE: usize = 4096;
const ENTRY_SIZE: usize = 8;
const ENTRY_COUNT: usize = 512;
pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;
pub fn translate(virtual_address: usize) -> Option<PhysicalAddress> {
let page = Page::containing_address(virtual_address);
let offset = virtual_address % PAGE_SIZE;
let frame_number = {
let p4_entry = page.p4_table().entry(page.p4_index());
assert!(!p4_entry.flags().contains(HUGE_PAGE));
if !p4_entry.flags().contains(PRESENT) {
return None;
}
let p3_entry = unsafe { page.p3_table() }.entry(page.p3_index());
if !p3_entry.flags().contains(PRESENT) {
return None;
}
if p3_entry.flags().contains(HUGE_PAGE) {
// 1GiB page (address must be 1GiB aligned)
let start_frame_number = p3_entry.pointed_frame().number;
assert!(start_frame_number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
start_frame_number + page.p2_index() * ENTRY_COUNT + page.p1_index()
} else {
// 2MiB or 4KiB page
let p2_entry = unsafe { page.p2_table() }.entry(page.p2_index());
if !p2_entry.flags().contains(PRESENT) {
return None;
}
if p2_entry.flags().contains(HUGE_PAGE) {
// 2MiB page (address must be 2MiB aligned)
let start_frame_number = p2_entry.pointed_frame().number;
assert!(start_frame_number % ENTRY_COUNT == 0);
start_frame_number + page.p1_index()
} else {
// standard 4KiB page
let p1_entry = unsafe { page.p1_table() }.entry(page.p1_index());
assert!(!p1_entry.flags().contains(HUGE_PAGE));
if !p1_entry.flags().contains(PRESENT) {
return None;
}
p1_entry.pointed_frame().number
}
}
};
Some(frame_number * PAGE_SIZE + offset)
}
pub fn map_to<A>(page: &Page, frame: Frame, flags: TableEntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let p4_index = page.p4_index();
let p3_index = page.p3_index();
let p2_index = page.p2_index();
let p1_index = page.p1_index();
let mut p4 = page.p4_table();
if !p4.entry(p4_index).flags().contains(PRESENT) {
let frame = allocator.allocate_frame().expect("no frames available");
p4.set_entry(p4_index, TableEntry::new(frame, PRESENT | WRITABLE));
unsafe { page.p3_table() }.zero();
}
let mut p3 = unsafe { page.p3_table() };
if !p3.entry(p3_index).flags().contains(PRESENT) {
let frame = allocator.allocate_frame().expect("no frames available");
p3.set_entry(p3_index, TableEntry::new(frame, PRESENT | WRITABLE));
unsafe { page.p2_table() }.zero();
}
let mut p2 = unsafe { page.p2_table() };
if !p2.entry(p2_index).flags().contains(PRESENT) {
let frame = allocator.allocate_frame().expect("no frames available");
p2.set_entry(p2_index, TableEntry::new(frame, PRESENT | WRITABLE));
unsafe { page.p1_table() }.zero();
}
let mut p1 = unsafe { page.p1_table() };
assert!(!p1.entry(p1_index).flags().contains(PRESENT));
p1.set_entry(p1_index, TableEntry::new(frame, flags));
}
pub struct Page {
number: usize,
}
@@ -119,90 +69,4 @@ impl Page {
fn p1_index(&self) -> usize {
(self.number >> 0) & 0o777
}
const fn p4_table(&self) -> Table {
Table(Page { number: 0o_777_777_777_777 })
}
/// # Safety
/// Only valid if the corresponding entry in the parent table is PRESENT and not HUGE_PAGE.
unsafe fn p3_table(&self) -> Table {
Table(Page { number: 0o_777_777_777_000 | self.p4_index() })
}
/// # Safety
/// Only valid if the corresponding entry in the parent table is PRESENT and not HUGE_PAGE.
unsafe fn p2_table(&self) -> Table {
Table(Page { number: 0o_777_777_000_000 | (self.p4_index() << 9) | self.p3_index() })
}
/// # Safety
/// Only valid if the corresponding entry in the parent table is PRESENT and not HUGE_PAGE.
unsafe fn p1_table(&self) -> Table {
Table(Page {
number: 0o_777_000_000_000 | (self.p4_index() << 18) | (self.p3_index() << 9) |
self.p2_index(),
})
}
}
struct Table(Page);
impl Table {
fn entry(&self, index: usize) -> TableEntry {
assert!(index < ENTRY_COUNT);
let entry_address = self.0.start_address() + index * ENTRY_SIZE;
unsafe { *(entry_address as *const _) }
}
fn set_entry(&mut self, index: usize, value: TableEntry) {
assert!(index < ENTRY_COUNT);
let entry_address = self.0.start_address() + index * ENTRY_SIZE;
unsafe { *(entry_address as *mut _) = value }
}
fn zero(&mut self) {
let page = self.0.start_address() as *mut [TableEntry; ENTRY_COUNT];
unsafe { *page = [TableEntry::unused(); ENTRY_COUNT] };
}
}
#[derive(Debug, Clone, Copy)]
struct TableEntry(u64);
impl TableEntry {
const fn unused() -> TableEntry {
TableEntry(0)
}
fn new(frame: Frame, flags: TableEntryFlags) -> TableEntry {
let frame_addr = (frame.number << 12) & 0x000fffff_fffff000;
TableEntry((frame_addr as u64) | flags.bits())
}
fn flags(&self) -> TableEntryFlags {
TableEntryFlags::from_bits_truncate(self.0)
}
fn pointed_frame(&self) -> Frame {
Frame { number: ((self.0 & 0x000fffff_fffff000) >> 12) as usize }
}
}
bitflags! {
flags TableEntryFlags: u64 {
const PRESENT = 1 << 0,
const WRITABLE = 1 << 1,
const USER_ACCESSIBLE = 1 << 2,
const WRITE_THROUGH = 1 << 3,
const NO_CACHE = 1 << 4,
const ACCESSED = 1 << 5,
const DIRTY = 1 << 6,
const HUGE_PAGE = 1 << 7,
const GLOBAL = 1 << 8,
const NO_EXECUTE = 1 << 63,
}
}
mod tables;
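
A standalone sketch (not from the commit) of the huge-page arithmetic in the removed translate code above: with 512 entries per table, a 1GiB P3 entry spans 512 × 512 contiguous 4KiB frames and a 2MiB P2 entry spans 512, so the target frame is the aligned start frame plus the remaining indexes:

const ENTRY_COUNT: usize = 512;

fn frame_in_1gib_page(start_frame: usize, p2_index: usize, p1_index: usize) -> usize {
    assert!(start_frame % (ENTRY_COUNT * ENTRY_COUNT) == 0); // 1GiB aligned
    start_frame + p2_index * ENTRY_COUNT + p1_index
}

fn frame_in_2mib_page(start_frame: usize, p1_index: usize) -> usize {
    assert!(start_frame % ENTRY_COUNT == 0); // 2MiB aligned
    start_frame + p1_index
}

fn main() {
    // frame 262144 (512 * 512) starts the second 1GiB page
    assert_eq!(frame_in_1gib_page(262144, 1, 2), 262144 + 512 + 2);
}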

View File

@@ -1,52 +0,0 @@
use core::marker::PhantomData;
use super::{VirtualAddress, Page, ENTRY_COUNT};
use super::table::{Entry, Table, PRESENT};
use super::levels::{TableLevel, HierachicalLevel, Level4, Level3, Level2, Level1};
pub fn P4_entry(address: VirtualAddress) -> EntryRef<Level4> {
let p4_page = Page { number: 0o_777_777_777_777 };
let p4 = p4_page.start_address() as *mut Table;
EntryRef {
target_address: address,
table: p4,
_phantom: PhantomData,
}
}
pub struct EntryRef<Level> {
target_address: VirtualAddress,
table: *mut Table,
_phantom: PhantomData<Level>,
}
impl<L> EntryRef<L> where L: HierachicalLevel
{
pub fn next_level(&self) -> Option<EntryRef<L::NextLevel>> {
if self.entry().flags().contains(PRESENT) {
let next_table_page = {
let table_page = Page::containing_address(self.table as usize);
let index = table_index::<L>(self.target_address);
Page { number: ((table_page.number << 9) & 0o_777_777_777_777) | index }
};
let next_table = next_table_page.start_address() as *mut Table;
Some(EntryRef {
target_address: self.target_address,
table: next_table,
_phantom: PhantomData,
})
} else {
None
}
}
fn entry(&self) -> &Entry {
unsafe { &(*self.table).0[table_index::<L>(self.target_address)] }
}
}
fn table_index<L>(address: VirtualAddress) -> usize
where L: TableLevel
{
let shift = 12 + (L::level_number() - 1) * 9;
(address >> shift) & 0o777
}
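
The deleted EntryRef experiment finds a table index by shifting the raw virtual address instead of the page number; a standalone sketch of that shift:

fn index_shift(level_number: usize) -> usize {
    12 + (level_number - 1) * 9 // P1 indexes start after the 4KiB offset bits
}

fn main() {
    assert_eq!(index_shift(1), 12); // P1
    assert_eq!(index_shift(2), 21); // P2
    assert_eq!(index_shift(3), 30); // P3
    assert_eq!(index_shift(4), 39); // P4
}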

View File

@@ -1,125 +0,0 @@
use core::marker::PhantomData;
pub const fn P4(page: &Page) -> Table<Level4> {
Table {
table_page: Page { number: 0o_777_777_777_777 },
target_page_number: page.number,
_phantom: PhantomData,
}
}
impl<L> Table<L> where L: TableLevel
{
pub fn index_of(page: &Page) -> usize {
Self::index_of_page_number(page.number)
}
fn index_of_page_number(page_number: usize) -> usize {
let s = (L::level_number() - 1) * 9;
(page_number >> s) & 0o777
}
fn index(&self) -> usize {
Self::index_of_page_number(self.target_page_number)
}
}
use memory::{Frame, FrameAllocator};
pub struct Table<Level> {
table_page: Page,
target_page_number: usize,
_phantom: PhantomData<Level>,
}
impl<L> Table<L> where L: TableLevel
{
fn entry(&self) -> TableEntry {
let entry_address = self.table_page.start_address() + self.index() * ENTRY_SIZE;
unsafe { *(entry_address as *const _) }
}
fn set_entry(&mut self, value: TableEntry) {
let entry_address = self.table_page.start_address() + self.index() * ENTRY_SIZE;
unsafe { *(entry_address as *mut _) = value }
}
fn zero(&mut self) {
let page = self.table_page.start_address() as *mut [TableEntry; ENTRY_COUNT];
unsafe { *page = [TableEntry::unused(); ENTRY_COUNT] };
}
}
impl<L> Table<L> where L: HierachicalLevel
{
fn next_table_internal(&self) -> Table<L::NextLevel> {
Table {
table_page: Page {
number: ((self.table_page.number << 9) & 0o_777_777_777_777) | self.index(),
},
target_page_number: self.target_page_number,
_phantom: PhantomData,
}
}
fn next_table(&self) -> Option<Table<L::NextLevel>> {
if self.entry().flags().contains(PRESENT) {
Some(self.next_table_internal())
} else {
None
}
}
fn next_table_create<A>(&mut self, allocator: &mut A) -> Table<L::NextLevel>
where A: FrameAllocator
{
match self.next_table() {
Some(table) => table,
None => {
let frame = allocator.allocate_frame().expect("no frames available");
self.set_entry(TableEntry::new(frame, PRESENT | WRITABLE));
let mut next_table = self.next_table_internal();
next_table.zero();
next_table
}
}
}
}
#[derive(Debug, Clone, Copy)]
struct TableEntry(u64);
impl TableEntry {
const fn unused() -> TableEntry {
TableEntry(0)
}
fn new(frame: Frame, flags: TableEntryFlags) -> TableEntry {
let frame_addr = (frame.number << 12) & 0x000fffff_fffff000;
TableEntry((frame_addr as u64) | flags.bits())
}
fn flags(&self) -> TableEntryFlags {
TableEntryFlags::from_bits_truncate(self.0)
}
fn pointed_frame(&self) -> Frame {
Frame { number: ((self.0 & 0x000fffff_fffff000) >> 12) as usize }
}
}
bitflags! {
flags TableEntryFlags: u64 {
const PRESENT = 1 << 0,
const WRITABLE = 1 << 1,
const USER_ACCESSIBLE = 1 << 2,
const WRITE_THROUGH = 1 << 3,
const NO_CACHE = 1 << 4,
const ACCESSED = 1 << 5,
const DIRTY = 1 << 6,
const HUGE_PAGE = 1 << 7,
const GLOBAL = 1 << 8,
const NO_EXECUTE = 1 << 63,
}
}

View File

@@ -1,32 +1,16 @@
use super::{ENTRY_COUNT, Page};
use super::entry::{Entry, PRESENT, HUGE_PAGE};
use super::levels::{TableLevel, HierachicalLevel, Level4};
use memory::FrameAllocator;
use memory::paging::{ENTRY_COUNT, Page};
use memory::paging::entry::*;
use core::ops::{Index, IndexMut};
use core::marker::PhantomData;
-pub const P4: *const Table<Level4> = 0xffffffff_fffff000 as *const _;
+pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;
pub struct Table<L: TableLevel> {
entries: [Entry; ENTRY_COUNT],
_phantom: PhantomData<L>,
}
impl<L> Index<usize> for Table<L> where L: TableLevel
{
type Output = Entry;
fn index(&self, index: usize) -> &Entry {
&self.entries[index]
}
}
impl<L> IndexMut<usize> for Table<L> where L: TableLevel
{
fn index_mut(&mut self, index: usize) -> &mut Entry {
&mut self.entries[index]
}
}
impl<L> Table<L> where L: TableLevel
{
pub fn zero(&mut self) {
@@ -46,6 +30,22 @@ impl<L> Table<L> where L: HierachicalLevel
self.next_table_address(index).map(|t| unsafe { &mut *(t as *mut _) })
}
pub fn next_table_create<A>(&mut self,
index: usize,
allocator: &mut A)
-> &mut Table<L::NextLevel>
where A: FrameAllocator
{
if let None = self.next_table_address(index) {
assert!(!self.entries[index].flags().contains(HUGE_PAGE),
"mapping code does not support huge pages");
let frame = allocator.allocate_frame().expect("no frames available");
self.entries[index].set(frame, PRESENT | WRITABLE);
self.next_table_mut(index).unwrap().zero();
}
self.next_table_mut(index).unwrap()
}
fn next_table_address(&self, index: usize) -> Option<usize> {
let entry_flags = self[index].flags();
if entry_flags.contains(PRESENT) && !entry_flags.contains(HUGE_PAGE) {
@@ -60,3 +60,50 @@ impl<L> Table<L> where L: HierachicalLevel
}
}
}
impl<L> Index<usize> for Table<L> where L: TableLevel
{
type Output = Entry;
fn index(&self, index: usize) -> &Entry {
&self.entries[index]
}
}
impl<L> IndexMut<usize> for Table<L> where L: TableLevel
{
fn index_mut(&mut self, index: usize) -> &mut Entry {
&mut self.entries[index]
}
}
pub trait TableLevel {}
pub enum Level4 {}
#[allow(dead_code)]
enum Level3 {}
#[allow(dead_code)]
enum Level2 {}
#[allow(dead_code)]
enum Level1 {}
impl TableLevel for Level4 {}
impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}
trait HierachicalLevel: TableLevel {
type NextLevel: TableLevel;
}
impl HierachicalLevel for Level4 {
type NextLevel = Level3;
}
impl HierachicalLevel for Level3 {
type NextLevel = Level2;
}
impl HierachicalLevel for Level2 {
type NextLevel = Level1;
}
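
The body of next_table_address is cut off by the hunk boundary above, but it builds on the recursive mapping that the 0xffffffff_fffff000 constant implies (the last P4 entry points to the P4 frame itself). A standalone sketch of that arithmetic, valid for table addresses inside the recursive mapping area:

fn next_table_address(table_address: usize, index: usize) -> usize {
    // shifting left by 9 moves every level's index up one slot, and the
    // freed P1 slot takes the entry index of the next-level table
    (table_address << 9) | (index << 12)
}

fn main() {
    let p4 = 0xffffffff_fffff000_usize; // P4 seen through its recursive entry
    // the P3 table behind P4 entry 0 appears at 0xffffffff_ffe00000
    assert_eq!(next_table_address(p4, 0), 0xffffffff_ffe00000);
}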

View File

@@ -1,256 +0,0 @@
use core::marker::PhantomData;
pub const fn P4(page: &Page) -> Table<Level4> {
Table {
table_page: Page { number: 0o_777_777_777_777 },
target_page_number: page.number,
_phantom: PhantomData,
}
}
pub fn translate(virtual_address: usize) -> Option<PhysicalAddress> {
let page = Page::containing_address(virtual_address);
let offset = virtual_address % PAGE_SIZE;
let frame_number = {
let p3 = match P4(&page).next_table() {
None => return None,
Some(t) => t,
};
if p3.entry().flags().contains(PRESENT | HUGE_PAGE) {
// 1GiB page (address must be 1GiB aligned)
let start_frame_number = p3.entry().pointed_frame().number;
assert!(start_frame_number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
start_frame_number + Table::<Level2>::index_of(&page) * ENTRY_COUNT +
Table::<Level1>::index_of(&page)
} else {
// 2MiB or 4KiB page
let p2 = match p3.next_table() {
None => return None,
Some(t) => t,
};
if p2.entry().flags().contains(PRESENT | HUGE_PAGE) {
// 2MiB page (address must be 2MiB aligned)
let start_frame_number = p2.entry().pointed_frame().number;
assert!(start_frame_number % ENTRY_COUNT == 0);
start_frame_number + Table::<Level2>::index_of(&page)
} else {
// standard 4KiB page
let p1 = match p2.next_table() {
None => return None,
Some(t) => t,
};
p1.entry().pointed_frame().number
}
}
};
Some(frame_number * PAGE_SIZE + offset)
}
pub fn map_to<A>(page: &Page, frame: Frame, flags: TableEntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let mut p3 = P4(page).next_table_create(allocator);
let mut p2 = p3.next_table_create(allocator);
let mut p1 = p2.next_table_create(allocator);
assert!(!p1.entry().flags().contains(PRESENT));
p1.set_entry(TableEntry::new(frame, flags));
}
trait TableLevel {
fn level_number() -> usize;
}
pub enum Level1 {}
pub enum Level2 {}
pub enum Level3 {}
pub enum Level4 {}
impl TableLevel for Level4 {
fn level_number() -> usize {
4
}
}
impl TableLevel for Level3 {
fn level_number() -> usize {
3
}
}
impl TableLevel for Level2 {
fn level_number() -> usize {
2
}
}
impl TableLevel for Level1 {
fn level_number() -> usize {
1
}
}
trait HierachicalLevel: TableLevel {
type NextLevel: TableLevel;
}
impl HierachicalLevel for Level4 {
type NextLevel = Level3;
}
impl HierachicalLevel for Level3 {
type NextLevel = Level2;
}
impl HierachicalLevel for Level2 {
type NextLevel = Level1;
}
impl<L> Table<L> where L: TableLevel
{
pub fn index_of(page: &Page) -> usize {
Self::index_of_page_number(page.number)
}
fn index_of_page_number(page_number: usize) -> usize {
let s = (L::level_number() - 1) * 9;
(page_number >> s) & 0o777
}
fn index(&self) -> usize {
Self::index_of_page_number(self.target_page_number)
}
}
use memory::{Frame, FrameAllocator};
pub const PAGE_SIZE: usize = 4096;
const ENTRY_SIZE: usize = 8;
const ENTRY_COUNT: usize = 512;
pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;
pub struct Page {
number: usize,
}
impl Page {
fn containing_address(address: VirtualAddress) -> Page {
match address {
addr if addr < 0o_400_000_000_000_0000 => Page { number: addr / PAGE_SIZE },
addr if addr >= 0o177777_400_000_000_000_0000 => {
Page { number: (address / PAGE_SIZE) & 0o_777_777_777_777 }
}
_ => panic!("invalid address: 0x{:x}", address),
}
}
pub fn start_address(&self) -> VirtualAddress {
if self.number >= 0x800000000 {
// sign extension necessary
(self.number << 12) | 0xffff_000000000000
} else {
self.number << 12
}
}
}
pub struct Table<Level> {
table_page: Page,
target_page_number: usize,
_phantom: PhantomData<Level>,
}
impl<L> Table<L> where L: TableLevel
{
fn entry(&self) -> TableEntry {
let entry_address = self.table_page.start_address() + self.index() * ENTRY_SIZE;
unsafe { *(entry_address as *const _) }
}
fn set_entry(&mut self, value: TableEntry) {
let entry_address = self.table_page.start_address() + self.index() * ENTRY_SIZE;
unsafe { *(entry_address as *mut _) = value }
}
fn zero(&mut self) {
let page = self.table_page.start_address() as *mut [TableEntry; ENTRY_COUNT];
unsafe { *page = [TableEntry::unused(); ENTRY_COUNT] };
}
}
impl<L> Table<L> where L: HierachicalLevel
{
fn next_table_internal(&self) -> Table<L::NextLevel> {
Table {
table_page: Page {
number: ((self.table_page.number << 9) & 0o_777_777_777_777) | self.index(),
},
target_page_number: self.target_page_number,
_phantom: PhantomData,
}
}
fn next_table(&self) -> Option<Table<L::NextLevel>> {
if self.entry().flags().contains(PRESENT) {
Some(self.next_table_internal())
} else {
None
}
}
fn next_table_create<A>(&mut self, allocator: &mut A) -> Table<L::NextLevel>
where A: FrameAllocator
{
match self.next_table() {
Some(table) => table,
None => {
let frame = allocator.allocate_frame().expect("no frames available");
self.set_entry(TableEntry::new(frame, PRESENT | WRITABLE));
let mut next_table = self.next_table_internal();
next_table.zero();
next_table
}
}
}
}
#[derive(Debug, Clone, Copy)]
struct TableEntry(u64);
impl TableEntry {
const fn unused() -> TableEntry {
TableEntry(0)
}
fn new(frame: Frame, flags: TableEntryFlags) -> TableEntry {
let frame_addr = (frame.number << 12) & 0x000fffff_fffff000;
TableEntry((frame_addr as u64) | flags.bits())
}
fn flags(&self) -> TableEntryFlags {
TableEntryFlags::from_bits_truncate(self.0)
}
fn pointed_frame(&self) -> Frame {
Frame { number: ((self.0 & 0x000fffff_fffff000) >> 12) as usize }
}
}
bitflags! {
flags TableEntryFlags: u64 {
const PRESENT = 1 << 0,
const WRITABLE = 1 << 1,
const USER_ACCESSIBLE = 1 << 2,
const WRITE_THROUGH = 1 << 3,
const NO_CACHE = 1 << 4,
const ACCESSED = 1 << 5,
const DIRTY = 1 << 6,
const HUGE_PAGE = 1 << 7,
const GLOBAL = 1 << 8,
const NO_EXECUTE = 1 << 63,
}
}
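
A standalone sketch of just the sign-extension step in Page::start_address above: x86_64 requires bits 48..64 of a virtual address to copy bit 47, and bit 47 of the address is bit 35 of the page number:

fn start_address(page_number: usize) -> usize {
    if page_number >= 0x8_0000_0000 {
        // bit 35 set: fill the upper 16 address bits with ones
        (page_number << 12) | 0xffff_0000_0000_0000
    } else {
        page_number << 12
    }
}

fn main() {
    assert_eq!(start_address(0o_777_777_777_777), 0xffff_ffff_ffff_f000);
    assert_eq!(start_address(1), 0x1000);
}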

View File

@@ -1,10 +1,9 @@
use super::{VirtualAddress, PhysicalAddress, Page, PAGE_SIZE, ENTRY_COUNT};
-use super::table::{Table, P4};
+use super::table::P4;
use super::entry::{PRESENT, HUGE_PAGE};
use memory::Frame;
-pub fn translate(virtual_address: usize) -> Option<PhysicalAddress> {
+pub fn translate(virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
let page = Page::containing_address(virtual_address);
let offset = virtual_address % PAGE_SIZE;

View File

@@ -1,27 +0,0 @@
pub trait TableLevel {}
pub struct Level4;
pub struct Level3;
pub struct Level2;
pub struct Level1;
impl TableLevel for Level4 {}
impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}
pub trait HierachicalLevel: TableLevel {
type NextLevel: TableLevel;
}
impl HierachicalLevel for Level4 {
type NextLevel = Level3;
}
impl HierachicalLevel for Level3 {
type NextLevel = Level2;
}
impl HierachicalLevel for Level2 {
type NextLevel = Level1;
}
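
A standalone sketch (not from the commit) of what this deleted hierarchy — kept alive in table.rs above — buys: the associated type lets a generic walk return Table<L::NextLevel>, and the missing impl for Level1 stops the walk at compile time. The Table type below is a stand-in, and the trait keeps the commit's spelling:

use std::marker::PhantomData;

trait TableLevel {}
trait HierachicalLevel: TableLevel {
    type NextLevel: TableLevel;
}

enum Level2 {}
enum Level1 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}
impl HierachicalLevel for Level2 {
    type NextLevel = Level1;
}

struct Table<L: TableLevel>(PhantomData<L>);

impl<L: HierachicalLevel> Table<L> {
    fn next_table(&self) -> Table<L::NextLevel> {
        Table(PhantomData)
    }
}

fn main() {
    let p2: Table<Level2> = Table(PhantomData);
    let _p1: Table<Level1> = p2.next_table();
    // _p1.next_table(); // would not compile: Level1 has no next level
}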

View File

@@ -1,49 +0,0 @@
mod entry;
mod table;
mod levels;
mod translate;
pub const PAGE_SIZE: usize = 4096;
const ENTRY_SIZE: usize = 8;
const ENTRY_COUNT: usize = 512;
pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;
pub struct Page {
number: usize,
}
impl Page {
fn containing_address(address: VirtualAddress) -> Page {
match address {
addr if addr < 0o_400_000_000_000_0000 => Page { number: addr / PAGE_SIZE },
addr if addr >= 0o177777_400_000_000_000_0000 => {
Page { number: (address / PAGE_SIZE) & 0o_777_777_777_777 }
}
_ => panic!("invalid address: 0x{:x}", address),
}
}
pub fn start_address(&self) -> VirtualAddress {
if self.number >= 0x800000000 {
// sign extension necessary
(self.number << 12) | 0xffff_000000000000
} else {
self.number << 12
}
}
fn p4_index(&self) -> usize {
(self.number >> 27) & 0o777
}
fn p3_index(&self) -> usize {
(self.number >> 18) & 0o777
}
fn p2_index(&self) -> usize {
(self.number >> 9) & 0o777
}
fn p1_index(&self) -> usize {
(self.number >> 0) & 0o777
}
}