From ae49ab5072cc690f3a3691a63d4b8f710618e77f Mon Sep 17 00:00:00 2001
From: Philipp Oppermann
Date: Thu, 3 Dec 2015 17:27:53 +0100
Subject: [PATCH] Add alternative paging module with new design

---
 src/memory/mod.rs                  |  3 +-
 src/memory/paging_new/entry.rs     | 41 ++++++++++++++++++++
 src/memory/paging_new/levels.rs    | 27 +++++++++++++
 src/memory/paging_new/mod.rs       | 49 +++++++++++++++++++++++
 src/memory/paging_new/table.rs     | 62 ++++++++++++++++++++++++++++++
 src/memory/paging_new/translate.rs | 43 +++++++++++++++++++++
 6 files changed, 224 insertions(+), 1 deletion(-)
 create mode 100644 src/memory/paging_new/entry.rs
 create mode 100644 src/memory/paging_new/levels.rs
 create mode 100644 src/memory/paging_new/mod.rs
 create mode 100644 src/memory/paging_new/table.rs
 create mode 100644 src/memory/paging_new/translate.rs

diff --git a/src/memory/mod.rs b/src/memory/mod.rs
index 062cf380..2225308f 100644
--- a/src/memory/mod.rs
+++ b/src/memory/mod.rs
@@ -1,6 +1,7 @@
 pub use self::area_frame_allocator::AreaFrameAllocator;
 
-mod paging;
+pub mod paging;
+pub mod paging_new;
 mod area_frame_allocator;
 
 pub const PAGE_SIZE: usize = 4096;
diff --git a/src/memory/paging_new/entry.rs b/src/memory/paging_new/entry.rs
new file mode 100644
index 00000000..585d06a6
--- /dev/null
+++ b/src/memory/paging_new/entry.rs
@@ -0,0 +1,41 @@
+use memory::Frame;
+
+pub struct Entry(u64);
+
+impl Entry {
+    pub fn unused(&self) -> bool {
+        self.0 == 0
+    }
+
+    pub fn set_unused(&mut self) {
+        self.0 = 0;
+    }
+
+    pub fn flags(&self) -> EntryFlags {
+        EntryFlags::from_bits_truncate(self.0)
+    }
+
+    pub fn pointed_frame(&self) -> Frame {
+        Frame { number: ((self.0 & 0x000fffff_fffff000) >> 12) as usize }
+    }
+
+    pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
+        let frame_addr = (frame.number << 12) & 0x000fffff_fffff000;
+        self.0 = (frame_addr as u64) | flags.bits();
+    }
+}
+
+bitflags! {
+    flags EntryFlags: u64 {
+        const PRESENT = 1 << 0,
+        const WRITABLE = 1 << 1,
+        const USER_ACCESSIBLE = 1 << 2,
+        const WRITE_THROUGH = 1 << 3,
+        const NO_CACHE = 1 << 4,
+        const ACCESSED = 1 << 5,
+        const DIRTY = 1 << 6,
+        const HUGE_PAGE = 1 << 7,
+        const GLOBAL = 1 << 8,
+        const NO_EXECUTE = 1 << 63,
+    }
+}
diff --git a/src/memory/paging_new/levels.rs b/src/memory/paging_new/levels.rs
new file mode 100644
index 00000000..ca28b8d3
--- /dev/null
+++ b/src/memory/paging_new/levels.rs
@@ -0,0 +1,27 @@
+pub trait TableLevel {}
+
+pub struct Level4;
+pub struct Level3;
+pub struct Level2;
+pub struct Level1;
+
+impl TableLevel for Level4 {}
+impl TableLevel for Level3 {}
+impl TableLevel for Level2 {}
+impl TableLevel for Level1 {}
+
+pub trait HierachicalLevel: TableLevel {
+    type NextLevel: TableLevel;
+}
+
+impl HierachicalLevel for Level4 {
+    type NextLevel = Level3;
+}
+
+impl HierachicalLevel for Level3 {
+    type NextLevel = Level2;
+}
+
+impl HierachicalLevel for Level2 {
+    type NextLevel = Level1;
+}
diff --git a/src/memory/paging_new/mod.rs b/src/memory/paging_new/mod.rs
new file mode 100644
index 00000000..7685f86f
--- /dev/null
+++ b/src/memory/paging_new/mod.rs
@@ -0,0 +1,49 @@
+mod entry;
+mod table;
+mod levels;
+mod translate;
+
+pub const PAGE_SIZE: usize = 4096;
+const ENTRY_SIZE: usize = 8;
+const ENTRY_COUNT: usize = 512;
+
+pub type PhysicalAddress = usize;
+pub type VirtualAddress = usize;
+
+pub struct Page {
+    number: usize,
+}
+
+impl Page {
+    fn containing_address(address: VirtualAddress) -> Page {
+        match address {
+            addr if addr < 0o_400_000_000_000_0000 => Page { number: addr / PAGE_SIZE },
+            addr if addr >= 0o177777_400_000_000_000_0000 => {
+                Page { number: (address / PAGE_SIZE) & 0o_777_777_777_777 }
+            }
+            _ => panic!("invalid address: 0x{:x}", address),
+        }
+    }
+
+    pub fn start_address(&self) -> VirtualAddress {
+        if self.number >= 0x800000000 {
+            // sign extension necessary
+            (self.number << 12) | 0xffff_000000000000
+        } else {
+            self.number << 12
+        }
+    }
+
+    fn p4_index(&self) -> usize {
+        (self.number >> 27) & 0o777
+    }
+    fn p3_index(&self) -> usize {
+        (self.number >> 18) & 0o777
+    }
+    fn p2_index(&self) -> usize {
+        (self.number >> 9) & 0o777
+    }
+    fn p1_index(&self) -> usize {
+        (self.number >> 0) & 0o777
+    }
+}
diff --git a/src/memory/paging_new/table.rs b/src/memory/paging_new/table.rs
new file mode 100644
index 00000000..cb4b3c49
--- /dev/null
+++ b/src/memory/paging_new/table.rs
@@ -0,0 +1,62 @@
+use super::{ENTRY_COUNT, Page};
+use super::entry::{Entry, PRESENT, HUGE_PAGE};
+use super::levels::{TableLevel, HierachicalLevel, Level4};
+use core::ops::{Index, IndexMut};
+use core::marker::PhantomData;
+
+pub const P4: *const Table<Level4> = 0xffffffff_fffff000 as *const _;
+
+pub struct Table<L: TableLevel> {
+    entries: [Entry; ENTRY_COUNT],
+    _phantom: PhantomData<L>,
+}
+
+impl<L> Index<usize> for Table<L> where L: TableLevel
+{
+    type Output = Entry;
+
+    fn index(&self, index: usize) -> &Entry {
+        &self.entries[index]
+    }
+}
+
+impl<L> IndexMut<usize> for Table<L> where L: TableLevel
+{
+    fn index_mut(&mut self, index: usize) -> &mut Entry {
+        &mut self.entries[index]
+    }
+}
+
+impl<L> Table<L> where L: TableLevel
+{
+    pub fn zero(&mut self) {
+        for entry in self.entries.iter_mut() {
+            entry.set_unused();
+        }
+    }
+}
+
+impl<L> Table<L> where L: HierachicalLevel
+{
+    pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
+        self.next_table_address(index).map(|t| unsafe { &*(t as *const _) })
+    }
+
+    pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
+        self.next_table_address(index).map(|t| unsafe { &mut *(t as *mut _) })
+    }
+
+    fn next_table_address(&self, index: usize) -> Option<usize> {
+        let entry_flags = self[index].flags();
+        if entry_flags.contains(PRESENT) && !entry_flags.contains(HUGE_PAGE) {
+            let table_page = Page::containing_address(self as *const _ as usize);
+            assert!(table_page.number >= 0o_777_000_000_000);
+            let next_table_page = Page {
+                number: ((table_page.number << 9) & 0o_777_777_777_777) | index,
+            };
+            Some(next_table_page.start_address())
+        } else {
+            None
+        }
+    }
+}
diff --git a/src/memory/paging_new/translate.rs b/src/memory/paging_new/translate.rs
new file mode 100644
index 00000000..d9b3ac32
--- /dev/null
+++ b/src/memory/paging_new/translate.rs
@@ -0,0 +1,43 @@
+use super::{VirtualAddress, PhysicalAddress, Page, PAGE_SIZE, ENTRY_COUNT};
+use super::table::{Table, P4};
+use super::entry::{PRESENT, HUGE_PAGE};
+use memory::Frame;
+
+
+pub fn translate(virtual_address: usize) -> Option<PhysicalAddress> {
+    let page = Page::containing_address(virtual_address);
+    let offset = virtual_address % PAGE_SIZE;
+
+    let p4 = unsafe { &*P4 };
+
+    let huge_page = || {
+        p4.next_table(page.p4_index())
+            .and_then(|p3| {
+                // 1GiB page?
+                if p3[page.p3_index()].flags().contains(HUGE_PAGE | PRESENT) {
+                    let start_frame_number = p3[page.p3_index()].pointed_frame().number;
+                    // address must be 1GiB aligned
+                    assert!(start_frame_number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
+                    return Some(start_frame_number + page.p2_index() * ENTRY_COUNT + page.p1_index());
+                }
+                if let Some(p2) = p3.next_table(page.p3_index()) {
+                    // 2MiB page?
+                    if p2[page.p2_index()].flags().contains(HUGE_PAGE | PRESENT) {
+                        let start_frame_number = p2[page.p2_index()].pointed_frame().number;
+                        // address must be 2MiB aligned
+                        assert!(start_frame_number % ENTRY_COUNT == 0);
+                        return Some(start_frame_number + page.p1_index());
+                    }
+                }
+                None
+            })
+            .map(|start_frame_number| Frame { number: start_frame_number })
+    };
+
+    p4.next_table(page.p4_index())
+        .and_then(|p3| p3.next_table(page.p3_index()))
+        .and_then(|p2| p2.next_table(page.p2_index()))
+        .map(|p1| p1[page.p1_index()].pointed_frame())
+        .or_else(huge_page)
+        .map(|frame| frame.number * PAGE_SIZE + offset)
+}