diff --git a/src/memory/paging/mapper.rs b/src/memory/paging/mapper.rs
new file mode 100644
index 00000000..8eda53d1
--- /dev/null
+++ b/src/memory/paging/mapper.rs
@@ -0,0 +1,126 @@
+use super::{VirtualAddress, PhysicalAddress, Page, ENTRY_COUNT};
+use super::entry::*;
+use super::table::{self, Table, Level4, Level1};
+use memory::{PAGE_SIZE, Frame, FrameAllocator};
+use core::ptr::Unique;
+
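+/// Provides translation and (un)mapping operations on the currently active P4 table.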
+pub struct Mapper {
+    p4: Unique<Table<Level4>>,
+}
+
+impl Mapper {
+    pub unsafe fn new() -> Mapper {
+        Mapper {
+            p4: Unique::new_unchecked(table::P4),
+        }
+    }
+
+    pub fn p4(&self) -> &Table<Level4> {
+        unsafe { self.p4.as_ref() }
+    }
+
+    pub fn p4_mut(&mut self) -> &mut Table<Level4> {
+        unsafe { self.p4.as_mut() }
+    }
+
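+    /// Translates a virtual address to the corresponding physical address, if mapped.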
+    pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
+        let offset = virtual_address % PAGE_SIZE;
+        self.translate_page(Page::containing_address(virtual_address))
+            .map(|frame| frame.number * PAGE_SIZE + offset)
+    }
+
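+    /// Returns the frame that `page` is mapped to, handling huge pages as well.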
+    pub fn translate_page(&self, page: Page) -> Option<Frame> {
+        let p3 = self.p4().next_table(page.p4_index());
+
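+        // Fallback that checks whether the address is covered by a 1GiB or 2MiB huge page.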
+        let huge_page = || {
+            p3.and_then(|p3| {
+                let p3_entry = &p3[page.p3_index()];
+                // 1GiB page?
+                if let Some(start_frame) = p3_entry.pointed_frame() {
+                    if p3_entry.flags().contains(HUGE_PAGE) {
+                        // address must be 1GiB aligned
+                        assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
+                        return Some(Frame {
+                            number: start_frame.number + page.p2_index() *
+                                    ENTRY_COUNT + page.p1_index(),
+                        });
+                    }
+                }
+                if let Some(p2) = p3.next_table(page.p3_index()) {
+                    let p2_entry = &p2[page.p2_index()];
+                    // 2MiB page?
+                    if let Some(start_frame) = p2_entry.pointed_frame() {
+                        if p2_entry.flags().contains(HUGE_PAGE) {
+                            // address must be 2MiB aligned
+                            assert!(start_frame.number % ENTRY_COUNT == 0);
+                            return Some(Frame {
+                                number: start_frame.number + page.p1_index()
+                            });
+                        }
+                    }
+                }
+                None
+            })
+        };
+
+        p3.and_then(|p3| p3.next_table(page.p3_index()))
+            .and_then(|p2| p2.next_table(page.p2_index()))
+            .and_then(|p1| p1[page.p1_index()].pointed_frame())
+            .or_else(huge_page)
+    }
+
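+    /// Maps `page` to `frame`, creating lower-level tables as needed; panics if the page is already mapped.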
+    pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags,
+                     allocator: &mut A)
+        where A: FrameAllocator
+    {
+        let p4 = self.p4_mut();
+        let mut p3 = p4.next_table_create(page.p4_index(), allocator);
+        let mut p2 = p3.next_table_create(page.p3_index(), allocator);
+        let mut p1 = p2.next_table_create(page.p2_index(), allocator);
+
+        assert!(p1[page.p1_index()].is_unused());
+        p1[page.p1_index()].set(frame, flags | PRESENT);
+    }
+
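+    /// Maps `page` to a freshly allocated frame.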
+    pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
+        where A: FrameAllocator
+    {
+        let frame = allocator.allocate_frame().expect("out of memory");
+        self.map_to(page, frame, flags, allocator)
+    }
+
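+    /// Identity-maps the frame, i.e. maps it to the page with the same address.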
+    pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
+        where A: FrameAllocator
+    {
+        let page = Page::containing_address(frame.start_address());
+        self.map_to(page, frame, flags, allocator)
+    }
+
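+    /// Unmaps `page` and flushes the corresponding TLB entry (the frame is not freed yet, see TODO).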
+    pub fn unmap<A>(&mut self, page: Page, allocator: &mut A)
+        where A: FrameAllocator
+    {
+        use x86_64::instructions::tlb;
+        use x86_64::VirtualAddress;
+
+        assert!(self.translate(page.start_address()).is_some());
+
+        let p1 = self.p4_mut()
+            .next_table_mut(page.p4_index())
+            .and_then(|p3| p3.next_table_mut(page.p3_index()))
+            .and_then(|p2| p2.next_table_mut(page.p2_index()))
+            .expect("mapping code does not support huge pages");
+        let frame = p1[page.p1_index()].pointed_frame().unwrap();
+        p1[page.p1_index()].set_unused();
+        tlb::flush(VirtualAddress(page.start_address()));
+        // TODO free p(1,2,3) table if empty
+        //allocator.deallocate_frame(frame);
+    }
+}
diff --git a/src/memory/paging/mod.rs b/src/memory/paging/mod.rs
index 25e1e772..873e9cfe 100644
--- a/src/memory/paging/mod.rs
+++ b/src/memory/paging/mod.rs
@@ -7,6 +7,7 @@ use self::temporary_page::TemporaryPage;
mod entry;
mod table;
mod temporary_page;
+mod mapper;
const ENTRY_COUNT: usize = 512;
@@ -44,138 +45,6 @@ impl Page {
}
}
-pub struct ActivePageTable {
-    p4: Unique<Table<Level4>>,
-}
-
-impl ActivePageTable {
-    pub unsafe fn new() -> ActivePageTable {
-        ActivePageTable {
-            p4: Unique::new_unchecked(table::P4),
-        }
-    }
-
-    fn p4(&self) -> &Table<Level4> {
-        unsafe { self.p4.as_ref() }
-    }
-
-    fn p4_mut(&mut self) -> &mut Table<Level4> {
-        unsafe { self.p4.as_mut() }
-    }
-
-    pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
-        let offset = virtual_address % PAGE_SIZE;
-        self.translate_page(Page::containing_address(virtual_address))
-            .map(|frame| frame.number * PAGE_SIZE + offset)
-    }
-
-    fn translate_page(&self, page: Page) -> Option<Frame> {
-        use self::entry::HUGE_PAGE;
-
-        let p3 = self.p4().next_table(page.p4_index());
-
-        let huge_page = || {
-            p3.and_then(|p3| {
-                let p3_entry = &p3[page.p3_index()];
-                // 1GiB page?
-                if let Some(start_frame) = p3_entry.pointed_frame() {
-                    if p3_entry.flags().contains(HUGE_PAGE) {
-                        // address must be 1GiB aligned
-                        assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
-                        return Some(Frame {
-                            number: start_frame.number + page.p2_index() *
-                                    ENTRY_COUNT + page.p1_index(),
-                        });
-                    }
-                }
-                if let Some(p2) = p3.next_table(page.p3_index()) {
-                    let p2_entry = &p2[page.p2_index()];
-                    // 2MiB page?
-                    if let Some(start_frame) = p2_entry.pointed_frame() {
-                        if p2_entry.flags().contains(HUGE_PAGE) {
-                            // address must be 2MiB aligned
-                            assert!(start_frame.number % ENTRY_COUNT == 0);
-                            return Some(Frame {
-                                number: start_frame.number + page.p1_index()
-                            });
-                        }
-                    }
-                }
-                None
-            })
-        };
-
-        p3.and_then(|p3| p3.next_table(page.p3_index()))
-            .and_then(|p2| p2.next_table(page.p2_index()))
-            .and_then(|p1| p1[page.p1_index()].pointed_frame())
-            .or_else(huge_page)
-    }
-
-    pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags,
-                     allocator: &mut A)
-        where A: FrameAllocator
-    {
-        let p4 = self.p4_mut();
-        let mut p3 = p4.next_table_create(page.p4_index(), allocator);
-        let mut p2 = p3.next_table_create(page.p3_index(), allocator);
-        let mut p1 = p2.next_table_create(page.p2_index(), allocator);
-
-        assert!(p1[page.p1_index()].is_unused());
-        p1[page.p1_index()].set(frame, flags | PRESENT);
-    }
-
-    pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
-        where A: FrameAllocator
-    {
-        let frame = allocator.allocate_frame().expect("out of memory");
-        self.map_to(page, frame, flags, allocator)
-    }
-
-    pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
-        where A: FrameAllocator
-    {
-        let page = Page::containing_address(frame.start_address());
-        self.map_to(page, frame, flags, allocator)
-    }
-
-    fn unmap<A>(&mut self, page: Page, allocator: &mut A)
-        where A: FrameAllocator
-    {
-        use x86_64::instructions::tlb;
-        use x86_64::VirtualAddress;
-
-        assert!(self.translate(page.start_address()).is_some());
-
-        let p1 = self.p4_mut()
-            .next_table_mut(page.p4_index())
-            .and_then(|p3| p3.next_table_mut(page.p3_index()))
-            .and_then(|p2| p2.next_table_mut(page.p2_index()))
-            .expect("mapping code does not support huge pages");
-        let frame = p1[page.p1_index()].pointed_frame().unwrap();
-        p1[page.p1_index()].set_unused();
-        tlb::flush(VirtualAddress(page.start_address()));
-        // TODO free p(1,2,3) table if empty
-        //allocator.deallocate_frame(frame);
-    }
-
-    pub fn with<F>(&mut self,
-                   table: &mut InactivePageTable,
-                   f: F)
-        where F: FnOnce(&mut ActivePageTable)
-    {
-        use x86_64::instructions::tlb;
-
-        // overwrite recursive mapping
-        self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
-        tlb::flush_all();
-
-        // execute f in the new context
-        f(self);
-
-        // TODO restore recursive mapping to original p4 table
-    }
-}
-
pub struct InactivePageTable {
p4_frame: Frame,
}