From a100d5f63f46c02c6a1089f7ed3cd62b21dc6f63 Mon Sep 17 00:00:00 2001
From: Philipp Oppermann <dev@phil-opp.com>
Date: Thu, 31 Dec 2015 02:23:02 +0100
Subject: [PATCH] Split mapping functions of ActivePageTable into mapper subtype

---
 src/memory/paging/mapper.rs | 118 ++++++++++++++++++++++++++++++++++++
 src/memory/paging/mod.rs    | 117 ++++++-----------------------------
 2 files changed, 138 insertions(+), 97 deletions(-)
 create mode 100644 src/memory/paging/mapper.rs

diff --git a/src/memory/paging/mapper.rs b/src/memory/paging/mapper.rs
new file mode 100644
index 00000000..ef949955
--- /dev/null
+++ b/src/memory/paging/mapper.rs
@@ -0,0 +1,118 @@
+// Copyright 2015 Philipp Oppermann. See the README.md
+// file at the top-level directory of this distribution.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::{VirtualAddress, PhysicalAddress, Page, ENTRY_COUNT};
+use super::entry::*;
+use super::table::{self, Table, Level4, Level1};
+use memory::{PAGE_SIZE, Frame, FrameAllocator};
+use core::ptr::Unique;
+
+pub struct Mapper {
+    p4: Unique<Table<Level4>>,
+}
+
+impl Mapper {
+    pub unsafe fn new() -> Mapper {
+        Mapper { p4: Unique::new(table::P4) }
+    }
+
+    pub fn p4(&self) -> &Table<Level4> {
+        unsafe { self.p4.get() }
+    }
+
+    pub fn p4_mut(&mut self) -> &mut Table<Level4> {
+        unsafe { self.p4.get_mut() }
+    }
+
+    pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
+        let offset = virtual_address % PAGE_SIZE;
+        self.translate_page(Page::containing_address(virtual_address))
+            .map(|frame| frame.number * PAGE_SIZE + offset)
+    }
+
+    pub fn translate_page(&self, page: Page) -> Option<Frame> {
+        let p3 = self.p4().next_table(page.p4_index());
+
+        let huge_page = || {
+            p3.and_then(|p3| {
+                let p3_entry = &p3[page.p3_index()];
+                // 1GiB page?
+                if let Some(start_frame) = p3_entry.pointed_frame() {
+                    if p3_entry.flags().contains(HUGE_PAGE) {
+                        // address must be 1GiB aligned
+                        assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
+                        return Some(Frame {
+                            number: start_frame.number + page.p2_index() * ENTRY_COUNT +
+                                    page.p1_index(),
+                        });
+                    }
+                }
+                if let Some(p2) = p3.next_table(page.p3_index()) {
+                    let p2_entry = &p2[page.p2_index()];
+                    // 2MiB page?
+                    if let Some(start_frame) = p2_entry.pointed_frame() {
+                        if p2_entry.flags().contains(HUGE_PAGE) {
+                            // address must be 2MiB aligned
+                            assert!(start_frame.number % ENTRY_COUNT == 0);
+                            return Some(Frame { number: start_frame.number + page.p1_index() });
+                        }
+                    }
+                }
+                None
+            })
+        };
+
+        p3.and_then(|p3| p3.next_table(page.p3_index()))
+          .and_then(|p2| p2.next_table(page.p2_index()))
+          .and_then(|p1| p1[page.p1_index()].pointed_frame())
+          .or_else(huge_page)
+    }
+
+    pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
+        where A: FrameAllocator
+    {
+        let mut p3 = self.p4_mut().next_table_create(page.p4_index(), allocator);
+        let mut p2 = p3.next_table_create(page.p3_index(), allocator);
+        let mut p1 = p2.next_table_create(page.p2_index(), allocator);
+
+        assert!(p1[page.p1_index()].is_unused());
+        p1[page.p1_index()].set(frame, flags | PRESENT);
+    }
+
+    pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
+        where A: FrameAllocator
+    {
+        let frame = allocator.allocate_frame().expect("out of memory");
+        self.map_to(page, frame, flags, allocator)
+    }
+
+    pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
+        where A: FrameAllocator
+    {
+        let page = Page::containing_address(frame.start_address());
+        self.map_to(page, frame, flags, allocator)
+    }
+
+    pub fn unmap<A>(&mut self, page: Page, allocator: &mut A)
+        where A: FrameAllocator
+    {
+        assert!(self.translate(page.start_address()).is_some());
+
+        let p1 = self.p4_mut()
+                     .next_table_mut(page.p4_index())
+                     .and_then(|p3| p3.next_table_mut(page.p3_index()))
+                     .and_then(|p2| p2.next_table_mut(page.p2_index()))
+                     .expect("mapping code does not support huge pages");
+        let frame = p1[page.p1_index()].pointed_frame().unwrap();
+        p1[page.p1_index()].set_unused();
+        unsafe { ::x86::tlb::flush(page.start_address()) };
+        // TODO free p(1,2,3) table if empty
+        // allocator.deallocate_frame(frame);
+    }
+}
diff --git a/src/memory/paging/mod.rs b/src/memory/paging/mod.rs
index ed671fdc..1439f35f 100644
--- a/src/memory/paging/mod.rs
+++ b/src/memory/paging/mod.rs
@@ -11,11 +11,14 @@ pub use self::entry::*;
 use memory::{PAGE_SIZE, Frame, FrameAllocator};
 use self::table::{Table, Level4};
 use self::temporary_page::TemporaryPage;
+pub use self::mapper::Mapper;
+use core::ops::{Deref, DerefMut};
 use core::ptr::Unique;
 
 mod entry;
 mod table;
 mod temporary_page;
+mod mapper;
 
 const ENTRY_COUNT: usize = 512;
 
@@ -54,106 +57,26 @@ impl Page {
 }
 
 pub struct ActivePageTable {
-    p4: Unique<Table<Level4>>,
+    mapper: Mapper,
+}
+
+impl Deref for ActivePageTable {
+    type Target = Mapper;
+
+    fn deref(&self) -> &Mapper {
+        &self.mapper
+    }
+}
+
+impl DerefMut for ActivePageTable {
+    fn deref_mut(&mut self) -> &mut Mapper {
+        &mut self.mapper
+    }
 }
 
 impl ActivePageTable {
-    pub unsafe fn new() -> ActivePageTable {
-        ActivePageTable { p4: Unique::new(table::P4) }
-    }
-
-    fn p4(&self) -> &Table<Level4> {
-        unsafe { self.p4.get() }
-    }
-
-    fn p4_mut(&mut self) -> &mut Table<Level4> {
-        unsafe { self.p4.get_mut() }
-    }
-
-    pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
-        let offset = virtual_address % PAGE_SIZE;
-        self.translate_page(Page::containing_address(virtual_address))
-            .map(|frame| frame.number * PAGE_SIZE + offset)
-    }
-
-    fn translate_page(&self, page: Page) -> Option<Frame> {
-        let p3 = self.p4().next_table(page.p4_index());
-
-        let huge_page = || {
-            p3.and_then(|p3| {
-                let p3_entry = &p3[page.p3_index()];
-                // 1GiB page?
-                if let Some(start_frame) = p3_entry.pointed_frame() {
-                    if p3_entry.flags().contains(HUGE_PAGE) {
-                        // address must be 1GiB aligned
-                        assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
-                        return Some(Frame {
-                            number: start_frame.number + page.p2_index() * ENTRY_COUNT +
-                                    page.p1_index(),
-                        });
-                    }
-                }
-                if let Some(p2) = p3.next_table(page.p3_index()) {
-                    let p2_entry = &p2[page.p2_index()];
-                    // 2MiB page?
-                    if let Some(start_frame) = p2_entry.pointed_frame() {
-                        if p2_entry.flags().contains(HUGE_PAGE) {
-                            // address must be 2MiB aligned
-                            assert!(start_frame.number % ENTRY_COUNT == 0);
-                            return Some(Frame { number: start_frame.number + page.p1_index() });
-                        }
-                    }
-                }
-                None
-            })
-        };
-
-        p3.and_then(|p3| p3.next_table(page.p3_index()))
-          .and_then(|p2| p2.next_table(page.p2_index()))
-          .and_then(|p1| p1[page.p1_index()].pointed_frame())
-          .or_else(huge_page)
-    }
-
-    pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
-        where A: FrameAllocator
-    {
-        let mut p3 = self.p4_mut().next_table_create(page.p4_index(), allocator);
-        let mut p2 = p3.next_table_create(page.p3_index(), allocator);
-        let mut p1 = p2.next_table_create(page.p2_index(), allocator);
-
-        assert!(p1[page.p1_index()].is_unused());
-        p1[page.p1_index()].set(frame, flags | PRESENT);
-    }
-
-    pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
-        where A: FrameAllocator
-    {
-        let frame = allocator.allocate_frame().expect("out of memory");
-        self.map_to(page, frame, flags, allocator)
-    }
-
-    pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
-        where A: FrameAllocator
-    {
-        let page = Page::containing_address(frame.start_address());
-        self.map_to(page, frame, flags, allocator)
-    }
-
-    fn unmap<A>(&mut self, page: Page, allocator: &mut A)
-        where A: FrameAllocator
-    {
-        assert!(self.translate(page.start_address()).is_some());
-
-        let p1 = self.p4_mut()
-                     .next_table_mut(page.p4_index())
-                     .and_then(|p3| p3.next_table_mut(page.p3_index()))
-                     .and_then(|p2| p2.next_table_mut(page.p2_index()))
-                     .expect("mapping code does not support huge pages");
-        let frame = p1[page.p1_index()].pointed_frame().unwrap();
-        p1[page.p1_index()].set_unused();
-        unsafe { ::x86::tlb::flush(page.start_address()) };
-        // TODO free p(1,2,3) table if empty
-        // allocator.deallocate_frame(frame);
+    unsafe fn new() -> ActivePageTable {
+        ActivePageTable { mapper: Mapper::new() }
     }
 }
 
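The Deref and DerefMut impls in the mod.rs hunk are what keep existing call sites compiling after the split: a call such as active_table.translate(addr) auto-derefs from ActivePageTable to the inner Mapper. The sketch below shows only this forwarding pattern; the Mapper here is a simplified stand-in (the real one owns a Unique<Table<Level4>> and its methods take a FrameAllocator), and the use_table function, the placeholder translate body, and the address literal are illustrative only.

use core::ops::{Deref, DerefMut};

// Simplified stand-in: the real Mapper owns a Unique<Table<Level4>>
// and its mapping methods take a FrameAllocator.
pub struct Mapper;

impl Mapper {
    // Placeholder standing in for Mapper::translate(VirtualAddress) -> Option<PhysicalAddress>.
    pub fn translate(&self, virtual_address: usize) -> Option<usize> {
        Some(virtual_address)
    }
}

pub struct ActivePageTable {
    mapper: Mapper,
}

impl Deref for ActivePageTable {
    type Target = Mapper;

    fn deref(&self) -> &Mapper {
        &self.mapper
    }
}

impl DerefMut for ActivePageTable {
    fn deref_mut(&mut self) -> &mut Mapper {
        &mut self.mapper
    }
}

fn use_table(active_table: &ActivePageTable) {
    // Resolves to Mapper::translate through auto-deref, so code written
    // against the old ActivePageTable::translate keeps compiling unchanged.
    let _frame_addr = active_table.translate(0xdead_beef);
}

Forwarding through Deref avoids re-exporting every Mapper method on ActivePageTable by hand; the cost is that ActivePageTable exposes Mapper's entire public surface.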