Finish basics of new paging module

Philipp Oppermann
2015-12-03 21:17:18 +01:00
parent 562221d725
commit a8df7b2e4d
11 changed files with 122 additions and 698 deletions


@@ -1,88 +1,38 @@
use memory::{Frame, FrameAllocator};
mod entry;
mod table;
pub mod translate;
pub mod mapping;
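
/// Maps a handful of test pages to exercise the new mapping code.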
pub fn test<A>(frame_allocator: &mut A)
    where A: super::FrameAllocator
{
    use self::entry::PRESENT;

    mapping::map(&Page::containing_address(0xdeadbeaa000),
                 PRESENT,
                 frame_allocator);
    mapping::map(&Page::containing_address(0xdeadbeab000),
                 PRESENT,
                 frame_allocator);
    mapping::map(&Page::containing_address(0xdeadbeac000),
                 PRESENT,
                 frame_allocator);
    mapping::map(&Page::containing_address(0xdeadbead000),
                 PRESENT,
                 frame_allocator);
    mapping::map(&Page::containing_address(0xcafebeaf000),
                 PRESENT,
                 frame_allocator);
    mapping::map(&Page::containing_address(0x0),
                 PRESENT,
                 frame_allocator);
}

pub const PAGE_SIZE: usize = 4096;
const ENTRY_SIZE: usize = 8;
const ENTRY_COUNT: usize = 512;
pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;
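
/// Translates a virtual address to the corresponding physical address by
/// walking the page tables. Returns `None` if the address is not mapped.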
pub fn translate(virtual_address: usize) -> Option<PhysicalAddress> {
    let page = Page::containing_address(virtual_address);
    let offset = virtual_address % PAGE_SIZE;

    let frame_number = {
        let p4_entry = page.p4_table().entry(page.p4_index());
        assert!(!p4_entry.flags().contains(HUGE_PAGE));
        if !p4_entry.flags().contains(PRESENT) {
            return None;
        }
        let p3_entry = unsafe { page.p3_table() }.entry(page.p3_index());
        if !p3_entry.flags().contains(PRESENT) {
            return None;
        }
        if p3_entry.flags().contains(HUGE_PAGE) {
            // 1GiB page (address must be 1GiB aligned)
            let start_frame_number = p3_entry.pointed_frame().number;
            assert!(start_frame_number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
            start_frame_number + page.p2_index() * ENTRY_COUNT + page.p1_index()
        } else {
            // 2MiB or 4KiB page
            let p2_entry = unsafe { page.p2_table() }.entry(page.p2_index());
            if !p2_entry.flags().contains(PRESENT) {
                return None;
            }
            if p2_entry.flags().contains(HUGE_PAGE) {
                // 2MiB page (address must be 2MiB aligned)
                let start_frame_number = p2_entry.pointed_frame().number;
                assert!(start_frame_number % ENTRY_COUNT == 0);
                start_frame_number + page.p1_index()
            } else {
                // standard 4KiB page
                let p1_entry = unsafe { page.p1_table() }.entry(page.p1_index());
                assert!(!p1_entry.flags().contains(HUGE_PAGE));
                if !p1_entry.flags().contains(PRESENT) {
                    return None;
                }
                p1_entry.pointed_frame().number
            }
        }
    };
    Some(frame_number * PAGE_SIZE + offset)
}

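/// Maps `page` to `frame` with the given flags, allocating and zeroing
/// intermediate tables as needed. Panics if the page is already mapped
/// or if no frames are available.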
pub fn map_to<A>(page: &Page, frame: Frame, flags: TableEntryFlags, allocator: &mut A)
    where A: FrameAllocator
{
    let p4_index = page.p4_index();
    let p3_index = page.p3_index();
    let p2_index = page.p2_index();
    let p1_index = page.p1_index();

    let mut p4 = page.p4_table();
    if !p4.entry(p4_index).flags().contains(PRESENT) {
        let frame = allocator.allocate_frame().expect("no frames available");
        p4.set_entry(p4_index, TableEntry::new(frame, PRESENT | WRITABLE));
        unsafe { page.p3_table() }.zero();
    }
    let mut p3 = unsafe { page.p3_table() };
    if !p3.entry(p3_index).flags().contains(PRESENT) {
        let frame = allocator.allocate_frame().expect("no frames available");
        p3.set_entry(p3_index, TableEntry::new(frame, PRESENT | WRITABLE));
        unsafe { page.p2_table() }.zero();
    }
    let mut p2 = unsafe { page.p2_table() };
    if !p2.entry(p2_index).flags().contains(PRESENT) {
        let frame = allocator.allocate_frame().expect("no frames available");
        p2.set_entry(p2_index, TableEntry::new(frame, PRESENT | WRITABLE));
        unsafe { page.p1_table() }.zero();
    }
    let mut p1 = unsafe { page.p1_table() };
    assert!(!p1.entry(p1_index).flags().contains(PRESENT));
    p1.set_entry(p1_index, TableEntry::new(frame, flags));
}

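/// A virtual 4KiB page, identified by its page number (virtual address / PAGE_SIZE).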
pub struct Page {
    number: usize,
}
@@ -119,90 +69,4 @@ impl Page {
    fn p1_index(&self) -> usize {
        (self.number >> 0) & 0o777
    }

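    // The table accessors below assume a recursive page table mapping: the
    // 511th P4 entry points back to the P4 frame itself, so virtual addresses
    // built from 0o777 indexes resolve to the page tables themselves.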
    const fn p4_table(&self) -> Table {
        Table(Page { number: 0o_777_777_777_777 })
    }

    /// # Safety
    /// Only valid if the corresponding entry in the parent table is PRESENT and not HUGE_PAGE.
    unsafe fn p3_table(&self) -> Table {
        Table(Page { number: 0o_777_777_777_000 | self.p4_index() })
    }

    /// # Safety
    /// Only valid if the corresponding entry in the parent table is PRESENT and not HUGE_PAGE.
    unsafe fn p2_table(&self) -> Table {
        Table(Page { number: 0o_777_777_000_000 | (self.p4_index() << 9) | self.p3_index() })
    }

    /// # Safety
    /// Only valid if the corresponding entry in the parent table is PRESENT and not HUGE_PAGE.
    unsafe fn p1_table(&self) -> Table {
        Table(Page {
            number: 0o_777_000_000_000 | (self.p4_index() << 18) | (self.p3_index() << 9) |
                    self.p2_index(),
        })
    }
}

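/// A page table, accessed through the virtual page it is mapped at.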
struct Table(Page);

impl Table {
    fn entry(&self, index: usize) -> TableEntry {
        assert!(index < ENTRY_COUNT);
        let entry_address = self.0.start_address() + index * ENTRY_SIZE;
        unsafe { *(entry_address as *const _) }
    }

    fn set_entry(&mut self, index: usize, value: TableEntry) {
        assert!(index < ENTRY_COUNT);
        let entry_address = self.0.start_address() + index * ENTRY_SIZE;
        unsafe { *(entry_address as *mut _) = value }
    }

    fn zero(&mut self) {
        let page = self.0.start_address() as *mut [TableEntry; ENTRY_COUNT];
        unsafe { *page = [TableEntry::unused(); ENTRY_COUNT] };
    }
}

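/// A 64-bit page table entry: bits 12..51 hold the physical address of the
/// pointed-to frame, the low bits and bit 63 hold the flags defined below.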
#[derive(Debug, Clone, Copy)]
struct TableEntry(u64);

impl TableEntry {
    const fn unused() -> TableEntry {
        TableEntry(0)
    }

    fn new(frame: Frame, flags: TableEntryFlags) -> TableEntry {
        let frame_addr = (frame.number << 12) & 0x000fffff_fffff000;
        TableEntry((frame_addr as u64) | flags.bits())
    }

    fn flags(&self) -> TableEntryFlags {
        TableEntryFlags::from_bits_truncate(self.0)
    }

    fn pointed_frame(&self) -> Frame {
        Frame { number: ((self.0 & 0x000fffff_fffff000) >> 12) as usize }
    }
}

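// Flag bits of an x86_64 page table entry.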
bitflags! {
    flags TableEntryFlags: u64 {
        const PRESENT = 1 << 0,
        const WRITABLE = 1 << 1,
        const USER_ACCESSIBLE = 1 << 2,
        const WRITE_THROUGH = 1 << 3,
        const NO_CACHE = 1 << 4,
        const ACCESSED = 1 << 5,
        const DIRTY = 1 << 6,
        const HUGE_PAGE = 1 << 7,
        const GLOBAL = 1 << 8,
        const NO_EXECUTE = 1 << 63,
    }
}

mod tables;