// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use super::{VirtualAddress, PhysicalAddress, Page, ENTRY_COUNT};
use super::entry::*;
use super::table::{self, Table, Level4};
use memory::{PAGE_SIZE, Frame, FrameAllocator};
use core::ptr::Unique;

/// Owns the recursively mapped P4 table and provides translation and
/// (un)mapping of pages on top of it.
pub struct Mapper {
    p4: Unique<Table<Level4>>,
}

impl Mapper {
    /// Unsafe because the caller must guarantee that `table::P4` points to a
    /// valid, recursively mapped level 4 table.
    pub unsafe fn new() -> Mapper {
        Mapper { p4: Unique::new(table::P4) }
    }

    pub fn p4(&self) -> &Table<Level4> {
        unsafe { self.p4.get() }
    }

    pub fn p4_mut(&mut self) -> &mut Table<Level4> {
        unsafe { self.p4.get_mut() }
    }

    /// Translates a virtual address to the corresponding physical address.
    /// Returns `None` if the address is not mapped.
    pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
        let offset = virtual_address % PAGE_SIZE;
        self.translate_page(Page::containing_address(virtual_address))
            .map(|frame| frame.number * PAGE_SIZE + offset)
    }

    /// Translates a page to the frame it is mapped to, taking 1GiB and 2MiB
    /// huge pages into account.
    pub fn translate_page(&self, page: Page) -> Option<Frame> {
        let p3 = self.p4().next_table(page.p4_index());

        let huge_page = || {
            p3.and_then(|p3| {
                let p3_entry = &p3[page.p3_index()];
                // 1GiB page?
                if let Some(start_frame) = p3_entry.pointed_frame() {
                    if p3_entry.flags().contains(HUGE_PAGE) {
                        // address must be 1GiB aligned
                        assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
                        return Some(Frame {
                            number: start_frame.number + page.p2_index() * ENTRY_COUNT +
                                    page.p1_index(),
                        });
                    }
                }
                if let Some(p2) = p3.next_table(page.p3_index()) {
                    let p2_entry = &p2[page.p2_index()];
                    // 2MiB page?
                    if let Some(start_frame) = p2_entry.pointed_frame() {
                        if p2_entry.flags().contains(HUGE_PAGE) {
                            // address must be 2MiB aligned
                            assert!(start_frame.number % ENTRY_COUNT == 0);
                            return Some(Frame { number: start_frame.number + page.p1_index() });
                        }
                    }
                }
                None
            })
        };

        p3.and_then(|p3| p3.next_table(page.p3_index()))
            .and_then(|p2| p2.next_table(page.p2_index()))
            .and_then(|p1| p1[page.p1_index()].pointed_frame())
            .or_else(huge_page)
    }

    /// Maps `page` to `frame` with the given flags, creating missing page
    /// tables through `allocator`. Panics if the page is already mapped.
    pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let mut p3 = self.p4_mut().next_table_create(page.p4_index(), allocator);
        let mut p2 = p3.next_table_create(page.p3_index(), allocator);
        let mut p1 = p2.next_table_create(page.p2_index(), allocator);

        assert!(p1[page.p1_index()].is_unused());
        p1[page.p1_index()].set(frame, flags | PRESENT);
    }

    /// Maps `page` to a freshly allocated frame.
    pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let frame = allocator.allocate_frame().expect("out of memory");
        self.map_to(page, frame, flags, allocator)
    }

    /// Identity-maps `frame`, so that the virtual address equals the physical
    /// address.
    pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let page = Page::containing_address(frame.start_address());
        self.map_to(page, frame, flags, allocator)
    }

    /// Unmaps `page` and flushes the corresponding TLB entry. Panics if the
    /// page is not mapped or is part of a huge page.
    pub fn unmap<A>(&mut self, page: Page, allocator: &mut A)
        where A: FrameAllocator
    {
        assert!(self.translate(page.start_address()).is_some());

        let p1 = self.p4_mut()
                     .next_table_mut(page.p4_index())
                     .and_then(|p3| p3.next_table_mut(page.p3_index()))
                     .and_then(|p2| p2.next_table_mut(page.p2_index()))
                     .expect("mapping code does not support huge pages");
        let frame = p1[page.p1_index()].pointed_frame().unwrap();
        p1[page.p1_index()].set_unused();
        unsafe { ::x86::tlb::flush(page.start_address()) };
        // TODO free p(1,2,3) table if empty
        // allocator.deallocate_frame(frame);
    }
}
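
// Usage sketch for the `Mapper` API above. It assumes a `FrameAllocator`
// implementation bound to a variable named `allocator` (for example the
// blog's `AreaFrameAllocator`) and that the active P4 table is recursively
// mapped as set up elsewhere in the series:
//
//     let mut mapper = unsafe { Mapper::new() };
//     let addr = 42 * 512 * 512 * 4096; // some unmapped virtual address
//     let page = Page::containing_address(addr);
//
//     mapper.map(page, WRITABLE, &mut allocator);
//     assert!(mapper.translate(addr).is_some());
//
//     mapper.unmap(page, &mut allocator);
//     assert!(mapper.translate(addr).is_none());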