Make translate and map_to safe by making them RecursivePageTable methods

Philipp Oppermann
2015-12-09 01:01:32 +01:00
parent fb7d2d22b6
commit 86d8e99271

@@ -57,61 +57,59 @@ impl RecursivePageTable {
     fn p4_mut(&mut self) -> &mut Table<Level4> {
         unsafe { self.p4.get_mut() }
     }
-}
 
-pub unsafe fn translate(virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
-    let offset = virtual_address % PAGE_SIZE;
-    translate_page(Page::containing_address(virtual_address))
-        .map(|frame| frame.number * PAGE_SIZE + offset)
-}
+    pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
+        let offset = virtual_address % PAGE_SIZE;
+        self.translate_page(Page::containing_address(virtual_address))
+            .map(|frame| frame.number * PAGE_SIZE + offset)
+    }
 
-unsafe fn translate_page(page: Page) -> Option<Frame> {
-    let p3 = unsafe { &*table::P4 }.next_table(page.p4_index());
+    fn translate_page(&self, page: Page) -> Option<Frame> {
+        let p3 = self.p4().next_table(page.p4_index());
 
-    let huge_page = || {
-        p3.and_then(|p3| {
-            let p3_entry = &p3[page.p3_index()];
-            // 1GiB page?
-            if let Some(start_frame) = p3_entry.pointed_frame() {
-                if p3_entry.flags().contains(HUGE_PAGE) {
-                    // address must be 1GiB aligned
-                    assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
-                    return Some(Frame {
-                        number: start_frame.number + page.p2_index() * ENTRY_COUNT +
-                                page.p1_index(),
-                    });
-                }
-            }
-            if let Some(p2) = p3.next_table(page.p3_index()) {
-                let p2_entry = &p2[page.p2_index()];
-                // 2MiB page?
-                if let Some(start_frame) = p2_entry.pointed_frame() {
-                    if p2_entry.flags().contains(HUGE_PAGE) {
-                        // address must be 2MiB aligned
-                        assert!(start_frame.number % ENTRY_COUNT == 0);
-                        return Some(Frame { number: start_frame.number + page.p1_index() });
-                    }
-                }
-            }
-            None
-        })
-    };
+        let huge_page = || {
+            p3.and_then(|p3| {
+                let p3_entry = &p3[page.p3_index()];
+                // 1GiB page?
+                if let Some(start_frame) = p3_entry.pointed_frame() {
+                    if p3_entry.flags().contains(HUGE_PAGE) {
+                        // address must be 1GiB aligned
+                        assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
+                        return Some(Frame {
+                            number: start_frame.number + page.p2_index() * ENTRY_COUNT +
+                                    page.p1_index(),
+                        });
+                    }
+                }
+                if let Some(p2) = p3.next_table(page.p3_index()) {
+                    let p2_entry = &p2[page.p2_index()];
+                    // 2MiB page?
+                    if let Some(start_frame) = p2_entry.pointed_frame() {
+                        if p2_entry.flags().contains(HUGE_PAGE) {
+                            // address must be 2MiB aligned
+                            assert!(start_frame.number % ENTRY_COUNT == 0);
+                            return Some(Frame { number: start_frame.number + page.p1_index() });
+                        }
+                    }
+                }
+                None
+            })
+        };
 
-    p3.and_then(|p3| p3.next_table(page.p3_index()))
-        .and_then(|p2| p2.next_table(page.p2_index()))
-        .and_then(|p1| p1[page.p1_index()].pointed_frame())
-        .or_else(huge_page)
-}
+        p3.and_then(|p3| p3.next_table(page.p3_index()))
+            .and_then(|p2| p2.next_table(page.p2_index()))
+            .and_then(|p1| p1[page.p1_index()].pointed_frame())
+            .or_else(huge_page)
+    }
 
-pub unsafe fn map_to<A>(page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
-    where A: FrameAllocator
-{
-    let p4 = unsafe { &mut *table::P4 };
-    let mut p3 = p4.next_table_create(page.p4_index(), allocator);
-    let mut p2 = p3.next_table_create(page.p3_index(), allocator);
-    let mut p1 = p2.next_table_create(page.p2_index(), allocator);
+    pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
+        where A: FrameAllocator
+    {
+        let mut p3 = self.p4_mut().next_table_create(page.p4_index(), allocator);
+        let mut p2 = p3.next_table_create(page.p3_index(), allocator);
+        let mut p1 = p2.next_table_create(page.p2_index(), allocator);
 
-    assert!(p1[page.p1_index()].is_unused());
-    p1[page.p1_index()].set(frame, flags | PRESENT);
+        assert!(p1[page.p1_index()].is_unused());
+        p1[page.p1_index()].set(frame, flags | PRESENT);
+    }
 }
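
For context, a minimal caller-side sketch of the new safe API. It is not part of this commit: the example_usage function, its parameters, and the choice of EntryFlags::empty() are illustrative assumptions, and the sketch assumes it lives in the same paging module so Page, Frame, RecursivePageTable, EntryFlags, and FrameAllocator are already in scope.

// Illustrative only: shows how translate and map_to are now called as methods
// on a RecursivePageTable value instead of as free unsafe functions.
fn example_usage<A>(page_table: &mut RecursivePageTable, allocator: &mut A)
    where A: FrameAllocator
{
    // some virtual address in the 42th P3 entry, assumed to be unmapped here
    let addr = 42 * 512 * 512 * 4096;
    let page = Page::containing_address(addr);

    // before the mapping exists, translate returns None
    assert!(page_table.translate(addr).is_none());

    // map a freshly allocated frame; map_to adds the PRESENT flag itself
    let frame = allocator.allocate_frame().expect("no more frames");
    page_table.map_to(page, frame, EntryFlags::empty(), allocator);

    // the new mapping is visible through the safe translate method
    assert!(page_table.translate(addr).is_some());
}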