// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

pub use self::entry::*;
use memory::{PAGE_SIZE, Frame, FrameAllocator};
use self::temporary_page::TemporaryPage;
pub use self::mapper::Mapper;
use core::ops::{Deref, DerefMut};
use multiboot2::BootInformation;

mod entry;
mod table;
mod temporary_page;
mod mapper;

const ENTRY_COUNT: usize = 512;

pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
    number: usize,
}

impl Page {
    pub fn containing_address(address: VirtualAddress) -> Page {
        // valid x86_64 addresses are sign-extended from bit 47, so the
        // range between the two halves must not occur
        assert!(address < 0x0000_8000_0000_0000 ||
                address >= 0xffff_8000_0000_0000,
                "invalid address: 0x{:x}",
                address);
        Page { number: address / PAGE_SIZE }
    }

    fn start_address(&self) -> usize {
        self.number * PAGE_SIZE
    }

    // each table index is a 9-bit slice of the page number
    fn p4_index(&self) -> usize {
        (self.number >> 27) & 0o777
    }
    fn p3_index(&self) -> usize {
        (self.number >> 18) & 0o777
    }
    fn p2_index(&self) -> usize {
        (self.number >> 9) & 0o777
    }
    fn p1_index(&self) -> usize {
        (self.number >> 0) & 0o777
    }

    pub fn range_inclusive(start: Page, end: Page) -> PageIter {
        PageIter {
            start: start,
            end: end,
        }
    }
}

pub struct PageIter {
    start: Page,
    end: Page,
}

impl Iterator for PageIter {
    type Item = Page;

    fn next(&mut self) -> Option<Page> {
        if self.start <= self.end {
            let page = self.start;
            self.start.number += 1;
            Some(page)
        } else {
            None
        }
    }
}

pub struct ActivePageTable {
    mapper: Mapper,
}

impl Deref for ActivePageTable {
    type Target = Mapper;

    fn deref(&self) -> &Mapper {
        &self.mapper
    }
}

impl DerefMut for ActivePageTable {
    fn deref_mut(&mut self) -> &mut Mapper {
        &mut self.mapper
    }
}

impl ActivePageTable {
    unsafe fn new() -> ActivePageTable {
        ActivePageTable { mapper: Mapper::new() }
    }

    pub fn with<F>(&mut self,
                   table: &mut InactivePageTable,
                   temporary_page: &mut temporary_page::TemporaryPage,
                   f: F)
        where F: FnOnce(&mut Mapper)
    {
        use x86::shared::{control_regs, tlb};
        let flush_tlb = || unsafe { tlb::flush_all() };

        {
            let backup = Frame::containing_address(unsafe { control_regs::cr3() } as usize);

            // map temporary_page to current p4 table
            let p4_table = temporary_page.map_table_frame(backup.clone(), self);

            // overwrite recursive mapping
            self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
            flush_tlb();

            // execute f in the new context
            f(self);

            // restore recursive mapping to original p4 table
            p4_table[511].set(backup, PRESENT | WRITABLE);
            flush_tlb();
        }

        temporary_page.unmap(self);
    }

    pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
        use x86::shared::control_regs;

        let old_table = InactivePageTable {
            p4_frame: Frame::containing_address(unsafe { control_regs::cr3() } as usize),
        };
        unsafe {
            control_regs::cr3_write(new_table.p4_frame.start_address());
        }
        old_table
    }
}

pub struct InactivePageTable {
    p4_frame: Frame,
}

impl InactivePageTable {
    pub fn new(frame: Frame,
               active_table: &mut ActivePageTable,
               temporary_page: &mut TemporaryPage)
               -> InactivePageTable {
        {
            let table = temporary_page.map_table_frame(frame.clone(), active_table);
            // zero the new table, then set up its recursive mapping
            table.zero();
            table[511].set(frame.clone(), PRESENT | WRITABLE);
        }
        temporary_page.unmap(active_table);

        InactivePageTable { p4_frame: frame }
    }
}

pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation) -> ActivePageTable
    where A: FrameAllocator
{
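    // Outline: build a fresh P4 table in an `InactivePageTable`, identity
    // map the kernel sections, the VGA buffer, and the multiboot info into
    // it via `with`, then `switch` to it and turn the old P4 frame into a
    // guard page.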
    // `0xcafebabe` is just an arbitrary, otherwise unused page for the
    // temporary mapping
    let mut temporary_page = TemporaryPage::new(Page { number: 0xcafebabe },
                                                allocator);

    let mut active_table = unsafe { ActivePageTable::new() };
    let mut new_table = {
        let frame = allocator.allocate_frame().expect("no more frames");
        InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
    };

    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
        let elf_sections_tag = boot_info.elf_sections_tag()
            .expect("Elf sections tag required");

        // identity map the allocated kernel sections
        for section in elf_sections_tag.sections() {
            if !section.is_allocated() {
                // section is not loaded to memory
                continue;
            }

            assert!(section.addr as usize % PAGE_SIZE == 0,
                    "sections need to be page aligned");

            println!("mapping section at addr: {:#x}, size: {:#x}",
                     section.addr,
                     section.size);

            let flags = EntryFlags::from_elf_section_flags(section);

            let start_frame = Frame::containing_address(section.start_address());
            let end_frame = Frame::containing_address(section.end_address() - 1);
            for frame in Frame::range_inclusive(start_frame, end_frame) {
                mapper.identity_map(frame, flags, allocator);
            }
        }

        // identity map the VGA text buffer
        let vga_buffer_frame = Frame::containing_address(0xb8000);
        mapper.identity_map(vga_buffer_frame, WRITABLE, allocator);

        // identity map the multiboot info structure
        let multiboot_start = Frame::containing_address(boot_info.start_address());
        let multiboot_end = Frame::containing_address(boot_info.end_address() - 1);
        for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
            mapper.identity_map(frame, PRESENT, allocator);
        }
    });

    let old_table = active_table.switch(new_table);
    println!("NEW TABLE!!!");

    // turn the old p4 page into a guard page: unmapping it means a kernel
    // stack overflow now page faults instead of silently corrupting memory
    let old_p4_page = Page::containing_address(old_table.p4_frame.start_address());
    active_table.unmap(old_p4_page, allocator);
    println!("guard page at {:#x}", old_p4_page.start_address());

    active_table
}
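
// A minimal usage sketch (hypothetical caller; assumes a `FrameAllocator`
// such as this crate's `AreaFrameAllocator` and the parsed multiboot
// `BootInformation` are already set up):
//
//     let mut active_table = paging::remap_the_kernel(&mut frame_allocator,
//                                                     boot_info);
//     // from here on, only the kernel sections, the VGA buffer, and the
//     // multiboot info are mapped; the old P4 frame serves as a guard page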