Compare commits


35 Commits

Author SHA1 Message Date
Philipp Oppermann  93aff8cfa8  Test our exception handler by invoking a breakpoint exception  2017-11-19 14:22:24 +01:00
Philipp Oppermann  fab320271a  Create and load an IDT  2017-11-19 14:21:51 +01:00
Philipp Oppermann  7becaf5f30  Add a dependency on lazy_static  2017-11-19 14:21:51 +01:00
Philipp Oppermann  3bbc2a0bdc  Add a simple handler function for the breakpoint exception  2017-11-19 14:21:36 +01:00
Philipp Oppermann  c2d22af1c7  Create a new interrupts module  2017-11-19 14:21:12 +01:00
Philipp Oppermann  0ddd214a1b  Update Readme for “Handling Exceptions” post  2017-11-19 14:21:00 +01:00
Philipp Oppermann  ad211de615  Use linked list allocator instead of bump allocator  2017-11-19 14:20:46 +01:00
Philipp Oppermann  01f8c43ffb  Map the heap pages to physical frames  2017-11-19 14:20:46 +01:00
Philipp Oppermann  f2bbe43099  Use once crate to ensure that memory::init is only called once  2017-11-19 14:20:45 +01:00
Philipp Oppermann  76550dcd95  Refactor: Move memory initialization to memory::init function  2017-11-19 14:20:45 +01:00
Philipp Oppermann  c0d403abbe  Set a global allocator  2017-11-19 14:20:45 +01:00
Philipp Oppermann  9dc998222a  Make the bump allocator lock free and impl Alloc for shared reference  2017-11-19 14:20:45 +01:00
Philipp Oppermann  1f6633fe44  Add a heap_allocator module with a basic bump allocator  2017-11-19 14:20:45 +01:00
Philipp Oppermann  5f017124dd  Add a dependency on the alloc crate  2017-11-19 14:20:45 +01:00
Philipp Oppermann  36369cfbe2  Update Readme for “Kernel Heap” post  2017-11-19 14:20:45 +01:00
Philipp Oppermann  c6dd37dcc5  Turn page of old P4 into guard page  2017-11-19 11:44:50 +01:00
Philipp Oppermann  2a3ce863fd  Enable write protect bit to improve safety  2017-11-19 11:44:50 +01:00
Philipp Oppermann  ffaddeb84e  Fix boot loop by enabling the NXE bit in the EFER register  2017-11-19 11:44:50 +01:00
Philipp Oppermann  0ce2b46ad1  Use the correct entry flags for kernel sections  2017-11-19 11:44:50 +01:00
Philipp Oppermann  11993b7e15  Also identity map the multiboot info structure  2017-11-19 11:44:50 +01:00
Philipp Oppermann  fdacfb24f0  Fix boot loop by identity mapping the vga buffer  2017-11-19 11:44:50 +01:00
Philipp Oppermann  564c6b64e9  Switch to the new page table after mapping all sections  2017-11-19 11:44:50 +01:00
Philipp Oppermann  fa3b572d68  Only print kernel/multiboot start/end in rust_main  2017-11-19 11:44:50 +01:00
Philipp Oppermann  40f1732ccb  Call remap_the_kernel from rust_main  2017-11-19 11:44:49 +01:00
Philipp Oppermann  5d898d8474  Page align all sections as they will be individually mapped  2017-11-19 11:44:49 +01:00
Philipp Oppermann  e029eabe18  Add a remap_the_kernel function  2017-11-19 11:44:49 +01:00
Philipp Oppermann  60d7c736a5  Add a (now safe) with method  2017-11-19 11:44:49 +01:00
Philipp Oppermann  2f30b0f7cf  Create a new ActivePageTable struct that derefs to Mapper  2017-11-19 11:44:49 +01:00
Philipp Oppermann  ce9c4d6e43  Refactor: Move ActivePageTable to new mapper submodule and rename to Mapper  2017-11-19 11:44:49 +01:00
Philipp Oppermann  4160cb6d7e  Begin creating an ActivePageTable::with function  2017-11-19 11:42:33 +01:00
Philipp Oppermann  b2e79752fd  Fix InactivePageTable::new function using a temporary page  2017-11-19 11:42:33 +01:00
Philipp Oppermann  11ae7e8aeb  Create a temporary_page module  2017-11-19 11:42:33 +01:00
Philipp Oppermann  1737f48284  Create a InactivePageTable struct  2017-11-19 11:42:33 +01:00
Philipp Oppermann  1f1f6c62da  Derive Copy and Clone for Page and add a Frame::clone method  2017-11-19 11:42:33 +01:00
Philipp Oppermann  bb3f17c0b9  Update Readme for “Remap the Kernel” post  2017-11-19 11:42:33 +01:00
12 changed files with 668 additions and 147 deletions

Cargo.toml

@@ -13,3 +13,9 @@ spin = "0.4.5"
multiboot2 = "0.1.0"
bitflags = "0.7.0"
x86_64 = "0.1.2"
once = "0.3.3"
linked_list_allocator = "0.4.2"
[dependencies.lazy_static]
version = "0.2.4"
features = ["spin_no_std"]

README.md

@@ -1,7 +1,7 @@
# Blog OS (Page Tables)
[![Build Status](https://travis-ci.org/phil-opp/blog_os.svg?branch=post_6)](https://travis-ci.org/phil-opp/blog_os/branches)
# Blog OS (Handling Exceptions)
[![Build Status](https://travis-ci.org/phil-opp/blog_os.svg?branch=post_9)](https://travis-ci.org/phil-opp/blog_os/branches)
This repository contains the source code for the [Page Tables](http://os.phil-opp.com/modifying-page-tables.html) post of the [Writing an OS in Rust](http://os.phil-opp.com) series.
This repository contains the source code for the [Handling Exceptions](http://os.phil-opp.com/handling-exceptions.html) post of the [Writing an OS in Rust](http://os.phil-opp.com) series.
**Check out the [master branch](https://github.com/phil-opp/blog_os) for more information.**

2
Xargo.toml Normal file

@@ -0,0 +1,2 @@
[target.x86_64-blog_os.dependencies]
alloc = {}

src/arch/x86_64/linker.ld

@@ -1,24 +1,53 @@
ENTRY(start)
SECTIONS {
. = 1M;
. = 1M;
.boot :
{
/* ensure that the multiboot header is at the beginning */
KEEP(*(.multiboot_header))
}
.rodata :
{
/* ensure that the multiboot header is at the beginning */
KEEP(*(.multiboot_header))
*(.rodata .rodata.*)
. = ALIGN(4K);
}
.text :
{
*(.text .text.*)
}
.text :
{
*(.text .text.*)
. = ALIGN(4K);
}
.rodata : {
*(.rodata .rodata.*)
}
.data :
{
*(.data .data.*)
. = ALIGN(4K);
}
.data.rel.ro : {
*(.data.rel.ro.local*) *(.data.rel.ro .data.rel.ro.*)
}
.bss :
{
*(.bss .bss.*)
. = ALIGN(4K);
}
.got :
{
*(.got)
. = ALIGN(4K);
}
.got.plt :
{
*(.got.plt)
. = ALIGN(4K);
}
.data.rel.ro : ALIGN(4K) {
*(.data.rel.ro.local*) *(.data.rel.ro .data.rel.ro.*)
. = ALIGN(4K);
}
.gcc_except_table : ALIGN(4K) {
*(.gcc_except_table)
. = ALIGN(4K);
}
}

19
src/interrupts.rs Normal file

@@ -0,0 +1,19 @@
use x86_64::structures::idt::{Idt, ExceptionStackFrame};
lazy_static! {
static ref IDT: Idt = {
let mut idt = Idt::new();
idt.breakpoint.set_handler_fn(breakpoint_handler);
idt
};
}
pub fn init() {
IDT.load();
}
extern "x86-interrupt" fn breakpoint_handler(
stack_frame: &mut ExceptionStackFrame)
{
println!("EXCEPTION: BREAKPOINT\n{:#?}", stack_frame);
}

src/lib.rs

@@ -1,9 +1,17 @@
#![feature(lang_items)]
#![feature(const_fn)]
#![feature(const_unique_new)]
#![feature(alloc)]
#![feature(const_unique_new, const_atomic_usize_new)]
#![feature(unique)]
#![feature(allocator_api)]
#![feature(global_allocator)]
#![feature(abi_x86_interrupt)]
#![no_std]
#[macro_use]
extern crate alloc;
extern crate rlibc;
extern crate volatile;
extern crate spin;
@@ -11,49 +19,63 @@ extern crate multiboot2;
#[macro_use]
extern crate bitflags;
extern crate x86_64;
#[macro_use]
extern crate once;
extern crate linked_list_allocator;
#[macro_use]
extern crate lazy_static;
#[macro_use]
mod vga_buffer;
mod memory;
mod interrupts;
#[no_mangle]
pub extern fn rust_main(multiboot_information_address: usize) {
use memory::FrameAllocator;
pub extern "C" fn rust_main(multiboot_information_address: usize) {
// ATTENTION: we have a very small stack and no guard page
vga_buffer::clear_screen();
println!("Hello World{}", "!");
let boot_info = unsafe{ multiboot2::load(multiboot_information_address) };
let memory_map_tag = boot_info.memory_map_tag()
.expect("Memory map tag required");
let boot_info = unsafe {
multiboot2::load(multiboot_information_address)
};
enable_nxe_bit();
enable_write_protect_bit();
println!("memory areas:");
for area in memory_map_tag.memory_areas() {
println!(" start: 0x{:x}, length: 0x{:x}",
area.base_addr, area.length);
// set up guard page and map the heap pages
memory::init(boot_info);
unsafe {
HEAP_ALLOCATOR.lock().init(HEAP_START, HEAP_START + HEAP_SIZE);
}
let elf_sections_tag = boot_info.elf_sections_tag()
.expect("Elf-sections tag required");
// initialize our IDT
interrupts::init();
println!("kernel sections:");
for section in elf_sections_tag.sections() {
println!(" addr: 0x{:x}, size: 0x{:x}, flags: 0x{:x}",
section.addr, section.size, section.flags);
for i in 0..10000 {
format!("Some String");
}
let kernel_start = elf_sections_tag.sections().map(|s| s.addr)
.min().unwrap();
let kernel_end = elf_sections_tag.sections().map(|s| s.addr + s.size)
.max().unwrap();
let multiboot_start = multiboot_information_address;
let multiboot_end = multiboot_start + (boot_info.total_size as usize);
// invoke a breakpoint exception
x86_64::instructions::interrupts::int3();
let mut frame_allocator = memory::AreaFrameAllocator::new(
kernel_start as usize, kernel_end as usize, multiboot_start,
multiboot_end, memory_map_tag.memory_areas());
println!("It did not crash!");
loop {}
}
loop{}
fn enable_nxe_bit() {
use x86_64::registers::msr::{IA32_EFER, rdmsr, wrmsr};
let nxe_bit = 1 << 11;
unsafe {
let efer = rdmsr(IA32_EFER);
wrmsr(IA32_EFER, efer | nxe_bit);
}
}
fn enable_write_protect_bit() {
use x86_64::registers::control_regs::{cr0, cr0_write, Cr0};
unsafe { cr0_write(cr0() | Cr0::WRITE_PROTECT) };
}
#[lang = "eh_personality"] extern fn eh_personality() {}
@@ -65,3 +87,11 @@ pub extern fn panic_fmt(fmt: core::fmt::Arguments, file: &'static str, line: u32
println!(" {}", fmt);
loop{}
}
use linked_list_allocator::LockedHeap;
pub const HEAP_START: usize = 0o_000_001_000_000_0000;
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();

src/memory/heap_allocator.rs Normal file

@@ -0,0 +1,62 @@
use alloc::heap::{Alloc, AllocErr, Layout};
use core::sync::atomic::{AtomicUsize, Ordering};
/// A simple allocator that allocates memory linearly and ignores freed memory.
#[derive(Debug)]
pub struct BumpAllocator {
heap_start: usize,
heap_end: usize,
next: AtomicUsize,
}
impl BumpAllocator {
pub const fn new(heap_start: usize, heap_end: usize) -> Self {
Self { heap_start, heap_end, next: AtomicUsize::new(heap_start) }
}
}
unsafe impl<'a> Alloc for &'a BumpAllocator {
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
loop {
// load current state of the `next` field
let current_next = self.next.load(Ordering::Relaxed);
let alloc_start = align_up(current_next, layout.align());
let alloc_end = alloc_start.saturating_add(layout.size());
if alloc_end <= self.heap_end {
// update the `next` pointer if it still has the value `current_next`
let next_now = self.next.compare_and_swap(current_next, alloc_end,
Ordering::Relaxed);
if next_now == current_next {
// next address was successfully updated, allocation succeeded
return Ok(alloc_start as *mut u8);
}
} else {
return Err(AllocErr::Exhausted{ request: layout })
}
}
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
// do nothing, leak memory
}
}
/// Align downwards. Returns the greatest x with alignment `align`
/// so that x <= addr. The alignment must be a power of 2.
pub fn align_down(addr: usize, align: usize) -> usize {
if align.is_power_of_two() {
addr & !(align - 1)
} else if align == 0 {
addr
} else {
panic!("`align` must be a power of 2");
}
}
/// Align upwards. Returns the smallest x with alignment `align`
/// so that x >= addr. The alignment must be a power of 2.
pub fn align_up(addr: usize, align: usize) -> usize {
align_down(addr + align - 1, align)
}

src/memory/mod.rs

@@ -1,11 +1,55 @@
pub use self::area_frame_allocator::AreaFrameAllocator;
pub use self::paging::remap_the_kernel;
use self::paging::PhysicalAddress;
use multiboot2::BootInformation;
mod area_frame_allocator;
pub mod heap_allocator;
mod paging;
pub const PAGE_SIZE: usize = 4096;
pub fn init(boot_info: &BootInformation) {
assert_has_not_been_called!("memory::init must be called only once");
let memory_map_tag = boot_info.memory_map_tag().expect(
"Memory map tag required");
let elf_sections_tag = boot_info.elf_sections_tag().expect(
"Elf sections tag required");
let kernel_start = elf_sections_tag.sections()
.filter(|s| s.is_allocated()).map(|s| s.addr).min().unwrap();
let kernel_end = elf_sections_tag.sections()
.filter(|s| s.is_allocated()).map(|s| s.addr + s.size).max()
.unwrap();
println!("kernel start: {:#x}, kernel end: {:#x}",
kernel_start,
kernel_end);
println!("multiboot start: {:#x}, multiboot end: {:#x}",
boot_info.start_address(),
boot_info.end_address());
let mut frame_allocator = AreaFrameAllocator::new(
kernel_start as usize, kernel_end as usize,
boot_info.start_address(), boot_info.end_address(),
memory_map_tag.memory_areas());
let mut active_table = paging::remap_the_kernel(&mut frame_allocator,
boot_info);
use self::paging::Page;
use {HEAP_START, HEAP_SIZE};
let heap_start_page = Page::containing_address(HEAP_START);
let heap_end_page = Page::containing_address(HEAP_START + HEAP_SIZE-1);
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
active_table.map(page, paging::WRITABLE, &mut frame_allocator);
}
}
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Frame {
number: usize,
@@ -19,8 +63,38 @@ impl Frame {
fn start_address(&self) -> PhysicalAddress {
self.number * PAGE_SIZE
}
fn clone(&self) -> Frame {
Frame { number: self.number }
}
fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
FrameIter {
start: start,
end: end,
}
}
}
struct FrameIter {
start: Frame,
end: Frame,
}
impl Iterator for FrameIter {
type Item = Frame;
fn next(&mut self) -> Option<Frame> {
if self.start <= self.end {
let frame = self.start.clone();
self.start.number += 1;
Some(frame)
} else {
None
}
}
}
pub trait FrameAllocator {
fn allocate_frame(&mut self) -> Option<Frame>;
fn deallocate_frame(&mut self, frame: Frame);

src/memory/paging/entry.rs

@@ -1,4 +1,5 @@
use memory::Frame;
use multiboot2::ElfSection;
pub struct Entry(u64);
@@ -45,3 +46,26 @@ bitflags! {
const NO_EXECUTE = 1 << 63,
}
}
impl EntryFlags {
pub fn from_elf_section_flags(section: &ElfSection) -> EntryFlags {
use multiboot2::{ELF_SECTION_ALLOCATED, ELF_SECTION_WRITABLE,
ELF_SECTION_EXECUTABLE};
let mut flags = EntryFlags::empty();
if section.flags().contains(ELF_SECTION_ALLOCATED) {
// section is loaded to memory
flags = flags | PRESENT;
}
if section.flags().contains(ELF_SECTION_WRITABLE) {
flags = flags | WRITABLE;
}
if !section.flags().contains(ELF_SECTION_EXECUTABLE) {
flags = flags | NO_EXECUTE;
}
flags
}
}

118
src/memory/paging/mapper.rs Normal file

@@ -0,0 +1,118 @@
use super::{VirtualAddress, PhysicalAddress, Page, ENTRY_COUNT};
use super::entry::*;
use super::table::{self, Table, Level4, Level1};
use memory::{PAGE_SIZE, Frame, FrameAllocator};
use core::ptr::Unique;
pub struct Mapper {
p4: Unique<Table<Level4>>,
}
impl Mapper {
pub unsafe fn new() -> Mapper {
Mapper {
p4: Unique::new_unchecked(table::P4),
}
}
pub fn p4(&self) -> &Table<Level4> {
unsafe { self.p4.as_ref() }
}
pub fn p4_mut(&mut self) -> &mut Table<Level4> {
unsafe { self.p4.as_mut() }
}
pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
let offset = virtual_address % PAGE_SIZE;
self.translate_page(Page::containing_address(virtual_address))
.map(|frame| frame.number * PAGE_SIZE + offset)
}
pub fn translate_page(&self, page: Page) -> Option<Frame> {
let p3 = self.p4().next_table(page.p4_index());
let huge_page = || {
p3.and_then(|p3| {
let p3_entry = &p3[page.p3_index()];
// 1GiB page?
if let Some(start_frame) = p3_entry.pointed_frame() {
if p3_entry.flags().contains(HUGE_PAGE) {
// address must be 1GiB aligned
assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
return Some(Frame {
number: start_frame.number + page.p2_index() *
ENTRY_COUNT + page.p1_index(),
});
}
}
if let Some(p2) = p3.next_table(page.p3_index()) {
let p2_entry = &p2[page.p2_index()];
// 2MiB page?
if let Some(start_frame) = p2_entry.pointed_frame() {
if p2_entry.flags().contains(HUGE_PAGE) {
// address must be 2MiB aligned
assert!(start_frame.number % ENTRY_COUNT == 0);
return Some(Frame {
number: start_frame.number + page.p1_index()
});
}
}
}
None
})
};
p3.and_then(|p3| p3.next_table(page.p3_index()))
.and_then(|p2| p2.next_table(page.p2_index()))
.and_then(|p1| p1[page.p1_index()].pointed_frame())
.or_else(huge_page)
}
pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags,
allocator: &mut A)
where A: FrameAllocator
{
let p4 = self.p4_mut();
let mut p3 = p4.next_table_create(page.p4_index(), allocator);
let mut p2 = p3.next_table_create(page.p3_index(), allocator);
let mut p1 = p2.next_table_create(page.p2_index(), allocator);
assert!(p1[page.p1_index()].is_unused());
p1[page.p1_index()].set(frame, flags | PRESENT);
}
pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let frame = allocator.allocate_frame().expect("out of memory");
self.map_to(page, frame, flags, allocator)
}
pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let page = Page::containing_address(frame.start_address());
self.map_to(page, frame, flags, allocator)
}
pub fn unmap<A>(&mut self, page: Page, allocator: &mut A)
where A: FrameAllocator
{
use x86_64::instructions::tlb;
use x86_64::VirtualAddress;
assert!(self.translate(page.start_address()).is_some());
let p1 = self.p4_mut()
.next_table_mut(page.p4_index())
.and_then(|p3| p3.next_table_mut(page.p3_index()))
.and_then(|p2| p2.next_table_mut(page.p2_index()))
.expect("mapping code does not support huge pages");
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
tlb::flush(VirtualAddress(page.start_address()));
// TODO free p(1,2,3) table if empty
//allocator.deallocate_frame(frame);
}
}

src/memory/paging/mod.rs

@@ -1,16 +1,23 @@
pub use self::entry::*;
pub use self::mapper::Mapper;
use core::ops::{Deref, DerefMut};
use core::ptr::Unique;
use memory::{PAGE_SIZE, Frame, FrameAllocator};
use multiboot2::BootInformation;
use self::table::{Table, Level4};
use self::temporary_page::TemporaryPage;
mod entry;
mod table;
mod temporary_page;
mod mapper;
const ENTRY_COUNT: usize = 512;
pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
number: usize,
}
@@ -39,119 +46,190 @@ impl Page {
fn p1_index(&self) -> usize {
(self.number >> 0) & 0o777
}
pub fn range_inclusive(start: Page, end: Page) -> PageIter {
PageIter {
start: start,
end: end,
}
}
}
pub struct PageIter {
start: Page,
end: Page,
}
impl Iterator for PageIter {
type Item = Page;
fn next(&mut self) -> Option<Page> {
if self.start <= self.end {
let page = self.start;
self.start.number += 1;
Some(page)
} else {
None
}
}
}
pub struct ActivePageTable {
p4: Unique<Table<Level4>>,
mapper: Mapper,
}
impl Deref for ActivePageTable {
type Target = Mapper;
fn deref(&self) -> &Mapper {
&self.mapper
}
}
impl DerefMut for ActivePageTable {
fn deref_mut(&mut self) -> &mut Mapper {
&mut self.mapper
}
}
impl ActivePageTable {
pub unsafe fn new() -> ActivePageTable {
unsafe fn new() -> ActivePageTable {
ActivePageTable {
p4: Unique::new_unchecked(table::P4),
mapper: Mapper::new(),
}
}
fn p4(&self) -> &Table<Level4> {
unsafe { self.p4.as_ref() }
}
fn p4_mut(&mut self) -> &mut Table<Level4> {
unsafe { self.p4.as_mut() }
}
pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
let offset = virtual_address % PAGE_SIZE;
self.translate_page(Page::containing_address(virtual_address))
.map(|frame| frame.number * PAGE_SIZE + offset)
}
fn translate_page(&self, page: Page) -> Option<Frame> {
use self::entry::HUGE_PAGE;
let p3 = self.p4().next_table(page.p4_index());
let huge_page = || {
p3.and_then(|p3| {
let p3_entry = &p3[page.p3_index()];
// 1GiB page?
if let Some(start_frame) = p3_entry.pointed_frame() {
if p3_entry.flags().contains(HUGE_PAGE) {
// address must be 1GiB aligned
assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
return Some(Frame {
number: start_frame.number + page.p2_index() *
ENTRY_COUNT + page.p1_index(),
});
}
}
if let Some(p2) = p3.next_table(page.p3_index()) {
let p2_entry = &p2[page.p2_index()];
// 2MiB page?
if let Some(start_frame) = p2_entry.pointed_frame() {
if p2_entry.flags().contains(HUGE_PAGE) {
// address must be 2MiB aligned
assert!(start_frame.number % ENTRY_COUNT == 0);
return Some(Frame {
number: start_frame.number + page.p1_index()
});
}
}
}
None
})
};
p3.and_then(|p3| p3.next_table(page.p3_index()))
.and_then(|p2| p2.next_table(page.p2_index()))
.and_then(|p1| p1[page.p1_index()].pointed_frame())
.or_else(huge_page)
}
pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags,
allocator: &mut A)
where A: FrameAllocator
{
let p4 = self.p4_mut();
let mut p3 = p4.next_table_create(page.p4_index(), allocator);
let mut p2 = p3.next_table_create(page.p3_index(), allocator);
let mut p1 = p2.next_table_create(page.p2_index(), allocator);
assert!(p1[page.p1_index()].is_unused());
p1[page.p1_index()].set(frame, flags | PRESENT);
}
pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let frame = allocator.allocate_frame().expect("out of memory");
self.map_to(page, frame, flags, allocator)
}
pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let page = Page::containing_address(frame.start_address());
self.map_to(page, frame, flags, allocator)
}
fn unmap<A>(&mut self, page: Page, allocator: &mut A)
where A: FrameAllocator
pub fn with<F>(&mut self,
table: &mut InactivePageTable,
temporary_page: &mut temporary_page::TemporaryPage, // new
f: F)
where F: FnOnce(&mut Mapper)
{
use x86_64::instructions::tlb;
use x86_64::VirtualAddress;
use x86_64::registers::control_regs;
assert!(self.translate(page.start_address()).is_some());
{
let backup = Frame::containing_address(
control_regs::cr3().0 as usize);
let p1 = self.p4_mut()
.next_table_mut(page.p4_index())
.and_then(|p3| p3.next_table_mut(page.p3_index()))
.and_then(|p2| p2.next_table_mut(page.p2_index()))
.expect("mapping code does not support huge pages");
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
tlb::flush(VirtualAddress(page.start_address()));
// TODO free p(1,2,3) table if empty
//allocator.deallocate_frame(frame);
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), self);
// overwrite recursive mapping
self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
tlb::flush_all();
// execute f in the new context
f(self);
// restore recursive mapping to original p4 table
p4_table[511].set(backup, PRESENT | WRITABLE);
tlb::flush_all();
}
temporary_page.unmap(self);
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86_64::PhysicalAddress;
use x86_64::registers::control_regs;
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(
control_regs::cr3().0 as usize
),
};
unsafe {
control_regs::cr3_write(PhysicalAddress(
new_table.p4_frame.start_address() as u64));
}
old_table
}
}
pub struct InactivePageTable {
p4_frame: Frame,
}
impl InactivePageTable {
pub fn new(frame: Frame,
active_table: &mut ActivePageTable,
temporary_page: &mut TemporaryPage)
-> InactivePageTable {
{
let table = temporary_page.map_table_frame(frame.clone(),
active_table);
// now we are able to zero the table
table.zero();
// set up recursive mapping for the table
table[511].set(frame.clone(), PRESENT | WRITABLE);
}
temporary_page.unmap(active_table);
InactivePageTable { p4_frame: frame }
}
}
pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
-> ActivePageTable
where A: FrameAllocator
{
let mut temporary_page = TemporaryPage::new(Page { number: 0xcafebabe },
allocator);
let mut active_table = unsafe { ActivePageTable::new() };
let mut new_table = {
let frame = allocator.allocate_frame().expect("no more frames");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
let elf_sections_tag = boot_info.elf_sections_tag()
.expect("Memory map tag required");
for section in elf_sections_tag.sections() {
use self::entry::WRITABLE;
if !section.is_allocated() {
// section is not loaded to memory
continue;
}
assert!(section.start_address() % PAGE_SIZE == 0,
"sections need to be page aligned");
println!("mapping section at addr: {:#x}, size: {:#x}",
section.addr, section.size);
let flags = EntryFlags::from_elf_section_flags(section);
let start_frame = Frame::containing_address(section.start_address());
let end_frame = Frame::containing_address(section.end_address() - 1);
for frame in Frame::range_inclusive(start_frame, end_frame) {
mapper.identity_map(frame, flags, allocator);
}
}
// identity map the VGA text buffer
let vga_buffer_frame = Frame::containing_address(0xb8000);
mapper.identity_map(vga_buffer_frame, WRITABLE, allocator);
// identity map the multiboot info structure
let multiboot_start = Frame::containing_address(boot_info.start_address());
let multiboot_end = Frame::containing_address(boot_info.end_address() - 1);
for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
mapper.identity_map(frame, PRESENT, allocator);
}
});
let old_table = active_table.switch(new_table);
println!("NEW TABLE!!!");
// turn the old p4 page into a guard page
let old_p4_page = Page::containing_address(
old_table.p4_frame.start_address()
);
active_table.unmap(old_p4_page, allocator);
println!("guard page at {:#x}", old_p4_page.start_address());
active_table
}

src/memory/paging/temporary_page.rs Normal file

@@ -0,0 +1,79 @@
use super::{Page, ActivePageTable, VirtualAddress};
use super::table::{Table, Level1};
use memory::{Frame, FrameAllocator};
pub struct TemporaryPage {
page: Page,
allocator: TinyAllocator,
}
impl TemporaryPage {
pub fn new<A>(page: Page, allocator: &mut A) -> TemporaryPage
where A: FrameAllocator
{
TemporaryPage {
page: page,
allocator: TinyAllocator::new(allocator),
}
}
/// Maps the temporary page to the given frame in the active table.
/// Returns the start address of the temporary page.
pub fn map(&mut self, frame: Frame, active_table: &mut ActivePageTable)
-> VirtualAddress
{
use super::entry::WRITABLE;
assert!(active_table.translate_page(self.page).is_none(),
"temporary page is already mapped");
active_table.map_to(self.page, frame, WRITABLE, &mut self.allocator);
self.page.start_address()
}
/// Unmaps the temporary page in the active table.
pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
active_table.unmap(self.page, &mut self.allocator)
}
/// Maps the temporary page to the given page table frame in the active
/// table. Returns a reference to the now mapped table.
pub fn map_table_frame(&mut self,
frame: Frame,
active_table: &mut ActivePageTable)
-> &mut Table<Level1> {
unsafe { &mut *(self.map(frame, active_table) as *mut Table<Level1>) }
}
}
struct TinyAllocator([Option<Frame>; 3]);
impl TinyAllocator {
fn new<A>(allocator: &mut A) -> TinyAllocator
where A: FrameAllocator
{
let mut f = || allocator.allocate_frame();
let frames = [f(), f(), f()];
TinyAllocator(frames)
}
}
impl FrameAllocator for TinyAllocator {
fn allocate_frame(&mut self) -> Option<Frame> {
for frame_option in &mut self.0 {
if frame_option.is_some() {
return frame_option.take();
}
}
None
}
fn deallocate_frame(&mut self, frame: Frame) {
for frame_option in &mut self.0 {
if frame_option.is_none() {
*frame_option = Some(frame);
return;
}
}
panic!("Tiny allocator can hold only 3 frames.");
}
}