Mirror of https://github.com/phil-opp/blog_os.git (synced 2025-12-16 14:27:49 +00:00)

Compare commits: first_edit...first_edit (52 commits)
| Author | SHA1 | Date |
|---|---|---|
| | ad211de615 | |
| | 01f8c43ffb | |
| | f2bbe43099 | |
| | 76550dcd95 | |
| | c0d403abbe | |
| | 9dc998222a | |
| | 1f6633fe44 | |
| | 5f017124dd | |
| | 36369cfbe2 | |
| | c6dd37dcc5 | |
| | 2a3ce863fd | |
| | ffaddeb84e | |
| | 0ce2b46ad1 | |
| | 11993b7e15 | |
| | fdacfb24f0 | |
| | 564c6b64e9 | |
| | fa3b572d68 | |
| | 40f1732ccb | |
| | 5d898d8474 | |
| | e029eabe18 | |
| | 60d7c736a5 | |
| | 2f30b0f7cf | |
| | ce9c4d6e43 | |
| | 4160cb6d7e | |
| | b2e79752fd | |
| | 11ae7e8aeb | |
| | 1737f48284 | |
| | 1f1f6c62da | |
| | bb3f17c0b9 | |
| | 1007c5157d | |
| | 55e602dc61 | |
| | eb5c9afa51 | |
| | 0ee4ed738a | |
| | b49cb6986c | |
| | 15c9f43622 | |
| | 3696c7bacb | |
| | bb473c7907 | |
| | dedf6e8959 | |
| | 83d56aa9de | |
| | a7170a4a44 | |
| | 0874625269 | |
| | 3706331a43 | |
| | 8fcea7951b | |
| | e50d70c02e | |
| | 2b7fa410fb | |
| | 62d655fbdd | |
| | fe464463eb | |
| | 148d506629 | |
| | f957f2ecde | |
| | c4c27c10e6 | |
| | c7c02d7dca | |
| | 9f84e37e1b | |

(Author and date columns were not captured in this mirror snapshot.)
Cargo.toml

@@ -11,3 +11,7 @@ rlibc = "1.0"
 volatile = "0.1.0"
 spin = "0.4.5"
 multiboot2 = "0.1.0"
+bitflags = "0.7.0"
+x86_64 = "0.1.2"
+once = "0.3.3"
+linked_list_allocator = "0.4.2"
README.md

@@ -1,7 +1,7 @@
-# Blog OS (Allocating Frames)
-[](https://travis-ci.org/phil-opp/blog_os/branches)
+# Blog OS (Kernel Heap)
+[](https://travis-ci.org/phil-opp/blog_os/branches)
 
-This repository contains the source code for the [Allocating Frames](http://os.phil-opp.com/allocating-frames.html) post of the [Writing an OS in Rust](http://os.phil-opp.com) series.
+This repository contains the source code for the [Kernel Heap](http://os.phil-opp.com/kernel-heap.html) post of the [Writing an OS in Rust](http://os.phil-opp.com) series.
 
 **Check out the [master branch](https://github.com/phil-opp/blog_os) for more information.**
2 Xargo.toml Normal file

@@ -0,0 +1,2 @@
+[target.x86_64-blog_os.dependencies]
+alloc = {}
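Cross-compiling the `alloc` crate for the custom target is what makes heap-backed types usable once a `#[global_allocator]` is registered (see the `src/lib.rs` diff below). A minimal sketch of what that enables; `heap_smoke_test` is a hypothetical illustration, not repo code:

```rust
// Sketch (hypothetical test): with `#[macro_use] extern crate alloc` and a
// registered global allocator, these all work in the no_std kernel.
fn heap_smoke_test() {
    use alloc::boxed::Box;
    use alloc::string::String;

    let boxed = Box::new(42u64); // allocates 8 bytes on the kernel heap
    let s: String = format!("value: {}", *boxed); // format! comes from alloc
    assert_eq!(s, "value: 42");
}
```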
boot.asm

@@ -85,6 +85,11 @@ check_long_mode:
     jmp error
 
 set_up_page_tables:
+    ; map P4 table recursively
+    mov eax, p4_table
+    or eax, 0b11 ; present + writable
+    mov [p4_table + 511 * 8], eax
+
     ; map first P4 entry to P3 table
     mov eax, p3_table
     or eax, 0b11 ; present + writable
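Pointing P4 entry 511 back at the P4 frame is what later lets `src/memory/paging/table.rs` reach every page table through fixed virtual addresses. A hedged sketch of the address arithmetic behind that; the constants follow from the four 9-bit index fields and are part of this illustration, not the diff:

```rust
// Sketch: virtual address of the P4 table under recursive mapping,
// assuming entry 511 of P4 points back at the P4 frame itself.
fn recursive_p4_address() -> u64 {
    let index = 511u64;
    // Follow the recursive entry four times: every 9-bit index is 511.
    let addr = (index << 39) | (index << 30) | (index << 21) | (index << 12);
    // Sign-extend bit 47 into the canonical upper half.
    addr | 0xffff_0000_0000_0000
    // == 0xffff_ffff_ffff_f000, the P4 constant used in table.rs
}
```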
linker.ld

@@ -1,24 +1,53 @@
 ENTRY(start)
 
 SECTIONS {
   . = 1M;
 
-  .boot :
-  {
-    /* ensure that the multiboot header is at the beginning */
-    KEEP(*(.multiboot_header))
-  }
-
-  .text :
-  {
-    *(.text .text.*)
-  }
-
-  .rodata : {
-    *(.rodata .rodata.*)
-  }
+  .rodata :
+  {
+    /* ensure that the multiboot header is at the beginning */
+    KEEP(*(.multiboot_header))
+    *(.rodata .rodata.*)
+    . = ALIGN(4K);
+  }
+
+  .text :
+  {
+    *(.text .text.*)
+    . = ALIGN(4K);
+  }
+
+  .data :
+  {
+    *(.data .data.*)
+    . = ALIGN(4K);
+  }
+
+  .bss :
+  {
+    *(.bss .bss.*)
+    . = ALIGN(4K);
+  }
+
+  .got :
+  {
+    *(.got)
+    . = ALIGN(4K);
+  }
+
+  .got.plt :
+  {
+    *(.got.plt)
+    . = ALIGN(4K);
+  }
 
-  .data.rel.ro : {
-    *(.data.rel.ro.local*) *(.data.rel.ro .data.rel.ro.*)
-  }
+  .data.rel.ro : ALIGN(4K) {
+    *(.data.rel.ro.local*) *(.data.rel.ro .data.rel.ro.*)
+    . = ALIGN(4K);
+  }
+
+  .gcc_except_table : ALIGN(4K) {
+    *(.gcc_except_table)
+    . = ALIGN(4K);
+  }
 }
84 src/lib.rs

@@ -1,63 +1,73 @@
 #![feature(lang_items)]
 #![feature(const_fn)]
-#![feature(const_unique_new)]
+#![feature(alloc)]
+#![feature(const_unique_new, const_atomic_usize_new)]
 #![feature(unique)]
+#![feature(allocator_api)]
+#![feature(global_allocator)]
 #![no_std]
 
+#[macro_use]
+extern crate alloc;
+
 extern crate rlibc;
 extern crate volatile;
 extern crate spin;
 extern crate multiboot2;
+#[macro_use]
+extern crate bitflags;
+extern crate x86_64;
+#[macro_use]
+extern crate once;
+extern crate linked_list_allocator;
 
 #[macro_use]
 mod vga_buffer;
 mod memory;
 
 #[no_mangle]
-pub extern fn rust_main(multiboot_information_address: usize) {
-    use memory::FrameAllocator;
-
+pub extern "C" fn rust_main(multiboot_information_address: usize) {
+    // ATTENTION: we have a very small stack and no guard page
     vga_buffer::clear_screen();
     println!("Hello World{}", "!");
 
-    let boot_info = unsafe{ multiboot2::load(multiboot_information_address) };
-    let memory_map_tag = boot_info.memory_map_tag()
-        .expect("Memory map tag required");
+    let boot_info = unsafe {
+        multiboot2::load(multiboot_information_address)
+    };
+    enable_nxe_bit();
+    enable_write_protect_bit();
 
-    println!("memory areas:");
-    for area in memory_map_tag.memory_areas() {
-        println!("    start: 0x{:x}, length: 0x{:x}",
-            area.base_addr, area.length);
-    }
+    // set up guard page and map the heap pages
+    memory::init(boot_info);
 
-    let elf_sections_tag = boot_info.elf_sections_tag()
-        .expect("Elf-sections tag required");
+    unsafe {
+        HEAP_ALLOCATOR.lock().init(HEAP_START, HEAP_START + HEAP_SIZE);
+    }
 
-    println!("kernel sections:");
-    for section in elf_sections_tag.sections() {
-        println!("    addr: 0x{:x}, size: 0x{:x}, flags: 0x{:x}",
-            section.addr, section.size, section.flags);
-    }
+    for i in 0..10000 {
+        format!("Some String");
+    }
 
-    let kernel_start = elf_sections_tag.sections().map(|s| s.addr)
-        .min().unwrap();
-    let kernel_end = elf_sections_tag.sections().map(|s| s.addr + s.size)
-        .max().unwrap();
-    let multiboot_start = multiboot_information_address;
-    let multiboot_end = multiboot_start + (boot_info.total_size as usize);
+    println!("It did not crash!");
 
-    let mut frame_allocator = memory::AreaFrameAllocator::new(
-        kernel_start as usize, kernel_end as usize, multiboot_start,
-        multiboot_end, memory_map_tag.memory_areas());
-
-    for i in 0.. {
-        if let None = frame_allocator.allocate_frame() {
-            println!("allocated {} frames", i);
-            break;
-        }
-    }
-
-    loop{}
+    loop {}
 }
 
+fn enable_nxe_bit() {
+    use x86_64::registers::msr::{IA32_EFER, rdmsr, wrmsr};
+
+    let nxe_bit = 1 << 11;
+    unsafe {
+        let efer = rdmsr(IA32_EFER);
+        wrmsr(IA32_EFER, efer | nxe_bit);
+    }
+}
+
+fn enable_write_protect_bit() {
+    use x86_64::registers::control_regs::{cr0, cr0_write, Cr0};
+
+    unsafe { cr0_write(cr0() | Cr0::WRITE_PROTECT) };
+}
+
 #[lang = "eh_personality"] extern fn eh_personality() {}

@@ -69,3 +79,11 @@ pub extern fn panic_fmt(fmt: core::fmt::Arguments, file: &'static str, line: u32
     println!("    {}", fmt);
     loop{}
 }
+
+use linked_list_allocator::LockedHeap;
+
+pub const HEAP_START: usize = 0o_000_001_000_000_0000;
+pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
+
+#[global_allocator]
+static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();
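The octal literal for `HEAP_START` lines up with the four 9-bit page-table index fields plus the 12-bit page offset, which makes the chosen heap location readable at a glance. A small sketch of that arithmetic; the assertions are illustrative, not repo tests:

```rust
// Sketch: why HEAP_START is written in octal. Each group of three octal
// digits is one 9-bit page-table index; the last four digits are the
// 12-bit page offset.
const HEAP_START: usize = 0o_000_001_000_000_0000;

fn heap_start_breakdown() {
    assert_eq!(HEAP_START, 1 << 30);           // 1 GiB: P4 index 0, P3 index 1
    assert_eq!((HEAP_START >> 39) & 0o777, 0); // P4 index
    assert_eq!((HEAP_START >> 30) & 0o777, 1); // P3 index
    assert_eq!((HEAP_START >> 21) & 0o777, 0); // P2 index
    assert_eq!((HEAP_START >> 12) & 0o777, 0); // P1 index
}
```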
62 src/memory/heap_allocator.rs Normal file

@@ -0,0 +1,62 @@
use alloc::heap::{Alloc, AllocErr, Layout};

use core::sync::atomic::{AtomicUsize, Ordering};

/// A simple allocator that allocates memory linearly and ignores freed memory.
#[derive(Debug)]
pub struct BumpAllocator {
    heap_start: usize,
    heap_end: usize,
    next: AtomicUsize,
}

impl BumpAllocator {
    pub const fn new(heap_start: usize, heap_end: usize) -> Self {
        Self { heap_start, heap_end, next: AtomicUsize::new(heap_start) }
    }
}

unsafe impl<'a> Alloc for &'a BumpAllocator {
    unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
        loop {
            // load current state of the `next` field
            let current_next = self.next.load(Ordering::Relaxed);
            let alloc_start = align_up(current_next, layout.align());
            let alloc_end = alloc_start.saturating_add(layout.size());

            if alloc_end <= self.heap_end {
                // update the `next` pointer if it still has the value `current_next`
                let next_now = self.next.compare_and_swap(current_next, alloc_end,
                    Ordering::Relaxed);
                if next_now == current_next {
                    // next address was successfully updated, allocation succeeded
                    return Ok(alloc_start as *mut u8);
                }
            } else {
                return Err(AllocErr::Exhausted{ request: layout })
            }
        }
    }

    unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
        // do nothing, leak memory
    }
}

/// Align downwards. Returns the greatest x with alignment `align`
/// so that x <= addr. The alignment must be a power of 2.
pub fn align_down(addr: usize, align: usize) -> usize {
    if align.is_power_of_two() {
        addr & !(align - 1)
    } else if align == 0 {
        addr
    } else {
        panic!("`align` must be a power of 2");
    }
}

/// Align upwards. Returns the smallest x with alignment `align`
/// so that x >= addr. The alignment must be a power of 2.
pub fn align_up(addr: usize, align: usize) -> usize {
    align_down(addr + align - 1, align)
}
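A quick illustration of the two alignment helpers above; the concrete values are examples chosen for this sketch, not repo tests:

```rust
// Sketch: behavior of align_down / align_up for a 4096-byte alignment.
fn alignment_examples() {
    // align_down rounds toward lower addresses...
    assert_eq!(align_down(0x1234, 0x1000), 0x1000);
    // ...align_up toward higher ones.
    assert_eq!(align_up(0x1234, 0x1000), 0x2000);
    // Already-aligned addresses are returned unchanged by both.
    assert_eq!(align_up(0x2000, 0x1000), 0x2000);
}
```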
src/memory/mod.rs

@@ -1,9 +1,55 @@
 pub use self::area_frame_allocator::AreaFrameAllocator;
+pub use self::paging::remap_the_kernel;
+use self::paging::PhysicalAddress;
+use multiboot2::BootInformation;
 
 mod area_frame_allocator;
+pub mod heap_allocator;
+mod paging;
 
 pub const PAGE_SIZE: usize = 4096;
 
+pub fn init(boot_info: &BootInformation) {
+    assert_has_not_been_called!("memory::init must be called only once");
+
+    let memory_map_tag = boot_info.memory_map_tag().expect(
+        "Memory map tag required");
+    let elf_sections_tag = boot_info.elf_sections_tag().expect(
+        "Elf sections tag required");
+
+    let kernel_start = elf_sections_tag.sections()
+        .filter(|s| s.is_allocated()).map(|s| s.addr).min().unwrap();
+    let kernel_end = elf_sections_tag.sections()
+        .filter(|s| s.is_allocated()).map(|s| s.addr + s.size).max()
+        .unwrap();
+
+    println!("kernel start: {:#x}, kernel end: {:#x}",
+             kernel_start,
+             kernel_end);
+    println!("multiboot start: {:#x}, multiboot end: {:#x}",
+             boot_info.start_address(),
+             boot_info.end_address());
+
+    let mut frame_allocator = AreaFrameAllocator::new(
+        kernel_start as usize, kernel_end as usize,
+        boot_info.start_address(), boot_info.end_address(),
+        memory_map_tag.memory_areas());
+
+    let mut active_table = paging::remap_the_kernel(&mut frame_allocator,
+                                                    boot_info);
+
+    use self::paging::Page;
+    use {HEAP_START, HEAP_SIZE};
+
+    let heap_start_page = Page::containing_address(HEAP_START);
+    let heap_end_page = Page::containing_address(HEAP_START + HEAP_SIZE-1);
+
+    for page in Page::range_inclusive(heap_start_page, heap_end_page) {
+        active_table.map(page, paging::WRITABLE, &mut frame_allocator);
+    }
+}
+
 #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
 pub struct Frame {
     number: usize,

@@ -13,8 +59,42 @@ impl Frame {
     fn containing_address(address: usize) -> Frame {
         Frame{ number: address / PAGE_SIZE }
     }
+
+    fn start_address(&self) -> PhysicalAddress {
+        self.number * PAGE_SIZE
+    }
+
+    fn clone(&self) -> Frame {
+        Frame { number: self.number }
+    }
+
+    fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
+        FrameIter {
+            start: start,
+            end: end,
+        }
+    }
+}
+
+struct FrameIter {
+    start: Frame,
+    end: Frame,
+}
+
+impl Iterator for FrameIter {
+    type Item = Frame;
+
+    fn next(&mut self) -> Option<Frame> {
+        if self.start <= self.end {
+            let frame = self.start.clone();
+            self.start.number += 1;
+            Some(frame)
+        } else {
+            None
+        }
+    }
 }
 
 pub trait FrameAllocator {
     fn allocate_frame(&mut self) -> Option<Frame>;
     fn deallocate_frame(&mut self, frame: Frame);
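With the constants from this branch, the heap-mapping loop in `memory::init` touches a small, fixed number of pages. A hedged back-of-the-envelope check (illustration only):

```rust
// Sketch: how many pages Page::range_inclusive covers in memory::init,
// using this branch's constants (100 KiB heap, 4 KiB pages).
const PAGE_SIZE: usize = 4096;
const HEAP_SIZE: usize = 100 * 1024;

fn heap_page_count() -> usize {
    // ceil(HEAP_SIZE / PAGE_SIZE) pages when HEAP_START is page aligned.
    (HEAP_SIZE + PAGE_SIZE - 1) / PAGE_SIZE // == 25
}
```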
71 src/memory/paging/entry.rs Normal file

@@ -0,0 +1,71 @@
use memory::Frame;
use multiboot2::ElfSection;

pub struct Entry(u64);

impl Entry {
    pub fn is_unused(&self) -> bool {
        self.0 == 0
    }

    pub fn set_unused(&mut self) {
        self.0 = 0;
    }

    pub fn flags(&self) -> EntryFlags {
        EntryFlags::from_bits_truncate(self.0)
    }

    pub fn pointed_frame(&self) -> Option<Frame> {
        if self.flags().contains(PRESENT) {
            Some(Frame::containing_address(
                self.0 as usize & 0x000fffff_fffff000
            ))
        } else {
            None
        }
    }

    pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
        assert!(frame.start_address() & !0x000fffff_fffff000 == 0);
        self.0 = (frame.start_address() as u64) | flags.bits();
    }
}

bitflags! {
    pub flags EntryFlags: u64 {
        const PRESENT =         1 << 0,
        const WRITABLE =        1 << 1,
        const USER_ACCESSIBLE = 1 << 2,
        const WRITE_THROUGH =   1 << 3,
        const NO_CACHE =        1 << 4,
        const ACCESSED =        1 << 5,
        const DIRTY =           1 << 6,
        const HUGE_PAGE =       1 << 7,
        const GLOBAL =          1 << 8,
        const NO_EXECUTE =      1 << 63,
    }
}

impl EntryFlags {
    pub fn from_elf_section_flags(section: &ElfSection) -> EntryFlags {
        use multiboot2::{ELF_SECTION_ALLOCATED, ELF_SECTION_WRITABLE,
            ELF_SECTION_EXECUTABLE};

        let mut flags = EntryFlags::empty();

        if section.flags().contains(ELF_SECTION_ALLOCATED) {
            // section is loaded to memory
            flags = flags | PRESENT;
        }
        if section.flags().contains(ELF_SECTION_WRITABLE) {
            flags = flags | WRITABLE;
        }
        if !section.flags().contains(ELF_SECTION_EXECUTABLE) {
            flags = flags | NO_EXECUTE;
        }

        flags
    }
}
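An entry packs the frame's physical address (bits 12–51) and the flag bits into a single `u64`, so both can be recovered later. A hedged round-trip sketch, written as if inside `entry.rs` (the address `0x5000` is an arbitrary page-aligned example):

```rust
// Sketch: a page-table entry round trip using the API above.
fn entry_round_trip() {
    let mut entry = Entry(0);
    assert!(entry.is_unused());

    let frame = Frame::containing_address(0x5000);
    entry.set(frame, PRESENT | WRITABLE);

    // Both the flags and the pointed frame survive the round trip.
    assert!(entry.flags().contains(PRESENT | WRITABLE));
    assert_eq!(entry.pointed_frame(), Some(Frame::containing_address(0x5000)));
}
```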
118 src/memory/paging/mapper.rs Normal file

@@ -0,0 +1,118 @@
use super::{VirtualAddress, PhysicalAddress, Page, ENTRY_COUNT};
use super::entry::*;
use super::table::{self, Table, Level4, Level1};
use memory::{PAGE_SIZE, Frame, FrameAllocator};
use core::ptr::Unique;

pub struct Mapper {
    p4: Unique<Table<Level4>>,
}

impl Mapper {
    pub unsafe fn new() -> Mapper {
        Mapper {
            p4: Unique::new_unchecked(table::P4),
        }
    }

    pub fn p4(&self) -> &Table<Level4> {
        unsafe { self.p4.as_ref() }
    }

    pub fn p4_mut(&mut self) -> &mut Table<Level4> {
        unsafe { self.p4.as_mut() }
    }

    pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
        let offset = virtual_address % PAGE_SIZE;
        self.translate_page(Page::containing_address(virtual_address))
            .map(|frame| frame.number * PAGE_SIZE + offset)
    }

    pub fn translate_page(&self, page: Page) -> Option<Frame> {
        let p3 = self.p4().next_table(page.p4_index());

        let huge_page = || {
            p3.and_then(|p3| {
                let p3_entry = &p3[page.p3_index()];
                // 1GiB page?
                if let Some(start_frame) = p3_entry.pointed_frame() {
                    if p3_entry.flags().contains(HUGE_PAGE) {
                        // address must be 1GiB aligned
                        assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
                        return Some(Frame {
                            number: start_frame.number + page.p2_index() *
                                    ENTRY_COUNT + page.p1_index(),
                        });
                    }
                }
                if let Some(p2) = p3.next_table(page.p3_index()) {
                    let p2_entry = &p2[page.p2_index()];
                    // 2MiB page?
                    if let Some(start_frame) = p2_entry.pointed_frame() {
                        if p2_entry.flags().contains(HUGE_PAGE) {
                            // address must be 2MiB aligned
                            assert!(start_frame.number % ENTRY_COUNT == 0);
                            return Some(Frame {
                                number: start_frame.number + page.p1_index()
                            });
                        }
                    }
                }
                None
            })
        };

        p3.and_then(|p3| p3.next_table(page.p3_index()))
            .and_then(|p2| p2.next_table(page.p2_index()))
            .and_then(|p1| p1[page.p1_index()].pointed_frame())
            .or_else(huge_page)
    }

    pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags,
                     allocator: &mut A)
        where A: FrameAllocator
    {
        let p4 = self.p4_mut();
        let mut p3 = p4.next_table_create(page.p4_index(), allocator);
        let mut p2 = p3.next_table_create(page.p3_index(), allocator);
        let mut p1 = p2.next_table_create(page.p2_index(), allocator);

        assert!(p1[page.p1_index()].is_unused());
        p1[page.p1_index()].set(frame, flags | PRESENT);
    }

    pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let frame = allocator.allocate_frame().expect("out of memory");
        self.map_to(page, frame, flags, allocator)
    }

    pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let page = Page::containing_address(frame.start_address());
        self.map_to(page, frame, flags, allocator)
    }

    pub fn unmap<A>(&mut self, page: Page, allocator: &mut A)
        where A: FrameAllocator
    {
        use x86_64::instructions::tlb;
        use x86_64::VirtualAddress;

        assert!(self.translate(page.start_address()).is_some());

        let p1 = self.p4_mut()
            .next_table_mut(page.p4_index())
            .and_then(|p3| p3.next_table_mut(page.p3_index()))
            .and_then(|p2| p2.next_table_mut(page.p2_index()))
            .expect("mapping code does not support huge pages");
        let frame = p1[page.p1_index()].pointed_frame().unwrap();
        p1[page.p1_index()].set_unused();
        tlb::flush(VirtualAddress(page.start_address()));
        // TODO free p(1,2,3) table if empty
        //allocator.deallocate_frame(frame);
    }
}
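The split in `translate` — frame number times `PAGE_SIZE` plus the untranslated low 12 bits — is easy to sanity-check in isolation. A hedged sketch with made-up numbers standing in for a `translate_page` result:

```rust
// Sketch: the address split used by Mapper::translate.
const PAGE_SIZE: usize = 4096;

fn translate_by_hand(virtual_address: usize, mapped_frame_number: usize) -> usize {
    let offset = virtual_address % PAGE_SIZE; // low 12 bits pass through
    mapped_frame_number * PAGE_SIZE + offset
}

fn check() {
    // A virtual address 0x1234 inside page 1, mapped to frame 42:
    assert_eq!(translate_by_hand(0x1234, 42), 42 * 4096 + 0x234);
}
```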
235 src/memory/paging/mod.rs Normal file

@@ -0,0 +1,235 @@
pub use self::entry::*;
pub use self::mapper::Mapper;
use core::ops::{Deref, DerefMut};
use core::ptr::Unique;
use memory::{PAGE_SIZE, Frame, FrameAllocator};
use multiboot2::BootInformation;
use self::table::{Table, Level4};
use self::temporary_page::TemporaryPage;

mod entry;
mod table;
mod temporary_page;
mod mapper;

const ENTRY_COUNT: usize = 512;

pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
    number: usize,
}

impl Page {
    pub fn containing_address(address: VirtualAddress) -> Page {
        assert!(address < 0x0000_8000_0000_0000 ||
                address >= 0xffff_8000_0000_0000,
                "invalid address: 0x{:x}", address);
        Page { number: address / PAGE_SIZE }
    }

    fn start_address(&self) -> usize {
        self.number * PAGE_SIZE
    }

    fn p4_index(&self) -> usize {
        (self.number >> 27) & 0o777
    }
    fn p3_index(&self) -> usize {
        (self.number >> 18) & 0o777
    }
    fn p2_index(&self) -> usize {
        (self.number >> 9) & 0o777
    }
    fn p1_index(&self) -> usize {
        (self.number >> 0) & 0o777
    }

    pub fn range_inclusive(start: Page, end: Page) -> PageIter {
        PageIter {
            start: start,
            end: end,
        }
    }
}

pub struct PageIter {
    start: Page,
    end: Page,
}

impl Iterator for PageIter {
    type Item = Page;

    fn next(&mut self) -> Option<Page> {
        if self.start <= self.end {
            let page = self.start;
            self.start.number += 1;
            Some(page)
        } else {
            None
        }
    }
}

pub struct ActivePageTable {
    mapper: Mapper,
}

impl Deref for ActivePageTable {
    type Target = Mapper;

    fn deref(&self) -> &Mapper {
        &self.mapper
    }
}

impl DerefMut for ActivePageTable {
    fn deref_mut(&mut self) -> &mut Mapper {
        &mut self.mapper
    }
}

impl ActivePageTable {
    unsafe fn new() -> ActivePageTable {
        ActivePageTable {
            mapper: Mapper::new(),
        }
    }

    pub fn with<F>(&mut self,
                   table: &mut InactivePageTable,
                   temporary_page: &mut temporary_page::TemporaryPage, // new
                   f: F)
        where F: FnOnce(&mut Mapper)
    {
        use x86_64::instructions::tlb;
        use x86_64::registers::control_regs;

        {
            let backup = Frame::containing_address(
                control_regs::cr3().0 as usize);

            // map temporary_page to current p4 table
            let p4_table = temporary_page.map_table_frame(backup.clone(), self);

            // overwrite recursive mapping
            self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
            tlb::flush_all();

            // execute f in the new context
            f(self);

            // restore recursive mapping to original p4 table
            p4_table[511].set(backup, PRESENT | WRITABLE);
            tlb::flush_all();
        }

        temporary_page.unmap(self);
    }

    pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
        use x86_64::PhysicalAddress;
        use x86_64::registers::control_regs;

        let old_table = InactivePageTable {
            p4_frame: Frame::containing_address(
                control_regs::cr3().0 as usize
            ),
        };
        unsafe {
            control_regs::cr3_write(PhysicalAddress(
                new_table.p4_frame.start_address() as u64));
        }
        old_table
    }
}

pub struct InactivePageTable {
    p4_frame: Frame,
}

impl InactivePageTable {
    pub fn new(frame: Frame,
               active_table: &mut ActivePageTable,
               temporary_page: &mut TemporaryPage)
               -> InactivePageTable {
        {
            let table = temporary_page.map_table_frame(frame.clone(),
                active_table);
            // now we are able to zero the table
            table.zero();
            // set up recursive mapping for the table
            table[511].set(frame.clone(), PRESENT | WRITABLE);
        }
        temporary_page.unmap(active_table);

        InactivePageTable { p4_frame: frame }
    }
}

pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
    -> ActivePageTable
    where A: FrameAllocator
{
    let mut temporary_page = TemporaryPage::new(Page { number: 0xcafebabe },
        allocator);

    let mut active_table = unsafe { ActivePageTable::new() };
    let mut new_table = {
        let frame = allocator.allocate_frame().expect("no more frames");
        InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
    };

    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
        let elf_sections_tag = boot_info.elf_sections_tag()
            .expect("Memory map tag required");

        for section in elf_sections_tag.sections() {
            use self::entry::WRITABLE;

            if !section.is_allocated() {
                // section is not loaded to memory
                continue;
            }
            assert!(section.start_address() % PAGE_SIZE == 0,
                    "sections need to be page aligned");

            println!("mapping section at addr: {:#x}, size: {:#x}",
                section.addr, section.size);

            let flags = EntryFlags::from_elf_section_flags(section);

            let start_frame = Frame::containing_address(section.start_address());
            let end_frame = Frame::containing_address(section.end_address() - 1);
            for frame in Frame::range_inclusive(start_frame, end_frame) {
                mapper.identity_map(frame, flags, allocator);
            }
        }

        // identity map the VGA text buffer
        let vga_buffer_frame = Frame::containing_address(0xb8000);
        mapper.identity_map(vga_buffer_frame, WRITABLE, allocator);

        // identity map the multiboot info structure
        let multiboot_start = Frame::containing_address(boot_info.start_address());
        let multiboot_end = Frame::containing_address(boot_info.end_address() - 1);
        for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
            mapper.identity_map(frame, PRESENT, allocator);
        }
    });

    let old_table = active_table.switch(new_table);
    println!("NEW TABLE!!!");

    // turn the old p4 page into a guard page
    let old_p4_page = Page::containing_address(
        old_table.p4_frame.start_address()
    );
    active_table.unmap(old_p4_page, allocator);
    println!("guard page at {:#x}", old_p4_page.start_address());

    active_table
}
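The assert in `Page::containing_address` encodes the x86_64 canonical-address rule: bits 48..64 must replicate bit 47, which leaves a "hole" between the two halves of the address space. A hedged sketch of the boundary cases (the helper restates the assert's condition for illustration):

```rust
// Sketch: the canonical-address check from Page::containing_address,
// restated as a standalone predicate.
fn is_canonical(addr: usize) -> bool {
    addr < 0x0000_8000_0000_0000 || addr >= 0xffff_8000_0000_0000
}

fn canonical_examples() {
    assert!(is_canonical(0x0000_7fff_ffff_f000));  // top of the lower half
    assert!(is_canonical(0xffff_8000_0000_0000));  // bottom of the upper half
    assert!(!is_canonical(0x0000_8000_0000_0000)); // inside the hole: panics above
}
```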
100 src/memory/paging/table.rs Normal file

@@ -0,0 +1,100 @@
use core::marker::PhantomData;
use core::ops::{Index, IndexMut};
use memory::FrameAllocator;
use memory::paging::entry::*;
use memory::paging::ENTRY_COUNT;

pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;

pub struct Table<L: TableLevel> {
    entries: [Entry; ENTRY_COUNT],
    level: PhantomData<L>,
}

impl<L> Table<L> where L: TableLevel {
    pub fn zero(&mut self) {
        for entry in self.entries.iter_mut() {
            entry.set_unused();
        }
    }
}

impl<L> Table<L> where L: HierarchicalLevel {
    fn next_table_address(&self, index: usize) -> Option<usize> {
        let entry_flags = self[index].flags();
        if entry_flags.contains(PRESENT) && !entry_flags.contains(HUGE_PAGE) {
            let table_address = self as *const _ as usize;
            Some((table_address << 9) | (index << 12))
        } else {
            None
        }
    }

    pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
        self.next_table_address(index)
            .map(|address| unsafe { &*(address as *const _) })
    }

    pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
        self.next_table_address(index)
            .map(|address| unsafe { &mut *(address as *mut _) })
    }

    pub fn next_table_create<A>(&mut self,
                                index: usize,
                                allocator: &mut A)
                                -> &mut Table<L::NextLevel>
        where A: FrameAllocator
    {
        if self.next_table(index).is_none() {
            assert!(!self.entries[index].flags().contains(HUGE_PAGE),
                    "mapping code does not support huge pages");
            let frame = allocator.allocate_frame().expect("no frames available");
            self.entries[index].set(frame, PRESENT | WRITABLE);
            self.next_table_mut(index).unwrap().zero();
        }
        self.next_table_mut(index).unwrap()
    }
}

impl<L> Index<usize> for Table<L> where L: TableLevel {
    type Output = Entry;

    fn index(&self, index: usize) -> &Entry {
        &self.entries[index]
    }
}

impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
    fn index_mut(&mut self, index: usize) -> &mut Entry {
        &mut self.entries[index]
    }
}

pub trait TableLevel {}

pub enum Level4 {}
pub enum Level3 {}
pub enum Level2 {}
pub enum Level1 {}

impl TableLevel for Level4 {}
impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}

pub trait HierarchicalLevel: TableLevel {
    type NextLevel: TableLevel;
}

impl HierarchicalLevel for Level4 {
    type NextLevel = Level3;
}

impl HierarchicalLevel for Level3 {
    type NextLevel = Level2;
}

impl HierarchicalLevel for Level2 {
    type NextLevel = Level1;
}
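The `(table_address << 9) | (index << 12)` trick in `next_table_address` only works because of the recursive entry installed in boot.asm: shifting left by 9 drops one `511` prefix from the recursive address, and the chosen entry index slots into the freed 9-bit field. A hedged check starting from the `P4` constant above (index 0 is an arbitrary example; the shift discards bits above 63, which happens to keep the result canonical for these high addresses):

```rust
// Sketch: one step of next_table_address starting from the recursive P4.
fn next_table_address_example() {
    let p4: usize = 0xffffffff_fffff000; // the P4 constant
    let index = 0usize;                  // example P4 entry
    // One level down: the P3 table behind P4 entry 0.
    let p3 = (p4 << 9) | (index << 12);
    // Indices (511, 511, 511, 0), sign-extended.
    assert_eq!(p3, 0xffffffff_ffe00000);
}
```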
79 src/memory/paging/temporary_page.rs Normal file

@@ -0,0 +1,79 @@
use super::{Page, ActivePageTable, VirtualAddress};
use super::table::{Table, Level1};
use memory::{Frame, FrameAllocator};

pub struct TemporaryPage {
    page: Page,
    allocator: TinyAllocator,
}

impl TemporaryPage {
    pub fn new<A>(page: Page, allocator: &mut A) -> TemporaryPage
        where A: FrameAllocator
    {
        TemporaryPage {
            page: page,
            allocator: TinyAllocator::new(allocator),
        }
    }

    /// Maps the temporary page to the given frame in the active table.
    /// Returns the start address of the temporary page.
    pub fn map(&mut self, frame: Frame, active_table: &mut ActivePageTable)
        -> VirtualAddress
    {
        use super::entry::WRITABLE;

        assert!(active_table.translate_page(self.page).is_none(),
                "temporary page is already mapped");
        active_table.map_to(self.page, frame, WRITABLE, &mut self.allocator);
        self.page.start_address()
    }

    /// Unmaps the temporary page in the active table.
    pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
        active_table.unmap(self.page, &mut self.allocator)
    }

    /// Maps the temporary page to the given page table frame in the active
    /// table. Returns a reference to the now mapped table.
    pub fn map_table_frame(&mut self,
                           frame: Frame,
                           active_table: &mut ActivePageTable)
                           -> &mut Table<Level1> {
        unsafe { &mut *(self.map(frame, active_table) as *mut Table<Level1>) }
    }
}

struct TinyAllocator([Option<Frame>; 3]);

impl TinyAllocator {
    fn new<A>(allocator: &mut A) -> TinyAllocator
        where A: FrameAllocator
    {
        let mut f = || allocator.allocate_frame();
        let frames = [f(), f(), f()];
        TinyAllocator(frames)
    }
}

impl FrameAllocator for TinyAllocator {
    fn allocate_frame(&mut self) -> Option<Frame> {
        for frame_option in &mut self.0 {
            if frame_option.is_some() {
                return frame_option.take();
            }
        }
        None
    }

    fn deallocate_frame(&mut self, frame: Frame) {
        for frame_option in &mut self.0 {
            if frame_option.is_none() {
                *frame_option = Some(frame);
                return;
            }
        }
        panic!("Tiny allocator can hold only 3 frames.");
    }
}
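`TinyAllocator` holds exactly three frames because mapping a single 4 KiB page can require at most one new P3, one new P2, and one new P1 table below the P4; unmapping hands frames back. A hedged sketch of that budget; `source` is a hypothetical `FrameAllocator` (assumed to have at least three free frames) used only for this illustration:

```rust
// Sketch: why three frames suffice for the temporary page's allocator.
fn tiny_allocator_budget<A: FrameAllocator>(source: &mut A) {
    let mut tiny = TinyAllocator::new(source); // takes 3 frames up front

    let worst_case_tables = 3; // new P3 + new P2 + new P1
    for _ in 0..worst_case_tables {
        assert!(tiny.allocate_frame().is_some());
    }
    // A fourth request cannot be served until a frame is deallocated.
    assert!(tiny.allocate_frame().is_none());
}
```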