Compare commits

...

81 Commits

Author SHA1 Message Date
Philipp Oppermann
1eb8f0f2c7 Wip 2019-06-17 13:23:39 +02:00
Philipp Oppermann
398ca5357c WIP 2019-03-18 18:28:04 +01:00
Philipp Oppermann
4acf12bb69 Merge branch 'post-09' into post-10 2019-03-14 11:18:40 +01:00
Philipp Oppermann
6f25c34d46 Merge branch 'post-08' into post-09 2019-03-14 11:18:22 +01:00
Philipp Oppermann
161d5fe7be Merge pull request #569 from phil-opp/post-10-new
Update post-10 branch for new "Paging Implementation" post
2019-03-14 10:49:55 +01:00
Philipp Oppermann
9bf4ea7341 Use BootInfoFrameAllocator to create a 0xdeadbeaf000 mapping 2019-03-14 10:30:37 +01:00
Philipp Oppermann
a1bf5651fc Create an init_frame_allocator function 2019-03-14 10:30:37 +01:00
Philipp Oppermann
763228c859 Create a generic BootInfoFrameAllocator type 2019-03-14 10:30:37 +01:00
Philipp Oppermann
770af27d75 Create a new mapping and write through it to the screen 2019-03-14 10:30:37 +01:00
Philipp Oppermann
3e59283c19 Create an EmptyFrameAllocator 2019-03-14 10:30:37 +01:00
Philipp Oppermann
6146ccba2d Add a memory::create_example_mapping function 2019-03-14 10:30:37 +01:00
Philipp Oppermann
b0e1527a95 Delete our memory::translate_addr function again 2019-03-14 10:30:37 +01:00
Philipp Oppermann
cb4410c84e Update kernel_main to use MapperAllSizes::translate_addr 2019-03-14 10:30:37 +01:00
Philipp Oppermann
98b5976656 Create a memory::init function that initializes a MappedPageTable 2019-03-14 10:30:37 +01:00
Philipp Oppermann
9335386928 Add and test a memory::translate_addr function 2019-03-14 10:30:37 +01:00
Philipp Oppermann
7c30d62f33 Also show non-empty level 3 table entries 2019-03-14 10:30:37 +01:00
Philipp Oppermann
61683bccda Print non-empty level 4 table entries 2019-03-14 10:30:37 +01:00
Philipp Oppermann
e1ec5159b8 Add boot info argument and use entry_point macro 2019-03-14 10:30:37 +01:00
Philipp Oppermann
7b7d19592f Enable map_physical_memory feature of bootloader 2019-03-14 10:30:37 +01:00
Philipp Oppermann
e387c0b6b8 Create a memory::active_level_4_table function 2019-03-14 10:30:37 +01:00
Philipp Oppermann
d5abc119f3 Update Readme for Paging Implementation post 2019-03-14 10:30:37 +01:00
Philipp Oppermann
59da6e5620 Update bootloader to version 0.4.0 2019-03-14 10:30:37 +01:00
Philipp Oppermann
ef1cc0ed4f Reset code to post-09 branch for new 'Paging Implementation' post 2019-03-14 10:20:46 +01:00
Philipp Oppermann
90f0caec1a Merge branch 'post-09' into post-10 2019-03-14 10:17:37 +01:00
Philipp Oppermann
7198a4d110 The code for reading the level 4 table was moved to the next post 2019-03-12 17:49:31 +01:00
Philipp Oppermann
5c0fb63f33 Merge branch 'post-09' into post-10 2019-03-12 17:48:43 +01:00
Philipp Oppermann
6ffcb2cf1a Merge branch 'post-08' into post-09 2019-03-12 17:48:43 +01:00
Philipp Oppermann
036a8e7608 Merge branch 'post-09' into post-10 2019-03-09 14:21:12 +01:00
Philipp Oppermann
3b960751f4 Merge branch 'post-08' into post-09 2019-03-09 14:21:12 +01:00
Philipp Oppermann
10c4d0509d Update post-10 code for changes in x86_64 0.5.0
We no longer need a custom translate function as we can directly use MapperAllSizes::translate_addr.
2019-03-09 12:40:27 +01:00
Philipp Oppermann
57998ea4f8 Merge branch 'post-09' into post-10 2019-03-09 12:39:25 +01:00
Philipp Oppermann
f05aaeb0ac Update post-09 code for changes in x86_64 0.5.0 2019-03-09 12:39:14 +01:00
Philipp Oppermann
78a30984bc Merge branch 'post-08' into post-09 2019-03-09 12:38:25 +01:00
Philipp Oppermann
ef9a629ddc Merge branch 'post-09' into post-10 2019-02-25 17:04:56 +01:00
Philipp Oppermann
18d8d311cb Merge branch 'post-08' into post-09 2019-02-25 17:04:40 +01:00
Philipp Oppermann
d974cf5200 Merge branch 'post-09' into post-10 2019-02-25 16:37:31 +01:00
Philipp Oppermann
94447af25a Merge branch 'post-08' into post-09 2019-02-25 16:37:30 +01:00
Philipp Oppermann
36d6c6d0e9 Merge branch 'post-09' into post-10 2019-02-12 19:31:09 +01:00
Philipp Oppermann
76d3715eef Merge branch 'post-08' into post-09 2019-02-12 19:31:09 +01:00
Philipp Oppermann
ff49104764 Merge branch 'post-09' into post-10 2019-02-07 18:48:39 +01:00
Philipp Oppermann
bda1b8929c Merge branch 'post-08' into post-09 2019-02-07 18:48:39 +01:00
Philipp Oppermann
03e43da9f9 Merge branch 'post-09' into post-10 2019-02-07 16:17:54 +01:00
Philipp Oppermann
58e171cce7 Merge branch 'post-08' into post-09 2019-02-07 16:15:10 +01:00
Philipp Oppermann
e696d65b60 Merge branch 'post-09' into post-10 2019-02-05 15:23:49 +01:00
Philipp Oppermann
ef09418cbf Merge branch 'post-08' into post-09 2019-02-05 15:23:49 +01:00
Philipp Oppermann
ebf626061f Merge branch 'post-09' into post-10 2019-02-05 14:59:15 +01:00
Philipp Oppermann
4c9352d898 Merge branch 'post-08' into post-09 2019-02-05 14:59:15 +01:00
Philipp Oppermann
c3d023ad40 Merge branch 'post-09' into post-10 2019-02-05 10:46:08 +01:00
Philipp Oppermann
f3cf5b51de Merge branch 'post-08' into post-09 2019-02-05 10:46:08 +01:00
Philipp Oppermann
0a10b3e784 Merge branch 'post-09' into post-10 2019-02-05 10:45:12 +01:00
Philipp Oppermann
9617680e45 Merge branch 'post-08' into post-09 2019-02-05 10:45:12 +01:00
Philipp Oppermann
e0f66a8196 Merge branch 'post-09' into post-10 2019-01-29 12:16:54 +01:00
Philipp Oppermann
ba54fd2503 Merge branch 'post-08' into post-09 2019-01-29 12:16:54 +01:00
Philipp Oppermann
e5dfbd4b23 Merge branch 'post-09' into post-10 2019-01-28 11:51:35 +01:00
Philipp Oppermann
4e6ce8d16e Merge branch 'post-08' into post-09 2019-01-28 11:51:23 +01:00
Philipp Oppermann
5ad2962389 Merge branch 'post-09' into post-10 2019-01-28 11:44:14 +01:00
Philipp Oppermann
82e6c4b066 Merge branch 'post-08' into post-09 2019-01-28 11:44:14 +01:00
Philipp Oppermann
a56e22b6fc Use BootInfoFrameAllocator instead of EmptyFrameAllocator 2019-01-28 11:30:27 +01:00
Philipp Oppermann
67f536d7c6 Add a BootInfoFrameAllocator 2019-01-28 11:28:51 +01:00
Philipp Oppermann
741224411b Use the BootInfo struct passed by the bootloader 2019-01-28 11:24:16 +01:00
Philipp Oppermann
818417d119 Try to create example mapping for page 0xdeadbeaf000 2019-01-28 11:23:46 +01:00
Philipp Oppermann
f272785861 Create example mapping for page 0x1000 2019-01-28 11:19:46 +01:00
Philipp Oppermann
5d807ee622 Run rustfmt 2019-01-28 11:19:34 +01:00
Philipp Oppermann
90c3cdf0f3 Update Readme for Advanced Paging post 2019-01-27 17:14:58 +01:00
Philipp Oppermann
2bc233b2f6 Merge branch 'post-09' into post-10 2019-01-27 17:14:12 +01:00
Philipp Oppermann
0df629df47 Update Readme for Introduction to Paging post 2019-01-27 17:13:57 +01:00
Philipp Oppermann
cca85de5ed Merge branch 'post-08' into post-09 2019-01-27 17:13:21 +01:00
Philipp Oppermann
051b23f577 Merge branch 'post-09' into post-10 2019-01-27 16:33:54 +01:00
Philipp Oppermann
97e884e6a3 Merge branch 'post-08' into post-09 2019-01-27 16:33:54 +01:00
Philipp Oppermann
bd1f5345da Merge branch 'z_post_08' into z_post_09 2019-01-27 15:46:59 +01:00
Philipp Oppermann
f2bc2d33f0 Rewrite translation function on top of RecursivePageTable 2019-01-27 14:38:49 +01:00
Philipp Oppermann
38a121a887 Test translate_addr by translating some virtual addresses 2019-01-27 14:38:49 +01:00
Philipp Oppermann
8b380f0692 Create a new memory module with a translate_addr function 2019-01-27 14:38:49 +01:00
Philipp Oppermann
f23ee04161 Merge branch 'z_post_08' into z_post_09 2019-01-27 14:30:32 +01:00
Philipp Oppermann
b184f7d996 Move the testing code to the end of _start 2019-01-26 12:57:32 +01:00
Philipp Oppermann
7c07a67bf5 Merge branch 'z_post_08' into z_post_09 2019-01-25 14:47:46 +01:00
Philipp Oppermann
abaf5bd862 Use PageTable struct of x86_64 crate for accessing entries 2019-01-25 14:29:50 +01:00
Philipp Oppermann
b7005b766f Print first 10 entries of level 4 page table 2019-01-25 14:29:04 +01:00
Philipp Oppermann
91ca04e8c2 Retrieve address of level 4 page table 2019-01-25 14:28:23 +01:00
Philipp Oppermann
ada45c6e52 Provoke page fault 2019-01-25 14:27:31 +01:00
Philipp Oppermann
207a466707 Add a page fault handler 2019-01-25 14:26:35 +01:00
15 changed files with 560 additions and 9 deletions

View File

@@ -5,7 +5,7 @@ authors = ["Philipp Oppermann <dev@phil-opp.com>"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
bootloader = "0.4.0" bootloader = { version = "0.4.0", features = ["map_physical_memory"]}
volatile = "0.2.3" volatile = "0.2.3"
spin = "0.4.9" spin = "0.4.9"
uart_16550 = "0.1.0" uart_16550 = "0.1.0"

View File

@@ -1,10 +1,10 @@
# Blog OS (Hardware Interrupts) # Blog OS (Paging Implementation)
[![Build Status](https://travis-ci.org/phil-opp/blog_os.svg?branch=post-08)](https://travis-ci.org/phil-opp/blog_os/branches) [![Build Status](https://travis-ci.org/phil-opp/blog_os.svg?branch=post-10)](https://travis-ci.org/phil-opp/blog_os/branches)
This repository contains the source code for the [Hardware Interrupts][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series. This repository contains the source code for the [Paging Implementation][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series.
[post]: https://os.phil-opp.com/hardware-interrupts/ [post]: https://os.phil-opp.com/paging-implementation/
**Check out the [master branch](https://github.com/phil-opp/blog_os) for more information.** **Check out the [master branch](https://github.com/phil-opp/blog_os) for more information.**

View File

@@ -1,8 +1,11 @@
#![cfg_attr(not(test), no_std)] #![cfg_attr(not(test), no_std)]
#![cfg_attr(not(test), no_main)] // disable all Rust-level entry points #![cfg_attr(not(test), no_main)] // disable all Rust-level entry points
#![cfg_attr(test, allow(unused_imports))] #![cfg_attr(test, allow(unused_imports))]
#![feature(alloc_error_handler)]
use blog_os::memory::allocator::DummyAllocator;
use blog_os::{exit_qemu, serial_println}; use blog_os::{exit_qemu, serial_println};
use core::alloc::Layout;
use core::panic::PanicInfo; use core::panic::PanicInfo;
/// This function is the entry point, since the linker looks for a function /// This function is the entry point, since the linker looks for a function
@@ -31,3 +34,11 @@ fn panic(info: &PanicInfo) -> ! {
} }
loop {} loop {}
} }
#[global_allocator]
static ALLOCATOR: DummyAllocator = DummyAllocator;
#[alloc_error_handler]
fn out_of_memory(layout: Layout) -> ! {
panic!("out of memory: allocation for {:?} failed", layout);
}

View File

@@ -1,8 +1,11 @@
#![no_std] #![no_std]
#![cfg_attr(not(test), no_main)] #![cfg_attr(not(test), no_main)]
#![cfg_attr(test, allow(dead_code, unused_macros, unused_imports))] #![cfg_attr(test, allow(dead_code, unused_macros, unused_imports))]
#![feature(alloc_error_handler)]
use blog_os::memory::allocator::DummyAllocator;
use blog_os::{exit_qemu, serial_println}; use blog_os::{exit_qemu, serial_println};
use core::alloc::Layout;
use core::panic::PanicInfo; use core::panic::PanicInfo;
#[cfg(not(test))] #[cfg(not(test))]
@@ -32,3 +35,11 @@ fn panic(info: &PanicInfo) -> ! {
} }
loop {} loop {}
} }
#[global_allocator]
static ALLOCATOR: DummyAllocator = DummyAllocator;
#[alloc_error_handler]
fn out_of_memory(layout: Layout) -> ! {
panic!("out of memory: allocation for {:?} failed", layout);
}

View File

@@ -2,8 +2,11 @@
#![no_std] #![no_std]
#![cfg_attr(not(test), no_main)] #![cfg_attr(not(test), no_main)]
#![cfg_attr(test, allow(dead_code, unused_macros, unused_imports))] #![cfg_attr(test, allow(dead_code, unused_macros, unused_imports))]
#![feature(alloc_error_handler)]
use blog_os::memory::allocator::DummyAllocator;
use blog_os::{exit_qemu, serial_println}; use blog_os::{exit_qemu, serial_println};
use core::alloc::Layout;
use core::panic::PanicInfo; use core::panic::PanicInfo;
use lazy_static::lazy_static; use lazy_static::lazy_static;
@@ -45,6 +48,14 @@ fn panic(info: &PanicInfo) -> ! {
loop {} loop {}
} }
#[global_allocator]
static ALLOCATOR: DummyAllocator = DummyAllocator;
#[alloc_error_handler]
fn out_of_memory(layout: Layout) -> ! {
panic!("out of memory: allocation for {:?} failed", layout);
}
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame}; use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame};
lazy_static! { lazy_static! {

View File

@@ -1,8 +1,11 @@
#![cfg_attr(not(test), no_std)] #![cfg_attr(not(test), no_std)]
#![cfg_attr(not(test), no_main)] #![cfg_attr(not(test), no_main)]
#![cfg_attr(test, allow(unused_imports))] #![cfg_attr(test, allow(unused_imports))]
#![feature(alloc_error_handler)]
use blog_os::memory::allocator::DummyAllocator;
use blog_os::{exit_qemu, serial_println}; use blog_os::{exit_qemu, serial_println};
use core::alloc::Layout;
use core::panic::PanicInfo; use core::panic::PanicInfo;
#[cfg(not(test))] #[cfg(not(test))]
@@ -21,3 +24,11 @@ fn panic(_info: &PanicInfo) -> ! {
} }
loop {} loop {}
} }
#[global_allocator]
static ALLOCATOR: DummyAllocator = DummyAllocator;
#[alloc_error_handler]
fn out_of_memory(layout: Layout) -> ! {
panic!("out of memory: allocation for {:?} failed", layout);
}

View File

@@ -8,7 +8,7 @@ use crate::{gdt, hlt_loop, print, println};
use lazy_static::lazy_static; use lazy_static::lazy_static;
use pic8259_simple::ChainedPics; use pic8259_simple::ChainedPics;
use spin; use spin;
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame}; use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode};
pub const PIC_1_OFFSET: u8 = 32; pub const PIC_1_OFFSET: u8 = 32;
pub const PIC_2_OFFSET: u8 = PIC_1_OFFSET + 8; pub const PIC_2_OFFSET: u8 = PIC_1_OFFSET + 8;
@@ -37,6 +37,7 @@ lazy_static! {
static ref IDT: InterruptDescriptorTable = { static ref IDT: InterruptDescriptorTable = {
let mut idt = InterruptDescriptorTable::new(); let mut idt = InterruptDescriptorTable::new();
idt.breakpoint.set_handler_fn(breakpoint_handler); idt.breakpoint.set_handler_fn(breakpoint_handler);
idt.page_fault.set_handler_fn(page_fault_handler);
unsafe { unsafe {
idt.double_fault idt.double_fault
.set_handler_fn(double_fault_handler) .set_handler_fn(double_fault_handler)
@@ -56,6 +57,19 @@ extern "x86-interrupt" fn breakpoint_handler(stack_frame: &mut InterruptStackFra
println!("EXCEPTION: BREAKPOINT\n{:#?}", stack_frame); println!("EXCEPTION: BREAKPOINT\n{:#?}", stack_frame);
} }
extern "x86-interrupt" fn page_fault_handler(
stack_frame: &mut InterruptStackFrame,
_error_code: PageFaultErrorCode,
) {
use crate::hlt_loop;
use x86_64::registers::control::Cr2;
println!("EXCEPTION: PAGE FAULT");
println!("Accessed Address: {:?}", Cr2::read());
println!("{:#?}", stack_frame);
hlt_loop();
}
extern "x86-interrupt" fn double_fault_handler( extern "x86-interrupt" fn double_fault_handler(
stack_frame: &mut InterruptStackFrame, stack_frame: &mut InterruptStackFrame,
_error_code: u64, _error_code: u64,

View File

@@ -1,8 +1,13 @@
#![cfg_attr(not(test), no_std)] #![cfg_attr(not(test), no_std)]
#![feature(abi_x86_interrupt)] #![feature(abi_x86_interrupt)]
#![feature(alloc)]
#![feature(const_fn)]
extern crate alloc;
pub mod gdt; pub mod gdt;
pub mod interrupts; pub mod interrupts;
pub mod memory;
pub mod serial; pub mod serial;
pub mod vga_buffer; pub mod vga_buffer;

View File

@@ -1,14 +1,27 @@
#![cfg_attr(not(test), no_std)] #![cfg_attr(not(test), no_std)]
#![cfg_attr(not(test), no_main)] #![cfg_attr(not(test), no_main)]
#![cfg_attr(test, allow(unused_imports))] #![cfg_attr(test, allow(unused_imports))]
#![feature(alloc)]
#![feature(alloc_error_handler)]
use blog_os::println; extern crate alloc;
use alloc::vec::Vec;
use blog_os::{
memory::allocator::{BumpAllocator, LinkedListAllocator, LockedAllocator, BucketAllocator},
println,
};
use bootloader::{entry_point, BootInfo};
use core::alloc::Layout;
use core::panic::PanicInfo; use core::panic::PanicInfo;
entry_point!(kernel_main);
#[cfg(not(test))] #[cfg(not(test))]
#[no_mangle] fn kernel_main(boot_info: &'static BootInfo) -> ! {
pub extern "C" fn _start() -> ! {
use blog_os::interrupts::PICS; use blog_os::interrupts::PICS;
use blog_os::memory;
use x86_64::{structures::paging::Page, VirtAddr};
println!("Hello World{}", "!"); println!("Hello World{}", "!");
@@ -17,6 +30,25 @@ pub extern "C" fn _start() -> ! {
unsafe { PICS.lock().initialize() }; unsafe { PICS.lock().initialize() };
x86_64::instructions::interrupts::enable(); x86_64::instructions::interrupts::enable();
let mut mapper = unsafe { memory::init(boot_info.physical_memory_offset) };
let mut frame_allocator = memory::init_frame_allocator(&boot_info.memory_map);
let heap_start = VirtAddr::new(HEAP_START);
let heap_end = VirtAddr::new(HEAP_END);
memory::map_heap(heap_start, heap_end, &mut mapper, &mut frame_allocator)
.expect("map_heap failed");
ALLOCATOR.lock().underlying().add_memory(heap_start, HEAP_END - HEAP_START);
//let mut x = Vec::with_capacity(1000);
let mut x = Vec::new();
for i in 0..1000 {
x.push(i);
}
println!("{:?}", *ALLOCATOR.lock());
println!("with vec of size {}: {}", x.len(), x.iter().sum::<i32>());
println!("with formular: {}", 999 * 1000 / 2);
println!("It did not crash!"); println!("It did not crash!");
blog_os::hlt_loop(); blog_os::hlt_loop();
} }
@@ -28,3 +60,15 @@ fn panic(info: &PanicInfo) -> ! {
println!("{}", info); println!("{}", info);
blog_os::hlt_loop(); blog_os::hlt_loop();
} }
const HEAP_START: u64 = 0o_001_000_000_0000;
const HEAP_END: u64 = HEAP_START + 10 * 0x1000;
#[global_allocator]
static ALLOCATOR: LockedAllocator<BucketAllocator<LinkedListAllocator>> =
LockedAllocator::new(BucketAllocator::new(LinkedListAllocator::empty()));
#[alloc_error_handler]
fn out_of_memory(layout: Layout) -> ! {
panic!("out of memory: allocation for {:?} failed", layout);
}

120
src/memory.rs Normal file
View File

@@ -0,0 +1,120 @@
use bootloader::bootinfo::{MemoryMap, MemoryRegionType};
use x86_64::{
structures::paging::{
mapper, FrameAllocator, MappedPageTable, Mapper, MapperAllSizes, Page, PageTable,
PhysFrame, Size4KiB,
},
PhysAddr, VirtAddr,
};
pub mod allocator;
/// Initialize a new MappedPageTable.
///
/// # Safety
///
/// The caller must guarantee that the complete physical memory is mapped
/// to virtual memory at the passed `physical_memory_offset`. Additionally,
/// this function may only be called once to avoid aliasing `&mut`
/// references (which is undefined behavior).
pub unsafe fn init(physical_memory_offset: u64) -> impl MapperAllSizes {
    let level_4_table = active_level_4_table(physical_memory_offset);
    // Translate a physical frame to the virtual address at which its page
    // table can be accessed through the complete-physical-memory mapping.
    let frame_to_table_ptr = move |frame: PhysFrame| -> *mut PageTable {
        let frame_phys = frame.start_address().as_u64();
        VirtAddr::new(frame_phys + physical_memory_offset).as_mut_ptr()
    };
    MappedPageTable::new(level_4_table, frame_to_table_ptr)
}
/// Create a FrameAllocator from the passed memory map
pub fn init_frame_allocator(
    memory_map: &'static MemoryMap,
) -> BootInfoFrameAllocator<impl Iterator<Item = PhysFrame>> {
    // Turn every usable region of the bootloader's memory map into a stream
    // of 4 KiB-spaced frame start addresses, then wrap each one in a
    // `PhysFrame`.
    let frames = memory_map
        .iter()
        .filter(|region| region.region_type == MemoryRegionType::Usable)
        .map(|region| region.range.start_addr()..region.range.end_addr())
        .flat_map(|range| range.step_by(4096))
        .map(|start| PhysFrame::containing_address(PhysAddr::new(start)));
    BootInfoFrameAllocator { frames }
}
/// Returns a mutable reference to the active level 4 table.
///
/// # Safety
///
/// The complete physical memory must be mapped to virtual memory at the
/// passed `physical_memory_offset`, and this function must only be called
/// once to avoid aliasing `&mut` references (which is undefined behavior).
unsafe fn active_level_4_table(physical_memory_offset: u64) -> &'static mut PageTable {
    use x86_64::{registers::control::Cr3, VirtAddr};
    // CR3 holds the physical frame of the currently active level 4 table.
    let (level_4_frame, _flags) = Cr3::read();
    let table_phys = level_4_frame.start_address();
    let table_virt = VirtAddr::new(table_phys.as_u64() + physical_memory_offset);
    let table_ptr: *mut PageTable = table_virt.as_mut_ptr();
    &mut *table_ptr // unsafe: valid per the safety contract above
}
/// Maps the virtual range `heap_start..heap_end` (end exclusive) to freshly
/// allocated physical frames, marking the pages present and writable.
pub fn map_heap(
    heap_start: VirtAddr,
    heap_end: VirtAddr,
    mapper: &mut impl Mapper<Size4KiB>,
    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) -> Result<(), MapHeapError> {
    use x86_64::structures::paging::PageTableFlags as Flags;
    // `heap_end` is exclusive, so the last page is the one containing the
    // final byte of the range.
    let first_page = Page::containing_address(heap_start);
    let last_page = Page::containing_address(heap_end - 1u64);
    let flags = Flags::PRESENT | Flags::WRITABLE;
    for page in Page::range_inclusive(first_page, last_page) {
        let frame = frame_allocator
            .allocate_frame()
            .ok_or(MapHeapError::FrameAllocationFailed)?;
        unsafe { mapper.map_to(page, frame, flags, frame_allocator)?.flush() };
    }
    Ok(())
}
/// Errors that can occur while mapping the kernel heap in `map_heap`.
#[derive(Debug)]
pub enum MapHeapError {
    // The frame allocator ran out of usable physical frames.
    FrameAllocationFailed,
    // The underlying `Mapper::map_to` call failed.
    MapToError(mapper::MapToError),
}
/// Allows using `?` on `map_to` results inside `map_heap`.
impl From<mapper::MapToError> for MapHeapError {
    fn from(err: mapper::MapToError) -> Self {
        MapHeapError::MapToError(err)
    }
}
/// A FrameAllocator that always returns `None`.
pub struct EmptyFrameAllocator;
impl FrameAllocator<Size4KiB> for EmptyFrameAllocator {
    /// Always fails: this allocator owns no frames.
    fn allocate_frame(&mut self) -> Option<PhysFrame> {
        None
    }
}
/// A FrameAllocator that yields frames from an iterator of usable physical
/// frames (built from the bootloader memory map by `init_frame_allocator`).
pub struct BootInfoFrameAllocator<I>
where
    I: Iterator<Item = PhysFrame>,
{
    // Iterator over the start frames of all usable physical memory regions.
    frames: I,
}
impl<I> FrameAllocator<Size4KiB> for BootInfoFrameAllocator<I>
where
    I: Iterator<Item = PhysFrame>,
{
    /// Returns the next usable frame, or `None` once the iterator is
    /// exhausted. Frames are handed out exactly once; there is no support
    /// for returning frames to this allocator.
    fn allocate_frame(&mut self) -> Option<PhysFrame> {
        self.frames.next()
    }
}

49
src/memory/allocator.rs Normal file
View File

@@ -0,0 +1,49 @@
pub use bump::BumpAllocator;
pub use dummy::DummyAllocator;
pub use linked_list::LinkedListAllocator;
pub use bucket::BucketAllocator;
use core::alloc::{GlobalAlloc, Layout};
use spin::{Mutex, MutexGuard};
mod bump;
mod dummy;
mod linked_list;
mod bucket;
/// Wraps an allocator behind a spinlock so it can serve as the global
/// allocator: `GlobalAlloc` only takes `&self`, while `MutGlobalAlloc`
/// implementations need `&mut self`.
pub struct LockedAllocator<T> {
    allocator: Mutex<T>,
}
// Note: the original file had two separate `impl<T> LockedAllocator<T>`
// blocks with identical bounds; they are merged here for consistency.
impl<T> LockedAllocator<T> {
    /// Wraps `allocator` in a spinlock. `const` so the result can
    /// initialize a `static` (e.g. the `#[global_allocator]`).
    pub const fn new(allocator: T) -> Self {
        Self {
            allocator: Mutex::new(allocator),
        }
    }
    /// Acquires the spinlock and returns a guard giving exclusive access
    /// to the inner allocator.
    pub fn lock(&self) -> MutexGuard<T> {
        self.allocator.lock()
    }
}
unsafe impl<T> GlobalAlloc for LockedAllocator<T>
where
    T: MutGlobalAlloc,
{
    /// Locks the inner allocator and forwards the allocation request.
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        self.allocator.lock().alloc(layout)
    }
    /// Locks the inner allocator and forwards the deallocation request.
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        self.allocator.lock().dealloc(ptr, layout)
    }
}
/// Like `core::alloc::GlobalAlloc`, but with `&mut self` methods so that
/// implementors need no interior mutability; wrap an implementor in
/// `LockedAllocator` to obtain a real `GlobalAlloc`.
pub trait MutGlobalAlloc {
    /// Allocates memory for `layout`; implementors in this module return a
    /// null pointer on failure.
    fn alloc(&mut self, layout: Layout) -> *mut u8;
    /// Frees an allocation previously returned by `alloc` with the same layout.
    fn dealloc(&mut self, ptr: *mut u8, layout: Layout);
}

View File

@@ -0,0 +1,126 @@
use super::MutGlobalAlloc;
use core::alloc::Layout;
use core::mem::size_of;
use core::fmt::{self, Debug};
use core::cmp;
/// A bucket (segregated free-list) allocator layered over an underlying
/// allocator `A`: freed blocks are kept in per-size-class free lists
/// ("buckets") and recycled by later allocations of the same class.
#[derive(Debug)]
pub struct BucketAllocator<A> where A: MutGlobalAlloc + Debug {
    // Fallback allocator, used on bucket miss and for oversized requests.
    underlying: A,
    // Free lists for size classes from `size_of::<Region>()` up to 4096 bytes.
    buckets: [Bucket; 10],
}
impl<A> BucketAllocator<A> where A: MutGlobalAlloc + Debug {
    /// Creates a bucket allocator with all free lists empty. `const` so it
    /// can initialize a `static`.
    pub const fn new(underlying: A) -> Self {
        Self {
            underlying,
            buckets: [
                // The smallest class must be able to hold a free-list node.
                Bucket::new(size_of::<Region>()),
                Bucket::new(16),
                Bucket::new(32),
                Bucket::new(64),
                Bucket::new(128),
                Bucket::new(256),
                Bucket::new(512),
                Bucket::new(1024),
                Bucket::new(2048),
                Bucket::new(4096),
            ]
        }
    }
    /// Gives mutable access to the underlying allocator, e.g. so the caller
    /// can feed it heap memory after paging is set up.
    pub fn underlying(&mut self) -> &mut A {
        &mut self.underlying
    }
}
/// A single size class: a LIFO free list of blocks of exactly `size` bytes.
pub struct Bucket {
    // Block size (and alignment) of this class, in bytes.
    size: usize,
    // Head of the intrusive free list; `None` when the bucket is empty.
    head: Option<&'static mut Region>,
}
impl Bucket {
    // Creates an empty bucket for blocks of `size` bytes.
    const fn new(size: usize) -> Self {
        Bucket {
            size,
            head: None,
        }
    }
    /// Layout requested from the underlying allocator on a bucket miss:
    /// `size` bytes aligned to `size`.
    /// NOTE(review): `from_size_align` panics unless the alignment is a
    /// power of two — all bucket sizes constructed above satisfy this.
    fn layout(&self) -> Layout {
        Layout::from_size_align(self.size, self.size).unwrap()
    }
}
impl fmt::Debug for Bucket {
    /// Prints the class size and the current free-list length. The list is
    /// walked on every call, so this is O(number of free blocks).
    fn fmt(&self, f: &mut fmt::Formatter)-> fmt::Result {
        let mut regions = 0;
        let mut current = &self.head;
        // Walk the intrusive linked list, counting nodes.
        while let Some(region) = current {
            current = &region.next;
            regions += 1;
        }
        f.debug_struct("Bucket").field("size", &self.size).field("regions", &regions).finish()
    }
}
/// Intrusive free-list node stored inside each freed block itself.
#[derive(Debug)]
struct Region {
    next: Option<&'static mut Region>,
}
impl Region {
    // A detached node with no successor.
    fn new() -> Self {
        Self {
            next: None,
        }
    }
    /// Reinterprets this node as the user pointer of its block.
    fn as_mut_u8(&'static mut self) -> *mut u8 {
        self as *mut Region as *mut u8
    }
    /// Writes a fresh node header into the block at `ptr` and returns a
    /// reference to it.
    ///
    /// # Safety
    /// `ptr` must be valid for writes of `size_of::<Region>()` bytes,
    /// suitably aligned for `Region`, and must not be aliased elsewhere
    /// for the `'static` lifetime of the returned reference.
    unsafe fn from_mut_u8(ptr: *mut u8) -> &'static mut Self {
        (ptr as *mut Region).write(Region::new());
        &mut *(ptr as *mut Region)
    }
}
impl<A> BucketAllocator<A> where A: MutGlobalAlloc + Debug {
    /// Returns the index of the smallest bucket whose block size satisfies
    /// `layout`, or `None` when the request exceeds the largest size class.
    fn get_bucket_index(&self, layout: &Layout) -> Option<usize> {
        // A block of `n` bytes aligned to `n` satisfies any layout whose
        // size and alignment are both at most `n`.
        let needed = cmp::max(layout.size(), layout.align());
        // Bucket sizes are sorted ascending, so a binary search yields
        // either an exact class or the insertion point of the next larger one.
        let index = match self
            .buckets
            .binary_search_by(|bucket| bucket.size.cmp(&needed))
        {
            Ok(exact) => exact,
            Err(insertion) => insertion,
        };
        if index < self.buckets.len() {
            Some(index)
        } else {
            None
        }
    }
}
impl<A> MutGlobalAlloc for BucketAllocator<A> where A: MutGlobalAlloc + Debug {
    /// Serves the request from the matching bucket's free list when
    /// possible; otherwise falls through to the underlying allocator.
    fn alloc(&mut self, layout: Layout) -> *mut u8 {
        if let Some(bucket_index) = self.get_bucket_index(&layout) {
            let bucket = &mut self.buckets[bucket_index];
            if let Some(head) = bucket.head.take() {
                // Pop the first free block and hand out its memory directly.
                let next = head.next.take();
                bucket.head = next;
                return head.as_mut_u8();
            } else {
                // Bucket empty: request a full class-sized block so it can
                // later be recycled into this bucket on dealloc.
                self.underlying.alloc(bucket.layout())
            }
        } else {
            // Oversized request: bypass the buckets entirely.
            self.underlying.alloc(layout)
        }
    }
    /// Pushes bucket-sized blocks back onto their class's free list (they
    /// are never returned to the underlying allocator); oversized blocks go
    /// straight back to `underlying`.
    fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
        if let Some(bucket_index) = self.get_bucket_index(&layout) {
            let bucket = &mut self.buckets[bucket_index];
            // Write a free-list node header into the freed block and push it.
            let region = unsafe {Region::from_mut_u8(ptr)};
            region.next = bucket.head.take();
            bucket.head = Some(region);
        } else {
            self.underlying.dealloc(ptr, layout);
        }
    }
}

View File

@@ -0,0 +1,38 @@
use super::MutGlobalAlloc;
use core::alloc::Layout;
use x86_64::align_up;
/// A simple bump allocator: hands out memory linearly from a fixed heap
/// range and never reuses freed memory.
pub struct BumpAllocator {
    // Start of the managed heap range (inclusive).
    heap_start: u64,
    // End of the managed heap range (presumably exclusive, matching the
    // exclusive-end convention of `map_heap`/`add_memory` — TODO confirm).
    heap_end: u64,
    // Address where the next allocation will start; only ever grows.
    next: u64,
}
impl BumpAllocator {
    /// Creates a bump allocator for the given heap range. `const` so it can
    /// initialize a `static`.
    /// NOTE(review): the addresses are taken on trust — the caller must
    /// ensure the range is mapped, writable, and otherwise unused.
    pub const fn new(heap_start: u64, heap_end: u64) -> Self {
        Self {
            heap_start,
            heap_end,
            next: heap_start,
        }
    }
}
impl MutGlobalAlloc for BumpAllocator {
    /// Bump-allocates `layout` from the remaining heap space.
    ///
    /// Returns a null pointer when the heap is exhausted.
    fn alloc(&mut self, layout: Layout) -> *mut u8 {
        let alloc_start = align_up(self.next, layout.align() as u64);
        // `saturating_add`: an overflowing end address must not wrap around
        // and appear to fit.
        let alloc_end = alloc_start.saturating_add(layout.size() as u64);
        // `heap_end` is exclusive, so an allocation ending exactly at
        // `heap_end` still fits. The previous `>=` check was off by one and
        // wasted the final bytes of the heap.
        if alloc_end > self.heap_end {
            // out of memory
            return core::ptr::null_mut();
        }
        self.next = alloc_end;
        alloc_start as *mut u8
    }
    /// Individual deallocation is unsupported by design: a bump allocator
    /// can only release everything at once.
    fn dealloc(&mut self, _ptr: *mut u8, _layout: Layout) {
        panic!("BumpAllocator::dealloc called");
    }
}

View File

@@ -0,0 +1,16 @@
use core::alloc::{GlobalAlloc, Layout};
/// A placeholder allocator: every `alloc` or `dealloc` call panics.
/// Useful as a `#[global_allocator]` before a real heap exists.
pub struct DummyAllocator;
unsafe impl GlobalAlloc for DummyAllocator {
    /// Panics unconditionally — no allocation is ever possible.
    unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
        panic!("DummyAllocator::alloc called")
    }
    /// Panics unconditionally — nothing can ever have been allocated.
    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        panic!("DummyAllocator::dealloc called")
    }
}

View File

@@ -0,0 +1,95 @@
use super::MutGlobalAlloc;
use core::alloc::Layout;
use core::mem;
use x86_64::{align_up, VirtAddr};
/// A first-fit free-list allocator: free memory is tracked as an intrusive
/// linked list of `Region` headers stored inside the free blocks themselves.
#[derive(Debug)]
pub struct LinkedListAllocator {
    // Dummy list head (size 0); the real free regions hang off `head.next`.
    head: Region,
}
impl LinkedListAllocator {
    /// Creates an allocator that manages no memory yet; feed it regions
    /// via `add_memory`.
    pub const fn empty() -> Self {
        let head = Region {
            size: 0,
            next: None,
        };
        Self { head }
    }
    /// Creates an allocator managing `heap_size` bytes starting at `heap_start`.
    ///
    /// # Safety
    /// The range must be valid, mapped, writable memory that is unused and
    /// remains owned by this allocator (a `Region` header is written into it).
    pub unsafe fn new(heap_start: VirtAddr, heap_size: u64) -> Self {
        let mut allocator = Self::empty();
        allocator.add_memory(heap_start, heap_size);
        allocator
    }
    /// Pushes the range `start..start + size` onto the front of the free
    /// list, aligning `start` up so a `Region` header fits at the beginning.
    ///
    /// NOTE(review): assumes `size` covers the alignment padding plus a
    /// `Region` header — `size - (aligned - start)` underflows otherwise,
    /// and the header write below would overrun a too-small range.
    pub fn add_memory(&mut self, start: VirtAddr, size: u64) {
        let aligned = start.align_up(mem::size_of::<Region>() as u64);
        let mut region = Region {
            size: size - (aligned - start),
            next: None
        };
        // Splice the new region in front of the existing list.
        mem::swap(&mut self.head.next, &mut region.next);
        let region_ptr: *mut Region = aligned.as_mut_ptr();
        // Write the header into the free block itself (intrusive list).
        unsafe { region_ptr.write(region) };
        self.head.next = Some(unsafe { &mut *region_ptr });
    }
}
impl MutGlobalAlloc for LinkedListAllocator {
    /// First-fit allocation: scans the free list for the first region that
    /// can hold `layout` and carves the allocation out of it.
    ///
    /// Returns a null pointer when no region is large enough.
    fn alloc(&mut self, layout: Layout) -> *mut u8 {
        // Round the size up to a multiple of the `Region` header size so
        // that every block handed back in `dealloc` can store a header.
        let size = align_up(layout.size() as u64, mem::size_of::<Region>() as u64);
        let mut current = &mut self.head;
        loop {
            let next = match current.next {
                Some(ref mut next) => next,
                None => break,
            };
            let next_start = VirtAddr::new(*next as *mut Region as u64);
            let next_end = next_start + next.size;
            let alloc_start = next_start.align_up(layout.align() as u64);
            let alloc_end = alloc_start + size;
            // check if Region large enough
            if alloc_end <= next_end {
                // remove Region from list
                let next_next = next.next.take();
                current.next = next_next;
                // Re-insert the unused tail of the region, but only when it
                // can hold a `Region` header: `add_memory` writes a header
                // into the range, so calling it with a zero-size or
                // too-small remainder would corrupt the memory just handed
                // out to the caller. A too-small tail is leaked instead.
                let remainder = next_end - alloc_end;
                if remainder >= mem::size_of::<Region>() as u64 {
                    self.add_memory(alloc_end, remainder);
                }
                // return allocated memory
                return alloc_start.as_mut_ptr();
            }
            // continue with next element
            //
            // This is basically `current = next`, but we need a new `match` expression because
            // the compiler can't figure the lifetimes out when we use the `next` binding
            // from above.
            current = match current.next {
                Some(ref mut next) => next,
                None => unreachable!(),
            };
        }
        // no large enough Region found
        core::ptr::null_mut()
    }
    /// Returns the block to the free list. The size is rounded up exactly
    /// as in `alloc`, so the whole block is reclaimed.
    fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
        let size = align_up(layout.size() as u64, mem::size_of::<Region>() as u64);
        self.add_memory(VirtAddr::new(ptr as u64), size);
    }
}
/// Intrusive free-list node written at the start of each free memory region.
#[derive(Debug)]
struct Region {
    // Byte length of the free region, measured from this header's address.
    size: u64,
    // Next free region, or `None` at the end of the list.
    next: Option<&'static mut Region>,
}
// TODO recycle alignment