Compare commits

...

2 Commits

Author              SHA1         Message   Date
Philipp Oppermann   1eb8f0f2c7   Wip       2019-06-17 13:23:39 +02:00
Philipp Oppermann   398ca5357c   WIP       2019-03-18 18:28:04 +01:00
12 changed files with 437 additions and 16 deletions

View File

@@ -1,8 +1,11 @@
 #![cfg_attr(not(test), no_std)]
 #![cfg_attr(not(test), no_main)] // disable all Rust-level entry points
 #![cfg_attr(test, allow(unused_imports))]
+#![feature(alloc_error_handler)]
 
+use blog_os::memory::allocator::DummyAllocator;
 use blog_os::{exit_qemu, serial_println};
+use core::alloc::Layout;
 use core::panic::PanicInfo;
 
 /// This function is the entry point, since the linker looks for a function
@@ -31,3 +34,11 @@ fn panic(info: &PanicInfo) -> ! {
     }
     loop {}
 }
+
+#[global_allocator]
+static ALLOCATOR: DummyAllocator = DummyAllocator;
+
+#[alloc_error_handler]
+fn out_of_memory(layout: Layout) -> ! {
+    panic!("out of memory: allocation for {:?} failed", layout);
+}

View File

@@ -1,8 +1,11 @@
 #![no_std]
 #![cfg_attr(not(test), no_main)]
 #![cfg_attr(test, allow(dead_code, unused_macros, unused_imports))]
+#![feature(alloc_error_handler)]
 
+use blog_os::memory::allocator::DummyAllocator;
 use blog_os::{exit_qemu, serial_println};
+use core::alloc::Layout;
 use core::panic::PanicInfo;
 
 #[cfg(not(test))]
@@ -32,3 +35,11 @@ fn panic(info: &PanicInfo) -> ! {
     }
     loop {}
 }
+
+#[global_allocator]
+static ALLOCATOR: DummyAllocator = DummyAllocator;
+
+#[alloc_error_handler]
+fn out_of_memory(layout: Layout) -> ! {
+    panic!("out of memory: allocation for {:?} failed", layout);
+}

View File

@@ -2,8 +2,11 @@
 #![no_std]
 #![cfg_attr(not(test), no_main)]
 #![cfg_attr(test, allow(dead_code, unused_macros, unused_imports))]
+#![feature(alloc_error_handler)]
 
+use blog_os::memory::allocator::DummyAllocator;
 use blog_os::{exit_qemu, serial_println};
+use core::alloc::Layout;
 use core::panic::PanicInfo;
 use lazy_static::lazy_static;
 
@@ -45,6 +48,14 @@ fn panic(info: &PanicInfo) -> ! {
     loop {}
 }
 
+#[global_allocator]
+static ALLOCATOR: DummyAllocator = DummyAllocator;
+
+#[alloc_error_handler]
+fn out_of_memory(layout: Layout) -> ! {
+    panic!("out of memory: allocation for {:?} failed", layout);
+}
+
 use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame};
 
 lazy_static! {

View File

@@ -1,8 +1,11 @@
 #![cfg_attr(not(test), no_std)]
 #![cfg_attr(not(test), no_main)]
 #![cfg_attr(test, allow(unused_imports))]
+#![feature(alloc_error_handler)]
 
+use blog_os::memory::allocator::DummyAllocator;
 use blog_os::{exit_qemu, serial_println};
+use core::alloc::Layout;
 use core::panic::PanicInfo;
 
 #[cfg(not(test))]
@@ -21,3 +24,11 @@ fn panic(_info: &PanicInfo) -> ! {
     }
     loop {}
 }
+
+#[global_allocator]
+static ALLOCATOR: DummyAllocator = DummyAllocator;
+
+#[alloc_error_handler]
+fn out_of_memory(layout: Layout) -> ! {
+    panic!("out of memory: allocation for {:?} failed", layout);
+}

View File

@@ -1,5 +1,9 @@
 #![cfg_attr(not(test), no_std)]
 #![feature(abi_x86_interrupt)]
+#![feature(alloc)]
+#![feature(const_fn)]
+
+extern crate alloc;
 
 pub mod gdt;
 pub mod interrupts;

View File

@@ -1,9 +1,18 @@
 #![cfg_attr(not(test), no_std)]
 #![cfg_attr(not(test), no_main)]
 #![cfg_attr(test, allow(unused_imports))]
+#![feature(alloc)]
+#![feature(alloc_error_handler)]
 
-use blog_os::println;
+extern crate alloc;
+
+use alloc::vec::Vec;
+use blog_os::{
+    memory::allocator::{BumpAllocator, LinkedListAllocator, LockedAllocator, BucketAllocator},
+    println,
+};
 use bootloader::{entry_point, BootInfo};
+use core::alloc::Layout;
 use core::panic::PanicInfo;
 
 entry_point!(kernel_main);
@@ -24,13 +33,21 @@ fn kernel_main(boot_info: &'static BootInfo) -> ! {
     let mut mapper = unsafe { memory::init(boot_info.physical_memory_offset) };
     let mut frame_allocator = memory::init_frame_allocator(&boot_info.memory_map);
 
-    // map a previously unmapped page
-    let page = Page::containing_address(VirtAddr::new(0xdeadbeaf000));
-    memory::create_example_mapping(page, &mut mapper, &mut frame_allocator);
+    let heap_start = VirtAddr::new(HEAP_START);
+    let heap_end = VirtAddr::new(HEAP_END);
+    memory::map_heap(heap_start, heap_end, &mut mapper, &mut frame_allocator)
+        .expect("map_heap failed");
+    ALLOCATOR.lock().underlying().add_memory(heap_start, HEAP_END - HEAP_START);
 
-    // write the string `New!` to the screen through the new mapping
-    let page_ptr: *mut u64 = page.start_address().as_mut_ptr();
-    unsafe { page_ptr.offset(400).write_volatile(0x_f021_f077_f065_f04e) };
+    //let mut x = Vec::with_capacity(1000);
+    let mut x = Vec::new();
+    for i in 0..1000 {
+        x.push(i);
+    }
+    println!("{:?}", *ALLOCATOR.lock());
+    println!("with vec of size {}: {}", x.len(), x.iter().sum::<i32>());
+    println!("with formular: {}", 999 * 1000 / 2);
 
     println!("It did not crash!");
     blog_os::hlt_loop();
@@ -43,3 +60,15 @@ fn panic(info: &PanicInfo) -> ! {
     println!("{}", info);
     blog_os::hlt_loop();
 }
+
+const HEAP_START: u64 = 0o_001_000_000_0000;
+const HEAP_END: u64 = HEAP_START + 10 * 0x1000;
+
+#[global_allocator]
+static ALLOCATOR: LockedAllocator<BucketAllocator<LinkedListAllocator>> =
+    LockedAllocator::new(BucketAllocator::new(LinkedListAllocator::empty()));
+
+#[alloc_error_handler]
+fn out_of_memory(layout: Layout) -> ! {
+    panic!("out of memory: allocation for {:?} failed", layout);
+}

View File

@@ -1,12 +1,14 @@
 use bootloader::bootinfo::{MemoryMap, MemoryRegionType};
 use x86_64::{
     structures::paging::{
-        FrameAllocator, MappedPageTable, Mapper, MapperAllSizes, Page, PageTable, PhysFrame,
-        Size4KiB,
+        mapper, FrameAllocator, MappedPageTable, Mapper, MapperAllSizes, Page, PageTable,
+        PhysFrame, Size4KiB,
     },
     PhysAddr, VirtAddr,
 };
 
+pub mod allocator;
+
 /// Initialize a new MappedPageTable.
 ///
 /// This function is unsafe because the caller must guarantee that the
@@ -59,19 +61,37 @@ unsafe fn active_level_4_table(physical_memory_offset: u64) -> &'static mut PageTable {
     &mut *page_table_ptr // unsafe
 }
 
-/// Creates an example mapping for the given page to frame `0xb8000`.
-pub fn create_example_mapping(
-    page: Page,
+pub fn map_heap(
+    heap_start: VirtAddr,
+    heap_end: VirtAddr,
     mapper: &mut impl Mapper<Size4KiB>,
     frame_allocator: &mut impl FrameAllocator<Size4KiB>,
-) {
+) -> Result<(), MapHeapError> {
     use x86_64::structures::paging::PageTableFlags as Flags;
 
-    let frame = PhysFrame::containing_address(PhysAddr::new(0xb8000));
     let flags = Flags::PRESENT | Flags::WRITABLE;
+    let start_page = Page::containing_address(heap_start);
+    let end_page = Page::containing_address(heap_end - 1u64);
 
-    let map_to_result = unsafe { mapper.map_to(page, frame, flags, frame_allocator) };
-    map_to_result.expect("map_to failed").flush();
+    for page in Page::range_inclusive(start_page, end_page) {
+        let frame = frame_allocator.allocate_frame();
+        let frame = frame.ok_or(MapHeapError::FrameAllocationFailed)?;
+        unsafe { mapper.map_to(page, frame, flags, frame_allocator)?.flush() };
+    }
+
+    Ok(())
+}
+
+#[derive(Debug)]
+pub enum MapHeapError {
+    FrameAllocationFailed,
+    MapToError(mapper::MapToError),
+}
+
+impl From<mapper::MapToError> for MapHeapError {
+    fn from(err: mapper::MapToError) -> Self {
+        MapHeapError::MapToError(err)
+    }
 }
 
 /// A FrameAllocator that always returns `None`.
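Worth noting: map_heap subtracts one from the exclusive heap_end before building the inclusive page range, so the last page is only mapped if the heap actually extends into it. With the constants from src/main.rs this gives exactly ten mapped frames. A small host-side sketch of that arithmetic (the addresses are just the example values from this commit):

    fn main() {
        let (heap_start, heap_end) = (0x4000_0000u64, 0x4000_a000u64);
        let page_size = 0x1000u64;

        let start_page = heap_start / page_size;   // page containing the first heap byte
        let end_page = (heap_end - 1) / page_size; // page containing the last heap byte
        let pages = end_page - start_page + 1;     // Page::range_inclusive covers both ends

        assert_eq!(pages, 10);
    }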

src/memory/allocator.rs (new file, 49 lines)
View File

@@ -0,0 +1,49 @@
+pub use bump::BumpAllocator;
+pub use dummy::DummyAllocator;
+pub use linked_list::LinkedListAllocator;
+pub use bucket::BucketAllocator;
+
+use core::alloc::{GlobalAlloc, Layout};
+use spin::{Mutex, MutexGuard};
+
+mod bump;
+mod dummy;
+mod linked_list;
+mod bucket;
+
+pub struct LockedAllocator<T> {
+    allocator: Mutex<T>,
+}
+
+impl<T> LockedAllocator<T> {
+    pub const fn new(allocator: T) -> Self {
+        Self {
+            allocator: Mutex::new(allocator),
+        }
+    }
+}
+
+impl<T> LockedAllocator<T> {
+    pub fn lock(&self) -> MutexGuard<T> {
+        self.allocator.lock()
+    }
+}
+
+unsafe impl<T> GlobalAlloc for LockedAllocator<T>
+where
+    T: MutGlobalAlloc,
+{
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        self.allocator.lock().alloc(layout)
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        self.allocator.lock().dealloc(ptr, layout)
+    }
+}
+
+pub trait MutGlobalAlloc {
+    fn alloc(&mut self, layout: Layout) -> *mut u8;
+    fn dealloc(&mut self, ptr: *mut u8, layout: Layout);
+}

View File

@@ -0,0 +1,126 @@
+use super::MutGlobalAlloc;
+use core::alloc::Layout;
+use core::mem::size_of;
+use core::fmt::{self, Debug};
+use core::cmp;
+
+#[derive(Debug)]
+pub struct BucketAllocator<A> where A: MutGlobalAlloc + Debug {
+    underlying: A,
+    buckets: [Bucket; 10],
+}
+
+impl<A> BucketAllocator<A> where A: MutGlobalAlloc + Debug {
+    pub const fn new(underlying: A) -> Self {
+        Self {
+            underlying,
+            buckets: [
+                Bucket::new(size_of::<Region>()),
+                Bucket::new(16),
+                Bucket::new(32),
+                Bucket::new(64),
+                Bucket::new(128),
+                Bucket::new(256),
+                Bucket::new(512),
+                Bucket::new(1024),
+                Bucket::new(2048),
+                Bucket::new(4096),
+            ]
+        }
+    }
+
+    pub fn underlying(&mut self) -> &mut A {
+        &mut self.underlying
+    }
+}
+
+pub struct Bucket {
+    size: usize,
+    head: Option<&'static mut Region>,
+}
+
+impl Bucket {
+    const fn new(size: usize) -> Self {
+        Bucket {
+            size,
+            head: None,
+        }
+    }
+
+    fn layout(&self) -> Layout {
+        Layout::from_size_align(self.size, self.size).unwrap()
+    }
+}
+
+impl fmt::Debug for Bucket {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let mut regions = 0;
+        let mut current = &self.head;
+        while let Some(region) = current {
+            current = &region.next;
+            regions += 1;
+        }
+        f.debug_struct("Bucket").field("size", &self.size).field("regions", &regions).finish()
+    }
+}
+
+#[derive(Debug)]
+struct Region {
+    next: Option<&'static mut Region>,
+}
+
+impl Region {
+    fn new() -> Self {
+        Self {
+            next: None,
+        }
+    }
+
+    fn as_mut_u8(&'static mut self) -> *mut u8 {
+        self as *mut Region as *mut u8
+    }
+
+    unsafe fn from_mut_u8(ptr: *mut u8) -> &'static mut Self {
+        (ptr as *mut Region).write(Region::new());
+        &mut *(ptr as *mut Region)
+    }
+}
+
+impl<A> BucketAllocator<A> where A: MutGlobalAlloc + Debug {
+    fn get_bucket_index(&self, layout: &Layout) -> Option<usize> {
+        let required_bucket_size = cmp::max(layout.size(), layout.align());
+        match self.buckets.binary_search_by(|bucket| bucket.size.cmp(&required_bucket_size)) {
+            Ok(index) => Some(index),
+            Err(index) if index < self.buckets.len() => Some(index),
+            Err(_) => None,
+        }
+    }
+}
+
+impl<A> MutGlobalAlloc for BucketAllocator<A> where A: MutGlobalAlloc + Debug {
+    fn alloc(&mut self, layout: Layout) -> *mut u8 {
+        if let Some(bucket_index) = self.get_bucket_index(&layout) {
+            let bucket = &mut self.buckets[bucket_index];
+            if let Some(head) = bucket.head.take() {
+                let next = head.next.take();
+                bucket.head = next;
+                return head.as_mut_u8();
+            } else {
+                self.underlying.alloc(bucket.layout())
+            }
+        } else {
+            self.underlying.alloc(layout)
+        }
+    }
+
+    fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
+        if let Some(bucket_index) = self.get_bucket_index(&layout) {
+            let bucket = &mut self.buckets[bucket_index];
+            let region = unsafe { Region::from_mut_u8(ptr) };
+            region.next = bucket.head.take();
+            bucket.head = Some(region);
+        } else {
+            self.underlying.dealloc(ptr, layout);
+        }
+    }
+}
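The core of BucketAllocator is get_bucket_index: binary_search_by over the sorted bucket sizes either hits an exact size (Ok) or returns the insertion point (Err), which names the next larger bucket, and anything above 4096 bytes falls through to the underlying allocator. A host-side sketch of that selection rule (the size table mirrors the buckets above, assuming size_of::<Region>() is 8 on x86_64):

    fn bucket_for(sizes: &[usize], size: usize, align: usize) -> Option<usize> {
        let required = size.max(align); // same rule as get_bucket_index
        match sizes.binary_search(&required) {
            Ok(i) => Some(i),                     // exact fit
            Err(i) if i < sizes.len() => Some(i), // round up to the next bucket
            Err(_) => None,                       // too large: use the underlying allocator
        }
    }

    fn main() {
        let sizes = [8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096];
        assert_eq!(bucket_for(&sizes, 24, 8), Some(2));   // lands in the 32-byte bucket
        assert_eq!(bucket_for(&sizes, 4096, 8), Some(9)); // exact match, largest bucket
        assert_eq!(bucket_for(&sizes, 5000, 8), None);    // falls through
    }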

View File

@@ -0,0 +1,38 @@
+use super::MutGlobalAlloc;
+use core::alloc::Layout;
+use x86_64::align_up;
+
+pub struct BumpAllocator {
+    heap_start: u64,
+    heap_end: u64,
+    next: u64,
+}
+
+impl BumpAllocator {
+    pub const fn new(heap_start: u64, heap_end: u64) -> Self {
+        Self {
+            heap_start,
+            heap_end,
+            next: heap_start,
+        }
+    }
+}
+
+impl MutGlobalAlloc for BumpAllocator {
+    fn alloc(&mut self, layout: Layout) -> *mut u8 {
+        let alloc_start = align_up(self.next, layout.align() as u64);
+        let alloc_end = alloc_start.saturating_add(layout.size() as u64);
+
+        if alloc_end >= self.heap_end {
+            // out of memory
+            return 0 as *mut u8;
+        }
+
+        self.next = alloc_end;
+        alloc_start as *mut u8
+    }
+
+    fn dealloc(&mut self, _ptr: *mut u8, _layout: Layout) {
+        panic!("BumpAllocator::dealloc called");
+    }
+}
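BumpAllocator::alloc is pure pointer arithmetic: round the bump pointer up to the requested alignment, add the size, and refuse the allocation if that would run past heap_end; dealloc is unsupported. A host-side sketch of the same arithmetic, with align_up written out by hand (the real code uses x86_64::align_up):

    fn align_up(addr: u64, align: u64) -> u64 {
        (addr + align - 1) & !(align - 1) // align must be a power of two
    }

    fn main() {
        let heap_start = 0x4000_0000u64;
        let mut next = heap_start + 13; // pretend an earlier allocation ended here
        let (size, align) = (32u64, 16u64);

        let alloc_start = align_up(next, align); // 0x4000_0010
        let alloc_end = alloc_start + size;      // 0x4000_0030
        next = alloc_end;                        // the bump pointer only moves forward

        assert_eq!(alloc_start, 0x4000_0010);
        assert_eq!(next, 0x4000_0030);
    }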

View File

@@ -0,0 +1,16 @@
+use core::alloc::{GlobalAlloc, Layout};
+
+/// A dummy allocator that panics on every `alloc` or `dealloc` call.
+pub struct DummyAllocator;
+
+unsafe impl GlobalAlloc for DummyAllocator {
+    /// Always panics.
+    unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
+        panic!("DummyAllocator::alloc called");
+    }
+
+    /// Always panics.
+    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
+        panic!("DummyAllocator::dealloc called");
+    }
+}

View File

@@ -0,0 +1,95 @@
+use super::MutGlobalAlloc;
+use core::alloc::Layout;
+use core::mem;
+use x86_64::{align_up, VirtAddr};
+
+#[derive(Debug)]
+pub struct LinkedListAllocator {
+    head: Region,
+}
+
+impl LinkedListAllocator {
+    pub const fn empty() -> Self {
+        let head = Region {
+            size: 0,
+            next: None,
+        };
+        Self { head }
+    }
+
+    pub unsafe fn new(heap_start: VirtAddr, heap_size: u64) -> Self {
+        let mut allocator = Self::empty();
+        allocator.add_memory(heap_start, heap_size);
+        allocator
+    }
+
+    pub fn add_memory(&mut self, start: VirtAddr, size: u64) {
+        let aligned = start.align_up(mem::size_of::<Region>() as u64);
+        let mut region = Region {
+            size: size - (aligned - start),
+            next: None,
+        };
+        mem::swap(&mut self.head.next, &mut region.next);
+        let region_ptr: *mut Region = aligned.as_mut_ptr();
+        unsafe { region_ptr.write(region) };
+        self.head.next = Some(unsafe { &mut *region_ptr });
+    }
+}
+
+impl MutGlobalAlloc for LinkedListAllocator {
+    fn alloc(&mut self, layout: Layout) -> *mut u8 {
+        let size = align_up(layout.size() as u64, mem::size_of::<Region>() as u64);
+        let mut current = &mut self.head;
+        loop {
+            let next = match current.next {
+                Some(ref mut next) => next,
+                None => break,
+            };
+            let next_start = VirtAddr::new(*next as *mut Region as u64);
+            let next_end = next_start + next.size;
+            let alloc_start = next_start.align_up(layout.align() as u64);
+            let alloc_end = alloc_start + size;
+
+            // check if Region large enough
+            if alloc_end <= next_end {
+                // remove Region from list
+                let next_next = next.next.take();
+                current.next = next_next;
+
+                // insert remaining Region to list
+                self.add_memory(alloc_end, next_end - alloc_end);
+
+                // return allocated memory
+                return alloc_start.as_mut_ptr();
+            }
+
+            // continue with next element
+            //
+            // This is basically `current = next`, but we need a new `match` expression because
+            // the compiler can't figure the lifetimes out when we use the `next` binding
+            // from above.
+            current = match current.next {
+                Some(ref mut next) => next,
+                None => unreachable!(),
+            };
+        }
+
+        // no large enough Region found
+        0 as *mut u8
+    }
+
+    fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
+        let size = align_up(layout.size() as u64, mem::size_of::<Region>() as u64);
+        self.add_memory(VirtAddr::new(ptr as u64), size);
+    }
+}
+
+#[derive(Debug)]
+struct Region {
+    size: u64,
+    next: Option<&'static mut Region>,
+}
+
+// TODO recycle alignment
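One detail worth calling out: both alloc and dealloc round the requested size up to a multiple of size_of::<Region>(), so every block that later lands on the free list is guaranteed to have room for the Region header that links it in. A host-side sketch of that rounding (assuming the pointer-sized, 8-byte Region of this commit; the division trick is equivalent to align_up for the power-of-two 8):

    fn round_to_region(size: u64) -> u64 {
        let region = 8; // assumed size_of::<Region>() on x86_64
        (size + region - 1) / region * region
    }

    fn main() {
        assert_eq!(round_to_region(1), 8); // even a 1-byte allocation fills a header slot
        assert_eq!(round_to_region(8), 8);
        assert_eq!(round_to_region(20), 24);
    }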