Mirror of https://github.com/phil-opp/blog_os.git, synced 2025-12-16 14:27:49 +00:00

Commit: Rewrite bump allocator and integrate it in main crate
Cargo.toml

@@ -12,9 +12,7 @@ rlibc = "1.0"
 spin = "0.4.5"
 volatile = "0.1.0"
 x86_64 = "0.1.2"
+linked_list_allocator = "0.4.2"
-
-[dependencies.hole_list_allocator]
-path = "libs/hole_list_allocator"
 
 [dependencies.lazy_static]
 features = ["spin_no_std"]
libs/bump_allocator/Cargo.toml (deleted)

@@ -1,7 +0,0 @@
-[package]
-authors = ["Philipp Oppermann <dev@phil-opp.com>"]
-name = "bump_allocator"
-version = "0.1.0"
-
-[dependencies]
-spin = "0.4.5"
libs/bump_allocator/src/lib.rs (deleted)

@@ -1,141 +0,0 @@
-// Copyright 2016 Philipp Oppermann. See the README.md
-// file at the top-level directory of this distribution.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(const_fn)]
-#![feature(allocator_api)]
-#![feature(alloc)]
-#![feature(global_allocator)]
-
-#![no_std]
-
-use alloc::heap::{Alloc, AllocErr, Layout};
-use spin::Mutex;
-
-extern crate alloc;
-extern crate spin;
-
-
-struct LockedHeap {
-    heap: Mutex<Heap>,
-}
-
-
-#[global_allocator]
-static GLOBAL_ALLOC: LockedHeap = LockedHeap::empty();
-
-
-pub unsafe fn init(start: usize, size: usize) {
-    GLOBAL_ALLOC.init(start, size);
-}
-
-/// The heap is protected by the LockedHeap structure.
-impl LockedHeap {
-    /// Creates a protected empty heap. All allocate calls will return
-    /// `AllocErr`.
-    pub const fn empty() -> LockedHeap {
-        LockedHeap {
-            heap: Mutex::new(Heap::empty())
-        }
-    }
-
-    /// Initializes the heap.
-    unsafe fn init(&self, start: usize, size: usize) {
-        self.heap.lock().init(start, size);
-    }
-}
-
-/// The interface used for all allocation of heap structures.
-unsafe impl<'a> Alloc for &'a LockedHeap {
-    unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
-        self.heap.lock().allocate(layout)
-    }
-
-    unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
-        self.heap.lock().dealloc(ptr, layout)
-    }
-}
-
-
-/// A fixed size heap with a reference to the beginning of free space.
-pub struct Heap {
-    start: usize,
-    end: usize,
-    next: usize,
-}
-
-impl Heap {
-    /// Creates an empty heap.
-    ///
-    /// All allocate calls will return `AllocErr`.
-    pub const fn empty() -> Heap {
-        Heap {
-            start: 0,
-            end: 0,
-            next: 0,
-        }
-    }
-
-    /// Initializes the heap given start and size.
-    ///
-    /// # Safety
-    ///
-    /// This is unsafe, the start address must be valid and the memory
-    /// in the `[start, start + size)` range must not be used for
-    /// anything else. The function is unsafe because it can cause
-    /// undefined behavior if the given address or size are invalid.
-    unsafe fn init(&mut self, start: usize, size: usize) {
-        self.start = start;
-        self.end = start + size;
-        self.next = start;
-    }
-
-    /// Allocates a chunk of the given size with the given alignment.
-    ///
-    /// Returns a pointer to the beginning of that chunk if it was
-    /// successful, else it returns an `AllocErr`.
-    unsafe fn allocate(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
-        let alloc_start = align_up(self.next, layout.align());
-        let alloc_end = alloc_start.saturating_add(layout.size());
-
-        if alloc_end <= self.end {
-            self.next = alloc_end;
-            Ok(alloc_start as *mut u8)
-        } else {
-            Err(AllocErr::Exhausted { request: layout })
-        }
-    }
-
-    /// Deallocates the block referred to by the given pointer and
-    /// described by the layout.
-    unsafe fn dealloc(&mut self, _ptr: *mut u8, _layout: Layout) {
-        // So far nothing - don't worry, RAM is cheap
-    }
-}
-
-
-/// Align downwards. Returns the greatest x with alignment `align`
-/// so that x <= addr. The alignment must be a power of 2.
-pub fn align_down(addr: usize, align: usize) -> usize {
-    if align.is_power_of_two() {
-        addr & !(align - 1)
-    } else if align == 0 {
-        addr
-    } else {
-        panic!("`align` must be a power of 2");
-    }
-}
-
-/// Align upwards. Returns the smallest x with alignment `align`
-/// so that x >= addr. The alignment must be a power of 2.
-pub fn align_up(addr: usize, align: usize) -> usize {
-    align_down(addr + align - 1, align)
-}
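The alignment helpers deleted here reappear verbatim in src/memory/heap_allocator.rs below. As a standalone sketch of the bit trick they rely on (hosted Rust, arbitrary example values, `align == 0` branch dropped for brevity; illustration only, not part of the commit):

// Illustration of the align_down/align_up bit trick; values are arbitrary.
fn align_down(addr: usize, align: usize) -> usize {
    assert!(align.is_power_of_two());
    // For a power-of-two `align`, `align - 1` is a mask of the low bits;
    // clearing them rounds down to the nearest multiple of `align`.
    addr & !(align - 1)
}

fn align_up(addr: usize, align: usize) -> usize {
    // Adding `align - 1` first turns the round-down into a round-up.
    align_down(addr + align - 1, align)
}

fn main() {
    assert_eq!(align_down(0x1234, 0x1000), 0x1000);
    assert_eq!(align_up(0x1234, 0x1000), 0x2000);
    assert_eq!(align_up(0x2000, 0x1000), 0x2000); // already aligned: unchanged
}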
libs/hole_list_allocator/.gitignore (vendored, deleted)

@@ -1,2 +0,0 @@
-# Generated by Cargo
-/target/
libs/hole_list_allocator/Cargo.toml (deleted)

@@ -1,12 +0,0 @@
-[package]
-authors = ["Philipp Oppermann <dev@phil-opp.com>"]
-name = "hole_list_allocator"
-version = "0.1.0"
-
-[dependencies]
-linked_list_allocator = { git = "https://github.com/phil-opp/linked-list-allocator.git" }
-spin = "0.4.5"
-
-[dependencies.lazy_static]
-version = "0.2.1"
-features = ["spin_no_std"]
libs/hole_list_allocator/src/lib.rs (deleted)

@@ -1,55 +0,0 @@
-// Copyright 2016 Philipp Oppermann. See the README.md
-// file at the top-level directory of this distribution.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(const_fn)]
-#![feature(allocator_api)]
-#![feature(alloc)]
-#![feature(global_allocator)]
-#![no_std]
-#![deny(warnings)]
-
-extern crate alloc;
-extern crate spin;
-extern crate linked_list_allocator;
-
-use alloc::heap::{Alloc, AllocErr, Layout};
-use spin::Mutex;
-use linked_list_allocator::Heap;
-
-
-static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
-
-// Set up the heap
-pub unsafe fn init(offset: usize, size: usize) {
-    *HEAP.lock() = Some(Heap::new(offset, size));
-}
-
-pub struct Allocator;
-
-unsafe impl<'a> Alloc for &'a Allocator {
-    unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
-        if let Some(ref mut heap) = *HEAP.lock() {
-            heap.allocate_first_fit(layout)
-        } else {
-            panic!("Heap not initialized!");
-        }
-    }
-
-    unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
-        if let Some(ref mut heap) = *HEAP.lock() {
-            heap.deallocate(ptr, layout)
-        } else {
-            panic!("heap not initialized");
-        }
-    }
-}
-
-// Our allocator static
-#[global_allocator]
-static GLOBAL_ALLOC: Allocator = Allocator;
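Every call through this crate pays a spinlock acquisition plus an is-initialized check; the commit drops it in favor of linked_list_allocator's LockedHeap, whose const empty() constructor makes the Option wrapper unnecessary. A hosted-Rust model of the deleted pattern (hypothetical names, placeholder Heap standing in for the real one; illustration only):

use std::sync::Mutex;

// Placeholder standing in for linked_list_allocator::Heap.
struct Heap { next: usize, end: usize }

// The deleted crate kept the heap behind a Mutex<Option<...>> because a
// real heap needs a runtime start address, but statics need const init.
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);

fn init(start: usize, size: usize) {
    *HEAP.lock().unwrap() = Some(Heap { next: start, end: start + size });
}

fn allocate(size: usize) -> usize {
    let mut guard = HEAP.lock().unwrap();
    // Same guard the real crate had: panic if allocation precedes init.
    let heap = guard.as_mut().expect("heap not initialized");
    let addr = heap.next;
    assert!(addr + size <= heap.end, "out of memory");
    heap.next += size;
    addr
}

fn main() {
    init(0x1000, 0x4000);
    assert_eq!(allocate(64), 0x1000);
    assert_eq!(allocate(64), 0x1040);
}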
src/lib.rs (40 lines changed)

@@ -13,9 +13,13 @@
 #![feature(asm)]
 #![feature(naked_functions)]
 #![feature(abi_x86_interrupt)]
-#![feature(const_unique_new)]
+#![feature(const_unique_new, const_atomic_usize_new)]
+#![feature(allocator_api)]
+#![feature(global_allocator)]
 #![no_std]
 
+#[macro_use]
+extern crate alloc;
 extern crate rlibc;
 extern crate volatile;
 extern crate spin;
@@ -25,25 +29,15 @@ extern crate bitflags;
 extern crate x86_64;
 #[macro_use]
 extern crate once;
-extern crate bit_field;
+extern crate linked_list_allocator;
 #[macro_use]
 extern crate lazy_static;
-extern crate hole_list_allocator as allocator;
-#[macro_use]
-extern crate alloc;
-
+extern crate bit_field;
 #[macro_use]
 mod vga_buffer;
 mod memory;
-
 mod interrupts;
-
-
-pub const HEAP_START: usize = 0o_000_001_000_000_0000;
-pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
-
 
 #[no_mangle]
 pub extern "C" fn rust_main(multiboot_information_address: usize) {
     // ATTENTION: we have a very small stack and no guard page
@@ -57,9 +51,13 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) {
     // set up guard page and map the heap pages
     let mut memory_controller = memory::init(boot_info);
 
+    unsafe {
+        HEAP_ALLOCATOR.lock().init(HEAP_START, HEAP_START + HEAP_SIZE);
+    }
+
     // initialize our IDT
     interrupts::init(&mut memory_controller);
 
     fn stack_overflow() {
         stack_overflow(); // for each recursion, the return address is pushed
     }
@@ -67,6 +65,7 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) {
     // trigger a stack overflow
     stack_overflow();
 
+
     println!("It did not crash!");
     loop {}
 }
@@ -101,8 +100,13 @@ pub extern "C" fn panic_fmt(fmt: core::fmt::Arguments, file: &'static str, line: u32) -> ! {
     loop {}
 }
 
-#[allow(non_snake_case)]
 #[no_mangle]
-pub extern "C" fn _Unwind_Resume() -> ! {
-    loop {}
-}
+pub extern "C" fn _Unwind_Resume() -> ! { loop {} }
+
+use linked_list_allocator::LockedHeap;
+
+pub const HEAP_START: usize = 0o_000_001_000_000_0000;
+pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
+
+#[global_allocator]
+static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();
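With the feature gates, extern crate alloc, and the #[global_allocator] static in place, alloc's collection types work anywhere in the kernel after the init call in rust_main. A hypothetical smoke test (not part of this commit) that could run right after the unsafe init block:

// Hypothetical check, assuming the setup from the diff above; Box and Vec
// route through HEAP_ALLOCATOR once it has been initialized.
fn heap_smoke_test() {
    use alloc::boxed::Box;
    use alloc::vec::Vec;

    let boxed = Box::new(42u64); // a single fixed-size allocation
    let mut vec: Vec<u32> = Vec::new();
    for i in 0..100 {
        vec.push(i); // growth forces realloc + dealloc cycles
    }
    assert_eq!(*boxed, 42);
    assert_eq!(vec.iter().sum::<u32>(), 4950); // 0 + 1 + ... + 99
}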
src/memory/heap_allocator.rs (new file, 62 lines)

@@ -0,0 +1,62 @@
+use alloc::heap::{Alloc, AllocErr, Layout};
+
+use core::sync::atomic::{AtomicUsize, Ordering};
+
+/// A simple allocator that allocates memory linearly and ignores freed memory.
+#[derive(Debug)]
+pub struct BumpAllocator {
+    heap_start: usize,
+    heap_end: usize,
+    next: AtomicUsize,
+}
+
+impl BumpAllocator {
+    pub const fn new(heap_start: usize, heap_end: usize) -> Self {
+        Self { heap_start, heap_end, next: AtomicUsize::new(heap_start) }
+    }
+}
+
+unsafe impl<'a> Alloc for &'a BumpAllocator {
+    unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
+        loop {
+            // load current state of the `next` field
+            let current_next = self.next.load(Ordering::Relaxed);
+            let alloc_start = align_up(current_next, layout.align());
+            let alloc_end = alloc_start.saturating_add(layout.size());
+
+            if alloc_end <= self.heap_end {
+                // update the `next` pointer if it still has the value `current_next`
+                let next_now = self.next.compare_and_swap(current_next, alloc_end,
+                    Ordering::Relaxed);
+                if next_now == current_next {
+                    // next address was successfully updated, allocation succeeded
+                    return Ok(alloc_start as *mut u8);
+                }
+            } else {
+                return Err(AllocErr::Exhausted { request: layout });
+            }
+        }
+    }
+
+    unsafe fn dealloc(&mut self, _ptr: *mut u8, _layout: Layout) {
+        // do nothing, leak memory
+    }
+}
+
+/// Align downwards. Returns the greatest x with alignment `align`
+/// so that x <= addr. The alignment must be a power of 2.
+pub fn align_down(addr: usize, align: usize) -> usize {
+    if align.is_power_of_two() {
+        addr & !(align - 1)
+    } else if align == 0 {
+        addr
+    } else {
+        panic!("`align` must be a power of 2");
+    }
+}
+
+/// Align upwards. Returns the smallest x with alignment `align`
+/// so that x >= addr. The alignment must be a power of 2.
+pub fn align_up(addr: usize, align: usize) -> usize {
+    align_down(addr + align - 1, align)
+}
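The loop around compare_and_swap is what makes the rewritten allocator safe without a lock: between the load and the swap another core may have bumped `next`, in which case the swap fails and the reservation is retried against the fresh value. A hosted-Rust model of the same reservation scheme (hypothetical; it uses compare_exchange, the non-deprecated equivalent of compare_and_swap, and checked_add to surface overflow as exhaustion):

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

// Model of the bump allocator's lock-free reservation: each caller claims
// a disjoint [start, start + size) region of a shared counter.
fn bump(next: &AtomicUsize, end: usize, size: usize) -> Option<usize> {
    loop {
        let current = next.load(Ordering::Relaxed);
        let alloc_end = current.checked_add(size)?;
        if alloc_end > end {
            return None; // out of heap
        }
        // Succeeds only if no other thread moved `next` in the meantime.
        match next.compare_exchange(current, alloc_end, Ordering::Relaxed, Ordering::Relaxed) {
            Ok(_) => return Some(current),
            Err(_) => continue, // lost the race, retry with the fresh value
        }
    }
}

fn main() {
    let next = Arc::new(AtomicUsize::new(0));
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let next = Arc::clone(&next);
            thread::spawn(move || (0..1000).filter_map(|_| bump(&next, usize::MAX, 8)).count())
        })
        .collect();
    let total: usize = handles.into_iter().map(|h| h.join().unwrap()).sum();
    assert_eq!(total, 4000);
    assert_eq!(next.load(Ordering::Relaxed), 4000 * 8); // no region handed out twice
}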
src/memory/mod.rs

@@ -12,9 +12,9 @@ pub use self::paging::remap_the_kernel;
 pub use self::stack_allocator::Stack;
 use self::paging::PhysicalAddress;
 use multiboot2::BootInformation;
-use allocator;
 
 mod area_frame_allocator;
+pub mod heap_allocator;
 mod paging;
 mod stack_allocator;
@@ -72,11 +72,6 @@ pub fn init(boot_info: &BootInformation) -> MemoryController {
         active_table.map(page, paging::WRITABLE, &mut frame_allocator);
     }
 
-    // Init the heap
-    unsafe {
-        allocator::init(HEAP_START, HEAP_SIZE);
-    }
-
    let stack_allocator = {
         let stack_alloc_start = heap_end_page + 1;
         let stack_alloc_end = stack_alloc_start + 100;
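Because heap_allocator is public, the rewritten bump allocator could later be swapped in for LockedHeap without touching this module again. A hypothetical alternative wiring in src/lib.rs (not what this commit does; it keeps linked_list_allocator as the global allocator):

// Hypothetical: register the lock-free BumpAllocator instead of LockedHeap.
// Works because `&BumpAllocator` implements `Alloc` and `new` is const.
use memory::heap_allocator::BumpAllocator;

#[global_allocator]
static HEAP_ALLOCATOR: BumpAllocator = BumpAllocator::new(HEAP_START, HEAP_START + HEAP_SIZE);

// No runtime init call is needed in rust_main: the bounds are baked in at
// compile time, though the heap pages must still be mapped before first use.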