Mirror of https://github.com/phil-opp/blog_os.git

Comparing post-10...70b46ee8cf (110 commits)
README.md
@@ -1,10 +1,10 @@
-# Blog OS (Heap Allocation)
+# Blog OS (Allocator Designs)
 
-[](https://github.com/phil-opp/blog_os/actions?query=workflow%3A%22Code%22+branch%3Apost-10)
+[](https://github.com/phil-opp/blog_os/actions?query=workflow%3A%22Code%22+branch%3Apost-11)
 
-This repository contains the source code for the [Heap Allocation][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series.
+This repository contains the source code for the [Allocator Designs][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series.
 
-[post]: https://os.phil-opp.com/heap-allocation/
+[post]: https://os.phil-opp.com/allocator-designs/
 
 **Check out the [master branch](https://github.com/phil-opp/blog_os) for more information.**
src/allocator.rs
@@ -1,6 +1,6 @@
 use alloc::alloc::{GlobalAlloc, Layout};
 use core::ptr::null_mut;
-use linked_list_allocator::LockedHeap;
+use fixed_size_block::FixedSizeBlockAllocator;
 use x86_64::{
     VirtAddr,
     structures::paging::{
@@ -8,11 +8,15 @@ use x86_64::{
     },
 };
 
+pub mod bump;
+pub mod fixed_size_block;
+pub mod linked_list;
+
 pub const HEAP_START: usize = 0x_4444_4444_0000;
 pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
 
 #[global_allocator]
-static ALLOCATOR: LockedHeap = LockedHeap::empty();
+static ALLOCATOR: Locked<FixedSizeBlockAllocator> = Locked::new(FixedSizeBlockAllocator::new());
 
 pub fn init_heap(
     mapper: &mut impl Mapper<Size4KiB>,
@@ -52,3 +56,27 @@ unsafe impl GlobalAlloc for Dummy {
         panic!("dealloc should be never called")
     }
 }
+
+/// A wrapper around spin::Mutex to permit trait implementations.
+pub struct Locked<A> {
+    inner: spin::Mutex<A>,
+}
+
+impl<A> Locked<A> {
+    pub const fn new(inner: A) -> Self {
+        Locked {
+            inner: spin::Mutex::new(inner),
+        }
+    }
+
+    pub fn lock(&self) -> spin::MutexGuard<A> {
+        self.inner.lock()
+    }
+}
+
+/// Align the given address `addr` upwards to alignment `align`.
+///
+/// Requires that `align` is a power of two.
+fn align_up(addr: usize, align: usize) -> usize {
+    (addr + align - 1) & !(align - 1)
+}
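Two details of the hunk above are easy to miss. `Locked` is a thin newtype around `spin::Mutex`: Rust's orphan rule forbids implementing the foreign `GlobalAlloc` trait directly for the foreign `spin::Mutex<A>`, so the allocator modules implement it for this local wrapper instead. And `align_up` uses a bitmask trick that only works for power-of-two alignments: adding `align - 1` and then clearing the low bits rounds the address up to the next multiple. A minimal hosted sketch of that behavior (the `main` harness and the example addresses are illustrative, not part of the commit):

// Same bitmask trick as `align_up` in the diff above; `align` must be a power of two.
fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(align_up(0x1000, 8), 0x1000); // already aligned: unchanged
    assert_eq!(align_up(0x1001, 8), 0x1008); // rounded up to the next multiple of 8
    assert_eq!(align_up(13, 16), 16);        // adding 15 then masking clears the low bits
    println!("align_up ok");
}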
src/allocator/bump.rs (new file)
@@ -0,0 +1,61 @@
use super::{Locked, align_up};
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr;

pub struct BumpAllocator {
    heap_start: usize,
    heap_end: usize,
    next: usize,
    allocations: usize,
}

impl BumpAllocator {
    /// Creates a new empty bump allocator.
    pub const fn new() -> Self {
        BumpAllocator {
            heap_start: 0,
            heap_end: 0,
            next: 0,
            allocations: 0,
        }
    }

    /// Initializes the bump allocator with the given heap bounds.
    ///
    /// This method is unsafe because the caller must ensure that the given
    /// memory range is unused. Also, this method must be called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.heap_start = heap_start;
        self.heap_end = heap_start.saturating_add(heap_size);
        self.next = heap_start;
    }
}

unsafe impl GlobalAlloc for Locked<BumpAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut bump = self.lock(); // get a mutable reference

        let alloc_start = align_up(bump.next, layout.align());
        let alloc_end = match alloc_start.checked_add(layout.size()) {
            Some(end) => end,
            None => return ptr::null_mut(),
        };

        if alloc_end > bump.heap_end {
            ptr::null_mut() // out of memory
        } else {
            bump.next = alloc_end;
            bump.allocations += 1;
            alloc_start as *mut u8
        }
    }

    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        let mut bump = self.lock(); // get a mutable reference

        bump.allocations -= 1;
        if bump.allocations == 0 {
            bump.next = bump.heap_start;
        }
    }
}
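The defining quirk of this design is the `allocations` counter: `dealloc` cannot free individual blocks, so the whole heap is reclaimed only once the counter drops back to zero. A hosted mock of just that counter logic, with plain integers standing in for raw kernel memory (the `Bump` type and the numbers below are illustrative; the real allocator also honors alignment):

// Traces `next` and `allocations` to show why memory returns only after the last free.
struct Bump {
    heap_start: usize,
    heap_end: usize,
    next: usize,
    allocations: usize,
}

impl Bump {
    fn alloc(&mut self, size: usize) -> Option<usize> {
        let start = self.next;
        let end = start.checked_add(size)?;
        if end > self.heap_end {
            return None; // out of memory
        }
        self.next = end;
        self.allocations += 1;
        Some(start)
    }

    fn dealloc(&mut self) {
        self.allocations -= 1;
        if self.allocations == 0 {
            self.next = self.heap_start; // whole heap reusable again
        }
    }
}

fn main() {
    let mut b = Bump { heap_start: 0, heap_end: 100, next: 0, allocations: 0 };
    b.alloc(40).unwrap();
    b.alloc(40).unwrap();
    b.dealloc(); // freeing one allocation reclaims nothing...
    assert_eq!(b.next, 80);
    b.dealloc(); // ...only freeing the last one resets the heap
    assert_eq!(b.next, 0);
    println!("bump counter logic ok");
}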
src/allocator/fixed_size_block.rs (new file)
@@ -0,0 +1,109 @@
use super::Locked;
use alloc::alloc::{GlobalAlloc, Layout};
use core::{
    mem,
    ptr::{self, NonNull},
};

/// The block sizes to use.
///
/// The sizes must each be power of 2 because they are also used as
/// the block alignment (alignments must be always powers of 2).
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];

/// Choose an appropriate block size for the given layout.
///
/// Returns an index into the `BLOCK_SIZES` array.
fn list_index(layout: &Layout) -> Option<usize> {
    let required_block_size = layout.size().max(layout.align());
    BLOCK_SIZES.iter().position(|&s| s >= required_block_size)
}

struct ListNode {
    next: Option<&'static mut ListNode>,
}

pub struct FixedSizeBlockAllocator {
    list_heads: [Option<&'static mut ListNode>; BLOCK_SIZES.len()],
    fallback_allocator: linked_list_allocator::Heap,
}

impl FixedSizeBlockAllocator {
    /// Creates an empty FixedSizeBlockAllocator.
    pub const fn new() -> Self {
        const EMPTY: Option<&'static mut ListNode> = None;
        FixedSizeBlockAllocator {
            list_heads: [EMPTY; BLOCK_SIZES.len()],
            fallback_allocator: linked_list_allocator::Heap::empty(),
        }
    }

    /// Initialize the allocator with the given heap bounds.
    ///
    /// This function is unsafe because the caller must guarantee that the given
    /// heap bounds are valid and that the heap is unused. This method must be
    /// called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        unsafe {
            self.fallback_allocator.init(heap_start, heap_size);
        }
    }

    /// Allocates using the fallback allocator.
    fn fallback_alloc(&mut self, layout: Layout) -> *mut u8 {
        match self.fallback_allocator.allocate_first_fit(layout) {
            Ok(ptr) => ptr.as_ptr(),
            Err(_) => ptr::null_mut(),
        }
    }
}

unsafe impl GlobalAlloc for Locked<FixedSizeBlockAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                match allocator.list_heads[index].take() {
                    Some(node) => {
                        allocator.list_heads[index] = node.next.take();
                        node as *mut ListNode as *mut u8
                    }
                    None => {
                        // no block exists in list => allocate new block
                        let block_size = BLOCK_SIZES[index];
                        // only works if all block sizes are a power of 2
                        let block_align = block_size;
                        let layout = Layout::from_size_align(block_size, block_align).unwrap();
                        allocator.fallback_alloc(layout)
                    }
                }
            }
            None => allocator.fallback_alloc(layout),
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                let new_node = ListNode {
                    next: allocator.list_heads[index].take(),
                };
                // verify that block has size and alignment required for storing node
                assert!(mem::size_of::<ListNode>() <= BLOCK_SIZES[index]);
                assert!(mem::align_of::<ListNode>() <= BLOCK_SIZES[index]);
                let new_node_ptr = ptr as *mut ListNode;
                unsafe {
                    new_node_ptr.write(new_node);
                    allocator.list_heads[index] = Some(&mut *new_node_ptr);
                }
            }
            None => {
                let ptr = NonNull::new(ptr).unwrap();
                unsafe {
                    allocator.fallback_allocator.deallocate(ptr, layout);
                }
            }
        }
    }
}
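Every request is rounded up to a size class by `list_index`, which takes the maximum of the layout's size and alignment: a block is only guaranteed to be aligned to its own (power-of-two) block size, so alignment can dominate the choice. A standalone sketch of the lookup (this free-standing helper takes plain integers instead of a `&Layout` for brevity; the example values are illustrative):

const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];

// First size class that fits both the size and the alignment of a request.
fn list_index(size: usize, align: usize) -> Option<usize> {
    let required = size.max(align);
    BLOCK_SIZES.iter().position(|&s| s >= required)
}

fn main() {
    assert_eq!(list_index(20, 4), Some(2)); // 20 bytes -> 32-byte class
    assert_eq!(list_index(8, 64), Some(3)); // alignment dominates -> 64-byte class
    assert_eq!(list_index(4096, 8), None);  // too large -> fallback allocator
    println!("size-class lookup ok");
}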
src/allocator/linked_list.rs (new file)
@@ -0,0 +1,151 @@
use super::{Locked, align_up};
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};

struct ListNode {
    size: usize,
    next: Option<&'static mut ListNode>,
}

impl ListNode {
    const fn new(size: usize) -> Self {
        ListNode { size, next: None }
    }

    fn start_addr(&self) -> usize {
        self as *const Self as usize
    }

    fn end_addr(&self) -> usize {
        self.start_addr() + self.size
    }
}

pub struct LinkedListAllocator {
    head: ListNode,
}

impl LinkedListAllocator {
    /// Creates an empty LinkedListAllocator.
    pub const fn new() -> Self {
        Self {
            head: ListNode::new(0),
        }
    }

    /// Initialize the allocator with the given heap bounds.
    ///
    /// This function is unsafe because the caller must guarantee that the given
    /// heap bounds are valid and that the heap is unused. This method must be
    /// called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        unsafe {
            self.add_free_region(heap_start, heap_size);
        }
    }

    /// Adds the given memory region to the front of the list.
    unsafe fn add_free_region(&mut self, addr: usize, size: usize) {
        // ensure that the freed region is capable of holding ListNode
        assert_eq!(align_up(addr, mem::align_of::<ListNode>()), addr);
        assert!(size >= mem::size_of::<ListNode>());

        // create a new list node and append it at the start of the list
        let mut node = ListNode::new(size);
        node.next = self.head.next.take();
        let node_ptr = addr as *mut ListNode;
        unsafe {
            node_ptr.write(node);
            self.head.next = Some(&mut *node_ptr);
        }
    }

    /// Looks for a free region with the given size and alignment and removes
    /// it from the list.
    ///
    /// Returns a tuple of the list node and the start address of the allocation.
    fn find_region(&mut self, size: usize, align: usize) -> Option<(&'static mut ListNode, usize)> {
        // reference to current list node, updated for each iteration
        let mut current = &mut self.head;
        // look for a large enough memory region in linked list
        while let Some(ref mut region) = current.next {
            if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
                // region suitable for allocation -> remove node from list
                let next = region.next.take();
                let ret = Some((current.next.take().unwrap(), alloc_start));
                current.next = next;
                return ret;
            } else {
                // region not suitable -> continue with next region
                current = current.next.as_mut().unwrap();
            }
        }

        // no suitable region found
        None
    }

    /// Try to use the given region for an allocation with given size and alignment.
    ///
    /// Returns the allocation start address on success.
    fn alloc_from_region(region: &ListNode, size: usize, align: usize) -> Result<usize, ()> {
        let alloc_start = align_up(region.start_addr(), align);
        let alloc_end = alloc_start.checked_add(size).ok_or(())?;

        if alloc_end > region.end_addr() {
            // region too small
            return Err(());
        }

        let excess_size = region.end_addr() - alloc_end;
        if excess_size > 0 && excess_size < mem::size_of::<ListNode>() {
            // rest of region too small to hold a ListNode (required because the
            // allocation splits the region in a used and a free part)
            return Err(());
        }

        // region suitable for allocation
        Ok(alloc_start)
    }

    /// Adjust the given layout so that the resulting allocated memory
    /// region is also capable of storing a `ListNode`.
    ///
    /// Returns the adjusted size and alignment as a (size, align) tuple.
    fn size_align(layout: Layout) -> (usize, usize) {
        let layout = layout
            .align_to(mem::align_of::<ListNode>())
            .expect("adjusting alignment failed")
            .pad_to_align();
        let size = layout.size().max(mem::size_of::<ListNode>());
        (size, layout.align())
    }
}

unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // perform layout adjustments
        let (size, align) = LinkedListAllocator::size_align(layout);
        let mut allocator = self.lock();

        if let Some((region, alloc_start)) = allocator.find_region(size, align) {
            let alloc_end = alloc_start.checked_add(size).expect("overflow");
            let excess_size = region.end_addr() - alloc_end;
            if excess_size > 0 {
                unsafe {
                    allocator.add_free_region(alloc_end, excess_size);
                }
            }
            alloc_start as *mut u8
        } else {
            ptr::null_mut()
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // perform layout adjustments
        let (size, _) = LinkedListAllocator::size_align(layout);

        unsafe { self.lock().add_free_region(ptr as usize, size) }
    }
}
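The subtlest function here is `alloc_from_region`: a free region is usable only if the aligned allocation fits and the leftover tail is either empty or large enough to hold a `ListNode`, because a non-empty remainder must rejoin the free list as a node of its own. A hosted sketch of that check with plain addresses (`NODE_SIZE` stands in for `mem::size_of::<ListNode>()`, 16 bytes on x86_64; the region bounds below are illustrative):

const NODE_SIZE: usize = 16; // free-list node: usize size field + optional reference

fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}

// Returns the allocation start if region [start, end) can serve `size` bytes
// at `align`, leaving no remainder or one big enough for a new list node.
fn alloc_from_region(start: usize, end: usize, size: usize, align: usize) -> Option<usize> {
    let alloc_start = align_up(start, align);
    let alloc_end = alloc_start.checked_add(size)?;
    if alloc_end > end {
        return None; // region too small
    }
    let excess = end - alloc_end;
    if excess > 0 && excess < NODE_SIZE {
        return None; // leftover too small to become a free-list node
    }
    Some(alloc_start)
}

fn main() {
    // 256-byte region at 0x1000: 24 bytes fit, remainder 232 >= NODE_SIZE
    assert_eq!(alloc_from_region(0x1000, 0x1100, 24, 8), Some(0x1000));
    // 32-byte region: the 8-byte remainder could not hold a node -> rejected
    assert_eq!(alloc_from_region(0x1000, 0x1020, 24, 8), None);
    println!("region checks ok");
}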
tests/heap_allocation.rs
@@ -54,6 +54,16 @@ fn many_boxes() {
     }
 }
 
+#[test_case]
+fn many_boxes_long_lived() {
+    let long_lived = Box::new(1); // new
+    for i in 0..HEAP_SIZE {
+        let x = Box::new(i);
+        assert_eq!(*x, i);
+    }
+    assert_eq!(*long_lived, 1); // new
+}
+
 #[panic_handler]
 fn panic(info: &PanicInfo) -> ! {
     blog_os::test_panic_handler(info)
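The `// new` comments mark the lines added relative to the existing `many_boxes` test: one allocation now stays live across the whole loop. Under the bump allocator that single live `Box` keeps the `allocations` counter above zero, so `next` is never reset and the loop exhausts the 100 KiB heap; the linked-list and fixed-size-block designs pass because they reuse freed blocks.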