Compare commits

...

110 Commits

Author SHA1 Message Date
Philipp Oppermann
70b46ee8cf Merge branch 'post-10' into post-11 2025-08-07 13:09:45 +02:00
Philipp Oppermann
4b368f2e5b Merge branch 'post-10' into post-11 2025-08-07 13:08:03 +02:00
Philipp Oppermann
121dc393f8 Run cargo fmt for post-11 2025-03-27 16:14:46 +01:00
Philipp Oppermann
08a6e13df4 Merge branch 'post-10' into post-11 2025-03-27 16:14:36 +01:00
Philipp Oppermann
94149c93f9 Merge branch 'post-10' into post-11 2025-03-27 16:13:38 +01:00
Philipp Oppermann
194fa3c3fe Merge branch 'post-10' into post-11 2025-03-27 16:13:24 +01:00
Philipp Oppermann
0da8a4bc6b Merge branch 'post-10' into post-11 2025-03-27 15:52:05 +01:00
Philipp Oppermann
6de3aeaac3 Update allocator code to use unsafe also in unsafe functions
Required since Rust 2024
2025-03-27 15:32:15 +01:00
Philipp Oppermann
a26d6c1b54 Merge branch 'post-10' into post-11 2025-03-27 15:23:12 +01:00
Philipp Oppermann
aa4f62187e Merge branch 'post-10' into post-11 2025-03-27 15:15:32 +01:00
Philipp Oppermann
69fe1aed12 Merge branch 'post-10' into post-11 2025-03-24 11:09:24 +01:00
Philipp Oppermann
bc7335ce6c Merge branch 'post-10' into post-11 2025-02-10 12:36:07 +01:00
Philipp Oppermann
6dc837db7c Merge branch 'post-10' into post-11 2025-02-10 11:16:22 +01:00
Philipp Oppermann
4f6558c021 Merge branch 'post-10' into post-11 2025-01-15 19:58:30 +01:00
Philipp Oppermann
9ea911b0bb Remove stable const_mut_refs feature 2025-01-15 19:41:40 +01:00
Philipp Oppermann
39ee714093 Merge branch 'post-10' into post-11 2024-05-30 11:00:28 +02:00
Philipp Oppermann
9c546d4899 Merge branch 'post-10' into post-11 2024-04-30 15:39:16 +02:00
Philipp Oppermann
e59a2d1249 Merge branch 'post-10' into post-11 2024-02-16 15:59:40 +01:00
Philipp Oppermann
9bcbf17073 Merge branch 'post-10' into post-11 2024-02-16 13:23:46 +01:00
Philipp Oppermann
bd148f9bf2 Merge branch 'post-10' into post-11 2024-02-16 13:14:09 +01:00
Philipp Oppermann
64aeb0e98e Merge branch 'post-10' into post-11 2024-02-16 12:39:56 +01:00
Philipp Oppermann
06f064aefa Merge branch 'post-10' into post-11 2024-01-28 11:45:38 +01:00
Philipp Oppermann
f5b5251092 Merge branch 'post-10' into post-11 2023-04-29 12:27:12 +02:00
Philipp Oppermann
515ab6f1bf Merge branch 'post-10' into post-11 2022-09-25 11:32:07 +02:00
Philipp Oppermann
70714bc27d Merge branch 'post-10' into post-11 2022-04-08 10:07:31 +02:00
Philipp Oppermann
1893caa212 Merge branch 'post-10' into post-11 2022-01-18 10:06:44 +01:00
Philipp Oppermann
a859eddacf Merge branch 'post-10' into post-11 2021-12-22 09:27:53 +01:00
Philipp Oppermann
6b4666fbaf Merge branch 'post-10' into post-11 2021-12-21 13:56:10 +01:00
Philipp Oppermann
371f86175b Merge branch 'post-10' into post-11 2021-08-22 14:44:44 +02:00
Philipp Oppermann
b0ec3ba71f Merge branch 'post-10' into post-11
# Conflicts:
#	README.md
2021-05-21 19:58:20 +02:00
Philipp Oppermann
235b56a8e3 Merge branch 'post-10' into post-11 2021-05-20 14:33:10 +02:00
Philipp Oppermann
e3b19929b7 Merge branch 'post-10' into post-11 2021-05-17 12:51:46 +02:00
Philipp Oppermann
2359915246 Merge branch 'post-10' into post-11 2021-05-17 12:47:38 +02:00
Philipp Oppermann
1acc894846 Merge branch 'post-10' into post-11 2021-05-17 12:45:11 +02:00
Philipp Oppermann
3508e8a058 Merge branch 'post-10' into post-11 2021-05-17 11:47:14 +02:00
Philipp Oppermann
d8478d537b Merge branch 'post-10' into post-11 2021-05-17 11:44:50 +02:00
Philipp Oppermann
64264ce6ec Merge branch 'post-10' into post-11 2021-05-17 11:41:30 +02:00
Philipp Oppermann
56be80d62b Merge branch 'post-10' into post-11 2021-05-17 11:31:03 +02:00
Philipp Oppermann
60664c8876 Merge branch 'post-10' into post-11 2021-05-17 11:30:02 +02:00
Philipp Oppermann
ca120eed8c Merge branch 'post-10' into post-11 2021-05-17 11:29:23 +02:00
Philipp Oppermann
a6a3e30275 Merge branch 'post-10' into post-11 2021-03-10 09:23:02 +01:00
Philipp Oppermann
1ced86e362 Merge branch 'post-10' into post-11 2021-03-10 09:15:58 +01:00
Philipp Oppermann
4b0c11a7de Merge branch 'post-10' into post-11 2021-03-10 09:13:39 +01:00
Philipp Oppermann
59639c8fe7 Merge branch 'post-10' into post-11 2021-03-10 08:53:42 +01:00
Philipp Oppermann
d57a9d3f1f Merge pull request #921 from phil-opp/post-11-const-array-init
Use constant instead of removed `const_in_array_repeat_expressions` feature
2021-02-02 11:46:07 +01:00
Philipp Oppermann
63be7cda6b Use constant instead of removed const_in_array_repeat_expressions feature 2021-02-02 11:32:30 +01:00
Philipp Oppermann
380e54d6b8 Merge branch 'post-10' into post-11 2021-02-02 11:06:38 +01:00
Philipp Oppermann
7e05d80506 Merge branch 'post-10' into post-11 2021-02-02 10:52:06 +01:00
Philipp Oppermann
180e6ba36c Merge branch 'post-10' into post-11 2020-11-22 13:46:58 +01:00
Philipp Oppermann
ed93cfb926 Merge branch 'post-10' into post-11 2020-10-08 17:02:00 +02:00
Philipp Oppermann
ca3dfc719c Merge branch 'post-10' into post-11 2020-10-02 23:38:21 +02:00
Philipp Oppermann
bc618ad457 Merge branch 'post-10' into post-11 2020-09-29 12:22:06 +02:00
Philipp Oppermann
f7b5e22f68 Merge branch 'post-10' into post-11 2020-09-29 12:19:54 +02:00
Philipp Oppermann
340cbaba36 Use new const_mut_refs feature gate (#860) 2020-09-24 10:55:06 +02:00
Philipp Oppermann
231dfacf3b Merge branch 'post-10' into post-11 2020-09-24 10:34:56 +02:00
Philipp Oppermann
9bf21a0392 Merge branch 'post-10' into post-11 2020-09-24 10:28:11 +02:00
Philipp Oppermann
487b130c85 Merge branch 'post-10' into post-11 2020-09-24 09:11:02 +02:00
Philipp Oppermann
d28ad91407 Merge branch 'post-10' into post-11 2020-08-16 18:03:10 +02:00
Philipp Oppermann
43b5f835ab Merge branch 'post-10' into post-11 2020-08-16 17:59:56 +02:00
Philipp Oppermann
91790d8007 Merge branch 'post-10' into post-11 2020-07-17 15:57:35 +02:00
Philipp Oppermann
93cb9dfdbe Merge branch 'post-10' into post-11 2020-07-17 11:48:35 +02:00
Philipp Oppermann
b0cb401b3b Merge branch 'post-10' into post-11 2020-07-17 11:30:14 +02:00
Philipp Oppermann
bd4487cefa Merge branch 'post-10' into post-11 2020-06-08 12:30:28 +02:00
Philipp Oppermann
cc8ecaadc2 Remove superfluous printing from many_boxes_long_lived test 2020-06-08 12:09:49 +02:00
Philipp Oppermann
c66758ccad Merge branch 'post-10' into post-11 2020-06-08 12:09:01 +02:00
Philipp Oppermann
f54a2d7574 Merge branch 'post-10' into post-11 2020-06-08 12:06:37 +02:00
Philipp Oppermann
e6c4c63c00 Update post-11 code for #813 2020-06-05 15:52:01 +02:00
Philipp Oppermann
ed543a4591 Merge branch 'post-10' into post-11 2020-05-22 10:54:54 +02:00
Philipp Oppermann
0d661fcfdd Merge branch 'post-10' into post-11 2020-05-21 09:29:56 +02:00
Philipp Oppermann
6f12524245 Merge branch 'post-10' into post-11 2020-05-20 14:56:19 +02:00
Philipp Oppermann
9828ec0eff Merge branch 'post-10' into post-11 2020-05-20 14:30:53 +02:00
Philipp Oppermann
d95da1d511 Merge branch 'post-10' into post-11 2020-05-20 14:10:17 +02:00
Aaron Hill
7bfacb39c4 Remove #![feature(alloc_layout_extra)] (#806)
A subset of this feature was stabilized in https://github.com/rust-lang/rust/pull/69362,
and none of the still-unstable methods are in use in `blog_os`
2020-05-20 10:51:09 +02:00
Philipp Oppermann
3c28d7f364 Update README.md 2020-03-20 16:09:09 +01:00
Philipp Oppermann
83b67df8af Merge branch 'post-10' into post-11 2020-03-08 14:39:38 +01:00
Philipp Oppermann
714d9cef7d Merge branch 'post-10' into post-11 2020-03-08 14:30:38 +01:00
Philipp Oppermann
3735ae51c9 Merge branch 'post-10' into post-11 2020-03-08 14:29:03 +01:00
Philipp Oppermann
a430d5ef06 Merge branch 'post-10' into post-11 2020-03-06 11:58:45 +01:00
Philipp Oppermann
f4331daa25 Merge branch 'post-10' into post-11 2020-02-26 12:48:22 +01:00
Philipp Oppermann
921dd54207 Merge branch 'post-10' into post-11 2020-02-26 12:22:10 +01:00
Philipp Oppermann
3a6d3153a4 Don't panic on overflow in allocator; return null pointer instead (#738) 2020-02-04 09:47:39 +01:00
Philipp Oppermann
9fb6c1d0bd Merge branch 'post-10' into post-11 2020-02-04 09:39:02 +01:00
Philipp Oppermann
00fedc801e Use LinkedListAllocator::lock instead of self.inner.lock() 2020-01-30 13:03:34 +01:00
Philipp Oppermann
0f74db4812 Implement align_up using align_offset from Rust's standard library (#723)
Improve `align_up` performance using a bitmask
2020-01-28 10:39:14 +01:00
Philipp Oppermann
93fd330ab9 Use bitmask instead of align_offset 2020-01-28 10:29:53 +01:00
Philipp Oppermann
3ad5f117c2 Use checked addition for allocator implementations (#726) 2020-01-27 13:25:08 +01:00
Philipp Oppermann
d1678f5a96 Implement align_up using align_offset from Rust's standard library 2020-01-22 11:35:29 +01:00
Philipp Oppermann
002d6f255f Set FixedSizeBlockAllocator as global allocator 2020-01-20 14:09:00 +01:00
Philipp Oppermann
6c3bf0b10f Implement GlobalAlloc::dealloc 2020-01-20 14:07:47 +01:00
Philipp Oppermann
7a792f5cb0 Implement GlobalAlloc::alloc 2020-01-20 14:07:02 +01:00
Philipp Oppermann
93b4dcf434 Add skeleton for GlobalAlloc implementation 2020-01-20 14:06:26 +01:00
Philipp Oppermann
821dd2adb4 Add function to calculate the list index 2020-01-20 14:05:24 +01:00
Philipp Oppermann
d636939b51 Add FixedSizeBlockAllocator::fallback_alloc method 2020-01-20 14:05:01 +01:00
Philipp Oppermann
9b7326541e Add FixedSizeBlockAllocator type 2020-01-20 14:04:13 +01:00
Philipp Oppermann
4f234b67ef Add ListNode type and BLOCK_SIZES constant 2020-01-20 14:02:57 +01:00
Philipp Oppermann
7381e11f3c Create a new fixed_size_block allocator submodule 2020-01-20 14:01:59 +01:00
Philipp Oppermann
a9fe65a0ce Use new LinkedListAllocator 2020-01-10 13:04:46 +01:00
Philipp Oppermann
2001814119 Implement LinkedListAllocator::size_align 2020-01-10 13:00:16 +01:00
Philipp Oppermann
a5c50e7408 Implement GlobalAlloc for LinkedListAllocator 2020-01-10 11:52:04 +01:00
Philipp Oppermann
70a52c291d Implement alloc_from_region 2020-01-10 11:48:56 +01:00
Philipp Oppermann
c56bfa27e4 Implement find_region 2020-01-10 11:46:10 +01:00
Philipp Oppermann
55aec9ebf3 Apply rustfmt to ListNode::new function 2020-01-10 11:44:38 +01:00
Philipp Oppermann
2e1d132a9a Implement add_free_region 2020-01-10 11:44:17 +01:00
Philipp Oppermann
63e8577d77 Create a basic LinkedListAllocator type 2020-01-10 11:42:04 +01:00
Philipp Oppermann
75d826bf69 Add a test that memory is reused with a long lived allocation
This test fails for the bump allocator because it can only free the complete heap at once, which is prevented by the single long-lived allocation.
2020-01-09 15:45:38 +01:00
Philipp Oppermann
45be3f0648 Use our BumpAllocator instead of linked_list_allocator crate 2020-01-09 15:37:43 +01:00
Philipp Oppermann
055c560a7a Add an align_up function 2020-01-09 15:36:06 +01:00
Philipp Oppermann
e87044a7ee Implement GlobalAlloc for BumpAllocator 2020-01-09 15:35:03 +01:00
Philipp Oppermann
08d2289dad Add a Locked wrapper type that can be used to implement GlobalAlloc 2020-01-09 15:34:04 +01:00
Philipp Oppermann
7c84dbaa1d Create a basic BumpAllocator type 2020-01-09 15:25:37 +01:00
6 changed files with 365 additions and 6 deletions

README.md

@@ -1,10 +1,10 @@
-# Blog OS (Heap Allocation)
+# Blog OS (Allocator Designs)
 
-[![Build Status](https://github.com/phil-opp/blog_os/workflows/Code/badge.svg?branch=post-10)](https://github.com/phil-opp/blog_os/actions?query=workflow%3A%22Code%22+branch%3Apost-10)
+[![Build Status](https://github.com/phil-opp/blog_os/workflows/Code/badge.svg?branch=post-11)](https://github.com/phil-opp/blog_os/actions?query=workflow%3A%22Code%22+branch%3Apost-11)
 
-This repository contains the source code for the [Heap Allocation][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series.
+This repository contains the source code for the [Allocator Designs][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series.
 
-[post]: https://os.phil-opp.com/heap-allocation/
+[post]: https://os.phil-opp.com/allocator-designs/
 
 **Check out the [master branch](https://github.com/phil-opp/blog_os) for more information.**

src/allocator.rs

@@ -1,6 +1,6 @@
 use alloc::alloc::{GlobalAlloc, Layout};
 use core::ptr::null_mut;
-use linked_list_allocator::LockedHeap;
+use fixed_size_block::FixedSizeBlockAllocator;
 use x86_64::{
     VirtAddr,
     structures::paging::{
@@ -8,11 +8,15 @@ use x86_64::{
     },
 };
 
+pub mod bump;
+pub mod fixed_size_block;
+pub mod linked_list;
+
 pub const HEAP_START: usize = 0x_4444_4444_0000;
 pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
 
 #[global_allocator]
-static ALLOCATOR: LockedHeap = LockedHeap::empty();
+static ALLOCATOR: Locked<FixedSizeBlockAllocator> = Locked::new(FixedSizeBlockAllocator::new());
 
 pub fn init_heap(
     mapper: &mut impl Mapper<Size4KiB>,
@@ -52,3 +56,27 @@ unsafe impl GlobalAlloc for Dummy {
         panic!("dealloc should be never called")
     }
 }
+
+/// A wrapper around spin::Mutex to permit trait implementations.
+pub struct Locked<A> {
+    inner: spin::Mutex<A>,
+}
+
+impl<A> Locked<A> {
+    pub const fn new(inner: A) -> Self {
+        Locked {
+            inner: spin::Mutex::new(inner),
+        }
+    }
+
+    pub fn lock(&self) -> spin::MutexGuard<A> {
+        self.inner.lock()
+    }
+}
+
+/// Align the given address `addr` upwards to alignment `align`.
+///
+/// Requires that `align` is a power of two.
+fn align_up(addr: usize, align: usize) -> usize {
+    (addr + align - 1) & !(align - 1)
+}
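
Not part of the diff: a minimal sketch of what the `align_up` bitmask computes, assuming `align` is a power of two. Adding `align - 1` pushes any unaligned address past the next boundary, and masking with `!(align - 1)` clears the low bits, so already aligned addresses stay unchanged.

// Illustration only (not from the repository): behavior of the align_up bitmask.
fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(align_up(0x1000, 8), 0x1000); // already aligned -> unchanged
    assert_eq!(align_up(0x1001, 8), 0x1008); // rounded up to the next multiple of 8
    assert_eq!(align_up(13, 16), 16);
}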

src/allocator/bump.rs Normal file (61 lines)

@@ -0,0 +1,61 @@
use super::{Locked, align_up};
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr;

pub struct BumpAllocator {
    heap_start: usize,
    heap_end: usize,
    next: usize,
    allocations: usize,
}

impl BumpAllocator {
    /// Creates a new empty bump allocator.
    pub const fn new() -> Self {
        BumpAllocator {
            heap_start: 0,
            heap_end: 0,
            next: 0,
            allocations: 0,
        }
    }

    /// Initializes the bump allocator with the given heap bounds.
    ///
    /// This method is unsafe because the caller must ensure that the given
    /// memory range is unused. Also, this method must be called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.heap_start = heap_start;
        self.heap_end = heap_start.saturating_add(heap_size);
        self.next = heap_start;
    }
}

unsafe impl GlobalAlloc for Locked<BumpAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut bump = self.lock(); // get a mutable reference

        let alloc_start = align_up(bump.next, layout.align());
        let alloc_end = match alloc_start.checked_add(layout.size()) {
            Some(end) => end,
            None => return ptr::null_mut(),
        };

        if alloc_end > bump.heap_end {
            ptr::null_mut() // out of memory
        } else {
            bump.next = alloc_end;
            bump.allocations += 1;
            alloc_start as *mut u8
        }
    }

    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        let mut bump = self.lock(); // get a mutable reference

        bump.allocations -= 1;
        if bump.allocations == 0 {
            bump.next = bump.heap_start;
        }
    }
}
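
Not part of the diff: a sketch of how this bump allocator could be registered as the global allocator via the `Locked` wrapper from src/allocator.rs. The branch ultimately registers the `FixedSizeBlockAllocator` instead, and the commented `init` call site is an assumption for illustration.

// Hypothetical wiring, for illustration only: using the bump allocator
// instead of the FixedSizeBlockAllocator that this branch actually registers.
#[global_allocator]
static ALLOCATOR: Locked<BumpAllocator> = Locked::new(BumpAllocator::new());

// ... and once init_heap has mapped the heap pages:
// unsafe { ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE); }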

src/allocator/fixed_size_block.rs Normal file (109 lines)

@@ -0,0 +1,109 @@
use super::Locked;
use alloc::alloc::{GlobalAlloc, Layout};
use core::{
    mem,
    ptr::{self, NonNull},
};

/// The block sizes to use.
///
/// The sizes must each be power of 2 because they are also used as
/// the block alignment (alignments must be always powers of 2).
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];

/// Choose an appropriate block size for the given layout.
///
/// Returns an index into the `BLOCK_SIZES` array.
fn list_index(layout: &Layout) -> Option<usize> {
    let required_block_size = layout.size().max(layout.align());
    BLOCK_SIZES.iter().position(|&s| s >= required_block_size)
}

struct ListNode {
    next: Option<&'static mut ListNode>,
}

pub struct FixedSizeBlockAllocator {
    list_heads: [Option<&'static mut ListNode>; BLOCK_SIZES.len()],
    fallback_allocator: linked_list_allocator::Heap,
}

impl FixedSizeBlockAllocator {
    /// Creates an empty FixedSizeBlockAllocator.
    pub const fn new() -> Self {
        const EMPTY: Option<&'static mut ListNode> = None;
        FixedSizeBlockAllocator {
            list_heads: [EMPTY; BLOCK_SIZES.len()],
            fallback_allocator: linked_list_allocator::Heap::empty(),
        }
    }

    /// Initialize the allocator with the given heap bounds.
    ///
    /// This function is unsafe because the caller must guarantee that the given
    /// heap bounds are valid and that the heap is unused. This method must be
    /// called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        unsafe {
            self.fallback_allocator.init(heap_start, heap_size);
        }
    }

    /// Allocates using the fallback allocator.
    fn fallback_alloc(&mut self, layout: Layout) -> *mut u8 {
        match self.fallback_allocator.allocate_first_fit(layout) {
            Ok(ptr) => ptr.as_ptr(),
            Err(_) => ptr::null_mut(),
        }
    }
}

unsafe impl GlobalAlloc for Locked<FixedSizeBlockAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                match allocator.list_heads[index].take() {
                    Some(node) => {
                        allocator.list_heads[index] = node.next.take();
                        node as *mut ListNode as *mut u8
                    }
                    None => {
                        // no block exists in list => allocate new block
                        let block_size = BLOCK_SIZES[index];
                        // only works if all block sizes are a power of 2
                        let block_align = block_size;
                        let layout = Layout::from_size_align(block_size, block_align).unwrap();
                        allocator.fallback_alloc(layout)
                    }
                }
            }
            None => allocator.fallback_alloc(layout),
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                let new_node = ListNode {
                    next: allocator.list_heads[index].take(),
                };
                // verify that block has size and alignment required for storing node
                assert!(mem::size_of::<ListNode>() <= BLOCK_SIZES[index]);
                assert!(mem::align_of::<ListNode>() <= BLOCK_SIZES[index]);
                let new_node_ptr = ptr as *mut ListNode;
                unsafe {
                    new_node_ptr.write(new_node);
                    allocator.list_heads[index] = Some(&mut *new_node_ptr);
                }
            }
            None => {
                let ptr = NonNull::new(ptr).unwrap();
                unsafe {
                    allocator.fallback_allocator.deallocate(ptr, layout);
                }
            }
        }
    }
}
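
Not part of the diff: a few worked examples of how `list_index` chooses a bucket, assuming the calls happen inside the `fixed_size_block` module (the function is private). The required block size is `max(size, align)`, the first `BLOCK_SIZES` entry at least that large wins, and anything above 2048 bytes returns `None` and is served by the fallback allocator.

// Illustration only (not from the repository): bucket choices for some layouts.
fn list_index_examples() {
    use alloc::alloc::Layout;

    let layout = Layout::from_size_align(12, 4).unwrap();
    assert_eq!(list_index(&layout), Some(1)); // max(12, 4) = 12 -> smallest fitting size is 16

    let layout = Layout::from_size_align(4, 64).unwrap();
    assert_eq!(list_index(&layout), Some(3)); // alignment dominates: 64 -> index 3

    let layout = Layout::from_size_align(4096, 8).unwrap();
    assert_eq!(list_index(&layout), None); // larger than 2048 -> fallback allocator
}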

src/allocator/linked_list.rs Normal file (151 lines)

@@ -0,0 +1,151 @@
use super::{Locked, align_up};
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};

struct ListNode {
    size: usize,
    next: Option<&'static mut ListNode>,
}

impl ListNode {
    const fn new(size: usize) -> Self {
        ListNode { size, next: None }
    }

    fn start_addr(&self) -> usize {
        self as *const Self as usize
    }

    fn end_addr(&self) -> usize {
        self.start_addr() + self.size
    }
}

pub struct LinkedListAllocator {
    head: ListNode,
}

impl LinkedListAllocator {
    /// Creates an empty LinkedListAllocator.
    pub const fn new() -> Self {
        Self {
            head: ListNode::new(0),
        }
    }

    /// Initialize the allocator with the given heap bounds.
    ///
    /// This function is unsafe because the caller must guarantee that the given
    /// heap bounds are valid and that the heap is unused. This method must be
    /// called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        unsafe {
            self.add_free_region(heap_start, heap_size);
        }
    }

    /// Adds the given memory region to the front of the list.
    unsafe fn add_free_region(&mut self, addr: usize, size: usize) {
        // ensure that the freed region is capable of holding ListNode
        assert_eq!(align_up(addr, mem::align_of::<ListNode>()), addr);
        assert!(size >= mem::size_of::<ListNode>());

        // create a new list node and append it at the start of the list
        let mut node = ListNode::new(size);
        node.next = self.head.next.take();
        let node_ptr = addr as *mut ListNode;
        unsafe {
            node_ptr.write(node);
            self.head.next = Some(&mut *node_ptr);
        }
    }

    /// Looks for a free region with the given size and alignment and removes
    /// it from the list.
    ///
    /// Returns a tuple of the list node and the start address of the allocation.
    fn find_region(&mut self, size: usize, align: usize) -> Option<(&'static mut ListNode, usize)> {
        // reference to current list node, updated for each iteration
        let mut current = &mut self.head;
        // look for a large enough memory region in linked list
        while let Some(ref mut region) = current.next {
            if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
                // region suitable for allocation -> remove node from list
                let next = region.next.take();
                let ret = Some((current.next.take().unwrap(), alloc_start));
                current.next = next;
                return ret;
            } else {
                // region not suitable -> continue with next region
                current = current.next.as_mut().unwrap();
            }
        }

        // no suitable region found
        None
    }

    /// Try to use the given region for an allocation with given size and alignment.
    ///
    /// Returns the allocation start address on success.
    fn alloc_from_region(region: &ListNode, size: usize, align: usize) -> Result<usize, ()> {
        let alloc_start = align_up(region.start_addr(), align);
        let alloc_end = alloc_start.checked_add(size).ok_or(())?;

        if alloc_end > region.end_addr() {
            // region too small
            return Err(());
        }

        let excess_size = region.end_addr() - alloc_end;
        if excess_size > 0 && excess_size < mem::size_of::<ListNode>() {
            // rest of region too small to hold a ListNode (required because the
            // allocation splits the region in a used and a free part)
            return Err(());
        }

        // region suitable for allocation
        Ok(alloc_start)
    }

    /// Adjust the given layout so that the resulting allocated memory
    /// region is also capable of storing a `ListNode`.
    ///
    /// Returns the adjusted size and alignment as a (size, align) tuple.
    fn size_align(layout: Layout) -> (usize, usize) {
        let layout = layout
            .align_to(mem::align_of::<ListNode>())
            .expect("adjusting alignment failed")
            .pad_to_align();
        let size = layout.size().max(mem::size_of::<ListNode>());
        (size, layout.align())
    }
}

unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // perform layout adjustments
        let (size, align) = LinkedListAllocator::size_align(layout);
        let mut allocator = self.lock();

        if let Some((region, alloc_start)) = allocator.find_region(size, align) {
            let alloc_end = alloc_start.checked_add(size).expect("overflow");
            let excess_size = region.end_addr() - alloc_end;
            if excess_size > 0 {
                unsafe {
                    allocator.add_free_region(alloc_end, excess_size);
                }
            }
            alloc_start as *mut u8
        } else {
            ptr::null_mut()
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // perform layout adjustments
        let (size, _) = LinkedListAllocator::size_align(layout);

        unsafe { self.lock().add_free_region(ptr as usize, size) }
    }
}
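
Not part of the diff: what the `size_align` adjustment does on x86_64, where `ListNode` (a `usize` plus an `Option<&'static mut ListNode>`) is 16 bytes with 8-byte alignment. Small or weakly aligned requests are enlarged so that every block handed out can later be turned back into a free-list node on `dealloc`. The sketch assumes it is called inside the `linked_list` module, since the function is private.

// Illustration only (not from the repository), assuming the x86_64 layout of ListNode.
fn size_align_example() {
    use alloc::alloc::Layout;

    let layout = Layout::from_size_align(1, 1).unwrap();
    let (size, align) = LinkedListAllocator::size_align(layout);
    assert_eq!((size, align), (16, 8)); // enlarged so the freed block can hold a ListNode
}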

tests/heap_allocation.rs

@@ -54,6 +54,16 @@ fn many_boxes() {
     }
 }
 
+#[test_case]
+fn many_boxes_long_lived() {
+    let long_lived = Box::new(1); // new
+    for i in 0..HEAP_SIZE {
+        let x = Box::new(i);
+        assert_eq!(*x, i);
+    }
+    assert_eq!(*long_lived, 1); // new
+}
+
 #[panic_handler]
 fn panic(info: &PanicInfo) -> ! {
     blog_os::test_panic_handler(info)
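
Not part of the diff: as the commit message for this test notes, it is what separates the designs. With the 100 KiB heap and 8-byte `usize` boxes, the loop performs HEAP_SIZE = 102,400 allocations while only about 12,800 can be live at once, so the test passes only if freed memory is reused. The bump allocator cannot reuse it while `long_lived` keeps its allocation counter above zero, whereas the linked-list and fixed-size-block allocators reclaim each freed box individually.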