Compare commits

...

77 Commits

Author SHA1 Message Date
Philipp Oppermann
9258e04b7a Update to latest version 2020-07-07 11:22:06 +02:00
Philipp Oppermann
705435da8a Test for new idt::set_default_handler macro of x86_64 crate 2020-07-06 17:05:42 +02:00
Philipp Oppermann
991c3c863a Merge branch 'post-11' into post-12 2020-06-08 12:30:28 +02:00
Philipp Oppermann
bd4487cefa Merge branch 'post-10' into post-11 2020-06-08 12:30:28 +02:00
Philipp Oppermann
ae504f6a61 Merge branch 'post-11' into post-12 2020-06-08 12:09:51 +02:00
Philipp Oppermann
cc8ecaadc2 Remove superfluous printing from many_boxes_long_lived test 2020-06-08 12:09:49 +02:00
Philipp Oppermann
07b4e93ec7 Merge branch 'post-11' into post-12 2020-06-08 12:09:01 +02:00
Philipp Oppermann
c66758ccad Merge branch 'post-10' into post-11 2020-06-08 12:09:01 +02:00
Philipp Oppermann
0a97c2c97b Merge branch 'post-11' into post-12 2020-06-08 12:06:49 +02:00
Philipp Oppermann
f54a2d7574 Merge branch 'post-10' into post-11 2020-06-08 12:06:37 +02:00
Philipp Oppermann
c2790cf352 Merge branch 'post-11' into post-12 2020-06-05 15:52:04 +02:00
Philipp Oppermann
e6c4c63c00 Update post-11 code for #813 2020-06-05 15:52:01 +02:00
Philipp Oppermann
0e7a2dc6ae Merge branch 'post-11' into post-12 2020-05-22 10:54:54 +02:00
Philipp Oppermann
ed543a4591 Merge branch 'post-10' into post-11 2020-05-22 10:54:54 +02:00
Philipp Oppermann
080be9bca6 Merge branch 'post-11' into post-12 2020-05-21 09:29:56 +02:00
Philipp Oppermann
0d661fcfdd Merge branch 'post-10' into post-11 2020-05-21 09:29:56 +02:00
Philipp Oppermann
0404ceb1af Merge branch 'post-11' into post-12 2020-05-20 14:56:19 +02:00
Philipp Oppermann
6f12524245 Merge branch 'post-10' into post-11 2020-05-20 14:56:19 +02:00
Philipp Oppermann
60b5f3402e Merge branch 'post-11' into post-12 2020-05-20 14:31:12 +02:00
Philipp Oppermann
9828ec0eff Merge branch 'post-10' into post-11 2020-05-20 14:30:53 +02:00
Philipp Oppermann
a076ff16ad Merge branch 'post-11' into post-12 2020-05-20 14:10:59 +02:00
Philipp Oppermann
d95da1d511 Merge branch 'post-10' into post-11 2020-05-20 14:10:17 +02:00
Philipp Oppermann
84c1070b0b Merge branch 'post-11' into post-12 2020-05-20 10:51:40 +02:00
Aaron Hill
7bfacb39c4 Remove #![feature(alloc_layout_extra)] (#806)
A subset of this feature was stabilized in https://github.com/rust-lang/rust/pull/69362,
and none of the still-unstable methods are in use in `blog_os`
2020-05-20 10:51:09 +02:00
Philipp Oppermann
f118749cb2 Merge pull request #804 from phil-opp/post-12-merge-queues
Simplify executor by merging task_queue and wake_queue
2020-05-19 14:32:51 +02:00
Philipp Oppermann
9887c1257d Keep task in map instead of repeatedly removing it
Also: Use entry API on waker_cache map.
2020-05-19 14:24:51 +02:00
Philipp Oppermann
ee0c11d316 Simplify executor by merging task_queue and wake_queue 2020-05-18 12:07:45 +02:00
Philipp Oppermann
b862534be5 Use a static counter for assigning task IDs (#782)
Deriving the task ID from the heap address of the future does not work for zero-sized futures because they are not backed by a real allocation.
2020-04-01 12:53:25 +02:00
Philipp Oppermann
e465c5b278 Put the CPU to sleep when no task is ready 2020-03-27 17:17:34 +01:00
Philipp Oppermann
50b4b89ac2 Implement an executor with waker support 2020-03-27 17:01:33 +01:00
Philipp Oppermann
d7b144364d Add an asynchronous print_keypresses task 2020-03-27 17:01:33 +01:00
Philipp Oppermann
d63ddde756 Add waker support to ScancodeStream 2020-03-27 17:01:33 +01:00
Philipp Oppermann
79dbd2968a Create a ScancodeStream based on the SCANCODE_QUEUE 2020-03-27 15:46:26 +01:00
Philipp Oppermann
b1be646e46 Add an add_scancode function and call it from keyboard interrupt handler 2020-03-27 13:25:43 +01:00
Philipp Oppermann
71b10a70df Add dependencies on crossbeam and conquer_once to define a SCANCODE_QUEUE 2020-03-27 13:16:05 +01:00
Philipp Oppermann
629fe0fdec Use SimpleExecutor in kernel_main to run example_task 2020-03-27 13:04:33 +01:00
Philipp Oppermann
b25b841467 Create a SimpleExecutor that polls tasks in a busy loop 2020-03-27 13:03:24 +01:00
Philipp Oppermann
dac7e67403 Add a task module with a Task struct 2020-03-27 12:55:40 +01:00
Philipp Oppermann
2cc188a403 Add some example async functions 2020-03-27 12:48:55 +01:00
Philipp Oppermann
fbeedde68f Update Readme for async/await post 2020-03-27 12:26:50 +01:00
Philipp Oppermann
3c28d7f364 Update README.md 2020-03-20 16:09:09 +01:00
Philipp Oppermann
83b67df8af Merge branch 'post-10' into post-11 2020-03-08 14:39:38 +01:00
Philipp Oppermann
714d9cef7d Merge branch 'post-10' into post-11 2020-03-08 14:30:38 +01:00
Philipp Oppermann
3735ae51c9 Merge branch 'post-10' into post-11 2020-03-08 14:29:03 +01:00
Philipp Oppermann
a430d5ef06 Merge branch 'post-10' into post-11 2020-03-06 11:58:45 +01:00
Philipp Oppermann
f4331daa25 Merge branch 'post-10' into post-11 2020-02-26 12:48:22 +01:00
Philipp Oppermann
921dd54207 Merge branch 'post-10' into post-11 2020-02-26 12:22:10 +01:00
Philipp Oppermann
3a6d3153a4 Don't panic on overflow in allocator; return null pointer instead (#738) 2020-02-04 09:47:39 +01:00
Philipp Oppermann
9fb6c1d0bd Merge branch 'post-10' into post-11 2020-02-04 09:39:02 +01:00
Philipp Oppermann
00fedc801e Use LinkedListAllocator::lock instead of self.inner.lock() 2020-01-30 13:03:34 +01:00
Philipp Oppermann
0f74db4812 Implement align_up using align_offset from Rust's standard library (#723)
Improve `align_up` performance using a bitmask
2020-01-28 10:39:14 +01:00
Philipp Oppermann
93fd330ab9 Use bitmask instead of align_offset 2020-01-28 10:29:53 +01:00
Philipp Oppermann
3ad5f117c2 Use checked addition for allocator implementations (#726) 2020-01-27 13:25:08 +01:00
Philipp Oppermann
d1678f5a96 Implement align_up using align_offset from Rust's standard library 2020-01-22 11:35:29 +01:00
Philipp Oppermann
002d6f255f Set FixedSizeBlockAllocator as global allocator 2020-01-20 14:09:00 +01:00
Philipp Oppermann
6c3bf0b10f Implement GlobalAlloc::dealloc 2020-01-20 14:07:47 +01:00
Philipp Oppermann
7a792f5cb0 Implement GlobalAlloc::alloc 2020-01-20 14:07:02 +01:00
Philipp Oppermann
93b4dcf434 Add skeleton for GlobalAlloc implementation 2020-01-20 14:06:26 +01:00
Philipp Oppermann
821dd2adb4 Add function to calculate the list index 2020-01-20 14:05:24 +01:00
Philipp Oppermann
d636939b51 Add FixedSizeBlockAllocator::fallback_alloc method 2020-01-20 14:05:01 +01:00
Philipp Oppermann
9b7326541e Add FixedSizeBlockAllocator type 2020-01-20 14:04:13 +01:00
Philipp Oppermann
4f234b67ef Add ListNode type and BLOCK_SIZES constant 2020-01-20 14:02:57 +01:00
Philipp Oppermann
7381e11f3c Create a new fixed_size_block allocator submodule 2020-01-20 14:01:59 +01:00
Philipp Oppermann
a9fe65a0ce Use new LinkedListAllocator 2020-01-10 13:04:46 +01:00
Philipp Oppermann
2001814119 Implement LinkedListAllocator::size_align 2020-01-10 13:00:16 +01:00
Philipp Oppermann
a5c50e7408 Implement GlobalAlloc for LinkedListAllocator 2020-01-10 11:52:04 +01:00
Philipp Oppermann
70a52c291d Implement alloc_from_region 2020-01-10 11:48:56 +01:00
Philipp Oppermann
c56bfa27e4 Implement find_region 2020-01-10 11:46:10 +01:00
Philipp Oppermann
55aec9ebf3 Apply rustfmt to ListNode::new function 2020-01-10 11:44:38 +01:00
Philipp Oppermann
2e1d132a9a Implement add_free_region 2020-01-10 11:44:17 +01:00
Philipp Oppermann
63e8577d77 Create a basic LinkedListAllocator type 2020-01-10 11:42:04 +01:00
Philipp Oppermann
75d826bf69 Add a test that memory is reused with a long lived allocation
This test fails for the bump allocator because it can only free the complete heap at once, which is prevented by the single long-lived allocation.
2020-01-09 15:45:38 +01:00
Philipp Oppermann
45be3f0648 Use our BumpAllocator instead of linked_list_allocator crate 2020-01-09 15:37:43 +01:00
Philipp Oppermann
055c560a7a Add an align_up function 2020-01-09 15:36:06 +01:00
Philipp Oppermann
e87044a7ee Implement GlobalAlloc for BumpAllocator 2020-01-09 15:35:03 +01:00
Philipp Oppermann
08d2289dad Add a Locked wrapper type that can be used to implement GlobalAlloc 2020-01-09 15:34:04 +01:00
Philipp Oppermann
7c84dbaa1d Create a basic BumpAllocator type 2020-01-09 15:25:37 +01:00
15 changed files with 804 additions and 57 deletions

Cargo.lock (generated) · 136 changed lines

@@ -1,5 +1,11 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "autocfg"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
[[package]]
name = "bit_field"
version = "0.9.0"
@@ -17,6 +23,9 @@ name = "blog_os"
version = "0.1.0"
dependencies = [
"bootloader",
"conquer-once",
"crossbeam-queue",
"futures-util",
"lazy_static",
"linked_list_allocator",
"pc-keyboard",
@@ -24,7 +33,7 @@ dependencies = [
"spin",
"uart_16550",
"volatile",
"x86_64",
"x86_64 0.11.1",
]
[[package]]
@@ -33,12 +42,76 @@ version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44ac0bdf4930c3c4d7f0d04eb6f15d7dcb9d5972b1ff9cd2bee0128112260fc7"
[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "conquer-once"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f7644600a548ecad74e4a918392af1798f7dd045be610be3203b9e129b4f98f"
dependencies = [
"conquer-util",
]
[[package]]
name = "conquer-util"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "654fb2472cc369d311c547103a1fa81d467bef370ae7a0680f65939895b1182a"
[[package]]
name = "cpuio"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d531514efb06912141fa65967447de805691b685a7565c87d1765afe34a98aa7"
[[package]]
name = "crossbeam-queue"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db"
dependencies = [
"cfg-if",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
dependencies = [
"autocfg",
"cfg-if",
]
[[package]]
name = "futures-core"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a"
[[package]]
name = "futures-task"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27"
[[package]]
name = "futures-util"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5"
dependencies = [
"futures-core",
"futures-task",
"pin-utils",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
@@ -81,6 +154,30 @@ dependencies = [
"cpuio",
]
[[package]]
name = "pin-utils"
version = "0.1.0-alpha.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587"
[[package]]
name = "proc-macro2"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa"
dependencies = [
"unicode-xid",
]
[[package]]
name = "quote"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
dependencies = [
"proc-macro2",
]
[[package]]
name = "scopeguard"
version = "1.1.0"
@@ -102,6 +199,17 @@ dependencies = [
"lock_api",
]
[[package]]
name = "syn"
version = "1.0.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd"
dependencies = [
"proc-macro2",
"quote",
"unicode-xid",
]
[[package]]
name = "uart_16550"
version = "0.2.7"
@@ -109,9 +217,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e58fc40dc1712664fc9b0a7bd8ca2f21ab49960924fb245a80a05e1e92f3dfe9"
dependencies = [
"bitflags",
"x86_64",
"x86_64 0.11.0",
]
[[package]]
name = "unicode-xid"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
[[package]]
name = "volatile"
version = "0.2.6"
@@ -127,3 +241,21 @@ dependencies = [
"bit_field",
"bitflags",
]
[[package]]
name = "x86_64"
version = "0.11.1"
dependencies = [
"bit_field",
"bitflags",
"x86_64-idt-general-handler",
]
[[package]]
name = "x86_64-idt-general-handler"
version = "0.1.0"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

Cargo.toml

@@ -16,7 +16,7 @@ harness = false
bootloader = { version = "0.9.3", features = ["map_physical_memory"]}
volatile = "0.2.6"
spin = "0.5.2"
x86_64 = "0.11.0"
x86_64 = { path = "../../x86_64" }
uart_16550 = "0.2.0"
pic8259_simple = "0.2.0"
pc-keyboard = "0.5.0"
@@ -26,6 +26,19 @@ linked_list_allocator = "0.8.0"
version = "1.0"
features = ["spin_no_std"]
[dependencies.crossbeam-queue]
version = "0.2.1"
default-features = false
features = ["alloc"]
[dependencies.conquer-once]
version = "0.2.0"
default-features = false
[dependencies.futures-util]
version = "0.3.4"
default-features = false
features = ["alloc"]
[package.metadata.bootimage]
test-args = [

README.md

@@ -1,10 +1,10 @@
# Blog OS (Heap Allocation)
# Blog OS (Async/Await)
[![Build Status](https://github.com/phil-opp/blog_os/workflows/Build%20Code/badge.svg?branch=post-10)](https://github.com/phil-opp/blog_os/actions?query=workflow%3A%22Build+Code%22+branch%3Apost-10)
[![Build Status](https://github.com/phil-opp/blog_os/workflows/Build%20Code/badge.svg?branch=post-12)](https://github.com/phil-opp/blog_os/actions?query=workflow%3A%22Build+Code%22+branch%3Apost-12)
This repository contains the source code for the [Heap Allocation][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series.
This repository contains the source code for the [Async/Await][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series.
[post]: https://os.phil-opp.com/heap-allocation/
[post]: https://os.phil-opp.com/async-await/
**Check out the [master branch](https://github.com/phil-opp/blog_os) for more information.**

src/allocator.rs

@@ -1,6 +1,6 @@
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr::null_mut;
use linked_list_allocator::LockedHeap;
use fixed_size_block::FixedSizeBlockAllocator;
use x86_64::{
structures::paging::{
mapper::MapToError, FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB,
@@ -8,11 +8,15 @@ use x86_64::{
VirtAddr,
};
pub mod bump;
pub mod fixed_size_block;
pub mod linked_list;
pub const HEAP_START: usize = 0x_4444_4444_0000;
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();
static ALLOCATOR: Locked<FixedSizeBlockAllocator> = Locked::new(FixedSizeBlockAllocator::new());
pub fn init_heap(
mapper: &mut impl Mapper<Size4KiB>,
@@ -52,3 +56,27 @@ unsafe impl GlobalAlloc for Dummy {
panic!("dealloc should be never called")
}
}
/// A wrapper around spin::Mutex to permit trait implementations.
pub struct Locked<A> {
inner: spin::Mutex<A>,
}
impl<A> Locked<A> {
pub const fn new(inner: A) -> Self {
Locked {
inner: spin::Mutex::new(inner),
}
}
pub fn lock(&self) -> spin::MutexGuard<A> {
self.inner.lock()
}
}
/// Align the given address `addr` upwards to alignment `align`.
///
/// Requires that `align` is a power of two.
fn align_up(addr: usize, align: usize) -> usize {
(addr + align - 1) & !(align - 1)
}
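
For reference, a quick check of the bitmask form of align_up shown above (a hypothetical standalone snippet, not part of the diff): because align is a power of two, align - 1 is a mask of the low bits, so adding it and then clearing those bits rounds the address up to the next multiple of align.

fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(align_up(0x1000, 8), 0x1000); // already aligned, unchanged
    assert_eq!(align_up(0x1001, 8), 0x1008); // rounded up to the next multiple of 8
    assert_eq!(align_up(13, 16), 16);
}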

src/allocator/bump.rs (new file) · 61 lines

@@ -0,0 +1,61 @@
use super::{align_up, Locked};
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr;
pub struct BumpAllocator {
heap_start: usize,
heap_end: usize,
next: usize,
allocations: usize,
}
impl BumpAllocator {
/// Creates a new empty bump allocator.
pub const fn new() -> Self {
BumpAllocator {
heap_start: 0,
heap_end: 0,
next: 0,
allocations: 0,
}
}
/// Initializes the bump allocator with the given heap bounds.
///
/// This method is unsafe because the caller must ensure that the given
/// memory range is unused. Also, this method must be called only once.
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
self.heap_start = heap_start;
self.heap_end = heap_start.saturating_add(heap_size);
self.next = heap_start;
}
}
unsafe impl GlobalAlloc for Locked<BumpAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let mut bump = self.lock(); // get a mutable reference
let alloc_start = align_up(bump.next, layout.align());
let alloc_end = match alloc_start.checked_add(layout.size()) {
Some(end) => end,
None => return ptr::null_mut(),
};
if alloc_end > bump.heap_end {
ptr::null_mut() // out of memory
} else {
bump.next = alloc_end;
bump.allocations += 1;
alloc_start as *mut u8
}
}
unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
let mut bump = self.lock(); // get a mutable reference
bump.allocations -= 1;
if bump.allocations == 0 {
bump.next = bump.heap_start;
}
}
}
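
As a usage sketch (not part of the diff), this is roughly how the intermediate commit "Use our BumpAllocator instead of linked_list_allocator crate" wires the bump allocator up as the global allocator, reusing the Locked wrapper and the heap constants from src/allocator.rs above:

// in src/allocator.rs (sketch)
use bump::BumpAllocator;

#[global_allocator]
static ALLOCATOR: Locked<BumpAllocator> = Locked::new(BumpAllocator::new());

// and at the end of init_heap, once the heap pages are mapped:
unsafe {
    ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
}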

src/allocator/fixed_size_block.rs (new file)

@@ -0,0 +1,102 @@
use super::Locked;
use alloc::alloc::{GlobalAlloc, Layout};
use core::{
mem,
ptr::{self, NonNull},
};
/// The block sizes to use.
///
/// The sizes must each be a power of 2 because they are also used as
/// the block alignment (alignments must always be powers of 2).
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];
/// Choose an appropriate block size for the given layout.
///
/// Returns an index into the `BLOCK_SIZES` array.
fn list_index(layout: &Layout) -> Option<usize> {
let required_block_size = layout.size().max(layout.align());
BLOCK_SIZES.iter().position(|&s| s >= required_block_size)
}
struct ListNode {
next: Option<&'static mut ListNode>,
}
pub struct FixedSizeBlockAllocator {
list_heads: [Option<&'static mut ListNode>; BLOCK_SIZES.len()],
fallback_allocator: linked_list_allocator::Heap,
}
impl FixedSizeBlockAllocator {
/// Creates an empty FixedSizeBlockAllocator.
pub const fn new() -> Self {
FixedSizeBlockAllocator {
list_heads: [None; BLOCK_SIZES.len()],
fallback_allocator: linked_list_allocator::Heap::empty(),
}
}
/// Initialize the allocator with the given heap bounds.
///
/// This function is unsafe because the caller must guarantee that the given
/// heap bounds are valid and that the heap is unused. This method must be
/// called only once.
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
self.fallback_allocator.init(heap_start, heap_size);
}
/// Allocates using the fallback allocator.
fn fallback_alloc(&mut self, layout: Layout) -> *mut u8 {
match self.fallback_allocator.allocate_first_fit(layout) {
Ok(ptr) => ptr.as_ptr(),
Err(_) => ptr::null_mut(),
}
}
}
unsafe impl GlobalAlloc for Locked<FixedSizeBlockAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let mut allocator = self.lock();
match list_index(&layout) {
Some(index) => {
match allocator.list_heads[index].take() {
Some(node) => {
allocator.list_heads[index] = node.next.take();
node as *mut ListNode as *mut u8
}
None => {
// no block exists in list => allocate new block
let block_size = BLOCK_SIZES[index];
// only works if all block sizes are a power of 2
let block_align = block_size;
let layout = Layout::from_size_align(block_size, block_align).unwrap();
allocator.fallback_alloc(layout)
}
}
}
None => allocator.fallback_alloc(layout),
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let mut allocator = self.lock();
match list_index(&layout) {
Some(index) => {
let new_node = ListNode {
next: allocator.list_heads[index].take(),
};
// verify that block has size and alignment required for storing node
assert!(mem::size_of::<ListNode>() <= BLOCK_SIZES[index]);
assert!(mem::align_of::<ListNode>() <= BLOCK_SIZES[index]);
let new_node_ptr = ptr as *mut ListNode;
new_node_ptr.write(new_node);
allocator.list_heads[index] = Some(&mut *new_node_ptr);
}
None => {
let ptr = NonNull::new(ptr).unwrap();
allocator.fallback_allocator.deallocate(ptr, layout);
}
}
}
}
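
A small worked example of the list_index mapping above (a hypothetical unit test, not part of the diff): the required block size is the larger of the layout's size and alignment, and the first BLOCK_SIZES entry at least that large selects the list.

#[test_case]
fn block_size_selection() {
    let layout = Layout::from_size_align(20, 8).unwrap();
    // max(20, 8) = 20; the first BLOCK_SIZES entry >= 20 is 32, at index 2
    assert_eq!(list_index(&layout), Some(2));

    // larger than 2048 bytes: no list matches, so the fallback allocator is used
    let big = Layout::from_size_align(4096, 8).unwrap();
    assert_eq!(list_index(&big), None);
}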

src/allocator/linked_list.rs (new file)

@@ -0,0 +1,145 @@
use super::{align_up, Locked};
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};
struct ListNode {
size: usize,
next: Option<&'static mut ListNode>,
}
impl ListNode {
const fn new(size: usize) -> Self {
ListNode { size, next: None }
}
fn start_addr(&self) -> usize {
self as *const Self as usize
}
fn end_addr(&self) -> usize {
self.start_addr() + self.size
}
}
pub struct LinkedListAllocator {
head: ListNode,
}
impl LinkedListAllocator {
/// Creates an empty LinkedListAllocator.
pub const fn new() -> Self {
Self {
head: ListNode::new(0),
}
}
/// Initialize the allocator with the given heap bounds.
///
/// This function is unsafe because the caller must guarantee that the given
/// heap bounds are valid and that the heap is unused. This method must be
/// called only once.
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
self.add_free_region(heap_start, heap_size);
}
/// Adds the given memory region to the front of the list.
unsafe fn add_free_region(&mut self, addr: usize, size: usize) {
// ensure that the freed region is capable of holding ListNode
assert_eq!(align_up(addr, mem::align_of::<ListNode>()), addr);
assert!(size >= mem::size_of::<ListNode>());
// create a new list node and append it at the start of the list
let mut node = ListNode::new(size);
node.next = self.head.next.take();
let node_ptr = addr as *mut ListNode;
node_ptr.write(node);
self.head.next = Some(&mut *node_ptr)
}
/// Looks for a free region with the given size and alignment and removes
/// it from the list.
///
/// Returns a tuple of the list node and the start address of the allocation.
fn find_region(&mut self, size: usize, align: usize) -> Option<(&'static mut ListNode, usize)> {
// reference to current list node, updated for each iteration
let mut current = &mut self.head;
// look for a large enough memory region in linked list
while let Some(ref mut region) = current.next {
if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
// region suitable for allocation -> remove node from list
let next = region.next.take();
let ret = Some((current.next.take().unwrap(), alloc_start));
current.next = next;
return ret;
} else {
// region not suitable -> continue with next region
current = current.next.as_mut().unwrap();
}
}
// no suitable region found
None
}
/// Try to use the given region for an allocation with given size and alignment.
///
/// Returns the allocation start address on success.
fn alloc_from_region(region: &ListNode, size: usize, align: usize) -> Result<usize, ()> {
let alloc_start = align_up(region.start_addr(), align);
let alloc_end = alloc_start.checked_add(size).ok_or(())?;
if alloc_end > region.end_addr() {
// region too small
return Err(());
}
let excess_size = region.end_addr() - alloc_end;
if excess_size > 0 && excess_size < mem::size_of::<ListNode>() {
// rest of region too small to hold a ListNode (required because the
// allocation splits the region in a used and a free part)
return Err(());
}
// region suitable for allocation
Ok(alloc_start)
}
/// Adjust the given layout so that the resulting allocated memory
/// region is also capable of storing a `ListNode`.
///
/// Returns the adjusted size and alignment as a (size, align) tuple.
fn size_align(layout: Layout) -> (usize, usize) {
let layout = layout
.align_to(mem::align_of::<ListNode>())
.expect("adjusting alignment failed")
.pad_to_align();
let size = layout.size().max(mem::size_of::<ListNode>());
(size, layout.align())
}
}
unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// perform layout adjustments
let (size, align) = LinkedListAllocator::size_align(layout);
let mut allocator = self.lock();
if let Some((region, alloc_start)) = allocator.find_region(size, align) {
let alloc_end = alloc_start.checked_add(size).expect("overflow");
let excess_size = region.end_addr() - alloc_end;
if excess_size > 0 {
allocator.add_free_region(alloc_end, excess_size);
}
alloc_start as *mut u8
} else {
ptr::null_mut()
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// perform layout adjustments
let (size, _) = LinkedListAllocator::size_align(layout);
self.lock().add_free_region(ptr as usize, size)
}
}
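
One worked example of the size_align adjustment above (a hypothetical unit test, not part of the diff; it assumes the usual x86_64 layout where ListNode is 16 bytes with 8-byte alignment): even a 1-byte allocation is padded so that the freed region can later be reinserted as a list node.

#[test_case]
fn size_align_rounds_up_small_layouts() {
    let layout = Layout::from_size_align(1, 1).unwrap();
    let (size, align) = LinkedListAllocator::size_align(layout);
    // align_to(8) + pad_to_align() gives size 8, then max(8, size_of::<ListNode>()) = 16
    assert_eq!((size, align), (16, 8));
}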

src/interrupts.rs

@@ -2,7 +2,9 @@ use crate::{gdt, hlt_loop, print, println};
use lazy_static::lazy_static;
use pic8259_simple::ChainedPics;
use spin;
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode};
use x86_64::structures::idt::{
self, InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode,
};
pub const PIC_1_OFFSET: u8 = 32;
pub const PIC_2_OFFSET: u8 = PIC_1_OFFSET + 8;
@@ -30,6 +32,8 @@ pub static PICS: spin::Mutex<ChainedPics> =
lazy_static! {
static ref IDT: InterruptDescriptorTable = {
let mut idt = InterruptDescriptorTable::new();
idt::set_general_handler!(&mut idt, default_handler);
idt.breakpoint.set_handler_fn(breakpoint_handler);
idt.page_fault.set_handler_fn(page_fault_handler);
unsafe {
@@ -37,8 +41,6 @@ lazy_static! {
.set_handler_fn(double_fault_handler)
.set_stack_index(gdt::DOUBLE_FAULT_IST_INDEX);
}
idt[InterruptIndex::Timer.as_usize()].set_handler_fn(timer_interrupt_handler);
idt[InterruptIndex::Keyboard.as_usize()].set_handler_fn(keyboard_interrupt_handler);
idt
};
}
@@ -47,6 +49,18 @@ pub fn init_idt() {
IDT.load();
}
fn default_handler(stack_frame: &mut InterruptStackFrame, index: u8, error_code: Option<u64>) {
// index 32 is the timer interrupt (PIC_1_OFFSET)
if index == 32 {
print!("{} ", index);
} else {
println!("INTERRUPT {}: \n{:#?}", index, stack_frame);
}
unsafe {
PICS.lock().notify_end_of_interrupt(index);
}
}
extern "x86-interrupt" fn breakpoint_handler(stack_frame: &mut InterruptStackFrame) {
println!("EXCEPTION: BREAKPOINT\n{:#?}", stack_frame);
}
@@ -80,28 +94,11 @@ extern "x86-interrupt" fn timer_interrupt_handler(_stack_frame: &mut InterruptSt
}
extern "x86-interrupt" fn keyboard_interrupt_handler(_stack_frame: &mut InterruptStackFrame) {
use pc_keyboard::{layouts, DecodedKey, HandleControl, Keyboard, ScancodeSet1};
use spin::Mutex;
use x86_64::instructions::port::Port;
lazy_static! {
static ref KEYBOARD: Mutex<Keyboard<layouts::Us104Key, ScancodeSet1>> = Mutex::new(
Keyboard::new(layouts::Us104Key, ScancodeSet1, HandleControl::Ignore)
);
}
let mut keyboard = KEYBOARD.lock();
let mut port = Port::new(0x60);
let scancode: u8 = unsafe { port.read() };
if let Ok(Some(key_event)) = keyboard.add_byte(scancode) {
if let Some(key) = keyboard.process_keyevent(key_event) {
match key {
DecodedKey::Unicode(character) => print!("{}", character),
DecodedKey::RawKey(key) => print!("{:?}", key),
}
}
}
crate::task::keyboard::add_scancode(scancode);
unsafe {
PICS.lock()

src/lib.rs

@@ -3,6 +3,9 @@
#![feature(custom_test_frameworks)]
#![feature(abi_x86_interrupt)]
#![feature(alloc_error_handler)]
#![feature(const_fn)]
#![feature(const_in_array_repeat_expressions)]
#![feature(wake_trait)]
#![test_runner(crate::test_runner)]
#![reexport_test_harness_main = "test_main"]
@@ -15,6 +18,7 @@ pub mod gdt;
pub mod interrupts;
pub mod memory;
pub mod serial;
pub mod task;
pub mod vga_buffer;
pub fn init() {

src/main.rs

@@ -6,8 +6,8 @@
extern crate alloc;
use alloc::{boxed::Box, rc::Rc, vec, vec::Vec};
use blog_os::println;
use blog_os::task::{executor::Executor, keyboard, Task};
use bootloader::{entry_point, BootInfo};
use core::panic::PanicInfo;
@@ -27,35 +27,13 @@ fn kernel_main(boot_info: &'static BootInfo) -> ! {
allocator::init_heap(&mut mapper, &mut frame_allocator).expect("heap initialization failed");
// allocate a number on the heap
let heap_value = Box::new(41);
println!("heap_value at {:p}", heap_value);
// create a dynamically sized vector
let mut vec = Vec::new();
for i in 0..500 {
vec.push(i);
}
println!("vec at {:p}", vec.as_slice());
// create a reference counted vector -> will be freed when count reaches 0
let reference_counted = Rc::new(vec![1, 2, 3]);
let cloned_reference = reference_counted.clone();
println!(
"current reference count is {}",
Rc::strong_count(&cloned_reference)
);
core::mem::drop(reference_counted);
println!(
"reference count is {} now",
Rc::strong_count(&cloned_reference)
);
#[cfg(test)]
test_main();
println!("It did not crash!");
blog_os::hlt_loop();
let mut executor = Executor::new();
executor.spawn(Task::new(example_task()));
executor.spawn(Task::new(keyboard::print_keypresses()));
executor.run();
}
/// This function is called on panic.
@@ -72,6 +50,15 @@ fn panic(info: &PanicInfo) -> ! {
blog_os::test_panic_handler(info)
}
async fn async_number() -> u32 {
42
}
async fn example_task() {
let number = async_number().await;
println!("async number: {}", number);
}
#[test_case]
fn trivial_assertion() {
assert_eq!(1, 1);

src/task/executor.rs (new file) · 102 lines

@@ -0,0 +1,102 @@
use super::{Task, TaskId};
use alloc::{collections::BTreeMap, sync::Arc, task::Wake};
use core::task::{Context, Poll, Waker};
use crossbeam_queue::ArrayQueue;
pub struct Executor {
tasks: BTreeMap<TaskId, Task>,
task_queue: Arc<ArrayQueue<TaskId>>,
waker_cache: BTreeMap<TaskId, Waker>,
}
impl Executor {
pub fn new() -> Self {
Executor {
tasks: BTreeMap::new(),
task_queue: Arc::new(ArrayQueue::new(100)),
waker_cache: BTreeMap::new(),
}
}
pub fn spawn(&mut self, task: Task) {
let task_id = task.id;
if self.tasks.insert(task.id, task).is_some() {
panic!("task with same ID already in tasks");
}
self.task_queue.push(task_id).expect("queue full");
}
pub fn run(&mut self) -> ! {
loop {
self.run_ready_tasks();
self.sleep_if_idle();
}
}
fn run_ready_tasks(&mut self) {
// destructure `self` to avoid borrow checker errors
let Self {
tasks,
task_queue,
waker_cache,
} = self;
while let Ok(task_id) = task_queue.pop() {
let task = match tasks.get_mut(&task_id) {
Some(task) => task,
None => continue, // task no longer exists
};
let waker = waker_cache
.entry(task_id)
.or_insert_with(|| TaskWaker::new(task_id, task_queue.clone()));
let mut context = Context::from_waker(waker);
match task.poll(&mut context) {
Poll::Ready(()) => {
// task done -> remove it and its cached waker
tasks.remove(&task_id);
waker_cache.remove(&task_id);
}
Poll::Pending => {}
}
}
}
fn sleep_if_idle(&self) {
use x86_64::instructions::interrupts::{self, enable_interrupts_and_hlt};
// disable interrupts so that no wake-up can arrive between the emptiness check
// and the hlt; enable_interrupts_and_hlt re-enables them atomically with the halt
interrupts::disable();
if self.task_queue.is_empty() {
enable_interrupts_and_hlt();
} else {
interrupts::enable();
}
}
}
struct TaskWaker {
task_id: TaskId,
task_queue: Arc<ArrayQueue<TaskId>>,
}
impl TaskWaker {
fn new(task_id: TaskId, task_queue: Arc<ArrayQueue<TaskId>>) -> Waker {
Waker::from(Arc::new(TaskWaker {
task_id,
task_queue,
}))
}
fn wake_task(&self) {
self.task_queue.push(self.task_id).expect("task_queue full");
}
}
impl Wake for TaskWaker {
fn wake(self: Arc<Self>) {
self.wake_task();
}
fn wake_by_ref(self: &Arc<Self>) {
self.wake_task();
}
}
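
A note on the Wake impl at the end of this file: it is what lets TaskWaker::new convert Arc<TaskWaker> into a core::task::Waker via Waker::from, using the alloc::task::Wake trait that #![feature(wake_trait)] in src/lib.rs above enables. A minimal standalone sketch of the same conversion, with a hypothetical NoopWaker type:

use alloc::{sync::Arc, task::Wake};
use core::task::Waker;

struct NoopWaker; // hypothetical example type

impl Wake for NoopWaker {
    fn wake(self: Arc<Self>) {} // wake_by_ref has a default implementation
}

fn make_waker() -> Waker {
    // alloc provides From<Arc<W>> for Waker for any W: Wake + Send + Sync + 'static
    Waker::from(Arc::new(NoopWaker))
}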

src/task/keyboard.rs (new file) · 83 lines

@@ -0,0 +1,83 @@
use crate::{print, println};
use conquer_once::spin::OnceCell;
use core::{
pin::Pin,
task::{Context, Poll},
};
use crossbeam_queue::ArrayQueue;
use futures_util::{
stream::{Stream, StreamExt},
task::AtomicWaker,
};
use pc_keyboard::{layouts, DecodedKey, HandleControl, Keyboard, ScancodeSet1};
static SCANCODE_QUEUE: OnceCell<ArrayQueue<u8>> = OnceCell::uninit();
static WAKER: AtomicWaker = AtomicWaker::new();
/// Called by the keyboard interrupt handler
///
/// Must not block or allocate.
pub(crate) fn add_scancode(scancode: u8) {
if let Ok(queue) = SCANCODE_QUEUE.try_get() {
if let Err(_) = queue.push(scancode) {
println!("WARNING: scancode queue full; dropping keyboard input");
} else {
WAKER.wake();
}
} else {
println!("WARNING: scancode queue uninitialized");
}
}
pub struct ScancodeStream {
_private: (),
}
impl ScancodeStream {
pub fn new() -> Self {
SCANCODE_QUEUE
.try_init_once(|| ArrayQueue::new(100))
.expect("ScancodeStream::new should only be called once");
ScancodeStream { _private: () }
}
}
impl Stream for ScancodeStream {
type Item = u8;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<u8>> {
let queue = SCANCODE_QUEUE
.try_get()
.expect("scancode queue not initialized");
// fast path
if let Ok(scancode) = queue.pop() {
return Poll::Ready(Some(scancode));
}
// register the waker and pop again: a scancode pushed between the first pop
// and the registration would otherwise be missed
WAKER.register(&cx.waker());
match queue.pop() {
Ok(scancode) => {
WAKER.take();
Poll::Ready(Some(scancode))
}
Err(crossbeam_queue::PopError) => Poll::Pending,
}
}
}
pub async fn print_keypresses() {
let mut scancodes = ScancodeStream::new();
let mut keyboard = Keyboard::new(layouts::Us104Key, ScancodeSet1, HandleControl::Ignore);
while let Some(scancode) = scancodes.next().await {
if let Ok(Some(key_event)) = keyboard.add_byte(scancode) {
if let Some(key) = keyboard.process_keyevent(key_event) {
match key {
DecodedKey::Unicode(character) => print!("{}", character),
DecodedKey::RawKey(key) => print!("{:?}", key),
}
}
}
}
}

src/task/mod.rs (new file) · 39 lines

@@ -0,0 +1,39 @@
use alloc::boxed::Box;
use core::{
future::Future,
pin::Pin,
sync::atomic::{AtomicU64, Ordering},
task::{Context, Poll},
};
pub mod executor;
pub mod keyboard;
pub mod simple_executor;
pub struct Task {
id: TaskId,
future: Pin<Box<dyn Future<Output = ()>>>,
}
impl Task {
pub fn new(future: impl Future<Output = ()> + 'static) -> Task {
Task {
id: TaskId::new(),
future: Box::pin(future),
}
}
fn poll(&mut self, context: &mut Context) -> Poll<()> {
self.future.as_mut().poll(context)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct TaskId(u64);
impl TaskId {
fn new() -> Self {
static NEXT_ID: AtomicU64 = AtomicU64::new(0);
TaskId(NEXT_ID.fetch_add(1, Ordering::Relaxed))
}
}

src/task/simple_executor.rs (new file)

@@ -0,0 +1,44 @@
use super::Task;
use alloc::collections::VecDeque;
use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
pub struct SimpleExecutor {
task_queue: VecDeque<Task>,
}
impl SimpleExecutor {
pub fn new() -> SimpleExecutor {
SimpleExecutor {
task_queue: VecDeque::new(),
}
}
pub fn spawn(&mut self, task: Task) {
self.task_queue.push_back(task)
}
pub fn run(&mut self) {
while let Some(mut task) = self.task_queue.pop_front() {
let waker = dummy_waker();
let mut context = Context::from_waker(&waker);
match task.poll(&mut context) {
Poll::Ready(()) => {} // task done
Poll::Pending => self.task_queue.push_back(task),
}
}
}
}
fn dummy_raw_waker() -> RawWaker {
fn no_op(_: *const ()) {}
fn clone(_: *const ()) -> RawWaker {
dummy_raw_waker()
}
// vtable slots in order: clone, wake, wake_by_ref, drop (only clone does anything here)
let vtable = &RawWakerVTable::new(clone, no_op, no_op, no_op);
RawWaker::new(0 as *const (), vtable)
}
fn dummy_waker() -> Waker {
unsafe { Waker::from_raw(dummy_raw_waker()) }
}

tests/heap_allocation.rs

@@ -54,6 +54,16 @@ fn many_boxes() {
}
}
#[test_case]
fn many_boxes_long_lived() {
let long_lived = Box::new(1); // new
for i in 0..HEAP_SIZE {
let x = Box::new(i);
assert_eq!(*x, i);
}
assert_eq!(*long_lived, 1); // new
}
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
blog_os::test_panic_handler(info)