mirror of https://github.com/phil-opp/blog_os.git
synced 2025-12-16 14:27:49 +00:00

Compare commits: f0149947cf ... post-12-wi

42 commits:
3fc7bf6aa1, b337f65abb, 22c6bd5aa7, 87719f2260, 0caf5c351e, cb7bb0ddef,
7ad30651fb, 49923acb3f, f2b1f3a593, 5e2e0b629e, 35379c90e6, e5d10fcaec,
e1242a867f, cd138a3a1b, 11a0eb679c, 241c1ab2c9, b75406b37e, c3450b6df7,
ce1fdcf768, 002d6f255f, 6c3bf0b10f, 7a792f5cb0, 93b4dcf434, 821dd2adb4,
d636939b51, 9b7326541e, 4f234b67ef, 7381e11f3c, a9fe65a0ce, 2001814119,
a5c50e7408, 70a52c291d, c56bfa27e4, 55aec9ebf3, 2e1d132a9a, 63e8577d77,
75d826bf69, 45be3f0648, 055c560a7a, e87044a7ee, 08d2289dad, 7c84dbaa1d

Cargo.toml
@@ -33,3 +33,6 @@ test-args = [
     "-display", "none"
 ]
 test-success-exit-code = 33 # (0x10 << 1) | 1
+
+[profile.release]
+lto = true

src/allocator/bump.rs (new file, 58 lines)

use super::{align_up, Locked};
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr;

pub struct BumpAllocator {
    heap_start: usize,
    heap_end: usize,
    next: usize,
    allocations: usize,
}

impl BumpAllocator {
    /// Creates a new empty bump allocator.
    pub const fn new() -> Self {
        BumpAllocator {
            heap_start: 0,
            heap_end: 0,
            next: 0,
            allocations: 0,
        }
    }

    /// Initializes the bump allocator with the given heap bounds.
    ///
    /// This method is unsafe because the caller must ensure that the given
    /// memory range is unused. Also, this method must be called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.heap_start = heap_start;
        self.heap_end = heap_start + heap_size;
        self.next = heap_start;
    }
}

unsafe impl GlobalAlloc for Locked<BumpAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut bump = self.lock(); // get a mutable reference

        let alloc_start = align_up(bump.next, layout.align());
        let alloc_end = alloc_start + layout.size();

        if alloc_end > bump.heap_end {
            ptr::null_mut() // out of memory
        } else {
            bump.next = alloc_end;
            bump.allocations += 1;
            alloc_start as *mut u8
        }
    }

    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        let mut bump = self.lock(); // get a mutable reference

        bump.allocations -= 1;
        if bump.allocations == 0 {
            bump.next = bump.heap_start;
        }
    }
}
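
A bump allocator can only reuse memory once every allocation has been freed (the `allocations` counter reaching zero resets `next`). If one wanted it as the global allocator, the only change in the allocator module would be the following (hypothetical sketch, not part of this diff; the actual diff below wires up the fixed-size block allocator instead):

use bump::BumpAllocator;

// Hypothetical: swap the global allocator for the bump allocator. It reuses
// the same Locked wrapper and init_heap setup shown further down.
#[global_allocator]
static ALLOCATOR: Locked<BumpAllocator> = Locked::new(BumpAllocator::new());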

src/allocator/fixed_size_block.rs (new file, 102 lines)

use super::Locked;
use alloc::alloc::{GlobalAlloc, Layout};
use core::{
    mem,
    ptr::{self, NonNull},
};

/// The block sizes to use.
///
/// The sizes must each be power of 2 because they are also used as
/// the block alignment (alignments must be always powers of 2).
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];

/// Choose an appropriate block size for the given layout.
///
/// Returns an index into the `BLOCK_SIZES` array.
fn list_index(layout: &Layout) -> Option<usize> {
    let required_block_size = layout.size().max(layout.align());
    BLOCK_SIZES.iter().position(|&s| s >= required_block_size)
}

struct ListNode {
    next: Option<&'static mut ListNode>,
}

pub struct FixedSizeBlockAllocator {
    list_heads: [Option<&'static mut ListNode>; BLOCK_SIZES.len()],
    fallback_allocator: linked_list_allocator::Heap,
}

impl FixedSizeBlockAllocator {
    /// Creates an empty FixedSizeBlockAllocator.
    pub const fn new() -> Self {
        FixedSizeBlockAllocator {
            list_heads: [None; BLOCK_SIZES.len()],
            fallback_allocator: linked_list_allocator::Heap::empty(),
        }
    }

    /// Initialize the allocator with the given heap bounds.
    ///
    /// This function is unsafe because the caller must guarantee that the given
    /// heap bounds are valid and that the heap is unused. This method must be
    /// called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.fallback_allocator.init(heap_start, heap_size);
    }

    /// Allocates using the fallback allocator.
    fn fallback_alloc(&mut self, layout: Layout) -> *mut u8 {
        match self.fallback_allocator.allocate_first_fit(layout) {
            Ok(ptr) => ptr.as_ptr(),
            Err(_) => ptr::null_mut(),
        }
    }
}

unsafe impl GlobalAlloc for Locked<FixedSizeBlockAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                match allocator.list_heads[index].take() {
                    Some(node) => {
                        allocator.list_heads[index] = node.next.take();
                        node as *mut ListNode as *mut u8
                    }
                    None => {
                        // no block exists in list => allocate new block
                        let block_size = BLOCK_SIZES[index];
                        // only works if all block sizes are a power of 2
                        let block_align = block_size;
                        let layout = Layout::from_size_align(block_size, block_align).unwrap();
                        allocator.fallback_alloc(layout)
                    }
                }
            }
            None => allocator.fallback_alloc(layout),
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                let new_node = ListNode {
                    next: allocator.list_heads[index].take(),
                };
                // verify that block has size and alignment required for storing node
                assert!(mem::size_of::<ListNode>() <= BLOCK_SIZES[index]);
                assert!(mem::align_of::<ListNode>() <= BLOCK_SIZES[index]);
                let new_node_ptr = ptr as *mut ListNode;
                new_node_ptr.write(new_node);
                allocator.list_heads[index] = Some(&mut *new_node_ptr);
            }
            None => {
                let ptr = NonNull::new(ptr).unwrap();
                allocator.fallback_allocator.deallocate(ptr, layout);
            }
        }
    }
}
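
To make the size-class lookup concrete: a 20-byte allocation with 8-byte alignment needs max(20, 8) = 20 bytes, and the smallest block size that fits is 32, so it is served from the index-2 free list. An illustrative unit test (hypothetical, and it would have to live in this module since `list_index` is private):

#[test_case]
fn size_class_lookup() {
    // max(20, 8) = 20 bytes needed; the smallest fitting class is 32 (index 2).
    let layout = Layout::from_size_align(20, 8).unwrap();
    assert_eq!(list_index(&layout), Some(2)); // BLOCK_SIZES[2] == 32
    // Anything above 2048 bytes has no size class and uses the fallback:
    let big = Layout::from_size_align(4096, 8).unwrap();
    assert_eq!(list_index(&big), None);
}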

src/allocator/linked_list.rs (new file, 145 lines)

use super::{align_up, Locked};
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};

struct ListNode {
    size: usize,
    next: Option<&'static mut ListNode>,
}

impl ListNode {
    const fn new(size: usize) -> Self {
        ListNode { size, next: None }
    }

    fn start_addr(&self) -> usize {
        self as *const Self as usize
    }

    fn end_addr(&self) -> usize {
        self.start_addr() + self.size
    }
}

pub struct LinkedListAllocator {
    head: ListNode,
}

impl LinkedListAllocator {
    /// Creates an empty LinkedListAllocator.
    pub const fn new() -> Self {
        Self {
            head: ListNode::new(0),
        }
    }

    /// Initialize the allocator with the given heap bounds.
    ///
    /// This function is unsafe because the caller must guarantee that the given
    /// heap bounds are valid and that the heap is unused. This method must be
    /// called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.add_free_region(heap_start, heap_size);
    }

    /// Adds the given memory region to the front of the list.
    unsafe fn add_free_region(&mut self, addr: usize, size: usize) {
        // ensure that the freed region is capable of holding ListNode
        assert!(align_up(addr, mem::align_of::<ListNode>()) == addr);
        assert!(size >= mem::size_of::<ListNode>());

        // create a new list node and append it at the start of the list
        let mut node = ListNode::new(size);
        node.next = self.head.next.take();
        let node_ptr = addr as *mut ListNode;
        node_ptr.write(node);
        self.head.next = Some(&mut *node_ptr)
    }

    /// Looks for a free region with the given size and alignment and removes
    /// it from the list.
    ///
    /// Returns a tuple of the list node and the start address of the allocation.
    fn find_region(&mut self, size: usize, align: usize) -> Option<(&'static mut ListNode, usize)> {
        // reference to current list node, updated for each iteration
        let mut current = &mut self.head;
        // look for a large enough memory region in linked list
        while let Some(ref mut region) = current.next {
            if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
                // region suitable for allocation -> remove node from list
                let next = region.next.take();
                let ret = Some((current.next.take().unwrap(), alloc_start));
                current.next = next;
                return ret;
            } else {
                // region not suitable -> continue with next region
                current = current.next.as_mut().unwrap();
            }
        }

        // no suitable region found
        None
    }

    /// Try to use the given region for an allocation with given size and alignment.
    ///
    /// Returns the allocation start address on success.
    fn alloc_from_region(region: &ListNode, size: usize, align: usize) -> Result<usize, ()> {
        let alloc_start = align_up(region.start_addr(), align);
        let alloc_end = alloc_start + size;

        if alloc_end > region.end_addr() {
            // region too small
            return Err(());
        }

        let excess_size = region.end_addr() - alloc_end;
        if excess_size > 0 && excess_size < mem::size_of::<ListNode>() {
            // rest of region too small to hold a ListNode (required because the
            // allocation splits the region in a used and a free part)
            return Err(());
        }

        // region suitable for allocation
        Ok(alloc_start)
    }

    /// Adjust the given layout so that the resulting allocated memory
    /// region is also capable of storing a `ListNode`.
    ///
    /// Returns the adjusted size and alignment as a (size, align) tuple.
    fn size_align(layout: Layout) -> (usize, usize) {
        let layout = layout
            .align_to(mem::align_of::<ListNode>())
            .expect("adjusting alignment failed")
            .pad_to_align();
        let size = layout.size().max(mem::size_of::<ListNode>());
        (size, layout.align())
    }
}

unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // perform layout adjustments
        let (size, align) = LinkedListAllocator::size_align(layout);
        let mut allocator = self.inner.lock();

        if let Some((region, alloc_start)) = allocator.find_region(size, align) {
            let alloc_end = alloc_start + size;
            let excess_size = region.end_addr() - alloc_end;
            if excess_size > 0 {
                allocator.add_free_region(alloc_end, excess_size);
            }
            alloc_start as *mut u8
        } else {
            ptr::null_mut()
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // perform layout adjustments
        let (size, _) = LinkedListAllocator::size_align(layout);

        self.inner.lock().add_free_region(ptr as usize, size)
    }
}
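
The `size_align` adjustment is what guarantees that every freed allocation is large and aligned enough to be turned back into a `ListNode`. An illustrative unit test (hypothetical; `size_align` is private to this module, and the sizes assume the usual x86_64 layout where `ListNode` is 16 bytes with 8-byte alignment):

#[test_case]
fn layout_roundup() {
    // Even a 1-byte allocation is rounded up so dealloc can store a ListNode.
    let layout = Layout::from_size_align(1, 1).unwrap();
    let (size, align) = LinkedListAllocator::size_align(layout);
    assert_eq!(size, 16); // padded up to mem::size_of::<ListNode>()
    assert_eq!(align, 8); // raised to mem::align_of::<ListNode>()
}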

src/allocator.rs
@@ -1,6 +1,6 @@
 use alloc::alloc::{GlobalAlloc, Layout};
 use core::ptr::null_mut;
-use linked_list_allocator::LockedHeap;
+use fixed_size_block::FixedSizeBlockAllocator;
 use x86_64::{
     structures::paging::{
         mapper::MapToError, FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB,
@@ -8,11 +8,15 @@ use x86_64::{
     VirtAddr,
 };
 
+pub mod bump;
+pub mod fixed_size_block;
+pub mod linked_list;
+
 pub const HEAP_START: usize = 0x_4444_4444_0000;
 pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
 
 #[global_allocator]
-static ALLOCATOR: LockedHeap = LockedHeap::empty();
+static ALLOCATOR: Locked<FixedSizeBlockAllocator> = Locked::new(FixedSizeBlockAllocator::new());
 
 pub fn init_heap(
     mapper: &mut impl Mapper<Size4KiB>,
@@ -52,3 +56,29 @@ unsafe impl GlobalAlloc for Dummy {
         panic!("dealloc should be never called")
     }
 }
+
+/// A wrapper around spin::Mutex to permit trait implementations.
+pub struct Locked<A> {
+    inner: spin::Mutex<A>,
+}
+
+impl<A> Locked<A> {
+    pub const fn new(inner: A) -> Self {
+        Locked {
+            inner: spin::Mutex::new(inner),
+        }
+    }
+
+    pub fn lock(&self) -> spin::MutexGuard<A> {
+        self.inner.lock()
+    }
+}
+
+fn align_up(addr: usize, align: usize) -> usize {
+    let remainder = addr % align;
+    if remainder == 0 {
+        addr // addr already aligned
+    } else {
+        addr - remainder + align
+    }
+}
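
The new `align_up` helper rounds an address up to the next multiple of `align`; its callers only ever pass power-of-two alignments. A quick worked check of the arithmetic (illustrative):

// remainder = 0x1234 % 0x1000 == 0x234, so align_up returns
// 0x1234 - 0x234 + 0x1000 == 0x2000.
assert_eq!(align_up(0x1234, 0x1000), 0x2000);
assert_eq!(align_up(0x2000, 0x1000), 0x2000); // already aligned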

src/interrupts.rs
@@ -77,6 +77,7 @@ extern "x86-interrupt" fn timer_interrupt_handler(_stack_frame: &mut InterruptStackFrame) {
         PICS.lock()
             .notify_end_of_interrupt(InterruptIndex::Timer.as_u8());
     }
+    crate::multitasking::invoke_scheduler();
 }
 
 extern "x86-interrupt" fn keyboard_interrupt_handler(_stack_frame: &mut InterruptStackFrame) {
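
Note the placement of the new call: the scheduler is invoked only after the end-of-interrupt signal. A context switch may not return to this handler for a long time, and an unacknowledged timer interrupt would block all further timer ticks, ending preemption after the first switch. The resulting handler shape (reassembled from the diff for readability, with the earlier tick handling elided):

extern "x86-interrupt" fn timer_interrupt_handler(_stack_frame: &mut InterruptStackFrame) {
    // (tick handling elided)
    unsafe {
        PICS.lock()
            .notify_end_of_interrupt(InterruptIndex::Timer.as_u8());
    }
    // New: give the scheduler a chance to preempt the running thread. Runs
    // after the EOI so the next timer interrupt can still fire while the
    // switched-to thread executes.
    crate::multitasking::invoke_scheduler();
}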

src/lib.rs (10 changed lines)
@@ -3,6 +3,15 @@
 #![feature(custom_test_frameworks)]
 #![feature(abi_x86_interrupt)]
 #![feature(alloc_error_handler)]
+#![feature(const_fn)]
+#![feature(alloc_layout_extra)]
+#![feature(const_in_array_repeat_expressions)]
+#![feature(global_asm)]
+#![feature(asm)]
+#![feature(raw)]
+#![feature(never_type)]
+#![feature(naked_functions)]
+#![feature(option_expect_none)]
 #![test_runner(crate::test_runner)]
 #![reexport_test_harness_main = "test_main"]
 
@@ -14,6 +23,7 @@ pub mod allocator;
 pub mod gdt;
 pub mod interrupts;
 pub mod memory;
+pub mod multitasking;
 pub mod serial;
 pub mod vga_buffer;

src/main.rs (36 changed lines)
@@ -7,7 +7,8 @@
 extern crate alloc;
 
 use alloc::{boxed::Box, rc::Rc, vec, vec::Vec};
-use blog_os::println;
+use blog_os::multitasking::{self, thread::Thread, with_scheduler};
+use blog_os::{print, println};
 use bootloader::{entry_point, BootInfo};
 use core::panic::PanicInfo;
 
@@ -54,14 +55,45 @@ fn kernel_main(boot_info: &'static BootInfo) -> ! {
     #[cfg(test)]
     test_main();
 
+    let idle_thread = Thread::create(idle_thread, 2, &mut mapper, &mut frame_allocator).unwrap();
+    with_scheduler(|s| s.set_idle_thread(idle_thread));
+
+    for _ in 0..10 {
+        let thread = Thread::create(thread_entry, 2, &mut mapper, &mut frame_allocator).unwrap();
+        with_scheduler(|s| s.add_new_thread(thread));
+    }
+    let thread =
+        Thread::create_from_closure(|| thread_entry(), 2, &mut mapper, &mut frame_allocator)
+            .unwrap();
+    with_scheduler(|s| s.add_new_thread(thread));
+
     println!("It did not crash!");
-    blog_os::hlt_loop();
+    thread_entry();
+}
+
+fn idle_thread() -> ! {
+    loop {
+        x86_64::instructions::hlt();
+        multitasking::yield_now();
+    }
+}
+
+fn thread_entry() -> ! {
+    let thread_id = with_scheduler(|s| s.current_thread_id()).as_u64();
+    for _ in 0..=thread_id {
+        print!("{}", thread_id);
+        x86_64::instructions::hlt();
+    }
+    multitasking::exit_thread();
 }
 
 /// This function is called on panic.
 #[cfg(not(test))]
 #[panic_handler]
 fn panic(info: &PanicInfo) -> ! {
+    unsafe {
+        blog_os::vga_buffer::WRITER.force_unlock();
+    }
     println!("{}", info);
     blog_os::hlt_loop();
 }

src/memory.rs
@@ -1,7 +1,7 @@
 use bootloader::bootinfo::{MemoryMap, MemoryRegionType};
 use x86_64::{
     structures::paging::{
-        FrameAllocator, Mapper, OffsetPageTable, Page, PageTable, PhysFrame, Size4KiB,
+        mapper, FrameAllocator, Mapper, OffsetPageTable, Page, PageTable, PhysFrame, Size4KiB,
         UnusedPhysFrame,
     },
     PhysAddr, VirtAddr,
@@ -36,6 +36,54 @@ unsafe fn active_level_4_table(physical_memory_offset: VirtAddr) -> &'static mut PageTable {
     &mut *page_table_ptr // unsafe
 }
 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct StackBounds {
+    start: VirtAddr,
+    end: VirtAddr,
+}
+
+impl StackBounds {
+    pub fn start(&self) -> VirtAddr {
+        self.start
+    }
+
+    pub fn end(&self) -> VirtAddr {
+        self.end
+    }
+}
+
+pub fn alloc_stack(
+    size_in_pages: u64,
+    mapper: &mut impl Mapper<Size4KiB>,
+    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
+) -> Result<StackBounds, mapper::MapToError> {
+    use core::sync::atomic::{AtomicU64, Ordering};
+    use x86_64::structures::paging::PageTableFlags as Flags;
+
+    static STACK_ALLOC_NEXT: AtomicU64 = AtomicU64::new(0x_5555_5555_0000);
+
+    let guard_page_start = STACK_ALLOC_NEXT.fetch_add(
+        (size_in_pages + 1) * Page::<Size4KiB>::SIZE,
+        Ordering::SeqCst,
+    );
+    let guard_page = Page::from_start_address(VirtAddr::new(guard_page_start))
+        .expect("`STACK_ALLOC_NEXT` not page aligned");
+
+    let stack_start = guard_page + 1;
+    let stack_end = stack_start + size_in_pages;
+    let flags = Flags::PRESENT | Flags::WRITABLE;
+    for page in Page::range(stack_start, stack_end) {
+        let frame = frame_allocator
+            .allocate_frame()
+            .ok_or(mapper::MapToError::FrameAllocationFailed)?;
+        mapper.map_to(page, frame, flags, frame_allocator)?.flush();
+    }
+    Ok(StackBounds {
+        start: stack_start.start_address(),
+        end: stack_end.start_address(),
+    })
+}
+
 /// Creates an example mapping for the given page to frame `0xb8000`.
 pub fn create_example_mapping(
     page: Page,
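
The atomic `STACK_ALLOC_NEXT` cursor hands out non-overlapping virtual address ranges, and each range begins with one page that is never mapped, so a stack overflow triggers a page fault instead of silently overwriting the neighboring stack. A worked example of the address arithmetic (illustrative values for the very first call):

// First call, size_in_pages = 2:
let bounds = alloc_stack(2, &mut mapper, &mut frame_allocator).unwrap();
// guard page:       0x5555_5555_0000..0x5555_5555_1000 (left unmapped)
// bounds.start() == 0x5555_5555_1000, bounds.end() == 0x5555_5555_3000
// The cursor advanced by (2 + 1) * 4096 bytes, so the next stack's guard
// page begins exactly at this stack's end and the ranges can never overlap.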

src/multitasking/context_switch.rs (new file, 105 lines)

use super::{with_scheduler, SwitchReason};
use crate::multitasking::thread::ThreadId;
use alloc::boxed::Box;
use core::mem;
use core::raw::TraitObject;
use x86_64::VirtAddr;

pub struct Stack {
    pointer: VirtAddr,
}

impl Stack {
    pub unsafe fn new(stack_pointer: VirtAddr) -> Self {
        Stack {
            pointer: stack_pointer,
        }
    }

    pub fn get_stack_pointer(self) -> VirtAddr {
        self.pointer
    }

    pub fn set_up_for_closure(&mut self, closure: Box<dyn FnOnce() -> !>) {
        let trait_object: TraitObject = unsafe { mem::transmute(closure) };
        unsafe { self.push(trait_object.data) };
        unsafe { self.push(trait_object.vtable) };

        self.set_up_for_entry_point(call_closure_entry);
    }

    pub fn set_up_for_entry_point(&mut self, entry_point: fn() -> !) {
        unsafe { self.push(entry_point) };
        let rflags: u64 = 0x200;
        unsafe { self.push(rflags) };
    }

    unsafe fn push<T>(&mut self, value: T) {
        self.pointer -= core::mem::size_of::<T>();
        let ptr: *mut T = self.pointer.as_mut_ptr();
        ptr.write(value);
    }
}

pub unsafe fn context_switch_to(
    new_stack_pointer: VirtAddr,
    prev_thread_id: ThreadId,
    switch_reason: SwitchReason,
) {
    asm!(
        "call asm_context_switch"
        :
        : "{rdi}"(new_stack_pointer), "{rsi}"(prev_thread_id), "{rdx}"(switch_reason as u64)
        : "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "r8", "r9", "r10",
          "r11", "r12", "r13", "r14", "r15", "rflags", "memory"
        : "intel", "volatile"
    );
}

global_asm!(
    "
    .intel_syntax noprefix

    // asm_context_switch(stack_pointer: u64, thread_id: u64)
    asm_context_switch:
        pushfq

        mov rax, rsp
        mov rsp, rdi

        mov rdi, rax
        call add_paused_thread

        popfq
        ret
    "
);

#[no_mangle]
pub extern "C" fn add_paused_thread(
    paused_stack_pointer: VirtAddr,
    paused_thread_id: ThreadId,
    switch_reason: SwitchReason,
) {
    with_scheduler(|s| s.add_paused_thread(paused_stack_pointer, paused_thread_id, switch_reason));
}

#[naked]
fn call_closure_entry() -> ! {
    unsafe {
        asm!("
            pop rsi
            pop rdi
            call call_closure
        " ::: "mem" : "intel", "volatile")
    };
    unreachable!();
}

// no_mangle required because of https://github.com/rust-lang/rust/issues/68136
#[no_mangle]
extern "C" fn call_closure(data: *mut (), vtable: *mut ()) -> ! {
    let trait_object = TraitObject { data, vtable };
    let f: Box<dyn FnOnce() -> !> = unsafe { mem::transmute(trait_object) };
    f()
}
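
The `push` calls build a fake "paused thread" stack frame that `asm_context_switch` then returns into. For a plain entry point the new stack looks like this (my reading of the code above, sketched as comments; higher addresses at the top):

// Stack prepared by set_up_for_entry_point, as consumed by asm_context_switch:
//
//   stack_bounds.end()
//   [ entry_point address ]   <- popped by `ret`, jumps into the new thread
//   [ rflags = 0x200      ]   <- popped by `popfq` (interrupt flag set)
//   <- saved stack pointer, later passed to context_switch_to in rdi

For closures, `set_up_for_closure` first pushes the trait object's data and vtable pointers below `stack_bounds.end()`, and `call_closure_entry` pops them into rsi and rdi before calling `call_closure`, matching the System V argument registers.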

src/multitasking/mod.rs (new file, 57 lines)

use scheduler::Scheduler;

pub mod context_switch;
pub mod scheduler;
pub mod thread;

static SCHEDULER: spin::Mutex<Option<Scheduler>> = spin::Mutex::new(None);

#[repr(u64)]
pub enum SwitchReason {
    Paused,
    Yield,
    Blocked,
    Exit,
}

pub fn invoke_scheduler() {
    let next = SCHEDULER
        .try_lock()
        .and_then(|mut scheduler| scheduler.as_mut().and_then(|s| s.schedule()));
    if let Some((next_stack_pointer, prev_thread_id)) = next {
        unsafe {
            context_switch::context_switch_to(
                next_stack_pointer,
                prev_thread_id,
                SwitchReason::Paused,
            )
        };
    }
}

pub fn exit_thread() -> ! {
    synchronous_context_switch(SwitchReason::Exit).expect("can't exit last thread");
    unreachable!("finished thread continued");
}

pub fn yield_now() {
    let _ = synchronous_context_switch(SwitchReason::Yield);
}

fn synchronous_context_switch(reason: SwitchReason) -> Result<(), ()> {
    let next = with_scheduler(|s| s.schedule());
    match next {
        Some((next_stack_pointer, prev_thread_id)) => unsafe {
            context_switch::context_switch_to(next_stack_pointer, prev_thread_id, reason);
            Ok(())
        },
        None => Err(()),
    }
}

pub fn with_scheduler<F, T>(f: F) -> T
where
    F: FnOnce(&mut Scheduler) -> T,
{
    f(SCHEDULER.lock().get_or_insert_with(Scheduler::new))
}
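
One detail worth noting: `invoke_scheduler` runs inside the timer interrupt handler, so it uses `try_lock` instead of `lock`. If the interrupted code already held the `SCHEDULER` mutex, a blocking lock would spin forever on the same core, while skipping one scheduling tick is harmless. The cooperative paths (`yield_now`, `exit_thread`) run in normal thread context and may block via `with_scheduler`. Roughly:

// Illustrative deadlock scenario that try_lock avoids:
//
//   with_scheduler(|s| s.add_new_thread(thread)); // SCHEDULER locked here
//   // --- timer interrupt fires inside the closure ---
//   //     invoke_scheduler(): SCHEDULER.lock() would spin forever;
//   //     try_lock() returns None and this tick is simply skipped.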

src/multitasking/scheduler.rs (new file, 122 lines)

use super::SwitchReason;
use crate::multitasking::thread::{Thread, ThreadId};
use alloc::collections::{BTreeMap, BTreeSet, VecDeque};
use core::mem;
use x86_64::VirtAddr;

pub struct Scheduler {
    threads: BTreeMap<ThreadId, Thread>,
    idle_thread_id: Option<ThreadId>,
    current_thread_id: ThreadId,
    paused_threads: VecDeque<ThreadId>,
    blocked_threads: BTreeSet<ThreadId>,
    wakeups: BTreeSet<ThreadId>,
}

impl Scheduler {
    pub fn new() -> Self {
        let root_thread = Thread::create_root_thread();
        let root_id = root_thread.id();
        let mut threads = BTreeMap::new();
        threads
            .insert(root_id, root_thread)
            .expect_none("map is not empty after creation");
        Scheduler {
            threads,
            current_thread_id: root_id,
            paused_threads: VecDeque::new(),
            blocked_threads: BTreeSet::new(),
            wakeups: BTreeSet::new(),
            idle_thread_id: None,
        }
    }

    fn next_thread(&mut self) -> Option<ThreadId> {
        self.paused_threads.pop_front()
    }

    pub fn schedule(&mut self) -> Option<(VirtAddr, ThreadId)> {
        let mut next_thread_id = self.next_thread();
        if next_thread_id.is_none() && Some(self.current_thread_id) != self.idle_thread_id {
            next_thread_id = self.idle_thread_id
        }
        if let Some(next_id) = next_thread_id {
            let next_thread = self
                .threads
                .get_mut(&next_id)
                .expect("next thread does not exist");
            let next_stack_pointer = next_thread
                .stack_pointer()
                .take()
                .expect("paused thread has no stack pointer");
            let prev_thread_id = mem::replace(&mut self.current_thread_id, next_thread.id());
            Some((next_stack_pointer, prev_thread_id))
        } else {
            None
        }
    }

    pub(super) fn add_paused_thread(
        &mut self,
        paused_stack_pointer: VirtAddr,
        paused_thread_id: ThreadId,
        switch_reason: SwitchReason,
    ) {
        let paused_thread = self
            .threads
            .get_mut(&paused_thread_id)
            .expect("paused thread does not exist");
        paused_thread
            .stack_pointer()
            .replace(paused_stack_pointer)
            .expect_none("running thread should have stack pointer set to None");
        if Some(paused_thread_id) == self.idle_thread_id {
            return; // do nothing
        }
        match switch_reason {
            SwitchReason::Paused | SwitchReason::Yield => {
                self.paused_threads.push_back(paused_thread_id)
            }
            SwitchReason::Blocked => {
                self.blocked_threads.insert(paused_thread_id);
                self.check_for_wakeup(paused_thread_id);
            }
            SwitchReason::Exit => {
                let thread = self
                    .threads
                    .remove(&paused_thread_id)
                    .expect("thread not found");
                // TODO: free stack memory again
            }
        }
    }

    pub fn add_new_thread(&mut self, thread: Thread) {
        let thread_id = thread.id();
        self.threads
            .insert(thread_id, thread)
            .expect_none("thread already exists");
        self.paused_threads.push_back(thread_id);
    }

    pub fn set_idle_thread(&mut self, thread: Thread) {
        let thread_id = thread.id();
        self.threads
            .insert(thread_id, thread)
            .expect_none("thread already exists");
        self.idle_thread_id
            .replace(thread_id)
            .expect_none("idle thread should be set only once");
    }

    pub fn current_thread_id(&self) -> ThreadId {
        self.current_thread_id
    }

    fn check_for_wakeup(&mut self, thread_id: ThreadId) {
        if self.wakeups.remove(&thread_id) {
            assert!(self.blocked_threads.remove(&thread_id));
            self.paused_threads.push_back(thread_id);
        }
    }
}
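
Taken together, `schedule` (pop from the front of `paused_threads`) and `add_paused_thread` (push to the back) give round-robin scheduling; the idle thread is only chosen when the queue is empty and is itself never queued. An illustrative trace with root thread 0 and two spawned threads:

// paused_threads: [1, 2]   current: 0
// tick: schedule() pops 1, pushes 0  -> [2, 0]   current: 1
// tick: schedule() pops 2, pushes 1  -> [0, 1]   current: 2
// tick: schedule() pops 0, pushes 2  -> [1, 2]   current: 0  (full circle)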

src/multitasking/thread.rs (new file, 82 lines)

use crate::memory::{alloc_stack, StackBounds};
use crate::multitasking::context_switch::Stack;
use alloc::boxed::Box;
use x86_64::{
    structures::paging::{mapper, FrameAllocator, Mapper, Size4KiB},
    VirtAddr,
};

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct ThreadId(u64);

impl ThreadId {
    pub fn as_u64(&self) -> u64 {
        self.0
    }

    fn new() -> Self {
        use core::sync::atomic::{AtomicU64, Ordering};
        static NEXT_THREAD_ID: AtomicU64 = AtomicU64::new(1);
        ThreadId(NEXT_THREAD_ID.fetch_add(1, Ordering::SeqCst))
    }
}

#[derive(Debug)]
pub struct Thread {
    id: ThreadId,
    stack_pointer: Option<VirtAddr>,
    stack_bounds: Option<StackBounds>,
}

impl Thread {
    pub fn create(
        entry_point: fn() -> !,
        stack_size: u64,
        mapper: &mut impl Mapper<Size4KiB>,
        frame_allocator: &mut impl FrameAllocator<Size4KiB>,
    ) -> Result<Self, mapper::MapToError> {
        let stack_bounds = alloc_stack(stack_size, mapper, frame_allocator)?;
        let mut stack = unsafe { Stack::new(stack_bounds.end()) };
        stack.set_up_for_entry_point(entry_point);
        Ok(Self::new(stack.get_stack_pointer(), stack_bounds))
    }

    pub fn create_from_closure<F>(
        closure: F,
        stack_size: u64,
        mapper: &mut impl Mapper<Size4KiB>,
        frame_allocator: &mut impl FrameAllocator<Size4KiB>,
    ) -> Result<Self, mapper::MapToError>
    where
        F: FnOnce() -> ! + 'static + Send + Sync,
    {
        let stack_bounds = alloc_stack(stack_size, mapper, frame_allocator)?;
        let mut stack = unsafe { Stack::new(stack_bounds.end()) };
        stack.set_up_for_closure(Box::new(closure));
        Ok(Self::new(stack.get_stack_pointer(), stack_bounds))
    }

    fn new(stack_pointer: VirtAddr, stack_bounds: StackBounds) -> Self {
        Thread {
            id: ThreadId::new(),
            stack_pointer: Some(stack_pointer),
            stack_bounds: Some(stack_bounds),
        }
    }

    pub(super) fn create_root_thread() -> Self {
        Thread {
            id: ThreadId(0),
            stack_pointer: None,
            stack_bounds: None,
        }
    }

    pub fn id(&self) -> ThreadId {
        self.id
    }

    pub(super) fn stack_pointer(&mut self) -> &mut Option<VirtAddr> {
        &mut self.stack_pointer
    }
}
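
The `Option<VirtAddr>` stack pointer encodes the thread state: `Some(rsp)` while the thread is paused, `None` while it is running; the scheduler's `take()` and `replace(..).expect_none(..)` calls above rely on exactly this invariant. Spawning a thread then looks like this (taken from the src/main.rs hunk above):

let thread = Thread::create(thread_entry, 2, &mut mapper, &mut frame_allocator).unwrap();
with_scheduler(|s| s.add_new_thread(thread)); // queued with stack_pointer: Some(..)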

tests/heap_allocation.rs
@@ -58,6 +58,18 @@ fn many_boxes() {
     serial_println!("[ok]");
 }
 
+#[test_case]
+fn many_boxes_long_lived() {
+    serial_print!("many_boxes_long_lived... ");
+    let long_lived = Box::new(1); // new
+    for i in 0..HEAP_SIZE {
+        let x = Box::new(i);
+        assert_eq!(*x, i);
+    }
+    assert_eq!(*long_lived, 1); // new
+    serial_println!("[ok]");
+}
+
 #[panic_handler]
 fn panic(info: &PanicInfo) -> ! {
     blog_os::test_panic_handler(info)