diff --git a/src/interrupts.rs b/src/interrupts.rs
index e8508d34..4f4baec7 100644
--- a/src/interrupts.rs
+++ b/src/interrupts.rs
@@ -77,7 +77,7 @@ extern "x86-interrupt" fn timer_interrupt_handler(_stack_frame: &mut InterruptSt
         PICS.lock()
             .notify_end_of_interrupt(InterruptIndex::Timer.as_u8());
     }
-    crate::threads::schedule();
+    crate::multitasking::invoke_scheduler();
 }
 
 extern "x86-interrupt" fn keyboard_interrupt_handler(_stack_frame: &mut InterruptStackFrame) {
diff --git a/src/lib.rs b/src/lib.rs
index ca194021..519297a0 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -11,6 +11,7 @@
 #![feature(raw)]
 #![feature(never_type)]
 #![feature(naked_functions)]
+#![feature(option_expect_none)]
 #![test_runner(crate::test_runner)]
 #![reexport_test_harness_main = "test_main"]
 
@@ -23,7 +24,7 @@ pub mod gdt;
 pub mod interrupts;
 pub mod memory;
 pub mod serial;
-pub mod threads;
+pub mod multitasking;
 pub mod vga_buffer;
 
 pub fn init() {
diff --git a/src/main.rs b/src/main.rs
index 5fe62f59..4f5269e8 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -8,6 +8,7 @@ extern crate alloc;
 
 use alloc::{boxed::Box, rc::Rc, vec, vec::Vec};
 use blog_os::{print, println};
+use blog_os::multitasking::{thread::Thread, with_scheduler};
 use bootloader::{entry_point, BootInfo};
 use core::panic::PanicInfo;
 
@@ -54,30 +55,27 @@ fn kernel_main(boot_info: &'static BootInfo) -> ! {
     #[cfg(test)]
     test_main();
 
-    use blog_os::threads::{create_thread, create_thread_from_closure};
-
-    for i in 0..10 {
-        create_thread(thread, 2, &mut mapper, &mut frame_allocator);
+    for _ in 0..10 {
+        let thread = Thread::create(thread_entry, 2, &mut mapper, &mut frame_allocator).unwrap();
+        with_scheduler(|s| s.add_new_thread(thread));
     }
-    create_thread_from_closure(
-        || loop {
-            print!("{}", blog_os::threads::current_thread_id().as_u64());
+    let thread = Thread::create_from_closure(|| loop {
+            print!("{}", with_scheduler(|s| s.current_thread_id()).as_u64());
             x86_64::instructions::hlt();
         },
         2,
         &mut mapper,
         &mut frame_allocator,
-    );
+    ).unwrap();
+    with_scheduler(|s| s.add_new_thread(thread));
 
     println!("It did not crash!");
-    thread();
-
-    blog_os::hlt_loop();
+    thread_entry();
 }
 
-fn thread() -> ! {
+fn thread_entry() -> ! {
     loop {
-        print!("{}", blog_os::threads::current_thread_id().as_u64());
+        print!("{}", with_scheduler(|s| s.current_thread_id()).as_u64());
         x86_64::instructions::hlt();
     }
 }
diff --git a/src/memory.rs b/src/memory.rs
index c2ebfab0..ded99ee8 100644
--- a/src/memory.rs
+++ b/src/memory.rs
@@ -2,7 +2,7 @@ use bootloader::bootinfo::{MemoryMap, MemoryRegionType};
 use x86_64::{
     structures::paging::{
         FrameAllocator, Mapper, OffsetPageTable, Page, PageTable, PhysFrame, Size4KiB,
-        UnusedPhysFrame,
+        UnusedPhysFrame, mapper,
     },
     PhysAddr, VirtAddr,
 };
@@ -36,34 +36,49 @@ unsafe fn active_level_4_table(physical_memory_offset: VirtAddr) -> &'static mut
     &mut *page_table_ptr // unsafe
 }
 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct StackBounds {
+    start: VirtAddr,
+    end: VirtAddr,
+}
+
+impl StackBounds {
+    pub fn start(&self) -> VirtAddr {
+        self.start
+    }
+
+    pub fn end(&self) -> VirtAddr {
+        self.end
+    }
+}
+
 pub fn alloc_stack(
     size_in_pages: u64,
     mapper: &mut impl Mapper<Size4KiB>,
     frame_allocator: &mut impl FrameAllocator<Size4KiB>,
-) -> Result<VirtAddr, ()> {
+) -> Result<StackBounds, mapper::MapToError> {
     use core::sync::atomic::{AtomicU64, Ordering};
     use x86_64::structures::paging::PageTableFlags as Flags;
 
-    const PAGE_SIZE: u64 = 4096;
-
     static STACK_ALLOC_NEXT: AtomicU64 = AtomicU64::new(0x_5555_5555_0000);
 
     let guard_page_start =
-        STACK_ALLOC_NEXT.fetch_add((size_in_pages + 1) * PAGE_SIZE, Ordering::SeqCst);
-    // skip one page as guard page
-    let stack_start_addr = VirtAddr::new(guard_page_start + PAGE_SIZE);
-    let stack_end_addr = stack_start_addr + size_in_pages * PAGE_SIZE;
+        STACK_ALLOC_NEXT.fetch_add((size_in_pages + 1) * Page::<Size4KiB>::SIZE, Ordering::SeqCst);
+    let guard_page = Page::from_start_address(VirtAddr::new(guard_page_start))
+        .expect("`STACK_ALLOC_NEXT` not page aligned");
+    let stack_start = guard_page + 1;
+    let stack_end = stack_start + size_in_pages;
 
     let flags = Flags::PRESENT | Flags::WRITABLE;
-    let stack_start_page = Page::from_start_address(stack_start_addr).unwrap();
-    let stack_end_page = Page::from_start_address(stack_end_addr).unwrap();
-    for page in Page::range(stack_start_page, stack_end_page) {
-        let frame = frame_allocator.allocate_frame().ok_or(())?;
+    for page in Page::range(stack_start, stack_end) {
+        let frame = frame_allocator.allocate_frame().ok_or(mapper::MapToError::FrameAllocationFailed)?;
         mapper
-            .map_to(page, frame, flags, frame_allocator)
-            .map_err(|_| ())?
-            .flush();
+            .map_to(page, frame, flags, frame_allocator)?.flush();
     }
 
-    Ok(stack_end_addr)
+    Ok(StackBounds {
+        start: stack_start.start_address(),
+        end: stack_end.start_address(),
+    })
 }
 
 /// Creates an example mapping for the given page to frame `0xb8000`.
diff --git a/src/multitasking/context_switch.rs b/src/multitasking/context_switch.rs
new file mode 100644
index 00000000..ba2000b2
--- /dev/null
+++ b/src/multitasking/context_switch.rs
@@ -0,0 +1,93 @@
+use alloc::boxed::Box;
+use x86_64::VirtAddr;
+use core::raw::TraitObject;
+use crate::multitasking::thread::ThreadId;
+use core::mem;
+use super::with_scheduler;
+
+pub struct Stack {
+    pointer: VirtAddr,
+}
+
+impl Stack {
+    pub unsafe fn new(stack_pointer: VirtAddr) -> Self {
+        Stack { pointer: stack_pointer }
+    }
+
+    pub fn get_stack_pointer(self) -> VirtAddr {
+        self.pointer
+    }
+
+    pub fn set_up_for_closure(&mut self, closure: Box<dyn FnOnce() -> !>) {
+        let trait_object: TraitObject = unsafe { mem::transmute(closure) };
+        unsafe { self.push(trait_object.data) };
+        unsafe { self.push(trait_object.vtable) };
+
+        self.set_up_for_entry_point(call_closure_entry);
+    }
+
+    pub fn set_up_for_entry_point(&mut self, entry_point: fn() -> !) {
+        unsafe { self.push(entry_point) };
+        let rflags: u64 = 0x200;
+        unsafe { self.push(rflags) };
+    }
+
+    unsafe fn push<T>(&mut self, value: T) {
+        self.pointer -= core::mem::size_of::<T>();
+        let ptr: *mut T = self.pointer.as_mut_ptr();
+        ptr.write(value);
+    }
+}
+
+pub unsafe fn context_switch_to(thread_id: ThreadId, stack_pointer: VirtAddr) {
+    asm!(
+        "call asm_context_switch"
+        :
+        : "{rdi}"(stack_pointer), "{rsi}"(thread_id)
+        : "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "r8", "r9", "r10",
+        "r11", "r12", "r13", "r14", "r15", "rflags", "memory"
+        : "intel", "volatile"
+    );
+}
+
+global_asm!(
+    "
+    .intel_syntax noprefix
+
+    // asm_context_switch(stack_pointer: u64, thread_id: u64)
+    asm_context_switch:
+        pushfq
+
+        mov rax, rsp
+        mov rsp, rdi
+
+        mov rdi, rax
+        call add_paused_thread
+
+        popfq
+        ret
+"
+);
+
+#[no_mangle]
+pub extern "C" fn add_paused_thread(paused_stack_pointer: VirtAddr, new_thread_id: ThreadId) {
+    with_scheduler(|s| s.add_paused_thread(paused_stack_pointer, new_thread_id));
+}
+
+#[naked]
+fn call_closure_entry() -> ! {
+    unsafe { asm!("
+        pop rsi
+        pop rdi
+        call call_closure
+    " ::: "mem" : "intel", "volatile") };
+    unreachable!();
+}
+
+// no_mangle required because of https://github.com/rust-lang/rust/issues/68136
+#[no_mangle]
+extern "C" fn call_closure(data: *mut (), vtable: *mut ()) -> ! {
+    let trait_object = TraitObject { data, vtable };
+    let f: Box<dyn FnOnce() -> !> = unsafe { mem::transmute(trait_object) };
+    f()
+}
diff --git a/src/multitasking/mod.rs b/src/multitasking/mod.rs
new file mode 100644
index 00000000..c2670edf
--- /dev/null
+++ b/src/multitasking/mod.rs
@@ -0,0 +1,20 @@
+use scheduler::Scheduler;
+
+pub mod thread;
+pub mod scheduler;
+pub mod context_switch;
+
+static SCHEDULER: spin::Mutex<Option<Scheduler>> = spin::Mutex::new(None);
+
+pub fn invoke_scheduler() {
+    let next = SCHEDULER
+        .try_lock()
+        .and_then(|mut scheduler| scheduler.as_mut().and_then(|s| s.schedule()));
+    if let Some((next_id, next_stack_pointer)) = next {
+        unsafe { context_switch::context_switch_to(next_id, next_stack_pointer) };
+    }
+}
+
+pub fn with_scheduler<F, T>(f: F) -> T where F: FnOnce(&mut Scheduler) -> T {
+    f(SCHEDULER.lock().get_or_insert_with(Scheduler::new))
+}
diff --git a/src/multitasking/scheduler.rs b/src/multitasking/scheduler.rs
new file mode 100644
index 00000000..3afd340c
--- /dev/null
+++ b/src/multitasking/scheduler.rs
@@ -0,0 +1,57 @@
+use crate::multitasking::thread::{Thread, ThreadId};
+use alloc::collections::{VecDeque, BTreeMap};
+use x86_64::VirtAddr;
+use core::mem;
+
+pub struct Scheduler {
+    threads: BTreeMap<ThreadId, Thread>,
+    current_thread_id: ThreadId,
+    paused_threads: VecDeque<ThreadId>,
+}
+
+impl Scheduler {
+    pub fn new() -> Self {
+        let root_thread = Thread::create_root_thread();
+        let root_id = root_thread.id();
+        let mut threads = BTreeMap::new();
+        threads.insert(root_id, root_thread).expect_none("map is not empty after creation");
+        Scheduler {
+            threads,
+            current_thread_id: root_id,
+            paused_threads: VecDeque::new(),
+        }
+    }
+
+    fn next_thread(&mut self) -> Option<ThreadId> {
+        self.paused_threads.pop_front()
+    }
+
+    pub fn schedule(&mut self) -> Option<(ThreadId, VirtAddr)> {
+        if let Some(next_id) = self.next_thread() {
+            let next_thread = self.threads.get_mut(&next_id).expect("next thread does not exist");
+            let next_stack_pointer = next_thread.stack_pointer().take()
+                .expect("paused thread has no stack pointer");
+            Some((next_id, next_stack_pointer))
+        } else {
+            None
+        }
+    }
+
+    pub(super) fn add_paused_thread(&mut self, paused_stack_pointer: VirtAddr, next_thread_id: ThreadId) {
+        let paused_thread_id = mem::replace(&mut self.current_thread_id, next_thread_id);
+        let paused_thread = self.threads.get_mut(&paused_thread_id).expect("paused thread does not exist");
+        paused_thread.stack_pointer().replace(paused_stack_pointer)
+            .expect_none("running thread should have stack pointer set to None");
+        self.paused_threads.push_back(paused_thread_id);
+    }
+
+    pub fn add_new_thread(&mut self, thread: Thread) {
+        let thread_id = thread.id();
+        self.threads.insert(thread_id, thread).expect_none("thread already exists");
+        self.paused_threads.push_back(thread_id);
+    }
+
+    pub fn current_thread_id(&self) -> ThreadId {
+        self.current_thread_id
+    }
+}
\ No newline at end of file
diff --git a/src/multitasking/thread.rs b/src/multitasking/thread.rs
new file mode 100644
index 00000000..9ffe8ada
--- /dev/null
+++ b/src/multitasking/thread.rs
@@ -0,0 +1,76 @@
+use x86_64::{VirtAddr, structures::paging::{Mapper, mapper, FrameAllocator, Size4KiB}};
+use alloc::boxed::Box;
+use crate::memory::{alloc_stack, StackBounds};
+use crate::multitasking::context_switch::Stack;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub struct ThreadId(u64);
+
+impl ThreadId {
+    pub fn as_u64(&self) -> u64 {
+        self.0
+    }
+
+    fn new() -> Self {
+        use core::sync::atomic::{AtomicU64, Ordering};
+        static NEXT_THREAD_ID: AtomicU64 = AtomicU64::new(1);
+        ThreadId(NEXT_THREAD_ID.fetch_add(1, Ordering::SeqCst))
+    }
+}
+
+#[derive(Debug)]
+pub struct Thread {
+    id: ThreadId,
+    stack_pointer: Option<VirtAddr>,
+    stack_bounds: Option<StackBounds>,
+}
+
+impl Thread {
+    pub fn create(
+        entry_point: fn() -> !,
+        stack_size: u64,
+        mapper: &mut impl Mapper<Size4KiB>,
+        frame_allocator: &mut impl FrameAllocator<Size4KiB>,
+    ) -> Result<Self, mapper::MapToError> {
+        let stack_bounds = alloc_stack(stack_size, mapper, frame_allocator)?;
+        let mut stack = unsafe { Stack::new(stack_bounds.end()) };
+        stack.set_up_for_entry_point(entry_point);
+        Ok(Self::new(stack.get_stack_pointer(), stack_bounds))
+    }
+
+    pub fn create_from_closure<F>(
+        closure: F,
+        stack_size: u64,
+        mapper: &mut impl Mapper<Size4KiB>,
+        frame_allocator: &mut impl FrameAllocator<Size4KiB>,
+    ) -> Result<Self, mapper::MapToError> where F: FnOnce() -> ! + 'static + Send + Sync {
+        let stack_bounds = alloc_stack(stack_size, mapper, frame_allocator)?;
+        let mut stack = unsafe { Stack::new(stack_bounds.end()) };
+        stack.set_up_for_closure(Box::new(closure));
+        Ok(Self::new(stack.get_stack_pointer(), stack_bounds))
+    }
+
+    fn new(stack_pointer: VirtAddr, stack_bounds: StackBounds) -> Self {
+        Thread {
+            id: ThreadId::new(),
+            stack_pointer: Some(stack_pointer),
+            stack_bounds: Some(stack_bounds),
+        }
+    }
+
+    pub(super) fn create_root_thread() -> Self {
+        Thread {
+            id: ThreadId(0),
+            stack_pointer: None,
+            stack_bounds: None,
+        }
+    }
+
+    pub fn id(&self) -> ThreadId {
+        self.id
+    }
+
+    pub(super) fn stack_pointer(&mut self) -> &mut Option<VirtAddr> {
+        &mut self.stack_pointer
+    }
+}
diff --git a/src/threads.rs b/src/threads.rs
deleted file mode 100644
index 4a938149..00000000
--- a/src/threads.rs
+++ /dev/null
@@ -1,244 +0,0 @@
-use alloc::collections::VecDeque;
-use x86_64::structures::paging::{FrameAllocator, Mapper, Size4KiB};
-use x86_64::VirtAddr;
-use core::mem;
-
-static SCHEDULER: spin::Mutex<Option<Scheduler>> = spin::Mutex::new(None);
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub struct ThreadId(pub u64);
-
-impl ThreadId {
-    pub fn as_u64(&self) -> u64 {
-        self.0
-    }
-}
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub struct StackPointer(VirtAddr);
-
-impl StackPointer {
-    unsafe fn new(pointer: VirtAddr) -> Self {
-        StackPointer(pointer)
-    }
-
-    pub fn allocate(
-        stack_size: u64,
-        mapper: &mut impl Mapper<Size4KiB>,
-        frame_allocator: &mut impl FrameAllocator<Size4KiB>,
-    ) -> Result<Self, ()> {
-        crate::memory::alloc_stack(stack_size, mapper, frame_allocator).map(Self)
-    }
-
-    unsafe fn push_to_stack<T>(&mut self, value: T) {
-        self.0 -= core::mem::size_of::<T>();
-        let ptr: *mut T = self.0.as_mut_ptr();
-        ptr.write(value);
-    }
-
-    fn as_u64(&self) -> u64 {
-        self.0.as_u64()
-    }
-}
-
-pub struct Thread {
-    id: ThreadId,
-    stack_pointer: StackPointer,
-}
-
-impl Thread {
-    pub unsafe fn new(entry_point: fn() -> !, stack_top: StackPointer) -> Self {
-        use core::sync::atomic::{AtomicU64, Ordering};
-        static NextThreadId: AtomicU64 = AtomicU64::new(1);
-
-        let mut stack_pointer = stack_top;
-        Self::set_up_stack(&mut stack_pointer, entry_point);
-
-        Thread {
-            id: ThreadId(NextThreadId.fetch_add(1, Ordering::SeqCst)),
-            stack_pointer,
-        }
-    }
-
-    pub unsafe fn new_from_closure<F>(closure: F, stack_top: StackPointer) -> Self
-    where
-        F: FnOnce() -> ! + Send + Sync + 'static,
-    {
-        use alloc::boxed::Box;
-        use core::{mem, raw::TraitObject};
-
-        let boxed: ThreadClosure = Box::new(closure);
-        let trait_object: TraitObject = unsafe { mem::transmute(boxed) };
-
-        // push trait object
-        let mut stack_pointer = stack_top;
-        unsafe { stack_pointer.push_to_stack(trait_object.data) };
-        unsafe { stack_pointer.push_to_stack(trait_object.vtable) };
-
-        let entry_point = call_closure_entry as unsafe fn() -> !;
-        unsafe { Self::new(mem::transmute(entry_point), stack_pointer) }
-    }
-
-    pub fn create(
-        entry_point: fn() -> !,
-        stack_size: u64,
-        mapper: &mut impl Mapper<Size4KiB>,
-        frame_allocator: &mut impl FrameAllocator<Size4KiB>,
-    ) -> Result<Self, ()> {
-        let stack_top = StackPointer::allocate(stack_size, mapper, frame_allocator)?;
-        Ok(unsafe { Self::new(entry_point, stack_top) })
-    }
-
-    pub fn create_from_closure<F>(
-        entry_point: F,
-        stack_size: u64,
-        mapper: &mut impl Mapper<Size4KiB>,
-        frame_allocator: &mut impl FrameAllocator<Size4KiB>,
-    ) -> Result<Self, ()>
-    where
-        F: FnOnce() -> ! + Send + Sync + 'static,
-    {
-        let stack_top = StackPointer::allocate(stack_size, mapper, frame_allocator)?;
-        Ok(unsafe { Self::new_from_closure(entry_point, stack_top) })
-    }
-
-    fn set_up_stack(stack_top: &mut StackPointer, entry_point: fn() -> !) {
-        unsafe { stack_top.push_to_stack(entry_point) };
-        let rflags: u64 = 0x200;
-        unsafe { stack_top.push_to_stack(rflags) };
-    }
-}
-
-struct Scheduler {
-    current_thread_id: ThreadId,
-    paused_threads: VecDeque<Thread>,
-}
-
-impl Scheduler {
-    fn new() -> Self {
-        Scheduler {
-            current_thread_id: ThreadId(0),
-            paused_threads: VecDeque::new(),
-        }
-    }
-
-    fn next_thread(&mut self) -> Option<Thread> {
-        self.paused_threads.pop_front()
-    }
-
-    fn add_paused_thread(&mut self, stack_pointer: StackPointer, new_thread_id: ThreadId) {
-        let thread_id = mem::replace(&mut self.current_thread_id, new_thread_id);
-        let thread = Thread { id: thread_id, stack_pointer };
-        self.paused_threads.push_back(thread);
-    }
-
-    fn add_new_thread(&mut self, thread: Thread) {
-        self.paused_threads.push_back(thread);
-    }
-
-    pub fn current_thread_id(&self) -> ThreadId {
-        self.current_thread_id
-    }
-}
-
-pub unsafe fn context_switch(thread: Thread) {
-    asm!(
-        "call asm_context_switch"
-        :
-        : "{rdi}"(thread.stack_pointer.as_u64()), "{rsi}"(thread.id.0)
-        : "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rpb", "r8", "r9", "r10",
-        "r11", "r12", "r13", "r14", "r15", "rflags", "memory"
-        : "intel", "volatile"
-    );
-}
-
-global_asm!(
-    "
-    .intel_syntax noprefix
-
-    // asm_context_switch(stack_pointer: u64, thread_id: u64)
-    asm_context_switch:
-        pushfq
-
-        mov rax, rsp
-        mov rsp, rdi
-
-        mov rdi, rax
-        call add_paused_thread
-
-        popfq
-        ret
-"
-);
-
-pub fn schedule() {
-    let next = SCHEDULER
-        .try_lock()
-        .and_then(|mut scheduler| scheduler.as_mut().and_then(|s| s.next_thread()));
-    if let Some(next) = next {
-        unsafe { context_switch(next) };
-    }
-}
-
-pub fn with_scheduler<F, T>(f: F) -> T where F: FnOnce(&mut Scheduler) -> T {
-    f(SCHEDULER.lock().get_or_insert_with(Scheduler::new))
-}
-
-static PAUSED_THREADS: spin::Mutex<Option<VecDeque<Thread>>> = spin::Mutex::new(None);
-
-#[no_mangle]
-pub extern "C" fn add_paused_thread(stack_pointer: u64, new_thread_id: u64) {
-    let stack_pointer = StackPointer(VirtAddr::new(stack_pointer));
-    let new_thread_id = ThreadId(new_thread_id);
-
-    with_scheduler(|s| s.add_paused_thread(stack_pointer, new_thread_id));
-}
-
-pub fn create_thread(
-    entry_point: fn() -> !,
-    stack_size: u64,
-    mapper: &mut impl Mapper<Size4KiB>,
-    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
-) -> Result<(), ()> {
-    let thread = Thread::create(entry_point, stack_size, mapper, frame_allocator)?;
-    with_scheduler(|s| s.add_new_thread(thread));
-    Ok(())
-}
-
-pub fn create_thread_from_closure<F>(
-    closure: F,
-    stack_size: u64,
-    mapper: &mut impl Mapper<Size4KiB>,
-    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
-) -> Result<(), ()> where
-    F: FnOnce() -> ! + 'static + Send + Sync,
-{
-    let thread = Thread::create_from_closure(closure, stack_size, mapper, frame_allocator)?;
-    with_scheduler(|s| s.add_new_thread(thread));
-    Ok(())
-}
-
-pub fn current_thread_id() -> ThreadId {
-    with_scheduler(|s| s.current_thread_id())
-}
-
-type ThreadClosure = alloc::boxed::Box<dyn FnOnce() -> !>;
-
-#[no_mangle]
-unsafe fn call_closure(data: *mut (), vtable: *mut ()) -> ! {
-    use core::{mem, raw::TraitObject};
-
-    let trait_object = TraitObject { data, vtable };
-    let f: ThreadClosure = mem::transmute(trait_object);
-    f()
-}
-
-#[naked]
-unsafe fn call_closure_entry() -> ! {
-    asm!("
-        pop rsi
-        pop rdi
-        call call_closure
-    " ::: "mem" : "intel", "volatile");
-    unreachable!();
-}