mirror of https://github.com/phil-opp/blog_os.git, synced 2025-12-16 14:27:49 +00:00
Run cargo fmt
@@ -23,8 +23,8 @@ pub mod allocator;
 pub mod gdt;
 pub mod interrupts;
 pub mod memory;
-pub mod serial;
 pub mod multitasking;
+pub mod serial;
 pub mod vga_buffer;
 
 pub fn init() {
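(The only change in this hunk is that pub mod serial; moves below pub mod multitasking;: rustfmt's default reorder_modules setting sorts module declarations alphabetically, and its companion reorder_imports default explains the reshuffled use blocks in the hunks below.)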
src/main.rs
@@ -7,8 +7,8 @@
 extern crate alloc;
 
 use alloc::{boxed::Box, rc::Rc, vec, vec::Vec};
-use blog_os::{print, println};
 use blog_os::multitasking::{thread::Thread, with_scheduler};
+use blog_os::{print, println};
 use bootloader::{entry_point, BootInfo};
 use core::panic::PanicInfo;
 
@@ -59,14 +59,16 @@ fn kernel_main(boot_info: &'static BootInfo) -> ! {
         let thread = Thread::create(thread_entry, 2, &mut mapper, &mut frame_allocator).unwrap();
         with_scheduler(|s| s.add_new_thread(thread));
     }
-    let thread = Thread::create_from_closure(|| loop {
+    let thread = Thread::create_from_closure(
+        || loop {
             print!("{}", with_scheduler(|s| s.current_thread_id()).as_u64());
             x86_64::instructions::hlt();
         },
         2,
         &mut mapper,
         &mut frame_allocator,
-    ).unwrap();
+    )
+    .unwrap();
     with_scheduler(|s| s.add_new_thread(thread));
 
     println!("It did not crash!");
@@ -84,7 +86,9 @@ fn thread_entry() -> ! {
 #[cfg(not(test))]
 #[panic_handler]
 fn panic(info: &PanicInfo) -> ! {
-    unsafe { blog_os::vga_buffer::WRITER.force_unlock(); }
+    unsafe {
+        blog_os::vga_buffer::WRITER.force_unlock();
+    }
     println!("{}", info);
     blog_os::hlt_loop();
 }
@@ -1,8 +1,8 @@
 use bootloader::bootinfo::{MemoryMap, MemoryRegionType};
 use x86_64::{
     structures::paging::{
-        FrameAllocator, Mapper, OffsetPageTable, Page, PageTable, PhysFrame, Size4KiB,
-        UnusedPhysFrame,mapper,
+        mapper, FrameAllocator, Mapper, OffsetPageTable, Page, PageTable, PhysFrame, Size4KiB,
+        UnusedPhysFrame,
     },
     PhysAddr, VirtAddr,
 };
@@ -62,8 +62,10 @@ pub fn alloc_stack(
 
     static STACK_ALLOC_NEXT: AtomicU64 = AtomicU64::new(0x_5555_5555_0000);
 
-    let guard_page_start =
-        STACK_ALLOC_NEXT.fetch_add((size_in_pages + 1) * Page::<Size4KiB>::SIZE, Ordering::SeqCst);
+    let guard_page_start = STACK_ALLOC_NEXT.fetch_add(
+        (size_in_pages + 1) * Page::<Size4KiB>::SIZE,
+        Ordering::SeqCst,
+    );
     let guard_page = Page::from_start_address(VirtAddr::new(guard_page_start))
         .expect("`STACK_ALLOC_NEXT` not page aligned");
 
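Note: the bumped counter reserves size_in_pages + 1 pages, and the first page stays unmapped as a guard page so that a stack overflow faults instead of silently corrupting memory. A standalone sketch of that address arithmetic, assuming 4 KiB pages and that stack_start is the page right after the guard page (that line is outside this hunk):

    // Illustration only; mirrors the layout carved out by alloc_stack above.
    const PAGE_SIZE: u64 = 4096; // Page::<Size4KiB>::SIZE

    fn stack_layout(alloc_next: u64, size_in_pages: u64) -> (u64, u64, u64) {
        let guard_page_start = alloc_next; // intentionally left unmapped
        let stack_start = guard_page_start + PAGE_SIZE; // first mapped stack page
        let stack_end = stack_start + size_in_pages * PAGE_SIZE; // exclusive end
        (guard_page_start, stack_start, stack_end)
    }

    fn main() {
        // Values from the diff: base 0x_5555_5555_0000 and a two-page stack.
        let (guard, start, end) = stack_layout(0x_5555_5555_0000, 2);
        assert_eq!(start - guard, PAGE_SIZE);
        assert_eq!(end - start, 2 * PAGE_SIZE);
    }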
@@ -71,9 +73,10 @@ pub fn alloc_stack(
     let stack_end = stack_start + size_in_pages;
     let flags = Flags::PRESENT | Flags::WRITABLE;
     for page in Page::range(stack_start, stack_end) {
-        let frame = frame_allocator.allocate_frame().ok_or(mapper::MapToError::FrameAllocationFailed)?;
-        mapper
-            .map_to(page, frame, flags, frame_allocator)?.flush();
+        let frame = frame_allocator
+            .allocate_frame()
+            .ok_or(mapper::MapToError::FrameAllocationFailed)?;
+        mapper.map_to(page, frame, flags, frame_allocator)?.flush();
     }
     Ok(StackBounds {
         start: stack_start.start_address(),
@@ -1,9 +1,9 @@
-use alloc::boxed::Box;
-use x86_64::VirtAddr;
-use core::raw::TraitObject;
-use crate::multitasking::thread::ThreadId;
-use core::mem;
 use super::with_scheduler;
+use crate::multitasking::thread::ThreadId;
+use alloc::boxed::Box;
+use core::mem;
+use core::raw::TraitObject;
+use x86_64::VirtAddr;
 
 pub struct Stack {
     pointer: VirtAddr,
@@ -11,7 +11,9 @@ pub struct Stack {
 
 impl Stack {
     pub unsafe fn new(stack_pointer: VirtAddr) -> Self {
-        Stack { pointer: stack_pointer, }
+        Stack {
+            pointer: stack_pointer,
+        }
     }
 
     pub fn get_stack_pointer(self) -> VirtAddr {
@@ -22,7 +24,7 @@ impl Stack {
         let trait_object: TraitObject = unsafe { mem::transmute(closure) };
         unsafe { self.push(trait_object.data) };
         unsafe { self.push(trait_object.vtable) };
 
         self.set_up_for_entry_point(call_closure_entry);
     }
 
@@ -76,11 +78,13 @@ pub extern "C" fn add_paused_thread(paused_stack_pointer: VirtAddr, new_thread_i
 
 #[naked]
 fn call_closure_entry() -> ! {
-    unsafe { asm!("
+    unsafe {
+        asm!("
         pop rsi
         pop rdi
         call call_closure
-    " ::: "mem" : "intel", "volatile") };
+    " ::: "mem" : "intel", "volatile")
+    };
     unreachable!();
 }
 
@@ -1,8 +1,8 @@
 use scheduler::Scheduler;
 
-pub mod thread;
-pub mod scheduler;
 pub mod context_switch;
+pub mod scheduler;
+pub mod thread;
 
 static SCHEDULER: spin::Mutex<Option<Scheduler>> = spin::Mutex::new(None);
 
@@ -15,6 +15,9 @@ pub fn invoke_scheduler() {
     }
 }
 
-pub fn with_scheduler<F, T>(f: F) -> T where F: FnOnce(&mut Scheduler) -> T {
+pub fn with_scheduler<F, T>(f: F) -> T
+where
+    F: FnOnce(&mut Scheduler) -> T,
+{
     f(SCHEDULER.lock().get_or_insert_with(Scheduler::new))
 }
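Note: with_scheduler is the kernel's only access point to the global scheduler, a spin::Mutex<Option<Scheduler>> that is created lazily on first use. A minimal standalone sketch of the same pattern, with a stub Scheduler instead of the kernel's real one:

    use spin::Mutex; // the same lock type blog_os uses

    struct Scheduler {
        ticks: u64,
    }

    impl Scheduler {
        fn new() -> Self {
            Scheduler { ticks: 0 }
        }
    }

    // Stays None until the first with_scheduler call initializes it.
    static SCHEDULER: Mutex<Option<Scheduler>> = Mutex::new(None);

    fn with_scheduler<F, T>(f: F) -> T
    where
        F: FnOnce(&mut Scheduler) -> T,
    {
        // Lock, lazily create the scheduler, and run the closure while the
        // lock is held, so callers never see a half-initialized scheduler.
        f(SCHEDULER.lock().get_or_insert_with(Scheduler::new))
    }

    fn main() {
        let ticks = with_scheduler(|s| {
            s.ticks += 1;
            s.ticks
        });
        assert_eq!(ticks, 1);
    }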
@@ -1,7 +1,7 @@
 use crate::multitasking::thread::{Thread, ThreadId};
-use alloc::collections::{VecDeque, BTreeMap};
-use x86_64::VirtAddr;
+use alloc::collections::{BTreeMap, VecDeque};
 use core::mem;
+use x86_64::VirtAddr;
 
 pub struct Scheduler {
     threads: BTreeMap<ThreadId, Thread>,
@@ -14,7 +14,9 @@ impl Scheduler {
         let root_thread = Thread::create_root_thread();
         let root_id = root_thread.id();
         let mut threads = BTreeMap::new();
-        threads.insert(root_id, root_thread).expect_none("map is not empty after creation");
+        threads
+            .insert(root_id, root_thread)
+            .expect_none("map is not empty after creation");
         Scheduler {
             threads,
             current_thread_id: root_id,
@@ -28,8 +30,13 @@ impl Scheduler {
 
     pub fn schedule(&mut self) -> Option<(ThreadId, VirtAddr)> {
         if let Some(next_id) = self.next_thread() {
-            let next_thread = self.threads.get_mut(&next_id).expect("next thread does not exist");
-            let next_stack_pointer = next_thread.stack_pointer().take()
+            let next_thread = self
+                .threads
+                .get_mut(&next_id)
+                .expect("next thread does not exist");
+            let next_stack_pointer = next_thread
+                .stack_pointer()
+                .take()
                 .expect("paused thread has no stack pointer");
             Some((next_id, next_stack_pointer))
         } else {
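Note: schedule calls a next_thread helper that this commit does not touch. Given the paused_threads queue used later in this file, a plausible round-robin version (an assumption for illustration, not code copied from the repository; written against std so it runs standalone) is simply:

    use std::collections::VecDeque;

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ThreadId(u64);

    struct Scheduler {
        paused_threads: VecDeque<ThreadId>,
    }

    impl Scheduler {
        // Round robin: hand out the thread that has been paused the longest.
        fn next_thread(&mut self) -> Option<ThreadId> {
            self.paused_threads.pop_front()
        }
    }

    fn main() {
        let mut s = Scheduler {
            paused_threads: VecDeque::from(vec![ThreadId(1), ThreadId(2)]),
        };
        assert_eq!(s.next_thread(), Some(ThreadId(1)));
        assert_eq!(s.next_thread(), Some(ThreadId(2)));
        assert_eq!(s.next_thread(), None);
    }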
@@ -37,21 +44,32 @@ impl Scheduler {
         }
     }
 
-    pub(super) fn add_paused_thread(&mut self, paused_stack_pointer: VirtAddr, next_thread_id: ThreadId) {
+    pub(super) fn add_paused_thread(
+        &mut self,
+        paused_stack_pointer: VirtAddr,
+        next_thread_id: ThreadId,
+    ) {
         let paused_thread_id = mem::replace(&mut self.current_thread_id, next_thread_id);
-        let paused_thread = self.threads.get_mut(&paused_thread_id).expect("paused thread does not exist");
-        paused_thread.stack_pointer().replace(paused_stack_pointer)
+        let paused_thread = self
+            .threads
+            .get_mut(&paused_thread_id)
+            .expect("paused thread does not exist");
+        paused_thread
+            .stack_pointer()
+            .replace(paused_stack_pointer)
             .expect_none("running thread should have stack pointer set to None");
         self.paused_threads.push_back(paused_thread_id);
     }
 
     pub fn add_new_thread(&mut self, thread: Thread) {
         let thread_id = thread.id();
-        self.threads.insert(thread_id, thread).expect_none("thread already exists");
+        self.threads
+            .insert(thread_id, thread)
+            .expect_none("thread already exists");
         self.paused_threads.push_back(thread_id);
     }
 
     pub fn current_thread_id(&self) -> ThreadId {
         self.current_thread_id
     }
 }
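Note: expect_none is a nightly-only Option method that was never stabilized; here it asserts that insert and replace found no previous value. On stable Rust the same check can be written with an assertion, for example (standalone sketch using std instead of the kernel's alloc):

    use std::collections::BTreeMap;

    // Mirrors what insert(...).expect_none(...) does in the diff above:
    // insert, and panic if the key was already present.
    fn insert_new(map: &mut BTreeMap<u64, &'static str>, key: u64, value: &'static str) {
        let previous = map.insert(key, value);
        assert!(previous.is_none(), "thread already exists");
    }

    fn main() {
        let mut threads = BTreeMap::new();
        insert_new(&mut threads, 0, "root");
        insert_new(&mut threads, 1, "worker");
        assert_eq!(threads.len(), 2);
    }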
@@ -1,7 +1,10 @@
-use x86_64::{VirtAddr, structures::paging::{Mapper, mapper, FrameAllocator, Size4KiB}};
-use alloc::boxed::Box;
 use crate::memory::{alloc_stack, StackBounds};
 use crate::multitasking::context_switch::Stack;
+use alloc::boxed::Box;
+use x86_64::{
+    structures::paging::{mapper, FrameAllocator, Mapper, Size4KiB},
+    VirtAddr,
+};
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
 pub struct ThreadId(u64);
@@ -10,7 +13,7 @@ impl ThreadId {
     pub fn as_u64(&self) -> u64 {
         self.0
     }
 
     fn new() -> Self {
         use core::sync::atomic::{AtomicU64, Ordering};
         static NEXT_THREAD_ID: AtomicU64 = AtomicU64::new(1);
@@ -32,7 +35,7 @@ impl Thread {
         mapper: &mut impl Mapper<Size4KiB>,
         frame_allocator: &mut impl FrameAllocator<Size4KiB>,
     ) -> Result<Self, mapper::MapToError> {
         let stack_bounds = alloc_stack(stack_size, mapper, frame_allocator)?;
         let mut stack = unsafe { Stack::new(stack_bounds.end()) };
         stack.set_up_for_entry_point(entry_point);
         Ok(Self::new(stack.get_stack_pointer(), stack_bounds))
@@ -43,13 +46,16 @@ impl Thread {
         stack_size: u64,
         mapper: &mut impl Mapper<Size4KiB>,
         frame_allocator: &mut impl FrameAllocator<Size4KiB>,
-    ) -> Result<Self, mapper::MapToError> where F: FnOnce() -> ! + 'static + Send + Sync {
-        let stack_bounds = alloc_stack(stack_size, mapper, frame_allocator)?;
+    ) -> Result<Self, mapper::MapToError>
+    where
+        F: FnOnce() -> ! + 'static + Send + Sync,
+    {
+        let stack_bounds = alloc_stack(stack_size, mapper, frame_allocator)?;
         let mut stack = unsafe { Stack::new(stack_bounds.end()) };
         stack.set_up_for_closure(Box::new(closure));
         Ok(Self::new(stack.get_stack_pointer(), stack_bounds))
     }
 
     fn new(stack_pointer: VirtAddr, stack_bounds: StackBounds) -> Self {
         Thread {
             id: ThreadId::new(),
@@ -57,7 +63,7 @@ impl Thread {
             stack_bounds: Some(stack_bounds),
         }
     }
 
     pub(super) fn create_root_thread() -> Self {
         Thread {
             id: ThreadId(0),