Compare commits

..

1 Commit

Author SHA1 Message Date
Philipp Oppermann
e3e8a3e112 Align the double fault stack through a new wrapper type 2020-02-19 10:27:26 +01:00
19 changed files with 37 additions and 1247 deletions

Cargo.lock (generated, 33 lines changed)

@@ -27,9 +27,6 @@ version = "0.1.0"
dependencies = [
"bootloader",
"lazy_static",
"linked_list_allocator",
"pc-keyboard",
"pic8259_simple",
"spin",
"uart_16550",
"volatile",
@@ -51,12 +48,6 @@ dependencies = [
"rustc_version",
]
[[package]]
name = "cpuio"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22b8e308ccfc5acf3b82f79c0eac444cf6114cb2ac67a230ca6c177210068daa"
[[package]]
name = "lazy_static"
version = "1.4.0"
@@ -66,36 +57,12 @@ dependencies = [
"spin",
]
[[package]]
name = "linked_list_allocator"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47314ec1d29aa869ee7cb5a5be57be9b1055c56567d59c3fb6689926743e0bea"
dependencies = [
"spin",
]
[[package]]
name = "nodrop"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb"
[[package]]
name = "pc-keyboard"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fff50ab09ba31bcebc0669f4e64c0952fae1acdca9e6e0587e68e4e8443808ac"
[[package]]
name = "pic8259_simple"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc64b2fd10828da8521b6cdabe0679385d7d2a3a6d4c336b819d1fa31ba35c72"
dependencies = [
"cpuio",
]
[[package]]
name = "rustc_version"
version = "0.2.3"

Cargo.toml

@@ -13,14 +13,11 @@ name = "stack_overflow"
harness = false
[dependencies]
bootloader = { version = "0.8.0", features = ["map_physical_memory"]}
bootloader = "0.8.0"
volatile = "0.2.6"
spin = "0.5.2"
x86_64 = "0.8.1"
uart_16550 = "0.2.0"
pic8259_simple = "0.1.1"
pc-keyboard = "0.3.1"
linked_list_allocator = "0.6.4"
[dependencies.lazy_static]
version = "1.0"
@@ -33,6 +30,3 @@ test-args = [
"-display", "none"
]
test-success-exit-code = 33 # (0x10 << 1) | 1
[profile.release]
lto = true

README.md

@@ -1,10 +1,10 @@
# Blog OS (Heap Allocation)
# Blog OS (Double Faults)
[![Build Status](https://github.com/phil-opp/blog_os/workflows/Build%20Code/badge.svg?branch=post-10)](https://github.com/phil-opp/blog_os/actions?query=workflow%3A%22Build+Code%22+branch%3Apost-10)
[![Build Status](https://github.com/phil-opp/blog_os/workflows/Build%20Code/badge.svg?branch=post-06)](https://github.com/phil-opp/blog_os/actions?query=workflow%3A%22Build+Code%22+branch%3Apost-06)
This repository contains the source code for the [Heap Allocation][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series.
This repository contains the source code for the [Double Faults][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series.
[post]: https://os.phil-opp.com/heap-allocation/
[post]: https://os.phil-opp.com/double-fault-exceptions/
**Check out the [master branch](https://github.com/phil-opp/blog_os) for more information.**

src/allocator/bump.rs (deleted)

@@ -1,58 +0,0 @@
use super::{align_up, Locked};
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr;
pub struct BumpAllocator {
heap_start: usize,
heap_end: usize,
next: usize,
allocations: usize,
}
impl BumpAllocator {
/// Creates a new empty bump allocator.
pub const fn new() -> Self {
BumpAllocator {
heap_start: 0,
heap_end: 0,
next: 0,
allocations: 0,
}
}
/// Initializes the bump allocator with the given heap bounds.
///
/// This method is unsafe because the caller must ensure that the given
/// memory range is unused. Also, this method must be called only once.
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
self.heap_start = heap_start;
self.heap_end = heap_start + heap_size;
self.next = heap_start;
}
}
unsafe impl GlobalAlloc for Locked<BumpAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let mut bump = self.lock(); // get a mutable reference
let alloc_start = align_up(bump.next, layout.align());
let alloc_end = alloc_start + layout.size();
if alloc_end > bump.heap_end {
ptr::null_mut() // out of memory
} else {
bump.next = alloc_end;
bump.allocations += 1;
alloc_start as *mut u8
}
}
unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
let mut bump = self.lock(); // get a mutable reference
bump.allocations -= 1;
if bump.allocations == 0 {
bump.next = bump.heap_start;
}
}
}

src/allocator/fixed_size_block.rs (deleted)

@@ -1,102 +0,0 @@
use super::Locked;
use alloc::alloc::{GlobalAlloc, Layout};
use core::{
mem,
ptr::{self, NonNull},
};
/// The block sizes to use.
///
/// The sizes must each be power of 2 because they are also used as
/// the block alignment (alignments must be always powers of 2).
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];
/// Choose an appropriate block size for the given layout.
///
/// Returns an index into the `BLOCK_SIZES` array.
fn list_index(layout: &Layout) -> Option<usize> {
let required_block_size = layout.size().max(layout.align());
BLOCK_SIZES.iter().position(|&s| s >= required_block_size)
}
struct ListNode {
next: Option<&'static mut ListNode>,
}
pub struct FixedSizeBlockAllocator {
list_heads: [Option<&'static mut ListNode>; BLOCK_SIZES.len()],
fallback_allocator: linked_list_allocator::Heap,
}
impl FixedSizeBlockAllocator {
/// Creates an empty FixedSizeBlockAllocator.
pub const fn new() -> Self {
FixedSizeBlockAllocator {
list_heads: [None; BLOCK_SIZES.len()],
fallback_allocator: linked_list_allocator::Heap::empty(),
}
}
/// Initialize the allocator with the given heap bounds.
///
/// This function is unsafe because the caller must guarantee that the given
/// heap bounds are valid and that the heap is unused. This method must be
/// called only once.
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
self.fallback_allocator.init(heap_start, heap_size);
}
/// Allocates using the fallback allocator.
fn fallback_alloc(&mut self, layout: Layout) -> *mut u8 {
match self.fallback_allocator.allocate_first_fit(layout) {
Ok(ptr) => ptr.as_ptr(),
Err(_) => ptr::null_mut(),
}
}
}
unsafe impl GlobalAlloc for Locked<FixedSizeBlockAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let mut allocator = self.lock();
match list_index(&layout) {
Some(index) => {
match allocator.list_heads[index].take() {
Some(node) => {
allocator.list_heads[index] = node.next.take();
node as *mut ListNode as *mut u8
}
None => {
// no block exists in list => allocate new block
let block_size = BLOCK_SIZES[index];
// only works if all block sizes are a power of 2
let block_align = block_size;
let layout = Layout::from_size_align(block_size, block_align).unwrap();
allocator.fallback_alloc(layout)
}
}
}
None => allocator.fallback_alloc(layout),
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let mut allocator = self.lock();
match list_index(&layout) {
Some(index) => {
let new_node = ListNode {
next: allocator.list_heads[index].take(),
};
// verify that block has size and alignment required for storing node
assert!(mem::size_of::<ListNode>() <= BLOCK_SIZES[index]);
assert!(mem::align_of::<ListNode>() <= BLOCK_SIZES[index]);
let new_node_ptr = ptr as *mut ListNode;
new_node_ptr.write(new_node);
allocator.list_heads[index] = Some(&mut *new_node_ptr);
}
None => {
let ptr = NonNull::new(ptr).unwrap();
allocator.fallback_allocator.deallocate(ptr, layout);
}
}
}
}

src/allocator/linked_list.rs (deleted)

@@ -1,145 +0,0 @@
use super::{align_up, Locked};
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};
struct ListNode {
size: usize,
next: Option<&'static mut ListNode>,
}
impl ListNode {
const fn new(size: usize) -> Self {
ListNode { size, next: None }
}
fn start_addr(&self) -> usize {
self as *const Self as usize
}
fn end_addr(&self) -> usize {
self.start_addr() + self.size
}
}
pub struct LinkedListAllocator {
head: ListNode,
}
impl LinkedListAllocator {
/// Creates an empty LinkedListAllocator.
pub const fn new() -> Self {
Self {
head: ListNode::new(0),
}
}
/// Initialize the allocator with the given heap bounds.
///
/// This function is unsafe because the caller must guarantee that the given
/// heap bounds are valid and that the heap is unused. This method must be
/// called only once.
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
self.add_free_region(heap_start, heap_size);
}
/// Adds the given memory region to the front of the list.
unsafe fn add_free_region(&mut self, addr: usize, size: usize) {
// ensure that the freed region is capable of holding ListNode
assert!(align_up(addr, mem::align_of::<ListNode>()) == addr);
assert!(size >= mem::size_of::<ListNode>());
// create a new list node and append it at the start of the list
let mut node = ListNode::new(size);
node.next = self.head.next.take();
let node_ptr = addr as *mut ListNode;
node_ptr.write(node);
self.head.next = Some(&mut *node_ptr)
}
/// Looks for a free region with the given size and alignment and removes
/// it from the list.
///
/// Returns a tuple of the list node and the start address of the allocation.
fn find_region(&mut self, size: usize, align: usize) -> Option<(&'static mut ListNode, usize)> {
// reference to current list node, updated for each iteration
let mut current = &mut self.head;
// look for a large enough memory region in linked list
while let Some(ref mut region) = current.next {
if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
// region suitable for allocation -> remove node from list
let next = region.next.take();
let ret = Some((current.next.take().unwrap(), alloc_start));
current.next = next;
return ret;
} else {
// region not suitable -> continue with next region
current = current.next.as_mut().unwrap();
}
}
// no suitable region found
None
}
/// Try to use the given region for an allocation with given size and alignment.
///
/// Returns the allocation start address on success.
fn alloc_from_region(region: &ListNode, size: usize, align: usize) -> Result<usize, ()> {
let alloc_start = align_up(region.start_addr(), align);
let alloc_end = alloc_start + size;
if alloc_end > region.end_addr() {
// region too small
return Err(());
}
let excess_size = region.end_addr() - alloc_end;
if excess_size > 0 && excess_size < mem::size_of::<ListNode>() {
// rest of region too small to hold a ListNode (required because the
// allocation splits the region in a used and a free part)
return Err(());
}
// region suitable for allocation
Ok(alloc_start)
}
/// Adjust the given layout so that the resulting allocated memory
/// region is also capable of storing a `ListNode`.
///
/// Returns the adjusted size and alignment as a (size, align) tuple.
fn size_align(layout: Layout) -> (usize, usize) {
let layout = layout
.align_to(mem::align_of::<ListNode>())
.expect("adjusting alignment failed")
.pad_to_align();
let size = layout.size().max(mem::size_of::<ListNode>());
(size, layout.align())
}
}
unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// perform layout adjustments
let (size, align) = LinkedListAllocator::size_align(layout);
let mut allocator = self.inner.lock();
if let Some((region, alloc_start)) = allocator.find_region(size, align) {
let alloc_end = alloc_start + size;
let excess_size = region.end_addr() - alloc_end;
if excess_size > 0 {
allocator.add_free_region(alloc_end, excess_size);
}
alloc_start as *mut u8
} else {
ptr::null_mut()
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// perform layout adjustments
let (size, _) = LinkedListAllocator::size_align(layout);
self.inner.lock().add_free_region(ptr as usize, size)
}
}

src/allocator.rs (deleted)

@@ -1,84 +0,0 @@
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr::null_mut;
use fixed_size_block::FixedSizeBlockAllocator;
use x86_64::{
structures::paging::{
mapper::MapToError, FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB,
},
VirtAddr,
};
pub mod bump;
pub mod fixed_size_block;
pub mod linked_list;
pub const HEAP_START: usize = 0x_4444_4444_0000;
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
#[global_allocator]
static ALLOCATOR: Locked<FixedSizeBlockAllocator> = Locked::new(FixedSizeBlockAllocator::new());
pub fn init_heap(
mapper: &mut impl Mapper<Size4KiB>,
frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) -> Result<(), MapToError> {
let page_range = {
let heap_start = VirtAddr::new(HEAP_START as u64);
let heap_end = heap_start + HEAP_SIZE - 1u64;
let heap_start_page = Page::containing_address(heap_start);
let heap_end_page = Page::containing_address(heap_end);
Page::range_inclusive(heap_start_page, heap_end_page)
};
for page in page_range {
let frame = frame_allocator
.allocate_frame()
.ok_or(MapToError::FrameAllocationFailed)?;
let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
mapper.map_to(page, frame, flags, frame_allocator)?.flush();
}
unsafe {
ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
}
Ok(())
}
pub struct Dummy;
unsafe impl GlobalAlloc for Dummy {
unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
null_mut()
}
unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
panic!("dealloc should be never called")
}
}
/// A wrapper around spin::Mutex to permit trait implementations.
pub struct Locked<A> {
inner: spin::Mutex<A>,
}
impl<A> Locked<A> {
pub const fn new(inner: A) -> Self {
Locked {
inner: spin::Mutex::new(inner),
}
}
pub fn lock(&self) -> spin::MutexGuard<A> {
self.inner.lock()
}
}
fn align_up(addr: usize, align: usize) -> usize {
let remainder = addr % align;
if remainder == 0 {
addr // addr already aligned
} else {
addr - remainder + align
}
}

src/gdt.rs

@@ -10,7 +10,11 @@ lazy_static! {
let mut tss = TaskStateSegment::new();
tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] = {
const STACK_SIZE: usize = 4096;
static mut STACK: [u8; STACK_SIZE] = [0; STACK_SIZE];
#[repr(align(16))]
struct Stack([u8; STACK_SIZE]);
static mut STACK: Stack = Stack([0; STACK_SIZE]);
let stack_start = VirtAddr::from_ptr(unsafe { &STACK });
let stack_end = stack_start + STACK_SIZE;
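
Note: the hunk above is the core change of this commit. Below is a minimal sketch of how the whole TSS block in src/gdt.rs might read after the change; the imports, the DOUBLE_FAULT_IST_INDEX constant, and the trailing stack_end expression are assumed from the post's gdt.rs and are not part of the hunk shown:

    use lazy_static::lazy_static;
    use x86_64::structures::tss::TaskStateSegment;
    use x86_64::VirtAddr;

    pub const DOUBLE_FAULT_IST_INDEX: u16 = 0;

    lazy_static! {
        static ref TSS: TaskStateSegment = {
            let mut tss = TaskStateSegment::new();
            tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] = {
                const STACK_SIZE: usize = 4096;
                // Wrapping the byte array in a #[repr(align(16))] struct forces
                // 16-byte alignment of the static, which a plain [u8; STACK_SIZE]
                // does not guarantee.
                #[repr(align(16))]
                struct Stack([u8; STACK_SIZE]);
                static mut STACK: Stack = Stack([0; STACK_SIZE]);

                let stack_start = VirtAddr::from_ptr(unsafe { &STACK });
                let stack_end = stack_start + STACK_SIZE;
                stack_end // stacks grow downwards, so hand out the top address
            };
            tss
        };
    }

Alignment attributes apply to types rather than to statics, so introducing a small wrapper type is a straightforward way to get an aligned static stack here.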

src/interrupts.rs

@@ -1,44 +1,16 @@
use crate::{gdt, hlt_loop, print, println};
use crate::{gdt, println};
use lazy_static::lazy_static;
use pic8259_simple::ChainedPics;
use spin;
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode};
pub const PIC_1_OFFSET: u8 = 32;
pub const PIC_2_OFFSET: u8 = PIC_1_OFFSET + 8;
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum InterruptIndex {
Timer = PIC_1_OFFSET,
Keyboard,
}
impl InterruptIndex {
fn as_u8(self) -> u8 {
self as u8
}
fn as_usize(self) -> usize {
usize::from(self.as_u8())
}
}
pub static PICS: spin::Mutex<ChainedPics> =
spin::Mutex::new(unsafe { ChainedPics::new(PIC_1_OFFSET, PIC_2_OFFSET) });
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame};
lazy_static! {
static ref IDT: InterruptDescriptorTable = {
let mut idt = InterruptDescriptorTable::new();
idt.breakpoint.set_handler_fn(breakpoint_handler);
idt.page_fault.set_handler_fn(page_fault_handler);
unsafe {
idt.double_fault
.set_handler_fn(double_fault_handler)
.set_stack_index(gdt::DOUBLE_FAULT_IST_INDEX);
}
idt[InterruptIndex::Timer.as_usize()].set_handler_fn(timer_interrupt_handler);
idt[InterruptIndex::Keyboard.as_usize()].set_handler_fn(keyboard_interrupt_handler);
idt
};
}
@@ -51,19 +23,6 @@ extern "x86-interrupt" fn breakpoint_handler(stack_frame: &mut InterruptStackFra
println!("EXCEPTION: BREAKPOINT\n{:#?}", stack_frame);
}
extern "x86-interrupt" fn page_fault_handler(
stack_frame: &mut InterruptStackFrame,
error_code: PageFaultErrorCode,
) {
use x86_64::registers::control::Cr2;
println!("EXCEPTION: PAGE FAULT");
println!("Accessed Address: {:?}", Cr2::read());
println!("Error Code: {:?}", error_code);
println!("{:#?}", stack_frame);
hlt_loop();
}
extern "x86-interrupt" fn double_fault_handler(
stack_frame: &mut InterruptStackFrame,
_error_code: u64,
@@ -71,44 +30,6 @@ extern "x86-interrupt" fn double_fault_handler(
panic!("EXCEPTION: DOUBLE FAULT\n{:#?}", stack_frame);
}
extern "x86-interrupt" fn timer_interrupt_handler(_stack_frame: &mut InterruptStackFrame) {
print!(".");
unsafe {
PICS.lock()
.notify_end_of_interrupt(InterruptIndex::Timer.as_u8());
}
crate::multitasking::invoke_scheduler();
}
extern "x86-interrupt" fn keyboard_interrupt_handler(_stack_frame: &mut InterruptStackFrame) {
use pc_keyboard::{layouts, DecodedKey, Keyboard, ScancodeSet1};
use spin::Mutex;
use x86_64::instructions::port::Port;
lazy_static! {
static ref KEYBOARD: Mutex<Keyboard<layouts::Us104Key, ScancodeSet1>> =
Mutex::new(Keyboard::new(layouts::Us104Key, ScancodeSet1));
}
let mut keyboard = KEYBOARD.lock();
let mut port = Port::new(0x60);
let scancode: u8 = unsafe { port.read() };
if let Ok(Some(key_event)) = keyboard.add_byte(scancode) {
if let Some(key) = keyboard.process_keyevent(key_event) {
match key {
DecodedKey::Unicode(character) => print!("{}", character),
DecodedKey::RawKey(key) => print!("{:?}", key),
}
}
}
unsafe {
PICS.lock()
.notify_end_of_interrupt(InterruptIndex::Keyboard.as_u8());
}
}
#[cfg(test)]
use crate::{serial_print, serial_println};

src/lib.rs

@@ -2,36 +2,19 @@
#![cfg_attr(test, no_main)]
#![feature(custom_test_frameworks)]
#![feature(abi_x86_interrupt)]
#![feature(alloc_error_handler)]
#![feature(const_fn)]
#![feature(alloc_layout_extra)]
#![feature(const_in_array_repeat_expressions)]
#![feature(global_asm)]
#![feature(asm)]
#![feature(raw)]
#![feature(never_type)]
#![feature(naked_functions)]
#![feature(option_expect_none)]
#![test_runner(crate::test_runner)]
#![reexport_test_harness_main = "test_main"]
extern crate alloc;
use core::panic::PanicInfo;
pub mod allocator;
pub mod gdt;
pub mod interrupts;
pub mod memory;
pub mod multitasking;
pub mod serial;
pub mod vga_buffer;
pub fn init() {
gdt::init();
interrupts::init_idt();
unsafe { interrupts::PICS.lock().initialize() };
x86_64::instructions::interrupts::enable();
}
pub fn test_runner(tests: &[&dyn Fn()]) {
@@ -46,7 +29,7 @@ pub fn test_panic_handler(info: &PanicInfo) -> ! {
serial_println!("[failed]\n");
serial_println!("Error: {}\n", info);
exit_qemu(QemuExitCode::Failed);
hlt_loop();
loop {}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -65,24 +48,13 @@ pub fn exit_qemu(exit_code: QemuExitCode) {
}
}
pub fn hlt_loop() -> ! {
loop {
x86_64::instructions::hlt();
}
}
#[cfg(test)]
use bootloader::{entry_point, BootInfo};
#[cfg(test)]
entry_point!(test_kernel_main);
/// Entry point for `cargo xtest`
#[cfg(test)]
fn test_kernel_main(_boot_info: &'static BootInfo) -> ! {
#[no_mangle]
pub extern "C" fn _start() -> ! {
init();
test_main();
hlt_loop();
loop {}
}
#[cfg(test)]
@@ -90,8 +62,3 @@ fn test_kernel_main(_boot_info: &'static BootInfo) -> ! {
fn panic(info: &PanicInfo) -> ! {
test_panic_handler(info)
}
#[alloc_error_handler]
fn alloc_error_handler(layout: alloc::alloc::Layout) -> ! {
panic!("allocation error: {:?}", layout)
}

src/main.rs

@@ -4,98 +4,35 @@
#![test_runner(blog_os::test_runner)]
#![reexport_test_harness_main = "test_main"]
extern crate alloc;
use alloc::{boxed::Box, rc::Rc, vec, vec::Vec};
use blog_os::multitasking::{self, thread::Thread, with_scheduler};
use blog_os::{print, println};
use bootloader::{entry_point, BootInfo};
use blog_os::println;
use core::panic::PanicInfo;
entry_point!(kernel_main);
fn kernel_main(boot_info: &'static BootInfo) -> ! {
use blog_os::allocator;
use blog_os::memory::{self, BootInfoFrameAllocator};
use x86_64::VirtAddr;
#[no_mangle]
pub extern "C" fn _start() -> ! {
println!("Hello World{}", "!");
blog_os::init();
let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset);
let mut mapper = unsafe { memory::init(phys_mem_offset) };
let mut frame_allocator = unsafe { BootInfoFrameAllocator::init(&boot_info.memory_map) };
allocator::init_heap(&mut mapper, &mut frame_allocator).expect("heap initialization failed");
// allocate a number on the heap
let heap_value = Box::new(41);
println!("heap_value at {:p}", heap_value);
// create a dynamically sized vector
let mut vec = Vec::new();
for i in 0..500 {
vec.push(i);
fn stack_overflow() {
stack_overflow(); // for each recursion, the return address is pushed
}
println!("vec at {:p}", vec.as_slice());
// create a reference counted vector -> will be freed when count reaches 0
let reference_counted = Rc::new(vec![1, 2, 3]);
let cloned_reference = reference_counted.clone();
println!(
"current reference count is {}",
Rc::strong_count(&cloned_reference)
);
core::mem::drop(reference_counted);
println!(
"reference count is {} now",
Rc::strong_count(&cloned_reference)
);
// uncomment line below to trigger a stack overflow
// stack_overflow();
#[cfg(test)]
test_main();
let idle_thread = Thread::create(idle_thread, 2, &mut mapper, &mut frame_allocator).unwrap();
with_scheduler(|s| s.set_idle_thread(idle_thread));
for _ in 0..10 {
let thread = Thread::create(thread_entry, 2, &mut mapper, &mut frame_allocator).unwrap();
with_scheduler(|s| s.add_new_thread(thread));
}
let thread =
Thread::create_from_closure(|| thread_entry(), 2, &mut mapper, &mut frame_allocator)
.unwrap();
with_scheduler(|s| s.add_new_thread(thread));
println!("It did not crash!");
thread_entry();
}
fn idle_thread() -> ! {
loop {
x86_64::instructions::hlt();
multitasking::yield_now();
}
}
fn thread_entry() -> ! {
let thread_id = with_scheduler(|s| s.current_thread_id()).as_u64();
for _ in 0..=thread_id {
print!("{}", thread_id);
x86_64::instructions::hlt();
}
multitasking::exit_thread();
loop {}
}
/// This function is called on panic.
#[cfg(not(test))]
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
unsafe {
blog_os::vga_buffer::WRITER.force_unlock();
}
println!("{}", info);
blog_os::hlt_loop();
loop {}
}
#[cfg(test)]
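
Note: the main.rs hunk above interleaves the removed heap-allocation and multitasking demo code with the restored double-fault demo. Read only the surviving lines, the resulting _start is roughly the following sketch (assuming the post-06 layout; only lines visible in the hunk are taken as given):

    #[no_mangle]
    pub extern "C" fn _start() -> ! {
        println!("Hello World{}", "!");
        blog_os::init();

        fn stack_overflow() {
            stack_overflow(); // for each recursion, the return address is pushed
        }

        // uncomment line below to trigger a stack overflow
        // stack_overflow();

        #[cfg(test)]
        test_main();

        println!("It did not crash!");
        loop {}
    }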

src/memory.rs (deleted)

@@ -1,154 +0,0 @@
use bootloader::bootinfo::{MemoryMap, MemoryRegionType};
use x86_64::{
structures::paging::{
mapper, FrameAllocator, Mapper, OffsetPageTable, Page, PageTable, PhysFrame, Size4KiB,
UnusedPhysFrame,
},
PhysAddr, VirtAddr,
};
/// Initialize a new OffsetPageTable.
///
/// This function is unsafe because the caller must guarantee that the
/// complete physical memory is mapped to virtual memory at the passed
/// `physical_memory_offset`. Also, this function must be only called once
/// to avoid aliasing `&mut` references (which is undefined behavior).
pub unsafe fn init(physical_memory_offset: VirtAddr) -> OffsetPageTable<'static> {
let level_4_table = active_level_4_table(physical_memory_offset);
OffsetPageTable::new(level_4_table, physical_memory_offset)
}
/// Returns a mutable reference to the active level 4 table.
///
/// This function is unsafe because the caller must guarantee that the
/// complete physical memory is mapped to virtual memory at the passed
/// `physical_memory_offset`. Also, this function must be only called once
/// to avoid aliasing `&mut` references (which is undefined behavior).
unsafe fn active_level_4_table(physical_memory_offset: VirtAddr) -> &'static mut PageTable {
use x86_64::registers::control::Cr3;
let (level_4_table_frame, _) = Cr3::read();
let phys = level_4_table_frame.start_address();
let virt = physical_memory_offset + phys.as_u64();
let page_table_ptr: *mut PageTable = virt.as_mut_ptr();
&mut *page_table_ptr // unsafe
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct StackBounds {
start: VirtAddr,
end: VirtAddr,
}
impl StackBounds {
pub fn start(&self) -> VirtAddr {
self.start
}
pub fn end(&self) -> VirtAddr {
self.end
}
}
pub fn alloc_stack(
size_in_pages: u64,
mapper: &mut impl Mapper<Size4KiB>,
frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) -> Result<StackBounds, mapper::MapToError> {
use core::sync::atomic::{AtomicU64, Ordering};
use x86_64::structures::paging::PageTableFlags as Flags;
static STACK_ALLOC_NEXT: AtomicU64 = AtomicU64::new(0x_5555_5555_0000);
let guard_page_start = STACK_ALLOC_NEXT.fetch_add(
(size_in_pages + 1) * Page::<Size4KiB>::SIZE,
Ordering::SeqCst,
);
let guard_page = Page::from_start_address(VirtAddr::new(guard_page_start))
.expect("`STACK_ALLOC_NEXT` not page aligned");
let stack_start = guard_page + 1;
let stack_end = stack_start + size_in_pages;
let flags = Flags::PRESENT | Flags::WRITABLE;
for page in Page::range(stack_start, stack_end) {
let frame = frame_allocator
.allocate_frame()
.ok_or(mapper::MapToError::FrameAllocationFailed)?;
mapper.map_to(page, frame, flags, frame_allocator)?.flush();
}
Ok(StackBounds {
start: stack_start.start_address(),
end: stack_end.start_address(),
})
}
/// Creates an example mapping for the given page to frame `0xb8000`.
pub fn create_example_mapping(
page: Page,
mapper: &mut OffsetPageTable,
frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) {
use x86_64::structures::paging::PageTableFlags as Flags;
let frame = PhysFrame::containing_address(PhysAddr::new(0xb8000));
// FIXME: ONLY FOR TEMPORARY TESTING
let unused_frame = unsafe { UnusedPhysFrame::new(frame) };
let flags = Flags::PRESENT | Flags::WRITABLE;
let map_to_result = mapper.map_to(page, unused_frame, flags, frame_allocator);
map_to_result.expect("map_to failed").flush();
}
/// A FrameAllocator that always returns `None`.
pub struct EmptyFrameAllocator;
unsafe impl FrameAllocator<Size4KiB> for EmptyFrameAllocator {
fn allocate_frame(&mut self) -> Option<UnusedPhysFrame> {
None
}
}
/// A FrameAllocator that returns usable frames from the bootloader's memory map.
pub struct BootInfoFrameAllocator {
memory_map: &'static MemoryMap,
next: usize,
}
impl BootInfoFrameAllocator {
/// Create a FrameAllocator from the passed memory map.
///
/// This function is unsafe because the caller must guarantee that the passed
/// memory map is valid. The main requirement is that all frames that are marked
/// as `USABLE` in it are really unused.
pub unsafe fn init(memory_map: &'static MemoryMap) -> Self {
BootInfoFrameAllocator {
memory_map,
next: 0,
}
}
/// Returns an iterator over the usable frames specified in the memory map.
fn usable_frames(&self) -> impl Iterator<Item = UnusedPhysFrame> {
// get usable regions from memory map
let regions = self.memory_map.iter();
let usable_regions = regions.filter(|r| r.region_type == MemoryRegionType::Usable);
// map each region to its address range
let addr_ranges = usable_regions.map(|r| r.range.start_addr()..r.range.end_addr());
// transform to an iterator of frame start addresses
let frame_addresses = addr_ranges.flat_map(|r| r.step_by(4096));
// create `PhysFrame` types from the start addresses
let frames = frame_addresses.map(|addr| PhysFrame::containing_address(PhysAddr::new(addr)));
// we know that the frames are really unused
frames.map(|f| unsafe { UnusedPhysFrame::new(f) })
}
}
unsafe impl FrameAllocator<Size4KiB> for BootInfoFrameAllocator {
fn allocate_frame(&mut self) -> Option<UnusedPhysFrame> {
let frame = self.usable_frames().nth(self.next);
self.next += 1;
frame
}
}

src/multitasking/context_switch.rs (deleted)

@@ -1,105 +0,0 @@
use super::{with_scheduler, SwitchReason};
use crate::multitasking::thread::ThreadId;
use alloc::boxed::Box;
use core::mem;
use core::raw::TraitObject;
use x86_64::VirtAddr;
pub struct Stack {
pointer: VirtAddr,
}
impl Stack {
pub unsafe fn new(stack_pointer: VirtAddr) -> Self {
Stack {
pointer: stack_pointer,
}
}
pub fn get_stack_pointer(self) -> VirtAddr {
self.pointer
}
pub fn set_up_for_closure(&mut self, closure: Box<dyn FnOnce() -> !>) {
let trait_object: TraitObject = unsafe { mem::transmute(closure) };
unsafe { self.push(trait_object.data) };
unsafe { self.push(trait_object.vtable) };
self.set_up_for_entry_point(call_closure_entry);
}
pub fn set_up_for_entry_point(&mut self, entry_point: fn() -> !) {
unsafe { self.push(entry_point) };
let rflags: u64 = 0x200;
unsafe { self.push(rflags) };
}
unsafe fn push<T>(&mut self, value: T) {
self.pointer -= core::mem::size_of::<T>();
let ptr: *mut T = self.pointer.as_mut_ptr();
ptr.write(value);
}
}
pub unsafe fn context_switch_to(
new_stack_pointer: VirtAddr,
prev_thread_id: ThreadId,
switch_reason: SwitchReason,
) {
asm!(
"call asm_context_switch"
:
: "{rdi}"(new_stack_pointer), "{rsi}"(prev_thread_id), "{rdx}"(switch_reason as u64)
: "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15", "rflags", "memory"
: "intel", "volatile"
);
}
global_asm!(
"
.intel_syntax noprefix
// asm_context_switch(stack_pointer: u64, thread_id: u64)
asm_context_switch:
pushfq
mov rax, rsp
mov rsp, rdi
mov rdi, rax
call add_paused_thread
popfq
ret
"
);
#[no_mangle]
pub extern "C" fn add_paused_thread(
paused_stack_pointer: VirtAddr,
paused_thread_id: ThreadId,
switch_reason: SwitchReason,
) {
with_scheduler(|s| s.add_paused_thread(paused_stack_pointer, paused_thread_id, switch_reason));
}
#[naked]
fn call_closure_entry() -> ! {
unsafe {
asm!("
pop rsi
pop rdi
call call_closure
" ::: "mem" : "intel", "volatile")
};
unreachable!();
}
// no_mangle required because of https://github.com/rust-lang/rust/issues/68136
#[no_mangle]
extern "C" fn call_closure(data: *mut (), vtable: *mut ()) -> ! {
let trait_object = TraitObject { data, vtable };
let f: Box<dyn FnOnce() -> !> = unsafe { mem::transmute(trait_object) };
f()
}

src/multitasking/mod.rs (deleted)

@@ -1,57 +0,0 @@
use scheduler::Scheduler;
pub mod context_switch;
pub mod scheduler;
pub mod thread;
static SCHEDULER: spin::Mutex<Option<Scheduler>> = spin::Mutex::new(None);
#[repr(u64)]
pub enum SwitchReason {
Paused,
Yield,
Blocked,
Exit,
}
pub fn invoke_scheduler() {
let next = SCHEDULER
.try_lock()
.and_then(|mut scheduler| scheduler.as_mut().and_then(|s| s.schedule()));
if let Some((next_stack_pointer, prev_thread_id)) = next {
unsafe {
context_switch::context_switch_to(
next_stack_pointer,
prev_thread_id,
SwitchReason::Paused,
)
};
}
}
pub fn exit_thread() -> ! {
synchronous_context_switch(SwitchReason::Exit).expect("can't exit last thread");
unreachable!("finished thread continued");
}
pub fn yield_now() {
let _ = synchronous_context_switch(SwitchReason::Yield);
}
fn synchronous_context_switch(reason: SwitchReason) -> Result<(), ()> {
let next = with_scheduler(|s| s.schedule());
match next {
Some((next_stack_pointer, prev_thread_id)) => unsafe {
context_switch::context_switch_to(next_stack_pointer, prev_thread_id, reason);
Ok(())
},
None => Err(()),
}
}
pub fn with_scheduler<F, T>(f: F) -> T
where
F: FnOnce(&mut Scheduler) -> T,
{
f(SCHEDULER.lock().get_or_insert_with(Scheduler::new))
}

src/multitasking/scheduler.rs (deleted)

@@ -1,122 +0,0 @@
use super::SwitchReason;
use crate::multitasking::thread::{Thread, ThreadId};
use alloc::collections::{BTreeMap, BTreeSet, VecDeque};
use core::mem;
use x86_64::VirtAddr;
pub struct Scheduler {
threads: BTreeMap<ThreadId, Thread>,
idle_thread_id: Option<ThreadId>,
current_thread_id: ThreadId,
paused_threads: VecDeque<ThreadId>,
blocked_threads: BTreeSet<ThreadId>,
wakeups: BTreeSet<ThreadId>,
}
impl Scheduler {
pub fn new() -> Self {
let root_thread = Thread::create_root_thread();
let root_id = root_thread.id();
let mut threads = BTreeMap::new();
threads
.insert(root_id, root_thread)
.expect_none("map is not empty after creation");
Scheduler {
threads,
current_thread_id: root_id,
paused_threads: VecDeque::new(),
blocked_threads: BTreeSet::new(),
wakeups: BTreeSet::new(),
idle_thread_id: None,
}
}
fn next_thread(&mut self) -> Option<ThreadId> {
self.paused_threads.pop_front()
}
pub fn schedule(&mut self) -> Option<(VirtAddr, ThreadId)> {
let mut next_thread_id = self.next_thread();
if next_thread_id.is_none() && Some(self.current_thread_id) != self.idle_thread_id {
next_thread_id = self.idle_thread_id
}
if let Some(next_id) = next_thread_id {
let next_thread = self
.threads
.get_mut(&next_id)
.expect("next thread does not exist");
let next_stack_pointer = next_thread
.stack_pointer()
.take()
.expect("paused thread has no stack pointer");
let prev_thread_id = mem::replace(&mut self.current_thread_id, next_thread.id());
Some((next_stack_pointer, prev_thread_id))
} else {
None
}
}
pub(super) fn add_paused_thread(
&mut self,
paused_stack_pointer: VirtAddr,
paused_thread_id: ThreadId,
switch_reason: SwitchReason,
) {
let paused_thread = self
.threads
.get_mut(&paused_thread_id)
.expect("paused thread does not exist");
paused_thread
.stack_pointer()
.replace(paused_stack_pointer)
.expect_none("running thread should have stack pointer set to None");
if Some(paused_thread_id) == self.idle_thread_id {
return; // do nothing
}
match switch_reason {
SwitchReason::Paused | SwitchReason::Yield => {
self.paused_threads.push_back(paused_thread_id)
}
SwitchReason::Blocked => {
self.blocked_threads.insert(paused_thread_id);
self.check_for_wakeup(paused_thread_id);
}
SwitchReason::Exit => {
let thread = self
.threads
.remove(&paused_thread_id)
.expect("thread not found");
// TODO: free stack memory again
}
}
}
pub fn add_new_thread(&mut self, thread: Thread) {
let thread_id = thread.id();
self.threads
.insert(thread_id, thread)
.expect_none("thread already exists");
self.paused_threads.push_back(thread_id);
}
pub fn set_idle_thread(&mut self, thread: Thread) {
let thread_id = thread.id();
self.threads
.insert(thread_id, thread)
.expect_none("thread already exists");
self.idle_thread_id
.replace(thread_id)
.expect_none("idle thread should be set only once");
}
pub fn current_thread_id(&self) -> ThreadId {
self.current_thread_id
}
fn check_for_wakeup(&mut self, thread_id: ThreadId) {
if self.wakeups.remove(&thread_id) {
assert!(self.blocked_threads.remove(&thread_id));
self.paused_threads.push_back(thread_id);
}
}
}

src/multitasking/thread.rs (deleted)

@@ -1,82 +0,0 @@
use crate::memory::{alloc_stack, StackBounds};
use crate::multitasking::context_switch::Stack;
use alloc::boxed::Box;
use x86_64::{
structures::paging::{mapper, FrameAllocator, Mapper, Size4KiB},
VirtAddr,
};
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct ThreadId(u64);
impl ThreadId {
pub fn as_u64(&self) -> u64 {
self.0
}
fn new() -> Self {
use core::sync::atomic::{AtomicU64, Ordering};
static NEXT_THREAD_ID: AtomicU64 = AtomicU64::new(1);
ThreadId(NEXT_THREAD_ID.fetch_add(1, Ordering::SeqCst))
}
}
#[derive(Debug)]
pub struct Thread {
id: ThreadId,
stack_pointer: Option<VirtAddr>,
stack_bounds: Option<StackBounds>,
}
impl Thread {
pub fn create(
entry_point: fn() -> !,
stack_size: u64,
mapper: &mut impl Mapper<Size4KiB>,
frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) -> Result<Self, mapper::MapToError> {
let stack_bounds = alloc_stack(stack_size, mapper, frame_allocator)?;
let mut stack = unsafe { Stack::new(stack_bounds.end()) };
stack.set_up_for_entry_point(entry_point);
Ok(Self::new(stack.get_stack_pointer(), stack_bounds))
}
pub fn create_from_closure<F>(
closure: F,
stack_size: u64,
mapper: &mut impl Mapper<Size4KiB>,
frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) -> Result<Self, mapper::MapToError>
where
F: FnOnce() -> ! + 'static + Send + Sync,
{
let stack_bounds = alloc_stack(stack_size, mapper, frame_allocator)?;
let mut stack = unsafe { Stack::new(stack_bounds.end()) };
stack.set_up_for_closure(Box::new(closure));
Ok(Self::new(stack.get_stack_pointer(), stack_bounds))
}
fn new(stack_pointer: VirtAddr, stack_bounds: StackBounds) -> Self {
Thread {
id: ThreadId::new(),
stack_pointer: Some(stack_pointer),
stack_bounds: Some(stack_bounds),
}
}
pub(super) fn create_root_thread() -> Self {
Thread {
id: ThreadId(0),
stack_pointer: None,
stack_bounds: None,
}
}
pub fn id(&self) -> ThreadId {
self.id
}
pub(super) fn stack_pointer(&mut self) -> &mut Option<VirtAddr> {
&mut self.stack_pointer
}
}

src/serial.rs

@@ -13,14 +13,10 @@ lazy_static! {
#[doc(hidden)]
pub fn _print(args: ::core::fmt::Arguments) {
use core::fmt::Write;
use x86_64::instructions::interrupts;
interrupts::without_interrupts(|| {
SERIAL1
.lock()
.write_fmt(args)
.expect("Printing to serial failed");
});
SERIAL1
.lock()
.write_fmt(args)
.expect("Printing to serial failed");
}
/// Prints to the host through the serial interface.

src/vga_buffer.rs

@@ -166,16 +166,11 @@ macro_rules! println {
($($arg:tt)*) => ($crate::print!("{}\n", format_args!($($arg)*)));
}
/// Prints the given formatted string to the VGA text buffer
/// through the global `WRITER` instance.
/// Prints the given formatted string to the VGA text buffer through the global `WRITER` instance.
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
use core::fmt::Write;
use x86_64::instructions::interrupts;
interrupts::without_interrupts(|| {
WRITER.lock().write_fmt(args).unwrap();
});
WRITER.lock().write_fmt(args).unwrap();
}
#[test_case]
@@ -196,20 +191,14 @@ fn test_println_many() {
#[test_case]
fn test_println_output() {
use core::fmt::Write;
use x86_64::instructions::interrupts;
serial_print!("test_println_output... ");
let s = "Some test string that fits on a single line";
interrupts::without_interrupts(|| {
let mut writer = WRITER.lock();
writeln!(writer, "\n{}", s).expect("writeln failed");
for (i, c) in s.chars().enumerate() {
let screen_char = writer.buffer.chars[BUFFER_HEIGHT - 2][i].read();
assert_eq!(char::from(screen_char.ascii_character), c);
}
});
println!("{}", s);
for (i, c) in s.chars().enumerate() {
let screen_char = WRITER.lock().buffer.chars[BUFFER_HEIGHT - 2][i].read();
assert_eq!(char::from(screen_char.ascii_character), c);
}
serial_println!("[ok]");
}

tests/heap_allocation.rs (deleted)

@@ -1,76 +0,0 @@
#![no_std]
#![no_main]
#![feature(custom_test_frameworks)]
#![test_runner(blog_os::test_runner)]
#![reexport_test_harness_main = "test_main"]
extern crate alloc;
use alloc::{boxed::Box, vec::Vec};
use blog_os::{allocator::HEAP_SIZE, serial_print, serial_println};
use bootloader::{entry_point, BootInfo};
use core::panic::PanicInfo;
entry_point!(main);
fn main(boot_info: &'static BootInfo) -> ! {
use blog_os::allocator;
use blog_os::memory::{self, BootInfoFrameAllocator};
use x86_64::VirtAddr;
blog_os::init();
let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset);
let mut mapper = unsafe { memory::init(phys_mem_offset) };
let mut frame_allocator = unsafe { BootInfoFrameAllocator::init(&boot_info.memory_map) };
allocator::init_heap(&mut mapper, &mut frame_allocator).expect("heap initialization failed");
test_main();
loop {}
}
#[test_case]
fn simple_allocation() {
serial_print!("simple_allocation... ");
let heap_value = Box::new(41);
assert_eq!(*heap_value, 41);
serial_println!("[ok]");
}
#[test_case]
fn large_vec() {
serial_print!("large_vec... ");
let n = 1000;
let mut vec = Vec::new();
for i in 0..n {
vec.push(i);
}
assert_eq!(vec.iter().sum::<u64>(), (n - 1) * n / 2);
serial_println!("[ok]");
}
#[test_case]
fn many_boxes() {
serial_print!("many_boxes... ");
for i in 0..HEAP_SIZE {
let x = Box::new(i);
assert_eq!(*x, i);
}
serial_println!("[ok]");
}
#[test_case]
fn many_boxes_long_lived() {
serial_print!("many_boxes_long_lived... ");
let long_lived = Box::new(1); // new
for i in 0..HEAP_SIZE {
let x = Box::new(i);
assert_eq!(*x, i);
}
assert_eq!(*long_lived, 1); // new
serial_println!("[ok]");
}
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
blog_os::test_panic_handler(info)
}