Compare commits

..

5 Commits

Author              SHA1         Message                                                      Date
Philipp Oppermann   bb5899e437   Merge branch 'post-03' into post-04                          2025-09-03 17:58:33 +02:00
Philipp Oppermann   93d0daa1e0   Merge branch 'post-02' into post-03                          2025-09-03 17:58:28 +02:00
Philipp Oppermann   83e6c0bc00   Merge pull request #1436 from phil-opp/post-02-fix-target   2025-09-03 17:56:38 +02:00
                                 (Fix: `target-pointer-width` field now expects an integer)
Philipp Oppermann   2b11ad8397   Fix: target-pointer-width field now expects an integer      2025-09-03 17:53:14 +02:00
Philipp Oppermann   fa51f3adbf   Update to latest bootloader version                          2025-09-03 17:52:56 +02:00
18 changed files with 28 additions and 961 deletions
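Note on the `target-pointer-width` fix (commit 2b11ad8397): per the commit message, recent nightlies expect this field of a custom target specification as an integer rather than a string, and the one-line change appears in the target JSON diff at the end of this compare. As a sketch, the affected region of that target file with the corrected value looks roughly like the fragment below; it includes only the keys visible in the diff and is not a complete target specification (a real one also needs entries such as the LLVM target triple and linker settings, which are not shown here).

    {
      "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128",
      "arch": "x86_64",
      "target-endian": "little",
      "target-pointer-width": 64,
      "target-c-int-width": 32,
      "os": "none",
      "executables": true
    }

With the old string form ("64"), current nightlies reject the target file; the integer form is what the updated toolchain accepts.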

View File

@@ -1,5 +1,5 @@
[unstable]
build-std = ["core", "compiler_builtins", "alloc"]
build-std = ["core", "compiler_builtins"]
build-std-features = ["compiler-builtins-mem"]
[build]

Cargo.lock (generated)
View File

@@ -2,12 +2,6 @@
# It is not intended for manual editing.
version = 4
[[package]]
name = "autocfg"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
[[package]]
name = "bit_field"
version = "0.10.2"
@@ -32,9 +26,6 @@ version = "0.1.0"
dependencies = [
"bootloader",
"lazy_static",
"linked_list_allocator",
"pc-keyboard",
"pic8259",
"spin 0.5.2",
"uart_16550",
"volatile 0.2.7",
@@ -43,9 +34,9 @@ dependencies = [
[[package]]
name = "bootloader"
version = "0.9.32"
version = "0.9.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ea119c3ed05625c179e09d17d0914570a3753ca09c890a73d98f6b72aea00d2"
checksum = "7bdfddac270bbdd45903296bc1caf29a7fdce6b326aaf0bbab7f04c5f98b7447"
[[package]]
name = "lazy_static"
@@ -56,52 +47,12 @@ dependencies = [
"spin 0.9.8",
]
[[package]]
name = "linked_list_allocator"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "549ce1740e46b291953c4340adcd74c59bcf4308f4cac050fd33ba91b7168f4a"
dependencies = [
"spinning_top",
]
[[package]]
name = "lock_api"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "pc-keyboard"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed089a1fbffe3337a1a345501c981f1eb1e47e69de5a40e852433e12953c3174"
[[package]]
name = "pic8259"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb844b5b01db1e0b17938685738f113bfc903846f18932b378bc0eabfa40e194"
dependencies = [
"x86_64",
]
[[package]]
name = "rustversion"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "spin"
version = "0.5.2"
@@ -114,15 +65,6 @@ version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
[[package]]
name = "spinning_top"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b9eb1a2f4c41445a3a0ff9abc5221c5fcd28e1f13cd7c0397706f9ac938ddb0"
dependencies = [
"lock_api",
]
[[package]]
name = "uart_16550"
version = "0.2.19"

View File

@@ -8,19 +8,12 @@ edition = "2024"
name = "should_panic"
harness = false
[[test]]
name = "stack_overflow"
harness = false
[dependencies]
bootloader = { version = "0.9", features = ["map_physical_memory"] }
bootloader = "0.9"
volatile = "0.2.6"
spin = "0.5.2"
x86_64 = "0.14.2"
uart_16550 = "0.2.0"
pic8259 = "0.10.1"
pc-keyboard = "0.7.0"
linked_list_allocator = "0.9.0"
[dependencies.lazy_static]
version = "1.0"

View File

@@ -1,10 +1,10 @@
# Blog OS (Allocator Designs)
# Blog OS (Testing)
[![Build Status](https://github.com/phil-opp/blog_os/workflows/Code/badge.svg?branch=post-11)](https://github.com/phil-opp/blog_os/actions?query=workflow%3A%22Code%22+branch%3Apost-11)
[![Build Status](https://github.com/phil-opp/blog_os/workflows/Code/badge.svg?branch=post-04)](https://github.com/phil-opp/blog_os/actions?query=workflow%3A%22Code%22+branch%3Apost-04)
This repository contains the source code for the [Allocator Designs][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series.
This repository contains the source code for the [Testing][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series.
[post]: https://os.phil-opp.com/allocator-designs/
[post]: https://os.phil-opp.com/testing/
**Check out the [master branch](https://github.com/phil-opp/blog_os) for more information.**

View File

@@ -1,82 +0,0 @@
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr::null_mut;
use fixed_size_block::FixedSizeBlockAllocator;
use x86_64::{
VirtAddr,
structures::paging::{
FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB, mapper::MapToError,
},
};
pub mod bump;
pub mod fixed_size_block;
pub mod linked_list;
pub const HEAP_START: usize = 0x_4444_4444_0000;
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
#[global_allocator]
static ALLOCATOR: Locked<FixedSizeBlockAllocator> = Locked::new(FixedSizeBlockAllocator::new());
pub fn init_heap(
mapper: &mut impl Mapper<Size4KiB>,
frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) -> Result<(), MapToError<Size4KiB>> {
let page_range = {
let heap_start = VirtAddr::new(HEAP_START as u64);
let heap_end = heap_start + HEAP_SIZE - 1u64;
let heap_start_page = Page::containing_address(heap_start);
let heap_end_page = Page::containing_address(heap_end);
Page::range_inclusive(heap_start_page, heap_end_page)
};
for page in page_range {
let frame = frame_allocator
.allocate_frame()
.ok_or(MapToError::FrameAllocationFailed)?;
let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
unsafe { mapper.map_to(page, frame, flags, frame_allocator)?.flush() };
}
unsafe {
ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
}
Ok(())
}
pub struct Dummy;
unsafe impl GlobalAlloc for Dummy {
unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
null_mut()
}
unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
panic!("dealloc should be never called")
}
}
/// A wrapper around spin::Mutex to permit trait implementations.
pub struct Locked<A> {
inner: spin::Mutex<A>,
}
impl<A> Locked<A> {
pub const fn new(inner: A) -> Self {
Locked {
inner: spin::Mutex::new(inner),
}
}
pub fn lock(&self) -> spin::MutexGuard<A> {
self.inner.lock()
}
}
/// Align the given address `addr` upwards to alignment `align`.
///
/// Requires that `align` is a power of two.
fn align_up(addr: usize, align: usize) -> usize {
(addr + align - 1) & !(align - 1)
}

View File

@@ -1,61 +0,0 @@
use super::{Locked, align_up};
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr;
pub struct BumpAllocator {
heap_start: usize,
heap_end: usize,
next: usize,
allocations: usize,
}
impl BumpAllocator {
/// Creates a new empty bump allocator.
pub const fn new() -> Self {
BumpAllocator {
heap_start: 0,
heap_end: 0,
next: 0,
allocations: 0,
}
}
/// Initializes the bump allocator with the given heap bounds.
///
/// This method is unsafe because the caller must ensure that the given
/// memory range is unused. Also, this method must be called only once.
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
self.heap_start = heap_start;
self.heap_end = heap_start.saturating_add(heap_size);
self.next = heap_start;
}
}
unsafe impl GlobalAlloc for Locked<BumpAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let mut bump = self.lock(); // get a mutable reference
let alloc_start = align_up(bump.next, layout.align());
let alloc_end = match alloc_start.checked_add(layout.size()) {
Some(end) => end,
None => return ptr::null_mut(),
};
if alloc_end > bump.heap_end {
ptr::null_mut() // out of memory
} else {
bump.next = alloc_end;
bump.allocations += 1;
alloc_start as *mut u8
}
}
unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
let mut bump = self.lock(); // get a mutable reference
bump.allocations -= 1;
if bump.allocations == 0 {
bump.next = bump.heap_start;
}
}
}

View File

@@ -1,109 +0,0 @@
use super::Locked;
use alloc::alloc::{GlobalAlloc, Layout};
use core::{
mem,
ptr::{self, NonNull},
};
/// The block sizes to use.
///
/// The sizes must each be power of 2 because they are also used as
/// the block alignment (alignments must be always powers of 2).
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];
/// Choose an appropriate block size for the given layout.
///
/// Returns an index into the `BLOCK_SIZES` array.
fn list_index(layout: &Layout) -> Option<usize> {
let required_block_size = layout.size().max(layout.align());
BLOCK_SIZES.iter().position(|&s| s >= required_block_size)
}
struct ListNode {
next: Option<&'static mut ListNode>,
}
pub struct FixedSizeBlockAllocator {
list_heads: [Option<&'static mut ListNode>; BLOCK_SIZES.len()],
fallback_allocator: linked_list_allocator::Heap,
}
impl FixedSizeBlockAllocator {
/// Creates an empty FixedSizeBlockAllocator.
pub const fn new() -> Self {
const EMPTY: Option<&'static mut ListNode> = None;
FixedSizeBlockAllocator {
list_heads: [EMPTY; BLOCK_SIZES.len()],
fallback_allocator: linked_list_allocator::Heap::empty(),
}
}
/// Initialize the allocator with the given heap bounds.
///
/// This function is unsafe because the caller must guarantee that the given
/// heap bounds are valid and that the heap is unused. This method must be
/// called only once.
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
unsafe {
self.fallback_allocator.init(heap_start, heap_size);
}
}
/// Allocates using the fallback allocator.
fn fallback_alloc(&mut self, layout: Layout) -> *mut u8 {
match self.fallback_allocator.allocate_first_fit(layout) {
Ok(ptr) => ptr.as_ptr(),
Err(_) => ptr::null_mut(),
}
}
}
unsafe impl GlobalAlloc for Locked<FixedSizeBlockAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let mut allocator = self.lock();
match list_index(&layout) {
Some(index) => {
match allocator.list_heads[index].take() {
Some(node) => {
allocator.list_heads[index] = node.next.take();
node as *mut ListNode as *mut u8
}
None => {
// no block exists in list => allocate new block
let block_size = BLOCK_SIZES[index];
// only works if all block sizes are a power of 2
let block_align = block_size;
let layout = Layout::from_size_align(block_size, block_align).unwrap();
allocator.fallback_alloc(layout)
}
}
}
None => allocator.fallback_alloc(layout),
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let mut allocator = self.lock();
match list_index(&layout) {
Some(index) => {
let new_node = ListNode {
next: allocator.list_heads[index].take(),
};
// verify that block has size and alignment required for storing node
assert!(mem::size_of::<ListNode>() <= BLOCK_SIZES[index]);
assert!(mem::align_of::<ListNode>() <= BLOCK_SIZES[index]);
let new_node_ptr = ptr as *mut ListNode;
unsafe {
new_node_ptr.write(new_node);
allocator.list_heads[index] = Some(&mut *new_node_ptr);
}
}
None => {
let ptr = NonNull::new(ptr).unwrap();
unsafe {
allocator.fallback_allocator.deallocate(ptr, layout);
}
}
}
}
}

View File

@@ -1,151 +0,0 @@
use super::{Locked, align_up};
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};
struct ListNode {
size: usize,
next: Option<&'static mut ListNode>,
}
impl ListNode {
const fn new(size: usize) -> Self {
ListNode { size, next: None }
}
fn start_addr(&self) -> usize {
self as *const Self as usize
}
fn end_addr(&self) -> usize {
self.start_addr() + self.size
}
}
pub struct LinkedListAllocator {
head: ListNode,
}
impl LinkedListAllocator {
/// Creates an empty LinkedListAllocator.
pub const fn new() -> Self {
Self {
head: ListNode::new(0),
}
}
/// Initialize the allocator with the given heap bounds.
///
/// This function is unsafe because the caller must guarantee that the given
/// heap bounds are valid and that the heap is unused. This method must be
/// called only once.
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
unsafe {
self.add_free_region(heap_start, heap_size);
}
}
/// Adds the given memory region to the front of the list.
unsafe fn add_free_region(&mut self, addr: usize, size: usize) {
// ensure that the freed region is capable of holding ListNode
assert_eq!(align_up(addr, mem::align_of::<ListNode>()), addr);
assert!(size >= mem::size_of::<ListNode>());
// create a new list node and append it at the start of the list
let mut node = ListNode::new(size);
node.next = self.head.next.take();
let node_ptr = addr as *mut ListNode;
unsafe {
node_ptr.write(node);
self.head.next = Some(&mut *node_ptr);
}
}
/// Looks for a free region with the given size and alignment and removes
/// it from the list.
///
/// Returns a tuple of the list node and the start address of the allocation.
fn find_region(&mut self, size: usize, align: usize) -> Option<(&'static mut ListNode, usize)> {
// reference to current list node, updated for each iteration
let mut current = &mut self.head;
// look for a large enough memory region in linked list
while let Some(ref mut region) = current.next {
if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
// region suitable for allocation -> remove node from list
let next = region.next.take();
let ret = Some((current.next.take().unwrap(), alloc_start));
current.next = next;
return ret;
} else {
// region not suitable -> continue with next region
current = current.next.as_mut().unwrap();
}
}
// no suitable region found
None
}
/// Try to use the given region for an allocation with given size and alignment.
///
/// Returns the allocation start address on success.
fn alloc_from_region(region: &ListNode, size: usize, align: usize) -> Result<usize, ()> {
let alloc_start = align_up(region.start_addr(), align);
let alloc_end = alloc_start.checked_add(size).ok_or(())?;
if alloc_end > region.end_addr() {
// region too small
return Err(());
}
let excess_size = region.end_addr() - alloc_end;
if excess_size > 0 && excess_size < mem::size_of::<ListNode>() {
// rest of region too small to hold a ListNode (required because the
// allocation splits the region in a used and a free part)
return Err(());
}
// region suitable for allocation
Ok(alloc_start)
}
/// Adjust the given layout so that the resulting allocated memory
/// region is also capable of storing a `ListNode`.
///
/// Returns the adjusted size and alignment as a (size, align) tuple.
fn size_align(layout: Layout) -> (usize, usize) {
let layout = layout
.align_to(mem::align_of::<ListNode>())
.expect("adjusting alignment failed")
.pad_to_align();
let size = layout.size().max(mem::size_of::<ListNode>());
(size, layout.align())
}
}
unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// perform layout adjustments
let (size, align) = LinkedListAllocator::size_align(layout);
let mut allocator = self.lock();
if let Some((region, alloc_start)) = allocator.find_region(size, align) {
let alloc_end = alloc_start.checked_add(size).expect("overflow");
let excess_size = region.end_addr() - alloc_end;
if excess_size > 0 {
unsafe {
allocator.add_free_region(alloc_end, excess_size);
}
}
alloc_start as *mut u8
} else {
ptr::null_mut()
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// perform layout adjustments
let (size, _) = LinkedListAllocator::size_align(layout);
unsafe { self.lock().add_free_region(ptr as usize, size) }
}
}

View File

@@ -1,52 +0,0 @@
use lazy_static::lazy_static;
use x86_64::VirtAddr;
use x86_64::structures::gdt::{Descriptor, GlobalDescriptorTable, SegmentSelector};
use x86_64::structures::tss::TaskStateSegment;
pub const DOUBLE_FAULT_IST_INDEX: u16 = 0;
lazy_static! {
static ref TSS: TaskStateSegment = {
let mut tss = TaskStateSegment::new();
tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] = {
const STACK_SIZE: usize = 4096 * 5;
static mut STACK: [u8; STACK_SIZE] = [0; STACK_SIZE];
let stack_start = VirtAddr::from_ptr(&raw const STACK);
let stack_end = stack_start + STACK_SIZE;
stack_end
};
tss
};
}
lazy_static! {
static ref GDT: (GlobalDescriptorTable, Selectors) = {
let mut gdt = GlobalDescriptorTable::new();
let code_selector = gdt.add_entry(Descriptor::kernel_code_segment());
let tss_selector = gdt.add_entry(Descriptor::tss_segment(&TSS));
(
gdt,
Selectors {
code_selector,
tss_selector,
},
)
};
}
struct Selectors {
code_selector: SegmentSelector,
tss_selector: SegmentSelector,
}
pub fn init() {
use x86_64::instructions::segmentation::{CS, Segment};
use x86_64::instructions::tables::load_tss;
GDT.0.load();
unsafe {
CS::set_reg(GDT.1.code_selector);
load_tss(GDT.1.tss_selector);
}
}

View File

@@ -1,119 +0,0 @@
use crate::{gdt, hlt_loop, print, println};
use lazy_static::lazy_static;
use pic8259::ChainedPics;
use spin;
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode};
pub const PIC_1_OFFSET: u8 = 32;
pub const PIC_2_OFFSET: u8 = PIC_1_OFFSET + 8;
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum InterruptIndex {
Timer = PIC_1_OFFSET,
Keyboard,
}
impl InterruptIndex {
fn as_u8(self) -> u8 {
self as u8
}
fn as_usize(self) -> usize {
usize::from(self.as_u8())
}
}
pub static PICS: spin::Mutex<ChainedPics> =
spin::Mutex::new(unsafe { ChainedPics::new(PIC_1_OFFSET, PIC_2_OFFSET) });
lazy_static! {
static ref IDT: InterruptDescriptorTable = {
let mut idt = InterruptDescriptorTable::new();
idt.breakpoint.set_handler_fn(breakpoint_handler);
idt.page_fault.set_handler_fn(page_fault_handler);
unsafe {
idt.double_fault
.set_handler_fn(double_fault_handler)
.set_stack_index(gdt::DOUBLE_FAULT_IST_INDEX);
}
idt[InterruptIndex::Timer.as_usize()].set_handler_fn(timer_interrupt_handler);
idt[InterruptIndex::Keyboard.as_usize()].set_handler_fn(keyboard_interrupt_handler);
idt
};
}
pub fn init_idt() {
IDT.load();
}
extern "x86-interrupt" fn breakpoint_handler(stack_frame: InterruptStackFrame) {
println!("EXCEPTION: BREAKPOINT\n{:#?}", stack_frame);
}
extern "x86-interrupt" fn page_fault_handler(
stack_frame: InterruptStackFrame,
error_code: PageFaultErrorCode,
) {
use x86_64::registers::control::Cr2;
println!("EXCEPTION: PAGE FAULT");
println!("Accessed Address: {:?}", Cr2::read());
println!("Error Code: {:?}", error_code);
println!("{:#?}", stack_frame);
hlt_loop();
}
extern "x86-interrupt" fn double_fault_handler(
stack_frame: InterruptStackFrame,
_error_code: u64,
) -> ! {
panic!("EXCEPTION: DOUBLE FAULT\n{:#?}", stack_frame);
}
extern "x86-interrupt" fn timer_interrupt_handler(_stack_frame: InterruptStackFrame) {
print!(".");
unsafe {
PICS.lock()
.notify_end_of_interrupt(InterruptIndex::Timer.as_u8());
}
}
extern "x86-interrupt" fn keyboard_interrupt_handler(_stack_frame: InterruptStackFrame) {
use pc_keyboard::{DecodedKey, HandleControl, Keyboard, ScancodeSet1, layouts};
use spin::Mutex;
use x86_64::instructions::port::Port;
lazy_static! {
static ref KEYBOARD: Mutex<Keyboard<layouts::Us104Key, ScancodeSet1>> =
Mutex::new(Keyboard::new(
ScancodeSet1::new(),
layouts::Us104Key,
HandleControl::Ignore
));
}
let mut keyboard = KEYBOARD.lock();
let mut port = Port::new(0x60);
let scancode: u8 = unsafe { port.read() };
if let Ok(Some(key_event)) = keyboard.add_byte(scancode) {
if let Some(key) = keyboard.process_keyevent(key_event) {
match key {
DecodedKey::Unicode(character) => print!("{}", character),
DecodedKey::RawKey(key) => print!("{:?}", key),
}
}
}
unsafe {
PICS.lock()
.notify_end_of_interrupt(InterruptIndex::Keyboard.as_u8());
}
}
#[test_case]
fn test_breakpoint_exception() {
// invoke a breakpoint exception
x86_64::instructions::interrupts::int3();
}

View File

@@ -1,26 +1,14 @@
#![no_std]
#![cfg_attr(test, no_main)]
#![feature(custom_test_frameworks)]
#![feature(abi_x86_interrupt)]
#![test_runner(crate::test_runner)]
#![reexport_test_harness_main = "test_main"]
extern crate alloc;
use core::panic::PanicInfo;
pub mod allocator;
pub mod gdt;
pub mod interrupts;
pub mod memory;
pub mod serial;
pub mod vga_buffer;
pub fn init() {
gdt::init();
interrupts::init_idt();
unsafe { interrupts::PICS.lock().initialize() };
x86_64::instructions::interrupts::enable();
}
pub trait Testable {
fn run(&self) -> ();
}
@@ -48,7 +36,7 @@ pub fn test_panic_handler(info: &PanicInfo) -> ! {
serial_println!("[failed]\n");
serial_println!("Error: {}\n", info);
exit_qemu(QemuExitCode::Failed);
hlt_loop();
loop {}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -67,24 +55,12 @@ pub fn exit_qemu(exit_code: QemuExitCode) {
}
}
pub fn hlt_loop() -> ! {
loop {
x86_64::instructions::hlt();
}
}
#[cfg(test)]
use bootloader::{BootInfo, entry_point};
#[cfg(test)]
entry_point!(test_kernel_main);
/// Entry point for `cargo xtest`
#[cfg(test)]
fn test_kernel_main(_boot_info: &'static BootInfo) -> ! {
init();
#[unsafe(no_mangle)]
pub extern "C" fn _start() -> ! {
test_main();
hlt_loop();
loop {}
}
#[cfg(test)]

View File

@@ -4,58 +4,17 @@
#![test_runner(blog_os::test_runner)]
#![reexport_test_harness_main = "test_main"]
extern crate alloc;
use alloc::{boxed::Box, rc::Rc, vec, vec::Vec};
use blog_os::println;
use bootloader::{BootInfo, entry_point};
use core::panic::PanicInfo;
entry_point!(kernel_main);
fn kernel_main(boot_info: &'static BootInfo) -> ! {
use blog_os::allocator;
use blog_os::memory::{self, BootInfoFrameAllocator};
use x86_64::VirtAddr;
#[unsafe(no_mangle)]
pub extern "C" fn _start() -> ! {
println!("Hello World{}", "!");
blog_os::init();
let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset);
let mut mapper = unsafe { memory::init(phys_mem_offset) };
let mut frame_allocator = unsafe { BootInfoFrameAllocator::init(&boot_info.memory_map) };
allocator::init_heap(&mut mapper, &mut frame_allocator).expect("heap initialization failed");
// allocate a number on the heap
let heap_value = Box::new(41);
println!("heap_value at {:p}", heap_value);
// create a dynamically sized vector
let mut vec = Vec::new();
for i in 0..500 {
vec.push(i);
}
println!("vec at {:p}", vec.as_slice());
// create a reference counted vector -> will be freed when count reaches 0
let reference_counted = Rc::new(vec![1, 2, 3]);
let cloned_reference = reference_counted.clone();
println!(
"current reference count is {}",
Rc::strong_count(&cloned_reference)
);
core::mem::drop(reference_counted);
println!(
"reference count is {} now",
Rc::strong_count(&cloned_reference)
);
#[cfg(test)]
test_main();
println!("It did not crash!");
blog_os::hlt_loop();
loop {}
}
/// This function is called on panic.
@@ -63,7 +22,7 @@ fn kernel_main(boot_info: &'static BootInfo) -> ! {
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
println!("{}", info);
blog_os::hlt_loop();
loop {}
}
#[cfg(test)]

View File

@@ -1,86 +0,0 @@
use bootloader::bootinfo::{MemoryMap, MemoryRegionType};
use x86_64::{
PhysAddr, VirtAddr,
structures::paging::{FrameAllocator, OffsetPageTable, PageTable, PhysFrame, Size4KiB},
};
/// Initialize a new OffsetPageTable.
///
/// This function is unsafe because the caller must guarantee that the
/// complete physical memory is mapped to virtual memory at the passed
/// `physical_memory_offset`. Also, this function must be only called once
/// to avoid aliasing `&mut` references (which is undefined behavior).
pub unsafe fn init(physical_memory_offset: VirtAddr) -> OffsetPageTable<'static> {
unsafe {
let level_4_table = active_level_4_table(physical_memory_offset);
OffsetPageTable::new(level_4_table, physical_memory_offset)
}
}
/// Returns a mutable reference to the active level 4 table.
///
/// This function is unsafe because the caller must guarantee that the
/// complete physical memory is mapped to virtual memory at the passed
/// `physical_memory_offset`. Also, this function must be only called once
/// to avoid aliasing `&mut` references (which is undefined behavior).
unsafe fn active_level_4_table(physical_memory_offset: VirtAddr) -> &'static mut PageTable {
use x86_64::registers::control::Cr3;
let (level_4_table_frame, _) = Cr3::read();
let phys = level_4_table_frame.start_address();
let virt = physical_memory_offset + phys.as_u64();
let page_table_ptr: *mut PageTable = virt.as_mut_ptr();
unsafe { &mut *page_table_ptr }
}
/// A FrameAllocator that always returns `None`.
pub struct EmptyFrameAllocator;
unsafe impl FrameAllocator<Size4KiB> for EmptyFrameAllocator {
fn allocate_frame(&mut self) -> Option<PhysFrame> {
None
}
}
/// A FrameAllocator that returns usable frames from the bootloader's memory map.
pub struct BootInfoFrameAllocator {
memory_map: &'static MemoryMap,
next: usize,
}
impl BootInfoFrameAllocator {
/// Create a FrameAllocator from the passed memory map.
///
/// This function is unsafe because the caller must guarantee that the passed
/// memory map is valid. The main requirement is that all frames that are marked
/// as `USABLE` in it are really unused.
pub unsafe fn init(memory_map: &'static MemoryMap) -> Self {
BootInfoFrameAllocator {
memory_map,
next: 0,
}
}
/// Returns an iterator over the usable frames specified in the memory map.
fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
// get usable regions from memory map
let regions = self.memory_map.iter();
let usable_regions = regions.filter(|r| r.region_type == MemoryRegionType::Usable);
// map each region to its address range
let addr_ranges = usable_regions.map(|r| r.range.start_addr()..r.range.end_addr());
// transform to an iterator of frame start addresses
let frame_addresses = addr_ranges.flat_map(|r| r.step_by(4096));
// create `PhysFrame` types from the start addresses
frame_addresses.map(|addr| PhysFrame::containing_address(PhysAddr::new(addr)))
}
}
unsafe impl FrameAllocator<Size4KiB> for BootInfoFrameAllocator {
fn allocate_frame(&mut self) -> Option<PhysFrame> {
let frame = self.usable_frames().nth(self.next);
self.next += 1;
frame
}
}

View File

@@ -13,14 +13,10 @@ lazy_static! {
#[doc(hidden)]
pub fn _print(args: ::core::fmt::Arguments) {
use core::fmt::Write;
use x86_64::instructions::interrupts;
interrupts::without_interrupts(|| {
SERIAL1
.lock()
.write_fmt(args)
.expect("Printing to serial failed");
});
SERIAL1
.lock()
.write_fmt(args)
.expect("Printing to serial failed");
}
/// Prints to the host through the serial interface.

View File

@@ -163,16 +163,11 @@ macro_rules! println {
($($arg:tt)*) => ($crate::print!("{}\n", format_args!($($arg)*)));
}
/// Prints the given formatted string to the VGA text buffer
/// through the global `WRITER` instance.
/// Prints the given formatted string to the VGA text buffer through the global `WRITER` instance.
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
use core::fmt::Write;
use x86_64::instructions::interrupts;
interrupts::without_interrupts(|| {
WRITER.lock().write_fmt(args).unwrap();
});
WRITER.lock().write_fmt(args).unwrap();
}
#[test_case]
@@ -189,16 +184,10 @@ fn test_println_many() {
#[test_case]
fn test_println_output() {
use core::fmt::Write;
use x86_64::instructions::interrupts;
let s = "Some test string that fits on a single line";
interrupts::without_interrupts(|| {
let mut writer = WRITER.lock();
writeln!(writer, "\n{}", s).expect("writeln failed");
for (i, c) in s.chars().enumerate() {
let screen_char = writer.buffer.chars[BUFFER_HEIGHT - 2][i].read();
assert_eq!(char::from(screen_char.ascii_character), c);
}
});
println!("{}", s);
for (i, c) in s.chars().enumerate() {
let screen_char = WRITER.lock().buffer.chars[BUFFER_HEIGHT - 2][i].read();
assert_eq!(char::from(screen_char.ascii_character), c);
}
}

View File

@@ -1,70 +0,0 @@
#![no_std]
#![no_main]
#![feature(custom_test_frameworks)]
#![test_runner(blog_os::test_runner)]
#![reexport_test_harness_main = "test_main"]
extern crate alloc;
use alloc::{boxed::Box, vec::Vec};
use blog_os::allocator::HEAP_SIZE;
use bootloader::{BootInfo, entry_point};
use core::panic::PanicInfo;
entry_point!(main);
fn main(boot_info: &'static BootInfo) -> ! {
use blog_os::allocator;
use blog_os::memory::{self, BootInfoFrameAllocator};
use x86_64::VirtAddr;
blog_os::init();
let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset);
let mut mapper = unsafe { memory::init(phys_mem_offset) };
let mut frame_allocator = unsafe { BootInfoFrameAllocator::init(&boot_info.memory_map) };
allocator::init_heap(&mut mapper, &mut frame_allocator).expect("heap initialization failed");
test_main();
loop {}
}
#[test_case]
fn simple_allocation() {
let heap_value_1 = Box::new(41);
let heap_value_2 = Box::new(13);
assert_eq!(*heap_value_1, 41);
assert_eq!(*heap_value_2, 13);
}
#[test_case]
fn large_vec() {
let n = 1000;
let mut vec = Vec::new();
for i in 0..n {
vec.push(i);
}
assert_eq!(vec.iter().sum::<u64>(), (n - 1) * n / 2);
}
#[test_case]
fn many_boxes() {
for i in 0..HEAP_SIZE {
let x = Box::new(i);
assert_eq!(*x, i);
}
}
#[test_case]
fn many_boxes_long_lived() {
let long_lived = Box::new(1); // new
for i in 0..HEAP_SIZE {
let x = Box::new(i);
assert_eq!(*x, i);
}
assert_eq!(*long_lived, 1); // new
}
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
blog_os::test_panic_handler(info)
}

View File

@@ -1,58 +0,0 @@
#![no_std]
#![no_main]
#![feature(abi_x86_interrupt)]
use blog_os::{QemuExitCode, exit_qemu, serial_print, serial_println};
use core::panic::PanicInfo;
use lazy_static::lazy_static;
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame};
#[unsafe(no_mangle)]
pub extern "C" fn _start() -> ! {
serial_print!("stack_overflow::stack_overflow...\t");
blog_os::gdt::init();
init_test_idt();
// trigger a stack overflow
stack_overflow();
panic!("Execution continued after stack overflow");
}
#[allow(unconditional_recursion)]
fn stack_overflow() {
stack_overflow(); // for each recursion, the return address is pushed
volatile::Volatile::new(0).read(); // prevent tail recursion optimizations
}
lazy_static! {
static ref TEST_IDT: InterruptDescriptorTable = {
let mut idt = InterruptDescriptorTable::new();
unsafe {
idt.double_fault
.set_handler_fn(test_double_fault_handler)
.set_stack_index(blog_os::gdt::DOUBLE_FAULT_IST_INDEX);
}
idt
};
}
pub fn init_test_idt() {
TEST_IDT.load();
}
extern "x86-interrupt" fn test_double_fault_handler(
_stack_frame: InterruptStackFrame,
_error_code: u64,
) -> ! {
serial_println!("[ok]");
exit_qemu(QemuExitCode::Success);
loop {}
}
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
blog_os::test_panic_handler(info)
}

View File

@@ -3,7 +3,7 @@
"data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128",
"arch": "x86_64",
"target-endian": "little",
"target-pointer-width": "64",
"target-pointer-width": 64,
"target-c-int-width": 32,
"os": "none",
"executables": true,