Create TSS and GDT modules and use a double fault stack
src/interrupts/gdt.rs (new file, 120 lines)
@@ -0,0 +1,120 @@
use bit_field::BitField;
use collections::vec::Vec;
use interrupts::tss::TaskStateSegment;

pub struct Gdt(Vec<u64>);

impl Gdt {
    pub fn new() -> Gdt {
        let zero_entry = 0;
        Gdt(vec![zero_entry])
    }

    pub fn add_entry(&mut self, entry: Entry) -> Selector {
        use core::mem::size_of;
        let index = self.0.len() * size_of::<u64>();

        match entry {
            Entry::UserSegment(entry) => self.0.push(entry),
            Entry::SystemSegment(entry_low, entry_high) => {
                self.0.push(entry_low);
                self.0.push(entry_high);
            }
        }

        Selector(index as u16)
    }

    pub fn load(&'static self) {
        use x86::dtables::{DescriptorTablePointer, lgdt};
        use core::mem::size_of;

        let ptr = DescriptorTablePointer {
            base: self.0.as_ptr() as u64,
            limit: (self.0.len() * size_of::<u64>() - 1) as u16,
        };

        unsafe { lgdt(&ptr) };
    }
}

pub enum Entry {
    UserSegment(u64),
    SystemSegment(u64, u64),
}

impl Entry {
    pub fn code_segment() -> Entry {
        let flags = DESCRIPTOR_TYPE | PRESENT | READ_WRITE | EXECUTABLE | LONG_MODE;
        Entry::UserSegment(flags.bits())
    }

    pub fn data_segment() -> Entry {
        let flags = DESCRIPTOR_TYPE | PRESENT | READ_WRITE;
        Entry::UserSegment(flags.bits())
    }

    pub fn tss_segment(tss: &'static TaskStateSegment) -> Entry {
        use core::mem::size_of;

        let ptr = tss as *const _ as u64;

        let mut low = PRESENT.bits();
        low.set_range(0..16, (size_of::<TaskStateSegment>() - 1) as u64);
        low.set_range(16..40, ptr.get_range(0..24));
        low.set_range(40..44, 0b1001); // type: available 64-bit tss

        let mut high = 0;
        high.set_range(0..32, ptr.get_range(32..64));

        Entry::SystemSegment(low, high)
    }
}

bitflags! {
    flags EntryFlags: u64 {
        const READ_WRITE = 1 << 41,
        const CONFORMING = 1 << 42,
        const EXECUTABLE = 1 << 43,
        const DESCRIPTOR_TYPE = 1 << 44,
        const PRESENT = 1 << 47,
        const LONG_MODE = 1 << 53,
    }
}

#[derive(Debug, Clone, Copy)]
pub struct Selector(u16);

impl Selector {
    pub fn new() -> Selector {
        Selector(0)
    }
}

pub fn reload_segment_registers(code_selector: Selector, data_selector: Selector) {
    let current_code_selector: u16;
    let current_data_selector: u16;

    unsafe {
        asm!("mov $0, cs" : "=r" (current_code_selector) ::: "intel");
        asm!("mov $0, ds" : "=r" (current_data_selector) ::: "intel");
    }
    assert_eq!(code_selector.0, current_code_selector);
    assert_eq!(data_selector.0, current_data_selector);

    // jmp ax:.new_code_segment // TODO
    // .new_code_segment:
    // unsafe { asm!("
    //     mov ax, $1
    //     mov ss, ax
    //     mov ds, ax
    //     mov es, ax
    //     ":: "r" (code_selector.0), "r" (data_selector.0) :: "intel")};
    //
}

/// Load the task state register.
pub unsafe fn load_ltr(selector: Selector) {
    asm!("ltr $0" :: "r" (selector));
}
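Entry::tss_segment packs the system-segment descriptor with the bit_field crate's set_range/get_range calls. As a standalone sketch (not part of the commit), the same packing can be written in hosted Rust with plain shifts; the base address and the 104-byte TSS size below are example values.

// Illustration only: mirrors what Entry::tss_segment writes, using plain
// shifts instead of the bit_field crate.
fn pack_tss_descriptor(base: u64, size: u64) -> (u64, u64) {
    let present = 1u64 << 47;                 // PRESENT flag (bit 47)
    let limit = (size - 1) & 0xffff;          // bits 0..16: limit = size - 1
    let base_low = (base & 0xff_ffff) << 16;  // base bits 0..24 -> bits 16..40
    let descriptor_type = 0b1001u64 << 40;    // bits 40..44: available 64-bit TSS

    let low = present | limit | base_low | descriptor_type;
    let high = base >> 32;                    // base bits 32..64 -> high bits 0..32
    // note: base bits 24..32 (low bits 56..64) are not written by this version
    (low, high)
}

fn main() {
    // 104 bytes is the size of the hardware task state segment
    let (low, high) = pack_tss_descriptor(0x0000_1234_5678_9abc, 104);
    println!("low = {:#018x}, high = {:#018x}", low, high);
}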
src/interrupts/idt.rs
@@ -102,14 +102,14 @@ impl EntryOptions {
     }
 
     #[allow(dead_code)]
-    pub fn set_privilege_level(&mut self, dpl: u16) -> &mut Self {
-        self.0.set_range(13..15, dpl);
+    pub fn set_privilege_level(&mut self, dpl: u8) -> &mut Self {
+        self.0.set_range(13..15, dpl.into());
         self
     }
 
     #[allow(dead_code)]
-    pub fn set_stack_index(&mut self, index: u16) -> &mut Self {
-        self.0.set_range(0..3, index);
+    pub fn set_stack_index(&mut self, index: u8) -> &mut Self {
+        self.0.set_range(0..3, index.into());
        self
     }
 }
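EntryOptions wraps the 16-bit options field of an IDT entry, so the new u8 parameters are widened with .into() before being written: the IST index lives in bits 0..3 and the DPL in bits 13..15. A standalone sketch (not part of the commit) of that layout with plain bit operations; the 0x8e00 starting value stands in for a present interrupt gate.

// Illustration only: hosted Rust, example values.
fn main() {
    let mut options: u16 = 0x8e00;      // present, interrupt gate, DPL 0

    let ist_index: u8 = 1;              // 1-based IST slot, as insert_stack returns
    options = (options & !0b111) | (ist_index as u16 & 0b111);

    let dpl: u8 = 3;                    // ring 3
    options = (options & !(0b11 << 13)) | ((dpl as u16 & 0b11) << 13);

    assert_eq!(options & 0b111, 1);     // IST index in bits 0..3
    assert_eq!((options >> 13) & 0b11, 3); // DPL in bits 13..15
    println!("options = {:#06x}", options);
}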
src/interrupts/mod.rs
@@ -7,7 +7,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use spin::Once;
+use memory::StackPointer;
+
 mod idt;
+mod tss;
+mod gdt;
 
 macro_rules! save_scratch_registers {
     () => {
@@ -86,22 +91,53 @@ macro_rules! handler_with_error_code {
     }}
 }
 
-lazy_static! {
-    static ref IDT: idt::Idt = {
+static IDT: Once<idt::Idt> = Once::new();
+static TSS: Once<tss::TaskStateSegment> = Once::new();
+static GDT: Once<gdt::Gdt> = Once::new();
+
+pub fn init(double_fault_stack: StackPointer) {
+    let mut double_fault_ist_index = 0;
+
+    let tss = TSS.call_once(|| {
+        let mut tss = tss::TaskStateSegment::new();
+
+        double_fault_ist_index = tss.interrupt_stacks
+            .insert_stack(double_fault_stack)
+            .expect("IST flush_all");
+
+        tss
+    });
+
+    let mut code_selector = gdt::Selector::new();
+    let mut data_selector = gdt::Selector::new();
+    let mut tss_selector = gdt::Selector::new();
+    let gdt = GDT.call_once(|| {
+        let mut gdt = gdt::Gdt::new();
+
+        code_selector = gdt.add_entry(gdt::Entry::code_segment());
+        data_selector = gdt.add_entry(gdt::Entry::data_segment());
+        tss_selector = gdt.add_entry(gdt::Entry::tss_segment(tss));
+
+        gdt
+    });
+    gdt.load();
+    gdt::reload_segment_registers(code_selector, data_selector);
+    unsafe { gdt::load_ltr(tss_selector) };
+
+    let idt = IDT.call_once(|| {
         let mut idt = idt::Idt::new();
 
         idt.set_handler(0, handler!(divide_by_zero_handler));
         idt.set_handler(3, handler!(breakpoint_handler));
         idt.set_handler(6, handler!(invalid_opcode_handler));
-        idt.set_handler(8, handler_with_error_code!(double_fault_handler));
+        idt.set_handler(8, handler_with_error_code!(double_fault_handler))
+            .set_stack_index(double_fault_ist_index);
         idt.set_handler(14, handler_with_error_code!(page_fault_handler));
 
         idt
-    };
-}
-
-pub fn init() {
-    IDT.load();
+    });
+    idt.load();
 }
 
 #[derive(Debug)]
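init adds the three descriptors in a fixed order, so the selectors are simply byte offsets into the GDT: Gdt::new starts with a single zero entry, user segments take one u64 slot each, and the TSS system descriptor takes two. A standalone sketch (not part of the commit) of that arithmetic.

// Illustration only: the byte offsets add_entry hands back for the GDT built
// in interrupts::init. Hosted Rust, plain arithmetic.
fn main() {
    let mut len = 1;                              // the zero entry from Gdt::new()

    let code_selector = len * 8; len += 1;        // user segment: one u64 slot
    let data_selector = len * 8; len += 1;        // user segment: one u64 slot
    let tss_selector = len * 8;  len += 2;        // system segment: two u64 slots

    assert_eq!((code_selector, data_selector, tss_selector), (0x08, 0x10, 0x18));
    assert_eq!(len * 8, 40);                      // final GDT size in bytes
}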
src/interrupts/tss.rs (new file, 50 lines)
@@ -0,0 +1,50 @@
use memory::StackPointer;

#[derive(Debug)]
#[repr(C, packed)]
pub struct TaskStateSegment {
    reserved_0: u32,
    pub privilege_stacks: PrivilegeStackTable,
    reserved_1: u64,
    pub interrupt_stacks: InterruptStackTable,
    reserved_2: u64,
    reserved_3: u16,
    iomap_base: u16,
}

impl TaskStateSegment {
    pub fn new() -> TaskStateSegment {
        TaskStateSegment {
            privilege_stacks: PrivilegeStackTable([None, None, None]),
            interrupt_stacks: InterruptStackTable::new(),
            iomap_base: 0,
            reserved_0: 0,
            reserved_1: 0,
            reserved_2: 0,
            reserved_3: 0,
        }
    }
}

#[derive(Debug)]
pub struct PrivilegeStackTable([Option<StackPointer>; 3]);

#[derive(Debug)]
pub struct InterruptStackTable([Option<StackPointer>; 7]);

impl InterruptStackTable {
    pub fn new() -> InterruptStackTable {
        InterruptStackTable([None, None, None, None, None, None, None])
    }

    pub fn insert_stack(&mut self, stack_pointer: StackPointer) -> Result<u8, StackPointer> {
        // TSS index starts at 1
        for (entry, i) in self.0.iter_mut().zip(1..) {
            if entry.is_none() {
                *entry = Some(stack_pointer);
                return Ok(i);
            }
        }
        Err(stack_pointer)
    }
}
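With #[repr(C, packed)] and the non-zero niche making Option<StackPointer> pointer-sized, the struct adds up to the 104-byte hardware TSS, and insert_stack hands out 1-based slot numbers because IST index 0 in an IDT entry means "no IST stack". A standalone sketch (not part of the commit) of both points, with made-up slot values.

// Illustration only: hosted Rust.
fn main() {
    // reserved_0 + 3 privilege stacks + reserved_1 + 7 interrupt stacks
    // + reserved_2 + reserved_3 + iomap_base, all in bytes
    let tss_size = 4 + 3 * 8 + 8 + 7 * 8 + 8 + 2 + 2;
    assert_eq!(tss_size, 104);

    // first-fit insertion over 7 slots with 1-based indices, like insert_stack
    fn insert(slots: &mut [Option<u64>; 7], value: u64) -> Result<u8, u64> {
        for (entry, i) in slots.iter_mut().zip(1..) {
            if entry.is_none() {
                *entry = Some(value);
                return Ok(i);
            }
        }
        Err(value)
    }

    let mut slots = [None; 7];
    assert_eq!(insert(&mut slots, 0xaaaa), Ok(1)); // first stack lands in IST slot 1
    assert_eq!(insert(&mut slots, 0xbbbb), Ok(2));
}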
src/lib.rs (13 lines changed)
@@ -13,6 +13,8 @@
 #![feature(asm)]
 #![feature(naked_functions)]
 #![feature(core_intrinsics)]
+#![feature(nonzero)]
+#![feature(drop_types_in_const)]
 #![no_std]
 
 extern crate rlibc;
@@ -51,10 +53,15 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) {
     enable_write_protect_bit();
 
     // set up guard page and map the heap pages
-    memory::init(boot_info);
+    let mut memory_controller = memory::init(boot_info);
 
     // initialize our IDT
-    interrupts::init();
+    let double_fault_stack = memory_controller.alloc_stack(1)
+        .expect("could not allocate double fault stack");
+    interrupts::init(double_fault_stack);
 
     unsafe { int!(3) };
 
+    stack_overflow();
+
     // trigger a debug exception
@@ -69,7 +76,7 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) {
     }
 
     fn stack_overflow() {
-        let _large_array = [1; 100000];
+        stack_overflow();
     }
 
     int_overflow();
src/memory/mod.rs
@@ -9,15 +9,17 @@
 
 pub use self::area_frame_allocator::AreaFrameAllocator;
 pub use self::paging::remap_the_kernel;
+pub use self::stack_allocator::{StackAllocator, StackPointer};
 use self::paging::PhysicalAddress;
 use multiboot2::BootInformation;
 
 mod area_frame_allocator;
 mod paging;
+mod stack_allocator;
 
 pub const PAGE_SIZE: usize = 4096;
 
-pub fn init(boot_info: &BootInformation) {
+pub fn init(boot_info: &BootInformation) -> MemoryController {
     assert_has_not_been_called!("memory::init must be called only once");
 
     let memory_map_tag = boot_info.memory_map_tag().expect("Memory map tag required");
@@ -58,6 +60,35 @@ pub fn init(boot_info: &BootInformation) {
     for page in Page::range_inclusive(heap_start_page, heap_end_page) {
         active_table.map(page, paging::WRITABLE, &mut frame_allocator);
     }
 
+    let stack_allocator = {
+        let stack_alloc_start_page = heap_end_page + 1;
+        let stack_alloc_end_page = stack_alloc_start_page + 100;
+        let stack_alloc_page_range = Page::range_inclusive(stack_alloc_start_page,
+                                                           stack_alloc_end_page);
+        stack_allocator::new_stack_allocator(stack_alloc_page_range)
+    };
+
+    MemoryController {
+        active_table: active_table,
+        frame_allocator: frame_allocator,
+        stack_allocator: stack_allocator,
+    }
 }
 
+pub struct MemoryController {
+    active_table: paging::ActivePageTable,
+    frame_allocator: AreaFrameAllocator,
+    stack_allocator: StackAllocator,
+}
+
+impl MemoryController {
+    pub fn alloc_stack(&mut self, size_in_pages: usize) -> Result<StackPointer, ()> {
+        let &mut MemoryController { ref mut active_table,
+                                    ref mut frame_allocator,
+                                    ref mut stack_allocator } = self;
+        stack_allocator.alloc_stack(active_table, frame_allocator, size_in_pages)
+    }
+}
 
 #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
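alloc_stack destructures self so that the three fields can be borrowed mutably at the same time; calling stack_allocator.alloc_stack(...) while also passing &mut self.active_table through self directly would be rejected by the borrow checker. A standalone sketch (not part of the commit) of the same borrow-splitting pattern on made-up types, written with current match ergonomics instead of the ref mut form above.

// Illustration only: splitting one &mut self borrow into independent &mut
// borrows of its fields via destructuring.
struct Table(u32);
struct Alloc(u32);

struct Controller {
    table: Table,
    alloc: Alloc,
}

impl Controller {
    fn use_both(&mut self) {
        // one &mut self becomes two disjoint &mut borrows
        let Controller { table, alloc } = self;
        bump(table, alloc);
    }
}

fn bump(table: &mut Table, alloc: &mut Alloc) {
    table.0 += 1;
    alloc.0 += 1;
}

fn main() {
    let mut c = Controller { table: Table(0), alloc: Alloc(0) };
    c.use_both();
    assert_eq!((c.table.0, c.alloc.0), (1, 1));
}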
src/memory/paging/mod.rs
@@ -11,7 +11,7 @@ pub use self::entry::*;
 use memory::{PAGE_SIZE, Frame, FrameAllocator};
 use self::temporary_page::TemporaryPage;
 pub use self::mapper::Mapper;
-use core::ops::{Deref, DerefMut};
+use core::ops::{Deref, DerefMut, Add};
 use multiboot2::BootInformation;
 
 mod entry;
@@ -37,7 +37,7 @@ impl Page {
         Page { number: address / PAGE_SIZE }
     }
 
-    fn start_address(&self) -> usize {
+    pub fn start_address(&self) -> usize {
         self.number * PAGE_SIZE
     }
 
@@ -62,6 +62,14 @@ impl Page {
     }
 }
 
+impl Add<usize> for Page {
+    type Output = Page;
+
+    fn add(self, rhs: usize) -> Page {
+        Page { number: self.number + rhs }
+    }
+}
+
 pub struct PageIter {
     start: Page,
     end: Page,
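The new Add impl works on page numbers, not byte addresses, which is what lets memory::init write heap_end_page + 1 and stack_alloc_start_page + 100. A standalone sketch (not part of the commit) of what such an offset means in bytes, with a made-up page number.

// Illustration only: adding to a page adds whole pages, so the byte distance
// is the page count times 4096.
fn main() {
    const PAGE_SIZE: usize = 4096;

    let heap_end_page_number = 0x800;                 // hypothetical
    let stack_alloc_start = heap_end_page_number + 1; // first page after the heap
    let stack_alloc_end = stack_alloc_start + 100;    // 100 pages further

    let byte_span = (stack_alloc_end - stack_alloc_start) * PAGE_SIZE;
    assert_eq!(byte_span, 100 * 4096);                // 400 KiB of page range
}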
src/memory/stack_allocator.rs (new file, 51 lines)
@@ -0,0 +1,51 @@
use memory::paging::{self, Page, PageIter, ActivePageTable};
use memory::{PAGE_SIZE, FrameAllocator};
use core::nonzero::NonZero;

pub fn new_stack_allocator(page_range: PageIter) -> StackAllocator {
    StackAllocator { range: page_range }
}

pub struct StackAllocator {
    range: PageIter,
}

impl StackAllocator {
    pub fn alloc_stack<FA: FrameAllocator>(&mut self,
                                           active_table: &mut ActivePageTable,
                                           frame_allocator: &mut FA,
                                           size_in_pages: usize)
                                           -> Result<StackPointer, ()> {
        if size_in_pages == 0 {
            return Err(());
        }

        let _guard_page = self.range.next().ok_or(())?;

        let stack_start = self.range.next().ok_or(())?;
        let stack_end = if size_in_pages == 1 {
            stack_start
        } else {
            self.range.nth(size_in_pages - 1).ok_or(())?
        };

        for page in Page::range_inclusive(stack_start, stack_end) {
            active_table.map(page, paging::WRITABLE, frame_allocator);
        }

        let top_of_stack = stack_end.start_address() + PAGE_SIZE;
        StackPointer::new(top_of_stack).ok_or(())
    }
}

#[derive(Debug)]
pub struct StackPointer(NonZero<usize>);

impl StackPointer {
    fn new(ptr: usize) -> Option<StackPointer> {
        match ptr {
            0 => None,
            ptr => Some(StackPointer(unsafe { NonZero::new(ptr) })),
        }
    }
}
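alloc_stack takes one page from the range as a guard page and never maps it, so a kernel stack that overflows hits an unmapped page (and faults) instead of silently writing into the heap; the returned pointer is the top of the mapped region, since stacks grow downward on x86_64. A standalone sketch (not part of the commit) of that address arithmetic for a one-page stack, with a made-up page number.

// Illustration only: hosted Rust.
fn main() {
    const PAGE_SIZE: usize = 4096;

    let guard_page_number = 0x5000;              // hypothetical, stays unmapped
    let stack_page_number = guard_page_number + 1;

    let stack_bottom = stack_page_number * PAGE_SIZE;
    let top_of_stack = stack_bottom + PAGE_SIZE; // start_address() + PAGE_SIZE

    assert_eq!(top_of_stack - stack_bottom, PAGE_SIZE);
    assert_eq!(stack_bottom - PAGE_SIZE, guard_page_number * PAGE_SIZE);
    println!("stack top at {:#x}, guard page at {:#x}",
             top_of_stack, guard_page_number * PAGE_SIZE);
}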