Switch to x86_64 crate and use its idt module

Author: Philipp Oppermann
Date:   2017-03-15 14:13:35 +01:00
parent 7ef97adb9c
commit 02eaeb7af1
7 changed files with 53 additions and 261 deletions

--- a/Cargo.toml
+++ b/Cargo.toml

@@ -11,6 +11,7 @@ once = "0.3.2"
 rlibc = "0.1.4"
 spin = "0.4.5"
 volatile = "0.1.0"
+x86_64 = "0.1.0"
 
 [dependencies.hole_list_allocator]
 path = "libs/hole_list_allocator"
@@ -19,9 +20,6 @@ path = "libs/hole_list_allocator"
 features = ["spin_no_std"]
 version = "0.2.1"
 
-[dependencies.x86]
-default-features = false
-version = "0.8.0"
-
 [lib]
 crate-type = ["staticlib"]
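
Note: the manifest swap means every import in the tree changes its prefix. For reference, the old-to-new path mapping used throughout this commit, collected verbatim from the hunks below:

    // old (x86 0.8.0)                           ->  new (x86_64 0.1.0)
    // x86::shared::PrivilegeLevel               ->  x86_64::PrivilegeLevel
    // x86::bits64::task::TaskStateSegment       ->  x86_64::structures::tss::TaskStateSegment
    // x86::shared::segmentation::SegmentSelector -> x86_64::structures::gdt::SegmentSelector
    // x86::shared::dtables::{..., lgdt}         ->  x86_64::instructions::tables::{..., lgdt}
    // x86::shared::msr::{IA32_EFER, rdmsr, wrmsr} -> x86_64::registers::msr::{IA32_EFER, rdmsr, wrmsr}
    // x86::shared::control_regs                 ->  x86_64::registers::control_regs
    // x86::shared::tlb                          ->  x86_64::instructions::tlb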

--- a/src/interrupts/gdt.rs
+++ b/src/interrupts/gdt.rs

@@ -1,6 +1,6 @@
-use x86::bits64::task::TaskStateSegment;
-use x86::shared::segmentation::SegmentSelector;
-use x86::shared::PrivilegeLevel;
+use x86_64::structures::tss::TaskStateSegment;
+use x86_64::structures::gdt::SegmentSelector;
+use x86_64::PrivilegeLevel;
 
 pub struct Gdt {
     table: [u64; 8],
@@ -39,12 +39,11 @@ impl Gdt {
     }
 
     pub fn load(&'static self) {
-        use x86::shared::dtables::{DescriptorTablePointer, lgdt};
-        use x86::shared::segmentation;
+        use x86_64::instructions::tables::{DescriptorTablePointer, lgdt};
         use core::mem::size_of;
 
         let ptr = DescriptorTablePointer {
-            base: self.table.as_ptr() as *const segmentation::SegmentDescriptor,
+            base: self.table.as_ptr() as u64,
            limit: (self.table.len() * size_of::<u64>() - 1) as u16,
        };
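
Note: with the x86_64 crate, DescriptorTablePointer stores the base as a plain u64 instead of a typed descriptor pointer, so the cast shrinks to `as u64`. A sketch of the whole method as it reads after this change (the trailing lgdt call sits below the hunk and is assumed unchanged):

    pub struct Gdt {
        table: [u64; 8], // sketch: only the field shown in the diff
    }

    impl Gdt {
        pub fn load(&'static self) {
            use x86_64::instructions::tables::{DescriptorTablePointer, lgdt};
            use core::mem::size_of;

            let ptr = DescriptorTablePointer {
                base: self.table.as_ptr() as u64, // raw address, no descriptor type needed
                limit: (self.table.len() * size_of::<u64>() - 1) as u16, // size in bytes, minus one
            };

            unsafe { lgdt(&ptr) }; // still unsafe: loads GDTR from our pointer
        }
    }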

--- a/src/interrupts/idt.rs
+++ /dev/null

@@ -1,116 +0,0 @@
-// Copyright 2016 Philipp Oppermann. See the README.md
-// file at the top-level directory of this distribution.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use x86::shared::segmentation::{self, SegmentSelector};
-use x86::shared::PrivilegeLevel;
-
-pub struct Idt([Entry; 16]);
-
-impl Idt {
-    pub fn new() -> Idt {
-        Idt([Entry::missing(); 16])
-    }
-
-    pub fn set_handler(&mut self, entry: u8, handler: HandlerFunc) -> &mut EntryOptions {
-        self.0[entry as usize] = Entry::new(segmentation::cs(), handler);
-        &mut self.0[entry as usize].options
-    }
-
-    pub fn load(&'static self) {
-        use x86::shared::dtables::{DescriptorTablePointer, lidt};
-        use core::mem::size_of;
-
-        let ptr = DescriptorTablePointer {
-            base: self as *const _ as *const ::x86::bits64::irq::IdtEntry,
-            limit: (size_of::<Self>() - 1) as u16,
-        };
-
-        unsafe { lidt(&ptr) };
-    }
-}
-
-#[derive(Debug, Clone, Copy)]
-#[repr(C, packed)]
-pub struct Entry {
-    pointer_low: u16,
-    gdt_selector: SegmentSelector,
-    options: EntryOptions,
-    pointer_middle: u16,
-    pointer_high: u32,
-    reserved: u32,
-}
-
-pub type HandlerFunc = extern "C" fn() -> !;
-
-impl Entry {
-    fn new(gdt_selector: SegmentSelector, handler: HandlerFunc) -> Self {
-        let pointer = handler as u64;
-        Entry {
-            gdt_selector: gdt_selector,
-            pointer_low: pointer as u16,
-            pointer_middle: (pointer >> 16) as u16,
-            pointer_high: (pointer >> 32) as u32,
-            options: EntryOptions::new(),
-            reserved: 0,
-        }
-    }
-
-    fn missing() -> Self {
-        Entry {
-            gdt_selector: SegmentSelector::new(0, PrivilegeLevel::Ring0),
-            pointer_low: 0,
-            pointer_middle: 0,
-            pointer_high: 0,
-            options: EntryOptions::minimal(),
-            reserved: 0,
-        }
-    }
-}
-
-use bit_field::BitField;
-
-#[derive(Debug, Clone, Copy)]
-pub struct EntryOptions(u16);
-
-impl EntryOptions {
-    fn minimal() -> Self {
-        let mut options = 0;
-        options.set_bits(9..12, 0b111); // 'must-be-one' bits
-        EntryOptions(options)
-    }
-
-    fn new() -> Self {
-        let mut options = Self::minimal();
-        options.set_present(true).disable_interrupts(true);
-        options
-    }
-
-    pub fn set_present(&mut self, present: bool) -> &mut Self {
-        self.0.set_bit(15, present);
-        self
-    }
-
-    pub fn disable_interrupts(&mut self, disable: bool) -> &mut Self {
-        self.0.set_bit(8, !disable);
-        self
-    }
-
-    #[allow(dead_code)]
-    pub fn set_privilege_level(&mut self, dpl: u16) -> &mut Self {
-        self.0.set_bits(13..15, dpl);
-        self
-    }
-
-    pub fn set_stack_index(&mut self, index: u16) -> &mut Self {
-        // The hardware IST index starts at 1, but our software IST index
-        // starts at 0. Therefore we need to add 1 here.
-        self.0.set_bits(0..3, index + 1);
-        self
-    }
-}
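
Note: everything this hand-rolled module provided (entry layout, present bit, gate type, IST selection) now comes from x86_64::structures::idt. As a sanity check on the bit layout the deleted EntryOptions encoded, the options word can be computed by hand; this standalone sketch reproduces the same arithmetic with plain shifts:

    fn main() {
        let minimal: u16 = 0b111 << 9;   // 'must-be-one' bits 9..12 (upper bits of the gate type)
        assert_eq!(minimal, 0x0E00);

        let new = minimal | (1 << 15);   // present bit set; bit 8 stays 0, so the gate
                                         // is an interrupt gate (interrupts disabled)
        assert_eq!(new, 0x8E00);         // 0x8E is the classic attribute byte of a
                                         // present, DPL-0, 64-bit interrupt gate
        println!("minimal = {:#06x}, new = {:#06x}", minimal, new);
    }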

--- a/src/interrupts/mod.rs
+++ b/src/interrupts/mod.rs

@@ -8,101 +8,27 @@
 // except according to those terms.
 
 use memory::MemoryController;
-use x86::bits64::task::TaskStateSegment;
+use x86_64::structures::tss::TaskStateSegment;
+use x86_64::structures::idt::{Idt, ExceptionStackFrame, PageFaultErrorCode};
 use spin::Once;
 
-mod idt;
 mod gdt;
 
-macro_rules! save_scratch_registers {
-    () => {
-        asm!("push rax
-              push rcx
-              push rdx
-              push rsi
-              push rdi
-              push r8
-              push r9
-              push r10
-              push r11
-        " :::: "intel", "volatile");
-    }
-}
-
-macro_rules! restore_scratch_registers {
-    () => {
-        asm!("pop r11
-              pop r10
-              pop r9
-              pop r8
-              pop rdi
-              pop rsi
-              pop rdx
-              pop rcx
-              pop rax
-            " :::: "intel", "volatile");
-    }
-}
-
-macro_rules! handler {
-    ($name: ident) => {{
-        #[naked]
-        extern "C" fn wrapper() -> ! {
-            unsafe {
-                save_scratch_registers!();
-                asm!("mov rdi, rsp
-                      add rdi, 9*8 // calculate exception stack frame pointer
-                      call $0"
-                      :: "i"($name as extern "C" fn(
-                          &ExceptionStackFrame))
-                      : "rdi" : "intel");
-                restore_scratch_registers!();
-                asm!("iretq" :::: "intel", "volatile");
-                ::core::intrinsics::unreachable();
-            }
-        }
-        wrapper
-    }}
-}
-
-macro_rules! handler_with_error_code {
-    ($name: ident) => {{
-        #[naked]
-        extern "C" fn wrapper() -> ! {
-            unsafe {
-                save_scratch_registers!();
-                asm!("mov rsi, [rsp + 9*8] // load error code into rsi
-                      mov rdi, rsp
-                      add rdi, 10*8 // calculate exception stack frame pointer
-                      sub rsp, 8 // align the stack pointer
-                      call $0
-                      add rsp, 8 // undo stack pointer alignment
-                      " :: "i"($name as extern "C" fn(
-                          &ExceptionStackFrame, u64))
-                      : "rdi","rsi" : "intel");
-                restore_scratch_registers!();
-                asm!("add rsp, 8 // pop error code
-                      iretq" :::: "intel", "volatile");
-                ::core::intrinsics::unreachable();
-            }
-        }
-        wrapper
-    }}
-}
-
 const DOUBLE_FAULT_IST_INDEX: usize = 0;
 
 lazy_static! {
-    static ref IDT: idt::Idt = {
-        let mut idt = idt::Idt::new();
+    static ref IDT: Idt = {
+        let mut idt = Idt::new();
 
-        idt.set_handler(0, handler!(divide_by_zero_handler));
-        idt.set_handler(3, handler!(breakpoint_handler));
-        idt.set_handler(6, handler!(invalid_opcode_handler));
-        idt.set_handler(8, handler_with_error_code!(double_fault_handler))
-            .set_stack_index(DOUBLE_FAULT_IST_INDEX as u16);
-        idt.set_handler(14, handler_with_error_code!(page_fault_handler));
+        idt.divide_by_zero.set_handler_fn(divide_by_zero_handler);
+        idt.breakpoint.set_handler_fn(breakpoint_handler);
+        idt.invalid_opcode.set_handler_fn(invalid_opcode_handler);
+        idt.page_fault.set_handler_fn(page_fault_handler);
+
+        unsafe {
+            idt.double_fault.set_handler_fn(double_fault_handler)
+                .set_stack_index(DOUBLE_FAULT_IST_INDEX as u16);
+        }
 
         idt
     };
@@ -112,20 +38,22 @@ static TSS: Once<TaskStateSegment> = Once::new();
 static GDT: Once<gdt::Gdt> = Once::new();
 
 pub fn init(memory_controller: &mut MemoryController) {
-    use x86::shared::segmentation::{SegmentSelector, set_cs};
-    use x86::shared::task::load_tr;
+    use x86_64::structures::gdt::SegmentSelector;
+    use x86_64::instructions::segmentation::set_cs;
+    use x86_64::instructions::tables::load_tss;
+    use x86_64::VirtualAddress;
 
     let double_fault_stack =
         memory_controller.alloc_stack(1).expect("could not allocate double fault stack");
 
     let tss = TSS.call_once(|| {
         let mut tss = TaskStateSegment::new();
-        tss.ist[DOUBLE_FAULT_IST_INDEX] = double_fault_stack.top() as u64;
+        tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX] = VirtualAddress(double_fault_stack.top());
         tss
     });
 
-    let mut code_selector = SegmentSelector::empty();
-    let mut tss_selector = SegmentSelector::empty();
+    let mut code_selector = SegmentSelector(0);
+    let mut tss_selector = SegmentSelector(0);
     let gdt = GDT.call_once(|| {
         let mut gdt = gdt::Gdt::new();
         code_selector = gdt.add_entry(gdt::Descriptor::kernel_code_segment());
@@ -138,61 +66,41 @@ pub fn init(memory_controller: &mut MemoryController) {
         // reload code segment register
         set_cs(code_selector);
         // load TSS
-        load_tr(tss_selector);
+        load_tss(tss_selector);
     }
 
     IDT.load();
 }
 
-#[derive(Debug)]
-#[repr(C)]
-struct ExceptionStackFrame {
-    instruction_pointer: u64,
-    code_segment: u64,
-    cpu_flags: u64,
-    stack_pointer: u64,
-    stack_segment: u64,
-}
-
-extern "C" fn divide_by_zero_handler(stack_frame: &ExceptionStackFrame) {
+extern "x86-interrupt" fn divide_by_zero_handler(stack_frame: &mut ExceptionStackFrame) {
     println!("\nEXCEPTION: DIVIDE BY ZERO\n{:#?}", stack_frame);
     loop {}
 }
 
-extern "C" fn breakpoint_handler(stack_frame: &ExceptionStackFrame) {
+extern "x86-interrupt" fn breakpoint_handler(stack_frame: &mut ExceptionStackFrame) {
     println!("\nEXCEPTION: BREAKPOINT at {:#x}\n{:#?}",
              stack_frame.instruction_pointer,
              stack_frame);
 }
 
-extern "C" fn invalid_opcode_handler(stack_frame: &ExceptionStackFrame) {
+extern "x86-interrupt" fn invalid_opcode_handler(stack_frame: &mut ExceptionStackFrame) {
     println!("\nEXCEPTION: INVALID OPCODE at {:#x}\n{:#?}",
              stack_frame.instruction_pointer,
              stack_frame);
     loop {}
 }
 
-bitflags! {
-    flags PageFaultErrorCode: u64 {
-        const PROTECTION_VIOLATION = 1 << 0,
-        const CAUSED_BY_WRITE = 1 << 1,
-        const USER_MODE = 1 << 2,
-        const MALFORMED_TABLE = 1 << 3,
-        const INSTRUCTION_FETCH = 1 << 4,
-    }
-}
-
-extern "C" fn page_fault_handler(stack_frame: &ExceptionStackFrame, error_code: u64) {
-    use x86::shared::control_regs;
+extern "x86-interrupt" fn page_fault_handler(stack_frame: &mut ExceptionStackFrame,
+                                             error_code: PageFaultErrorCode) {
+    use x86_64::registers::control_regs;
     println!("\nEXCEPTION: PAGE FAULT while accessing {:#x}\nerror code: \
               {:?}\n{:#?}",
-             unsafe { control_regs::cr2() },
-             PageFaultErrorCode::from_bits(error_code).unwrap(),
+             control_regs::cr2(),
+             error_code,
              stack_frame);
     loop {}
 }
 
-extern "C" fn double_fault_handler(stack_frame: &ExceptionStackFrame, _error_code: u64) {
+extern "x86-interrupt" fn double_fault_handler(stack_frame: &mut ExceptionStackFrame,
+                                               _error_code: u64) {
     println!("\nEXCEPTION: DOUBLE FAULT\n{:#?}", stack_frame);
     loop {}
 }
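
Note: the x86-interrupt calling convention is what makes the deleted naked-function and inline-asm plumbing unnecessary: the compiler now saves and restores scratch registers, aligns the stack, passes the exception stack frame pointer (and the error code, for the faults that push one), and returns via iretq. The unsafe block around the double-fault registration exists because set_stack_index promises that the chosen IST entry holds a valid stack. A minimal sketch of the pattern in isolation, assuming the same x86_64 0.1.0 API used above:

    #![feature(abi_x86_interrupt)]
    use x86_64::structures::idt::{Idt, ExceptionStackFrame};

    extern "x86-interrupt" fn breakpoint_handler(stack_frame: &mut ExceptionStackFrame) {
        // compiler-generated prologue/epilogue: no #[naked], no asm!, no iretq by hand
        println!("BREAKPOINT\n{:#?}", stack_frame);
    }

    fn build_idt() -> Idt {
        let mut idt = Idt::new();
        idt.breakpoint.set_handler_fn(breakpoint_handler);
        idt
    }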

--- a/src/lib.rs
+++ b/src/lib.rs

@@ -12,7 +12,7 @@
 #![feature(alloc, collections)]
 #![feature(asm)]
 #![feature(naked_functions)]
-#![feature(core_intrinsics)]
+#![feature(abi_x86_interrupt)]
 #![no_std]
 
 extern crate rlibc;
@@ -21,8 +21,7 @@ extern crate spin;
 extern crate multiboot2;
 #[macro_use]
 extern crate bitflags;
-#[macro_use]
-extern crate x86;
+extern crate x86_64;
 #[macro_use]
 extern crate once;
 extern crate bit_field;
@@ -68,7 +67,7 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) {
 }
 
 fn enable_nxe_bit() {
-    use x86::shared::msr::{IA32_EFER, rdmsr, wrmsr};
+    use x86_64::registers::msr::{IA32_EFER, rdmsr, wrmsr};
 
     let nxe_bit = 1 << 11;
     unsafe {
@@ -78,9 +77,9 @@ fn enable_nxe_bit() {
 }
 
 fn enable_write_protect_bit() {
-    use x86::shared::control_regs::{cr0, cr0_write, CR0_WRITE_PROTECT};
+    use x86_64::registers::control_regs::{cr0, cr0_write, Cr0};
 
-    unsafe { cr0_write(cr0() | CR0_WRITE_PROTECT) };
+    unsafe { cr0_write(cr0() | Cr0::WRITE_PROTECT) };
 }
 
 #[cfg(not(test))]
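
Note: core_intrinsics was only needed for the ::core::intrinsics::unreachable() calls in the deleted handler macros, while abi_x86_interrupt gates the new extern "x86-interrupt" functions; #[macro_use] disappears from the crate import because no macros are taken from x86_64. A sketch of the relevant crate-root attributes after the change:

    #![feature(asm)]               // kept by this commit, though the handler macros are gone
    #![feature(naked_functions)]   // likewise kept
    #![feature(abi_x86_interrupt)] // gates extern "x86-interrupt" fn
    #![no_std]

    extern crate x86_64;           // plain import: no #[macro_use] needed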

--- a/src/memory/paging/mapper.rs
+++ b/src/memory/paging/mapper.rs

@@ -105,6 +105,9 @@ impl Mapper {
     pub fn unmap<A>(&mut self, page: Page, allocator: &mut A)
         where A: FrameAllocator
     {
+        use x86_64::VirtualAddress;
+        use x86_64::instructions::tlb;
+
         assert!(self.translate(page.start_address()).is_some());
 
         let p1 = self.p4_mut()
@@ -114,7 +117,7 @@ impl Mapper {
             .expect("mapping code does not support huge pages");
         let frame = p1[page.p1_index()].pointed_frame().unwrap();
         p1[page.p1_index()].set_unused();
-        unsafe { ::x86::shared::tlb::flush(page.start_address()) };
+        tlb::flush(VirtualAddress(page.start_address()));
         // TODO free p(1,2,3) table if empty
         // allocator.deallocate_frame(frame);
     }
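
Note: the invlpg wrapper goes from an unsafe free function taking a raw usize to a safe function taking a typed VirtualAddress. Side by side, as a small sketch:

    // old: raw address, caller responsible for correctness
    // unsafe { ::x86::shared::tlb::flush(page.start_address()) };

    // new: the newtype documents which kind of address is expected, and the
    // wrapper is safe (flushing a TLB entry cannot violate memory safety)
    use x86_64::VirtualAddress;
    use x86_64::instructions::tlb;

    fn flush_page(addr: usize) {
        tlb::flush(VirtualAddress(addr));
    }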

--- a/src/memory/paging/mod.rs
+++ b/src/memory/paging/mod.rs

@@ -119,38 +119,39 @@ impl ActivePageTable {
                    f: F)
         where F: FnOnce(&mut Mapper)
     {
-        use x86::shared::{control_regs, tlb};
-        let flush_tlb = || unsafe { tlb::flush_all() };
+        use x86_64::registers::control_regs;
+        use x86_64::instructions::tlb;
 
         {
-            let backup = Frame::containing_address(unsafe { control_regs::cr3() } as usize);
+            let backup = Frame::containing_address(control_regs::cr3().0 as usize);
 
             // map temporary_page to current p4 table
             let p4_table = temporary_page.map_table_frame(backup.clone(), self);
 
             // overwrite recursive mapping
             self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
-            flush_tlb();
+            tlb::flush_all();
 
             // execute f in the new context
             f(self);
 
             // restore recursive mapping to original p4 table
             p4_table[511].set(backup, PRESENT | WRITABLE);
-            flush_tlb();
+            tlb::flush_all();
         }
 
         temporary_page.unmap(self);
     }
 
     pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
-        use x86::shared::control_regs;
+        use x86_64::PhysicalAddress;
+        use x86_64::registers::control_regs;
 
         let old_table = InactivePageTable {
-            p4_frame: Frame::containing_address(unsafe { control_regs::cr3() } as usize),
+            p4_frame: Frame::containing_address(control_regs::cr3().0 as usize),
         };
         unsafe {
-            control_regs::cr3_write(new_table.p4_frame.start_address());
+            control_regs::cr3_write(PhysicalAddress(new_table.p4_frame.start_address() as u64));
         }
 
         old_table
     }
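
Note: control_regs::cr3() now returns a typed PhysicalAddress (a tuple struct, hence the .0 to reach the raw u64), and reading it no longer needs unsafe; only cr3_write, which actually switches address spaces, stays unsafe. A sketch of the round-trip as used above, assuming Frame::containing_address and start_address from this module:

    use x86_64::PhysicalAddress;
    use x86_64::registers::control_regs;

    // read: safe, returns a typed physical address
    fn current_p4_frame() -> Frame {
        Frame::containing_address(control_regs::cr3().0 as usize)
    }

    // write: unsafe, because pointing CR3 at the wrong table breaks everything
    unsafe fn switch_p4(new_p4: &Frame) {
        control_regs::cr3_write(PhysicalAddress(new_p4.start_address() as u64));
    }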