Merge pull request #289 from phil-opp/x86_64-crate

Use the `x86_64` crate instead of `x86`
Philipp Oppermann
2017-03-19 13:25:17 +01:00
committed by GitHub
14 changed files with 99 additions and 312 deletions

View File

@@ -28,4 +28,3 @@ addons:
script:
- make
- cargo fmt -- --write-mode=diff

View File

@@ -11,6 +11,7 @@ once = "0.3.2"
rlibc = "0.1.4"
spin = "0.4.5"
volatile = "0.1.0"
x86_64 = "0.1.0"
[dependencies.hole_list_allocator]
path = "libs/hole_list_allocator"
@@ -19,9 +20,6 @@ path = "libs/hole_list_allocator"
features = ["spin_no_std"]
version = "0.2.1"
[dependencies.x86]
default-features = false
version = "0.8.0"
[lib]
crate-type = ["staticlib"]

View File

@@ -898,18 +898,15 @@ An x86 processor has many different caches because always accessing the main mem
The translation lookaside buffer, or TLB, caches the translation of virtual to physical addresses. It's filled automatically when a page is accessed, but it's not updated transparently when the mapping of a page changes. This is why we can still access the page even though we unmapped it in the page table.
So to fix our `unmap` function, we need to remove the cached translation from the TLB. We can use Gerd Zellweger's [x86][x86 crate] crate to do this easily. To add it, we append the following to our `Cargo.toml`:
So to fix our `unmap` function, we need to remove the cached translation from the TLB. We can use the [x86_64][x86_64 crate] crate to do this easily. To add it, we append the following to our `Cargo.toml`:
[x86 crate]: https://github.com/gz/rust-x86
[x86_64 crate]: https://docs.rs/x86_64
```toml
[dependencies.x86]
version = "0.8.0"
default-features = false
[dependencies]
...
x86_64 = "0.1.0"
```
It has a `performance-counter` feature that allows reading the CPU specific [performance counters] but increases compile times. We don't need it right now, so we disable it using `default-features = false`.
[performance counters]: http://gz.github.io/rust-x86/x86/perfcnt/index.html
Now we can use it to fix `unmap`:
@@ -917,7 +914,9 @@ It has a `performance-counter` feature that allows reading the CPU specific [per
...
p1[page.p1_index()].set_unused();
unsafe {
::x86::shared::tlb::flush(page.start_address());
use x86_64::instructions::tlb;
use x86_64::VirtualAddress;
tlb::flush(VirtualAddress(page.start_address()));
}
// TODO free p(1,2,3) table if empty
//allocator.deallocate_frame(frame);

View File

@@ -345,12 +345,11 @@ pub fn with<F>(&mut self,
f: F)
where F: FnOnce(&mut ActivePageTable)
{
use x86::shared::tlb;
let flush_tlb = || unsafe { tlb::flush_all() };
use x86_64::instructions::tlb;
// overwrite recursive mapping
self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
flush_tlb();
tlb::flush_all();
// execute f in the new context
f(self);
@@ -449,7 +448,7 @@ Right now, the `with` function overwrites the recursive mapping and calls the cl
To back up the physical P4 frame of the active table, we can either read it from the 511th P4 entry (before we change it) or from the CR3 control register directly. We will do the latter as it should be faster and we already have an external crate that makes it easy:
```rust
use x86::shared::control_regs;
use x86_64::registers::control_regs;
let backup = Frame::containing_address(
    control_regs::cr3().0 as usize
);
@@ -482,8 +481,8 @@ pub fn with<F>(&mut self,
f: F)
where F: FnOnce(&mut Mapper)
{
use x86::shared::{control_regs, tlb};
let flush_tlb = || unsafe { tlb::flush_all() };
use x86_64::instructions::tlb;
use x86_64::registers::control_regs;
{
let backup = Frame::containing_address(
@@ -494,14 +493,14 @@ pub fn with<F>(&mut self,
// overwrite recursive mapping
self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
flush_tlb();
tlb::flush_all();
// execute f in the new context
f(self);
// restore recursive mapping to original p4 table
p4_table[511].set(backup, PRESENT | WRITABLE);
flush_tlb();
tlb::flush_all();
}
temporary_page.unmap(self);
@@ -755,7 +754,7 @@ We do this in a new `ActivePageTable::switch` method:
// in `impl ActivePageTable` in src/memory/paging/mod.rs
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86::shared::control_regs;
use x86_64::registers::control_regs;
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(
@@ -976,7 +975,7 @@ So we need to enable the `NXE` bit. For that we use the awesome [x86][rust-x86]
// in lib.rs
fn enable_nxe_bit() {
use x86::shared::msr::{IA32_EFER, rdmsr, wrmsr};
use x86_64::registers::msr::{IA32_EFER, rdmsr, wrmsr};
let nxe_bit = 1 << 11;
unsafe {
@@ -996,7 +995,7 @@ Right now, we are still able to modify the `.code` and `.rodata` sections, even
// in lib.rs
fn enable_write_protect_bit() {
use x86::shared::control_regs::{cr0, cr0_write, CR0_WRITE_PROTECT};
use x86_64::registers::control_regs::{cr0, cr0_write, Cr0};
unsafe { cr0_write(cr0() | Cr0::WRITE_PROTECT) };
}
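
For context, enabling the NXE bit with these MSR helpers amounts to roughly the following (a hedged sketch; the rest of the function body lies outside the hunks shown here):

```rust
fn enable_nxe_bit() {
    use x86_64::registers::msr::{IA32_EFER, rdmsr, wrmsr};

    let nxe_bit = 1 << 11; // the NXE bit is bit 11 of the EFER register
    unsafe {
        let efer = rdmsr(IA32_EFER);
        wrmsr(IA32_EFER, efer | nxe_bit);
    }
}
```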

View File

@@ -84,8 +84,9 @@ Now we create types for the IDT and its entries:
```rust
// src/interrupts/idt.rs
use x86::shared::segmentation::{self, SegmentSelector};
use x86::shared::PrivilegeLevel;
use x86_64::instructions::segmentation;
use x86_64::structures::gdt::SegmentSelector;
use x86_64::PrivilegeLevel;
pub struct Idt([Entry; 16]);
@@ -279,10 +280,9 @@ impl Idt {
}
}
```
The method overwrites the specified entry with the given handler function. We use the `segmentation::cs`[^fn-segmentation-cs] function of the [x86 crate] to get the current code segment descriptor. There's no need for different kernel code segments in long mode, so the current `cs` value should be always the right choice.
The method overwrites the specified entry with the given handler function. We use the `segmentation::cs` function of the [x86_64 crate] to get the current code segment selector. There's no need for different kernel code segments in long mode, so the current `cs` value should always be the right choice.
[x86 crate]: https://github.com/gz/rust-x86
[^fn-segmentation-cs]: The `segmentation::cs` function was [added](https://github.com/gz/rust-x86/pull/12) in version 0.7.0, so you might need to update your `x86` version in your `Cargo.toml`.
[x86_64 crate]: https://docs.rs/x86_64
By returning a mutable reference to the entry's options, we allow the caller to override the default settings. For example, the caller could add a non-present entry by executing: `idt.set_handler(11, handler_fn).set_present(false)`.
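
As a side note, this chaining works because `set_handler` hands back a mutable reference into the table. A minimal, self-contained sketch of the pattern, with illustrative stand-in types rather than the real IDT entries:

```rust
#[derive(Clone, Copy)]
struct EntryOptions(u16);

impl EntryOptions {
    fn set_present(&mut self, present: bool) -> &mut Self {
        // bit 15 is the present bit in the real IDT options field
        if present { self.0 |= 1 << 15; } else { self.0 &= !(1 << 15); }
        self
    }
}

struct Idt([EntryOptions; 16]);

impl Idt {
    // the real method also stores the handler address and the cs selector
    fn set_handler(&mut self, entry: u8, _handler: fn()) -> &mut EntryOptions {
        &mut self.0[entry as usize]
    }
}

fn handler() {}

fn main() {
    let mut idt = Idt([EntryOptions(0); 16]);
    // register a handler but mark the entry as not present
    idt.set_handler(11, handler).set_present(false);
}
```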
@@ -299,19 +299,19 @@ Type | Name | Description
u16 | Limit | The maximum addressable byte in the table. Equal to the table size in bytes minus 1.
u64 | Offset | Virtual start address of the table.
This structure is already contained [in the x86 crate], so we don't need to create it ourselves. The same is true for the [lidt function]. So we just need to put the pieces together to create a `load` method:
This structure is already contained [in the x86_64 crate], so we don't need to create it ourselves. The same is true for the [lidt function]. So we just need to put the pieces together to create a `load` method:
[in the x86 crate]: http://gz.github.io/rust-x86/x86/dtables/struct.DescriptorTablePointer.html
[lidt function]: http://gz.github.io/rust-x86/x86/dtables/fn.lidt.html
[in the x86_64 crate]: http://docs.rs/x86_64/0.1.0/x86_64/instructions/tables/struct.DescriptorTablePointer.html
[lidt function]: http://docs.rs/x86_64/0.1.0/x86_64/instructions/tables/fn.lidt.html
```rust
impl Idt {
pub fn load(&self) {
use x86::shared::dtables::{DescriptorTablePointer, lidt};
use x86_64::instructions::tables::{DescriptorTablePointer, lidt};
use core::mem::size_of;
let ptr = DescriptorTablePointer {
base: self as *const _ as *const ::x86::bits64::irq::IdtEntry,
base: self as *const _ as u64,
limit: (size_of::<Self>() - 1) as u16,
};
@@ -319,9 +319,7 @@ impl Idt {
}
}
```
The method does not need to modify the IDT, so it takes `self` by immutable reference. First, we create a `DescriptorTablePointer` and then we pass it to `lidt`. The `lidt` function expects that the `base` field has the type `x86::bits64::irq::IdtEntry`[^fn-x86-idt-entry], therefore we need to cast the `self` pointer. For calculating the `limit` we use [mem::size_of]. The additional `-1` is needed because the limit field has to be the maximum addressable byte (inclusive bound). We need an unsafe block around `lidt`, because the function assumes that the specified handler addresses are valid.
[^fn-x86-idt-entry]: The `x86` crate has its own `IdtEntry` type, but it is a bit incomplete. Therefore we created our own IDT types.
The method does not need to modify the IDT, so it takes `self` by immutable reference. First, we create a `DescriptorTablePointer` and then we pass it to `lidt`. The `lidt` function expects the `base` field to have the type `u64`, so we need to cast the `self` pointer. For calculating the `limit` we use [mem::size_of]. The additional `-1` is needed because the limit field has to be the maximum addressable byte (an inclusive bound). We need an unsafe block around `lidt`, because the function assumes that the specified handler addresses are valid.
[mem::size_of]: https://doc.rust-lang.org/nightly/core/mem/fn.size_of.html
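
To make the `limit` arithmetic concrete, here is a tiny worked example with a stand-in 16-byte entry type (not the real `Entry`): 16 entries of 16 bytes each occupy 256 bytes, so the last addressable byte, as an inclusive bound, sits at offset 255.

```rust
use core::mem::size_of;

#[derive(Clone, Copy)]
#[repr(C, packed)]
struct Entry([u8; 16]); // stand-in for the 16-byte IDT entry layout

struct Idt([Entry; 16]);

fn main() {
    // size_of::<Idt>() is 16 * 16 = 256 bytes
    let limit = (size_of::<Idt>() - 1) as u16;
    assert_eq!(limit, 255); // inclusive bound: the last addressable byte
}
```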

View File

@@ -148,7 +148,7 @@ If you want to try it, insert it in `src/lib.rs` and call it from `rust_main`. W
[...]
000000000010df90 <_ZN7blog_os22naked_function_example17ha9f733dfe42b595dE>:
10df90: 48 c7 c0 2a 00 00 00 mov $0x42,%rax
10df97: c3 retq
10df97: c3 retq
10df98: 0f 1f 84 00 00 00 00 nopl 0x0(%rax,%rax,1)
10df9f: 00
```
@@ -629,7 +629,7 @@ Now we can improve our page fault error message by using the new `PageFaultError
extern "C" fn page_fault_handler(stack_frame: &ExceptionStackFrame,
error_code: u64) -> !
{
use x86::shared::control_regs;
use x86_64::registers::control_regs;
println!(
"\nEXCEPTION: PAGE FAULT while accessing {:#x}\
\nerror code: {:?}\n{:#?}",

View File

@@ -79,7 +79,7 @@ In order to test it, we insert an `int3` instruction in our `rust_main`:
// in src/lib.rs
...
#[macro_use] // needed for the `int!` macro
extern crate x86;
extern crate x86_64;
...
#[no_mangle]

View File

@@ -41,10 +41,9 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) {
}
{{< / highlight >}}
We use the [int! macro] of the [x86 crate] to trigger the exception with vector number `1`, which is the [debug exception]. The debug exception occurs for example when a breakpoint defined in the [debug registers] is hit. Like the [breakpoint exception], it is mainly used for [implementing debuggers].
We use the [int! macro] of the `x86_64` crate to trigger the exception with vector number `1`, which is the [debug exception]. The debug exception occurs, for example, when a breakpoint defined in the [debug registers] is hit. Like the [breakpoint exception], it is mainly used for [implementing debuggers].
[int! macro]: https://docs.rs/x86/0.8.0/x86/macro.int!.html
[x86 crate]: https://github.com/gz/rust-x86
[int! macro]: https://docs.rs/x86_64/0.1.0/x86_64/macro.int!.html
[debug exception]: http://wiki.osdev.org/Exceptions#Debug
[debug registers]: https://en.wikipedia.org/wiki/X86_debug_register
[breakpoint exception]: http://wiki.osdev.org/Exceptions#Breakpoint
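
For illustration, the call in `rust_main` boils down to something like this (a sketch, not the exact diff; it assumes the crate's `int!` macro expands to an inline `int` instruction and therefore has to live in an `unsafe` block):

```rust
// trigger the debug exception (vector 1)
unsafe { int!(1) };
```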
@@ -476,7 +475,7 @@ Let's create a new TSS that contains our double fault stack in its interrupt sta
```rust
// in src/interrupts/mod.rs
use x86::bits64::task::TaskStateSegment;
use x86_64::structures::tss::TaskStateSegment;
```
Let's create a new TSS in our `interrupts::init` function:
@@ -624,7 +623,7 @@ Let's add a function to our descriptor that creates a TSS descriptor for a given
```rust
// in src/interrupts/gdt.rs
use x86::bits64::task::TaskStateSegment;
use x86_64::structures::tss::TaskStateSegment;
impl Descriptor {
pub fn tss_segment(tss: &'static TaskStateSegment) -> Descriptor {
@@ -660,8 +659,8 @@ In order to add descriptors to the GDT, we add a `add_entry` method:
```rust
// in src/interrupts/gdt.rs
use x86::shared::segmentation::SegmentSelector;
use x86::shared::PrivilegeLevel;
use x86_64::structures::gdt::SegmentSelector;
use x86_64::PrivilegeLevel;
impl Gdt {
pub fn add_entry(&mut self, entry: Descriptor) -> SegmentSelector {
@@ -709,8 +708,8 @@ To load the GDT, we add a new `load` method:
impl Gdt {
pub fn load(&'static self) {
use x86::shared::dtables::{DescriptorTablePointer, lgdt};
use x86::shared::segmentation;
use x86_64::instructions::tables::{DescriptorTablePointer, lgdt};
use x86_64::instructions::segmentation;
use core::mem::size_of;
let ptr = DescriptorTablePointer {
@@ -847,8 +846,9 @@ For the first two steps, we need access to the `code_selector` and `tss_selector
{{< highlight rust "hl_lines=3 4 7 8 11 12 19 21" >}}
// in src/interrupts/mod.rs
pub fn init(memory_controller: &mut MemoryController) {
use x86::shared::segmentation::{SegmentSelector, set_cs};
use x86::shared::task::load_tr;
use x86_64::structures::gdt::SegmentSelector;
use x86_64::instructions::segmentation::set_cs;
use x86_64::instructions::tables::load_tss;
...
let mut code_selector = SegmentSelector::empty();
@@ -865,17 +865,17 @@ pub fn init(memory_controller: &mut MemoryController) {
// reload code segment register
set_cs(code_selector);
// load TSS
load_tr(tss_selector);
load_tss(tss_selector);
}
IDT.load();
}
{{< / highlight >}}
We first set the descriptors to `empty` and then update them from inside the closure (which implicitly borrows them as `&mut`). Now we're able to reload the code segment register using [`set_cs`] and to load the TSS using [`load_tr`].
We first set the descriptors to `empty` and then update them from inside the closure (which implicitly borrows them as `&mut`). Now we're able to reload the code segment register using [`set_cs`] and to load the TSS using [`load_tss`].
[`set_cs`]: https://docs.rs/x86_64/0.1.0/x86_64/instructions/segmentation/fn.set_cs.html
[`load_tr`]: https://docs.rs/x86/0.8.0/x86/shared/task/fn.load_tr.html
[`load_tss`]: https://docs.rs/x86_64/0.1.0/x86_64/instructions/tables/fn.load_tss.html
Now that we loaded a valid TSS and interrupt stack table, we can set the stack index for our double fault handler in the IDT:
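
The corresponding IDT change appears in the `src/interrupts/mod.rs` hunk further down; in essence it is:

```rust
// in the lazy_static IDT initialization
unsafe {
    idt.double_fault.set_handler_fn(double_fault_handler)
        .set_stack_index(DOUBLE_FAULT_IST_INDEX as u16);
}
```

The `set_stack_index` call sits in an `unsafe` block because the caller must guarantee that the index refers to a valid, otherwise unused interrupt stack.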

View File

@@ -1,6 +1,6 @@
use x86::bits64::task::TaskStateSegment;
use x86::shared::segmentation::SegmentSelector;
use x86::shared::PrivilegeLevel;
use x86_64::structures::tss::TaskStateSegment;
use x86_64::structures::gdt::SegmentSelector;
use x86_64::PrivilegeLevel;
pub struct Gdt {
table: [u64; 8],
@@ -39,12 +39,11 @@ impl Gdt {
}
pub fn load(&'static self) {
use x86::shared::dtables::{DescriptorTablePointer, lgdt};
use x86::shared::segmentation;
use x86_64::instructions::tables::{DescriptorTablePointer, lgdt};
use core::mem::size_of;
let ptr = DescriptorTablePointer {
base: self.table.as_ptr() as *const segmentation::SegmentDescriptor,
base: self.table.as_ptr() as u64,
limit: (self.table.len() * size_of::<u64>() - 1) as u16,
};

View File

@@ -1,116 +0,0 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use x86::shared::segmentation::{self, SegmentSelector};
use x86::shared::PrivilegeLevel;
pub struct Idt([Entry; 16]);
impl Idt {
pub fn new() -> Idt {
Idt([Entry::missing(); 16])
}
pub fn set_handler(&mut self, entry: u8, handler: HandlerFunc) -> &mut EntryOptions {
self.0[entry as usize] = Entry::new(segmentation::cs(), handler);
&mut self.0[entry as usize].options
}
pub fn load(&'static self) {
use x86::shared::dtables::{DescriptorTablePointer, lidt};
use core::mem::size_of;
let ptr = DescriptorTablePointer {
base: self as *const _ as *const ::x86::bits64::irq::IdtEntry,
limit: (size_of::<Self>() - 1) as u16,
};
unsafe { lidt(&ptr) };
}
}
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
pub struct Entry {
pointer_low: u16,
gdt_selector: SegmentSelector,
options: EntryOptions,
pointer_middle: u16,
pointer_high: u32,
reserved: u32,
}
pub type HandlerFunc = extern "C" fn() -> !;
impl Entry {
fn new(gdt_selector: SegmentSelector, handler: HandlerFunc) -> Self {
let pointer = handler as u64;
Entry {
gdt_selector: gdt_selector,
pointer_low: pointer as u16,
pointer_middle: (pointer >> 16) as u16,
pointer_high: (pointer >> 32) as u32,
options: EntryOptions::new(),
reserved: 0,
}
}
fn missing() -> Self {
Entry {
gdt_selector: SegmentSelector::new(0, PrivilegeLevel::Ring0),
pointer_low: 0,
pointer_middle: 0,
pointer_high: 0,
options: EntryOptions::minimal(),
reserved: 0,
}
}
}
use bit_field::BitField;
#[derive(Debug, Clone, Copy)]
pub struct EntryOptions(u16);
impl EntryOptions {
fn minimal() -> Self {
let mut options = 0;
options.set_bits(9..12, 0b111); // 'must-be-one' bits
EntryOptions(options)
}
fn new() -> Self {
let mut options = Self::minimal();
options.set_present(true).disable_interrupts(true);
options
}
pub fn set_present(&mut self, present: bool) -> &mut Self {
self.0.set_bit(15, present);
self
}
pub fn disable_interrupts(&mut self, disable: bool) -> &mut Self {
self.0.set_bit(8, !disable);
self
}
#[allow(dead_code)]
pub fn set_privilege_level(&mut self, dpl: u16) -> &mut Self {
self.0.set_bits(13..15, dpl);
self
}
pub fn set_stack_index(&mut self, index: u16) -> &mut Self {
// The hardware IST index starts at 1, but our software IST index
// starts at 0. Therefore we need to add 1 here.
self.0.set_bits(0..3, index + 1);
self
}
}

View File

@@ -8,101 +8,27 @@
// except according to those terms.
use memory::MemoryController;
use x86::bits64::task::TaskStateSegment;
use x86_64::structures::tss::TaskStateSegment;
use x86_64::structures::idt::{Idt, ExceptionStackFrame, PageFaultErrorCode};
use spin::Once;
mod idt;
mod gdt;
macro_rules! save_scratch_registers {
() => {
asm!("push rax
push rcx
push rdx
push rsi
push rdi
push r8
push r9
push r10
push r11
" :::: "intel", "volatile");
}
}
macro_rules! restore_scratch_registers {
() => {
asm!("pop r11
pop r10
pop r9
pop r8
pop rdi
pop rsi
pop rdx
pop rcx
pop rax
" :::: "intel", "volatile");
}
}
macro_rules! handler {
($name: ident) => {{
#[naked]
extern "C" fn wrapper() -> ! {
unsafe {
save_scratch_registers!();
asm!("mov rdi, rsp
add rdi, 9*8 // calculate exception stack frame pointer
call $0"
:: "i"($name as extern "C" fn(
&ExceptionStackFrame))
: "rdi" : "intel");
restore_scratch_registers!();
asm!("iretq" :::: "intel", "volatile");
::core::intrinsics::unreachable();
}
}
wrapper
}}
}
macro_rules! handler_with_error_code {
($name: ident) => {{
#[naked]
extern "C" fn wrapper() -> ! {
unsafe {
save_scratch_registers!();
asm!("mov rsi, [rsp + 9*8] // load error code into rsi
mov rdi, rsp
add rdi, 10*8 // calculate exception stack frame pointer
sub rsp, 8 // align the stack pointer
call $0
add rsp, 8 // undo stack pointer alignment
" :: "i"($name as extern "C" fn(
&ExceptionStackFrame, u64))
: "rdi","rsi" : "intel");
restore_scratch_registers!();
asm!("add rsp, 8 // pop error code
iretq" :::: "intel", "volatile");
::core::intrinsics::unreachable();
}
}
wrapper
}}
}
const DOUBLE_FAULT_IST_INDEX: usize = 0;
lazy_static! {
static ref IDT: idt::Idt = {
let mut idt = idt::Idt::new();
static ref IDT: Idt = {
let mut idt = Idt::new();
idt.set_handler(0, handler!(divide_by_zero_handler));
idt.set_handler(3, handler!(breakpoint_handler));
idt.set_handler(6, handler!(invalid_opcode_handler));
idt.set_handler(8, handler_with_error_code!(double_fault_handler))
.set_stack_index(DOUBLE_FAULT_IST_INDEX as u16);
idt.set_handler(14, handler_with_error_code!(page_fault_handler));
idt.divide_by_zero.set_handler_fn(divide_by_zero_handler);
idt.breakpoint.set_handler_fn(breakpoint_handler);
idt.invalid_opcode.set_handler_fn(invalid_opcode_handler);
idt.page_fault.set_handler_fn(page_fault_handler);
unsafe {
idt.double_fault.set_handler_fn(double_fault_handler)
.set_stack_index(DOUBLE_FAULT_IST_INDEX as u16);
}
idt
};
@@ -112,20 +38,22 @@ static TSS: Once<TaskStateSegment> = Once::new();
static GDT: Once<gdt::Gdt> = Once::new();
pub fn init(memory_controller: &mut MemoryController) {
use x86::shared::segmentation::{SegmentSelector, set_cs};
use x86::shared::task::load_tr;
use x86_64::structures::gdt::SegmentSelector;
use x86_64::instructions::segmentation::set_cs;
use x86_64::instructions::tables::load_tss;
use x86_64::VirtualAddress;
let double_fault_stack =
memory_controller.alloc_stack(1).expect("could not allocate double fault stack");
let tss = TSS.call_once(|| {
let mut tss = TaskStateSegment::new();
tss.ist[DOUBLE_FAULT_IST_INDEX] = double_fault_stack.top() as u64;
tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX] = VirtualAddress(double_fault_stack.top());
tss
});
let mut code_selector = SegmentSelector::empty();
let mut tss_selector = SegmentSelector::empty();
let mut code_selector = SegmentSelector(0);
let mut tss_selector = SegmentSelector(0);
let gdt = GDT.call_once(|| {
let mut gdt = gdt::Gdt::new();
code_selector = gdt.add_entry(gdt::Descriptor::kernel_code_segment());
@@ -138,61 +66,41 @@ pub fn init(memory_controller: &mut MemoryController) {
// reload code segment register
set_cs(code_selector);
// load TSS
load_tr(tss_selector);
load_tss(tss_selector);
}
IDT.load();
}
#[derive(Debug)]
#[repr(C)]
struct ExceptionStackFrame {
instruction_pointer: u64,
code_segment: u64,
cpu_flags: u64,
stack_pointer: u64,
stack_segment: u64,
}
extern "C" fn divide_by_zero_handler(stack_frame: &ExceptionStackFrame) {
extern "x86-interrupt" fn divide_by_zero_handler(stack_frame: &mut ExceptionStackFrame) {
println!("\nEXCEPTION: DIVIDE BY ZERO\n{:#?}", stack_frame);
loop {}
}
extern "C" fn breakpoint_handler(stack_frame: &ExceptionStackFrame) {
extern "x86-interrupt" fn breakpoint_handler(stack_frame: &mut ExceptionStackFrame) {
println!("\nEXCEPTION: BREAKPOINT at {:#x}\n{:#?}",
stack_frame.instruction_pointer,
stack_frame);
}
extern "C" fn invalid_opcode_handler(stack_frame: &ExceptionStackFrame) {
extern "x86-interrupt" fn invalid_opcode_handler(stack_frame: &mut ExceptionStackFrame) {
println!("\nEXCEPTION: INVALID OPCODE at {:#x}\n{:#?}",
stack_frame.instruction_pointer,
stack_frame);
loop {}
}
bitflags! {
flags PageFaultErrorCode: u64 {
const PROTECTION_VIOLATION = 1 << 0,
const CAUSED_BY_WRITE = 1 << 1,
const USER_MODE = 1 << 2,
const MALFORMED_TABLE = 1 << 3,
const INSTRUCTION_FETCH = 1 << 4,
}
}
extern "C" fn page_fault_handler(stack_frame: &ExceptionStackFrame, error_code: u64) {
use x86::shared::control_regs;
extern "x86-interrupt" fn page_fault_handler(stack_frame: &mut ExceptionStackFrame, error_code: PageFaultErrorCode) {
use x86_64::registers::control_regs;
println!("\nEXCEPTION: PAGE FAULT while accessing {:#x}\nerror code: \
{:?}\n{:#?}",
unsafe { control_regs::cr2() },
PageFaultErrorCode::from_bits(error_code).unwrap(),
control_regs::cr2(),
error_code,
stack_frame);
loop {}
}
extern "C" fn double_fault_handler(stack_frame: &ExceptionStackFrame, _error_code: u64) {
extern "x86-interrupt" fn double_fault_handler(stack_frame: &mut ExceptionStackFrame, _error_code: u64) {
println!("\nEXCEPTION: DOUBLE FAULT\n{:#?}", stack_frame);
loop {}
}

View File

@@ -12,7 +12,7 @@
#![feature(alloc, collections)]
#![feature(asm)]
#![feature(naked_functions)]
#![feature(core_intrinsics)]
#![feature(abi_x86_interrupt)]
#![no_std]
extern crate rlibc;
@@ -21,8 +21,7 @@ extern crate spin;
extern crate multiboot2;
#[macro_use]
extern crate bitflags;
#[macro_use]
extern crate x86;
extern crate x86_64;
#[macro_use]
extern crate once;
extern crate bit_field;
@@ -68,7 +67,7 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) {
}
fn enable_nxe_bit() {
use x86::shared::msr::{IA32_EFER, rdmsr, wrmsr};
use x86_64::registers::msr::{IA32_EFER, rdmsr, wrmsr};
let nxe_bit = 1 << 11;
unsafe {
@@ -78,9 +77,9 @@ fn enable_nxe_bit() {
}
fn enable_write_protect_bit() {
use x86::shared::control_regs::{cr0, cr0_write, CR0_WRITE_PROTECT};
use x86_64::registers::control_regs::{cr0, cr0_write, Cr0};
unsafe { cr0_write(cr0() | CR0_WRITE_PROTECT) };
unsafe { cr0_write(cr0() | Cr0::WRITE_PROTECT) };
}
#[cfg(not(test))]

View File

@@ -105,6 +105,9 @@ impl Mapper {
pub fn unmap<A>(&mut self, page: Page, allocator: &mut A)
where A: FrameAllocator
{
use x86_64::VirtualAddress;
use x86_64::instructions::tlb;
assert!(self.translate(page.start_address()).is_some());
let p1 = self.p4_mut()
@@ -114,7 +117,7 @@ impl Mapper {
.expect("mapping code does not support huge pages");
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
unsafe { ::x86::shared::tlb::flush(page.start_address()) };
tlb::flush(VirtualAddress(page.start_address()));
// TODO free p(1,2,3) table if empty
// allocator.deallocate_frame(frame);
}

View File

@@ -119,38 +119,39 @@ impl ActivePageTable {
f: F)
where F: FnOnce(&mut Mapper)
{
use x86::shared::{control_regs, tlb};
let flush_tlb = || unsafe { tlb::flush_all() };
use x86_64::registers::control_regs;
use x86_64::instructions::tlb;
{
let backup = Frame::containing_address(unsafe { control_regs::cr3() } as usize);
let backup = Frame::containing_address(control_regs::cr3().0 as usize);
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), self);
// overwrite recursive mapping
self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
flush_tlb();
tlb::flush_all();
// execute f in the new context
f(self);
// restore recursive mapping to original p4 table
p4_table[511].set(backup, PRESENT | WRITABLE);
flush_tlb();
tlb::flush_all();
}
temporary_page.unmap(self);
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86::shared::control_regs;
use x86_64::PhysicalAddress;
use x86_64::registers::control_regs;
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(unsafe { control_regs::cr3() } as usize),
p4_frame: Frame::containing_address(control_regs::cr3().0 as usize),
};
unsafe {
control_regs::cr3_write(new_table.p4_frame.start_address());
control_regs::cr3_write(PhysicalAddress(new_table.p4_frame.start_address() as u64));
}
old_table
}