Update x86 crate to version 0.8.0 (#266)

This commit is contained in:
Philipp Oppermann
2016-12-21 14:59:57 +01:00
committed by GitHub
parent a7a9aabce7
commit 02697891e2
10 changed files with 41 additions and 41 deletions

View File

@@ -904,7 +904,7 @@ So to fix our `unmap` function, we need to remove the cached translation from th
```toml
[dependencies.x86]
version = "0.7.1"
version = "0.8.0"
default-features = false
```
It has a `performance-counter` feature that allows reading the CPU specific [performance counters] but increases compile times. We don't need it right now, so we disable it using `default-features = false`.
@@ -917,7 +917,7 @@ It has a `performance-counter` feature that allows reading the CPU specific [per
...
p1[page.p1_index()].set_unused();
unsafe {
::x86::tlb::flush(page.start_address());
::x86::shared::tlb::flush(page.start_address());
}
// TODO free p(1,2,3) table if empty
//allocator.deallocate_frame(frame);

View File

@@ -345,7 +345,7 @@ pub fn with<F>(&mut self,
f: F)
where F: FnOnce(&mut ActivePageTable)
{
use x86::tlb;
use x86::shared::tlb;
let flush_tlb = || unsafe { tlb::flush_all() };
// overwrite recursive mapping
@@ -449,9 +449,9 @@ Right now, the `with` function overwrites the recursive mapping and calls the cl
To back up the physical P4 frame of the active table, we can either read it from the 511th P4 entry (before we change it) or from the CR3 control register directly. We will do the latter as it should be faster and we already have an external crate that makes it easy:
```rust
use x86::controlregs;
use x86::shared::control_regs;
let backup = Frame::containing_address(
unsafe { controlregs::cr3() } as usize
unsafe { control_regs::cr3() } as usize
);
```
Why is it unsafe? Because reading the CR3 register leads to a CPU exception if the processor is not running in kernel mode ([Ring 0]). But this code will always run in kernel mode, so the `unsafe` block is completely safe here.
@@ -482,12 +482,12 @@ pub fn with<F>(&mut self,
f: F)
where F: FnOnce(&mut Mapper)
{
use x86::{controlregs, tlb};
use x86::shared::{control_regs, tlb};
let flush_tlb = || unsafe { tlb::flush_all() };
{
let backup = Frame::containing_address(
unsafe { controlregs::cr3() } as usize);
unsafe { control_regs::cr3() } as usize);
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), self);
@@ -755,15 +755,15 @@ We do this in a new `ActivePageTable::switch` method:
// in `impl ActivePageTable` in src/memory/paging/mod.rs
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86::controlregs;
use x86::shared::control_regs;
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(
unsafe { controlregs::cr3() } as usize
unsafe { control_regs::cr3() } as usize
),
};
unsafe {
controlregs::cr3_write(new_table.p4_frame.start_address() as u64);
control_regs::cr3_write(new_table.p4_frame.start_address());
}
old_table
}
@@ -963,7 +963,7 @@ pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
But when we test it now, we get a page fault again. We can use the same technique as above to get the responsible function. I won't bother you with the QEMU output and just tell you the results:
This time the responsible function is `controlregs::cr3_write()` itself. From the [error code][page fault error code] we learn that it was a page protection violation and caused by “reading a 1 in a reserved field”. So the page table had some reserved bit set that should be always 0. It must be the `NO_EXECUTE` flag, since it's the only new bit that we set in the page table.
This time the responsible function is `control_regs::cr3_write()` itself. From the [error code][page fault error code] we learn that it was a page protection violation and caused by “reading a 1 in a reserved field”. So the page table had some reserved bit set that should be always 0. It must be the `NO_EXECUTE` flag, since it's the only new bit that we set in the page table.
### The NXE Bit
The reason is that the `NO_EXECUTE` bit must only be used when the `NXE` bit in the [Extended Feature Enable Register] \(EFER) is set. That register is similar to Rust's feature gating and can be used to enable all sorts of advanced CPU features. Since the `NXE` bit is off by default, we caused a page fault when we added the `NO_EXECUTE` bit to the page table.
@@ -976,7 +976,7 @@ So we need to enable the `NXE` bit. For that we use the awesome [x86][rust-x86]
// in lib.rs
fn enable_nxe_bit() {
use x86::msr::{IA32_EFER, rdmsr, wrmsr};
use x86::shared::msr::{IA32_EFER, rdmsr, wrmsr};
let nxe_bit = 1 << 11;
unsafe {
@@ -996,10 +996,9 @@ Right now, we are still able to modify the `.code` and `.rodata` sections, even
// in lib.rs
fn enable_write_protect_bit() {
use x86::controlregs::{cr0, cr0_write};
use x86::shared::control_regs::{cr0, cr0_write, CR0_WRITE_PROTECT};
let wp_bit = 1 << 16;
unsafe { cr0_write(cr0() | wp_bit) };
unsafe { cr0_write(cr0() | CR0_WRITE_PROTECT) };
}
```
The `cr0` functions are unsafe because accessing the `CR0` register is only allowed in kernel mode.

View File

@@ -84,7 +84,8 @@ Now we create types for the IDT and its entries:
```rust
// src/interrupts/idt.rs
use x86::segmentation::{self, SegmentSelector};
use x86::shared::segmentation::{self, SegmentSelector};
use x86::shared::PrivilegeLevel;
pub struct Idt([Entry; 16]);
@@ -254,7 +255,7 @@ impl Idt {
impl Entry {
fn missing() -> Self {
Entry {
gdt_selector: SegmentSelector::new(0),
gdt_selector: SegmentSelector::new(0, PrivilegeLevel::Ring0),
pointer_low: 0,
pointer_middle: 0,
pointer_high: 0,
@@ -306,11 +307,11 @@ This structure is already contained [in the x86 crate], so we don't need to crea
```rust
impl Idt {
pub fn load(&self) {
use x86::dtables::{DescriptorTablePointer, lidt};
use x86::shared::dtables::{DescriptorTablePointer, lidt};
use core::mem::size_of;
let ptr = DescriptorTablePointer {
base: self as *const _ as u64,
base: self as *const _ as *const ::x86::bits64::irq::IdtEntry,
limit: (size_of::<Self>() - 1) as u16,
};
@@ -318,12 +319,12 @@ impl Idt {
}
}
```
The method does not need to modify the IDT, so it takes `self` by immutable reference. We convert this reference to an u64 and calculate the table size using [mem::size_of]. The additional `-1` is needed because the limit field has to be the maximum addressable byte.
The method does not need to modify the IDT, so it takes `self` by immutable reference. First, we create a `DescriptorTablePointer` and then we pass it to `lidt`. The `lidt` function expects that the `base` field has the type `x86::bits64::irq::IdtEntry`[^fn-x86-idt-entry], therefore we need to cast the `self` pointer. For calculating the `limit` we use [mem::size_of]. The additional `-1` is needed because the limit field has to be the maximum addressable byte (inclusive bound). We need an unsafe block around `lidt`, because the function assumes that the specified handler addresses are valid.
[^fn-x86-idt-entry]: The `x86` crate has its own `IdtEntry` type, but it is a bit incomplete. Therefore we created our own IDT types.
[mem::size_of]: https://doc.rust-lang.org/nightly/core/mem/fn.size_of.html
Then we pass a pointer to our `ptr` structure to the `lidt` function, which calls the `lidt` assembly instruction in order to reload the IDT register. We need an unsafe block here, because the `lidt` function assumes that the specified handler addresses are valid.
#### Safety
But can we really guarantee that handler addresses are always valid? Let's see:

View File

@@ -629,11 +629,11 @@ Now we can improve our page fault error message by using the new `PageFaultError
extern "C" fn page_fault_handler(stack_frame: &ExceptionStackFrame,
error_code: u64) -> !
{
use x86::controlregs;
use x86::shared::control_regs;
println!(
"\nEXCEPTION: PAGE FAULT while accessing {:#x}\
\nerror code: {:?}\n{:#?}",
unsafe { controlregs::cr2() },
unsafe { control_regs::cr2() },
PageFaultErrorCode::from_bits(error_code).unwrap(),
unsafe { &*stack_frame });
loop {}