Compare commits

..

23 Commits

Author SHA1 Message Date
Philipp Oppermann
1007c5157d Remove paging testing code again 2017-11-19 11:42:16 +01:00
Philipp Oppermann
55e602dc61 Fix unmap function by flushing the TLB 2017-11-19 11:42:16 +01:00
Philipp Oppermann
eb5c9afa51 Add a dependency on the x86_64 crate 2017-11-19 11:42:16 +01:00
Philipp Oppermann
0ee4ed738a Test the unmap function (it doesn't work correctly) 2017-11-19 11:42:16 +01:00
Philipp Oppermann
b49cb6986c Test the map_to function 2017-11-19 11:42:16 +01:00
Philipp Oppermann
15c9f43622 Add an (unfinished) unmap method 2017-11-19 11:42:16 +01:00
Philipp Oppermann
3696c7bacb Add map and identity_map functions for convenience 2017-11-19 11:42:16 +01:00
Philipp Oppermann
bb473c7907 Make translate and map_to safe by making them methods of a new ActivePageTable struct 2017-11-19 11:42:16 +01:00
Philipp Oppermann
dedf6e8959 Add a map_to function 2017-11-19 11:42:16 +01:00
Philipp Oppermann
83d56aa9de Add translate and translate_page functions 2017-11-19 11:42:16 +01:00
Philipp Oppermann
a7170a4a44 Use type system tricks to make next_table methods safe 2017-11-19 11:42:16 +01:00
Philipp Oppermann
0874625269 Add methods to retrieve a (mutable) reference to the next table 2017-11-19 10:39:17 +01:00
Philipp Oppermann
3706331a43 Add a P4 constant and a Table::next_table_address method 2017-11-19 10:39:17 +01:00
Philipp Oppermann
8fcea7951b Recursively map the P4 table 2017-11-19 10:39:17 +01:00
Philipp Oppermann
e50d70c02e Add a Table::zero method to clear all entries 2017-11-19 10:39:17 +01:00
Philipp Oppermann
2b7fa410fb Implement the Index and IndexMut traits for Table 2017-11-19 10:39:17 +01:00
Philipp Oppermann
62d655fbdd Create a memory::paging::table submodule with a Table struct 2017-11-19 10:39:17 +01:00
Philipp Oppermann
fe464463eb Add pointed_frame and set methods to Entry 2017-11-19 10:39:17 +01:00
Philipp Oppermann
148d506629 Use bitflags to create an EntryFlags type 2017-11-19 10:39:17 +01:00
Philipp Oppermann
f957f2ecde Add a dependency on the bitflags crate 2017-11-19 10:39:17 +01:00
Philipp Oppermann
c4c27c10e6 Create a memory::paging::entry submodule with an Entry struct 2017-11-19 10:39:17 +01:00
Philipp Oppermann
c7c02d7dca Create a memory::paging module with a Page struct 2017-11-19 10:39:17 +01:00
Philipp Oppermann
9f84e37e1b Update Readme for “Page Tables” post 2017-11-19 10:39:17 +01:00
8 changed files with 323 additions and 10 deletions

View File

@@ -11,3 +11,5 @@ rlibc = "1.0"
volatile = "0.1.0"
spin = "0.4.5"
multiboot2 = "0.1.0"
bitflags = "0.7.0"
x86_64 = "0.1.2"

View File

@@ -1,7 +1,7 @@
# Blog OS (Allocating Frames)
[![Build Status](https://travis-ci.org/phil-opp/blog_os.svg?branch=post_5)](https://travis-ci.org/phil-opp/blog_os/branches)
# Blog OS (Page Tables)
[![Build Status](https://travis-ci.org/phil-opp/blog_os.svg?branch=post_6)](https://travis-ci.org/phil-opp/blog_os/branches)
This repository contains the source code for the [Allocating Frames](http://os.phil-opp.com/allocating-frames.html) post of the [Writing an OS in Rust](http://os.phil-opp.com) series.
This repository contains the source code for the [Page Tables](http://os.phil-opp.com/modifying-page-tables.html) post of the [Writing an OS in Rust](http://os.phil-opp.com) series.
**Check out the [master branch](https://github.com/phil-opp/blog_os) for more information.**

View File

@@ -85,6 +85,11 @@ check_long_mode:
jmp error
set_up_page_tables:
; map P4 table recursively
mov eax, p4_table
or eax, 0b11 ; present + writable
mov [p4_table + 511 * 8], eax
; map first P4 entry to P3 table
mov eax, p3_table
or eax, 0b11 ; present + writable

View File

@@ -8,6 +8,9 @@ extern crate rlibc;
extern crate volatile;
extern crate spin;
extern crate multiboot2;
#[macro_use]
extern crate bitflags;
extern crate x86_64;
#[macro_use]
mod vga_buffer;
@@ -50,13 +53,6 @@ pub extern fn rust_main(multiboot_information_address: usize) {
kernel_start as usize, kernel_end as usize, multiboot_start,
multiboot_end, memory_map_tag.memory_areas());
for i in 0.. {
if let None = frame_allocator.allocate_frame() {
println!("allocated {} frames", i);
break;
}
}
loop{}
}

View File

@@ -1,6 +1,8 @@
pub use self::area_frame_allocator::AreaFrameAllocator;
use self::paging::PhysicalAddress;
mod area_frame_allocator;
mod paging;
pub const PAGE_SIZE: usize = 4096;
@@ -13,6 +15,10 @@ impl Frame {
fn containing_address(address: usize) -> Frame {
Frame{ number: address / PAGE_SIZE }
}
fn start_address(&self) -> PhysicalAddress {
self.number * PAGE_SIZE
}
}
pub trait FrameAllocator {

View File

@@ -0,0 +1,47 @@
use memory::Frame;
/// A 64-bit x86_64 page table entry: bits 12..52 hold the 4 KiB-aligned
/// physical frame address (see the 0x000fffff_fffff000 mask below), the
/// remaining bits are flags (`EntryFlags`).
pub struct Entry(u64);
impl Entry {
    /// An entry is unused iff every bit (address and flags) is zero.
    pub fn is_unused(&self) -> bool {
        self.0 == 0
    }

    /// Clear the entry so its slot can be reused.
    pub fn set_unused(&mut self) {
        self.0 = 0;
    }

    /// Extract the flag bits; address bits are dropped by `from_bits_truncate`.
    pub fn flags(&self) -> EntryFlags {
        EntryFlags::from_bits_truncate(self.0)
    }

    /// The physical frame this entry points to, or `None` when the
    /// `PRESENT` flag is not set.
    pub fn pointed_frame(&self) -> Option<Frame> {
        if !self.flags().contains(PRESENT) {
            return None;
        }
        // Bits 12..52 of the entry are the frame's physical start address.
        let frame_start = self.0 as usize & 0x000fffff_fffff000;
        Some(Frame::containing_address(frame_start))
    }

    /// Point this entry at `frame` with the given `flags`.
    pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
        let frame_start = frame.start_address();
        // The address must be page aligned and fit into bits 12..52.
        assert!(frame_start & !0x000fffff_fffff000 == 0);
        self.0 = (frame_start as u64) | flags.bits();
    }
}
bitflags! {
    /// Flag bits of an x86_64 page table entry.
    pub flags EntryFlags: u64 {
        const PRESENT =         1 << 0,  // the entry maps a frame / table
        const WRITABLE =        1 << 1,  // writes allowed through this entry
        const USER_ACCESSIBLE = 1 << 2,  // accessible from ring 3
        const WRITE_THROUGH =   1 << 3,  // write-through caching
        const NO_CACHE =        1 << 4,  // disable caching entirely
        const ACCESSED =        1 << 5,  // set by the CPU on access
        const DIRTY =           1 << 6,  // set by the CPU on write
        const HUGE_PAGE =       1 << 7,  // 2MiB page in P2, 1GiB page in P3
        const GLOBAL =          1 << 8,  // keep in TLB across address-space switch
        const NO_EXECUTE =      1 << 63, // forbid instruction fetch
    }
}

157
src/memory/paging/mod.rs Normal file
View File

@@ -0,0 +1,157 @@
pub use self::entry::*;
use core::ptr::Unique;
use memory::{PAGE_SIZE, Frame, FrameAllocator};
use self::table::{Table, Level4};
mod entry;
mod table;
/// Number of entries per page table (x86_64: 512 entries x 8 bytes = 4 KiB).
const ENTRY_COUNT: usize = 512;

pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;

/// A 4 KiB virtual page, identified by its page number
/// (virtual address / PAGE_SIZE).
pub struct Page {
    number: usize,
}

impl Page {
    /// Returns the page containing `address`.
    ///
    /// Panics on non-canonical addresses: valid x86_64 addresses lie below
    /// 0x0000_8000_0000_0000 or at/above 0xffff_8000_0000_0000 (bits 48..64
    /// must be sign-extension copies of bit 47).
    pub fn containing_address(address: VirtualAddress) -> Page {
        assert!(address < 0x0000_8000_0000_0000 ||
                address >= 0xffff_8000_0000_0000,
                "invalid address: 0x{:x}", address);
        Page { number: address / PAGE_SIZE }
    }

    /// First virtual address of this page.
    fn start_address(&self) -> usize {
        self.number * PAGE_SIZE
    }

    // The four table indices are consecutive 9-bit slices of the page number
    // (0o777 == 0x1ff masks out 9 bits).
    fn p4_index(&self) -> usize {
        (self.number >> 27) & 0o777
    }

    fn p3_index(&self) -> usize {
        (self.number >> 18) & 0o777
    }

    fn p2_index(&self) -> usize {
        (self.number >> 9) & 0o777
    }

    fn p1_index(&self) -> usize {
        (self.number >> 0) & 0o777
    }
}
/// The currently active page table hierarchy.
///
/// All accesses go through the recursive P4 mapping (entry 511 of the P4
/// table points back to the P4 frame), which only works for the *active*
/// table — hence the methods live on this struct rather than being free
/// functions.
pub struct ActivePageTable {
    // Unique rather than a raw pointer: there must be only one owner.
    p4: Unique<Table<Level4>>,
}

impl ActivePageTable {
    /// Unsafe: the caller must guarantee that the P4 table is recursively
    /// mapped and that at most one `ActivePageTable` instance exists.
    pub unsafe fn new() -> ActivePageTable {
        ActivePageTable {
            p4: Unique::new_unchecked(table::P4),
        }
    }

    /// Shared borrow of the P4 table (safe: `self` owns the unique pointer).
    fn p4(&self) -> &Table<Level4> {
        unsafe { self.p4.as_ref() }
    }

    /// Mutable borrow of the P4 table.
    fn p4_mut(&mut self) -> &mut Table<Level4> {
        unsafe { self.p4.as_mut() }
    }

    /// Translate a virtual address to the physical address it is mapped to,
    /// or `None` if it is not mapped.
    pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
        // Offset within the page is preserved by the translation.
        let offset = virtual_address % PAGE_SIZE;
        self.translate_page(Page::containing_address(virtual_address))
            .map(|frame| frame.number * PAGE_SIZE + offset)
    }

    /// Walk P4 -> P3 -> P2 -> P1 for `page`; falls back to `huge_page` for
    /// 1GiB (P3-level) and 2MiB (P2-level) mappings.
    fn translate_page(&self, page: Page) -> Option<Frame> {
        use self::entry::HUGE_PAGE;

        let p3 = self.p4().next_table(page.p4_index());

        // Invoked (via or_else below) only when the normal 4-level walk
        // fails, i.e. some level has no child table.
        let huge_page = || {
            p3.and_then(|p3| {
                let p3_entry = &p3[page.p3_index()];
                // 1GiB page?
                if let Some(start_frame) = p3_entry.pointed_frame() {
                    if p3_entry.flags().contains(HUGE_PAGE) {
                        // address must be 1GiB aligned
                        assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
                        return Some(Frame {
                            number: start_frame.number + page.p2_index() *
                                    ENTRY_COUNT + page.p1_index(),
                        });
                    }
                }
                if let Some(p2) = p3.next_table(page.p3_index()) {
                    let p2_entry = &p2[page.p2_index()];
                    // 2MiB page?
                    if let Some(start_frame) = p2_entry.pointed_frame() {
                        if p2_entry.flags().contains(HUGE_PAGE) {
                            // address must be 2MiB aligned
                            assert!(start_frame.number % ENTRY_COUNT == 0);
                            return Some(Frame {
                                number: start_frame.number + page.p1_index()
                            });
                        }
                    }
                }
                None
            })
        };

        p3.and_then(|p3| p3.next_table(page.p3_index()))
            .and_then(|p2| p2.next_table(page.p2_index()))
            .and_then(|p1| p1[page.p1_index()].pointed_frame())
            .or_else(huge_page)
    }

    /// Map `page` to `frame` with `flags` (PRESENT is added implicitly),
    /// allocating any missing intermediate tables from `allocator`.
    /// Panics if the P1 slot is already in use.
    pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags,
                     allocator: &mut A)
        where A: FrameAllocator
    {
        let p4 = self.p4_mut();
        let mut p3 = p4.next_table_create(page.p4_index(), allocator);
        let mut p2 = p3.next_table_create(page.p3_index(), allocator);
        let mut p1 = p2.next_table_create(page.p2_index(), allocator);

        assert!(p1[page.p1_index()].is_unused());
        p1[page.p1_index()].set(frame, flags | PRESENT);
    }

    /// Map `page` to any free frame taken from `allocator`.
    pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let frame = allocator.allocate_frame().expect("out of memory");
        self.map_to(page, frame, flags, allocator)
    }

    /// Map the page at `frame`'s address to `frame` itself
    /// (virtual address == physical address).
    pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let page = Page::containing_address(frame.start_address());
        self.map_to(page, frame, flags, allocator)
    }

    /// Remove the mapping for `page` and flush its TLB entry.
    /// Panics if the page is unmapped or backed by a huge page.
    fn unmap<A>(&mut self, page: Page, allocator: &mut A)
        where A: FrameAllocator
    {
        use x86_64::instructions::tlb;
        use x86_64::VirtualAddress;

        assert!(self.translate(page.start_address()).is_some());

        let p1 = self.p4_mut()
            .next_table_mut(page.p4_index())
            .and_then(|p3| p3.next_table_mut(page.p3_index()))
            .and_then(|p2| p2.next_table_mut(page.p2_index()))
            .expect("mapping code does not support huge pages");
        let frame = p1[page.p1_index()].pointed_frame().unwrap();
        p1[page.p1_index()].set_unused();
        // Without this flush the CPU could keep using the stale translation.
        tlb::flush(VirtualAddress(page.start_address()));
        // TODO free p(1,2,3) table if empty
        // `frame` is leaked for now; `allocator` will reclaim it here later:
        //allocator.deallocate_frame(frame);
    }
}

100
src/memory/paging/table.rs Normal file
View File

@@ -0,0 +1,100 @@
use core::marker::PhantomData;
use core::ops::{Index, IndexMut};
use memory::FrameAllocator;
use memory::paging::entry::*;
use memory::paging::ENTRY_COUNT;
/// Virtual address of the P4 table itself, reachable through the recursive
/// 511th P4 entry (all four 9-bit index fields set to 511, offset 0).
pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;

/// A page table of `ENTRY_COUNT` (512) entries; `L` is a type-level marker
/// recording which level of the hierarchy this table belongs to.
pub struct Table<L: TableLevel> {
    entries: [Entry; ENTRY_COUNT],
    // Zero-sized: makes Table<Level4> and Table<Level3> distinct types
    // without any runtime cost.
    level: PhantomData<L>,
}
impl<L> Table<L> where L: TableLevel {
    /// Reset every entry in the table to the unused (all-zero) state.
    pub fn zero(&mut self) {
        for entry in &mut self.entries {
            entry.set_unused();
        }
    }
}
// Methods below exist only on P4..P2 tables (HierarchicalLevel), so the type
// system prevents asking a P1 table for a "next" table.
impl<L> Table<L> where L: HierarchicalLevel {
    /// Virtual address of the child table behind `self[index]`, computed via
    /// the recursive mapping: shifting this table's own address left by 9
    /// bits and inserting `index` in bits 12..21 descends exactly one level.
    /// Returns `None` for unmapped entries and for huge pages (which have no
    /// child table).
    fn next_table_address(&self, index: usize) -> Option<usize> {
        let entry_flags = self[index].flags();
        if entry_flags.contains(PRESENT) && !entry_flags.contains(HUGE_PAGE) {
            let table_address = self as *const _ as usize;
            Some((table_address << 9) | (index << 12))
        } else {
            None
        }
    }

    /// Shared reference to the child table behind `self[index]`, if any.
    pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
        self.next_table_address(index)
            .map(|address| unsafe { &*(address as *const _) })
    }

    /// Mutable reference to the child table behind `self[index]`, if any.
    pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
        self.next_table_address(index)
            .map(|address| unsafe { &mut *(address as *mut _) })
    }

    /// Like `next_table_mut`, but allocates and zeroes a fresh child table
    /// (marked PRESENT | WRITABLE) when the entry is unused.
    /// Panics if the entry is a huge page or allocation fails.
    pub fn next_table_create<A>(&mut self,
                                index: usize,
                                allocator: &mut A)
                                -> &mut Table<L::NextLevel>
        where A: FrameAllocator
    {
        if self.next_table(index).is_none() {
            assert!(!self.entries[index].flags().contains(HUGE_PAGE),
                    "mapping code does not support huge pages");
            let frame = allocator.allocate_frame().expect("no frames available");
            self.entries[index].set(frame, PRESENT | WRITABLE);
            // Zero through the recursive mapping so stale data can't be
            // misread as valid entries.
            self.next_table_mut(index).unwrap().zero();
        }
        self.next_table_mut(index).unwrap()
    }
}
impl<L> Index<usize> for Table<L> where L: TableLevel {
    type Output = Entry;

    /// Read-only access to the entry in slot `idx` (panics when out of range).
    fn index(&self, idx: usize) -> &Entry {
        &self.entries[idx]
    }
}

impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
    /// Mutable access to the entry in slot `idx` (panics when out of range).
    fn index_mut(&mut self, idx: usize) -> &mut Entry {
        &mut self.entries[idx]
    }
}
/// Marker trait implemented by all four table-level marker types.
pub trait TableLevel {}

// Uninhabited marker types: used only at the type level, never instantiated.
pub enum Level4 {}
pub enum Level3 {}
pub enum Level2 {}
pub enum Level1 {}

impl TableLevel for Level4 {}
impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}

/// Levels that have a child table (P4..P2). `Level1` deliberately does not
/// implement this, so `next_table*` cannot be called on a P1 table.
pub trait HierarchicalLevel: TableLevel {
    type NextLevel: TableLevel;
}

impl HierarchicalLevel for Level4 {
    type NextLevel = Level3;
}

impl HierarchicalLevel for Level3 {
    type NextLevel = Level2;
}

impl HierarchicalLevel for Level2 {
    type NextLevel = Level1;
}