diff --git a/src/memory/core_map.rs b/src/memory/frame_allocator.rs
similarity index 62%
rename from src/memory/core_map.rs
rename to src/memory/frame_allocator.rs
index 673ba1e9..ea7255d0 100644
--- a/src/memory/core_map.rs
+++ b/src/memory/frame_allocator.rs
@@ -1,8 +1,14 @@
use core::ptr::Unique;
use core::mem;
-use super::{Page, FrameStack};
+use memory::paging;
pub type Frame = super::Frame;
+pub type Page = super::paging::Page;
+
+pub trait FrameAllocator {
+ fn allocate_frame(&mut self, lock: &mut paging::Lock) -> Option<Frame>;
+ fn deallocate_frame(&mut self, lock: &mut paging::Lock, frame: Frame);
+}
pub struct DynamicFrameStack {
head: Unique<Frame>, // TODO invariant
@@ -11,11 +17,11 @@ pub struct DynamicFrameStack {
}
impl DynamicFrameStack {
- pub fn new(at: *mut Frame) -> DynamicFrameStack {
+ pub fn new(at: Page) -> DynamicFrameStack {
DynamicFrameStack {
- head: unsafe{ Unique::new(at) },
+ head: unsafe{ Unique::new(at.pointer() as *mut () as *mut _) },
length: 0,
- capacity: 0,
+ capacity: Self::capacity_per_frame(),
}
}
@@ -24,10 +30,33 @@ impl DynamicFrameStack {
}
}
-impl FrameStack for DynamicFrameStack {
- fn push<F>(&mut self, frame: Frame, map_to: F)
- where F: FnOnce(Page, Frame),
- {
+impl FrameAllocator for DynamicFrameStack {
+ fn allocate_frame(&mut self, lock: &mut paging::Lock) -> Option<Frame> {
+ use core::intrinsics::offset;
+
+ if self.length == 0 {
+ // no frames left but maybe we can decrease the capacity and use that frame (but keep
+ // at least 1 frame because the paging logic might need some frames to map a page)
+ if self.capacity <= Self::capacity_per_frame() {
+ None
+ } else {
+ // decrease capacity and thus free a frame used as backing store
+ self.capacity -= Self::capacity_per_frame();
+ let page_address = unsafe{ offset(*self.head, self.capacity as isize) } as usize;
+ lock.mapper(self).unmap(Page::containing_address(page_address));
+ self.allocate_frame(lock)
+ }
+ } else {
+ // pop the last frame from the stack
+ self.length -= 1;
+ unsafe {
+ let frame = offset(*self.head, self.length as isize) as *mut _;
+ Some(mem::replace(&mut *frame, mem::zeroed()))
+ }
+ }
+ }
+
+ fn deallocate_frame(&mut self, lock: &mut paging::Lock, frame: Frame) {
use core::intrinsics::offset;
if self.length < self.capacity {
@@ -40,34 +69,8 @@ impl FrameStack for DynamicFrameStack {
} else {
// frame stack is full, use passed frame to expand it
let page_address = unsafe{ offset(*self.head, self.capacity as isize) } as usize;
- map_to(Page::containing_address(page_address), frame);
+ lock.mapper(self).map_to(Page::containing_address(page_address), frame, true, false);
self.capacity += Self::capacity_per_frame();
}
}
-
- fn pop<F>(&mut self, unmap_page: F) -> Option<Frame>
- where F: FnOnce(Page) -> Frame,
- {
- use core::intrinsics::offset;
-
- if self.length == 0 {
- // no frames left but maybe we can decrease the capacity and use that frame
- if self.capacity == 0 {
- None
- } else {
- // decrease capacity and thus free a frame used as backing store
- self.capacity -= Self::capacity_per_frame();
- let page_address = unsafe{ offset(*self.head, self.capacity as isize) } as usize;
- Some(unmap_page(Page::containing_address(page_address)))
-
- }
- } else {
- // pop the last frame from the stack
- self.length -= 1;
- unsafe {
- let frame = offset(*self.head, self.length as isize) as *mut _;
- Some(mem::replace(&mut *frame, mem::zeroed()))
- }
- }
- }
}
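
A note on the bookkeeping in `DynamicFrameStack`: each backing page holds `PAGE_SIZE / size_of::<Frame>()` entries (512 with 4 KiB pages and an 8-byte `Frame` on x86_64), so `capacity` always moves in steps of `capacity_per_frame()`. The sketch below models that grow/shrink logic in plain std Rust — the `Vec` is only a stand-in for the mapped backing pages, and the names mirror the diff loosely:

```rust
use std::mem;

const PAGE_SIZE: usize = 4096;

#[derive(Debug, Clone, Copy, PartialEq)]
struct Frame { number: usize }

struct FrameStackModel {
    frames: Vec<Frame>, // stand-in for entries stored on mapped pages
    capacity: usize,    // grows and shrinks in whole-page steps
}

impl FrameStackModel {
    /// How many `Frame` entries one backing page can hold.
    fn capacity_per_frame() -> usize {
        PAGE_SIZE / mem::size_of::<Frame>()
    }

    fn new() -> FrameStackModel {
        // like DynamicFrameStack::new: starts with one page's worth of capacity
        FrameStackModel { frames: Vec::new(), capacity: Self::capacity_per_frame() }
    }

    /// `deallocate_frame`: push while there is room; a push into a full stack
    /// instead turns the frame into additional backing storage.
    fn deallocate(&mut self, frame: Frame) {
        if self.frames.len() < self.capacity {
            self.frames.push(frame);
        } else {
            // kernel: map the next backing page to `frame` via the Mapper
            self.capacity += Self::capacity_per_frame();
        }
    }

    /// `allocate_frame`: pop if possible; otherwise shrink the capacity and
    /// recycle a backing page, always keeping at least one page mapped.
    fn allocate(&mut self) -> Option<Frame> {
        if let Some(frame) = self.frames.pop() {
            Some(frame)
        } else if self.capacity > Self::capacity_per_frame() {
            self.capacity -= Self::capacity_per_frame();
            // kernel: unmap the freed backing page and hand out its frame
            None // the model has no real backing frame to return
        } else {
            None // at minimum capacity, genuinely out of frames
        }
    }
}

fn main() {
    assert_eq!(FrameStackModel::capacity_per_frame(), 512); // on a 64-bit target
    let mut stack = FrameStackModel::new();
    stack.deallocate(Frame { number: 42 });
    assert_eq!(stack.allocate(), Some(Frame { number: 42 }));
    assert_eq!(stack.allocate(), None);
}
```

The `capacity > capacity_per_frame()` guard mirrors the rule in `allocate_frame` above: one backing page is always kept, because the paging code may itself need frames while mapping a page.
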
diff --git a/src/memory/mod.rs b/src/memory/mod.rs
index 41fd32ac..2ac16f02 100644
--- a/src/memory/mod.rs
+++ b/src/memory/mod.rs
@@ -1,24 +1,43 @@
use multiboot2::Multiboot;
-use core::iter::range_inclusive;
-use core::cmp::max;
+use self::paging::Page;
mod paging;
-mod core_map;
+mod frame_allocator;
+mod tlb;
pub const PAGE_SIZE: u64 = 4096;
pub fn init(multiboot: &Multiboot) {
// ATTENTION: we have a very small stack and no guard page
+ use core::cmp::max;
+ use self::frame_allocator::FrameAllocator;
let kernel_end = multiboot.elf_tag().unwrap().sections().map(|s| s.addr + s.size).max()
.unwrap() as usize;
let multiboot_end = multiboot as *const _ as usize + multiboot.total_size as usize;
- let mut allocator = FrameAllocator::new(max(kernel_end, multiboot_end));
- let mut c = unsafe{paging::Controller::new(allocator)};
+ let mut bump_pointer = BumpPointer::new(max(kernel_end, multiboot_end));
- c.begin_new_table();
+ let mut lock = unsafe{ paging::Lock::new() };
+ let new_p4_frame = bump_pointer.allocate_frame(&mut lock)
+ .expect("failed allocating new_p4_frame");
- for section in multiboot.elf_tag().unwrap().sections() {
+ unsafe{lock.begin_new_table_on_identity_mapped_frame(new_p4_frame)};
+ identity_map_kernel_sections(multiboot, lock.mapper(&mut bump_pointer));
+ lock.activate_current_table();
+
+ init_core_map(multiboot, &mut lock, bump_pointer);
+
+ let maximal_memory = multiboot.memory_area_tag().unwrap().areas().map(
+ |area| area.base_addr + area.length).max().unwrap();
+ println!("maximal_memory: 0x{:x}", maximal_memory);
+
+}
+
+
+fn identity_map_kernel_sections<A>(multiboot: &Multiboot, mut mapper: paging::Mapper<A>)
+ where A: frame_allocator::FrameAllocator
+{
+ use core::iter::range_inclusive;
+
+ for section in multiboot.elf_tag().expect("no section tag").sections() {
let in_memory = section.flags & 0x2 != 0;
let writable = section.flags & 0x1 != 0;
let executable = section.flags & 0x4 != 0;
@@ -29,48 +48,87 @@ pub fn init(multiboot: &Multiboot) {
in_memory, writable, executable);
let start_page = Page::containing_address(section.addr as usize);
let end_page = Page::containing_address((section.addr + section.size) as usize);
- for page in range_inclusive(start_page.number, end_page.number).map(|n| Page{number: n}) {
- c.identity_map(page, writable, executable);
+ for page in range_inclusive(start_page.number, end_page.number)
+ .map(|n| Page{number: n})
+ {
+ unsafe{ mapper.identity_map(page, writable, executable) };
}
}
// identity map VGA text buffer
- c.identity_map(Page{number: 0xb8}, true, false);
+ unsafe {
+ mapper.identity_map(Page::containing_address(0xb8000), true, false);
+ }
// identity map Multiboot structure
let multiboot_address = multiboot as *const _ as usize;
let start_page = Page::containing_address(multiboot_address);
let end_page = Page::containing_address(multiboot_address + multiboot.total_size as usize);
for page in range_inclusive(start_page.number, end_page.number).map(|n| Page{number: n}) {
- c.identity_map(page, false, false);
+ unsafe{ mapper.identity_map(page, false, false) };
}
-
- c.activate_new_table();
-
- let maximal_memory = multiboot.memory_area_tag().unwrap().areas().map(
- |area| area.base_addr + area.length).max().unwrap();
- println!("maximal_memory: 0x{:x}", maximal_memory);
-
- let core_map = allocator.allocate_frames((maximal_memory / paging::PAGE_SIZE) as usize);
}
-struct VirtualAddress(*const u8);
+fn init_core_map(multiboot: &Multiboot, lock: &mut paging::Lock, mut bump_pointer: BumpPointer) {
+ use core::iter::range_inclusive;
+ use self::frame_allocator::{FrameAllocator, DynamicFrameStack};
-struct FrameAllocator {
+
+ const CORE_MAP_PAGE: Page = Page{number: 0o_001_000_000};
+
+ lock.mapper(&mut bump_pointer).map(CORE_MAP_PAGE, true, false);
+ let mut frame_stack = DynamicFrameStack::new(CORE_MAP_PAGE);
+
+ println!("{:?}", bump_pointer);
+
+ for area in multiboot.memory_area_tag().expect("no memory tag").areas() {
+ println!("area start {:x} length {:x}", area.base_addr, area.length);
+ let start_frame = Frame::containing_address(area.base_addr as usize);
+ let end_frame = Frame::containing_address((area.base_addr + area.length) as usize);
+ for frame in range_inclusive(start_frame.number, end_frame.number)
+ .map(|n| Frame{number:n})
+ {
+ let page = Page{number: frame.number};
+
+ if page.is_unused() && !bump_pointer.has_allocated(frame) {
+ //print!("_{:x} ", frame.number);
+ frame_stack.deallocate_frame(lock, frame)
+ } else {
+ if !page.is_unused() {
+ print!("b{} ", frame.number);
+ } else {
+ print!("+{} ", frame.number);
+ }
+ }
+ }
+ }
+ loop {
+
+ }
+}
+
+#[derive(Debug)]
+struct BumpPointer {
+ first_free_frame: usize,
next_free_frame: usize,
}
-impl FrameAllocator {
- fn new(kernel_end: usize) -> FrameAllocator {
- assert!(kernel_end > 0x100000);
- FrameAllocator {
- next_free_frame: ((kernel_end - 1) >> 12) + 1,
- }
- }
-
- fn allocate_frame(&mut self) -> Option<Frame> {
+impl frame_allocator::FrameAllocator for BumpPointer {
+ fn allocate_frame(&mut self, _: &mut paging::Lock) -> Option<Frame> {
self.allocate_frames(1)
}
+ fn deallocate_frame(&mut self, _: &mut paging::Lock, _: Frame) {}
+}
+
+impl BumpPointer {
+ fn new(kernel_end: usize) -> BumpPointer {
+ assert!(kernel_end > 0x100000);
+ let frame = ((kernel_end - 1) >> 12) + 1;
+ BumpPointer {
+ first_free_frame: frame,
+ next_free_frame: frame,
+ }
+ }
fn allocate_frames(&mut self, number: usize) -> Option<Frame> {
let page_number = self.next_free_frame;
@@ -79,21 +137,20 @@ impl FrameAllocator {
number: page_number
})
}
+
+ fn has_allocated(&self, frame: Frame) -> bool {
+ frame.number >= self.first_free_frame && frame.number < self.next_free_frame
+ }
}
-#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy)]
+#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Clone, Copy)]
struct Frame {
number: usize,
}
-#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy)]
-struct Page {
- number: usize,
-}
-
-impl Page {
- fn containing_address(address: usize) -> Page {
- Page {
+impl Frame {
+ fn containing_address(address: usize) -> Frame {
+ Frame {
number: address >> 12,
}
}
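
For reference, the arithmetic behind `BumpPointer`: a frame number is a physical address shifted right by 12 bits (the 4 KiB page shift), `new` rounds `kernel_end` up to the next frame boundary, and `has_allocated` tests the half-open range `[first_free_frame, next_free_frame)`. A self-contained check — the `kernel_end` value is made up, and the body of `allocate_frames` is reconstructed from the surrounding diff context:

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Frame { number: usize }

struct BumpPointer { first_free_frame: usize, next_free_frame: usize }

impl BumpPointer {
    fn new(kernel_end: usize) -> BumpPointer {
        // round up: the first whole frame at or after kernel_end
        let frame = ((kernel_end - 1) >> 12) + 1;
        BumpPointer { first_free_frame: frame, next_free_frame: frame }
    }

    fn allocate_frames(&mut self, number: usize) -> Option<Frame> {
        let page_number = self.next_free_frame;
        self.next_free_frame += number;
        Some(Frame { number: page_number })
    }

    // everything handed out so far lies in [first_free_frame, next_free_frame)
    fn has_allocated(&self, frame: Frame) -> bool {
        frame.number >= self.first_free_frame && frame.number < self.next_free_frame
    }
}

fn main() {
    let mut bp = BumpPointer::new(0x234567); // hypothetical kernel end
    // (0x234567 - 1) >> 12 == 0x234, so allocation starts at frame 0x235,
    // whose physical address 0x235000 is the first page past the kernel.
    let f = bp.allocate_frames(1).unwrap();
    assert_eq!(f.number, 0x235);
    assert!(bp.has_allocated(f));
    assert!(!bp.has_allocated(Frame { number: 0x236 }));
}
```
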
diff --git a/src/memory/paging/mod.rs b/src/memory/paging/mod.rs
new file mode 100644
index 00000000..c81b5ee8
--- /dev/null
+++ b/src/memory/paging/mod.rs
@@ -0,0 +1,76 @@
+pub use self::table::Page;
+
+use self::table::{map_to, unmap};
+use memory::frame_allocator::{Frame, FrameAllocator};
+
+pub const PAGE_SIZE: usize = 4096;
+
+mod table;
+
+/// The paging lock must be unique. It is required for all page table operations and thus
+/// guarantees exclusive page table access.
+pub struct Lock {
+ _private: (),
+}
+
+impl Lock {
+ /// Creates a new paging lock. It is unsafe because the caller must guarantee that only one
+ /// lock exists at a time.
+ pub unsafe fn new() -> Lock {
+ Lock {
+ _private: (),
+ }
+ }
+
+ /// Uses the passed frame to create a new page table that becomes the _current table_.
+ /// All subsequent page table operations will modify it (the _current_ table) and leave the
+ /// _active_ table unchanged. To activate the current table and make it the active table, use
+ /// the `activate_current_table` method.
+ /// This method assumes that the passed frame is identity mapped and is thus unsafe.
+ pub unsafe fn begin_new_table_on_identity_mapped_frame(&mut self, frame: Frame)
+ {
+ table::begin_new_on_identity_mapped_frame(self, frame)
+ }
+
+ /// Activates the _current_ table. If the current table is equal to the active table, nothing
+ /// changes. However, if the _current_ and _active_ tables differ, the current table becomes
+ /// active and is from then on used by the CPU.
+ pub fn activate_current_table(&mut self) {
+ table::activate_current()
+ }
+
+ pub fn mapper<'a, A>(&'a mut self, allocator: &'a mut A) -> Mapper<'a, A>
+ where A: FrameAllocator,
+ {
+ Mapper {
+ lock: self,
+ allocator: allocator,
+ }
+ }
+}
+
+pub struct Mapper<'a, A> where A: 'a {
+ lock: &'a mut Lock,
+ allocator: &'a mut A,
+}
+
+impl<'a, A> Mapper<'a, A> where A: FrameAllocator {
+ pub fn map_to(&mut self, page: Page, frame: Frame, writable: bool, executable: bool) {
+ map_to(self.lock, page, frame, writable, executable, self.allocator)
+ }
+
+ pub fn map(&mut self, page: Page, writable: bool, executable: bool) {
+ let frame = self.allocator.allocate_frame(&mut self.lock)
+ .expect("no more frames available");
+ self.map_to(page, frame, writable, executable)
+ }
+
+ pub fn unmap(&mut self, page: Page) {
+ unmap(self.lock, page, self.allocator)
+ }
+
+ pub unsafe fn identity_map(&mut self, page: Page, writable: bool, executable: bool) {
+ let frame = Frame {number: page.number};
+ self.map_to(page, frame, writable, executable)
+ }
+
+}
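
A remark on the design: `Lock::mapper` bundles the paging lock and the frame allocator behind a single pair of `&mut` borrows, so the borrow checker statically rules out a second live `Mapper`. A stripped-down illustration with toy types (not the kernel's own):

```rust
struct Lock { _private: () }
struct Allocator;

struct Mapper<'a> {
    lock: &'a mut Lock,
    allocator: &'a mut Allocator,
}

impl Lock {
    // unsafe in the kernel: the caller promises that no other lock exists
    unsafe fn new() -> Lock { Lock { _private: () } }

    fn mapper<'a>(&'a mut self, allocator: &'a mut Allocator) -> Mapper<'a> {
        Mapper { lock: self, allocator: allocator }
    }
}

impl<'a> Mapper<'a> {
    fn map(&mut self) {
        let _ = (&mut self.lock, &mut self.allocator); // would touch page tables here
    }
}

fn main() {
    let mut lock = unsafe { Lock::new() };
    let mut allocator = Allocator;

    let mut first = lock.mapper(&mut allocator);
    // Uncommenting the next line is a compile error while `first` is live:
    // let second = lock.mapper(&mut allocator); // error[E0499]: second mutable borrow
    first.map();
}
```
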
diff --git a/src/memory/paging/table.rs b/src/memory/paging/table.rs
new file mode 100644
index 00000000..4376a1d0
--- /dev/null
+++ b/src/memory/paging/table.rs
@@ -0,0 +1,181 @@
+use memory::frame_allocator::FrameAllocator;
+use memory::tlb;
+use super::{PAGE_SIZE, Lock};
+use memory::frame_allocator::Frame;
+use core::intrinsics::offset;
+use core::mem::size_of;
+
+const P4: Table = Table( Page{ number: 0o_777_777_777_777} );
+
+pub unsafe fn begin_new_on_identity_mapped_frame(_lock: &mut Lock, new_p4_frame: Frame) {
+ let new_p4 = &mut Table(Page{ number: new_p4_frame.number });
+ new_p4.zero();
+ new_p4.field(511).set(new_p4_frame, PRESENT | WRITABLE);
+
+ P4.field(511).set(new_p4_frame, PRESENT | WRITABLE);
+
+ tlb::flush();
+}
+
+pub fn activate_current() {
+ unsafe {
+ let p4_address: u64 = {
+ let field = *(0xfffffffffffffff8 as *const u64);
+ field & !0xfff
+ };
+
+ asm!("mov cr3, $0" :: "r"(p4_address) :: "intel")
+ }
+}
+
+pub fn map_to<A>(lock: &mut Lock, page: Page, frame: Frame, writable: bool,
+ executable: bool, allocator: &mut A) where A: FrameAllocator
+{
+ let mut flags = PRESENT;
+ if writable {
+ flags = flags | WRITABLE;
+ }
+ if !executable {
+ flags = flags | NO_EXECUTE;
+ }
+
+ let p4_field = page.p4_page().field(page.p4_index());
+ if p4_field.is_unused() {
+ p4_field.set(allocator.allocate_frame(lock).expect("no more frames"), PRESENT | WRITABLE);
+ unsafe{page.p3_page().zero()};
+ }
+ let p3_field = page.p3_page().field(page.p3_index());
+ if p3_field.is_unused() {
+ p3_field.set(allocator.allocate_frame(lock).expect("no more frames"), PRESENT | WRITABLE);
+ unsafe{page.p2_page().zero()};
+ }
+ let p2_field = page.p2_page().field(page.p2_index());
+ if p2_field.is_unused() {
+ p2_field.set(allocator.allocate_frame(lock).expect("no more frames"), PRESENT | WRITABLE);
+ unsafe{page.p1_page().zero()};
+ }
+ let p1_field = page.p1_page().field(page.p1_index());
+ // TODO assert!(p1_field.is_unused());
+ p1_field.set(frame, flags);
+}
+
+pub fn unmap<A>(lock: &mut Lock, page: Page, allocator: &mut A) where A: FrameAllocator {
+ // TODO assertions
+ let p1_field = page.p1_page().field(page.p1_index());
+ let frame = p1_field.pointed_frame();
+ p1_field.set_unused();
+ // TODO free p(1,2,3) table if empty
+ allocator.deallocate_frame(lock, frame);
+}
+
+
+/// A mapped or unmapped page
+pub struct Page {
+ pub number: usize, // TODO make private
+}
+
+impl Page {
+ pub fn containing_address(address: usize) -> Page {
+ Page {
+ number: (address >> 12) & 0o_777_777_777_777,
+ }
+ }
+
+ pub fn pointer(&self) -> *const () {
+ if self.number >= 0o400_000_000_000 {
+ //sign extension
+ ((self.number << 12) | 0o177777_000_000_000_000_0000) as *const ()
+ } else {
+ (self.number << 12) as *const ()
+ }
+ }
+
+ // TODO fix
+ pub fn is_unused(&self) -> bool {
+ self.p4_page().field(self.p4_index()).is_unused() ||
+ self.p3_page().field(self.p3_index()).is_unused() ||
+ self.p2_page().field(self.p2_index()).is_unused() ||
+ self.p1_page().field(self.p1_index()).is_unused()
+ }
+
+ fn p4_index(&self) -> usize {(self.number >> 27) & 0o777}
+ fn p3_index(&self) -> usize {(self.number >> 18) & 0o777}
+ fn p2_index(&self) -> usize {(self.number >> 9) & 0o777}
+ fn p1_index(&self) -> usize {(self.number >> 0) & 0o777}
+
+ fn p4_page(&self) -> Table {
+ P4
+ }
+ fn p3_page(&self) -> Table {
+ Table(Page {
+ number: 0o_777_777_777_000 | self.p4_index(),
+ })
+ }
+ fn p2_page(&self) -> Table {
+ Table(Page {
+ number: 0o_777_777_000_000 | (self.p4_index() << 9) | self.p3_index(),
+ })
+ }
+ fn p1_page(&self) -> Table {
+ Table(Page {
+ number: 0o_777_000_000_000 | (self.p4_index() << 18) | (self.p3_index() << 9)
+ | self.p2_index(),
+ })
+ }
+}
+
+/// A page table on a _mapped_ page.
+struct Table(Page);
+
+impl Table {
+ unsafe fn zero(&mut self) {
+ // a page table holds PAGE_SIZE / 8 = 512 64-bit entries
+ let page = self.0.pointer() as *mut () as *mut [u64; PAGE_SIZE / 8];
+ *page = [0; PAGE_SIZE / 8];
+ }
+
+ fn field(&self, index: usize) -> &'static mut TableField {
+ assert!(index < PAGE_SIZE / size_of::<u64>());
+ unsafe {
+ let field = offset(self.0.pointer() as *const u64, index as isize);
+ &mut *(field as *const _ as *mut _)
+ }
+ }
+}
+
+struct TableField(u64);
+
+impl TableField {
+ fn is_unused(&self) -> bool {
+ self.0 == 0
+ }
+
+ fn set_unused(&mut self) {
+ self.0 = 0
+ }
+
+ fn set(&mut self, frame: Frame, flags: TableFieldFlags) {
+ self.0 = (((frame.number as u64) << 12) & 0x000fffff_fffff000) | flags.bits();
+ }
+
+ fn pointed_frame(&self) -> Frame {
+ Frame {
+ number: ((self.0 & 0x000fffff_fffff000) >> 12) as usize,
+ }
+ }
+
+}
+
+bitflags! {
+ flags TableFieldFlags: u64 {
+ const PRESENT = 1 << 0,
+ const WRITABLE = 1 << 1,
+ const USER_ACCESSIBLE = 1 << 2,
+ const WRITE_THROUGH = 1 << 3,
+ const NO_CACHE = 1 << 4,
+ const ACCESSED = 1 << 5,
+ const DIRTY = 1 << 6,
+ const OTHER1 = 1 << 9,
+ const OTHER2 = 1 << 10,
+ const NO_EXECUTE = 1 << 63,
+ }
+}
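
The octal constants in `p1_page`/`p2_page`/`p3_page` are the recursive-mapping trick: entry 511 (`0o777`) of the P4 points back at the P4's own frame, so shifting a page number right by 9 bits and shifting in `0o777` at the top walks one table level up. A standalone check of that arithmetic:

```rust
fn p4_index(n: usize) -> usize { (n >> 27) & 0o777 }
fn p3_index(n: usize) -> usize { (n >> 18) & 0o777 }
fn p2_index(n: usize) -> usize { (n >> 9) & 0o777 }

// page numbers of the tables that map page `n`, as in table.rs
fn p1_page(n: usize) -> usize {
    0o_777_000_000_000 | (p4_index(n) << 18) | (p3_index(n) << 9) | p2_index(n)
}
fn p2_page(n: usize) -> usize {
    0o_777_777_000_000 | (p4_index(n) << 9) | p3_index(n)
}
fn p3_page(n: usize) -> usize {
    0o_777_777_777_000 | p4_index(n)
}

fn main() {
    let n = 0o_123_456_712_345; // arbitrary page number: four 9-bit indices
    // each level strips the lowest 9 index bits and prefixes the
    // recursive entry 0o777
    assert_eq!(p1_page(n), 0o_777_000_000_000 | (n >> 9));
    assert_eq!(p2_page(n), 0o_777_777_000_000 | (n >> 18));
    assert_eq!(p3_page(n), 0o_777_777_777_000 | (n >> 27));
    // the P4 itself is the fixed point of the recursion, as in `const P4`
    assert_eq!(0o_777_777_777_000 | 0o777, 0o_777_777_777_777);
}
```
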
diff --git a/src/memory/paging.rs b/src/memory/paging_old.rs
similarity index 84%
rename from src/memory/paging.rs
rename to src/memory/paging_old.rs
index bd36a891..e2b44a05 100644
--- a/src/memory/paging.rs
+++ b/src/memory/paging_old.rs
@@ -19,33 +19,30 @@ bitflags! {
}
}
-pub struct Controller<'a, A> where A: 'a {
- allocator: &'a mut A,
+pub struct Controller<A> {
+ allocator: A,
}
-impl<'a, A> Controller<'a, A> where A: FrameStack {
- pub unsafe fn new(allocator: &mut A) -> Controller<A> {
+impl<A> Controller<A> where A: FrameStack {
+ pub unsafe fn new(allocator: A) -> Controller<A> {
Controller {
allocator: allocator,
}
}
pub fn map_to(&mut self, page: Page, frame:Frame, writable: bool, executable: bool) {
- let mut flags = PRESENT;
- if writable {
- flags = flags | WRITABLE;
- }
- if !executable {
- flags = flags | NO_EXECUTE;
- }
-
- page.map_to(frame, flags, || {self.allocate_frame()})
+ Self::map_to_static(page, frame, writable, executable, &mut self.allocator)
}
pub fn identity_map(&mut self, page: Page, writable: bool, executable: bool) {
self.map_to(page, Frame{number: page.number}, writable, executable)
}
+ pub fn map(&mut self, page: Page, writable: bool, executable: bool) {
+ let frame = self.allocate_frame();
+ self.map_to(page, frame, writable, executable)
+ }
+
pub fn unmap(&mut self, page: Page) -> Frame {
page.unmap()
}
@@ -73,11 +70,31 @@ impl<'a, A> Controller<'a, A> where A: FrameStack {
}
}
+ pub unsafe fn add_free_frame(&mut self, frame: Frame) {
+ self.allocator.push(frame, Self::map_to_static)
+ }
+
fn allocate_frame(&mut self) -> Frame {
+ Self::allocate_frame_static(&mut self.allocator)
+ }
+
+ fn allocate_frame_static(allocator: &mut A) -> Frame {
let unmap_page = |page: Page| {
page.unmap()
};
- self.allocator.pop(unmap_page).expect("no more frames available")
+ allocator.pop(unmap_page).expect("no more frames available")
+ }
+
+ fn map_to_static(page: Page, frame: Frame, writable: bool, executable: bool, allocator: &mut A) {
+ let mut flags = PRESENT;
+ if writable {
+ flags = flags | WRITABLE;
+ }
+ if !executable {
+ flags = flags | NO_EXECUTE;
+ }
+
+ page.map_to(frame, flags, || {Self::allocate_frame_static(allocator)})
}
fn flush_tlb() {
@@ -99,6 +116,14 @@ struct PageIter(Page);
struct PageTableField(*const u64);
impl Page {
+ pub fn is_free(&self) -> bool {
+ let p4_field = self.p4_page().field(self.p4_index());
+ let p3_field = self.p3_page().field(self.p3_index());
+ let p2_field = self.p2_page().field(self.p2_index());
+ let p1_field = self.p1_page().field(self.p1_index());
+ p4_field.is_free() || p3_field.is_free() || p2_field.is_free() || p1_field.is_free()
+ }
+
fn from_address(address: &VirtualAddress) -> Page {
Page {
number: address.0 as usize >> 12,
diff --git a/src/memory/tlb.rs b/src/memory/tlb.rs
new file mode 100644
index 00000000..e09b498a
--- /dev/null
+++ b/src/memory/tlb.rs
@@ -0,0 +1,4 @@
+pub fn flush() {
+ unsafe{asm!("mov rax, cr3
+ mov cr3, rax" ::: "{rax}" : "intel")}
+}
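
`flush` reloads CR3, which discards all (non-global) TLB entries. When only a single mapping changed, as in `map_to` and `unmap`, x86_64 also offers `invlpg` to invalidate one entry. A hypothetical per-page variant in the same pre-1.0 `asm!` style — untested, and the exact constraint syntax here is an assumption, not code from the diff:

```rust
// hypothetical sketch: invalidate only the TLB entry for `address`
pub fn flush_page(address: usize) {
    unsafe { asm!("invlpg [$0]" :: "r"(address) : "memory" : "intel", "volatile") }
}
```
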