diff --git a/Cargo.toml b/Cargo.toml
index 1470612e..7eb63f79 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,10 +12,3 @@ spin = "0.3.4"
[dependencies.multiboot2]
git = "https://github.com/phil-opp/multiboot2-elf64"
-
-[dependencies.x86]
-git = "https://github.com/gz/rust-x86"
-
-[dependencies.bitflags]
-git = "https://github.com/phil-opp/bitflags.git"
-branch = "no_std"
diff --git a/src/arch/x86_64/boot.asm b/src/arch/x86_64/boot.asm
index 7035fc17..2f5295d7 100644
--- a/src/arch/x86_64/boot.asm
+++ b/src/arch/x86_64/boot.asm
@@ -42,11 +42,6 @@ start:
jmp gdt64.code:long_mode_start
setup_page_tables:
- ; recursive map P4
- mov eax, p4_table
- or eax, 0b11 ; present + writable
- mov [p4_table + 511 * 8], eax
-
; map first P4 entry to P3 table
mov eax, p3_table
or eax, 0b11 ; present + writable
@@ -156,7 +151,7 @@ p3_table:
p2_table:
resb 4096
stack_bottom:
- resb 4096 * 2
+ resb 4096
stack_top:
section .rodata
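
The assembly removed above installed the recursive mapping: P4 entry 511 pointed back at the P4 frame itself, which made every page table reachable through fixed virtual addresses (the P4 table itself at 0xffffffff_fffff000). A minimal standalone sketch of that address arithmetic, with a hypothetical `p4_table_address` helper that is not part of the kernel:

    // Why entry 511 puts the P4 table at 0xffffffff_fffff000: all four
    // 9-bit index fields select 511, the page offset is 0, and bits 48..64
    // must sign-extend bit 47 on x86_64.
    fn canonicalize(addr: u64) -> u64 {
        ((addr << 16) as i64 >> 16) as u64
    }

    fn p4_table_address() -> u64 {
        const IDX: u64 = 511;
        canonicalize((IDX << 39) | (IDX << 30) | (IDX << 21) | (IDX << 12))
    }

    fn main() {
        assert_eq!(p4_table_address(), 0xffffffff_fffff000);
        println!("P4 visible at {:#x}", p4_table_address());
    }

With the recursive entry gone from boot.asm, nothing left in the tree depends on these fixed addresses.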
diff --git a/src/lib.rs b/src/lib.rs
index 49ffa893..e0ae38fc 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -13,16 +13,12 @@
// limitations under the License.
#![feature(no_std, lang_items)]
-#![feature(const_fn, unique, core_str_ext, iter_cmp, optin_builtin_traits)]
-#![feature(core_slice_ext)]
+#![feature(const_fn, unique, core_str_ext, iter_cmp)]
#![no_std]
extern crate rlibc;
extern crate spin;
extern crate multiboot2;
-extern crate x86;
-#[macro_use]
-extern crate bitflags;
#[macro_use]
mod vga_buffer;
@@ -61,23 +57,13 @@ pub extern fn rust_main(multiboot_information_address: usize) {
let mut frame_allocator = memory::AreaFrameAllocator::new(kernel_start as usize,
kernel_end as usize, multiboot_start, multiboot_end, memory_map_tag.memory_areas());
-
- // println!("outer {}", {println!("inner"); "NO DEADLOCK"});
- /*println!("{:?}", memory::paging::translate::translate(0));*/
-
- println!("{:?}", memory::paging::translate::translate(0));
- println!("{:?}", memory::paging::translate::translate(0x40000000));
- println!("{:?}", memory::paging::translate::translate(0x40000000 - 1));
- println!("{:?}", memory::paging::translate::translate(0xdeadbeaa000));
- println!("{:?}", memory::paging::translate::translate(0xcafebeaf000));
- memory::paging::test(&mut frame_allocator);
- println!("{:x}", memory::paging::translate::translate(0xdeadbeaa000).unwrap());
- println!("{:x}", memory::paging::translate::translate(0xdeadbeab000).unwrap());
- println!("{:x}", memory::paging::translate::translate(0xdeadbeac000).unwrap());
- println!("{:x}", memory::paging::translate::translate(0xdeadbead000).unwrap());
- println!("{:x}", memory::paging::translate::translate(0xcafebeaf000).unwrap());
-
-
+ for i in 0.. {
+ use memory::FrameAllocator;
+ if let None = frame_allocator.allocate_frame() {
+ println!("allocated {} frames", i);
+ break;
+ }
+ }
loop{}
}
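
The replacement loop in rust_main simply drains the allocator until `allocate_frame` returns `None` and reports the count. A minimal model of the `FrameAllocator` interface it exercises (trait and method names match src/memory/mod.rs; the bump allocator below is a toy stand-in, not the kernel's `AreaFrameAllocator`):

    pub struct Frame { number: usize }

    pub trait FrameAllocator {
        fn allocate_frame(&mut self) -> Option<Frame>;
        fn deallocate_frame(&mut self, frame: Frame);
    }

    // Toy allocator: hands out frame numbers next..end, never reclaims.
    struct BumpAllocator { next: usize, end: usize }

    impl FrameAllocator for BumpAllocator {
        fn allocate_frame(&mut self) -> Option<Frame> {
            if self.next < self.end {
                self.next += 1;
                Some(Frame { number: self.next - 1 })
            } else {
                None // exhausted: the kernel loop breaks and prints here
            }
        }
        fn deallocate_frame(&mut self, _frame: Frame) {}
    }

    fn main() {
        let mut allocator = BumpAllocator { next: 0, end: 1024 };
        let mut allocated = 0;
        while allocator.allocate_frame().is_some() { allocated += 1; }
        println!("allocated {} frames", allocated); // 1024
    }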
diff --git a/src/memory/mod.rs b/src/memory/mod.rs
index 7bf5033d..7bfc85d8 100644
--- a/src/memory/mod.rs
+++ b/src/memory/mod.rs
@@ -1,7 +1,5 @@
pub use self::area_frame_allocator::AreaFrameAllocator;
-use self::paging::PhysicalAddress;
-pub mod paging;
mod area_frame_allocator;
pub const PAGE_SIZE: usize = 4096;
@@ -13,11 +11,7 @@ pub struct Frame {
impl Frame {
fn containing_address(address: usize) -> Frame {
- Frame { number: address / PAGE_SIZE }
- }
-
- fn start_address(&self) -> PhysicalAddress {
- self.number * PAGE_SIZE
+ Frame{ number: address / PAGE_SIZE }
}
}
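
`containing_address` is plain integer division by the page size; the removed `start_address` was its inverse. A quick numeric check (standalone sketch, invented address):

    const PAGE_SIZE: usize = 4096;

    fn main() {
        let address: usize = 0xdeadbeef;
        let number = address / PAGE_SIZE;   // Frame::containing_address
        let start = number * PAGE_SIZE;     // the removed start_address()
        assert_eq!(number, 0xdeadb);
        assert_eq!(start, 0xdeadb000);
        println!("frame {:#x} starts at {:#x}", number, start);
    }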
diff --git a/src/memory/paging/entry.rs b/src/memory/paging/entry.rs
deleted file mode 100644
index ce6f2c82..00000000
--- a/src/memory/paging/entry.rs
+++ /dev/null
@@ -1,46 +0,0 @@
-use memory::Frame;
-use memory::paging::PhysicalAddress;
-
-pub struct Entry(u64);
-
-impl Entry {
- pub fn is_unused(&self) -> bool {
- self.0 == 0
- }
-
- pub fn set_unused(&mut self) {
- self.0 = 0;
- }
-
- pub fn flags(&self) -> EntryFlags {
- EntryFlags::from_bits_truncate(self.0)
- }
-
- pub fn pointed_frame(&self) -> Option<Frame> {
- if self.flags().contains(PRESENT) {
- Some(Frame::containing_address(self.0 as usize & 0x000fffff_fffff000))
- } else {
- None
- }
- }
-
- pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
- assert!(frame.start_address() & !0x000fffff_fffff000 == 0);
- self.0 = (frame.start_address() as u64) | flags.bits();
- }
-}
-
-bitflags! {
- flags EntryFlags: u64 {
- const PRESENT = 1 << 0,
- const WRITABLE = 1 << 1,
- const USER_ACCESSIBLE = 1 << 2,
- const WRITE_THROUGH = 1 << 3,
- const NO_CACHE = 1 << 4,
- const ACCESSED = 1 << 5,
- const DIRTY = 1 << 6,
- const HUGE_PAGE = 1 << 7,
- const GLOBAL = 1 << 8,
- const NO_EXECUTE = 1 << 63,
- }
-}
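
Since the commit also drops the bitflags dependency, the flag constants above disappear with the file. The same decoding can be sketched with plain constants (hypothetical, dependency-free; not restored kernel code):

    // Decode a raw x86_64 page-table entry without the bitflags crate.
    const PRESENT: u64 = 1 << 0;
    const WRITABLE: u64 = 1 << 1;
    const ADDRESS_MASK: u64 = 0x000fffff_fffff000; // physical address, bits 12..52

    struct Entry(u64);

    impl Entry {
        fn is_present(&self) -> bool {
            self.0 & PRESENT != 0
        }
        // Mirrors the deleted pointed_frame(): an address only if PRESENT is set.
        fn pointed_address(&self) -> Option<u64> {
            if self.is_present() { Some(self.0 & ADDRESS_MASK) } else { None }
        }
    }

    fn main() {
        let entry = Entry(0x1234_5000 | PRESENT | WRITABLE);
        assert_eq!(entry.pointed_address(), Some(0x1234_5000));
        println!("entry points to {:#x}", entry.pointed_address().unwrap());
    }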
diff --git a/src/memory/paging/mapping.rs b/src/memory/paging/mapping.rs
deleted file mode 100644
index faa7c602..00000000
--- a/src/memory/paging/mapping.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-use memory::Frame;
-use super::Page;
-use super::entry::{EntryFlags, PRESENT};
-use memory::FrameAllocator;
-use super::table::P4;
-
-pub fn map<A>(page: &Page, flags: EntryFlags, allocator: &mut A)
- where A: FrameAllocator
-{
- let frame = allocator.allocate_frame().expect("out of memory");
- map_to(page, frame, flags, allocator)
-}
-
-pub fn map_to<A>(page: &Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
- where A: FrameAllocator
-{
- let p4 = unsafe { &mut *P4 };
- let mut p3 = p4.next_table_create(page.p4_index(), allocator);
- let mut p2 = p3.next_table_create(page.p3_index(), allocator);
- let mut p1 = p2.next_table_create(page.p2_index(), allocator);
-
- assert!(p1[page.p1_index()].is_unused());
- p1[page.p1_index()].set(frame, flags | PRESENT);
-}
diff --git a/src/memory/paging/mod.rs b/src/memory/paging/mod.rs
deleted file mode 100644
index 9f6c12fb..00000000
--- a/src/memory/paging/mod.rs
+++ /dev/null
@@ -1,225 +0,0 @@
-use core::ptr::Unique;
-use memory::{PAGE_SIZE, Frame, FrameAllocator};
-use self::table::{Table, Level4};
-use self::entry::*;
-
-mod entry;
-mod table;
-pub mod translate;
-pub mod mapping;
-
-pub fn test<A>(frame_allocator: &mut A)
- where A: super::FrameAllocator
-{
- use self::entry::PRESENT;
- mapping::map(&Page::containing_address(0xdeadbeaa000),
- PRESENT,
- frame_allocator);
- mapping::map(&Page::containing_address(0xdeadbeab000),
- PRESENT,
- frame_allocator);
- mapping::map(&Page::containing_address(0xdeadbeac000),
- PRESENT,
- frame_allocator);
- mapping::map(&Page::containing_address(0xdeadbead000),
- PRESENT,
- frame_allocator);
- mapping::map(&Page::containing_address(0xcafebeaf000),
- PRESENT,
- frame_allocator);
- mapping::map(&Page::containing_address(0x0), PRESENT, frame_allocator);
-}
-
-const ENTRY_COUNT: usize = 512;
-
-pub type PhysicalAddress = usize;
-pub type VirtualAddress = usize;
-
-pub struct Page {
- number: usize,
-}
-
-impl Page {
- fn containing_address(address: VirtualAddress) -> Page {
- assert!(address < 0x0000_8000_0000_0000 || address >= 0xffff_8000_0000_0000,
- "invalid address: 0x{:x}",
- address);
- Page { number: address / PAGE_SIZE }
- }
-
- fn start_address(&self) -> VirtualAddress {
- self.number * PAGE_SIZE
- }
-
- fn p4_index(&self) -> usize {
- (self.number >> 27) & 0o777
- }
- fn p3_index(&self) -> usize {
- (self.number >> 18) & 0o777
- }
- fn p2_index(&self) -> usize {
- (self.number >> 9) & 0o777
- }
- fn p1_index(&self) -> usize {
- (self.number >> 0) & 0o777
- }
-}
-
-pub struct RecursivePageTable {
- p4: Unique<Table<Level4>>,
-}
-
-impl RecursivePageTable {
- pub unsafe fn new() -> RecursivePageTable {
- use self::table::P4;
- RecursivePageTable {
- p4: Unique::new(P4),
- }
- }
-
- fn p4(&self) -> &Table<Level4> {
- unsafe { self.p4.get() }
- }
-
- fn p4_mut(&mut self) -> &mut Table<Level4> {
- unsafe { self.p4.get_mut() }
- }
-
- pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
- let offset = virtual_address % PAGE_SIZE;
- self.translate_page(Page::containing_address(virtual_address))
- .map(|frame| frame.number * PAGE_SIZE + offset)
- }
-
- fn translate_page(&self, page: Page) -> Option<Frame> {
- let p3 = self.p4().next_table(page.p4_index());
-
- let huge_page = || {
- p3.and_then(|p3| {
- let p3_entry = &p3[page.p3_index()];
- // 1GiB page?
- if let Some(start_frame) = p3_entry.pointed_frame() {
- if p3_entry.flags().contains(HUGE_PAGE) {
- // address must be 1GiB aligned
- assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
- return Some(Frame {
- number: start_frame.number + page.p2_index() * ENTRY_COUNT +
- page.p1_index(),
- });
- }
- }
- if let Some(p2) = p3.next_table(page.p3_index()) {
- let p2_entry = &p2[page.p2_index()];
- // 2MiB page?
- if let Some(start_frame) = p2_entry.pointed_frame() {
- if p2_entry.flags().contains(HUGE_PAGE) {
- // address must be 2MiB aligned
- assert!(start_frame.number % ENTRY_COUNT == 0);
- return Some(Frame { number: start_frame.number + page.p1_index() });
- }
- }
- }
- None
- })
- };
-
- p3.and_then(|p3| p3.next_table(page.p3_index()))
- .and_then(|p2| p2.next_table(page.p2_index()))
- .and_then(|p1| p1[page.p1_index()].pointed_frame())
- .or_else(huge_page)
- }
-
- pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
- where A: FrameAllocator
- {
- let frame = allocator.allocate_frame().expect("out of memory");
- self.map_to(page, frame, flags, allocator)
- }
-
- pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
- where A: FrameAllocator
- {
- let mut p3 = self.p4_mut().next_table_create(page.p4_index(), allocator);
- let mut p2 = p3.next_table_create(page.p3_index(), allocator);
- let mut p1 = p2.next_table_create(page.p2_index(), allocator);
-
- assert!(!p1[page.p1_index()].flags().contains(PRESENT));
- p1[page.p1_index()].set(frame, flags | PRESENT);
- }
-
- pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
- where A: FrameAllocator
- {
- let page = Page::containing_address(frame.start_address());
- self.map_to(page, frame, flags, allocator)
- }
-
-
- fn unmap<A>(&mut self, page: Page, allocator: &mut A)
- where A: FrameAllocator
- {
- use x86::tlb;
-
- assert!(self.translate(page.start_address()).is_some());
-
- let p1 = self.p4_mut()
- .next_table_mut(page.p4_index())
- .and_then(|p3| p3.next_table_mut(page.p3_index()))
- .and_then(|p2| p2.next_table_mut(page.p2_index()))
- .expect("mapping code does not support huge pages");
- let frame = p1[page.p1_index()].pointed_frame().unwrap();
- p1[page.p1_index()].set_unused();
- unsafe { tlb::flush(page.start_address()) };
- // TODO free p(1,2,3) table if empty
- allocator.deallocate_frame(frame);
- }
-}
-
-pub struct InactivePageTable {
- p4_frame: Frame, // recursive mapped
-}
-
-impl InactivePageTable {
- pub fn create_new_on_identity_mapped_frame(&self,
- identity_mapped_frame: Frame)
- -> InactivePageTable {
- let page_address = Page { number: identity_mapped_frame.number }.start_address();
- // frame must be identity mapped
- assert!(self.read(|lock| lock.translate(page_address)) == Some(page_address));
-
- let table = unsafe { &mut *(page_address as *mut Table<Level4>) };
- table[511].set(Frame { number: identity_mapped_frame.number }, WRITABLE);
- InactivePageTable { p4_frame: identity_mapped_frame }
- }
-
- pub fn read<F, R>(&self, f: F) -> R
- where F: FnOnce(&RecursivePageTable) -> R
- {
- self.activate_temporary(|pt| f(pt))
- }
-
- pub fn modify<F>(&mut self, f: F)
- where F: FnOnce(&mut RecursivePageTable)
- {
- self.activate_temporary(f)
- }
-
- fn activate_temporary<F, R>(&self, f: F) -> R
- where F: FnOnce(&mut RecursivePageTable) -> R
- {
- use memory::paging::table::P4;
-
- let mut page_table = RecursivePageTable { p4: unsafe { Unique::new(P4) } };
-
- let backup = page_table.p4()[511].pointed_frame().unwrap();
- if backup == self.p4_frame {
- f(&mut page_table)
- } else {
- page_table.p4_mut()[511]
- .set(Frame { number: self.p4_frame.number }, PRESENT | WRITABLE);
- let ret = f(&mut page_table);
- page_table.p4_mut()[511].set(backup, PRESENT | WRITABLE);
- ret
- }
- }
-}
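
The deleted `Page` helpers slice the 36-bit page number into four 9-bit table indices. A standalone check of that arithmetic against one of the test addresses mapped in rust_main (reassembling the indices must reproduce the page-aligned address):

    const PAGE_SIZE: usize = 4096;

    fn main() {
        let address: usize = 0xdeadbeaa000; // one of the mapped test pages
        let number = address / PAGE_SIZE;
        let p4 = (number >> 27) & 0o777;
        let p3 = (number >> 18) & 0o777;
        let p2 = (number >> 9) & 0o777;
        let p1 = number & 0o777;
        // The four indices and the 12-bit offset fully determine the address.
        let rebuilt = (p4 << 39) | (p3 << 30) | (p2 << 21) | (p1 << 12);
        assert_eq!(rebuilt, address);
        println!("P4={} P3={} P2={} P1={}", p4, p3, p2, p1);
    }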
diff --git a/src/memory/paging/table.rs b/src/memory/paging/table.rs
deleted file mode 100644
index e81f671d..00000000
--- a/src/memory/paging/table.rs
+++ /dev/null
@@ -1,105 +0,0 @@
-use memory::FrameAllocator;
-use memory::paging::ENTRY_COUNT;
-use memory::paging::entry::*;
-use core::ops::{Index, IndexMut};
-use core::marker::PhantomData;
-
-pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;
-
-pub struct Table<L: TableLevel> {
- entries: [Entry; ENTRY_COUNT],
- level: PhantomData<L>,
-}
-
-impl<L> Table<L> where L: TableLevel
-{
- pub fn zero(&mut self) {
- for entry in self.entries.iter_mut() {
- entry.set_unused();
- }
- }
-}
-
-impl<L> Table<L> where L: HierachicalLevel
-{
- pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
- self.next_table_address(index).map(|t| unsafe { &*(t as *const _) })
- }
-
- pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
- self.next_table_address(index).map(|t| unsafe { &mut *(t as *mut _) })
- }
-
- pub fn next_table_create<A>(&mut self,
- index: usize,
- allocator: &mut A)
- -> &mut Table<L::NextLevel>
- where A: FrameAllocator
- {
- if self.next_table(index).is_none() {
- assert!(!self.entries[index].flags().contains(HUGE_PAGE),
- "mapping code does not support huge pages");
- let frame = allocator.allocate_frame().expect("no frames available");
- self.entries[index].set(frame, PRESENT | WRITABLE);
- self.next_table_mut(index).unwrap().zero();
- }
- self.next_table_mut(index).unwrap()
- }
-
- fn next_table_address(&self, index: usize) -> Option<usize> {
- let entry_flags = self[index].flags();
- if entry_flags.contains(PRESENT) && !entry_flags.contains(HUGE_PAGE) {
- let table_address = self as *const _ as usize;
- Some((table_address << 9) | (index << 12))
- } else {
- None
- }
- }
-}
-
-impl<L> Index<usize> for Table<L> where L: TableLevel
-{
- type Output = Entry;
-
- fn index(&self, index: usize) -> &Entry {
- &self.entries[index]
- }
-}
-
-impl<L> IndexMut<usize> for Table<L> where L: TableLevel
-{
- fn index_mut(&mut self, index: usize) -> &mut Entry {
- &mut self.entries[index]
- }
-}
-
-pub trait TableLevel {}
-
-pub enum Level4 {}
-#[allow(dead_code)]
-enum Level3 {}
-#[allow(dead_code)]
-enum Level2 {}
-#[allow(dead_code)]
-enum Level1 {}
-
-impl TableLevel for Level4 {}
-impl TableLevel for Level3 {}
-impl TableLevel for Level2 {}
-impl TableLevel for Level1 {}
-
-trait HierachicalLevel: TableLevel {
- type NextLevel: TableLevel;
-}
-
-impl HierachicalLevel for Level4 {
- type NextLevel = Level3;
-}
-
-impl HierachicalLevel for Level3 {
- type NextLevel = Level2;
-}
-
-impl HierachicalLevel for Level2 {
- type NextLevel = Level1;
-}
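
The core trick in the deleted table.rs is `next_table_address`: with the recursive mapping installed, shifting a table's virtual address left by 9 and ORing in `index << 12` yields the virtual address of the child table. A standalone check starting from the fixed P4 address:

    fn main() {
        const P4: u64 = 0xffffffff_fffff000; // recursive-mapped P4 address
        let index = 0u64;
        // next_table_address: (table_address << 9) | (index << 12)
        let p3 = (P4 << 9) | (index << 12);
        // Same as the canonical address with indices (511, 511, 511, 0).
        assert_eq!(p3, 0xffffffff_ffe00000);
        println!("P4[0]'s P3 table is visible at {:#x}", p3);
    }

Each step down the hierarchy drops the topmost 511 field and appends the child's index at the bottom, which is exactly what the shift-and-OR performs; the sign bits carry along because the recursive entry sits in the higher half.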
diff --git a/src/memory/paging/translate.rs b/src/memory/paging/translate.rs
deleted file mode 100644
index 73dda190..00000000
--- a/src/memory/paging/translate.rs
+++ /dev/null
@@ -1,50 +0,0 @@
-use super::{VirtualAddress, PhysicalAddress, Page, ENTRY_COUNT};
-use super::table::P4;
-use super::entry::HUGE_PAGE;
-use memory::{PAGE_SIZE, Frame};
-
-pub fn translate(virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
- let offset = virtual_address % PAGE_SIZE;
- translate_page(Page::containing_address(virtual_address))
- .map(|frame| frame.number * PAGE_SIZE + offset)
-}
-
-fn translate_page(page: Page) -> Option<Frame> {
- let p4 = unsafe { &*P4 };
-
- let huge_page = || {
- p4.next_table(page.p4_index())
- .and_then(|p3| {
- let p3_entry = &p3[page.p3_index()];
- // 1GiB page?
- if let Some(start_frame) = p3_entry.pointed_frame() {
- if p3_entry.flags().contains(HUGE_PAGE) {
- // address must be 1GiB aligned
- assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
- return Some(Frame {
- number: start_frame.number + page.p2_index() * ENTRY_COUNT +
- page.p1_index(),
- });
- }
- }
- if let Some(p2) = p3.next_table(page.p3_index()) {
- let p2_entry = &p2[page.p2_index()];
- // 2MiB page?
- if let Some(start_frame) = p2_entry.pointed_frame() {
- if p2_entry.flags().contains(HUGE_PAGE) {
- // address must be 2MiB aligned
- assert!(start_frame.number % ENTRY_COUNT == 0);
- return Some(Frame { number: start_frame.number + page.p1_index() });
- }
- }
- }
- None
- })
- };
-
- p4.next_table(page.p4_index())
- .and_then(|p3| p3.next_table(page.p3_index()))
- .and_then(|p2| p2.next_table(page.p2_index()))
- .and_then(|p1| p1[page.p1_index()].pointed_frame())
- .or_else(huge_page)
-}
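
For huge pages, the deleted walk computes the final frame by offsetting into the large frame run: `start + p2_index * 512 + p1_index` for 1 GiB pages and `start + p1_index` for 2 MiB pages, after which `translate` re-adds the 12-bit page offset. A standalone numeric check of the 2 MiB case with invented addresses:

    const PAGE_SIZE: usize = 4096;
    const ENTRY_COUNT: usize = 512;

    fn main() {
        // Assume a 2 MiB page maps virtual 0x40000000 onto physical 0x80000000.
        let virt: usize = 0x40000000 + 0x1234; // somewhere inside that page
        let start_frame = 0x80000000 / PAGE_SIZE;
        assert!(start_frame % ENTRY_COUNT == 0);   // the 2 MiB alignment assert
        let p1_index = (virt / PAGE_SIZE) & 0o777; // 4 KiB frame within the run
        let frame = start_frame + p1_index;        // translate_page's formula
        let phys = frame * PAGE_SIZE + virt % PAGE_SIZE; // translate's last step
        assert_eq!(phys, 0x80000000 + 0x1234);
        println!("{:#x} -> {:#x}", virt, phys);
    }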