Compare commits

...

21 Commits

Author SHA1 Message Date
Philipp Oppermann
ca42c45c5e Switch to new multiboot2 crate 2015-11-12 17:06:07 +01:00
Philipp Oppermann
a4835b6778 Init allocator to make allocations work 2015-10-10 00:51:56 +02:00
Philipp Oppermann
d790fc20e5 Add an allocator 2015-10-09 17:52:00 +02:00
Philipp Oppermann
97a0da1bb9 Add spinlock dependency 2015-10-09 17:50:43 +02:00
Philipp Oppermann
3b762cafe0 Add unmap assertion 2015-10-09 15:16:36 +02:00
Philipp Oppermann
51949e80eb Remove old paging code 2015-10-09 15:14:22 +02:00
Philipp Oppermann
430181b9d7 Fix frame stack filling 2015-10-09 15:13:51 +02:00
Philipp Oppermann
bb593c2f63 WIP 2015-10-08 19:18:23 +02:00
Philipp Oppermann
f59b6c03d6 Print panic message 2015-10-08 18:31:16 +02:00
Philipp Oppermann
abd6e48c08 Add a dynamic growing frame stack 2015-10-08 01:54:46 +02:00
Philipp Oppermann
10ddcead2d wip 2015-09-15 14:57:09 +02:00
Philipp Oppermann
d27d36fdd0 WIP: Add paging and section remapping code 2015-09-15 14:57:09 +02:00
Philipp Oppermann
8090c2a752 Page align all sections as they will be individually mapped 2015-09-15 14:57:09 +02:00
Philipp Oppermann
95d6d30c29 Merge multiboot section to .text section
All sections must be page aligned to set the right privileges, so we would have many padding bytes otherwise.
2015-09-15 14:57:08 +02:00
Philipp Oppermann
23e8270a60 Enable the no-execute feature in page tables 2015-09-15 14:57:08 +02:00
Philipp Oppermann
2655ac093c wip 2015-09-15 14:57:08 +02:00
Philipp Oppermann
812396d473 Add bitflags macro 2015-09-15 14:57:08 +02:00
Philipp Oppermann
7391a7a9eb [unfinished] Add Multiboot 2 crate and load Multiboot structure
Conflicts:
	Cargo.toml
	src/lib.rs

Conflicts:
	Cargo.toml
2015-09-15 14:57:08 +02:00
Philipp Oppermann
6713e55073 Pass Multiboot structure pointer as argument 2015-09-15 14:57:08 +02:00
Philipp Oppermann
88455c3f85 use vga buffer module for test output 2015-09-15 14:56:31 +02:00
Philipp Oppermann
a06577c685 add VGA text buffer module 2015-09-15 14:56:31 +02:00
16 changed files with 805 additions and 23 deletions


@@ -8,3 +8,14 @@ crate-type = ["staticlib"]
[dependencies]
rlibc = "*"
+ spin = "*"
+ [dependencies.bitflags]
+ git = "https://github.com/phil-opp/bitflags.git"
+ branch = "no_std"
+ [dependencies.multiboot2]
+ git = "https://github.com/phil-opp/multiboot2-elf64"
+ [dependencies.allocator]
+ path = "src/memory/alloc/allocator"


@@ -32,7 +32,7 @@ clean:
@rm -rf build
run: $(iso)
- @qemu-system-x86_64 -hda $(iso)
+ @qemu-system-x86_64 -s -hda $(iso)
iso: $(iso)


@@ -19,6 +19,7 @@ section .text
bits 32
start:
mov esp, stack_top
+ mov edi, ebx
call check_multiboot
call check_cpuid
@@ -47,6 +48,11 @@ setup_page_tables:
; map first P3 entry to a huge page that starts at address 0
mov dword [p3_table], 0b10000011 ; present + writable + huge
+ ; recursive map P4
+ mov eax, p4_table
+ or eax, 0b11 ; present + writable
+ mov [p4_table + 511 * 8], eax
ret
enable_paging:
@@ -62,7 +68,8 @@ enable_paging:
; set the long mode bit in the EFER MSR (model specific register)
mov ecx, 0xC0000080
rdmsr
- or eax, 1 << 8
+ or eax, 1 << 8 ; enable long mode
+ or eax, 1 << 11 ; enable no-execute bit in page tables
wrmsr
; enable paging in the cr0 register
@@ -132,7 +139,9 @@ p4_table:
p3_table:
resb 4096
stack_bottom:
- resb 64
+ ; TODO a >= 80 byte stack is enough. Theoretically we could use the memory
+ ; of the p3 table as a hack (it won't override the important first entry)
+ resb 4096
stack_top:
section .rodata


@@ -19,14 +19,26 @@ ENTRY(start)
SECTIONS {
. = 1M;
- .boot :
- {
+ .text BLOCK(4k) : {
/* ensure that the multiboot header is at the beginning */
KEEP(*(.multiboot))
+ *(.text .text.*)
}
- .text :
- {
- *(.text)
+ .rodata BLOCK(4k) : {
+ *(.rodata .rodata.*)
+ }
+ .data BLOCK(4k) : {
+ *(.data .data.*)
+ }
+ .data.rel.ro BLOCK(4k) : {
+ *(.data.rel.ro.local*) *(.data.rel.ro .data.rel.ro.*)
+ }
+ .gcc_except_table BLOCK(4k) : {
+ *(.gcc_except_table .gcc_except_table.*)
}
}


@@ -20,7 +20,7 @@ bits 64
long_mode_start:
call setup_SSE
- ; call rust main
+ ; call rust main (with multiboot pointer in rdi)
call rust_main
.os_returned:
; rust main returned, print `OS returned!`


@@ -12,27 +12,47 @@
// See the License for the specific language governing permissions and
// limitations under the License.
- #![feature(no_std, lang_items)]
- #![feature(core_slice_ext, core_str_ext, core_intrinsics)]
+ #![feature(no_std, lang_items, asm)]
+ #![feature(core_str_ext, const_fn, range_inclusive)]
+ #![feature(unique, core_intrinsics, alloc)]
+ #![feature(box_syntax)]
#![no_std]
extern crate rlibc;
+ extern crate alloc;
+ extern crate allocator;
+ extern crate multiboot2;
+ #[macro_use]
+ extern crate bitflags;
+ extern crate spin;
- use core::intrinsics::offset;
+ use core::fmt::Write;
+ #[macro_use]
+ mod vga_buffer;
+ mod memory;
#[no_mangle]
- pub extern fn rust_main() {
+ pub extern fn rust_main(multiboot_address: usize) {
// ATTENTION: we have a very small stack and no guard page
- let x = ["Hello", " ", "World", "!"];
- let screen_pointer = 0xb8000 as *const u16;
+ use vga_buffer::{Writer, Color};
+ use alloc::boxed::Box;
- for (byte, i) in x.iter().flat_map(|s| s.bytes()).zip(0..) {
- let c = 0x1f00 | (byte as u16);
- unsafe {
- let screen_char = offset(screen_pointer, i) as *mut u16;
- *screen_char = c
- }
- }
+ vga_buffer::clear_screen();
+ let multiboot = unsafe{multiboot2::load(multiboot_address)};
+ memory::init(multiboot);
+ let mut writer = Writer::new(Color::Blue, Color::LightGreen);
+ writer.write_byte(b'H');
+ let _ = writer.write_str("ello! ");
+ let _ = write!(writer, "The numbers are {} and {}", 42, 1.0/3.0);
+ println!("");
+ println!("{} {}", "line", 1);
+ print!("line {}", 2);
+ Box::new(42);
+ box [42; 25000000];
loop{}
}
@@ -43,4 +63,8 @@ extern fn eh_personality() {}
#[cfg(not(test))]
#[lang = "panic_fmt"]
- extern fn panic_fmt() -> ! {loop{}}
+ extern fn panic_fmt(fmt: core::fmt::Arguments, file: &str, line: u32) -> ! {
+ println!("\n\nPANIC in {} at line {}:", file, line);
+ println!("{}", fmt);
+ loop{}
+ }

src/memory/alloc/allocator/Cargo.lock generated Normal file

@@ -0,0 +1,4 @@
[root]
name = "allocator"
version = "0.1.0"


@@ -0,0 +1,4 @@
[package]
name = "allocator"
version = "0.1.0"
authors = ["Philipp Oppermann <dev@phil-opp.com>"]


@@ -0,0 +1,14 @@
#![feature(no_std, allocator)]
#![no_std]
#![allocator]
#![allow(improper_ctypes)]
extern {
fn __rust_allocate(size: usize, align: usize) -> *mut u8;
fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize);
fn __rust_reallocate(ptr: *mut u8, old_size: usize, size: usize,
align: usize) -> *mut u8;
fn __rust_reallocate_inplace(ptr: *mut u8, old_size: usize, size: usize,
align: usize) -> usize;
fn __rust_usable_size(size: usize, align: usize) -> usize;
}

src/memory/alloc/mod.rs Normal file

@@ -0,0 +1,81 @@
use memory::paging::{self, Page, Mapper};
use memory::frame_allocator::{FrameAllocator, DynamicFrameStack};
use core::iter::range_inclusive;
use rlibc::memcpy;
use spin::Mutex;
static ALLOCATOR: Mutex<Option<Allocator>> = Mutex::new(None);
const HEAD_BOTTOM: usize = 0o_001_000_000_000_0000;
struct Allocator {
heap_top: usize,
last_mapped_page: Page,
lock: paging::Lock,
frame_stack: DynamicFrameStack,
}
impl Allocator {
pub fn allocate(&mut self, size: usize, align: usize) -> *mut u8 {
println!("allocate {} bytes (align {})", size, align); //loop{}
let start_address = align_up(self.heap_top, align);
let end_address = start_address + size;
let end_page = Page::containing_address(end_address - 1).number;
let last_mapped_page = self.last_mapped_page.number;
if end_page > last_mapped_page {
for page in range_inclusive(last_mapped_page + 1, end_page).map(|n| Page{number: n}) {
let mut mapper = self.lock.mapper(&mut self.frame_stack);
mapper.map(page, true, false)
}
self.last_mapped_page.number = end_page;
}
self.heap_top = end_address;
start_address as *mut u8
}
pub fn reallocate(&mut self, ptr: *mut u8, old_size: usize, size: usize,
align: usize) -> *mut u8
{
let new_ptr = self.allocate(size, align);
unsafe{ memcpy(new_ptr, ptr, old_size) };
new_ptr
}
pub fn deallocate(&mut self, ptr: *mut u8, old_size: usize, align: usize) {
//TODO
}
}
fn align_up(addr: usize, align: usize) -> usize {
if addr % align == 0 {
addr
} else {
addr + align - (addr % align)
}
}
pub fn init(mut lock: paging::Lock, mut frame_stack: DynamicFrameStack) {
let last_mapped_page = Page::containing_address(HEAD_BOTTOM);
{
let mut mapper = lock.mapper(&mut frame_stack);
mapper.map(last_mapped_page, true, false);
}
*ALLOCATOR.lock() = Some(Allocator {
heap_top: HEAD_BOTTOM,
last_mapped_page: last_mapped_page,
lock: lock,
frame_stack: frame_stack,
})
}
#[no_mangle]
pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
ALLOCATOR.lock().as_mut().expect("no allocator").allocate(size, align)
}
#[no_mangle]
pub extern fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
ALLOCATOR.lock().as_mut().expect("no allocator").deallocate(ptr, old_size, align)
}


@@ -0,0 +1,79 @@
use core::ptr::Unique;
use core::mem;
use memory::paging;
pub type Frame = super::Frame;
pub type Page = super::paging::Page;
pub trait FrameAllocator {
fn allocate_frame(&mut self, lock: &mut paging::Lock) -> Option<Frame>;
fn deallocate_frame(&mut self, lock: &mut paging::Lock, frame: Frame);
}
pub struct DynamicFrameStack {
head: Unique<Frame>, // TODO invariant
length: usize,
capacity: usize,
}
impl DynamicFrameStack {
pub fn new(at: Page) -> DynamicFrameStack {
DynamicFrameStack {
head: unsafe{ Unique::new(at.pointer() as *mut () as *mut _) },
length: 0,
capacity: Self::capacity_per_frame(),
}
}
fn capacity_per_frame() -> usize {
(super::PAGE_SIZE as usize) / mem::size_of::<Frame>()
}
}
impl FrameAllocator for DynamicFrameStack {
fn allocate_frame(&mut self, lock: &mut paging::Lock) -> Option<Frame> {
use core::intrinsics::offset;
if self.length == 0 {
// no frames left but maybe we can decrease the capacity and use that frame (but keep
// at least 1 frame because the paging logic might need some frames to map a page)
if self.capacity <= Self::capacity_per_frame() {
None
} else {
// decrease capacity and thus free a frame used as backing store
self.capacity -= Self::capacity_per_frame();
let page_address = unsafe{ offset(*self.head, self.capacity as isize) } as usize;
lock.mapper(self).unmap(Page::containing_address(page_address));
self.allocate_frame(lock)
}
} else {
// pop the last frame from the stack
self.length -= 1;
unsafe {
let frame = offset(*self.head, self.length as isize) as *mut _;
Some(mem::replace(&mut *frame, mem::zeroed()))
}
}
}
fn deallocate_frame(&mut self, lock: &mut paging::Lock, frame: Frame) {
use core::intrinsics::offset;
if self.length < self.capacity {
// add frame to frame stack
unsafe {
let new_frame = offset(*self.head, self.length as isize) as *mut _;
mem::forget(mem::replace(&mut *new_frame, frame));
}
self.length += 1;
} else {
// frame stack is full, use passed frame to expand it
let page_address = unsafe{ offset(*self.head, self.capacity as isize) } as usize;
unsafe {
lock.mapper(self).map_to(Page::containing_address(page_address), frame,
true, false);
}
self.capacity += Self::capacity_per_frame();
}
}
}

src/memory/mod.rs Normal file

@@ -0,0 +1,150 @@
use multiboot2::Multiboot;
use self::paging::Page;
use self::frame_allocator::{FrameAllocator, DynamicFrameStack};
mod alloc;
mod paging;
mod frame_allocator;
mod tlb;
pub const PAGE_SIZE: u64 = 4096;
pub fn init(multiboot: &Multiboot) {
// ATTENTION: we have a very small stack and no guard page
use core::cmp::max;
use self::frame_allocator::FrameAllocator;
let kernel_end = multiboot.elf_sections_tag().unwrap().sections().map(|s| s.addr + s.size).max()
.unwrap() as usize;
let multiboot_end = multiboot as *const _ as usize + multiboot.total_size as usize;
let mut bump_pointer = BumpPointer::new(max(kernel_end, multiboot_end));
let mut lock = unsafe{ paging::Lock::new() };
let new_p4_frame = bump_pointer.allocate_frame(&mut lock).expect("failed allocating
new_p4_frame");
unsafe{lock.begin_new_table_on_identity_mapped_frame(new_p4_frame)};
identity_map_kernel_sections(multiboot, lock.mapper(&mut bump_pointer));
lock.activate_current_table();
let frame_stack = init_core_map(multiboot, &mut lock, bump_pointer);
let maximal_memory = multiboot.memory_area_tag().unwrap().memory_areas().map(
|area| area.base_addr + area.length).max().unwrap();
println!("maximal_memory: 0x{:x}", maximal_memory);
alloc::init(lock, frame_stack);
}
fn identity_map_kernel_sections<T>(multiboot: &Multiboot, mut mapper: paging::Mapper<T>)
where T: frame_allocator::FrameAllocator,
{
use core::iter::range_inclusive;
for section in multiboot.elf_sections_tag().expect("no section tag").sections() {
let in_memory = section.flags & 0x2 != 0;
let writable = section.flags & 0x1 != 0;
let executable = section.flags & 0x4 != 0;
if !in_memory {
continue;
}
println!("section at 0x{:x}, allocated: {}, writable: {}, executable: {}", section.addr,
in_memory, writable, executable);
let start_page = Page::containing_address(section.addr as usize);
let end_page = Page::containing_address((section.addr + section.size) as usize);
for page in range_inclusive(start_page.number, end_page.number)
.map(|n| Page{number: n})
{
unsafe{ mapper.identity_map(page, writable, executable) };
}
}
// identity map VGA text buffer
unsafe {
mapper.identity_map(Page::containing_address(0xb8000), true, false);
}
// identity map Multiboot structure
let multiboot_address = multiboot as *const _ as usize;
let start_page = Page::containing_address(multiboot_address);
let end_page = Page::containing_address(multiboot_address + multiboot.total_size as usize);
for page in range_inclusive(start_page.number, end_page.number).map(|n| Page{number: n}) {
unsafe{ mapper.identity_map(page, false, false) };
}
}
fn init_core_map(multiboot: &Multiboot, lock: &mut paging::Lock,
mut bump_pointer: BumpPointer) -> DynamicFrameStack
{
use core::iter::range_inclusive;
const CORE_MAP_PAGE: Page = Page{number: 0o_001_000_000};
lock.mapper(&mut bump_pointer).map(CORE_MAP_PAGE, true, false);
let mut frame_stack = DynamicFrameStack::new(CORE_MAP_PAGE);
for area in multiboot.memory_area_tag().expect("no memory tag").memory_areas() {
println!("area start {:x} length {:x}", area.base_addr, area.length);
let start_frame = Frame::containing_address(area.base_addr as usize);
let end_frame = Frame::containing_address((area.base_addr + area.length) as usize);
for frame in range_inclusive(start_frame.number, end_frame.number)
.map(|n| Frame{number:n})
{
let page = Page{number: frame.number};
if page.is_unused() && !bump_pointer.has_allocated(frame) {
frame_stack.deallocate_frame(lock, frame)
}
}
}
frame_stack
}
#[derive(Debug)]
struct BumpPointer {
first_free_frame: usize,
next_free_frame: usize,
}
impl frame_allocator::FrameAllocator for BumpPointer {
fn allocate_frame(&mut self, _: &mut paging::Lock) -> Option<Frame> {
self.allocate_frames(1)
}
fn deallocate_frame(&mut self, _: &mut paging::Lock, _: Frame) {}
}
impl BumpPointer {
fn new(kernel_end: usize) -> BumpPointer {
assert!(kernel_end > 0x100000);
let frame = ((kernel_end - 1) >> 12) + 1;
BumpPointer {
first_free_frame: frame,
next_free_frame: frame,
}
}
fn allocate_frames(&mut self, number: usize) -> Option<Frame> {
let page_number = self.next_free_frame;
self.next_free_frame += number;
Some(Frame {
number: page_number
})
}
fn has_allocated(&self, frame: Frame) -> bool {
frame.number >= self.first_free_frame && frame.number < self.next_free_frame
}
}
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Clone, Copy)]
struct Frame {
number: usize,
}
impl Frame {
fn containing_address(address: usize) -> Frame {
Frame {
number: address >> 12,
}
}
}

src/memory/paging/mod.rs Normal file

@@ -0,0 +1,75 @@
pub use self::table::Page;
use self::table::{map_to, unmap};
use memory::frame_allocator::{Frame, FrameAllocator};
pub const PAGE_SIZE: usize = 4096;
mod table;
/// The paging lock must be unique. It is required for all page table operations and thus
/// guarantees exclusive page table access.
pub struct Lock {
_private: (),
}
impl Lock {
/// Creates a new paging lock. It's unsafe because only one lock can exist at a
/// time.
pub unsafe fn new() -> Lock {
Lock {
_private: (),
}
}
/// Uses the passed frame to create a new page table that becomes the _current table_.
/// All subsequent page table operations will modify it (the _current_ table) and leave the
/// _active_ table unchanged. To activate the current table and make it the active table, use
/// the `activate_new_table` method.
/// This method assumes that the passed frame is identity mapped and is thus unsafe.
pub unsafe fn begin_new_table_on_identity_mapped_frame(&mut self, frame: Frame)
{
table::begin_new_on_identity_mapped_frame(self, frame)
}
/// Activates the _current_ table. If the current table is equal to the active table, nothing
/// changes. However, if _current_ and _active_ table are different, a new table becomes active
/// and becomes the table used by the CPU.
pub fn activate_current_table(&mut self) {
table::activate_current()
}
pub fn mapper<'a, A>(&'a mut self, allocator: &'a mut A) -> Mapper<'a, A>
where A: FrameAllocator,
{
Mapper {
lock: self,
allocator: allocator,
}
}
}
pub struct Mapper<'a, A> where A: 'a {
lock: &'a mut Lock,
allocator: &'a mut A,
}
impl<'a, A> Mapper<'a, A> where A: FrameAllocator {
pub fn map(&mut self, page: Page, writable: bool, executable: bool) {
let frame = self.allocator.allocate_frame(&mut self.lock)
.expect("no more frames available");
unsafe{ self.map_to(page, frame, writable, executable) }
}
pub fn unmap(&mut self, page: Page) {
unmap(self.lock, page, self.allocator)
}
pub unsafe fn map_to(&mut self, page: Page, frame: Frame, writable: bool, executable: bool) {
map_to(self.lock, page, frame, writable, executable, self.allocator)
}
pub unsafe fn identity_map(&mut self, page: Page, writable: bool, executable: bool) {
let frame = Frame {number: page.number};
self.map_to(page, frame, writable, executable)
}
}

src/memory/paging/table.rs Normal file

@@ -0,0 +1,182 @@
use memory::frame_allocator::FrameAllocator;
use memory::tlb;
use super::{PAGE_SIZE, Lock};
use memory::frame_allocator::Frame;
use core::intrinsics::offset;
use core::mem::size_of;
const P4: Table = Table( Page{ number: 0o_777_777_777_777} );
pub unsafe fn begin_new_on_identity_mapped_frame(_lock: &mut Lock, new_p4_frame: Frame) {
let new_p4 = &mut Table(Page{ number: new_p4_frame.number });
new_p4.zero();
new_p4.field(511).set(new_p4_frame, PRESENT | WRITABLE);
P4.field(511).set(new_p4_frame, PRESENT | WRITABLE);
tlb::flush();
}
pub fn activate_current() {
unsafe {
let p4_address: u64 = {
let field = *(0xfffffffffffffff8 as *const u64);
field & !0xfff
};
asm!("mov cr3, $0" :: "r"(p4_address) :: "intel")
}
}
pub fn map_to<A>(lock: &mut Lock, page: Page, frame: Frame, writable: bool,
executable: bool, allocator: &mut A) where A: FrameAllocator
{
let mut flags = PRESENT;
if writable {
flags = flags | WRITABLE;
}
if !executable {
flags = flags | NO_EXECUTE;
}
let p4_field = page.p4_page().field(page.p4_index());
if p4_field.is_unused() {
p4_field.set(allocator.allocate_frame(lock).expect("no more frames"), PRESENT | WRITABLE);
unsafe{page.p3_page().zero()};
}
let p3_field = page.p3_page().field(page.p3_index());
if p3_field.is_unused() {
p3_field.set(allocator.allocate_frame(lock).expect("no more frames"), PRESENT | WRITABLE);
unsafe{page.p2_page().zero()};
}
let p2_field = page.p2_page().field(page.p2_index());
if p2_field.is_unused() {
p2_field.set(allocator.allocate_frame(lock).expect("no more frames"), PRESENT | WRITABLE);
unsafe{page.p1_page().zero()};
}
let p1_field = page.p1_page().field(page.p1_index());
assert!(p1_field.is_unused());
p1_field.set(frame, flags);
}
pub fn unmap<A>(lock: &mut Lock, page: Page, allocator: &mut A) where A: FrameAllocator {
assert!(!page.is_unused());
let p1_field = page.p1_page().field(page.p1_index());
let frame = p1_field.pointed_frame();
p1_field.set_unused();
// TODO free p(1,2,3) table if empty
allocator.deallocate_frame(lock, frame);
}
/// A mapped or unmapped page
#[derive(Clone, Copy)]
pub struct Page {
pub number: usize, // TODO make private
}
impl Page {
pub fn containing_address(address: usize) -> Page {
Page {
number: (address >> 12) & 0o_777_777_777_777,
}
}
pub fn pointer(&self) -> *const () {
if self.number >= 0o400_000_000_000 {
//sign extension
((self.number << 12) | 0o177777_000_000_000_000_0000) as *const ()
} else {
(self.number << 12) as *const ()
}
}
pub fn is_unused(&self) -> bool {
self.p4_page().field(self.p4_index()).is_unused() ||
self.p3_page().field(self.p3_index()).is_unused() ||
self.p2_page().field(self.p2_index()).is_unused() ||
self.p1_page().field(self.p1_index()).is_unused()
}
fn p4_index(&self) -> usize {(self.number >> 27) & 0o777}
fn p3_index(&self) -> usize {(self.number >> 18) & 0o777}
fn p2_index(&self) -> usize {(self.number >> 9) & 0o777}
fn p1_index(&self) -> usize {(self.number >> 0) & 0o777}
fn p4_page(&self) -> Table {
P4
}
fn p3_page(&self) -> Table {
Table(Page {
number: 0o_777_777_777_000 | self.p4_index(),
})
}
fn p2_page(&self) -> Table {
Table(Page {
number: 0o_777_777_000_000 | (self.p4_index() << 9) | self.p3_index(),
})
}
fn p1_page(&self) -> Table {
Table(Page {
number: 0o_777_000_000_000 | (self.p4_index() << 18) | (self.p3_index() << 9)
| self.p2_index(),
})
}
}
/// A page table on a _mapped_ page.
struct Table(Page);
impl Table {
unsafe fn zero(&mut self) {
const ENTRIES: usize = PAGE_SIZE / 8;
let page = self.0.pointer() as *mut () as *mut [u64; ENTRIES];
*page = [0; ENTRIES];
}
fn field(&self, index: usize) -> &'static mut TableField {
assert!(index < PAGE_SIZE / size_of::<u64>());
unsafe {
let field = offset(self.0.pointer() as *const u64, index as isize);
&mut *(field as *const _ as *mut _)
}
}
}
struct TableField(u64);
impl TableField {
fn is_unused(&self) -> bool {
self.0 == 0
}
fn set_unused(&mut self) {
self.0 = 0
}
fn set(&mut self, frame: Frame, flags: TableFieldFlags) {
self.0 = (((frame.number as u64) << 12) & 0x000fffff_fffff000) | flags.bits();
}
fn pointed_frame(&self) -> Frame {
Frame {
number: ((self.0 & 0x000fffff_fffff000) >> 12) as usize,
}
}
}
bitflags! {
flags TableFieldFlags: u64 {
const PRESENT = 1 << 0,
const WRITABLE = 1 << 1,
const USER_ACCESSIBLE = 1 << 2,
const WRITE_THROUGH = 1 << 3,
const NO_CACHE = 1 << 4,
const ACCESSED = 1 << 5,
const DIRTY = 1 << 6,
const OTHER1 = 1 << 9,
const OTHER2 = 1 << 10,
const NO_EXECUTE = 1 << 63,
}
}

src/memory/tlb.rs Normal file

@@ -0,0 +1,4 @@
pub fn flush() {
unsafe{asm!("mov rax, cr3
mov cr3, rax" ::: "{rax}" : "intel")}
}

src/vga_buffer.rs Normal file

@@ -0,0 +1,133 @@
const BUFFER_HEIGHT: usize = 25;
const BUFFER_WIDTH: usize = 80;
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ($crate::vga_buffer::_print(format_args!($($arg)*)));
}
#[macro_export]
macro_rules! println {
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
pub fn clear_screen() {
for _ in 0..BUFFER_HEIGHT {
println!("");
}
}
pub fn _print(fmt: ::core::fmt::Arguments) {
use core::fmt::Write;
static mut WRITER: Writer = Writer::new(Color::LightGreen, Color::Black);
let _ = unsafe{WRITER.write_fmt(fmt)};
}
pub struct Writer {
column_position: usize,
color_code: ColorCode,
}
impl Writer {
pub const fn new(foreground: Color, background: Color) -> Writer {
Writer {
column_position: 0,
color_code: ColorCode::new(foreground, background),
}
}
pub fn write_byte(&mut self, byte: u8) {
const NEWLINE: u8 = b'\n';
match byte {
NEWLINE => self.new_line(),
byte => {
if self.column_position >= BUFFER_WIDTH {
self.new_line()
}
let row = BUFFER_HEIGHT - 1;
let col = self.column_position;
Self::buffer().chars[row][col] = ScreenChar {
ascii_character: byte,
color_code: self.color_code,
};
self.column_position += 1;
}
}
}
fn buffer() -> &'static mut Buffer {
const BUFFER: *mut Buffer = 0xb8000 as *mut _;
unsafe{&mut *BUFFER}
}
fn new_line(&mut self) {
let buffer = Self::buffer();
for row in 0..(BUFFER_HEIGHT-1) {
buffer.chars[row] = buffer.chars[row + 1]
}
self.clear_row(BUFFER_HEIGHT-1);
self.column_position = 0;
}
fn clear_row(&mut self, row: usize) {
let blank = ScreenChar {
ascii_character: ' ' as u8,
color_code: self.color_code,
};
Self::buffer().chars[row] = [blank; BUFFER_WIDTH];
}
}
impl ::core::fmt::Write for Writer {
fn write_str(&mut self, s: &str) -> ::core::fmt::Result {
for byte in s.bytes() {
self.write_byte(byte)
}
Ok(())
}
}
#[derive(Clone, Copy)]
#[allow(dead_code)]
#[repr(u8)]
pub enum Color {
Black = 0,
Blue = 1,
Green = 2,
Cyan = 3,
Red = 4,
Magenta = 5,
Brown = 6,
LightGray = 7,
DarkGray = 8,
LightBlue = 9,
LightGreen = 10,
LightCyan = 11,
LightRed = 12,
Pink = 13,
Yellow = 14,
White = 15,
}
#[derive(Clone, Copy)]
struct ColorCode(u8);
impl ColorCode {
const fn new(foreground: Color, background: Color) -> ColorCode {
ColorCode((background as u8) << 4 | (foreground as u8))
}
}
#[derive(Clone, Copy)]
#[repr(packed)]
struct ScreenChar {
ascii_character: u8,
color_code: ColorCode,
}
struct Buffer {
chars: [[ScreenChar; BUFFER_WIDTH]; BUFFER_HEIGHT],
}