Compare commits


21 Commits

Author  SHA1  Message  Date
Philipp Oppermann  ca42c45c5e  Switch to new multiboot2 crate  2015-11-12 17:06:07 +01:00
Philipp Oppermann  a4835b6778  Init allocator to make allocations work  2015-10-10 00:51:56 +02:00
Philipp Oppermann  d790fc20e5  Add an allocator  2015-10-09 17:52:00 +02:00
Philipp Oppermann  97a0da1bb9  Add spinlock dependency  2015-10-09 17:50:43 +02:00
Philipp Oppermann  3b762cafe0  Add unmap assertion  2015-10-09 15:16:36 +02:00
Philipp Oppermann  51949e80eb  Remove old paging code  2015-10-09 15:14:22 +02:00
Philipp Oppermann  430181b9d7  Fix frame stack filling  2015-10-09 15:13:51 +02:00
Philipp Oppermann  bb593c2f63  WIP  2015-10-08 19:18:23 +02:00
Philipp Oppermann  f59b6c03d6  Print panic message  2015-10-08 18:31:16 +02:00
Philipp Oppermann  abd6e48c08  Add a dynamic growing frame stack  2015-10-08 01:54:46 +02:00
Philipp Oppermann  10ddcead2d  wip  2015-09-15 14:57:09 +02:00
Philipp Oppermann  d27d36fdd0  WIP: Add paging and section remapping code  2015-09-15 14:57:09 +02:00
Philipp Oppermann  8090c2a752  Page align all sections as they will be individually mapped  2015-09-15 14:57:09 +02:00
Philipp Oppermann  95d6d30c29  Merge multiboot section to .text section  2015-09-15 14:57:08 +02:00
    All sections must be page aligned to set the right privileges, so we would get many padding bytes otherwise.
Philipp Oppermann  23e8270a60  Enable the no-execute feature in page tables  2015-09-15 14:57:08 +02:00
Philipp Oppermann  2655ac093c  wip  2015-09-15 14:57:08 +02:00
Philipp Oppermann  812396d473  Add bitflags macro  2015-09-15 14:57:08 +02:00
Philipp Oppermann  7391a7a9eb  [unfinished] Add Multiboot 2 crate and load Multiboot structure  2015-09-15 14:57:08 +02:00
    Conflicts:
        Cargo.toml
        src/lib.rs
    Conflicts:
        Cargo.toml
Philipp Oppermann  6713e55073  Pass Multiboot structure pointer as argument  2015-09-15 14:57:08 +02:00
Philipp Oppermann  88455c3f85  use vga buffer module for test output  2015-09-15 14:56:31 +02:00
Philipp Oppermann  a06577c685  add VGA text buffer module  2015-09-15 14:56:31 +02:00
33 changed files with 808 additions and 1579 deletions

View File

@@ -15,4 +15,5 @@ addons:
packages:
- nasm
script: make
script:
- make

View File

@@ -1,29 +1,21 @@
[package]
authors = ["Philipp Oppermann <dev@phil-opp.com>"]
name = "blog_os"
version = "0.1.0"
[dependencies]
bit_field = "0.7.0"
bitflags = "0.7.0"
multiboot2 = "0.1.0"
once = "0.2.1"
rlibc = "0.1.4"
spin = "0.3.4"
volatile = "0.1.0"
[dependencies.hole_list_allocator]
path = "libs/hole_list_allocator"
[dependencies.x86]
default-features = false
version = "0.8.0"
authors = ["Philipp Oppermann <dev@phil-opp.com>"]
[lib]
crate-type = ["staticlib"]
[profile.dev]
panic = "abort"
[dependencies]
rlibc = "*"
spin = "*"
[profile.release]
panic = "abort"
[dependencies.bitflags]
git = "https://github.com/phil-opp/bitflags.git"
branch = "no_std"
[dependencies.multiboot2]
git = "https://github.com/phil-opp/multiboot2-elf64"
[dependencies.allocator]
path = "src/memory/alloc/allocator"

View File

@@ -1,9 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Philipp Oppermann
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,25 +1,29 @@
# Copyright 2016 Philipp Oppermann. See the README.md
# file at the top-level directory of this distribution.
# Copyright 2015 Philipp Oppermann
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
arch ?= x86_64
target ?= $(arch)-unknown-linux-gnu
kernel := build/kernel-$(arch).bin
iso := build/os-$(arch).iso
rust_os := target/$(target)/debug/libblog_os.a
rust_os := target/debug/libblog_os.a
linker_script := src/arch/$(arch)/linker.ld
grub_cfg := src/arch/$(arch)/grub.cfg
assembly_source_files := $(wildcard src/arch/$(arch)/*.asm)
assembly_object_files := $(patsubst src/arch/$(arch)/%.asm, \
build/arch/$(arch)/%.o, $(assembly_source_files))
.PHONY: all clean run debug iso cargo gdb
.PHONY: all clean run iso cargo
all: $(kernel)
@@ -28,19 +32,13 @@ clean:
@rm -rf build
run: $(iso)
@qemu-system-x86_64 -cdrom $(iso) -s
debug: $(iso)
@qemu-system-x86_64 -cdrom $(iso) -s -S
gdb:
@rust-os-gdb/bin/rust-gdb "build/kernel-x86_64.bin" -ex "target remote :1234"
@qemu-system-x86_64 -s -hda $(iso)
iso: $(iso)
$(iso): $(kernel) $(grub_cfg)
$(iso): $(kernel)
@mkdir -p build/isofiles/boot/grub
@cp $(kernel) build/isofiles/boot/kernel.bin
@cp $(kernel) build/isofiles/boot/
@cp $(grub_cfg) build/isofiles/boot/grub
@grub-mkrescue -o $(iso) build/isofiles 2> /dev/null
@rm -r build/isofiles
@@ -49,7 +47,7 @@ $(kernel): cargo $(rust_os) $(assembly_object_files) $(linker_script)
@ld -n --gc-sections -T $(linker_script) -o $(kernel) $(assembly_object_files) $(rust_os)
cargo:
@cargo build --target $(target)
@cargo rustc -- -Z no-landing-pads
# compile assembly files
build/arch/$(arch)/%.o: src/arch/$(arch)/%.asm

View File

@@ -1,14 +1 @@
# Blog OS (Better Exception Messages)
[![Build Status](https://travis-ci.org/phil-opp/blog_os.svg?branch=better_exception_messages)](https://travis-ci.org/phil-opp/blog_os/branches)
This repository contains the source code for the [Better Exception Messages](http://os.phil-opp.com/better-exception-messages.html) post of the [Writing an OS in Rust](http://os.phil-opp.com) series.
**Check out the [master branch](https://github.com/phil-opp/blog_os) for more information.**
## Building
You need to have `nasm`, `grub-mkrescue`, `xorriso`, `qemu`, and a nightly Rust compiler installed. Then you can run it using `make run`.
Please file an issue if you have any problems.
## License
The source code is dual-licensed under MIT or the Apache License (Version 2.0).
# blogOS

View File

@@ -1,2 +0,0 @@
# Generated by Cargo
/target/

View File

@@ -1,114 +0,0 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(const_fn)]
#![feature(allocator)]
#![allocator]
#![no_std]
use spin::Mutex;
extern crate spin;
pub const HEAP_START: usize = 0o_000_001_000_000_0000;
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
static BUMP_ALLOCATOR: Mutex<BumpAllocator> = Mutex::new(
BumpAllocator::new(HEAP_START, HEAP_SIZE));
#[derive(Debug)]
struct BumpAllocator {
heap_start: usize,
heap_size: usize,
next: usize,
}
impl BumpAllocator {
/// Create a new allocator, which uses the memory in the
/// range [heap_start, heap_start + heap_size).
const fn new(heap_start: usize, heap_size: usize) -> BumpAllocator {
BumpAllocator {
heap_start: heap_start,
heap_size: heap_size,
next: heap_start,
}
}
/// Allocates a block of memory with the given size and alignment.
fn allocate(&mut self, size: usize, align: usize) -> Option<*mut u8> {
let alloc_start = align_up(self.next, align);
let alloc_end = alloc_start.saturating_add(size);
if alloc_end <= self.heap_start + self.heap_size {
self.next = alloc_end;
Some(alloc_start as *mut u8)
} else {
None
}
}
}
/// Align downwards. Returns the greatest x with alignment `align`
/// so that x <= addr. The alignment must be a power of 2.
pub fn align_down(addr: usize, align: usize) -> usize {
if align.is_power_of_two() {
addr & !(align - 1)
} else if align == 0 {
addr
} else {
panic!("`align` must be a power of 2");
}
}
/// Align upwards. Returns the smallest x with alignment `align`
/// so that x >= addr. The alignment must be a power of 2.
pub fn align_up(addr: usize, align: usize) -> usize {
align_down(addr + align - 1, align)
}
#[no_mangle]
pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
BUMP_ALLOCATOR.lock().allocate(size, align).expect("out of memory")
}
#[no_mangle]
pub extern fn __rust_deallocate(_ptr: *mut u8, _size: usize,
_align: usize)
{
// just leak it
}
#[no_mangle]
pub extern fn __rust_usable_size(size: usize, _align: usize) -> usize {
size
}
#[no_mangle]
pub extern fn __rust_reallocate_inplace(_ptr: *mut u8, size: usize,
_new_size: usize, _align: usize) -> usize
{
size
}
#[no_mangle]
pub extern fn __rust_reallocate(ptr: *mut u8, size: usize, new_size: usize,
align: usize) -> *mut u8 {
use core::{ptr, cmp};
// from: https://github.com/rust-lang/rust/blob/
// c66d2380a810c9a2b3dbb4f93a830b101ee49cc2/
// src/liballoc_system/lib.rs#L98-L101
let new_ptr = __rust_allocate(new_size, align);
unsafe { ptr::copy(ptr, new_ptr, cmp::min(size, new_size)) };
__rust_deallocate(ptr, size, align);
new_ptr
}
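For reference, the alignment helpers above rely on a power-of-two bitmask trick: clearing the low bits of `addr` with `!(align - 1)` rounds down, and rounding up is rounding down after adding `align - 1`. A minimal standalone sketch (modern Rust, hypothetical demo values, not part of the diff):

/// Round `addr` down to the previous multiple of `align`.
/// `align` must be a power of two, so `align - 1` is a low-bit mask.
fn align_down(addr: usize, align: usize) -> usize {
    assert!(align.is_power_of_two());
    addr & !(align - 1)
}

/// Round `addr` up to the next multiple of `align`.
fn align_up(addr: usize, align: usize) -> usize {
    align_down(addr + align - 1, align)
}

fn main() {
    assert_eq!(align_down(0x1234, 0x1000), 0x1000);
    assert_eq!(align_up(0x1234, 0x1000), 0x2000);
    assert_eq!(align_up(0x1000, 0x1000), 0x1000); // already aligned stays put
}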

View File

@@ -1,2 +0,0 @@
# Generated by Cargo
/target/

View File

@@ -1,12 +0,0 @@
[package]
authors = ["Philipp Oppermann <dev@phil-opp.com>"]
name = "hole_list_allocator"
version = "0.1.0"
[dependencies]
linked_list_allocator = "0.2.0"
spin = "0.3.5"
[dependencies.lazy_static]
version = "0.2.1"
features = ["spin_no_std"]

View File

@@ -1,68 +0,0 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(allocator)]
#![feature(const_fn)]
#![allocator]
#![no_std]
use spin::Mutex;
use linked_list_allocator::Heap;
extern crate spin;
extern crate linked_list_allocator;
#[macro_use]
extern crate lazy_static;
pub const HEAP_START: usize = 0o_000_001_000_000_0000;
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
lazy_static! {
static ref HEAP: Mutex<Heap> = Mutex::new(unsafe {
Heap::new(HEAP_START, HEAP_SIZE)
});
}
#[no_mangle]
pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
HEAP.lock().allocate_first_fit(size, align).expect("out of memory")
}
#[no_mangle]
pub extern fn __rust_deallocate(ptr: *mut u8, size: usize, align: usize) {
unsafe { HEAP.lock().deallocate(ptr, size, align) };
}
#[no_mangle]
pub extern fn __rust_usable_size(size: usize, _align: usize) -> usize {
size
}
#[no_mangle]
pub extern fn __rust_reallocate_inplace(_ptr: *mut u8, size: usize,
_new_size: usize, _align: usize) -> usize
{
size
}
#[no_mangle]
pub extern fn __rust_reallocate(ptr: *mut u8, size: usize, new_size: usize,
align: usize) -> *mut u8 {
use core::{ptr, cmp};
// from: https://github.com/rust-lang/rust/blob/
// c66d2380a810c9a2b3dbb4f93a830b101ee49cc2/
// src/liballoc_system/lib.rs#L98-L101
let new_ptr = __rust_allocate(new_size, align);
unsafe { ptr::copy(ptr, new_ptr, cmp::min(size, new_size)) };
__rust_deallocate(ptr, size, align);
new_ptr
}

View File

@@ -1,11 +1,16 @@
; Copyright 2016 Philipp Oppermann. See the README.md
; file at the top-level directory of this distribution.
; Copyright 2015 Philipp Oppermann
;
; Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
; http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
; <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
; option. This file may not be copied, modified, or distributed
; except according to those terms.
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
global start
extern long_mode_start
@@ -14,51 +19,39 @@ section .text
bits 32
start:
mov esp, stack_top
; Move Multiboot info pointer to edi to pass it to the kernel. We must not
; modify the `edi` register until the kernel is called.
mov edi, ebx
call check_multiboot
call check_cpuid
call check_long_mode
call set_up_page_tables
call setup_page_tables
call enable_paging
call set_up_SSE
; load the 64-bit GDT
lgdt [gdt64.pointer]
; update selectors
mov ax, gdt64.data
mov ss, ax
mov ds, ax
mov es, ax
jmp gdt64.code:long_mode_start
set_up_page_tables:
; recursive map P4
mov eax, p4_table
or eax, 0b11 ; present + writable
mov [p4_table + 511 * 8], eax
setup_page_tables:
; map first P4 entry to P3 table
mov eax, p3_table
or eax, 0b11 ; present + writable
mov [p4_table], eax
; map first P3 entry to P2 table
mov eax, p2_table
; map first P3 entry to a huge page that starts at address 0
mov dword [p3_table], 0b10000011 ; present + writable + huge
; recursive map P4
mov eax, p4_table
or eax, 0b11 ; present + writable
mov [p3_table], eax
; map each P2 entry to a huge 2MiB page
mov ecx, 0 ; counter variable
.map_p2_table:
; map ecx-th P2 entry to a huge page that starts at address (2MiB * ecx)
mov eax, 0x200000 ; 2MiB
mul ecx ; start address of ecx-th page
or eax, 0b10000011 ; present + writable + huge
mov [p2_table + ecx * 8], eax ; map ecx-th entry
inc ecx ; increase counter
cmp ecx, 512 ; if counter == 512, the whole P2 table is mapped
jne .map_p2_table ; else map the next entry
mov [p4_table + 511 * 8], eax
ret
@@ -75,12 +68,14 @@ enable_paging:
; set the long mode bit in the EFER MSR (model specific register)
mov ecx, 0xC0000080
rdmsr
or eax, 1 << 8
or eax, 1 << 8 ; enable long mode
or eax, 1 << 11 ; enable no-execute bit in page tables
wrmsr
; enable paging in the cr0 register
mov eax, cr0
or eax, 1 << 31
or eax, 1 << 16
mov cr0, eax
ret
@@ -105,98 +100,57 @@ check_multiboot:
; Throw error 1 if the CPU doesn't support the CPUID command.
check_cpuid:
; Check if CPUID is supported by attempting to flip the ID bit (bit 21) in
; the FLAGS register. If we can flip it, CPUID is available.
; Copy FLAGS in to EAX via stack
pushfd
pop eax
; Copy to ECX as well for comparing later on
mov ecx, eax
; Flip the ID bit
xor eax, 1 << 21
; Copy EAX to FLAGS via the stack
push eax
popfd
; Copy FLAGS back to EAX (with the flipped bit if CPUID is supported)
pushfd
pop eax
; Restore FLAGS from the old version stored in ECX (i.e. flipping the ID bit
; back if it was ever flipped).
push ecx
popfd
; Compare EAX and ECX. If they are equal then that means the bit wasn't
; flipped, and CPUID isn't supported.
cmp eax, ecx
je .no_cpuid
ret
pushfd ; Store the FLAGS-register.
pop eax ; Restore the A-register.
mov ecx, eax ; Set the C-register to the A-register.
xor eax, 1 << 21 ; Flip the ID-bit, which is bit 21.
push eax ; Store the A-register.
popfd ; Restore the FLAGS-register.
pushfd ; Store the FLAGS-register.
pop eax ; Restore the A-register.
push ecx ; Store the C-register.
popfd ; Restore the FLAGS-register.
xor eax, ecx ; Do a XOR-operation on the A-register and the C-register.
jz .no_cpuid ; The zero flag is set, no CPUID.
ret ; CPUID is available for use.
.no_cpuid:
mov al, "1"
jmp error
; Throw error 2 if the CPU doesn't support Long Mode.
check_long_mode:
; test if extended processor info in available
mov eax, 0x80000000 ; implicit argument for cpuid
cpuid ; get highest supported argument
cmp eax, 0x80000001 ; it needs to be at least 0x80000001
jb .no_long_mode ; if it's less, the CPU is too old for long mode
; use extended info to test if long mode is available
mov eax, 0x80000001 ; argument for extended processor info
cpuid ; returns various feature bits in ecx and edx
test edx, 1 << 29 ; test if the LM-bit is set in the D-register
jz .no_long_mode ; If it's not set, there is no long mode
mov eax, 0x80000000 ; Set the A-register to 0x80000000.
cpuid ; CPU identification.
cmp eax, 0x80000001 ; Compare the A-register with 0x80000001.
jb .no_long_mode ; It is less, there is no long mode.
mov eax, 0x80000001 ; Set the A-register to 0x80000001.
cpuid ; CPU identification.
test edx, 1 << 29 ; Test if the LM-bit, bit 29, is set in the D-register.
jz .no_long_mode ; It is not set, there is no long mode.
ret
.no_long_mode:
mov al, "2"
jmp error
; Check for SSE and enable it. If it's not supported throw error "a".
set_up_SSE:
; check for SSE
mov eax, 0x1
cpuid
test edx, 1<<25
jz .no_SSE
; enable SSE
mov eax, cr0
and ax, 0xFFFB ; clear coprocessor emulation CR0.EM
or ax, 0x2 ; set coprocessor monitoring CR0.MP
mov cr0, eax
mov eax, cr4
or ax, 3 << 9 ; set CR4.OSFXSR and CR4.OSXMMEXCPT at the same time
mov cr4, eax
ret
.no_SSE:
mov al, "a"
jmp error
section .bss
align 4096
p4_table:
resb 4096
p3_table:
resb 4096
p2_table:
resb 4096
stack_bottom:
resb 4096 * 2
; TODO a >= 80 byte stack is enough. Theoretically we could use the memory
; of the p3 table as a hack (it won't override the important first entry)
resb 4096
stack_top:
section .rodata
gdt64:
dq 0 ; zero entry
.code: equ $ - gdt64 ; new
dq (1<<44) | (1<<47) | (1<<43) | (1<<53) ; code segment
dq (1<<44) | (1<<47) | (1<<41) | (1<<43) | (1<<53) ; code segment
.data: equ $ - gdt64 ; new
dq (1<<44) | (1<<47) | (1<<41) ; data segment
.pointer:
dw $ - gdt64 - 1
dq gdt64
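The GDT descriptors at the end of boot.asm are built by OR-ing individual bit positions. A small sketch that reconstructs the same two quadwords in Rust, assuming the standard x86_64 descriptor layout for the named bits (descriptor type 44, present 47, read/write 41, executable 43, 64-bit 53):

// Bit positions in a 64-bit segment descriptor.
const DESCRIPTOR_TYPE: u64 = 1 << 44; // code/data segment (not a system segment)
const PRESENT: u64 = 1 << 47;
const READ_WRITE: u64 = 1 << 41; // readable (code) / writable (data)
const EXECUTABLE: u64 = 1 << 43;
const LONG_MODE: u64 = 1 << 53; // 64-bit code segment

fn main() {
    // Matches `dq (1<<44) | (1<<47) | (1<<41) | (1<<43) | (1<<53)` above.
    let code = DESCRIPTOR_TYPE | PRESENT | READ_WRITE | EXECUTABLE | LONG_MODE;
    // Matches `dq (1<<44) | (1<<47) | (1<<41)` above.
    let data = DESCRIPTOR_TYPE | PRESENT | READ_WRITE;
    println!("code segment descriptor: {:#018x}", code);
    println!("data segment descriptor: {:#018x}", data);
}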

View File

@@ -1,16 +1,21 @@
# Copyright 2016 Philipp Oppermann. See the README.md
# file at the top-level directory of this distribution.
# Copyright 2015 Philipp Oppermann
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set timeout=0
set default=0
menuentry "my os" {
multiboot2 /boot/kernel.bin
multiboot2 /boot/kernel-x86_64.bin
boot
}

View File

@@ -1,12 +1,17 @@
/*
Copyright 2016 Philipp Oppermann. See the README.md
file at the top-level directory of this distribution.
Copyright 2015 Philipp Oppermann
Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
<LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
option. This file may not be copied, modified, or distributed
except according to those terms.
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
ENTRY(start)
@@ -14,51 +19,26 @@ ENTRY(start)
SECTIONS {
. = 1M;
.rodata :
{
.text BLOCK(4k) : {
/* ensure that the multiboot header is at the beginning */
KEEP(*(.multiboot_header))
*(.rodata .rodata.*)
. = ALIGN(4K);
}
.text :
{
KEEP(*(.multiboot))
*(.text .text.*)
. = ALIGN(4K);
}
.data :
{
.rodata BLOCK(4k) : {
*(.rodata .rodata.*)
}
.data BLOCK(4k) : {
*(.data .data.*)
. = ALIGN(4K);
}
.bss :
{
*(.bss .bss.*)
. = ALIGN(4K);
}
.got :
{
*(.got)
. = ALIGN(4K);
}
.got.plt :
{
*(.got.plt)
. = ALIGN(4K);
}
.data.rel.ro : ALIGN(4K) {
.data.rel.ro BLOCK(4k) : {
*(.data.rel.ro.local*) *(.data.rel.ro .data.rel.ro.*)
. = ALIGN(4K);
}
.gcc_except_table : ALIGN(4K) {
*(.gcc_except_table)
. = ALIGN(4K);
.gcc_except_table BLOCK(4k) : {
*(.gcc_except_table .gcc_except_table.*)
}
}

View File

@@ -1,11 +1,16 @@
; Copyright 2016 Philipp Oppermann. See the README.md
; file at the top-level directory of this distribution.
; Copyright 2015 Philipp Oppermann
;
; Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
; http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
; <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
; option. This file may not be copied, modified, or distributed
; except according to those terms.
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
global long_mode_start
extern rust_main
@@ -13,13 +18,7 @@ extern rust_main
section .text
bits 64
long_mode_start:
; load 0 into all data segment registers
mov ax, 0
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
call setup_SSE
; call rust main (with multiboot pointer in rdi)
call rust_main
@@ -32,3 +31,36 @@ long_mode_start:
mov rax, 0x4f214f644f654f6e
mov [0xb8010], rax
hlt
; Check for SSE and enable it. If it's not supported throw error "a".
setup_SSE:
; check for SSE
mov rax, 0x1
cpuid
test edx, 1<<25
jz .no_SSE
; enable SSE
mov rax, cr0
and ax, 0xFFFB ; clear coprocessor emulation CR0.EM
or ax, 0x2 ; set coprocessor monitoring CR0.MP
mov cr0, rax
mov rax, cr4
or ax, 3 << 9 ; set CR4.OSFXSR and CR4.OSXMMEXCPT at the same time
mov cr4, rax
ret
.no_SSE:
mov al, "a"
jmp error
; Prints `ERROR: ` and the given error code to screen and hangs.
; parameter: error code (in ascii) in al
error:
mov rbx, 0x4f4f4f524f524f45
mov [0xb8000], rbx
mov rbx, 0x4f204f204f3a4f52
mov [0xb8008], rbx
mov byte [0xb800e], al
hlt
jmp error

View File

@@ -1,13 +1,18 @@
; Copyright 2016 Philipp Oppermann. See the README.md
; file at the top-level directory of this distribution.
; Copyright 2015 Philipp Oppermann
;
; Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
; http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
; <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
; option. This file may not be copied, modified, or distributed
; except according to those terms.
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
section .multiboot_header
section .multiboot
header_start:
dd 0xe85250d6 ; magic number (multiboot 2)
dd 0 ; architecture 0 (protected mode i386)

View File

@@ -1,115 +0,0 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use x86::shared::segmentation::{self, SegmentSelector};
use x86::shared::PrivilegeLevel;
pub struct Idt([Entry; 16]);
impl Idt {
pub fn new() -> Idt {
Idt([Entry::missing(); 16])
}
pub fn set_handler(&mut self, entry: u8, handler: HandlerFunc) -> &mut EntryOptions {
self.0[entry as usize] = Entry::new(segmentation::cs(), handler);
&mut self.0[entry as usize].options
}
pub fn load(&'static self) {
use x86::shared::dtables::{DescriptorTablePointer, lidt};
use core::mem::size_of;
let ptr = DescriptorTablePointer {
base: self as *const _ as *const ::x86::bits64::irq::IdtEntry,
limit: (size_of::<Self>() - 1) as u16,
};
unsafe { lidt(&ptr) };
}
}
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
pub struct Entry {
pointer_low: u16,
gdt_selector: SegmentSelector,
options: EntryOptions,
pointer_middle: u16,
pointer_high: u32,
reserved: u32,
}
pub type HandlerFunc = extern "C" fn() -> !;
impl Entry {
fn new(gdt_selector: SegmentSelector, handler: HandlerFunc) -> Self {
let pointer = handler as u64;
Entry {
gdt_selector: gdt_selector,
pointer_low: pointer as u16,
pointer_middle: (pointer >> 16) as u16,
pointer_high: (pointer >> 32) as u32,
options: EntryOptions::new(),
reserved: 0,
}
}
fn missing() -> Self {
Entry {
gdt_selector: SegmentSelector::new(0, PrivilegeLevel::Ring0),
pointer_low: 0,
pointer_middle: 0,
pointer_high: 0,
options: EntryOptions::minimal(),
reserved: 0,
}
}
}
use bit_field::BitField;
#[derive(Debug, Clone, Copy)]
pub struct EntryOptions(u16);
impl EntryOptions {
fn minimal() -> Self {
let mut options = 0;
options.set_bits(9..12, 0b111); // 'must-be-one' bits
EntryOptions(options)
}
fn new() -> Self {
let mut options = Self::minimal();
options.set_present(true).disable_interrupts(true);
options
}
pub fn set_present(&mut self, present: bool) -> &mut Self {
self.0.set_bit(15, present);
self
}
pub fn disable_interrupts(&mut self, disable: bool) -> &mut Self {
self.0.set_bit(8, !disable);
self
}
#[allow(dead_code)]
pub fn set_privilege_level(&mut self, dpl: u16) -> &mut Self {
self.0.set_bits(13..15, dpl);
self
}
#[allow(dead_code)]
pub fn set_stack_index(&mut self, index: u16) -> &mut Self {
self.0.set_bits(0..3, index);
self
}
}
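Entry::new above scatters the 64-bit handler address across three fields (low, middle, high), as the IDT entry format requires. A minimal sketch of that split, with a hypothetical address for illustration:

// Split a handler address into the low/middle/high fields of an IDT entry.
fn split_pointer(handler: u64) -> (u16, u16, u32) {
    (handler as u16, (handler >> 16) as u16, (handler >> 32) as u32)
}

fn main() {
    let (low, middle, high) = split_pointer(0x1122_3344_5566_7788);
    assert_eq!(low, 0x7788);
    assert_eq!(middle, 0x5566);
    assert_eq!(high, 0x1122_3344);
}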

View File

@@ -1,105 +0,0 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod idt;
macro_rules! handler {
($name: ident) => {{
#[naked]
extern "C" fn wrapper() -> ! {
unsafe {
asm!("mov rdi, rsp
sub rsp, 8 // align the stack pointer
call $0"
:: "i"($name as extern "C" fn(
&ExceptionStackFrame) -> !)
: "rdi" : "intel");
::core::intrinsics::unreachable();
}
}
wrapper
}}
}
macro_rules! handler_with_error_code {
($name: ident) => {{
#[naked]
extern "C" fn wrapper() -> ! {
unsafe {
asm!("pop rsi // pop error code into rsi
mov rdi, rsp
sub rsp, 8 // align the stack pointer
call $0"
:: "i"($name as extern "C" fn(
&ExceptionStackFrame, u64) -> !)
: "rdi","rsi" : "intel");
::core::intrinsics::unreachable();
}
}
wrapper
}}
}
lazy_static! {
static ref IDT: idt::Idt = {
let mut idt = idt::Idt::new();
idt.set_handler(0, handler!(divide_by_zero_handler));
idt.set_handler(6, handler!(invalid_opcode_handler));
idt.set_handler(14, handler_with_error_code!(page_fault_handler));
idt
};
}
pub fn init() {
IDT.load();
}
#[derive(Debug)]
#[repr(C)]
struct ExceptionStackFrame {
instruction_pointer: u64,
code_segment: u64,
cpu_flags: u64,
stack_pointer: u64,
stack_segment: u64,
}
extern "C" fn divide_by_zero_handler(stack_frame: &ExceptionStackFrame) -> ! {
println!("\nEXCEPTION: DIVIDE BY ZERO\n{:#?}", stack_frame);
loop {}
}
extern "C" fn invalid_opcode_handler(stack_frame: &ExceptionStackFrame) -> ! {
println!("\nEXCEPTION: INVALID OPCODE at {:#x}\n{:#?}",
stack_frame.instruction_pointer,
stack_frame);
loop {}
}
bitflags! {
flags PageFaultErrorCode: u64 {
const PROTECTION_VIOLATION = 1 << 0,
const CAUSED_BY_WRITE = 1 << 1,
const USER_MODE = 1 << 2,
const MALFORMED_TABLE = 1 << 3,
const INSTRUCTION_FETCH = 1 << 4,
}
}
extern "C" fn page_fault_handler(stack_frame: &ExceptionStackFrame, error_code: u64) -> ! {
use x86::shared::control_regs;
println!("\nEXCEPTION: PAGE FAULT while accessing {:#x}\nerror code: \
{:?}\n{:#?}",
unsafe { control_regs::cr2() },
PageFaultErrorCode::from_bits(error_code).unwrap(),
stack_frame);
loop {}
}

View File

@@ -1,97 +1,70 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
// Copyright 2015 Philipp Oppermann
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(lang_items)]
#![feature(const_fn, unique)]
#![feature(alloc, collections)]
#![feature(asm)]
#![feature(naked_functions)]
#![feature(core_intrinsics)]
#![feature(no_std, lang_items, asm)]
#![feature(core_str_ext, const_fn, range_inclusive)]
#![feature(unique, core_intrinsics, alloc)]
#![feature(box_syntax)]
#![no_std]
extern crate rlibc;
extern crate volatile;
extern crate spin;
extern crate alloc;
extern crate allocator;
extern crate multiboot2;
#[macro_use]
extern crate bitflags;
extern crate x86;
#[macro_use]
extern crate once;
extern crate bit_field;
extern crate spin;
extern crate hole_list_allocator;
extern crate alloc;
#[macro_use]
extern crate collections;
use core::fmt::Write;
#[macro_use]
mod vga_buffer;
mod memory;
mod interrupts;
#[no_mangle]
pub extern "C" fn rust_main(multiboot_information_address: usize) {
pub extern fn rust_main(multiboot_address: usize) {
// ATTENTION: we have a very small stack and no guard page
use vga_buffer::{Writer, Color};
use alloc::boxed::Box;
vga_buffer::clear_screen();
println!("Hello World{}", "!");
let multiboot = unsafe{multiboot2::load(multiboot_address)};
memory::init(multiboot);
let boot_info = unsafe { multiboot2::load(multiboot_information_address) };
enable_nxe_bit();
enable_write_protect_bit();
let mut writer = Writer::new(Color::Blue, Color::LightGreen);
writer.write_byte(b'H');
let _ = writer.write_str("ello! ");
let _ = write!(writer, "The numbers are {} and {}", 42, 1.0/3.0);
println!("");
println!("{} {}", "line", 1);
print!("line {}", 2);
// set up guard page and map the heap pages
memory::init(boot_info);
Box::new(42);
box [42; 25000000];
// initialize our IDT
interrupts::init();
// provoke a page fault
unsafe { *(0xdeadbeaf as *mut u64) = 42 };
println!("It did not crash!");
loop {}
}
fn enable_nxe_bit() {
use x86::shared::msr::{IA32_EFER, rdmsr, wrmsr};
let nxe_bit = 1 << 11;
unsafe {
let efer = rdmsr(IA32_EFER);
wrmsr(IA32_EFER, efer | nxe_bit);
}
}
fn enable_write_protect_bit() {
use x86::shared::control_regs::{cr0, cr0_write, CR0_WRITE_PROTECT};
unsafe { cr0_write(cr0() | CR0_WRITE_PROTECT) };
loop{}
}
#[cfg(not(test))]
#[lang = "eh_personality"]
extern "C" fn eh_personality() {}
extern fn eh_personality() {}
#[cfg(not(test))]
#[lang = "panic_fmt"]
#[no_mangle]
pub extern "C" fn panic_fmt(fmt: core::fmt::Arguments, file: &'static str, line: u32) -> ! {
extern fn panic_fmt(fmt: core::fmt::Arguments, file: &str, line: u32) -> ! {
println!("\n\nPANIC in {} at line {}:", file, line);
println!(" {}", fmt);
loop {}
}
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn _Unwind_Resume() -> ! {
loop {}
println!("{}", fmt);
loop{}
}

src/memory/alloc/allocator/Cargo.lock (generated, new file)
View File

@@ -0,0 +1,4 @@
[root]
name = "allocator"
version = "0.1.0"

View File

@@ -1,7 +1,4 @@
[package]
authors = ["Philipp Oppermann <dev@phil-opp.com>"]
name = "bump_allocator"
name = "allocator"
version = "0.1.0"
[dependencies]
spin = "0.3.5"
authors = ["Philipp Oppermann <dev@phil-opp.com>"]

View File

@@ -0,0 +1,14 @@
#![feature(no_std, allocator)]
#![no_std]
#![allocator]
#![allow(improper_ctypes)]
extern {
fn __rust_allocate(size: usize, align: usize) -> *mut u8;
fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize);
fn __rust_reallocate(ptr: *mut u8, old_size: usize, size: usize,
align: usize) -> *mut u8;
fn __rust_reallocate_inplace(ptr: *mut u8, old_size: usize, size: usize,
align: usize) -> usize;
fn __rust_usable_size(size: usize, align: usize) -> usize;
}

src/memory/alloc/mod.rs (new file)
View File

@@ -0,0 +1,81 @@
use memory::paging::{self, Page, Mapper};
use memory::frame_allocator::{FrameAllocator, DynamicFrameStack};
use core::iter::range_inclusive;
use rlibc::memcpy;
use spin::Mutex;
static ALLOCATOR: Mutex<Option<Allocator>> = Mutex::new(None);
const HEAP_BOTTOM: usize = 0o_001_000_000_000_0000;
struct Allocator {
heap_top: usize,
last_mapped_page: Page,
lock: paging::Lock,
frame_stack: DynamicFrameStack,
}
impl Allocator {
pub fn allocate(&mut self, size: usize, align: usize) -> *mut u8 {
println!("allocate {} bytes (align {})", size, align); //loop{}
let start_address = align_up(self.heap_top, align);
let end_address = start_address + size;
let end_page = Page::containing_address(end_address - 1).number;
let last_mapped_page = self.last_mapped_page.number;
if end_page > last_mapped_page {
for page in range_inclusive(last_mapped_page + 1, end_page).map(|n| Page{number: n}) {
let mut mapper = self.lock.mapper(&mut self.frame_stack);
mapper.map(page, true, false)
}
self.last_mapped_page.number = end_page;
}
self.heap_top = end_address;
start_address as *mut u8
}
pub fn reallocate(&mut self, ptr: *mut u8, old_size: usize, size: usize,
align: usize) -> *mut u8
{
let new_ptr = self.allocate(size, align);
unsafe{ memcpy(new_ptr, ptr, old_size) };
new_ptr
}
pub fn deallocate(&mut self, ptr: *mut u8, old_size: usize, align: usize) {
//TODO
}
}
fn align_up(addr: usize, align: usize) -> usize {
if addr % align == 0 {
addr
} else {
addr + align - (addr % align)
}
}
pub fn init(mut lock: paging::Lock, mut frame_stack: DynamicFrameStack) {
let last_mapped_page = Page::containing_address(HEAP_BOTTOM);
{
let mut mapper = lock.mapper(&mut frame_stack);
mapper.map(last_mapped_page, true, false);
}
*ALLOCATOR.lock() = Some(Allocator {
heap_top: HEAP_BOTTOM,
last_mapped_page: last_mapped_page,
lock: lock,
frame_stack: frame_stack,
})
}
#[no_mangle]
pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
ALLOCATOR.lock().as_mut().expect("no allocator").allocate(size, align)
}
#[no_mangle]
pub extern fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
ALLOCATOR.lock().as_mut().expect("no allocator").deallocate(ptr, old_size, align)
}
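Allocator::allocate above grows the heap by mapping pages on demand: it aligns the heap top, computes the page containing the last byte of the allocation, and maps every page past the last mapped one. A standalone sketch of that arithmetic (modern Rust; PAGE_SIZE and the page-number convention follow the kernel code, the demo values are made up):

const PAGE_SIZE: usize = 4096;

fn align_up(addr: usize, align: usize) -> usize {
    if addr % align == 0 { addr } else { addr + align - (addr % align) }
}

/// Returns the allocation start address and the range of page numbers
/// that still need to be mapped (empty if the heap already covers it).
fn pages_to_map(
    heap_top: usize,
    last_mapped_page: usize,
    size: usize,
    align: usize,
) -> (usize, std::ops::RangeInclusive<usize>) {
    let start = align_up(heap_top, align);
    let end_page = (start + size - 1) / PAGE_SIZE;
    (start, (last_mapped_page + 1)..=end_page)
}

fn main() {
    // Hypothetical state: heap top at 0x1000, page 0 already mapped.
    let (start, pages) = pages_to_map(0x1000, 0, 0x2500, 16);
    assert_eq!(start, 0x1000);
    assert_eq!(pages.collect::<Vec<_>>(), vec![1, 2, 3]);
}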

View File

@@ -1,103 +0,0 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use memory::{Frame, FrameAllocator};
use multiboot2::{MemoryAreaIter, MemoryArea};
/// A frame allocator that uses the memory areas from the multiboot information structure as
/// source. The {kernel, multiboot}_{start, end} fields are used to avoid returning memory that is
/// already in use.
///
/// `kernel_end` and `multiboot_end` are _inclusive_ bounds.
pub struct AreaFrameAllocator {
next_free_frame: Frame,
current_area: Option<&'static MemoryArea>,
areas: MemoryAreaIter,
kernel_start: Frame,
kernel_end: Frame,
multiboot_start: Frame,
multiboot_end: Frame,
}
impl AreaFrameAllocator {
pub fn new(kernel_start: usize,
kernel_end: usize,
multiboot_start: usize,
multiboot_end: usize,
memory_areas: MemoryAreaIter)
-> AreaFrameAllocator {
let mut allocator = AreaFrameAllocator {
next_free_frame: Frame::containing_address(0),
current_area: None,
areas: memory_areas,
kernel_start: Frame::containing_address(kernel_start),
kernel_end: Frame::containing_address(kernel_end),
multiboot_start: Frame::containing_address(multiboot_start),
multiboot_end: Frame::containing_address(multiboot_end),
};
allocator.choose_next_area();
allocator
}
fn choose_next_area(&mut self) {
self.current_area = self.areas
.clone()
.filter(|area| {
let address = area.base_addr + area.length - 1;
Frame::containing_address(address as usize) >= self.next_free_frame
})
.min_by_key(|area| area.base_addr);
if let Some(area) = self.current_area {
let start_frame = Frame::containing_address(area.base_addr as usize);
if self.next_free_frame < start_frame {
self.next_free_frame = start_frame;
}
}
}
}
impl FrameAllocator for AreaFrameAllocator {
fn allocate_frame(&mut self) -> Option<Frame> {
if let Some(area) = self.current_area {
// "clone" the frame to return it if it's free. Frame doesn't
// implement Clone, but we can construct an identical frame.
let frame = Frame { number: self.next_free_frame.number };
// the last frame of the current area
let current_area_last_frame = {
let address = area.base_addr + area.length - 1;
Frame::containing_address(address as usize)
};
if frame > current_area_last_frame {
// all frames of current area are used, switch to next area
self.choose_next_area();
} else if frame >= self.kernel_start && frame <= self.kernel_end {
// `frame` is used by the kernel
self.next_free_frame = Frame { number: self.kernel_end.number + 1 };
} else if frame >= self.multiboot_start && frame <= self.multiboot_end {
// `frame` is used by the multiboot information structure
self.next_free_frame = Frame { number: self.multiboot_end.number + 1 };
} else {
// frame is unused, increment `next_free_frame` and return it
self.next_free_frame.number += 1;
return Some(frame);
}
// `frame` was not valid, try it again with the updated `next_free_frame`
self.allocate_frame()
} else {
None // no free frames left
}
}
fn deallocate_frame(&mut self, _frame: Frame) {
unimplemented!()
}
}

View File

@@ -0,0 +1,79 @@
use core::ptr::Unique;
use core::mem;
use memory::paging;
pub type Frame = super::Frame;
pub type Page = super::paging::Page;
pub trait FrameAllocator {
fn allocate_frame(&mut self, lock: &mut paging::Lock) -> Option<Frame>;
fn deallocate_frame(&mut self, lock: &mut paging::Lock, frame: Frame);
}
pub struct DynamicFrameStack {
head: Unique<Frame>, // TODO invariant
length: usize,
capacity: usize,
}
impl DynamicFrameStack {
pub fn new(at: Page) -> DynamicFrameStack {
DynamicFrameStack {
head: unsafe{ Unique::new(at.pointer() as *mut () as *mut _) },
length: 0,
capacity: Self::capacity_per_frame(),
}
}
fn capacity_per_frame() -> usize {
(super::PAGE_SIZE as usize) / mem::size_of::<Frame>()
}
}
impl FrameAllocator for DynamicFrameStack {
fn allocate_frame(&mut self, lock: &mut paging::Lock) -> Option<Frame> {
use core::intrinsics::offset;
if self.length == 0 {
// no frames left but maybe we can decrease the capacity and use that frame (but keep
// at least 1 frame because the paging logic might need some frames to map a page)
if self.capacity <= Self::capacity_per_frame() {
None
} else {
// decrease capacity and thus free a frame used as backing store
self.capacity -= Self::capacity_per_frame();
let page_address = unsafe{ offset(*self.head, self.capacity as isize) } as usize;
lock.mapper(self).unmap(Page::containing_address(page_address));
self.allocate_frame(lock)
}
} else {
// pop the last frame from the stack
self.length -= 1;
unsafe {
let frame = offset(*self.head, self.length as isize) as *mut _;
Some(mem::replace(&mut *frame, mem::zeroed()))
}
}
}
fn deallocate_frame(&mut self, lock: &mut paging::Lock, frame: Frame) {
use core::intrinsics::offset;
if self.length < self.capacity {
// add frame to frame stack
unsafe {
let new_frame = offset(*self.head, self.length as isize) as *mut _;
mem::forget(mem::replace(&mut *new_frame, frame));
}
self.length += 1;
} else {
// frame stack is full, use passed frame to expand it
let page_address = unsafe{ offset(*self.head, self.capacity as isize) } as usize;
unsafe {
lock.mapper(self).map_to(Page::containing_address(page_address), frame,
true, false);
}
self.capacity += Self::capacity_per_frame();
}
}
}
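DynamicFrameStack above grows and shrinks its backing store one page at a time, so its capacity is always a multiple of PAGE_SIZE / size_of::<Frame>(). A quick sketch of that arithmetic, assuming a 64-bit target:

const PAGE_SIZE: usize = 4096;

#[allow(dead_code)]
struct Frame {
    number: usize,
}

// One backing page holds this many stack entries.
fn capacity_per_frame() -> usize {
    PAGE_SIZE / std::mem::size_of::<Frame>()
}

fn main() {
    // On a 64-bit target usize is 8 bytes, so one page stores 512 frames.
    assert_eq!(capacity_per_frame(), 512);
    println!("capacity per backing page: {}", capacity_per_frame());
}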

View File

@@ -1,111 +1,150 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use multiboot2::Multiboot;
use self::paging::Page;
use self::frame_allocator::{FrameAllocator, DynamicFrameStack};
pub use self::area_frame_allocator::AreaFrameAllocator;
pub use self::paging::remap_the_kernel;
use self::paging::PhysicalAddress;
use multiboot2::BootInformation;
mod area_frame_allocator;
mod alloc;
mod paging;
mod frame_allocator;
mod tlb;
pub const PAGE_SIZE: usize = 4096;
pub const PAGE_SIZE: u64 = 4096;
pub fn init(boot_info: &BootInformation) {
assert_has_not_been_called!("memory::init must be called only once");
pub fn init(multiboot: &Multiboot) {
// ATTENTION: we have a very small stack and no guard page
use core::cmp::max;
use self::frame_allocator::FrameAllocator;
let memory_map_tag = boot_info.memory_map_tag().expect("Memory map tag required");
let elf_sections_tag = boot_info.elf_sections_tag().expect("Elf sections tag required");
let kernel_end = multiboot.elf_sections_tag().unwrap().sections().map(|s| s.addr + s.size).max()
.unwrap() as usize;
let multiboot_end = multiboot as *const _ as usize + multiboot.total_size as usize;
let mut bump_pointer = BumpPointer::new(max(kernel_end, multiboot_end));
let kernel_start = elf_sections_tag.sections()
.filter(|s| s.is_allocated())
.map(|s| s.addr)
.min()
.unwrap();
let kernel_end = elf_sections_tag.sections()
.filter(|s| s.is_allocated())
.map(|s| s.addr + s.size)
.max()
.unwrap();
let mut lock = unsafe{ paging::Lock::new() };
let new_p4_frame = bump_pointer.allocate_frame(&mut lock).expect("failed allocating new_p4_frame");
println!("kernel start: {:#x}, kernel end: {:#x}",
kernel_start,
kernel_end);
println!("multiboot start: {:#x}, multiboot end: {:#x}",
boot_info.start_address(),
boot_info.end_address());
unsafe{lock.begin_new_table_on_identity_mapped_frame(new_p4_frame)};
identity_map_kernel_sections(multiboot, lock.mapper(&mut bump_pointer));
lock.activate_current_table();
let mut frame_allocator = AreaFrameAllocator::new(kernel_start as usize,
kernel_end as usize,
boot_info.start_address(),
boot_info.end_address(),
memory_map_tag.memory_areas());
let frame_stack = init_core_map(multiboot, &mut lock, bump_pointer);
let mut active_table = paging::remap_the_kernel(&mut frame_allocator, boot_info);
let maximal_memory = multiboot.memory_area_tag().unwrap().memory_areas().map(
|area| area.base_addr + area.length).max().unwrap();
println!("maximal_memory: 0x{:x}", maximal_memory);
alloc::init(lock, frame_stack);
}
use self::paging::Page;
use hole_list_allocator::{HEAP_START, HEAP_SIZE};
let heap_start_page = Page::containing_address(HEAP_START);
let heap_end_page = Page::containing_address(HEAP_START + HEAP_SIZE - 1);
fn identity_map_kernel_sections<T>(multiboot: &Multiboot, mut mapper: paging::Mapper<T>)
where T: frame_allocator::FrameAllocator,
{
use core::iter::range_inclusive;
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
active_table.map(page, paging::WRITABLE, &mut frame_allocator);
for section in multiboot.elf_sections_tag().expect("no section tag").sections() {
let in_memory = section.flags & 0x2 != 0;
let writable = section.flags & 0x1 != 0;
let executable = section.flags & 0x4 != 0;
if !in_memory {
continue;
}
println!("section at 0x{:x}, allocated: {}, writable: {}, executable: {}", section.addr,
in_memory, writable, executable);
let start_page = Page::containing_address(section.addr as usize);
let end_page = Page::containing_address((section.addr + section.size) as usize);
for page in range_inclusive(start_page.number, end_page.number)
.map(|n| Page{number: n})
{
unsafe{ mapper.identity_map(page, writable, executable) };
}
}
// identity map VGA text buffer
unsafe {
mapper.identity_map(Page::containing_address(0xb8000), true, false);
}
// identity map Multiboot structure
let multiboot_address = multiboot as *const _ as usize;
let start_page = Page::containing_address(multiboot_address);
let end_page = Page::containing_address(multiboot_address + multiboot.total_size as usize);
for page in range_inclusive(start_page.number, end_page.number).map(|n| Page{number: n}) {
unsafe{ mapper.identity_map(page, false, false) };
}
}
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Frame {
fn init_core_map(multiboot: &Multiboot, lock: &mut paging::Lock,
mut bump_pointer: BumpPointer) -> DynamicFrameStack
{
use core::iter::range_inclusive;
const CORE_MAP_PAGE: Page = Page{number: 0o_001_000_000};
lock.mapper(&mut bump_pointer).map(CORE_MAP_PAGE, true, false);
let mut frame_stack = DynamicFrameStack::new(CORE_MAP_PAGE);
for area in multiboot.memory_area_tag().expect("no memory tag").memory_areas() {
println!("area start {:x} length {:x}", area.base_addr, area.length);
let start_frame = Frame::containing_address(area.base_addr as usize);
let end_frame = Frame::containing_address((area.base_addr + area.length) as usize);
for frame in range_inclusive(start_frame.number, end_frame.number)
.map(|n| Frame{number:n})
{
let page = Page{number: frame.number};
if page.is_unused() && !bump_pointer.has_allocated(frame) {
frame_stack.deallocate_frame(lock, frame)
}
}
}
frame_stack
}
#[derive(Debug)]
struct BumpPointer {
first_free_frame: usize,
next_free_frame: usize,
}
impl frame_allocator::FrameAllocator for BumpPointer {
fn allocate_frame(&mut self, _: &mut paging::Lock) -> Option<Frame> {
self.allocate_frames(1)
}
fn deallocate_frame(&mut self, _: &mut paging::Lock, _: Frame) {}
}
impl BumpPointer {
fn new(kernel_end: usize) -> BumpPointer {
assert!(kernel_end > 0x100000);
let frame = ((kernel_end - 1) >> 12) + 1;
BumpPointer {
first_free_frame: frame,
next_free_frame: frame,
}
}
fn allocate_frames(&mut self, number: usize) -> Option<Frame> {
let page_number = self.next_free_frame;
self.next_free_frame += number;
Some(Frame {
number: page_number
})
}
fn has_allocated(&self, frame: Frame) -> bool {
frame.number >= self.first_free_frame && frame.number < self.next_free_frame
}
}
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Clone, Copy)]
struct Frame {
number: usize,
}
impl Frame {
fn containing_address(address: usize) -> Frame {
Frame { number: address / PAGE_SIZE }
}
fn start_address(&self) -> PhysicalAddress {
self.number * PAGE_SIZE
}
fn clone(&self) -> Frame {
Frame { number: self.number }
}
fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
FrameIter {
start: start,
end: end,
Frame {
number: address >> 12,
}
}
}
struct FrameIter {
start: Frame,
end: Frame,
}
impl Iterator for FrameIter {
type Item = Frame;
fn next(&mut self) -> Option<Frame> {
if self.start <= self.end {
let frame = self.start.clone();
self.start.number += 1;
Some(frame)
} else {
None
}
}
}
pub trait FrameAllocator {
fn allocate_frame(&mut self) -> Option<Frame>;
fn deallocate_frame(&mut self, frame: Frame);
}

View File

@@ -1,76 +0,0 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use memory::Frame;
use multiboot2::ElfSection;
pub struct Entry(u64);
impl Entry {
pub fn is_unused(&self) -> bool {
self.0 == 0
}
pub fn set_unused(&mut self) {
self.0 = 0;
}
pub fn flags(&self) -> EntryFlags {
EntryFlags::from_bits_truncate(self.0)
}
pub fn pointed_frame(&self) -> Option<Frame> {
if self.flags().contains(PRESENT) {
Some(Frame::containing_address(self.0 as usize & 0x000fffff_fffff000))
} else {
None
}
}
pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
assert!(frame.start_address() & !0x000fffff_fffff000 == 0);
self.0 = (frame.start_address() as u64) | flags.bits();
}
}
bitflags! {
pub flags EntryFlags: u64 {
const PRESENT = 1 << 0,
const WRITABLE = 1 << 1,
const USER_ACCESSIBLE = 1 << 2,
const WRITE_THROUGH = 1 << 3,
const NO_CACHE = 1 << 4,
const ACCESSED = 1 << 5,
const DIRTY = 1 << 6,
const HUGE_PAGE = 1 << 7,
const GLOBAL = 1 << 8,
const NO_EXECUTE = 1 << 63,
}
}
impl EntryFlags {
pub fn from_elf_section_flags(section: &ElfSection) -> EntryFlags {
use multiboot2::{ELF_SECTION_ALLOCATED, ELF_SECTION_WRITABLE, ELF_SECTION_EXECUTABLE};
let mut flags = EntryFlags::empty();
if section.flags().contains(ELF_SECTION_ALLOCATED) {
// section is loaded to memory
flags = flags | PRESENT;
}
if section.flags().contains(ELF_SECTION_WRITABLE) {
flags = flags | WRITABLE;
}
if !section.flags().contains(ELF_SECTION_EXECUTABLE) {
flags = flags | NO_EXECUTE;
}
flags
}
}
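The Entry type above packs a 4KiB-aligned frame address and the flag bits into a single u64, with the address occupying bits 12..52. A minimal sketch of that packing, using only the PRESENT and WRITABLE bits from the diff:

const ADDRESS_MASK: u64 = 0x000f_ffff_ffff_f000;
const PRESENT: u64 = 1 << 0;
const WRITABLE: u64 = 1 << 1;

fn pack(frame_start: u64, flags: u64) -> u64 {
    // The frame address must fit the mask, i.e. be 4KiB-aligned and < 2^52.
    assert_eq!(frame_start & !ADDRESS_MASK, 0);
    frame_start | flags
}

fn pointed_frame(entry: u64) -> Option<u64> {
    if entry & PRESENT != 0 {
        Some(entry & ADDRESS_MASK)
    } else {
        None
    }
}

fn main() {
    let entry = pack(0x5000, PRESENT | WRITABLE);
    assert_eq!(pointed_frame(entry), Some(0x5000));
}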

View File

@@ -1,118 +0,0 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::{VirtualAddress, PhysicalAddress, Page, ENTRY_COUNT};
use super::entry::*;
use super::table::{self, Table, Level4};
use memory::{PAGE_SIZE, Frame, FrameAllocator};
use core::ptr::Unique;
pub struct Mapper {
p4: Unique<Table<Level4>>,
}
impl Mapper {
pub unsafe fn new() -> Mapper {
Mapper { p4: Unique::new(table::P4) }
}
pub fn p4(&self) -> &Table<Level4> {
unsafe { self.p4.get() }
}
pub fn p4_mut(&mut self) -> &mut Table<Level4> {
unsafe { self.p4.get_mut() }
}
pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
let offset = virtual_address % PAGE_SIZE;
self.translate_page(Page::containing_address(virtual_address))
.map(|frame| frame.number * PAGE_SIZE + offset)
}
pub fn translate_page(&self, page: Page) -> Option<Frame> {
let p3 = self.p4().next_table(page.p4_index());
let huge_page = || {
p3.and_then(|p3| {
let p3_entry = &p3[page.p3_index()];
// 1GiB page?
if let Some(start_frame) = p3_entry.pointed_frame() {
if p3_entry.flags().contains(HUGE_PAGE) {
// address must be 1GiB aligned
assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
return Some(Frame {
number: start_frame.number + page.p2_index() * ENTRY_COUNT +
page.p1_index(),
});
}
}
if let Some(p2) = p3.next_table(page.p3_index()) {
let p2_entry = &p2[page.p2_index()];
// 2MiB page?
if let Some(start_frame) = p2_entry.pointed_frame() {
if p2_entry.flags().contains(HUGE_PAGE) {
// address must be 2MiB aligned
assert!(start_frame.number % ENTRY_COUNT == 0);
return Some(Frame { number: start_frame.number + page.p1_index() });
}
}
}
None
})
};
p3.and_then(|p3| p3.next_table(page.p3_index()))
.and_then(|p2| p2.next_table(page.p2_index()))
.and_then(|p1| p1[page.p1_index()].pointed_frame())
.or_else(huge_page)
}
pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let mut p3 = self.p4_mut().next_table_create(page.p4_index(), allocator);
let mut p2 = p3.next_table_create(page.p3_index(), allocator);
let mut p1 = p2.next_table_create(page.p2_index(), allocator);
assert!(p1[page.p1_index()].is_unused());
p1[page.p1_index()].set(frame, flags | PRESENT);
}
pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let frame = allocator.allocate_frame().expect("out of memory");
self.map_to(page, frame, flags, allocator)
}
pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let page = Page::containing_address(frame.start_address());
self.map_to(page, frame, flags, allocator)
}
pub fn unmap<A>(&mut self, page: Page, allocator: &mut A)
where A: FrameAllocator
{
assert!(self.translate(page.start_address()).is_some());
let p1 = self.p4_mut()
.next_table_mut(page.p4_index())
.and_then(|p3| p3.next_table_mut(page.p3_index()))
.and_then(|p2| p2.next_table_mut(page.p2_index()))
.expect("mapping code does not support huge pages");
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
unsafe { ::x86::shared::tlb::flush(page.start_address()) };
// TODO free p(1,2,3) table if empty
// allocator.deallocate_frame(frame);
}
}
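translate_page above walks the four table levels; each level is selected by a 9-bit slice of the page number (the p4_index through p1_index methods in paging/mod.rs below). A standalone sketch of that index arithmetic, using the 0xdeadbeaf address that rust_main dereferences as an example:

// Each paging level is indexed by 9 bits of the page number.
fn table_indices(virt: usize) -> (usize, usize, usize, usize) {
    let page = virt / 4096;
    (
        (page >> 27) & 0o777, // P4 index
        (page >> 18) & 0o777, // P3 index
        (page >> 9) & 0o777,  // P2 index
        page & 0o777,         // P1 index
    )
}

fn main() {
    let (p4, p3, p2, p1) = table_indices(0xdeadbeaf);
    println!("0xdeadbeaf -> P4 {} / P3 {} / P2 {} / P1 {}", p4, p3, p2, p1);
}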

View File

@@ -1,227 +1,75 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::table::Page;
pub use self::entry::*;
use memory::{PAGE_SIZE, Frame, FrameAllocator};
use self::temporary_page::TemporaryPage;
pub use self::mapper::Mapper;
use core::ops::{Deref, DerefMut};
use multiboot2::BootInformation;
use self::table::{map_to, unmap};
use memory::frame_allocator::{Frame, FrameAllocator};
pub const PAGE_SIZE: usize = 4096;
mod entry;
mod table;
mod temporary_page;
mod mapper;
const ENTRY_COUNT: usize = 512;
pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
number: usize,
/// The paging lock must be unique. It is required for all page table operations and thus
/// guarantees exclusive page table access.
pub struct Lock {
_private: (),
}
impl Page {
pub fn containing_address(address: VirtualAddress) -> Page {
assert!(address < 0x0000_8000_0000_0000 || address >= 0xffff_8000_0000_0000,
"invalid address: 0x{:x}",
address);
Page { number: address / PAGE_SIZE }
}
fn start_address(&self) -> usize {
self.number * PAGE_SIZE
}
fn p4_index(&self) -> usize {
(self.number >> 27) & 0o777
}
fn p3_index(&self) -> usize {
(self.number >> 18) & 0o777
}
fn p2_index(&self) -> usize {
(self.number >> 9) & 0o777
}
fn p1_index(&self) -> usize {
(self.number >> 0) & 0o777
}
pub fn range_inclusive(start: Page, end: Page) -> PageIter {
PageIter {
start: start,
end: end,
impl Lock {
/// Creates a new paging lock. It's unsafe because only one lock can exist at a
/// time.
pub unsafe fn new() -> Lock {
Lock {
_private: (),
}
}
}
pub struct PageIter {
start: Page,
end: Page,
}
impl Iterator for PageIter {
type Item = Page;
fn next(&mut self) -> Option<Page> {
if self.start <= self.end {
let page = self.start;
self.start.number += 1;
Some(page)
} else {
None
}
}
}
pub struct ActivePageTable {
mapper: Mapper,
}
impl Deref for ActivePageTable {
type Target = Mapper;
fn deref(&self) -> &Mapper {
&self.mapper
}
}
impl DerefMut for ActivePageTable {
fn deref_mut(&mut self) -> &mut Mapper {
&mut self.mapper
}
}
impl ActivePageTable {
unsafe fn new() -> ActivePageTable {
ActivePageTable { mapper: Mapper::new() }
}
pub fn with<F>(&mut self,
table: &mut InactivePageTable,
temporary_page: &mut temporary_page::TemporaryPage, // new
f: F)
where F: FnOnce(&mut Mapper)
/// Uses the passed frame to create a new page table that becomes the _current table_.
/// All subsequent page table operations will modify it (the _current_ table) and leave the
/// _active_ table unchanged. To activate the current table and make it the active table, use
/// the `activate_current_table` method.
/// This method assumes that the passed frame is identity mapped and is thus unsafe.
pub unsafe fn begin_new_table_on_identity_mapped_frame(&mut self, frame: Frame)
{
use x86::shared::{control_regs, tlb};
let flush_tlb = || unsafe { tlb::flush_all() };
{
let backup = Frame::containing_address(unsafe { control_regs::cr3() } as usize);
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), self);
// overwrite recursive mapping
self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
flush_tlb();
// execute f in the new context
f(self);
// restore recursive mapping to original p4 table
p4_table[511].set(backup, PRESENT | WRITABLE);
flush_tlb();
}
temporary_page.unmap(self);
table::begin_new_on_identity_mapped_frame(self, frame)
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86::shared::control_regs;
/// Activates the _current_ table. If the current table is equal to the active table, nothing
/// changes. However, if the _current_ and _active_ tables differ, the current table becomes
/// active and is used by the CPU from then on.
pub fn activate_current_table(&mut self) {
table::activate_current()
}
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(unsafe { control_regs::cr3() } as usize),
};
unsafe {
control_regs::cr3_write(new_table.p4_frame.start_address());
pub fn mapper<'a, A>(&'a mut self, allocator: &'a mut A) -> Mapper<'a, A>
where A: FrameAllocator,
{
Mapper {
lock: self,
allocator: allocator,
}
old_table
}
}
pub struct InactivePageTable {
p4_frame: Frame,
pub struct Mapper<'a, A> where A: 'a {
lock: &'a mut Lock,
allocator: &'a mut A,
}
impl InactivePageTable {
pub fn new(frame: Frame,
active_table: &mut ActivePageTable,
temporary_page: &mut TemporaryPage)
-> InactivePageTable {
{
let table = temporary_page.map_table_frame(frame.clone(), active_table);
table.zero();
table[511].set(frame.clone(), PRESENT | WRITABLE);
}
temporary_page.unmap(active_table);
impl<'a, A> Mapper<'a, A> where A: FrameAllocator {
pub fn map(&mut self, page: Page, writable: bool, executable: bool) {
let frame = self.allocator.allocate_frame(&mut self.lock)
.expect("no more frames available");
unsafe{ self.map_to(page, frame, writable, executable) }
}
InactivePageTable { p4_frame: frame }
pub fn unmap(&mut self, page: Page) {
unmap(self.lock, page, self.allocator)
}
pub unsafe fn map_to(&mut self, page: Page, frame: Frame, writable: bool, executable: bool) {
map_to(self.lock, page, frame, writable, executable, self.allocator)
}
pub unsafe fn identity_map(&mut self, page: Page, writable: bool, executable: bool) {
let frame = Frame {number: page.number};
self.map_to(page, frame, writable, executable)
}
}
pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation) -> ActivePageTable
where A: FrameAllocator
{
let mut temporary_page = TemporaryPage::new(Page { number: 0xcafebabe }, allocator);
let mut active_table = unsafe { ActivePageTable::new() };
let mut new_table = {
let frame = allocator.allocate_frame().expect("no more frames");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
let elf_sections_tag = boot_info.elf_sections_tag()
.expect("Memory map tag required");
// identity map the allocated kernel sections
for section in elf_sections_tag.sections() {
if !section.is_allocated() {
// section is not loaded to memory
continue;
}
assert!(section.addr as usize % PAGE_SIZE == 0,
"sections need to be page aligned");
println!("mapping section at addr: {:#x}, size: {:#x}",
section.addr,
section.size);
let flags = EntryFlags::from_elf_section_flags(section);
let start_frame = Frame::containing_address(section.start_address());
let end_frame = Frame::containing_address(section.end_address() - 1);
for frame in Frame::range_inclusive(start_frame, end_frame) {
mapper.identity_map(frame, flags, allocator);
}
}
// identity map the VGA text buffer
let vga_buffer_frame = Frame::containing_address(0xb8000);
mapper.identity_map(vga_buffer_frame, WRITABLE, allocator);
// identity map the multiboot info structure
let multiboot_start = Frame::containing_address(boot_info.start_address());
let multiboot_end = Frame::containing_address(boot_info.end_address() - 1);
for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
mapper.identity_map(frame, PRESENT, allocator);
}
});
let old_table = active_table.switch(new_table);
println!("NEW TABLE!!!");
let old_p4_page = Page::containing_address(old_table.p4_frame.start_address());
active_table.unmap(old_p4_page, allocator);
println!("guard page at {:#x}", old_p4_page.start_address());
active_table
}
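// A sketch of how remap_the_kernel might be driven from an init routine; the
// wrapper function and the concrete frame allocator are assumptions, not part
// of this diff.
//
// pub fn init(boot_info: &BootInformation) -> ActivePageTable {
//     // e.g. an allocator built from the multiboot memory map tag
//     let mut frame_allocator = make_frame_allocator(boot_info); // hypothetical
//     remap_the_kernel(&mut frame_allocator, boot_info)
// }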


@@ -1,119 +1,182 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use memory::frame_allocator::FrameAllocator;
use memory::tlb;
use super::{PAGE_SIZE, Lock};
use memory::frame_allocator::Frame;
use core::intrinsics::offset;
use core::mem::size_of;
use memory::paging::entry::*;
use memory::paging::ENTRY_COUNT;
use memory::FrameAllocator;
use core::ops::{Index, IndexMut};
use core::marker::PhantomData;
const P4: Table = Table( Page{ number: 0o_777_777_777_777} );
pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;
pub unsafe fn begin_new_on_identity_mapped_frame(_lock: &mut Lock, new_p4_frame: Frame) {
let new_p4 = &mut Table(Page{ number: new_p4_frame.number });
new_p4.zero();
new_p4.field(511).set(new_p4_frame, PRESENT | WRITABLE);
pub struct Table<L: TableLevel> {
entries: [Entry; ENTRY_COUNT],
level: PhantomData<L>,
P4.field(511).set(new_p4_frame, PRESENT | WRITABLE);
tlb::flush();
}
impl<L> Table<L>
where L: TableLevel
{
pub fn zero(&mut self) {
for entry in self.entries.iter_mut() {
entry.set_unused();
}
pub fn activate_current() {
unsafe {
let p4_address: u64 = {
let field = *(0xfffffffffffffff8 as *const u64);
field & !0xfff
};
asm!("mov cr3, $0" :: "r"(p4_address) :: "intel")
}
}
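// Why reading 0xfffffffffffffff8 yields the P4 address: with the recursive
// mapping, the P4 table itself is visible at virtual page 0o_777_777_777_777,
// i.e. at 0xffffffff_fffff000 after sign extension. Entry 511 lives at byte
// offset 511 * 8 = 0xff8 inside that page, so the top 8 bytes of the address
// space alias the recursive entry; its frame bits equal the physical P4
// address held in cr3, and `& !0xfff` strips the low flag bits.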
impl<L> Table<L>
where L: HierarchicalLevel
pub fn map_to<A>(lock: &mut Lock, page: Page, frame: Frame, writable: bool,
executable: bool, allocator: &mut A) where A: FrameAllocator
{
fn next_table_address(&self, index: usize) -> Option<usize> {
let entry_flags = self[index].flags();
if entry_flags.contains(PRESENT) && !entry_flags.contains(HUGE_PAGE) {
let table_address = self as *const _ as usize;
Some((table_address << 9) | (index << 12))
let mut flags = PRESENT;
if writable {
flags = flags | WRITABLE;
}
if !executable {
flags = flags | NO_EXECUTE;
}
let p4_field = page.p4_page().field(page.p4_index());
if p4_field.is_unused() {
p4_field.set(allocator.allocate_frame(lock).expect("no more frames"), PRESENT | WRITABLE);
unsafe{page.p3_page().zero()};
}
let p3_field = page.p3_page().field(page.p3_index());
if p3_field.is_unused() {
p3_field.set(allocator.allocate_frame(lock).expect("no more frames"), PRESENT | WRITABLE);
unsafe{page.p2_page().zero()};
}
let p2_field = page.p2_page().field(page.p2_index());
if p2_field.is_unused() {
p2_field.set(allocator.allocate_frame(lock).expect("no more frames"), PRESENT | WRITABLE);
unsafe{page.p1_page().zero()};
}
let p1_field = page.p1_page().field(page.p1_index());
assert!(p1_field.is_unused());
p1_field.set(frame, flags);
}
pub fn unmap<A>(lock: &mut Lock, page: Page, allocator: &mut A) where A: FrameAllocator {
assert!(!page.is_unused());
let p1_field = page.p1_page().field(page.p1_index());
let frame = p1_field.pointed_frame();
p1_field.set_unused();
// TODO free p(1,2,3) table if empty
allocator.deallocate_frame(lock, frame);
}
/// A mapped or unmapped page
#[derive(Clone, Copy)]
pub struct Page {
pub number: usize, // TODO make private
}
impl Page {
pub fn containing_address(address: usize) -> Page {
Page {
number: (address >> 12) & 0o_777_777_777_777,
}
}
pub fn pointer(&self) -> *const () {
if self.number >= 0o400_000_000_000 {
// sign extension
((self.number << 12) | 0o177777_000_000_000_000_0000) as *const ()
} else {
None
(self.number << 12) as *const ()
}
}
pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
self.next_table_address(index)
.map(|address| unsafe { &*(address as *const _) })
pub fn is_unused(&self) -> bool {
self.p4_page().field(self.p4_index()).is_unused() ||
self.p3_page().field(self.p3_index()).is_unused() ||
self.p2_page().field(self.p2_index()).is_unused() ||
self.p1_page().field(self.p1_index()).is_unused()
}
pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
self.next_table_address(index)
.map(|address| unsafe { &mut *(address as *mut _) })
fn p4_index(&self) -> usize {(self.number >> 27) & 0o777}
fn p3_index(&self) -> usize {(self.number >> 18) & 0o777}
fn p2_index(&self) -> usize {(self.number >> 9) & 0o777}
fn p1_index(&self) -> usize {(self.number >> 0) & 0o777}
fn p4_page(&self) -> Table {
P4
}
fn p3_page(&self) -> Table {
Table(Page {
number: 0o_777_777_777_000 | self.p4_index(),
})
}
fn p2_page(&self) -> Table {
Table(Page {
number: 0o_777_777_000_000 | (self.p4_index() << 9) | self.p3_index(),
})
}
fn p1_page(&self) -> Table {
Table(Page {
number: 0o_777_000_000_000 | (self.p4_index() << 18) | (self.p3_index() << 9)
| self.p2_index(),
})
}
}
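// Worked example for the octal arithmetic above (indexes chosen for
// illustration): a page with p4/p3/p2 indexes (1, 2, 3) has its P1 table
// mapped at page number 0o_777_000_000_000 | (1 << 18) | (2 << 9) | 3
// = 0o_777_001_002_003; each pass through the recursive entry shifts the
// index chain up by one table level.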
/// A page table on a _mapped_ page.
struct Table(Page);
impl Table {
unsafe fn zero(&mut self) {
const ENTRIES: usize = PAGE_SIZE / 8;
let page = self.0.pointer() as *mut () as *mut [u64; ENTRIES];
*page = [0; ENTRIES];
}
pub fn next_table_create<A>(&mut self,
index: usize,
allocator: &mut A)
-> &mut Table<L::NextLevel>
where A: FrameAllocator
{
if self.next_table(index).is_none() {
assert!(!self.entries[index].flags().contains(HUGE_PAGE),
"mapping code does not support huge pages");
let frame = allocator.allocate_frame().expect("no frames available");
self.entries[index].set(frame, PRESENT | WRITABLE);
self.next_table_mut(index).unwrap().zero();
fn field(&self, index: usize) -> &'static mut TableField {
assert!(index < PAGE_SIZE / size_of::<u64>());
unsafe {
let field = offset(self.0.pointer() as *const u64, index as isize);
&mut *(field as *const _ as *mut _)
}
self.next_table_mut(index).unwrap()
}
}
impl<L> Index<usize> for Table<L>
where L: TableLevel
{
type Output = Entry;
struct TableField(u64);
fn index(&self, index: usize) -> &Entry {
&self.entries[index]
impl TableField {
fn is_unused(&self) -> bool {
self.0 == 0
}
fn set_unused(&mut self) {
self.0 = 0
}
fn set(&mut self, frame: Frame, flags: TableFieldFlags) {
self.0 = (((frame.number as u64) << 12) & 0x000fffff_fffff000) | flags.bits();
}
fn pointed_frame(&self) -> Frame {
Frame {
number: ((self.0 & 0x000fffff_fffff000) >> 12) as usize,
}
}
}
bitflags! {
flags TableFieldFlags: u64 {
const PRESENT = 1 << 0,
const WRITABLE = 1 << 1,
const USER_ACCESSIBLE = 1 << 2,
const WRITE_THROUGH = 1 << 3,
const NO_CACHE = 1 << 4,
const ACCESSED = 1 << 5,
const DIRTY = 1 << 6,
const OTHER1 = 1 << 9,
const OTHER2 = 1 << 10,
const NO_EXECUTE = 1 << 63,
}
}
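// Round-trip sketch for the field encoding (frame number chosen for
// illustration): set(Frame { number: 0x1234 }, PRESENT | WRITABLE) stores
// (0x1234 << 12) | 0b11 = 0x1234003, and pointed_frame() masks the flag bits
// back off, recovering frame number 0x1234.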
impl<L> IndexMut<usize> for Table<L>
where L: TableLevel
{
fn index_mut(&mut self, index: usize) -> &mut Entry {
&mut self.entries[index]
}
}
pub trait TableLevel {}
pub enum Level4 {}
#[allow(dead_code)]
pub enum Level3 {}
#[allow(dead_code)]
pub enum Level2 {}
pub enum Level1 {}
impl TableLevel for Level4 {}
impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}
pub trait HierarchicalLevel: TableLevel {
type NextLevel: TableLevel;
}
impl HierarchicalLevel for Level4 {
type NextLevel = Level3;
}
impl HierarchicalLevel for Level3 {
type NextLevel = Level2;
}
impl HierarchicalLevel for Level2 {
type NextLevel = Level1;
}
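// Note that Level1 deliberately has no HierarchicalLevel impl: a call like
// p1.next_table(i) fails to compile, so the type system rules out descending
// below a P1 table at no runtime cost.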


@@ -1,86 +0,0 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::{Page, ActivePageTable, VirtualAddress};
use super::table::{Table, Level1};
use memory::{Frame, FrameAllocator};
pub struct TemporaryPage {
page: Page,
allocator: TinyAllocator,
}
impl TemporaryPage {
pub fn new<A>(page: Page, allocator: &mut A) -> TemporaryPage
where A: FrameAllocator
{
TemporaryPage {
page: page,
allocator: TinyAllocator::new(allocator),
}
}
/// Maps the temporary page to the given frame in the active table.
/// Returns the start address of the temporary page.
pub fn map(&mut self, frame: Frame, active_table: &mut ActivePageTable) -> VirtualAddress {
use super::entry::WRITABLE;
assert!(active_table.translate_page(self.page).is_none(),
"temporary page is already mapped");
active_table.map_to(self.page, frame, WRITABLE, &mut self.allocator);
self.page.start_address()
}
/// Maps the temporary page to the given page table frame in the active table.
/// Returns a reference to the now mapped table.
pub fn map_table_frame(&mut self,
frame: Frame,
active_table: &mut ActivePageTable)
-> &mut Table<Level1> {
unsafe { &mut *(self.map(frame, active_table) as *mut Table<Level1>) }
}
/// Unmaps the temporary page in the active table.
pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
active_table.unmap(self.page, &mut self.allocator)
}
}
struct TinyAllocator([Option<Frame>; 3]);
impl TinyAllocator {
fn new<A>(allocator: &mut A) -> TinyAllocator
where A: FrameAllocator
{
let mut f = || allocator.allocate_frame();
let frames = [f(), f(), f()];
TinyAllocator(frames)
}
}
impl FrameAllocator for TinyAllocator {
fn allocate_frame(&mut self) -> Option<Frame> {
for frame_option in &mut self.0 {
if frame_option.is_some() {
return frame_option.take();
}
}
None
}
fn deallocate_frame(&mut self, frame: Frame) {
for frame_option in &mut self.0 {
if frame_option.is_none() {
*frame_option = Some(frame);
return;
}
}
panic!("Tiny allocator can hold only 3 frames.");
}
}
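// A usage sketch (`frame`, `active_table`, and `allocator` are assumed, not
// defined in this file): borrow a table frame through the temporary page,
// zero it, and unmap again.
//
// let mut temp_page = TemporaryPage::new(Page { number: 0xcafebabe }, allocator);
// {
//     let table = temp_page.map_table_frame(frame.clone(), active_table);
//     table.zero();
// }
// temp_page.unmap(active_table);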

src/memory/tlb.rs Normal file

@@ -0,0 +1,4 @@
pub fn flush() {
unsafe{asm!("mov rax, cr3
mov cr3, rax" ::: "{rax}" : "intel")}
}
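// Reloading cr3 flushes all non-global TLB entries at once; for a single page
// the finer-grained invlpg instruction is enough, which is what the mapper
// variant uses via ::x86::shared::tlb::flush(page.start_address()).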


@@ -1,135 +1,118 @@
// Copyright 2016 Philipp Oppermann. See the README.md
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::ptr::Unique;
use core::fmt;
use spin::Mutex;
use volatile::Volatile;
const BUFFER_HEIGHT: usize = 25;
const BUFFER_WIDTH: usize = 80;
pub static WRITER: Mutex<Writer> = Mutex::new(Writer {
column_position: 0,
color_code: ColorCode::new(Color::LightGreen, Color::Black),
buffer: unsafe { Unique::new(0xb8000 as *mut _) },
});
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ($crate::vga_buffer::_print(format_args!($($arg)*)));
}
#[macro_export]
macro_rules! println {
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
macro_rules! print {
($($arg:tt)*) => ({
$crate::vga_buffer::print(format_args!($($arg)*));
});
}
pub fn print(args: fmt::Arguments) {
use core::fmt::Write;
WRITER.lock().write_fmt(args).unwrap();
}
pub fn clear_screen() {
for _ in 0..BUFFER_HEIGHT {
println!("");
}
}
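// Minimal usage sketch of the macros above:
//
// clear_screen();
// println!("VGA buffer at {:#x}", 0xb8000); // expands to print! plus "\n"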
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum Color {
Black = 0,
Blue = 1,
Green = 2,
Cyan = 3,
Red = 4,
Magenta = 5,
Brown = 6,
LightGray = 7,
DarkGray = 8,
LightBlue = 9,
LightGreen = 10,
LightCyan = 11,
LightRed = 12,
Pink = 13,
Yellow = 14,
White = 15,
pub fn _print(fmt: ::core::fmt::Arguments) {
use core::fmt::Write;
static mut WRITER: Writer = Writer::new(Color::LightGreen, Color::Black);
let _ = unsafe{WRITER.write_fmt(fmt)};
}
pub struct Writer {
column_position: usize,
color_code: ColorCode,
buffer: Unique<Buffer>,
}
impl Writer {
pub const fn new(foreground: Color, background: Color) -> Writer {
Writer {
column_position: 0,
color_code: ColorCode::new(foreground, background),
}
}
pub fn write_byte(&mut self, byte: u8) {
const NEWLINE: u8 = b'\n';
match byte {
b'\n' => self.new_line(),
NEWLINE => self.new_line(),
byte => {
if self.column_position >= BUFFER_WIDTH {
self.new_line();
self.new_line()
}
let row = BUFFER_HEIGHT - 1;
let col = self.column_position;
let color_code = self.color_code;
self.buffer().chars[row][col].write(ScreenChar {
Self::buffer().chars[row][col] = ScreenChar {
ascii_character: byte,
color_code: color_code,
});
color_code: self.color_code,
};
self.column_position += 1;
}
}
}
fn buffer(&mut self) -> &mut Buffer {
unsafe { self.buffer.get_mut() }
fn buffer() -> &'static mut Buffer {
const BUFFER: *mut Buffer = 0xb8000 as *mut _;
unsafe{&mut *BUFFER}
}
fn new_line(&mut self) {
for row in 1..BUFFER_HEIGHT {
for col in 0..BUFFER_WIDTH {
let buffer = self.buffer();
let character = buffer.chars[row][col].read();
buffer.chars[row - 1][col].write(character);
}
let buffer = Self::buffer();
for row in 0..(BUFFER_HEIGHT-1) {
buffer.chars[row] = buffer.chars[row + 1]
}
self.clear_row(BUFFER_HEIGHT - 1);
self.clear_row(BUFFER_HEIGHT-1);
self.column_position = 0;
}
fn clear_row(&mut self, row: usize) {
let blank = ScreenChar {
ascii_character: b' ',
ascii_character: ' ' as u8,
color_code: self.color_code,
};
for col in 0..BUFFER_WIDTH {
self.buffer().chars[row][col].write(blank);
}
Self::buffer().chars[row] = [blank; BUFFER_WIDTH];
}
}
impl fmt::Write for Writer {
impl ::core::fmt::Write for Writer {
fn write_str(&mut self, s: &str) -> ::core::fmt::Result {
for byte in s.bytes() {
self.write_byte(byte)
}
Ok(())
}
}
#[derive(Debug, Clone, Copy)]
#[derive(Clone, Copy)]
#[allow(dead_code)]
#[repr(u8)]
pub enum Color {
Black = 0,
Blue = 1,
Green = 2,
Cyan = 3,
Red = 4,
Magenta = 5,
Brown = 6,
LightGray = 7,
DarkGray = 8,
LightBlue = 9,
LightGreen = 10,
LightCyan = 11,
LightRed = 12,
Pink = 13,
Yellow = 14,
White = 15,
}
#[derive(Clone, Copy)]
struct ColorCode(u8);
impl ColorCode {
@@ -138,13 +121,13 @@ impl ColorCode {
}
}
#[derive(Debug, Clone, Copy)]
#[repr(C)]
#[derive(Clone, Copy)]
#[repr(packed)]
struct ScreenChar {
ascii_character: u8,
color_code: ColorCode,
}
struct Buffer {
chars: [[Volatile<ScreenChar>; BUFFER_WIDTH]; BUFFER_HEIGHT],
chars: [[ScreenChar; BUFFER_WIDTH]; BUFFER_HEIGHT],
}