From 75264e7cac39d921c20960ae3306a97777459f39 Mon Sep 17 00:00:00 2001 From: Philipp Oppermann Date: Wed, 9 Dec 2015 15:03:29 +0100 Subject: [PATCH] Fix unmap function by flushing the TLB --- Cargo.toml | 1 + posts/DRAFT-paging.md | 18 +++++++++++++++++- src/lib.rs | 1 + src/memory/paging/mod.rs | 4 +--- 4 files changed, 20 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2738b730..0524dc4f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ crate-type = ["staticlib"] [dependencies] rlibc = "0.1.4" spin = "0.3.4" +x86 = "0.5.0" [dependencies.multiboot2] git = "https://github.com/phil-opp/multiboot2-elf64" diff --git a/posts/DRAFT-paging.md b/posts/DRAFT-paging.md index 64d6d6ad..b24c67b6 100644 --- a/posts/DRAFT-paging.md +++ b/posts/DRAFT-paging.md @@ -860,7 +860,23 @@ println!("{:#x}", unsafe{ ``` Since we don't zero the mapped pages, the output is random. For me, it's `0xf000ff53f000ff53`. -If `unmap` worked correctly, reading it again after unmapping should cause a page fault. But it doesn't. Instead, it just prints the same number again. +If `unmap` worked correctly, reading it again after unmapping should cause a page fault. But it doesn't. Instead, it just prints the same number again. When we remove the first read, we get the desired page fault (i.e. QEMU reboots again and again). So this seems to be some cache issue. + +An x86 processor has many different caches because always accessing the main memory would be very slow. Most of these caches are completely _transparent_. That means everything works exactly the same as without them, it's just much faster. But there is one cache, that needs to be updated manually: the _translation lookaside buffer_. + +The translation lookaside buffer, or TLB, caches the translation of virtual to physical addresses. It's filled automatically when a page is accessed. But it's not updated transparently when the mapping of a page changes. 
This is the reason that we can still access the page even though we unmapped it in the page table.