diff --git a/src/memory/area_frame_allocator.rs b/src/memory/area_frame_allocator.rs
index c0162b66..c8fc24dd 100644
--- a/src/memory/area_frame_allocator.rs
+++ b/src/memory/area_frame_allocator.rs
@@ -47,13 +47,12 @@ impl AreaFrameAllocator {
 
     fn choose_next_area(&mut self) {
         self.current_area = self.areas
-                                .clone()
-                                .filter(|area| {
-                                    let address = area.base_addr + area.length - 1;
-                                    Frame::containing_address(address as usize) >=
-                                    self.next_free_frame
-                                })
-                                .min_by_key(|area| area.base_addr);
+            .clone()
+            .filter(|area| {
+                let address = area.base_addr + area.length - 1;
+                Frame::containing_address(address as usize) >= self.next_free_frame
+            })
+            .min_by_key(|area| area.base_addr);
 
         if let Some(area) = self.current_area {
             let start_frame = Frame::containing_address(area.base_addr as usize);
diff --git a/src/memory/mod.rs b/src/memory/mod.rs
index 7cb46c9f..199471e3 100644
--- a/src/memory/mod.rs
+++ b/src/memory/mod.rs
@@ -24,15 +24,15 @@ pub fn init(boot_info: &BootInformation) {
     let elf_sections_tag = boot_info.elf_sections_tag().expect("Elf sections tag required");
 
     let kernel_start = elf_sections_tag.sections()
-                                       .filter(|s| s.is_allocated())
-                                       .map(|s| s.addr)
-                                       .min()
-                                       .unwrap();
+        .filter(|s| s.is_allocated())
+        .map(|s| s.addr)
+        .min()
+        .unwrap();
     let kernel_end = elf_sections_tag.sections()
-                                     .filter(|s| s.is_allocated())
-                                     .map(|s| s.addr + s.size)
-                                     .max()
-                                     .unwrap();
+        .filter(|s| s.is_allocated())
+        .map(|s| s.addr + s.size)
+        .max()
+        .unwrap();
 
     println!("kernel start: {:#x}, kernel end: {:#x}",
              kernel_start,
diff --git a/src/memory/paging/mapper.rs b/src/memory/paging/mapper.rs
index 68d34113..96f9e405 100644
--- a/src/memory/paging/mapper.rs
+++ b/src/memory/paging/mapper.rs
@@ -69,9 +69,9 @@ impl Mapper {
         };
 
         p3.and_then(|p3| p3.next_table(page.p3_index()))
-          .and_then(|p2| p2.next_table(page.p2_index()))
-          .and_then(|p1| p1[page.p1_index()].pointed_frame())
-          .or_else(huge_page)
+            .and_then(|p2| p2.next_table(page.p2_index()))
+            .and_then(|p1| p1[page.p1_index()].pointed_frame())
+            .or_else(huge_page)
     }
 
     pub fn map_to(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
@@ -105,10 +105,10 @@ impl Mapper {
         assert!(self.translate(page.start_address()).is_some());
 
         let p1 = self.p4_mut()
-                     .next_table_mut(page.p4_index())
-                     .and_then(|p3| p3.next_table_mut(page.p3_index()))
-                     .and_then(|p2| p2.next_table_mut(page.p2_index()))
-                     .expect("mapping code does not support huge pages");
+            .next_table_mut(page.p4_index())
+            .and_then(|p3| p3.next_table_mut(page.p3_index()))
+            .and_then(|p2| p2.next_table_mut(page.p2_index()))
+            .expect("mapping code does not support huge pages");
         let frame = p1[page.p1_index()].pointed_frame().unwrap();
         p1[page.p1_index()].set_unused();
         unsafe { ::x86::tlb::flush(page.start_address()) };
diff --git a/src/memory/paging/mod.rs b/src/memory/paging/mod.rs
index 967ae47a..43fab853 100644
--- a/src/memory/paging/mod.rs
+++ b/src/memory/paging/mod.rs
@@ -180,7 +180,7 @@ pub fn remap_the_kernel(allocator: &mut A, boot_info: &BootInformation) -> Ac
 
     active_table.with(&mut new_table, &mut temporary_page, |mapper| {
         let elf_sections_tag = boot_info.elf_sections_tag()
-                                        .expect("Memory map tag required");
+            .expect("Memory map tag required");
 
         // identity map the allocated kernel sections
        for section in elf_sections_tag.sections() {
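
Note on the hunks above: every removed/added pair differs only in whitespace. The method chains move from visual alignment (continuation lines indented to the column of the receiver) to block indentation, and one over-long comparison line is rejoined; behavior is unchanged. As a standalone, runnable sketch of the resulting chain style, the example below mirrors the kernel_start / kernel_end computation from src/memory/mod.rs. The (addr, size) pairs, the main function, and the omission of the .filter(|s| s.is_allocated()) step are illustrative assumptions, not part of the patch or of the multiboot2 API.

```rust
// Illustrative sketch only: block-indented iterator chains in the style the
// patch adopts, applied to made-up (start address, size) pairs instead of
// real multiboot2 ELF section tags.
fn main() {
    // Hypothetical "sections": (start address, size) pairs.
    let sections = [(0x10_0000usize, 0x1000usize), (0x20_0000, 0x2000)];

    // Lowest section start, mirroring the kernel_start chain in the diff.
    let kernel_start = sections.iter()
        .map(|&(addr, _size)| addr)
        .min()
        .unwrap();

    // Highest section end, mirroring the kernel_end chain in the diff.
    let kernel_end = sections.iter()
        .map(|&(addr, size)| addr + size)
        .max()
        .unwrap();

    println!("kernel start: {:#x}, kernel end: {:#x}", kernel_start, kernel_end);
}
```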