Unsafe operations in unsafe fn require an unsafe block since Rust 2024
@@ -403,14 +403,12 @@ pub unsafe fn active_level_4_table(physical_memory_offset: VirtAddr)
     let virt = physical_memory_offset + phys.as_u64();
     let page_table_ptr: *mut PageTable = virt.as_mut_ptr();

-    &mut *page_table_ptr // unsafe
+    unsafe { &mut *page_table_ptr }
 }
 ```

 First, we read the physical frame of the active level 4 table from the `CR3` register. We then take its physical start address, convert it to a `u64`, and add it to `physical_memory_offset` to obtain the virtual address corresponding to the page table frame. Finally, we convert this virtual address to a `*mut PageTable` raw pointer using the `as_mut_ptr` method and create a `&mut PageTable` reference from it (this is the unsafe part). We create a `&mut` reference rather than a `&` reference because we will modify this page table later.

-We don't need to use an unsafe block here because Rust treats the entire body of an `unsafe fn` as one large `unsafe` block. This makes the code more dangerous: we could accidentally write an unsafe operation on a line before the last (intentionally unsafe) one without noticing, and it also becomes much harder to find the unsafe operations. For this reason, there is an [RFC](https://github.com/rust-lang/rfcs/pull/2585) that proposes to change this behavior.
-
 Let's use this function to print the entries of the level 4 table:

 ```rust
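The paragraph removed above described the old behavior. Since the 2024 edition, the `unsafe_op_in_unsafe_fn` lint that grew out of this RFC warns by default, so the body of an `unsafe fn` is no longer an implicit `unsafe` block. A minimal sketch of the new rule (the `read_byte` function is hypothetical, for illustration only):

```rust
// On pre-2024 editions, opt in explicitly; in the 2024 edition this
// lint warns by default.
#![warn(unsafe_op_in_unsafe_fn)]

/// # Safety
/// `ptr` must be valid for reads.
unsafe fn read_byte(ptr: *const u8) -> u8 {
    // Previously the whole body counted as one big unsafe block.
    // Now each unsafe operation needs its own explicit block, which
    // documents exactly where the dangerous part is:
    unsafe { *ptr }
}

fn main() {
    let x = 42u8;
    // Calling an unsafe fn still requires unsafe at the call site.
    assert_eq!(unsafe { read_byte(&x) }, 42);
}
```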
@@ -640,8 +638,10 @@ use x86_64::structures::paging::OffsetPageTable;
 /// to aliased `&mut` references (known as mutable aliasing, which is
 /// undefined behavior), this function must only be called once.
 pub unsafe fn init(physical_memory_offset: VirtAddr) -> OffsetPageTable<'static> {
-    let level_4_table = active_level_4_table(physical_memory_offset);
-    OffsetPageTable::new(level_4_table, physical_memory_offset)
+    unsafe {
+        let level_4_table = active_level_4_table(physical_memory_offset);
+        OffsetPageTable::new(level_4_table, physical_memory_offset)
+    }
 }

 // make this private
@@ -399,14 +399,12 @@ pub unsafe fn active_level_4_table(physical_memory_offset: VirtAddr)
     let virt = physical_memory_offset + phys.as_u64();
     let page_table_ptr: *mut PageTable = virt.as_mut_ptr();

-    &mut *page_table_ptr // unsafe
+    unsafe { &mut *page_table_ptr }
 }
 ```

 First, we read the physical frame of the active level 4 table from the `CR3` register. We then take its physical start address, convert it to a `u64`, and add it to `physical_memory_offset` to get the virtual address where the page table frame is mapped. Finally, we convert the virtual address to a `*mut PageTable` raw pointer through the `as_mut_ptr` method and then unsafely create a `&mut PageTable` reference from it. We create a `&mut` reference instead of a `&` reference because we will mutate the page tables later in this post.

-We don't need to use an unsafe block here because Rust treats the complete body of an `unsafe fn` like a large `unsafe` block. This makes our code more dangerous since we could accidentally introduce an unsafe operation in previous lines without noticing. It also makes it much more difficult to spot unsafe operations in between safe operations. There is an [RFC](https://github.com/rust-lang/rfcs/pull/2585) to change this behavior.
-
 We can now use this function to print the entries of the level 4 table:

 ```rust
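For context, here is roughly how the complete function reads after this change, assembled as a sketch from the post's code (the imports are the usual ones from the `x86_64` crate):

```rust
use x86_64::{registers::control::Cr3, structures::paging::PageTable, VirtAddr};

/// Returns a mutable reference to the active level 4 table.
///
/// # Safety
/// The complete physical memory must be mapped at `physical_memory_offset`,
/// and this function must only be called once to avoid aliased `&mut` references.
pub unsafe fn active_level_4_table(physical_memory_offset: VirtAddr)
    -> &'static mut PageTable
{
    // read the currently active level 4 frame from the CR3 register
    let (level_4_table_frame, _) = Cr3::read();

    // translate its physical start address through the offset mapping
    let phys = level_4_table_frame.start_address();
    let virt = physical_memory_offset + phys.as_u64();
    let page_table_ptr: *mut PageTable = virt.as_mut_ptr();

    // the raw-pointer dereference now needs its own unsafe block
    unsafe { &mut *page_table_ptr }
}
```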
@@ -632,8 +630,10 @@ use x86_64::structures::paging::OffsetPageTable;
 /// `physical_memory_offset`. Also, this function must be only called once
 /// to avoid aliasing `&mut` references (which is undefined behavior).
 pub unsafe fn init(physical_memory_offset: VirtAddr) -> OffsetPageTable<'static> {
-    let level_4_table = active_level_4_table(physical_memory_offset);
-    OffsetPageTable::new(level_4_table, physical_memory_offset)
+    unsafe {
+        let level_4_table = active_level_4_table(physical_memory_offset);
+        OffsetPageTable::new(level_4_table, physical_memory_offset)
+    }
 }

 // make private
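A caller-side sketch of how this `init` is typically invoked, modeled on the post's `kernel_main`. The `physical_memory_offset` field assumes the `bootloader` crate's `map_physical_memory` feature, and `memory` stands for the post's module (e.g. `use blog_os::memory;`); treat the exact names as assumptions:

```rust
use bootloader::{entry_point, BootInfo};
use x86_64::VirtAddr;

entry_point!(kernel_main);

fn kernel_main(boot_info: &'static BootInfo) -> ! {
    // the bootloader maps all physical memory at this offset for us
    let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset);
    // sound: `init` is called exactly once, with the guaranteed offset
    let mut _mapper = unsafe { memory::init(phys_mem_offset) };
    loop {}
}
```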
@@ -409,14 +409,12 @@ pub unsafe fn active_level_4_table(physical_memory_offset: VirtAddr)
     let virt = physical_memory_offset + phys.as_u64();
     let page_table_ptr: *mut PageTable = virt.as_mut_ptr();

-    &mut *page_table_ptr // unsafe
+    unsafe { &mut *page_table_ptr }
 }
 ```

 First, we read the physical frame of the active level 4 table from the `CR3` register. We then take its physical start address, convert it to a `u64`, and add it to `physical_memory_offset` to get the virtual address where the page table frame is mapped. Finally, we convert the virtual address to a `*mut PageTable` raw pointer through the `as_mut_ptr` method and then unsafely create a `&mut PageTable` reference from it. We create a `&mut` reference instead of a `&` reference because we will mutate the page tables later in this post.

-We don't need to use an unsafe block here because Rust treats the complete body of an `unsafe fn` as one large `unsafe` block. This makes our code more dangerous, since we could accidentally introduce an unsafe operation in earlier lines without noticing. It also makes it much harder to spot unsafe operations among safe ones. There is an [RFC](https://github.com/rust-lang/rfcs/pull/2585) to change this behavior.
-
 Now we can use this function to print the entries of the level 4 table:

 ```rust
@@ -638,8 +636,10 @@ use x86_64::structures::paging::OffsetPageTable;
 /// is mapped to virtual memory at the passed `physical_memory_offset`. Also,
 /// this function must only be called once to avoid aliasing `&mut` references (which is undefined behavior).
 pub unsafe fn init(physical_memory_offset: VirtAddr) -> OffsetPageTable<'static> {
-    let level_4_table = active_level_4_table(physical_memory_offset);
-    OffsetPageTable::new(level_4_table, physical_memory_offset)
+    unsafe {
+        let level_4_table = active_level_4_table(physical_memory_offset);
+        OffsetPageTable::new(level_4_table, physical_memory_offset)
+    }
 }

 // make private
@@ -538,7 +538,9 @@ impl LinkedListAllocator {
     /// bounds are valid and that the heap is unused.
     /// This method must only be called once.
     pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
-        self.add_free_region(heap_start, heap_size);
+        unsafe {
+            self.add_free_region(heap_start, heap_size);
+        }
     }

     /// Adds the given memory region to the front of the list.
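For reference, the `ListNode` type that this allocator stores inside each free region, as defined in the post (reproduced here so the following hunks are easier to read):

```rust
struct ListNode {
    size: usize,
    next: Option<&'static mut ListNode>,
}

impl ListNode {
    const fn new(size: usize) -> Self {
        ListNode { size, next: None }
    }

    fn start_addr(&self) -> usize {
        self as *const Self as usize
    }

    fn end_addr(&self) -> usize {
        self.start_addr() + self.size
    }
}
```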
@@ -581,8 +583,10 @@ impl LinkedListAllocator {
         let mut node = ListNode::new(size);
         node.next = self.head.next.take();
         let node_ptr = addr as *mut ListNode;
-        node_ptr.write(node);
-        self.head.next = Some(&mut *node_ptr)
+        unsafe {
+            node_ptr.write(node);
+            self.head.next = Some(&mut *node_ptr)
+        }
     }
 }
 ```
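Putting the hunk in context, the whole method after this change reads roughly as follows (a sketch based on the post; `align_up` is the post's helper):

```rust
/// Adds the given memory region to the front of the list.
unsafe fn add_free_region(&mut self, addr: usize, size: usize) {
    // ensure that the freed region is capable of holding a ListNode
    assert_eq!(align_up(addr, mem::align_of::<ListNode>()), addr);
    assert!(size >= mem::size_of::<ListNode>());

    // create a new list node and append it at the start of the list
    let mut node = ListNode::new(size);
    node.next = self.head.next.take();
    let node_ptr = addr as *mut ListNode;
    unsafe {
        node_ptr.write(node);
        self.head.next = Some(&mut *node_ptr)
    }
}
```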
@@ -715,7 +719,9 @@ unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
             let alloc_end = alloc_start.checked_add(size).expect("overflow");
             let excess_size = region.end_addr() - alloc_end;
             if excess_size > 0 {
-                allocator.add_free_region(alloc_end, excess_size);
+                unsafe {
+                    allocator.add_free_region(alloc_end, excess_size);
+                }
             }
             alloc_start as *mut u8
         } else {
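For orientation, the surrounding `alloc` implementation, reconstructed from the post, splits off the unused tail of an oversized region and returns it to the free list:

```rust
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
    // perform layout adjustments
    let (size, align) = LinkedListAllocator::size_align(layout);
    let mut allocator = self.lock();

    if let Some((region, alloc_start)) = allocator.find_region(size, align) {
        let alloc_end = alloc_start.checked_add(size).expect("overflow");
        let excess_size = region.end_addr() - alloc_end;
        if excess_size > 0 {
            // the region is larger than needed; keep the tail free
            unsafe {
                allocator.add_free_region(alloc_end, excess_size);
            }
        }
        alloc_start as *mut u8
    } else {
        ptr::null_mut()
    }
}
```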
@@ -727,7 +733,7 @@ unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
         // perform layout adjustments
         let (size, _) = LinkedListAllocator::size_align(layout);

-        self.lock().add_free_region(ptr as usize, size)
+        unsafe { self.lock().add_free_region(ptr as usize, size) }
     }
 }
 ```
@@ -959,7 +965,9 @@ impl FixedSizeBlockAllocator {
     /// that the heap is unused.
     /// This method must only be called once.
     pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
-        self.fallback_allocator.init(heap_start, heap_size);
+        unsafe {
+            self.fallback_allocator.init(heap_start, heap_size);
+        }
     }
 }
 ```
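The `BLOCK_SIZES` array and `list_heads` used in the next hunk belong to the fixed-size block allocator; for reference, the constant as the post defines it:

```rust
/// The block sizes to use.
///
/// The sizes must each be a power of 2 because they are also used as
/// the block alignment (alignments must always be powers of 2).
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];
```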
@@ -1112,12 +1120,16 @@ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
                 assert!(mem::size_of::<ListNode>() <= BLOCK_SIZES[index]);
                 assert!(mem::align_of::<ListNode>() <= BLOCK_SIZES[index]);
                 let new_node_ptr = ptr as *mut ListNode;
-                new_node_ptr.write(new_node);
-                allocator.list_heads[index] = Some(&mut *new_node_ptr);
+                unsafe {
+                    new_node_ptr.write(new_node);
+                    allocator.list_heads[index] = Some(&mut *new_node_ptr);
+                }
             }
             None => {
                 let ptr = NonNull::new(ptr).unwrap();
-                allocator.fallback_allocator.deallocate(ptr, layout);
+                unsafe {
+                    allocator.fallback_allocator.deallocate(ptr, layout);
+                }
             }
         }
     }
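In this allocator, the node written into a freed block is the fixed-size variant of `ListNode`, which stores only a pointer to the next free block, since the block size is implied by the list it lives in. As in the post:

```rust
/// A node of a per-size free list; the freed block itself provides the
/// storage, so no size field is needed.
struct ListNode {
    next: Option<&'static mut ListNode>,
}
```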
@@ -537,7 +537,9 @@ impl LinkedListAllocator {
     /// heap bounds are valid and that the heap is unused. This method must be
     /// called only once.
     pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
-        self.add_free_region(heap_start, heap_size);
+        unsafe {
+            self.add_free_region(heap_start, heap_size);
+        }
     }

     /// Adds the given memory region to the front of the list.
@@ -580,8 +582,10 @@ impl LinkedListAllocator {
         let mut node = ListNode::new(size);
         node.next = self.head.next.take();
         let node_ptr = addr as *mut ListNode;
-        node_ptr.write(node);
-        self.head.next = Some(&mut *node_ptr)
+        unsafe {
+            node_ptr.write(node);
+            self.head.next = Some(&mut *node_ptr)
+        }
     }
 }
 ```
@@ -714,7 +718,9 @@ unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
             let alloc_end = alloc_start.checked_add(size).expect("overflow");
             let excess_size = region.end_addr() - alloc_end;
             if excess_size > 0 {
-                allocator.add_free_region(alloc_end, excess_size);
+                unsafe {
+                    allocator.add_free_region(alloc_end, excess_size);
+                }
             }
             alloc_start as *mut u8
         } else {
@@ -726,7 +732,7 @@ unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
         // perform layout adjustments
         let (size, _) = LinkedListAllocator::size_align(layout);

-        self.lock().add_free_region(ptr as usize, size)
+        unsafe { self.lock().add_free_region(ptr as usize, size) }
     }
 }
 ```
@@ -958,7 +964,7 @@ impl FixedSizeBlockAllocator {
     /// heap bounds are valid and that the heap is unused. This method must be
     /// called only once.
     pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
-        self.fallback_allocator.init(heap_start, heap_size);
+        unsafe { self.fallback_allocator.init(heap_start, heap_size); }
     }
 }
 ```
@@ -1111,12 +1117,16 @@ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
                 assert!(mem::size_of::<ListNode>() <= BLOCK_SIZES[index]);
                 assert!(mem::align_of::<ListNode>() <= BLOCK_SIZES[index]);
                 let new_node_ptr = ptr as *mut ListNode;
-                new_node_ptr.write(new_node);
-                allocator.list_heads[index] = Some(&mut *new_node_ptr);
+                unsafe {
+                    new_node_ptr.write(new_node);
+                    allocator.list_heads[index] = Some(&mut *new_node_ptr);
+                }
             }
             None => {
                 let ptr = NonNull::new(ptr).unwrap();
-                allocator.fallback_allocator.deallocate(ptr, layout);
+                unsafe {
+                    allocator.fallback_allocator.deallocate(ptr, layout);
+                }
             }
         }
     }