Compare commits

...

367 Commits

Author SHA1 Message Date
Philipp Oppermann
3fc7bf6aa1 Fix register typo: rpb -> rbp
Fixes #746
2020-02-16 18:02:24 +01:00
Philipp Oppermann
b337f65abb Add a yield_now function and use it in idle thread 2020-01-28 12:28:50 +01:00
Philipp Oppermann
22c6bd5aa7 Add idle thread and begin support for blocking 2020-01-28 12:22:37 +01:00
Philipp Oppermann
87719f2260 Switch current_thread_id before context switch 2020-01-28 11:29:21 +01:00
Philipp Oppermann
0caf5c351e Run cargo fmt 2020-01-23 14:24:13 +01:00
Philipp Oppermann
cb7bb0ddef Refactor and rewrite 2020-01-23 14:22:29 +01:00
Philipp Oppermann
7ad30651fb Rename allocator.rs to allocator/mod.rs 2020-01-23 11:46:45 +01:00
Philipp Oppermann
49923acb3f Print thread id instead of hardcoding numbers 2020-01-23 11:03:15 +01:00
Philipp Oppermann
f2b1f3a593 Fix handling of current thread id 2020-01-23 10:49:51 +01:00
Philipp Oppermann
5e2e0b629e Refactor threading code 2020-01-23 10:42:37 +01:00
Philipp Oppermann
35379c90e6 Force unlock writer on panic to avoid deadlocks 2020-01-23 10:42:19 +01:00
Philipp Oppermann
e5d10fcaec Increase stack size to avoid stack overflow 2020-01-23 10:41:59 +01:00
Philipp Oppermann
e1242a867f Move global_asm inline in threads module 2020-01-23 09:20:17 +01:00
Philipp Oppermann
cd138a3a1b Rename multitasking module to threads 2020-01-23 09:19:38 +01:00
Philipp Oppermann
11a0eb679c Fix race condition
The first timer interrupt might occur before the heap is initialized. With lazy_static, this causes an allocation failure since the VecDeque is allocated when it's accessed the first time. This commit fixes this by only initializing VecDeque in `add_thread`.
2020-01-23 08:50:35 +01:00
Philipp Oppermann
241c1ab2c9 Add support for closures 2020-01-23 08:24:48 +01:00
Philipp Oppermann
b75406b37e Add new modules 2020-01-22 17:24:17 +01:00
Philipp Oppermann
c3450b6df7 Refactor a bit 2020-01-22 16:33:23 +01:00
Philipp Oppermann
ce1fdcf768 Wip 2020-01-22 16:15:23 +01:00
Philipp Oppermann
002d6f255f Set FixedSizeBlockAllocator as global allocator 2020-01-20 14:09:00 +01:00
Philipp Oppermann
6c3bf0b10f Implement GlobalAlloc::dealloc 2020-01-20 14:07:47 +01:00
Philipp Oppermann
7a792f5cb0 Implement GlobalAlloc::alloc 2020-01-20 14:07:02 +01:00
Philipp Oppermann
93b4dcf434 Add skeleton for GlobalAlloc implementation 2020-01-20 14:06:26 +01:00
Philipp Oppermann
821dd2adb4 Add function to calculate the list index 2020-01-20 14:05:24 +01:00
Philipp Oppermann
d636939b51 Add FixedSizeBlockAllocator::fallback_alloc method 2020-01-20 14:05:01 +01:00
Philipp Oppermann
9b7326541e Add FixedSizeBlockAllocator type 2020-01-20 14:04:13 +01:00
Philipp Oppermann
4f234b67ef Add ListNode type and BLOCK_SIZES constant 2020-01-20 14:02:57 +01:00
Philipp Oppermann
7381e11f3c Create a new fixed_size_block allocator submodule 2020-01-20 14:01:59 +01:00
Philipp Oppermann
a9fe65a0ce Use new LinkedListAllocator 2020-01-10 13:04:46 +01:00
Philipp Oppermann
2001814119 Implement LinkedListAllocator::size_align 2020-01-10 13:00:16 +01:00
Philipp Oppermann
a5c50e7408 Implement GlobalAlloc for LinkedListAllocator 2020-01-10 11:52:04 +01:00
Philipp Oppermann
70a52c291d Implement alloc_from_region 2020-01-10 11:48:56 +01:00
Philipp Oppermann
c56bfa27e4 Implement find_region 2020-01-10 11:46:10 +01:00
Philipp Oppermann
55aec9ebf3 Apply rustfmt to ListNode::new function 2020-01-10 11:44:38 +01:00
Philipp Oppermann
2e1d132a9a Implement add_free_region 2020-01-10 11:44:17 +01:00
Philipp Oppermann
63e8577d77 Create a basic LinkedListAllocator type 2020-01-10 11:42:04 +01:00
Philipp Oppermann
75d826bf69 Add a test that memory is reused with a long lived allocation
This test fails for the bump allocator because it can only free the complete heap at once, which is prevented by the single long-lived allocation.
2020-01-09 15:45:38 +01:00
Philipp Oppermann
45be3f0648 Use our BumpAllocator instead of linked_list_allocator crate 2020-01-09 15:37:43 +01:00
Philipp Oppermann
055c560a7a Add an align_up function 2020-01-09 15:36:06 +01:00
Philipp Oppermann
e87044a7ee Implement GlobalAlloc for BumpAllocator 2020-01-09 15:35:03 +01:00
Philipp Oppermann
08d2289dad Add a Locked wrapper type that can be used to implement GlobalAlloc 2020-01-09 15:34:04 +01:00
Philipp Oppermann
7c84dbaa1d Create a basic BumpAllocator type 2020-01-09 15:25:37 +01:00
Philipp Oppermann
882c83f9de Update many_boxes test to scale with heap size (#716)
Instead of using a hardcoded number of boxes, use the HEAP_SIZE constant. This ensures that we get a test failure because of an out-of-memory error when the allocator does not reuse freed memory.
2020-01-09 12:58:29 +01:00
Philipp Oppermann
869a69e531 Move #[global_allocator] into allocator module (#714)
The Rust issue preventing #[global_allocator] from being defined in submodules was fixed.
2020-01-08 12:38:06 +01:00
Philipp Oppermann
817267e51c Update Github Actions badge for post-10 2019-12-12 09:32:59 +01:00
Philipp Oppermann
9e75078dab Merge branch 'post-09' into post-10 2019-12-12 09:32:46 +01:00
Philipp Oppermann
ed3af8e984 Update Github Actions badge for post-09 2019-12-12 09:32:30 +01:00
Philipp Oppermann
1d1075b35e Merge branch 'post-08' into post-09 2019-12-12 09:32:16 +01:00
Philipp Oppermann
00f39aaccb Update Github Actions badge for post-08 2019-12-12 09:31:56 +01:00
Philipp Oppermann
f815a1742a Merge branch 'post-07' into post-08 2019-12-12 09:31:42 +01:00
Philipp Oppermann
2bf70751bf Update Github Actions badge for post-07 2019-12-12 09:31:24 +01:00
Philipp Oppermann
ad2590068d Merge branch 'post-06' into post-07 2019-12-12 09:31:07 +01:00
Philipp Oppermann
41f38d92f2 Merge branch 'post-09' into post-10 2019-12-12 09:23:31 +01:00
Philipp Oppermann
8eb44dcbd5 Merge branch 'post-08' into post-09 2019-12-12 09:23:31 +01:00
Philipp Oppermann
1f771a6820 Merge branch 'post-07' into post-08 2019-12-12 09:23:31 +01:00
Philipp Oppermann
36b8fd402f Merge branch 'post-06' into post-07 2019-12-12 09:23:31 +01:00
Philipp Oppermann
3ac5318c94 Remove now unneeded unsafe block
The `map_to` method is safe since x86_64 0.8.1.
2019-12-11 16:47:45 +01:00
Philipp Oppermann
e94a489a31 Merge branch 'post-09' into post-10 2019-12-11 16:38:08 +01:00
Philipp Oppermann
f392d8a7f5 Merge pull request #703 from phil-opp/post-09-fix
Update frame allocation code to x86_64 0.8.1
2019-12-11 16:37:49 +01:00
Philipp Oppermann
c7bc26d8ee Update frame allocation code to x86_64 0.8.1 2019-12-11 16:26:38 +01:00
Philipp Oppermann
3171ab584b Merge branch 'post-07' into post-08 2019-12-10 17:31:24 +01:00
Philipp Oppermann
dfd65d579c Merge branch 'post-06' into post-07 2019-12-10 17:31:24 +01:00
Philipp Oppermann
9da9ecb515 Merge branch 'post-09' into post-10 2019-12-10 17:31:24 +01:00
Philipp Oppermann
6c1594ea24 Merge branch 'post-08' into post-09 2019-12-10 17:31:24 +01:00
Philipp Oppermann
9d31eb8aa8 Merge branch 'post-07' into post-08 2019-12-10 17:17:53 +01:00
Philipp Oppermann
3ffd71723f Merge branch 'post-06' into post-07 2019-12-10 17:17:53 +01:00
Philipp Oppermann
f226ba0c3f Merge branch 'post-09' into post-10 2019-12-10 17:17:53 +01:00
Philipp Oppermann
94aa2c4093 Merge branch 'post-08' into post-09 2019-12-10 17:17:53 +01:00
Philipp Oppermann
2436801a05 Merge branch 'post-07' into post-08 2019-12-10 16:55:41 +01:00
Philipp Oppermann
617d0e6db6 Merge branch 'post-06' into post-07 2019-12-10 16:55:41 +01:00
Philipp Oppermann
c1fc7171fb Merge branch 'post-09' into post-10 2019-12-10 16:55:41 +01:00
Philipp Oppermann
259742a5e3 Merge branch 'post-08' into post-09 2019-12-10 16:55:41 +01:00
Philipp Oppermann
cf527e9ce7 Update post-10 to new lockfile format 2019-11-28 14:08:28 +01:00
Philipp Oppermann
c891acb234 Merge branch 'post-09' into post-10 2019-11-28 14:08:08 +01:00
Philipp Oppermann
6876e82fc5 Merge branch 'post-08' into post-09 2019-11-28 14:07:43 +01:00
Philipp Oppermann
1b1953678c Merge branch 'post-07' into post-08 2019-11-28 14:07:29 +01:00
Philipp Oppermann
d4c256c653 Update post-07 to new lockfile format 2019-11-28 14:07:23 +01:00
Philipp Oppermann
9e5ff8dd78 Merge branch 'post-06' into post-07 2019-11-28 14:07:03 +01:00
Philipp Oppermann
b9a0702bc0 Merge branch 'post-09' into post-10 2019-11-25 13:35:20 +01:00
Philipp Oppermann
84ddda6a3f Merge branch 'post-08' into post-09 2019-11-25 13:35:20 +01:00
Philipp Oppermann
37b8b42cba Add missing hlt_loop import 2019-11-25 13:35:18 +01:00
Philipp Oppermann
2919cbdefc Merge branch 'post-09' into post-10 2019-11-25 13:25:15 +01:00
Philipp Oppermann
f85664fd6b Merge branch 'post-08' into post-09 2019-11-25 13:25:15 +01:00
Philipp Oppermann
1e86c9f5f0 Merge branch 'post-07' into post-08 2019-11-25 13:25:15 +01:00
Philipp Oppermann
084a77775f Merge branch 'post-06' into post-07 2019-11-25 13:25:02 +01:00
Philipp Oppermann
059ea76848 Merge branch 'post-09' into post-10 2019-11-22 16:33:23 +01:00
Philipp Oppermann
a65573b061 Merge branch 'post-08' into post-09 2019-11-22 16:33:23 +01:00
Philipp Oppermann
723776f852 Merge branch 'post-07' into post-08 2019-11-22 16:33:23 +01:00
Philipp Oppermann
1bf2a49526 Merge pull request #689 from phil-opp/post-07-new
post-07: Use panic instead of println + hlt_loop for double fault handler
2019-11-22 16:33:00 +01:00
Philipp Oppermann
b5b37d6e2b Remove unused crate::hlt_loop import 2019-11-22 16:12:28 +01:00
Philipp Oppermann
c3f76cf1f0 Use panic instead of println + hlt_loop for double fault handler 2019-11-22 16:06:56 +01:00
Philipp Oppermann
2f085b7310 Merge branch 'post-09' into post-10 2019-11-22 16:05:13 +01:00
Philipp Oppermann
385004cab2 Merge branch 'post-08' into post-09 2019-11-22 16:05:13 +01:00
Philipp Oppermann
fab00675e1 Merge branch 'post-07' into post-08 2019-11-22 16:05:13 +01:00
Philipp Oppermann
12eed472ba Merge branch 'post-06' into post-07 2019-11-22 16:05:10 +01:00
Philipp Oppermann
cc713e4570 Merge branch 'post-09' into post-10 2019-11-22 15:57:18 +01:00
Philipp Oppermann
9696612b2b Merge branch 'post-08' into post-09 2019-11-22 15:57:18 +01:00
Philipp Oppermann
b0b0ebda06 Merge branch 'post-07' into post-08 2019-11-22 15:57:18 +01:00
Philipp Oppermann
1e8720b4ff Merge branch 'post-06' into post-07 2019-11-22 15:57:18 +01:00
Philipp Oppermann
b3066e9a78 Merge branch 'post-09' into post-10 2019-10-08 19:44:38 +02:00
Philipp Oppermann
d5d7db0de6 Merge branch 'post-08' into post-09 2019-10-08 19:44:38 +02:00
Philipp Oppermann
271f65e21c Merge branch 'post-07' into post-08 2019-10-08 19:44:38 +02:00
Philipp Oppermann
c83160554e Merge branch 'post-06' into post-07 2019-10-08 19:44:38 +02:00
Philipp Oppermann
66c3a0de76 Merge branch 'post-09' into post-10 2019-09-25 13:21:18 +02:00
Philipp Oppermann
45375d1f1b Merge branch 'post-08' into post-09 2019-09-25 13:21:06 +02:00
Philipp Oppermann
9cab6fb659 Merge branch 'post-07' into post-08 2019-09-25 13:20:55 +02:00
Philipp Oppermann
e7446d2df6 Merge branch 'post-06' into post-07 2019-09-25 13:20:38 +02:00
Philipp Oppermann
96d9de76b0 Fix: memory::init expects a VirtAddr 2019-09-15 10:56:15 +02:00
Philipp Oppermann
f56e4d24e3 Add missing import 2019-09-14 19:08:53 +02:00
Philipp Oppermann
8d3d712cef Merge branch 'post-09' into post-10 2019-09-14 19:06:30 +02:00
Philipp Oppermann
4897e9c4d5 Merge pull request #667 from phil-opp/post-09-offset_page_table
Update post-09 branch for improved Paging Implementation Post
2019-09-14 19:04:53 +02:00
Philipp Oppermann
5cced71fb0 Directly use OffsetPageTable for create_example_mapping instead of impl trait 2019-09-14 18:33:37 +02:00
Philipp Oppermann
7ec727f69f Update comment 2019-09-14 18:33:16 +02:00
Philipp Oppermann
e75c623985 Merge branch 'post-07' into post-08 2019-09-13 17:43:25 +02:00
Philipp Oppermann
27a0ae6000 Merge branch 'post-06' into post-07 2019-09-13 17:43:25 +02:00
Philipp Oppermann
5f66b437d5 Merge branch 'post-09' into post-10 2019-09-13 17:43:25 +02:00
Philipp Oppermann
8059c229c5 Merge branch 'post-08' into post-09 2019-09-13 17:43:25 +02:00
Philipp Oppermann
dce26ede7e Merge branch 'post-07' into post-08 2019-09-13 10:53:50 +02:00
Philipp Oppermann
cfe2e23a9c Merge branch 'post-06' into post-07 2019-09-13 10:53:50 +02:00
Philipp Oppermann
0694f29946 Merge branch 'post-09' into post-10 2019-09-13 10:53:50 +02:00
Philipp Oppermann
bc2099d31d Merge branch 'post-08' into post-09 2019-09-13 10:53:50 +02:00
Philipp Oppermann
211ec3898b Use OffsetPageTable instead of MappedPageTable 2019-09-11 13:40:36 +02:00
Philipp Oppermann
889c0771d6 Merge branch 'post-09' into post-10 2019-09-11 13:33:07 +02:00
Philipp Oppermann
2227fa434f Merge branch 'post-08' into post-09 2019-09-11 13:29:14 +02:00
Philipp Oppermann
20ffda14f4 Merge branch 'post-07' into post-08 2019-09-11 13:28:53 +02:00
Philipp Oppermann
e4ce277ca9 Merge branch 'post-06' into post-07 2019-09-11 13:19:01 +02:00
Philipp Oppermann
e74d9753f2 Merge branch 'post-09' into post-10 2019-09-11 11:21:15 +02:00
Philipp Oppermann
23d554548a Merge branch 'post-08' into post-09 2019-09-11 11:21:15 +02:00
Philipp Oppermann
0cd7d4cbcc Merge branch 'post-07' into post-08 2019-09-11 11:21:15 +02:00
Philipp Oppermann
a9bcf44012 Merge branch 'post-06' into post-07 2019-09-11 11:21:15 +02:00
Philipp Oppermann
9fc71547d7 Run cargo update 2019-09-11 10:59:31 +02:00
Philipp Oppermann
caa9d8b0e4 Merge branch 'post-09' into post-10 2019-09-11 10:59:18 +02:00
Philipp Oppermann
091f7ef153 Merge branch 'post-08' into post-09 2019-09-11 10:58:56 +02:00
Philipp Oppermann
76d03974fe Merge branch 'post-07' into post-08 2019-09-11 10:58:40 +02:00
Philipp Oppermann
881ad152a0 Merge branch 'post-06' into post-07 2019-09-11 10:58:19 +02:00
Philipp Oppermann
29512ddd31 Merge branch 'post-09' into post-10 2019-09-11 10:32:57 +02:00
Philipp Oppermann
38d606b4d1 Merge branch 'post-08' into post-09 2019-09-11 10:32:57 +02:00
Philipp Oppermann
3dcc43b374 Merge branch 'post-07' into post-08 2019-09-11 10:32:57 +02:00
Philipp Oppermann
0c713b9978 Merge branch 'post-06' into post-07 2019-09-11 10:32:57 +02:00
Philipp Oppermann
a7943e7e55 Merge branch 'post-09' into post-10 2019-09-11 10:12:46 +02:00
Philipp Oppermann
ae75d8b209 Merge branch 'post-08' into post-09 2019-09-11 10:12:46 +02:00
Philipp Oppermann
f7fc89fd31 Merge branch 'post-07' into post-08 2019-09-11 10:12:46 +02:00
Philipp Oppermann
1427993cac Merge branch 'post-06' into post-07 2019-09-11 10:12:46 +02:00
Philipp Oppermann
bcdadec5b6 Merge branch 'post-09' into post-10 2019-09-11 10:11:25 +02:00
Philipp Oppermann
e0d5cdd625 Merge branch 'post-08' into post-09 2019-09-11 10:11:25 +02:00
Philipp Oppermann
db92a921b8 Merge branch 'post-07' into post-08 2019-09-11 10:11:25 +02:00
Philipp Oppermann
84eb8632f5 Merge branch 'post-06' into post-07 2019-09-11 10:11:25 +02:00
Philipp Oppermann
17f8866264 Merge branch 'post-09' into post-10 2019-09-11 10:02:41 +02:00
Philipp Oppermann
3f95494ae8 Merge branch 'post-08' into post-09 2019-09-11 10:02:41 +02:00
Philipp Oppermann
80686ded94 Merge branch 'post-07' into post-08 2019-09-11 10:02:41 +02:00
Philipp Oppermann
cefa607569 Merge branch 'post-06' into post-07 2019-09-11 10:02:41 +02:00
Philipp Oppermann
a7ef4012d0 Merge branch 'post-09' into post-10 2019-09-10 11:12:54 +02:00
Philipp Oppermann
cabbbec72f Merge branch 'post-08' into post-09 2019-09-10 11:12:54 +02:00
Philipp Oppermann
5a67b64d20 Merge branch 'post-07' into post-08 2019-09-10 11:12:54 +02:00
Philipp Oppermann
c2431ecf63 Merge branch 'post-06' into post-07 2019-09-10 11:12:54 +02:00
Philipp Oppermann
6f07c2b666 Merge branch 'post-09' into post-10 2019-09-10 10:21:14 +02:00
Philipp Oppermann
1dd7f03a7a Merge branch 'post-08' into post-09 2019-09-10 10:21:14 +02:00
Philipp Oppermann
1a74b36c99 Merge branch 'post-07' into post-08 2019-09-10 10:21:14 +02:00
Philipp Oppermann
c87b221f5e Merge branch 'post-06' into post-07 2019-09-10 10:21:14 +02:00
Philipp Oppermann
e89c77398d Merge branch 'post-09' into post-10 2019-08-07 12:40:04 +02:00
Philipp Oppermann
66c4eae9cf Merge branch 'post-08' into post-09 2019-08-07 12:40:04 +02:00
Philipp Oppermann
175065ca2f Merge branch 'post-07' into post-08 2019-08-07 12:40:04 +02:00
Philipp Oppermann
6ab4b0170f Merge branch 'post-06' into post-07 2019-08-07 12:40:04 +02:00
Philipp Oppermann
f3a7689f33 Merge branch 'post-09' into post-10 2019-07-22 10:46:35 +02:00
Philipp Oppermann
e721878e4b Merge branch 'post-08' into post-09 2019-07-22 10:46:35 +02:00
Philipp Oppermann
7020999ab8 The error code issue is fixed, so let's print it (#643)
See https://github.com/phil-opp/blog_os/issues/513 for more information.
2019-07-22 10:46:19 +02:00
Philipp Oppermann
14d0e07b3e Merge branch 'post-09' into post-10 2019-07-22 10:36:58 +02:00
Philipp Oppermann
f2eb6cec9a Merge branch 'post-08' into post-09 2019-07-22 10:36:51 +02:00
Philipp Oppermann
30e0b16a81 Merge branch 'post-07' into post-08 2019-07-22 10:36:51 +02:00
Philipp Oppermann
05248fe322 Merge branch 'post-06' into post-07 2019-07-22 10:36:37 +02:00
Philipp Oppermann
9651eb7a3b Merge branch 'post-09' into post-10 2019-07-18 10:17:50 +02:00
Philipp Oppermann
bc4ddcef05 Merge branch 'post-08' into post-09 2019-07-18 10:17:37 +02:00
Philipp Oppermann
d7d63af4cc Merge branch 'post-07' into post-08 2019-07-18 10:17:20 +02:00
Philipp Oppermann
ac2b6b1307 Merge branch 'post-06' into post-07 2019-07-18 10:17:10 +02:00
Philipp Oppermann
89e3e2b190 Use correct build badge in post-10 Readme 2019-07-07 11:26:15 +02:00
Philipp Oppermann
004282138c Merge branch 'post-09' into post-10 2019-07-07 11:25:48 +02:00
Philipp Oppermann
4580b85fe2 Use correct build badge in post-09 Readme 2019-07-07 11:25:40 +02:00
Philipp Oppermann
8c575388cf Merge pull request #626 from phil-opp/code-heap
Code for new heap allocation post
2019-06-26 21:30:42 +02:00
Philipp Oppermann
4792ec41b1 Adjust comments to be equal with post 2019-06-26 21:08:08 +02:00
Philipp Oppermann
df75f7f4e8 Add an integration test for heap allocation 2019-06-26 17:45:32 +02:00
Philipp Oppermann
5cf3884396 Run cargo fmt 2019-06-26 16:59:38 +02:00
Philipp Oppermann
e5b6ba38ac Update Readme for new post 2019-06-26 16:33:20 +02:00
Philipp Oppermann
f429a8ab03 Example use of Box, Vec, and Rc in kernel_main 2019-06-26 15:06:40 +02:00
Philipp Oppermann
d7484ab48b Use linked_list_allocator crate instead of dummy allocator 2019-06-26 15:05:57 +02:00
Philipp Oppermann
06fc63028a Create a heap memory area 2019-06-26 13:14:56 +02:00
Philipp Oppermann
d4623419b0 Try to use Box type in main.rs
This causes an allocation error because the Dummy::alloc function always returns a null pointer.
2019-06-26 12:34:57 +02:00
Philipp Oppermann
417c44159e Add an alloc_error_handler function 2019-06-26 12:34:57 +02:00
Philipp Oppermann
ebbc6d55d2 Use dummy allocator as global allocator 2019-06-26 12:34:57 +02:00
Philipp Oppermann
c0367074ac Create an allocator module with a dummy allocator 2019-06-26 12:34:57 +02:00
Philipp Oppermann
48e2175bac Add a dependency on the alloc crate 2019-06-26 12:34:57 +02:00
Philipp Oppermann
954cfe977a Merge branch 'post-07' into post-08 2019-06-23 18:07:34 +02:00
Philipp Oppermann
262f56c9e2 Merge branch 'post-06' into post-07 2019-06-23 18:07:34 +02:00
Philipp Oppermann
45e1e99390 Merge branch 'post-08' into post-09 2019-06-23 18:07:34 +02:00
Philipp Oppermann
eb86565308 Merge branch 'post-07' into post-08 2019-06-17 17:28:32 +02:00
Philipp Oppermann
061dee44eb Merge branch 'post-06' into post-07 2019-06-17 17:28:32 +02:00
Philipp Oppermann
b5793c34c6 Merge branch 'post-08' into post-09 2019-06-17 17:28:32 +02:00
Philipp Oppermann
bbeb63ec3d Merge branch 'post-07' into post-08 2019-05-23 12:39:35 +02:00
Philipp Oppermann
fe35a21965 Merge branch 'post-06' into post-07 2019-05-23 12:39:35 +02:00
Philipp Oppermann
67ee45090d Merge branch 'post-08' into post-09 2019-05-23 12:39:35 +02:00
Philipp Oppermann
a4a7b5e8d6 Merge branch 'post-07' into post-08 2019-05-09 15:38:47 +02:00
Philipp Oppermann
64d1a587e7 Merge branch 'post-08' into post-09 2019-05-09 15:38:47 +02:00
Philipp Oppermann
13923c59f2 Fix: Make keyboard port mutable
This is required because of the update to x86_64 0.7.0 (see #606).
2019-05-09 15:38:34 +02:00
Philipp Oppermann
189ab7d0d8 Merge branch 'post-07' into post-08 2019-05-09 14:59:55 +02:00
Philipp Oppermann
fbe279831e Merge branch 'post-08' into post-09 2019-05-09 14:59:55 +02:00
Philipp Oppermann
09be8647d5 Merge branch 'post-06' into post-07 2019-05-09 14:59:55 +02:00
Philipp Oppermann
375d4d0479 Merge pull request #599 from phil-opp/post-09-new
Update `post-09` branch for version 0.6.0 of `x86_64` crate
2019-05-03 19:41:29 +02:00
Philipp Oppermann
7796d4c14a FrameAllocator is an unsafe trait now
Make `BootInfoFrameAllocator` unsafe because the caller must guarantee that the given memory map is valid.
2019-05-03 19:29:14 +02:00
Philipp Oppermann
a2beb9d2a6 Merge branch 'post-07' into post-08 2019-05-03 18:36:45 +02:00
Philipp Oppermann
a2d36342c1 Merge branch 'post-06' into post-07 2019-05-03 18:36:45 +02:00
Philipp Oppermann
78e4b22a2f Merge branch 'post-08' into post-09 2019-05-03 18:36:45 +02:00
Philipp Oppermann
a867450e3b Merge pull request #595 from phil-opp/redesign-frame-allocator
Avoid generic impl trait parameters in BootInfoFrameAllocator
2019-04-30 13:24:43 +02:00
Philipp Oppermann
b5ee44621c Use an import for initializing BootInfoFrameAllocator 2019-04-30 13:10:53 +02:00
Philipp Oppermann
24a9e7abd2 Simplify FrameAllocator implementation using Iterator::nth 2019-04-30 12:47:45 +02:00
Philipp Oppermann
180c77d1f4 Avoid generic impl trait parameters in BootInfoFrameAllocator 2019-04-30 11:14:23 +02:00
Philipp Oppermann
247af45791 Merge branch 'post-08-new' into post-09-new 2019-04-26 15:50:18 +02:00
Philipp Oppermann
667c093594 Merge branch 'post-07-new' into post-08-new 2019-04-26 15:50:02 +02:00
Philipp Oppermann
abf640254a Merge branch 'post-06-new' into post-07-new 2019-04-26 15:49:43 +02:00
Philipp Oppermann
fd0646fecf Improve formatting 2019-04-26 15:29:43 +02:00
Philipp Oppermann
c8821cb226 Use entry_point macro in lib.rs too 2019-04-26 15:29:43 +02:00
Philipp Oppermann
2e531850b8 Remove redundant import 2019-04-26 15:29:43 +02:00
Philipp Oppermann
65dbb5ac48 Merge branch 'post-08-new' into post-09-new 2019-04-26 15:28:56 +02:00
Philipp Oppermann
62f913facc Move test_main call to end of _start 2019-04-26 15:27:48 +02:00
Philipp Oppermann
d6f48d72aa Remove duplicated import 2019-04-26 15:27:48 +02:00
Philipp Oppermann
2784998301 Update post number for Readme badge 2019-04-26 15:27:05 +02:00
Philipp Oppermann
6099fddd54 Merge branch 'post-07-new' into post-08-new 2019-04-26 15:26:47 +02:00
Philipp Oppermann
8854b6b751 Move hlt_loop up to keep cfg(test) functions together 2019-04-26 15:25:41 +02:00
Philipp Oppermann
51cdc4db8b Use hlt_loop in lib.rs too 2019-04-26 15:25:41 +02:00
Philipp Oppermann
ae93dc18c3 Fix race condition in test_println_output test 2019-04-26 15:25:41 +02:00
Philipp Oppermann
acfdf929ad Move PIC initialization and interrupt::enable to blog_os::init 2019-04-26 15:25:41 +02:00
Philipp Oppermann
3e6f242b52 Update post number for Readme badge 2019-04-26 15:24:38 +02:00
Philipp Oppermann
ad6cb02d5c Merge branch 'post-06-new' into post-07-new-rebased 2019-04-26 15:24:28 +02:00
Philipp Oppermann
16cf7e8e42 Merge branch 'post-09' into post-10 2019-04-25 11:08:11 +02:00
Philipp Oppermann
9c7faf92ed Merge branch 'post-08' into post-09 2019-04-25 11:08:11 +02:00
Philipp Oppermann
db0489403a Merge branch 'post-07' into post-08 2019-04-25 11:08:11 +02:00
Philipp Oppermann
19556d9e68 Merge branch 'post-09' into post-10 2019-04-11 15:32:21 +02:00
Philipp Oppermann
22ba71a3f6 Merge branch 'post-08' into post-09 2019-04-11 15:32:21 +02:00
Philipp Oppermann
25c6640c62 Merge branch 'post-07' into post-08 2019-04-11 15:32:21 +02:00
Philipp Oppermann
338833262c Merge branch 'post-09' into post-10 2019-04-06 17:35:17 +02:00
Philipp Oppermann
59d0a267d6 Merge branch 'post-08' into post-09 2019-04-06 17:35:17 +02:00
Philipp Oppermann
2053c59d2a Merge branch 'post-07' into post-08 2019-04-06 17:35:17 +02:00
Philipp Oppermann
77800d9212 Merge branch 'post-09' into post-10 2019-04-06 17:33:49 +02:00
Philipp Oppermann
71b3d0431f Merge branch 'post-08' into post-09 2019-04-06 17:33:29 +02:00
Philipp Oppermann
823bc3c5a3 Merge branch 'post-07' into post-08 2019-04-06 17:31:55 +02:00
Philipp Oppermann
be618df7f5 Merge branch 'post-09' into post-10 2019-04-06 16:49:18 +02:00
Philipp Oppermann
0ca216d4fe Merge branch 'post-08' into post-09 2019-04-06 16:49:02 +02:00
Philipp Oppermann
37a2c925d6 Merge branch 'post-07' into post-08 2019-04-06 16:49:02 +02:00
Philipp Oppermann
ff85a2e502 Merge branch 'post-09' into post-10
# Conflicts:
#	README.md
2019-04-03 10:47:15 +02:00
Philipp Oppermann
80d4065b4c Merge branch 'post-08' into post-09
# Conflicts:
#	README.md
2019-04-03 10:47:04 +02:00
Philipp Oppermann
6a44f51eaa Merge branch 'post-07' into post-08
# Conflicts:
#	README.md
2019-04-03 10:46:54 +02:00
Philipp Oppermann
74675346c9 Merge branch 'post-09' into post-10 2019-03-26 13:26:56 +01:00
Philipp Oppermann
d36d1dc18b Merge branch 'post-08' into post-09 2019-03-26 13:26:56 +01:00
Philipp Oppermann
a0420c229e Merge branch 'post-07' into post-08 2019-03-26 13:26:56 +01:00
Philipp Oppermann
e9344ae046 Remove unneeded into_iter() in init_frame_allocator 2019-03-14 13:20:26 +01:00
Philipp Oppermann
4acf12bb69 Merge branch 'post-09' into post-10 2019-03-14 11:18:40 +01:00
Philipp Oppermann
6f25c34d46 Merge branch 'post-08' into post-09 2019-03-14 11:18:22 +01:00
Philipp Oppermann
23e45b5b13 Merge branch 'post-07' into post-08 2019-03-14 11:18:22 +01:00
Philipp Oppermann
161d5fe7be Merge pull request #569 from phil-opp/post-10-new
Update post-10 branch for new "Paging Implementation" post
2019-03-14 10:49:55 +01:00
Philipp Oppermann
9bf4ea7341 Use BootInfoFrameAllocator to create a 0xdeadbeaf000 mapping 2019-03-14 10:30:37 +01:00
Philipp Oppermann
a1bf5651fc Create an init_frame_allocator function 2019-03-14 10:30:37 +01:00
Philipp Oppermann
763228c859 Create a generic BootInfoFrameAllocator type 2019-03-14 10:30:37 +01:00
Philipp Oppermann
770af27d75 Create a new mapping and write through it to the screen 2019-03-14 10:30:37 +01:00
Philipp Oppermann
3e59283c19 Create an EmptyFrameAllocator 2019-03-14 10:30:37 +01:00
Philipp Oppermann
6146ccba2d Add a memory::create_example_mapping function 2019-03-14 10:30:37 +01:00
Philipp Oppermann
b0e1527a95 Delete our memory::translate_addr function again 2019-03-14 10:30:37 +01:00
Philipp Oppermann
cb4410c84e Update kernel_main to use MapperAllSizes::translate_addr 2019-03-14 10:30:37 +01:00
Philipp Oppermann
98b5976656 Create a memory::init function that initializes a MappedPageTable 2019-03-14 10:30:37 +01:00
Philipp Oppermann
9335386928 Add and test a memory::translate_addr function 2019-03-14 10:30:37 +01:00
Philipp Oppermann
7c30d62f33 Also show non-empty level 3 table entries 2019-03-14 10:30:37 +01:00
Philipp Oppermann
61683bccda Print non-empty level 4 table entries 2019-03-14 10:30:37 +01:00
Philipp Oppermann
e1ec5159b8 Add boot info argument and use entry_point macro 2019-03-14 10:30:37 +01:00
Philipp Oppermann
7b7d19592f Enable map_physical_memory feature of bootloader 2019-03-14 10:30:37 +01:00
Philipp Oppermann
e387c0b6b8 Create a memory::active_level_4_table function 2019-03-14 10:30:37 +01:00
Philipp Oppermann
d5abc119f3 Update Readme for Paging Implementation post 2019-03-14 10:30:37 +01:00
Philipp Oppermann
59da6e5620 Update bootloader to version 0.4.0 2019-03-14 10:30:37 +01:00
Philipp Oppermann
ef1cc0ed4f Reset code to post-09 branch for new 'Paging Implementation' post 2019-03-14 10:20:46 +01:00
Philipp Oppermann
90f0caec1a Merge branch 'post-09' into post-10 2019-03-14 10:17:37 +01:00
Philipp Oppermann
7198a4d110 The code for reading the level 4 table was moved to the next post 2019-03-12 17:49:31 +01:00
Philipp Oppermann
5c0fb63f33 Merge branch 'post-09' into post-10 2019-03-12 17:48:43 +01:00
Philipp Oppermann
6ffcb2cf1a Merge branch 'post-08' into post-09 2019-03-12 17:48:43 +01:00
Philipp Oppermann
1c72107cb1 Merge branch 'post-07' into post-08 2019-03-12 17:48:24 +01:00
Philipp Oppermann
036a8e7608 Merge branch 'post-09' into post-10 2019-03-09 14:21:12 +01:00
Philipp Oppermann
3b960751f4 Merge branch 'post-08' into post-09 2019-03-09 14:21:12 +01:00
Philipp Oppermann
0ff6334026 Merge branch 'post-07' into post-08 2019-03-09 14:21:12 +01:00
Philipp Oppermann
10c4d0509d Update post-10 code for changes in x86_64 0.5.0
We no longer need a custom translate function as we can directly use MapperAllSizes::translate_addr.
2019-03-09 12:40:27 +01:00
Philipp Oppermann
57998ea4f8 Merge branch 'post-09' into post-10 2019-03-09 12:39:25 +01:00
Philipp Oppermann
f05aaeb0ac Update post-09 code for changes in x86_64 0.5.0 2019-03-09 12:39:14 +01:00
Philipp Oppermann
78a30984bc Merge branch 'post-08' into post-09 2019-03-09 12:38:25 +01:00
Philipp Oppermann
09dd68a1a2 Update post-08 code for changes in x86_64 0.5.0 2019-03-09 12:37:45 +01:00
Philipp Oppermann
81e4eec055 Merge branch 'post-07' into post-08 2019-03-09 12:36:39 +01:00
Philipp Oppermann
ef9a629ddc Merge branch 'post-09' into post-10 2019-02-25 17:04:56 +01:00
Philipp Oppermann
18d8d311cb Merge branch 'post-08' into post-09 2019-02-25 17:04:40 +01:00
Philipp Oppermann
7b61da94a0 Run cargo fmt 2019-02-25 17:04:25 +01:00
Philipp Oppermann
d974cf5200 Merge branch 'post-09' into post-10 2019-02-25 16:37:31 +01:00
Philipp Oppermann
94447af25a Merge branch 'post-08' into post-09 2019-02-25 16:37:30 +01:00
Philipp Oppermann
a7f487f206 Merge branch 'post-07' into post-08 2019-02-25 16:37:30 +01:00
Philipp Oppermann
36d6c6d0e9 Merge branch 'post-09' into post-10 2019-02-12 19:31:09 +01:00
Philipp Oppermann
76d3715eef Merge branch 'post-08' into post-09 2019-02-12 19:31:09 +01:00
Antoine
babf9d8cce Introduce an InterruptIndex enum (#557)
The following modifications aim to group the hardware interrupts' indexes in an easily accessible structure, while being friendlier to future evolutions.
* the hardware interrupts' indexes `TIMER_INTERRUPT_ID` and `KEYBOARD_INTERRUPT_ID` have been replaced by the attributes `Timer` and `Keyboard` contained in `enum InterruptIndex`.
* only the first attribute `Timer` is explicitly declared; the following ones are inferred by the compiler.
* the functions `as_u8` and `as_usize` avoid the need of casts to `u8` or `usize`.
2019-02-12 19:28:24 +01:00
Philipp Oppermann
ff49104764 Merge branch 'post-09' into post-10 2019-02-07 18:48:39 +01:00
Philipp Oppermann
bda1b8929c Merge branch 'post-08' into post-09 2019-02-07 18:48:39 +01:00
Philipp Oppermann
09ff2e01b1 Merge branch 'post-07' into post-08 2019-02-07 18:46:32 +01:00
Philipp Oppermann
03e43da9f9 Merge branch 'post-09' into post-10 2019-02-07 16:17:54 +01:00
Philipp Oppermann
58e171cce7 Merge branch 'post-08' into post-09 2019-02-07 16:15:10 +01:00
Philipp Oppermann
901a1630eb Merge branch 'post-07' into post-08 2019-02-07 16:15:10 +01:00
Philipp Oppermann
e696d65b60 Merge branch 'post-09' into post-10 2019-02-05 15:23:49 +01:00
Philipp Oppermann
ef09418cbf Merge branch 'post-08' into post-09 2019-02-05 15:23:49 +01:00
Philipp Oppermann
28f37da07d Merge branch 'post-07' into post-08 2019-02-05 15:23:49 +01:00
Philipp Oppermann
ebf626061f Merge branch 'post-09' into post-10 2019-02-05 14:59:15 +01:00
Philipp Oppermann
4c9352d898 Merge branch 'post-08' into post-09 2019-02-05 14:59:15 +01:00
Philipp Oppermann
a41a007039 Merge branch 'post-07' into post-08 2019-02-05 14:59:15 +01:00
Philipp Oppermann
c3d023ad40 Merge branch 'post-09' into post-10 2019-02-05 10:46:08 +01:00
Philipp Oppermann
f3cf5b51de Merge branch 'post-08' into post-09 2019-02-05 10:46:08 +01:00
Philipp Oppermann
26be4cb84d Merge branch 'post-07' into post-08 2019-02-05 10:46:08 +01:00
Philipp Oppermann
0a10b3e784 Merge branch 'post-09' into post-10 2019-02-05 10:45:12 +01:00
Philipp Oppermann
9617680e45 Merge branch 'post-08' into post-09 2019-02-05 10:45:12 +01:00
Philipp Oppermann
8f18fb4282 Merge branch 'post-07' into post-08 2019-02-05 10:45:12 +01:00
Philipp Oppermann
e0f66a8196 Merge branch 'post-09' into post-10 2019-01-29 12:16:54 +01:00
Philipp Oppermann
ba54fd2503 Merge branch 'post-08' into post-09 2019-01-29 12:16:54 +01:00
Philipp Oppermann
6b9d275c2d Merge branch 'post-07' into post-08 2019-01-29 12:16:54 +01:00
Philipp Oppermann
e5dfbd4b23 Merge branch 'post-09' into post-10 2019-01-28 11:51:35 +01:00
Philipp Oppermann
4e6ce8d16e Merge branch 'post-08' into post-09 2019-01-28 11:51:23 +01:00
Philipp Oppermann
519f47286c Merge branch 'post-07' into post-08 2019-01-28 11:51:03 +01:00
Philipp Oppermann
5ad2962389 Merge branch 'post-09' into post-10 2019-01-28 11:44:14 +01:00
Philipp Oppermann
82e6c4b066 Merge branch 'post-08' into post-09 2019-01-28 11:44:14 +01:00
Philipp Oppermann
d564dc208f Merge branch 'post-07' into post-08 2019-01-28 11:44:14 +01:00
Philipp Oppermann
a56e22b6fc Use BootInfoFrameAllocator instead of EmptyFrameAllocator 2019-01-28 11:30:27 +01:00
Philipp Oppermann
67f536d7c6 Add a BootInfoFrameAllocator 2019-01-28 11:28:51 +01:00
Philipp Oppermann
741224411b Use the BootInfo struct passed by the bootloader 2019-01-28 11:24:16 +01:00
Philipp Oppermann
818417d119 Try to create example mapping for page 0xdeadbeaf000 2019-01-28 11:23:46 +01:00
Philipp Oppermann
f272785861 Create example mapping for page 0x1000 2019-01-28 11:19:46 +01:00
Philipp Oppermann
5d807ee622 Run rustfmt 2019-01-28 11:19:34 +01:00
Philipp Oppermann
90c3cdf0f3 Update Readme for Advanced Paging post 2019-01-27 17:14:58 +01:00
Philipp Oppermann
2bc233b2f6 Merge branch 'post-09' into post-10 2019-01-27 17:14:12 +01:00
Philipp Oppermann
0df629df47 Update Readme for Introduction to Paging post 2019-01-27 17:13:57 +01:00
Philipp Oppermann
cca85de5ed Merge branch 'post-08' into post-09 2019-01-27 17:13:21 +01:00
Philipp Oppermann
1da81c6f84 Update Readme for Hardware Interrupts post 2019-01-27 17:13:00 +01:00
Philipp Oppermann
42d89c1030 Merge branch 'post-07' into post-08 2019-01-27 17:12:28 +01:00
Philipp Oppermann
051b23f577 Merge branch 'post-09' into post-10 2019-01-27 16:33:54 +01:00
Philipp Oppermann
97e884e6a3 Merge branch 'post-08' into post-09 2019-01-27 16:33:54 +01:00
Philipp Oppermann
954c0bcfbb Merge branch 'post-07' into post-08 2019-01-27 16:33:54 +01:00
Philipp Oppermann
bd1f5345da Merge branch 'z_post_08' into z_post_09 2019-01-27 15:46:59 +01:00
Philipp Oppermann
662faa8dd0 Merge branch 'z_post_07' into z_post_08 2019-01-27 15:46:59 +01:00
Philipp Oppermann
f2bc2d33f0 Rewrite translation function on top of RecursivePageTable 2019-01-27 14:38:49 +01:00
Philipp Oppermann
38a121a887 Test translate_addr by translating some virtual addresses 2019-01-27 14:38:49 +01:00
Philipp Oppermann
8b380f0692 Create a new memory module with a translate_addr function 2019-01-27 14:38:49 +01:00
Philipp Oppermann
f23ee04161 Merge branch 'z_post_08' into z_post_09 2019-01-27 14:30:32 +01:00
Philipp Oppermann
2031a8dc81 Merge branch 'z_post_07' into z_post_08 2019-01-27 14:29:25 +01:00
Philipp Oppermann
b184f7d996 Move the testing code to the end of _start 2019-01-26 12:57:32 +01:00
Philipp Oppermann
7c07a67bf5 Merge branch 'z_post_08' into z_post_09 2019-01-25 14:47:46 +01:00
Philipp Oppermann
ec2da4bebd Merge branch 'z_post_07' into z_post_08 2019-01-25 14:47:32 +01:00
Philipp Oppermann
abaf5bd862 Use PageTable struct of x86_64 crate for accessing entries 2019-01-25 14:29:50 +01:00
Philipp Oppermann
b7005b766f Print first 10 entries of level 4 page table 2019-01-25 14:29:04 +01:00
Philipp Oppermann
91ca04e8c2 Retrieve address of level 4 page table 2019-01-25 14:28:23 +01:00
Philipp Oppermann
ada45c6e52 Provoke page fault 2019-01-25 14:27:31 +01:00
Philipp Oppermann
207a466707 Add a page fault handler 2019-01-25 14:26:35 +01:00
Philipp Oppermann
a954c02fbe Use pc-keyboard crate to translate all scancodes 2019-01-25 14:23:23 +01:00
Philipp Oppermann
895991fee3 Translate keycodes for keys 0-9 2019-01-25 14:22:25 +01:00
Philipp Oppermann
25796110f3 Read and print scancodes in keyboard interrupt handler 2019-01-25 14:21:49 +01:00
Philipp Oppermann
1d153d694e Add a keyboard interrupt handler 2019-01-25 14:21:12 +01:00
Philipp Oppermann
c2e4e8c96f Add and use hlt_loop function 2019-01-25 14:21:12 +01:00
Philipp Oppermann
599a643d97 Remove deadlock provoking code again
This reverts commit 1a39774ead.
2019-01-25 14:14:45 +01:00
Philipp Oppermann
5efcecc2f2 Avoid deadlock by disabling interrupts in print! and serial_print! macros 2019-01-25 14:14:26 +01:00
Philipp Oppermann
1a39774ead Provoke a print! deadlock 2019-01-25 14:12:14 +01:00
Philipp Oppermann
1ea8cf6ed1 Send end of interrupt signal 2019-01-25 14:09:47 +01:00
Philipp Oppermann
4060ac558c Add a timer interrupt handler 2019-01-25 14:09:12 +01:00
Philipp Oppermann
28a11e47bc Enable hardware interrupts 2019-01-25 14:05:20 +01:00
Philipp Oppermann
6504bed810 Initialize the PIC 2019-01-25 14:04:55 +01:00
18 changed files with 1247 additions and 33 deletions

33
Cargo.lock generated
View File

@@ -27,6 +27,9 @@ version = "0.1.0"
dependencies = [
"bootloader",
"lazy_static",
"linked_list_allocator",
"pc-keyboard",
"pic8259_simple",
"spin",
"uart_16550",
"volatile",
@@ -48,6 +51,12 @@ dependencies = [
"rustc_version",
]
[[package]]
name = "cpuio"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22b8e308ccfc5acf3b82f79c0eac444cf6114cb2ac67a230ca6c177210068daa"
[[package]]
name = "lazy_static"
version = "1.4.0"
@@ -57,12 +66,36 @@ dependencies = [
"spin",
]
[[package]]
name = "linked_list_allocator"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47314ec1d29aa869ee7cb5a5be57be9b1055c56567d59c3fb6689926743e0bea"
dependencies = [
"spin",
]
[[package]]
name = "nodrop"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb"
[[package]]
name = "pc-keyboard"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fff50ab09ba31bcebc0669f4e64c0952fae1acdca9e6e0587e68e4e8443808ac"
[[package]]
name = "pic8259_simple"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc64b2fd10828da8521b6cdabe0679385d7d2a3a6d4c336b819d1fa31ba35c72"
dependencies = [
"cpuio",
]
[[package]]
name = "rustc_version"
version = "0.2.3"

View File

@@ -13,11 +13,14 @@ name = "stack_overflow"
harness = false
[dependencies]
bootloader = "0.8.0"
bootloader = { version = "0.8.0", features = ["map_physical_memory"]}
volatile = "0.2.6"
spin = "0.5.2"
x86_64 = "0.8.1"
uart_16550 = "0.2.0"
pic8259_simple = "0.1.1"
pc-keyboard = "0.3.1"
linked_list_allocator = "0.6.4"
[dependencies.lazy_static]
version = "1.0"
@@ -30,3 +33,6 @@ test-args = [
"-display", "none"
]
test-success-exit-code = 33 # (0x10 << 1) | 1
[profile.release]
lto = true

View File

@@ -1,10 +1,10 @@
# Blog OS (Double Faults)
# Blog OS (Heap Allocation)
[![Build Status](https://github.com/phil-opp/blog_os/workflows/Build%20Code/badge.svg?branch=post-06)](https://github.com/phil-opp/blog_os/actions?query=workflow%3A%22Build+Code%22+branch%3Apost-06)
[![Build Status](https://github.com/phil-opp/blog_os/workflows/Build%20Code/badge.svg?branch=post-10)](https://github.com/phil-opp/blog_os/actions?query=workflow%3A%22Build+Code%22+branch%3Apost-10)
This repository contains the source code for the [Double Faults][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series.
This repository contains the source code for the [Heap Allocation][post] post of the [Writing an OS in Rust](https://os.phil-opp.com) series.
[post]: https://os.phil-opp.com/double-fault-exceptions/
[post]: https://os.phil-opp.com/heap-allocation/
**Check out the [master branch](https://github.com/phil-opp/blog_os) for more information.**

58
src/allocator/bump.rs Normal file
View File

@@ -0,0 +1,58 @@
use super::{align_up, Locked};
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr;
/// A simple bump allocator: allocations only ever move the `next` pointer
/// forward, and the whole heap is reset at once when the last allocation
/// is freed.
pub struct BumpAllocator {
    heap_start: usize,  // first address of the managed heap region
    heap_end: usize,    // one past the last usable heap address
    next: usize,        // start address handed out by the next allocation
    allocations: usize, // count of currently live allocations
}
impl BumpAllocator {
    /// Creates a new empty bump allocator.
    ///
    /// All bounds are zero, so every allocation fails until `init` is called.
    pub const fn new() -> Self {
        BumpAllocator {
            heap_start: 0,
            heap_end: 0,
            next: 0,
            allocations: 0,
        }
    }

    /// Initializes the bump allocator with the given heap bounds.
    ///
    /// This method is unsafe because the caller must ensure that the given
    /// memory range is unused. Also, this method must be called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.heap_start = heap_start;
        self.heap_end = heap_start + heap_size;
        // allocations start at the bottom of the heap
        self.next = heap_start;
    }
}
unsafe impl GlobalAlloc for Locked<BumpAllocator> {
    /// Bump-allocates `layout.size()` bytes at `layout.align()` alignment.
    ///
    /// Returns a null pointer when the heap is exhausted (or when the
    /// request is so large that the end address would overflow).
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut bump = self.lock(); // get a mutable reference

        let alloc_start = align_up(bump.next, layout.align());
        // Use checked arithmetic: a huge `layout.size()` must not wrap the
        // address space and produce a bogus "successful" allocation.
        let alloc_end = match alloc_start.checked_add(layout.size()) {
            Some(end) => end,
            None => return ptr::null_mut(),
        };

        if alloc_end > bump.heap_end {
            ptr::null_mut() // out of memory
        } else {
            bump.next = alloc_end;
            bump.allocations += 1;
            alloc_start as *mut u8
        }
    }

    /// Decrements the live-allocation count; once it reaches zero, the whole
    /// heap is reclaimed by resetting `next` to the heap start.
    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        let mut bump = self.lock(); // get a mutable reference

        bump.allocations -= 1;
        if bump.allocations == 0 {
            // every allocation was freed -> reuse the heap from the start
            bump.next = bump.heap_start;
        }
    }
}

View File

@@ -0,0 +1,102 @@
use super::Locked;
use alloc::alloc::{GlobalAlloc, Layout};
use core::{
mem,
ptr::{self, NonNull},
};
/// The block sizes to use.
///
/// The sizes must each be power of 2 because they are also used as
/// the block alignment (alignments must be always powers of 2).
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];

/// Choose an appropriate block size for the given layout.
///
/// Returns an index into the `BLOCK_SIZES` array, or `None` when the
/// request exceeds every fixed block size.
fn list_index(layout: &Layout) -> Option<usize> {
    // A block must be large enough for both the size and the alignment.
    let needed = layout.size().max(layout.align());
    for (idx, &block_size) in BLOCK_SIZES.iter().enumerate() {
        if block_size >= needed {
            return Some(idx);
        }
    }
    None
}
/// A node of an intrusive free list; it is stored inside the free block itself.
struct ListNode {
    next: Option<&'static mut ListNode>,
}

/// Allocator with one free list per `BLOCK_SIZES` entry, plus a linked-list
/// fallback allocator for requests that fit no fixed block size.
pub struct FixedSizeBlockAllocator {
    list_heads: [Option<&'static mut ListNode>; BLOCK_SIZES.len()],
    fallback_allocator: linked_list_allocator::Heap,
}

impl FixedSizeBlockAllocator {
    /// Creates an empty FixedSizeBlockAllocator.
    pub const fn new() -> Self {
        FixedSizeBlockAllocator {
            list_heads: [None; BLOCK_SIZES.len()],
            fallback_allocator: linked_list_allocator::Heap::empty(),
        }
    }

    /// Initialize the allocator with the given heap bounds.
    ///
    /// This function is unsafe because the caller must guarantee that the given
    /// heap bounds are valid and that the heap is unused. This method must be
    /// called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        // All memory is initially owned by the fallback allocator; the
        // fixed-size lists are populated lazily on `dealloc`.
        self.fallback_allocator.init(heap_start, heap_size);
    }

    /// Allocates using the fallback allocator.
    fn fallback_alloc(&mut self, layout: Layout) -> *mut u8 {
        match self.fallback_allocator.allocate_first_fit(layout) {
            Ok(ptr) => ptr.as_ptr(),
            Err(_) => ptr::null_mut(), // out of memory
        }
    }
}
unsafe impl GlobalAlloc for Locked<FixedSizeBlockAllocator> {
    /// Pops a block from the matching size-class free list, falling back to
    /// the linked-list allocator when the list is empty or no class fits.
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                // try to reuse a previously freed block of this size class
                match allocator.list_heads[index].take() {
                    Some(node) => {
                        allocator.list_heads[index] = node.next.take();
                        node as *mut ListNode as *mut u8
                    }
                    None => {
                        // no block exists in list => allocate new block
                        let block_size = BLOCK_SIZES[index];
                        // only works if all block sizes are a power of 2
                        let block_align = block_size;
                        let layout = Layout::from_size_align(block_size, block_align).unwrap();
                        allocator.fallback_alloc(layout)
                    }
                }
            }
            // request larger than every fixed block size
            None => allocator.fallback_alloc(layout),
        }
    }

    /// Returns a block to its size-class free list, or to the fallback
    /// allocator when the layout fits no fixed block size.
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                // push the freed block onto the front of its size-class list
                let new_node = ListNode {
                    next: allocator.list_heads[index].take(),
                };
                // verify that block has size and alignment required for storing node
                assert!(mem::size_of::<ListNode>() <= BLOCK_SIZES[index]);
                assert!(mem::align_of::<ListNode>() <= BLOCK_SIZES[index]);
                let new_node_ptr = ptr as *mut ListNode;
                new_node_ptr.write(new_node);
                allocator.list_heads[index] = Some(&mut *new_node_ptr);
            }
            None => {
                // block came from the fallback allocator; hand it back there
                let ptr = NonNull::new(ptr).unwrap();
                allocator.fallback_allocator.deallocate(ptr, layout);
            }
        }
    }
}

View File

@@ -0,0 +1,145 @@
use super::{align_up, Locked};
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};
/// A node of the free list, stored at the very start of each free region.
struct ListNode {
    size: usize,
    next: Option<&'static mut ListNode>,
}

impl ListNode {
    /// Creates a free-list node describing a region of `size` bytes.
    const fn new(size: usize) -> Self {
        Self { size, next: None }
    }

    /// Address of the region this node describes (the node itself lives
    /// at the start of that region).
    fn start_addr(&self) -> usize {
        let ptr: *const Self = self;
        ptr as usize
    }

    /// One-past-the-end address of the region.
    fn end_addr(&self) -> usize {
        self.start_addr() + self.size
    }
}
/// First-fit allocator backed by an intrusive linked list of free regions.
pub struct LinkedListAllocator {
    head: ListNode, // dummy node; `head.next` points at the first free region
}

impl LinkedListAllocator {
    /// Creates an empty LinkedListAllocator.
    pub const fn new() -> Self {
        Self {
            head: ListNode::new(0),
        }
    }

    /// Initialize the allocator with the given heap bounds.
    ///
    /// This function is unsafe because the caller must guarantee that the given
    /// heap bounds are valid and that the heap is unused. This method must be
    /// called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.add_free_region(heap_start, heap_size);
    }

    /// Adds the given memory region to the front of the list.
    ///
    /// The region must be unused and writable: a `ListNode` is written
    /// directly into its first bytes.
    unsafe fn add_free_region(&mut self, addr: usize, size: usize) {
        // ensure that the freed region is capable of holding ListNode
        assert!(align_up(addr, mem::align_of::<ListNode>()) == addr);
        assert!(size >= mem::size_of::<ListNode>());

        // create a new list node and append it at the start of the list
        let mut node = ListNode::new(size);
        node.next = self.head.next.take();
        let node_ptr = addr as *mut ListNode;
        node_ptr.write(node);
        self.head.next = Some(&mut *node_ptr)
    }

    /// Looks for a free region with the given size and alignment and removes
    /// it from the list.
    ///
    /// Returns a tuple of the list node and the start address of the allocation.
    fn find_region(&mut self, size: usize, align: usize) -> Option<(&'static mut ListNode, usize)> {
        // reference to current list node, updated for each iteration
        let mut current = &mut self.head;
        // look for a large enough memory region in linked list
        while let Some(ref mut region) = current.next {
            if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
                // region suitable for allocation -> remove node from list
                // (splice `region` out by re-linking `current` to its successor)
                let next = region.next.take();
                let ret = Some((current.next.take().unwrap(), alloc_start));
                current.next = next;
                return ret;
            } else {
                // region not suitable -> continue with next region
                current = current.next.as_mut().unwrap();
            }
        }

        // no suitable region found
        None
    }

    /// Try to use the given region for an allocation with given size and alignment.
    ///
    /// Returns the allocation start address on success.
    fn alloc_from_region(region: &ListNode, size: usize, align: usize) -> Result<usize, ()> {
        let alloc_start = align_up(region.start_addr(), align);
        let alloc_end = alloc_start + size;

        if alloc_end > region.end_addr() {
            // region too small
            return Err(());
        }

        let excess_size = region.end_addr() - alloc_end;
        if excess_size > 0 && excess_size < mem::size_of::<ListNode>() {
            // rest of region too small to hold a ListNode (required because the
            // allocation splits the region in a used and a free part)
            return Err(());
        }

        // region suitable for allocation
        Ok(alloc_start)
    }

    /// Adjust the given layout so that the resulting allocated memory
    /// region is also capable of storing a `ListNode`.
    ///
    /// Returns the adjusted size and alignment as a (size, align) tuple.
    fn size_align(layout: Layout) -> (usize, usize) {
        let layout = layout
            .align_to(mem::align_of::<ListNode>())
            .expect("adjusting alignment failed")
            .pad_to_align();
        let size = layout.size().max(mem::size_of::<ListNode>());
        (size, layout.align())
    }
}
unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
    /// First-fit allocation: finds a free region, returns its start, and
    /// puts any unused tail of the region back on the free list.
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // perform layout adjustments so the block can later hold a ListNode
        let (size, align) = LinkedListAllocator::size_align(layout);
        // use the `Locked::lock` helper for consistency with the other
        // allocator impls instead of reaching into the `inner` field
        let mut allocator = self.lock();

        if let Some((region, alloc_start)) = allocator.find_region(size, align) {
            let alloc_end = alloc_start + size;
            let excess_size = region.end_addr() - alloc_end;
            if excess_size > 0 {
                // return the unused tail of the region to the free list
                allocator.add_free_region(alloc_end, excess_size);
            }
            alloc_start as *mut u8
        } else {
            ptr::null_mut() // no suitable free region
        }
    }

    /// Returns the block to the free list.
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // perform layout adjustments (must mirror the adjustment in `alloc`)
        let (size, _) = LinkedListAllocator::size_align(layout);
        self.lock().add_free_region(ptr as usize, size)
    }
}

84
src/allocator/mod.rs Normal file
View File

@@ -0,0 +1,84 @@
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr::null_mut;
use fixed_size_block::FixedSizeBlockAllocator;
use x86_64::{
structures::paging::{
mapper::MapToError, FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB,
},
VirtAddr,
};
pub mod bump;
pub mod fixed_size_block;
pub mod linked_list;
/// Virtual start address of the kernel heap (an arbitrary, otherwise unused range).
pub const HEAP_START: usize = 0x_4444_4444_0000;
/// Size of the kernel heap.
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB

/// The allocator serving all `alloc`/`dealloc` calls in the kernel.
#[global_allocator]
static ALLOCATOR: Locked<FixedSizeBlockAllocator> = Locked::new(FixedSizeBlockAllocator::new());
/// Maps the kernel heap pages and hands the region to the global allocator.
///
/// Every page of `HEAP_START..HEAP_START + HEAP_SIZE` is mapped to a freshly
/// allocated physical frame, then `ALLOCATOR` is initialized with the
/// now-usable virtual range.
///
/// Returns `MapToError::FrameAllocationFailed` when no physical frame is
/// available, or any other error reported by `map_to`.
pub fn init_heap(
    mapper: &mut impl Mapper<Size4KiB>,
    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) -> Result<(), MapToError> {
    // Translate the heap bounds into an inclusive range of pages.
    let page_range = {
        let heap_start = VirtAddr::new(HEAP_START as u64);
        let heap_end = heap_start + HEAP_SIZE - 1u64; // inclusive end address
        let heap_start_page = Page::containing_address(heap_start);
        let heap_end_page = Page::containing_address(heap_end);
        Page::range_inclusive(heap_start_page, heap_end_page)
    };

    for page in page_range {
        let frame = frame_allocator
            .allocate_frame()
            .ok_or(MapToError::FrameAllocationFailed)?;
        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
        mapper.map_to(page, frame, flags, frame_allocator)?.flush();
    }

    unsafe {
        // Safety: the range was just mapped above and is used for nothing else.
        ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
    }

    Ok(())
}
/// Placeholder allocator that refuses every allocation.
pub struct Dummy;

unsafe impl GlobalAlloc for Dummy {
    // Always reports out-of-memory.
    unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
        null_mut()
    }

    // `alloc` never succeeds, so no valid pointer can ever be passed here.
    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        panic!("dealloc should be never called")
    }
}
/// A wrapper around spin::Mutex to permit trait implementations.
///
/// Traits such as `GlobalAlloc` cannot be implemented for the foreign type
/// `spin::Mutex` directly; this local newtype makes such impls legal.
pub struct Locked<A> {
    inner: spin::Mutex<A>,
}

impl<A> Locked<A> {
    /// Wraps `inner` in a new spinlock.
    pub const fn new(inner: A) -> Self {
        Locked {
            inner: spin::Mutex::new(inner),
        }
    }

    /// Acquires the spinlock, returning a guard with mutable access.
    pub fn lock(&self) -> spin::MutexGuard<A> {
        self.inner.lock()
    }
}
/// Align the address `addr` upwards to the next multiple of `align`.
fn align_up(addr: usize, align: usize) -> usize {
    match addr % align {
        0 => addr,                   // already aligned
        rem => addr + (align - rem), // round up to the next multiple
    }
}

View File

@@ -1,16 +1,44 @@
use crate::{gdt, println};
use crate::{gdt, hlt_loop, print, println};
use lazy_static::lazy_static;
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame};
use pic8259_simple::ChainedPics;
use spin;
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode};
pub const PIC_1_OFFSET: u8 = 32;
pub const PIC_2_OFFSET: u8 = PIC_1_OFFSET + 8;
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum InterruptIndex {
Timer = PIC_1_OFFSET,
Keyboard,
}
impl InterruptIndex {
fn as_u8(self) -> u8 {
self as u8
}
fn as_usize(self) -> usize {
usize::from(self.as_u8())
}
}
pub static PICS: spin::Mutex<ChainedPics> =
spin::Mutex::new(unsafe { ChainedPics::new(PIC_1_OFFSET, PIC_2_OFFSET) });
lazy_static! {
static ref IDT: InterruptDescriptorTable = {
let mut idt = InterruptDescriptorTable::new();
idt.breakpoint.set_handler_fn(breakpoint_handler);
idt.page_fault.set_handler_fn(page_fault_handler);
unsafe {
idt.double_fault
.set_handler_fn(double_fault_handler)
.set_stack_index(gdt::DOUBLE_FAULT_IST_INDEX);
}
idt[InterruptIndex::Timer.as_usize()].set_handler_fn(timer_interrupt_handler);
idt[InterruptIndex::Keyboard.as_usize()].set_handler_fn(keyboard_interrupt_handler);
idt
};
}
@@ -23,6 +51,19 @@ extern "x86-interrupt" fn breakpoint_handler(stack_frame: &mut InterruptStackFra
println!("EXCEPTION: BREAKPOINT\n{:#?}", stack_frame);
}
extern "x86-interrupt" fn page_fault_handler(
stack_frame: &mut InterruptStackFrame,
error_code: PageFaultErrorCode,
) {
use x86_64::registers::control::Cr2;
println!("EXCEPTION: PAGE FAULT");
println!("Accessed Address: {:?}", Cr2::read());
println!("Error Code: {:?}", error_code);
println!("{:#?}", stack_frame);
hlt_loop();
}
extern "x86-interrupt" fn double_fault_handler(
stack_frame: &mut InterruptStackFrame,
_error_code: u64,
@@ -30,6 +71,44 @@ extern "x86-interrupt" fn double_fault_handler(
panic!("EXCEPTION: DOUBLE FAULT\n{:#?}", stack_frame);
}
extern "x86-interrupt" fn timer_interrupt_handler(_stack_frame: &mut InterruptStackFrame) {
print!(".");
unsafe {
PICS.lock()
.notify_end_of_interrupt(InterruptIndex::Timer.as_u8());
}
crate::multitasking::invoke_scheduler();
}
extern "x86-interrupt" fn keyboard_interrupt_handler(_stack_frame: &mut InterruptStackFrame) {
use pc_keyboard::{layouts, DecodedKey, Keyboard, ScancodeSet1};
use spin::Mutex;
use x86_64::instructions::port::Port;
lazy_static! {
static ref KEYBOARD: Mutex<Keyboard<layouts::Us104Key, ScancodeSet1>> =
Mutex::new(Keyboard::new(layouts::Us104Key, ScancodeSet1));
}
let mut keyboard = KEYBOARD.lock();
let mut port = Port::new(0x60);
let scancode: u8 = unsafe { port.read() };
if let Ok(Some(key_event)) = keyboard.add_byte(scancode) {
if let Some(key) = keyboard.process_keyevent(key_event) {
match key {
DecodedKey::Unicode(character) => print!("{}", character),
DecodedKey::RawKey(key) => print!("{:?}", key),
}
}
}
unsafe {
PICS.lock()
.notify_end_of_interrupt(InterruptIndex::Keyboard.as_u8());
}
}
#[cfg(test)]
use crate::{serial_print, serial_println};

View File

@@ -2,19 +2,36 @@
#![cfg_attr(test, no_main)]
#![feature(custom_test_frameworks)]
#![feature(abi_x86_interrupt)]
#![feature(alloc_error_handler)]
#![feature(const_fn)]
#![feature(alloc_layout_extra)]
#![feature(const_in_array_repeat_expressions)]
#![feature(global_asm)]
#![feature(asm)]
#![feature(raw)]
#![feature(never_type)]
#![feature(naked_functions)]
#![feature(option_expect_none)]
#![test_runner(crate::test_runner)]
#![reexport_test_harness_main = "test_main"]
extern crate alloc;
use core::panic::PanicInfo;
pub mod allocator;
pub mod gdt;
pub mod interrupts;
pub mod memory;
pub mod multitasking;
pub mod serial;
pub mod vga_buffer;
pub fn init() {
gdt::init();
interrupts::init_idt();
unsafe { interrupts::PICS.lock().initialize() };
x86_64::instructions::interrupts::enable();
}
pub fn test_runner(tests: &[&dyn Fn()]) {
@@ -29,7 +46,7 @@ pub fn test_panic_handler(info: &PanicInfo) -> ! {
serial_println!("[failed]\n");
serial_println!("Error: {}\n", info);
exit_qemu(QemuExitCode::Failed);
loop {}
hlt_loop();
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -48,13 +65,24 @@ pub fn exit_qemu(exit_code: QemuExitCode) {
}
}
pub fn hlt_loop() -> ! {
loop {
x86_64::instructions::hlt();
}
}
#[cfg(test)]
use bootloader::{entry_point, BootInfo};
#[cfg(test)]
entry_point!(test_kernel_main);
/// Entry point for `cargo xtest`
#[cfg(test)]
#[no_mangle]
pub extern "C" fn _start() -> ! {
fn test_kernel_main(_boot_info: &'static BootInfo) -> ! {
init();
test_main();
loop {}
hlt_loop();
}
#[cfg(test)]
@@ -62,3 +90,8 @@ pub extern "C" fn _start() -> ! {
fn panic(info: &PanicInfo) -> ! {
test_panic_handler(info)
}
#[alloc_error_handler]
fn alloc_error_handler(layout: alloc::alloc::Layout) -> ! {
panic!("allocation error: {:?}", layout)
}

View File

@@ -4,35 +4,98 @@
#![test_runner(blog_os::test_runner)]
#![reexport_test_harness_main = "test_main"]
use blog_os::println;
extern crate alloc;
use alloc::{boxed::Box, rc::Rc, vec, vec::Vec};
use blog_os::multitasking::{self, thread::Thread, with_scheduler};
use blog_os::{print, println};
use bootloader::{entry_point, BootInfo};
use core::panic::PanicInfo;
#[no_mangle]
pub extern "C" fn _start() -> ! {
println!("Hello World{}", "!");
entry_point!(kernel_main);
fn kernel_main(boot_info: &'static BootInfo) -> ! {
use blog_os::allocator;
use blog_os::memory::{self, BootInfoFrameAllocator};
use x86_64::VirtAddr;
println!("Hello World{}", "!");
blog_os::init();
fn stack_overflow() {
stack_overflow(); // for each recursion, the return address is pushed
}
let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset);
let mut mapper = unsafe { memory::init(phys_mem_offset) };
let mut frame_allocator = unsafe { BootInfoFrameAllocator::init(&boot_info.memory_map) };
// uncomment line below to trigger a stack overflow
// stack_overflow();
allocator::init_heap(&mut mapper, &mut frame_allocator).expect("heap initialization failed");
// allocate a number on the heap
let heap_value = Box::new(41);
println!("heap_value at {:p}", heap_value);
// create a dynamically sized vector
let mut vec = Vec::new();
for i in 0..500 {
vec.push(i);
}
println!("vec at {:p}", vec.as_slice());
// create a reference counted vector -> will be freed when count reaches 0
let reference_counted = Rc::new(vec![1, 2, 3]);
let cloned_reference = reference_counted.clone();
println!(
"current reference count is {}",
Rc::strong_count(&cloned_reference)
);
core::mem::drop(reference_counted);
println!(
"reference count is {} now",
Rc::strong_count(&cloned_reference)
);
#[cfg(test)]
test_main();
let idle_thread = Thread::create(idle_thread, 2, &mut mapper, &mut frame_allocator).unwrap();
with_scheduler(|s| s.set_idle_thread(idle_thread));
for _ in 0..10 {
let thread = Thread::create(thread_entry, 2, &mut mapper, &mut frame_allocator).unwrap();
with_scheduler(|s| s.add_new_thread(thread));
}
let thread =
Thread::create_from_closure(|| thread_entry(), 2, &mut mapper, &mut frame_allocator)
.unwrap();
with_scheduler(|s| s.add_new_thread(thread));
println!("It did not crash!");
loop {}
thread_entry();
}
fn idle_thread() -> ! {
loop {
x86_64::instructions::hlt();
multitasking::yield_now();
}
}
fn thread_entry() -> ! {
let thread_id = with_scheduler(|s| s.current_thread_id()).as_u64();
for _ in 0..=thread_id {
print!("{}", thread_id);
x86_64::instructions::hlt();
}
multitasking::exit_thread();
}
/// This function is called on panic.
#[cfg(not(test))]
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
unsafe {
blog_os::vga_buffer::WRITER.force_unlock();
}
println!("{}", info);
loop {}
blog_os::hlt_loop();
}
#[cfg(test)]

154
src/memory.rs Normal file
View File

@@ -0,0 +1,154 @@
use bootloader::bootinfo::{MemoryMap, MemoryRegionType};
use x86_64::{
structures::paging::{
mapper, FrameAllocator, Mapper, OffsetPageTable, Page, PageTable, PhysFrame, Size4KiB,
UnusedPhysFrame,
},
PhysAddr, VirtAddr,
};
/// Initialize a new OffsetPageTable.
///
/// This function is unsafe because the caller must guarantee that the
/// complete physical memory is mapped to virtual memory at the passed
/// `physical_memory_offset`. Also, this function must be only called once
/// to avoid aliasing `&mut` references (which is undefined behavior).
pub unsafe fn init(physical_memory_offset: VirtAddr) -> OffsetPageTable<'static> {
    // The mapper holds a `'static` mutable borrow of the active level-4 table.
    let level_4_table = active_level_4_table(physical_memory_offset);
    OffsetPageTable::new(level_4_table, physical_memory_offset)
}
/// Returns a mutable reference to the active level 4 table.
///
/// This function is unsafe because the caller must guarantee that the
/// complete physical memory is mapped to virtual memory at the passed
/// `physical_memory_offset`. Also, this function must be only called once
/// to avoid aliasing `&mut` references (which is undefined behavior).
unsafe fn active_level_4_table(physical_memory_offset: VirtAddr) -> &'static mut PageTable {
    use x86_64::registers::control::Cr3;

    // Cr3 holds the physical frame of the currently active level-4 table.
    let (level_4_table_frame, _) = Cr3::read();

    // Translate that physical address through the full physical-memory mapping.
    let phys = level_4_table_frame.start_address();
    let virt = physical_memory_offset + phys.as_u64();
    let page_table_ptr: *mut PageTable = virt.as_mut_ptr();

    &mut *page_table_ptr // unsafe
}
/// The virtual address range of an allocated stack (guard page excluded).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct StackBounds {
    start: VirtAddr,
    end: VirtAddr,
}

impl StackBounds {
    /// Lowest address of the stack region.
    pub fn start(&self) -> VirtAddr {
        let Self { start, .. } = *self;
        start
    }

    /// Upper bound of the stack region.
    pub fn end(&self) -> VirtAddr {
        let Self { end, .. } = *self;
        end
    }
}
/// Allocates and maps a new stack of `size_in_pages` pages, preceded by an
/// unmapped guard page so that a stack overflow page-faults instead of
/// silently corrupting adjacent memory.
///
/// Returns the mapped stack bounds, or a `MapToError` when a frame
/// allocation or a mapping fails.
pub fn alloc_stack(
    size_in_pages: u64,
    mapper: &mut impl Mapper<Size4KiB>,
    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) -> Result<StackBounds, mapper::MapToError> {
    use core::sync::atomic::{AtomicU64, Ordering};
    use x86_64::structures::paging::PageTableFlags as Flags;

    // Next free virtual address for stack allocations; the atomic `fetch_add`
    // hands out disjoint regions even with concurrent callers.
    static STACK_ALLOC_NEXT: AtomicU64 = AtomicU64::new(0x_5555_5555_0000);

    // Reserve `size_in_pages` stack pages plus one guard page.
    let guard_page_start = STACK_ALLOC_NEXT.fetch_add(
        (size_in_pages + 1) * Page::<Size4KiB>::SIZE,
        Ordering::SeqCst,
    );
    let guard_page = Page::from_start_address(VirtAddr::new(guard_page_start))
        .expect("`STACK_ALLOC_NEXT` not page aligned");

    let stack_start = guard_page + 1; // first page above the guard page
    let stack_end = stack_start + size_in_pages; // exclusive end page

    // Map only the stack pages — the guard page is left unmapped on purpose.
    let flags = Flags::PRESENT | Flags::WRITABLE;
    for page in Page::range(stack_start, stack_end) {
        let frame = frame_allocator
            .allocate_frame()
            .ok_or(mapper::MapToError::FrameAllocationFailed)?;
        mapper.map_to(page, frame, flags, frame_allocator)?.flush();
    }

    Ok(StackBounds {
        start: stack_start.start_address(),
        end: stack_end.start_address(),
    })
}
/// Creates an example mapping for the given page to frame `0xb8000`.
///
/// Frame `0xb8000` is the VGA text buffer, so writes through the new page
/// become visible on the screen.
pub fn create_example_mapping(
    page: Page,
    mapper: &mut OffsetPageTable,
    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) {
    use x86_64::structures::paging::PageTableFlags as Flags;

    let frame = PhysFrame::containing_address(PhysAddr::new(0xb8000));
    // FIXME: ONLY FOR TEMPORARY TESTING
    // Claiming this frame as unused is unsound in general (the VGA buffer is
    // already mapped elsewhere) — acceptable only for this demo.
    let unused_frame = unsafe { UnusedPhysFrame::new(frame) };
    let flags = Flags::PRESENT | Flags::WRITABLE;
    let map_to_result = mapper.map_to(page, unused_frame, flags, frame_allocator);
    map_to_result.expect("map_to failed").flush();
}
/// A FrameAllocator that always returns `None`.
///
/// Only usable for mappings that are guaranteed not to need new
/// page-table frames.
pub struct EmptyFrameAllocator;

unsafe impl FrameAllocator<Size4KiB> for EmptyFrameAllocator {
    fn allocate_frame(&mut self) -> Option<UnusedPhysFrame> {
        None
    }
}
/// A FrameAllocator that returns usable frames from the bootloader's memory map.
pub struct BootInfoFrameAllocator {
    memory_map: &'static MemoryMap,
    next: usize, // index of the next frame to hand out
}

impl BootInfoFrameAllocator {
    /// Create a FrameAllocator from the passed memory map.
    ///
    /// This function is unsafe because the caller must guarantee that the passed
    /// memory map is valid. The main requirement is that all frames that are marked
    /// as `USABLE` in it are really unused.
    pub unsafe fn init(memory_map: &'static MemoryMap) -> Self {
        BootInfoFrameAllocator {
            memory_map,
            next: 0,
        }
    }

    /// Returns an iterator over the usable frames specified in the memory map.
    fn usable_frames(&self) -> impl Iterator<Item = UnusedPhysFrame> {
        // get usable regions from memory map
        let regions = self.memory_map.iter();
        let usable_regions = regions.filter(|r| r.region_type == MemoryRegionType::Usable);
        // map each region to its address range
        let addr_ranges = usable_regions.map(|r| r.range.start_addr()..r.range.end_addr());
        // transform to an iterator of frame start addresses
        let frame_addresses = addr_ranges.flat_map(|r| r.step_by(4096));
        // create `PhysFrame` types from the start addresses
        let frames = frame_addresses.map(|addr| PhysFrame::containing_address(PhysAddr::new(addr)));
        // we know that the frames are really unused
        frames.map(|f| unsafe { UnusedPhysFrame::new(f) })
    }
}

unsafe impl FrameAllocator<Size4KiB> for BootInfoFrameAllocator {
    fn allocate_frame(&mut self) -> Option<UnusedPhysFrame> {
        // NOTE(review): rebuilds the frame iterator on every call, so each
        // allocation costs O(number of frames handed out so far) — fine for
        // now, but worth caching if allocation becomes hot.
        let frame = self.usable_frames().nth(self.next);
        self.next += 1;
        frame
    }
}

View File

@@ -0,0 +1,105 @@
use super::{with_scheduler, SwitchReason};
use crate::multitasking::thread::ThreadId;
use alloc::boxed::Box;
use core::mem;
use core::raw::TraitObject;
use x86_64::VirtAddr;
/// Owner of a thread's stack pointer while the stack is being prepared.
pub struct Stack {
    pointer: VirtAddr,
}

impl Stack {
    /// Wraps a raw stack pointer.
    ///
    /// Unsafe because the caller must guarantee that `stack_pointer` points
    /// to the top of a valid, writable, otherwise-unused stack.
    pub unsafe fn new(stack_pointer: VirtAddr) -> Self {
        Stack {
            pointer: stack_pointer,
        }
    }

    /// Consumes the wrapper and returns the prepared stack pointer value.
    pub fn get_stack_pointer(self) -> VirtAddr {
        self.pointer
    }

    /// Prepares the stack so that a context switch to it invokes `closure`.
    ///
    /// The boxed closure's trait-object parts (data and vtable pointers) are
    /// pushed onto the stack, where `call_closure_entry` pops them back off.
    pub fn set_up_for_closure(&mut self, closure: Box<dyn FnOnce() -> !>) {
        // Split the fat pointer into its raw data/vtable components.
        let trait_object: TraitObject = unsafe { mem::transmute(closure) };
        unsafe { self.push(trait_object.data) };
        unsafe { self.push(trait_object.vtable) };
        self.set_up_for_entry_point(call_closure_entry);
    }

    /// Prepares the stack so that a context switch to it jumps to `entry_point`.
    pub fn set_up_for_entry_point(&mut self, entry_point: fn() -> !) {
        // Return address consumed by the `ret` in `asm_context_switch`.
        unsafe { self.push(entry_point) };
        let rflags: u64 = 0x200; // interrupt flag (IF) set
        // Popped by the `popfq` in `asm_context_switch` before the `ret`.
        unsafe { self.push(rflags) };
    }

    /// Pushes `value` onto the stack, moving the stack pointer down.
    unsafe fn push<T>(&mut self, value: T) {
        self.pointer -= core::mem::size_of::<T>();
        let ptr: *mut T = self.pointer.as_mut_ptr();
        ptr.write(value);
    }
}
/// Switches execution to the thread whose saved stack pointer is
/// `new_stack_pointer`, reporting the previous thread's id and the reason
/// for the switch to the scheduler (via `asm_context_switch`).
///
/// ## Safety
/// `new_stack_pointer` must be a valid saved stack pointer produced by a
/// previous context switch or by `Stack::set_up_for_*`.
pub unsafe fn context_switch_to(
    new_stack_pointer: VirtAddr,
    prev_thread_id: ThreadId,
    switch_reason: SwitchReason,
) {
    // Legacy (pre-2020) asm! syntax. Inputs go in rdi/rsi/rdx to match the
    // System V argument registers read by `asm_context_switch` /
    // `add_paused_thread`. Every caller-visible register plus rflags and
    // memory is listed as clobbered, forcing the compiler to save whatever
    // it needs across the switch.
    asm!(
        "call asm_context_switch"
        :
        : "{rdi}"(new_stack_pointer), "{rsi}"(prev_thread_id), "{rdx}"(switch_reason as u64)
        : "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "r8", "r9", "r10",
        "r11", "r12", "r13", "r14", "r15", "rflags", "memory"
        : "intel", "volatile"
    );
}
// The low-level context switch: saves RFLAGS on the old stack, swaps stack
// pointers, then reports the paused thread to the scheduler and resumes the
// new thread by restoring its RFLAGS and returning to its saved address.
//
// Arguments (System V): rdi = new stack pointer, rsi = paused thread id,
// rdx = switch reason (passed through unchanged to `add_paused_thread`,
// which receives the OLD stack pointer in rdi after the swap).
// NOTE(review): the signature comment inside the asm string is stale — it
// omits the third `switch_reason` parameter in rdx.
global_asm!(
    "
    .intel_syntax noprefix
    // asm_context_switch(stack_pointer: u64, thread_id: u64)
    asm_context_switch:
        pushfq
        mov rax, rsp
        mov rsp, rdi
        mov rdi, rax
        call add_paused_thread
        popfq
        ret
    "
);
/// Rust-side landing point of `asm_context_switch`: records the paused
/// thread's saved stack pointer in the scheduler. Runs on the NEW thread's
/// stack, after the stack-pointer swap.
///
/// `#[no_mangle]` + `extern "C"` so the assembly can `call` it by name with
/// System V argument registers (rdi, rsi, rdx).
/// NOTE(review): passing `VirtAddr`/`ThreadId`/`SwitchReason` by value
/// across `extern "C"` assumes each has the ABI of a plain `u64` — confirm
/// the type layouts.
#[no_mangle]
pub extern "C" fn add_paused_thread(
    paused_stack_pointer: VirtAddr,
    paused_thread_id: ThreadId,
    switch_reason: SwitchReason,
) {
    with_scheduler(|s| s.add_paused_thread(paused_stack_pointer, paused_thread_id, switch_reason));
}
/// Entry shim for threads started from a closure.
///
/// `Stack::set_up_for_closure` pushed the closure's data pointer and then
/// its vtable pointer onto the new stack, so here the vtable (top of stack)
/// is popped into rsi and the data pointer into rdi — the second and first
/// System V arguments of `call_closure`.
#[naked]
fn call_closure_entry() -> ! {
    unsafe {
        asm!("
    pop rsi
    pop rdi
    call call_closure
    " ::: "mem" : "intel", "volatile")
    };
    // `call_closure` diverges; this only satisfies the `-> !` signature.
    unreachable!();
}
// no_mangle required because of https://github.com/rust-lang/rust/issues/68136
/// Reassembles the boxed closure from the raw data/vtable pointer pair that
/// `Stack::set_up_for_closure` split apart, then invokes it. Called from
/// `call_closure_entry`; never returns because the closure diverges.
#[no_mangle]
extern "C" fn call_closure(data: *mut (), vtable: *mut ()) -> ! {
    let trait_object = TraitObject { data, vtable };
    // SAFETY-NOTE(review): inverse of the transmute in `set_up_for_closure`;
    // sound only while the unstable `TraitObject` layout matches fat pointers.
    let f: Box<dyn FnOnce() -> !> = unsafe { mem::transmute(trait_object) };
    f()
}

57
src/multitasking/mod.rs Normal file
View File

@@ -0,0 +1,57 @@
use scheduler::Scheduler;
pub mod context_switch;
pub mod scheduler;
pub mod thread;
static SCHEDULER: spin::Mutex<Option<Scheduler>> = spin::Mutex::new(None);
/// Why the current thread is being switched away from; travels through the
/// context-switch assembly (as a `u64` in rdx, hence `#[repr(u64)]`) to
/// `Scheduler::add_paused_thread`, which re-queues the thread accordingly.
#[repr(u64)]
pub enum SwitchReason {
    /// Preempted (e.g. by the timer); stays runnable.
    Paused,
    /// Voluntarily gave up the CPU; stays runnable.
    Yield,
    /// Waiting for an event; parked until woken.
    Blocked,
    /// Finished; removed from the scheduler.
    Exit,
}
/// Preemptive scheduling entry point, intended for interrupt context
/// (e.g. the timer handler).
///
/// Uses `try_lock` instead of `lock`: the interrupted code might already
/// hold the scheduler lock, and spinning on it inside an interrupt handler
/// would deadlock. If the lock is unavailable (or the scheduler is not yet
/// initialized), scheduling is simply skipped until the next invocation.
pub fn invoke_scheduler() {
    let next = SCHEDULER
        .try_lock()
        .and_then(|mut scheduler| scheduler.as_mut().and_then(|s| s.schedule()));
    if let Some((next_stack_pointer, prev_thread_id)) = next {
        unsafe {
            context_switch::context_switch_to(
                next_stack_pointer,
                prev_thread_id,
                SwitchReason::Paused,
            )
        };
    }
}
/// Ends the current thread by switching away with `SwitchReason::Exit`,
/// which makes the scheduler drop its bookkeeping.
///
/// Panics if there is no other thread to switch to (the last thread cannot
/// exit), and again if the supposedly finished thread ever runs again.
pub fn exit_thread() -> ! {
    synchronous_context_switch(SwitchReason::Exit).expect("can't exit last thread");
    unreachable!("finished thread continued");
}
/// Voluntarily hands the CPU to the next ready thread.
///
/// A no-op (the `Err` is deliberately ignored) when no other thread is
/// ready to run.
pub fn yield_now() {
    let _ = synchronous_context_switch(SwitchReason::Yield);
}
/// Asks the scheduler for the next thread and switches to it, reporting
/// `reason` for the current thread.
///
/// Returns `Err(())` when no other thread is ready, in which case execution
/// simply continues in the current thread.
fn synchronous_context_switch(reason: SwitchReason) -> Result<(), ()> {
    if let Some((next_stack_pointer, prev_thread_id)) = with_scheduler(|s| s.schedule()) {
        // SAFETY: the stack pointer comes straight from the scheduler, which
        // only stores pointers saved by earlier switches / thread setup.
        unsafe { context_switch::context_switch_to(next_stack_pointer, prev_thread_id, reason) };
        Ok(())
    } else {
        Err(())
    }
}
/// Runs `f` with exclusive access to the global scheduler, creating the
/// scheduler on first use.
///
/// NOTE(review): initialization is lazy, presumably so the scheduler (and
/// its heap allocations) is not created before the heap exists — confirm.
/// This `lock()` spins, so it must not be called from interrupt context;
/// the interrupt path uses `try_lock` in `invoke_scheduler` instead.
pub fn with_scheduler<F, T>(f: F) -> T
where
    F: FnOnce(&mut Scheduler) -> T,
{
    f(SCHEDULER.lock().get_or_insert_with(Scheduler::new))
}

View File

@@ -0,0 +1,122 @@
use super::SwitchReason;
use crate::multitasking::thread::{Thread, ThreadId};
use alloc::collections::{BTreeMap, BTreeSet, VecDeque};
use core::mem;
use x86_64::VirtAddr;
/// Cooperative/preemptive round-robin scheduler owning all thread state.
pub struct Scheduler {
    // every known thread, keyed by id (includes current and idle threads)
    threads: BTreeMap<ThreadId, Thread>,
    // fallback thread run when the ready queue is empty; set once via
    // `set_idle_thread`, never queued in `paused_threads`
    idle_thread_id: Option<ThreadId>,
    // id of the thread that is executing right now
    current_thread_id: ThreadId,
    // FIFO ready queue of runnable (paused) threads
    paused_threads: VecDeque<ThreadId>,
    // threads parked via `SwitchReason::Blocked`, waiting to be woken
    blocked_threads: BTreeSet<ThreadId>,
    // wakeups that arrived before the target thread finished blocking;
    // NOTE(review): insertion into `wakeups` happens outside this chunk
    wakeups: BTreeSet<ThreadId>,
}
impl Scheduler {
    /// Creates a scheduler whose only thread is the one currently running
    /// (the root thread, reserved id 0).
    pub fn new() -> Self {
        let root_thread = Thread::create_root_thread();
        let root_id = root_thread.id();
        let mut threads = BTreeMap::new();
        threads
            .insert(root_id, root_thread)
            .expect_none("map is not empty after creation");
        Scheduler {
            threads,
            current_thread_id: root_id,
            paused_threads: VecDeque::new(),
            blocked_threads: BTreeSet::new(),
            wakeups: BTreeSet::new(),
            idle_thread_id: None,
        }
    }
    /// Pops the next runnable thread off the ready queue, if any.
    fn next_thread(&mut self) -> Option<ThreadId> {
        self.paused_threads.pop_front()
    }
    /// Decides which thread runs next.
    ///
    /// Falls back to the idle thread when the ready queue is empty, unless
    /// the idle thread is already running. Returns the next thread's saved
    /// stack pointer and the id of the previously current thread, or `None`
    /// when no switch should happen.
    pub fn schedule(&mut self) -> Option<(VirtAddr, ThreadId)> {
        let mut next_thread_id = self.next_thread();
        if next_thread_id.is_none() && Some(self.current_thread_id) != self.idle_thread_id {
            next_thread_id = self.idle_thread_id
        }
        if let Some(next_id) = next_thread_id {
            let next_thread = self
                .threads
                .get_mut(&next_id)
                .expect("next thread does not exist");
            // Take the pointer out so a *running* thread always stores
            // `None`; `add_paused_thread` puts it back on the next switch.
            let next_stack_pointer = next_thread
                .stack_pointer()
                .take()
                .expect("paused thread has no stack pointer");
            let prev_thread_id = mem::replace(&mut self.current_thread_id, next_thread.id());
            Some((next_stack_pointer, prev_thread_id))
        } else {
            None
        }
    }
    /// Stores the saved stack pointer of the thread that was just switched
    /// away from, then re-queues (or drops) it according to `switch_reason`.
    ///
    /// Called from the context-switch path, already on the new thread's
    /// stack.
    pub(super) fn add_paused_thread(
        &mut self,
        paused_stack_pointer: VirtAddr,
        paused_thread_id: ThreadId,
        switch_reason: SwitchReason,
    ) {
        let paused_thread = self
            .threads
            .get_mut(&paused_thread_id)
            .expect("paused thread does not exist");
        paused_thread
            .stack_pointer()
            .replace(paused_stack_pointer)
            .expect_none("running thread should have stack pointer set to None");
        // The idle thread never enters any queue; `schedule` falls back to
        // it explicitly when the ready queue is empty.
        if Some(paused_thread_id) == self.idle_thread_id {
            return; // do nothing
        }
        match switch_reason {
            SwitchReason::Paused | SwitchReason::Yield => {
                self.paused_threads.push_back(paused_thread_id)
            }
            SwitchReason::Blocked => {
                self.blocked_threads.insert(paused_thread_id);
                // A wakeup might have arrived while the thread was still
                // running; if so, make it runnable again immediately.
                self.check_for_wakeup(paused_thread_id);
            }
            SwitchReason::Exit => {
                // Fix: drop the unused `thread` binding — remove and verify
                // presence without keeping the value around.
                // TODO: free stack memory again
                self.threads
                    .remove(&paused_thread_id)
                    .expect("thread not found");
            }
        }
    }
    /// Registers a freshly created thread and marks it runnable.
    ///
    /// Panics if a thread with the same id is already registered.
    pub fn add_new_thread(&mut self, thread: Thread) {
        let thread_id = thread.id();
        self.threads
            .insert(thread_id, thread)
            .expect_none("thread already exists");
        self.paused_threads.push_back(thread_id);
    }
    /// Registers the idle thread, which runs whenever nothing else can.
    ///
    /// Panics if called twice or if the thread id is already registered.
    pub fn set_idle_thread(&mut self, thread: Thread) {
        let thread_id = thread.id();
        self.threads
            .insert(thread_id, thread)
            .expect_none("thread already exists");
        self.idle_thread_id
            .replace(thread_id)
            .expect_none("idle thread should be set only once");
    }
    /// Returns the id of the currently running thread.
    pub fn current_thread_id(&self) -> ThreadId {
        self.current_thread_id
    }
    /// If a wakeup is pending for `thread_id`, moves the thread from the
    /// blocked set back onto the ready queue.
    fn check_for_wakeup(&mut self, thread_id: ThreadId) {
        if self.wakeups.remove(&thread_id) {
            assert!(self.blocked_threads.remove(&thread_id));
            self.paused_threads.push_back(thread_id);
        }
    }
}

View File

@@ -0,0 +1,82 @@
use crate::memory::{alloc_stack, StackBounds};
use crate::multitasking::context_switch::Stack;
use alloc::boxed::Box;
use x86_64::{
structures::paging::{mapper, FrameAllocator, Mapper, Size4KiB},
VirtAddr,
};
/// A unique identifier for a kernel thread.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct ThreadId(u64);
impl ThreadId {
    /// Returns the raw numeric value of this id.
    pub fn as_u64(&self) -> u64 {
        let ThreadId(raw) = *self;
        raw
    }
    /// Allocates a fresh, globally unique id.
    ///
    /// The counter starts at 1 because id 0 is reserved for the root thread
    /// (see `Thread::create_root_thread`).
    fn new() -> Self {
        use core::sync::atomic::{AtomicU64, Ordering};
        static NEXT_THREAD_ID: AtomicU64 = AtomicU64::new(1);
        let fresh = NEXT_THREAD_ID.fetch_add(1, Ordering::SeqCst);
        ThreadId(fresh)
    }
}
/// Bookkeeping for a single kernel thread.
#[derive(Debug)]
pub struct Thread {
    // unique id; 0 is the root thread
    id: ThreadId,
    // saved stack pointer: `Some` while paused, `None` while running
    // (and `None` for the root thread until its first switch-away)
    stack_pointer: Option<VirtAddr>,
    // bounds of the allocated stack; `None` for the root thread, whose
    // stack was set up before this module existed
    stack_bounds: Option<StackBounds>,
}
impl Thread {
    /// Creates a new thread that begins execution at `entry_point`.
    ///
    /// Allocates a fresh stack via `alloc_stack` and prepares it so the
    /// first context switch jumps to `entry_point`.
    pub fn create(
        entry_point: fn() -> !,
        stack_size: u64,
        mapper: &mut impl Mapper<Size4KiB>,
        frame_allocator: &mut impl FrameAllocator<Size4KiB>,
    ) -> Result<Self, mapper::MapToError> {
        let stack_bounds = alloc_stack(stack_size, mapper, frame_allocator)?;
        let stack_pointer = {
            // SAFETY: the stack was just allocated and nobody else uses it.
            let mut stack = unsafe { Stack::new(stack_bounds.end()) };
            stack.set_up_for_entry_point(entry_point);
            stack.get_stack_pointer()
        };
        Ok(Self::new(stack_pointer, stack_bounds))
    }
    /// Like [`Thread::create`], but the new thread starts in `closure`.
    pub fn create_from_closure<F>(
        closure: F,
        stack_size: u64,
        mapper: &mut impl Mapper<Size4KiB>,
        frame_allocator: &mut impl FrameAllocator<Size4KiB>,
    ) -> Result<Self, mapper::MapToError>
    where
        F: FnOnce() -> ! + 'static + Send + Sync,
    {
        let stack_bounds = alloc_stack(stack_size, mapper, frame_allocator)?;
        let stack_pointer = {
            // SAFETY: the stack was just allocated and nobody else uses it.
            let mut stack = unsafe { Stack::new(stack_bounds.end()) };
            stack.set_up_for_closure(Box::new(closure));
            stack.get_stack_pointer()
        };
        Ok(Self::new(stack_pointer, stack_bounds))
    }
    /// Internal constructor for threads that own an allocated stack.
    fn new(stack_pointer: VirtAddr, stack_bounds: StackBounds) -> Self {
        Thread {
            id: ThreadId::new(),
            stack_pointer: Some(stack_pointer),
            stack_bounds: Some(stack_bounds),
        }
    }
    /// Creates the descriptor for the thread already running at boot.
    ///
    /// Gets the reserved id 0 and no stack bookkeeping, since it runs on
    /// the stack that was set up before this module existed.
    pub(super) fn create_root_thread() -> Self {
        Thread {
            id: ThreadId(0),
            stack_pointer: None,
            stack_bounds: None,
        }
    }
    /// Returns this thread's id.
    pub fn id(&self) -> ThreadId {
        self.id
    }
    /// Mutable access to the saved stack pointer slot
    /// (`Some` while paused, `None` while running).
    pub(super) fn stack_pointer(&mut self) -> &mut Option<VirtAddr> {
        &mut self.stack_pointer
    }
}

View File

@@ -13,10 +13,14 @@ lazy_static! {
/// Implementation detail of the `serial_print!`/`serial_println!` macros.
#[doc(hidden)]
pub fn _print(args: ::core::fmt::Arguments) {
    use core::fmt::Write;
    use x86_64::instructions::interrupts;
    // Disable interrupts while the SERIAL1 spinlock is held: an interrupt
    // handler that also prints would otherwise deadlock on the lock.
    interrupts::without_interrupts(|| {
        SERIAL1
            .lock()
            .write_fmt(args)
            .expect("Printing to serial failed");
    });
}
/// Prints to the host through the serial interface.

View File

@@ -166,11 +166,16 @@ macro_rules! println {
($($arg:tt)*) => ($crate::print!("{}\n", format_args!($($arg)*)));
}
/// Prints the given formatted string to the VGA text buffer
/// through the global `WRITER` instance.
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
    use core::fmt::Write;
    use x86_64::instructions::interrupts;
    // Disable interrupts while the WRITER spinlock is held so that an
    // interrupt handler printing at the same time cannot deadlock.
    interrupts::without_interrupts(|| {
        WRITER.lock().write_fmt(args).unwrap();
    });
}
#[test_case]
@@ -191,14 +196,20 @@ fn test_println_many() {
/// Verifies that a printed line actually appears in the VGA text buffer.
#[test_case]
fn test_println_output() {
    use core::fmt::Write;
    use x86_64::instructions::interrupts;
    serial_print!("test_println_output... ");
    let s = "Some test string that fits on a single line";
    // Hold the writer lock for the whole check (with interrupts disabled)
    // so a concurrent print cannot shift the buffer rows in between.
    // Fix: the previous text mixed old and new diff lines — it printed via
    // `println!` outside the lock and then called `WRITER.lock()` again
    // while the `writer` guard was still held, which would deadlock.
    interrupts::without_interrupts(|| {
        let mut writer = WRITER.lock();
        writeln!(writer, "\n{}", s).expect("writeln failed");
        for (i, c) in s.chars().enumerate() {
            let screen_char = writer.buffer.chars[BUFFER_HEIGHT - 2][i].read();
            assert_eq!(char::from(screen_char.ascii_character), c);
        }
    });
    serial_println!("[ok]");
}

76
tests/heap_allocation.rs Normal file
View File

@@ -0,0 +1,76 @@
#![no_std]
#![no_main]
#![feature(custom_test_frameworks)]
#![test_runner(blog_os::test_runner)]
#![reexport_test_harness_main = "test_main"]
extern crate alloc;
use alloc::{boxed::Box, vec::Vec};
use blog_os::{allocator::HEAP_SIZE, serial_print, serial_println};
use bootloader::{entry_point, BootInfo};
use core::panic::PanicInfo;
// Have the bootloader call `main` with the boot info (and type-check it).
entry_point!(main);
/// Test-kernel entry point: initialize paging and the heap, then run the
/// heap-allocation test cases.
fn main(boot_info: &'static BootInfo) -> ! {
    use blog_os::allocator;
    use blog_os::memory::{self, BootInfoFrameAllocator};
    use x86_64::VirtAddr;
    blog_os::init();
    // Physical memory is mapped at this offset by the bootloader.
    let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset);
    let mut mapper = unsafe { memory::init(phys_mem_offset) };
    let mut frame_allocator = unsafe { BootInfoFrameAllocator::init(&boot_info.memory_map) };
    // The heap must exist before any test case allocates.
    allocator::init_heap(&mut mapper, &mut frame_allocator).expect("heap initialization failed");
    test_main();
    loop {}
}
/// A single boxed value can be allocated and read back.
#[test_case]
fn simple_allocation() {
    serial_print!("simple_allocation... ");
    let boxed = Box::new(41);
    assert_eq!(*boxed, 41);
    serial_println!("[ok]");
}
/// A large vector forces repeated reallocations; verify the contents via
/// the closed-form sum 0 + 1 + … + (n-1) = (n-1)·n/2.
#[test_case]
fn large_vec() {
    serial_print!("large_vec... ");
    let n = 1000;
    let vec: Vec<u64> = (0..n).collect();
    assert_eq!(vec.iter().sum::<u64>(), (n - 1) * n / 2);
    serial_println!("[ok]");
}
/// Allocate and immediately drop many boxes; the allocator must reuse the
/// freed memory, or this loop would exhaust the heap.
#[test_case]
fn many_boxes() {
    serial_print!("many_boxes... ");
    for value in 0..HEAP_SIZE {
        let boxed = Box::new(value);
        assert_eq!(*boxed, value);
    }
    serial_println!("[ok]");
}
/// Like `many_boxes`, but one allocation stays alive across all iterations;
/// this catches allocators that can only free in LIFO order (e.g. a plain
/// bump allocator).
#[test_case]
fn many_boxes_long_lived() {
    serial_print!("many_boxes_long_lived... ");
    let long_lived = Box::new(1);
    for value in 0..HEAP_SIZE {
        let short_lived = Box::new(value);
        assert_eq!(*short_lived, value);
    }
    assert_eq!(*long_lived, 1);
    serial_println!("[ok]");
}
/// Forwards panics to the crate's shared test panic handler
/// (NOTE(review): presumably reports the failure over serial and exits
/// QEMU — defined outside this file).
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    blog_os::test_panic_handler(info)
}