@@ -18,6 +18,10 @@ use x86_64::structures::paging::{Page, PageTableFlags, PhysFrame, PhysFrameRange
 use x86_64::ux::u9;
 use x86_64::{PhysAddr, VirtAddr};
 
+/// The offset into the virtual address space where the physical memory is mapped if
+/// the `map_physical_memory` feature is activated.
+const PHYSICAL_MEMORY_OFFSET: u64 = 0o_177777_770_000_000_000_0000;
+
 global_asm!(include_str!("stage_1.s"));
 global_asm!(include_str!("stage_2.s"));
 global_asm!(include_str!("e820.s"));
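
A minimal standalone sketch (not part of the commit) showing how the octal constant decodes: after the 16 sign-extension bits, every group of three octal digits is one 9-bit page-table index, so the value places the physical-memory mapping at a single level-4 entry.

// 0o_177777_770_000_000_000_0000
//    sign   P4  P3  P2  P1  page offset
fn main() {
    let offset: u64 = 0o_177777_770_000_000_000_0000;
    assert_eq!(offset, 0xFFFF_FC00_0000_0000);
    // Level-4 index sits in bits 39..48, below the sign-extension bits.
    let p4_index = (offset >> 39) & 0o777;
    assert_eq!(p4_index, 0o770); // entry 504 of the level-4 table
    println!("physical memory mapped through level-4 entry {}", p4_index);
}
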
@@ -150,8 +154,8 @@ fn load_elf(
         recursive_index,
         recursive_index,
         recursive_index,
-    );
-    let page_table = unsafe { &mut *(recursive_page_table_addr.start_address().as_mut_ptr()) };
+    ).start_address();
+    let page_table = unsafe { &mut *(recursive_page_table_addr.as_mut_ptr()) };
     let mut rec_page_table =
         RecursivePageTable::new(page_table).expect("recursive page table creation failed");
 
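
For context, the address taken here is the self-referencing one: with a recursive entry at index R, the virtual address whose four table indices all equal R resolves to the level-4 table itself. A sketch of the arithmetic (an illustration, not code from the commit):

// Illustration only: build the canonical virtual address whose four
// page-table indices all equal `recursive_index`; that address is where
// the level-4 table maps itself.
fn recursive_table_addr(recursive_index: u64) -> u64 {
    assert!(recursive_index < 512); // page-table indices are 9 bits
    let addr = (recursive_index << 39)
        | (recursive_index << 30)
        | (recursive_index << 21)
        | (recursive_index << 12);
    // Sign-extend bit 47 so the address is canonical.
    if addr & (1 << 47) != 0 {
        addr | 0xFFFF_0000_0000_0000
    } else {
        addr
    }
}
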
@@ -229,10 +233,9 @@ fn load_elf(
         page
     };
 
-    #[cfg(feature = "map_physical_memory")]
-    {
+    if cfg!(feature = "map_physical_memory") {
         fn virt_for_phys(phys: PhysAddr) -> VirtAddr {
-            VirtAddr::new(phys.as_u64() + bootloader::PHYSICAL_MEMORY_OFFSET)
+            VirtAddr::new(phys.as_u64() + PHYSICAL_MEMORY_OFFSET)
         }
 
         let start_frame = PhysFrame::<Size2MiB>::containing_address(PhysAddr::new(0));
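
The switch from the `#[cfg]` attribute to the `cfg!` macro changes when the code is discarded. A small illustration (not from the commit):

fn example() {
    // `#[cfg]` strips the block before type checking, so it may reference
    // items that only exist when the feature is enabled.
    #[cfg(feature = "map_physical_memory")]
    {
        /* compiled only with the feature */
    }

    // `cfg!` expands to a plain `true`/`false` literal, so the block must
    // always compile; the dead branch is removed by the optimizer instead.
    if cfg!(feature = "map_physical_memory") {
        /* always compiled, conditionally executed */
    }
}
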
@@ -253,7 +256,7 @@ fn load_elf(
     }
 
     // Construct boot info structure.
-    let mut boot_info = BootInfo::new(memory_map);
+    let mut boot_info = BootInfo::new(memory_map, recursive_page_table_addr.as_u64(), PHYSICAL_MEMORY_OFFSET);
     boot_info.memory_map.sort();
 
     // Write boot info to boot info page.
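
`BootInfo::new` now also carries the level-4 table address and the physical-memory offset to the kernel. A hypothetical sketch of the widened type; the field names here are assumptions, not taken from the bootloader crate:

// Hypothetical shape of the widened BootInfo; names are assumed.
pub struct MemoryMap; // stand-in for the real memory map type

pub struct BootInfo {
    pub memory_map: MemoryMap,
    pub p4_table_addr: u64,
    pub physical_memory_offset: u64,
}

impl BootInfo {
    pub fn new(memory_map: MemoryMap, p4_table_addr: u64, physical_memory_offset: u64) -> Self {
        BootInfo { memory_map, p4_table_addr, physical_memory_offset }
    }
}
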
@@ -263,12 +266,6 @@ fn load_elf(
     // Make sure that the kernel respects the write-protection bits, even when in ring 0.
     enable_write_protect_bit();
 
-    #[cfg(feature = "recursive_level_4_table")]
-    assert_eq!(
-        recursive_page_table_addr.start_address().as_u64(),
-        bootloader::RECURSIVE_LEVEL_4_TABLE_ADDR
-    );
-
     if cfg!(not(feature = "recursive_level_4_table")) {
         // unmap recursive entry
         rec_page_table