Skip to content

Commit ccb6d6b

Browse files
committed
Keep recursive level 4 table address and physical memory offset in BootInfo
1 parent 00e6c3c commit ccb6d6b

File tree

3 files changed

+28
-28
lines changed

3 files changed

+28
-28
lines changed

src/bootinfo/mod.rs

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,13 +8,31 @@ mod memory_map;
88
#[repr(C)]
99
pub struct BootInfo {
1010
pub memory_map: MemoryMap,
11+
/// The virtual address of the recursively mapped level 4 page table.
12+
#[cfg(feature = "recursive_level_4_table")]
13+
pub recursive_level_4_table_addr: u64,
14+
/// The offset into the virtual address space where the physical memory is mapped.
15+
///
16+
/// Physical addresses can be converted to virtual addresses by adding this offset to them.
17+
///
18+
/// The mapping of the physical memory allows access to arbitrary physical frames. Accessing
19+
/// frames that are also mapped at other virtual addresses can easily break memory safety and
20+
/// cause undefined behavior. Only frames reported as `USABLE` by the memory map in the `BootInfo`
21+
/// can be safely accessed.
22+
#[cfg(feature = "map_physical_memory")]
23+
pub physical_memory_offset: u64,
1124
_non_exhaustive: u8, // `()` is not FFI safe
1225
}
1326

1427
impl BootInfo {
15-
pub fn new(memory_map: MemoryMap) -> Self {
28+
#[allow(unused_variables)]
29+
pub fn new(memory_map: MemoryMap, recursive_level_4_table_addr: u64, physical_memory_offset: u64) -> Self {
1630
BootInfo {
1731
memory_map,
32+
#[cfg(feature = "recursive_level_4_table")]
33+
recursive_level_4_table_addr,
34+
#[cfg(feature = "map_physical_memory")]
35+
physical_memory_offset,
1836
_non_exhaustive: 0,
1937
}
2038
}

src/lib.rs

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,5 @@
11
#![no_std]
22

3-
/// The virtual address of the recursively mapped level 4 page table.
4-
#[cfg(feature = "recursive_level_4_table")]
5-
pub const RECURSIVE_LEVEL_4_TABLE_ADDR: u64 = 0o_177777_777_777_777_777_0000;
6-
7-
/// The offset into the virtual address space where the physical memory is mapped.
8-
///
9-
/// Physical addresses can be converted to virtual addresses by adding this offset to them.
10-
///
11-
/// The mapping of the physical memory allows to access arbitrary physical frames. Accessing
12-
/// frames that are also mapped at other virtual addresses can easily break memory safety and
13-
/// cause undefined behavior. Only frames reported as `USABLE` by the memory map in the `BootInfo`
14-
/// can be safely accessed.
15-
#[cfg(feature = "map_physical_memory")]
16-
pub const PHYSICAL_MEMORY_OFFSET: u64 = 0o_177777_770_000_000_000_0000;
17-
183
pub use crate::bootinfo::BootInfo;
194

205
pub mod bootinfo;

src/main.rs

Lines changed: 9 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,10 @@ use x86_64::structures::paging::{Page, PageTableFlags, PhysFrame, PhysFrameRange
1818
use x86_64::ux::u9;
1919
use x86_64::{PhysAddr, VirtAddr};
2020

21+
/// The offset into the virtual address space where the physical memory is mapped if
22+
/// the `map_physical_memory` feature is activated.
23+
const PHYSICAL_MEMORY_OFFSET: u64 = 0o_177777_770_000_000_000_0000;
24+
2125
global_asm!(include_str!("stage_1.s"));
2226
global_asm!(include_str!("stage_2.s"));
2327
global_asm!(include_str!("e820.s"));
@@ -150,8 +154,8 @@ fn load_elf(
150154
recursive_index,
151155
recursive_index,
152156
recursive_index,
153-
);
154-
let page_table = unsafe { &mut *(recursive_page_table_addr.start_address().as_mut_ptr()) };
157+
).start_address();
158+
let page_table = unsafe { &mut *(recursive_page_table_addr.as_mut_ptr()) };
155159
let mut rec_page_table =
156160
RecursivePageTable::new(page_table).expect("recursive page table creation failed");
157161

@@ -229,10 +233,9 @@ fn load_elf(
229233
page
230234
};
231235

232-
#[cfg(feature = "map_physical_memory")]
233-
{
236+
if cfg!(feature = "map_physical_memory") {
234237
fn virt_for_phys(phys: PhysAddr) -> VirtAddr {
235-
VirtAddr::new(phys.as_u64() + bootloader::PHYSICAL_MEMORY_OFFSET)
238+
VirtAddr::new(phys.as_u64() + PHYSICAL_MEMORY_OFFSET)
236239
}
237240

238241
let start_frame = PhysFrame::<Size2MiB>::containing_address(PhysAddr::new(0));
@@ -253,7 +256,7 @@ fn load_elf(
253256
}
254257

255258
// Construct boot info structure.
256-
let mut boot_info = BootInfo::new(memory_map);
259+
let mut boot_info = BootInfo::new(memory_map, recursive_page_table_addr.as_u64(), PHYSICAL_MEMORY_OFFSET);
257260
boot_info.memory_map.sort();
258261

259262
// Write boot info to boot info page.
@@ -263,12 +266,6 @@ fn load_elf(
263266
// Make sure that the kernel respects the write-protection bits, even when in ring 0.
264267
enable_write_protect_bit();
265268

266-
#[cfg(feature = "recursive_level_4_table")]
267-
assert_eq!(
268-
recursive_page_table_addr.start_address().as_u64(),
269-
bootloader::RECURSIVE_LEVEL_4_TABLE_ADDR
270-
);
271-
272269
if cfg!(not(feature = "recursive_level_4_table")) {
273270
// unmap recursive entry
274271
rec_page_table

0 commit comments

Comments
 (0)