Commit 8d4379b

construct memory map in memory mapped by other page table
This has the advantage that we don't need to map the memory map into the bootloader's page tables.

Up until now, we assumed that UEFI only maps memory into regions covered by the first PML4 entry, but it turns out that some implementations map the frame buffer into memory covered by other PML4 entries (this is entirely legal; the frame buffer need not be identity mapped). Previously we removed all but the first PML4 entry so that we could place our own mappings there, but it's now clear that this can lead to problems. The solution is to leave the page tables constructed by UEFI unmodified and keep all of their PML4 entries. This is problematic, however, because the code constructing the boot info and memory map assumed it could map them at the same addresses used in the kernel's address space, where UEFI may already have mapped something.

Instead of mapping the boot info and memory map into the bootloader's address space, we now map them only into the kernel's address space. When we want to write to them, we first look up the physical addresses in the kernel's page tables and write through the identity mapping. RemoteMemoryRegion implements this.

We have unit tests for constructing the memory map, and we want those tests to keep running outside the bootloader, so they can't use RemoteMemoryRegion. To solve this, we introduce a trait MemoryRegionSlice that only requires a method for writing a memory region at a given index, and implement it for both RemoteMemoryRegion and (in tests) [MaybeUninit<MemoryRegion>].
1 parent ea3f61a commit 8d4379b
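
In short, memory-map entries are now written through a small abstraction rather than into a plain slice. The following sketch condenses the idea from the diff below; it assumes the x86_64 crate's OffsetPageTable/Translate API and that the bootloader identity-maps all physical memory (the real implementation also stores a length, asserts bounds, and gates the slice impl behind #[cfg(test)]):

use core::mem::{size_of, MaybeUninit};

use bootloader_api::info::MemoryRegion;
use x86_64::{
    structures::paging::{OffsetPageTable, Translate},
    VirtAddr,
};

/// Anything we can write memory-map entries into, one entry at a time.
pub(crate) trait MemoryRegionSlice {
    fn set(&mut self, index: usize, region: MemoryRegion);
}

/// Used by the unit tests: an ordinary slice in the current address space.
impl MemoryRegionSlice for [MaybeUninit<MemoryRegion>] {
    fn set(&mut self, index: usize, region: MemoryRegion) {
        self[index].write(region);
    }
}

/// Used by the bootloader: the slice lives in the kernel's address space, so
/// every write first translates the virtual address in the kernel's page
/// tables and then stores through the identity mapping of physical memory.
pub(crate) struct RemoteMemoryRegion<'a> {
    page_table: &'a OffsetPageTable<'a>,
    base: VirtAddr,
}

impl MemoryRegionSlice for RemoteMemoryRegion<'_> {
    fn set(&mut self, index: usize, region: MemoryRegion) {
        // View the entry as raw bytes (MaybeUninit<u8> because of padding).
        let bytes = unsafe {
            core::slice::from_raw_parts(
                &region as *const _ as *const MaybeUninit<u8>,
                size_of::<MemoryRegion>(),
            )
        };
        let start = self.base + (index * size_of::<MemoryRegion>()) as u64;
        // An entry may straddle a page boundary, so translate byte by byte.
        for (i, byte) in bytes.iter().copied().enumerate() {
            let phys = self
                .page_table
                .translate_addr(start + i as u64)
                .expect("memory map entry is mapped in the kernel page table");
            // All physical memory is identity mapped in the bootloader.
            unsafe { (phys.as_u64() as *mut MaybeUninit<u8>).write(byte) };
        }
    }
}

construct_memory_map then accepts any &mut (impl MemoryRegionSlice + ?Sized) and returns the number of entries written, which the caller turns back into an initialized slice.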

File tree: 2 files changed, +166 −80 lines

common/src/legacy_memory_region.rs

Lines changed: 118 additions & 31 deletions
@@ -1,13 +1,9 @@
 use bootloader_api::info::{MemoryRegion, MemoryRegionKind};
-use core::{
-    cmp,
-    iter::{empty, Empty},
-    mem::MaybeUninit,
-};
+use core::{cmp, mem::MaybeUninit};
 use x86_64::{
     align_down, align_up,
-    structures::paging::{FrameAllocator, PhysFrame, Size4KiB},
-    PhysAddr,
+    structures::paging::{FrameAllocator, OffsetPageTable, PhysFrame, Size4KiB, Translate},
+    PhysAddr, VirtAddr,
 };
 
 /// A slice of memory that is used by the bootloader and needs to be reserved
@@ -159,14 +155,14 @@ where
     /// must be at least the value returned by [`len`] plus 1.
     ///
     /// The return slice is a subslice of `regions`, shortened to the actual number of regions.
-    pub fn construct_memory_map(
+    pub(crate) fn construct_memory_map(
         self,
-        regions: &mut [MaybeUninit<MemoryRegion>],
+        regions: &mut (impl MemoryRegionSlice + ?Sized),
         kernel_slice_start: PhysAddr,
         kernel_slice_len: u64,
         ramdisk_slice_start: Option<PhysAddr>,
         ramdisk_slice_len: u64,
-    ) -> &mut [MemoryRegion] {
+    ) -> usize {
         let used_slices = [
             UsedMemorySlice {
                 start: self.min_frame.start_address().as_u64(),
@@ -211,17 +207,12 @@ where
             }
         }
 
-        let initialized = &mut regions[..next_index];
-        unsafe {
-            // inlined variant of: `MaybeUninit::slice_assume_init_mut(initialized)`
-            // TODO: undo inlining when `slice_assume_init_mut` becomes stable
-            &mut *(initialized as *mut [_] as *mut [_])
-        }
+        next_index
     }
 
     fn split_and_add_region<'a, U>(
         mut region: MemoryRegion,
-        regions: &mut [MaybeUninit<MemoryRegion>],
+        regions: &mut (impl MemoryRegionSlice + ?Sized),
         next_index: &mut usize,
         used_slices: U,
     ) where
@@ -279,24 +270,97 @@ where
 
     fn add_region(
         region: MemoryRegion,
-        regions: &mut [MaybeUninit<MemoryRegion>],
+        regions: &mut (impl MemoryRegionSlice + ?Sized),
         next_index: &mut usize,
     ) {
         if region.start == region.end {
             // skip zero sized regions
             return;
         }
-        unsafe {
-            regions
-                .get_mut(*next_index)
-                .expect("cannot add region: no more free entries in memory map")
-                .as_mut_ptr()
-                .write(region)
-        };
+        regions.set(*next_index, region);
         *next_index += 1;
     }
 }
 
+/// A trait for slice-like types that allow writing a memory region to given
+/// index. Usually `RemoteMemoryRegion` is used, but we use
+/// `[MaybeUninit<MemoryRegion>]` in tests.
+pub(crate) trait MemoryRegionSlice {
+    fn set(&mut self, index: usize, region: MemoryRegion);
+}
+
+#[cfg(test)]
+impl MemoryRegionSlice for [MaybeUninit<MemoryRegion>] {
+    fn set(&mut self, index: usize, region: MemoryRegion) {
+        self.get_mut(index)
+            .expect("cannot add region: no more free entries in memory map")
+            .write(region);
+    }
+}
+
+/// This type makes it possible to write to a slice mapped in a different set
+/// of page tables. For every write access, we look up the physical frame in
+/// the page tables and directly write to the physical memory. That way we
+/// don't need to map the slice into the current page tables.
+pub(crate) struct RemoteMemoryRegion<'a> {
+    page_table: &'a OffsetPageTable<'a>,
+    base: VirtAddr,
+    len: usize,
+}
+
+impl<'a> RemoteMemoryRegion<'a> {
+    /// Construct a new `RemoteMemoryRegion`.
+    ///
+    /// # Safety
+    ///
+    /// The caller has to ensure that the memory in the starting at `base`
+    /// isn't aliasing memory in the current page tables.
+    pub unsafe fn new(page_table: &'a OffsetPageTable<'a>, base: VirtAddr, len: usize) -> Self {
+        Self {
+            page_table,
+            base,
+            len,
+        }
+    }
+}
+
+impl MemoryRegionSlice for RemoteMemoryRegion<'_> {
+    fn set(&mut self, index: usize, region: MemoryRegion) {
+        assert!(
+            index < self.len,
+            "cannot add region: no more free entries in memory map"
+        );
+
+        // Cast the memory region into a byte slice. MemoryRegion has some
+        // padding bytes, so need to use `MaybeUninit<u8>` instead of `u8`.
+        let bytes = unsafe {
+            core::slice::from_raw_parts(
+                &region as *const _ as *const MaybeUninit<u8>,
+                size_of::<MemoryRegion>(),
+            )
+        };
+
+        // An entry may cross a page boundary, so write one byte at a time.
+        let addr = self.base + index * size_of::<MemoryRegion>();
+        for (addr, byte) in (addr..).zip(bytes.iter().copied()) {
+            // Lookup the physical address in the remote page table.
+            let phys_addr = self
+                .page_table
+                .translate_addr(addr)
+                .expect("memory is mapped in the page table");
+
+            // Get a pointer to the physical memory -> All physical memory is
+            // identitiy mapped.
+            let ptr = phys_addr.as_u64() as *mut MaybeUninit<u8>;
+
+            // Write the byte.
+            unsafe {
+                ptr.write(byte);
+            }
+        }
+    }
+}
+
 unsafe impl<I, D> FrameAllocator<Size4KiB> for LegacyFrameAllocator<I, D>
 where
     I: ExactSizeIterator<Item = D> + Clone,
@@ -384,14 +448,21 @@ mod tests {
         let ramdisk_slice_start = None;
         let ramdisk_slice_len = 0;
 
-        let kernel_regions = allocator.construct_memory_map(
-            &mut regions,
+        let len = allocator.construct_memory_map(
+            regions.as_mut_slice(),
             kernel_slice_start,
             kernel_slice_len,
             ramdisk_slice_start,
             ramdisk_slice_len,
         );
 
+        let initialized = &mut regions[..len];
+        let kernel_regions = unsafe {
+            // inlined variant of: `MaybeUninit::slice_assume_init_mut(initialized)`
+            // TODO: undo inlining when `slice_assume_init_mut` becomes stable
+            &mut *(initialized as *mut [_] as *mut [MemoryRegion])
+        };
+
         for region in kernel_regions.iter() {
             assert!(region.start % 0x1000 == 0);
             assert!(region.end % 0x1000 == 0);
@@ -411,13 +482,21 @@ mod tests {
         let ramdisk_slice_start = Some(PhysAddr::new(0x60000));
         let ramdisk_slice_len = 0x2000;
 
-        let kernel_regions = allocator.construct_memory_map(
-            &mut regions,
+        let len = allocator.construct_memory_map(
+            regions.as_mut_slice(),
             kernel_slice_start,
             kernel_slice_len,
             ramdisk_slice_start,
             ramdisk_slice_len,
         );
+
+        let initialized = &mut regions[..len];
+        let kernel_regions = unsafe {
+            // inlined variant of: `MaybeUninit::slice_assume_init_mut(initialized)`
+            // TODO: undo inlining when `slice_assume_init_mut` becomes stable
+            &mut *(initialized as *mut [_] as *mut [MemoryRegion])
+        };
+
         let mut kernel_regions = kernel_regions.iter();
         // usable memory before the kernel
         assert_eq!(
@@ -514,13 +593,21 @@ mod tests {
         let ramdisk_slice_start = Some(PhysAddr::new(0x60000));
         let ramdisk_slice_len = 0x2000;
 
-        let kernel_regions = allocator.construct_memory_map(
-            &mut regions,
+        let len = allocator.construct_memory_map(
+            regions.as_mut_slice(),
             kernel_slice_start,
             kernel_slice_len,
             ramdisk_slice_start,
             ramdisk_slice_len,
         );
+
+        let initialized = &mut regions[..len];
+        let kernel_regions = unsafe {
+            // inlined variant of: `MaybeUninit::slice_assume_init_mut(initialized)`
+            // TODO: undo inlining when `slice_assume_init_mut` becomes stable
+            &mut *(initialized as *mut [_] as *mut [MemoryRegion])
+        };
+
         let mut kernel_regions = kernel_regions.iter();
 
         // usable memory before the kernel

common/src/lib.rs

Lines changed: 48 additions & 49 deletions
@@ -10,6 +10,7 @@ use bootloader_api::{
 };
 use bootloader_boot_config::{BootConfig, LevelFilter};
 use core::{alloc::Layout, arch::asm, mem::MaybeUninit, slice};
+use legacy_memory_region::RemoteMemoryRegion;
 use level_4_entries::UsedLevel4Entries;
 use usize_conversions::FromUsize;
 use x86_64::{
@@ -481,69 +482,67 @@ where
     log::info!("Allocate bootinfo");
 
     // allocate and map space for the boot info
-    let (boot_info, memory_regions) = {
-        let boot_info_layout = Layout::new::<BootInfo>();
-        let regions = frame_allocator.memory_map_max_region_count();
-        let memory_regions_layout = Layout::array::<MemoryRegion>(regions).unwrap();
-        let (combined, memory_regions_offset) =
-            boot_info_layout.extend(memory_regions_layout).unwrap();
-
-        let boot_info_addr = mapping_addr(
-            config.mappings.boot_info,
-            u64::from_usize(combined.size()),
-            u64::from_usize(combined.align()),
-            &mut mappings.used_entries,
-        )
-        .expect("boot info addr is not properly aligned");
+    let boot_info_layout = Layout::new::<BootInfo>();
+    let regions = frame_allocator.memory_map_max_region_count();
+    let memory_regions_layout = Layout::array::<MemoryRegion>(regions).unwrap();
+    let (combined, memory_regions_offset) = boot_info_layout.extend(memory_regions_layout).unwrap();
+
+    let boot_info_addr = mapping_addr(
+        config.mappings.boot_info,
+        u64::from_usize(combined.size()),
+        u64::from_usize(combined.align()),
+        &mut mappings.used_entries,
+    )
+    .expect("boot info addr is not properly aligned");
 
-        let memory_map_regions_addr = boot_info_addr + memory_regions_offset;
-        let memory_map_regions_end = boot_info_addr + combined.size();
+    let memory_map_regions_addr = boot_info_addr + memory_regions_offset;
+    let memory_map_regions_end = boot_info_addr + combined.size();
 
-        let start_page = Page::containing_address(boot_info_addr);
-        let end_page = Page::containing_address(memory_map_regions_end - 1u64);
-        for page in Page::range_inclusive(start_page, end_page) {
-            let flags =
-                PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
-            let frame = frame_allocator
-                .allocate_frame()
-                .expect("frame allocation for boot info failed");
-            match unsafe {
-                page_tables
-                    .kernel
-                    .map_to(page, frame, flags, &mut frame_allocator)
-            } {
-                Ok(tlb) => tlb.flush(),
-                Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
-            }
-            // we need to be able to access it too
-            match unsafe {
-                page_tables
-                    .bootloader
-                    .map_to(page, frame, flags, &mut frame_allocator)
-            } {
-                Ok(tlb) => tlb.flush(),
-                Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
-            }
+    let start_page = Page::containing_address(boot_info_addr);
+    let end_page = Page::containing_address(memory_map_regions_end - 1u64);
+    for page in Page::range_inclusive(start_page, end_page) {
+        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
+        let frame = frame_allocator
+            .allocate_frame()
+            .expect("frame allocation for boot info failed");
+        match unsafe {
+            page_tables
+                .kernel
+                .map_to(page, frame, flags, &mut frame_allocator)
+        } {
+            Ok(tlb) => tlb.flush(),
+            Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
         }
+        // we need to be able to access it too
+        match unsafe {
+            page_tables
+                .bootloader
+                .map_to(page, frame, flags, &mut frame_allocator)
+        } {
+            Ok(tlb) => tlb.flush(),
+            Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
+        }
+    }
 
-        let boot_info: &'static mut MaybeUninit<BootInfo> =
-            unsafe { &mut *boot_info_addr.as_mut_ptr() };
-        let memory_regions: &'static mut [MaybeUninit<MemoryRegion>] =
-            unsafe { slice::from_raw_parts_mut(memory_map_regions_addr.as_mut_ptr(), regions) };
-        (boot_info, memory_regions)
-    };
+    let boot_info: &'static mut MaybeUninit<BootInfo> =
+        unsafe { &mut *boot_info_addr.as_mut_ptr() };
 
     log::info!("Create Memory Map");
 
     // build memory map
-    let memory_regions = frame_allocator.construct_memory_map(
-        memory_regions,
+    let mut slice =
+        unsafe { RemoteMemoryRegion::new(&page_tables.kernel, memory_map_regions_addr, regions) };
+    let len = frame_allocator.construct_memory_map(
+        &mut slice,
         mappings.kernel_slice_start,
         mappings.kernel_slice_len,
         mappings.ramdisk_slice_phys_start,
         mappings.ramdisk_slice_len,
     );
 
+    let memory_regions =
+        unsafe { core::slice::from_raw_parts_mut(memory_map_regions_addr.as_mut_ptr(), len) };
+
     log::info!("Create bootinfo");
 
     // create boot info
