@@ -1,13 +1,9 @@
 use bootloader_api::info::{MemoryRegion, MemoryRegionKind};
-use core::{
-    cmp,
-    iter::{empty, Empty},
-    mem::MaybeUninit,
-};
+use core::{cmp, mem::MaybeUninit};
 use x86_64::{
     align_down, align_up,
-    structures::paging::{FrameAllocator, PhysFrame, Size4KiB},
-    PhysAddr,
+    structures::paging::{FrameAllocator, OffsetPageTable, PhysFrame, Size4KiB, Translate},
+    PhysAddr, VirtAddr,
 };
 
 /// A slice of memory that is used by the bootloader and needs to be reserved
@@ -159,14 +155,14 @@ where
     /// must be at least the value returned by [`len`] plus 1.
     ///
     /// The return slice is a subslice of `regions`, shortened to the actual number of regions.
-    pub fn construct_memory_map(
+    pub(crate) fn construct_memory_map(
         self,
-        regions: &mut [MaybeUninit<MemoryRegion>],
+        regions: &mut (impl MemoryRegionSlice + ?Sized),
         kernel_slice_start: PhysAddr,
         kernel_slice_len: u64,
         ramdisk_slice_start: Option<PhysAddr>,
         ramdisk_slice_len: u64,
-    ) -> &mut [MemoryRegion] {
+    ) -> usize {
         let used_slices = [
             UsedMemorySlice {
                 start: self.min_frame.start_address().as_u64(),
@@ -211,17 +207,12 @@ where
             }
         }
 
-        let initialized = &mut regions[..next_index];
-        unsafe {
-            // inlined variant of: `MaybeUninit::slice_assume_init_mut(initialized)`
-            // TODO: undo inlining when `slice_assume_init_mut` becomes stable
-            &mut *(initialized as *mut [_] as *mut [_])
-        }
+        next_index
     }
 
     fn split_and_add_region<'a, U>(
         mut region: MemoryRegion,
-        regions: &mut [MaybeUninit<MemoryRegion>],
+        regions: &mut (impl MemoryRegionSlice + ?Sized),
         next_index: &mut usize,
         used_slices: U,
     ) where
@@ -279,24 +270,97 @@ where
 
     fn add_region(
         region: MemoryRegion,
-        regions: &mut [MaybeUninit<MemoryRegion>],
+        regions: &mut (impl MemoryRegionSlice + ?Sized),
         next_index: &mut usize,
     ) {
         if region.start == region.end {
             // skip zero sized regions
             return;
         }
-        unsafe {
-            regions
-                .get_mut(*next_index)
-                .expect("cannot add region: no more free entries in memory map")
-                .as_mut_ptr()
-                .write(region)
-        };
+        regions.set(*next_index, region);
         *next_index += 1;
     }
 }
 
+/// A trait for slice-like types that allow writing a memory region to a given
+/// index. Usually `RemoteMemoryRegion` is used, but we use
+/// `[MaybeUninit<MemoryRegion>]` in tests.
+pub(crate) trait MemoryRegionSlice {
+    fn set(&mut self, index: usize, region: MemoryRegion);
+}
+
+#[cfg(test)]
+impl MemoryRegionSlice for [MaybeUninit<MemoryRegion>] {
+    fn set(&mut self, index: usize, region: MemoryRegion) {
+        self.get_mut(index)
+            .expect("cannot add region: no more free entries in memory map")
+            .write(region);
+    }
+}
+
+/// This type makes it possible to write to a slice mapped in a different set
+/// of page tables. For every write access, we look up the physical frame in
+/// the page tables and directly write to the physical memory. That way we
+/// don't need to map the slice into the current page tables.
+pub(crate) struct RemoteMemoryRegion<'a> {
+    page_table: &'a OffsetPageTable<'a>,
+    base: VirtAddr,
+    len: usize,
+}
+
+impl<'a> RemoteMemoryRegion<'a> {
+    /// Construct a new `RemoteMemoryRegion`.
+    ///
+    /// # Safety
+    ///
+    /// The caller has to ensure that the memory starting at `base` isn't
+    /// aliasing memory in the current page tables.
+    pub unsafe fn new(page_table: &'a OffsetPageTable<'a>, base: VirtAddr, len: usize) -> Self {
+        Self {
+            page_table,
+            base,
+            len,
+        }
+    }
+}
+
+impl MemoryRegionSlice for RemoteMemoryRegion<'_> {
+    fn set(&mut self, index: usize, region: MemoryRegion) {
+        assert!(
+            index < self.len,
+            "cannot add region: no more free entries in memory map"
+        );
+
+        // Cast the memory region into a byte slice. MemoryRegion has some
+        // padding bytes, so we need to use `MaybeUninit<u8>` instead of `u8`.
+        let bytes = unsafe {
+            core::slice::from_raw_parts(
+                &region as *const _ as *const MaybeUninit<u8>,
+                size_of::<MemoryRegion>(),
+            )
+        };
+
+        // An entry may cross a page boundary, so write one byte at a time.
+        let addr = self.base + index * size_of::<MemoryRegion>();
+        for (addr, byte) in (addr..).zip(bytes.iter().copied()) {
+            // Look up the physical address in the remote page table.
+            let phys_addr = self
+                .page_table
+                .translate_addr(addr)
+                .expect("memory is mapped in the page table");
+
+            // Get a pointer to the physical memory -> All physical memory is
+            // identity mapped.
+            let ptr = phys_addr.as_u64() as *mut MaybeUninit<u8>;
+
+            // Write the byte.
+            unsafe {
+                ptr.write(byte);
+            }
+        }
+    }
+}
+
 unsafe impl<I, D> FrameAllocator<Size4KiB> for LegacyFrameAllocator<I, D>
 where
     I: ExactSizeIterator<Item = D> + Clone,
@@ -384,14 +448,21 @@ mod tests {
         let ramdisk_slice_start = None;
         let ramdisk_slice_len = 0;
 
-        let kernel_regions = allocator.construct_memory_map(
-            &mut regions,
+        let len = allocator.construct_memory_map(
+            regions.as_mut_slice(),
             kernel_slice_start,
             kernel_slice_len,
             ramdisk_slice_start,
             ramdisk_slice_len,
         );
 
+        let initialized = &mut regions[..len];
+        let kernel_regions = unsafe {
+            // inlined variant of: `MaybeUninit::slice_assume_init_mut(initialized)`
+            // TODO: undo inlining when `slice_assume_init_mut` becomes stable
+            &mut *(initialized as *mut [_] as *mut [MemoryRegion])
+        };
+
         for region in kernel_regions.iter() {
             assert!(region.start % 0x1000 == 0);
             assert!(region.end % 0x1000 == 0);
@@ -411,13 +482,21 @@ mod tests {
         let ramdisk_slice_start = Some(PhysAddr::new(0x60000));
         let ramdisk_slice_len = 0x2000;
 
-        let kernel_regions = allocator.construct_memory_map(
-            &mut regions,
+        let len = allocator.construct_memory_map(
+            regions.as_mut_slice(),
             kernel_slice_start,
             kernel_slice_len,
             ramdisk_slice_start,
             ramdisk_slice_len,
         );
+
+        let initialized = &mut regions[..len];
+        let kernel_regions = unsafe {
+            // inlined variant of: `MaybeUninit::slice_assume_init_mut(initialized)`
+            // TODO: undo inlining when `slice_assume_init_mut` becomes stable
+            &mut *(initialized as *mut [_] as *mut [MemoryRegion])
+        };
+
         let mut kernel_regions = kernel_regions.iter();
         // usable memory before the kernel
         assert_eq!(
@@ -514,13 +593,21 @@ mod tests {
         let ramdisk_slice_start = Some(PhysAddr::new(0x60000));
         let ramdisk_slice_len = 0x2000;
 
-        let kernel_regions = allocator.construct_memory_map(
-            &mut regions,
+        let len = allocator.construct_memory_map(
+            regions.as_mut_slice(),
             kernel_slice_start,
             kernel_slice_len,
             ramdisk_slice_start,
             ramdisk_slice_len,
         );
+
+        let initialized = &mut regions[..len];
+        let kernel_regions = unsafe {
+            // inlined variant of: `MaybeUninit::slice_assume_init_mut(initialized)`
+            // TODO: undo inlining when `slice_assume_init_mut` becomes stable
+            &mut *(initialized as *mut [_] as *mut [MemoryRegion])
+        };
+
         let mut kernel_regions = kernel_regions.iter();
 
         // usable memory before the kernel
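
For orientation, here is a minimal sketch of how the bootloader side could drive the new `MemoryRegionSlice` machinery: it wraps the kernel-mapped boot-info slice in a `RemoteMemoryRegion` and passes it to `construct_memory_map`, which now returns only the number of initialized entries. The function name `write_memory_map_sketch` and the parameters describing the remote slice (`kernel_page_table`, `memory_regions_addr`, `memory_regions_len`) are hypothetical and not part of this commit; the sketch assumes it lives in the same module as `RemoteMemoryRegion`, so `LegacyFrameAllocator`, `LegacyMemoryRegion`, and the trait are in scope, and the actual call site in the bootloader may differ.

```rust
use x86_64::{structures::paging::OffsetPageTable, PhysAddr, VirtAddr};

/// Sketch only: writes the memory map into a slice that is mapped in the
/// kernel's page tables rather than the bootloader's.
fn write_memory_map_sketch<I, D>(
    frame_allocator: LegacyFrameAllocator<I, D>,
    kernel_page_table: &OffsetPageTable<'_>,
    memory_regions_addr: VirtAddr, // virtual address of the boot-info slice (kernel mapping)
    memory_regions_len: usize,     // capacity of that slice in entries
    kernel_slice_start: PhysAddr,
    kernel_slice_len: u64,
) -> usize
where
    I: ExactSizeIterator<Item = D> + Clone,
    D: LegacyMemoryRegion,
{
    // Safety: the boot-info slice is only mapped in the kernel's page tables,
    // so writing to it through physical memory does not alias any mapping in
    // the bootloader's current address space.
    let mut remote = unsafe {
        RemoteMemoryRegion::new(kernel_page_table, memory_regions_addr, memory_regions_len)
    };

    // Each `set` call translates the target address through the kernel's page
    // tables and writes the entry byte-by-byte into physical memory. The
    // return value is the number of initialized `MemoryRegion` entries.
    frame_allocator.construct_memory_map(
        &mut remote,
        kernel_slice_start,
        kernel_slice_len,
        None, // no ramdisk in this sketch
        0,
    )
}
```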