@@ -317,37 +317,37 @@ impl HoleList {
317
317
}
318
318
}
319
319
320
- // // / Frees the allocation given by `ptr` and `layout`.
321
- // // /
322
- // // / `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with
323
- // // / identical layout. Undefined behavior may occur for invalid arguments.
324
- // // / The function performs exactly the same layout adjustments as [`allocate_first_fit`] and
325
- // // / returns the aligned layout.
326
- // // /
327
- // // / This function walks the list and inserts the given block at the correct place. If the freed
328
- // // / block is adjacent to another free block, the blocks are merged again.
329
- // // / This operation is in `O(n)` since the list needs to be sorted by address.
330
- // // /
331
- // // / [`allocate_first_fit`]: HoleList::allocate_first_fit
332
- // pub unsafe fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) -> Layout {
333
- // let aligned_layout = Self::align_layout(layout);
334
- // deallocate(&mut self.first, ptr.as_ptr(), aligned_layout.size());
335
- // aligned_layout
336
- // }
320
+ /// Frees the allocation given by `ptr` and `layout`.
321
+ ///
322
+ /// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with
323
+ /// identical layout. Undefined behavior may occur for invalid arguments.
324
+ /// The function performs exactly the same layout adjustments as [`allocate_first_fit`] and
325
+ /// returns the aligned layout.
326
+ ///
327
+ /// This function walks the list and inserts the given block at the correct place. If the freed
328
+ /// block is adjacent to another free block, the blocks are merged again.
329
+ /// This operation is in `O(n)` since the list needs to be sorted by address.
330
+ ///
331
+ /// [`allocate_first_fit`]: HoleList::allocate_first_fit
332
+ pub unsafe fn deallocate ( & mut self , ptr : NonNull < u8 > , layout : Layout ) -> Layout {
333
+ let aligned_layout = Self :: align_layout ( layout) ;
334
+ deallocate ( & mut self . first , ptr. as_ptr ( ) , aligned_layout. size ( ) ) ;
335
+ aligned_layout
336
+ }
337
337
338
338
/// Returns the minimal allocation size. Smaller allocations or deallocations are not allowed.
pub fn min_size() -> usize {
    // A free block must be able to store a hole header in place: a size field
    // plus a next pointer, i.e. two machine words.
    2 * size_of::<usize>()
}
342
342
343
/// Returns information about the first hole for test purposes.
#[cfg(test)]
pub fn first_hole(&self) -> Option<(*const u8, usize)> {
    // The list head is a dummy hole; the first real hole (if any) is its successor.
    let first = self.first.next.as_ref()?;
    let addr = first.as_ptr() as *const u8;
    // SAFETY: assumes the list invariant that every `next` pointer stored in the
    // list refers to a live, properly initialized `Hole` — TODO confirm.
    let size = unsafe { first.as_ref() }.size;
    Some((addr, size))
}
351
351
}
352
352
353
353
/// A block containing free memory. It points to the next hole and thus forms a linked list.
@@ -381,96 +381,96 @@ struct Allocation {
381
381
back_padding : Option < HoleInfo > ,
382
382
}
383
383
384
/// Frees the allocation given by `(addr, size)`. It starts at the given hole and walks the list to
/// find the correct place (the list is sorted by address).
fn deallocate(mut hole: &mut Hole, addr: *mut u8, mut size: usize) {
    loop {
        // A freed block must be large enough to hold a hole header in place.
        assert!(size >= HoleList::min_size());

        let hole_addr = if hole.size == 0 {
            // It's the dummy hole, which is the head of the HoleList. It's somewhere on the stack,
            // so its address is not the address of the hole. We set the addr to 0 as it's always
            // the first hole.
            core::ptr::null_mut()
        } else {
            // It's a real hole in memory and its address is the address of the hole
            hole as *mut _ as *mut u8
        };

        // Each freed block must be handled by the previous hole in memory. Thus the freed
        // address must be always behind the current hole.
        assert!(
            hole_addr.wrapping_offset(hole.size.try_into().unwrap()) <= addr,
            "invalid deallocation (probably a double free)"
        );

        // get information about the next block
        // SAFETY: assumes the list invariant that every `next` pointer refers to a live,
        // properly initialized `Hole` — TODO confirm against the insertion sites below.
        let next_hole_info = hole.next.as_mut().map(|next| unsafe { next.as_mut().info() });

        match next_hole_info {
            Some(next)
                if hole_addr.wrapping_offset(hole.size.try_into().unwrap()) == addr
                    && addr.wrapping_offset(size.try_into().unwrap()) == next.addr =>
            {
                // block fills the gap between this hole and the next hole
                // before: ___XXX____YYYYY____ where X is this hole and Y the next hole
                // after:  ___XXXFFFFYYYYY____ where F is the freed block

                hole.size += size + next.size; // merge the F and Y blocks to this X block
                // SAFETY: `hole.next` is `Some` here (we just read `next_hole_info` from it)
                // and points to a valid hole per the list invariant.
                hole.next = unsafe { hole.next.as_mut().unwrap().as_mut().next.take() }; // remove the Y block
            }
            _ if hole_addr.wrapping_add(hole.size.try_into().unwrap()) == addr => {
                // block is right behind this hole but there is used memory after it
                // before: ___XXX______YYYYY____ where X is this hole and Y the next hole
                // after:  ___XXXFFFF__YYYYY____ where F is the freed block

                // or: block is right behind this hole and this is the last hole
                // before: ___XXX_______________ where X is this hole and Y the next hole
                // after:  ___XXXFFFF___________ where F is the freed block

                hole.size += size; // merge the F block to this X block
            }
            Some(next) if addr.wrapping_offset(size.try_into().unwrap()) == next.addr => {
                // block is right before the next hole but there is used memory before it
                // before: ___XXX______YYYYY____ where X is this hole and Y the next hole
                // after:  ___XXX__FFFFYYYYY____ where F is the freed block

                // SAFETY: `hole.next` is `Some` (the guard matched on `next_hole_info`)
                // and points to a valid hole per the list invariant.
                hole.next = unsafe { hole.next.as_mut().unwrap().as_mut().next.take() }; // remove the Y block
                size += next.size; // free the merged F/Y block in next iteration
                continue;
            }
            Some(next) if next.addr <= addr => {
                // block is behind the next hole, so we delegate it to the next hole
                // before: ___XXX__YYYYY________ where X is this hole and Y the next hole
                // after:  ___XXX__YYYYY__FFFF__ where F is the freed block

                // SAFETY: `next` was `Some`, so `hole.next` is `Some` and points to a
                // valid hole per the list invariant.
                hole = unsafe { move_helper(hole).next.as_mut().unwrap().as_mut() }; // start next iteration at next hole
                continue;
            }
            _ => {
                // block is between this and the next hole
                // before: ___XXX________YYYYY_ where X is this hole and Y the next hole
                // after:  ___XXX__FFFF__YYYYY_ where F is the freed block

                // or: this is the last hole
                // before: ___XXX_________ where X is this hole
                // after:  ___XXX__FFFF___ where F is the freed block

                let new_hole = Hole {
                    size: size,
                    next: hole.next.take(), // the reference to the Y block (if it exists)
                };
                // write the new hole to the freed memory
                debug_assert_eq!(addr as usize % align_of::<Hole>(), 0);
                let ptr = addr as *mut Hole;
                // SAFETY: the region at `addr` spans at least `min_size()` bytes (asserted at
                // the top of the loop) and the debug assertion checks `Hole` alignment;
                // assumes `addr` really came from this allocator — TODO confirm at call sites.
                unsafe { ptr.write(new_hole) };
                // add the F block as the next block of the X block
                // SAFETY: assumes `addr` (and thus `ptr`) is non-null; it lies at or past
                // `hole_addr + hole.size` per the assertion above — TODO confirm.
                hole.next = Some(unsafe { NonNull::new_unchecked(ptr) });
            }
        }
        break;
    }
}
474
474
475
475
/// Identity function to ease moving of references.
476
476
///
0 commit comments