
Commit 0d263fc

Address review comments, update CHANGELOG, add CI miri tests
1 parent 6129afa commit 0d263fc

File tree

5 files changed: +181 -42 lines changed


.github/workflows/build.yml

Lines changed: 32 additions & 0 deletions
@@ -10,6 +10,27 @@ on:
     - cron: '40 5 * * *' # every day at 5:40
   pull_request:
 
+env:
+  # disable incremental compilation.
+  #
+  # incremental compilation is useful as part of an edit-build-test-edit cycle,
+  # as it lets the compiler avoid recompiling code that hasn't changed. however,
+  # on CI, we're not making small edits; we're almost always building the entire
+  # project from scratch. thus, incremental compilation on CI actually
+  # introduces *additional* overhead to support making future builds
+  # faster...but no future builds will ever occur in any given CI environment.
+  #
+  # see https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
+  # for details.
+  CARGO_INCREMENTAL: 0
+  # allow more retries for network requests in cargo (downloading crates) and
+  # rustup (installing toolchains). this should help to reduce flaky CI failures
+  # from transient network timeouts or other issues.
+  CARGO_NET_RETRY: 10
+  RUSTUP_MAX_RETRIES: 10
+  # don't emit giant backtraces in the CI logs.
+  RUST_BACKTRACE: short
+
 jobs:
   test:
     name: "Test"
@@ -110,6 +131,17 @@ jobs:
     - name: "Run cargo test with `use_spin_nightly` feature"
       run: cargo test --features use_spin_nightly
 
+  test_miri:
+    name: "Miri tests"
+    runs-on: ubuntu-latest
+    env:
+      MIRIFLAGS: "-Zmiri-disable-isolation -Zmiri-strict-provenance -Zmiri-tag-raw-pointers"
+      # TODO: Make sure the first test fails by not adding -Zmiri-ignore-leaks
+    steps:
+      - uses: actions/checkout@v1
+      - run: rustup toolchain install nightly --profile minimal --component rust-src miri
+      - run: cargo +nightly miri test --all-features
+
   check_formatting:
     name: "Check Formatting"
     runs-on: ubuntu-latest

Changelog.md

Lines changed: 6 additions & 0 deletions
@@ -1,5 +1,11 @@
 # Unreleased
 
+- Changed constructor to take `*mut u8` instead of `usize` ([#62])
+  - NOTE: Breaking API change - will require 0.10.0 release
+- Reworked internals to pass Miri tests ([#62])
+
+[#62]: https://github.com/phil-opp/linked-list-allocator/pull/62
+
 # 0.9.1 – 2021-10-17
 
 - Add safe constructor and initialization for `Heap` ([#55](https://github.com/phil-opp/linked-list-allocator/pull/55))
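Since the constructor signature is the breaking part, here is a minimal migration sketch. The allocator name and heap size are hypothetical, and `LockedHeap` assumes the `use_spin` feature is enabled:

    use linked_list_allocator::LockedHeap;

    #[global_allocator]
    static ALLOCATOR: LockedHeap = LockedHeap::empty();

    // Hypothetical backing storage for the heap.
    static mut HEAP_MEM: [u8; 64 * 1024] = [0; 64 * 1024];

    unsafe fn init_heap() {
        // Before (0.9.x), init took the start address as `usize`:
        //     ALLOCATOR.lock().init(HEAP_MEM.as_ptr() as usize, HEAP_MEM.len());
        // After this change, pass the start of the region as a raw `*mut u8`.
        ALLOCATOR.lock().init(HEAP_MEM.as_mut_ptr(), HEAP_MEM.len());
    }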

src/hole.rs

Lines changed: 52 additions & 26 deletions
@@ -83,9 +83,7 @@ impl Cursor {
                 // it will just have a smaller size after we have chopped off the "tail" for
                 // the allocation.
                 addr: hole_addr_u8,
-                size: unsafe { aligned_addr.offset_from(hole_addr_u8) }
-                    .try_into()
-                    .unwrap(),
+                size: (aligned_addr as usize) - (hole_addr_u8 as usize),
             });
             aligned_addr
         };
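For context on this change: `offset_from` requires both pointers to lie within the same allocated object and yields an `isize` that then needs a checked conversion, while subtracting the raw address values has neither constraint. A minimal illustrative sketch of the pattern, not crate code:

    // Distance in bytes between two addresses, computed on the address values.
    // Assumes (as the allocator guarantees here) that `end` is not below `start`.
    fn byte_distance(start: *mut u8, end: *mut u8) -> usize {
        debug_assert!(end as usize >= start as usize, "end must not precede start");
        (end as usize) - (start as usize)
    }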
@@ -109,24 +107,25 @@ impl Cursor {
             // sort of assume that the allocation is actually a bit larger than it
             // actually needs to be.
             //
-            // TODO(AJM): Write a test for this - does the code actually handle this
-            // correctly? Do we need to check the delta is less than an aligned hole
-            // size?
+            // NOTE: Because we always use `HoleList::align_layout`, the size of
+            // the new allocation is always "rounded up" to cover any partial gaps that
+            // would have occurred. For this reason, we DON'T need to "round up"
+            // to account for an unaligned hole spot.
             let hole_layout = Layout::new::<Hole>();
             let back_padding_start = align_up(allocation_end, hole_layout.align());
             let back_padding_end = back_padding_start.wrapping_add(hole_layout.size());
 
             // Will the proposed new back padding actually fit in the old hole slot?
             back_padding = if back_padding_end <= hole_end {
-                // Yes, it does!
+                // Yes, it does! Place a back padding node
                 Some(HoleInfo {
                     addr: back_padding_start,
-                    size: unsafe { hole_end.offset_from(back_padding_start) }
-                        .try_into()
-                        .unwrap(),
+                    size: (hole_end as usize) - (back_padding_start as usize),
                 })
             } else {
-                // No, it does not.
+                // No, it does not. We are now pretending that the allocation
+                // holds the extra 0..size_of::<Hole>() bytes that are not
+                // big enough to hold what SHOULD be back_padding
                 None
             };
         }
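The NOTE above relies on every request passing through `HoleList::align_layout` first. As a rough sketch of that invariant (an assumption about its behavior, not a copy of the crate's implementation): requested sizes are raised to the minimum hole size and rounded up to a multiple of the pointer width, so any leftover gap is always large enough to be described by a `Hole`:

    use core::alloc::Layout;
    use core::mem;

    // Sketch of the rounding assumed by the NOTE above (illustrative only):
    // raise the size to the minimum hole size, then round it up to a multiple
    // of the pointer width so leftover gaps can always hold a `Hole` node.
    fn align_layout_sketch(layout: Layout, min_hole_size: usize) -> Layout {
        let word = mem::size_of::<usize>();
        let size = layout.size().max(min_hole_size);
        let size = (size + word - 1) / word * word; // round up to a word multiple
        Layout::from_size_align(size, layout.align()).unwrap()
    }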
@@ -236,6 +235,7 @@ impl HoleList {
     }
 
     #[cfg(test)]
+    #[allow(dead_code)]
     pub(crate) fn debug(&mut self) {
         if let Some(cursor) = self.cursor() {
             let mut cursor = cursor;
@@ -261,11 +261,11 @@ impl HoleList {
 
     /// Creates a `HoleList` that contains the given hole.
     ///
-    /// ## Safety
+    /// # Safety
     ///
-    /// This function is unsafe because it
-    /// creates a hole at the given `hole_addr`. This can cause undefined behavior if this address
-    /// is invalid or if memory from the `[hole_addr, hole_addr+size)` range is used somewhere else.
+    /// This function is unsafe because it creates a hole at the given `hole_addr`.
+    /// This can cause undefined behavior if this address is invalid or if memory from the
+    /// `[hole_addr, hole_addr+size)` range is used somewhere else.
     ///
     /// The pointer to `hole_addr` is automatically aligned.
     pub unsafe fn new(hole_addr: *mut u8, hole_size: usize) -> HoleList {
@@ -314,6 +314,10 @@ impl HoleList {
     ///
     /// This function uses the “first fit” strategy, so it uses the first hole that is big
     /// enough. Thus the runtime is in O(n) but it should be reasonably fast for small allocations.
+    //
+    // NOTE: We could probably replace this with an `Option` instead of a `Result` in a later
+    // release to remove this clippy warning
+    #[allow(clippy::result_unit_err)]
     pub fn allocate_first_fit(&mut self, layout: Layout) -> Result<(NonNull<u8>, Layout), ()> {
         let aligned_layout = Self::align_layout(layout);
         let mut cursor = self.cursor().ok_or(())?;
@@ -332,16 +336,18 @@
 
     /// Frees the allocation given by `ptr` and `layout`.
     ///
-    /// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with
-    /// identical layout. Undefined behavior may occur for invalid arguments.
-    /// The function performs exactly the same layout adjustments as [`allocate_first_fit`] and
-    /// returns the aligned layout.
-    ///
     /// This function walks the list and inserts the given block at the correct place. If the freed
     /// block is adjacent to another free block, the blocks are merged again.
     /// This operation is in `O(n)` since the list needs to be sorted by address.
     ///
     /// [`allocate_first_fit`]: HoleList::allocate_first_fit
+    ///
+    /// # Safety
+    ///
+    /// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with
+    /// identical layout. Undefined behavior may occur for invalid arguments.
+    /// The function performs exactly the same layout adjustments as [`allocate_first_fit`] and
+    /// returns the aligned layout.
     pub unsafe fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) -> Layout {
         let aligned_layout = Self::align_layout(layout);
         deallocate(self, ptr.as_ptr(), aligned_layout.size());
@@ -379,7 +385,11 @@ struct HoleInfo {
 
 unsafe fn make_hole(addr: *mut u8, size: usize) -> NonNull<Hole> {
     let hole_addr = addr.cast::<Hole>();
-    debug_assert_eq!(addr as usize % align_of::<Hole>(), 0);
+    debug_assert_eq!(
+        addr as usize % align_of::<Hole>(),
+        0,
+        "Hole address not aligned!",
+    );
     hole_addr.write(Hole { size, next: None });
     NonNull::new_unchecked(hole_addr)
 }
@@ -393,8 +403,11 @@ impl Cursor {
         let node_size = unsafe { node.as_ref().size };
         let hole_u8 = self.hole.as_ptr().cast::<u8>();
 
-        assert!(node_u8.wrapping_add(node_size) <= hole_u8);
-        assert_eq!(self.previous().size, 0);
+        assert!(
+            node_u8.wrapping_add(node_size) <= hole_u8,
+            "Freed node aliases existing hole! Bad free?",
+        );
+        debug_assert_eq!(self.previous().size, 0);
 
         let Cursor { mut prev, hole } = self;
         unsafe {
415428
let hole_size = self.current().size;
416429

417430
// Does hole overlap node?
418-
assert!(hole_u8.wrapping_add(hole_size) <= node_u8);
431+
assert!(
432+
hole_u8.wrapping_add(hole_size) <= node_u8,
433+
"Freed node aliases existing hole! Bad free?",
434+
);
419435

420436
// If we have a next, does the node overlap next?
421437
if let Some(next) = self.current().next.as_ref() {
422438
let node_u8 = node_u8 as *const u8;
423-
assert!(node_u8.wrapping_add(node_size) <= next.as_ptr().cast::<u8>())
439+
assert!(
440+
node_u8.wrapping_add(node_size) <= next.as_ptr().cast::<u8>(),
441+
"Freed node aliases existing hole! Bad free?",
442+
);
424443
}
425444

426445
// All good! Let's insert that after.
@@ -447,6 +466,11 @@
         };
 
         // Can we directly merge these? e.g. are they touching?
+        //
+        // NOTE: Because we always use `HoleList::align_layout`, the size of
+        // the new hole is always "rounded up" to cover any partial gaps that
+        // would have occurred. For this reason, we DON'T need to "round up"
+        // to account for an unaligned hole spot.
         let hole_u8 = hole.as_ptr().cast::<u8>();
         let hole_sz = unsafe { hole.as_ref().size };
         let next_u8 = next.as_ptr().cast::<u8>();
@@ -510,7 +534,9 @@ fn deallocate(list: &mut HoleList, addr: *mut u8, size: usize) {
         Err(mut cursor) => {
             // Nope. It lives somewhere else. Advance the list until we find its home
             while let Err(()) = cursor.try_insert_after(hole) {
-                cursor = cursor.next().unwrap();
+                cursor = cursor
+                    .next()
+                    .expect("Reached end of holes without finding deallocation hole!");
             }
             // Great! We found a home for it, our cursor is now JUST BEFORE the new
             // node we inserted, so we need to try to merge up to twice: One to combine

src/lib.rs

Lines changed: 44 additions & 16 deletions
@@ -17,7 +17,7 @@ use core::alloc::GlobalAlloc;
 use core::alloc::Layout;
 #[cfg(feature = "alloc_ref")]
 use core::alloc::{AllocError, Allocator};
-use core::convert::{TryFrom, TryInto};
+use core::convert::TryInto;
 use core::mem::MaybeUninit;
 #[cfg(feature = "use_spin")]
 use core::ops::Deref;
@@ -64,10 +64,17 @@ impl Heap {
 
     /// Initializes an empty heap
     ///
-    /// # Unsafety
+    /// # Safety
     ///
     /// This function must be called at most once and must only be used on an
     /// empty heap.
+    ///
+    /// The bottom address must be valid and the memory in the
+    /// `[heap_bottom, heap_bottom + heap_size)` range must not be used for anything else.
+    /// This function is unsafe because it can cause undefined behavior if the given address
+    /// is invalid.
+    ///
+    /// The provided memory range must be valid for the `'static` lifetime.
     pub unsafe fn init(&mut self, heap_bottom: *mut u8, heap_size: usize) {
         self.bottom = heap_bottom;
         self.size = heap_size;
@@ -104,10 +111,16 @@
         unsafe { self.init(address, size) }
     }
 
-    /// Creates a new heap with the given `bottom` and `size`. The bottom address must be valid
-    /// and the memory in the `[heap_bottom, heap_bottom + heap_size)` range must not be used for
-    /// anything else. This function is unsafe because it can cause undefined behavior if the
-    /// given address is invalid.
+    /// Creates a new heap with the given `bottom` and `size`.
+    ///
+    /// # Safety
+    ///
+    /// The bottom address must be valid and the memory in the
+    /// `[heap_bottom, heap_bottom + heap_size)` range must not be used for anything else.
+    /// This function is unsafe because it can cause undefined behavior if the given address
+    /// is invalid.
+    ///
+    /// The provided memory range must be valid for the `'static` lifetime.
     pub unsafe fn new(heap_bottom: *mut u8, heap_size: usize) -> Heap {
         if heap_size < HoleList::min_size() {
             Self::empty()
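A minimal sketch of satisfying this contract with a `'static` buffer (the buffer name and size are illustrative):

    use core::mem::MaybeUninit;
    use core::ptr::addr_of_mut;
    use linked_list_allocator::Heap;

    // Illustrative backing storage; a `static` gives the range the required
    // `'static` lifetime, and nothing else may use this memory afterwards.
    static mut HEAP_SPACE: [MaybeUninit<u8>; 4096] = [MaybeUninit::uninit(); 4096];

    fn make_heap() -> Heap {
        // SAFETY: `HEAP_SPACE` is valid, `'static`, used only for this heap,
        // and this function is called at most once.
        unsafe { Heap::new(addr_of_mut!(HEAP_SPACE).cast::<u8>(), 4096) }
    }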
@@ -138,6 +151,10 @@ impl Heap {
     /// This function scans the list of free memory blocks and uses the first block that is big
     /// enough. The runtime is in O(n) where n is the number of free blocks, but it should be
     /// reasonably fast for small allocations.
+    //
+    // NOTE: We could probably replace this with an `Option` instead of a `Result` in a later
+    // release to remove this clippy warning
+    #[allow(clippy::result_unit_err)]
     pub fn allocate_first_fit(&mut self, layout: Layout) -> Result<NonNull<u8>, ()> {
         match self.holes.allocate_first_fit(layout) {
             Ok((ptr, aligned_layout)) => {
Ok((ptr, aligned_layout)) => {
@@ -149,12 +166,16 @@ impl Heap {
149166
}
150167

151168
/// Frees the given allocation. `ptr` must be a pointer returned
152-
/// by a call to the `allocate_first_fit` function with identical size and alignment. Undefined
153-
/// behavior may occur for invalid arguments, thus this function is unsafe.
169+
/// by a call to the `allocate_first_fit` function with identical size and alignment.
154170
///
155171
/// This function walks the list of free memory blocks and inserts the freed block at the
156172
/// correct place. If the freed block is adjacent to another free block, the blocks are merged
157173
/// again. This operation is in `O(n)` since the list needs to be sorted by address.
174+
///
175+
/// # Safety
176+
///
177+
/// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with
178+
/// identical layout. Undefined behavior may occur for invalid arguments.
158179
pub unsafe fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) {
159180
self.used -= self.holes.deallocate(ptr, layout).size();
160181
}
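A short sketch of the allocate/deallocate pairing this safety section requires, reusing the hypothetical `make_heap` helper from above:

    use core::alloc::Layout;

    fn round_trip() {
        let mut heap = make_heap();
        let layout = Layout::from_size_align(64, 8).unwrap();

        if let Ok(ptr) = heap.allocate_first_fit(layout) {
            // ... use the allocation ...
            // SAFETY: `ptr` came from `allocate_first_fit`, and the identical
            // layout is passed back in, as the contract requires.
            unsafe { heap.deallocate(ptr, layout) };
        }
    }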
@@ -171,8 +192,7 @@
 
     /// Return the top address of the heap
     pub fn top(&self) -> *mut u8 {
-        self.bottom
-            .wrapping_offset(isize::try_from(self.size).unwrap())
+        self.bottom.wrapping_add(self.size)
     }
 
     /// Returns the size of the used part of the heap
@@ -187,9 +207,11 @@
 
     /// Extends the size of the heap by creating a new hole at the end
     ///
-    /// # Unsafety
+    /// # Safety
     ///
-    /// The new extended area must be valid
+    /// The amount of data given in `by` MUST exist directly after the original
+    /// range of data provided when constructing the [Heap]. The additional data
+    /// must have the same lifetime as the original range of data.
     pub unsafe fn extend(&mut self, by: usize) {
         let top = self.top();
         let layout = Layout::from_size_align(by, 1).unwrap();
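A sketch of a valid `extend` call under this contract, assuming the heap initially covers only the first half of a larger hypothetical buffer:

    use core::ptr::addr_of_mut;
    use linked_list_allocator::Heap;

    // Hypothetical buffer: the heap starts over bytes 0..2048, so the
    // remaining 2048 bytes sit directly after the original range.
    static mut BUF: [u8; 4096] = [0; 4096];

    unsafe fn grow_heap() {
        let mut heap = Heap::new(addr_of_mut!(BUF).cast::<u8>(), 2048);
        // SAFETY: bytes 2048..4096 of `BUF` lie directly after the original
        // range and share its `'static` lifetime.
        heap.extend(2048);
    }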
@@ -235,10 +257,16 @@ impl LockedHeap {
         LockedHeap(Spinlock::new(Heap::empty()))
     }
 
-    /// Creates a new heap with the given `bottom` and `size`. The bottom address must be valid
-    /// and the memory in the `[heap_bottom, heap_bottom + heap_size)` range must not be used for
-    /// anything else. This function is unsafe because it can cause undefined behavior if the
-    /// given address is invalid.
+    /// Creates a new heap with the given `bottom` and `size`.
+    ///
+    /// # Safety
+    ///
+    /// The bottom address must be valid and the memory in the
+    /// `[heap_bottom, heap_bottom + heap_size)` range must not be used for anything else.
+    /// This function is unsafe because it can cause undefined behavior if the given address
+    /// is invalid.
+    ///
+    /// The provided memory range must be valid for the `'static` lifetime.
     pub unsafe fn new(heap_bottom: *mut u8, heap_size: usize) -> LockedHeap {
         LockedHeap(Spinlock::new(Heap {
             bottom: heap_bottom,
