
Commit 4a07f78

Forbade all racing mixed size atomic accesses
1 parent ceb173d commit 4a07f78

6 files changed: +78 -129 lines

src/concurrency/data_race.rs

Lines changed: 14 additions & 38 deletions
@@ -287,20 +287,15 @@ impl MemoryCellClocks {
         Ok(())
     }
 
-    /// Checks if the memory cell write races with any prior atomic read or write
-    fn write_race_free_with_atomic(&mut self, clocks: &ThreadClockSet) -> bool {
+    /// Checks if the memory cell access is ordered with all prior atomic reads and writes
+    fn race_free_with_atomic(&self, clocks: &ThreadClockSet) -> bool {
         if let Some(atomic) = self.atomic() {
             atomic.read_vector <= clocks.clock && atomic.write_vector <= clocks.clock
         } else {
             true
         }
     }
 
-    /// Checks if the memory cell read races with any prior atomic write
-    fn read_race_free_with_atomic(&self, clocks: &ThreadClockSet) -> bool {
-        if let Some(atomic) = self.atomic() { atomic.write_vector <= clocks.clock } else { true }
-    }
-
     /// Update memory cell data-race tracking for atomic
     /// load relaxed semantics, is a no-op if this memory was
     /// not used previously as atomic memory.
@@ -528,7 +523,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         // the *value* (including the associated provenance if this is an AtomicPtr) at this location.
         // Only metadata on the location itself is used.
         let scalar = this.allow_data_races_ref(move |this| this.read_scalar(&place.into()))?;
-        this.validate_overlapping_atomic_read(place)?;
+        this.validate_overlapping_atomic(place)?;
         this.buffered_atomic_read(place, atomic, scalar, || {
             this.validate_atomic_load(place, atomic)
         })
@@ -542,7 +537,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicWriteOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        this.validate_overlapping_atomic_write(dest)?;
+        this.validate_overlapping_atomic(dest)?;
         this.allow_data_races_mut(move |this| this.write_scalar(val, &(*dest).into()))?;
         this.validate_atomic_store(dest, atomic)?;
         // FIXME: it's not possible to get the value before write_scalar. A read_scalar will cause
@@ -563,7 +558,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
         let this = self.eval_context_mut();
 
-        this.validate_overlapping_atomic_write(place)?;
+        this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
 
         // Atomics wrap around on overflow.
@@ -592,7 +587,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
         let this = self.eval_context_mut();
 
-        this.validate_overlapping_atomic_write(place)?;
+        this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
         this.allow_data_races_mut(|this| this.write_scalar(new, &(*place).into()))?;
 
@@ -613,7 +608,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
         let this = self.eval_context_mut();
 
-        this.validate_overlapping_atomic_write(place)?;
+        this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
         let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar()?.to_bool()?;
 
@@ -656,7 +651,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         use rand::Rng as _;
         let this = self.eval_context_mut();
 
-        this.validate_overlapping_atomic_write(place)?;
+        this.validate_overlapping_atomic(place)?;
         // Failure ordering cannot be stronger than success ordering, therefore first attempt
         // to read with the failure ordering and if successful then try again with the success
         // read ordering and write in the success case.
@@ -706,7 +701,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicReadOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
-        this.validate_overlapping_atomic_read(place)?;
+        this.validate_overlapping_atomic(place)?;
         this.validate_atomic_op(
             place,
             atomic,
@@ -729,7 +724,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicWriteOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        this.validate_overlapping_atomic_write(place)?;
+        this.validate_overlapping_atomic(place)?;
        this.validate_atomic_op(
             place,
             atomic,
@@ -755,7 +750,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
         let release = matches!(atomic, Release | AcqRel | SeqCst);
         let this = self.eval_context_mut();
-        this.validate_overlapping_atomic_write(place)?;
+        this.validate_overlapping_atomic(place)?;
         this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
             if acquire {
                 memory.load_acquire(clocks, index)?;
@@ -941,9 +936,9 @@ impl VClockAlloc {
         )
     }
 
-    /// Detect racing atomic writes (not data races)
+    /// Detect racing atomic read and writes (not data races)
     /// on every byte of the current access range
-    pub(super) fn read_race_free_with_atomic<'tcx>(
+    pub(super) fn race_free_with_atomic<'tcx>(
         &self,
         range: AllocRange,
         global: &GlobalState,
@@ -952,26 +947,7 @@ impl VClockAlloc {
             let (_, clocks) = global.current_thread_state();
             let alloc_ranges = self.alloc_ranges.borrow();
             for (_, range) in alloc_ranges.iter(range.start, range.size) {
-                if !range.read_race_free_with_atomic(&clocks) {
-                    return false;
-                }
-            }
-        }
-        true
-    }
-
-    /// Detect racing atomic read and writes (not data races)
-    /// on every byte of the current access range
-    pub(super) fn write_race_free_with_atomic<'tcx>(
-        &mut self,
-        range: AllocRange,
-        global: &GlobalState,
-    ) -> bool {
-        if global.race_detecting() {
-            let (_, clocks) = global.current_thread_state();
-            let alloc_ranges = self.alloc_ranges.get_mut();
-            for (_, range) in alloc_ranges.iter_mut(range.start, range.size) {
-                if !range.write_race_free_with_atomic(&clocks) {
+                if !range.race_free_with_atomic(&clocks) {
                     return false;
                 }
             }
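
The unified check above reduces to one vector-clock comparison: the current thread's clock must cover both the read vector and the write vector of the cell, i.e. every earlier atomic read and write on those bytes must happen-before the access. The following self-contained sketch uses a deliberately simplified clock representation (a plain map, not Miri's VClock/ThreadClockSet types) to illustrate that comparison; the names and the HashMap encoding are my own.

use std::collections::HashMap;

type ThreadId = usize;
type VClock = HashMap<ThreadId, u64>;

// Component-wise `a <= b`: every timestamp recorded in `a` is covered by `b`.
fn leq(a: &VClock, b: &VClock) -> bool {
    a.iter().all(|(t, ts)| b.get(t).copied().unwrap_or(0) >= *ts)
}

// Mirrors the shape of `race_free_with_atomic`: the access must be ordered
// with all prior atomic reads *and* writes on the cell.
fn race_free_with_atomic(read_vector: &VClock, write_vector: &VClock, clock: &VClock) -> bool {
    leq(read_vector, clock) && leq(write_vector, clock)
}

fn main() {
    // Thread 1 last read the cell atomically at timestamp 3 and wrote it at timestamp 2.
    let read_vector = VClock::from([(1, 3)]);
    let write_vector = VClock::from([(1, 2)]);

    // Thread 0 has synchronized with thread 1 up to timestamp 3 (e.g. via a join):
    let synced = VClock::from([(0, 7), (1, 3)]);
    assert!(race_free_with_atomic(&read_vector, &write_vector, &synced));

    // Thread 0 has only seen thread 1 up to timestamp 1: the accesses race.
    let unsynced = VClock::from([(0, 7), (1, 1)]);
    assert!(!race_free_with_atomic(&read_vector, &write_vector, &unsynced));
}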

src/concurrency/weak_memory.rs

Lines changed: 7 additions & 34 deletions
@@ -35,7 +35,8 @@
 //! (such as accessing the top 16 bits of an AtomicU32). These senarios are generally undiscussed in formalisations of C++ memory model.
 //! In Rust, these operations can only be done through a `&mut AtomicFoo` reference or one derived from it, therefore these operations
 //! can only happen after all previous accesses on the same locations. This implementation is adapted to allow these operations.
-//! A mixed size/atomicity read that races with writes, or a write that races with reads or writes will still cause UBs to be thrown.
+//! A mixed atomicity read that races with writes, or a write that races with reads or writes will still cause UBs to be thrown.
+//! Mixed size atomic accesses must not race with any other atomic access, whether read or write, or a UB will be thrown.
 //! You can refer to test cases in weak_memory/extra_cpp.rs and weak_memory/extra_cpp_unsafe.rs for examples of these operations.
 
 // Our and the author's own implementation (tsan11) of the paper have some deviations from the provided operational semantics in §5.3:
@@ -403,9 +404,9 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
     crate::MiriEvalContextExt<'mir, 'tcx>
 {
     // If weak memory emulation is enabled, check if this atomic op imperfectly overlaps with a previous
-    // atomic write. If it does, then we require it to be ordered (non-racy) with all previous atomic
-    // writes on all the bytes in range
-    fn validate_overlapping_atomic_read(&self, place: &MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
+    // atomic read or write. If it does, then we require it to be ordered (non-racy) with all previous atomic
+    // accesses on all the bytes in range
+    fn validate_overlapping_atomic(&self, place: &MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
         let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
         if let crate::AllocExtra {
@@ -417,37 +418,9 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
             let range = alloc_range(base_offset, place.layout.size);
             if alloc_buffers.is_overlapping(range)
                 && !alloc_clocks
-                    .read_race_free_with_atomic(range, this.machine.data_race.as_ref().unwrap())
+                    .race_free_with_atomic(range, this.machine.data_race.as_ref().unwrap())
             {
-                throw_ub_format!(
-                    "racy imperfectly overlapping atomic access is not possible in the C++20 memory model"
-                );
-            }
-        }
-        Ok(())
-    }
-
-    // Same as above but needs to be ordered with all previous atomic read or writes
-    fn validate_overlapping_atomic_write(
-        &mut self,
-        place: &MPlaceTy<'tcx, Tag>,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_mut();
-        let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
-        if let (
-            crate::AllocExtra {
-                weak_memory: Some(alloc_buffers),
-                data_race: Some(alloc_clocks),
-                ..
-            },
-            crate::Evaluator { data_race: Some(global), .. },
-        ) = this.get_alloc_extra_mut(alloc_id)?
-        {
-            let range = alloc_range(base_offset, place.layout.size);
-            if alloc_buffers.is_overlapping(range)
-                && !alloc_clocks.write_race_free_with_atomic(range, global)
-            {
-                throw_ub_format!("racy imperfectly overlapping atomic access");
+                throw_ub_format!("racy imperfectly overlapping atomic access is not possible in the C++20 memory model");
             }
         }
         Ok(())
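
`validate_overlapping_atomic` only fires when the access range imperfectly overlaps bytes already used by store-buffered atomics, i.e. it touches them without lining up exactly. The diff does not show `is_overlapping` itself, so the sketch below is only an approximation of that idea with hypothetical names (`Range`, `imperfectly_overlaps`), not Miri's implementation.

#[derive(Clone, Copy, PartialEq)]
struct Range {
    start: u64,
    size: u64,
}

impl Range {
    fn end(self) -> u64 {
        self.start + self.size
    }

    // Intersecting but not identical: the situation the error message calls
    // an "imperfectly overlapping atomic access".
    fn imperfectly_overlaps(self, other: Range) -> bool {
        let intersects = self.start < other.end() && other.start < self.end();
        intersects && self != other
    }
}

fn main() {
    let whole_u32 = Range { start: 0, size: 4 }; // bytes covered by an AtomicU32
    let half_u16 = Range { start: 0, size: 2 }; // a 16-bit atomic access into two of those bytes

    assert!(half_u16.imperfectly_overlaps(whole_u32));
    assert!(!whole_u32.imperfectly_overlaps(whole_u32));
    // After this commit such an access is rejected unless it is race-free with
    // every earlier atomic read and write on those bytes.
}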

tests/compile-fail/weak_memory/racing_mixed_size.rs

Lines changed: 0 additions & 1 deletion
@@ -1,5 +1,4 @@
 // ignore-windows: Concurrency on Windows is not supported yet.
-// compile-flags: -Zmiri-ignore-leaks
 
 #![feature(core_intrinsics)]
 

tests/compile-fail/weak_memory/racing_mixed_size_read.rs

Lines changed: 39 additions & 0 deletions
@@ -0,0 +1,39 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+
+#![feature(core_intrinsics)]
+
+use std::sync::atomic::AtomicU32;
+use std::sync::atomic::Ordering::*;
+use std::thread::spawn;
+
+fn static_atomic(val: u32) -> &'static AtomicU32 {
+    let ret = Box::leak(Box::new(AtomicU32::new(val)));
+    ret
+}
+
+fn split_u32_ptr(dword: *const u32) -> *const [u16; 2] {
+    unsafe { std::mem::transmute::<*const u32, *const [u16; 2]>(dword) }
+}
+
+// Racing mixed size reads may cause two loads to read-from
+// the same store but observe different values, which doesn't make
+// sense under the formal model so we forbade this.
+pub fn main() {
+    let x = static_atomic(0);
+
+    let j1 = spawn(move || {
+        x.load(Relaxed);
+    });
+
+    let j2 = spawn(move || {
+        let x_ptr = x as *const AtomicU32 as *const u32;
+        let x_split = split_u32_ptr(x_ptr);
+        unsafe {
+            let hi = &(*x_split)[0] as *const u16;
+            std::intrinsics::atomic_load_relaxed(hi); //~ ERROR: imperfectly overlapping
+        }
+    });
+
+    j1.join().unwrap();
+    j2.join().unwrap();
+}
tests/compile-fail/weak_memory/racing_mixed_size_read.stderr

Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
+warning: thread support is experimental: the scheduler is not preemptive, and can get stuck in spin loops.
+(see https://github.com/rust-lang/miri/issues/1388)
+
+error: Undefined Behavior: racy imperfectly overlapping atomic access is not possible in the C++20 memory model
+  --> $DIR/racing_mixed_size_read.rs:LL:CC
+   |
+LL |             std::intrinsics::atomic_load_relaxed(hi);
+   |             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ racy imperfectly overlapping atomic access is not possible in the C++20 memory model
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+
+   = note: inside closure at $DIR/racing_mixed_size_read.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to previous error; 1 warning emitted
+

tests/run-pass/weak_memory/extra_cpp_unsafe.rs

Lines changed: 0 additions & 56 deletions
@@ -18,10 +18,6 @@ fn static_atomic(val: u32) -> &'static AtomicU32 {
     ret
 }
 
-fn split_u32_ptr(dword: *const u32) -> *const [u16; 2] {
-    unsafe { std::mem::transmute::<*const u32, *const [u16; 2]>(dword) }
-}
-
 // We allow non-atomic and atomic reads to race
 fn racing_mixed_atomicity_read() {
     let x = static_atomic(0);
@@ -41,58 +37,6 @@ fn racing_mixed_atomicity_read() {
     assert_eq!(r2, 42);
 }
 
-// We allow mixed-size atomic reads to race
-fn racing_mixed_size_read() {
-    let x = static_atomic(0);
-
-    let j1 = spawn(move || {
-        x.load(Relaxed);
-    });
-
-    let j2 = spawn(move || {
-        let x_ptr = x as *const AtomicU32 as *const u32;
-        let x_split = split_u32_ptr(x_ptr);
-        unsafe {
-            let hi = &(*x_split)[0] as *const u16;
-            std::intrinsics::atomic_load_relaxed(hi);
-        }
-    });
-
-    j1.join().unwrap();
-    j2.join().unwrap();
-}
-
-// And we allow the combination of both of the above.
-fn racing_mixed_atomicity_and_size_read() {
-    let x = static_atomic(u32::from_be(0xabbafafa));
-
-    let j1 = spawn(move || {
-        x.load(Relaxed);
-    });
-
-    let j2 = spawn(move || {
-        let x_ptr = x as *const AtomicU32 as *const u32;
-        unsafe { *x_ptr };
-    });
-
-    let j3 = spawn(move || {
-        let x_ptr = x as *const AtomicU32 as *const u32;
-        let x_split = split_u32_ptr(x_ptr);
-        unsafe {
-            let hi = &(*x_split)[0] as *const u16;
-            std::intrinsics::atomic_load_relaxed(hi)
-        }
-    });
-
-    j1.join().unwrap();
-    j2.join().unwrap();
-    let r3 = j3.join().unwrap();
-
-    assert_eq!(r3, u16::from_be(0xabba));
-}
-
 pub fn main() {
     racing_mixed_atomicity_read();
-    racing_mixed_size_read();
-    racing_mixed_atomicity_and_size_read();
 }
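
For contrast with the racing variants removed above, here is a sketch (my own example, not a test added by this commit) of a mixed-size read that should still be accepted under the new rule, because the join orders it after the only other atomic access; the module docs point to weak_memory/extra_cpp.rs for the tests that actually cover the non-racing cases.

#![feature(core_intrinsics)]

use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::*;
use std::thread::spawn;

fn static_atomic(val: u32) -> &'static AtomicU32 {
    Box::leak(Box::new(AtomicU32::new(val)))
}

pub fn main() {
    let x = static_atomic(u32::from_be(0xabbafafa));

    let j1 = spawn(move || {
        x.load(Relaxed);
    });
    // The join makes j1's atomic access happen-before everything below,
    // so the mixed-size load no longer races with it.
    j1.join().unwrap();

    let x_ptr = x as *const AtomicU32 as *const u32;
    let x_split = unsafe { std::mem::transmute::<*const u32, *const [u16; 2]>(x_ptr) };
    unsafe {
        let hi = &(*x_split)[0] as *const u16;
        assert_eq!(std::intrinsics::atomic_load_relaxed(hi), u16::from_be(0xabba));
    }
}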
