@@ -287,20 +287,15 @@ impl MemoryCellClocks {
         Ok(())
     }
 
-    /// Checks if the memory cell write races with any prior atomic read or write
-    fn write_race_free_with_atomic(&mut self, clocks: &ThreadClockSet) -> bool {
+    /// Checks if the memory cell access is ordered with all prior atomic reads and writes
+    fn race_free_with_atomic(&self, clocks: &ThreadClockSet) -> bool {
         if let Some(atomic) = self.atomic() {
             atomic.read_vector <= clocks.clock && atomic.write_vector <= clocks.clock
         } else {
             true
         }
     }
 
-    /// Checks if the memory cell read races with any prior atomic write
-    fn read_race_free_with_atomic(&self, clocks: &ThreadClockSet) -> bool {
-        if let Some(atomic) = self.atomic() { atomic.write_vector <= clocks.clock } else { true }
-    }
-
     /// Update memory cell data-race tracking for atomic
     /// load relaxed semantics, is a no-op if this memory was
     /// not used previously as atomic memory.
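For context on the consolidated check above: a plain (non-atomic) access to the cell is considered ordered with earlier atomic activity exactly when both the atomic read vector and the atomic write vector happen-before the current thread's clock. Below is a minimal, self-contained sketch of that partial-order comparison; the field names mirror the diff, but `VClock`, `AtomicClocks`, and the free function are toy stand-ins, not Miri's actual types.

```rust
/// Toy vector clock: one logical timestamp per thread index.
struct VClock(Vec<u64>);

impl VClock {
    /// Happens-before in the partial order: every component of `self`
    /// must be covered by the corresponding component of `other`
    /// (missing entries are treated as 0).
    fn le(&self, other: &VClock) -> bool {
        (0..self.0.len()).all(|i| self.0[i] <= other.0.get(i).copied().unwrap_or(0))
    }
}

/// Toy stand-in for the per-cell atomic metadata from the diff.
struct AtomicClocks {
    read_vector: VClock,
    write_vector: VClock,
}

/// Mirrors the consolidated `race_free_with_atomic`: the access is race-free
/// iff all prior atomic reads *and* writes happen-before the current clock,
/// or the cell was never used atomically.
fn race_free_with_atomic(atomic: Option<&AtomicClocks>, clock: &VClock) -> bool {
    match atomic {
        Some(a) => a.read_vector.le(clock) && a.write_vector.le(clock),
        None => true,
    }
}

fn main() {
    let clock = VClock(vec![3, 1]);
    let racy = AtomicClocks {
        read_vector: VClock(vec![2, 1]),  // covered by `clock`
        write_vector: VClock(vec![0, 2]), // component 1 is ahead of `clock`
    };
    assert!(!race_free_with_atomic(Some(&racy), &clock)); // flagged as a race
    assert!(race_free_with_atomic(None, &clock));         // never-atomic cell is fine
}
```

The removed read-only helper checked just `write_vector`; keeping the full conjunction in one place lets the same helper guard both reads and writes, at the cost of a slightly stronger condition for reads than before.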
@@ -528,7 +523,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         // the *value* (including the associated provenance if this is an AtomicPtr) at this location.
         // Only metadata on the location itself is used.
         let scalar = this.allow_data_races_ref(move |this| this.read_scalar(&place.into()))?;
-        this.validate_overlapping_atomic_read(place)?;
+        this.validate_overlapping_atomic(place)?;
         this.buffered_atomic_read(place, atomic, scalar, || {
             this.validate_atomic_load(place, atomic)
         })
@@ -542,7 +537,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicWriteOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        this.validate_overlapping_atomic_write(dest)?;
+        this.validate_overlapping_atomic(dest)?;
         this.allow_data_races_mut(move |this| this.write_scalar(val, &(*dest).into()))?;
         this.validate_atomic_store(dest, atomic)?;
         // FIXME: it's not possible to get the value before write_scalar. A read_scalar will cause
@@ -563,7 +558,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
         let this = self.eval_context_mut();
 
-        this.validate_overlapping_atomic_write(place)?;
+        this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
 
         // Atomics wrap around on overflow.
@@ -592,7 +587,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
         let this = self.eval_context_mut();
 
-        this.validate_overlapping_atomic_write(place)?;
+        this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
         this.allow_data_races_mut(|this| this.write_scalar(new, &(*place).into()))?;
 
@@ -613,7 +608,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
         let this = self.eval_context_mut();
 
-        this.validate_overlapping_atomic_write(place)?;
+        this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
         let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar()?.to_bool()?;
 
@@ -656,7 +651,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         use rand::Rng as _;
         let this = self.eval_context_mut();
 
-        this.validate_overlapping_atomic_write(place)?;
+        this.validate_overlapping_atomic(place)?;
         // Failure ordering cannot be stronger than success ordering, therefore first attempt
         // to read with the failure ordering and if successful then try again with the success
         // read ordering and write in the success case.
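The comment in this hunk captures the ordering trick used for compare-exchange: since the failure ordering can never be stronger than the success ordering, the value is first read under the failure ordering, and only on a successful comparison is it read again under the success ordering before the write. A toy sketch of that control flow; `Order`, `read_with`, and `write_with` are illustrative stand-ins, not Miri's interpreter hooks.

```rust
/// Toy ordering type; stand-in for the interpreter's atomic read/RMW ops.
#[derive(Clone, Copy, Debug)]
enum Order { Relaxed, SeqCst }

/// Hypothetical hooks that only log which ordering was applied.
fn read_with(cell: &u64, order: Order) -> u64 {
    println!("read under {:?}", order);
    *cell
}
fn write_with(cell: &mut u64, new: u64, order: Order) {
    println!("write under {:?}", order);
    *cell = new;
}

/// Sketch of the flow described in the comment: read under the (weaker)
/// failure ordering first; only when the comparison succeeds, read again
/// under the success ordering and perform the write.
fn compare_exchange(
    cell: &mut u64,
    expected: u64,
    new: u64,
    success: Order,
    failure: Order,
) -> Result<u64, u64> {
    let old = read_with(cell, failure);
    if old == expected {
        let old = read_with(cell, success);
        write_with(cell, new, success);
        Ok(old)
    } else {
        Err(old)
    }
}

fn main() {
    let mut cell = 5u64;
    assert_eq!(compare_exchange(&mut cell, 5, 9, Order::SeqCst, Order::Relaxed), Ok(5));
    assert_eq!(cell, 9);
    assert_eq!(compare_exchange(&mut cell, 5, 1, Order::SeqCst, Order::Relaxed), Err(9));
}
```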
@@ -706,7 +701,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicReadOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
-        this.validate_overlapping_atomic_read(place)?;
+        this.validate_overlapping_atomic(place)?;
         this.validate_atomic_op(
             place,
             atomic,
@@ -729,7 +724,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicWriteOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        this.validate_overlapping_atomic_write(place)?;
+        this.validate_overlapping_atomic(place)?;
         this.validate_atomic_op(
             place,
             atomic,
@@ -755,7 +750,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
         let release = matches!(atomic, Release | AcqRel | SeqCst);
         let this = self.eval_context_mut();
-        this.validate_overlapping_atomic_write(place)?;
+        this.validate_overlapping_atomic(place)?;
         this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
             if acquire {
                 memory.load_acquire(clocks, index)?;
@@ -941,9 +936,9 @@ impl VClockAlloc {
         )
     }
 
-    /// Detect racing atomic writes (not data races)
+    /// Detect racing atomic reads and writes (not data races)
     /// on every byte of the current access range
-    pub(super) fn read_race_free_with_atomic<'tcx>(
+    pub(super) fn race_free_with_atomic<'tcx>(
         &self,
         range: AllocRange,
         global: &GlobalState,
@@ -952,26 +947,7 @@ impl VClockAlloc {
             let (_, clocks) = global.current_thread_state();
             let alloc_ranges = self.alloc_ranges.borrow();
             for (_, range) in alloc_ranges.iter(range.start, range.size) {
-                if !range.read_race_free_with_atomic(&clocks) {
-                    return false;
-                }
-            }
-        }
-        true
-    }
-
-    /// Detect racing atomic read and writes (not data races)
-    /// on every byte of the current access range
-    pub(super) fn write_race_free_with_atomic<'tcx>(
-        &mut self,
-        range: AllocRange,
-        global: &GlobalState,
-    ) -> bool {
-        if global.race_detecting() {
-            let (_, clocks) = global.current_thread_state();
-            let alloc_ranges = self.alloc_ranges.get_mut();
-            for (_, range) in alloc_ranges.iter_mut(range.start, range.size) {
-                if !range.write_race_free_with_atomic(&clocks) {
+                if !range.race_free_with_atomic(&clocks) {
                     return false;
                 }
             }
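At the allocation level, the surviving `race_free_with_atomic` (last hunk) just folds the per-cell answer over every byte range the access overlaps, and it now only needs `&self` and `alloc_ranges.borrow()` where the removed write-side helper required `&mut self` and `get_mut()`. A hedged, standalone sketch of that fold, with the per-cell result reduced to a boolean; `CellRange` and `access_race_free` are toy names, while the real code walks Miri's `RangeMap` via `alloc_ranges.iter(range.start, range.size)`.

```rust
use std::ops::Range;

/// Toy stand-in for one clock entry covering a byte range of the allocation.
struct CellRange {
    bytes: Range<u64>,
    /// Result of the per-cell `race_free_with_atomic` check for this range.
    race_free: bool,
}

/// Mirrors `VClockAlloc::race_free_with_atomic`: the whole access is race-free
/// only if every overlapping cell range is race-free; bail out on the first hit.
fn access_race_free(cells: &[CellRange], access: Range<u64>) -> bool {
    cells
        .iter()
        .filter(|c| c.bytes.start < access.end && access.start < c.bytes.end)
        .all(|c| c.race_free)
}

fn main() {
    let cells = vec![
        CellRange { bytes: 0..4, race_free: true },
        CellRange { bytes: 4..8, race_free: false },
    ];
    assert!(access_race_free(&cells, 0..4));  // only the clean range overlaps
    assert!(!access_race_free(&cells, 2..8)); // the racing range is included
}
```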