39
39
//!
40
40
//! The timestamps used in the data-race detector assign each sequence of non-atomic operations
41
41
//! followed by a single atomic or concurrent operation a single timestamp.
42
- //! Write, Read, Write, ThreadJoin will be represented by a single timestamp value on a thread
42
+ //! Write, Read, Write, ThreadJoin will be represented by a single timestamp value on a thread.
43
43
//! This is because extra increment operations between the operations in the sequence are not
44
44
//! required for accurate reporting of data-race values.
45
45
//!
46
- //! If the timestamp was not incremented after the atomic operation, then data-races would not be detected:
47
- //! Example - this should report a data-race but does not:
48
- //! t1: (x,0), atomic[release A], t1=(x+1, 0 ), write(var B),
49
- //! t2: (0,y) , atomic[acquire A], t2=(x+1, y+1), ,write(var B)
50
- //!
51
- //! The timestamp is not incremented before an atomic operation, since the result is indistinguishable
52
- //! from the value not being incremented.
53
- //! t: (x, 0), atomic[release _], (x + 1, 0) || (0, y), atomic[acquire _], (x, _)
54
- //! vs t: (x, 0), atomic[release _], (x + 1, 0) || (0, y), atomic[acquire _], (x+1, _)
55
- //! Both result in the sequence on thread x up to and including the atomic release as happening
56
- //! before the acquire.
46
+ //! As per the paper, a thread's timestamp is only incremented after a release operation is performed,
47
+ //! so some atomic operations that only perform acquires do not increment the timestamp. Due to shared
48
+ //! code, some atomic operations may increment the timestamp when not necessary, but this has no effect
49
+ //! on the data-race detection code.
57
50
//!
58
51
//! FIXME:
59
52
//! currently we have our own local copy of the currently active thread index and names, this is due
@@ -516,7 +509,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
516
509
Ok ( old)
517
510
}
518
511
519
- /// Perform an atomic compare and exchange at a given memory location
512
+ /// Perform an atomic compare and exchange at a given memory location.
520
513
/// On success an atomic RMW operation is performed and on failure
521
514
/// only an atomic read occurs.
522
515
fn atomic_compare_exchange_scalar (
@@ -640,7 +633,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
640
633
// Either Release | AcqRel | SeqCst
641
634
clocks. apply_release_fence ( ) ;
642
635
}
643
- Ok ( ( ) )
636
+
637
+ // Increment timestamp if the fence has release semantics
638
+ Ok ( atomic != AtomicFenceOp :: Acquire )
644
639
} )
645
640
} else {
646
641
Ok ( ( ) )
@@ -651,9 +646,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
651
646
/// Vector clock metadata for a logical memory allocation.
652
647
#[ derive( Debug , Clone ) ]
653
648
pub struct VClockAlloc {
654
- /// Range of Vector clocks, this gives each byte a potentially
655
- /// unqiue set of vector clocks, but merges identical information
656
- /// together for improved efficiency.
649
+ /// Assigns each byte a MemoryCellClocks.
657
650
alloc_ranges : RefCell < RangeMap < MemoryCellClocks > > ,
658
651
659
652
// Pointer to global state.
@@ -935,10 +928,12 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
935
928
true ,
936
929
place_ptr,
937
930
size,
938
- ) ;
931
+ ) . map ( |_| true ) ;
939
932
}
940
933
}
941
- Ok ( ( ) )
934
+
935
+ // This conservatively assumes all operations have release semantics
936
+ Ok ( true )
942
937
} ) ?;
943
938
944
939
// Log changes to atomic memory.
@@ -1159,6 +1154,7 @@ impl GlobalState {
1159
1154
created. join_with ( current) ;
1160
1155
1161
1156
// Advance both threads after the synchronized operation.
1157
+ // Both operations are considered to have release semantics.
1162
1158
current. increment_clock ( current_index) ;
1163
1159
created. increment_clock ( created_index) ;
1164
1160
}
@@ -1185,11 +1181,9 @@ impl GlobalState {
1185
1181
1186
1182
// The join thread happens-before the current thread
1187
1183
// so update the current vector clock.
1184
+ // This is not a release operation, so the clock is not incremented.
1188
1185
current. clock . join ( join_clock) ;
1189
1186
1190
- // Increment clocks after atomic operation.
1191
- current. increment_clock ( current_index) ;
1192
-
1193
1187
// Check the number of active threads, if the value is 1
1194
1188
// then test for potentially disabling multi-threaded execution.
1195
1189
let active_threads = self . active_thread_count . get ( ) ;
@@ -1287,13 +1281,14 @@ impl GlobalState {
1287
1281
/// operation may create.
1288
1282
fn maybe_perform_sync_operation < ' tcx > (
1289
1283
& self ,
1290
- op : impl FnOnce ( VectorIdx , RefMut < ' _ , ThreadClockSet > ) -> InterpResult < ' tcx > ,
1284
+ op : impl FnOnce ( VectorIdx , RefMut < ' _ , ThreadClockSet > ) -> InterpResult < ' tcx , bool > ,
1291
1285
) -> InterpResult < ' tcx > {
1292
1286
if self . multi_threaded . get ( ) {
1293
1287
let ( index, clocks) = self . current_thread_state_mut ( ) ;
1294
- op ( index, clocks) ?;
1295
- let ( _, mut clocks) = self . current_thread_state_mut ( ) ;
1296
- clocks. increment_clock ( index) ;
1288
+ if op ( index, clocks) ? {
1289
+ let ( _, mut clocks) = self . current_thread_state_mut ( ) ;
1290
+ clocks. increment_clock ( index) ;
1291
+ }
1297
1292
}
1298
1293
Ok ( ( ) )
1299
1294
}
@@ -1313,10 +1308,11 @@ impl GlobalState {
1313
1308
1314
1309
/// Acquire a lock, express that the previous call of
1315
1310
/// `validate_lock_release` must happen before this.
1311
+ /// As this is an acquire operation, the thread timestamp is not
1312
+ /// incremented.
1316
1313
pub fn validate_lock_acquire ( & self , lock : & VClock , thread : ThreadId ) {
1317
- let ( index , mut clocks) = self . load_thread_state_mut ( thread) ;
1314
+ let ( _ , mut clocks) = self . load_thread_state_mut ( thread) ;
1318
1315
clocks. clock . join ( & lock) ;
1319
- clocks. increment_clock ( index) ;
1320
1316
}
1321
1317
1322
1318
/// Release a lock handle, express that this happens-before
@@ -1335,8 +1331,8 @@ impl GlobalState {
1335
1331
/// any subsequent calls to `validate_lock_acquire` as well
1336
1332
/// as any previous calls to this function after any
1337
1333
/// `validate_lock_release` calls.
1338
- /// For normal locks this should be equivalent to `validate_lock_release`
1339
- /// this function only exists for joining over the set of concurrent readers
1334
+ /// For normal locks this should be equivalent to `validate_lock_release`.
1335
+ /// This function only exists for joining over the set of concurrent readers
1340
1336
/// in a read-write lock and should not be used for anything else.
1341
1337
pub fn validate_lock_release_shared ( & self , lock : & mut VClock , thread : ThreadId ) {
1342
1338
let ( index, mut clocks) = self . load_thread_state_mut ( thread) ;
0 commit comments