
Commit 6c57229

Fix typos - looked into the paper's handling of timestamps; after looking into it again, it seems the paper only increments the timestamp after release operations, so changed to an approximation of that implementation.
1 parent 55fc552 commit 6c57229

2 files changed: +26 -46 lines changed


src/data_race.rs

Lines changed: 26 additions & 30 deletions
@@ -39,21 +39,14 @@
 //!
 //! The timestamps used in the data-race detector assign each sequence of non-atomic operations
 //! followed by a single atomic or concurrent operation a single timestamp.
-//! Write, Read, Write, ThreadJoin will be represented by a single timestamp value on a thread
+//! Write, Read, Write, ThreadJoin will be represented by a single timestamp value on a thread.
 //! This is because extra increment operations between the operations in the sequence are not
 //! required for accurate reporting of data-race values.
 //!
-//! If the timestamp was not incremented after the atomic operation, then data-races would not be detected:
-//! Example - this should report a data-race but does not:
-//! t1: (x,0), atomic[release A], t1=(x+1, 0 ), write(var B),
-//! t2: (0,y) , atomic[acquire A], t2=(x+1, y+1), ,write(var B)
-//!
-//! The timestamp is not incremented before an atomic operation, since the result is indistinguishable
-//! from the value not being incremented.
-//! t: (x, 0), atomic[release _], (x + 1, 0) || (0, y), atomic[acquire _], (x, _)
-//! vs t: (x, 0), atomic[release _], (x + 1, 0) || (0, y), atomic[acquire _], (x+1, _)
-//! Both result in the sequence on thread x up to and including the atomic release as happening
-//! before the acquire.
+//! As per the paper, a thread's timestamp is only incremented after a release operation is performed,
+//! so some atomic operations that only perform acquires do not increment the timestamp. Due to shared
+//! code, some atomic operations may increment the timestamp when not necessary, but this has no effect
+//! on the data-race detection code.
 //!
 //! FIXME:
 //! currently we have our own local copy of the currently active thread index and names, this is due
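The reworked doc comment states the rule from the paper: only release operations need to advance a thread's local timestamp, while acquire operations merely join clocks. Below is a minimal sketch of that rule using toy types, not Miri's actual VClock or ThreadClockSet:

// Toy vector clock: one logical timestamp slot per thread (illustration only).
#[derive(Clone, Debug, Default, PartialEq)]
struct VectorClock(Vec<u64>);

impl VectorClock {
    fn new(threads: usize) -> Self {
        VectorClock(vec![0; threads])
    }

    // Pointwise maximum, used when acquiring a clock released by another thread.
    fn join(&mut self, other: &VectorClock) {
        for (mine, theirs) in self.0.iter_mut().zip(&other.0) {
            *mine = (*mine).max(*theirs);
        }
    }
}

struct ThreadClock {
    index: usize,       // this thread's slot in the vector clock
    clock: VectorClock, // what this thread has observed so far
}

impl ThreadClock {
    // Release: publish the current clock into the synchronisation object, then
    // increment this thread's own component so later operations on the thread
    // are ordered strictly after the released value.
    fn release(&mut self, sync_clock: &mut VectorClock) {
        sync_clock.join(&self.clock);
        self.clock.0[self.index] += 1;
    }

    // Acquire: only join with the released clock; no increment is needed,
    // mirroring the changes to thread join and lock acquire further down.
    fn acquire(&mut self, sync_clock: &VectorClock) {
        self.clock.join(sync_clock);
    }
}

Incrementing on an acquire as well would be harmless but unnecessary, which is why the redundant increments are removed in the hunks below.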
@@ -516,7 +509,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         Ok(old)
     }

-    /// Perform an atomic compare and exchange at a given memory location
+    /// Perform an atomic compare and exchange at a given memory location.
     /// On success an atomic RMW operation is performed and on failure
     /// only an atomic read occurs.
     fn atomic_compare_exchange_scalar(
@@ -640,7 +633,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
                 // Either Release | AcqRel | SeqCst
                 clocks.apply_release_fence();
             }
-            Ok(())
+
+            // Increment timestamp if the fence has release semantics.
+            Ok(atomic != AtomicFenceOp::Acquire)
         })
     } else {
         Ok(())
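The fence hook now reports back whether the thread's timestamp should be advanced, which is the case for every ordering except a pure acquire fence. A tiny standalone version of that predicate, with a hypothetical FenceOp enum standing in for Miri's AtomicFenceOp:

// Hypothetical stand-in for Miri's fence orderings (illustration only).
#[derive(PartialEq)]
enum FenceOp {
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}

// True for Release, AcqRel and SeqCst fences: they have release semantics,
// so the caller should increment the executing thread's timestamp afterwards.
fn fence_has_release_semantics(op: FenceOp) -> bool {
    op != FenceOp::Acquire
}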
@@ -651,9 +646,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
 /// Vector clock metadata for a logical memory allocation.
 #[derive(Debug, Clone)]
 pub struct VClockAlloc {
-    /// Range of Vector clocks, this gives each byte a potentially
-    /// unqiue set of vector clocks, but merges identical information
-    /// together for improved efficiency.
+    /// Assigning each byte a MemoryCellClocks.
     alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,

     // Pointer to global state.
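The reworded field doc keeps the original intent: conceptually every byte of the allocation has its own clock data, but neighbouring bytes with identical data are stored as a single range. A rough sketch of that storage idea with a toy range map, not Miri's RangeMap:

// Toy range map: each entry covers `len` consecutive bytes that share the same
// data, so an allocation touched uniformly stays a single entry rather than
// one entry per byte.
struct RangeEntry<T> {
    len: u64,
    data: T,
}

struct ByteRangeMap<T> {
    entries: Vec<RangeEntry<T>>,
}

impl<T> ByteRangeMap<T> {
    fn new(size: u64, init: T) -> Self {
        ByteRangeMap { entries: vec![RangeEntry { len: size, data: init }] }
    }

    // Find the data covering a given byte offset by walking the ranges.
    fn get(&self, offset: u64) -> &T {
        let mut start = 0;
        for entry in &self.entries {
            if offset < start + entry.len {
                return &entry.data;
            }
            start += entry.len;
        }
        panic!("offset out of allocation bounds");
    }
}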
@@ -935,10 +928,12 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
                     true,
                     place_ptr,
                     size,
-                );
+                ).map(|_| true);
             }
         }
-        Ok(())
+
+        // This conservatively assumes all operations have release semantics.
+        Ok(true)
     })?;

     // Log changes to atomic memory.
@@ -1159,6 +1154,7 @@ impl GlobalState {
         created.join_with(current);

         // Advance both threads after the synchronized operation.
+        // Both operations are considered to have release semantics.
         current.increment_clock(current_index);
         created.increment_clock(created_index);
     }
@@ -1185,11 +1181,9 @@ impl GlobalState {

         // The join thread happens-before the current thread
         // so update the current vector clock.
+        // This is not a release operation, so the clock is not incremented.
         current.clock.join(join_clock);

-        // Increment clocks after atomic operation.
-        current.increment_clock(current_index);
-
         // Check the number of active threads, if the value is 1
         // then test for potentially disabling multi-threaded execution.
         let active_threads = self.active_thread_count.get();
@@ -1287,13 +1281,14 @@ impl GlobalState {
     /// operation may create.
     fn maybe_perform_sync_operation<'tcx>(
         &self,
-        op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx>,
+        op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
     ) -> InterpResult<'tcx> {
         if self.multi_threaded.get() {
             let (index, clocks) = self.current_thread_state_mut();
-            op(index, clocks)?;
-            let (_, mut clocks) = self.current_thread_state_mut();
-            clocks.increment_clock(index);
+            if op(index, clocks)? {
+                let (_, mut clocks) = self.current_thread_state_mut();
+                clocks.increment_clock(index);
+            }
         }
         Ok(())
     }
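With this change the closure passed to maybe_perform_sync_operation decides whether the synchronisation performed a release, and only then does the caller advance the thread's clock. A simplified sketch of the same pattern, with toy types in place of Miri's InterpResult and ThreadClockSet:

// Toy per-thread clock state (illustration only).
struct ClockSet {
    timestamp: u64,
}

impl ClockSet {
    fn increment_clock(&mut self) {
        self.timestamp += 1;
    }
}

// Run a synchronisation operation; the closure returns Ok(true) when the
// operation had release semantics, in which case the timestamp is advanced.
fn maybe_perform_sync_operation<E>(
    clocks: &mut ClockSet,
    op: impl FnOnce(&mut ClockSet) -> Result<bool, E>,
) -> Result<(), E> {
    if op(clocks)? {
        clocks.increment_clock();
    }
    Ok(())
}

// Example: an acquire-only operation returns false, so no increment happens.
fn acquire_only(clocks: &mut ClockSet) -> Result<(), ()> {
    maybe_perform_sync_operation(clocks, |_clocks| Ok(false))
}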
@@ -1313,10 +1308,11 @@ impl GlobalState {

     /// Acquire a lock, express that the previous call of
     /// `validate_lock_release` must happen before this.
+    /// As this is an acquire operation, the thread timestamp is not
+    /// incremented.
     pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
-        let (index, mut clocks) = self.load_thread_state_mut(thread);
+        let (_, mut clocks) = self.load_thread_state_mut(thread);
         clocks.clock.join(&lock);
-        clocks.increment_clock(index);
     }

     /// Release a lock handle, express that this happens-before
@@ -1335,8 +1331,8 @@ impl GlobalState {
     /// any subsequent calls to `validate_lock_acquire` as well
     /// as any previous calls to this function after any
     /// `validate_lock_release` calls.
-    /// For normal locks this should be equivalent to `validate_lock_release`
-    /// this function only exists for joining over the set of concurrent readers
+    /// For normal locks this should be equivalent to `validate_lock_release`.
+    /// This function only exists for joining over the set of concurrent readers
     /// in a read-write lock and should not be used for anything else.
     pub fn validate_lock_release_shared(&self, lock: &mut VClock, thread: ThreadId) {
         let (index, mut clocks) = self.load_thread_state_mut(thread);
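validate_lock_release_shared exists so that several concurrent readers of a read-write lock can publish into the same clock: each reader joins its clock into the lock's clock on unlock, and a writer that later acquires the lock is ordered after every reader. A small sketch of that joining step with plain Vec<u64> clocks standing in for Miri's VClock:

// Shared release: fold this reader's clock into the rwlock's clock, so the
// lock clock becomes the pointwise maximum over all readers that released.
fn release_shared(lock_clock: &mut Vec<u64>, thread_clock: &[u64]) {
    for (l, t) in lock_clock.iter_mut().zip(thread_clock) {
        *l = (*l).max(*t);
    }
}

// Acquire: the next writer joins with the lock clock and is therefore
// ordered after every reader that released into it; no increment needed.
fn acquire(thread_clock: &mut Vec<u64>, lock_clock: &[u64]) {
    for (t, l) in thread_clock.iter_mut().zip(lock_clock) {
        *t = (*t).max(*l);
    }
}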

src/shims/posix/sync.rs

Lines changed: 0 additions & 16 deletions
@@ -62,8 +62,6 @@ fn mutex_get_kind<'mir, 'tcx: 'mir>(
     mutex_op: OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
     let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
-    //FIXME: this has been made atomic to fix data-race reporting inside the internal
-    // mutex implementation, it may not need to be atomic.
     ecx.read_scalar_at_offset_atomic(
         mutex_op, offset, ecx.machine.layouts.i32,
         AtomicReadOp::Relaxed
@@ -76,8 +74,6 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>(
     kind: impl Into<ScalarMaybeUninit<Tag>>,
 ) -> InterpResult<'tcx, ()> {
     let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
-    //FIXME: this has been made atomic to fix data-race reporting inside the internal
-    // mutex implementation, it may not need to be atomic.
     ecx.write_scalar_at_offset_atomic(
         mutex_op, offset, kind, ecx.machine.layouts.i32,
         AtomicWriteOp::Relaxed
@@ -88,8 +84,6 @@ fn mutex_get_id<'mir, 'tcx: 'mir>(
     ecx: &MiriEvalContext<'mir, 'tcx>,
     mutex_op: OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
-    //FIXME: this has been made atomic to fix data-race reporting inside the internal
-    // mutex implementation, it may not need to be atomic.
     ecx.read_scalar_at_offset_atomic(
         mutex_op, 4, ecx.machine.layouts.u32,
         AtomicReadOp::Relaxed
@@ -101,8 +95,6 @@ fn mutex_set_id<'mir, 'tcx: 'mir>(
     mutex_op: OpTy<'tcx, Tag>,
     id: impl Into<ScalarMaybeUninit<Tag>>,
 ) -> InterpResult<'tcx, ()> {
-    //FIXME: this has been made atomic to fix data-race reporting inside the internal
-    // mutex implementation, it may not need to be atomic.
     ecx.write_scalar_at_offset_atomic(
         mutex_op, 4, id, ecx.machine.layouts.u32,
         AtomicWriteOp::Relaxed
@@ -135,8 +127,6 @@ fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
 fn rwlock_get_id<'mir, 'tcx: 'mir>(
     ecx: &MiriEvalContext<'mir, 'tcx>,
     rwlock_op: OpTy<'tcx, Tag>,
-    //FIXME: this has been made atomic to fix data-race reporting inside the internal
-    // rw-lock implementation, it may not need to be atomic.
 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
     ecx.read_scalar_at_offset_atomic(
         rwlock_op, 4, ecx.machine.layouts.u32,
@@ -149,8 +139,6 @@ fn rwlock_set_id<'mir, 'tcx: 'mir>(
     rwlock_op: OpTy<'tcx, Tag>,
     id: impl Into<ScalarMaybeUninit<Tag>>,
 ) -> InterpResult<'tcx, ()> {
-    //FIXME: this has been made atomic to fix data-race reporting inside the internal
-    // rw-lock implementation, it may not need to be atomic.
     ecx.write_scalar_at_offset_atomic(
         rwlock_op, 4, id, ecx.machine.layouts.u32,
         AtomicWriteOp::Relaxed
@@ -207,8 +195,6 @@ fn cond_get_id<'mir, 'tcx: 'mir>(
     ecx: &MiriEvalContext<'mir, 'tcx>,
     cond_op: OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
-    //FIXME: this has been made atomic to fix data-race reporting inside the internal
-    // cond-var implementation, it may not need to be atomic.
     ecx.read_scalar_at_offset_atomic(
         cond_op, 4, ecx.machine.layouts.u32,
         AtomicReadOp::Relaxed
@@ -220,8 +206,6 @@ fn cond_set_id<'mir, 'tcx: 'mir>(
     cond_op: OpTy<'tcx, Tag>,
     id: impl Into<ScalarMaybeUninit<Tag>>,
 ) -> InterpResult<'tcx, ()> {
-    //FIXME: this has been made atomic to fix data-race reporting inside the internal
-    // cond-var implementation, it may not need to be atomic.
     ecx.write_scalar_at_offset_atomic(
         cond_op, 4, id, ecx.machine.layouts.u32,
         AtomicWriteOp::Relaxed
