
Commit 927ab19

make some operations private to the data race detector / atomic intrinsic file

1 parent cd2edbf · commit 927ab19

3 files changed: +110 -107 lines changed

src/concurrency/data_race.rs (+100 -100)
@@ -464,33 +464,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         this.write_scalar_atomic(value.into(), &value_place, atomic)
     }

-    /// Checks that an atomic access is legal at the given place.
-    fn atomic_access_check(&self, place: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
-        let this = self.eval_context_ref();
-        // Check alignment requirements. Atomics must always be aligned to their size,
-        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-        // be 8-aligned).
-        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
-            place.ptr,
-            place.layout.size,
-            align,
-            CheckInAllocMsg::MemoryAccessTest,
-        )?;
-        // Ensure the allocation is mutable. Even failing (read-only) compare_exchange need mutable
-        // memory on many targets (i.e., they segfault if taht memory is mapped read-only), and
-        // atomic loads can be implemented via compare_exchange on some targets. See
-        // <https://github.com/rust-lang/miri/issues/2463>.
-        // We avoid `get_ptr_alloc` since we do *not* want to run the access hooks -- the actual
-        // access will happen later.
-        let (alloc_id, _offset, _prov) =
-            this.ptr_try_get_alloc_id(place.ptr).expect("there are no zero-sized atomic accesses");
-        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
-            throw_ub_format!("atomic operations cannot be performed on read-only memory");
-        }
-        Ok(())
-    }
-
     /// Perform an atomic read operation at the memory location.
     fn read_scalar_atomic(
         &self,
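The alignment rule enforced by `atomic_access_check` above (atomics must be aligned to their access size, even when the wrapped type's own alignment is smaller) can be illustrated with a minimal standalone sketch. The function below is illustrative only and not part of Miri's API:

```rust
/// Standalone sketch: an atomic access of `size_bytes` must be aligned to
/// `size_bytes` itself, e.g. an 8-byte atomic on a 32-bit target still needs
/// 8-byte alignment even though `u64` there may only be 4-aligned.
fn check_atomic_align(addr: usize, size_bytes: usize) -> Result<(), String> {
    assert!(size_bytes.is_power_of_two(), "atomic access sizes are powers of two");
    if addr % size_bytes != 0 {
        return Err(format!("address {addr:#x} is not {size_bytes}-byte aligned"));
    }
    Ok(())
}

fn main() {
    assert!(check_atomic_align(0x1000, 8).is_ok());
    // 4-aligned but not 8-aligned: rejected, matching the stricter atomic check.
    assert!(check_atomic_align(0x1004, 8).is_err());
}
```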
@@ -682,80 +655,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         Ok(res)
     }

-    /// Update the data-race detector for an atomic read occurring at the
-    /// associated memory-place and on the current thread.
-    fn validate_atomic_load(
-        &self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicReadOrd,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_ref();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(
-            place,
-            atomic,
-            "Atomic Load",
-            move |memory, clocks, index, atomic| {
-                if atomic == AtomicReadOrd::Relaxed {
-                    memory.load_relaxed(&mut *clocks, index)
-                } else {
-                    memory.load_acquire(&mut *clocks, index)
-                }
-            },
-        )
-    }
-
-    /// Update the data-race detector for an atomic write occurring at the
-    /// associated memory-place and on the current thread.
-    fn validate_atomic_store(
-        &mut self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicWriteOrd,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_mut();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(
-            place,
-            atomic,
-            "Atomic Store",
-            move |memory, clocks, index, atomic| {
-                if atomic == AtomicWriteOrd::Relaxed {
-                    memory.store_relaxed(clocks, index)
-                } else {
-                    memory.store_release(clocks, index)
-                }
-            },
-        )
-    }
-
-    /// Update the data-race detector for an atomic read-modify-write occurring
-    /// at the associated memory place and on the current thread.
-    fn validate_atomic_rmw(
-        &mut self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicRwOrd,
-    ) -> InterpResult<'tcx> {
-        use AtomicRwOrd::*;
-        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
-        let release = matches!(atomic, Release | AcqRel | SeqCst);
-        let this = self.eval_context_mut();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
-            if acquire {
-                memory.load_acquire(clocks, index)?;
-            } else {
-                memory.load_relaxed(clocks, index)?;
-            }
-            if release {
-                memory.rmw_release(clocks, index)
-            } else {
-                memory.rmw_relaxed(clocks, index)
-            }
-        })
-    }
-
     /// Update the data-race detector for an atomic fence on the current thread.
-    fn validate_atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
+    fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         if let Some(data_race) = &mut this.machine.data_race {
             data_race.maybe_perform_sync_operation(&this.machine.threads, |index, mut clocks| {
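`validate_atomic_rmw` splits a read-modify-write ordering into its load half and its store half before driving the clock updates. A minimal sketch of that decomposition, using an illustrative stand-in for `AtomicRwOrd` rather than the real Miri type:

```rust
/// Illustrative stand-in for the RMW ordering enum. An RMW ordering
/// decomposes into "does the load half acquire?" and "does the store half
/// release?", exactly the two `matches!` checks in the diff above.
#[derive(Clone, Copy, Debug, PartialEq)]
enum RwOrd { Relaxed, Acquire, Release, AcqRel, SeqCst }

fn rmw_halves(ord: RwOrd) -> (bool, bool) {
    use RwOrd::*;
    let acquire = matches!(ord, Acquire | AcqRel | SeqCst);
    let release = matches!(ord, Release | AcqRel | SeqCst);
    (acquire, release)
}

fn main() {
    assert_eq!(rmw_halves(RwOrd::Relaxed), (false, false));
    assert_eq!(rmw_halves(RwOrd::Acquire), (true, false));
    assert_eq!(rmw_halves(RwOrd::Release), (false, true));
    assert_eq!(rmw_halves(RwOrd::AcqRel), (true, true));
    assert_eq!(rmw_halves(RwOrd::SeqCst), (true, true));
}
```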
@@ -1081,6 +982,105 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         result
     }

+    /// Checks that an atomic access is legal at the given place.
+    fn atomic_access_check(&self, place: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
+        let this = self.eval_context_ref();
+        // Check alignment requirements. Atomics must always be aligned to their size,
+        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+        // be 8-aligned).
+        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+        this.check_ptr_access_align(
+            place.ptr,
+            place.layout.size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+        )?;
+        // Ensure the allocation is mutable. Even failing (read-only) compare_exchange need mutable
+        // memory on many targets (i.e., they segfault if taht memory is mapped read-only), and
+        // atomic loads can be implemented via compare_exchange on some targets. See
+        // <https://github.com/rust-lang/miri/issues/2463>.
+        // We avoid `get_ptr_alloc` since we do *not* want to run the access hooks -- the actual
+        // access will happen later.
+        let (alloc_id, _offset, _prov) =
+            this.ptr_try_get_alloc_id(place.ptr).expect("there are no zero-sized atomic accesses");
+        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
+            throw_ub_format!("atomic operations cannot be performed on read-only memory");
+        }
+        Ok(())
+    }
+
+    /// Update the data-race detector for an atomic read occurring at the
+    /// associated memory-place and on the current thread.
+    fn validate_atomic_load(
+        &self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicReadOrd,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_ref();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(
+            place,
+            atomic,
+            "Atomic Load",
+            move |memory, clocks, index, atomic| {
+                if atomic == AtomicReadOrd::Relaxed {
+                    memory.load_relaxed(&mut *clocks, index)
+                } else {
+                    memory.load_acquire(&mut *clocks, index)
+                }
+            },
+        )
+    }
+
+    /// Update the data-race detector for an atomic write occurring at the
+    /// associated memory-place and on the current thread.
+    fn validate_atomic_store(
+        &mut self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicWriteOrd,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(
+            place,
+            atomic,
+            "Atomic Store",
+            move |memory, clocks, index, atomic| {
+                if atomic == AtomicWriteOrd::Relaxed {
+                    memory.store_relaxed(clocks, index)
+                } else {
+                    memory.store_release(clocks, index)
+                }
+            },
+        )
+    }
+
+    /// Update the data-race detector for an atomic read-modify-write occurring
+    /// at the associated memory place and on the current thread.
+    fn validate_atomic_rmw(
+        &mut self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicRwOrd,
+    ) -> InterpResult<'tcx> {
+        use AtomicRwOrd::*;
+        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
+        let release = matches!(atomic, Release | AcqRel | SeqCst);
+        let this = self.eval_context_mut();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
+            if acquire {
+                memory.load_acquire(clocks, index)?;
+            } else {
+                memory.load_relaxed(clocks, index)?;
+            }
+            if release {
+                memory.rmw_release(clocks, index)
+            } else {
+                memory.rmw_relaxed(clocks, index)
+            }
+        })
+    }
+
     /// Generic atomic operation implementation
     fn validate_atomic_op<A: Debug + Copy>(
         &self,
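The `load_acquire`/`load_relaxed` (and `store_release`/`store_relaxed`) calls above are vector-clock operations inside the race detector. Below is a minimal, self-contained sketch of the acquire side under simplified assumptions (fixed thread count, no per-access read/write bookkeeping); it is not Miri's actual clock implementation:

```rust
use std::cmp::max;

/// One logical counter per thread (simplified: fixed-size, preallocated).
type VClock = Vec<u32>;

/// Clock state an atomic location would carry: the clock published by the
/// most recent release store or release RMW to that location.
struct AtomicLoc {
    release_clock: VClock,
}

/// Acquire load: join the reader's clock with the location's release clock,
/// so everything that happened-before the release now happens-before the
/// reader's subsequent operations.
fn load_acquire(thread_clock: &mut VClock, loc: &AtomicLoc) {
    for (t, r) in thread_clock.iter_mut().zip(&loc.release_clock) {
        *t = max(*t, *r);
    }
}

/// Relaxed load: no synchronization edge is created; only the access itself
/// would be recorded (that bookkeeping is omitted in this sketch).
fn load_relaxed(_thread_clock: &mut VClock, _loc: &AtomicLoc) {}

fn main() {
    let loc = AtomicLoc { release_clock: vec![0, 5] };
    let mut reader = vec![3, 1];
    load_acquire(&mut reader, &loc);
    assert_eq!(reader, vec![3, 5]); // reader now knows about thread 1's event 5
    load_relaxed(&mut reader, &loc); // no join: clock unchanged
    assert_eq!(reader, vec![3, 5]);
}
```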

src/shims/intrinsics/atomic.rs (+8 -5)
@@ -67,8 +67,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             ["load", ord] => this.atomic_load(args, dest, read_ord(ord)?)?,
             ["store", ord] => this.atomic_store(args, write_ord(ord)?)?,

-            ["fence", ord] => this.atomic_fence(args, fence_ord(ord)?)?,
-            ["singlethreadfence", ord] => this.compiler_fence(args, fence_ord(ord)?)?,
+            ["fence", ord] => this.atomic_fence_intrinsic(args, fence_ord(ord)?)?,
+            ["singlethreadfence", ord] => this.compiler_fence_intrinsic(args, fence_ord(ord)?)?,

             ["xchg", ord] => this.atomic_exchange(args, dest, rw_ord(ord)?)?,
             ["cxchg", ord1, ord2] =>
@@ -117,7 +117,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         }
         Ok(())
     }
+}

+impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
+trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     fn atomic_load(
         &mut self,
         args: &[OpTy<'tcx, Provenance>],
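The `EvalContextPrivExt` trait introduced here follows the extension-trait pattern used throughout Miri: the public trait keeps the externally callable entry points, while helpers move to a trait with no `pub`, so other modules can no longer reach them. A condensed, self-contained sketch of that pattern with illustrative names (not Miri's real types):

```rust
/// Illustrative stand-in for MiriEvalContext.
pub struct EvalContext;

/// Public extension trait: the only surface other modules are meant to use.
pub trait EvalContextExt {
    fn fence_intrinsic(&mut self);
}

impl EvalContextExt for EvalContext {
    fn fence_intrinsic(&mut self) {
        // Forwarding works because the private trait is in scope *here*.
        self.fence_impl();
    }
}

/// No `pub`: code outside this module cannot name this trait, so it cannot
/// call `fence_impl` directly. This mirrors what the commit does with
/// `EvalContextPrivExt` in src/shims/intrinsics/atomic.rs.
trait EvalContextPrivExt {
    fn fence_impl(&mut self);
}

impl EvalContextPrivExt for EvalContext {
    fn fence_impl(&mut self) {
        println!("performing the fence");
    }
}

fn main() {
    let mut ctx = EvalContext;
    ctx.fence_intrinsic();
}
```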
@@ -153,7 +156,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         Ok(())
     }

-    fn compiler_fence(
+    fn compiler_fence_intrinsic(
         &mut self,
         args: &[OpTy<'tcx, Provenance>],
         atomic: AtomicFenceOrd,
@@ -164,14 +167,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         Ok(())
     }

-    fn atomic_fence(
+    fn atomic_fence_intrinsic(
         &mut self,
         args: &[OpTy<'tcx, Provenance>],
         atomic: AtomicFenceOrd,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let [] = check_arg_count(args)?;
-        this.validate_atomic_fence(atomic)?;
+        this.atomic_fence(atomic)?;
         Ok(())
     }

src/shims/unix/linux/sync.rs (+2 -2)
@@ -169,7 +169,7 @@ pub fn futex<'tcx>(
         //
         // Thankfully, preemptions cannot happen inside a Miri shim, so we do not need to
         // do anything special to guarantee fence-load-comparison atomicity.
-        this.validate_atomic_fence(AtomicFenceOrd::SeqCst)?;
+        this.atomic_fence(AtomicFenceOrd::SeqCst)?;
         // Read an `i32` through the pointer, regardless of any wrapper types.
         // It's not uncommon for `addr` to be passed as another type than `*mut i32`, such as `*const AtomicI32`.
         let futex_val = this
@@ -240,7 +240,7 @@ pub fn futex<'tcx>(
         // Together with the SeqCst fence in futex_wait, this makes sure that futex_wait
         // will see the latest value on addr which could be changed by our caller
         // before doing the syscall.
-        this.validate_atomic_fence(AtomicFenceOrd::SeqCst)?;
+        this.atomic_fence(AtomicFenceOrd::SeqCst)?;
         let mut n = 0;
         for _ in 0..val {
             if let Some(thread) = this.futex_wake(addr_usize, bitset) {
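The SeqCst fence the futex shim performs before reading the futex word mirrors what real futex-based code relies on: the fence plus the value check ensures a waiter cannot miss a wake-up from a thread that updated the word before calling futex_wake. A minimal user-level sketch of that fence-then-load step, using standard library atomics rather than the Miri shim itself:

```rust
use std::sync::atomic::{fence, AtomicI32, Ordering};

/// Sketch of the check a futex-wait performs: after a SeqCst fence, read the
/// futex word and only block if it still holds the expected value.
fn futex_wait_would_block(word: &AtomicI32, expected: i32) -> bool {
    // Corresponds to the `atomic_fence(AtomicFenceOrd::SeqCst)` call in the
    // shim, followed by the read of the `i32` behind the futex pointer.
    fence(Ordering::SeqCst);
    word.load(Ordering::Relaxed) == expected
}

fn main() {
    let word = AtomicI32::new(0);
    assert!(futex_wait_would_block(&word, 0)); // value unchanged: would sleep
    word.store(1, Ordering::Relaxed);
    assert!(!futex_wait_would_block(&word, 0)); // value changed: would return immediately
}
```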
