@@ -464,33 +464,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         this.write_scalar_atomic(value.into(), &value_place, atomic)
     }
 
-    /// Checks that an atomic access is legal at the given place.
-    fn atomic_access_check(&self, place: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
-        let this = self.eval_context_ref();
-        // Check alignment requirements. Atomics must always be aligned to their size,
-        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32-bit must
-        // be 8-aligned).
-        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
-            place.ptr,
-            place.layout.size,
-            align,
-            CheckInAllocMsg::MemoryAccessTest,
-        )?;
-        // Ensure the allocation is mutable. Even a failing (read-only) compare_exchange needs
-        // mutable memory on many targets (i.e., it segfaults if that memory is mapped
-        // read-only), and atomic loads can be implemented via compare_exchange on some
-        // targets. See <https://github.com/rust-lang/miri/issues/2463>.
-        // We avoid `get_ptr_alloc` since we do *not* want to run the access hooks -- the actual
-        // access will happen later.
-        let (alloc_id, _offset, _prov) =
-            this.ptr_try_get_alloc_id(place.ptr).expect("there are no zero-sized atomic accesses");
-        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
-            throw_ub_format!("atomic operations cannot be performed on read-only memory");
-        }
-        Ok(())
-    }
-
     /// Perform an atomic read operation at the memory location.
     fn read_scalar_atomic(
         &self,
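Both checks in the relocated `atomic_access_check` correspond to user-visible behavior. The size-equals-alignment rule can be observed with nothing but the standard library; a minimal sketch, assuming a target where this atomic width is available:

    use std::mem::align_of;
    use std::sync::atomic::AtomicU64;

    fn main() {
        // On some 32-bit targets, u64 itself is only 4-aligned, but the atomic
        // wrapper is always aligned to its size (8 bytes), which is exactly the
        // `Align::from_bytes(place.layout.size.bytes())` requirement above.
        println!("align_of::<u64>()       = {}", align_of::<u64>());
        println!("align_of::<AtomicU64>() = {}", align_of::<AtomicU64>());
    }

The mutability check likewise maps to a concrete diagnostic. A hypothetical program that this code should reject (the cast and the assumption that an immutable static becomes a read-only allocation are illustrative, loosely following miri issue 2463):

    use std::sync::atomic::{AtomicU64, Ordering};

    static RO: u64 = 0; // immutable static; assumed to live in read-only memory

    fn main() {
        // UB: atomic operation on read-only memory. Under the check above this
        // should report "atomic operations cannot be performed on read-only memory".
        let a = unsafe { &*(&RO as *const u64 as *const AtomicU64) };
        let _ = a.compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed);
    }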
@@ -682,80 +655,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         Ok(res)
     }
 
-    /// Update the data-race detector for an atomic read occurring at the
-    /// associated memory-place and on the current thread.
-    fn validate_atomic_load(
-        &self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicReadOrd,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_ref();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(
-            place,
-            atomic,
-            "Atomic Load",
-            move |memory, clocks, index, atomic| {
-                if atomic == AtomicReadOrd::Relaxed {
-                    memory.load_relaxed(&mut *clocks, index)
-                } else {
-                    memory.load_acquire(&mut *clocks, index)
-                }
-            },
-        )
-    }
-
-    /// Update the data-race detector for an atomic write occurring at the
-    /// associated memory-place and on the current thread.
-    fn validate_atomic_store(
-        &mut self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicWriteOrd,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_mut();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(
-            place,
-            atomic,
-            "Atomic Store",
-            move |memory, clocks, index, atomic| {
-                if atomic == AtomicWriteOrd::Relaxed {
-                    memory.store_relaxed(clocks, index)
-                } else {
-                    memory.store_release(clocks, index)
-                }
-            },
-        )
-    }
-
-    /// Update the data-race detector for an atomic read-modify-write occurring
-    /// at the associated memory place and on the current thread.
-    fn validate_atomic_rmw(
-        &mut self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicRwOrd,
-    ) -> InterpResult<'tcx> {
-        use AtomicRwOrd::*;
-        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
-        let release = matches!(atomic, Release | AcqRel | SeqCst);
-        let this = self.eval_context_mut();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
-            if acquire {
-                memory.load_acquire(clocks, index)?;
-            } else {
-                memory.load_relaxed(clocks, index)?;
-            }
-            if release {
-                memory.rmw_release(clocks, index)
-            } else {
-                memory.rmw_relaxed(clocks, index)
-            }
-        })
-    }
-
     /// Update the data-race detector for an atomic fence on the current thread.
-    fn validate_atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
+    fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         if let Some(data_race) = &mut this.machine.data_race {
             data_race.maybe_perform_sync_operation(&this.machine.threads, |index, mut clocks| {
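For context, the renamed `atomic_fence` is what gives user-level fence pairs their happens-before effect in the data-race detector. A self-contained sketch of the pattern it models, using only the standard library and no Miri internals:

    use std::sync::atomic::{fence, AtomicBool, AtomicU32, Ordering};
    use std::thread;

    static FLAG: AtomicBool = AtomicBool::new(false);
    static DATA: AtomicU32 = AtomicU32::new(0);

    fn main() {
        let t = thread::spawn(|| {
            DATA.store(42, Ordering::Relaxed);
            fence(Ordering::Release);          // release fence before the relaxed store
            FLAG.store(true, Ordering::Relaxed);
        });
        while !FLAG.load(Ordering::Relaxed) {} // spin until the flag is observed
        fence(Ordering::Acquire);              // acquire fence after the relaxed load
        // The release/acquire fence pair synchronizes, so this must see 42.
        assert_eq!(DATA.load(Ordering::Relaxed), 42);
        t.join().unwrap();
    }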
@@ -1081,6 +982,105 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         result
     }
 
+    /// Checks that an atomic access is legal at the given place.
+    fn atomic_access_check(&self, place: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
+        let this = self.eval_context_ref();
+        // Check alignment requirements. Atomics must always be aligned to their size,
+        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32-bit must
+        // be 8-aligned).
+        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+        this.check_ptr_access_align(
+            place.ptr,
+            place.layout.size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+        )?;
+        // Ensure the allocation is mutable. Even a failing (read-only) compare_exchange needs
+        // mutable memory on many targets (i.e., it segfaults if that memory is mapped
+        // read-only), and atomic loads can be implemented via compare_exchange on some
+        // targets. See <https://github.com/rust-lang/miri/issues/2463>.
+        // We avoid `get_ptr_alloc` since we do *not* want to run the access hooks -- the actual
+        // access will happen later.
+        let (alloc_id, _offset, _prov) =
+            this.ptr_try_get_alloc_id(place.ptr).expect("there are no zero-sized atomic accesses");
+        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
+            throw_ub_format!("atomic operations cannot be performed on read-only memory");
+        }
+        Ok(())
+    }
+
+    /// Update the data-race detector for an atomic read occurring at the
+    /// associated memory-place and on the current thread.
+    fn validate_atomic_load(
+        &self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicReadOrd,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_ref();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(
+            place,
+            atomic,
+            "Atomic Load",
+            move |memory, clocks, index, atomic| {
+                if atomic == AtomicReadOrd::Relaxed {
+                    memory.load_relaxed(&mut *clocks, index)
+                } else {
+                    memory.load_acquire(&mut *clocks, index)
+                }
+            },
+        )
+    }
+
+    /// Update the data-race detector for an atomic write occurring at the
+    /// associated memory-place and on the current thread.
+    fn validate_atomic_store(
+        &mut self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicWriteOrd,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(
+            place,
+            atomic,
+            "Atomic Store",
+            move |memory, clocks, index, atomic| {
+                if atomic == AtomicWriteOrd::Relaxed {
+                    memory.store_relaxed(clocks, index)
+                } else {
+                    memory.store_release(clocks, index)
+                }
+            },
+        )
+    }
+
+    /// Update the data-race detector for an atomic read-modify-write occurring
+    /// at the associated memory place and on the current thread.
+    fn validate_atomic_rmw(
+        &mut self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicRwOrd,
+    ) -> InterpResult<'tcx> {
+        use AtomicRwOrd::*;
+        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
+        let release = matches!(atomic, Release | AcqRel | SeqCst);
+        let this = self.eval_context_mut();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
+            if acquire {
+                memory.load_acquire(clocks, index)?;
+            } else {
+                memory.load_relaxed(clocks, index)?;
+            }
+            if release {
+                memory.rmw_release(clocks, index)
+            } else {
+                memory.rmw_relaxed(clocks, index)
+            }
+        })
+    }
+
     /// Generic atomic operation implementation
     fn validate_atomic_op<A: Debug + Copy>(
         &self,
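The `acquire`/`release` split in `validate_atomic_rmw` reflects how a read-modify-write ordering decomposes into a load half and a store half. A standalone restatement of that mapping in terms of std's `Ordering` (the function name `rmw_halves` is illustrative, not part of this codebase):

    use std::sync::atomic::Ordering;

    /// Splits an RMW ordering into (acquire-load, release-store) halves,
    /// mirroring the two `matches!` lines in `validate_atomic_rmw`.
    fn rmw_halves(ord: Ordering) -> (bool, bool) {
        let acquire = matches!(ord, Ordering::Acquire | Ordering::AcqRel | Ordering::SeqCst);
        let release = matches!(ord, Ordering::Release | Ordering::AcqRel | Ordering::SeqCst);
        (acquire, release)
    }

    fn main() {
        assert_eq!(rmw_halves(Ordering::Relaxed), (false, false));
        assert_eq!(rmw_halves(Ordering::Release), (false, true));
        assert_eq!(rmw_halves(Ordering::AcqRel), (true, true));
    }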