@@ -83,7 +83,8 @@ use rustc_const_eval::interpret::{
use rustc_data_structures::fx::FxHashMap;

use crate::{
-    AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, Tag, ThreadManager, VClock, VTimestamp, VectorIdx,
+    AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, Provenance, ThreadManager, VClock, VTimestamp,
+    VectorIdx,
};

use super::{
@@ -127,7 +128,7 @@ struct StoreElement {
    // FIXME: this means the store is either fully initialized or fully uninitialized;
    // we will have to change this if we want to support atomics on
    // partially initialized data.
-    val: ScalarMaybeUninit<Tag>,
+    val: ScalarMaybeUninit<Provenance>,

    /// Timestamp of first loads from this store element by each thread
    /// Behind a RefCell to keep load op take &self
@@ -174,7 +175,7 @@ impl StoreBufferAlloc {
    fn get_or_create_store_buffer<'tcx>(
        &self,
        range: AllocRange,
-        init: ScalarMaybeUninit<Tag>,
+        init: ScalarMaybeUninit<Provenance>,
    ) -> InterpResult<'tcx, Ref<'_, StoreBuffer>> {
        let access_type = self.store_buffers.borrow().access_type(range);
        let pos = match access_type {
@@ -199,7 +200,7 @@ impl StoreBufferAlloc {
    fn get_or_create_store_buffer_mut<'tcx>(
        &mut self,
        range: AllocRange,
-        init: ScalarMaybeUninit<Tag>,
+        init: ScalarMaybeUninit<Provenance>,
    ) -> InterpResult<'tcx, &mut StoreBuffer> {
        let buffers = self.store_buffers.get_mut();
        let access_type = buffers.access_type(range);
@@ -220,7 +221,7 @@ impl StoreBufferAlloc {
}

impl<'mir, 'tcx: 'mir> StoreBuffer {
-    fn new(init: ScalarMaybeUninit<Tag>) -> Self {
+    fn new(init: ScalarMaybeUninit<Provenance>) -> Self {
        let mut buffer = VecDeque::new();
        buffer.reserve(STORE_BUFFER_LIMIT);
        let mut ret = Self { buffer };
@@ -253,7 +254,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
        is_seqcst: bool,
        rng: &mut (impl rand::Rng + ?Sized),
        validate: impl FnOnce() -> InterpResult<'tcx>,
-    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+    ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
        // Having a live borrow to store_buffer while calling validate_atomic_load is fine
        // because the race detector doesn't touch store_buffer

@@ -278,7 +279,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {

    fn buffered_write(
        &mut self,
-        val: ScalarMaybeUninit<Tag>,
+        val: ScalarMaybeUninit<Provenance>,
        global: &DataRaceState,
        thread_mgr: &ThreadManager<'_, '_>,
        is_seqcst: bool,
@@ -366,7 +367,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
    /// ATOMIC STORE IMPL in the paper (except we don't need the location's vector clock)
    fn store_impl(
        &mut self,
-        val: ScalarMaybeUninit<Tag>,
+        val: ScalarMaybeUninit<Provenance>,
        index: VectorIdx,
        thread_clock: &VClock,
        is_seqcst: bool,
@@ -408,7 +409,11 @@ impl StoreElement {
    /// buffer regardless of subsequent loads by the same thread; if the earliest load of another
    /// thread doesn't happen before the current one, then no subsequent load by the other thread
    /// can happen before the current one.
-    fn load_impl(&self, index: VectorIdx, clocks: &ThreadClockSet) -> ScalarMaybeUninit<Tag> {
+    fn load_impl(
+        &self,
+        index: VectorIdx,
+        clocks: &ThreadClockSet,
+    ) -> ScalarMaybeUninit<Provenance> {
        let _ = self.loads.borrow_mut().try_insert(index, clocks.clock[index]);
        self.val
    }
@@ -421,7 +426,10 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
    // If weak memory emulation is enabled, check if this atomic op imperfectly overlaps with a previous
    // atomic read or write. If it does, then we require it to be ordered (non-racy) with all previous atomic
    // accesses on all the bytes in range
-    fn validate_overlapping_atomic(&self, place: &MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
+    fn validate_overlapping_atomic(
+        &self,
+        place: &MPlaceTy<'tcx, Provenance>,
+    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
        if let crate::AllocExtra {
@@ -448,10 +456,10 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:

    fn buffered_atomic_rmw(
        &mut self,
-        new_val: ScalarMaybeUninit<Tag>,
-        place: &MPlaceTy<'tcx, Tag>,
+        new_val: ScalarMaybeUninit<Provenance>,
+        place: &MPlaceTy<'tcx, Provenance>,
        atomic: AtomicRwOrd,
-        init: ScalarMaybeUninit<Tag>,
+        init: ScalarMaybeUninit<Provenance>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
@@ -474,11 +482,11 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:

    fn buffered_atomic_read(
        &self,
-        place: &MPlaceTy<'tcx, Tag>,
+        place: &MPlaceTy<'tcx, Provenance>,
        atomic: AtomicReadOrd,
-        latest_in_mo: ScalarMaybeUninit<Tag>,
+        latest_in_mo: ScalarMaybeUninit<Provenance>,
        validate: impl FnOnce() -> InterpResult<'tcx>,
-    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+    ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
        let this = self.eval_context_ref();
        if let Some(global) = &this.machine.data_race {
            let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
@@ -510,10 +518,10 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:

    fn buffered_atomic_write(
        &mut self,
-        val: ScalarMaybeUninit<Tag>,
-        dest: &MPlaceTy<'tcx, Tag>,
+        val: ScalarMaybeUninit<Provenance>,
+        dest: &MPlaceTy<'tcx, Provenance>,
        atomic: AtomicWriteOrd,
-        init: ScalarMaybeUninit<Tag>,
+        init: ScalarMaybeUninit<Provenance>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(dest.ptr)?;
@@ -555,9 +563,9 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
    /// to perform load_impl on the latest store element
    fn perform_read_on_buffered_latest(
        &self,
-        place: &MPlaceTy<'tcx, Tag>,
+        place: &MPlaceTy<'tcx, Provenance>,
        atomic: AtomicReadOrd,
-        init: ScalarMaybeUninit<Tag>,
+        init: ScalarMaybeUninit<Provenance>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();

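Note: every hunk above is the same mechanical change, renaming Miri's machine pointer tag type from Tag to Provenance; the longer name pushes several signatures past the line-width limit, so those are re-wrapped onto multiple lines. The weak-memory store-buffer logic itself is untouched. For orientation, here is a rough, self-contained sketch of the scheme that logic implements. This is not Miri's real code: u64 values, usize thread indices, and a plain Vec<u64> vector clock stand in for ScalarMaybeUninit<Provenance>, VectorIdx, and VClock, and the buffer limit here is an illustrative constant.

use std::collections::VecDeque;

type ThreadId = usize;
type VClock = Vec<u64>; // one logical timestamp slot per thread

const STORE_BUFFER_LIMIT: usize = 128;

struct StoreElement {
    store_thread: ThreadId,
    timestamp: u64, // the storing thread's clock entry when it stored
    val: u64,
}

struct StoreBuffer {
    buffer: VecDeque<StoreElement>,
}

impl StoreBuffer {
    fn new(init: u64) -> Self {
        let mut buffer = VecDeque::with_capacity(STORE_BUFFER_LIMIT);
        buffer.push_back(StoreElement { store_thread: 0, timestamp: 0, val: init });
        StoreBuffer { buffer }
    }

    /// Append a store; once the buffer is full, drop the oldest element
    /// (a bounded buffer is what keeps emulation affordable).
    fn store(&mut self, store_thread: ThreadId, timestamp: u64, val: u64) {
        if self.buffer.len() >= STORE_BUFFER_LIMIT {
            self.buffer.pop_front();
        }
        self.buffer.push_back(StoreElement { store_thread, timestamp, val });
    }

    /// The stores a thread with vector clock `clock` may legally observe:
    /// the newest store already ordered before the load, plus everything
    /// stored after it (reading anything older would violate coherence).
    fn loadable<'a>(&'a self, clock: &'a VClock) -> impl Iterator<Item = &'a StoreElement> {
        let newest_visible = self
            .buffer
            .iter()
            .rposition(|e| e.timestamp <= clock[e.store_thread])
            .unwrap_or(0);
        self.buffer.iter().skip(newest_visible)
    }
}

fn main() {
    let mut sb = StoreBuffer::new(0);
    sb.store(1, 1, 1); // thread 1 stores 1 at its clock 1
    sb.store(1, 2, 2); // ... then 2 at its clock 2
    // Thread 0 has not synchronized with thread 1, so any of the three
    // buffered stores is a legal result for its next relaxed load:
    let unsynced: Vec<u64> = sb.loadable(&vec![0, 0]).map(|e| e.val).collect();
    assert_eq!(unsynced, [0, 1, 2]);
    // After acquiring thread 1's latest store, only the newest value remains:
    let synced: Vec<u64> = sb.loadable(&vec![0, 2]).map(|e| e.val).collect();
    assert_eq!(synced, [2]);
}

A real load picks one candidate at random (hence the rng parameter to buffered_read in the diff); the sketch only enumerates the legal choices.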
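The load_impl hunk also shows why the loads map records only a thread's first load from a store element: nightly try_insert never overwrites an existing entry, and per the doc comment, if the earliest load by a thread is not ordered before some access, no later load by that thread can be either, so one timestamp per thread suffices. A stable-Rust sketch of that idea, again with hypothetical stand-in types:

use std::cell::RefCell;
use std::collections::HashMap;

// Hypothetical stand-ins for Miri's VectorIdx and VTimestamp.
type VectorIdx = usize;
type VTimestamp = u64;

struct StoreElement {
    val: u64,
    // First-load timestamp per thread. The RefCell lets the read-side
    // method take &self, mirroring the diff's `loads` field.
    loads: RefCell<HashMap<VectorIdx, VTimestamp>>,
}

impl StoreElement {
    fn load_impl(&self, index: VectorIdx, now: VTimestamp) -> u64 {
        // entry().or_insert() plays the role of try_insert here:
        // an existing (earlier) timestamp is never overwritten.
        self.loads.borrow_mut().entry(index).or_insert(now);
        self.val
    }
}

fn main() {
    let elem = StoreElement { val: 7, loads: RefCell::new(HashMap::new()) };
    elem.load_impl(0, 3);
    elem.load_impl(0, 9); // a later load by the same thread: 3 is kept
    assert_eq!(elem.loads.borrow()[&0], 3);
}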