Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 2f872af

Browse files
committed Jul 1, 2022
Allow arithmetic and certain bitwise ops on AtomicPtr
This is mainly to support migrating from AtomicUsize, for the strict provenance experiment. Fixes #95492
1 parent 7425fb2 commit 2f872af

File tree

4 files changed

+428
-3
lines changed

4 files changed

+428
-3
lines changed
 

‎compiler/rustc_codegen_ssa/src/mir/intrinsic.rs

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -513,9 +513,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
513513
};
514514

515515
let ty = substs.type_at(0);
516-
if int_type_width_signed(ty, bx.tcx()).is_some()
517-
|| (ty.is_unsafe_ptr() && op == "xchg")
518-
{
516+
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
519517
let mut ptr = args[0].immediate();
520518
let mut val = args[1].immediate();
521519
if ty.is_unsafe_ptr() {

‎library/core/src/sync/atomic.rs

Lines changed: 341 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1451,6 +1451,347 @@ impl<T> AtomicPtr<T> {
14511451
}
14521452
Err(prev)
14531453
}
1454+
1455+
/// Offsets the pointer's address by adding `val` (in units of `T`),
1456+
/// returning the previous pointer.
1457+
///
1458+
/// This is equivalent to using [`wrapping_add`] to atomically perform the
1459+
/// equivalent of `ptr = ptr.wrapping_add(val);`.
1460+
///
1461+
/// This method operates in units of `T`, which means that it cannot be used
1462+
/// to offset the pointer by an amount which is not a multiple of
1463+
/// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
1464+
/// work with a deliberately misaligned pointer. In such cases, you may use
1465+
/// the [`fetch_add_bytes`](Self::fetch_add_bytes) method instead.
1466+
///
1467+
/// `fetch_add` takes an [`Ordering`] argument which describes the memory
1468+
/// ordering of this operation. All ordering modes are possible. Note that
1469+
/// using [`Acquire`] makes the store part of this operation [`Relaxed`],
1470+
/// and using [`Release`] makes the load part [`Relaxed`].
1471+
///
1472+
/// **Note**: This method is only available on platforms that support atomic
1473+
/// operations on [`AtomicPtr`].
1474+
///
1475+
/// [`wrapping_add`]: pointer::wrapping_add
1476+
///
1477+
/// # Examples
1478+
///
1479+
/// ```
1480+
/// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
1481+
/// use core::sync::atomic::{AtomicPtr, Ordering};
1482+
///
1483+
/// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
1484+
/// assert_eq!(atom.fetch_add(1, Ordering::Relaxed).addr(), 0);
1485+
/// // Note: units of `size_of::<i64>()`.
1486+
/// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
1487+
/// ```
1488+
#[inline]
1489+
#[cfg(target_has_atomic = "ptr")]
1490+
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
1491+
pub fn fetch_add(&self, val: usize, order: Ordering) -> *mut T {
1492+
self.fetch_add_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
1493+
}
1494+
1495+
/// Offsets the pointer's address by subtracting `val` (in units of `T`),
1496+
/// returning the previous pointer.
1497+
///
1498+
/// This is equivalent to using [`wrapping_sub`] to atomically perform the
1499+
/// equivalent of `ptr = ptr.wrapping_sub(val);`.
1500+
///
1501+
/// This method operates in units of `T`, which means that it cannot be used
1502+
/// to offset the pointer by an amount which is not a multiple of
1503+
/// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
1504+
/// work with a deliberately misaligned pointer. In such cases, you may use
1505+
/// the [`fetch_sub_bytes`](Self::fetch_sub_bytes) method instead.
1506+
///
1507+
/// `fetch_sub` takes an [`Ordering`] argument which describes the memory
1508+
/// ordering of this operation. All ordering modes are possible. Note that
1509+
/// using [`Acquire`] makes the store part of this operation [`Relaxed`],
1510+
/// and using [`Release`] makes the load part [`Relaxed`].
1511+
///
1512+
/// **Note**: This method is only available on platforms that support atomic
1513+
/// operations on [`AtomicPtr`].
1514+
///
1515+
/// [`wrapping_sub`]: pointer::wrapping_sub
1516+
///
1517+
/// # Examples
1518+
///
1519+
/// ```
1520+
/// #![feature(strict_provenance_atomic_ptr)]
1521+
/// use core::sync::atomic::{AtomicPtr, Ordering};
1522+
///
1523+
/// let array = [1i32, 2i32];
1524+
/// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
1525+
///
1526+
/// assert!(core::ptr::eq(
1527+
/// atom.fetch_sub(1, Ordering::Relaxed),
1528+
/// &array[1],
1529+
/// ));
1530+
/// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
1531+
/// ```
1532+
#[inline]
1533+
#[cfg(target_has_atomic = "ptr")]
1534+
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
1535+
pub fn fetch_sub(&self, val: usize, order: Ordering) -> *mut T {
1536+
self.fetch_sub_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
1537+
}
1538+
1539+
/// Offsets the pointer's address by adding `val` *bytes*, returning the
1540+
/// previous pointer.
1541+
///
1542+
/// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
1543+
/// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
1544+
///
1545+
/// `fetch_add_bytes` takes an [`Ordering`] argument which describes the
1546+
/// memory ordering of this operation. All ordering modes are possible. Note
1547+
/// that using [`Acquire`] makes the store part of this operation
1548+
/// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
1549+
///
1550+
/// **Note**: This method is only available on platforms that support atomic
1551+
/// operations on [`AtomicPtr`].
1552+
///
1553+
/// [`wrapping_add`]: pointer::wrapping_add
1554+
/// [`cast`]: pointer::cast
1555+
///
1556+
/// # Examples
1557+
///
1558+
/// ```
1559+
/// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
1560+
/// use core::sync::atomic::{AtomicPtr, Ordering};
1561+
///
1562+
/// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
1563+
/// assert_eq!(atom.fetch_add_bytes(1, Ordering::Relaxed).addr(), 0);
1564+
/// // Note: in units of bytes, not `size_of::<i64>()`.
1565+
/// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
1566+
/// ```
1567+
#[inline]
1568+
#[cfg(target_has_atomic = "ptr")]
1569+
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
1570+
pub fn fetch_add_bytes(&self, val: usize, order: Ordering) -> *mut T {
1571+
#[cfg(not(bootstrap))]
1572+
// SAFETY: data races are prevented by atomic intrinsics.
1573+
unsafe {
1574+
atomic_add(self.p.get(), core::ptr::invalid_mut(val), order).cast()
1575+
}
1576+
#[cfg(bootstrap)]
1577+
// SAFETY: data races are prevented by atomic intrinsics.
1578+
unsafe {
1579+
atomic_add(self.p.get().cast::<usize>(), val, order) as *mut T
1580+
}
1581+
}
1582+
1583+
/// Offsets the pointer's address by subtracting `val` *bytes*, returning the
1584+
/// previous pointer.
1585+
///
1586+
/// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
1587+
/// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
1588+
///
1589+
/// `fetch_sub_bytes` takes an [`Ordering`] argument which describes the
1590+
/// memory ordering of this operation. All ordering modes are possible. Note
1591+
/// that using [`Acquire`] makes the store part of this operation
1592+
/// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
1593+
///
1594+
/// **Note**: This method is only available on platforms that support atomic
1595+
/// operations on [`AtomicPtr`].
1596+
///
1597+
/// [`wrapping_sub`]: pointer::wrapping_sub
1598+
/// [`cast`]: pointer::cast
1599+
///
1600+
/// # Examples
1601+
///
1602+
/// ```
1603+
/// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
1604+
/// use core::sync::atomic::{AtomicPtr, Ordering};
1605+
///
1606+
/// let atom = AtomicPtr::<i64>::new(core::ptr::invalid_mut(1));
1607+
/// assert_eq!(atom.fetch_sub_bytes(1, Ordering::Relaxed).addr(), 1);
1608+
/// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
1609+
/// ```
1610+
#[inline]
1611+
#[cfg(target_has_atomic = "ptr")]
1612+
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
1613+
pub fn fetch_sub_bytes(&self, val: usize, order: Ordering) -> *mut T {
1614+
#[cfg(not(bootstrap))]
1615+
// SAFETY: data races are prevented by atomic intrinsics.
1616+
unsafe {
1617+
atomic_sub(self.p.get(), core::ptr::invalid_mut(val), order).cast()
1618+
}
1619+
#[cfg(bootstrap)]
1620+
// SAFETY: data races are prevented by atomic intrinsics.
1621+
unsafe {
1622+
atomic_sub(self.p.get().cast::<usize>(), val, order) as *mut T
1623+
}
1624+
}
1625+
1626+
/// Performs a bitwise "or" operation on the address of the current pointer,
1627+
/// and the argument `val`, and stores a pointer with provenance of the
1628+
/// current pointer and the resulting address.
1629+
///
1630+
/// This is equivalent to using [`map_addr`] to atomically
1631+
/// perform `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged
1632+
/// pointer schemes to atomically set tag bits.
1633+
///
1634+
/// **Caveat**: This operation returns the previous value. To compute the
1635+
/// stored value without losing provenance, you may use [`map_addr`]. For
1636+
/// example: `a.fetch_or(val).map_addr(|a| a | val)`.
1637+
///
1638+
/// `fetch_or` takes an [`Ordering`] argument which describes the memory
1639+
/// ordering of this operation. All ordering modes are possible. Note that
1640+
/// using [`Acquire`] makes the store part of this operation [`Relaxed`],
1641+
/// and using [`Release`] makes the load part [`Relaxed`].
1642+
///
1643+
/// **Note**: This method is only available on platforms that support atomic
1644+
/// operations on [`AtomicPtr`].
1645+
///
1646+
/// This API and its claimed semantics are part of the Strict Provenance
1647+
/// experiment, see the [module documentation for `ptr`][crate::ptr] for
1648+
/// details.
1649+
///
1650+
/// [`map_addr`]: pointer::map_addr
1651+
///
1652+
/// # Examples
1653+
///
1654+
/// ```
1655+
/// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
1656+
/// use core::sync::atomic::{AtomicPtr, Ordering};
1657+
///
1658+
/// let pointer = &mut 3i64 as *mut i64;
1659+
///
1660+
/// let atom = AtomicPtr::<i64>::new(pointer);
1661+
/// // Tag the bottom bit of the pointer.
1662+
/// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 0);
1663+
/// // Extract and untag.
1664+
/// let tagged = atom.load(Ordering::Relaxed);
1665+
/// assert_eq!(tagged.addr() & 1, 1);
1666+
/// assert_eq!(tagged.map_addr(|p| p & !1), pointer);
1667+
/// ```
1668+
#[inline]
1669+
#[cfg(target_has_atomic = "ptr")]
1670+
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
1671+
pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
1672+
#[cfg(not(bootstrap))]
1673+
// SAFETY: data races are prevented by atomic intrinsics.
1674+
unsafe {
1675+
atomic_or(self.p.get(), core::ptr::invalid_mut(val), order).cast()
1676+
}
1677+
#[cfg(bootstrap)]
1678+
// SAFETY: data races are prevented by atomic intrinsics.
1679+
unsafe {
1680+
atomic_or(self.p.get().cast::<usize>(), val, order) as *mut T
1681+
}
1682+
}
1683+
1684+
/// Performs a bitwise "and" operation on the address of the current
1685+
/// pointer, and the argument `val`, and stores a pointer with provenance of
1686+
/// the current pointer and the resulting address.
1687+
///
1688+
/// This is equivalent to using [`map_addr`] to atomically
1689+
/// perform `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged
1690+
/// pointer schemes to atomically unset tag bits.
1691+
///
1692+
/// **Caveat**: This operation returns the previous value. To compute the
1693+
/// stored value without losing provenance, you may use [`map_addr`]. For
1694+
/// example: `a.fetch_and(val).map_addr(|a| a & val)`.
1695+
///
1696+
/// `fetch_and` takes an [`Ordering`] argument which describes the memory
1697+
/// ordering of this operation. All ordering modes are possible. Note that
1698+
/// using [`Acquire`] makes the store part of this operation [`Relaxed`],
1699+
/// and using [`Release`] makes the load part [`Relaxed`].
1700+
///
1701+
/// **Note**: This method is only available on platforms that support atomic
1702+
/// operations on [`AtomicPtr`].
1703+
///
1704+
/// This API and its claimed semantics are part of the Strict Provenance
1705+
/// experiment, see the [module documentation for `ptr`][crate::ptr] for
1706+
/// details.
1707+
///
1708+
/// [`map_addr`]: pointer::map_addr
1709+
///
1710+
/// # Examples
1711+
///
1712+
/// ```
1713+
/// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
1714+
/// use core::sync::atomic::{AtomicPtr, Ordering};
1715+
///
1716+
/// let pointer = &mut 3i64 as *mut i64;
1717+
/// // A tagged pointer
1718+
/// let atom = AtomicPtr::<i64>::new(pointer.map_addr(|a| a | 1));
1719+
/// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 1);
1720+
/// // Untag, and extract the previously tagged pointer.
1721+
/// let untagged = atom.fetch_and(!1, Ordering::Relaxed)
1722+
/// .map_addr(|a| a & !1);
1723+
/// assert_eq!(untagged, pointer);
1724+
/// ```
1725+
#[inline]
1726+
#[cfg(target_has_atomic = "ptr")]
1727+
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
1728+
pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
1729+
#[cfg(not(bootstrap))]
1730+
// SAFETY: data races are prevented by atomic intrinsics.
1731+
unsafe {
1732+
atomic_and(self.p.get(), core::ptr::invalid_mut(val), order).cast()
1733+
}
1734+
#[cfg(bootstrap)]
1735+
// SAFETY: data races are prevented by atomic intrinsics.
1736+
unsafe {
1737+
atomic_and(self.p.get().cast::<usize>(), val, order) as *mut T
1738+
}
1739+
}
1740+
1741+
/// Performs a bitwise "xor" operation on the address of the current
1742+
/// pointer, and the argument `val`, and stores a pointer with provenance of
1743+
/// the current pointer and the resulting address.
1744+
///
1745+
/// This is equivalent to using [`map_addr`] to atomically
1746+
/// perform `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged
1747+
/// pointer schemes to atomically toggle tag bits.
1748+
///
1749+
/// **Caveat**: This operation returns the previous value. To compute the
1750+
/// stored value without losing provenance, you may use [`map_addr`]. For
1751+
/// example: `a.fetch_xor(val).map_addr(|a| a ^ val)`.
1752+
///
1753+
/// `fetch_xor` takes an [`Ordering`] argument which describes the memory
1754+
/// ordering of this operation. All ordering modes are possible. Note that
1755+
/// using [`Acquire`] makes the store part of this operation [`Relaxed`],
1756+
/// and using [`Release`] makes the load part [`Relaxed`].
1757+
///
1758+
/// **Note**: This method is only available on platforms that support atomic
1759+
/// operations on [`AtomicPtr`].
1760+
///
1761+
/// This API and its claimed semantics are part of the Strict Provenance
1762+
/// experiment, see the [module documentation for `ptr`][crate::ptr] for
1763+
/// details.
1764+
///
1765+
/// [`map_addr`]: pointer::map_addr
1766+
///
1767+
/// # Examples
1768+
///
1769+
/// ```
1770+
/// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
1771+
/// use core::sync::atomic::{AtomicPtr, Ordering};
1772+
///
1773+
/// let pointer = &mut 3i64 as *mut i64;
1774+
/// let atom = AtomicPtr::<i64>::new(pointer);
1775+
///
1776+
/// // Toggle a tag bit on the pointer.
1777+
/// atom.fetch_xor(1, Ordering::Relaxed);
1778+
/// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
1779+
/// ```
1780+
#[inline]
1781+
#[cfg(target_has_atomic = "ptr")]
1782+
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
1783+
pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
1784+
#[cfg(not(bootstrap))]
1785+
// SAFETY: data races are prevented by atomic intrinsics.
1786+
unsafe {
1787+
atomic_xor(self.p.get(), core::ptr::invalid_mut(val), order).cast()
1788+
}
1789+
#[cfg(bootstrap)]
1790+
// SAFETY: data races are prevented by atomic intrinsics.
1791+
unsafe {
1792+
atomic_xor(self.p.get().cast::<usize>(), val, order) as *mut T
1793+
}
1794+
}
14541795
}
14551796

14561797
#[cfg(target_has_atomic_load_store = "8")]

‎library/core/tests/atomic.rs

Lines changed: 85 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -127,6 +127,91 @@ fn int_max() {
127127
assert_eq!(x.load(SeqCst), 0xf731);
128128
}
129129

130+
#[test]
131+
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
132+
fn ptr_add_null() {
133+
let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
134+
assert_eq!(atom.fetch_add(1, SeqCst).addr(), 0);
135+
assert_eq!(atom.load(SeqCst).addr(), 8);
136+
137+
assert_eq!(atom.fetch_add_bytes(1, SeqCst).addr(), 8);
138+
assert_eq!(atom.load(SeqCst).addr(), 9);
139+
140+
assert_eq!(atom.fetch_sub(1, SeqCst).addr(), 9);
141+
assert_eq!(atom.load(SeqCst).addr(), 1);
142+
143+
assert_eq!(atom.fetch_sub_bytes(1, SeqCst).addr(), 1);
144+
assert_eq!(atom.load(SeqCst).addr(), 0);
145+
}
146+
147+
#[test]
148+
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
149+
fn ptr_add_data() {
150+
let num = 0i64;
151+
let n = &num as *const i64 as *mut _;
152+
let atom = AtomicPtr::<i64>::new(n);
153+
assert_eq!(atom.fetch_add(1, SeqCst), n);
154+
assert_eq!(atom.load(SeqCst), n.wrapping_add(1));
155+
156+
assert_eq!(atom.fetch_sub(1, SeqCst), n.wrapping_add(1));
157+
assert_eq!(atom.load(SeqCst), n);
158+
let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();
159+
160+
assert_eq!(atom.fetch_add_bytes(1, SeqCst), n);
161+
assert_eq!(atom.load(SeqCst), bytes_from_n(1));
162+
163+
assert_eq!(atom.fetch_add_bytes(5, SeqCst), bytes_from_n(1));
164+
assert_eq!(atom.load(SeqCst), bytes_from_n(6));
165+
166+
assert_eq!(atom.fetch_sub_bytes(1, SeqCst), bytes_from_n(6));
167+
assert_eq!(atom.load(SeqCst), bytes_from_n(5));
168+
169+
assert_eq!(atom.fetch_sub_bytes(5, SeqCst), bytes_from_n(5));
170+
assert_eq!(atom.load(SeqCst), n);
171+
}
172+
173+
#[test]
174+
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
175+
fn ptr_bitops() {
176+
let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
177+
assert_eq!(atom.fetch_or(0b0111, SeqCst).addr(), 0);
178+
assert_eq!(atom.load(SeqCst).addr(), 0b0111);
179+
180+
assert_eq!(atom.fetch_and(0b1101, SeqCst).addr(), 0b0111);
181+
assert_eq!(atom.load(SeqCst).addr(), 0b0101);
182+
183+
assert_eq!(atom.fetch_xor(0b1111, SeqCst).addr(), 0b0101);
184+
assert_eq!(atom.load(SeqCst).addr(), 0b1010);
185+
}
186+
187+
#[test]
188+
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
189+
fn ptr_bitops_tagging() {
190+
#[repr(align(16))]
191+
struct Tagme(u128);
192+
193+
let tagme = Tagme(1000);
194+
let ptr = &tagme as *const Tagme as *mut Tagme;
195+
let atom: AtomicPtr<Tagme> = AtomicPtr::new(ptr);
196+
197+
const MASK_TAG: usize = 0b1111;
198+
const MASK_PTR: usize = !MASK_TAG;
199+
200+
assert_eq!(ptr.addr() & MASK_TAG, 0);
201+
202+
assert_eq!(atom.fetch_or(0b0111, SeqCst), ptr);
203+
assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b111));
204+
205+
assert_eq!(atom.fetch_and(MASK_PTR | 0b0010, SeqCst), ptr.map_addr(|a| a | 0b111));
206+
assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b0010));
207+
208+
assert_eq!(atom.fetch_xor(0b1011, SeqCst), ptr.map_addr(|a| a | 0b0010));
209+
assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b1001));
210+
211+
assert_eq!(atom.fetch_and(MASK_PTR, SeqCst), ptr.map_addr(|a| a | 0b1001));
212+
assert_eq!(atom.load(SeqCst), ptr);
213+
}
214+
130215
static S_FALSE: AtomicBool = AtomicBool::new(false);
131216
static S_TRUE: AtomicBool = AtomicBool::new(true);
132217
static S_INT: AtomicIsize = AtomicIsize::new(0);

‎library/core/tests/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,7 @@
9090
#![feature(slice_group_by)]
9191
#![feature(split_array)]
9292
#![feature(strict_provenance)]
93+
#![feature(strict_provenance_atomic_ptr)]
9394
#![feature(trusted_random_access)]
9495
#![feature(unsize)]
9596
#![feature(unzip_option)]

0 commit comments

Comments
 (0)
Please sign in to comment.