From 2f872afdb514032f746b8641672eb13936fc897d Mon Sep 17 00:00:00 2001
From: Thom Chiovoloni
Date: Tue, 10 May 2022 17:04:26 -0700
Subject: [PATCH 1/2] Allow arithmetic and certain bitwise ops on AtomicPtr

This is mainly to support migrating from AtomicUsize, for the strict
provenance experiment.

Fixes #95492
---
 .../rustc_codegen_ssa/src/mir/intrinsic.rs |   4 +---
 library/core/src/sync/atomic.rs            | 341 ++++++++++++++++++
 library/core/tests/atomic.rs               |  85 +++++
 library/core/tests/lib.rs                  |   1 +
 4 files changed, 428 insertions(+), 3 deletions(-)

diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 7f14b95317b46..645afae30d887 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -513,9 +513,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             };
 
             let ty = substs.type_at(0);
-            if int_type_width_signed(ty, bx.tcx()).is_some()
-                || (ty.is_unsafe_ptr() && op == "xchg")
-            {
+            if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                 let mut ptr = args[0].immediate();
                 let mut val = args[1].immediate();
                 if ty.is_unsafe_ptr() {
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index 90e2dfd5d3d9b..bb6d82ff13de3 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -1451,6 +1451,347 @@ impl<T> AtomicPtr<T> {
         }
         Err(prev)
     }
+
+    /// Offsets the pointer's address by adding `val` (in units of `T`),
+    /// returning the previous pointer.
+    ///
+    /// This is equivalent to using [`wrapping_add`] to atomically perform the
+    /// equivalent of `ptr = ptr.wrapping_add(val);`.
+    ///
+    /// This method operates in units of `T`, which means that it cannot be used
+    /// to offset the pointer by an amount which is not a multiple of
+    /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
+    /// work with a deliberately misaligned pointer. In such cases, you may use
+    /// the [`fetch_add_bytes`](Self::fetch_add_bytes) method instead.
+    ///
+    /// `fetch_add` takes an [`Ordering`] argument which describes the memory
+    /// ordering of this operation. All ordering modes are possible. Note that
+    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
+    /// and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// **Note**: This method is only available on platforms that support atomic
+    /// operations on [`AtomicPtr`].
+    ///
+    /// [`wrapping_add`]: pointer::wrapping_add
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
+    /// use core::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
+    /// assert_eq!(atom.fetch_add(1, Ordering::Relaxed).addr(), 0);
+    /// // Note: units of `size_of::<i64>()`.
+    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
+    pub fn fetch_add(&self, val: usize, order: Ordering) -> *mut T {
+        self.fetch_add_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
+    }
+
+    /// Offsets the pointer's address by subtracting `val` (in units of `T`),
+    /// returning the previous pointer.
+    ///
+    /// This is equivalent to using [`wrapping_sub`] to atomically perform the
+    /// equivalent of `ptr = ptr.wrapping_sub(val);`.
+    ///
+    /// This method operates in units of `T`, which means that it cannot be used
+    /// to offset the pointer by an amount which is not a multiple of
+    /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
+    /// work with a deliberately misaligned pointer. In such cases, you may use
+    /// the [`fetch_sub_bytes`](Self::fetch_sub_bytes) method instead.
+    ///
+    /// `fetch_sub` takes an [`Ordering`] argument which describes the memory
+    /// ordering of this operation. All ordering modes are possible. Note that
+    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
+    /// and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// **Note**: This method is only available on platforms that support atomic
+    /// operations on [`AtomicPtr`].
+    ///
+    /// [`wrapping_sub`]: pointer::wrapping_sub
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(strict_provenance_atomic_ptr)]
+    /// use core::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let array = [1i32, 2i32];
+    /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
+    ///
+    /// assert!(core::ptr::eq(
+    ///     atom.fetch_sub(1, Ordering::Relaxed),
+    ///     &array[1],
+    /// ));
+    /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
+    pub fn fetch_sub(&self, val: usize, order: Ordering) -> *mut T {
+        self.fetch_sub_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
+    }
+
+    /// Offsets the pointer's address by adding `val` *bytes*, returning the
+    /// previous pointer.
+    ///
+    /// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
+    /// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
+    ///
+    /// `fetch_add_bytes` takes an [`Ordering`] argument which describes the
+    /// memory ordering of this operation. All ordering modes are possible. Note
+    /// that using [`Acquire`] makes the store part of this operation
+    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// **Note**: This method is only available on platforms that support atomic
+    /// operations on [`AtomicPtr`].
+    ///
+    /// [`wrapping_add`]: pointer::wrapping_add
+    /// [`cast`]: pointer::cast
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
+    /// use core::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
+    /// assert_eq!(atom.fetch_add_bytes(1, Ordering::Relaxed).addr(), 0);
+    /// // Note: in units of bytes, not `size_of::<i64>()`.
+    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
+    pub fn fetch_add_bytes(&self, val: usize, order: Ordering) -> *mut T {
+        #[cfg(not(bootstrap))]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_add(self.p.get(), core::ptr::invalid_mut(val), order).cast()
+        }
+        #[cfg(bootstrap)]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_add(self.p.get().cast::<usize>(), val, order) as *mut T
+        }
+    }
+
+    /// Offsets the pointer's address by subtracting `val` *bytes*, returning the
+    /// previous pointer.
+    ///
+    /// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
+    /// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
+    ///
+    /// `fetch_add_bytes` takes an [`Ordering`] argument which describes the
+    /// memory ordering of this operation. All ordering modes are possible. Note
+    /// that using [`Acquire`] makes the store part of this operation
+    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// **Note**: This method is only available on platforms that support atomic
+    /// operations on [`AtomicPtr`].
+    ///
+    /// [`wrapping_sub`]: pointer::wrapping_sub
+    /// [`cast`]: pointer::cast
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
+    /// use core::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let atom = AtomicPtr::<i64>::new(core::ptr::invalid_mut(1));
+    /// assert_eq!(atom.fetch_sub_bytes(1, Ordering::Relaxed).addr(), 1);
+    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
+    pub fn fetch_sub_bytes(&self, val: usize, order: Ordering) -> *mut T {
+        #[cfg(not(bootstrap))]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_sub(self.p.get(), core::ptr::invalid_mut(val), order).cast()
+        }
+        #[cfg(bootstrap)]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_sub(self.p.get().cast::<usize>(), val, order) as *mut T
+        }
+    }
+
+    /// Performs a bitwise "or" operation on the address of the current pointer,
+    /// and the argument `val`, and stores a pointer with provenance of the
+    /// current pointer and the resulting address.
+    ///
+    /// This is equivalent to using [`map_addr`] to atomically perform
+    /// `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged pointer
+    /// schemes to atomically set tag bits.
+    ///
+    /// **Caveat**: This operation returns the previous value. To compute the
+    /// stored value without losing provenance, you may use [`map_addr`]. For
+    /// example: `a.fetch_or(val).map_addr(|a| a | val)`.
+    ///
+    /// `fetch_or` takes an [`Ordering`] argument which describes the memory
+    /// ordering of this operation. All ordering modes are possible. Note that
+    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
+    /// and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// **Note**: This method is only available on platforms that support atomic
+    /// operations on [`AtomicPtr`].
+    ///
+    /// This API and its claimed semantics are part of the Strict Provenance
+    /// experiment, see the [module documentation for `ptr`][crate::ptr] for
+    /// details.
+    ///
+    /// [`map_addr`]: pointer::map_addr
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
+    /// use core::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let pointer = &mut 3i64 as *mut i64;
+    ///
+    /// let atom = AtomicPtr::<i64>::new(pointer);
+    /// // Tag the bottom bit of the pointer.
+    /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 0);
+    /// // Extract and untag.
+    /// let tagged = atom.load(Ordering::Relaxed);
+    /// assert_eq!(tagged.addr() & 1, 1);
+    /// assert_eq!(tagged.map_addr(|p| p & !1), pointer);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
+    pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
+        #[cfg(not(bootstrap))]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_or(self.p.get(), core::ptr::invalid_mut(val), order).cast()
+        }
+        #[cfg(bootstrap)]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_or(self.p.get().cast::<usize>(), val, order) as *mut T
+        }
+    }
+
+    /// Performs a bitwise "and" operation on the address of the current
+    /// pointer, and the argument `val`, and stores a pointer with provenance of
+    /// the current pointer and the resulting address.
+    ///
+    /// This is equivalent to using [`map_addr`] to atomically perform
+    /// `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged pointer
+    /// schemes to atomically unset tag bits.
+    ///
+    /// **Caveat**: This operation returns the previous value. To compute the
+    /// stored value without losing provenance, you may use [`map_addr`]. For
+    /// example: `a.fetch_and(val).map_addr(|a| a & val)`.
+    ///
+    /// `fetch_and` takes an [`Ordering`] argument which describes the memory
+    /// ordering of this operation. All ordering modes are possible. Note that
+    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
+    /// and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// **Note**: This method is only available on platforms that support atomic
+    /// operations on [`AtomicPtr`].
+    ///
+    /// This API and its claimed semantics are part of the Strict Provenance
+    /// experiment, see the [module documentation for `ptr`][crate::ptr] for
+    /// details.
+    ///
+    /// [`map_addr`]: pointer::map_addr
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
+    /// use core::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let pointer = &mut 3i64 as *mut i64;
+    /// // A tagged pointer
+    /// let atom = AtomicPtr::<i64>::new(pointer.map_addr(|a| a | 1));
+    /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 1);
+    /// // Untag, and extract the previously tagged pointer.
+    /// let untagged = atom.fetch_and(!1, Ordering::Relaxed)
+    ///     .map_addr(|a| a & !1);
+    /// assert_eq!(untagged, pointer);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
+    pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
+        #[cfg(not(bootstrap))]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_and(self.p.get(), core::ptr::invalid_mut(val), order).cast()
+        }
+        #[cfg(bootstrap)]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_and(self.p.get().cast::<usize>(), val, order) as *mut T
+        }
+    }
+
+    /// Performs a bitwise "xor" operation on the address of the current
+    /// pointer, and the argument `val`, and stores a pointer with provenance of
+    /// the current pointer and the resulting address.
+    ///
+    /// This is equivalent to using [`map_addr`] to atomically perform
+    /// `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged pointer
+    /// schemes to atomically toggle tag bits.
+    ///
+    /// **Caveat**: This operation returns the previous value. To compute the
+    /// stored value without losing provenance, you may use [`map_addr`]. For
+    /// example: `a.fetch_xor(val).map_addr(|a| a ^ val)`.
+    ///
+    /// `fetch_xor` takes an [`Ordering`] argument which describes the memory
+    /// ordering of this operation. All ordering modes are possible. Note that
+    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
+    /// and using [`Release`] makes the load part [`Relaxed`].
+    ///
+    /// **Note**: This method is only available on platforms that support atomic
+    /// operations on [`AtomicPtr`].
+    ///
+    /// This API and its claimed semantics are part of the Strict Provenance
+    /// experiment, see the [module documentation for `ptr`][crate::ptr] for
+    /// details.
+    ///
+    /// [`map_addr`]: pointer::map_addr
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
+    /// use core::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let pointer = &mut 3i64 as *mut i64;
+    /// let atom = AtomicPtr::<i64>::new(pointer);
+    ///
+    /// // Toggle a tag bit on the pointer.
+    /// atom.fetch_xor(1, Ordering::Relaxed);
+    /// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
+    /// ```
+    #[inline]
+    #[cfg(target_has_atomic = "ptr")]
+    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
+    pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
+        #[cfg(not(bootstrap))]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_xor(self.p.get(), core::ptr::invalid_mut(val), order).cast()
+        }
+        #[cfg(bootstrap)]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_xor(self.p.get().cast::<usize>(), val, order) as *mut T
+        }
+    }
 }
 
 #[cfg(target_has_atomic_load_store = "8")]
diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
index 7f8672f035417..2c048435dde97 100644
--- a/library/core/tests/atomic.rs
+++ b/library/core/tests/atomic.rs
@@ -127,6 +127,91 @@ fn int_max() {
     assert_eq!(x.load(SeqCst), 0xf731);
 }
 
+#[test]
+#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
+fn ptr_add_null() {
+    let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
+    assert_eq!(atom.fetch_add(1, SeqCst).addr(), 0);
+    assert_eq!(atom.load(SeqCst).addr(), 8);
+
+    assert_eq!(atom.fetch_add_bytes(1, SeqCst).addr(), 8);
+    assert_eq!(atom.load(SeqCst).addr(), 9);
+
+    assert_eq!(atom.fetch_sub(1, SeqCst).addr(), 9);
+    assert_eq!(atom.load(SeqCst).addr(), 1);
+
+    assert_eq!(atom.fetch_sub_bytes(1, SeqCst).addr(), 1);
+    assert_eq!(atom.load(SeqCst).addr(), 0);
+}
+
+#[test]
+#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
+fn ptr_add_data() {
+    let num = 0i64;
+    let n = &num as *const i64 as *mut _;
+    let atom = AtomicPtr::<i64>::new(n);
+    assert_eq!(atom.fetch_add(1, SeqCst), n);
+    assert_eq!(atom.load(SeqCst), n.wrapping_add(1));
+
+    assert_eq!(atom.fetch_sub(1, SeqCst), n.wrapping_add(1));
+    assert_eq!(atom.load(SeqCst), n);
+    let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();
+
+    assert_eq!(atom.fetch_add_bytes(1, SeqCst), n);
+    assert_eq!(atom.load(SeqCst), bytes_from_n(1));
+
+    assert_eq!(atom.fetch_add_bytes(5, SeqCst), bytes_from_n(1));
+    assert_eq!(atom.load(SeqCst), bytes_from_n(6));
+
+    assert_eq!(atom.fetch_sub_bytes(1, SeqCst), bytes_from_n(6));
+    assert_eq!(atom.load(SeqCst), bytes_from_n(5));
+
+    assert_eq!(atom.fetch_sub_bytes(5, SeqCst), bytes_from_n(5));
+    assert_eq!(atom.load(SeqCst), n);
+}
+
+#[test]
+#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
+fn ptr_bitops() {
+    let atom = AtomicPtr::<u64>::new(core::ptr::null_mut());
+    assert_eq!(atom.fetch_or(0b0111, SeqCst).addr(), 0);
+    assert_eq!(atom.load(SeqCst).addr(), 0b0111);
+
+    assert_eq!(atom.fetch_and(0b1101, SeqCst).addr(), 0b0111);
+    assert_eq!(atom.load(SeqCst).addr(), 0b0101);
+
+    assert_eq!(atom.fetch_xor(0b1111, SeqCst).addr(), 0b0101);
+    assert_eq!(atom.load(SeqCst).addr(), 0b1010);
+}
+
+#[test]
+#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
+fn ptr_bitops_tagging() {
+    #[repr(align(16))]
+    struct Tagme(u128);
+
+    let tagme = Tagme(1000);
+    let ptr = &tagme as *const Tagme as *mut Tagme;
+    let atom: AtomicPtr<Tagme> = AtomicPtr::new(ptr);
+
+    const MASK_TAG: usize = 0b1111;
+    const MASK_PTR: usize = !MASK_TAG;
+
+    assert_eq!(ptr.addr() & MASK_TAG, 0);
+
+    assert_eq!(atom.fetch_or(0b0111, SeqCst), ptr);
+    assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b111));
+
+    assert_eq!(atom.fetch_and(MASK_PTR | 0b0010, SeqCst), ptr.map_addr(|a| a | 0b111));
+    assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b0010));
+
+    assert_eq!(atom.fetch_xor(0b1011, SeqCst), ptr.map_addr(|a| a | 0b0010));
+    assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b1001));
+
+    assert_eq!(atom.fetch_and(MASK_PTR, SeqCst), ptr.map_addr(|a| a | 0b1001));
+    assert_eq!(atom.load(SeqCst), ptr);
+}
+
 static S_FALSE: AtomicBool = AtomicBool::new(false);
 static S_TRUE: AtomicBool = AtomicBool::new(true);
 static S_INT: AtomicIsize = AtomicIsize::new(0);
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
index 9611e197a41c4..fe89dd8c88d97 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
@@ -90,6 +90,7 @@
 #![feature(slice_group_by)]
 #![feature(split_array)]
 #![feature(strict_provenance)]
+#![feature(strict_provenance_atomic_ptr)]
 #![feature(trusted_random_access)]
 #![feature(unsize)]
 #![feature(unzip_option)]
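Usage sketch (illustrative; not part of the applied diff): the tagged-pointer
pattern these operations target, written against the method names as
introduced in this patch (patch 2 renames the arithmetic ones). Assumes a
nightly toolchain with the `strict_provenance_atomic_ptr` and
`strict_provenance` features enabled; the `Node` type is hypothetical.

    #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    use core::sync::atomic::{AtomicPtr, Ordering};

    // `align(2)` guarantees the low bit of any `Node` address is zero,
    // so it is free to use as a tag bit.
    #[repr(align(2))]
    struct Node(u8);

    fn main() {
        let node = Node(7);
        let atom = AtomicPtr::new(&node as *const Node as *mut Node);

        // Atomically set the tag bit; the stored pointer keeps the
        // provenance of the current pointer, unlike an AtomicUsize
        // round-trip through `as usize` / `as *mut _`.
        atom.fetch_or(1, Ordering::AcqRel);
        assert_eq!(atom.load(Ordering::Acquire).addr() & 1, 1);

        // Atomically clear the tag and recover the original pointer.
        atom.fetch_and(!1, Ordering::AcqRel);
        assert!(core::ptr::eq(atom.load(Ordering::Acquire), &node));
    }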
From e65ecee90eff2c90e12164417cc31ab959c321cf Mon Sep 17 00:00:00 2001
From: Thom Chiovoloni
Date: Sat, 14 May 2022 12:37:19 -0700
Subject: [PATCH 2/2] Rename AtomicPtr::fetch_{add,sub}{,_bytes}

---
 library/core/src/sync/atomic.rs | 38 +++++++++++++++++++-------------------
 library/core/tests/atomic.rs    | 20 ++++++++++----------
 2 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index bb6d82ff13de3..bf8ce6971b87e 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -1462,12 +1462,12 @@ impl<T> AtomicPtr<T> {
     /// to offset the pointer by an amount which is not a multiple of
     /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
     /// work with a deliberately misaligned pointer. In such cases, you may use
-    /// the [`fetch_add_bytes`](Self::fetch_add_bytes) method instead.
+    /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
     ///
-    /// `fetch_add` takes an [`Ordering`] argument which describes the memory
-    /// ordering of this operation. All ordering modes are possible. Note that
-    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
-    /// and using [`Release`] makes the load part [`Relaxed`].
+    /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
+    /// memory ordering of this operation. All ordering modes are possible. Note
+    /// that using [`Acquire`] makes the store part of this operation
+    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
     ///
     /// **Note**: This method is only available on platforms that support atomic
     /// operations on [`AtomicPtr`].
@@ -1481,15 +1481,15 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    /// assert_eq!(atom.fetch_add(1, Ordering::Relaxed).addr(), 0);
+    /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
     /// // Note: units of `size_of::<i64>()`.
     /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_add(&self, val: usize, order: Ordering) -> *mut T {
-        self.fetch_add_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
+    pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
+        self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::<T>()), order)
     }
 
     /// Offsets the pointer's address by subtracting `val` (in units of `T`),
@@ -1502,9 +1502,9 @@ impl<T> AtomicPtr<T> {
     /// to offset the pointer by an amount which is not a multiple of
     /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
     /// work with a deliberately misaligned pointer. In such cases, you may use
-    /// the [`fetch_sub_bytes`](Self::fetch_sub_bytes) method instead.
+    /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
     ///
-    /// `fetch_sub` takes an [`Ordering`] argument which describes the memory
+    /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
     /// ordering of this operation. All ordering modes are possible. Note that
     /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
     /// and using [`Release`] makes the load part [`Relaxed`].
@@ -1524,7 +1524,7 @@ impl<T> AtomicPtr<T> {
     /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
     ///
     /// assert!(core::ptr::eq(
-    ///     atom.fetch_sub(1, Ordering::Relaxed),
+    ///     atom.fetch_ptr_sub(1, Ordering::Relaxed),
     ///     &array[1],
     /// ));
     /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
@@ -1532,8 +1532,8 @@ impl<T> AtomicPtr<T> {
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_sub(&self, val: usize, order: Ordering) -> *mut T {
-        self.fetch_sub_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
+    pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
+        self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::<T>()), order)
     }
 
     /// Offsets the pointer's address by adding `val` *bytes*, returning the
@@ -1542,7 +1542,7 @@ impl<T> AtomicPtr<T> {
     /// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
     /// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
     ///
-    /// `fetch_add_bytes` takes an [`Ordering`] argument which describes the
+    /// `fetch_byte_add` takes an [`Ordering`] argument which describes the
     /// memory ordering of this operation. All ordering modes are possible. Note
     /// that using [`Acquire`] makes the store part of this operation
     /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
@@ -1560,14 +1560,14 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    /// assert_eq!(atom.fetch_add_bytes(1, Ordering::Relaxed).addr(), 0);
+    /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
     /// // Note: in units of bytes, not `size_of::<i64>()`.
     /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_add_bytes(&self, val: usize, order: Ordering) -> *mut T {
+    pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
         #[cfg(not(bootstrap))]
         // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
@@ -1586,7 +1586,7 @@ impl<T> AtomicPtr<T> {
     /// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
     /// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
     ///
-    /// `fetch_add_bytes` takes an [`Ordering`] argument which describes the
+    /// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
     /// memory ordering of this operation. All ordering modes are possible. Note
     /// that using [`Acquire`] makes the store part of this operation
     /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
@@ -1604,13 +1604,13 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::invalid_mut(1));
-    /// assert_eq!(atom.fetch_sub_bytes(1, Ordering::Relaxed).addr(), 1);
+    /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
     /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_sub_bytes(&self, val: usize, order: Ordering) -> *mut T {
+    pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
         #[cfg(not(bootstrap))]
         // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
index 2c048435dde97..13b12db209a76 100644
--- a/library/core/tests/atomic.rs
+++ b/library/core/tests/atomic.rs
@@ -131,16 +131,16 @@
 #[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
 fn ptr_add_null() {
     let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    assert_eq!(atom.fetch_add(1, SeqCst).addr(), 0);
+    assert_eq!(atom.fetch_ptr_add(1, SeqCst).addr(), 0);
     assert_eq!(atom.load(SeqCst).addr(), 8);
 
-    assert_eq!(atom.fetch_add_bytes(1, SeqCst).addr(), 8);
+    assert_eq!(atom.fetch_byte_add(1, SeqCst).addr(), 8);
     assert_eq!(atom.load(SeqCst).addr(), 9);
 
-    assert_eq!(atom.fetch_sub(1, SeqCst).addr(), 9);
+    assert_eq!(atom.fetch_ptr_sub(1, SeqCst).addr(), 9);
     assert_eq!(atom.load(SeqCst).addr(), 1);
 
-    assert_eq!(atom.fetch_sub_bytes(1, SeqCst).addr(), 1);
+    assert_eq!(atom.fetch_byte_sub(1, SeqCst).addr(), 1);
     assert_eq!(atom.load(SeqCst).addr(), 0);
 }
 
@@ -150,23 +150,23 @@ fn ptr_add_data() {
     let num = 0i64;
     let n = &num as *const i64 as *mut _;
     let atom = AtomicPtr::<i64>::new(n);
-    assert_eq!(atom.fetch_add(1, SeqCst), n);
+    assert_eq!(atom.fetch_ptr_add(1, SeqCst), n);
     assert_eq!(atom.load(SeqCst), n.wrapping_add(1));
 
-    assert_eq!(atom.fetch_sub(1, SeqCst), n.wrapping_add(1));
+    assert_eq!(atom.fetch_ptr_sub(1, SeqCst), n.wrapping_add(1));
     assert_eq!(atom.load(SeqCst), n);
     let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();
 
-    assert_eq!(atom.fetch_add_bytes(1, SeqCst), n);
+    assert_eq!(atom.fetch_byte_add(1, SeqCst), n);
     assert_eq!(atom.load(SeqCst), bytes_from_n(1));
 
-    assert_eq!(atom.fetch_add_bytes(5, SeqCst), bytes_from_n(1));
+    assert_eq!(atom.fetch_byte_add(5, SeqCst), bytes_from_n(1));
     assert_eq!(atom.load(SeqCst), bytes_from_n(6));
 
-    assert_eq!(atom.fetch_sub_bytes(1, SeqCst), bytes_from_n(6));
+    assert_eq!(atom.fetch_byte_sub(1, SeqCst), bytes_from_n(6));
     assert_eq!(atom.load(SeqCst), bytes_from_n(5));
 
-    assert_eq!(atom.fetch_sub_bytes(5, SeqCst), bytes_from_n(5));
+    assert_eq!(atom.fetch_byte_sub(5, SeqCst), bytes_from_n(5));
     assert_eq!(atom.load(SeqCst), n);
 }
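Migration sketch (illustrative; not from the patch): code that previously
stepped an AtomicUsize by `size_of::<T>()` keeps the same shape with
AtomicPtr while retaining provenance, using the post-rename names. The
`slots` buffer is hypothetical; assumes a nightly toolchain with the
`strict_provenance_atomic_ptr` and `strict_provenance` features.

    #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    use core::sync::atomic::{AtomicPtr, Ordering};

    fn main() {
        let mut slots = [0u64; 4];
        let base = slots.as_mut_ptr();
        let atom = AtomicPtr::new(base);

        // One element forward, in units of `size_of::<u64>()` -- the
        // replacement for stepping an AtomicUsize by 8.
        let prev = atom.fetch_ptr_add(1, Ordering::Relaxed);
        assert!(core::ptr::eq(prev, base));

        // Byte-granular offset for deliberately misaligned pointers.
        atom.fetch_byte_add(2, Ordering::Relaxed);
        assert_eq!(atom.load(Ordering::Relaxed).addr(), base.addr() + 8 + 2);
    }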