@@ -1462,12 +1462,12 @@ impl<T> AtomicPtr<T> {
     /// to offset the pointer by an amount which is not a multiple of
     /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
     /// work with a deliberately misaligned pointer. In such cases, you may use
-    /// the [`fetch_add_bytes`](Self::fetch_add_bytes) method instead.
+    /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
     ///
-    /// `fetch_add` takes an [`Ordering`] argument which describes the memory
-    /// ordering of this operation. All ordering modes are possible. Note that
-    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
-    /// and using [`Release`] makes the load part [`Relaxed`].
+    /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
+    /// memory ordering of this operation. All ordering modes are possible. Note
+    /// that using [`Acquire`] makes the store part of this operation
+    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
     ///
     /// **Note**: This method is only available on platforms that support atomic
     /// operations on [`AtomicPtr`].
@@ -1481,15 +1481,15 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    /// assert_eq!(atom.fetch_add(1, Ordering::Relaxed).addr(), 0);
+    /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
     /// // Note: units of `size_of::<i64>()`.
     /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_add(&self, val: usize, order: Ordering) -> *mut T {
-        self.fetch_add_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
+    pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
+        self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::<T>()), order)
     }

     /// Offsets the pointer's address by subtracting `val` (in units of `T`),
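As a quick sanity check on the renamed method, here is a minimal standalone sketch of `fetch_ptr_add` in use. It assumes a nightly toolchain with the `strict_provenance_atomic_ptr` feature gate, since the API is unstable (tracking issue #95228):

```rust
#![feature(strict_provenance_atomic_ptr)]

use core::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let mut data = [0i64; 4];
    let atom = AtomicPtr::new(data.as_mut_ptr());

    // Like `fetch_add` on the integer atomics, this returns the *previous*
    // value; unlike them, `val` is counted in elements of `T`, not in bytes.
    let prev = atom.fetch_ptr_add(2, Ordering::Relaxed);
    assert!(core::ptr::eq(prev, &data[0]));

    // The stored pointer advanced by `2 * size_of::<i64>()` bytes.
    assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &data[2]));
}
```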
@@ -1502,9 +1502,9 @@ impl<T> AtomicPtr<T> {
     /// to offset the pointer by an amount which is not a multiple of
     /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
     /// work with a deliberately misaligned pointer. In such cases, you may use
-    /// the [`fetch_sub_bytes`](Self::fetch_sub_bytes) method instead.
+    /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
     ///
-    /// `fetch_sub` takes an [`Ordering`] argument which describes the memory
+    /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
     /// ordering of this operation. All ordering modes are possible. Note that
     /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
     /// and using [`Release`] makes the load part [`Relaxed`].
@@ -1524,16 +1524,16 @@ impl<T> AtomicPtr<T> {
     /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
     ///
     /// assert!(core::ptr::eq(
-    ///     atom.fetch_sub(1, Ordering::Relaxed),
+    ///     atom.fetch_ptr_sub(1, Ordering::Relaxed),
     ///     &array[1],
     /// ));
     /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_sub(&self, val: usize, order: Ordering) -> *mut T {
-        self.fetch_sub_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
+    pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
+        self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::<T>()), order)
     }

     /// Offsets the pointer's address by adding `val` *bytes*, returning the
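And the mirror-image sketch for `fetch_ptr_sub`, again nightly-only behind the same feature gate; this is the doctest above spelled out as a standalone program:

```rust
#![feature(strict_provenance_atomic_ptr)]

use core::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let array = [1i32, 2, 3];
    // Start one element past the front, as in the doctest.
    let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut i32);

    // Subtracting 1 steps back by `size_of::<i32>()` bytes, i.e. one element.
    let prev = atom.fetch_ptr_sub(1, Ordering::Relaxed);
    assert!(core::ptr::eq(prev, &array[1]));
    assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
}
```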
@@ -1542,7 +1542,7 @@ impl<T> AtomicPtr<T> {
     /// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
     /// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
     ///
-    /// `fetch_add_bytes` takes an [`Ordering`] argument which describes the
+    /// `fetch_byte_add` takes an [`Ordering`] argument which describes the
     /// memory ordering of this operation. All ordering modes are possible. Note
     /// that using [`Acquire`] makes the store part of this operation
     /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
@@ -1560,14 +1560,14 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    /// assert_eq!(atom.fetch_add_bytes(1, Ordering::Relaxed).addr(), 0);
+    /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
     /// // Note: in units of bytes, not `size_of::<i64>()`.
     /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_add_bytes(&self, val: usize, order: Ordering) -> *mut T {
+    pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
         #[cfg(not(bootstrap))]
         // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
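For the byte-wise variant, a sketch of the "deliberately misaligned pointer" case the docs mention, e.g. setting a low tag bit in an aligned pointer. It assumes nightly with both `strict_provenance` (for `addr`) and `strict_provenance_atomic_ptr`:

```rust
#![feature(strict_provenance, strict_provenance_atomic_ptr)]

use core::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let mut value = 0u64;
    let atom = AtomicPtr::new(&mut value as *mut u64);
    let base = atom.load(Ordering::Relaxed).addr();

    // `fetch_ptr_add` could only move in 8-byte steps here; the byte-wise
    // method can set a low "tag" bit, since `base` is 8-aligned and thus even.
    let prev = atom.fetch_byte_add(1, Ordering::Relaxed);
    assert_eq!(prev.addr(), base);
    assert_eq!(atom.load(Ordering::Relaxed).addr(), base | 1);
}
```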
@@ -1586,7 +1586,7 @@ impl<T> AtomicPtr<T> {
     /// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
     /// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
     ///
-    /// `fetch_add_bytes` takes an [`Ordering`] argument which describes the
+    /// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
     /// memory ordering of this operation. All ordering modes are possible. Note
     /// that using [`Acquire`] makes the store part of this operation
     /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
@@ -1604,13 +1604,13 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::invalid_mut(1));
-    /// assert_eq!(atom.fetch_sub_bytes(1, Ordering::Relaxed).addr(), 1);
+    /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_sub_bytes(&self, val: usize, order: Ordering) -> *mut T {
+    pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
         #[cfg(not(bootstrap))]
         // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
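Finally, a sketch showing how the two families line up after the rename: `fetch_ptr_add(n)` is exactly `fetch_byte_add(n * size_of::<T>())`, so a byte-wise subtraction of the same amount round-trips. Same nightly feature-gate assumptions as above:

```rust
#![feature(strict_provenance, strict_provenance_atomic_ptr)]

use core::mem::size_of;
use core::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());

    // Element-wise add: offsets by `1 * size_of::<i64>()` = 8 bytes.
    atom.fetch_ptr_add(1, Ordering::Relaxed);
    assert_eq!(atom.load(Ordering::Relaxed).addr(), size_of::<i64>());

    // The matching byte-wise subtraction returns to the null address.
    atom.fetch_byte_sub(size_of::<i64>(), Ordering::Relaxed);
    assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
}
```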