@@ -230,6 +230,100 @@ use crate::intrinsics;
230230
231231use  crate :: hint:: spin_loop; 
232232
// Private super-trait of `AtomicPrimitive` (the "sealed trait" pattern):
// because `Sealed` is not exported, no code outside this module can satisfy
// the `Sealed` bound, so `AtomicPrimitive` cannot be implemented externally
// even though it is `pub`.
trait Sealed {}
234+ 
/// A marker trait for primitive types which can be modified atomically.
///
/// This is an implementation detail for <code>[Atomic]\<T></code> which may disappear or be replaced at any time.
///
/// # Safety
///
/// Types implementing this trait must be primitives that can be modified atomically.
///
/// The associated `Self::AtomicInner` type must have the same size and bit validity as `Self`,
/// but may have a higher alignment requirement, so the following `transmute`s are sound:
///
/// - `&mut Self::AtomicInner` as `&mut Self`
/// - `Self` as `Self::AtomicInner` or the reverse
#[unstable(
    feature = "atomic_internals",
    reason = "implementation detail which may disappear or be replaced at any time",
    issue = "none"
)]
// The private `Sealed` bound on a public trait would normally warn/error;
// here it is the point (keeps the trait unimplementable downstream).
#[allow(private_bounds)]
pub unsafe trait AtomicPrimitive: Sized + Copy + Sealed {
    // Hidden: `AtomicInner` is only meant to be consumed via the `Atomic<T>`
    // alias below, not named directly by users.
    #[doc(hidden)]
    type AtomicInner: Sized;
}
258+ 
// Implements `Sealed` and `AtomicPrimitive` for one primitive, mapping it to
// its atomic shadow type. This uses the unstable macros-2.0 `macro` item
// syntax (`decl_macro`), which core is allowed to use.
//
// Parameters:
// - `$Atom $(<$T>)? ($Primitive)`: the atomic type and the primitive it
//   wraps; the optional generic parameter exists for `AtomicPtr<T>`.
// - `size($size)`: string matched against `cfg(target_has_atomic_load_store)`,
//   so the `AtomicPrimitive` impl only exists where the target supports
//   atomic loads/stores of this width.
// - `align($align)`: accepted for documentation/symmetry at the call sites;
//   the expansion below does not currently use it.
macro impl_atomic_primitive(
    $Atom:ident $(<$T:ident>)? ($Primitive:ty),
    size($size:literal),
    align($align:literal) $(,)?
) {
    impl $(<$T>)? Sealed for $Primitive {}

    #[unstable(
        feature = "atomic_internals",
        reason = "implementation detail which may disappear or be replaced at any time",
        issue = "none"
    )]
    // Gate on load/store support: without it the `AtomicInner` type itself
    // does not exist for this width.
    #[cfg(target_has_atomic_load_store = $size)]
    unsafe impl $(<$T>)? AtomicPrimitive for $Primitive {
        type AtomicInner = $Atom $(<$T>)?;
    }
}
276+ 
// Fixed-width primitives: the atomic shadow type's alignment equals its size
// (1 for the 8-bit types up through 16 for the 128-bit types).
impl_atomic_primitive!(AtomicBool(bool), size("8"), align(1));
impl_atomic_primitive!(AtomicI8(i8), size("8"), align(1));
impl_atomic_primitive!(AtomicU8(u8), size("8"), align(1));
impl_atomic_primitive!(AtomicI16(i16), size("16"), align(2));
impl_atomic_primitive!(AtomicU16(u16), size("16"), align(2));
impl_atomic_primitive!(AtomicI32(i32), size("32"), align(4));
impl_atomic_primitive!(AtomicU32(u32), size("32"), align(4));
impl_atomic_primitive!(AtomicI64(i64), size("64"), align(8));
impl_atomic_primitive!(AtomicU64(u64), size("64"), align(8));
impl_atomic_primitive!(AtomicI128(i128), size("128"), align(16));
impl_atomic_primitive!(AtomicU128(u128), size("128"), align(16));
288+ 
// Pointer-sized primitives: `size("ptr")` matches
// `cfg(target_has_atomic_load_store = "ptr")`, while the alignment literal
// has to be selected per target via `target_pointer_width`, since the macro
// takes a literal and cannot compute it.
#[cfg(target_pointer_width = "16")]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(2));
#[cfg(target_pointer_width = "32")]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(4));
#[cfg(target_pointer_width = "64")]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(8));

#[cfg(target_pointer_width = "16")]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(2));
#[cfg(target_pointer_width = "32")]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(4));
#[cfg(target_pointer_width = "64")]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(8));

// `AtomicPtr<T>` is the one generic case; the macro's optional `<$T>`
// parameter exists for it.
#[cfg(target_pointer_width = "16")]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(2));
#[cfg(target_pointer_width = "32")]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(4));
#[cfg(target_pointer_width = "64")]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(8));
309+ 
/// A memory location which can be safely modified from multiple threads.
///
/// This has the same size and bit validity as the underlying type `T`. However,
/// the alignment of this type is always equal to its size, even on targets where
/// `T` has alignment less than its size.
///
/// For more about the differences between atomic types and non-atomic types as
/// well as information about the portability of this type, please see the
/// [module-level documentation].
///
/// **Note:** This type is only available on platforms that support atomic loads
/// and stores of `T`.
///
/// [module-level documentation]: crate::sync::atomic
// `Atomic<bool>` == `AtomicBool`, `Atomic<*mut T>` == `AtomicPtr<T>`, etc.:
// the alias resolves through `AtomicPrimitive::AtomicInner` so it denotes the
// existing concrete atomic type rather than introducing a new one.
#[unstable(feature = "generic_atomic", issue = "130539")]
pub type Atomic<T> = <T as AtomicPrimitive>::AtomicInner;
326+ 
// Some architectures don't have byte-sized atomics, which results in LLVM
// emulating them using a LL/SC loop. However for AtomicBool we can take
// advantage of the fact that it only ever contains 0 or 1 and use atomic OR/AND