@@ -43,6 +43,46 @@ cfg_if! {
43
43
use std:: ops:: Add ;
44
44
use std:: panic:: { resume_unwind, catch_unwind, AssertUnwindSafe } ;
45
45
46
/// This is a single threaded variant of `AtomicCell` provided by crossbeam.
/// Unlike `Atomic` this is intended for all `Copy` types,
/// but it lacks the explicit ordering arguments.
#[derive(Debug)]
pub struct AtomicCell<T: Copy>(Cell<T>);

// NOTE: the accessors were previously split across two `impl` blocks with
// identical bounds; they are merged here into a single block.
impl<T: Copy> AtomicCell<T> {
    /// Wraps `v` in a new `AtomicCell`.
    #[inline]
    pub fn new(v: T) -> Self {
        AtomicCell(Cell::new(v))
    }

    /// Returns a mutable reference to the contained value; `&mut self`
    /// already proves exclusive access, so no synchronization is needed.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    /// Consumes the cell, returning the contained value.
    #[inline]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    /// Returns a copy of the contained value.
    #[inline]
    pub fn load(&self) -> T {
        self.0.get()
    }

    /// Stores `val` into the cell.
    #[inline]
    pub fn store(&self, val: T) {
        self.0.set(val)
    }

    /// Stores `val` and returns the previously contained value.
    #[inline]
    pub fn swap(&self, val: T) -> T {
        self.0.replace(val)
    }
}
85
+
46
86
/// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
47
87
/// It differs from `AtomicCell` in that it has explicit ordering arguments
48
88
/// and is only intended for use with the native atomic types.
@@ -59,6 +99,11 @@ cfg_if! {
59
99
}
60
100
61
101
impl <T : Copy > Atomic <T > {
102
+ #[ inline]
103
+ pub fn into_inner( self ) -> T {
104
+ self . 0 . into_inner( )
105
+ }
106
+
62
107
#[ inline]
63
108
pub fn load( & self , _: Ordering ) -> T {
64
109
self . 0 . get( )
@@ -68,6 +113,11 @@ cfg_if! {
68
113
pub fn store( & self , val: T , _: Ordering ) {
69
114
self . 0 . set( val)
70
115
}
116
+
117
+ #[ inline]
118
+ pub fn swap( & self , val: T , _: Ordering ) -> T {
119
+ self . 0 . replace( val)
120
+ }
71
121
}
72
122
73
123
impl <T : Copy + PartialEq > Atomic <T > {
@@ -109,6 +159,22 @@ cfg_if! {
109
159
( oper_a( ) , oper_b( ) )
110
160
}
111
161
162
/// Scope handle for the non-parallel build: "spawned" closures are run
/// immediately on the current thread instead of being scheduled.
pub struct SerialScope;

impl SerialScope {
    /// Runs `f` right away, handing it this scope so nested spawns work.
    pub fn spawn<F>(&self, f: F)
    where
        F: FnOnce(&SerialScope),
    {
        f(self)
    }
}

/// Serial stand-in for a scoped-threads API: invokes `f` with a
/// `SerialScope` and returns its result directly.
pub fn scope<F, R>(f: F) -> R
where
    F: FnOnce(&SerialScope) -> R,
{
    f(&SerialScope)
}
177
+
112
178
#[ macro_export]
113
179
macro_rules! parallel {
114
180
( $( $blocks: tt) , * ) => {
@@ -180,6 +246,12 @@ cfg_if! {
180
246
pub fn new<F : FnMut ( usize ) -> T >( mut f: F ) -> WorkerLocal <T > {
181
247
WorkerLocal ( OneThread :: new( f( 0 ) ) )
182
248
}
249
+
250
+ /// Returns the worker-local value for each thread
251
+ #[ inline]
252
+ pub fn into_inner( self ) -> Vec <T > {
253
+ vec![ OneThread :: into_inner( self . 0 ) ]
254
+ }
183
255
}
184
256
185
257
impl <T > Deref for WorkerLocal <T > {
@@ -207,6 +279,16 @@ cfg_if! {
207
279
self . 0
208
280
}
209
281
282
+ #[ inline( always) ]
283
+ pub fn get_mut( & mut self ) -> & mut T {
284
+ & mut self . 0
285
+ }
286
+
287
+ #[ inline( always) ]
288
+ pub fn lock( & self ) -> & T {
289
+ & self . 0
290
+ }
291
+
210
292
#[ inline( always) ]
211
293
pub fn lock_mut( & mut self ) -> & mut T {
212
294
& mut self . 0
@@ -236,6 +318,8 @@ cfg_if! {
236
318
237
319
pub use std:: sync:: atomic:: { AtomicBool , AtomicUsize , AtomicU32 , AtomicU64 } ;
238
320
321
+ pub use crossbeam_utils:: atomic:: AtomicCell ;
322
+
239
323
pub use std:: sync:: Arc as Lrc ;
240
324
pub use std:: sync:: Weak as Weak ;
241
325
@@ -437,6 +521,16 @@ impl<T> RwLock<T> {
437
521
RwLock ( InnerRwLock :: new ( inner) )
438
522
}
439
523
524
+ #[ inline( always) ]
525
+ pub fn into_inner ( self ) -> T {
526
+ self . 0 . into_inner ( )
527
+ }
528
+
529
+ #[ inline( always) ]
530
+ pub fn get_mut ( & mut self ) -> & mut T {
531
+ self . 0 . get_mut ( )
532
+ }
533
+
440
534
#[ cfg( not( parallel_compiler) ) ]
441
535
#[ inline( always) ]
442
536
pub fn read ( & self ) -> ReadGuard < ' _ , T > {
@@ -453,6 +547,11 @@ impl<T> RwLock<T> {
453
547
}
454
548
}
455
549
550
+ #[ inline( always) ]
551
+ pub fn with_read_lock < F : FnOnce ( & T ) -> R , R > ( & self , f : F ) -> R {
552
+ f ( & * self . read ( ) )
553
+ }
554
+
456
555
#[ cfg( not( parallel_compiler) ) ]
457
556
#[ inline( always) ]
458
557
pub fn try_write ( & self ) -> Result < WriteGuard < ' _ , T > , ( ) > {
@@ -481,6 +580,11 @@ impl<T> RwLock<T> {
481
580
}
482
581
}
483
582
583
+ #[ inline( always) ]
584
+ pub fn with_write_lock < F : FnOnce ( & mut T ) -> R , R > ( & self , f : F ) -> R {
585
+ f ( & mut * self . write ( ) )
586
+ }
587
+
484
588
#[ inline( always) ]
485
589
pub fn borrow ( & self ) -> ReadGuard < ' _ , T > {
486
590
self . read ( )
@@ -529,6 +633,12 @@ impl<T> OneThread<T> {
529
633
inner,
530
634
}
531
635
}
636
+
637
+ #[ inline( always) ]
638
+ pub fn into_inner ( value : Self ) -> T {
639
+ value. check ( ) ;
640
+ value. inner
641
+ }
532
642
}
533
643
534
644
impl < T > Deref for OneThread < T > {
0 commit comments