 extern crate libc;

 use core::ptr;
-use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
+use core::sync::atomic::{AtomicBool, Ordering};

 // The minimum alignment guaranteed by the architecture. This value is used to
 // add fast paths for low alignment values. In practice, the alignment is a
@@ -48,8 +48,9 @@ const MIN_ALIGN: usize = 16;
 const CHUNK_SIZE: usize = 4096 * 16;
 const CHUNK_ALIGN: usize = 4096;

-static HEAP: AtomicPtr<u8> = AtomicPtr::new(ptr::null_mut());
-static HEAP_LEFT: AtomicUsize = AtomicUsize::new(0);
+static mut HEAP: *mut u8 = ptr::null_mut();
+static mut HEAP_LEFT: usize = 0;
+static HEAP_MUTEX: AtomicBool = AtomicBool::new(false);

 #[no_mangle]
 pub extern "C" fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
@@ -61,16 +62,19 @@ pub extern "C" fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
         return imp::allocate(size, align);
     }

-    let heap = HEAP.load(Ordering::SeqCst);
-    let heap_left = HEAP_LEFT.load(Ordering::SeqCst);
-    if new_size < heap_left {
-        HEAP_LEFT.store(heap_left - new_size, Ordering::SeqCst);
-        HEAP.store(heap.offset(new_size as isize), Ordering::SeqCst);
-        return heap;
+    while HEAP_MUTEX.compare_and_swap(false, true, Ordering::SeqCst) {}
+
+    if new_size < HEAP_LEFT {
+        let p = HEAP;
+        HEAP = p.offset(new_size as isize);
+        HEAP_LEFT -= new_size;
+        HEAP_MUTEX.store(false, Ordering::SeqCst);
+        return p;
     } else {
-        HEAP_LEFT.store(CHUNK_SIZE - new_size, Ordering::SeqCst);
         let p = imp::allocate(CHUNK_SIZE, CHUNK_ALIGN);
-        HEAP.store(p.offset(new_size as isize), Ordering::SeqCst);
+        HEAP = p.offset(new_size as isize);
+        HEAP_LEFT = CHUNK_SIZE - new_size;
+        HEAP_MUTEX.store(false, Ordering::SeqCst);
         return p;
     }
 }
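
For reference, a minimal self-contained sketch of the same technique as the new code above: a bump pointer guarded by an AtomicBool spin lock. This is not the commit's code; the arena, the names (ARENA, OFFSET, LOCK, bump_alloc), the use of compare_exchange instead of the now-deprecated compare_and_swap, and the omission of alignment handling and chunk refilling are all illustrative assumptions.

// Sketch only: spin-lock-protected bump allocation over a fixed static arena.
use std::sync::atomic::{AtomicBool, Ordering};

const ARENA_SIZE: usize = 4096 * 16;

static mut ARENA: [u8; ARENA_SIZE] = [0; ARENA_SIZE];
static mut OFFSET: usize = 0;                       // next free byte in ARENA
static LOCK: AtomicBool = AtomicBool::new(false);   // false = free, true = held

/// Hand out `size` bytes from the arena, or null once it is exhausted.
fn bump_alloc(size: usize) -> *mut u8 {
    // Spin until we swap the lock from false to true.
    while LOCK
        .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
        .is_err()
    {}

    let p = unsafe {
        if OFFSET + size <= ARENA_SIZE {
            // addr_of_mut! avoids taking a reference to the mutable static.
            let base = std::ptr::addr_of_mut!(ARENA) as *mut u8;
            let p = base.add(OFFSET);
            OFFSET += size;
            p
        } else {
            std::ptr::null_mut()
        }
    };

    LOCK.store(false, Ordering::SeqCst); // release the spin lock
    p
}

fn main() {
    let a = bump_alloc(64);
    let b = bump_alloc(64);
    assert!(!a.is_null() && !b.is_null());
    assert_eq!(b as usize - a as usize, 64); // consecutive bump allocations
}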