@@ -28,7 +28,7 @@ terms of the MIT license. A copy of the license can be found in the file
 extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
   mi_assert_internal(page->xblock_size == 0 || mi_page_block_size(page) >= size);
   mi_block_t* const block = page->free;
-  if (mi_unlikely(block == NULL)) {
+  if mi_unlikely(block == NULL) {
     return _mi_malloc_generic(heap, size, zero);
   }
   mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
@@ -38,9 +38,9 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
 
   // zero the block?
-  if (mi_unlikely(zero)) {
+  if mi_unlikely(zero) {
     mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks
-    const size_t zsize = (mi_unlikely(page->is_zero) ? sizeof(block->next) : page->xblock_size);
+    const size_t zsize = (page->is_zero ? sizeof(block->next) : page->xblock_size);
     _mi_memzero_aligned(block, zsize);
   }
 
@@ -108,7 +108,7 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t si
 
 // The main allocation function
 mi_decl_nodiscard extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
-  if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
+  if mi_likely(size <= MI_SMALL_SIZE_MAX) {
     return mi_heap_malloc_small_zero(heap, size, zero);
   }
   else {
@@ -350,7 +350,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
   do {
     use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
-    if (mi_unlikely(use_delayed)) {
+    if mi_unlikely(use_delayed) {
       // unlikely: this only happens on the first concurrent free in a page that is in the full list
       tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
     }
@@ -361,7 +361,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
     }
   } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
 
-  if (mi_unlikely(use_delayed)) {
+  if mi_unlikely(use_delayed) {
     // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
     mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
     mi_assert_internal(heap != NULL);
@@ -387,20 +387,20 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
 static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
 {
   // and push it on the free list
-  if (mi_likely(local)) {
+  if mi_likely(local) {
     // owning thread can free a block directly
-    if (mi_unlikely(mi_check_is_double_free(page, block))) return;
+    if mi_unlikely(mi_check_is_double_free(page, block)) return;
     mi_check_padding(page, block);
     #if (MI_DEBUG != 0)
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
     page->used--;
-    if (mi_unlikely(mi_page_all_free(page))) {
+    if mi_unlikely(mi_page_all_free(page)) {
       _mi_page_retire(page);
     }
-    else if (mi_unlikely(mi_page_is_in_full(page))) {
+    else if mi_unlikely(mi_page_is_in_full(page)) {
       _mi_page_unfull(page);
     }
   }
@@ -433,26 +433,26 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
 {
   MI_UNUSED(msg);
 #if (MI_DEBUG > 0)
-  if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) {
+  if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
     _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
     return NULL;
   }
 #endif
 
   mi_segment_t* const segment = _mi_ptr_segment(p);
-  if (mi_unlikely(segment == NULL)) return NULL;  // checks also for (p==NULL)
+  if mi_unlikely(segment == NULL) return NULL;  // checks also for (p==NULL)
 
 #if (MI_DEBUG > 0)
-  if (mi_unlikely(!mi_is_in_heap_region(p))) {
+  if mi_unlikely(!mi_is_in_heap_region(p)) {
     _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
       "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
-    if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
+    if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
       _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
     }
   }
 #endif
 #if (MI_DEBUG > 0 || MI_SECURE >= 4)
-  if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
+  if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
     _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
     return NULL;
   }
@@ -464,23 +464,23 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
 void mi_free(void* p) mi_attr_noexcept
 {
   mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
-  if (mi_unlikely(segment == NULL)) return;
+  if mi_unlikely(segment == NULL) return;
 
   mi_threadid_t tid = _mi_thread_id();
   mi_page_t* const page = _mi_segment_page_of(segment, p);
   mi_block_t* const block = (mi_block_t*)p;
 
-  if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) {  // the thread id matches and it is not a full page, nor has aligned blocks
+  if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) {  // the thread id matches and it is not a full page, nor has aligned blocks
     // local, and not full or aligned
-    if (mi_unlikely(mi_check_is_double_free(page,block))) return;
+    if mi_unlikely(mi_check_is_double_free(page,block)) return;
     mi_check_padding(page, block);
     mi_stat_free(page, block);
     #if (MI_DEBUG != 0)
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
-    if (mi_unlikely(--page->used == 0)) {   // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
+    if mi_unlikely(--page->used == 0) {   // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
       _mi_page_retire(page);
     }
   }
@@ -526,7 +526,7 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
   const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
   if (segment == NULL) return 0;  // also returns 0 if `p == NULL`
   const mi_page_t* const page = _mi_segment_page_of(segment, p);
-  if (mi_likely(!mi_page_has_aligned(page))) {
+  if mi_likely(!mi_page_has_aligned(page)) {
     const mi_block_t* block = (const mi_block_t*)p;
     return mi_page_usable_size_of(page, block);
   }
@@ -621,18 +621,18 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
   // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
   // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
   const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
-  if (mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0)) {  // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
+  if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) {  // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
     // todo: adjust potential padding to reflect the new size?
     return p;  // reallocation still fits and not more than 50% waste
   }
   void* newp = mi_heap_malloc(heap,newsize);
-  if (mi_likely(newp != NULL)) {
+  if mi_likely(newp != NULL) {
     if (zero && newsize > size) {
       // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
       const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
       memset((uint8_t*)newp + start, 0, newsize - start);
     }
-    if (mi_likely(p != NULL)) {
+    if mi_likely(p != NULL) {
       _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
       mi_free(p); // only free the original pointer if successful
     }
@@ -857,13 +857,13 @@ static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) {
 
 mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
   void* p = mi_malloc(size);
-  if (mi_unlikely(p == NULL)) return mi_try_new(size,false);
+  if mi_unlikely(p == NULL) return mi_try_new(size,false);
   return p;
 }
 
 mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
   void* p = mi_malloc(size);
-  if (mi_unlikely(p == NULL)) return mi_try_new(size, true);
+  if mi_unlikely(p == NULL) return mi_try_new(size, true);
   return p;
 }
 
@@ -887,7 +887,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, siz
 
 mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
   size_t total;
-  if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
+  if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
     mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
     return NULL;
   }
@@ -906,7 +906,7 @@ mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {
 
 mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
   size_t total;
-  if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) {
+  if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) {
     mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
     return NULL;
   }
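
The change is mechanical: every call site drops the extra parentheses around `mi_likely`/`mi_unlikely`, writing `if mi_unlikely(cond) { ... }` instead of `if (mi_unlikely(cond)) { ... }`. That only works if the macros supply their own parentheses, and it leaves room for a C++20 build to place a `[[likely]]`/`[[unlikely]]` attribute after the condition, which the old spelling could not express. Below is a minimal sketch of such definitions, using the macro names from the diff; the actual definitions live in mimalloc's internal headers and may differ in detail, and `demo_pop_or_slow` is a hypothetical call site for illustration only.

```c
#include <stdbool.h>
#include <stddef.h>

// Branch-prediction hints written for the call style `if mi_unlikely(cond) { ... }`:
// the macro itself provides the parentheses around the condition.
#if defined(__GNUC__) || defined(__clang__)
  #define mi_unlikely(x)  (__builtin_expect(!!(x), false))   // expands to a parenthesized expression
  #define mi_likely(x)    (__builtin_expect(!!(x), true))
#elif defined(__cplusplus) && (__cplusplus >= 202002L)
  #define mi_unlikely(x)  (x) [[unlikely]]                   // `if (x) [[unlikely]] { ... }`
  #define mi_likely(x)    (x) [[likely]]
#else
  #define mi_unlikely(x)  (x)                                // no hint available: just the condition
  #define mi_likely(x)    (x)
#endif

// Hypothetical call site in the new style, mirroring the fast path of _mi_page_malloc above.
static void* demo_pop_or_slow(void* free_block, size_t size, void* (*slow_path)(size_t)) {
  if mi_unlikely(free_block == NULL) {
    return slow_path(size);   // unlikely: fall back to the generic/slow path
  }
  return free_block;          // likely: use the block from the free list
}
```

With the old call style, the attribute variant would have expanded to `if ( (x) [[unlikely]] )`, which is not valid syntax; moving the parentheses into the macro is what makes the C++20 attribute form possible while keeping `__builtin_expect` on GCC and Clang.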