
Commit 7bc602e

redefine mi_likely/mi_unlikely to work with C++ 20 [[likely]] attributes
1 parent d69d4c8 commit 7bc602e

12 files changed: +67 −64 lines changed

ide/vs2022/mimalloc.vcxproj

Lines changed: 2 additions & 2 deletions
@@ -119,7 +119,7 @@
     <PreprocessorDefinitions>MI_DEBUG=3;%(PreprocessorDefinitions);</PreprocessorDefinitions>
     <CompileAs>CompileAsCpp</CompileAs>
     <SupportJustMyCode>false</SupportJustMyCode>
-    <LanguageStandard>Default</LanguageStandard>
+    <LanguageStandard>stdcpp20</LanguageStandard>
   </ClCompile>
   <PostBuildEvent>
     <Command>
@@ -179,7 +179,7 @@
     <InlineFunctionExpansion>Default</InlineFunctionExpansion>
     <CompileAs>CompileAsCpp</CompileAs>
     <IntrinsicFunctions>true</IntrinsicFunctions>
-    <LanguageStandard>Default</LanguageStandard>
+    <LanguageStandard>stdcpp20</LanguageStandard>
   </ClCompile>
   <Link>
     <EnableCOMDATFolding>true</EnableCOMDATFolding>
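
Switching <LanguageStandard> from Default to stdcpp20 compiles these C++-mode configurations with /std:c++20, so _MSVC_LANG reports 202002L and the new [[likely]]/[[unlikely]] branch of the macro definitions below is selected.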

include/mimalloc-internal.h

Lines changed: 13 additions & 10 deletions
@@ -154,8 +154,11 @@ bool _mi_page_is_valid(mi_page_t* page);
 // ------------------------------------------------------

 #if defined(__GNUC__) || defined(__clang__)
-#define mi_unlikely(x)  __builtin_expect(!!(x),false)
-#define mi_likely(x)    __builtin_expect(!!(x),true)
+#define mi_unlikely(x)  (__builtin_expect(!!(x),false))
+#define mi_likely(x)    (__builtin_expect(!!(x),true))
+#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
+#define mi_unlikely(x)  (x) [[unlikely]]
+#define mi_likely(x)    (x) [[likely]]
 #else
 #define mi_unlikely(x)  (x)
 #define mi_likely(x)    (x)
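
The hint thereby moves from expression position into statement position: call sites are now written `if mi_unlikely(x) { ... }` with no parentheses around the macro, and the GCC/Clang forms gain an outer pair of parentheses so that this spelling still expands to a well-formed `if (...)`. A sketch of how one such line expands under each branch (illustrative only, not part of the commit):

  // source:        if mi_unlikely(x) { ... }
  // GCC/Clang:     if ((__builtin_expect(!!(x),false))) { ... }
  // C++20 / MSVC:  if (x) [[unlikely]] { ... }
  // otherwise:     if (x) { ... }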
@@ -277,7 +280,7 @@ static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* tot
     *total = size;
     return false;
   }
-  else if (mi_unlikely(mi_mul_overflow(count, size, total))) {
+  else if mi_unlikely(mi_mul_overflow(count, size, total)) {
     #if MI_DEBUG > 0
     _mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size);
     #endif
@@ -351,7 +354,7 @@ extern mi_decl_thread mi_heap_t* _mi_heap_default;  // default heap to allocate
 static inline mi_heap_t* mi_get_default_heap(void) {
 #if defined(MI_TLS_SLOT)
   mi_heap_t* heap = (mi_heap_t*)mi_tls_slot(MI_TLS_SLOT);
-  if (mi_unlikely(heap == NULL)) {
+  if mi_unlikely(heap == NULL) {
     #ifdef __GNUC__
     __asm("");  // prevent conditional load of the address of _mi_heap_empty
     #endif
@@ -453,7 +456,7 @@ static inline mi_page_t* _mi_ptr_page(void* p) {
 static inline size_t mi_page_block_size(const mi_page_t* page) {
   const size_t bsize = page->xblock_size;
   mi_assert_internal(bsize > 0);
-  if (mi_likely(bsize < MI_HUGE_BLOCK_SIZE)) {
+  if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
     return bsize;
   }
   else {
@@ -607,11 +610,11 @@ static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {

 static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
   void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
-  return (mi_unlikely(p==null) ? NULL : p);
+  return (p==null ? NULL : p);
 }

 static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
-  uintptr_t x = (uintptr_t)(mi_unlikely(p==NULL) ? null : p);
+  uintptr_t x = (uintptr_t)(p==NULL ? null : p);
   return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
 }
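
Note that these two call sites drop the hint altogether rather than adopting the new syntax: the C++20 definition expands mi_unlikely(x) to `(x) [[unlikely]]`, which is only well-formed where the attribute can attach to a statement (directly after an `if`), so the macro can no longer appear inside a `?:` expression.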

@@ -638,7 +641,7 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t*
   mi_block_t* next = mi_block_nextx(page,block,page->keys);
   // check for free list corruption: is `next` at least in the same page?
   // TODO: check if `next` is `page->block_size` aligned?
-  if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) {
+  if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) {
     _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
     next = NULL;
   }
@@ -691,12 +694,12 @@ size_t _mi_os_numa_node_count_get(void);

 extern _Atomic(size_t) _mi_numa_node_count;
 static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
-  if (mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1)) return 0;
+  if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
   else return _mi_os_numa_node_get(tld);
 }
 static inline size_t _mi_os_numa_node_count(void) {
   const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
-  if (mi_likely(count>0)) return count;
+  if mi_likely(count > 0) { return count; }
   else return _mi_os_numa_node_count_get();
 }
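
These two branches also gain braces around the return statements; presumably this keeps the expanded [[likely]] attribute attached to an explicit compound statement ahead of the `else`, although a bare statement after the attribute would parse as well.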

src/alloc-aligned.c

Lines changed: 6 additions & 6 deletions
@@ -49,19 +49,19 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
 {
   // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size.
   mi_assert(alignment > 0);
-  if (mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment))) { // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
+  if mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment)) { // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
     #if MI_DEBUG > 0
     _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment);
     #endif
     return NULL;
   }
-  if (mi_unlikely(alignment > MI_ALIGNMENT_MAX)) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers)
+  if mi_unlikely(alignment > MI_ALIGNMENT_MAX) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers)
     #if MI_DEBUG > 0
     _mi_error_message(EOVERFLOW, "aligned allocation has a maximum alignment of %zu (size %zu, alignment %zu)\n", MI_ALIGNMENT_MAX, size, alignment);
     #endif
     return NULL;
   }
-  if (mi_unlikely(size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
+  if mi_unlikely(size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
     #if MI_DEBUG > 0
     _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
     #endif
@@ -71,10 +71,10 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
   const size_t padsize = size + MI_PADDING_SIZE; // note: cannot overflow due to earlier size > PTRDIFF_MAX check

   // try first if there happens to be a small block available with just the right alignment
-  if (mi_likely(padsize <= MI_SMALL_SIZE_MAX)) {
+  if mi_likely(padsize <= MI_SMALL_SIZE_MAX) {
     mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
     const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
-    if (mi_likely(page->free != NULL && is_aligned))
+    if mi_likely(page->free != NULL && is_aligned)
     {
       #if MI_STAT>1
       mi_heap_stat_increase(heap, malloc, size);
@@ -102,7 +102,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap,
   #if !MI_PADDING
   // without padding, any small sized allocation is naturally aligned (see also `_mi_segment_page_start`)
   if (!_mi_is_power_of_two(alignment)) return NULL;
-  if (mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX))
+  if mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX)
   #else
   // with padding, we can only guarantee this for fixed alignments
   if (mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)))

src/alloc-override-osx.c

Lines changed: 1 addition & 1 deletion
@@ -254,7 +254,7 @@ static malloc_zone_t mi_malloc_zone = {
 static inline malloc_zone_t* mi_get_default_zone(void)
 {
   static bool init;
-  if (mi_unlikely(!init)) {
+  if mi_unlikely(!init) {
     init = true;
     malloc_zone_register(&mi_malloc_zone);  // by calling register we avoid a zone error on free (see <http://eatmyrandom.blogspot.com/2010/03/mallocfree-interception-on-mac-os-x.html>)
   }

src/alloc-posix.c

Lines changed: 2 additions & 2 deletions
@@ -83,7 +83,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcep
 }

 mi_decl_nodiscard mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept {
-  if (mi_unlikely((size&(alignment-1)) != 0)) { // C11 requires alignment>0 && integral multiple, see <https://en.cppreference.com/w/c/memory/aligned_alloc>
+  if mi_unlikely((size&(alignment-1)) != 0) { // C11 requires alignment>0 && integral multiple, see <https://en.cppreference.com/w/c/memory/aligned_alloc>
     #if MI_DEBUG > 0
     _mi_error_message(EOVERFLOW, "(mi_)aligned_alloc requires the size to be an integral multiple of the alignment (size %zu, alignment %zu)\n", size, alignment);
     #endif
@@ -109,7 +109,7 @@ mi_decl_nodiscard int mi_reallocarr( void* p, size_t count, size_t size ) mi_att
   }
   void** op = (void**)p;
   void* newp = mi_reallocarray(*op, count, size);
-  if (mi_unlikely(newp == NULL)) return errno;
+  if mi_unlikely(newp == NULL) { return errno; }
   *op = newp;
   return 0;
 }

src/alloc.c

Lines changed: 27 additions & 27 deletions
@@ -28,7 +28,7 @@ terms of the MIT license. A copy of the license can be found in the file
 extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
   mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
   mi_block_t* const block = page->free;
-  if (mi_unlikely(block == NULL)) {
+  if mi_unlikely(block == NULL) {
     return _mi_malloc_generic(heap, size, zero);
   }
   mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
@@ -38,9 +38,9 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);

   // zero the block?
-  if (mi_unlikely(zero)) {
+  if mi_unlikely(zero) {
     mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks
-    const size_t zsize = (mi_unlikely(page->is_zero) ? sizeof(block->next) : page->xblock_size);
+    const size_t zsize = (page->is_zero ? sizeof(block->next) : page->xblock_size);
     _mi_memzero_aligned(block, zsize);
   }
@@ -108,7 +108,7 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t si

 // The main allocation function
 mi_decl_nodiscard extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
-  if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
+  if mi_likely(size <= MI_SMALL_SIZE_MAX) {
     return mi_heap_malloc_small_zero(heap, size, zero);
   }
   else {
@@ -350,7 +350,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
   do {
     use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
-    if (mi_unlikely(use_delayed)) {
+    if mi_unlikely(use_delayed) {
       // unlikely: this only happens on the first concurrent free in a page that is in the full list
       tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
     }
@@ -361,7 +361,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
     }
   } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));

-  if (mi_unlikely(use_delayed)) {
+  if mi_unlikely(use_delayed) {
     // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
     mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
     mi_assert_internal(heap != NULL);
@@ -387,20 +387,20 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
 static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
 {
   // and push it on the free list
-  if (mi_likely(local)) {
+  if mi_likely(local) {
     // owning thread can free a block directly
-    if (mi_unlikely(mi_check_is_double_free(page, block))) return;
+    if mi_unlikely(mi_check_is_double_free(page, block)) return;
     mi_check_padding(page, block);
     #if (MI_DEBUG!=0)
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
     page->used--;
-    if (mi_unlikely(mi_page_all_free(page))) {
+    if mi_unlikely(mi_page_all_free(page)) {
       _mi_page_retire(page);
     }
-    else if (mi_unlikely(mi_page_is_in_full(page))) {
+    else if mi_unlikely(mi_page_is_in_full(page)) {
       _mi_page_unfull(page);
     }
   }
@@ -433,26 +433,26 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
 {
   MI_UNUSED(msg);
 #if (MI_DEBUG>0)
-  if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) {
+  if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
     _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
     return NULL;
   }
 #endif

   mi_segment_t* const segment = _mi_ptr_segment(p);
-  if (mi_unlikely(segment == NULL)) return NULL;  // checks also for (p==NULL)
+  if mi_unlikely(segment == NULL) return NULL;  // checks also for (p==NULL)

 #if (MI_DEBUG>0)
-  if (mi_unlikely(!mi_is_in_heap_region(p))) {
+  if mi_unlikely(!mi_is_in_heap_region(p)) {
     _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
       "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
-    if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
+    if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
       _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
     }
   }
 #endif
 #if (MI_DEBUG>0 || MI_SECURE>=4)
-  if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
+  if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
     _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
     return NULL;
   }
@@ -464,23 +464,23 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
 void mi_free(void* p) mi_attr_noexcept
 {
   mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
-  if (mi_unlikely(segment == NULL)) return;
+  if mi_unlikely(segment == NULL) return;

   mi_threadid_t tid = _mi_thread_id();
   mi_page_t* const page = _mi_segment_page_of(segment, p);
   mi_block_t* const block = (mi_block_t*)p;

-  if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) {  // the thread id matches and it is not a full page, nor has aligned blocks
+  if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) {  // the thread id matches and it is not a full page, nor has aligned blocks
     // local, and not full or aligned
-    if (mi_unlikely(mi_check_is_double_free(page,block))) return;
+    if mi_unlikely(mi_check_is_double_free(page,block)) return;
     mi_check_padding(page, block);
     mi_stat_free(page, block);
     #if (MI_DEBUG!=0)
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
-    if (mi_unlikely(--page->used == 0)) {  // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
+    if mi_unlikely(--page->used == 0) {  // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
       _mi_page_retire(page);
     }
   }
@@ -526,7 +526,7 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
   const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
   if (segment==NULL) return 0;  // also returns 0 if `p == NULL`
   const mi_page_t* const page = _mi_segment_page_of(segment, p);
-  if (mi_likely(!mi_page_has_aligned(page))) {
+  if mi_likely(!mi_page_has_aligned(page)) {
     const mi_block_t* block = (const mi_block_t*)p;
     return mi_page_usable_size_of(page, block);
   }
@@ -621,18 +621,18 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
   // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
   // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
   const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
-  if (mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0)) {  // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
+  if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) {  // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
     // todo: adjust potential padding to reflect the new size?
     return p;  // reallocation still fits and not more than 50% waste
   }
   void* newp = mi_heap_malloc(heap,newsize);
-  if (mi_likely(newp != NULL)) {
+  if mi_likely(newp != NULL) {
     if (zero && newsize > size) {
       // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
       const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
       memset((uint8_t*)newp + start, 0, newsize - start);
     }
-    if (mi_likely(p != NULL)) {
+    if mi_likely(p != NULL) {
       _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
       mi_free(p);  // only free the original pointer if successful
     }
@@ -857,13 +857,13 @@ static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) {

 mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
   void* p = mi_malloc(size);
-  if (mi_unlikely(p == NULL)) return mi_try_new(size,false);
+  if mi_unlikely(p == NULL) return mi_try_new(size,false);
   return p;
 }

 mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
   void* p = mi_malloc(size);
-  if (mi_unlikely(p == NULL)) return mi_try_new(size, true);
+  if mi_unlikely(p == NULL) return mi_try_new(size, true);
   return p;
 }
@@ -887,7 +887,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, siz

 mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
   size_t total;
-  if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
+  if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
     mi_try_new_handler(false);  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
     return NULL;
   }
@@ -906,7 +906,7 @@ mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {

 mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
   size_t total;
-  if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) {
+  if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) {
     mi_try_new_handler(false);  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
     return NULL;
   }
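
Every call-site edit in this file follows the same mechanical pattern. As a minimal stand-alone sketch of the convention (the function name checked_index is hypothetical; only the macro definitions come from this commit), the following compiles under all three branches, as C or as C++20:

#include <stddef.h>
#include <stdbool.h>

#if defined(__GNUC__) || defined(__clang__)
#define mi_unlikely(x)  (__builtin_expect(!!(x),false))
#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
#define mi_unlikely(x)  (x) [[unlikely]]
#else
#define mi_unlikely(x)  (x)
#endif

// The error path is written without parentheses around the macro, exactly as
// in the hunks above; under C++20 it expands to `if (...) [[unlikely]] { ... }`.
int checked_index(const int* p, size_t n, size_t i) {
  if mi_unlikely(p == NULL || i >= n) { return -1; }
  return p[i];
}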
}

src/bitmap.c

Lines changed: 1 addition & 1 deletion
@@ -283,7 +283,7 @@ bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitm
 static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) {
   MI_UNUSED_RELEASE(bitmap_fields);
   const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
-  if (mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS)) {
+  if mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS) {
     *pre_mask = mi_bitmap_mask_(count, bitidx);
     *mid_mask = 0;
     *post_mask = 0;

src/heap.c

Lines changed: 1 addition & 1 deletion
@@ -410,7 +410,7 @@ static mi_heap_t* mi_heap_of_block(const void* p) {
   mi_segment_t* segment = _mi_ptr_segment(p);
   bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
   mi_assert_internal(valid);
-  if (mi_unlikely(!valid)) return NULL;
+  if mi_unlikely(!valid) return NULL;
   return mi_page_heap(_mi_segment_page_of(segment,p));
 }

src/options.c

Lines changed: 1 addition & 1 deletion
@@ -119,7 +119,7 @@ mi_decl_nodiscard long mi_option_get(mi_option_t option) {
   if (option < 0 || option >= _mi_option_last) return 0;
   mi_option_desc_t* desc = &options[option];
   mi_assert(desc->option == option);  // index should match the option
-  if (mi_unlikely(desc->init == UNINIT)) {
+  if mi_unlikely(desc->init == UNINIT) {
     mi_option_init(desc);
   }
   return desc->value;
