libmimalloc-sys 0.1.46

Sys crate wrapping the mimalloc allocator
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2025, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#if !defined(MI_IN_ALLOC_C)
#error "this file should be included from 'alloc.c' (so aliases can work from alloc-override)"
// add includes help an IDE
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/prim.h"   // _mi_prim_thread_id()
#endif

// forward declarations
static void   mi_check_padding(const mi_page_t* page, const mi_block_t* block);
static bool   mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block);
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block, bool was_guarded);
static void   mi_stat_free(const mi_page_t* page, const mi_block_t* block);


// ------------------------------------------------------
// Free
// ------------------------------------------------------

// regular free of a (thread local) block pointer
// fast path written carefully to prevent spilling on the stack
// `was_guarded`: the block had a trailing guard page that was already removed.
// `track_stats`: update statistics and the external memory tracker.
// `check_full` : also move the page out of the "full" queue when needed
//                (the specialized caller `mi_free_ex` knows the page is not full).
static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool was_guarded, bool track_stats, bool check_full)
{
  MI_UNUSED(was_guarded);
  // checks: double-free, padding canary, and stats (in this order; stat_free may read the padding)
  if mi_unlikely(mi_check_is_double_free(page, block)) return;
  if (!was_guarded) { mi_check_padding(page, block); }
  if (track_stats) { mi_stat_free(page, block); }
  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED  && !MI_TSAN
  memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
  #endif
  if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block, was_guarded)); } // faster than mi_usable_size as we already know the page and that p is unaligned

  // actual free: push on the local free list
  mi_block_set_next(page, block, page->local_free);
  page->local_free = block;
  if mi_unlikely(--page->used == 0) {
    if (page->retire_expire==0) { // no need to re-retire retired pages (happens when we alloc/free one block repeatedly in an empty page)
      _mi_page_retire(page); 
    }
  }
  else if mi_unlikely(check_full && mi_page_is_in_full(page)) {
    _mi_page_unfull(page);
  }
}

// Forward declaration for multi-threaded collect
static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t* mt_free) mi_attr_noexcept;

// Free a block that belongs to a page owned by another thread (or an abandoned page):
// push it atomically on the page's `xthread_free` list, and try to claim/collect the
// page when it turns out to be abandoned.
static inline void mi_free_block_mt(mi_page_t* page, mi_block_t* block, bool was_guarded) mi_attr_noexcept
{
  MI_UNUSED(was_guarded);   // only read in the MI_DEBUG memset below
  // adjust stats (after padding check and potentially recursive `mi_free` above)
  mi_stat_free(page, block);    // stat_free may access the padding
  mi_track_free_size(block, mi_page_usable_size_of(page, block, was_guarded));

  // _mi_padding_shrink(page, block, sizeof(mi_block_t));
#if (MI_DEBUG>0) && !MI_TRACK_ENABLED  && !MI_TSAN       // note: when tracking, cannot use mi_usable_size with multi-threading
  if (!was_guarded) {
    size_t dbgsize = mi_usable_size(block);
    if (dbgsize > MI_MiB) { dbgsize = MI_MiB; }  // cap the debug fill for huge blocks
    _mi_memset_aligned(block, MI_DEBUG_FREED, dbgsize);
  }
#endif

  // push atomically on the page thread free list
  mi_thread_free_t tf_new;
  mi_thread_free_t tf_old = mi_atomic_load_relaxed(&page->xthread_free);
  do {
    mi_block_set_next(page, block, mi_tf_block(tf_old));
    tf_new = mi_tf_create(block, true /* always use owned: try to claim it if the page is abandoned */);
  } while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tf_old, tf_new)); // todo: release is enough?

  // and atomically try to collect the page if it was abandoned
  // (the CAS installed the owned bit; if it was clear before, we are now the owner)
  const bool is_owned_now = !mi_tf_is_owned(tf_old);
  if (is_owned_now) {
    mi_assert_internal(mi_page_is_abandoned(page));
    mi_free_try_collect_mt(page,block);
  }
}


// Adjust a block that was allocated aligned, to the actual start of the block in the page.
// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the
// `page_start` and `block_size` fields; however these are constant and the page won't be
// deallocated (as the block we are freeing keeps it alive) and thus safe to read concurrently.
mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
  mi_assert_internal(page!=NULL && p!=NULL);

  const size_t bsize  = mi_page_block_size(page);
  const size_t offset = (size_t)((uint8_t*)p - mi_page_start(page));
  size_t rem;
  if (_mi_is_power_of_two(bsize)) { rem = (offset & (bsize - 1)); }  // fast mask for power-of-two sizes
  else                            { rem = (offset % bsize); }
  return (mi_block_t*)((uint8_t*)p - rem);
}

// forward declaration for a MI_GUARDED build
#if MI_GUARDED
static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p); // forward declaration
// If the block carries a trailing guard page, remove it; returns true iff it was guarded.
static inline bool mi_block_check_unguard(mi_page_t* page, mi_block_t* block, void* p) {
  if (mi_block_ptr_is_guarded(block, p)) { 
    mi_block_unguard(page, block, p); 
    return true;
  }
  else {
    return false;
  }
}
#else
// Non-guarded build: nothing to do, never guarded.
static inline bool mi_block_check_unguard(mi_page_t* page, mi_block_t* block, void* p) {
  MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(p);
  return false;
}
#endif

// Interpret `p` as a block pointer on a page without interior pointers.
// In debug mode asserts `p` is a block start; in secure mode defensively unaligns.
static inline mi_block_t* mi_validate_block_from_ptr( const mi_page_t* page, void* p ) {
  mi_assert(_mi_page_ptr_unalign(page,p) == (mi_block_t*)p); // should never be an interior pointer
  #if MI_SECURE > 0
  // in secure mode we always unalign to guard against free-ing interior pointers
  return _mi_page_ptr_unalign(page,p);
  #else
  MI_UNUSED(page);
  return (mi_block_t*)p;
  #endif
}


// Generic free of a thread-local pointer: resolve possible interior/guarded
// pointers, then free on the local free list.  (page parameter comes first for better codegen)
static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, void* p) mi_attr_noexcept {
  mi_assert_internal(p!=NULL && page != NULL);
  mi_block_t* block;
  if (mi_page_has_interior_pointers(page)) { block = _mi_page_ptr_unalign(page, p); }
  else                                     { block = mi_validate_block_from_ptr(page, p); }
  const bool guarded = mi_block_check_unguard(page, block, p);
  mi_free_block_local(page, block, guarded, true /* track stats */, true /* check for a full page */);
}

// Generic free of a pointer owned by another thread: resolve possible interior/guarded
// pointers, then push on the page's thread free list.  (page parameter comes first for better codegen)
static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, void* p) mi_attr_noexcept {
  mi_assert_internal(p!=NULL && page != NULL);
  mi_block_t* block;
  if (mi_page_has_interior_pointers(page)) { block = _mi_page_ptr_unalign(page, p); }
  else                                     { block = mi_validate_block_from_ptr(page, p); }
  const bool guarded = mi_block_check_unguard(page, block, p);
  mi_free_block_mt(page, block, guarded);
}

// generic free (for runtime integration): dispatch on thread locality
void mi_decl_noinline _mi_free_generic(mi_page_t* page, bool is_local, void* p) mi_attr_noexcept {
  if (is_local) { mi_free_generic_local(page, p); }
  else          { mi_free_generic_mt(page, p); }
}


// Get the page belonging to a pointer
// Does further checks in debug mode to see if this was a valid pointer.
// Returns NULL for a NULL pointer; in debug mode also returns NULL (after an
// error message) for unaligned or unknown pointers.  `msg` names the caller
// for the error messages.
static inline mi_page_t* mi_validate_ptr_page(const void* p, const char* msg)
{
  MI_UNUSED_RELEASE(msg);
  #if MI_DEBUG
  // unaligned pointers are never valid block starts (except with precise guarding)
  if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0 && !mi_option_is_enabled(mi_option_guarded_precise)) {
    _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
    return NULL;
  }
  mi_page_t* page = _mi_safe_ptr_page(p);
  if (p != NULL && page == NULL) {
    _mi_error_message(EINVAL, "%s: invalid pointer: %p\n", msg, p);
  }
  return page;
  #else
  // release mode: plain page-map lookup (maps NULL to NULL)
  return _mi_ptr_page(p);
  #endif
}

// Free a block
// Fast path written carefully to prevent register spilling on the stack.
// If `usable` is non-NULL it receives the usable block size of the page.
static mi_decl_forceinline void mi_free_ex(void* p, size_t* usable, mi_page_t* page)  
{
  if mi_unlikely(page==NULL) return;  // page will be NULL if p==NULL
  mi_assert_internal(p!=NULL && page!=NULL);
  if (usable!=NULL) { *usable = mi_page_usable_block_size(page); }

  // `xthread_id` packs the owner thread id with the page flags in its low bits;
  // xor-ing with our thread id lets one comparison test both at once.
  const mi_threadid_t xtid = (_mi_prim_thread_id() ^ mi_page_xthread_id(page));
  if mi_likely(xtid == 0) {                        // `tid == mi_page_thread_id(page) && mi_page_flags(page) == 0`
    // thread-local, aligned, and not a full page
    mi_block_t* const block = mi_validate_block_from_ptr(page,p);
    mi_free_block_local(page, block, false /* was guarded */, true /* track stats */, false /* no need to check if the page is full */);
  }
  else if (xtid <= MI_PAGE_FLAG_MASK) {            // `tid == mi_page_thread_id(page) && mi_page_flags(page) != 0`
    // page is local, but is full or contains (inner) aligned blocks; use generic path
    mi_free_generic_local(page, p);
  }
  // free-ing in a page owned by a theap in another thread, or an abandoned page (not belonging to a theap)
  else if ((xtid & MI_PAGE_FLAG_MASK) == 0) {      // `tid != mi_page_thread_id(page) && mi_page_flags(page) == 0`
    // blocks are aligned (and not a full page); push on the thread_free list
    mi_block_t* const block = mi_validate_block_from_ptr(page,p);
    mi_free_block_mt(page,block,false /* was_guarded */);
  }
  else {
    // page is full or contains (inner) aligned blocks; use generic multi-thread path
    mi_free_generic_mt(page, p);
  }
}

// Standard free: validate the pointer's page and free the block.
void mi_free(void* p) mi_attr_noexcept {
  mi_free_ex(p, NULL, mi_validate_ptr_page(p, "mi_free"));
}

// Free and report the usable block size through `usable`.
void mi_ufree(void* p, size_t* usable) mi_attr_noexcept {
  mi_free_ex(p, usable, mi_validate_ptr_page(p, "mi_ufree"));
}

// Free a pointer that is known to come from `mi_(heap_)malloc_small`.
void mi_free_small(void* p) mi_attr_noexcept {
  // We can only call `mi_free_small` for pointers allocated with `mi_(heap_)malloc_small`.
  // If we keep page info in front of the page area for small objects, we can find the info
  // just by aligning down the pointer instead of looking it up in the page map.
  #if MI_PAGE_META_ALIGNED_FREE_SMALL 
    #if MI_GUARDED 
    #warning "MI_PAGE_META_ALIGNED_FREE_SMALL ignored as MI_GUARDED is defined"
    mi_free(p);
    #elif MI_ARENA_SLICE_ALIGN < MI_SMALL_PAGE_SIZE
    #warning "MI_PAGE_META_ALIGNED_FREE_SMALL ignored as the MI_ARENA_SLICE_ALIGN is less than the small page size"
    mi_free(p);
    #else
      // align-down finds the page meta data directly (no page-map lookup)
      mi_page_t* const page = (mi_page_t*)_mi_align_down_ptr(p,MI_SMALL_PAGE_SIZE);
      mi_assert(page == mi_validate_ptr_page(p,"mi_free_small"));
      mi_assert((void*)page == _mi_align_down_ptr(page->page_start,MI_SMALL_PAGE_SIZE));
      mi_assert(page->block_size <= MI_SMALL_SIZE_MAX);  // note: not `MI_SMALL_MAX_OBJ_SIZE` as we need to match `mi_(heap_)malloc_small`
      mi_free_ex(p, NULL, page);
    #endif
  #else
  mi_free(p);
  #endif  
}


// --------------------------------------------------------------------------------------------
// `mi_free_try_collect_mt`: Potentially collect a page in a free in an abandoned page.
// 1. if the page becomes empty, free it
// 2. if it can be reclaimed, reclaim it in our theap
// 3. if it went to < 7/8th used, re-abandon to be mapped (so it can be found by theaps looking for free pages)
// --------------------------------------------------------------------------------------------

// Helper for mi_free_try_collect_mt: free the page outright once it has no more
// used blocks (the `used` count is maintained by `_mi_page_free_collect(_partly)`).
static bool mi_abandoned_page_try_free(mi_page_t* page)
{
  if (mi_page_all_free(page)) {
    // unabandon first: removes it from the abandoned pages in the arena
    // (if mapped, this might wait for any readers to finish)
    _mi_arenas_page_unabandon(page, NULL);
    _mi_arenas_page_free(page, NULL);   // now the page can be freed directly
    return true;
  }
  return false;
}

// Helper for mi_free_try_collect_mt: try if we can reabandon a previously abandoned
// mostly full page to be mapped (so it can be found again for allocations).
// We only reabandon once a full page has enough blocks available, to prevent
// immediate re-abandonment of a full page.
static bool mi_abandoned_page_try_reabandon_to_mapped(mi_page_t* page)
{
  if (mi_page_is_mostly_used(page)) return false;           // still too full
  const bool in_arena = (page->memid.memkind == MI_MEM_ARENA);
  if (!in_arena || mi_page_is_abandoned_mapped(page)) return false;  // unmappable, or already mapped

  mi_assert(!mi_page_is_full(page));
  return _mi_arenas_page_try_reabandon_to_mapped(page);
}

// Release ownership of a page. This may free or reabandon the page if other blocks are
// concurrently freed in the meantime (in which case this returns right away).
// By passing the captured `expected_thread_free`, we can often avoid calling `mi_page_free_collect`.
static void mi_abandoned_page_unown_from_free(mi_page_t* page, mi_block_t* expected_thread_free) {
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_is_abandoned(page));
  mi_assert_internal(!mi_page_all_free(page));
  // try to cas atomically the original free list (`mt_free`) back with the ownership cleared.
  mi_thread_free_t tf_expect = mi_tf_create(expected_thread_free, true);
  mi_thread_free_t tf_new    = mi_tf_create(expected_thread_free, false);
  while mi_unlikely(!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tf_expect, tf_new)) {
    mi_assert_internal(mi_tf_is_owned(tf_expect));  // we should still be the owner
    // while the xthread_free list is not empty..
    while (mi_tf_block(tf_expect) != NULL) {
      // if there were concurrent updates to the thread-free list, we retry to free or reabandon to mapped (if it became no longer mostly_used).
      _mi_page_free_collect(page,false);  // update used count
      if (mi_abandoned_page_try_free(page)) return;
      if (mi_abandoned_page_try_reabandon_to_mapped(page)) return;
      // otherwise continue un-owning with the freshly observed list
      tf_expect = mi_atomic_load_relaxed(&page->xthread_free);
    }
    // and try again to release ownership (now expecting an empty list)
    mi_assert_internal(mi_tf_block(tf_expect)==NULL);
    tf_new = mi_tf_create(NULL, false);
  }
}

// Does the theap's page queue for `block_size` hold at most `atmost` pages?
// (a negative `atmost` means "no, never")
static inline bool mi_page_queue_len_is_atmost( mi_theap_t* theap, size_t block_size, long atmost) {
  if (atmost < 0) return false;
  mi_page_queue_t* const queue = mi_page_queue(theap, block_size);
  mi_assert_internal(queue != NULL);
  return ((size_t)atmost >= queue->count);
}

// Helper for mi_free_try_collect_mt:  try to reclaim the page for ourselves.
// Returns true if the page was reclaimed into the current theap.
static mi_decl_noinline bool mi_abandoned_page_try_reclaim(mi_page_t* page, long reclaim_on_free) mi_attr_noexcept
{
  // note: reclaiming can improve benchmarks like `larson` or `rbtree-ck` a lot even in the single-threaded case,
  // since free-ing from an owned page avoids atomic operations. However, if we reclaim too eagerly in
  // a multi-threaded scenario we may start to hold on to too much memory and reduce reuse among threads.
  // If the current theap is where the page originally came from, we reclaim much more eagerly while
  // 'cross-thread' reclaiming on free is by default off (and we only 'reclaim' these by finding the abandoned
  // pages when we allocate a fresh page).
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_is_abandoned(page));
  mi_assert_internal(!mi_page_all_free(page));
  mi_assert_internal(page->block_size <= MI_SMALL_SIZE_MAX);
  mi_assert_internal(reclaim_on_free >= 0);

  // don't reclaim if we just have terminated this thread and we should
  // not reinitialize the theap for this thread. (can happen due to thread-local destructors for example -- issue #944)
  if (!_mi_thread_is_initialized()) return false;

  // get our theap 
  mi_theap_t* const theap = _mi_page_associated_theap_peek(page);
  if (theap==NULL || !theap->allow_page_reclaim) return false;
  
  // todo: cache `is_in_threadpool` and `exclusive_arena` directly in the theap for performance?
  // set max_reclaim limit (0 by default: no reclaim unless one of the cases below applies)
  long max_reclaim = 0;
  if mi_likely(theap == page->theap) {  // did this page originate from the current theap? (and thus allocated from this thread)
    // originating theap: reclaim eagerly (within the configured limit)
    max_reclaim = _mi_option_get_fast(theap->tld->is_in_threadpool ? mi_option_page_cross_thread_max_reclaim : mi_option_page_max_reclaim);
  }
  else if (reclaim_on_free == 1 &&               // if cross-thread is allowed
            !theap->tld->is_in_threadpool &&      // and we are not part of a threadpool
            !mi_page_is_mostly_used(page) &&     // and the page is not too full
            _mi_arena_memid_is_suitable(page->memid, _mi_theap_heap(theap)->exclusive_arena)) {   // and it fits our memory
    // across threads
    max_reclaim = _mi_option_get_fast(mi_option_page_cross_thread_max_reclaim);
  }

  // are we within the reclaim limit? (a negative limit means "unlimited")
  if (max_reclaim >= 0 && !mi_page_queue_len_is_atmost(theap, page->block_size, max_reclaim)) {
    return false;
  }

  // reclaim the page into this theap
  // first remove it from the abandoned pages in the arena -- this might wait for any readers to finish
  _mi_arenas_page_unabandon(page, theap);
  _mi_theap_page_reclaim(theap, page);
  mi_theap_stat_counter_increase(theap, pages_reclaim_on_free, 1);
  return true;
}


// We freed a block in an abandoned page (that was not owned) and our free claimed
// ownership. Try to collect: 1. free the page, 2. reclaim it, or 3. reabandon it to
// be mapped -- and otherwise release ownership again.
// `mt_free` is the thread-free list head we observed when claiming ownership.
static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t* mt_free) mi_attr_noexcept
{
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_is_abandoned(page));
  mi_assert_internal(mt_free != NULL);
  // we own the page now, and it is safe to collect the thread atomic free list
  if (page->block_size <= MI_SMALL_SIZE_MAX) {
    // use the `_partly` version to avoid atomic operations since we already have the `mt_free` pointing into the thread free list
    // (after this the `used` count might be too high (as some blocks may have been concurrently added to the thread free list and are yet uncounted).
    //  however, if the page became completely free, the used count is guaranteed to be 0.)
    mi_assert_internal(page->reserved>=16); // below this even one freed block goes from full to no longer mostly used.
    _mi_page_free_collect_partly(page, mt_free);    
  }
  else {
    // for larger blocks we use the regular collect 
    _mi_page_free_collect(page,false /* no force */);
    mt_free = NULL; // expected page->xthread_free value after collection
  }
  const long reclaim_on_free = _mi_option_get_fast(mi_option_page_reclaim_on_free);
  #if MI_DEBUG > 1
  if (mi_page_is_singleton(page)) { mi_assert_internal(mi_page_all_free(page)); }
  if (mi_page_is_full(page))      { mi_assert(mi_page_is_mostly_used(page)); }
  #endif

  // try to: 1. free it, 2. reclaim it, or 3. reabandon it to be mapped
  if (mi_abandoned_page_try_free(page)) return;
  if (page->block_size <= MI_SMALL_SIZE_MAX && reclaim_on_free >= 0) {  // early test for better codegen
    if (mi_abandoned_page_try_reclaim(page, reclaim_on_free)) return;
  }
  if (mi_abandoned_page_try_reabandon_to_mapped(page)) return;
  
  // otherwise unown the page again
  mi_abandoned_page_unown_from_free(page, mt_free);
}


// ------------------------------------------------------
// Usable size
// ------------------------------------------------------

// Bytes available in a block, for a pointer `p` that may point inside the block
// (an aligned or guarded allocation): size from `p` to the end of the usable area.
static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* page, const void* p) mi_attr_noexcept {
  const mi_block_t* block = _mi_page_ptr_unalign(page, p);
  const bool guarded      = mi_block_ptr_is_guarded(block, p);
  const size_t full_size  = mi_page_usable_size_of(page, block, guarded);
  const ptrdiff_t offset  = (uint8_t*)p - (uint8_t*)block;
  mi_assert_internal(offset >= 0 && (size_t)offset <= full_size);
  return (full_size - (size_t)offset);
}

// Usable size for `p` on `page`; returns 0 when the page is NULL (invalid pointer).
static inline size_t _mi_usable_size(const void* p, const mi_page_t* page) mi_attr_noexcept {
  if mi_unlikely(page==NULL) return 0;
  if mi_unlikely(mi_page_has_interior_pointers(page)) {
    // aligned/guarded blocks: split out to a separate routine for improved code generation
    return mi_page_usable_aligned_size_of(page, p);
  }
  // common case: `p` is the block start itself
  return mi_page_usable_size_of(page, (const mi_block_t*)p, false /* is guarded */);
}

// Public entry point: usable size of an allocation (0 for invalid pointers).
mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
  return _mi_usable_size(p, mi_validate_ptr_page(p, "mi_usable_size"));
}


// ------------------------------------------------------
// Free variants
// ------------------------------------------------------

// Free with a size hint; in debug mode verify the hint does not exceed the usable size.
void mi_free_size(void* p, size_t size) mi_attr_noexcept {
  MI_UNUSED_RELEASE(size);
  #if MI_DEBUG
  const mi_page_t* const page = mi_validate_ptr_page(p,"mi_free_size");  
  const size_t available = _mi_usable_size(p,page);
  mi_assert(p == NULL || size <= available || available == 0 /* invalid pointer */ );
  #endif
  mi_free(p);
}

// Free with size and alignment hints; alignment is only checked in debug mode.
void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept {
  mi_assert(((uintptr_t)p % alignment) == 0);
  MI_UNUSED_RELEASE(alignment);
  mi_free_size(p, size);
}

// Free with an alignment hint; alignment is only checked in debug mode.
void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
  mi_assert(((uintptr_t)p % alignment) == 0);
  MI_UNUSED_RELEASE(alignment);
  mi_free(p);
}


// ------------------------------------------------------
// Check for double free in secure and debug mode
// This is somewhat expensive so only enabled for secure mode 4
// ------------------------------------------------------

#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
// linear scan: is `elem` a member of the given (encoded) free list?
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
  for (const mi_block_t* cur = list; cur != NULL; cur = mi_block_next(page, cur)) {
    if (cur == elem) return true;
  }
  return false;
}

// Slow path of the double-free check (called only on a suspicious decoded next-pointer).
static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
  // The decoded value is in the same page (or NULL).
  // Walk the free lists to verify positively if it is already freed
  if (mi_list_contains(page, page->free, block) ||
      mi_list_contains(page, page->local_free, block) ||
      mi_list_contains(page, mi_page_thread_free(page), block))
  {
    _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
    return true;
  }
  return false;
}

// mark a whole page's memory for the memory tracker (defined/noaccess/etc.)
#define mi_track_page(page,access)  { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); }

// Fast heuristic double-free check: a freed block's first field decodes to a pointer
// into the same page (or NULL); for a live block it decodes to effectively random bits.
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  bool is_double_free = false;
  mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
  if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&  // quick check: aligned pointer?
      (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
  {
    // Suspicious: decoded value a in block is in the same page (or NULL) -- maybe a double free?
    // (continue in separate function to improve code generation)
    is_double_free = mi_check_is_double_freex(page, block);
  }
  return is_double_free;
}
#else
// double-free checking disabled in this configuration: never reports a double free
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page); MI_UNUSED(block);
  return false;
}
#endif


// ---------------------------------------------------------------------------
// Check for theap block overflow by setting up padding at the end of the block
// ---------------------------------------------------------------------------

#if MI_PADDING // && !MI_TRACK_ENABLED
// Decode the padding record at the end of a block.
// On success returns true with `*bsize` = usable block size and `*delta` = padding size
// (the requested size is `bsize - delta`). Returns false if the canary does not match
// or the delta is out of range (i.e. the padding was overwritten).
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
  *bsize = mi_page_usable_block_size(page);
  const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
  mi_track_mem_defined(padding,sizeof(mi_padding_t));   // allow the tracker to read the padding
  *delta = padding->delta;
  uint32_t canary = padding->canary;
  uintptr_t keys[2];
  keys[0] = page->keys[0];
  keys[1] = page->keys[1];
  bool ok = (mi_ptr_encode_canary(page,block,keys) == canary && *delta <= *bsize);
  mi_track_mem_noaccess(padding,sizeof(mi_padding_t));  // and hide it again
  return ok;
}

// Return the exact usable size of a block.
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block, bool is_guarded) {
  if (is_guarded) {
    // a guarded block ends in a protected OS page that is not usable
    return (mi_page_block_size(page) - _mi_os_page_size());
  }
  size_t bsize = 0;
  size_t delta = 0;
  const bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
  if (!ok) return 0;   // corrupted padding
  return (bsize - delta);
}

// When a non-thread-local block is freed, it becomes part of the thread delayed free
// list that is freed later by the owning theap. If the exact usable size is too small to
// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
// so it will later not trigger an overflow error in `mi_free_block`.
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok);
  if (!ok || (bsize - delta) >= min_size) return;  // usually already enough space
  mi_assert_internal(bsize >= min_size);
  if (bsize < min_size) return;  // should never happen
  size_t new_delta = (bsize - min_size);   // shrink delta so exactly `min_size` bytes are usable
  mi_assert_internal(new_delta < bsize);
  mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
  mi_track_mem_defined(padding,sizeof(mi_padding_t));
  padding->delta = (uint32_t)new_delta;
  mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
}
#else
// Without padding the usable size is simply the page's block size.
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block, bool is_guarded) {
  MI_UNUSED(block); MI_UNUSED(is_guarded);
  return mi_page_usable_block_size(page);
}

// No padding in this configuration: nothing to shrink.
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  MI_UNUSED(page); MI_UNUSED(min_size); MI_UNUSED(block);
}
#endif

#if MI_PADDING && MI_PADDING_CHECK

// Verify the padding of a block: decode the padding record and check the fill bytes.
// On return `*size` is the requested size and `*wrong` the offset of the first bad
// byte (both set to `bsize` if the record itself could not be decoded).
static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  *size = *wrong = bsize;
  if (!ok) return false;   // canary mismatch: padding record overwritten
  mi_assert_internal(bsize >= delta);
  *size = bsize - delta;
  if (!mi_page_is_huge(page)) {
    uint8_t* fill = (uint8_t*)block + bsize - delta;
    const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
    mi_track_mem_defined(fill, maxpad);
    for (size_t i = 0; i < maxpad; i++) {
      if (fill[i] != MI_DEBUG_PADDING) {
        *wrong = bsize - delta + i;   // offset of the first overwritten byte
        ok = false;
        break;
      }
    }
    mi_track_mem_noaccess(fill, maxpad);
  }
  return ok;
}

static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  size_t size;
  size_t wrong;
  if (!mi_verify_padding(page,block,&size,&wrong)) {
    _mi_error_message(EFAULT, "buffer overflow in theap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
  }
}

#else

// Padding checks disabled in this configuration: nothing to verify.
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page); MI_UNUSED(block);
}

#endif

// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
// Update (per-theap) statistics on free: decrease the malloc counters by the block size.
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(block);
  mi_theap_t* const theap = _mi_theap_default();
  if (!mi_theap_is_initialized(theap)) return; // (for now) skip statistics if free'd after thread_done was called (usually a thread cleanup call by the OS)

  const size_t bsize = mi_page_usable_block_size(page);
  // #if (MI_STAT>1)
  // const size_t usize = mi_page_usable_size_of(page, block);
  // mi_theap_stat_decrease(theap, malloc_requested, usize);
  // #endif
  if (bsize <= MI_LARGE_MAX_OBJ_SIZE) {
    mi_theap_stat_decrease(theap, malloc_normal, bsize);
    #if (MI_STAT > 1)
    // per-size-bin counter
    mi_theap_stat_decrease(theap, malloc_bins[_mi_bin(bsize)], 1);
    #endif
  }
  else {
    const size_t bpsize = mi_page_block_size(page);  // match stat in page.c:mi_huge_page_alloc
    mi_theap_stat_decrease(theap, malloc_huge, bpsize);
  }
}
#else
// Statistics disabled (MI_STAT==0): nothing to record.
// note: must be `static` to match the forward declaration at the top of this file
// (a non-static definition would give an inconsistent linkage declaration).
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page); MI_UNUSED(block);
}
#endif


// Remove guard page when building with MI_GUARDED:
// unprotect the trailing OS page of the block so it can be reused.
#if MI_GUARDED
static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p) {
  MI_UNUSED(p);
  mi_assert_internal(mi_block_ptr_is_guarded(block, p));
  mi_assert_internal(mi_page_has_interior_pointers(page));
  mi_assert_internal((uint8_t*)p - (uint8_t*)block >= (ptrdiff_t)sizeof(mi_block_t));
  mi_assert_internal(block->next == MI_BLOCK_TAG_GUARDED);

  const size_t bsize = mi_page_block_size(page);
  const size_t psize = _mi_os_page_size();
  mi_assert_internal(bsize > psize);
  mi_assert_internal(!page->memid.is_pinned);
  // the guard page sits at the end of the block
  void* gpage = (uint8_t*)block + bsize - psize;
  mi_assert_internal(_mi_is_aligned(gpage, psize));
  _mi_os_unprotect(gpage, psize);
}
#endif
#endif