#![allow(nonstandard_style)]

use core::ffi::c_void;

use cty::{c_char, c_int, c_long, c_ulonglong};

/// The maximum number of bytes which may be used as an argument to a function
/// in the `_small` family ([`mi_malloc_small`], [`mi_zalloc_small`], etc).
pub const MI_SMALL_SIZE_MAX: usize = 128 * core::mem::size_of::<*mut c_void>();

extern "C" {
    /// Allocate `count` items of `size` length each.
    ///
    /// Returns `null` if `count * size` overflows or on out-of-memory.
    ///
    /// All items are initialized to zero.
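    ///
    /// # Example
    ///
    /// A minimal usage sketch; the item count and size here are arbitrary:
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     // 16 zero-initialized items of 4 bytes each.
    ///     let p = mi::mi_calloc(16, 4) as *mut u8;
    ///     assert!(!p.is_null());
    ///     assert!(core::slice::from_raw_parts(p, 64).iter().all(|&b| b == 0));
    ///     mi::mi_free(p.cast());
    /// }
    /// ```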
    pub fn mi_calloc(count: usize, size: usize) -> *mut c_void;

    /// Allocate `count` items of `size` length each.
    ///
    /// Returns `null` if `count * size` overflows or on out-of-memory,
    /// otherwise returns the same as [`mi_malloc(count *
    /// size)`](crate::mi_malloc).
    /// Equivalent to [`mi_calloc`], but returns uninitialized (and not zeroed)
    /// bytes.
    pub fn mi_mallocn(count: usize, size: usize) -> *mut c_void;

    /// Re-allocate memory to `count` elements of `size` bytes.
    ///
    /// The realloc equivalent of the [`mi_mallocn`] interface. Returns `null`
    /// if `count * size` overflows or on out-of-memory, otherwise returns the
    /// same as [`mi_realloc(p, count * size)`](crate::mi_realloc).
    pub fn mi_reallocn(p: *mut c_void, count: usize, size: usize) -> *mut c_void;

    /// Try to re-allocate memory to `newsize` bytes _in place_.
    ///
    /// Returns null on out-of-memory or if the memory could not be expanded in
    /// place. On success, returns the same pointer as `p`.
    ///
    /// If `newsize` is larger than the original `size` allocated for `p`, the
    /// bytes after `size` are uninitialized.
    ///
    /// If null is returned, the original pointer is not freed.
    ///
    /// Note: Conceptually, this is a realloc-like function that returns null if
    /// it would be forced to reallocate memory and copy. In practice it's
    /// equivalent to testing against [`mi_usable_size`](crate::mi_usable_size).
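    ///
    /// # Example
    ///
    /// A small sketch of the try-grow-in-place pattern (the sizes are arbitrary):
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let p = mi::mi_malloc(100);
    ///     assert!(!p.is_null());
    ///     // The usable size is often a bit larger than requested, so a small
    ///     // in-place grow like this may succeed; a null return just means
    ///     // "would have to move", and `p` stays valid either way.
    ///     let q = mi::mi_expand(p, 112);
    ///     if !q.is_null() {
    ///         assert_eq!(q, p);
    ///     }
    ///     mi::mi_free(p);
    /// }
    /// ```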
    pub fn mi_expand(p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Re-allocate memory to `newsize` bytes.
    ///
    /// This differs from [`mi_realloc`](crate::mi_realloc) in that on failure,
    /// `p` is freed.
    pub fn mi_reallocf(p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Allocate and duplicate a nul-terminated C string.
    ///
    /// This can be useful for Rust code when interacting with the FFI.
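    ///
    /// # Example
    ///
    /// A minimal sketch of duplicating a Rust-owned C string into
    /// mimalloc-owned memory:
    ///
    /// ```
    /// use std::ffi::CString;
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let src = CString::new("hello").unwrap();
    ///     // The duplicate is owned by mimalloc and must be released with
    ///     // `mi_free` (not `libc::free`).
    ///     let dup = mi::mi_strdup(src.as_ptr().cast());
    ///     assert!(!dup.is_null());
    ///     mi::mi_free(dup.cast());
    /// }
    /// ```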
    pub fn mi_strdup(s: *const c_char) -> *mut c_char;

    /// Allocate and duplicate a nul-terminated C string, up to `n` bytes.
    ///
    /// This can be useful for Rust code when interacting with the FFI.
    pub fn mi_strndup(s: *const c_char, n: usize) -> *mut c_char;

    /// Resolve a file path name, producing a `C` string which can be passed to
    /// [`mi_free`](crate::mi_free).
    ///
    /// `resolved_name` should be null, but can also point to a buffer of at
    /// least `PATH_MAX` bytes.
    ///
    /// If successful, returns a pointer to the resolved absolute file name, or
    /// `null` on failure (with `errno` set to the error code).
    ///
    /// If `resolved_name` was `null`, the returned result should be freed with
    /// [`mi_free`](crate::mi_free).
    ///
    /// This is rarely useful in FFI code, and is mostly included for
    /// completeness.
    pub fn mi_realpath(fname: *const c_char, resolved_name: *mut c_char) -> *mut c_char;

    /// Allocate `size * count` bytes aligned by `alignment`.
    ///
    /// Return pointer to the allocated memory or null if out of memory or if
    /// `size * count` overflows.
    ///
    /// Returns a unique pointer if called with a `size * count` of 0.
    pub fn mi_calloc_aligned(count: usize, size: usize, alignment: usize) -> *mut c_void;

    /// Allocate `size` bytes aligned by `alignment` at a specified `offset`.
    ///
    /// Note that the returned pointer itself is not necessarily aligned to
    /// `alignment`, but it is after `offset` bytes. This can be useful for
    /// allocating data with an inline header, where the data has a specific
    /// alignment requirement.
    ///
    /// Specifically, if `p` is the returned pointer `p.add(offset)` is aligned
    /// to `alignment`.
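    ///
    /// # Example
    ///
    /// A sketch of the "inline header" layout described above (the 8-byte
    /// header and 64-byte alignment are arbitrary choices):
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     // An 8-byte header, followed by data that must be 64-byte aligned.
    ///     let p = mi::mi_malloc_aligned_at(8 + 128, 64, 8);
    ///     assert!(!p.is_null());
    ///     // `p` itself need not be 64-byte aligned, but `p.add(8)` is.
    ///     assert_eq!((p as usize + 8) % 64, 0);
    ///     mi::mi_free(p);
    /// }
    /// ```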
    pub fn mi_malloc_aligned_at(size: usize, alignment: usize, offset: usize) -> *mut c_void;

    /// Allocate `size` bytes aligned by `alignment` at a specified `offset`,
    /// zero-initialized.
    ///
    /// This is a [`mi_zalloc`](crate::mi_zalloc) equivalent of [`mi_malloc_aligned_at`].
    pub fn mi_zalloc_aligned_at(size: usize, alignment: usize, offset: usize) -> *mut c_void;

    /// Allocate `size` bytes aligned by `alignment`, and store the address of
    /// the allocated memory in `ptr`.
    ///
    /// Returns zero on success, `EINVAL` for an invalid alignment, or `ENOMEM`
    /// when out of memory.
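    ///
    /// # Example
    ///
    /// A minimal sketch using the out-parameter convention:
    ///
    /// ```
    /// use core::ffi::c_void;
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let mut p: *mut c_void = core::ptr::null_mut();
    ///     // The alignment must be a power of two (and, per POSIX, a multiple
    ///     // of the pointer size).
    ///     let rc = mi::mi_posix_memalign(&mut p, 64, 256);
    ///     assert_eq!(rc, 0);
    ///     assert!(!p.is_null());
    ///     assert_eq!(p as usize % 64, 0);
    ///     mi::mi_free(p);
    /// }
    /// ```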
    pub fn mi_posix_memalign(ptr: *mut *mut c_void, alignment: usize, size: usize) -> c_int;

    /// Allocate `size` bytes aligned by `alignment`, with the alignment as the
    /// first parameter (matching C's `aligned_alloc`).
    ///
    /// Return pointer to the allocated memory or null if out of memory.
    pub fn mi_aligned_alloc(alignment: usize, size: usize) -> *mut c_void;

    /// Allocate `size * count` bytes aligned by `alignment` at a specified
    /// `offset`, zero-initialized.
    ///
    /// This is a [`calloc`](crate::mi_calloc) equivalent of [`mi_malloc_aligned_at`].
    pub fn mi_calloc_aligned_at(
        count: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Re-allocate memory to `newsize` bytes aligned by `alignment` at a
    /// specified `offset`.
    ///
    /// This is a [`realloc`](crate::mi_realloc) equivalent of [`mi_malloc_aligned_at`].
    pub fn mi_realloc_aligned_at(
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Zero initialized [re-allocation](crate::mi_realloc).
    ///
    /// In general, only valid on memory originally allocated by zero
    /// initialization: [`mi_calloc`](crate::mi_calloc),
    /// [`mi_zalloc`](crate::mi_zalloc),
    /// [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), ...
    pub fn mi_rezalloc(p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Zero initialized [re-allocation](crate::mi_realloc), following `calloc`
    /// parameter conventions.
    ///
    /// In general, only valid on memory originally allocated by zero
    /// initialization: [`mi_calloc`](crate::mi_calloc),
    /// [`mi_zalloc`](crate::mi_zalloc),
    /// [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), ...
    pub fn mi_recalloc(p: *mut c_void, newcount: usize, size: usize) -> *mut c_void;

    /// Aligned version of [`mi_rezalloc`].
    pub fn mi_rezalloc_aligned(p: *mut c_void, newsize: usize, alignment: usize) -> *mut c_void;

    /// Offset-aligned version of [`mi_rezalloc`].
    pub fn mi_rezalloc_aligned_at(
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Aligned version of [`mi_recalloc`].
    pub fn mi_recalloc_aligned(
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Offset-aligned version of [`mi_recalloc`].
    pub fn mi_recalloc_aligned_at(
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Allocate an object of no more than [`MI_SMALL_SIZE_MAX`] bytes.
    ///
    /// Does not check that `size` is indeed small.
    ///
    /// Note: Currently [`mi_malloc`](crate::mi_malloc) checks at runtime whether
    /// `size` is small and calls this if so, so this is only worth using if you
    /// already know the size is small.
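    ///
    /// # Example
    ///
    /// A sketch that guards the call with [`MI_SMALL_SIZE_MAX`]:
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let size = 64;
    ///     // Only call this when the size is known to be small enough.
    ///     assert!(size <= mi::MI_SMALL_SIZE_MAX);
    ///     let p = mi::mi_malloc_small(size);
    ///     assert!(!p.is_null());
    ///     mi::mi_free(p);
    /// }
    /// ```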
    pub fn mi_malloc_small(size: usize) -> *mut c_void;

    /// Allocate a zero-initialized object of no more than
    /// [`MI_SMALL_SIZE_MAX`] bytes.
    ///
    /// Does not check that `size` is indeed small.
    ///
    /// Note: Currently [`mi_zalloc`](crate::mi_zalloc) checks at runtime whether
    /// `size` is small and calls this if so, so this is only worth using if you
    /// already know the size is small.
    pub fn mi_zalloc_small(size: usize) -> *mut c_void;

    /// Return the size that mimalloc will actually allocate for a request.
    ///
    /// Returns the size `n` that will be allocated, where `n >= size`.
    ///
    /// Generally, `mi_usable_size(mi_malloc(size)) == mi_good_size(size)`. This
    /// can be used to reduce internal wasted space when allocating buffers, for
    /// example.
    ///
    /// See [`mi_usable_size`](crate::mi_usable_size).
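    ///
    /// # Example
    ///
    /// A sketch of sizing a buffer so that no usable space is wasted:
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     // Ask how much a 100-byte request would really get, then request
    ///     // exactly that much.
    ///     let n = mi::mi_good_size(100);
    ///     assert!(n >= 100);
    ///     let p = mi::mi_malloc(n);
    ///     assert!(!p.is_null());
    ///     assert!(mi::mi_usable_size(p) >= n);
    ///     mi::mi_free(p);
    /// }
    /// ```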
    pub fn mi_good_size(size: usize) -> usize;

    /// Eagerly free memory.
    ///
    /// If `force` is true, aggressively return memory to the OS (can be
    /// expensive!).
    ///
    /// Regular code should not have to call this function. It can be beneficial
    /// in very narrow circumstances; in particular, when a long-running thread
    /// allocates many blocks that are freed by other threads, calling this every
    /// once in a while may improve resource usage.
    pub fn mi_collect(force: bool);

    /// Checked free: If `p` came from mimalloc's heap (as decided by
    /// [`mi_is_in_heap_region`]), this is [`mi_free(p)`](crate::mi_free), but
    /// otherwise it is a no-op.
    pub fn mi_cfree(p: *mut c_void);

    /// Returns true if this is a pointer into a memory region that has been
    /// reserved by the mimalloc heap.
    ///
    /// This function is described by the mimalloc documentation as "relatively
    /// fast".
    ///
    /// See also [`mi_heap_check_owned`], which is (much) slower and slightly
    /// more precise, but only concerns a single `mi_heap`.
    pub fn mi_is_in_heap_region(p: *const c_void) -> bool;

    /// Layout-aware deallocation: Like [`mi_free`](crate::mi_free), but accepts
    /// the size and alignment as well.
    ///
    /// Note: unlike some allocators that require this information for
    /// performance, mimalloc doesn't need it (as of the current version,
    /// v2.0.0), and so it currently implements this as a (debug) assertion that
    /// verifies that `p` is actually aligned to `alignment` and is usable for
    /// at least `size` bytes, before delegating to `mi_free`.
    ///
    /// However, currently there's no way to have this crate enable mimalloc's
    /// debug assertions, so these checks aren't particularly useful.
    ///
    /// Note: It's legal to pass null to this function, and you are not required
    /// to use this to deallocate memory from an aligned allocation function.
    pub fn mi_free_size_aligned(p: *mut c_void, size: usize, alignment: usize);

    /// Size-aware deallocation: Like [`mi_free`](crate::mi_free), but accepts
    /// the size as well.
    ///
    /// Note: unlike some allocators that require this information for
    /// performance, mimalloc doesn't need it (as of the current version,
    /// v2.0.0), and so it currently implements this as a (debug) assertion that
    /// verifies that `p` is usable for at least `size` bytes, before delegating
    /// to `mi_free`.
    ///
    /// However, currently there's no way to have this crate enable mimalloc's
    /// debug assertions, so these checks aren't particularly useful.
    ///
    /// Note: It's legal to pass null to this function.
    pub fn mi_free_size(p: *mut c_void, size: usize);

    /// Alignment-aware deallocation: Like [`mi_free`](crate::mi_free), but
    /// accepts the alignment as well.
    ///
    /// Note: unlike some allocators that require this information for
    /// performance, mimalloc doesn't need it (as of the current version,
    /// v2.0.0), and so it currently implements this as a (debug) assertion that
    /// verifies that `p` is actually aligned to `alignment`, before delegating
    /// to `mi_free`.
    ///
    /// However, currently there's no way to have this crate enable mimalloc's
    /// debug assertions, so these checks aren't particularly useful.
    ///
    /// Note: It's legal to pass null to this function.
    pub fn mi_free_aligned(p: *mut c_void, alignment: usize);

    /// Print the main statistics.
    ///
    /// Ignores the passed-in argument, and outputs to the registered output
    /// function or stderr by default.
    ///
    /// Most detailed when using a debug build.
    pub fn mi_stats_print(_: *mut c_void);

    /// Print the main statistics.
    ///
    /// Pass `None` for `out` to use the default. If `out` is provided, `arg` is
    /// passed as its second parameter.
    ///
    /// Most detailed when using a debug build.
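    ///
    /// # Example
    ///
    /// A minimal sketch using the default output (`None`):
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     // With `out` set to `None` the statistics go to stderr; `arg` is
    ///     // only forwarded to a callback, so it is unused here.
    ///     mi::mi_stats_print_out(None, core::ptr::null_mut());
    /// }
    /// ```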
    pub fn mi_stats_print_out(out: mi_output_fun, arg: *mut c_void);

    /// Reset statistics.
    ///
    /// Note: This function is thread safe.
    pub fn mi_stats_reset();

    /// Merge thread local statistics with the main statistics and reset.
    ///
    /// Note: This function is thread safe.
    pub fn mi_stats_merge();

    /// Return the mimalloc version number.
    ///
    /// For example, version 1.6.3 would return the number `163`.
    pub fn mi_version() -> c_int;

    /// Initialize mimalloc on a thread.
    ///
    /// Should not normally be needed, as on most systems (pthreads, Windows)
    /// this is done automatically.
    pub fn mi_thread_init();

    /// Initialize mimalloc for the process.
    ///
    /// Should not normally be needed, as it's called automatically by
    /// [`mi_thread_init`] or the process loader.
    pub fn mi_process_init();

    /// Return process information (time and memory usage). All parameters are
    /// optional (nullable) out-params:
    ///
    /// | Parameter        | Description |
    /// | :-               | :- |
    /// | `elapsed_msecs`  | Elapsed wall-clock time of the process in milli-seconds. |
    /// | `user_msecs`     | User time in milli-seconds (as the sum over all threads). |
    /// | `system_msecs`   | System time in milli-seconds. |
    /// | `current_rss`    | Current working set size (touched pages). |
    /// | `peak_rss`       | Peak working set size (touched pages). |
    /// | `current_commit` | Current committed memory (backed by the page file). |
    /// | `peak_commit`    | Peak committed memory (backed by the page file). |
    /// | `page_faults`    | Count of hard page faults. |
    ///
    /// The `current_rss` is precise on Windows and MacOSX; other systems
    /// estimate this using `current_commit`. The `commit` is precise on Windows
    /// but estimated on other systems as the amount of read/write accessible
    /// memory reserved by mimalloc.
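    ///
    /// # Example
    ///
    /// A sketch requesting only two of the fields (the rest are passed as null
    /// and skipped):
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let mut elapsed = 0usize;
    ///     let mut peak_rss = 0usize;
    ///     mi::mi_process_info(
    ///         &mut elapsed,
    ///         core::ptr::null_mut(), // user_msecs
    ///         core::ptr::null_mut(), // system_msecs
    ///         core::ptr::null_mut(), // current_rss
    ///         &mut peak_rss,
    ///         core::ptr::null_mut(), // current_commit
    ///         core::ptr::null_mut(), // peak_commit
    ///         core::ptr::null_mut(), // page_faults
    ///     );
    ///     println!("elapsed: {} ms, peak rss: {}", elapsed, peak_rss);
    /// }
    /// ```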
    pub fn mi_process_info(
        elapsed_msecs: *mut usize,
        user_msecs: *mut usize,
        system_msecs: *mut usize,
        current_rss: *mut usize,
        peak_rss: *mut usize,
        current_commit: *mut usize,
        peak_commit: *mut usize,
        page_faults: *mut usize,
    );

    /// Uninitialize mimalloc on a thread.
    ///
    /// Should not normally be needed, as on most systems (pthreads, Windows)
    /// this is done automatically. Ensures that any memory that is not freed yet
    /// (but will be freed by other threads in the future) is properly handled.
    ///
    /// Note: This function is thread safe.
    pub fn mi_thread_done();

    /// Print out heap statistics for this thread.
    ///
    /// Pass `None` for `out` to use the default. If `out` is provided, `arg` is
    /// passed as its second parameter.
    ///
    /// Most detailed when using a debug build.
    ///
    /// Note: This function is thread safe.
    pub fn mi_thread_stats_print_out(out: mi_output_fun, arg: *mut c_void);

    /// Register an output function.
    ///
    /// - `out` The output function, use `None` to output to stderr.
    /// - `arg` Argument that will be passed on to the output function.
    ///
    /// The `out` function is called to output any information from mimalloc,
    /// like verbose or warning messages.
    ///
    /// Note: This function is thread safe.
    pub fn mi_register_output(out: mi_output_fun, arg: *mut c_void);

    /// Register a deferred free function.
    ///
    /// - `deferred_free` Address of a deferred free-ing function or `None` to
    ///   unregister.
    /// - `arg` Argument that will be passed on to the deferred free function.
    ///
    /// Some runtime systems use deferred free-ing, for example when using
    /// reference counting to limit the worst case free time.
    ///
    /// Such systems can register a (re-entrant) deferred free function to free
    /// more memory on demand.
    ///
    /// - When the `force` parameter is `true` all possible memory should be
    ///   freed.
    ///
    /// - The per-thread `heartbeat` parameter is monotonically increasing and
    ///   guaranteed to be deterministic if the program allocates
    ///   deterministically.
    ///
    /// - The `deferred_free` function is guaranteed to be called
    ///   deterministically after some number of allocations (regardless of
    ///   freeing or available free memory).
    ///
    /// At most one `deferred_free` function can be active.
    ///
    /// Note: This function is thread safe.
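    ///
    /// # Example
    ///
    /// A sketch of a no-op deferred-free hook; a real runtime would drain its
    /// free queue inside `drain` (the callback name and behavior here are
    /// illustrative):
    ///
    /// ```
    /// use core::ffi::c_void;
    /// use libmimalloc_sys as mi;
    ///
    /// // `heartbeat` is a `c_ulonglong` (`u64`) and increases monotonically.
    /// unsafe extern "C" fn drain(_force: bool, _heartbeat: u64, _arg: *mut c_void) {
    ///     // free queued blocks with `mi_free` here
    /// }
    ///
    /// unsafe {
    ///     mi::mi_register_deferred_free(Some(drain), core::ptr::null_mut());
    ///     // ... allocate as usual; mimalloc calls `drain` periodically ...
    ///     mi::mi_register_deferred_free(None, core::ptr::null_mut());
    /// }
    /// ```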
    pub fn mi_register_deferred_free(out: mi_deferred_free_fun, arg: *mut c_void);

    /// Register an error callback function.
    ///
    /// The `errfun` function is called on an error in mimalloc after emitting
    /// an error message (through the output function).
    ///
    /// It is always legal to just return from the `errfun` function, in which
    /// case the allocation functions generally return null or ignore the
    /// condition.
    ///
    /// The default function only calls `abort()` when compiled in secure mode
    /// with an `EFAULT` error. The possible error codes are:
    ///
    /// - `EAGAIN` (11): Double free was detected (only in debug and secure
    ///   mode).
    /// - `EFAULT` (14): Corrupted free list or meta-data was detected (only in
    ///   debug and secure mode).
    /// - `ENOMEM` (12): Not enough memory available to satisfy the request.
    /// - `EOVERFLOW` (75): The request was too large, for example when the
    ///   `count` and `size` parameters to `mi_calloc` overflow.
    /// - `EINVAL` (22): Trying to free or re-allocate an invalid pointer.
    ///
    /// Note: This function is thread safe.
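    ///
    /// # Example
    ///
    /// A sketch of a logging-only handler: `on_error` just prints and returns,
    /// so failing allocations still return null as described above (the
    /// `cty::c_int` import matches the parameter type of [`mi_error_fun`]):
    ///
    /// ```
    /// use core::ffi::c_void;
    /// use cty::c_int;
    /// use libmimalloc_sys as mi;
    ///
    /// unsafe extern "C" fn on_error(code: c_int, _arg: *mut c_void) {
    ///     eprintln!("mimalloc reported error code {}", code);
    /// }
    ///
    /// unsafe {
    ///     mi::mi_register_error(Some(on_error), core::ptr::null_mut());
    /// }
    /// ```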
    pub fn mi_register_error(out: mi_error_fun, arg: *mut c_void);
}

/// An output callback. Must be thread-safe.
///
/// See [`mi_stats_print_out`], [`mi_thread_stats_print_out`], [`mi_register_output`]
pub type mi_output_fun = Option<unsafe extern "C" fn(msg: *const c_char, arg: *mut c_void)>;

/// Type of deferred free functions. Must be thread-safe.
///
/// - `force`: If true, all outstanding items should be freed.
/// - `heartbeat` A monotonically increasing count.
/// - `arg` Argument that was passed at registration to hold extra state.
///
/// See [`mi_register_deferred_free`]
pub type mi_deferred_free_fun =
    Option<unsafe extern "C" fn(force: bool, heartbeat: c_ulonglong, arg: *mut c_void)>;

/// Type of error callback functions. Must be thread-safe.
///
/// - `err`: Error code (see [`mi_register_error`] for a list).
/// - `arg`: Argument that was passed at registration to hold extra state.
///
/// See [`mi_register_error`]
pub type mi_error_fun = Option<unsafe extern "C" fn(code: c_int, arg: *mut c_void)>;

/// Runtime options. All options are false by default.
pub type mi_option_t = c_int;

#[cfg(feature = "arena")]
/// Arena Id
pub type mi_arena_id_t = c_int;

// Note: mimalloc doc website seems to have the order of show_stats and
// show_errors reversed as of 1.6.3, however what I have here is correct:
// https://github.com/microsoft/mimalloc/issues/266#issuecomment-653822341

/// Print error messages to `stderr`.
pub const mi_option_show_errors: mi_option_t = 0;

/// Print statistics to `stderr` when the program is done.
pub const mi_option_show_stats: mi_option_t = 1;

/// Print verbose messages to `stderr`.
pub const mi_option_verbose: mi_option_t = 2;

/// ### The following options are experimental

/// Option (experimental) Use large OS pages (2MiB in size) if possible.
///
/// Use large OS pages (2MiB) when available; for some workloads this can
/// significantly improve performance. Use mi_option_verbose to check if
/// the large OS pages are enabled -- usually one needs to explicitly allow
/// large OS pages (as on Windows and Linux). However, sometimes the OS is
/// very slow to reserve contiguous physical memory for large OS pages so
/// use with care on systems that can have fragmented memory (for that
/// reason, we generally recommend to use mi_option_reserve_huge_os_pages
/// instead whenever possible).
pub const mi_option_large_os_pages: mi_option_t = 6;

/// Option (experimental) The number of huge OS pages (1GiB in size) to reserve at the start of the program.
///
/// This reserves the huge pages at startup and sometimes this can give a large (latency) performance
/// improvement on big workloads. Usually it is better to not use MIMALLOC_LARGE_OS_PAGES in
/// combination with this setting. Just like large OS pages, use with care as reserving contiguous
/// physical memory can take a long time when memory is fragmented (but reserving the huge pages is
/// done at startup only once). Note that we usually need to explicitly enable huge OS pages (as on
/// Windows and Linux). With huge OS pages, it may be beneficial to set the option
/// mi_option_eager_commit_delay=N (N is 1 by default) to delay the initial N segments (of 4MiB) of
/// a thread to not allocate in the huge OS pages; this prevents threads that are short lived and
/// allocate just a little to take up space in the huge OS page area (which cannot be reset).
pub const mi_option_reserve_huge_os_pages: mi_option_t = 7;

/// Option (experimental) Reserve huge OS pages at node N.
///
/// The huge pages are usually allocated evenly among NUMA nodes.
/// You can use mi_option_reserve_huge_os_pages_at=N where `N` is the numa node (starting at 0) to allocate all
/// the huge pages at a specific numa node instead.
pub const mi_option_reserve_huge_os_pages_at: mi_option_t = 8;

/// Option (experimental) Reserve specified amount of OS memory at startup, e.g. "1g" or "512m".
pub const mi_option_reserve_os_memory: mi_option_t = 9;

/// Option (experimental) The first N segments per thread are not eagerly committed (default: 1).
pub const mi_option_eager_commit_delay: mi_option_t = 14;

/// Option (experimental) Pretend there are at most N NUMA nodes; Use 0 to use the actual detected NUMA nodes at runtime.
pub const mi_option_use_numa_nodes: mi_option_t = 16;

/// Option (experimental) If set to 1, do not use OS memory for allocation (but only pre-reserved arenas)
pub const mi_option_limit_os_alloc: mi_option_t = 17;

/// Option (experimental) OS tag to assign to mimalloc'd memory
pub const mi_option_os_tag: mi_option_t = 18;

/// Option (experimental)
pub const mi_option_max_errors: mi_option_t = 19;

/// Option (experimental)
pub const mi_option_max_warnings: mi_option_t = 20;

/// Option (experimental)
pub const mi_option_max_segment_reclaim: mi_option_t = 21;

/// Last option.
pub const _mi_option_last: mi_option_t = 28;

extern "C" {
    // Note: mi_option_{enable,disable} aren't exposed because they're redundant
    // and because of https://github.com/microsoft/mimalloc/issues/266.

    /// Returns true if the provided option is enabled.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_is_enabled(option: mi_option_t) -> bool;

    /// Enable or disable the given option.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_enabled(option: mi_option_t, enable: bool);

    /// If the given option has not yet been initialized with [`mi_option_set`]
    /// or [`mi_option_set_enabled`], enables or disables the option. If it has,
    /// this function does nothing.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_enabled_default(option: mi_option_t, enable: bool);

    /// Returns the value of the provided option.
    ///
    /// Boolean options return 1 or 0; however, some experimental options take a
    /// numeric value, which is the intended use of this function.
    ///
    /// These options are not exposed as constants for stability reasons;
    /// however, you can still use them as arguments to this and other
    /// `mi_option_` functions if needed. See the mimalloc documentation for
    /// details: <https://microsoft.github.io/mimalloc/group__options.html>
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_get(option: mi_option_t) -> c_long;

    /// Set the option to the given value.
    ///
    /// Boolean options take 1 or 0; however, some experimental options take a
    /// numeric value, which is the intended use of this function.
    ///
    /// These options are not exposed as constants for stability reasons;
    /// however, you can still use them as arguments to this and other
    /// `mi_option_` functions if needed.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set(option: mi_option_t, value: c_long);

    /// If the given option has not yet been initialized with [`mi_option_set`]
    /// or [`mi_option_set_enabled`], sets the option to the given value. If it
    /// has, this function does nothing.
    ///
    /// Boolean options take 1 or 0; however, some experimental options take a
    /// numeric value, which is the intended use of this function.
    ///
    /// These options are not exposed as constants for stability reasons;
    /// however, you can still use them as arguments to this and other
    /// `mi_option_` functions if needed.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_default(option: mi_option_t, value: c_long);
}

/// First-class heaps that can be destroyed in one go.
///
/// Note: The pointers allocated out of a heap can be freed using
/// [`mi_free`](crate::mi_free) -- there is no `mi_heap_free`.
///
/// # Example
///
/// ```
/// use libmimalloc_sys as mi;
/// unsafe {
///     let h = mi::mi_heap_new();
///     assert!(!h.is_null());
///     let p = mi::mi_heap_malloc(h, 50);
///     assert!(!p.is_null());
///
///     // use p...
///     mi::mi_free(p);
///
///     // Clean up the heap. Note that pointers allocated from `h`
///     // are *not* invalidated by `mi_heap_delete`. You would have
///     // to use (the very dangerous) `mi_heap_destroy` for that
///     // behavior.
///     mi::mi_heap_delete(h);
/// }
/// ```
pub enum mi_heap_t {}

/// An area of heap space that contains blocks of a single size.
///
/// The bytes in freed blocks are `committed - used`.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct mi_heap_area_t {
    /// Start of the area containing heap blocks.
    pub blocks: *mut c_void,
    /// Bytes reserved for this area.
    pub reserved: usize,
    /// Current committed bytes of this area.
    pub committed: usize,
    /// Bytes in use by allocated blocks.
    pub used: usize,
    /// Size in bytes of one block.
    pub block_size: usize,
    /// Size in bytes of a full block including padding and metadata.
    pub full_block_size: usize,
}

/// Visitor function passed to [`mi_heap_visit_blocks`]
///
/// Should return `true` to continue, and `false` to stop visiting (i.e. break).
///
/// This function is always first called for every `area` with `block` as a null
/// pointer. If `visit_all_blocks` was `true`, the function is then called for
/// every allocated block in that area.
pub type mi_block_visit_fun = Option<
    unsafe extern "C" fn(
        heap: *const mi_heap_t,
        area: *const mi_heap_area_t,
        block: *mut c_void,
        block_size: usize,
        arg: *mut c_void,
    ) -> bool,
>;

extern "C" {
    /// Create a new heap that can be used for allocation.
    pub fn mi_heap_new() -> *mut mi_heap_t;

    /// Delete a previously allocated heap.
    ///
    /// This will release resources and migrate any still allocated blocks in
    /// this heap (efficiently) to the default heap.
    ///
    /// If `heap` is the default heap, the default heap is set to the backing
    /// heap.
    pub fn mi_heap_delete(heap: *mut mi_heap_t);

    /// Destroy a heap, freeing all its still allocated blocks.
    ///
    /// Use with care as this will free all blocks still allocated in the heap.
    /// However, this can be a very efficient way to free all heap memory in one
    /// go.
    ///
    /// If `heap` is the default heap, the default heap is set to the backing
    /// heap.
    pub fn mi_heap_destroy(heap: *mut mi_heap_t);

    /// Set the default heap to use for [`mi_malloc`](crate::mi_malloc) et al.
    ///
    /// Returns the previous default heap.
    pub fn mi_heap_set_default(heap: *mut mi_heap_t) -> *mut mi_heap_t;

    /// Get the default heap that is used for [`mi_malloc`](crate::mi_malloc) et al.
    pub fn mi_heap_get_default() -> *mut mi_heap_t;

    /// Get the backing heap.
    ///
    /// The _backing_ heap is the initial default heap for a thread and always
    /// available for allocations. It cannot be destroyed or deleted except by
    /// exiting the thread.
    pub fn mi_heap_get_backing() -> *mut mi_heap_t;

    /// Release outstanding resources in a specific heap.
    ///
    /// See also [`mi_collect`].
    pub fn mi_heap_collect(heap: *mut mi_heap_t, force: bool);

    /// Equivalent to [`mi_malloc`](crate::mi_malloc), but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_malloc(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_zalloc`](crate::mi_zalloc), but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_zalloc(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_calloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_calloc(heap: *mut mi_heap_t, count: usize, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_mallocn`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_mallocn(heap: *mut mi_heap_t, count: usize, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_malloc_small`], but allocates out of the specific
    /// heap instead of the default.
    ///
    /// `size` must be smaller than or equal to [`MI_SMALL_SIZE_MAX`].
    pub fn mi_heap_malloc_small(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_realloc`](crate::mi_realloc), but allocates out of
    /// the specific heap instead of the default.
    pub fn mi_heap_realloc(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_reallocn`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_reallocn(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        count: usize,
        size: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_reallocf`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_reallocf(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_strdup`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_strdup(heap: *mut mi_heap_t, s: *const c_char) -> *mut c_char;

    /// Equivalent to [`mi_strndup`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_strndup(heap: *mut mi_heap_t, s: *const c_char, n: usize) -> *mut c_char;

    /// Equivalent to [`mi_realpath`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_realpath(
        heap: *mut mi_heap_t,
        fname: *const c_char,
        resolved_name: *mut c_char,
    ) -> *mut c_char;

    /// Equivalent to [`mi_malloc_aligned`](crate::mi_malloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_malloc_aligned(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_malloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_malloc_aligned_at(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_zalloc_aligned(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_zalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_zalloc_aligned_at(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_calloc_aligned`], but allocates out of the specific
    /// heap instead of the default.
    pub fn mi_heap_calloc_aligned(
        heap: *mut mi_heap_t,
        count: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_calloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_calloc_aligned_at(
        heap: *mut mi_heap_t,
        count: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_realloc_aligned`](crate::mi_realloc_aligned), but allocates out of the specific
    /// heap instead of the default.
    pub fn mi_heap_realloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_realloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_realloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_rezalloc(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_recalloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_recalloc(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc_aligned`], but allocates out of the specific
    /// heap instead of the default.
    pub fn mi_heap_rezalloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_rezalloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_recalloc_aligned`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_recalloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_recalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_recalloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Does a heap contain a pointer to a previously allocated block?
    ///
    /// `p` must be a pointer to a previously allocated block (in any heap) -- it cannot be some
    /// random pointer!
    ///
    /// Returns `true` if the block pointed to by `p` is in the `heap`.
    ///
    /// See [`mi_heap_check_owned`].
    pub fn mi_heap_contains_block(heap: *mut mi_heap_t, p: *const c_void) -> bool;

    /// Check safely if any pointer is part of a heap.
    ///
    /// `p` may be any pointer -- not required to be previously allocated by the
    /// given heap or any other mimalloc heap. Returns `true` if `p` points to a
    /// block in the given heap, false otherwise.
    ///
    /// Note: expensive function, linear in the pages in the heap.
    ///
    /// See [`mi_heap_contains_block`], [`mi_heap_get_default`], and
    /// [`mi_is_in_heap_region`]
    pub fn mi_heap_check_owned(heap: *mut mi_heap_t, p: *const c_void) -> bool;

    /// Check safely if any pointer is part of the default heap of this thread.
    ///
    /// `p` may be any pointer -- not required to be previously allocated by the
    /// default heap for this thread, or any other mimalloc heap. Returns `true`
    /// if `p` points to a block in the default heap, false otherwise.
    ///
    /// Note: expensive function, linear in the pages in the heap.
    ///
    /// See [`mi_heap_contains_block`], [`mi_heap_get_default`]
    pub fn mi_check_owned(p: *const c_void) -> bool;

    /// Visit all areas and blocks in `heap`.
    ///
    /// If `visit_all_blocks` is false, the `visitor` is only called once for
    /// every heap area. If it's true, the `visitor` is also called for every
    /// allocated block inside every area (with `!block.is_null()`). Return
    /// `false` from the `visitor` to return early.
    ///
    /// `arg` is an extra argument passed into the `visitor`.
    ///
    /// Returns `true` if all areas and blocks were visited.
    ///
    /// Passing a `None` visitor is allowed, and is a no-op.
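    ///
    /// # Example
    ///
    /// A sketch that counts the allocated blocks in a fresh heap; the counter
    /// is threaded through `arg`:
    ///
    /// ```
    /// use core::ffi::c_void;
    /// use libmimalloc_sys as mi;
    ///
    /// unsafe extern "C" fn count_blocks(
    ///     _heap: *const mi::mi_heap_t,
    ///     _area: *const mi::mi_heap_area_t,
    ///     block: *mut c_void,
    ///     _block_size: usize,
    ///     arg: *mut c_void,
    /// ) -> bool {
    ///     // `block` is null for the initial per-area call.
    ///     if !block.is_null() {
    ///         *(arg as *mut usize) += 1;
    ///     }
    ///     true // keep visiting
    /// }
    ///
    /// unsafe {
    ///     let heap = mi::mi_heap_new();
    ///     assert!(!heap.is_null());
    ///     let p = mi::mi_heap_malloc(heap, 32);
    ///     assert!(!p.is_null());
    ///
    ///     let mut blocks = 0usize;
    ///     let arg = &mut blocks as *mut usize as *mut c_void;
    ///     mi::mi_heap_visit_blocks(heap, true, Some(count_blocks), arg);
    ///     assert!(blocks >= 1);
    ///
    ///     mi::mi_free(p);
    ///     mi::mi_heap_delete(heap);
    /// }
    /// ```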
    pub fn mi_heap_visit_blocks(
        heap: *const mi_heap_t,
        visit_all_blocks: bool,
        visitor: mi_block_visit_fun,
        arg: *mut c_void,
    ) -> bool;

    #[cfg(feature = "arena")]
    /// Create a heap that only allocates in the specified arena
    pub fn mi_heap_new_in_arena(arena_id: mi_arena_id_t) -> *mut mi_heap_t;

    #[cfg(feature = "arena")]
    /// Reserve OS memory for use by mimalloc. Reserved areas are used
    /// before allocating from the OS again. By reserving a large area upfront,
    /// allocation can be more efficient, and can be better managed on systems
    /// without `mmap`/`VirtualAlloc` (like WASM for example).
    ///
    /// - `size` The size to reserve.
    /// - `commit` Commit the memory upfront.
    /// - `allow_large` Allow large OS pages (2MiB) to be used?
    /// - `exclusive` Only allow allocations if specifically for this arena.
    /// - `arena_id` Pointer whose value will be set to the new arena id if successful.
    ///
    /// Returns 0 if successful, and an error code otherwise (e.g. `ENOMEM`).
    pub fn mi_reserve_os_memory_ex(
        size: usize,
        commit: bool,
        allow_large: bool,
        exclusive: bool,
        arena_id: *mut mi_arena_id_t,
    ) -> c_int;

    #[cfg(feature = "arena")]
    /// Manage a particular memory area for use by mimalloc.
    /// This is just like `mi_reserve_os_memory_ex`, except that the area should
    /// already be allocated in some manner and available for use by mimalloc.
    ///
    /// # Safety
    /// mimalloc will likely segfault when allocating from the arena if the arena `start` & `size`
    /// aren't aligned with mimalloc's `MI_SEGMENT_ALIGN` (e.g. 32MB on x86_64 machines).
    ///
    /// - `start` Start of the memory area
    /// - `size` The size of the memory area. Must be larger than
    ///          `MI_ARENA_BLOCK_SIZE` (e.g. 64MB on x86_64 machines).
    /// - `commit` Set true if the memory range is already committed.
    /// - `is_large` Set true if the memory range consists of large OS pages, or
    ///              if the memory should not be decommitted or protected (like
    ///              rdma etc.).
    /// - `is_zero` Set true if the memory range consists only of zeros.
    /// - `numa_node` Possible associated numa node or `-1`.
    /// - `exclusive` Only allow allocations if specifically for this arena.
    /// - `arena_id` Pointer whose value will be set to the new arena id if successful.
    ///
    /// Returns `true` if the arena was successfully allocated.
    pub fn mi_manage_os_memory_ex(
        start: *const c_void,
        size: usize,
        is_committed: bool,
        is_large: bool,
        is_zero: bool,
        numa_node: c_int,
        exclusive: bool,
        arena_id: *mut mi_arena_id_t,
    ) -> bool;
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn runtime_stable_option() {
        unsafe {
            assert_eq!(mi_option_get(mi_option_show_errors), 0);
            mi_option_set(mi_option_show_errors, 1);
            assert_eq!(mi_option_get(mi_option_show_errors), 1);

            assert_eq!(mi_option_get(mi_option_show_stats), 0);
            mi_option_set(mi_option_show_stats, 1);
            assert_eq!(mi_option_get(mi_option_show_stats), 1);

            assert_eq!(mi_option_get(mi_option_verbose), 0);
            mi_option_set(mi_option_verbose, 1);
            assert_eq!(mi_option_get(mi_option_verbose), 1);
        }
    }
}