lean_sys/lib.rs

/*!
Rust bindings to [Lean 4](https://github.com/leanprover/lean4)'s C API

Functions and comments manually translated from those in the [`lean.h` header](https://github.com/leanprover/lean4/blob/master/src/include/lean/lean.h) provided with Lean 4
*/
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(clippy::missing_safety_doc)]
#![no_std]

use core::ffi::*;
use core::sync::atomic::{AtomicI32, AtomicI64, AtomicU32, Ordering};
use memoffset::raw_field;
use static_assertions::const_assert;

pub mod alloc;
pub mod array;
pub mod closure;
pub mod constructor;
pub mod dbg;
pub mod external;
pub mod init;
pub mod int;
pub mod io;
pub mod libuv;
pub mod nat;
pub mod panic;
pub mod primitive;
pub mod sarray;
pub mod share_common;
pub mod string;
pub mod task;
pub mod thread;
pub mod thunk;

pub use array::*;
pub use closure::*;
pub use constructor::*;
pub use dbg::*;
pub use external::*;
pub use init::*;
pub use int::*;
pub use io::*;
pub use libuv::*;
pub use nat::*;
pub use primitive::*;
pub use sarray::*;
pub use share_common::*;
pub use string::*;
pub use task::*;
pub use thread::*;
pub use thunk::*;

pub const LEAN_SMALL_ALLOCATOR: bool = cfg!(feature = "small_allocator");
pub const LEAN_MIMALLOC: bool = cfg!(feature = "mimalloc");
pub const LEAN_CLOSURE_MAX_ARGS: u32 = 16;
pub const LEAN_OBJECT_SIZE_DELTA: usize = 8;
pub const LEAN_MAX_SMALL_OBJECT_SIZE: u32 = 4096;
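/* Object tags, stored in the `m_tag` byte of a `lean_object`: values up to
`LeanMaxCtorTag` are constructor tags, while the larger values below mark the
built-in object kinds. */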
pub const LeanMaxCtorTag: u8 = 243;
pub const LeanPromise: u8 = 244;
pub const LeanClosure: u8 = 245;
pub const LeanArray: u8 = 246;
pub const LeanStructArray: u8 = 247;
pub const LeanScalarArray: u8 = 248;
pub const LeanString: u8 = 249;
pub const LeanMPZ: u8 = 250;
pub const LeanThunk: u8 = 251;
pub const LeanTask: u8 = 252;
pub const LeanRef: u8 = 253;
pub const LeanExternal: u8 = 254;
pub const LeanReserved: u8 = 255;
pub const LEAN_MAX_CTOR_FIELDS: u32 = 256;
pub const LEAN_MAX_CTOR_SCALARS_SIZE: u32 = 1024;

const_assert! {
    layout_compat::<c_int, i32>() ||
    layout_compat::<c_int, i64>()
}

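/// Whether types `A` and `B` have the same size and alignment
///
/// # Examples
/// A small illustrative check:
/// ```rust
/// # use lean_sys::*;
/// assert!(layout_compat::<u32, i32>());
/// assert!(!layout_compat::<u8, u64>());
/// ```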
#[inline(always)]
pub const fn layout_compat<A, B>() -> bool {
    size_of::<A>() == size_of::<B>() && align_of::<A>() == align_of::<B>()
}

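/// Atomically load a `c_int` through `p`, using the atomic integer type that matches `c_int`'s width
///
/// # Examples
/// A minimal sketch; the value is local here, so the relaxed ordering is incidental:
/// ```rust
/// # use lean_sys::*;
/// # use core::ffi::c_int;
/// # use core::sync::atomic::Ordering;
/// let x: c_int = 7;
/// assert_eq!(unsafe { c_int_load(&x, Ordering::Relaxed) }, 7);
/// ```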
#[inline(always)]
pub unsafe fn c_int_load(p: *const c_int, order: Ordering) -> c_int {
    match c_int::BITS {
        32 => (*(p as *const AtomicI32)).load(order) as c_int,
        64 => (*(p as *const AtomicI64)).load(order) as c_int,
        _ => unreachable!(),
    }
}

mod types;
pub use types::*;

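/// Whether `tag` is one of the "big" object tags (`LeanArray`, `LeanStructArray`, `LeanScalarArray`, or `LeanString`)
///
/// # Examples
/// ```rust
/// # use lean_sys::*;
/// assert!(lean_is_big_object_tag(LeanString));
/// assert!(!lean_is_big_object_tag(LeanClosure));
/// ```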
#[inline]
pub fn lean_is_big_object_tag(tag: u8) -> bool {
    matches!(
        tag,
        LeanArray | LeanStructArray | LeanScalarArray | LeanString
    )
}

/// Whether this lean object is a scalar
///
/// # Examples
/// ```rust
/// # use lean_sys::*;
/// unsafe { lean_initialize_runtime_module(); }
/// let arr = unsafe { lean_mk_empty_array() };
/// assert!(lean_is_scalar(lean_box(5)));
/// assert!(!lean_is_scalar(arr));
/// ```
#[inline(always)]
pub fn lean_is_scalar(obj: *const lean_object) -> bool {
    (obj as usize & 1) == 1
}

/// Box a `usize` into a `lean_object`
///
/// # Examples
/// ```rust
/// # use lean_sys::*;
/// for i in [0, 5, 658948, 1 << (usize::BITS - 2)] {
///     assert_eq!(lean_unbox(lean_box(i)), i);
/// }
/// ```
#[inline(always)]
pub const fn lean_box(n: usize) -> *mut lean_object {
    ((n << 1) | 1) as *mut lean_object
}

/// Unbox a `lean_object` into a `usize`
///
/// See [`lean_box`] for usage examples.
#[inline(always)]
pub fn lean_unbox(o: *const lean_object) -> usize {
    o as usize >> 1
}

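/// Round `v` up to a multiple of `a`
///
/// # Examples
/// A small illustrative check:
/// ```rust
/// # use lean_sys::*;
/// assert_eq!(lean_align(13, 8), 16);
/// assert_eq!(lean_align(16, 8), 16);
/// ```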
#[inline]
pub const fn lean_align(v: usize, a: usize) -> usize {
    (v / a).wrapping_mul(a) + a.wrapping_mul((v % a != 0) as usize)
}

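/// Map a small-object size (already aligned to [`LEAN_OBJECT_SIZE_DELTA`]) to its slot index in the small allocator
///
/// # Examples
/// ```rust
/// # use lean_sys::*;
/// assert_eq!(lean_get_slot_idx(8), 0);
/// assert_eq!(lean_get_slot_idx(24), 2);
/// ```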
#[inline]
pub fn lean_get_slot_idx(sz: c_uint) -> c_uint {
    debug_assert_ne!(sz, 0);
    debug_assert_eq!(lean_align(sz as usize, LEAN_OBJECT_SIZE_DELTA), sz as usize);
    sz / (LEAN_OBJECT_SIZE_DELTA as c_uint) - 1
}

#[inline]
pub unsafe fn lean_alloc_small_object(sz: c_uint) -> *mut lean_object {
    if LEAN_SMALL_ALLOCATOR {
        let sz = lean_align(sz as usize, LEAN_OBJECT_SIZE_DELTA) as c_uint;
        let slot_idx = lean_get_slot_idx(sz);
        debug_assert!(sz <= LEAN_MAX_SMALL_OBJECT_SIZE);
        lean_alloc_small(sz, slot_idx).cast()
    } else {
        lean_inc_heartbeat();
        if LEAN_MIMALLOC {
            // HACK: emulate behavior of small allocator to avoid `leangz` breakage for now
            let sz = lean_align(sz as usize, LEAN_OBJECT_SIZE_DELTA) as c_uint;
            #[cfg(feature = "libmimalloc-sys")]
            let mem = libmimalloc_sys::mi_malloc_small(sz as usize);
            #[cfg(not(feature = "libmimalloc-sys"))]
            let mem = core::ptr::null_mut::<c_void>();
            if mem.is_null() {
                lean_internal_panic_out_of_memory()
            }
            let o = mem.cast::<lean_object>();
            (*o).m_cs_sz = sz as u16;
            o
        } else {
            let mem = libc::malloc(size_of::<usize>() + sz as usize) as *mut usize;
            if mem.is_null() {
                lean_internal_panic_out_of_memory()
            }
            *mem = sz as usize;
            mem.add(1).cast()
        }
    }
}

#[inline]
pub unsafe fn lean_alloc_ctor_memory(sz: c_uint) -> *mut lean_object {
    if LEAN_SMALL_ALLOCATOR {
        let sz1 = lean_align(sz as usize, LEAN_OBJECT_SIZE_DELTA) as c_uint;
        let slot_idx = lean_get_slot_idx(sz1);
        debug_assert!(sz1 <= LEAN_MAX_SMALL_OBJECT_SIZE);
        let r = lean_alloc_small(sz1, slot_idx);
        if sz1 > sz {
            /* Initialize last word.
            In our runtime `lean_object_byte_size` is always
            a multiple of the machine word size for constructors.

            By setting the last word to 0, we make sure the sharing
            maximizer procedures at `maxsharing.cpp` and `compact.cpp` are
            not affected by uninitialized data at the (sz1 - sz) last bytes.
            Otherwise, we may mistakenly assume two structurally equal
            objects are not identical because of this uninitialized memory. */
            let end = (r as *mut u8).add(sz1 as usize) as *mut usize;
            *end.sub(1) = 0;
        }
        r as *mut lean_object
    } else if LEAN_MIMALLOC {
        let sz1 = lean_align(sz as usize, LEAN_OBJECT_SIZE_DELTA) as c_uint;
        let r = lean_alloc_small_object(sz1);
        if sz1 > sz {
            *r.byte_add(sz1 as usize).cast::<usize>().sub(1) = 0;
        }
        r
    } else {
        lean_alloc_small_object(sz)
    }
}

#[inline(always)]
pub unsafe fn lean_small_object_size(o: *mut lean_object) -> c_uint {
    if LEAN_SMALL_ALLOCATOR {
        lean_small_mem_size(o.cast())
    } else if LEAN_MIMALLOC {
        (*o).m_cs_sz as c_uint
    } else {
        *o.cast::<usize>().sub(1) as c_uint
    }
}

#[inline(always)]
pub unsafe fn lean_free_small_object(o: *mut lean_object) {
    if LEAN_SMALL_ALLOCATOR {
        lean_free_small(o.cast())
    } else if LEAN_MIMALLOC {
        #[cfg(feature = "libmimalloc-sys")]
        libmimalloc_sys::mi_free(o.cast())
    } else {
        extern "C" {
            fn free_sized(ptr: *mut c_void, _: usize);
        }
        let ptr = o.cast::<usize>().sub(1);
        free_sized(ptr.cast(), *ptr + size_of::<usize>());
    }
}

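/// Read the tag byte (`m_tag`) of a heap-allocated object; `o` must not be a scalar
///
/// # Examples
/// An illustrative check, assuming `lean_mk_empty_array` allocates a fresh array object:
/// ```rust
/// # use lean_sys::*;
/// unsafe { lean_initialize_runtime_module(); }
/// let arr = unsafe { lean_mk_empty_array() };
/// assert_eq!(unsafe { lean_ptr_tag(arr) }, LeanArray);
/// ```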
#[inline(always)]
pub unsafe fn lean_ptr_tag(o: *const lean_object) -> u8 {
    *raw_field!(o, lean_object, m_tag)
}

#[inline(always)]
pub unsafe fn lean_ptr_other(o: *const lean_object) -> c_uint {
    *raw_field!(o, lean_object, m_other) as c_uint
}

/// Whether an object is marked multi-threaded (such objects store a negative reference count)
#[inline(always)]
pub unsafe fn lean_is_mt(o: *const lean_object) -> bool {
    relaxed_rc_load(o) < 0
}

/// Whether an object is single-threaded (its reference count is positive)
#[inline(always)]
pub unsafe fn lean_is_st(o: *const lean_object) -> bool {
    relaxed_rc_load(o) > 0
}

/// Perform a relaxed load of the reference counter of `o`, which must be a pointer to a valid [`lean_object`]
#[inline(always)]
pub unsafe fn relaxed_rc_load(o: *const lean_object) -> c_int {
    c_int_load(lean_get_rc_mt_addr(o as *mut _), Ordering::Relaxed)
}

/// Whether an object is persistent (reference count 0): we never update the reference counter of
/// objects stored in compact regions or allocated at initialization time.
#[inline(always)]
pub unsafe fn lean_is_persistent(o: *const lean_object) -> bool {
    relaxed_rc_load(o) == 0
}

/// Whether an object is reference counted
#[inline(always)]
pub unsafe fn lean_has_rc(o: *const lean_object) -> bool {
    relaxed_rc_load(o) != 0
}

/// Address of the reference-count word in the header of `o`
#[inline(always)]
pub unsafe fn lean_get_rc_mt_addr(o: *mut lean_object) -> *mut c_int {
    raw_field!(o, lean_object, m_rc) as *mut c_int
}

#[inline]
pub unsafe fn lean_inc_ref_n(o: *mut lean_object, n: usize) {
    if lean_is_st(o) {
        *(raw_field!(o, lean_object, m_rc) as *mut c_int) += n as c_int
    } else if *raw_field!(o, lean_object, m_rc) != 0 {
        // Multi-threaded objects store a negative reference count, so an increment
        // is a relaxed atomic subtraction; persistent objects (reference count 0)
        // are left untouched.
        (*lean_get_rc_mt_addr(o).cast::<AtomicU32>()).fetch_sub(n as c_uint, Ordering::Relaxed);
    }
}

#[inline]
pub unsafe fn lean_inc_ref(o: *mut lean_object) {
    lean_inc_ref_n(o, 1)
}

#[inline]
pub unsafe fn lean_dec_ref(o: *mut lean_object) {
    if relaxed_rc_load(o) > 1 {
        *(raw_field!(o, lean_object, m_rc) as *mut c_int) -= 1
    } else if *raw_field!(o, lean_object, m_rc) != 0 {
        lean_dec_ref_cold(o)
    }
}

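/// Increment the reference count of `o`, doing nothing if `o` is a scalar
///
/// # Examples
/// Scalars are not reference counted, so this is a no-op for boxed values:
/// ```rust
/// # use lean_sys::*;
/// unsafe { lean_inc(lean_box(7)); } // no heap object involved; nothing happens
/// ```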
#[inline]
pub unsafe fn lean_inc(o: *mut lean_object) {
    if !lean_is_scalar(o) {
        lean_inc_ref(o)
    }
}

#[inline]
pub unsafe fn lean_inc_n(o: *mut lean_object, n: usize) {
    if !lean_is_scalar(o) {
        lean_inc_ref_n(o, n)
    }
}

#[inline]
pub unsafe fn lean_dec(o: *mut lean_object) {
    if !lean_is_scalar(o) {
        lean_dec_ref(o)
    }
}

#[inline(always)]
pub unsafe fn lean_is_ctor(o: *const lean_object) -> bool {
    lean_ptr_tag(o) <= LeanMaxCtorTag
}

#[inline(always)]
pub unsafe fn lean_is_closure(o: *const lean_object) -> bool {
    lean_ptr_tag(o) == LeanClosure
}

#[inline(always)]
pub unsafe fn lean_is_array(o: *const lean_object) -> bool {
    lean_ptr_tag(o) == LeanArray
}

#[inline(always)]
pub unsafe fn lean_is_sarray(o: *const lean_object) -> bool {
    lean_ptr_tag(o) == LeanScalarArray
}

#[inline(always)]
pub unsafe fn lean_is_string(o: *const lean_object) -> bool {
    lean_ptr_tag(o) == LeanString
}

#[inline(always)]
pub unsafe fn lean_is_mpz(o: *const lean_object) -> bool {
    lean_ptr_tag(o) == LeanMPZ
}

#[inline(always)]
pub unsafe fn lean_is_thunk(o: *const lean_object) -> bool {
    lean_ptr_tag(o) == LeanThunk
}

#[inline(always)]
pub unsafe fn lean_is_task(o: *const lean_object) -> bool {
    lean_ptr_tag(o) == LeanTask
}

#[inline(always)]
pub unsafe fn lean_is_promise(o: *const lean_object) -> bool {
    lean_ptr_tag(o) == LeanPromise
}

#[inline(always)]
pub unsafe fn lean_is_external(o: *const lean_object) -> bool {
    lean_ptr_tag(o) == LeanExternal
}

#[inline(always)]
pub unsafe fn lean_is_ref(o: *const lean_object) -> bool {
    lean_ptr_tag(o) == LeanRef
}

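/// The tag of `o`: the unboxed value for scalars, the header tag byte otherwise
///
/// # Examples
/// ```rust
/// # use lean_sys::*;
/// // Scalars carry their tag in the boxed value itself.
/// assert_eq!(unsafe { lean_obj_tag(lean_box(2)) }, 2);
/// ```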
#[inline(always)]
pub unsafe fn lean_obj_tag(o: *const lean_object) -> c_uint {
    if lean_is_scalar(o) {
        lean_unbox(o) as c_uint
    } else {
        lean_ptr_tag(o) as c_uint
    }
}

#[inline(always)]
pub unsafe fn lean_to_ctor(o: *mut lean_object) -> *mut lean_ctor_object {
    debug_assert!(lean_is_ctor(o));
    o.cast()
}

#[inline(always)]
pub unsafe fn lean_to_closure(o: *mut lean_object) -> *mut lean_closure_object {
    debug_assert!(lean_is_closure(o));
    o.cast()
}

#[inline(always)]
pub unsafe fn lean_to_array(o: *mut lean_object) -> *mut lean_array_object {
    debug_assert!(lean_is_array(o));
    o.cast()
}

#[inline(always)]
pub unsafe fn lean_to_sarray(o: *mut lean_object) -> *mut lean_sarray_object {
    debug_assert!(lean_is_sarray(o));
    o.cast()
}

#[inline(always)]
pub unsafe fn lean_to_string(o: *mut lean_object) -> *mut lean_string_object {
    debug_assert!(lean_is_string(o));
    o.cast()
}

#[inline(always)]
pub unsafe fn lean_to_thunk(o: *mut lean_object) -> *mut lean_thunk_object {
    debug_assert!(lean_is_thunk(o));
    o.cast()
}

#[inline(always)]
pub unsafe fn lean_to_task(o: *mut lean_object) -> *mut lean_task_object {
    debug_assert!(lean_is_task(o));
    o.cast()
}

#[inline(always)]
pub unsafe fn lean_to_promise(o: *mut lean_object) -> *mut lean_promise_object {
    debug_assert!(lean_is_promise(o));
    o.cast()
}

#[inline(always)]
pub unsafe fn lean_to_ref(o: *mut lean_object) -> *mut lean_ref_object {
    debug_assert!(lean_is_ref(o));
    o.cast()
}

#[inline(always)]
pub unsafe fn lean_to_external(o: *mut lean_object) -> *mut lean_external_object {
    debug_assert!(lean_is_external(o));
    o.cast()
}

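/// Whether `o` is exclusively owned (single-threaded with a reference count of exactly 1)
///
/// # Examples
/// An illustrative sketch, assuming `lean_mk_empty_array` returns a freshly allocated
/// (uniquely owned) array:
/// ```rust
/// # use lean_sys::*;
/// unsafe {
///     lean_initialize_runtime_module();
///     let arr = lean_mk_empty_array();
///     assert!(lean_is_exclusive(arr));
///     lean_inc(arr);
///     assert!(!lean_is_exclusive(arr));
///     assert!(lean_is_shared(arr));
///     lean_dec(arr);
///     assert!(lean_is_exclusive(arr));
///     lean_dec(arr);
/// }
/// ```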
#[inline]
pub unsafe fn lean_is_exclusive(o: *mut lean_object) -> bool {
    // TODO: add a `LEAN_LIKELY`-style branch hint (as in `lean.h`); or, why not just `relaxed_rc_load(o) == 1`?
    if lean_is_st(o) {
        *raw_field!(o, lean_object, m_rc) == 1
    } else {
        false
    }
}

#[inline]
pub unsafe fn lean_is_exclusive_obj(o: *mut lean_object) -> u8 {
    lean_is_exclusive(o) as u8
}

#[inline]
pub unsafe fn lean_is_shared(o: *mut lean_object) -> bool {
    // TODO: add a `LEAN_LIKELY`-style branch hint (as in `lean.h`); or, why not just `relaxed_rc_load(o) > 1`?
    if lean_is_st(o) {
        *raw_field!(o, lean_object, m_rc) > 1
    } else {
        false
    }
}

486
487#[inline]
488pub unsafe fn lean_set_st_header(o: *mut lean_object, tag: c_uint, other: c_uint) {
489    *lean_get_rc_mt_addr(o) = 1;
490    (raw_field!(o, lean_object, m_other) as *mut u8).write(other as u8);
491    (raw_field!(o, lean_object, m_tag) as *mut u8).write(tag as u8);
492    if !LEAN_MIMALLOC {
493        // already initialized by `lean_alloc(_small)_object` when using mimalloc
494        (raw_field!(o, lean_object, m_cs_sz) as *mut u16).write(0);
495    }
496}
497
498/** Remark: we don't need a reference counter for objects that are not stored in the heap.
499Thus, we use the area to store the object size for small objects. */
500#[inline]
501pub unsafe fn lean_set_non_heap_header(o: *mut lean_object, sz: usize, tag: c_uint, other: c_uint) {
502    debug_assert!(sz > 0);
503    debug_assert!(sz < (1 << 16));
504    debug_assert!(sz == 1 || !lean_is_big_object_tag(tag as u8));
505    *lean_get_rc_mt_addr(o) = 0;
506    (raw_field!(o, lean_object, m_cs_sz) as *mut u16).write(sz as u16);
507    (raw_field!(o, lean_object, m_other) as *mut u8).write(other as u8);
508    (raw_field!(o, lean_object, m_tag) as *mut u8).write(tag as u8);
509}
510
511/** `lean_set_non_heap_header` for (potentially) big objects such as arrays and strings. */
512#[inline(always)]
513pub unsafe fn lean_set_non_heap_header_for_big(o: *mut lean_object, tag: c_uint, other: c_uint) {
514    lean_set_non_heap_header(o, 1, tag, other)
515}
516
extern "C" {
    pub fn lean_set_exit_on_panic(flag: bool);
    /// Enable/disable panic messages
    pub fn lean_set_panic_messages(flag: bool);
    pub fn lean_panic(msg: *const u8, force_stderr: bool);
    pub fn lean_panic_fn(default_val: *mut lean_object, msg: *mut lean_object);
    pub fn lean_internal_panic(msg: *const u8) -> !;
    pub fn lean_internal_panic_out_of_memory() -> !;
    pub fn lean_internal_panic_unreachable() -> !;
    pub fn lean_internal_panic_rc_overflow() -> !;
    pub fn lean_alloc_small(sz: c_uint, slot_idx: c_uint) -> *mut c_void;
    pub fn lean_free_small(p: *mut c_void);
    pub fn lean_small_mem_size(p: *mut c_void) -> c_uint;
    pub fn lean_inc_heartbeat();
    pub fn lean_alloc_object(sz: usize) -> *mut lean_object;
    pub fn lean_free_object(o: *mut lean_object);
    /** The object size may be slightly bigger for constructor objects.
    The runtime does not track the size of the scalar size area.
    All constructor objects are "small", and allocated into pages.
    We retrieve their size by accessing the page header. The size of
    small objects is a multiple of `LEAN_OBJECT_SIZE_DELTA` */
    pub fn lean_object_byte_size(o: *mut lean_object) -> usize;
    /** Returns the size of the salient part of an object's storage,
    i.e. the parts that contribute to the value representation;
    padding or unused capacity is excluded. Operations that read
    from an object's storage must only access these parts, since
    the non-salient parts may not be initialized. */
    pub fn lean_object_data_byte_size(o: *mut lean_object) -> usize;
    #[cold]
    pub fn lean_dec_ref_cold(o: *mut lean_object);
    pub fn lean_mark_mt(o: *mut lean_object);
    pub fn lean_mark_persistent(o: *mut lean_object);
}