// nexus_smartptr/flex.rs
1//! Inline storage with heap fallback.
2//!
3//! [`Flex<T, B>`] tries to store a `?Sized` value inline. If the concrete
4//! type is too large or over-aligned, it falls back to a heap allocation.
5//! Construction never panics (unlike [`Flat`](crate::Flat)).
6//!
7//! `B` is a buffer marker type — `size_of::<Flex<dyn Trait, B32>>() == 32`.
8//!
9//! The heap pointer doubles as the inline/heap discriminant:
10//! null means inline, non-null is the heap address.
11
12use core::marker::PhantomData;
13use core::mem::{self, MaybeUninit};
14use core::ops::{Deref, DerefMut};
15use core::ptr;
16
17use alloc::alloc::{Layout, alloc, dealloc, handle_alloc_error};
18
19use crate::Buffer;
20use crate::meta::{self, Metadata};
21
22extern crate alloc;
23
/// Size of the metadata word (one pointer-sized word; see [`Metadata`]).
const META_SIZE: usize = mem::size_of::<Metadata>();

/// Size of the heap pointer / discriminant slot (one pointer-sized word).
const PTR_SIZE: usize = mem::size_of::<*mut u8>();
29
30/// Compile-time check that the buffer can hold the overhead fields.
31///
32/// For ?Sized T: B::CAPACITY >= 2 * pointer size (metadata + heap pointer).
33/// For Sized T: B::CAPACITY >= pointer size (heap pointer only).
34const fn assert_flex_buffer<T: ?Sized, B: Buffer>() {
35    if meta::is_fat_ptr::<T>() {
36        assert!(
37            B::CAPACITY >= META_SIZE + PTR_SIZE,
38            "Flex: buffer too small for ?Sized overhead (metadata + heap pointer)"
39        );
40    } else {
41        assert!(
42            B::CAPACITY >= PTR_SIZE,
43            "Flex: buffer too small for Sized overhead (heap pointer)"
44        );
45    }
46}
47
/// Inline storage with heap fallback for `?Sized` types.
///
/// Stores a trait object (or slice) inline when it fits, otherwise
/// heap-allocates. Use [`is_inline`](Flex::is_inline) to query which
/// path was taken.
///
/// The total struct size equals `size_of::<B>()`.
///
/// # Layout
///
/// The heap pointer slot doubles as the inline/heap discriminant
/// (null = inline, non-null = heap address).
///
/// - **?Sized T**: `[metadata(ptr)][heap_ptr(ptr)][value(B − 2*ptr)]`
/// - **Sized T**: `[heap_ptr(ptr)][value(B − ptr)]`
///
/// Use the [`flex!`](crate::flex!) macro for `?Sized` construction,
/// or [`Flex::new`] for `Sized` types.
///
/// # Compile-time safety
///
/// Buffers too small for the overhead produce a compile error:
///
/// ```compile_fail
/// nexus_smartptr::define_buffer!(B8, 8);
/// trait Foo { fn foo(&self); }
/// struct Bar;
/// impl Foo for Bar { fn foo(&self) {} }
/// // B8 can't fit ?Sized overhead (metadata + heap pointer = 16 bytes).
/// let _: nexus_smartptr::Flex<dyn Foo, B8> = nexus_smartptr::flex!(Bar);
/// ```
#[repr(C)]
pub struct Flex<T: ?Sized, B: Buffer> {
    // Raw byte storage for [metadata?][heap_ptr][inline value…];
    // never read or written as a `B` — `B` only fixes size/alignment.
    inner: MaybeUninit<B>,
    // Marks logical ownership of a `T` (variance / drop-check) without
    // storing one directly.
    _marker: PhantomData<T>,
}
84
impl<T: ?Sized, B: Buffer> Flex<T, B> {
    /// Byte offset where the heap-pointer / discriminant lives.
    ///
    /// Fat-pointer `T` keeps its metadata word at offset 0, so the slot
    /// follows it; thin-pointer `T` puts the slot at offset 0.
    const PTR_OFFSET: usize = if meta::is_fat_ptr::<T>() {
        META_SIZE
    } else {
        0
    };

    /// Byte offset where the inline value starts (just past the overhead).
    const VALUE_OFFSET: usize = Self::PTR_OFFSET + PTR_SIZE;

    /// Returns the usable inline value capacity in bytes.
    ///
    /// For `Sized` types: `B::CAPACITY` minus one pointer-sized word
    /// (heap-pointer slot). For `?Sized` types: `B::CAPACITY` minus two
    /// pointer-sized words (metadata + heap pointer). Saturates to 0 when
    /// the buffer holds only the overhead.
    pub const fn capacity() -> usize {
        B::CAPACITY.saturating_sub(Self::VALUE_OFFSET)
    }

    /// Returns `true` if the value is stored inline (no heap allocation).
    pub fn is_inline(&self) -> bool {
        self.heap_ptr().is_null()
    }

    /// Reads the heap-pointer / discriminant slot.
    ///
    /// Both construction paths write this slot (null for inline, the
    /// allocation address for heap), so the read is always initialized.
    #[inline(always)]
    fn heap_ptr(&self) -> *mut u8 {
        let base = self.inner.as_ptr().cast::<u8>();
        // SAFETY: PTR_OFFSET + PTR_SIZE <= B::CAPACITY (enforced by const assert),
        // and the slot was initialized at construction.
        unsafe { base.add(Self::PTR_OFFSET).cast::<*mut u8>().read() }
    }

    /// Constructs a `Flex` from a concrete value and a (possibly fat) pointer.
    ///
    /// This is an implementation detail of the [`flex!`](crate::flex!) macro.
    /// Do not call directly.
    ///
    /// # Safety
    ///
    /// `ptr` must be a pointer whose metadata (vtable/length) corresponds to `V`.
    /// The [`flex!`](crate::flex!) macro guarantees this via unsizing coercion.
    #[doc(hidden)]
    pub unsafe fn new_raw<V>(val: V, ptr: *const T) -> Self {
        // Compile-time: buffer must fit the overhead fields.
        const { assert_flex_buffer::<T, B>() }

        let size = mem::size_of::<V>();
        let align = mem::align_of::<V>();
        // Only the metadata half of `ptr` is consumed; the value itself
        // travels by move in `val`, so `ptr` is never dereferenced.
        let metadata = meta::extract_metadata(ptr);

        // Inline requires a size fit AND an alignment no stricter than the
        // buffer's usize-alignment guarantee; otherwise spill to the heap.
        if size <= Self::capacity() && align <= mem::align_of::<usize>() {
            Self::new_inline(val, metadata)
        } else {
            Self::new_heap(val, metadata)
        }
    }

    /// Inline construction path.
    ///
    /// Caller guarantees `size_of::<V>() <= Self::capacity()` and that `V`'s
    /// alignment fits the buffer (checked in `new_raw` / `new`).
    fn new_inline<V>(val: V, metadata: Metadata) -> Self {
        let mut this: Self = Flex {
            inner: MaybeUninit::uninit(),
            _marker: PhantomData,
        };
        let base = this.inner.as_mut_ptr().cast::<u8>();

        // SAFETY: buffer has capacity for overhead + value.
        // align(8) on buffer satisfies usize alignment.
        unsafe {
            if meta::is_fat_ptr::<T>() {
                // Write metadata at offset 0.
                base.cast::<*const ()>().write(metadata.0);
            }
            // Write null heap_ptr (= inline discriminant).
            base.add(Self::PTR_OFFSET)
                .cast::<*mut u8>()
                .write(ptr::null_mut());
            // Write value after the overhead.
            base.add(Self::VALUE_OFFSET).cast::<V>().write(val);
        }

        this
    }

    /// Heap construction path.
    fn new_heap<V>(val: V, metadata: Metadata) -> Self {
        let layout = Layout::new::<V>();
        let heap = if layout.size() == 0 {
            // ZST: dangling (aligned, non-null) pointer, no allocation.
            // Drop skips dealloc for this case via `layout.size() > 0`.
            core::ptr::NonNull::<V>::dangling().as_ptr().cast::<u8>()
        } else {
            // SAFETY: layout has non-zero size.
            let p = unsafe { alloc(layout) };
            if p.is_null() {
                handle_alloc_error(layout);
            }
            // SAFETY: p is valid, aligned for V, with sufficient size.
            unsafe {
                p.cast::<V>().write(val);
            }
            p
        };

        let mut this: Self = Flex {
            inner: MaybeUninit::uninit(),
            _marker: PhantomData,
        };
        let base = this.inner.as_mut_ptr().cast::<u8>();

        // SAFETY: writing metadata + heap pointer within buffer bounds.
        unsafe {
            if meta::is_fat_ptr::<T>() {
                base.cast::<*const ()>().write(metadata.0);
            }
            // Non-null pointer doubles as the "heap" discriminant.
            base.add(Self::PTR_OFFSET).cast::<*mut u8>().write(heap);
        }

        this
    }

    /// Returns the data pointer (to inline value or heap allocation).
    #[inline(always)]
    fn data_ptr(&self) -> *const () {
        let hp = self.heap_ptr();
        if hp.is_null() {
            // Inline: value lives after the overhead.
            // VALUE_OFFSET <= B::CAPACITY, so the result is in-bounds or at
            // worst one-past-the-end (inline values of size 0 only).
            let base = self.inner.as_ptr().cast::<u8>();
            unsafe { base.add(Self::VALUE_OFFSET) }.cast::<()>()
        } else {
            hp.cast::<()>().cast_const()
        }
    }

    /// Returns the mutable data pointer.
    #[inline(always)]
    fn data_ptr_mut(&mut self) -> *mut () {
        let hp = self.heap_ptr();
        if hp.is_null() {
            let base = self.inner.as_mut_ptr().cast::<u8>();
            unsafe { base.add(Self::VALUE_OFFSET) }.cast::<()>()
        } else {
            hp.cast::<()>()
        }
    }

    /// Returns a (possibly fat) pointer to the stored value.
    #[inline(always)]
    fn as_ptr(&self) -> *const T {
        let data = self.data_ptr();
        if meta::is_fat_ptr::<T>() {
            let base = self.inner.as_ptr().cast::<u8>();
            // SAFETY: metadata at offset 0, preserved from construction.
            let metadata = Metadata(unsafe { base.cast::<*const ()>().read() });
            unsafe { meta::make_ptr(data, metadata) }
        } else {
            unsafe { meta::make_ptr(data, Metadata::NULL) }
        }
    }

    /// Returns a mutable (possibly fat) pointer to the stored value.
    #[inline(always)]
    fn as_mut_ptr(&mut self) -> *mut T {
        let data = self.data_ptr_mut();
        if meta::is_fat_ptr::<T>() {
            let base = self.inner.as_ptr().cast::<u8>();
            // SAFETY: metadata at offset 0, preserved from construction.
            let metadata = Metadata(unsafe { base.cast::<*const ()>().read() });
            unsafe { meta::make_ptr_mut(data, metadata) }
        } else {
            unsafe { meta::make_ptr_mut(data, Metadata::NULL) }
        }
    }
}
256
257// -- Methods only for Sized T --
258impl<T, B: Buffer> Flex<T, B> {
259    /// Constructs a `Flex` from a `Sized` value.
260    ///
261    /// Stores inline if the value fits, otherwise heap-allocates.
262    ///
263    /// # Examples
264    ///
265    /// ```
266    /// use nexus_smartptr::{Flex, B32};
267    ///
268    /// let f: Flex<u64, B32> = Flex::new(42);
269    /// assert!(f.is_inline());
270    /// assert_eq!(*f, 42);
271    /// ```
272    pub fn new(val: T) -> Self {
273        // Compile-time: buffer must fit the overhead fields.
274        const { assert_flex_buffer::<T, B>() }
275
276        let size = mem::size_of::<T>();
277        let align = mem::align_of::<T>();
278        if size <= Self::capacity() && align <= mem::align_of::<usize>() {
279            Self::new_inline(val, Metadata::NULL)
280        } else {
281            Self::new_heap(val, Metadata::NULL)
282        }
283    }
284}
285
286impl<T: ?Sized, B: Buffer> Deref for Flex<T, B> {
287    type Target = T;
288
289    #[inline(always)]
290    fn deref(&self) -> &T {
291        // SAFETY: the stored value is valid and initialized.
292        unsafe { &*self.as_ptr() }
293    }
294}
295
296impl<T: ?Sized, B: Buffer> DerefMut for Flex<T, B> {
297    #[inline(always)]
298    fn deref_mut(&mut self) -> &mut T {
299        // SAFETY: same as Deref, plus exclusive access via &mut self.
300        unsafe { &mut *self.as_mut_ptr() }
301    }
302}
303
impl<T: ?Sized, B: Buffer> Drop for Flex<T, B> {
    fn drop(&mut self) {
        let hp = self.heap_ptr();
        if hp.is_null() {
            // Inline: drop in place; buffer storage needs no freeing.
            // SAFETY: as_mut_ptr returns a valid pointer to the stored value.
            unsafe {
                ptr::drop_in_place(self.as_mut_ptr());
            }
        } else {
            // Heap: get layout BEFORE drop_in_place — for fat pointers the
            // layout is derived through the still-alive value's metadata.
            let fat = self.as_mut_ptr();
            // SAFETY: value is still alive, fat pointer is valid.
            let layout = Layout::for_value(unsafe { &*fat });
            // SAFETY: fat points to the heap-allocated value.
            unsafe {
                ptr::drop_in_place(fat);
            }
            if layout.size() > 0 {
                // SAFETY: heap was allocated with this layout.
                // Size > 0 means this isn't a dangling ZST pointer,
                // so there is a real allocation to release.
                unsafe {
                    dealloc(hp, layout);
                }
            }
        }
    }
}
332
// SAFETY: Flex<T, B> logically owns a T. The raw pointer in the heap slot
// is an owned allocation — not shared. Send/Sync depend only on T.
// MaybeUninit<B> is raw storage, not a meaningful Send/Sync participant.
#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl<T: ?Sized + Send, B: Buffer> Send for Flex<T, B> {}
// SAFETY: shared access to Flex only ever yields &T (via Deref), so
// Sync reduces to T: Sync.
unsafe impl<T: ?Sized + Sync, B: Buffer> Sync for Flex<T, B> {}
339
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{B16, B32, B64};
    use core::fmt::Display;
    use std::sync::atomic::{AtomicUsize, Ordering};

    trait Greet {
        fn greet(&self) -> &str;
    }

    struct Hello;
    impl Greet for Hello {
        fn greet(&self) -> &str {
            "hello"
        }
    }

    struct World(u64);
    impl Greet for World {
        fn greet(&self) -> &str {
            "world"
        }
    }

    // Mirrors what the `flex!` macro does: take the fat pointer BEFORE
    // moving `val` into `new_raw`. The pointer is only read for its
    // metadata (vtable), never dereferenced after the move.
    fn make_flex_greet<V: Greet + 'static, B: Buffer>(val: V) -> Flex<dyn Greet, B> {
        let ptr: *const dyn Greet = &val as &dyn Greet;
        unsafe { Flex::new_raw(val, ptr) }
    }

    // The struct must be exactly as large as its buffer marker.
    #[test]
    fn total_size_matches_buffer() {
        assert_eq!(mem::size_of::<Flex<dyn Greet, B16>>(), 16);
        assert_eq!(mem::size_of::<Flex<dyn Greet, B32>>(), 32);
        assert_eq!(mem::size_of::<Flex<dyn Greet, B64>>(), 64);
        assert_eq!(mem::size_of::<Flex<u64, B32>>(), 32);
    }

    // ?Sized overhead is two pointer-sized words (metadata + heap ptr).
    #[test]
    fn capacity_unsized() {
        assert_eq!(Flex::<dyn Greet, B16>::capacity(), 0);
        assert_eq!(Flex::<dyn Greet, B32>::capacity(), 16);
        assert_eq!(Flex::<dyn Greet, B64>::capacity(), 48);
    }

    // Sized overhead is a single pointer-sized word (heap ptr only).
    #[test]
    fn capacity_sized() {
        assert_eq!(Flex::<u64, B16>::capacity(), 8);
        assert_eq!(Flex::<u64, B32>::capacity(), 24);
    }

    #[test]
    fn sized_new_inline() {
        let f: Flex<u64, B32> = Flex::new(42);
        assert!(f.is_inline());
        assert_eq!(*f, 42);
    }

    #[test]
    fn sized_new_heap() {
        // [u64; 4] = 32 bytes, B16 Sized capacity = 8 bytes
        let f: Flex<[u64; 4], B16> = Flex::new([1, 2, 3, 4]);
        assert!(!f.is_inline());
        assert_eq!(*f, [1, 2, 3, 4]);
    }

    #[test]
    fn sized_deref_mut_inline() {
        let mut f: Flex<u64, B16> = Flex::new(10);
        assert!(f.is_inline());
        *f = 20;
        assert_eq!(*f, 20);
    }

    #[test]
    fn sized_deref_mut_heap() {
        let mut f: Flex<[u64; 4], B16> = Flex::new([0u64; 4]);
        assert!(!f.is_inline());
        f[0] = 99;
        assert_eq!(f[0], 99);
    }

    // A zero-sized value always fits inline (size 0 <= any capacity).
    #[test]
    fn zst_always_inline() {
        let f: Flex<dyn Greet, B32> = make_flex_greet(Hello);
        assert!(f.is_inline());
        assert_eq!(f.greet(), "hello");
    }

    #[test]
    fn small_value_inline() {
        let f: Flex<dyn Greet, B32> = make_flex_greet(World(42));
        assert!(f.is_inline());
        assert_eq!(f.greet(), "world");
    }

    #[test]
    fn large_value_heap() {
        // [u64; 8] = 64 bytes, B32 ?Sized capacity = 16 bytes
        struct Big([u64; 8]);
        impl Greet for Big {
            fn greet(&self) -> &str {
                "big"
            }
        }

        let f: Flex<dyn Greet, B32> = make_flex_greet(Big([0xAB; 8]));
        assert!(!f.is_inline());
        assert_eq!(f.greet(), "big");
    }

    #[test]
    fn b16_unsized_zero_capacity_goes_to_heap() {
        // B16 has 0 bytes value capacity for ?Sized — non-ZSTs must go to heap.
        let f: Flex<dyn Greet, B16> = make_flex_greet(World(7));
        assert!(!f.is_inline());
        assert_eq!(f.greet(), "world");
    }

    // Even with zero capacity, a ZST (size 0) still fits inline.
    #[test]
    fn b16_unsized_zst_still_inline() {
        let f: Flex<dyn Greet, B16> = make_flex_greet(Hello);
        assert!(f.is_inline());
        assert_eq!(f.greet(), "hello");
    }

    #[test]
    fn deref_mut_inline() {
        trait Increment {
            fn inc(&mut self);
            fn val(&self) -> u64;
        }

        struct Counter(u64);
        impl Increment for Counter {
            fn inc(&mut self) {
                self.0 += 1;
            }
            fn val(&self) -> u64 {
                self.0
            }
        }

        fn make<V: Increment + 'static, B: Buffer>(val: V) -> Flex<dyn Increment, B> {
            let ptr: *const dyn Increment = &val as &dyn Increment;
            unsafe { Flex::new_raw(val, ptr) }
        }

        let mut f: Flex<dyn Increment, B32> = make(Counter(0));
        assert!(f.is_inline());
        f.inc();
        f.inc();
        assert_eq!(f.val(), 2);
    }

    #[test]
    fn deref_mut_heap() {
        trait Accumulate {
            fn push(&mut self, v: u64);
            fn sum(&self) -> u64;
        }

        // 15 * 8 + 8 = 128 bytes — far beyond B32's 16-byte capacity.
        struct BigAccum {
            data: [u64; 15],
            count: usize,
        }
        impl BigAccum {
            fn new() -> Self {
                BigAccum {
                    data: [0; 15],
                    count: 0,
                }
            }
        }
        impl Accumulate for BigAccum {
            fn push(&mut self, v: u64) {
                self.data[self.count] = v;
                self.count += 1;
            }
            fn sum(&self) -> u64 {
                self.data[..self.count].iter().sum()
            }
        }

        fn make<V: Accumulate + 'static, B: Buffer>(val: V) -> Flex<dyn Accumulate, B> {
            let ptr: *const dyn Accumulate = &val as &dyn Accumulate;
            unsafe { Flex::new_raw(val, ptr) }
        }

        let mut f: Flex<dyn Accumulate, B32> = make(BigAccum::new());
        assert!(!f.is_inline());
        f.push(10);
        f.push(20);
        assert_eq!(f.sum(), 30);
    }

    // Destructor of an inline value must run exactly once.
    #[test]
    fn drop_inline() {
        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        struct Dropper;
        impl Drop for Dropper {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::Relaxed);
            }
        }
        impl Greet for Dropper {
            fn greet(&self) -> &str {
                "dropping"
            }
        }

        DROP_COUNT.store(0, Ordering::Relaxed);
        {
            let f: Flex<dyn Greet, B32> = make_flex_greet(Dropper);
            assert!(f.is_inline());
            assert_eq!(f.greet(), "dropping");
        }
        assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 1);
    }

    // Destructor of a heap-spilled value must run exactly once.
    #[test]
    fn drop_heap() {
        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        struct BigDropper([u64; 8]);
        impl Drop for BigDropper {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::Relaxed);
            }
        }
        impl Greet for BigDropper {
            fn greet(&self) -> &str {
                "big drop"
            }
        }

        DROP_COUNT.store(0, Ordering::Relaxed);
        {
            let f: Flex<dyn Greet, B32> = make_flex_greet(BigDropper([0; 8]));
            assert!(!f.is_inline());
            assert_eq!(f.greet(), "big drop");
        }
        assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 1);
    }

    #[test]
    fn drop_heap_sized() {
        static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

        struct BigDropper([u64; 8]);
        impl Drop for BigDropper {
            fn drop(&mut self) {
                DROP_COUNT.fetch_add(1, Ordering::Relaxed);
            }
        }

        DROP_COUNT.store(0, Ordering::Relaxed);
        {
            let f: Flex<BigDropper, B16> = Flex::new(BigDropper([0; 8]));
            assert!(!f.is_inline());
        }
        assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 1);
    }

    #[test]
    fn display_trait_object_inline() {
        let val: u32 = 42;
        let ptr: *const dyn Display = &val as &dyn Display;
        let f: Flex<dyn Display, B32> = unsafe { Flex::new_raw(val, ptr) };
        assert!(f.is_inline());
        assert_eq!(format!("{}", &*f), "42");
    }

    // Boundary: size == capacity must still store inline (<=, not <).
    #[test]
    fn exact_fit_is_inline() {
        // [usize; 2] = 16 bytes, B32 ?Sized capacity = 16 bytes — exact fit
        struct Exact([usize; 2]);
        impl Greet for Exact {
            fn greet(&self) -> &str {
                "exact"
            }
        }

        let f: Flex<dyn Greet, B32> = make_flex_greet(Exact([1, 2]));
        assert!(f.is_inline());
        assert_eq!(f.greet(), "exact");
    }

    // Boundary: one byte over capacity must spill to the heap.
    #[test]
    fn one_byte_over_goes_to_heap() {
        // [usize; 2] + u8 = 17 bytes (with padding: 24), B32 ?Sized capacity = 16
        #[repr(C)]
        struct OneTooMany {
            _data: [usize; 2],
            _extra: u8,
        }
        impl Greet for OneTooMany {
            fn greet(&self) -> &str {
                "spilled"
            }
        }

        let f: Flex<dyn Greet, B32> = make_flex_greet(OneTooMany {
            _data: [0; 2],
            _extra: 0,
        });
        assert!(!f.is_inline());
        assert_eq!(f.greet(), "spilled");
    }

    #[test]
    fn macro_construction_inline() {
        let f: Flex<dyn Greet, B32> = crate::flex!(Hello);
        assert!(f.is_inline());
        assert_eq!(f.greet(), "hello");
    }

    #[test]
    fn macro_construction_heap() {
        struct Big([u64; 8]);
        impl Greet for Big {
            fn greet(&self) -> &str {
                "big"
            }
        }

        let f: Flex<dyn Greet, B32> = crate::flex!(Big([0; 8]));
        assert!(!f.is_inline());
        assert_eq!(f.greet(), "big");
    }
}