inline_array/
lib.rs

#![allow(unsafe_code)]

//! [`InlineArray`] is a stack-inlinable array of bytes that is intended for situations where many
//! byte arrays are shared, as in database-like scenarios where optimizing for space usage is
//! extremely important.
//!
//! `InlineArray` uses 8 bytes on the stack. It will inline arrays of up to 7 bytes. If the bytes
//! are longer than that, it will store them in an optimized reference-count-backed structure of
//! two different variants. For arrays up to length 255, the data is stored with an `AtomicU8`
//! reference counter and a `u8` length field, for only two bytes of overhead. For values larger
//! than that, they are stored with an `AtomicU16` reference counter and a 48-bit length field.
//! If the maximum counter is reached for either variant, the bytes are copied into a new
//! `InlineArray` with a fresh reference count of 1. This is done under the assumption that most
//! reference counts will be far lower than 2^16, and will only rarely surpass 255 in the small case.
//!
//! The inline and both shared variants of `InlineArray` guarantee that the stored array is
//! always aligned to 8-byte boundaries, regardless of whether it is inline on the stack or
//! shared on the heap. This is advantageous for use in combination with certain
//! zero-copy serialization techniques that require alignment guarantees.
//!
//! Byte arrays that require more than 48 bits to store their length (256 terabytes) are not supported.
//!
//! [`InlineArray::make_mut`] (inspired by [`std::sync::Arc::make_mut`]) can be used for getting a mutable
//! reference to the bytes in this structure. If the shared reference counter is higher than 1, this acts
//! like a [`std::borrow::Cow`] and will make `self` into a private copy that is safe for modification.
//!
//! # Features
//!
//! * `serde` implements `serde::Serialize` and `serde::Deserialize` for `InlineArray` (disabled by default)
//!
//! # Examples
//!
//! ```
//! use inline_array::InlineArray;
//!
//! let ia = InlineArray::from(b"yo!");
//!
//! // then use it more or less like you would an Arc<[u8]>
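//!
//! // a couple of extra assertions (illustrative additions) showing the
//! // slice-like access that Deref provides
//! assert_eq!(&*ia, b"yo!");
//! assert_eq!(ia.len(), 3);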
//! ```

use std::{
    alloc::{alloc, dealloc, Layout},
    convert::TryFrom,
    fmt,
    hash::{Hash, Hasher},
    iter::FromIterator,
    mem::size_of,
    num::NonZeroU64,
    ops::Deref,
    ptr::{with_exposed_provenance, with_exposed_provenance_mut},
    sync::atomic::{AtomicU16, AtomicU8, Ordering},
};

#[cfg(feature = "concurrent_map_minimum")]
impl concurrent_map::Minimum for InlineArray {
    const MIN: InlineArray = EMPTY;
}

#[cfg(feature = "serde")]
mod serde;

const SZ: usize = 8;
const INLINE_CUTOFF: usize = SZ - 1;
const SMALL_REMOTE_CUTOFF: usize = u8::MAX as usize;
const BIG_REMOTE_LEN_BYTES: usize = 6;

const INLINE_TRAILER_TAG: u8 = 0b01;
const SMALL_REMOTE_TRAILER_TAG: u8 = 0b10;
const BIG_REMOTE_TRAILER_TAG: u8 = 0b11;
const TRAILER_TAG_MASK: u8 = 0b0000_0011;
const TRAILER_PTR_MASK: u8 = 0b1111_1100;
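
// An illustrative note: the bottom two bits of the trailer byte (the last of
// the 8 stack bytes) hold the kind tag. For an inline array, the upper six
// bits of that byte hold the length, so a 3-byte inline value has trailer
// (3 << 2) | INLINE_TRAILER_TAG == 0b0000_1101, and `EMPTY` below has only
// the tag set since its length is 0. For the remote kinds, the tag is OR'd
// into the last byte of the pointer's little-endian representation and
// masked off with TRAILER_PTR_MASK before the pointer is reconstructed.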

/// A const-friendly empty `InlineArray`
pub const EMPTY: InlineArray = InlineArray([0, 0, 0, 0, 0, 0, 0, INLINE_TRAILER_TAG]);

#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Kind {
    Inline,
    SmallRemote,
    BigRemote,
}

const fn _static_tests() {
    // static assert that BigRemoteHeader is 8 bytes in size
    let _: [u8; 8] = [0; std::mem::size_of::<BigRemoteHeader>()];

    // static assert that BigRemoteHeader is 8 byte-aligned
    let _: [u8; 8] = [0; std::mem::align_of::<BigRemoteHeader>()];

    // static assert that SmallRemoteTrailer is 2 bytes in size
    let _: [u8; 2] = [0; std::mem::size_of::<SmallRemoteTrailer>()];

    // static assert that SmallRemoteTrailer is 1 byte-aligned
    let _: [u8; 1] = [0; std::mem::align_of::<SmallRemoteTrailer>()];

    // static assert that InlineArray is 8 bytes
    let _: [u8; 8] = [0; std::mem::size_of::<InlineArray>()];

    // static assert that InlineArray is 8 byte-aligned
    let _: [u8; 8] = [0; std::mem::align_of::<InlineArray>()];
}

/// A buffer that may either be inline or remote and protected
/// by an Arc. The inner buffer is guaranteed to be aligned to
/// 8 byte boundaries.
#[repr(align(8))]
pub struct InlineArray([u8; SZ]);

impl Clone for InlineArray {
    fn clone(&self) -> InlineArray {
        // We use at most 16 bits for the reference count, at
        // the cost of this CAS loop and of copying the backing
        // array when we reach our max reference count size.
        //
        // When measured against the standard Arc reference
        // count increment, this had a negligible performance
        // hit that only became measurable at high contention,
        // which is probably not likely for DB workloads where
        // it is expected that most concurrent operations will
        // be distributed somewhat across larger structures.

        if self.kind() == Kind::SmallRemote {
            let rc = &self.deref_small_trailer().rc;

            loop {
                let current = rc.load(Ordering::Relaxed);
                if current == u8::MAX {
                    return InlineArray::from(self.deref());
                }

                let cas_res = rc.compare_exchange_weak(
                    current,
                    current + 1,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                );
                if cas_res.is_ok() {
                    break;
                }
            }
        } else if self.kind() == Kind::BigRemote {
            let rc = &self.deref_big_header().rc;

            loop {
                let current = rc.load(Ordering::Relaxed);
                if current == u16::MAX {
                    return InlineArray::from(self.deref());
                }

                let cas_res = rc.compare_exchange_weak(
                    current,
                    current + 1,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                );
                if cas_res.is_ok() {
                    break;
                }
            }
        }
        InlineArray(self.0)
    }
}

impl Drop for InlineArray {
    fn drop(&mut self) {
        let kind = self.kind();

        if kind == Kind::SmallRemote {
            let small_trailer = self.deref_small_trailer();
            let rc = small_trailer.rc.fetch_sub(1, Ordering::Release) - 1;

            if rc == 0 {
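                // This Acquire fence synchronizes with the Release
                // decrements above so that all accesses to the allocation
                // from other threads happen-before the deallocation, the
                // same pattern std's Arc uses in its Drop impl.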
                std::sync::atomic::fence(Ordering::Acquire);

                let layout = Layout::from_size_align(
                    small_trailer.len() + size_of::<SmallRemoteTrailer>(),
                    SZ,
                )
                .unwrap();

                unsafe {
                    let ptr = self.remote_ptr().map_addr(|a| a - small_trailer.len());
                    let ptr_addr = ptr.expose_provenance();

                    dealloc(with_exposed_provenance_mut(ptr_addr), layout);
                }
            }
        } else if kind == Kind::BigRemote {
            let big_header = self.deref_big_header();
            let rc = big_header.rc.fetch_sub(1, Ordering::Release) - 1;

            if rc == 0 {
                std::sync::atomic::fence(Ordering::Acquire);

                let layout =
                    Layout::from_size_align(big_header.len() + size_of::<BigRemoteHeader>(), SZ)
                        .unwrap();
                let remote = self.remote_ptr();
                let remote_addr = remote.expose_provenance();
                let ptr = with_exposed_provenance_mut::<u8>(remote_addr);

                unsafe {
                    dealloc(ptr, layout);
                }
            }
        }
    }
}

struct SmallRemoteTrailer {
    rc: AtomicU8,
    len: u8,
}

impl SmallRemoteTrailer {
    const fn len(&self) -> usize {
        self.len as usize
    }
}

#[repr(align(8))]
struct BigRemoteHeader {
    rc: AtomicU16,
    len: [u8; BIG_REMOTE_LEN_BYTES],
}

impl BigRemoteHeader {
    const fn len(&self) -> usize {
        #[cfg(any(target_pointer_width = "32", feature = "fake_32_bit"))]
        let buf: [u8; 4] = [self.len[0], self.len[1], self.len[2], self.len[3]];

        #[cfg(all(target_pointer_width = "64", not(feature = "fake_32_bit")))]
        let buf: [u8; 8] = [
            self.len[0],
            self.len[1],
            self.len[2],
            self.len[3],
            self.len[4],
            self.len[5],
            0,
            0,
        ];

        #[cfg(feature = "fake_32_bit")]
        let ret = u32::from_le_bytes(buf) as usize;

        #[cfg(not(feature = "fake_32_bit"))]
        let ret = usize::from_le_bytes(buf);

        ret
    }
}

impl Deref for InlineArray {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        match self.kind() {
            Kind::Inline => &self.0[..self.inline_len()],
            Kind::SmallRemote => unsafe {
                let len = self.deref_small_trailer().len();
                let data_ptr = self.remote_ptr().map_addr(|a| a - len);
                let data_addr = data_ptr.expose_provenance();

                std::slice::from_raw_parts(with_exposed_provenance(data_addr), len)
            },
            Kind::BigRemote => unsafe {
                let len = self.deref_big_header().len();
                let remote_ptr = self.remote_ptr();
                let data_ptr = remote_ptr.map_addr(|a| a + size_of::<BigRemoteHeader>());
                let data_addr = data_ptr.expose_provenance();

                std::slice::from_raw_parts(with_exposed_provenance(data_addr), len)
            },
        }
    }
}

impl AsRef<[u8]> for InlineArray {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self
    }
}

impl Default for InlineArray {
    fn default() -> Self {
        Self::from(&[])
    }
}

impl Hash for InlineArray {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.deref().hash(state);
    }
}

impl InlineArray {
    fn new(slice: &[u8]) -> Self {
        let mut data = [0_u8; SZ];
        if slice.len() <= INLINE_CUTOFF {
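            // Inline: the length goes in the upper six bits of the
            // trailer byte, and the kind tag in the bottom two.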
            data[SZ - 1] = u8::try_from(slice.len()).unwrap() << 2;
            data[..slice.len()].copy_from_slice(slice);
            data[SZ - 1] |= INLINE_TRAILER_TAG;
        } else if slice.len() <= SMALL_REMOTE_CUTOFF {
            let layout =
                Layout::from_size_align(slice.len() + size_of::<SmallRemoteTrailer>(), SZ).unwrap();

            let trailer = SmallRemoteTrailer {
                rc: 1.into(),
                len: u8::try_from(slice.len()).unwrap(),
            };

            unsafe {
                let data_ptr = alloc(layout);
                assert!(!data_ptr.is_null());
                let trailer_ptr = data_ptr.map_addr(|o| o + slice.len());

                std::ptr::write(trailer_ptr as *mut SmallRemoteTrailer, trailer);
                std::ptr::copy_nonoverlapping(slice.as_ptr(), data_ptr, slice.len());
                std::ptr::write_unaligned(
                    data.as_mut_ptr() as _,
                    trailer_ptr.expose_provenance().to_le_bytes(),
                );
            }

            // assert that the bottom 3 bits are empty, as we expect
            // the buffer to always have an alignment of 8 (2 ^ 3).
            #[cfg(not(miri))]
            assert_eq!(data[SZ - 1] & 0b111, 0);

            data[SZ - 1] |= SMALL_REMOTE_TRAILER_TAG;
        } else {
            let layout =
                Layout::from_size_align(slice.len() + size_of::<BigRemoteHeader>(), SZ).unwrap();

            let slice_len_buf: [u8; 8] = (slice.len() as u64).to_le_bytes();

            let len: [u8; BIG_REMOTE_LEN_BYTES] = [
                slice_len_buf[0],
                slice_len_buf[1],
                slice_len_buf[2],
                slice_len_buf[3],
                slice_len_buf[4],
                slice_len_buf[5],
            ];
            assert_eq!(slice_len_buf[6], 0);
            assert_eq!(slice_len_buf[7], 0);

            let header = BigRemoteHeader { rc: 1.into(), len };

            unsafe {
                let header_ptr = alloc(layout);
                assert!(!header_ptr.is_null());

                let data_ptr = header_ptr.map_addr(|o| o + size_of::<BigRemoteHeader>());

                std::ptr::write(header_ptr as *mut BigRemoteHeader, header);
                std::ptr::copy_nonoverlapping(slice.as_ptr(), data_ptr, slice.len());
                std::ptr::write_unaligned(
                    data.as_mut_ptr() as _,
                    header_ptr.expose_provenance().to_le_bytes(),
                );
            }

            // assert that the bottom 3 bits are empty, as we expect
            // the buffer to always have an alignment of 8 (2 ^ 3).
            #[cfg(not(miri))]
            assert_eq!(data[SZ - 1] & 0b111, 0);

            data[SZ - 1] |= BIG_REMOTE_TRAILER_TAG;
        }
        Self(data)
    }

    fn remote_ptr(&self) -> *const u8 {
        assert_ne!(self.kind(), Kind::Inline);

        let mut copied = self.0;
        copied[SZ - 1] &= TRAILER_PTR_MASK;

        unsafe { std::ptr::read_unaligned(copied.as_ptr() as *const *const u8) }
    }

    fn deref_small_trailer(&self) -> &SmallRemoteTrailer {
        assert_eq!(self.kind(), Kind::SmallRemote);
        let remote = self.remote_ptr();
        let ptr = with_exposed_provenance_mut(remote.expose_provenance());

        unsafe { &*(ptr) }
    }

    fn deref_big_header(&self) -> &BigRemoteHeader {
        assert_eq!(self.kind(), Kind::BigRemote);
        let remote = self.remote_ptr();
        let ptr = with_exposed_provenance_mut(remote.expose_provenance());

        unsafe { &*(ptr) }
    }

    #[cfg(miri)]
    fn inline_len(&self) -> usize {
        (self.inline_trailer() >> 2) as usize
    }

    #[cfg(miri)]
    fn kind(&self) -> Kind {
        match self.inline_trailer() & TRAILER_TAG_MASK {
            INLINE_TRAILER_TAG => Kind::Inline,
            SMALL_REMOTE_TRAILER_TAG => Kind::SmallRemote,
            BIG_REMOTE_TRAILER_TAG => Kind::BigRemote,
            _other => unsafe { std::hint::unreachable_unchecked() },
        }
    }

    #[cfg(miri)]
    fn inline_trailer(&self) -> u8 {
        self.0[SZ - 1]
    }

    #[cfg(not(miri))]
    const fn inline_len(&self) -> usize {
        (self.inline_trailer() >> 2) as usize
    }

    #[cfg(not(miri))]
    const fn kind(&self) -> Kind {
        match self.inline_trailer() & TRAILER_TAG_MASK {
            INLINE_TRAILER_TAG => Kind::Inline,
            SMALL_REMOTE_TRAILER_TAG => Kind::SmallRemote,
            BIG_REMOTE_TRAILER_TAG => Kind::BigRemote,
            _other => unsafe { std::hint::unreachable_unchecked() },
        }
    }

    #[cfg(not(miri))]
    const fn inline_trailer(&self) -> u8 {
        self.0[SZ - 1]
    }

    /// This function returns a mutable reference to the inner
    /// byte array. If there is more than one atomic reference
    /// to the inner array, the array is copied into a new
    /// `InlineArray` and a reference to that is returned. This
    /// functions similarly in spirit to [`std::sync::Arc::make_mut`].
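    ///
    /// # Examples
    ///
    /// A small illustration of the copy-on-write behavior:
    ///
    /// ```
    /// use inline_array::InlineArray;
    ///
    /// // long enough to be stored behind a shared reference count
    /// let mut ia = InlineArray::from(&[7u8; 100][..]);
    /// let snapshot = ia.clone();
    ///
    /// // the count is 2, so make_mut first detaches a private copy
    /// ia.make_mut()[0] = 0;
    ///
    /// assert_eq!(ia[0], 0);
    /// assert_eq!(snapshot[0], 7);
    /// ```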
    pub fn make_mut(&mut self) -> &mut [u8] {
        match self.kind() {
            Kind::Inline => {
                let inline_len = self.inline_len();
                &mut self.0[..inline_len]
            }
            Kind::SmallRemote => {
                if self.deref_small_trailer().rc.load(Ordering::Acquire) != 1 {
                    *self = InlineArray::from(self.deref())
                }
                unsafe {
                    let len = self.deref_small_trailer().len();
                    let data_ptr = self.remote_ptr().map_addr(|a| a - len);
                    let data_addr = data_ptr.expose_provenance();
                    std::slice::from_raw_parts_mut(with_exposed_provenance_mut(data_addr), len)
                }
            }
            Kind::BigRemote => {
                if self.deref_big_header().rc.load(Ordering::Acquire) != 1 {
                    *self = InlineArray::from(self.deref())
                }
                unsafe {
                    let data_ptr = self
                        .remote_ptr()
                        .map_addr(|a| a + size_of::<BigRemoteHeader>());
                    let len = self.deref_big_header().len();
                    std::slice::from_raw_parts_mut(
                        with_exposed_provenance_mut(data_ptr.expose_provenance()),
                        len,
                    )
                }
            }
        }
    }

    /// Similar in spirit to [`std::boxed::Box::into_raw`] except it always keeps the 8-byte representation,
    /// so we return a `NonZeroU64` here instead of a pointer. Must be paired with exactly one
    /// corresponding [`InlineArray::from_raw`] to avoid a leak.
    ///
    /// Be certain to pay attention to the unsafe contract for `from_raw`.
    ///
    /// # Examples
    /// ```
    /// use std::num::NonZeroU64;
    ///
    /// use inline_array::InlineArray;
    ///
    /// let bytes = b"yo!";
    ///
    /// let ia = InlineArray::from(bytes);
    ///
    /// let ptr: NonZeroU64 = ia.into_raw();
    ///
    /// let ia_2 = unsafe { InlineArray::from_raw(ptr) };
    ///
    /// assert_eq!(&ia_2, bytes);
    /// ```
    pub fn into_raw(self) -> NonZeroU64 {
        let ret = NonZeroU64::new(u64::from_le_bytes(self.0)).unwrap();

        std::mem::forget(self);

        ret
    }

    /// Similar in spirit to [`std::boxed::Box::from_raw`].
    ///
    /// # Safety
    ///
    /// * Must only be used with a `NonZeroU64` that was produced from [`InlineArray::into_raw`].
    /// * When an [`InlineArray`] drops, it decrements a reference count (if its size is over the inline threshold)
    ///   and when that reference count reaches 0, the backing memory that this points to is
    ///   deallocated.
    /// * As the reference count is small, when the max reference count is reached, a new
    ///   allocation is created with a new reference count. This means that you can't expect
    ///   to have as many [`InlineArray`] objects created from the same arbitrary `NonZeroU64` as calls to
    ///   clone on the initial object would intuitively (but mistakenly) allow.
    /// * To be safe in light of the above two points, treat calls to [`InlineArray::from_raw`] as
    ///   consuming, owned semantics for the corresponding previous call to `into_raw`. If you try
    ///   to be tricky with multiple calls to `from_raw` for a particular `NonZeroU64`, you must be
    ///   certain that drops are not causing your backing allocation to be deallocated and leading
    ///   to a use-after-free, which may be harder to reason about than you expect at first glance.
    ///   Headaches around use-after-frees are likely to follow if you don't treat a `NonZeroU64` created by a
    ///   particular call to `into_raw` as a unique pointer that should be paired with at most one
    ///   call to `from_raw`, similar to [`std::sync::Arc::from_raw`] but actually harder to reason
    ///   about due to the smaller reference counts causing new allocations when they reach their
    ///   max count.
    ///
    /// # Examples
    /// ```
    /// use std::num::NonZeroU64;
    ///
    /// use inline_array::InlineArray;
    ///
    /// let bytes = b"yo!";
    ///
    /// let ia = InlineArray::from(bytes);
    ///
    /// let ptr: NonZeroU64 = ia.into_raw();
    ///
    /// let ia_2 = unsafe { InlineArray::from_raw(ptr) };
    ///
    /// assert_eq!(&ia_2, bytes);
    /// ```
    pub unsafe fn from_raw(raw: NonZeroU64) -> InlineArray {
        InlineArray(raw.get().to_le_bytes())
    }
}

impl FromIterator<u8> for InlineArray {
    fn from_iter<T>(iter: T) -> Self
    where
        T: IntoIterator<Item = u8>,
    {
        let bs: Vec<u8> = iter.into_iter().collect();
        bs.into()
    }
}

impl From<&[u8]> for InlineArray {
    fn from(slice: &[u8]) -> Self {
        InlineArray::new(slice)
    }
}

impl From<&str> for InlineArray {
    fn from(s: &str) -> Self {
        Self::from(s.as_bytes())
    }
}

impl From<String> for InlineArray {
    fn from(s: String) -> Self {
        Self::from(s.as_bytes())
    }
}

impl From<&String> for InlineArray {
    fn from(s: &String) -> Self {
        Self::from(s.as_bytes())
    }
}

impl From<&InlineArray> for InlineArray {
    fn from(v: &Self) -> Self {
        v.clone()
    }
}

impl From<Vec<u8>> for InlineArray {
    fn from(v: Vec<u8>) -> Self {
        InlineArray::new(&v)
    }
}

impl From<Box<[u8]>> for InlineArray {
    fn from(v: Box<[u8]>) -> Self {
        InlineArray::new(&v)
    }
}

impl std::borrow::Borrow<[u8]> for InlineArray {
    fn borrow(&self) -> &[u8] {
        self.as_ref()
    }
}

impl std::borrow::Borrow<[u8]> for &InlineArray {
    fn borrow(&self) -> &[u8] {
        self.as_ref()
    }
}

impl<const N: usize> From<&[u8; N]> for InlineArray {
    fn from(v: &[u8; N]) -> Self {
        Self::from(&v[..])
    }
}

impl<const N: usize> From<[u8; N]> for InlineArray {
    fn from(v: [u8; N]) -> Self {
        Self::from(&v[..])
    }
}

impl Ord for InlineArray {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.as_ref().cmp(other.as_ref())
    }
}

impl PartialOrd for InlineArray {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl<T: AsRef<[u8]>> PartialEq<T> for InlineArray {
    fn eq(&self, other: &T) -> bool {
        self.as_ref() == other.as_ref()
    }
}

impl PartialEq<[u8]> for InlineArray {
    fn eq(&self, other: &[u8]) -> bool {
        self.as_ref() == other
    }
}

impl Eq for InlineArray {}

impl fmt::Debug for InlineArray {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.as_ref().fmt(f)
    }
}

#[cfg(test)]
mod tests {

    use super::InlineArray;

    #[test]
    fn inline_array_smoke() {
        let ia = InlineArray::from(vec![1, 2, 3]);
        assert_eq!(ia, vec![1, 2, 3]);
    }

    #[test]
    fn small_remote_array_smoke() {
        let ia = InlineArray::from(&[4; 200][..]);
        assert_eq!(ia, vec![4; 200]);
    }

    #[test]
    fn big_remote_array_smoke() {
        let ia = InlineArray::from(&[4; 256][..]);
        assert_eq!(ia, vec![4; 256]);
    }
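
    // A sketch of an extra test (not from the original suite) exercising the
    // documented fallback: when the small variant's one-byte refcount
    // saturates, clone performs a deep copy instead of incrementing, and the
    // contents must still match.
    #[test]
    fn small_remote_rc_saturation_copies() {
        let ia = InlineArray::from(&[5; 32][..]);
        let clones: Vec<InlineArray> = (0..300).map(|_| ia.clone()).collect();
        for c in &clones {
            assert_eq!(c, &ia);
        }
    }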

    #[test]
    fn boxed_slice_conversion() {
        let boite1: Box<[u8]> = Box::new([1, 2, 3]);
        let iv1: InlineArray = boite1.into();
        assert_eq!(iv1, vec![1, 2, 3]);
        let boite2: Box<[u8]> = Box::new([4; 128]);
        let iv2: InlineArray = boite2.into();
        assert_eq!(iv2, vec![4; 128]);
    }

    #[test]
    fn inline_array_as_mut_identity() {
        let initial = &[1];
        let mut iv = InlineArray::from(initial);
        assert_eq!(initial, &*iv);
        assert_eq!(initial, iv.make_mut());
    }

    fn prop_identity(inline_array: &InlineArray) -> bool {
        let mut iv2 = inline_array.clone();

        if iv2 != inline_array {
            println!("expected clone to equal original");
            return false;
        }

        if *inline_array != *iv2 {
            println!("expected to equal original");
            return false;
        }

        if inline_array != iv2.make_mut() {
            println!("expected make_mut to equal original");
            return false;
        }

        let buf: &[u8] = inline_array.as_ref();
        assert_eq!(buf.as_ptr() as usize % 8, 0);

        true
    }

    #[cfg(feature = "serde")]
    fn prop_serde_roundtrip(inline_array: &InlineArray) -> bool {
        let ser = bincode::serialize(inline_array).unwrap();
        let de: InlineArray = bincode::deserialize(&ser).unwrap();
        de == inline_array
    }

    impl quickcheck::Arbitrary for InlineArray {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            InlineArray::from(Vec::arbitrary(g))
        }
    }

    quickcheck::quickcheck! {
        #[cfg_attr(miri, ignore)]
        fn inline_array(item: InlineArray) -> bool {
            dbg!(item.len());
            assert!(prop_identity(&item));

            #[cfg(feature = "serde")]
            assert!(prop_serde_roundtrip(&item));

            true
        }
    }

    #[test]
    fn inline_array_bug_00() {
        assert!(prop_identity(&InlineArray::new(&[
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        ])));
    }
}
763}