// inline_array/lib.rs
1#![allow(unsafe_code)]
2
3//! [`InlineArray`] is a stack-inlinable array of bytes that is intended for situations where many bytes
4//! are being shared in database-like scenarios, where optimizing for space usage is extremely
5//! important.
6//!
7//! `InlineArray` uses 8 bytes on the stack. It will inline arrays of up to 7 bytes. If the bytes
8//! are longer than that, it will store them in an optimized reference-count-backed structure of
9//! two different variants. For arrays up to length 255, the data is stored with an `AtomicU8`
10//! reference counter and `u8` length field, for only two bytes of overhead. For values larger
11//! than that, they are stored with an `AtomicU16` reference counter and a 48-bit length field.
12//! If the maximum counter is reached for either variant, the bytes are copied into a new
13//! `InlineArray` with a fresh reference count of 1. This is made with the assumption that most
14//! reference counts will be far lower than 2^16 and only rarely surpassing 255 in the small case.
15//!
16//! The inline and both types of shared instances of `InlineArray` guarantee that the stored array is
17//! always aligned to 8-byte boundaries, regardless of if it is inline on the stack or
18//! shared on the heap. This is advantageous for using in combination with certain
19//! zero-copy serialization techniques that require alignment guarantees.
20//!
21//! Byte arrays that require more than 48 bits to store their length (256 terabytes) are not supported.
22//!
23//! [`InlineArray::make_mut`] (inspired by [`std::sync::Arc::make_mut`]) can be used for getting a mutable
//! reference to the bytes in this structure. If the shared reference counter is higher than 1, this acts
25//! like a [`std::borrow::Cow`] and will make self into a private copy that is safe for modification.
26//!
27//! # Features
28//!
29//! * `serde` implements `serde::Serialize` and `serde::Deserialize` for `InlineArray` (disabled by default)
30//!
31//! # Examples
32//!
33//! ```
34//! use inline_array::InlineArray;
35//!
36//! let ia = InlineArray::from(b"yo!");
37//!
38//! // then use it more or less like you would an Arc<[u8]>
39//! ```
40
41use std::{
42    alloc::{Layout, alloc, dealloc},
43    convert::TryFrom,
44    fmt,
45    hash::{Hash, Hasher},
46    iter::FromIterator,
47    mem::size_of,
48    num::NonZeroU64,
49    ops::Deref,
50    sync::atomic::{AtomicU8, AtomicU16, Ordering},
51};
52
// Optional integration: lets `InlineArray` serve as a key type for
// `concurrent_map`, which requires a constant minimum value. The empty
// array sorts before every other byte string.
#[cfg(feature = "concurrent_map_minimum")]
impl concurrent_map::Minimum for InlineArray {
    const MIN: InlineArray = EMPTY;
}
57
// Optional `serde::{Serialize, Deserialize}` support, gated behind the
// `serde` feature (disabled by default, per the crate docs above).
#[cfg(feature = "serde")]
mod serde;
60
// Total stack size of an `InlineArray`, in bytes.
const SZ: usize = 8;
// Up to 7 bytes fit inline; the 8th byte is reserved for length + kind tag.
const INLINE_CUTOFF: usize = SZ - 1;
// Arrays of 8..=255 bytes use the small remote representation (`u8` length).
const SMALL_REMOTE_CUTOFF: usize = u8::MAX as usize;
// Big remote lengths are stored as 48-bit (6-byte) little-endian integers.
const BIG_REMOTE_LEN_BYTES: usize = 6;

// 2-bit kind tags. Zero is deliberately not a valid tag, which is what makes
// the `NonZeroU64` conversion in `into_raw` infallible.
const INLINE_TRAILER_TAG: u8 = 0b01;
const SMALL_REMOTE_TRAILER_TAG: u8 = 0b10;
const BIG_REMOTE_TRAILER_TAG: u8 = 0b11;
const TRAILER_TAG_MASK: u8 = 0b0000_0011;
// For remote variants the tag is stored in the pointer's top byte
// (address bits 56..58) and masked off again by `remote_ptr`.
const REMOTE_TAG_SHIFT: usize = (SZ - 1) * 8;
const REMOTE_TAG_MASK: usize = (TRAILER_TAG_MASK as usize) << REMOTE_TAG_SHIFT;
72
/// A const-friendly empty `InlineArray`
///
/// The trailer byte encodes length 0 in its upper bits plus the inline tag
/// in its low 2 bits — the same bit pattern `InlineArray::new(&[])` produces.
pub const EMPTY: InlineArray = InlineArray(Inner {
    inline: [0, 0, 0, 0, 0, 0, 0, INLINE_TRAILER_TAG],
});
77
/// The three storage representations, decoded from the 2-bit trailer tag.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Kind {
    // Up to 7 bytes stored directly in the stack word.
    Inline,
    // Heap allocation with a 2-byte trailer (`AtomicU8` rc + `u8` len).
    SmallRemote,
    // Heap allocation with an 8-byte header (`AtomicU16` rc + 48-bit len).
    BigRemote,
}
85
// Compile-time layout assertions: each binding fails to compile if the
// corresponding size/alignment invariant is ever violated. Never called.
const fn _static_tests() {
    // static assert that BigRemoteHeader is 8 bytes in size
    let _: [u8; 8] = [0; std::mem::size_of::<BigRemoteHeader>()];

    // static assert that BigRemoteHeader is 8 byte-aligned
    let _: [u8; 8] = [0; std::mem::align_of::<BigRemoteHeader>()];

    // static assert that SmallRemoteTrailer is 2 bytes in size
    let _: [u8; 2] = [0; std::mem::size_of::<SmallRemoteTrailer>()];

    // static assert that SmallRemoteTrailer is 1 byte-aligned
    let _: [u8; 1] = [0; std::mem::align_of::<SmallRemoteTrailer>()];

    // static assert that InlineArray is 8 bytes
    let _: [u8; 8] = [0; std::mem::size_of::<InlineArray>()];

    // static assert that InlineArray is 8 byte-aligned
    let _: [u8; 8] = [0; std::mem::align_of::<InlineArray>()];
}
105
/// A buffer that may either be inline or remote and protected
/// by an Arc. The inner buffer is guaranteed to be aligned to
/// 8 byte boundaries.
// The final byte of the 8-byte word always carries the 2-bit kind tag,
// which is how `kind()` distinguishes the three representations.
#[repr(align(8))]
pub struct InlineArray(Inner);
111
// The single 8-byte word of storage: either the inline bytes (last byte =
// length + tag) or a tagged pointer to a heap allocation (tag ORed into the
// high address bits, see `REMOTE_TAG_SHIFT`).
//
// NOTE(review): decoding the tag via `inline[SZ - 1]` for *both* variants
// only lines up with the pointer's top byte on little-endian targets —
// confirm whether big-endian support is intended.
#[derive(Clone, Copy)]
#[repr(align(8))]
union Inner {
    inline: [u8; SZ],
    remote: *const u8,
}
118
// SAFETY: shared heap data is only mutated through `make_mut`, which takes
// `&mut self` and first ensures sole ownership (rc == 1), and the reference
// counts themselves are atomics — so cross-thread sharing mirrors `Arc<[u8]>`.
unsafe impl Send for InlineArray {}
unsafe impl Sync for InlineArray {}
121
impl Clone for InlineArray {
    fn clone(&self) -> InlineArray {
        // We use at most 16 bits for the reference count at
        // the cost of this CAS and copying the inline
        // array when we reach our max reference count size.
        //
        // When measured against the standard Arc reference
        // count increment, this had a negligible performance
        // hit that only became measurable at high contention,
        // which is probably not likely for DB workloads where
        // it is expected that most concurrent operations will
        // be distributed somewhat across larger structures.

        if self.kind() == Kind::SmallRemote {
            let rc = &self.deref_small_trailer().rc;

            loop {
                let current = rc.load(Ordering::Relaxed);
                // Counter saturated: fall back to a deep copy with a fresh
                // reference count of 1 instead of overflowing.
                if current == u8::MAX {
                    return InlineArray::from(self.deref());
                }

                // Weak CAS may fail spuriously; retry with the freshly
                // observed count.
                let cas_res = rc.compare_exchange_weak(
                    current,
                    current + 1,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                );
                if cas_res.is_ok() {
                    break;
                }
            }
        } else if self.kind() == Kind::BigRemote {
            let rc = &self.deref_big_header().rc;

            loop {
                let current = rc.load(Ordering::Relaxed);
                if current == u16::MAX {
                    return InlineArray::from(self.deref());
                }

                let cas_res = rc.compare_exchange_weak(
                    current,
                    current + 1,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                );
                if cas_res.is_ok() {
                    break;
                }
            }
        }
        // Inline arrays (and successfully incremented remote ones) are
        // cloned by copying the 8-byte word itself.
        InlineArray(self.0)
    }
}
177
impl Drop for InlineArray {
    fn drop(&mut self) {
        let kind = self.kind();
        // Inline arrays own no heap memory; only the remote variants need to
        // decrement a reference count and potentially deallocate.

        if kind == Kind::SmallRemote {
            let small_trailer = self.deref_small_trailer();
            // Release decrement + Acquire fence before freeing is the same
            // protocol `std::sync::Arc` uses: it orders all prior uses of the
            // buffer (by any thread) before the deallocation.
            let rc = small_trailer.rc.fetch_sub(1, Ordering::Release) - 1;

            if rc == 0 {
                std::sync::atomic::fence(Ordering::Acquire);

                // Must mirror the layout computed in `InlineArray::new`.
                let layout = Layout::from_size_align(
                    small_trailer.len() + size_of::<SmallRemoteTrailer>(),
                    SZ,
                )
                .unwrap();

                unsafe {
                    // The allocation starts `len` bytes before the trailer,
                    // because small-remote data is stored before the pointer.
                    let ptr = self.remote_ptr().sub(small_trailer.len());

                    dealloc(ptr.cast_mut(), layout);
                }
            }
        } else if kind == Kind::BigRemote {
            let big_header = self.deref_big_header();
            let rc = big_header.rc.fetch_sub(1, Ordering::Release) - 1;

            if rc == 0 {
                std::sync::atomic::fence(Ordering::Acquire);

                let layout =
                    Layout::from_size_align(big_header.len() + size_of::<BigRemoteHeader>(), SZ)
                        .unwrap();
                // The header sits at the very start of the allocation.
                let remote = self.remote_ptr();

                unsafe {
                    dealloc(remote.cast_mut(), layout);
                }
            }
        }
    }
}
220
/// Trailer stored *after* the data bytes of a small remote allocation.
/// `Inner::remote` points directly at this trailer, so the data lives at
/// `remote_ptr() - len` (see `Deref` and `Drop`).
struct SmallRemoteTrailer {
    rc: AtomicU8,
    len: u8,
}
225
impl SmallRemoteTrailer {
    /// Length of the associated byte array, widened for slicing/indexing.
    const fn len(&self) -> usize {
        self.len as usize
    }
}
231
/// Header stored *before* the data bytes of a big remote allocation.
/// `Inner::remote` points at this header; the data begins immediately after
/// it (the 8-byte header preserves the data's 8-byte alignment). The length
/// is a 48-bit little-endian integer.
#[repr(align(8))]
struct BigRemoteHeader {
    rc: AtomicU16,
    len: [u8; BIG_REMOTE_LEN_BYTES],
}
237
impl BigRemoteHeader {
    /// Decodes the 48-bit little-endian length into a `usize`.
    const fn len(&self) -> usize {
        // On 32-bit targets (or when emulating them via the `fake_32_bit`
        // feature) only the low four length bytes can fit in the result.
        #[cfg(any(target_pointer_width = "32", feature = "fake_32_bit"))]
        let buf: [u8; 4] = [self.len[0], self.len[1], self.len[2], self.len[3]];

        #[cfg(all(target_pointer_width = "64", not(feature = "fake_32_bit")))]
        let buf: [u8; 8] = [
            self.len[0],
            self.len[1],
            self.len[2],
            self.len[3],
            self.len[4],
            self.len[5],
            0,
            0,
        ];

        // `fake_32_bit` forces a u32 decode even on 64-bit targets so the
        // 32-bit code path can be exercised in tests.
        #[cfg(feature = "fake_32_bit")]
        let ret = u32::from_le_bytes(buf) as usize;

        #[cfg(not(feature = "fake_32_bit"))]
        let ret = usize::from_le_bytes(buf);

        ret
    }
}
264
impl Deref for InlineArray {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        match self.kind() {
            // Inline: the bytes live directly in the stack word.
            Kind::Inline => unsafe { &self.0.inline[..self.inline_len()] },
            // Small remote: the pointer addresses the trailer, and the data
            // is stored immediately *before* it.
            Kind::SmallRemote => unsafe {
                let len = self.deref_small_trailer().len();
                let data_ptr = self.remote_ptr().sub(len);

                std::slice::from_raw_parts(data_ptr, len)
            },
            // Big remote: the pointer addresses the header, and the data
            // follows it.
            Kind::BigRemote => unsafe {
                let len = self.deref_big_header().len();
                let remote_ptr = self.remote_ptr();
                let data_ptr = remote_ptr.add(size_of::<BigRemoteHeader>());

                std::slice::from_raw_parts(data_ptr, len)
            },
        }
    }
}
288
289impl AsRef<[u8]> for InlineArray {
290    #[inline]
291    fn as_ref(&self) -> &[u8] {
292        self
293    }
294}
295
296impl Default for InlineArray {
297    fn default() -> Self {
298        Self::from(&[])
299    }
300}
301
302impl Hash for InlineArray {
303    fn hash<H: Hasher>(&self, state: &mut H) {
304        self.deref().hash(state);
305    }
306}
307
impl InlineArray {
    /// Builds an `InlineArray`, picking the representation from the length:
    /// inline for <= 7 bytes, small remote for <= 255 bytes, and big remote
    /// (48-bit length) for everything larger.
    fn new(slice: &[u8]) -> Self {
        if slice.len() <= INLINE_CUTOFF {
            let mut inline = [0_u8; SZ];

            // The trailer byte packs the length (shifted above the 2 tag
            // bits) together with the inline tag.
            inline[SZ - 1] = u8::try_from(slice.len()).unwrap() << 2;
            inline[..slice.len()].copy_from_slice(slice);
            inline[SZ - 1] |= INLINE_TRAILER_TAG;

            Self(Inner { inline })
        } else if slice.len() <= SMALL_REMOTE_CUTOFF {
            // Small remote layout: [data bytes][SmallRemoteTrailer], allocated
            // with 8-byte alignment so the data (at offset 0) stays aligned.
            let layout =
                Layout::from_size_align(slice.len() + size_of::<SmallRemoteTrailer>(), SZ).unwrap();

            let trailer = SmallRemoteTrailer {
                rc: 1.into(),
                len: u8::try_from(slice.len()).unwrap(),
            };

            unsafe {
                let data_ptr = alloc(layout);
                assert!(!data_ptr.is_null());
                let trailer_ptr = data_ptr.add(slice.len());

                std::ptr::write(trailer_ptr as *mut SmallRemoteTrailer, trailer);
                std::ptr::copy_nonoverlapping(slice.as_ptr(), data_ptr, slice.len());

                // assert that the low bits of the address's *top* byte are
                // empty, as that is where the 2-bit kind tag will be stored.
                #[cfg(not(miri))]
                assert_eq!(trailer_ptr.addr().to_le_bytes()[SZ - 1] & 0b111, 0);

                // Tag the pointer; `remote_ptr` masks the tag back off.
                let remote = trailer_ptr.map_addr(|addr| {
                    addr | (SMALL_REMOTE_TRAILER_TAG as usize) << REMOTE_TAG_SHIFT
                });
                Self(Inner { remote })
            }
        } else {
            // Big remote layout: [BigRemoteHeader][data bytes].
            let layout =
                Layout::from_size_align(slice.len() + size_of::<BigRemoteHeader>(), SZ).unwrap();

            let slice_len_buf: [u8; 8] = (slice.len() as u64).to_le_bytes();

            let len: [u8; BIG_REMOTE_LEN_BYTES] = [
                slice_len_buf[0],
                slice_len_buf[1],
                slice_len_buf[2],
                slice_len_buf[3],
                slice_len_buf[4],
                slice_len_buf[5],
            ];
            // Lengths >= 2^48 are unsupported (see crate docs).
            assert_eq!(slice_len_buf[6], 0);
            assert_eq!(slice_len_buf[7], 0);

            let header = BigRemoteHeader { rc: 1.into(), len };

            unsafe {
                let header_ptr = alloc(layout);
                assert!(!header_ptr.is_null());

                let data_ptr = header_ptr.add(size_of::<BigRemoteHeader>());

                std::ptr::write(header_ptr as *mut BigRemoteHeader, header);
                std::ptr::copy_nonoverlapping(slice.as_ptr(), data_ptr, slice.len());

                // assert that the low bits of the address's *top* byte are
                // empty, as that is where the 2-bit kind tag will be stored.
                #[cfg(not(miri))]
                assert_eq!(header_ptr.addr().to_le_bytes()[SZ - 1] & 0b111, 0);

                let remote = header_ptr
                    .map_addr(|addr| addr | (BIG_REMOTE_TRAILER_TAG as usize) << REMOTE_TAG_SHIFT);
                Self(Inner { remote })
            }
        }
    }

    /// Returns the heap pointer with the kind tag masked out of the high
    /// address bits. Must only be called on remote variants.
    fn remote_ptr(&self) -> *const u8 {
        assert_ne!(self.kind(), Kind::Inline);

        // SAFETY: the kind check above guarantees the union currently holds
        // the `remote` variant.
        let remote_ptr = unsafe { self.0.remote };

        remote_ptr.map_addr(|addr| addr & !REMOTE_TAG_MASK)
    }

    fn deref_small_trailer(&self) -> &SmallRemoteTrailer {
        assert_eq!(self.kind(), Kind::SmallRemote);
        // SAFETY: small remote pointers always address a live, initialized
        // trailer written by `new`.
        unsafe { &*self.remote_ptr().cast::<SmallRemoteTrailer>() }
    }

    fn deref_big_header(&self) -> &BigRemoteHeader {
        assert_eq!(self.kind(), Kind::BigRemote);
        // SAFETY: big remote pointers always address a live, initialized
        // header written by `new`.
        unsafe { &*self.remote_ptr().cast::<BigRemoteHeader>() }
    }

    // The three accessors below exist twice: `const` for normal builds and
    // non-`const` under miri. NOTE(review): presumably duplicated to satisfy
    // miri/const-eval restrictions — confirm before attempting to unify.

    #[cfg(miri)]
    fn inline_len(&self) -> usize {
        (self.inline_trailer() >> 2) as usize
    }

    #[cfg(miri)]
    fn kind(&self) -> Kind {
        match self.inline_trailer() & TRAILER_TAG_MASK {
            INLINE_TRAILER_TAG => Kind::Inline,
            SMALL_REMOTE_TRAILER_TAG => Kind::SmallRemote,
            BIG_REMOTE_TRAILER_TAG => Kind::BigRemote,
            // SAFETY: construction always writes one of the three non-zero
            // tags into the trailer byte, so no other value is reachable.
            _other => unsafe { std::hint::unreachable_unchecked() },
        }
    }

    #[cfg(miri)]
    fn inline_trailer(&self) -> u8 {
        unsafe { self.0.inline[SZ - 1] }
    }

    /// Length stored in the trailer byte; only meaningful for inline arrays.
    #[cfg(not(miri))]
    const fn inline_len(&self) -> usize {
        (self.inline_trailer() >> 2) as usize
    }

    /// Decodes the active representation from the 2-bit trailer tag.
    #[cfg(not(miri))]
    const fn kind(&self) -> Kind {
        match self.inline_trailer() & TRAILER_TAG_MASK {
            INLINE_TRAILER_TAG => Kind::Inline,
            SMALL_REMOTE_TRAILER_TAG => Kind::SmallRemote,
            BIG_REMOTE_TRAILER_TAG => Kind::BigRemote,
            // SAFETY: construction always writes one of the three non-zero
            // tags into the trailer byte, so no other value is reachable.
            _other => unsafe { std::hint::unreachable_unchecked() },
        }
    }

    /// Reads the tag/length byte, valid for every representation.
    #[cfg(not(miri))]
    const fn inline_trailer(&self) -> u8 {
        // SAFETY: reading the union as raw bytes is always valid; the final
        // byte carries the tag for both inline and remote variants.
        unsafe { self.0.inline[SZ - 1] }
    }

    /// This function returns a mutable reference to the inner
    /// byte array. If there is more than one atomic reference
    /// to the inner array, the array is copied into a new
    /// `InlineArray` and a reference to that is returned. This
    /// functions similarly in spirit to [`std::sync::Arc::make_mut`].
    pub fn make_mut(&mut self) -> &mut [u8] {
        match self.kind() {
            Kind::Inline => {
                let inline_len = self.inline_len();
                // SAFETY: the kind check guarantees the inline variant is
                // active, and `&mut self` gives exclusive access.
                unsafe { &mut self.0.inline[..inline_len] }
            }
            Kind::SmallRemote => {
                // Copy-on-write: replace self with a private copy (rc == 1)
                // before handing out mutable access to the buffer.
                if self.deref_small_trailer().rc.load(Ordering::Acquire) != 1 {
                    *self = InlineArray::from(self.deref())
                }
                unsafe {
                    let len = self.deref_small_trailer().len();
                    let data_ptr = self.remote_ptr().sub(len);

                    std::slice::from_raw_parts_mut(data_ptr.cast_mut(), len)
                }
            }
            Kind::BigRemote => {
                if self.deref_big_header().rc.load(Ordering::Acquire) != 1 {
                    *self = InlineArray::from(self.deref())
                }
                unsafe {
                    let data_ptr = self.remote_ptr().add(size_of::<BigRemoteHeader>());
                    let len = self.deref_big_header().len();
                    std::slice::from_raw_parts_mut(data_ptr.cast_mut(), len)
                }
            }
        }
    }

    /// Similar in spirit to [`std::boxed::Box::into_raw`] except always keeps the 8-byte representation,
    /// so we return a `NonZeroU64` here instead of a pointer. Must be paired with exactly one
    /// corresponding [`InlineArray::from_raw`] to avoid a leak.
    ///
    /// Be certain to pay attention to the unsafe contract for `from_raw`.
    ///
    /// # Examples
    /// ```
    /// use std::num::NonZeroU64;
    ///
    /// use inline_array::InlineArray;
    ///
    /// let bytes = b"yo!";
    ///
    /// let ia = InlineArray::from(bytes);
    ///
    /// let ptr: NonZeroU64 = ia.into_raw();
    ///
    /// let ia_2 = unsafe { InlineArray::from_raw(ptr) };
    ///
    /// assert_eq!(&ia_2, bytes);
    /// ```
    pub fn into_raw(self) -> NonZeroU64 {
        // The word is never all-zero: the trailer byte always carries a
        // non-zero tag, so the `NonZeroU64` conversion cannot fail.
        let ret = NonZeroU64::new(u64::from_le_bytes(unsafe { self.0.inline })).unwrap();

        // Skip `Drop` so the reference count is not decremented; ownership
        // transfers to the returned integer.
        std::mem::forget(self);

        ret
    }

    /// Similar in spirit to [`std::boxed::Box::from_raw`].
    ///
    /// # Safety
    ///
    /// * Must only be used with a `NonZeroU64` that was produced from [`InlineArray::into_raw`]
    /// * When an [`InlineArray`] drops, it decrements a reference count (if its size is over the inline threshold)
    ///   and when that reference count reaches 0, the backing memory that this points to is
    ///   deallocated.
    /// * As the reference count is small, when the max reference count is reached, a new
    ///   allocation is created with a new reference count. This means that you can't expect
    ///   to have as many [`InlineArray`] objects created from the same arbitrary `NonZeroU64` as calls to
    ///   clone on the initial object would intuitively (but mistakenly) allow.
    /// * To be safe in light of the above two points, treat calls to [`InlineArray::from_raw`] as
    ///   consuming, owned semantics for corresponding previous calls to a `into_raw`. If you try
    ///   to be tricky with multiple calls to `from_raw` for a particular `NonZeroU64`, you must be
    ///   certain that drops are not causing your backing allocation to be deallocated and leading
    ///   to a use after free, which may be harder to reason about than you expect at first glance.
    ///   Headaches around use after frees are likely to follow if you don't treat a `NonZeroU64` created by a
    ///   particular call to `into_raw` as a unique pointer that should be paired with at most one
    ///   call to `from_raw`, similar to [`std::sync::Arc::from_raw`] but actually harder to reason
    ///   about due to the smaller reference counts causing new allocations when they reach their
    ///   max count.
    ///
    /// # Examples
    /// ```
    /// use std::num::NonZeroU64;
    ///
    /// use inline_array::InlineArray;
    ///
    /// let bytes = b"yo!";
    ///
    /// let ia = InlineArray::from(bytes);
    ///
    /// let ptr: NonZeroU64 = ia.into_raw();
    ///
    /// let ia_2 = unsafe { InlineArray::from_raw(ptr) };
    ///
    /// assert_eq!(&ia_2, bytes);
    /// ```
    pub unsafe fn from_raw(raw: NonZeroU64) -> InlineArray {
        // Reconstitutes the exact 8-byte word that `into_raw` produced,
        // without touching any reference count.
        InlineArray(Inner {
            inline: raw.get().to_le_bytes(),
        })
    }
}
553
554impl FromIterator<u8> for InlineArray {
555    fn from_iter<T>(iter: T) -> Self
556    where
557        T: IntoIterator<Item = u8>,
558    {
559        let bs: Vec<u8> = iter.into_iter().collect();
560        bs.into()
561    }
562}
563
564impl From<&[u8]> for InlineArray {
565    fn from(slice: &[u8]) -> Self {
566        InlineArray::new(slice)
567    }
568}
569
570impl From<&str> for InlineArray {
571    fn from(s: &str) -> Self {
572        Self::from(s.as_bytes())
573    }
574}
575
576impl From<String> for InlineArray {
577    fn from(s: String) -> Self {
578        Self::from(s.as_bytes())
579    }
580}
581
582impl From<&String> for InlineArray {
583    fn from(s: &String) -> Self {
584        Self::from(s.as_bytes())
585    }
586}
587
588impl From<&InlineArray> for InlineArray {
589    fn from(v: &Self) -> Self {
590        v.clone()
591    }
592}
593
594impl From<Vec<u8>> for InlineArray {
595    fn from(v: Vec<u8>) -> Self {
596        InlineArray::new(&v)
597    }
598}
599
600impl From<Box<[u8]>> for InlineArray {
601    fn from(v: Box<[u8]>) -> Self {
602        InlineArray::new(&v)
603    }
604}
605
606impl std::borrow::Borrow<[u8]> for InlineArray {
607    fn borrow(&self) -> &[u8] {
608        self.as_ref()
609    }
610}
611
612impl std::borrow::Borrow<[u8]> for &InlineArray {
613    fn borrow(&self) -> &[u8] {
614        self.as_ref()
615    }
616}
617
618impl<const N: usize> From<&[u8; N]> for InlineArray {
619    fn from(v: &[u8; N]) -> Self {
620        Self::from(&v[..])
621    }
622}
623
624impl<const N: usize> From<[u8; N]> for InlineArray {
625    fn from(v: [u8; N]) -> Self {
626        Self::from(&v[..])
627    }
628}
629
630impl Ord for InlineArray {
631    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
632        self.as_ref().cmp(other.as_ref())
633    }
634}
635
636impl PartialOrd for InlineArray {
637    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
638        Some(self.cmp(other))
639    }
640}
641
642impl<T: AsRef<[u8]>> PartialEq<T> for InlineArray {
643    fn eq(&self, other: &T) -> bool {
644        self.as_ref() == other.as_ref()
645    }
646}
647
648impl PartialEq<[u8]> for InlineArray {
649    fn eq(&self, other: &[u8]) -> bool {
650        self.as_ref() == other
651    }
652}
653
// `Eq` is sound because equality delegates to `[u8]` comparison, a total
// equivalence relation.
impl Eq for InlineArray {}
655
656impl fmt::Debug for InlineArray {
657    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
658        self.as_ref().fmt(f)
659    }
660}
661
#[cfg(test)]
mod tests {

    use super::InlineArray;

    // One smoke test per representation: inline (<= 7 bytes), small remote
    // (8..=255 bytes), and big remote (> 255 bytes).
    #[test]
    fn inline_array_smoke() {
        let ia = InlineArray::from(vec![1, 2, 3]);
        assert_eq!(ia, vec![1, 2, 3]);
    }

    #[test]
    fn small_remote_array_smoke() {
        let ia = InlineArray::from(&[4; 200][..]);
        assert_eq!(ia, vec![4; 200]);
    }

    #[test]
    fn big_remote_array_smoke() {
        let ia = InlineArray::from(&[4; 256][..]);
        assert_eq!(ia, vec![4; 256]);
    }

    // Exercises `From<Box<[u8]>>` for both the inline and remote paths.
    #[test]
    fn boxed_slice_conversion() {
        let boite1: Box<[u8]> = Box::new([1, 2, 3]);
        let iv1: InlineArray = boite1.into();
        assert_eq!(iv1, vec![1, 2, 3]);
        let boite2: Box<[u8]> = Box::new([4; 128]);
        let iv2: InlineArray = boite2.into();
        assert_eq!(iv2, vec![4; 128]);
    }

    #[test]
    fn inline_array_as_mut_identity() {
        let initial = &[1];
        let mut iv = InlineArray::from(initial);
        assert_eq!(initial, &*iv);
        assert_eq!(initial, iv.make_mut());
    }

    // Shared property: the original, its clone, and the `make_mut` view must
    // all compare equal, and the backing buffer must be 8-byte aligned.
    fn prop_identity(inline_array: &InlineArray) -> bool {
        let mut iv2 = inline_array.clone();

        if iv2 != inline_array {
            println!("expected clone to equal original");
            return false;
        }

        if *inline_array != *iv2 {
            println!("expected to equal original");
            return false;
        }

        if inline_array != iv2.make_mut() {
            println!("expected AsMut to equal original");
            return false;
        }

        // Alignment guarantee advertised in the crate-level docs.
        let buf: &[u8] = inline_array.as_ref();
        assert_eq!(buf.as_ptr() as usize % 8, 0);

        true
    }

    #[cfg(feature = "serde")]
    fn prop_serde_roundtrip(inline_array: &InlineArray) -> bool {
        let ser = bincode::serialize(inline_array).unwrap();
        let de: InlineArray = bincode::deserialize(&ser).unwrap();
        de == inline_array
    }

    impl quickcheck::Arbitrary for InlineArray {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            InlineArray::from(Vec::arbitrary(g))
        }
    }

    quickcheck::quickcheck! {
        // Ignored under miri: quickcheck generates too many cases to
        // interpret in reasonable time.
        #[cfg_attr(miri, ignore)]
        fn inline_array(item: InlineArray) -> bool {
            dbg!(item.len());
            assert!(prop_identity(&item));

            #[cfg(feature = "serde")]
            assert!(prop_serde_roundtrip(&item));

            true
        }
    }

    // Pinned regression case (historical bug; details not recorded here).
    #[test]
    fn inline_array_bug_00() {
        assert!(prop_identity(&InlineArray::new(&[
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        ])));
    }
}
759}