musli_zerocopy/pointer/ref.rs

use core::any;
use core::cmp::Ordering;
use core::fmt;
use core::hash::Hash;
use core::marker::PhantomData;
use core::mem::size_of;

use crate::ZeroCopy;
use crate::buf::{Padder, Validator};
use crate::endian::{Big, ByteOrder, Little, Native};
use crate::error::{CoerceError, CoerceErrorKind, Error};
use crate::mem::MaybeUninit;
use crate::pointer::Coerce;
use crate::pointer::{DefaultSize, Pointee, Size};

/// A stored reference to a type `T`.
///
/// A reference is made up of two components:
/// * An [`offset()`] indicating the absolute offset into a [`Buf`] where the
///   pointed-to (pointee) data is located.
/// * An optional [`metadata()`] component, which if set indicates that this
///   reference is a wide pointer. This is used when encoding types such as
///   `[T]` or `str` to include additional data necessary to handle the type.
///
/// [`Buf`]: crate::buf::Buf
/// [`offset()`]: Ref::offset
/// [`metadata()`]: Ref::metadata
///
/// # Examples
///
/// ```
/// use musli_zerocopy::{Ref, OwnedBuf};
///
/// let mut buf = OwnedBuf::with_alignment::<u32>();
/// buf.extend_from_slice(&[1, 2, 3, 4]);
///
/// let buf = buf.as_ref();
///
/// let number = Ref::<u32>::new(0u32);
/// assert_eq!(*buf.load(number)?, u32::from_ne_bytes([1, 2, 3, 4]));
/// # Ok::<_, musli_zerocopy::Error>(())
/// ```
#[repr(C)]
pub struct Ref<T, E = Native, O = DefaultSize>
where
    T: ?Sized + Pointee,
    E: ByteOrder,
    O: Size,
{
    offset: O,
    metadata: T::Stored<O>,
    _marker: PhantomData<(E, T)>,
}

unsafe impl<T, E, O> ZeroCopy for Ref<T, E, O>
where
    T: ?Sized + Pointee,
    E: ByteOrder,
    O: Size,
{
    // A `Ref` type cannot inhabit any bit pattern since it must represent a
    // validly sized reference.
    const ANY_BITS: bool = false;

    const PADDED: bool = const {
        debug_assert!(
            size_of::<Self>() == (size_of::<O>() + size_of::<T::Stored<O>>()),
            "Size of Ref should equal its fields"
        );
        debug_assert!(!O::PADDED, "Offset should not be padded");
        debug_assert!(!T::Stored::<O>::PADDED, "Metadata should not be padded");
        false
    };

    // Since the ref type statically encodes the byte order, it cannot be byte
    // swapped with retained meaning.
    const CAN_SWAP_BYTES: bool = false;

    #[inline]
    unsafe fn pad(padder: &mut Padder<'_, Self>) {
        unsafe {
            padder.pad::<O>();
            padder.pad::<T::Stored<O>>();
        }
    }

    #[inline]
    unsafe fn validate(validator: &mut Validator<'_, Self>) -> Result<(), Error> {
        unsafe {
            let offset = *validator.field::<O>()?;
            let metadata = *validator.field::<T::Stored<O>>()?;
            Self::try_from_parts(offset, metadata)?;
            Ok(())
        }
    }

    #[inline]
    fn swap_bytes<B: ByteOrder>(self) -> Self {
        self
    }
}

impl<T, E, O> Ref<T, E, O>
where
    T: ?Sized + Pointee,
    E: ByteOrder,
    O: Size,
{
    /// Convert this reference into a [`Big`]-endian [`ByteOrder`].
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::{endian, Ref};
    ///
    /// let r: Ref<u32> = Ref::new(10u32);
    /// assert_eq!(r.offset(), 10);
    ///
    /// let r: Ref<u32, endian::Little> = Ref::new(10u32);
    /// assert_eq!(r.offset(), 10);
    ///
    /// let r: Ref<u32, endian::Big> = r.to_be();
    /// assert_eq!(r.offset(), 10);
    /// ```
    #[inline]
    pub fn to_be(self) -> Ref<T, Big, O> {
        self.to_endian()
    }

    /// Convert this reference into a [`Little`]-endian [`ByteOrder`].
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::{endian, Ref};
    ///
    /// let r: Ref<u32> = Ref::new(10u32);
    /// assert_eq!(r.offset(), 10);
    ///
    /// let r: Ref<u32, endian::Big> = Ref::new(10u32);
    /// assert_eq!(r.offset(), 10);
    ///
    /// let r: Ref<u32, endian::Little> = r.to_le();
    /// assert_eq!(r.offset(), 10);
    /// ```
    #[inline]
    pub fn to_le(self) -> Ref<T, Little, O> {
        self.to_endian()
    }

    /// Convert this reference into a [`Native`]-endian [`ByteOrder`].
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::{endian, Ref};
    ///
    /// let r: Ref<u32, endian::Native> = Ref::<u32, endian::Big>::new(10u32).to_ne();
    /// assert_eq!(r.offset(), 10);
    ///
    /// let r: Ref<u32, endian::Native> = Ref::<u32, endian::Little>::new(10u32).to_ne();
    /// assert_eq!(r.offset(), 10);
    ///
    /// let r: Ref<u32, endian::Native> = Ref::<u32, endian::Native>::new(10u32).to_ne();
    /// assert_eq!(r.offset(), 10);
    /// ```
    #[inline]
    pub fn to_ne(self) -> Ref<T, Native, O> {
        self.to_endian()
    }

    /// Convert this reference into a `U`-endian [`ByteOrder`].
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::{endian, Ref};
    ///
    /// let r: Ref<u32, endian::Native> = Ref::<u32, endian::Big>::new(10u32).to_endian();
    /// assert_eq!(r.offset(), 10);
    ///
    /// let r: Ref<u32, endian::Native> = Ref::<u32, endian::Little>::new(10u32).to_endian();
    /// assert_eq!(r.offset(), 10);
    ///
    /// let r: Ref<u32, endian::Native> = Ref::<u32, endian::Native>::new(10u32).to_endian();
    /// assert_eq!(r.offset(), 10);
    /// ```
    #[inline]
    pub fn to_endian<U: ByteOrder>(self) -> Ref<T, U, O> {
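        // `swap_bytes::<E>()` converts from the current stored representation
        // to native byte order, and `swap_bytes::<U>()` into the target one.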
        Ref {
            offset: self.offset.swap_bytes::<E>().swap_bytes::<U>(),
            metadata: self.metadata.swap_bytes::<E>().swap_bytes::<U>(),
            _marker: PhantomData,
        }
    }
}

impl<T, E, O> Ref<T, E, O>
where
    T: ?Sized + Pointee,
    E: ByteOrder,
    O: Size,
{
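    /// Construct a reference from parts in their stored byte order, panicking
    /// if they do not describe a valid layout.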
    #[inline]
    fn from_parts(offset: O, metadata: T::Stored<O>) -> Self {
        match Self::try_from_parts(offset, metadata) {
            Ok(ok) => ok,
            Err(error) => panic!("{error}"),
        }
    }

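    /// Fallible counterpart to `from_parts`, checking that the metadata
    /// describes a valid layout and that the offset plus the layout's size
    /// does not overflow `usize`.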
    #[inline]
    fn try_from_parts(offset: O, metadata: T::Stored<O>) -> Result<Self, CoerceError> {
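        // The parts are given in the stored `E` byte order; swap the metadata
        // back to native byte order for validation.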
        let m = metadata.swap_bytes::<E>();

        let Ok(layout) = T::pointee_layout::<O>(m) else {
            return Err(CoerceError::new(CoerceErrorKind::InvalidLayout {
                size: T::size::<O>(m),
                align: T::align::<O>(m),
            }));
        };

        let offset_usize = offset.swap_bytes::<E>().as_usize();

        if offset_usize.checked_add(layout.size()).is_none() {
            return Err(CoerceError::new(CoerceErrorKind::InvalidOffsetRange {
                offset: offset_usize,
                end: usize::MAX - layout.size(),
            }));
        }

        Ok(Self {
            offset,
            metadata,
            _marker: PhantomData,
        })
    }

    /// Construct a reference with custom metadata.
    ///
    /// This will fail to compile through a constant assertion if the `offset`
    /// or `metadata` can't be byte swapped as per
    /// [`ZeroCopy::CAN_SWAP_BYTES`].
    ///
    /// # Panics
    ///
    /// This will panic if:
    /// * Packed [`offset()`] cannot be constructed from `U` (out of range).
    /// * Packed [`metadata()`] cannot be constructed from `T::Metadata` (reason
    ///   depends on the exact metadata).
    /// * The metadata does not describe a valid [`Layout`].
    /// * The `offset` plus this layout's size overflows `usize::MAX`.
    ///
    /// To guarantee that this constructor will never panic, [`Ref<T, Native,
    /// usize>`] can be used. This also ensures that construction is a no-op.
    ///
    /// [`Layout`]: core::alloc::Layout
    /// [`offset()`]: Ref::offset
    /// [`metadata()`]: Ref::metadata
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::Ref;
    ///
    /// let reference = Ref::<[u64]>::with_metadata(42u32, 10);
    /// assert_eq!(reference.offset(), 42);
    /// assert_eq!(reference.len(), 10);
    /// ```
    ///
    /// Using maximally sized metadata with different byte orderings:
    ///
    /// ```
    /// use musli_zerocopy::Ref;
    /// use musli_zerocopy::endian::{Big, Little};
    ///
    /// let o = usize::MAX - isize::MAX as usize;
    /// let l = isize::MAX as usize;
    ///
    /// let a = Ref::<[u8], Big, usize>::with_metadata(o, l);
    /// let b = Ref::<[u8], Little, usize>::with_metadata(o, l);
    ///
    /// assert_eq!(a.len(), l);
    /// assert_eq!(a.len(), b.len());
    /// ```
    #[inline]
    pub fn with_metadata<U>(offset: U, metadata: T::Metadata) -> Self
    where
        U: Size,
    {
        match Ref::try_with_metadata(offset, metadata) {
            Ok(ok) => ok,
            Err(error) => panic!("{error}"),
        }
    }

    /// Try to construct a reference with metadata.
    ///
    /// # Errors
    ///
    /// This will fail to compile through a constant assertion if the `offset`
    /// or `metadata` can't be byte swapped as per
    /// [`ZeroCopy::CAN_SWAP_BYTES`].
    ///
    /// This will error if:
    /// * Packed [`offset()`] cannot be constructed from `U` (out of range).
    /// * Packed [`metadata()`] cannot be constructed from `T::Metadata` (reason
    ///   depends on the exact metadata).
    /// * The metadata does not describe a valid [`Layout`].
    /// * The `offset` plus this layout's size overflows `usize::MAX`.
    ///
    /// To guarantee that this constructor will never error, [`Ref<T, Native,
    /// usize>`] can be used. This also ensures that construction is a no-op.
    ///
    /// [`Layout`]: core::alloc::Layout
    /// [`offset()`]: Ref::offset
    /// [`metadata()`]: Ref::metadata
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::Ref;
    ///
    /// let reference = Ref::<[u64]>::try_with_metadata(42u32, 10)?;
    /// assert_eq!(reference.offset(), 42);
    /// assert_eq!(reference.len(), 10);
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    ///
    /// Using maximally sized metadata with different byte orderings:
    ///
    /// ```
    /// use musli_zerocopy::Ref;
    /// use musli_zerocopy::endian::{Big, Little};
    ///
    /// let o = usize::MAX - isize::MAX as usize;
    /// let l = isize::MAX as usize;
    ///
    /// let a = Ref::<[u8], Big, usize>::try_with_metadata(o, l)?;
    /// let b = Ref::<[u8], Little, usize>::try_with_metadata(o, l)?;
    ///
    /// assert_eq!(a.len(), l);
    /// assert_eq!(a.len(), b.len());
    ///
    /// assert!(Ref::<[u8], Big, usize>::try_with_metadata(o + 1, l).is_err());
    /// assert!(Ref::<[u8], Little, usize>::try_with_metadata(o + 1, l).is_err());
    /// # Ok::<_, musli_zerocopy::CoerceError>(())
    /// ```
    pub fn try_with_metadata<U>(offset: U, metadata: T::Metadata) -> Result<Self, CoerceError>
    where
        U: Size,
    {
        const {
            assert!(
                O::CAN_SWAP_BYTES,
                "Offset cannot be byte-ordered since it would not inhabit valid types"
            );

            assert!(
                T::Stored::<O>::CAN_SWAP_BYTES,
                "Packed metadata cannot be byte-ordered since it would not inhabit valid types"
            );
        }

        let offset = O::try_from(offset)?;
        let metadata = T::try_from_metadata(metadata)?;

        let Ok(layout) = T::pointee_layout::<O>(metadata) else {
            return Err(CoerceError::new(CoerceErrorKind::InvalidLayout {
                size: T::size::<O>(metadata),
                align: T::align::<O>(metadata),
            }));
        };

        let offset_usize = offset.as_usize();

        if offset_usize.checked_add(layout.size()).is_none() {
            return Err(CoerceError::new(CoerceErrorKind::InvalidOffsetRange {
                offset: offset_usize,
                end: usize::MAX - layout.size(),
            }));
        }

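        // Validation above used native byte order; swap into the stored `E`
        // byte order only now.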
        Ok(Ref {
            offset: O::swap_bytes::<E>(offset),
            metadata: T::Stored::swap_bytes::<E>(metadata),
            _marker: PhantomData,
        })
    }
}

impl<T, E, O> Ref<[T], E, O>
where
    T: ZeroCopy,
    E: ByteOrder,
    O: Size,
{
    /// Return the number of elements in the slice `[T]`.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::pointer::Ref;
    ///
    /// let slice = Ref::<[u32]>::with_metadata(0u32, 2);
    /// assert_eq!(slice.len(), 2);
    /// ```
    #[inline]
    pub fn len(self) -> usize {
        self.metadata.swap_bytes::<E>().as_usize()
    }

    /// Test if the slice `[T]` is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::pointer::Ref;
    ///
    /// let slice = Ref::<[u32]>::with_metadata(0u32, 0);
    /// assert!(slice.is_empty());
    ///
    /// let slice = Ref::<[u32]>::with_metadata(0u32, 2);
    /// assert!(!slice.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(self) -> bool {
        self.metadata.is_zero()
    }

    /// Try to get a reference directly out of the slice without validation.
    ///
    /// This avoids having to validate every element in a slice in order to
    /// address them.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    /// let slice = buf.store_slice(&[1, 2, 3, 4]);
    ///
    /// let two = slice.get(2).expect("Missing element 2");
    /// assert_eq!(buf.load(two)?, &3);
    ///
    /// assert!(slice.get(4).is_none());
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline]
    pub fn get(self, index: usize) -> Option<Ref<T, E, O>> {
        if index >= self.len() {
            return None;
        }

        let offset = self.offset.swap_bytes::<E>().as_usize() + size_of::<T>() * index;
        Some(Ref::new(offset))
    }

    /// Get an unchecked reference directly out of the slice without validation.
    ///
    /// This avoids having to validate every element in a slice in order to
    /// address them.
    ///
    /// In contrast to [`get()`], this does not check that the index is within
    /// the bounds of the current slice. It is still not `unsafe`, since an
    /// out-of-bounds reference cannot lead to memory unsafety, only garbled
    /// data.
    ///
    /// [`get()`]: Ref::get
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    /// let slice = buf.store_slice(&[1, 2, 3, 4]);
    ///
    /// let two = slice.get_unchecked(2);
    /// assert_eq!(buf.load(two)?, &3);
    ///
    /// let oob = slice.get_unchecked(4);
    /// assert!(buf.load(oob).is_err());
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    pub fn get_unchecked(self, index: usize) -> Ref<T, E, O> {
        let offset = self.offset.swap_bytes::<E>().as_usize() + size_of::<T>() * index;
        Ref::new(offset)
    }

    /// Split the slice reference at the given position `at`.
    ///
    /// # Panics
    ///
    /// This panics if the given position is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    /// let slice = buf.store_slice(&[1, 2, 3, 4]);
    ///
    /// buf.align_in_place();
    ///
    /// let (a, b) = slice.split_at(3);
    /// let (c, d) = slice.split_at(4);
    ///
    /// assert_eq!(buf.load(a)?, &[1, 2, 3]);
    /// assert_eq!(buf.load(b)?, &[4]);
    /// assert_eq!(buf.load(c)?, &[1, 2, 3, 4]);
    /// assert_eq!(buf.load(d)?, &[]);
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline]
    pub fn split_at(self, at: usize) -> (Self, Self) {
        let offset = self.offset();
        let len = self.len();
        assert!(at <= len, "Split point {at} is out of bounds 0..={len}");
        let a = Self::with_metadata(offset, at);
        let b = Self::with_metadata(offset + at * size_of::<T>(), len - at);
        (a, b)
    }

    /// Perform a fetch like `get`, which panics with diagnostics in case the
    /// index is out of bounds.
    #[inline]
    #[cfg(feature = "alloc")]
    pub(crate) fn at(self, index: usize) -> Ref<T, E, O> {
        let Some(r) = self.get(index) else {
            panic!("Index {index} out of bounds 0-{}", self.len());
        };

        r
    }

    /// Construct an iterator over this reference.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    /// buf.extend_from_slice(&[1, 2, 3, 4]);
    ///
    /// let slice = buf.store_slice(&[1, 2, 3, 4]);
    ///
    /// buf.align_in_place();
    ///
    /// let mut out = Vec::new();
    ///
    /// for r in slice.iter() {
    ///     out.push(*buf.load(r)?);
    /// }
    ///
    /// for r in slice.iter().rev() {
    ///     out.push(*buf.load(r)?);
    /// }
    ///
    /// assert_eq!(out, [1, 2, 3, 4, 4, 3, 2, 1]);
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline]
    pub fn iter(self) -> Iter<T, E, O> {
        let start = self.offset.swap_bytes::<E>().as_usize();
        let end = start + self.metadata.swap_bytes::<E>().as_usize() * size_of::<T>();

        Iter {
            start,
            end,
            _marker: PhantomData,
        }
    }
}

impl<E, O> Ref<str, E, O>
where
    E: ByteOrder,
    O: Size,
{
    /// Return the length of the string in bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::pointer::Ref;
    ///
    /// let slice = Ref::<str>::with_metadata(0u32, 2);
    /// assert_eq!(slice.len(), 2);
    /// ```
    #[inline]
    pub fn len(self) -> usize {
        self.metadata.swap_bytes::<E>().as_usize()
    }

    /// Test if the string is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::pointer::Ref;
    ///
    /// let slice = Ref::<str>::with_metadata(0u32, 0);
    /// assert!(slice.is_empty());
    ///
    /// let slice = Ref::<str>::with_metadata(0u32, 2);
    /// assert!(!slice.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(self) -> bool {
        self.metadata.is_zero()
    }
}

/// An iterator over a `Ref<[T]>` which produces `Ref<T>` values.
///
/// See [`Ref::iter`].
pub struct Iter<T, E, O> {
    start: usize,
    end: usize,
    _marker: PhantomData<(T, E, O)>,
}

impl<T, E, O> Iterator for Iter<T, E, O>
where
    T: ZeroCopy,
    E: ByteOrder,
    O: Size,
{
    type Item = Ref<T, E, O>;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        if self.start == self.end {
            return None;
        }

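        // Yield a reference at the current cursor, then advance it by one
        // element.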
        let start = self.start;
        self.start += size_of::<T>();
        Some(Ref::new(start))
    }
}

impl<T, E, O> DoubleEndedIterator for Iter<T, E, O>
where
    T: ZeroCopy,
    E: ByteOrder,
    O: Size,
{
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.start == self.end {
            return None;
        }

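        // Step the end cursor back one element and yield a reference to it.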
        self.end -= size_of::<T>();
        Some(Ref::new(self.end))
    }
}

impl<T, E, O> Ref<T, E, O>
where
    T: ?Sized + Pointee,
    E: ByteOrder,
    O: Size,
{
    /// Get the metadata of the reference, such as the length of a slice or
    /// string.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::pointer::Ref;
    ///
    /// let slice = Ref::<str>::with_metadata(0u32, 10);
    /// assert_eq!(slice.metadata(), 10);
    /// ```
    #[inline]
    pub fn metadata(self) -> T::Stored<O> {
        self.metadata
    }
}

impl<T, E, O> Ref<T, E, O>
where
    T: Pointee<Metadata = (), Stored<O> = ()>,
    E: ByteOrder,
    O: Size,
{
    /// Construct a reference at the given offset.
    ///
    /// # Errors
    ///
    /// This will fail to compile through a constant assertion if the `offset`
    /// can't be byte swapped as per [`ZeroCopy::CAN_SWAP_BYTES`].
    ///
    /// # Panics
    ///
    /// This will panic if:
    /// * Packed [`offset()`] cannot be constructed from `U` (out of range).
    ///
    /// [`offset()`]: Self::offset
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::Ref;
    ///
    /// let reference = Ref::<u64>::new(42u32);
    /// assert_eq!(reference.offset(), 42);
    /// ```
    ///
    /// Characters cannot be used as offsets:
    ///
    /// ```compile_fail
    /// use musli_zerocopy::Ref;
    ///
    /// let reference = Ref::<_, _, char>::new('a');
    /// ```
    #[inline]
    pub fn new<U>(offset: U) -> Self
    where
        U: Size,
    {
        const {
            assert!(
                O::CAN_SWAP_BYTES,
                "Offset cannot be byte-ordered since it would not inhabit valid types",
            );
        }

        let Ok(offset) = O::try_from(offset) else {
            panic!(
                "Offset {} not in the valid range 0-{}",
                offset.as_usize(),
                O::MAX_USIZE
            );
        };

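        // Swap the validated offset into the stored `E` byte order; the unit
        // metadata needs no conversion.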
        Ref::from_parts(O::swap_bytes::<E>(offset), ())
    }

    /// Construct a typed reference to the zeroth offset in a buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::Ref;
    ///
    /// let reference = Ref::<u64>::zero();
    /// assert_eq!(reference.offset(), 0);
    /// ```
    #[inline]
    pub const fn zero() -> Self {
        Self {
            offset: O::ZERO,
            metadata: (),
            _marker: PhantomData,
        }
    }
}

impl<T, E, O> Ref<T, E, O>
where
    T: ?Sized + Pointee,
    E: ByteOrder,
    O: Size,
{
    /// Get the offset the reference points to.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::Ref;
    /// use musli_zerocopy::endian::{Big, Little, Native};
    ///
    /// let reference = Ref::<u64, Native>::new(42u32);
    /// assert_eq!(reference.offset(), 42);
    ///
    /// let reference = Ref::<u64, Little>::new(42u32);
    /// assert_eq!(reference.offset(), 42);
    ///
    /// let reference = Ref::<u64, Big>::new(42u32);
    /// assert_eq!(reference.offset(), 42);
    /// ```
    #[inline]
    pub fn offset(self) -> usize {
        self.offset.swap_bytes::<E>().as_usize()
    }

    /// Coerce from one kind of reference to another, ensuring that the
    /// destination type `U` is size-compatible.
    ///
    /// This performs metadata conversion if the destination metadata for `U`
    /// differs from `T`, such as from `[u32]` to `[u8]`, where the length is
    /// multiplied by 4 to ensure that the slice points to an appropriately
    /// sized region.
    ///
    /// If the metadata conversion would overflow, this will wrap around the
    /// numerical bounds, or panic in debug builds.
    ///
    /// See [`try_coerce()`], the checked variant of this method, for more
    /// documentation.
    ///
    /// [`try_coerce()`]: Self::try_coerce
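    ///
    /// # Examples
    ///
    /// A minimal example, mirroring the coercion shown in the documentation
    /// for [`try_coerce()`]:
    ///
    /// ```
    /// use musli_zerocopy::Ref;
    ///
    /// // A `u64` is 8 bytes, so it can be coerced into a slice of two `u32`s.
    /// let reference: Ref<u64> = Ref::zero();
    /// let reference2 = reference.coerce::<[u32]>();
    /// assert_eq!(reference2.len(), 2);
    /// ```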
    pub fn coerce<U>(self) -> Ref<U, E, O>
    where
        T: Coerce<U>,
        U: ?Sized + Pointee,
    {
        // NB: Since the metadata representation is byte-swapped, we need to
        // swap it back and forth to convert to native representation during
        // coercion.
        let metadata = T::coerce_metadata(self.metadata.swap_bytes::<E>()).swap_bytes::<E>();
        Ref::from_parts(self.offset, metadata)
    }

    /// Try to coerce from one kind of reference to another, ensuring that the
    /// destination type `U` is size-compatible.
    ///
    /// This performs metadata conversion if the destination metadata for `U`
    /// differs from `T`, such as from `[u32]` to `[u8]`, where the length is
    /// multiplied by 4 to ensure that the slice points to an appropriately
    /// sized region.
    ///
    /// This returns an error in case the metadata would overflow due to the
    /// conversion.
    ///
    /// ```
    /// use musli_zerocopy::Ref;
    ///
    /// let reference: Ref<u64> = Ref::zero();
    /// let reference2 = reference.coerce::<[u32]>();
    /// assert_eq!(reference2.len(), 2);
    /// ```
    ///
    /// This method ensures that coercions across inappropriate types are
    /// prohibited, such as coercing from a reference to a slice which is too
    /// large.
    ///
    /// ```compile_fail
    /// use musli_zerocopy::Ref;
    ///
    /// let reference: Ref<u32> = Ref::zero();
    /// let reference2 = reference.coerce::<[u64]>();
    /// ```
    ///
    /// If metadata needs to be adjusted for the destination type such as for
    /// slices, it will be:
    ///
    /// ```
    /// use musli_zerocopy::Ref;
    ///
    /// let reference: Ref<[u32]> = Ref::with_metadata(0u32, 1);
    /// let reference2 = reference.try_coerce::<[u8]>()?;
    /// assert_eq!(reference2.len(), 4);
    ///
    /// let reference: Ref<str> = Ref::with_metadata(0u32, 12);
    /// let reference2 = reference.try_coerce::<[u8]>()?;
    /// assert_eq!(reference2.len(), 12);
    /// # Ok::<_, musli_zerocopy::CoerceError>(())
    /// ```
    ///
    /// This does mean that numerical overflow might occur if the packed
    /// metadata is too small:
    ///
    /// ```
    /// use musli_zerocopy::Ref;
    /// use musli_zerocopy::endian::Native;
    ///
    /// let reference = Ref::<[u32], Native, u8>::with_metadata(0u32, 64);
    /// let reference2 = reference.try_coerce::<[u8]>();
    /// assert!(reference2.is_err()); // 64 * 4 would overflow u8 packed metadata.
    /// ```
    ///
    /// Coercion of non-zero types is supported, but does not guarantee that
    /// the destination data is valid.
    pub fn try_coerce<U>(self) -> Result<Ref<U, E, O>, CoerceError>
    where
        T: Coerce<U>,
        U: ?Sized + Pointee,
    {
        // NB: Since the metadata representation is byte-swapped, we need to
        // swap it back and forth to convert to native representation during
        // coercion.
        let metadata = T::try_coerce_metadata(self.metadata.swap_bytes::<E>())?.swap_bytes::<E>();
        Ref::try_from_parts(self.offset, metadata)
    }

    #[cfg(test)]
    pub(crate) fn cast<U>(self) -> Ref<U, E, O>
    where
        U: ?Sized + Pointee<Stored<O> = T::Stored<O>>,
    {
        Ref::from_parts(self.offset, self.metadata)
    }
}

impl<T, const N: usize, E, O> Ref<[T; N], E, O>
where
    T: ZeroCopy,
    E: ByteOrder,
    O: Size,
{
    /// Coerce a reference to an array into a slice.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    ///
    /// let values = buf.store(&[1, 2, 3, 4]);
    /// let slice = values.array_into_slice();
    ///
    /// assert_eq!(buf.load(slice)?, &[1, 2, 3, 4]);
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline]
    pub fn array_into_slice(self) -> Ref<[T], E, O> {
        Ref::with_metadata(self.offset, N)
    }
}

impl<T, E, O> Ref<MaybeUninit<T>, E, O>
where
    T: Pointee,
    E: ByteOrder,
    O: Size,
{
    /// Assume that the reference is initialized.
    ///
    /// Unlike its counterpart in the standard library, this isn't actually
    /// unsafe, because in order to load the reference again it has to be
    /// validated anyway.
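    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `MaybeUninit` is the re-export at
    /// `musli_zerocopy::mem::MaybeUninit` used by this module and that it
    /// forwards the [`Pointee`] implementation of its inner type:
    ///
    /// ```
    /// use musli_zerocopy::Ref;
    /// use musli_zerocopy::mem::MaybeUninit;
    ///
    /// // Only the type parameter changes; offset and metadata are reused.
    /// let reference = Ref::<MaybeUninit<u64>>::zero();
    /// let reference: Ref<u64> = reference.assume_init();
    /// assert_eq!(reference.offset(), 0);
    /// ```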
    #[inline]
    pub const fn assume_init(self) -> Ref<T, E, O> {
        Ref {
            offset: self.offset,
            metadata: self.metadata,
            _marker: PhantomData,
        }
    }
}

impl<T, E, O> fmt::Debug for Ref<T, E, O>
where
    T: ?Sized + Pointee<Stored<O>: fmt::Debug>,
    E: ByteOrder,
    O: Size + fmt::Debug,
{
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "Ref<{}, {}> {{ offset: {:?}, metadata: {:?} }}",
            any::type_name::<T>(),
            E::NAME,
            self.offset,
            self.metadata,
        )
    }
}

impl<T, E, O> Clone for Ref<T, E, O>
where
    T: ?Sized + Pointee,
    E: ByteOrder,
    O: Size,
{
    #[inline]
    fn clone(&self) -> Self {
        *self
    }
}

impl<T, E, O> Copy for Ref<T, E, O>
where
    T: ?Sized + Pointee,
    E: ByteOrder,
    O: Size,
{
}

impl<T, E, O> PartialEq for Ref<T, E, O>
where
    T: ?Sized + Pointee<Stored<O>: PartialEq>,
    E: ByteOrder,
    O: PartialEq + Size,
{
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.offset == other.offset && self.metadata == other.metadata
    }
}

impl<T, E, O> Eq for Ref<T, E, O>
where
    T: ?Sized + Pointee<Stored<O>: Eq>,
    E: ByteOrder,
    O: Eq + Size,
{
}

impl<T, E, O> PartialOrd for Ref<T, E, O>
where
    T: ?Sized + Pointee<Stored<O>: PartialOrd>,
    E: ByteOrder,
    O: Ord + Size,
{
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        match self.offset.partial_cmp(&other.offset) {
            Some(Ordering::Equal) => {}
            ord => return ord,
        }

        self.metadata.partial_cmp(&other.metadata)
    }
}

impl<T, E, O> Ord for Ref<T, E, O>
where
    T: ?Sized + Pointee<Stored<O>: Ord>,
    E: ByteOrder,
    O: Ord + Size,
{
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        match self.offset.cmp(&other.offset) {
            Ordering::Equal => {}
            ord => return ord,
        }

        self.metadata.cmp(&other.metadata)
    }
}

impl<T, E, O> Hash for Ref<T, E, O>
where
    T: ?Sized + Pointee<Stored<O>: Hash>,
    E: ByteOrder,
    O: Hash + Size,
{
    #[inline]
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        self.offset.hash(state);
        self.metadata.hash(state);
    }
}