musli_zerocopy/buf/owned_buf.rs

use core::alloc::Layout;
use core::borrow::Borrow;
use core::marker::PhantomData;
use core::mem::{align_of, size_of, size_of_val, ManuallyDrop};
use core::ops::{Deref, DerefMut};
use core::ptr::NonNull;
use core::slice::{self, SliceIndex};

#[cfg(feature = "std")]
use std::io;

use alloc::alloc;

use crate::buf::{self, Buf, DefaultAlignment, Padder, StoreBuf};
use crate::endian::{ByteOrder, Native};
use crate::error::Error;
use crate::mem::MaybeUninit;
use crate::pointer::{DefaultSize, Ref, Size};
use crate::traits::{UnsizedZeroCopy, ZeroCopy};

/// An allocating buffer with dynamic alignment.
///
/// By default this buffer starts out having the same alignment as `usize`,
/// making it platform specific. But this alignment can grow on demand to match
/// the types being stored in it.
///
/// # Examples
///
/// ```
/// use musli_zerocopy::{OwnedBuf, ZeroCopy};
///
/// #[derive(ZeroCopy)]
/// #[repr(C, align(128))]
/// struct Custom { field: u32 }
///
/// let mut buf = OwnedBuf::new();
/// buf.store(&Custom { field: 10 });
/// ```
pub struct OwnedBuf<E = Native, O = DefaultSize>
where
    E: ByteOrder,
    O: Size,
{
    data: NonNull<u8>,
    /// The initialized length of the buffer.
    len: usize,
    /// The capacity of the buffer.
    capacity: usize,
    /// The requested alignment.
    requested: usize,
    /// The current alignment.
    align: usize,
    /// Holding onto the current pointer size.
    _marker: PhantomData<(E, O)>,
}

impl Default for OwnedBuf {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl OwnedBuf {
    /// Construct a new empty buffer with the default alignment.
    ///
    /// The default alignment is guaranteed to be larger than 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let buf = OwnedBuf::new();
    /// assert!(buf.is_empty());
    /// assert!(buf.alignment() > 0);
    /// assert!(buf.alignment() >= buf.requested());
    /// ```
    pub const fn new() -> Self {
        Self::with_alignment::<DefaultAlignment>()
    }

    /// Allocate a new buffer with the given capacity and default alignment.
    ///
    /// The buffer must allocate for at least the given `capacity`, but might
    /// allocate more. If the capacity specified is `0` it will not allocate.
    ///
    /// # Panics
    ///
    /// Panics if the specified capacity and memory layout are illegal, which
    /// happens if:
    /// * The alignment is not a power of two.
    /// * The specified capacity causes the needed memory to overflow
    ///   `isize::MAX`.
    ///
    /// ```should_panic
    /// use std::mem::align_of;
    ///
    /// use musli_zerocopy::{endian, DefaultAlignment, OwnedBuf};
    ///
    /// let max = isize::MAX as usize - (align_of::<DefaultAlignment>() - 1);
    /// OwnedBuf::<endian::Native, u32>::with_capacity(max);
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let buf = OwnedBuf::with_capacity(6);
    /// assert!(buf.capacity() >= 6);
    /// ```
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_and_alignment::<DefaultAlignment>(capacity)
    }

    /// Construct a new empty buffer with an alignment matching that of `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let buf = OwnedBuf::with_alignment::<u64>();
    /// assert!(buf.is_empty());
    /// assert!(buf.alignment() >= 8);
    /// assert_eq!(buf.requested(), 8);
    /// ```
    pub const fn with_alignment<T>() -> Self {
        let align = align_of::<T>();

        Self {
            // SAFETY: Alignment is asserted through `T`.
            data: unsafe { dangling(align) },
            len: 0,
            capacity: 0,
            requested: align,
            align,
            _marker: PhantomData,
        }
    }

    /// Allocate a new buffer with the given `capacity` and an alignment
    /// matching that of `T`.
    ///
    /// The buffer must allocate for at least the given `capacity`, but might
    /// allocate more. If the capacity specified is `0` it will not allocate.
    ///
    /// # Panics
    ///
    /// Panics if the specified capacity and memory layout are illegal, which
    /// happens if:
    /// * The alignment is not a power of two.
    /// * The specified capacity causes the needed memory to overflow
    ///   `isize::MAX`.
    ///
    /// ```should_panic
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let max = isize::MAX as usize - (8 - 1);
    /// OwnedBuf::with_capacity_and_alignment::<u64>(max);
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let buf = OwnedBuf::with_capacity_and_alignment::<u16>(6);
    /// assert!(buf.capacity() >= 6);
    /// assert!(buf.alignment() >= 2);
    /// ```
    pub fn with_capacity_and_alignment<T>(capacity: usize) -> Self {
        // SAFETY: Alignment of `T` is always a power of two.
        unsafe { Self::with_capacity_and_custom_alignment(capacity, align_of::<T>()) }
    }
}

impl<E, O> OwnedBuf<E, O>
where
    E: ByteOrder,
    O: Size,
{
    /// Modify the buffer to utilize the specified pointer size when inserting
    /// references.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::with_capacity(1024)
    ///     .with_size::<u8>();
    /// ```
    #[inline]
    pub fn with_size<U: Size>(self) -> OwnedBuf<E, U> {
        let this = ManuallyDrop::new(self);

        OwnedBuf {
            data: this.data,
            len: this.len,
            capacity: this.capacity,
            requested: this.requested,
            align: this.align,
            _marker: PhantomData,
        }
    }

    /// Modify the buffer to utilize the specified byte order when inserting
    /// references.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::{endian, OwnedBuf};
    ///
    /// let mut buf = OwnedBuf::with_capacity(1024)
    ///     .with_byte_order::<endian::Little>();
    /// ```
    #[inline]
    pub fn with_byte_order<U>(self) -> OwnedBuf<U, O>
    where
        U: ByteOrder,
    {
        let this = ManuallyDrop::new(self);

        OwnedBuf {
            data: this.data,
            len: this.len,
            capacity: this.capacity,
            requested: this.requested,
            align: this.align,
            _marker: PhantomData,
        }
    }

    // # Safety
    //
    // The specified alignment must be a power of two.
    pub(crate) unsafe fn with_capacity_and_custom_alignment(capacity: usize, align: usize) -> Self {
        if capacity == 0 {
            return Self {
                // SAFETY: The caller guarantees that `align` is a power of two.
                data: dangling(align),
                len: 0,
                capacity: 0,
                requested: align,
                align,
                _marker: PhantomData,
            };
        }

        let layout = Layout::from_size_align(capacity, align).expect("Illegal memory layout");

        unsafe {
            let data = alloc::alloc(layout);

            if data.is_null() {
                alloc::handle_alloc_error(layout);
            }

            Self {
                data: NonNull::new_unchecked(data),
                len: 0,
                capacity,
                requested: align,
                align,
                _marker: PhantomData,
            }
        }
    }

    /// Get the current length of the buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let buf = OwnedBuf::new();
    /// assert_eq!(buf.len(), 0);
    /// ```
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Clear the current buffer.
    ///
    /// This won't cause any reallocations.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    /// assert_eq!(buf.capacity(), 0);
    /// buf.extend_from_slice(&[1, 2, 3, 4]);
    ///
    /// assert_eq!(buf.len(), 4);
    /// buf.clear();
    /// assert!(buf.capacity() > 0);
    /// assert_eq!(buf.len(), 0);
    /// ```
    #[inline]
    pub fn clear(&mut self) {
        self.len = 0;
    }

    /// Test if the buffer is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let buf = OwnedBuf::new();
    /// assert!(buf.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Get the current capacity of the buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let buf = OwnedBuf::new();
    /// assert_eq!(buf.capacity(), 0);
    /// ```
    #[inline]
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Return the requested alignment of the buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let buf = OwnedBuf::with_alignment::<u64>();
    /// assert!(buf.is_empty());
    /// assert!(buf.alignment() >= 8);
    /// assert_eq!(buf.requested(), 8);
    /// ```
    #[inline]
    pub fn requested(&self) -> usize {
        self.requested
    }

    /// Reserve capacity for at least `capacity` more bytes in this buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    /// assert_eq!(buf.capacity(), 0);
    ///
    /// buf.reserve(10);
    /// assert!(buf.capacity() >= 10);
    /// ```
    #[inline]
    pub fn reserve(&mut self, capacity: usize) {
        let new_capacity = self.len + capacity;
        self.ensure_capacity(new_capacity);
    }

    /// Advance the length of the owned buffer by `size`.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the bytes up until `len() + size` have been
    /// initialized in this buffer.
    #[inline]
    pub unsafe fn advance(&mut self, size: usize) {
        self.len += size;
    }

    /// Get a raw pointer to the current buffer.
    #[inline]
    pub(crate) fn as_ptr(&self) -> *const u8 {
        self.data.as_ptr() as *const _
    }

    /// Get a raw mutable pointer to the current buffer.
    #[inline]
    pub(crate) fn as_mut_ptr(&mut self) -> *mut u8 {
        self.data.as_ptr()
    }

    /// Get a `NonNull` pointer to the current buffer.
    #[inline]
    #[cfg(test)]
    pub(crate) fn as_nonnull(&mut self) -> NonNull<u8> {
        self.data
    }
    /// Extract a slice containing the entire buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    /// buf.extend_from_slice(b"hello world");
    /// assert_eq!(buf.as_slice(), b"hello world");
    /// ```
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.as_ptr(), self.len()) }
    }

    /// Extract a mutable slice containing the entire buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    /// buf.extend_from_slice(b"hello world");
    /// buf.as_mut_slice().make_ascii_uppercase();
    /// assert_eq!(buf.as_slice(), b"HELLO WORLD");
    /// ```
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len()) }
    }

    /// Store an uninitialized value.
    ///
    /// This allows values to be inserted before they can be initialized, which
    /// can be useful if you need them to be in a certain location in the buffer
    /// but don't have access to their value yet.
    ///
    /// The memory for `T` will be zero-initialized at [`next_offset<T>()`] and
    /// the length and alignment requirement of `OwnedBuf` updated to reflect
    /// that an instance of `T` has been stored. But that zeroed representation
    /// might not be a valid representation of `T`[^non-zero].
    ///
    /// To get the offset where the value will be written, call
    /// [`next_offset<T>()`] before storing the value.
    ///
    /// > **Note:** this does not return [`std::mem::MaybeUninit`]; instead we
    /// > use an internal [`MaybeUninit`] which is similar but has different
    /// > properties. See [its documentation][MaybeUninit] for more.
    ///
    /// [`next_offset<T>()`]: Self::next_offset()
    /// [^non-zero]: Like with [`NonZero*`][core::num] types.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::mem::MaybeUninit;
    /// use musli_zerocopy::{OwnedBuf, Ref, ZeroCopy};
    ///
    /// #[derive(ZeroCopy)]
    /// #[repr(C)]
    /// struct Custom { field: u32, string: Ref<str> }
    ///
    /// let mut buf = OwnedBuf::new();
    /// let reference: Ref<MaybeUninit<Custom>> = buf.store_uninit::<Custom>();
    ///
    /// let string = buf.store_unsized("Hello World!");
    ///
    /// buf.load_uninit_mut(reference).write(&Custom { field: 42, string });
    ///
    /// let reference = reference.assume_init();
    /// assert_eq!(reference.offset(), 0);
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline]
    pub fn store_uninit<T>(&mut self) -> Ref<MaybeUninit<T>, E, O>
    where
        T: ZeroCopy,
    {
        // SAFETY: We've just reserved capacity for this write.
        unsafe {
            self.next_offset_with_and_reserve(align_of::<T>(), size_of::<T>());
            let offset = self.len;
            self.data
                .as_ptr()
                .add(self.len)
                .write_bytes(0, size_of::<T>());
            self.len += size_of::<T>();
            Ref::new(offset)
        }
    }

    /// Load a mutable reference to a value which might not yet have been
    /// initialized, so that it can be written to.
    ///
    /// This does not prevent a [`Ref`] from a different instance of
    /// [`OwnedBuf`] from being used. That would only result in garbled data,
    /// but wouldn't be a safety concern.
    ///
    /// > **Note:** this does not return [`std::mem::MaybeUninit`]; instead we
    /// > use an internal [`MaybeUninit`] which is similar but has different
    /// > properties. See [its documentation][MaybeUninit] for more.
    ///
    /// # Panics
    ///
    /// Panics if the reference's [`Ref::offset()`] plus the size of `T` does
    /// not fit within the [`len()`] of the current structure. This might
    /// happen if you try to use a reference constructed from a different
    /// [`OwnedBuf`] instance.
    ///
    /// [`len()`]: Self::len()
    ///
    /// ```should_panic
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf1 = OwnedBuf::new();
    /// buf1.store(&1u32);
    ///
    /// let mut buf2 = OwnedBuf::new();
    /// buf2.store(&10u32);
    ///
    /// let number = buf2.store_uninit::<u32>();
    ///
    /// buf1.load_uninit_mut(number);
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::{OwnedBuf, Ref, ZeroCopy};
    /// use musli_zerocopy::mem::MaybeUninit;
    ///
    /// #[derive(ZeroCopy)]
    /// #[repr(C)]
    /// struct Custom { field: u32, string: Ref<str> }
    ///
    /// let mut buf = OwnedBuf::new();
    /// let reference: Ref<MaybeUninit<Custom>> = buf.store_uninit::<Custom>();
    ///
    /// let string = buf.store_unsized("Hello World!");
    ///
    /// buf.load_uninit_mut(reference).write(&Custom { field: 42, string });
    ///
    /// let reference = reference.assume_init();
    /// assert_eq!(reference.offset(), 0);
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline]
    pub fn load_uninit_mut<T, U, I>(
        &mut self,
        reference: Ref<MaybeUninit<T>, U, I>,
    ) -> &mut MaybeUninit<T>
    where
        T: ZeroCopy,
        U: ByteOrder,
        I: Size,
    {
        let at = reference.offset();

        // Note: We only need this as a debug assertion, because `MaybeUninit<T>`
        // does not implement `ZeroCopy`, so there is no way to construct one.
        assert!(at + size_of::<T>() <= self.len, "Length overflow");

        // SAFETY: `MaybeUninit<T>` has no representation requirements and is
        // unaligned.
        unsafe { &mut *(self.data.as_ptr().add(at) as *mut MaybeUninit<T>) }
    }

    /// Insert a value with the given size.
    ///
    /// The memory for `T` will be initialized at [`next_offset<T>()`] and the
    /// length and alignment requirement of `OwnedBuf` updated to reflect that
    /// an instance of `T` has been stored.
    ///
    /// To get the offset where the value will be written, call
    /// [`next_offset<T>()`] before storing the value or access the offset
    /// through the [`Ref::offset`] being returned.
    ///
    /// [`next_offset<T>()`]: Self::next_offset
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::{OwnedBuf, Ref, ZeroCopy};
    ///
    /// #[derive(ZeroCopy)]
    /// #[repr(C)]
    /// struct Custom { field: u32, string: Ref<str> }
    ///
    /// let mut buf = OwnedBuf::new();
    ///
    /// let string = buf.store_unsized("string");
    /// let custom = buf.store(&Custom { field: 1, string });
    /// let custom2 = buf.store(&Custom { field: 2, string });
    ///
    /// let custom = buf.load(custom)?;
    /// assert_eq!(custom.field, 1);
    /// assert_eq!(buf.load(custom.string)?, "string");
    ///
    /// let custom2 = buf.load(custom2)?;
    /// assert_eq!(custom2.field, 2);
    /// assert_eq!(buf.load(custom2.string)?, "string");
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    ///
    /// Storing an array:
    ///
    /// ```
    /// use musli_zerocopy::{ZeroCopy, OwnedBuf};
    ///
    /// // Element with padding.
    /// #[derive(Debug, PartialEq, ZeroCopy)]
    /// #[repr(C)]
    /// struct Element {
    ///     first: u8,
    ///     second: u32,
    /// }
    ///
    /// let values = [
    ///     Element { first: 0x01, second: 0x01020304u32 },
    ///     Element { first: 0x02, second: 0x01020304u32 }
    /// ];
    ///
    /// let mut buf = OwnedBuf::new();
    /// let array = buf.store(&values);
    /// assert_eq!(buf.load(array)?, &values);
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline]
    pub fn store<T>(&mut self, value: &T) -> Ref<T, E, O>
    where
        T: ZeroCopy,
    {
        self.next_offset_with_and_reserve(align_of::<T>(), size_of::<T>());

        // SAFETY: We're ensuring to both align the internal buffer and store
        // the value.
        unsafe { self.store_unchecked(value) }
    }

    /// Insert a value with the given size without ensuring that the buffer has
    /// the reserved capacity for it or is properly aligned.
    ///
    /// This is a low level API which is tricky to use correctly. The
    /// recommended way to use this is through [`OwnedBuf::store`].
    ///
    /// [`OwnedBuf::store`]: Self::store
    ///
    /// # Safety
    ///
    /// The caller has to ensure that the buffer has the required capacity for
    /// `&T` and is properly aligned. This can easily be accomplished by calling
    /// [`request_align::<T>()`] followed by [`align_in_place()`] before this
    /// function. A safe variant of this function is [`OwnedBuf::store`].
    ///
    /// [`align_in_place()`]: Self::align_in_place
    /// [`OwnedBuf::store`]: Self::store
    /// [`request_align::<T>()`]: Self::request_align
    ///
    /// # Examples
    ///
    /// ```
    /// use std::mem::size_of;
    ///
    /// use musli_zerocopy::{OwnedBuf, Ref, ZeroCopy};
    ///
    /// #[derive(ZeroCopy)]
    /// #[repr(C, align(4096))]
    /// struct Custom { field: u32, string: Ref<str> }
    ///
    /// let mut buf = OwnedBuf::new();
    ///
    /// let string = buf.store_unsized("string");
    ///
    /// buf.request_align::<Custom>();
    /// buf.reserve(2 * size_of::<Custom>());
    /// buf.align_in_place();
    ///
    /// // SAFETY: We've ensured that the buffer is internally aligned and sized just above.
    /// let custom = unsafe { buf.store_unchecked(&Custom { field: 1, string }) };
    /// let custom2 = unsafe { buf.store_unchecked(&Custom { field: 2, string }) };
    ///
    /// let custom = buf.load(custom)?;
    /// assert_eq!(custom.field, 1);
    /// assert_eq!(buf.load(custom.string)?, "string");
    ///
    /// let custom2 = buf.load(custom2)?;
    /// assert_eq!(custom2.field, 2);
    /// assert_eq!(buf.load(custom2.string)?, "string");
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline]
    pub unsafe fn store_unchecked<T>(&mut self, value: &T) -> Ref<T, E, O>
    where
        T: ZeroCopy,
    {
        let offset = self.len;
        let ptr = NonNull::new_unchecked(self.data.as_ptr().add(offset));
        buf::store_unaligned(ptr, value);
        self.len += size_of::<T>();
        Ref::new(offset)
    }

    /// Store an unsized value in the buffer.
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    ///
    /// let first = buf.store_unsized("first");
    /// let second = buf.store_unsized("second");
    ///
    /// dbg!(first, second);
    ///
    /// assert_eq!(buf.load(first)?, "first");
    /// assert_eq!(buf.load(second)?, "second");
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline]
    pub fn store_unsized<T>(&mut self, value: &T) -> Ref<T, E, O>
    where
        T: ?Sized + UnsizedZeroCopy,
    {
        unsafe {
            let size = size_of_val(value);
            self.next_offset_with_and_reserve(T::ALIGN, size);
            let offset = self.len;
            let ptr = NonNull::new_unchecked(self.data.as_ptr().add(offset));
            ptr.as_ptr().copy_from_nonoverlapping(value.as_ptr(), size);

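            // `T` may contain padding: explicitly initialize the padding bytes
            // so that no uninitialized memory ends up in the buffer.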
            if T::PADDED {
                let mut padder = Padder::new(ptr);
                value.pad(&mut padder);
                padder.remaining_unsized(value);
            }

            self.len += size;
            Ref::with_metadata(offset, value.metadata())
        }
    }

    /// Insert a slice into the buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    ///
    /// let mut values = Vec::new();
    ///
    /// values.push(buf.store_unsized("first"));
    /// values.push(buf.store_unsized("second"));
    ///
    /// let slice_ref = buf.store_slice(&values);
    ///
    /// let slice = buf.load(slice_ref)?;
    ///
    /// let mut strings = Vec::new();
    ///
    /// for value in slice {
    ///     strings.push(buf.load(*value)?);
    /// }
    ///
    /// assert_eq!(&strings, &["first", "second"][..]);
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline(always)]
    pub fn store_slice<T>(&mut self, values: &[T]) -> Ref<[T], E, O>
    where
        T: ZeroCopy,
    {
        self.store_unsized(values)
    }

    /// Extend the buffer from a slice.
    ///
    /// Note that this only extends the underlying buffer but does not ensure
    /// that any required alignment is abided by.
    ///
    /// To do this, the caller must call [`request_align()`] with the appropriate
    /// alignment; otherwise the necessary alignment to decode the buffer again
    /// will be lost.
    ///
    /// [`request_align()`]: Self::request_align
    ///
    /// # Errors
    ///
    /// This is a raw API, and does not guarantee that any given alignment will
    /// be respected. The following exemplifies incorrect use since the u32 type
    /// requires a 4-byte alignment:
    ///
    /// ```
    /// use musli_zerocopy::{OwnedBuf, Ref};
    ///
    /// let mut buf = OwnedBuf::with_alignment::<u32>();
    ///
    /// // Add one byte of padding to throw off any incidental alignment.
    /// buf.extend_from_slice(&[1]);
    ///
    /// let ptr: Ref<u32> = Ref::new(buf.next_offset::<u8>());
    /// buf.extend_from_slice(&[1, 2, 3, 4]);
    ///
    /// // This will succeed because the buffer follows its interior alignment:
    /// let buf = buf.as_ref();
    ///
    /// // This will fail, because the reference is not properly aligned.
    /// assert!(buf.load(ptr).is_err());
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::{OwnedBuf, Ref};
    ///
    /// let mut buf = OwnedBuf::with_alignment::<()>();
    ///
    /// // Add one byte of padding to throw off any incidental alignment.
    /// buf.extend_from_slice(&[1]);
    ///
    /// let ptr: Ref<u32> = Ref::new(buf.next_offset::<u32>());
    /// buf.extend_from_slice(&[1, 2, 3, 4]);
    ///
    /// // This will succeed because the buffer follows its interior alignment:
    /// let buf = buf.as_ref();
    ///
    /// assert_eq!(*buf.load(ptr)?, u32::from_ne_bytes([1, 2, 3, 4]));
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    pub fn extend_from_slice(&mut self, bytes: &[u8]) {
        self.reserve(bytes.len());

        // SAFETY: We just allocated space for the slice.
        unsafe {
            self.store_bytes(bytes);
        }
    }

    /// Fill and initialize the buffer with `byte` up to `len`.
    pub(crate) fn fill(&mut self, byte: u8, len: usize) {
        self.reserve(len);

        unsafe {
            let ptr = self.data.as_ptr().add(self.len);
            ptr.write_bytes(byte, len);
            self.len += len;
        }
    }

    /// Store the slice without allocating.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the buffer has the capacity for
    /// `bytes.len()` and that the value being stored is not padded as per
    /// `ZeroCopy::PADDED`.
    #[inline]
    pub(crate) unsafe fn store_bytes<T>(&mut self, values: &[T])
    where
        T: ZeroCopy,
    {
        let dst = self.as_mut_ptr().add(self.len);
        dst.copy_from_nonoverlapping(values.as_ptr().cast(), size_of_val(values));
        self.len += size_of_val(values);
    }

    /// Align a buffer in place if necessary.
    ///
    /// If [`requested()`] does not equal [`alignment()`] this will cause the
    /// buffer to be reallocated.
    ///
    /// [`requested()`]: Self::requested
    /// [`alignment()`]: Buf::alignment
    /// [`as_ref`]: Self::as_ref
    ///
    /// # Examples
    ///
    /// A buffer has to be aligned in order for `load` calls to succeed
    /// without errors.
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::with_alignment::<()>();
    /// let number = buf.store(&1u32);
    ///
    /// buf.align_in_place();
    ///
    /// assert_eq!(buf.load(number)?, &1u32);
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    ///
    /// Example using a mutable buffer. A buffer has to be aligned in order
    /// for `load` and `load_mut` calls to succeed without errors.
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::with_alignment::<()>();
    /// let number = buf.store(&1u32);
    ///
    /// buf.align_in_place();
    ///
    /// *buf.load_mut(number)? += 1;
    /// assert_eq!(buf.load(number)?, &2u32);
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline]
    pub fn align_in_place(&mut self) {
        // SAFETY: self.requested is guaranteed to be a power of two.
        if !buf::is_aligned_with(self.as_ptr(), self.requested) {
            let (old_layout, new_layout) = self.layouts(self.capacity);
            self.alloc_new(old_layout, new_layout);
        }
    }

    /// Request that the current buffer should have at least the specified
    /// alignment and zero-initialize the buffer up to the next position which
    /// matches the given alignment.
    ///
    /// Note that this does not guarantee that the internal buffer is aligned
    /// in memory; to ensure this you can use [`align_in_place()`].
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    /// let mut buf = OwnedBuf::new();
    ///
    /// buf.extend_from_slice(&[1, 2]);
    /// buf.request_align::<u32>();
    ///
    /// assert_eq!(buf.as_slice(), &[1, 2, 0, 0]);
    /// ```
    ///
    /// Calling this function only causes the underlying buffer to be realigned
    /// if a reallocation is triggered due to reaching its [`capacity()`].
    ///
    /// ```
    /// use musli_zerocopy::{endian, OwnedBuf};
    /// let mut buf = OwnedBuf::<endian::Native, u32>::with_capacity_and_alignment::<u16>(32);
    ///
    /// buf.extend_from_slice(&[1, 2]);
    /// assert!(buf.alignment() >= 2);
    /// buf.request_align::<u32>();
    ///
    /// assert_eq!(buf.requested(), 4);
    /// assert!(buf.alignment() >= 2);
    ///
    /// buf.extend_from_slice(&[0; 32]);
    /// assert_eq!(buf.requested(), 4);
    /// assert!(buf.alignment() >= 4);
    /// ```
    ///
    /// [`capacity()`]: Self::capacity
    /// [`align_in_place()`]: Self::align_in_place
    ///
    /// # Safety
    ///
    /// The caller must guarantee that the alignment is a power of two.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    /// buf.extend_from_slice(&[1, 2, 3, 4]);
    /// buf.request_align::<u64>();
    /// buf.extend_from_slice(&[5, 6, 7, 8]);
    ///
    /// assert_eq!(buf.as_slice(), &[1, 2, 3, 4, 0, 0, 0, 0, 5, 6, 7, 8]);
    /// ```
    #[inline]
    pub fn request_align<T>(&mut self)
    where
        T: ZeroCopy,
    {
        self.requested = self.requested.max(align_of::<T>());
        self.ensure_aligned_and_reserve(align_of::<T>(), size_of::<T>());
    }

    /// Zero-pad the current buffer up to the given alignment and reserve space
    /// for `reserve` additional bytes.
    #[inline]
    fn ensure_aligned_and_reserve(&mut self, align: usize, reserve: usize) {
        let extra = buf::padding_to(self.len, align);
        self.reserve(extra + reserve);

        // SAFETY: The length is ensured to be within the address space.
        unsafe {
            self.data.as_ptr().add(self.len).write_bytes(0, extra);
            self.len += extra;
        }
    }

    /// Pad the buffer so that the next write is aligned to `align`, reserving
    /// space for `reserve` additional bytes.
    #[inline]
    pub(crate) fn next_offset_with_and_reserve(&mut self, align: usize, reserve: usize) {
        self.requested = self.requested.max(align);
        self.ensure_aligned_and_reserve(align, reserve);
    }

    /// Construct an offset aligned for `T` into the current buffer which points
    /// to the next location that will be written.
    ///
    /// This ensures that the alignment of the pointer is a multiple of `align`
    /// and that the current buffer has the capacity to store `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::{OwnedBuf, Ref};
    ///
    /// let mut buf = OwnedBuf::new();
    ///
    /// // Add one byte of padding to throw off any incidental alignment.
    /// buf.extend_from_slice(&[1]);
    ///
    /// let ptr: Ref<u32> = Ref::new(buf.next_offset::<u32>());
    /// buf.extend_from_slice(&[1, 2, 3, 4]);
    ///
    /// // This will succeed because the buffer follows its interior alignment:
    /// let buf = buf.as_ref();
    ///
    /// assert_eq!(*buf.load(ptr)?, u32::from_ne_bytes([1, 2, 3, 4]));
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline]
    pub fn next_offset<T>(&mut self) -> usize {
        // SAFETY: The alignment of `T` is guaranteed to be a power of two. We
        // also make sure to reserve space for `T` since it is very likely that
        // it will be written immediately after this.
        self.next_offset_with_and_reserve(align_of::<T>(), size_of::<T>());
        self.len
    }

    // We never want this call to be inlined, because we take great care to
    // ensure that reallocations we perform publicly are performed in a sparse
    // way.
    #[inline(never)]
    fn ensure_capacity(&mut self, new_capacity: usize) {
        let new_capacity = new_capacity.max(self.requested);

        if self.capacity >= new_capacity {
            return;
        }

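        // Grow geometrically (roughly 1.5x the current capacity) so that
        // repeated reservations have amortized cost.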
        let new_capacity = new_capacity.max((self.capacity as f32 * 1.5) as usize);
        let (old_layout, new_layout) = self.layouts(new_capacity);

        if old_layout.size() == 0 {
            self.alloc_init(new_layout);
        } else if new_layout.align() == old_layout.align() {
            self.alloc_realloc(old_layout, new_layout);
        } else {
            self.alloc_new(old_layout, new_layout);
        }
    }

    /// Return a pair of the currently allocated layout and the new layout
    /// requested with the given capacity.
    #[inline]
    fn layouts(&self, new_capacity: usize) -> (Layout, Layout) {
        // SAFETY: The existing layout cannot be invalid since it's either
        // checked as it's replacing the old layout, or is initialized with
        // known good values.
        let old_layout = unsafe { Layout::from_size_align_unchecked(self.capacity, self.align) };
        let layout =
            Layout::from_size_align(new_capacity, self.requested).expect("Proposed layout invalid");
        (old_layout, layout)
    }

    /// Perform the initial allocation with the given layout and capacity.
    fn alloc_init(&mut self, new_layout: Layout) {
        unsafe {
            let ptr = alloc::alloc(new_layout);

            if ptr.is_null() {
                alloc::handle_alloc_error(new_layout);
            }

            self.data = NonNull::new_unchecked(ptr);
            self.capacity = new_layout.size();
            self.align = self.requested;
        }
    }

    /// Reallocate; note that the alignment of the old layout must match the
    /// new one.
    fn alloc_realloc(&mut self, old_layout: Layout, new_layout: Layout) {
        debug_assert_eq!(old_layout.align(), new_layout.align());

        unsafe {
            let ptr = alloc::realloc(self.as_mut_ptr(), old_layout, new_layout.size());

            if ptr.is_null() {
                alloc::handle_alloc_error(old_layout);
            }

            // NB: We may simply forget the old allocation, since `realloc` is
            // responsible for freeing it.
            self.data = NonNull::new_unchecked(ptr);
            self.capacity = new_layout.size();
        }
    }

    /// Perform a new allocation, deallocating the old one in the process.
    #[inline(always)]
    fn alloc_new(&mut self, old_layout: Layout, new_layout: Layout) {
        unsafe {
            let ptr = alloc::alloc(new_layout);

            if ptr.is_null() {
                alloc::handle_alloc_error(new_layout);
            }

            ptr.copy_from_nonoverlapping(self.as_ptr(), self.len);
            alloc::dealloc(self.as_mut_ptr(), old_layout);

            // We've deallocated the old pointer.
            self.data = NonNull::new_unchecked(ptr);
            self.capacity = new_layout.size();
            self.align = self.requested;
        }
    }
}

/// `OwnedBuf` is `Send` because the data it references is unaliased.
unsafe impl Send for OwnedBuf {}
/// `OwnedBuf` is `Sync` since it is `Send` and the data it references is
/// unaliased.
unsafe impl Sync for OwnedBuf {}

impl<E, O> Deref for OwnedBuf<E, O>
where
    E: ByteOrder,
    O: Size,
{
    type Target = Buf;

    #[inline]
    fn deref(&self) -> &Self::Target {
        Buf::new(self.as_slice())
    }
}

impl<E, O> DerefMut for OwnedBuf<E, O>
where
    E: ByteOrder,
    O: Size,
{
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        Buf::new_mut(self.as_mut_slice())
    }
}

impl<E, O> AsRef<Buf> for OwnedBuf<E, O>
where
    E: ByteOrder,
    O: Size,
{
    /// Trivial `AsRef<Buf>` implementation for `OwnedBuf<E, O>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    /// let slice = buf.store_unsized("hello world");
    /// let buf = buf.as_ref();
    ///
    /// assert_eq!(buf.load(slice)?, "hello world");
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline]
    fn as_ref(&self) -> &Buf {
        self
    }
}

impl<E, O> AsMut<Buf> for OwnedBuf<E, O>
where
    E: ByteOrder,
    O: Size,
{
    /// Trivial `AsMut<Buf>` implementation for `OwnedBuf<E, O>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use musli_zerocopy::OwnedBuf;
    ///
    /// let mut buf = OwnedBuf::new();
    /// let slice = buf.store_unsized("hello world");
    /// let buf = buf.as_mut();
    ///
    /// buf.load_mut(slice)?.make_ascii_uppercase();
    /// assert_eq!(buf.load(slice)?, "HELLO WORLD");
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```
    #[inline]
    fn as_mut(&mut self) -> &mut Buf {
        self
    }
}

impl<E, O> Borrow<Buf> for OwnedBuf<E, O>
where
    E: ByteOrder,
    O: Size,
{
    #[inline]
    fn borrow(&self) -> &Buf {
        self.as_ref()
    }
}

/// Clone the [`OwnedBuf`].
///
/// While this causes another allocation, it doesn't ensure that the returned
/// buffer has the [`requested()`] alignment. To achieve this prefer using
/// [`align_in_place()`].
///
/// [`requested()`]: Self::requested()
/// [`align_in_place()`]: Self::align_in_place
///
/// # Examples
///
/// ```
/// use std::mem::align_of;
///
/// use musli_zerocopy::{endian, OwnedBuf};
///
/// assert_ne!(align_of::<u16>(), align_of::<u32>());
///
/// let mut buf = OwnedBuf::<endian::Native, u32>::with_capacity_and_alignment::<u16>(32);
/// buf.extend_from_slice(&[1, 2, 3, 4]);
/// buf.request_align::<u32>();
///
/// let buf2 = buf.clone();
/// assert!(buf2.alignment() >= align_of::<u16>());
///
/// buf.align_in_place();
/// assert!(buf.alignment() >= align_of::<u32>());
/// ```
impl<E, O> Clone for OwnedBuf<E, O>
where
    E: ByteOrder,
    O: Size,
{
    fn clone(&self) -> Self {
        unsafe {
            let mut new = ManuallyDrop::new(Self::with_capacity_and_custom_alignment(
                self.len, self.align,
            ));
            new.as_mut_ptr()
                .copy_from_nonoverlapping(self.as_ptr(), self.len);
            // Set requested to the same as original.
            new.requested = self.requested;
            new.len = self.len;
            ManuallyDrop::into_inner(new)
        }
    }
}

impl<E, O> Drop for OwnedBuf<E, O>
where
    E: ByteOrder,
    O: Size,
{
    fn drop(&mut self) {
        unsafe {
            if self.capacity != 0 {
                // SAFETY: This is guaranteed to be valid per the construction
                // of this type.
                let layout = Layout::from_size_align_unchecked(self.capacity, self.align);
                alloc::dealloc(self.data.as_ptr(), layout);
            }
        }
    }
}

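// Construct a dangling, well-aligned pointer for a zero-capacity buffer. The
// address equals `align`, which callers guarantee is a power of two.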
const unsafe fn dangling(align: usize) -> NonNull<u8> {
    NonNull::new_unchecked(invalid_mut(align))
}

// Replace with `core::ptr::invalid_mut` once stable.
#[allow(clippy::useless_transmute)]
const fn invalid_mut<T>(addr: usize) -> *mut T {
    // FIXME(strict_provenance_magic): I am magic and should be a compiler
    // intrinsic. We use transmute rather than a cast so tools like Miri can
    // tell that this is *not* the same as from_exposed_addr. SAFETY: every
    // valid integer is also a valid pointer (as long as you don't dereference
    // that pointer).
    unsafe { core::mem::transmute(addr) }
}

impl<E, O> StoreBuf for OwnedBuf<E, O>
where
    E: ByteOrder,
    O: Size,
{
    type ByteOrder = E;
    type Size = O;

    #[inline]
    fn len(&self) -> usize {
        OwnedBuf::len(self)
    }

    #[inline]
    fn truncate(&mut self, len: usize) {
        if self.len > len {
            self.len = len;
        }
    }

    #[inline]
    fn store_unsized<T>(&mut self, value: &T) -> Ref<T, Self::ByteOrder, Self::Size>
    where
        T: ?Sized + UnsizedZeroCopy,
    {
        OwnedBuf::store_unsized(self, value)
    }

    #[inline]
    fn store<T>(&mut self, value: &T) -> Ref<T, Self::ByteOrder, Self::Size>
    where
        T: ZeroCopy,
    {
        OwnedBuf::store(self, value)
    }

    #[inline]
    fn swap<T>(
        &mut self,
        a: Ref<T, Self::ByteOrder, Self::Size>,
        b: Ref<T, Self::ByteOrder, Self::Size>,
    ) -> Result<(), Error>
    where
        T: ZeroCopy,
    {
        Buf::swap(self, a, b)
    }

    #[inline]
    fn align_in_place(&mut self) {
        OwnedBuf::align_in_place(self);
    }

    #[inline]
    fn next_offset<T>(&mut self) -> usize {
        OwnedBuf::next_offset::<T>(self)
    }

    #[inline]
    fn next_offset_with_and_reserve(&mut self, align: usize, reserve: usize) {
        OwnedBuf::next_offset_with_and_reserve(self, align, reserve)
    }

    #[inline]
    fn fill(&mut self, byte: u8, len: usize) {
        OwnedBuf::fill(self, byte, len);
    }

    #[inline]
    fn get<I>(&self, index: I) -> Option<&I::Output>
    where
        I: SliceIndex<[u8]>,
    {
        Buf::get(self, index)
    }

    #[inline]
    fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
    where
        I: SliceIndex<[u8]>,
    {
        Buf::get_mut(self, index)
    }

    #[inline]
    fn as_buf(&self) -> &Buf {
        self
    }

    #[inline]
    fn as_mut_buf(&mut self) -> &mut Buf {
        self
    }
}

#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
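/// Writing through [`std::io::Write`] appends the bytes to the buffer, just
/// like [`OwnedBuf::extend_from_slice`] does.
///
/// # Examples
///
/// ```
/// use std::io::Write;
///
/// use musli_zerocopy::OwnedBuf;
///
/// let mut buf = OwnedBuf::new();
/// buf.write_all(b"hello world")?;
/// assert_eq!(buf.as_slice(), b"hello world");
/// # Ok::<_, std::io::Error>(())
/// ```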
impl io::Write for OwnedBuf {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.extend_from_slice(buf);
        Ok(buf.len())
    }

    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}