musli_zerocopy/buf/owned_buf.rs

1use core::alloc::Layout;
2use core::borrow::Borrow;
3use core::marker::PhantomData;
4use core::mem::{ManuallyDrop, align_of, size_of, size_of_val};
5use core::ops::Deref;
6use core::ptr::NonNull;
7use core::slice::{self, SliceIndex};
8
9#[cfg(feature = "std")]
10use std::io;
11
12use alloc::alloc;
13
14use crate::buf::{self, Buf, DefaultAlignment, Padder, StoreBuf};
15use crate::endian::{ByteOrder, Native};
16use crate::error::Error;
17use crate::mem::MaybeUninit;
18use crate::pointer::{DefaultSize, Ref, Size};
19use crate::traits::{UnsizedZeroCopy, ZeroCopy};
20
21/// An allocating buffer with dynamic alignment.
22///
23/// By default this buffer starts out having the same alignment as `usize`,
24/// making it platform specific. But this alignment can grow on demand to match
25/// the types being stored in it.
26///
27/// # Examples
28///
29/// ```
30/// use musli_zerocopy::{OwnedBuf, ZeroCopy};
31///
32/// #[derive(ZeroCopy)]
33/// #[repr(C, align(128))]
34/// struct Custom { field: u32 }
35///
36/// let mut buf = OwnedBuf::new();
37/// buf.store(&Custom { field: 10 });
38/// ```
39pub struct OwnedBuf<E = Native, O = DefaultSize>
40where
41    E: ByteOrder,
42    O: Size,
43{
44    data: NonNull<u8>,
45    /// The initialized length of the buffer.
46    len: usize,
47    /// The capacity of the buffer.
48    capacity: usize,
49    /// The requested alignment.
50    requested: usize,
51    /// The current alignment.
52    align: usize,
53    /// Holding onto the current pointer size.
54    _marker: PhantomData<(E, O)>,
55}
56
57impl Default for OwnedBuf {
58    #[inline]
59    fn default() -> Self {
60        Self::new()
61    }
62}
63
64impl OwnedBuf {
65    /// Construct a new empty buffer with the default alignment.
66    ///
67    /// The default alignment is guaranteed to be larger than 0.
68    ///
69    /// # Examples
70    ///
71    /// ```
72    /// use musli_zerocopy::OwnedBuf;
73    ///
74    /// let buf = OwnedBuf::new();
75    /// assert!(buf.is_empty());
76    /// assert!(buf.alignment() > 0);
77    /// assert!(buf.alignment() >= buf.requested());
78    /// ```
79    pub const fn new() -> Self {
80        Self::with_alignment::<DefaultAlignment>()
81    }
82
83    /// Allocate a new buffer with the given capacity and default alignment.
84    ///
85    /// The buffer will allocate space for at least the given `capacity`, but might
86    /// allocate more. If the capacity specified is `0` it will not allocate.
87    ///
88    /// # Panics
89    ///
90    /// Panics if the specified capacity and memory layout are illegal, which
91    /// happens if:
92    /// * The alignment is not a power of two.
93    /// * The specified capacity causes the needed memory to overflow
94    ///   `isize::MAX`.
95    ///
96    /// ```should_panic
97    /// use std::mem::align_of;
98    ///
99    /// use musli_zerocopy::{endian, DefaultAlignment, OwnedBuf};
100    ///
101    /// let max = isize::MAX as usize - (align_of::<DefaultAlignment>() - 1);
102    /// OwnedBuf::<endian::Native, u32>::with_capacity(max);
103    /// ```
104    ///
105    /// # Examples
106    ///
107    /// ```
108    /// use musli_zerocopy::OwnedBuf;
109    ///
110    /// let buf = OwnedBuf::with_capacity(6);
111    /// assert!(buf.capacity() >= 6);
112    /// ```
113    pub fn with_capacity(capacity: usize) -> Self {
114        Self::with_capacity_and_alignment::<DefaultAlignment>(capacity)
115    }
116
117    /// Construct a new empty buffer with an alignment matching that of `T`.
118    ///
119    /// # Examples
120    ///
121    /// ```
122    /// use musli_zerocopy::OwnedBuf;
123    ///
124    /// let buf = OwnedBuf::with_alignment::<u64>();
125    /// assert!(buf.is_empty());
126    /// assert!(buf.alignment() >= 8);
127    /// assert_eq!(buf.requested(), 8);
128    /// ```
129    pub const fn with_alignment<T>() -> Self {
130        let align = align_of::<T>();
131
132        Self {
133            // SAFETY: Alignment is asserted through `T`.
134            data: unsafe { dangling(align) },
135            len: 0,
136            capacity: 0,
137            requested: align,
138            align,
139            _marker: PhantomData,
140        }
141    }
142
143    /// Allocate a new buffer with the given `capacity` and an alignment
144    /// matching that of `T`.
145    ///
146    /// The buffer will allocate space for at least the given `capacity`, but might
147    /// allocate more. If the capacity specified is `0` it will not allocate.
148    ///
149    /// # Panics
150    ///
151    /// Panics if the specified capacity and memory layout are illegal, which
152    /// happens if:
153    /// * The alignment is not a power of two.
154    /// * The specified capacity causes the needed memory to overflow
155    ///   `isize::MAX`.
156    ///
157    /// ```should_panic
158    /// use musli_zerocopy::OwnedBuf;
159    ///
160    /// let max = isize::MAX as usize - (8 - 1);
161    /// OwnedBuf::with_capacity_and_alignment::<u64>(max);
162    /// ```
163    ///
164    /// # Examples
165    ///
166    /// ```
167    /// use musli_zerocopy::OwnedBuf;
168    ///
169    /// let buf = OwnedBuf::with_capacity_and_alignment::<u16>(6);
170    /// assert!(buf.capacity() >= 6);
171    /// assert!(buf.alignment() >= 2);
172    /// ```
173    pub fn with_capacity_and_alignment<T>(capacity: usize) -> Self {
174        // SAFETY: Alignment of `T` is always a power of two.
175        unsafe { Self::with_capacity_and_custom_alignment(capacity, align_of::<T>()) }
176    }
177}
178
179impl<E, O> OwnedBuf<E, O>
180where
181    E: ByteOrder,
182    O: Size,
183{
184    /// Modify the buffer to utilize the specified pointer size when inserting
185    /// references.
186    ///
187    /// # Examples
188    ///
189    /// ```
190    /// use musli_zerocopy::OwnedBuf;
191    ///
192    /// let mut buf = OwnedBuf::with_capacity(1024)
193    ///     .with_size::<u8>();
194    /// ```
195    #[inline]
196    pub fn with_size<U: Size>(self) -> OwnedBuf<E, U> {
197        let this = ManuallyDrop::new(self);
198
199        OwnedBuf {
200            data: this.data,
201            len: this.len,
202            capacity: this.capacity,
203            requested: this.requested,
204            align: this.align,
205            _marker: PhantomData,
206        }
207    }
208
209    /// Modify the buffer to utilize the specified byte order when inserting
210    /// references.
211    ///
212    /// # Examples
213    ///
214    /// ```
215    /// use musli_zerocopy::{endian, OwnedBuf};
216    ///
217    /// let mut buf = OwnedBuf::with_capacity(1024)
218    ///     .with_byte_order::<endian::Little>();
219    /// ```
220    #[inline]
221    pub fn with_byte_order<U>(self) -> OwnedBuf<U, O>
222    where
223        U: ByteOrder,
224    {
225        let this = ManuallyDrop::new(self);
226
227        OwnedBuf {
228            data: this.data,
229            len: this.len,
230            capacity: this.capacity,
231            requested: this.requested,
232            align: this.align,
233            _marker: PhantomData,
234        }
235    }
236
237    // # Safety
238    //
239    // The specified alignment must be a power of two.
240    pub(crate) unsafe fn with_capacity_and_custom_alignment(capacity: usize, align: usize) -> Self
241    {
242        if capacity == 0 {
243            return Self {
244                // SAFETY: The caller guarantees that `align` is a power of two.
245                data: unsafe { dangling(align) },
246                len: 0,
247                capacity: 0,
248                requested: align,
249                align,
250                _marker: PhantomData,
251            };
252        }
253
254        let layout = Layout::from_size_align(capacity, align).expect("Illegal memory layout");
255
256        unsafe {
257            let data = alloc::alloc(layout);
258
259            if data.is_null() {
260                alloc::handle_alloc_error(layout);
261            }
262
263            Self {
264                data: NonNull::new_unchecked(data),
265                len: 0,
266                capacity,
267                requested: align,
268                align,
269                _marker: PhantomData,
270            }
271        }
272    }
273
274    /// Get the current length of the buffer.
275    ///
276    /// # Examples
277    ///
278    /// ```
279    /// use musli_zerocopy::OwnedBuf;
280    ///
281    /// let buf = OwnedBuf::new();
282    /// assert_eq!(buf.len(), 0);
283    /// ```
284    #[inline]
285    pub fn len(&self) -> usize {
286        self.len
287    }
288
289    /// Clear the current buffer.
290    ///
291    /// This won't cause any reallocations.
292    ///
293    /// # Examples
294    ///
295    /// ```
296    /// use musli_zerocopy::OwnedBuf;
297    ///
298    /// let mut buf = OwnedBuf::new();
299    /// assert_eq!(buf.capacity(), 0);
300    /// buf.extend_from_slice(&[1, 2, 3, 4]);
301    ///
302    /// assert_eq!(buf.len(), 4);
303    /// buf.clear();
304    /// assert!(buf.capacity() > 0);
305    /// assert_eq!(buf.len(), 0);
306    /// ```
307    #[inline]
308    pub fn clear(&mut self) {
309        self.len = 0;
310    }
311
312    /// Test if the buffer is empty.
313    ///
314    /// # Examples
315    ///
316    /// ```
317    /// use musli_zerocopy::OwnedBuf;
318    ///
319    /// let buf = OwnedBuf::new();
320    /// assert!(buf.is_empty());
321    /// ```
322    #[inline]
323    pub fn is_empty(&self) -> bool {
324        self.len == 0
325    }
326
327    /// Get the current capacity of the buffer.
328    ///
329    /// # Examples
330    ///
331    /// ```
332    /// use musli_zerocopy::OwnedBuf;
333    ///
334    /// let buf = OwnedBuf::new();
335    /// assert_eq!(buf.capacity(), 0);
336    /// ```
337    #[inline]
338    pub fn capacity(&self) -> usize {
339        self.capacity
340    }
341
342    /// Return the requested alignment of the buffer.
343    ///
344    /// # Examples
345    ///
346    /// ```
347    /// use musli_zerocopy::OwnedBuf;
348    ///
349    /// let buf = OwnedBuf::with_alignment::<u64>();
350    /// assert!(buf.is_empty());
351    /// assert!(buf.alignment() >= 8);
352    /// assert_eq!(buf.requested(), 8);
353    /// ```
354    #[inline]
355    pub fn requested(&self) -> usize {
356        self.requested
357    }
358
359    /// Reserve capacity for at least `capacity` more bytes in this buffer.
360    ///
361    /// # Examples
362    ///
363    /// ```
364    /// use musli_zerocopy::OwnedBuf;
365    ///
366    /// let mut buf = OwnedBuf::new();
367    /// assert_eq!(buf.capacity(), 0);
368    ///
369    /// buf.reserve(10);
370    /// assert!(buf.capacity() >= 10);
371    /// ```
372    #[inline]
373    pub fn reserve(&mut self, capacity: usize) {
374        let new_capacity = self.len + capacity;
375        self.ensure_capacity(new_capacity);
376    }
377
378    /// Advance the length of the owned buffer by `size`.
379    ///
380    /// # Safety
381    ///
382    /// The caller must ensure that the bytes up until `len() + size` have been
383    /// initialized in this buffer.
384    #[inline]
385    pub unsafe fn advance(&mut self, size: usize) {
386        self.len += size;
387    }
388
389    /// Get a raw pointer to the current buffer.
390    #[inline]
391    pub(crate) fn as_ptr(&self) -> *const u8 {
392        self.data.as_ptr() as *const _
393    }
394
395    /// Get a raw mutable pointer to the current buffer.
396    #[inline]
397    pub(crate) fn as_mut_ptr(&mut self) -> *mut u8 {
398        self.data.as_ptr()
399    }
400
401    /// Get a `NonNull` pointer to the current buffer.
402    #[inline]
403    #[cfg(test)]
404    pub(crate) fn as_nonnull(&mut self) -> NonNull<u8> {
405        self.data
406    }
407
408    /// Extract a slice containing the entire buffer.
409    ///
410    /// # Examples
411    ///
412    /// ```
413    /// use musli_zerocopy::OwnedBuf;
414    ///
415    /// let mut buf = OwnedBuf::new();
416    /// buf.extend_from_slice(b"hello world");
417    /// assert_eq!(buf.as_slice(), b"hello world");
418    /// ```
419    #[inline]
420    pub fn as_slice(&self) -> &[u8] {
421        unsafe { slice::from_raw_parts(self.as_ptr(), self.len()) }
422    }
423
424    /// Extract a mutable slice containing the entire buffer.
425    ///
426    /// # Examples
427    ///
428    /// ```
429    /// use musli_zerocopy::OwnedBuf;
430    ///
431    /// let mut buf = OwnedBuf::new();
432    /// buf.extend_from_slice(b"hello world");
433    /// buf.as_mut_slice().make_ascii_uppercase();
434    /// assert_eq!(buf.as_slice(), b"HELLO WORLD");
435    /// ```
436    #[inline]
437    pub fn as_mut_slice(&mut self) -> &mut [u8] {
438        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len()) }
439    }
440
441    /// Access the buffer mutably.
442    ///
443    /// # Examples
444    ///
445    /// ```
446    /// use musli_zerocopy::OwnedBuf;
447    ///
448    /// let mut buf = OwnedBuf::new();
449    /// let slice = buf.store_unsized("hello world");
450    ///
451    /// // SAFETY: We don't manipulate the underlying buffer in a way which leaves uninitialized data.
452    /// let buf = unsafe { buf.as_mut_buf() };
453    ///
454    /// buf.load_mut(slice)?.make_ascii_uppercase();
455    /// assert_eq!(buf.load(slice)?, "HELLO WORLD");
456    /// # Ok::<_, musli_zerocopy::Error>(())
457    /// ```
458    ///
459    /// # Safety
460    ///
461    /// Since this allows the underlying buffer to be mutated, depending on how
462    /// the buffer is used it might result in undefined bit-patterns, such as
463    /// uninitialized padding bytes, being written to it. The caller must ensure
464    /// this does not happen to the structures that have been written, for
465    /// example by calling [`ZeroCopy::initialize_padding()`] after the contents
466    /// of the buffer have been modified.
467    ///
468    /// See [`Buf::new_mut`] for more information.
469    #[inline]
470    pub unsafe fn as_mut_buf(&mut self) -> &mut Buf {
471        unsafe { Buf::new_mut(self.as_mut_slice()) }
472    }
473
474    /// Store an uninitialized value.
475    ///
476    /// This allows values to be inserted before they can be initialized, which
477    /// can be useful if you need them to be in a certain location in the buffer
478    /// but don't have access to their value yet.
479    ///
480    /// The memory for `T` will be zero-initialized at [`next_offset<T>()`] and
481    /// the length and alignment requirement of `OwnedBuf` updated to reflect
482    /// that an instance of `T` has been stored. But that representation might
483    /// that an instance of `T` has been stored. But the zeroed bytes might not
484    /// be a valid representation of `T`[^non-zero].
485    /// To get the offset where the value will be written, call
486    /// [`next_offset<T>()`] before storing the value.
487    ///
488    /// > **Note:** this does not return [`std::mem::MaybeUninit`], instead we
489    /// > use an internal [`MaybeUninit`] which is similar but has different
490    /// > properties. See [its documentation][MaybeUninit] for more.
491    ///
492    /// [`next_offset<T>()`]: Self::next_offset()
493    /// [^non-zero]: Like with [`NonZero*`][core::num] types.
494    ///
495    /// # Examples
496    ///
497    /// ```
498    /// use musli_zerocopy::mem::MaybeUninit;
499    /// use musli_zerocopy::{OwnedBuf, Ref, ZeroCopy};
500    ///
501    /// #[derive(ZeroCopy)]
502    /// #[repr(C)]
503    /// struct Custom { field: u32, string: Ref<str> }
504    ///
505    /// let mut buf = OwnedBuf::new();
506    /// let reference: Ref<MaybeUninit<Custom>> = buf.store_uninit::<Custom>();
507    ///
508    /// let string = buf.store_unsized("Hello World!");
509    ///
510    /// buf.load_uninit_mut(reference).write(&Custom { field: 42, string });
511    ///
512    /// let reference = reference.assume_init();
513    /// assert_eq!(reference.offset(), 0);
514    /// # Ok::<_, musli_zerocopy::Error>(())
515    /// ```
516    #[inline]
517    pub fn store_uninit<T>(&mut self) -> Ref<MaybeUninit<T>, E, O>
518    where
519        T: ZeroCopy,
520    {
521        // SAFETY: We've just reserved capacity for this write.
522        unsafe {
523            self.next_offset_with_and_reserve(align_of::<T>(), size_of::<T>());
524            let offset = self.len;
525            self.data
526                .as_ptr()
527                .add(self.len)
528                .write_bytes(0, size_of::<T>());
529            self.len += size_of::<T>();
530            Ref::new(offset)
531        }
532    }
533
534    /// Load a mutable reference to a value which might not have been initialized.
535    ///
536    /// This does not prevent a [`Ref`] constructed from a different instance of
537    /// [`OwnedBuf`] from being used. Doing so would only result in garbled data,
538    /// but it wouldn't be a safety concern.
539    ///
540    /// > **Note:** this does not return [`std::mem::MaybeUninit`], instead we
541    /// > use an internal [`MaybeUninit`] which is similar but has different
542    /// > properties. See [its documentation][MaybeUninit] for more.
543    ///
544    /// # Panics
545    ///
546    /// Panics if the reference [`Ref::offset()`] plus the size of `T` does not
547    /// fit within the [`len()`] of the current buffer. This might happen if you
548    /// try to use a reference constructed from a different [`OwnedBuf`]
549    /// instance.
550    ///
551    /// [`len()`]: Self::len()
552    ///
553    /// ```should_panic
554    /// use musli_zerocopy::OwnedBuf;
555    ///
556    /// let mut buf1 = OwnedBuf::new();
557    /// buf1.store(&1u32);
558    ///
559    /// let mut buf2 = OwnedBuf::new();
560    /// buf2.store(&10u32);
561    ///
562    /// let number = buf2.store_uninit::<u32>();
563    ///
564    /// buf1.load_uninit_mut(number);
565    /// ```
566    ///
567    /// # Examples
568    ///
569    /// ```
570    /// use musli_zerocopy::{OwnedBuf, Ref, ZeroCopy};
571    /// use musli_zerocopy::mem::MaybeUninit;
572    ///
573    /// #[derive(ZeroCopy)]
574    /// #[repr(C)]
575    /// struct Custom { field: u32, string: Ref<str> }
576    ///
577    /// let mut buf = OwnedBuf::new();
578    /// let reference: Ref<MaybeUninit<Custom>> = buf.store_uninit::<Custom>();
579    ///
580    /// let string = buf.store_unsized("Hello World!");
581    ///
582    /// buf.load_uninit_mut(reference).write(&Custom { field: 42, string });
583    ///
584    /// let reference = reference.assume_init();
585    /// assert_eq!(reference.offset(), 0);
586    /// # Ok::<_, musli_zerocopy::Error>(())
587    /// ```
588    #[inline]
589    pub fn load_uninit_mut<T, U, I>(
590        &mut self,
591        reference: Ref<MaybeUninit<T>, U, I>,
592    ) -> &mut MaybeUninit<T>
593    where
594        T: ZeroCopy,
595        U: ByteOrder,
596        I: Size,
597    {
598        let at = reference.offset();
599
600        // Note: We only need this as a debug assertion, because `MaybeUninit<T>`
601        // does not implement `ZeroCopy`, so there is no way to construct one.
602        assert!(at + size_of::<T>() <= self.len, "Length overflow");
603
604        // SAFETY: `MaybeUninit<T>` has no representation requirements and is
605        // unaligned.
606        unsafe { &mut *(self.data.as_ptr().add(at) as *mut MaybeUninit<T>) }
607    }
608
609    /// Insert a value with the given size.
610    ///
611    /// The memory for `T` will be initialized at [`next_offset<T>()`] and the
612    /// length and alignment requirement of `OwnedBuf` updated to reflect that
613    /// an instance of `T` has been stored.
614    ///
615    /// To get the offset where the value will be written, call
616    /// [`next_offset<T>()`] before storing the value or access the offset
617    /// through the [`Ref::offset`] being returned.
618    ///
619    /// [`next_offset<T>()`]: Self::next_offset
620    ///
621    /// # Examples
622    ///
623    /// ```
624    /// use musli_zerocopy::{OwnedBuf, Ref, ZeroCopy};
625    ///
626    /// #[derive(ZeroCopy)]
627    /// #[repr(C)]
628    /// struct Custom { field: u32, string: Ref<str> }
629    ///
630    /// let mut buf = OwnedBuf::new();
631    ///
632    /// let string = buf.store_unsized("string");
633    /// let custom = buf.store(&Custom { field: 1, string });
634    /// let custom2 = buf.store(&Custom { field: 2, string });
635    ///
636    /// let custom = buf.load(custom)?;
637    /// assert_eq!(custom.field, 1);
638    /// assert_eq!(buf.load(custom.string)?, "string");
639    ///
640    /// let custom2 = buf.load(custom2)?;
641    /// assert_eq!(custom2.field, 2);
642    /// assert_eq!(buf.load(custom2.string)?, "string");
643    /// # Ok::<_, musli_zerocopy::Error>(())
644    /// ```
645    ///
646    /// Storing an array:
647    ///
649    /// ```
650    /// use musli_zerocopy::{ZeroCopy, OwnedBuf};
651    ///
652    /// // Element with padding.
653    /// #[derive(Debug, PartialEq, ZeroCopy)]
654    /// #[repr(C)]
655    /// struct Element {
656    ///     first: u8,
657    ///     second: u32,
658    /// }
659    ///
660    /// let values = [
661    ///     Element { first: 0x01, second: 0x01020304u32 },
662    ///     Element { first: 0x02, second: 0x01020304u32 }
663    /// ];
664    ///
665    /// let mut buf = OwnedBuf::new();
666    /// let array = buf.store(&values);
667    /// assert_eq!(buf.load(array)?, &values);
668    /// # Ok::<_, musli_zerocopy::Error>(())
669    /// ```
670    #[inline]
671    pub fn store<T>(&mut self, value: &T) -> Ref<T, E, O>
672    where
673        T: ZeroCopy,
674    {
675        self.next_offset_with_and_reserve(align_of::<T>(), size_of::<T>());
676
677        // SAFETY: We're ensuring to both align the internal buffer and store
678        // the value.
679        unsafe { self.store_unchecked(value) }
680    }
681
682    /// Insert a value with the given size without ensuring that the buffer has
683    /// reserved capacity for it or that it is properly aligned.
684    ///
685    /// This is a low level API which is tricky to use correctly. The
686    /// recommended way to use this is through [`OwnedBuf::store`].
687    ///
688    /// [`OwnedBuf::store`]: Self::store
689    ///
690    /// # Safety
691    ///
692    /// The caller has to ensure that the buffer has the required capacity for
693    /// `T` and is properly aligned. This can easily be accomplished by calling
694    /// [`request_align::<T>()`] followed by [`align_in_place()`] before calling
695    /// this function. A safe variant of this function is [`OwnedBuf::store`].
696    ///
697    /// [`align_in_place()`]: Self::align_in_place
698    /// [`OwnedBuf::store`]: Self::store
699    /// [`request_align::<T>()`]: Self::request_align
700    ///
701    /// # Examples
702    ///
703    /// ```
704    /// use std::mem::size_of;
705    ///
706    /// use musli_zerocopy::{OwnedBuf, Ref, ZeroCopy};
707    ///
708    /// #[derive(ZeroCopy)]
709    /// #[repr(C, align(4096))]
710    /// struct Custom { field: u32, string: Ref<str> }
711    ///
712    /// let mut buf = OwnedBuf::new();
713    ///
714    /// let string = buf.store_unsized("string");
715    ///
716    /// buf.request_align::<Custom>();
717    /// buf.reserve(2 * size_of::<Custom>());
718    /// buf.align_in_place();
719    ///
720    /// // SAFETY: We've ensured just above that the buffer is aligned and has capacity.
721    /// let custom = unsafe { buf.store_unchecked(&Custom { field: 1, string }) };
722    /// let custom2 = unsafe { buf.store_unchecked(&Custom { field: 2, string }) };
723    ///
724    /// let custom = buf.load(custom)?;
725    /// assert_eq!(custom.field, 1);
726    /// assert_eq!(buf.load(custom.string)?, "string");
727    ///
728    /// let custom2 = buf.load(custom2)?;
729    /// assert_eq!(custom2.field, 2);
730    /// assert_eq!(buf.load(custom2.string)?, "string");
731    /// # Ok::<_, musli_zerocopy::Error>(())
732    /// ```
733    #[inline]
734    pub unsafe fn store_unchecked<T>(&mut self, value: &T) -> Ref<T, E, O>
735    where
736        T: ZeroCopy,
737    {
738        let offset = self.len;
739
740        unsafe {
741            let ptr = NonNull::new_unchecked(self.data.as_ptr().add(offset));
742            buf::store_unaligned(ptr, value);
743            self.len += size_of::<T>();
744        }
745
746        Ref::new(offset)
747    }
748
749    /// Store an unsized value such as a string or a slice in the buffer.
750    ///
751    /// ```
752    /// use musli_zerocopy::OwnedBuf;
753    ///
754    /// let mut buf = OwnedBuf::new();
755    ///
756    /// let first = buf.store_unsized("first");
757    /// let second = buf.store_unsized("second");
758    ///
759    /// dbg!(first, second);
760    ///
761    /// assert_eq!(buf.load(first)?, "first");
762    /// assert_eq!(buf.load(second)?, "second");
763    /// # Ok::<_, musli_zerocopy::Error>(())
764    /// ```
765    #[inline]
766    pub fn store_unsized<T>(&mut self, value: &T) -> Ref<T, E, O>
767    where
768        T: ?Sized + UnsizedZeroCopy,
769    {
770        unsafe {
771            let size = size_of_val(value);
772            self.next_offset_with_and_reserve(T::ALIGN, size);
773            let offset = self.len;
774            let ptr = NonNull::new_unchecked(self.data.as_ptr().add(offset));
775            ptr.as_ptr().copy_from_nonoverlapping(value.as_ptr(), size);
776
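            // If `T` contains padding, run the padder over the bytes we just
            // copied so that the padding regions are initialized and no
            // undefined bytes are exposed through the buffer.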
777            if T::PADDED {
778                let mut padder = Padder::new(ptr);
779                value.pad(&mut padder);
780                padder.remaining_unsized(value);
781            }
782
783            self.len += size;
784            Ref::with_metadata(offset, value.metadata())
785        }
786    }
787
788    /// Insert a slice into the buffer.
789    ///
790    /// # Examples
791    ///
792    /// ```
793    /// use musli_zerocopy::OwnedBuf;
794    ///
795    /// let mut buf = OwnedBuf::new();
796    ///
797    /// let mut values = Vec::new();
798    ///
799    /// values.push(buf.store_unsized("first"));
800    /// values.push(buf.store_unsized("second"));
801    ///
802    /// let slice_ref = buf.store_slice(&values);
803    ///
804    /// let slice = buf.load(slice_ref)?;
805    ///
806    /// let mut strings = Vec::new();
807    ///
808    /// for value in slice {
809    ///     strings.push(buf.load(*value)?);
810    /// }
811    ///
812    /// assert_eq!(&strings, &["first", "second"][..]);
813    /// # Ok::<_, musli_zerocopy::Error>(())
814    /// ```
815    #[inline(always)]
816    pub fn store_slice<T>(&mut self, values: &[T]) -> Ref<[T], E, O>
817    where
818        T: ZeroCopy,
819    {
820        self.store_unsized(values)
821    }
822
823    /// Extend the buffer from a slice.
824    ///
825    /// Note that this only extends the underlying buffer and does not ensure
826    /// that any required alignment is respected.
827    ///
828    /// To do this, the caller must call [`request_align()`] with the appropriate
829    /// alignment, otherwise the necessary alignment to decode the buffer again
830    /// will be lost.
831    ///
832    /// [`request_align()`]: Self::request_align
833    ///
834    /// # Errors
835    ///
836    /// This is a raw API, and does not guarantee that any given alignment will
837    /// be respected. The following exemplifies incorrect use since the `u32` type
838    /// requires a 4-byte alignment:
839    ///
840    /// ```
841    /// use musli_zerocopy::{OwnedBuf, Ref};
842    ///
843    /// let mut buf = OwnedBuf::with_alignment::<u32>();
844    ///
845    /// // Add one byte of padding to throw off any incidental alignment.
846    /// buf.extend_from_slice(&[1]);
847    ///
848    /// let ptr: Ref<u32> = Ref::new(buf.next_offset::<u8>());
849    /// buf.extend_from_slice(&[1, 2, 3, 4]);
850    ///
851    /// // This will succeed because the buffer follows its interior alignment:
852    /// let buf = buf.as_ref();
853    ///
854    /// // This will fail, because the reference is not aligned for `u32`.
855    /// assert!(buf.load(ptr).is_err());
856    /// # Ok::<_, musli_zerocopy::Error>(())
857    /// ```
858    ///
859    /// # Examples
860    ///
861    /// ```
862    /// use musli_zerocopy::{OwnedBuf, Ref};
863    ///
864    /// let mut buf = OwnedBuf::with_alignment::<()>();
865    ///
866    /// // Add one byte of padding to throw off any incidental alignment.
867    /// buf.extend_from_slice(&[1]);
868    ///
869    /// let ptr: Ref<u32> = Ref::new(buf.next_offset::<u32>());
870    /// buf.extend_from_slice(&[1, 2, 3, 4]);
871    ///
872    /// // This will succeed because the buffer follows its interior alignment:
873    /// let buf = buf.as_ref();
874    ///
875    /// assert_eq!(*buf.load(ptr)?, u32::from_ne_bytes([1, 2, 3, 4]));
876    /// # Ok::<_, musli_zerocopy::Error>(())
877    /// ```
878    pub fn extend_from_slice(&mut self, bytes: &[u8]) {
879        self.reserve(bytes.len());
880
881        // SAFETY: We just allocated space for the slice.
882        unsafe {
883            self.store_bytes(bytes);
884        }
885    }
886
887    /// Extend the buffer with `len` bytes initialized to `byte`.
888    pub(crate) fn fill(&mut self, byte: u8, len: usize) {
889        self.reserve(len);
890
891        unsafe {
892            let ptr = self.data.as_ptr().add(self.len);
893            ptr.write_bytes(byte, len);
894            self.len += len;
895        }
896    }
897
898    /// Store the slice without allocating.
899    ///
900    /// # Safety
901    ///
902    /// The caller must ensure that the buffer has the capacity for
903    /// `size_of_val(values)` bytes and that `T` is not padded as per
904    /// `ZeroCopy::PADDED`.
905    #[inline]
906    pub(crate) unsafe fn store_bytes<T>(&mut self, values: &[T])
907    where
908        T: ZeroCopy,
909    {
910        unsafe {
911            let dst = self.as_mut_ptr().add(self.len);
912            dst.copy_from_nonoverlapping(values.as_ptr().cast(), size_of_val(values));
913            self.len += size_of_val(values);
914        }
915    }
916
917    /// Align a buffer in place if necessary.
918    ///
919    /// If [`requested()`] does not match the current [`alignment()`], this will
920    /// cause the buffer to be reallocated.
921    ///
922    /// [`requested()`]: Self::requested
923    /// [`alignment()`]: Buf::alignment
924    /// [`as_ref`]: Self::as_ref
925    ///
926    /// # Examples
927    ///
928    /// A buffer has to be aligned in order for `load` calls to succeed
929    /// without errors.
930    ///
931    /// ```
932    /// use musli_zerocopy::OwnedBuf;
933    ///
934    /// let mut buf = OwnedBuf::with_alignment::<()>();
935    /// let number = buf.store(&1u32);
936    ///
937    /// buf.align_in_place();
938    ///
939    /// assert_eq!(buf.load(number)?, &1u32);
940    /// # Ok::<_, musli_zerocopy::Error>(())
941    /// ```
942    ///
943    /// Example using a mutable buffer. A buffer has to be aligned in order
944    /// for `load` and `load_mut` calls to succeed without errors.
945    ///
946    /// ```
947    /// use musli_zerocopy::OwnedBuf;
948    ///
949    /// let mut buf = OwnedBuf::with_alignment::<()>();
950    /// let number = buf.store(&1u32);
951    ///
952    /// buf.align_in_place();
953    ///
954    /// // SAFETY: We're not writing data in a way which leaves uninitialized regions.
955    /// unsafe {
956    ///     *buf.as_mut_buf().load_mut(number)? += 1;
957    /// }
958    /// assert_eq!(buf.load(number)?, &2u32);
959    /// # Ok::<_, musli_zerocopy::Error>(())
960    /// ```
961    #[inline]
962    pub fn align_in_place(&mut self) {
963        // SAFETY: self.requested is guaranteed to be a power of two.
964        if !buf::is_aligned_with(self.as_ptr(), self.requested) {
965            let (old_layout, new_layout) = self.layouts(self.capacity);
966            self.alloc_new(old_layout, new_layout);
967        }
968    }
969
970    /// Request that the current buffer should have at least the specified
971    /// alignment and zero-initialize the buffer up to the next position which
972    /// matches the given alignment.
973    ///
974    /// Note that this does not guarantee that the internal buffer is aligned
975    /// in-memory, to ensure this you can use [`align_in_place()`].
976    ///
977    /// ```
978    /// use musli_zerocopy::OwnedBuf;
979    /// let mut buf = OwnedBuf::new();
980    ///
981    /// buf.extend_from_slice(&[1, 2]);
982    /// buf.request_align::<u32>();
983    ///
984    /// assert_eq!(buf.as_slice(), &[1, 2, 0, 0]);
985    /// ```
986    ///
987    /// Calling this function only causes the underlying buffer to be realigned
988    /// if a reallocation is triggered due to reaching its [`capacity()`].
989    ///
990    /// ```
991    /// use musli_zerocopy::{endian, OwnedBuf};
992    /// let mut buf = OwnedBuf::<endian::Native, u32>::with_capacity_and_alignment::<u16>(32);
993    ///
994    /// buf.extend_from_slice(&[1, 2]);
995    /// assert!(buf.alignment() >= 2);
996    /// buf.request_align::<u32>();
997    ///
998    /// assert_eq!(buf.requested(), 4);
999    /// assert!(buf.alignment() >= 2);
1000    ///
1001    /// buf.extend_from_slice(&[0; 32]);
1002    /// assert_eq!(buf.requested(), 4);
1003    /// assert!(buf.alignment() >= 4);
1004    /// ```
1005    ///
1006    /// [`capacity()`]: Self::capacity
1007    /// [`align_in_place()`]: Self::align_in_place
1008    ///
1009    /// # Safety
1010    /// The alignment of `T` is always a power of two, so no further validation
1011    /// of the requested alignment is needed.
1013    /// # Examples
1014    ///
1015    /// ```
1016    /// use musli_zerocopy::OwnedBuf;
1017    ///
1018    /// let mut buf = OwnedBuf::new();
1019    /// buf.extend_from_slice(&[1, 2, 3, 4]);
1020    /// buf.request_align::<u64>();
1021    /// buf.extend_from_slice(&[5, 6, 7, 8]);
1022    ///
1023    /// assert_eq!(buf.as_slice(), &[1, 2, 3, 4, 0, 0, 0, 0, 5, 6, 7, 8]);
1024    /// ```
1025    #[inline]
1026    pub fn request_align<T>(&mut self)
1027    where
1028        T: ZeroCopy,
1029    {
1030        self.requested = self.requested.max(align_of::<T>());
1031        self.ensure_aligned_and_reserve(align_of::<T>(), size_of::<T>());
1032    }
1033
1034    /// Zero-pad the buffer so that its length is aligned to `align`, and reserve `reserve` additional bytes.
1035    #[inline]
1036    fn ensure_aligned_and_reserve(&mut self, align: usize, reserve: usize) {
1037        let extra = buf::padding_to(self.len, align);
1038        self.reserve(extra + reserve);
1039
1040        // SAFETY: The length is ensured to be within the reserved capacity.
1041        unsafe {
1042            self.data.as_ptr().add(self.len).write_bytes(0, extra);
1043            self.len += extra;
1044        }
1045    }
1046
1047    /// Bump the requested alignment to at least `align`, pad the buffer so the
1048    /// next write is aligned to `align`, and reserve `reserve` additional bytes.
1049    #[inline]
1050    pub(crate) fn next_offset_with_and_reserve(&mut self, align: usize, reserve: usize) {
1051        self.requested = self.requested.max(align);
1052        self.ensure_aligned_and_reserve(align, reserve);
1053    }
1054
1055    /// Construct an offset aligned for `T` into the current buffer which points
1056    /// to the next location that will be written.
1057    ///
1058    /// This ensures that the returned offset is a multiple of the alignment of
1059    /// `T` and that the current buffer has the capacity to store `T`.
1060    ///
1061    /// # Examples
1062    ///
1063    /// ```
1064    /// use musli_zerocopy::{OwnedBuf, Ref};
1065    ///
1066    /// let mut buf = OwnedBuf::new();
1067    ///
1068    /// // Add one byte of padding to throw off any incidental alignment.
1069    /// buf.extend_from_slice(&[1]);
1070    ///
1071    /// let ptr: Ref<u32> = Ref::new(buf.next_offset::<u32>());
1072    /// buf.extend_from_slice(&[1, 2, 3, 4]);
1073    ///
1074    /// // This will succeed because the buffer follows its interior alignment:
1075    /// let buf = buf.as_ref();
1076    ///
1077    /// assert_eq!(*buf.load(ptr)?, u32::from_ne_bytes([1, 2, 3, 4]));
1078    /// # Ok::<_, musli_zerocopy::Error>(())
1079    /// ```
1080    #[inline]
1081    pub fn next_offset<T>(&mut self) -> usize {
1082        // SAFETY: The alignment of `T` is guaranteed to be a power of two. We
1083        // also make sure to reserve space for `T` since it is very likely that
1084        // it will be written immediately after this.
1085        self.next_offset_with_and_reserve(align_of::<T>(), size_of::<T>());
1086        self.len
1087    }
1088
1089    // We never want this call to be inlined, because we take great care to
1090    // ensure that the reallocations we perform publicly happen sparingly, so
1091    // this is a cold path.
1092    #[inline(never)]
1093    fn ensure_capacity(&mut self, new_capacity: usize) {
1094        let new_capacity = new_capacity.max(self.requested);
1095
1096        if self.capacity >= new_capacity {
1097            return;
1098        }
1099
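        // Grow by at least 50% over the current capacity to amortize the cost
        // of repeated reservations; the explicitly requested capacity wins if
        // it is larger.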
1100        let new_capacity = new_capacity.max((self.capacity as f32 * 1.5) as usize);
1101        let (old_layout, new_layout) = self.layouts(new_capacity);
1102
1103        if old_layout.size() == 0 {
1104            self.alloc_init(new_layout);
1105        } else if new_layout.align() == old_layout.align() {
1106            self.alloc_realloc(old_layout, new_layout);
1107        } else {
1108            self.alloc_new(old_layout, new_layout);
1109        }
1110    }
1111
1112    /// Return a pair of the currently allocated layout and the new layout that
1113    /// is requested for the given capacity.
1114    #[inline]
1115    fn layouts(&self, new_capacity: usize) -> (Layout, Layout) {
1116        // SAFETY: The existing layout cannot be invalid since it's either
1117        // checked as it's replacing the old layout, or is initialized with
1118        // known good values.
1119        let old_layout = unsafe { Layout::from_size_align_unchecked(self.capacity, self.align) };
1120        let layout =
1121            Layout::from_size_align(new_capacity, self.requested).expect("Proposed layout invalid");
1122        (old_layout, layout)
1123    }
1124
1125    /// Perform the initial allocation with the given layout.
1126    fn alloc_init(&mut self, new_layout: Layout) {
1127        unsafe {
1128            let ptr = alloc::alloc(new_layout);
1129
1130            if ptr.is_null() {
1131                alloc::handle_alloc_error(new_layout);
1132            }
1133
1134            self.data = NonNull::new_unchecked(ptr);
1135            self.capacity = new_layout.size();
1136            self.align = self.requested;
1137        }
1138    }
1139
1140    /// Reallocate; note that the alignment of the old layout must match the new
1141    /// one.
1142    fn alloc_realloc(&mut self, old_layout: Layout, new_layout: Layout) {
1143        debug_assert_eq!(old_layout.align(), new_layout.align());
1144
1145        unsafe {
1146            let ptr = alloc::realloc(self.as_mut_ptr(), old_layout, new_layout.size());
1147
1148            if ptr.is_null() {
1149                alloc::handle_alloc_error(old_layout);
1150            }
1151
1152            // NB: We may simply forget the old allocation, since `realloc` is
1153            // responsible for freeing it.
1154            self.data = NonNull::new_unchecked(ptr);
1155            self.capacity = new_layout.size();
1156        }
1157    }
1158
1159    /// Perform a new allocation, deallocating the old one in the process.
1160    #[inline(always)]
1161    fn alloc_new(&mut self, old_layout: Layout, new_layout: Layout) {
1162        unsafe {
1163            let ptr = alloc::alloc(new_layout);
1164
1165            if ptr.is_null() {
1166                alloc::handle_alloc_error(new_layout);
1167            }
1168
1169            ptr.copy_from_nonoverlapping(self.as_ptr(), self.len);
1170            alloc::dealloc(self.as_mut_ptr(), old_layout);
1171
1172            // We've deallocated the old pointer.
1173            self.data = NonNull::new_unchecked(ptr);
1174            self.capacity = new_layout.size();
1175            self.align = self.requested;
1176        }
1177    }
1178}
1179
1180/// `OwnedBuf` is `Send` because the data it references is unaliased.
1181unsafe impl Send for OwnedBuf {}
1182/// `OwnedBuf` is `Sync` since it is `Send` and the data it references is
1183/// unaliased.
1184unsafe impl Sync for OwnedBuf {}
1185
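/// Dereference to the read-only [`Buf`] over the initialized part of the
/// buffer.
///
/// This is what makes the read-only [`Buf`] methods such as `load` available
/// directly on an [`OwnedBuf`]. A brief sketch of the pattern, mirroring the
/// examples elsewhere in this module:
///
/// ```
/// use musli_zerocopy::OwnedBuf;
///
/// let mut buf = OwnedBuf::new();
/// let string = buf.store_unsized("hello world");
///
/// // `load` is defined on `Buf` and reached through deref.
/// assert_eq!(buf.load(string)?, "hello world");
/// # Ok::<_, musli_zerocopy::Error>(())
/// ```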
1186impl<E, O> Deref for OwnedBuf<E, O>
1187where
1188    E: ByteOrder,
1189    O: Size,
1190{
1191    type Target = Buf;
1192
1193    #[inline]
1194    fn deref(&self) -> &Self::Target {
1195        Buf::new(self.as_slice())
1196    }
1197}
1198
1199impl<E, O> AsRef<Buf> for OwnedBuf<E, O>
1200where
1201    E: ByteOrder,
1202    O: Size,
1203{
1204    /// Trivial `AsRef<Buf>` implementation for `OwnedBuf<O>`.
1205    ///
1206    /// # Examples
1207    ///
1208    /// ```
1209    /// use musli_zerocopy::OwnedBuf;
1210    ///
1211    /// let mut buf = OwnedBuf::new();
1212    /// let slice = buf.store_unsized("hello world");
1213    /// let buf = buf.as_ref();
1214    ///
1215    /// assert_eq!(buf.load(slice)?, "hello world");
1216    /// # Ok::<_, musli_zerocopy::Error>(())
1217    /// ```
1218    #[inline]
1219    fn as_ref(&self) -> &Buf {
1220        self
1221    }
1222}
1223
1224impl<E, O> Borrow<Buf> for OwnedBuf<E, O>
1225where
1226    E: ByteOrder,
1227    O: Size,
1228{
1229    #[inline]
1230    fn borrow(&self) -> &Buf {
1231        self.as_ref()
1232    }
1233}
1234
1235/// Clone the [`OwnedBuf`].
1236///
1237/// While this performs a new allocation, it doesn't ensure that the returned
1238/// buffer has the [`requested()`] alignment. To achieve that, prefer calling
1239/// [`align_in_place()`].
1240///
1241/// [`requested()`]: Self::requested()
1242/// [`align_in_place()`]: Self::align_in_place
1243///
1244/// # Examples
1245///
1246/// ```
1247/// use std::mem::align_of;
1248///
1249/// use musli_zerocopy::{endian, OwnedBuf};
1250///
1251/// assert_ne!(align_of::<u16>(), align_of::<u32>());
1252///
1253/// let mut buf = OwnedBuf::<endian::Native, u32>::with_capacity_and_alignment::<u16>(32);
1254/// buf.extend_from_slice(&[1, 2, 3, 4]);
1255/// buf.request_align::<u32>();
1256///
1257/// let buf2 = buf.clone();
1258/// assert!(buf2.alignment() >= align_of::<u16>());
1259///
1260/// buf.align_in_place();
1261/// assert!(buf.alignment() >= align_of::<u32>());
1262/// ```
1263impl<E, O> Clone for OwnedBuf<E, O>
1264where
1265    E: ByteOrder,
1266    O: Size,
1267{
1268    fn clone(&self) -> Self {
1269        unsafe {
1270            let mut new = ManuallyDrop::new(Self::with_capacity_and_custom_alignment(
1271                self.len, self.align,
1272            ));
1273            new.as_mut_ptr()
1274                .copy_from_nonoverlapping(self.as_ptr(), self.len);
1275            // Set requested to the same as original.
1276            new.requested = self.requested;
1277            new.len = self.len;
1278            ManuallyDrop::into_inner(new)
1279        }
1280    }
1281}
1282
1283impl<E, O> Drop for OwnedBuf<E, O>
1284where
1285    E: ByteOrder,
1286    O: Size,
1287{
1288    fn drop(&mut self) {
1289        unsafe {
1290            if self.capacity != 0 {
1291                // SAFETY: This is guaranteed to be valid per the construction
1292                // of this type.
1293                let layout = Layout::from_size_align_unchecked(self.capacity, self.align);
1294                alloc::dealloc(self.data.as_ptr(), layout);
1295            }
1296        }
1297    }
1298}
1299
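// # Safety
//
// The caller must ensure that `align` is non-zero so that the resulting
// dangling pointer is never null.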
1300const unsafe fn dangling(align: usize) -> NonNull<u8> {
1301    unsafe { NonNull::new_unchecked(invalid_mut(align)) }
1302}
1303
1304// Replace with `core::ptr::invalid_mut` once stable.
1305#[allow(clippy::useless_transmute)]
1306const fn invalid_mut<T>(addr: usize) -> *mut T {
1307    // FIXME(strict_provenance_magic): I am magic and should be a compiler
1308    // intrinsic. We use transmute rather than a cast so tools like Miri can
1309    // tell that this is *not* the same as from_exposed_addr. SAFETY: every
1310    // valid integer is also a valid pointer (as long as you don't dereference
1311    // that pointer).
1312    unsafe { core::mem::transmute(addr) }
1313}
1314
1315impl<E, O> StoreBuf for OwnedBuf<E, O>
1316where
1317    E: ByteOrder,
1318    O: Size,
1319{
1320    type ByteOrder = E;
1321    type Size = O;
1322
1323    #[inline]
1324    fn len(&self) -> usize {
1325        OwnedBuf::len(self)
1326    }
1327
1328    #[inline]
1329    fn truncate(&mut self, len: usize) {
1330        if self.len > len {
1331            self.len = len;
1332        }
1333    }
1334
1335    #[inline]
1336    fn store_unsized<T>(&mut self, value: &T) -> Ref<T, Self::ByteOrder, Self::Size>
1337    where
1338        T: ?Sized + UnsizedZeroCopy,
1339    {
1340        OwnedBuf::store_unsized(self, value)
1341    }
1342
1343    #[inline]
1344    fn store<T>(&mut self, value: &T) -> Ref<T, Self::ByteOrder, Self::Size>
1345    where
1346        T: ZeroCopy,
1347    {
1348        OwnedBuf::store(self, value)
1349    }
1350
1351    #[inline]
1352    fn swap<T>(
1353        &mut self,
1354        a: Ref<T, Self::ByteOrder, Self::Size>,
1355        b: Ref<T, Self::ByteOrder, Self::Size>,
1356    ) -> Result<(), Error>
1357    where
1358        T: ZeroCopy,
1359    {
1360        // SAFETY: Since we are swapping two locations which have the same type
1361        // `T`, it does not affect the initialized state of the buffer.
1362        let buf = unsafe { self.as_mut_buf() };
1363        Buf::swap(buf, a, b)
1364    }
1365
1366    #[inline]
1367    fn align_in_place(&mut self) {
1368        OwnedBuf::align_in_place(self);
1369    }
1370
1371    #[inline]
1372    fn next_offset<T>(&mut self) -> usize {
1373        OwnedBuf::next_offset::<T>(self)
1374    }
1375
1376    #[inline]
1377    fn next_offset_with_and_reserve(&mut self, align: usize, reserve: usize) {
1378        OwnedBuf::next_offset_with_and_reserve(self, align, reserve)
1379    }
1380
1381    #[inline]
1382    fn fill(&mut self, byte: u8, len: usize) {
1383        OwnedBuf::fill(self, byte, len);
1384    }
1385
1386    #[inline]
1387    fn get<I>(&self, index: I) -> Option<&I::Output>
1388    where
1389        I: SliceIndex<[u8]>,
1390    {
1391        Buf::get(self, index)
1392    }
1393
1394    #[inline]
1395    unsafe fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
1396    where
1397        I: SliceIndex<[u8]>,
1398    {
1399        unsafe { OwnedBuf::as_mut_buf(self).get_mut(index) }
1400    }
1401
1402    #[inline]
1403    fn as_buf(&self) -> &Buf {
1404        self
1405    }
1406
1407    #[inline]
1408    unsafe fn as_mut_buf(&mut self) -> &mut Buf {
1409        unsafe { OwnedBuf::as_mut_buf(self) }
1410    }
1411}
1412
1413#[cfg(feature = "std")]
1414#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
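/// Write bytes into the buffer through [`io::Write`].
///
/// The implementation appends the written bytes using [`extend_from_slice`]
/// and never returns an error. A minimal usage sketch, assuming the `std`
/// feature is enabled:
///
/// [`extend_from_slice`]: OwnedBuf::extend_from_slice
///
/// ```
/// use std::io::Write;
///
/// use musli_zerocopy::OwnedBuf;
///
/// let mut buf = OwnedBuf::new();
/// buf.write_all(b"hello world")?;
/// assert_eq!(buf.as_slice(), b"hello world");
/// # Ok::<_, std::io::Error>(())
/// ```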
1415impl io::Write for OwnedBuf {
1416    #[inline]
1417    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
1418        self.extend_from_slice(buf);
1419        Ok(buf.len())
1420    }
1421
1422    #[inline]
1423    fn flush(&mut self) -> io::Result<()> {
1424        Ok(())
1425    }
1426}