musli_zerocopy/buf/slice_mut.rs
1use core::borrow::Borrow;
2use core::marker::PhantomData;
3use core::mem::{ManuallyDrop, align_of, size_of, size_of_val};
4use core::ops::Deref;
5use core::ptr::NonNull;
6use core::slice::{self, SliceIndex};
7
8#[cfg(feature = "alloc")]
9use alloc::borrow::Cow;
10
11use crate::buf::{self, Buf, DefaultAlignment, Padder, StoreBuf};
12use crate::endian::{ByteOrder, Native};
13use crate::error::Error;
14use crate::mem::MaybeUninit;
15use crate::pointer::{DefaultSize, Ref, Size};
16use crate::traits::{UnsizedZeroCopy, ZeroCopy};
17
18/// A fixed buffer wrapping a `&mut [u8]` with a dynamic alignment.
19///
/// By default this buffer starts out having the same alignment as `usize`,
/// making it platform-specific. But the alignment can grow on demand to match
/// the types being stored in it.
23///
24/// # Examples
25///
26/// ```
27/// use musli_zerocopy::{SliceMut, ZeroCopy};
28///
29/// #[derive(ZeroCopy)]
30/// #[repr(C, align(128))]
31/// struct Custom { field: u32 }
32///
33/// let mut buf = [0; 1024];
34/// let mut buf = SliceMut::new(&mut buf);
35/// buf.store(&Custom { field: 10 });
36/// ```
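///
/// The requested alignment grows to match the most demanding type stored so
/// far. A small sketch of this, under the assumption that the default
/// alignment is no larger than 16:
///
/// ```
/// use musli_zerocopy::{SliceMut, ZeroCopy};
///
/// #[derive(ZeroCopy)]
/// #[repr(C, align(16))]
/// struct Aligned { field: u32 }
///
/// let mut buf = [0; 1024];
/// let mut buf = SliceMut::new(&mut buf);
///
/// // Storing a type with a small alignment does not grow the request...
/// buf.store(&1u8);
/// // ...but storing a 16-byte aligned type does.
/// buf.store(&Aligned { field: 1 });
/// assert_eq!(buf.requested(), 16);
/// ```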
37pub struct SliceMut<'a, E = Native, O = DefaultSize>
38where
39 E: ByteOrder,
40 O: Size,
41{
42 /// Base data pointer.
43 data: NonNull<u8>,
44 /// The initialized length of the buffer.
45 len: usize,
46 /// The capacity of the buffer.
47 capacity: usize,
48 /// The requested alignment.
49 requested: usize,
50 /// Sticky endianness and pointer size.
51 _marker: PhantomData<(&'a mut [u8], E, O)>,
52}
53
54impl<'a> SliceMut<'a> {
55 /// Construct a new empty buffer with a requested default alignment.
56 ///
57 /// The default alignment is guaranteed to be larger than 0.
58 ///
59 /// # Examples
60 ///
61 /// ```
62 /// use musli_zerocopy::SliceMut;
63 ///
64 /// let mut buf = [0; 1024];
65 /// let buf = SliceMut::new(&mut buf);
66 /// assert!(buf.is_empty());
67 /// ```
68 pub fn new(bytes: &'a mut [u8]) -> Self {
69 Self::with_alignment::<DefaultAlignment>(bytes)
70 }
71
    /// Construct a new empty buffer with an alignment request matching that
    /// of `T`.
74 ///
75 /// Note that this does not guarantee that the underlying buffer is aligned.
76 ///
77 /// # Examples
78 ///
79 /// ```
80 /// use musli_zerocopy::SliceMut;
81 ///
82 /// let mut buf = [0; 1024];
83 /// let buf = SliceMut::with_alignment::<u64>(&mut buf);
84 /// assert!(buf.is_empty());
85 /// assert_eq!(buf.requested(), 8);
86 /// ```
87 pub fn with_alignment<T>(bytes: &'a mut [u8]) -> Self {
88 let align = align_of::<T>();
89 let capacity = bytes.len();
90
91 Self {
92 data: unsafe { NonNull::new_unchecked(bytes.as_mut_ptr()) },
93 len: 0,
94 capacity,
95 requested: align,
96 _marker: PhantomData,
97 }
98 }
99}
100
101impl<'a, E, O> SliceMut<'a, E, O>
102where
103 E: ByteOrder,
104 O: Size,
105{
106 /// Modify the buffer to utilize the specified pointer size when inserting
107 /// references.
108 ///
109 /// # Examples
110 ///
111 /// ```
112 /// use musli_zerocopy::SliceMut;
113 ///
114 /// let mut buf = SliceMut::new(&mut [0; 16])
115 /// .with_size::<u8>();
116 /// ```
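    ///
    /// References stored in the buffer then use the smaller offset type, so
    /// every offset and length has to fit in it. A small sketch storing and
    /// loading through a `u8`-sized buffer:
    ///
    /// ```
    /// use musli_zerocopy::SliceMut;
    ///
    /// let mut buf = [0; 16];
    /// let mut buf = SliceMut::new(&mut buf)
    ///     .with_size::<u8>();
    ///
    /// let string = buf.store_unsized("hi");
    ///
    /// let buf = buf.to_requested();
    /// assert_eq!(buf.load(string)?, "hi");
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```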
117 #[inline]
118 pub fn with_size<U: Size>(self) -> SliceMut<'a, E, U> {
119 let this = ManuallyDrop::new(self);
120
121 SliceMut {
122 data: this.data,
123 len: this.len,
124 capacity: this.capacity,
125 requested: this.requested,
126 _marker: PhantomData,
127 }
128 }
129
130 /// Modify the buffer to utilize the specified byte order when inserting
131 /// references.
132 ///
133 /// # Examples
134 ///
135 /// ```
136 /// use musli_zerocopy::{endian, SliceMut};
137 ///
138 /// let mut buf = [0; 1024];
139 /// let mut buf = SliceMut::new(&mut buf)
140 /// .with_byte_order::<endian::Little>();
141 /// ```
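    ///
    /// References and metadata stored in the buffer are then encoded using
    /// the given byte order. A small sketch storing and loading through a
    /// little-endian buffer:
    ///
    /// ```
    /// use musli_zerocopy::{endian, SliceMut};
    ///
    /// let mut buf = [0; 1024];
    /// let mut buf = SliceMut::new(&mut buf)
    ///     .with_byte_order::<endian::Little>();
    ///
    /// let string = buf.store_unsized("hello");
    ///
    /// let buf = buf.to_requested();
    /// assert_eq!(buf.load(string)?, "hello");
    /// # Ok::<_, musli_zerocopy::Error>(())
    /// ```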
142 #[inline]
143 pub fn with_byte_order<U>(self) -> SliceMut<'a, U, O>
144 where
145 U: ByteOrder,
146 {
147 let this = ManuallyDrop::new(self);
148
149 SliceMut {
150 data: this.data,
151 len: this.len,
152 capacity: this.capacity,
153 requested: this.requested,
154 _marker: PhantomData,
155 }
156 }
157
158 /// Get the current length of the buffer.
159 ///
160 /// # Examples
161 ///
162 /// ```
163 /// use musli_zerocopy::SliceMut;
164 ///
165 /// let mut buf = [0; 1024];
166 /// let buf = SliceMut::new(&mut buf);
167 /// assert_eq!(buf.len(), 0);
168 /// ```
169 #[inline]
170 pub fn len(&self) -> usize {
171 self.len
172 }
173
174 /// Clear the current buffer.
175 ///
176 /// # Examples
177 ///
178 /// ```
179 /// use musli_zerocopy::SliceMut;
180 ///
181 /// let mut buf = [0; 1024];
182 /// let mut buf = SliceMut::new(&mut buf);
183 /// assert_eq!(buf.capacity(), 1024);
184 /// buf.extend_from_slice(&[1, 2, 3, 4]);
185 ///
186 /// assert_eq!(buf.len(), 4);
187 /// buf.clear();
188 /// assert_eq!(buf.capacity(), 1024);
189 /// assert_eq!(buf.len(), 0);
190 /// ```
191 #[inline]
192 pub fn clear(&mut self) {
193 self.len = 0;
194 }
195
196 /// Test if the buffer is empty.
197 ///
198 /// # Examples
199 ///
200 /// ```
201 /// use musli_zerocopy::SliceMut;
202 ///
203 /// let mut buf = [0; 1024];
204 /// let buf = SliceMut::new(&mut buf);
205 /// assert!(buf.is_empty());
206 /// ```
207 #[inline]
208 pub fn is_empty(&self) -> bool {
209 self.len == 0
210 }
211
212 /// Get the current capacity of the buffer.
213 ///
214 /// # Examples
215 ///
216 /// ```
217 /// use musli_zerocopy::SliceMut;
218 ///
219 /// let mut buf = [0; 1024];
220 /// let buf = SliceMut::new(&mut buf);
221 /// assert_eq!(buf.capacity(), 1024);
222 /// ```
223 #[inline]
224 pub fn capacity(&self) -> usize {
225 self.capacity
226 }
227
228 /// Return the requested alignment of the buffer.
229 ///
230 /// # Examples
231 ///
232 /// ```
233 /// use musli_zerocopy::SliceMut;
234 ///
235 /// let mut buf = [0; 1024];
236 /// let buf = SliceMut::with_alignment::<u64>(&mut buf);
237 /// assert!(buf.is_empty());
238 /// assert_eq!(buf.requested(), 8);
239 /// ```
240 #[inline]
241 pub fn requested(&self) -> usize {
242 self.requested
243 }
244
    /// Reserve capacity for at least `capacity` more bytes in this buffer.
    ///
    /// # Panics
    ///
    /// Since the underlying `&mut [u8]` cannot grow, this panics if the
    /// required capacity exceeds the fixed [`capacity()`] of the buffer.
    ///
    /// [`capacity()`]: Self::capacity
    ///
247 /// # Examples
248 ///
249 /// ```
250 /// use musli_zerocopy::SliceMut;
251 ///
252 /// let mut buf = [0; 1024];
253 /// let mut buf = SliceMut::new(&mut buf);
254 /// assert_eq!(buf.capacity(), 1024);
255 ///
256 /// buf.reserve(10);
257 /// assert!(buf.capacity() >= 10);
258 /// ```
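    ///
    /// Since the underlying slice is fixed, reserving more than its capacity
    /// panics. A small sketch of that failure mode:
    ///
    /// ```should_panic
    /// use musli_zerocopy::SliceMut;
    ///
    /// let mut buf = [0; 4];
    /// let mut buf = SliceMut::new(&mut buf);
    ///
    /// // The underlying slice only has room for 4 bytes.
    /// buf.reserve(8);
    /// ```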
259 #[inline]
260 pub fn reserve(&mut self, capacity: usize) {
261 let new_capacity = self.len + capacity;
262 self.ensure_capacity(new_capacity);
263 }
264
265 /// Advance the length of the owned buffer by `size`.
266 ///
267 /// # Safety
268 ///
    /// The caller must ensure that bytes up until `len() + size` have been
    /// initialized in this buffer.
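    ///
    /// # Examples
    ///
    /// A small sketch of manual initialization through the raw pointer,
    /// assuming the buffer has capacity for the bytes being written:
    ///
    /// ```
    /// use musli_zerocopy::SliceMut;
    ///
    /// let mut buf = [0; 1024];
    /// let mut buf = SliceMut::new(&mut buf);
    ///
    /// // SAFETY: The buffer has capacity for four bytes, and all of them are
    /// // initialized before advancing.
    /// unsafe {
    ///     buf.as_mut_ptr().write_bytes(42, 4);
    ///     buf.advance(4);
    /// }
    ///
    /// assert_eq!(buf.as_slice(), &[42, 42, 42, 42]);
    /// ```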
271 #[inline]
272 pub unsafe fn advance(&mut self, size: usize) {
273 self.len += size;
274 }
275
    /// Get a raw pointer to the current buffer.
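    ///
    /// # Examples
    ///
    /// A small sketch reading back the first initialized byte through the raw
    /// pointer:
    ///
    /// ```
    /// use musli_zerocopy::SliceMut;
    ///
    /// let mut buf = [0; 16];
    /// let mut buf = SliceMut::new(&mut buf);
    /// buf.extend_from_slice(&[1, 2, 3, 4]);
    ///
    /// // SAFETY: The first byte has been initialized by the extend above.
    /// assert_eq!(unsafe { buf.as_ptr().read() }, 1);
    /// ```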
277 #[inline]
278 pub fn as_ptr(&self) -> *const u8 {
279 self.data.as_ptr() as *const _
280 }
281
    /// Get a raw mutable pointer to the current buffer.
283 #[inline]
284 pub fn as_mut_ptr(&mut self) -> *mut u8 {
285 self.data.as_ptr()
286 }
287
288 /// Extract a slice containing the entire buffer.
289 ///
290 /// # Examples
291 ///
292 /// ```
293 /// use musli_zerocopy::SliceMut;
294 ///
295 /// let mut buf = [0; 1024];
296 /// let mut buf = SliceMut::new(&mut buf);
297 /// buf.extend_from_slice(b"hello world");
298 /// assert_eq!(buf.as_slice(), b"hello world");
299 /// ```
300 #[inline]
301 pub fn as_slice(&self) -> &[u8] {
302 unsafe { slice::from_raw_parts(self.as_ptr(), self.len()) }
303 }
304
305 /// Extract a mutable slice containing the entire buffer.
306 ///
307 /// # Examples
308 ///
309 /// ```
310 /// use musli_zerocopy::SliceMut;
311 ///
312 /// let mut buf = [0; 1024];
313 /// let mut buf = SliceMut::new(&mut buf);
314 /// buf.extend_from_slice(b"hello world");
315 /// buf.as_mut_slice().make_ascii_uppercase();
316 /// assert_eq!(buf.as_slice(), b"HELLO WORLD");
317 /// ```
318 #[inline]
319 pub fn as_mut_slice(&mut self) -> &mut [u8] {
320 unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len()) }
321 }
322
323 /// Access the buffer mutably.
324 ///
325 /// # Examples
326 ///
327 /// ```
328 /// use musli_zerocopy::SliceMut;
329 ///
330 /// let mut buf = [0; 1024];
331 /// let mut buf = SliceMut::new(&mut buf);
332 /// let slice = buf.store_unsized("hello world");
333 ///
334 /// // SAFETY: We don't manipulate the underlying buffer in a way which leaves uninitialized data.
335 /// let mut buf = unsafe { buf.as_mut_buf() };
336 ///
337 /// buf.load_mut(slice)?.make_ascii_uppercase();
338 /// assert_eq!(buf.load(slice)?, "HELLO WORLD");
339 /// # Ok::<_, musli_zerocopy::Error>(())
340 /// ```
341 ///
342 /// # Safety
343 ///
    /// Since this allows the underlying buffer to be mutated, depending on
    /// how the buffer is used it might result in undefined bit-patterns, such
    /// as uninitialized padding bytes, being written to it. The caller must
    /// ensure that this does not happen to the structures being written, for
    /// example by calling [`ZeroCopy::initialize_padding()`] after the
    /// contents of the buffer have been modified.
350 ///
351 /// See [`Buf::new_mut`] for more information.
352 #[inline]
353 pub unsafe fn as_mut_buf(&mut self) -> &mut Buf {
354 unsafe { Buf::new_mut(self.as_mut_slice()) }
355 }
356
357 /// Store an uninitialized value.
358 ///
359 /// This allows values to be inserted before they can be initialized, which
360 /// can be useful if you need them to be in a certain location in the buffer
361 /// but don't have access to their value yet.
362 ///
363 /// The memory for `T` will be zero-initialized at [`next_offset<T>()`] and
364 /// the length and alignment requirement of `SliceMut` updated to reflect
365 /// that an instance of `T` has been stored. But that representation might
366 /// not match the representation of `T`[^non-zero].
367 ///
368 /// To get the offset where the value will be written, call
369 /// [`next_offset<T>()`] before storing the value.
370 ///
371 /// > **Note:** this does not return [`std::mem::MaybeUninit`], instead we
372 /// > use an internal [`MaybeUninit`] which is similar but has different
373 /// > properties. See [its documentation][MaybeUninit] for more.
374 ///
375 /// [`next_offset<T>()`]: Self::next_offset()
376 /// [^non-zero]: Like with [`NonZero*`][core::num] types.
377 ///
378 /// # Examples
379 ///
380 /// ```
381 /// use musli_zerocopy::mem::MaybeUninit;
382 /// use musli_zerocopy::{SliceMut, Ref, ZeroCopy};
383 ///
384 /// #[derive(ZeroCopy)]
385 /// #[repr(C)]
386 /// struct Custom { field: u32, string: Ref<str> }
387 ///
388 /// let mut buf = [0; 1024];
389 /// let mut buf = SliceMut::new(&mut buf);
390 /// let reference: Ref<MaybeUninit<Custom>> = buf.store_uninit::<Custom>();
391 ///
392 /// let string = buf.store_unsized("Hello World!");
393 ///
394 /// buf.load_uninit_mut(reference).write(&Custom { field: 42, string });
395 ///
396 /// let reference = reference.assume_init();
397 /// assert_eq!(reference.offset(), 0);
398 /// # Ok::<_, musli_zerocopy::Error>(())
399 /// ```
400 #[inline]
401 pub fn store_uninit<T>(&mut self) -> Ref<MaybeUninit<T>, E, O>
402 where
403 T: ZeroCopy,
404 {
405 // SAFETY: We've just reserved capacity for this write.
406 unsafe {
407 self.next_offset_with_and_reserve(align_of::<T>(), size_of::<T>());
408 let offset = self.len;
409 self.data
410 .as_ptr()
411 .add(self.len)
412 .write_bytes(0, size_of::<T>());
413 self.len += size_of::<T>();
414 Ref::new(offset)
415 }
416 }
417
    /// Load a mutable reference to a value that might not have been
    /// initialized, so that it can be written to.
    ///
    /// This does not prevent a [`Ref`] constructed by a different instance of
    /// [`SliceMut`] from being used. That would only result in garbled data,
    /// but wouldn't be a safety concern.
423 ///
424 /// > **Note:** this does not return [`std::mem::MaybeUninit`], instead we
425 /// > use an internal [`MaybeUninit`] which is similar but has different
426 /// > properties. See [its documentation][MaybeUninit] for more.
427 ///
428 /// # Panics
429 ///
    /// Panics if the reference's [`Ref::offset()`] plus the size of `T` does
    /// not fit within the [`len()`] of the current buffer. This might happen
    /// if you try to use a reference constructed from a different
    /// [`SliceMut`] instance.
434 ///
435 /// [`len()`]: Self::len()
436 ///
437 /// ```should_panic
438 /// use musli_zerocopy::SliceMut;
439 ///
440 /// let mut buf = [0; 1024];
441 /// let mut buf1 = SliceMut::new(&mut buf);
442 /// buf1.store(&1u32);
443 ///
444 /// let mut buf = [0; 1024];
445 /// let mut buf2 = SliceMut::new(&mut buf);
446 /// buf2.store(&10u32);
447 ///
448 /// let number = buf2.store_uninit::<u32>();
449 ///
450 /// buf1.load_uninit_mut(number);
451 /// ```
452 ///
453 /// # Examples
454 ///
455 /// ```
456 /// use musli_zerocopy::{SliceMut, Ref, ZeroCopy};
457 /// use musli_zerocopy::mem::MaybeUninit;
458 ///
459 /// #[derive(ZeroCopy)]
460 /// #[repr(C)]
461 /// struct Custom { field: u32, string: Ref<str> }
462 ///
463 /// let mut buf = [0; 1024];
464 /// let mut buf = SliceMut::new(&mut buf);
465 /// let reference: Ref<MaybeUninit<Custom>> = buf.store_uninit::<Custom>();
466 ///
467 /// let string = buf.store_unsized("Hello World!");
468 ///
469 /// buf.load_uninit_mut(reference).write(&Custom { field: 42, string });
470 ///
471 /// let reference = reference.assume_init();
472 /// assert_eq!(reference.offset(), 0);
473 /// # Ok::<_, musli_zerocopy::Error>(())
474 /// ```
475 #[inline]
476 pub fn load_uninit_mut<T, U, I>(
477 &mut self,
478 reference: Ref<MaybeUninit<T>, U, I>,
479 ) -> &mut MaybeUninit<T>
480 where
481 T: ZeroCopy,
482 U: ByteOrder,
483 I: Size,
484 {
485 let at = reference.offset();
486
        // Note: We only need this assertion, because `MaybeUninit<T>` does
        // not implement `ZeroCopy`, so the only way to construct such a
        // reference is through `store_uninit`.
489 assert!(at + size_of::<T>() <= self.len, "Length overflow");
490
491 // SAFETY: `MaybeUninit<T>` has no representation requirements and is
492 // unaligned.
493 unsafe { &mut *(self.data.as_ptr().add(at) as *mut MaybeUninit<T>) }
494 }
495
496 /// Insert a value with the given size.
497 ///
498 /// The memory for `T` will be initialized at [`next_offset<T>()`] and the
499 /// length and alignment requirement of `SliceMut` updated to reflect that
500 /// an instance of `T` has been stored.
501 ///
502 /// To get the offset where the value will be written, call
503 /// [`next_offset<T>()`] before storing the value or access the offset
504 /// through the [`Ref::offset`] being returned.
505 ///
506 /// [`next_offset<T>()`]: Self::next_offset
507 ///
508 /// # Examples
509 ///
510 /// ```
511 /// use musli_zerocopy::{SliceMut, Ref, ZeroCopy};
512 ///
513 /// #[derive(ZeroCopy)]
514 /// #[repr(C)]
515 /// struct Custom { field: u32, string: Ref<str> }
516 ///
517 /// let mut buf = [0; 1024];
518 /// let mut buf = SliceMut::new(&mut buf);
519 ///
520 /// let string = buf.store_unsized("string");
521 /// let custom = buf.store(&Custom { field: 1, string });
522 /// let custom2 = buf.store(&Custom { field: 2, string });
523 ///
524 /// let buf = buf.to_requested();
525 ///
526 /// let custom = buf.load(custom)?;
527 /// assert_eq!(custom.field, 1);
528 /// assert_eq!(buf.load(custom.string)?, "string");
529 ///
530 /// let custom2 = buf.load(custom2)?;
531 /// assert_eq!(custom2.field, 2);
532 /// assert_eq!(buf.load(custom2.string)?, "string");
533 /// # Ok::<_, musli_zerocopy::Error>(())
534 /// ```
535 ///
536 /// Storing an array:
    ///
539 /// ```
540 /// use musli_zerocopy::{ZeroCopy, SliceMut};
541 ///
542 /// // Element with padding.
543 /// #[derive(Debug, PartialEq, ZeroCopy)]
544 /// #[repr(C)]
545 /// struct Element {
546 /// first: u8,
547 /// second: u32,
548 /// }
549 ///
550 /// let values = [
551 /// Element { first: 0x01, second: 0x01020304u32 },
552 /// Element { first: 0x02, second: 0x01020304u32 }
553 /// ];
554 ///
555 /// let mut buf = [0; 1024];
556 /// let mut buf = SliceMut::new(&mut buf);
557 /// let array = buf.store(&values);
558 ///
559 /// let buf = buf.to_requested();
560 ///
561 /// assert_eq!(buf.load(array)?, &values);
562 /// # Ok::<_, musli_zerocopy::Error>(())
563 /// ```
564 #[inline]
565 pub fn store<T>(&mut self, value: &T) -> Ref<T, E, O>
566 where
567 T: ZeroCopy,
568 {
569 self.next_offset_with_and_reserve(align_of::<T>(), size_of::<T>());
570
571 // SAFETY: We're ensuring to both align the internal buffer and store
572 // the value.
573 unsafe { self.store_unchecked(value) }
574 }
575
    /// Insert a value with the given size without ensuring that the buffer
    /// has the reserved capacity for it or is properly aligned.
578 ///
579 /// This is a low level API which is tricky to use correctly. The
580 /// recommended way to use this is through [`SliceMut::store`].
581 ///
582 /// [`SliceMut::store`]: Self::store
583 ///
584 /// # Safety
585 ///
586 /// The caller has to ensure that the buffer has the required capacity for
    /// `T` and is properly aligned. This can easily be accomplished by calling
588 /// [`request_align::<T>()`] followed by [`align_in_place()`] before this
589 /// function. A safe variant of this function is [`SliceMut::store`].
590 ///
591 /// [`align_in_place()`]: Self::align_in_place
592 /// [`SliceMut::store`]: Self::store
593 /// [`request_align::<T>()`]: Self::request_align
594 ///
595 /// # Examples
596 ///
597 /// ```
598 /// use std::mem::size_of;
599 ///
600 /// use musli_zerocopy::{SliceMut, Ref, ZeroCopy};
601 ///
602 /// #[derive(ZeroCopy)]
603 /// #[repr(C, align(4096))]
604 /// struct Custom { field: u32, string: Ref<str> }
605 ///
606 /// let mut buf = [0; 12288];
607 /// let mut buf = SliceMut::new(&mut buf);
608 ///
609 /// let string = buf.store_unsized("string");
610 ///
611 /// buf.request_align::<Custom>();
612 /// buf.reserve(2 * size_of::<Custom>());
613 ///
614 /// // SAFETY: We've ensure that the buffer is internally aligned and sized just above.
615 /// let custom = unsafe { buf.store_unchecked(&Custom { field: 1, string }) };
616 /// let custom2 = unsafe { buf.store_unchecked(&Custom { field: 2, string }) };
617 ///
618 /// let buf = buf.to_requested();
619 ///
620 /// let custom = buf.load(custom)?;
621 /// assert_eq!(custom.field, 1);
622 /// assert_eq!(buf.load(custom.string)?, "string");
623 ///
624 /// let custom2 = buf.load(custom2)?;
625 /// assert_eq!(custom2.field, 2);
626 /// assert_eq!(buf.load(custom2.string)?, "string");
627 /// # Ok::<_, musli_zerocopy::Error>(())
628 /// ```
629 #[inline]
630 pub unsafe fn store_unchecked<T>(&mut self, value: &T) -> Ref<T, E, O>
631 where
632 T: ZeroCopy,
633 {
634 let offset = self.len;
635
636 unsafe {
637 let ptr = NonNull::new_unchecked(self.data.as_ptr().add(offset));
638 buf::store_unaligned(ptr, value);
639 self.len += size_of::<T>();
640 }
641
642 Ref::new(offset)
643 }
644
    /// Either return the current buffer, or allocate a copy of it which is
    /// aligned to the [`requested()`] alignment.
647 ///
648 /// [`requested()`]: Self::requested
649 ///
650 /// # Examples
651 ///
652 /// ```
653 /// use musli_zerocopy::SliceMut;
654 ///
655 /// let mut buf = [0; 1024];
656 /// let mut buf = SliceMut::new(&mut buf);
657 ///
658 /// let first = buf.store_unsized("first");
659 /// let second = buf.store_unsized("second");
660 ///
661 /// let buf = buf.to_requested();
662 ///
663 /// assert_eq!(buf.load(first)?, "first");
664 /// assert_eq!(buf.load(second)?, "second");
665 /// # Ok::<_, musli_zerocopy::Error>(())
666 /// ```
667 #[cfg(feature = "alloc")]
668 pub fn to_requested(&self) -> Cow<'_, Buf> {
669 self.to_aligned_with(self.requested)
670 }
671
    /// Write an unsized value, such as a string or a slice, to the buffer.
673 ///
674 /// # Examples
675 ///
676 /// ```
677 /// use musli_zerocopy::SliceMut;
678 ///
679 /// let mut buf = [0; 1024];
680 /// let mut buf = SliceMut::new(&mut buf);
681 ///
682 /// let first = buf.store_unsized("first");
683 /// let second = buf.store_unsized("second");
684 ///
685 /// let buf = buf.to_requested();
686 ///
687 /// assert_eq!(buf.load(first)?, "first");
688 /// assert_eq!(buf.load(second)?, "second");
689 /// # Ok::<_, musli_zerocopy::Error>(())
690 /// ```
691 #[inline]
692 pub fn store_unsized<T>(&mut self, value: &T) -> Ref<T, E, O>
693 where
694 T: ?Sized + UnsizedZeroCopy,
695 {
696 unsafe {
697 let size = size_of_val(value);
698 self.next_offset_with_and_reserve(T::ALIGN, size);
699 let offset = self.len;
700 let ptr = NonNull::new_unchecked(self.data.as_ptr().add(offset));
701 ptr.as_ptr().copy_from_nonoverlapping(value.as_ptr(), size);
702
703 if T::PADDED {
704 let mut padder = Padder::new(ptr);
705 value.pad(&mut padder);
706 padder.remaining_unsized(value);
707 }
708
709 self.len += size;
710 Ref::with_metadata(offset, value.metadata())
711 }
712 }
713
714 /// Insert a slice into the buffer.
715 ///
716 /// # Examples
717 ///
718 /// ```
719 /// use musli_zerocopy::SliceMut;
720 ///
721 /// let mut buf = [0; 1024];
722 /// let mut buf = SliceMut::new(&mut buf);
723 ///
724 /// let mut values = Vec::new();
725 ///
726 /// values.push(buf.store_unsized("first"));
727 /// values.push(buf.store_unsized("second"));
728 ///
729 /// let slice_ref = buf.store_slice(&values);
730 ///
731 /// let buf = buf.to_requested();
732 ///
733 /// let slice = buf.load(slice_ref)?;
734 ///
735 /// let mut strings = Vec::new();
736 ///
737 /// for value in slice {
738 /// strings.push(buf.load(*value)?);
739 /// }
740 ///
741 /// assert_eq!(&strings, &["first", "second"][..]);
742 /// # Ok::<_, musli_zerocopy::Error>(())
743 /// ```
744 #[inline(always)]
745 pub fn store_slice<T>(&mut self, values: &[T]) -> Ref<[T], E, O>
746 where
747 T: ZeroCopy,
748 {
749 self.store_unsized(values)
750 }
751
752 /// Extend the buffer from a slice.
753 ///
754 /// Note that this only extends the underlying buffer but does not ensure
755 /// that any required alignment is abided by.
756 ///
    /// To ensure alignment, the caller must call [`request_align()`] with the
    /// appropriate alignment before extending, otherwise the alignment needed
    /// to decode the buffer again will be lost.
    ///
    /// [`request_align()`]: Self::request_align
    ///
    /// Note that this is a raw API which does not guarantee that any given
    /// alignment will be respected.
767 ///
768 /// # Examples
769 ///
770 /// ```
771 /// use musli_zerocopy::{SliceMut, Ref};
772 ///
773 /// let mut buf = [0; 1024];
774 /// let mut buf = SliceMut::with_alignment::<()>(&mut buf);
775 ///
    /// // Add one byte of padding to throw off any incidental alignment.
777 /// buf.extend_from_slice(&[1]);
778 ///
779 /// let ptr: Ref<u32> = Ref::new(buf.next_offset::<u32>());
780 /// buf.extend_from_slice(&[1, 2, 3, 4]);
781 ///
782 /// let buf = buf.to_requested();
783 ///
784 /// assert_eq!(*buf.load(ptr)?, u32::from_ne_bytes([1, 2, 3, 4]));
785 /// # Ok::<_, musli_zerocopy::Error>(())
786 /// ```
787 pub fn extend_from_slice(&mut self, bytes: &[u8]) {
788 self.reserve(bytes.len());
789
790 // SAFETY: We just checked that there is space in the slice.
791 unsafe {
792 self.store_bytes(bytes);
793 }
794 }
795
    /// Extend and initialize the buffer with `len` bytes set to `byte`.
797 pub(crate) fn fill(&mut self, byte: u8, len: usize) {
798 self.reserve(len);
799
800 unsafe {
801 let ptr = self.data.as_ptr().add(self.len);
802 ptr.write_bytes(byte, len);
803 self.len += len;
804 }
805 }
806
807 /// Store the slice without allocating.
808 ///
809 /// # Safety
810 ///
    /// The caller must ensure that the buffer has the capacity for `values`
    /// and that `T` is not padded as per `ZeroCopy::PADDED`.
814 #[inline]
815 pub(crate) unsafe fn store_bytes<T>(&mut self, values: &[T])
816 where
817 T: ZeroCopy,
818 {
819 unsafe {
820 let dst = self.as_mut_ptr().add(self.len);
821 dst.copy_from_nonoverlapping(values.as_ptr().cast(), size_of_val(values));
822 self.len += size_of_val(values);
823 }
824 }
825
826 /// Request that the current buffer should have at least the specified
827 /// alignment and zero-initialize the buffer up to the next position which
828 /// matches the given alignment.
829 ///
830 /// Note that this does not guarantee that the internal buffer is aligned
831 /// in-memory. An instance of [`SliceMut`] cannot guarantee this.
832 ///
833 /// ```
834 /// use musli_zerocopy::SliceMut;
835 /// let mut buf = [0; 1024];
836 /// let mut buf = SliceMut::new(&mut buf);
837 ///
838 /// buf.extend_from_slice(&[1, 2]);
839 /// buf.request_align::<u32>();
840 ///
841 /// assert_eq!(buf.as_slice(), &[1, 2, 0, 0]);
842 /// ```
843 ///
    /// The requested alignment is `align_of::<T>()`, which is always a power
    /// of two.
847 ///
848 /// # Examples
849 ///
850 /// ```
851 /// use musli_zerocopy::SliceMut;
852 ///
853 /// let mut buf = [0; 1024];
854 /// let mut buf = SliceMut::new(&mut buf);
855 /// buf.extend_from_slice(&[1, 2, 3, 4]);
856 /// buf.request_align::<u64>();
857 /// buf.extend_from_slice(&[5, 6, 7, 8]);
858 ///
859 /// assert_eq!(buf.as_slice(), &[1, 2, 3, 4, 0, 0, 0, 0, 5, 6, 7, 8]);
860 /// ```
861 #[inline]
862 pub fn request_align<T>(&mut self)
863 where
864 T: ZeroCopy,
865 {
866 self.requested = self.requested.max(align_of::<T>());
867 self.ensure_aligned_and_reserve(align_of::<T>(), size_of::<T>());
868 }
869
    /// Zero-pad the buffer up to the given alignment and ensure that there is
    /// capacity for `reserve` additional bytes.
872 #[inline]
873 fn ensure_aligned_and_reserve(&mut self, align: usize, reserve: usize) {
874 let extra = buf::padding_to(self.len, align);
875 self.reserve(extra + reserve);
876
        // SAFETY: The reserve above ensures that the padding stays within the
        // capacity of the buffer.
878 unsafe {
879 self.data.as_ptr().add(self.len).write_bytes(0, extra);
880 self.len += extra;
881 }
882 }
883
    /// Raise the requested alignment to at least `align`, zero-pad the buffer
    /// up to that alignment, and reserve `reserve` additional bytes.
886 #[inline]
887 pub(crate) fn next_offset_with_and_reserve(&mut self, align: usize, reserve: usize) {
888 self.requested = self.requested.max(align);
889 self.ensure_aligned_and_reserve(align, reserve);
890 }
891
    /// Return the offset into the current buffer, aligned for `T`, at which
    /// the next value will be written.
    ///
    /// This ensures that the returned offset is a multiple of
    /// `align_of::<T>()` and that the current buffer has the capacity to
    /// store `T`.
897 ///
898 /// # Examples
899 ///
900 /// ```
901 /// use musli_zerocopy::{SliceMut, Ref};
902 ///
903 /// let mut buf = [0; 1024];
904 /// let mut buf = SliceMut::new(&mut buf);
905 ///
    /// // Add one byte of padding to throw off any incidental alignment.
907 /// buf.extend_from_slice(&[1]);
908 ///
909 /// let ptr: Ref<u32> = Ref::new(buf.next_offset::<u32>());
910 /// buf.extend_from_slice(&[1, 2, 3, 4]);
911 ///
912 /// let buf = buf.to_requested();
913 ///
914 /// assert_eq!(*buf.load(ptr)?, u32::from_ne_bytes([1, 2, 3, 4]));
915 /// # Ok::<_, musli_zerocopy::Error>(())
916 /// ```
917 #[inline]
918 pub fn next_offset<T>(&mut self) -> usize {
919 // SAFETY: The alignment of `T` is guaranteed to be a power of two. We
920 // also make sure to reserve space for `T` since it is very likely that
921 // it will be written immediately after this.
922 self.next_offset_with_and_reserve(align_of::<T>(), size_of::<T>());
923 self.len
924 }
925
926 // Ensure that the new capacity is available or panic.
927 #[inline]
928 fn ensure_capacity(&mut self, new_capacity: usize) {
929 let new_capacity = new_capacity.max(self.requested);
930
931 if self.capacity < new_capacity {
932 panic!(
933 "Underlying slice has the capacity {}, but {} bytes are needed",
934 self.capacity, new_capacity
935 )
936 }
937 }
938}
939
/// `SliceMut` is `Send` because the data it references is unaliased.
unsafe impl Send for SliceMut<'_> {}
/// `SliceMut` is `Sync` since it is `Send` and the data it references is
/// unaliased.
unsafe impl Sync for SliceMut<'_> {}
945
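/// `SliceMut` dereferences to [`Buf`], so read-only methods on [`Buf`] can be
/// called directly on the mutable slice. A small sketch loading a stored
/// string through the `Deref` implementation:
///
/// ```
/// use musli_zerocopy::SliceMut;
///
/// let mut buf = [0; 1024];
/// let mut buf = SliceMut::new(&mut buf);
/// let slice = buf.store_unsized("hello world");
///
/// // `load` is provided by `Buf` through `Deref`.
/// assert_eq!(buf.load(slice)?, "hello world");
/// # Ok::<_, musli_zerocopy::Error>(())
/// ```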
946impl<E, O> Deref for SliceMut<'_, E, O>
947where
948 E: ByteOrder,
949 O: Size,
950{
951 type Target = Buf;
952
953 #[inline]
954 fn deref(&self) -> &Self::Target {
955 Buf::new(self.as_slice())
956 }
957}
958
959impl<E, O> AsRef<Buf> for SliceMut<'_, E, O>
960where
961 E: ByteOrder,
962 O: Size,
963{
    /// Trivial `AsRef<Buf>` implementation for `SliceMut<E, O>`.
965 ///
966 /// # Examples
967 ///
968 /// ```
969 /// use musli_zerocopy::SliceMut;
970 ///
971 /// let mut buf = [0; 1024];
972 /// let mut buf = SliceMut::new(&mut buf);
973 /// let slice = buf.store_unsized("hello world");
974 ///
975 /// let buf = buf.to_requested();
976 ///
977 /// assert_eq!(buf.load(slice)?, "hello world");
978 /// # Ok::<_, musli_zerocopy::Error>(())
979 /// ```
980 #[inline]
981 fn as_ref(&self) -> &Buf {
982 self
983 }
984}
985
986impl<E, O> Borrow<Buf> for SliceMut<'_, E, O>
987where
988 E: ByteOrder,
989 O: Size,
990{
991 #[inline]
992 fn borrow(&self) -> &Buf {
993 self.as_ref()
994 }
995}
996
997impl<E, O> StoreBuf for SliceMut<'_, E, O>
998where
999 E: ByteOrder,
1000 O: Size,
1001{
1002 type ByteOrder = E;
1003 type Size = O;
1004
1005 #[inline]
1006 fn len(&self) -> usize {
1007 SliceMut::len(self)
1008 }
1009
1010 #[inline]
1011 fn truncate(&mut self, len: usize) {
1012 if self.len > len {
1013 self.len = len;
1014 }
1015 }
1016
1017 #[inline]
1018 fn store_unsized<T>(&mut self, value: &T) -> Ref<T, Self::ByteOrder, Self::Size>
1019 where
1020 T: ?Sized + UnsizedZeroCopy,
1021 {
1022 SliceMut::store_unsized(self, value)
1023 }
1024
1025 #[inline]
1026 fn store<T>(&mut self, value: &T) -> Ref<T, Self::ByteOrder, Self::Size>
1027 where
1028 T: ZeroCopy,
1029 {
1030 SliceMut::store(self, value)
1031 }
1032
1033 #[inline]
1034 fn swap<T>(
1035 &mut self,
1036 a: Ref<T, Self::ByteOrder, Self::Size>,
1037 b: Ref<T, Self::ByteOrder, Self::Size>,
1038 ) -> Result<(), Error>
1039 where
1040 T: ZeroCopy,
1041 {
1042 // SAFETY: Since we are swapping two locations which have the same type
1043 // `T`, it does not affect the initialized state of the buffer.
1044 let buf = unsafe { self.as_mut_buf() };
1045 Buf::swap(buf, a, b)
1046 }
1047
1048 #[inline]
1049 fn align_in_place(&mut self) {
1050 // SAFETY: self.requested is guaranteed to be a power of two.
1051 if !buf::is_aligned_with(self.as_ptr(), self.requested) {
1052 panic!("Slice is not aligned by {}", self.requested);
1053 }
1054 }
1055
1056 #[inline]
1057 fn next_offset<T>(&mut self) -> usize {
1058 SliceMut::next_offset::<T>(self)
1059 }
1060
1061 #[inline]
1062 fn next_offset_with_and_reserve(&mut self, align: usize, reserve: usize) {
1063 SliceMut::next_offset_with_and_reserve(self, align, reserve)
1064 }
1065
1066 #[inline]
1067 fn fill(&mut self, byte: u8, len: usize) {
1068 SliceMut::fill(self, byte, len);
1069 }
1070
1071 #[inline]
1072 fn get<I>(&self, index: I) -> Option<&I::Output>
1073 where
1074 I: SliceIndex<[u8]>,
1075 {
1076 Buf::get(self, index)
1077 }
1078
1079 #[inline]
1080 unsafe fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
1081 where
1082 I: SliceIndex<[u8]>,
1083 {
1084 unsafe { SliceMut::as_mut_buf(self).get_mut(index) }
1085 }
1086
1087 #[inline]
1088 fn as_buf(&self) -> &Buf {
1089 self
1090 }
1091
1092 #[inline]
1093 unsafe fn as_mut_buf(&mut self) -> &mut Buf {
1094 unsafe { SliceMut::as_mut_buf(self) }
1095 }
1096}