tiny_artnet_bytes_no_atomic/bytes_mut.rs
use core::iter::{FromIterator, Iterator};
use core::mem::{self, ManuallyDrop};
use core::ops::{Deref, DerefMut};
use core::ptr::{self, NonNull};
use core::{cmp, fmt, hash, isize, slice, usize};

use alloc::{
    borrow::{Borrow, BorrowMut},
    boxed::Box,
    string::String,
    vec::Vec,
};

use crate::buf::{IntoIter, UninitSlice};
use crate::bytes::Vtable;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use crate::{Buf, BufMut, Bytes};

/// A unique reference to a contiguous slice of memory.
///
/// `BytesMut` represents a unique view into a potentially shared memory region.
/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
/// mutate the memory.
///
/// `BytesMut` can be thought of as containing a `buf: Arc<Vec<u8>>`, an offset
/// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the
/// same `buf` overlaps with its slice. That guarantee means that a write lock
/// is not required.
///
/// # Growth
///
/// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as
/// necessary. However, explicitly reserving the required space up-front before
/// a series of inserts will be more efficient.
///
/// # Examples
///
/// ```
/// use bytes::{BytesMut, BufMut};
///
/// let mut buf = BytesMut::with_capacity(64);
///
/// buf.put_u8(b'h');
/// buf.put_u8(b'e');
/// buf.put(&b"llo"[..]);
///
/// assert_eq!(&buf[..], b"hello");
///
/// // Freeze the buffer so that it can be shared
/// let a = buf.freeze();
///
/// // This does not allocate, instead `b` points to the same memory.
/// let b = a.clone();
///
/// assert_eq!(&a[..], b"hello");
/// assert_eq!(&b[..], b"hello");
/// ```
pub struct BytesMut {
    ptr: NonNull<u8>,
    len: usize,
    cap: usize,
    data: *mut Shared,
}

// Thread-safe reference-counted container for the shared storage. This is
// mostly the same as `alloc::sync::Arc` but without the weak counter. The
// ref counting fns are based on the ones found in `std`.
//
// The main reason to use `Shared` instead of `alloc::sync::Arc` is that it
// ends up making the overall code simpler and easier to reason about. This is
// due to some of the logic around setting `Inner::arc` and other ways the
// `arc` field is used. Using `Arc` ended up requiring a number of funky
// transmutes and other shenanigans to make it work.
struct Shared {
    vec: Vec<u8>,
    original_capacity_repr: usize,
    ref_count: AtomicUsize,
}

// Buffer storage strategy flags.
const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;

// The max original capacity value. Any `Bytes` allocated with a greater
// initial capacity will default to this.
const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
// The original capacity algorithm will not take effect unless the originally
// allocated capacity was at least 1kb in size.
const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
// The original capacity is stored in powers of 2 starting at 1kb to a max of
// 64kb. Representing it as such requires only 3 bits of storage.
const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
const ORIGINAL_CAPACITY_OFFSET: usize = 2;

// When the storage is in the `Vec` representation, the pointer can be advanced
// at most this value. This is because the storage available to track the
// offset is `usize` minus the number of KIND bits and the number of
// ORIGINAL_CAPACITY bits.
const VEC_POS_OFFSET: usize = 5;
const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
const NOT_VEC_POS_MASK: usize = 0b11111;
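//
// A worked example of how the `data` field packs these flags (derived from
// the constants above): a `BytesMut` freshly created from a `Vec` with 2 KiB
// of capacity stores `data = (2 << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC`,
// i.e. `0b0_1001`. Bit 0 holds the kind flag, bits 2..5 hold the original
// capacity repr (`2`, meaning 2 KiB), and the remaining high bits hold the
// vec offset (still `0`). In the `KIND_ARC` representation, `data` is instead
// the `*mut Shared` pointer itself, whose alignment guarantees the kind bit
// is `0`.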

#[cfg(target_pointer_width = "64")]
const PTR_WIDTH: usize = 64;
#[cfg(target_pointer_width = "32")]
const PTR_WIDTH: usize = 32;

/*
 *
 * ===== BytesMut =====
 *
 */

impl BytesMut {
    /// Creates a new `BytesMut` with the specified capacity.
    ///
    /// The returned `BytesMut` will be able to hold at least `capacity` bytes
    /// without reallocating.
    ///
    /// It is important to note that this function does not specify the length
    /// of the returned `BytesMut`, but only the capacity.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut bytes = BytesMut::with_capacity(64);
    ///
    /// // `bytes` contains no data, even though there is capacity
    /// assert_eq!(bytes.len(), 0);
    ///
    /// bytes.put(&b"hello world"[..]);
    ///
    /// assert_eq!(&bytes[..], b"hello world");
    /// ```
    #[inline]
    pub fn with_capacity(capacity: usize) -> BytesMut {
        BytesMut::from_vec(Vec::with_capacity(capacity))
    }

    /// Creates a new `BytesMut` with default capacity.
    ///
    /// Resulting object has length 0 and unspecified capacity.
    /// This function does not allocate.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut bytes = BytesMut::new();
    ///
    /// assert_eq!(0, bytes.len());
    ///
    /// bytes.reserve(2);
    /// bytes.put_slice(b"xy");
    ///
    /// assert_eq!(&b"xy"[..], &bytes[..]);
    /// ```
    #[inline]
    pub fn new() -> BytesMut {
        BytesMut::with_capacity(0)
    }

    /// Returns the number of bytes contained in this `BytesMut`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::from(&b"hello"[..]);
    /// assert_eq!(b.len(), 5);
    /// ```
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns true if the `BytesMut` has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::with_capacity(64);
    /// assert!(b.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns the number of bytes the `BytesMut` can hold without reallocating.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::with_capacity(64);
    /// assert_eq!(b.capacity(), 64);
    /// ```
    #[inline]
    pub fn capacity(&self) -> usize {
        self.cap
    }

    /// Converts `self` into an immutable `Bytes`.
    ///
    /// The conversion is zero cost and is used to indicate that the slice
    /// referenced by the handle will no longer be mutated. Once the conversion
    /// is done, the handle can be cloned and shared across threads.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    /// use std::thread;
    ///
    /// let mut b = BytesMut::with_capacity(64);
    /// b.put(&b"hello world"[..]);
    /// let b1 = b.freeze();
    /// let b2 = b1.clone();
    ///
    /// let th = thread::spawn(move || {
    ///     assert_eq!(&b1[..], b"hello world");
    /// });
    ///
    /// assert_eq!(&b2[..], b"hello world");
    /// th.join().unwrap();
    /// ```
    #[inline]
    pub fn freeze(mut self) -> Bytes {
        if self.kind() == KIND_VEC {
            // Just re-use `Bytes` internal Vec vtable
            unsafe {
                let (off, _) = self.get_vec_pos();
                let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
                mem::forget(self);
                let mut b: Bytes = vec.into();
                b.advance(off);
                b
            }
        } else {
            debug_assert_eq!(self.kind(), KIND_ARC);

            let ptr = self.ptr.as_ptr();
            let len = self.len;
            let data = AtomicPtr::new(self.data as _);
            mem::forget(self);
            unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
        }
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned
    /// `BytesMut` contains elements `[at, capacity)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count
    /// and sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let mut b = a.split_off(5);
    ///
    /// a[0] = b'j';
    /// b[0] = b'!';
    ///
    /// assert_eq!(&a[..], b"jello");
    /// assert_eq!(&b[..], b"!world");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > capacity`.
    #[must_use = "consider BytesMut::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> BytesMut {
        assert!(
            at <= self.capacity(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.capacity(),
        );
        unsafe {
            let mut other = self.shallow_clone();
            other.set_start(at);
            self.set_end(at);
            other
        }
    }

    /// Removes the bytes from the current view, returning them in a new
    /// `BytesMut` handle.
    ///
    /// Afterwards, `self` will be empty, but will retain any additional
    /// capacity that it had before the operation. This is identical to
    /// `self.split_to(self.len())`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(1024);
    /// buf.put(&b"hello world"[..]);
    ///
    /// let other = buf.split();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(1013, buf.capacity());
    ///
    /// assert_eq!(other, b"hello world"[..]);
    /// ```
    #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"]
    pub fn split(&mut self) -> BytesMut {
        let len = self.len();
        self.split_to(len)
    }

    /// Splits the buffer into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
    /// contains elements `[0, at)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let mut b = a.split_to(5);
    ///
    /// a[0] = b'!';
    /// b[0] = b'j';
    ///
    /// assert_eq!(&a[..], b"!world");
    /// assert_eq!(&b[..], b"jello");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    #[must_use = "consider BytesMut::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> BytesMut {
        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        unsafe {
            let mut other = self.shallow_clone();
            other.set_end(at);
            self.set_start(at);
            other
        }
    }

    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest.
    ///
    /// If `len` is greater than the buffer's current length, this has no
    /// effect.
    ///
    /// The [`split_off`] method can emulate `truncate`, but this causes the
    /// excess bytes to be returned instead of dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// ```
    ///
    /// [`split_off`]: #method.split_off
    pub fn truncate(&mut self, len: usize) {
        if len <= self.len() {
            unsafe {
                self.set_len(len);
            }
        }
    }

    /// Clears the buffer, removing all data.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello world"[..]);
    /// buf.clear();
    /// assert!(buf.is_empty());
    /// ```
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    /// Resizes the buffer so that `len` is equal to `new_len`.
    ///
    /// If `new_len` is greater than `len`, the buffer is extended by the
    /// difference with each additional byte set to `value`. If `new_len` is
    /// less than `len`, the buffer is simply truncated.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::new();
    ///
    /// buf.resize(3, 0x1);
    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
    ///
    /// buf.resize(2, 0x2);
    /// assert_eq!(&buf[..], &[0x1, 0x1]);
    ///
    /// buf.resize(4, 0x3);
    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
    /// ```
    pub fn resize(&mut self, new_len: usize, value: u8) {
        let len = self.len();
        if new_len > len {
            let additional = new_len - len;
            self.reserve(additional);
            unsafe {
                let dst = self.chunk_mut().as_mut_ptr();
                ptr::write_bytes(dst, value, additional);
                self.set_len(new_len);
            }
        } else {
            self.truncate(new_len);
        }
    }

    /// Sets the length of the buffer.
    ///
    /// This will explicitly set the size of the buffer without actually
    /// modifying the data, so it is up to the caller to ensure that the data
    /// has been initialized.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut b = BytesMut::from(&b"hello world"[..]);
    ///
    /// unsafe {
    ///     b.set_len(5);
    /// }
    ///
    /// assert_eq!(&b[..], b"hello");
    ///
    /// unsafe {
    ///     b.set_len(11);
    /// }
    ///
    /// assert_eq!(&b[..], b"hello world");
    /// ```
    #[inline]
    pub unsafe fn set_len(&mut self, len: usize) {
        debug_assert!(len <= self.cap, "set_len out of bounds");
        self.len = len;
    }

    /// Reserves capacity for at least `additional` more bytes to be inserted
    /// into the given `BytesMut`.
    ///
    /// More than `additional` bytes may be reserved in order to avoid frequent
    /// reallocations. A call to `reserve` may result in an allocation.
    ///
    /// Before allocating new buffer space, the function will attempt to reclaim
    /// space in the existing buffer. If the current handle references a small
    /// view in the original buffer and all other handles have been dropped,
    /// and the requested capacity is less than or equal to the existing
    /// buffer's capacity, then the current view will be copied to the front of
    /// the buffer and the handle will take ownership of the full buffer.
    ///
    /// # Examples
    ///
    /// In the following example, a new buffer is allocated.
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello"[..]);
    /// buf.reserve(64);
    /// assert!(buf.capacity() >= 69);
    /// ```
    ///
    /// In the following example, the existing buffer is reclaimed.
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(128);
    /// buf.put(&[0; 64][..]);
    ///
    /// let ptr = buf.as_ptr();
    /// let other = buf.split();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(buf.capacity(), 64);
    ///
    /// drop(other);
    /// buf.reserve(128);
    ///
    /// assert_eq!(buf.capacity(), 128);
    /// assert_eq!(buf.as_ptr(), ptr);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    #[inline]
    pub fn reserve(&mut self, additional: usize) {
        let len = self.len();
        let rem = self.capacity() - len;

        if additional <= rem {
            // The handle can already store at least `additional` more bytes, so
            // there is no further work needed to be done.
            return;
        }

        self.reserve_inner(additional);
    }

    // In a separate function to allow the short-circuits in `reserve` to
    // be inline-able. Significantly helps performance.
    fn reserve_inner(&mut self, additional: usize) {
        let len = self.len();
        let kind = self.kind();

        if kind == KIND_VEC {
            // If there's enough free space before the start of the buffer, then
            // just copy the data backwards and reuse the already-allocated
            // space.
            //
            // Otherwise, since backed by a vector, use `Vec::reserve`
            unsafe {
                let (off, prev) = self.get_vec_pos();

                // Only reuse space if we can satisfy the requested additional space.
                if self.capacity() - self.len() + off >= additional {
                    // There's space - reuse it
                    //
                    // Just move the pointer back to the start after copying
                    // data back.
                    let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
                    ptr::copy(self.ptr.as_ptr(), base_ptr, self.len);
                    self.ptr = vptr(base_ptr);
                    self.set_vec_pos(0, prev);

                    // Length stays constant, but since we moved backwards we
                    // can gain capacity back.
                    self.cap += off;
                } else {
                    // No space - allocate more
                    let mut v =
                        ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
                    v.reserve(additional);

                    // Update the info
                    self.ptr = vptr(v.as_mut_ptr().offset(off as isize));
                    self.len = v.len() - off;
                    self.cap = v.capacity() - off;
                }

                return;
            }
        }

        debug_assert_eq!(kind, KIND_ARC);
        let shared: *mut Shared = self.data as _;

        // Reserving involves abandoning the currently shared buffer and
        // allocating a new vector with the requested capacity.
        //
        // Compute the new capacity
        let mut new_cap = len.checked_add(additional).expect("overflow");

        let original_capacity;
        let original_capacity_repr;

        unsafe {
            original_capacity_repr = (*shared).original_capacity_repr;
            original_capacity = original_capacity_from_repr(original_capacity_repr);

            // First, try to reclaim the buffer. This is possible if the current
            // handle is the only outstanding handle pointing to the buffer.
            if (*shared).is_unique() {
                // This is the only handle to the buffer. It can be reclaimed.
                // However, before doing the work of copying data, check to make
                // sure that the vector has enough capacity.
                let v = &mut (*shared).vec;

                if v.capacity() >= new_cap {
                    // The capacity is sufficient, reclaim the buffer
                    let ptr = v.as_mut_ptr();

                    ptr::copy(self.ptr.as_ptr(), ptr, len);

                    self.ptr = vptr(ptr);
                    self.cap = v.capacity();

                    return;
                }

                // The vector capacity is not sufficient. The reserve request is
                // asking for more than the initial buffer capacity. Allocate more
                // than requested if `new_cap` is not much bigger than the current
                // capacity.
                //
                // In some situations (e.g. after `reserve_exact`), the buffer
                // capacity could be below `original_capacity`, so do a check.
                let double = v.capacity().checked_shl(1).unwrap_or(new_cap);

                new_cap = cmp::max(cmp::max(double, new_cap), original_capacity);
            } else {
                new_cap = cmp::max(new_cap, original_capacity);
            }
        }

        // Create a new vector to store the data
        let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));

        // Copy the bytes
        v.extend_from_slice(self.as_ref());

        // Release the shared handle. This must be done *after* the bytes are
        // copied.
        unsafe { release_shared(shared) };

        // Update self
        let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
        self.data = data as _;
        self.ptr = vptr(v.as_mut_ptr());
        self.len = v.len();
        self.cap = v.capacity();
    }

    /// Appends given bytes to this `BytesMut`.
    ///
    /// If this `BytesMut` object does not have enough capacity, it is resized
    /// first.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::with_capacity(0);
    /// buf.extend_from_slice(b"aaabbb");
    /// buf.extend_from_slice(b"cccddd");
    ///
    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
    /// ```
    pub fn extend_from_slice(&mut self, extend: &[u8]) {
        let cnt = extend.len();
        self.reserve(cnt);

        unsafe {
            let dst = self.uninit_slice();
            // Reserved above
            debug_assert!(dst.len() >= cnt);

            ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);
        }

        unsafe {
            self.advance_mut(cnt);
        }
    }

    /// Absorbs a `BytesMut` that was previously split off.
    ///
    /// If the two `BytesMut` objects were previously contiguous, i.e., if
    /// `other` was created by calling `split_off` on this `BytesMut`, then
    /// this is an `O(1)` operation that just decreases a reference
    /// count and sets a few indices. Otherwise this method degenerates to
    /// `self.extend_from_slice(other.as_ref())`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::with_capacity(64);
    /// buf.extend_from_slice(b"aaabbbcccddd");
    ///
    /// let split = buf.split_off(6);
    /// assert_eq!(b"aaabbb", &buf[..]);
    /// assert_eq!(b"cccddd", &split[..]);
    ///
    /// buf.unsplit(split);
    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
    /// ```
    pub fn unsplit(&mut self, other: BytesMut) {
        if self.is_empty() {
            *self = other;
            return;
        }

        if let Err(other) = self.try_unsplit(other) {
            self.extend_from_slice(other.as_ref());
        }
    }

    // private

    // For now, use a `Vec` to manage the memory for us, but we may want to
    // change that in the future to some alternate allocator strategy.
    //
    // Thus, we don't expose an easy way to construct from a `Vec` since an
    // internal change could make a simple pattern (`BytesMut::from(vec)`)
    // suddenly a lot more expensive.
    #[inline]
    pub(crate) fn from_vec(mut vec: Vec<u8>) -> BytesMut {
        let ptr = vptr(vec.as_mut_ptr());
        let len = vec.len();
        let cap = vec.capacity();
        mem::forget(vec);

        let original_capacity_repr = original_capacity_to_repr(cap);
        let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;

        BytesMut {
            ptr,
            len,
            cap,
            data: data as *mut _,
        }
    }

    #[inline]
    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
    }

    #[inline]
    fn as_slice_mut(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
    }

    unsafe fn set_start(&mut self, start: usize) {
        // Setting the start to 0 is a no-op, so return early if this is the
        // case.
        if start == 0 {
            return;
        }

        debug_assert!(start <= self.cap, "internal: set_start out of bounds");

        let kind = self.kind();

        if kind == KIND_VEC {
            // Setting the start when in vec representation is a little more
            // complicated. First, we have to track how far ahead the "start"
            // of the byte buffer is from the beginning of the vec. We also
            // have to ensure that we don't exceed the maximum shift.
            let (mut pos, prev) = self.get_vec_pos();
            pos += start;

            if pos <= MAX_VEC_POS {
                self.set_vec_pos(pos, prev);
            } else {
                // The repr must be upgraded to ARC. This will never happen
                // on 64 bit systems and will only happen on 32 bit systems
                // when shifting past 134,217,727 bytes. As such, we don't
                // worry too much about performance here.
                self.promote_to_shared(/*ref_count = */ 1);
            }
        }

        // Updating the start of the view is setting `ptr` to point to the
        // new start and updating the `len` field to reflect the new length
        // of the view.
        self.ptr = vptr(self.ptr.as_ptr().offset(start as isize));

        if self.len >= start {
            self.len -= start;
        } else {
            self.len = 0;
        }

        self.cap -= start;
    }

    unsafe fn set_end(&mut self, end: usize) {
        debug_assert_eq!(self.kind(), KIND_ARC);
        assert!(end <= self.cap, "set_end out of bounds");

        self.cap = end;
        self.len = cmp::min(self.len, end);
    }

    fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
        if other.capacity() == 0 {
            return Ok(());
        }

        let ptr = unsafe { self.ptr.as_ptr().offset(self.len as isize) };
        if ptr == other.ptr.as_ptr()
            && self.kind() == KIND_ARC
            && other.kind() == KIND_ARC
            && self.data == other.data
        {
            // Contiguous blocks, just combine directly
            self.len += other.len;
            self.cap += other.cap;
            Ok(())
        } else {
            Err(other)
        }
    }

    #[inline]
    fn kind(&self) -> usize {
        self.data as usize & KIND_MASK
    }

    unsafe fn promote_to_shared(&mut self, ref_cnt: usize) {
        debug_assert_eq!(self.kind(), KIND_VEC);
        debug_assert!(ref_cnt == 1 || ref_cnt == 2);

        let original_capacity_repr =
            (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;

        // The vec offset cannot be concurrently mutated, so there
        // should be no danger reading it.
        let off = (self.data as usize) >> VEC_POS_OFFSET;

        // First, allocate a new `Shared` instance containing the
        // `Vec` fields. It's important to note that `ptr`, `len`,
        // and `cap` cannot be mutated without having `&mut self`.
        // This means that these fields will not be concurrently
        // updated and since the buffer hasn't been promoted to an
        // `Arc`, those three fields still are the components of the
        // vector.
        let shared = Box::new(Shared {
            vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off),
            original_capacity_repr,
            ref_count: AtomicUsize::new(ref_cnt),
        });

        let shared = Box::into_raw(shared);

        // The pointer should be aligned, so this assert should
        // always succeed.
        debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);

        self.data = shared as _;
    }

    /// Makes an exact shallow clone of `self`.
    ///
    /// The kind of `self` doesn't matter, but this is unsafe because the
    /// clone will have the same offsets. You must ensure that the value
    /// returned to the user doesn't allow two views into the same range.
    #[inline]
    unsafe fn shallow_clone(&mut self) -> BytesMut {
        if self.kind() == KIND_ARC {
            increment_shared(self.data);
            ptr::read(self)
        } else {
            self.promote_to_shared(/*ref_count = */ 2);
            ptr::read(self)
        }
    }

    #[inline]
    unsafe fn get_vec_pos(&mut self) -> (usize, usize) {
        debug_assert_eq!(self.kind(), KIND_VEC);

        let prev = self.data as usize;
        (prev >> VEC_POS_OFFSET, prev)
    }

    #[inline]
    unsafe fn set_vec_pos(&mut self, pos: usize, prev: usize) {
        debug_assert_eq!(self.kind(), KIND_VEC);
        debug_assert!(pos <= MAX_VEC_POS);

        self.data = ((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)) as *mut _;
    }

    #[inline]
    fn uninit_slice(&mut self) -> &mut UninitSlice {
        unsafe {
            let ptr = self.ptr.as_ptr().offset(self.len as isize);
            let len = self.cap - self.len;

            UninitSlice::from_raw_parts_mut(ptr, len)
        }
    }
}

impl Drop for BytesMut {
    fn drop(&mut self) {
        let kind = self.kind();

        if kind == KIND_VEC {
            unsafe {
                let (off, _) = self.get_vec_pos();

                // Vector storage, free the vector
                let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
            }
        } else if kind == KIND_ARC {
            unsafe { release_shared(self.data as _) };
        }
    }
}

impl Buf for BytesMut {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.remaining(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.remaining(),
        );
        unsafe {
            self.set_start(cnt);
        }
    }

    fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
        self.split_to(len).freeze()
    }
}

unsafe impl BufMut for BytesMut {
    #[inline]
    fn remaining_mut(&self) -> usize {
        usize::MAX - self.len()
    }

    #[inline]
    unsafe fn advance_mut(&mut self, cnt: usize) {
        let new_len = self.len() + cnt;
        assert!(
            new_len <= self.cap,
            "new_len = {}; capacity = {}",
            new_len,
            self.cap
        );
        self.len = new_len;
    }

    #[inline]
    fn chunk_mut(&mut self) -> &mut UninitSlice {
        if self.capacity() == self.len() {
            self.reserve(64);
        }
        self.uninit_slice()
    }

    // Specialize these methods so they can skip checking `remaining_mut`
    // and `advance_mut`.

    fn put<T: crate::Buf>(&mut self, mut src: T)
    where
        Self: Sized,
    {
        while src.has_remaining() {
            let s = src.chunk();
            let l = s.len();
            self.extend_from_slice(s);
            src.advance(l);
        }
    }

    fn put_slice(&mut self, src: &[u8]) {
        self.extend_from_slice(src);
    }
}

impl AsRef<[u8]> for BytesMut {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl Deref for BytesMut {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_ref()
    }
}

impl AsMut<[u8]> for BytesMut {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.as_slice_mut()
    }
}

impl DerefMut for BytesMut {
    #[inline]
    fn deref_mut(&mut self) -> &mut [u8] {
        self.as_mut()
    }
}

impl<'a> From<&'a [u8]> for BytesMut {
    fn from(src: &'a [u8]) -> BytesMut {
        BytesMut::from_vec(src.to_vec())
    }
}

impl<'a> From<&'a str> for BytesMut {
    fn from(src: &'a str) -> BytesMut {
        BytesMut::from(src.as_bytes())
    }
}

impl From<BytesMut> for Bytes {
    fn from(src: BytesMut) -> Bytes {
        src.freeze()
    }
}

impl PartialEq for BytesMut {
    fn eq(&self, other: &BytesMut) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for BytesMut {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Ord for BytesMut {
    fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for BytesMut {}

impl Default for BytesMut {
    #[inline]
    fn default() -> BytesMut {
        BytesMut::new()
    }
}

impl hash::Hash for BytesMut {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        let s: &[u8] = self.as_ref();
        s.hash(state);
    }
}

impl Borrow<[u8]> for BytesMut {
    fn borrow(&self) -> &[u8] {
        self.as_ref()
    }
}

impl BorrowMut<[u8]> for BytesMut {
    fn borrow_mut(&mut self) -> &mut [u8] {
        self.as_mut()
    }
}

impl fmt::Write for BytesMut {
    #[inline]
    fn write_str(&mut self, s: &str) -> fmt::Result {
        if self.remaining_mut() >= s.len() {
            self.put_slice(s.as_bytes());
            Ok(())
        } else {
            Err(fmt::Error)
        }
    }

    #[inline]
    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
        fmt::write(self, args)
    }
}

impl Clone for BytesMut {
    fn clone(&self) -> BytesMut {
        BytesMut::from(&self[..])
    }
}

impl IntoIterator for BytesMut {
    type Item = u8;
    type IntoIter = IntoIter<BytesMut>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a BytesMut {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_ref().into_iter()
    }
}

impl Extend<u8> for BytesMut {
    fn extend<T>(&mut self, iter: T)
    where
        T: IntoIterator<Item = u8>,
    {
        let iter = iter.into_iter();

        let (lower, _) = iter.size_hint();
        self.reserve(lower);

        // TODO: optimize
        // 1. If self.kind() == KIND_VEC, use Vec::extend
        // 2. Make `reserve` inline-able
        for b in iter {
            self.reserve(1);
            self.put_u8(b);
        }
    }
}

impl<'a> Extend<&'a u8> for BytesMut {
    fn extend<T>(&mut self, iter: T)
    where
        T: IntoIterator<Item = &'a u8>,
    {
        self.extend(iter.into_iter().map(|b| *b))
    }
}

impl FromIterator<u8> for BytesMut {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        BytesMut::from_vec(Vec::from_iter(into_iter))
    }
}

impl<'a> FromIterator<&'a u8> for BytesMut {
    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
        BytesMut::from_iter(into_iter.into_iter().map(|b| *b))
    }
}

/*
 *
 * ===== Inner =====
 *
 */

unsafe fn increment_shared(ptr: *mut Shared) {
    let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);

    if old_size > isize::MAX as usize {
        crate::abort();
    }
}

unsafe fn release_shared(ptr: *mut Shared) {
    // `Shared` storage... follow the drop steps from Arc.
    if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    // This fence is needed to prevent reordering of use of the data and
    // deletion of the data. Because it is marked `Release`, the decreasing
    // of the reference count synchronizes with this `Acquire` fence. This
    // means that use of the data happens before decreasing the reference
    // count, which happens before this fence, which happens before the
    // deletion of the data.
    //
    // As explained in the [Boost documentation][1],
    //
    // > It is important to enforce any possible access to the object in one
    // > thread (through an existing reference) to *happen before* deleting
    // > the object in a different thread. This is achieved by a "release"
    // > operation after dropping a reference (any access to the object
    // > through this reference must obviously happened before), and an
    // > "acquire" operation before deleting the object.
    //
    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
    atomic::fence(Ordering::Acquire);

    // Drop the data
    drop(Box::from_raw(ptr));
}

impl Shared {
    fn is_unique(&self) -> bool {
        // The goal is to check if the current handle is the only handle
        // that currently has access to the buffer. This is done by
        // checking if the `ref_count` is currently 1.
        //
        // The `Acquire` ordering synchronizes with the `Release` as
        // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
        // operation guarantees that any mutations done in other threads
        // are ordered before the `ref_count` is decremented. As such,
        // this `Acquire` will guarantee that those mutations are
        // visible to the current thread.
        self.ref_count.load(Ordering::Acquire) == 1
    }
}

#[inline]
fn original_capacity_to_repr(cap: usize) -> usize {
    let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
    cmp::min(
        width,
        MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
    )
}

fn original_capacity_from_repr(repr: usize) -> usize {
    if repr == 0 {
        return 0;
    }

    1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
}
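
// A worked example of the round trip, which follows directly from the two
// functions above: a 4 KiB original capacity gives
// `original_capacity_to_repr(4096) == 3`, since `4096 >> 10 == 4` occupies
// 3 bits, and `original_capacity_from_repr(3) == 1 << (3 + 9) == 4096`.
// Capacities below 1 KiB map to repr `0` (treated as "no original capacity"),
// and anything at or above 64 KiB saturates at repr `7`.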

/*
#[test]
fn test_original_capacity_to_repr() {
    assert_eq!(original_capacity_to_repr(0), 0);

    let max_width = 32;

    for width in 1..(max_width + 1) {
        let cap = 1 << width - 1;

        let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
            0
        } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
            width - MIN_ORIGINAL_CAPACITY_WIDTH
        } else {
            MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
        };

        assert_eq!(original_capacity_to_repr(cap), expected);

        if width > 1 {
            assert_eq!(original_capacity_to_repr(cap + 1), expected);
        }

        // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
        if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
            assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
            assert_eq!(original_capacity_to_repr(cap + 76), expected);
        } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
            assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
            assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
        }
    }
}

#[test]
fn test_original_capacity_from_repr() {
    assert_eq!(0, original_capacity_from_repr(0));

    let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;

    assert_eq!(min_cap, original_capacity_from_repr(1));
    assert_eq!(min_cap * 2, original_capacity_from_repr(2));
    assert_eq!(min_cap * 4, original_capacity_from_repr(3));
    assert_eq!(min_cap * 8, original_capacity_from_repr(4));
    assert_eq!(min_cap * 16, original_capacity_from_repr(5));
    assert_eq!(min_cap * 32, original_capacity_from_repr(6));
    assert_eq!(min_cap * 64, original_capacity_from_repr(7));
}
*/

unsafe impl Send for BytesMut {}
unsafe impl Sync for BytesMut {}

/*
 *
 * ===== PartialEq / PartialOrd =====
 *
 */

impl PartialEq<[u8]> for BytesMut {
    fn eq(&self, other: &[u8]) -> bool {
        &**self == other
    }
}

impl PartialOrd<[u8]> for BytesMut {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other)
    }
}

impl PartialEq<BytesMut> for [u8] {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for [u8] {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for BytesMut {
    fn eq(&self, other: &str) -> bool {
        &**self == other.as_bytes()
    }
}

impl PartialOrd<str> for BytesMut {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other.as_bytes())
    }
}

impl PartialEq<BytesMut> for str {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for str {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for BytesMut {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == &other[..]
    }
}

impl PartialOrd<Vec<u8>> for BytesMut {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        (**self).partial_cmp(&other[..])
    }
}

impl PartialEq<BytesMut> for Vec<u8> {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for Vec<u8> {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<String> for BytesMut {
    fn eq(&self, other: &String) -> bool {
        *self == &other[..]
    }
}

impl PartialOrd<String> for BytesMut {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other.as_bytes())
    }
}

impl PartialEq<BytesMut> for String {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for String {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
where
    BytesMut: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
where
    BytesMut: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(*other)
    }
}

impl PartialEq<BytesMut> for &[u8] {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for &[u8] {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<BytesMut> for &str {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for &str {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<BytesMut> for Bytes {
    fn eq(&self, other: &BytesMut) -> bool {
        &other[..] == &self[..]
    }
}

impl PartialEq<Bytes> for BytesMut {
    fn eq(&self, other: &Bytes) -> bool {
        &other[..] == &self[..]
    }
}

#[inline]
fn vptr(ptr: *mut u8) -> NonNull<u8> {
    if cfg!(debug_assertions) {
        NonNull::new(ptr).expect("Vec pointer should be non-null")
    } else {
        unsafe { NonNull::new_unchecked(ptr) }
    }
}

unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
    let ptr = ptr.offset(-(off as isize));
    len += off;
    cap += off;

    Vec::from_raw_parts(ptr, len, cap)
}

// ===== impl SharedVtable =====

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_v_clone,
    drop: shared_v_drop,
};

unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed) as *mut Shared;
    increment_shared(shared);

    let data = AtomicPtr::new(shared as _);
    Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
}

unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(*shared as *mut Shared);
    });
}

// compile-fails

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split_to(6);
/// }
/// ```
fn _split_to_must_use() {}

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split_off(6);
/// }
/// ```
fn _split_off_must_use() {}

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split();
/// }
/// ```
fn _split_must_use() {}

// fuzz tests
#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::BytesMut;
    use crate::Bytes;

    #[test]
    fn bytes_mut_cloning_frozen() {
        loom::model(|| {
            let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze();
            let addr = a.as_ptr() as usize;

            // test the Bytes::clone is Sync by putting it in an Arc
            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}