use core::mem::{self, ManuallyDrop};
use core::ops::{Deref, RangeBounds};
use core::ptr::NonNull;
use core::{cmp, fmt, hash, ptr, slice};

use alloc::{
    alloc::{dealloc, Layout},
    borrow::Borrow,
    boxed::Box,
    string::String,
    vec::Vec,
};

use crate::buf::IntoIter;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::{Buf, BytesMut};

pub struct Bytes {
    ptr: *const u8,
    len: usize,
    data: AtomicPtr<()>,
    vtable: &'static Vtable,
}

pub(crate) struct Vtable {
    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
    pub into_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
    pub into_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut,
    pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool,
    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}

impl Bytes {
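    /// Creates a new empty `Bytes`.
    ///
    /// This does not allocate; the handle points at a static empty slice.
    /// Illustrative sketch (assuming the enclosing crate is published as
    /// `bytes`):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert!(b.is_empty());
    /// ```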
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn new() -> Self {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    #[cfg(all(loom, test))]
    pub fn new() -> Self {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

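    /// Creates a `Bytes` from a slice with `'static` lifetime.
    ///
    /// No copying or allocation is performed; only the pointer and length of
    /// the static slice are recorded. Illustrative sketch:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from_static(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```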
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    #[cfg(all(loom, test))]
    pub fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    fn new_empty_with_ptr(ptr: *const u8) -> Self {
        debug_assert!(!ptr.is_null());

        let ptr = without_provenance(ptr as usize);

        Bytes {
            ptr,
            len: 0,
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

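    /// Creates a `Bytes` from an owner that can be viewed as a byte slice.
    ///
    /// The owner is boxed next to an atomic reference count and kept alive
    /// until the last handle is dropped. Illustrative sketch (the `Vec<u8>`
    /// owner here is only an example of a type satisfying the bounds):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let owner: Vec<u8> = b"hello".to_vec();
    /// let b = Bytes::from_owner(owner);
    /// assert_eq!(&b[..], b"hello");
    /// ```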
    pub fn from_owner<T>(owner: T) -> Self
    where
        T: AsRef<[u8]> + Send + 'static,
    {
        let owned = Box::into_raw(Box::new(Owned {
            ref_cnt: AtomicUsize::new(1),
            owner,
        }));

        let mut ret = Bytes {
            ptr: NonNull::dangling().as_ptr(),
            len: 0,
            data: AtomicPtr::new(owned.cast()),
            vtable: &Owned::<T>::VTABLE,
        };

        let buf = unsafe { &*owned }.owner.as_ref();
        ret.ptr = buf.as_ptr();
        ret.len = buf.len();

        ret
    }

    #[inline]
    pub const fn len(&self) -> usize {
        self.len
    }

    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

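    /// Returns `true` when this handle is the only one referring to the
    /// underlying storage.
    ///
    /// Note that the static and `from_owner` vtables always report `false`, so
    /// a `false` result does not necessarily mean another clone exists.
    /// Illustrative sketch:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(b"abc".to_vec());
    /// assert!(a.is_unique());
    /// let b = a.clone();
    /// assert!(!a.is_unique());
    /// drop(b);
    /// assert!(a.is_unique());
    /// ```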
    pub fn is_unique(&self) -> bool {
        unsafe { (self.vtable.is_unique)(&self.data) }
    }

    pub fn copy_from_slice(data: &[u8]) -> Self {
        data.to_vec().into()
    }

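    /// Returns a `Bytes` covering the given subrange of `self`; the returned
    /// handle shares the underlying storage rather than copying it.
    ///
    /// Panics if the range is out of bounds. Illustrative sketch:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from_static(b"hello world");
    /// let b = a.slice(2..5);
    /// assert_eq!(&b[..], b"llo");
    /// ```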
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
        use core::ops::Bound;

        let len = self.len();

        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n.checked_add(1).expect("out of range"),
            Bound::Unbounded => 0,
        };

        let end = match range.end_bound() {
            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };

        assert!(
            begin <= end,
            "range start must not be greater than end: {:?} <= {:?}",
            begin,
            end,
        );
        assert!(
            end <= len,
            "range end out of bounds: {:?} <= {:?}",
            end,
            len,
        );

        if end == begin {
            return Bytes::new_empty_with_ptr(self.ptr.wrapping_add(begin));
        }

        let mut ret = self.clone();

        ret.len = end - begin;
        ret.ptr = unsafe { ret.ptr.add(begin) };

        ret
    }

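    /// Returns a `Bytes` corresponding to a subslice that was borrowed from
    /// `self`, recovering the offset through pointer arithmetic.
    ///
    /// Panics if `subset` does not point into `self`'s buffer. Illustrative
    /// sketch:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from_static(b"hello world");
    /// let sub: &[u8] = &a[2..5];
    /// let b = a.slice_ref(sub);
    /// assert_eq!(&b[..], b"llo");
    /// ```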
    pub fn slice_ref(&self, subset: &[u8]) -> Self {
        if subset.is_empty() {
            return Bytes::new();
        }

        let bytes_p = self.as_ptr() as usize;
        let bytes_len = self.len();

        let sub_p = subset.as_ptr() as usize;
        let sub_len = subset.len();

        assert!(
            sub_p >= bytes_p,
            "subset pointer ({:p}) is smaller than self pointer ({:p})",
            subset.as_ptr(),
            self.as_ptr(),
        );
        assert!(
            sub_p + sub_len <= bytes_p + bytes_len,
            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
            self.as_ptr(),
            bytes_len,
            subset.as_ptr(),
            sub_len,
        );

        let sub_offset = sub_p - bytes_p;

        self.slice(sub_offset..(sub_offset + sub_len))
    }

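    /// Splits the bytes into two at the given index, returning `[at, len)` and
    /// leaving `[0, at)` in `self`. Both halves share the same storage.
    ///
    /// Panics if `at > len`. Illustrative sketch:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from_static(b"hello world");
    /// let b = a.split_off(5);
    /// assert_eq!(&a[..], b"hello");
    /// assert_eq!(&b[..], b" world");
    /// ```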
    #[must_use = "consider Bytes::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> Self {
        if at == self.len() {
            return Bytes::new_empty_with_ptr(self.ptr.wrapping_add(at));
        }

        if at == 0 {
            return mem::replace(self, Bytes::new_empty_with_ptr(self.ptr));
        }

        assert!(
            at <= self.len(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        self.len = at;

        unsafe { ret.inc_start(at) };

        ret
    }

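    /// Splits the bytes into two at the given index, returning `[0, at)` and
    /// leaving `[at, len)` in `self`. Both halves share the same storage.
    ///
    /// Panics if `at > len`. Illustrative sketch:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from_static(b"hello world");
    /// let b = a.split_to(5);
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```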
    #[must_use = "consider Bytes::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> Self {
        if at == self.len() {
            let end_ptr = self.ptr.wrapping_add(at);
            return mem::replace(self, Bytes::new_empty_with_ptr(end_ptr));
        }

        if at == 0 {
            return Bytes::new_empty_with_ptr(self.ptr);
        }

        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        unsafe { self.inc_start(at) };

        ret.len = at;
        ret
    }

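    /// Shortens the buffer to `len` bytes, keeping the front; if `len` is
    /// greater than or equal to the current length this is a no-op.
    ///
    /// Illustrative sketch:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from_static(b"hello world");
    /// a.truncate(5);
    /// assert_eq!(&a[..], b"hello");
    /// ```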
    #[inline]
    pub fn truncate(&mut self, len: usize) {
        if len < self.len {
            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
            {
                drop(self.split_off(len));
            } else {
                self.len = len;
            }
        }
    }

    #[inline]
    pub fn clear(&mut self) {
        self.truncate(0);
    }

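    /// Attempts to convert `self` into a `BytesMut` without copying. This
    /// succeeds only when this handle uniquely owns the storage; otherwise the
    /// original `Bytes` is handed back in the `Err` variant. Illustrative
    /// sketch:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(b"abc".to_vec());
    /// let b = a.clone();
    /// let a = match a.try_into_mut() {
    ///     Ok(_) => unreachable!("`b` still shares the storage"),
    ///     Err(a) => a,
    /// };
    /// drop(b);
    /// let m = match a.try_into_mut() {
    ///     Ok(m) => m,
    ///     Err(_) => unreachable!(),
    /// };
    /// assert_eq!(&m[..], b"abc");
    /// ```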
    pub fn try_into_mut(self) -> Result<BytesMut, Bytes> {
        if self.is_unique() {
            Ok(self.into())
        } else {
            Err(self)
        }
    }

    #[inline]
    pub(crate) unsafe fn with_vtable(
        ptr: *const u8,
        len: usize,
        data: AtomicPtr<()>,
        vtable: &'static Vtable,
    ) -> Bytes {
        Bytes {
            ptr,
            len,
            data,
            vtable,
        }
    }

    #[inline]
    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    #[inline]
    unsafe fn inc_start(&mut self, by: usize) {
        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
        self.len -= by;
        self.ptr = self.ptr.add(by);
    }
}

unsafe impl Send for Bytes {}
unsafe impl Sync for Bytes {}

impl Drop for Bytes {
    #[inline]
    fn drop(&mut self) {
        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
    }
}

impl Clone for Bytes {
    #[inline]
    fn clone(&self) -> Bytes {
        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
    }
}

impl Buf for Bytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.len(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.len(),
        );

        unsafe {
            self.inc_start(cnt);
        }
    }

    fn copy_to_bytes(&mut self, len: usize) -> Self {
        self.split_to(len)
    }
}

impl Deref for Bytes {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl AsRef<[u8]> for Bytes {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl hash::Hash for Bytes {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        self.as_slice().hash(state);
    }
}

impl Borrow<[u8]> for Bytes {
    fn borrow(&self) -> &[u8] {
        self.as_slice()
    }
}

impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = IntoIter<Bytes>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a Bytes {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().iter()
    }
}

impl FromIterator<u8> for Bytes {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        Vec::from_iter(into_iter).into()
    }
}

impl PartialEq for Bytes {
    fn eq(&self, other: &Bytes) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for Bytes {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Bytes {
    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for Bytes {}

impl PartialEq<[u8]> for Bytes {
    fn eq(&self, other: &[u8]) -> bool {
        self.as_slice() == other
    }
}

impl PartialOrd<[u8]> for Bytes {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other)
    }
}

impl PartialEq<Bytes> for [u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for [u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for Bytes {
    fn eq(&self, other: &str) -> bool {
        self.as_slice() == other.as_bytes()
    }
}

impl PartialOrd<str> for Bytes {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for Bytes {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<Vec<u8>> for Bytes {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(&other[..])
    }
}

impl PartialEq<Bytes> for Vec<u8> {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for Vec<u8> {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<String> for Bytes {
    fn eq(&self, other: &String) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<String> for Bytes {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for String {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for String {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Bytes> for &[u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &[u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<Bytes> for &str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
where
    Bytes: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
where
    Bytes: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(&**other)
    }
}

impl Default for Bytes {
    #[inline]
    fn default() -> Bytes {
        Bytes::new()
    }
}

impl From<&'static [u8]> for Bytes {
    fn from(slice: &'static [u8]) -> Bytes {
        Bytes::from_static(slice)
    }
}

impl From<&'static str> for Bytes {
    fn from(slice: &'static str) -> Bytes {
        Bytes::from_static(slice.as_bytes())
    }
}

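// A vector with spare capacity is wrapped in a `Shared` up front so the
// capacity is recorded and can be handed back by `into_vec`/`into_mut`; a
// vector that is exactly full is converted to a boxed slice and uses the
// cheaper promotable representation instead.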
impl From<Vec<u8>> for Bytes {
    fn from(vec: Vec<u8>) -> Bytes {
        let mut vec = ManuallyDrop::new(vec);
        let ptr = vec.as_mut_ptr();
        let len = vec.len();
        let cap = vec.capacity();

        if len == cap {
            let vec = ManuallyDrop::into_inner(vec);
            return Bytes::from(vec.into_boxed_slice());
        }

        let shared = Box::new(Shared {
            buf: ptr,
            cap,
            ref_cnt: AtomicUsize::new(1),
        });

        let shared = Box::into_raw(shared);
        debug_assert!(
            0 == (shared as usize & KIND_MASK),
            "internal: Box<Shared> should have an aligned pointer",
        );
        Bytes {
            ptr,
            len,
            data: AtomicPtr::new(shared as _),
            vtable: &SHARED_VTABLE,
        }
    }
}

impl From<Box<[u8]>> for Bytes {
    fn from(slice: Box<[u8]>) -> Bytes {
        if slice.is_empty() {
            return Bytes::new();
        }

        let len = slice.len();
        let ptr = Box::into_raw(slice) as *mut u8;

        if ptr as usize & 0x1 == 0 {
            let data = ptr_map(ptr, |addr| addr | KIND_VEC);
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(data.cast()),
                vtable: &PROMOTABLE_EVEN_VTABLE,
            }
        } else {
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(ptr.cast()),
                vtable: &PROMOTABLE_ODD_VTABLE,
            }
        }
    }
}

impl From<Bytes> for BytesMut {
    fn from(bytes: Bytes) -> Self {
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.into_mut)(&bytes.data, bytes.ptr, bytes.len) }
    }
}

impl From<String> for Bytes {
    fn from(s: String) -> Bytes {
        Bytes::from(s.into_bytes())
    }
}

impl From<Bytes> for Vec<u8> {
    fn from(bytes: Bytes) -> Vec<u8> {
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.into_vec)(&bytes.data, bytes.ptr, bytes.len) }
    }
}

impl fmt::Debug for Vtable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Vtable")
            .field("clone", &(self.clone as *const ()))
            .field("drop", &(self.drop as *const ()))
            .finish()
    }
}

const STATIC_VTABLE: Vtable = Vtable {
    clone: static_clone,
    into_vec: static_to_vec,
    into_mut: static_to_mut,
    is_unique: static_is_unique,
    drop: static_drop,
};

unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let slice = slice::from_raw_parts(ptr, len);
    Bytes::from_static(slice)
}

unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let slice = slice::from_raw_parts(ptr, len);
    slice.to_vec()
}

unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    let slice = slice::from_raw_parts(ptr, len);
    BytesMut::from(slice)
}

fn static_is_unique(_: &AtomicPtr<()>) -> bool {
    false
}

unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {}

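// Backing storage for `Bytes::from_owner`: the owner lives in a single heap
// allocation next to an atomic reference count and is dropped once the count
// reaches zero.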
#[repr(C)]
struct Owned<T> {
    ref_cnt: AtomicUsize,
    owner: T,
}

impl<T> Owned<T> {
    const VTABLE: Vtable = Vtable {
        clone: owned_clone::<T>,
        into_vec: owned_to_vec::<T>,
        into_mut: owned_to_mut::<T>,
        is_unique: owned_is_unique,
        drop: owned_drop::<T>,
    };
}

unsafe fn owned_clone<T>(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let owned = data.load(Ordering::Relaxed);
    let old_cnt = (*owned.cast::<AtomicUsize>()).fetch_add(1, Ordering::Relaxed);
    if old_cnt > usize::MAX >> 1 {
        crate::abort();
    }

    Bytes {
        ptr,
        len,
        data: AtomicPtr::new(owned as _),
        vtable: &Owned::<T>::VTABLE,
    }
}

unsafe fn owned_to_vec<T>(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let slice = slice::from_raw_parts(ptr, len);
    let vec = slice.to_vec();
    owned_drop_impl::<T>(data.load(Ordering::Relaxed));
    vec
}

unsafe fn owned_to_mut<T>(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    BytesMut::from_vec(owned_to_vec::<T>(data, ptr, len))
}

unsafe fn owned_is_unique(_data: &AtomicPtr<()>) -> bool {
    false
}

unsafe fn owned_drop_impl<T>(owned: *mut ()) {
    {
        let ref_cnt = &*owned.cast::<AtomicUsize>();

        let old_cnt = ref_cnt.fetch_sub(1, Ordering::Release);
        debug_assert!(
            old_cnt > 0 && old_cnt <= usize::MAX >> 1,
            "expected non-zero refcount and no underflow"
        );
        if old_cnt != 1 {
            return;
        }
        ref_cnt.load(Ordering::Acquire);
    }

    drop(Box::<Owned<T>>::from_raw(owned.cast()));
}

unsafe fn owned_drop<T>(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    let owned = data.load(Ordering::Relaxed);
    owned_drop_impl::<T>(owned);
}

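// "Promotable" handles start out pointing at a plain boxed slice and are
// promoted to the `Shared` representation on first clone. The even/odd split
// exists because the kind tag lives in the pointer's lowest bit: an even
// buffer pointer can carry the tag inline, while an odd buffer pointer is
// stored untouched (its low bit already reads as `KIND_VEC`).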
static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
    clone: promotable_even_clone,
    into_vec: promotable_even_to_vec,
    into_mut: promotable_even_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_even_drop,
};

static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
    clone: promotable_odd_clone,
    into_vec: promotable_odd_to_vec,
    into_mut: promotable_odd_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_odd_drop,
};

unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
        shallow_clone_vec(data, shared, buf, ptr, len)
    }
}

unsafe fn promotable_to_vec(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> Vec<u8> {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_vec_impl(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);

        let cap = ptr.offset_from(buf) as usize + len;

        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    }
}

unsafe fn promotable_to_mut(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> BytesMut {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_mut_impl(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);
        let off = ptr.offset_from(buf) as usize;
        let cap = off + len;
        let v = Vec::from_raw_parts(buf, cap, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    }
}

unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| {
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
    })
}

unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    promotable_to_mut(data, ptr, len, |shared| {
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
    })
}

unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);
            let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
            free_boxed_slice(buf, ptr, len);
        }
    });
}

unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared as _, ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        shallow_clone_vec(data, shared, shared.cast(), ptr, len)
    }
}

unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| shared.cast())
}

unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    promotable_to_mut(data, ptr, len, |shared| shared.cast())
}

unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);

            free_boxed_slice(shared.cast(), ptr, len);
        }
    });
}

unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
        ref_cnt == 1
    } else {
        true
    }
}

unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
    let cap = offset.offset_from(buf) as usize + len;
    dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
}

struct Shared {
    buf: *mut u8,
    cap: usize,
    ref_cnt: AtomicUsize,
}

impl Drop for Shared {
    fn drop(&mut self) {
        unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
    }
}

// Compile-time check that `Shared` is at least 2-aligned, so the low bit of a
// `Box<Shared>` pointer is always free for the kind tag below.
const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_clone,
    into_vec: shared_to_vec,
    into_mut: shared_to_mut,
    is_unique: shared_is_unique,
    drop: shared_drop,
};

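// The lowest bit of the `data` pointer distinguishes the two states a
// promotable `Bytes` can be in: `KIND_VEC` (still a plain buffer pointer) or
// `KIND_ARC` (promoted to a `Shared` allocation, whose pointer is even as
// asserted above).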
const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;

unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed);
    shallow_clone_arc(shared as _, ptr, len)
}

unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
    if (*shared)
        .ref_cnt
        .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
        .is_ok()
    {
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    } else {
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        v
    }
}

unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}

unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut {
    if (*shared).ref_cnt.load(Ordering::Acquire) == 1 {
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        let off = ptr.offset_from(buf) as usize;
        let v = Vec::from_raw_parts(buf, len + off, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    } else {
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        BytesMut::from_vec(v)
    }
}

unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}

pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
    ref_cnt == 1
}

unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(shared.cast());
    });
}

unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);

    if old_size > usize::MAX >> 1 {
        crate::abort();
    }

    Bytes {
        ptr,
        len,
        data: AtomicPtr::new(shared as _),
        vtable: &SHARED_VTABLE,
    }
}

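// First clone of a promotable `Bytes`: allocate a `Shared` with a refcount of
// 2 (the existing handle plus the new one) and try to install it with a
// compare-exchange. If another thread promoted the buffer first, discard this
// allocation without freeing the buffer it points at and bump the winner's
// refcount instead.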
#[cold]
unsafe fn shallow_clone_vec(
    atom: &AtomicPtr<()>,
    ptr: *const (),
    buf: *mut u8,
    offset: *const u8,
    len: usize,
) -> Bytes {
    let shared = Box::new(Shared {
        buf,
        cap: offset.offset_from(buf) as usize + len,
        ref_cnt: AtomicUsize::new(2),
    });

    let shared = Box::into_raw(shared);

    debug_assert!(
        0 == (shared as usize & KIND_MASK),
        "internal: Box<Shared> should have an aligned pointer",
    );

    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
        Ok(actual) => {
            debug_assert!(core::ptr::eq(actual, ptr));
            Bytes {
                ptr: offset,
                len,
                data: AtomicPtr::new(shared as _),
                vtable: &SHARED_VTABLE,
            }
        }
        Err(actual) => {
            let shared = Box::from_raw(shared);
            mem::forget(*shared);

            shallow_clone_arc(actual as _, offset, len)
        }
    }
}

unsafe fn release_shared(ptr: *mut Shared) {
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    (*ptr).ref_cnt.load(Ordering::Acquire);

    drop(Box::from_raw(ptr));
}

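// `ptr_map` applies an address-level transformation to a pointer. Under Miri
// the result is derived from the original pointer with `wrapping_add`, keeping
// its provenance; otherwise the new address is simply cast back to a pointer.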
#[cfg(miri)]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    let diff = new_addr.wrapping_sub(old_addr);
    ptr.wrapping_add(diff)
}

#[cfg(not(miri))]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    new_addr as *mut u8
}

fn without_provenance(ptr: usize) -> *const u8 {
    core::ptr::null::<u8>().wrapping_add(ptr)
}

fn _split_to_must_use() {}

fn _split_off_must_use() {}

#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::Bytes;

    #[test]
    fn bytes_cloning_vec() {
        loom::model(|| {
            let a = Bytes::from(b"abcdefgh".to_vec());
            let addr = a.as_ptr() as usize;

            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}