use core::iter::FromIterator;
use core::ops::{Deref, RangeBounds};
use core::{cmp, fmt, hash, mem, ptr, slice, usize};

use alloc::{borrow::Borrow, boxed::Box, string::String, vec::Vec};

use crate::buf::IntoIter;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use crate::Buf;

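/// A cheaply cloneable and sliceable chunk of contiguous memory.
///
/// `Bytes` keeps a pointer and length into a shared buffer; cloning and
/// slicing never copy the underlying bytes, they only adjust the view and,
/// when needed, a reference count managed through the `vtable`.
///
/// A minimal usage sketch (the published crate name `bytes` is assumed here):
///
/// ```
/// use bytes::Bytes;
///
/// let mut mem = Bytes::from(&b"Hello world"[..]);
/// let a = mem.slice(0..5);
///
/// assert_eq!(a, "Hello");
///
/// let b = mem.split_to(6);
///
/// assert_eq!(mem, "world");
/// assert_eq!(b, "Hello ");
/// ```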
pub struct Bytes {
    ptr: *const u8,
    len: usize,
    data: AtomicPtr<()>,
    vtable: &'static Vtable,
}

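/// Storage-specific behavior for a `Bytes` handle: how to clone it and how to
/// drop it, for the static-slice, promotable-`Vec`, and ref-counted `Shared`
/// representations.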
pub(crate) struct Vtable {
    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}

impl Bytes {
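    /// Creates a new empty `Bytes`.
    ///
    /// This does not allocate; the returned handle points at an empty static
    /// slice.
    ///
    /// A small sketch of intended usage (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert_eq!(&b[..], b"");
    /// ```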
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn new() -> Bytes {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    #[cfg(all(loom, test))]
    pub fn new() -> Bytes {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

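    /// Creates a new `Bytes` from a static slice.
    ///
    /// The returned `Bytes` borrows the slice for `'static`, so no allocation
    /// or reference counting is involved.
    ///
    /// A small sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from_static(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```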
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn from_static(bytes: &'static [u8]) -> Bytes {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    #[cfg(all(loom, test))]
    pub fn from_static(bytes: &'static [u8]) -> Bytes {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

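    /// Returns the number of bytes contained in this `Bytes`.
    ///
    /// A small sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from(&b"hello"[..]);
    /// assert_eq!(b.len(), 5);
    /// ```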
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

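    /// Returns true if the `Bytes` has a length of 0.
    ///
    /// A small sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert!(b.is_empty());
    /// ```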
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

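    /// Creates a `Bytes` instance by copying the given slice into newly
    /// allocated storage.
    ///
    /// A small sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::copy_from_slice(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```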
    pub fn copy_from_slice(data: &[u8]) -> Self {
        data.to_vec().into()
    }

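    /// Returns a slice of self for the provided range.
    ///
    /// This is an `O(1)` operation that does not copy the underlying bytes:
    /// the returned `Bytes` is another handle onto the same storage with an
    /// adjusted pointer and length.
    ///
    /// A small sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(&b"hello world"[..]);
    /// let b = a.slice(2..5);
    ///
    /// assert_eq!(&b[..], b"llo");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the range starts after it ends or extends past `self.len()`.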
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Bytes {
        use core::ops::Bound;

        let len = self.len();

        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n + 1,
            Bound::Unbounded => 0,
        };

        let end = match range.end_bound() {
            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };

        assert!(
            begin <= end,
            "range start must not be greater than end: {:?} <= {:?}",
            begin,
            end,
        );
        assert!(
            end <= len,
            "range end out of bounds: {:?} <= {:?}",
            end,
            len,
        );

        if end == begin {
            return Bytes::new();
        }

        let mut ret = self.clone();

        ret.len = end - begin;
        ret.ptr = unsafe { ret.ptr.offset(begin as isize) };

        ret
    }

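    /// Returns a `Bytes` covering the given `subset`, which must be a slice
    /// actually borrowed from this `Bytes` (pointer-wise), not merely one with
    /// equal contents.
    ///
    /// A small sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let bytes = Bytes::from(&b"012345678"[..]);
    /// let as_slice = bytes.as_ref();
    /// let subset = &as_slice[2..6];
    /// let subslice = bytes.slice_ref(subset);
    /// assert_eq!(&subslice[..], b"2345");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `subset` does not lie within the memory of `self`.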
    pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
        if subset.is_empty() {
            return Bytes::new();
        }

        let bytes_p = self.as_ptr() as usize;
        let bytes_len = self.len();

        let sub_p = subset.as_ptr() as usize;
        let sub_len = subset.len();

        assert!(
            sub_p >= bytes_p,
            "subset pointer ({:p}) is smaller than self pointer ({:p})",
            sub_p as *const u8,
            bytes_p as *const u8,
        );
        assert!(
            sub_p + sub_len <= bytes_p + bytes_len,
            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
            bytes_p as *const u8,
            bytes_len,
            sub_p as *const u8,
            sub_len,
        );

        let sub_offset = sub_p - bytes_p;

        self.slice(sub_offset..(sub_offset + sub_len))
    }

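    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)` and the returned `Bytes`
    /// contains elements `[at, len)`. This is an `O(1)` operation that does
    /// not copy the data.
    ///
    /// A small sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_off(5);
    ///
    /// assert_eq!(&a[..], b"hello");
    /// assert_eq!(&b[..], b" world");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.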
    #[must_use = "consider Bytes::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> Bytes {
        assert!(
            at <= self.len(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        if at == self.len() {
            return Bytes::new();
        }

        if at == 0 {
            return mem::replace(self, Bytes::new());
        }

        let mut ret = self.clone();

        self.len = at;

        unsafe { ret.inc_start(at) };

        ret
    }

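    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)` and the returned
    /// `Bytes` contains elements `[0, at)`. This is an `O(1)` operation that
    /// does not copy the data.
    ///
    /// A small sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_to(5);
    ///
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.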
    #[must_use = "consider Bytes::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> Bytes {
        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        if at == self.len() {
            return mem::replace(self, Bytes::new());
        }

        if at == 0 {
            return Bytes::new();
        }

        let mut ret = self.clone();

        unsafe { self.inc_start(at) };

        ret.len = at;
        ret
    }

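    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest. Has no effect if `len` is greater than the current length.
    ///
    /// A small sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// ```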
    #[inline]
    pub fn truncate(&mut self, len: usize) {
        if len < self.len {
            // The promotable (Vec-backed) representations recover the buffer
            // capacity from the pointer offset plus `len`, so we cannot simply
            // shrink `len` in place; `split_off` promotes to the shared
            // representation first.
            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
            {
                drop(self.split_off(len));
            } else {
                self.len = len;
            }
        }
    }

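    /// Clears the buffer, removing all data.
    ///
    /// A small sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.clear();
    /// assert!(buf.is_empty());
    /// ```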
    #[inline]
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    #[inline]
    pub(crate) unsafe fn with_vtable(
        ptr: *const u8,
        len: usize,
        data: AtomicPtr<()>,
        vtable: &'static Vtable,
    ) -> Bytes {
        Bytes {
            ptr,
            len,
            data,
            vtable,
        }
    }

    #[inline]
    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    #[inline]
    unsafe fn inc_start(&mut self, by: usize) {
        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
        self.len -= by;
        self.ptr = self.ptr.offset(by as isize);
    }
}

unsafe impl Send for Bytes {}
unsafe impl Sync for Bytes {}

impl Drop for Bytes {
    #[inline]
    fn drop(&mut self) {
        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
    }
}

impl Clone for Bytes {
    #[inline]
    fn clone(&self) -> Bytes {
        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
    }
}

impl Buf for Bytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.len(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.len(),
        );

        unsafe {
            self.inc_start(cnt);
        }
    }

    fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
        if len == self.remaining() {
            core::mem::replace(self, Bytes::new())
        } else {
            let ret = self.slice(..len);
            self.advance(len);
            ret
        }
    }
}

impl Deref for Bytes {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl AsRef<[u8]> for Bytes {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl hash::Hash for Bytes {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        self.as_slice().hash(state);
    }
}

impl Borrow<[u8]> for Bytes {
    fn borrow(&self) -> &[u8] {
        self.as_slice()
    }
}

impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = IntoIter<Bytes>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a Bytes {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().into_iter()
    }
}

impl FromIterator<u8> for Bytes {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        Vec::from_iter(into_iter).into()
    }
}

impl PartialEq for Bytes {
    fn eq(&self, other: &Bytes) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for Bytes {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Ord for Bytes {
    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for Bytes {}

impl PartialEq<[u8]> for Bytes {
    fn eq(&self, other: &[u8]) -> bool {
        self.as_slice() == other
    }
}

impl PartialOrd<[u8]> for Bytes {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other)
    }
}

impl PartialEq<Bytes> for [u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for [u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for Bytes {
    fn eq(&self, other: &str) -> bool {
        self.as_slice() == other.as_bytes()
    }
}

impl PartialOrd<str> for Bytes {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for Bytes {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == &other[..]
    }
}

impl PartialOrd<Vec<u8>> for Bytes {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(&other[..])
    }
}

impl PartialEq<Bytes> for Vec<u8> {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for Vec<u8> {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<String> for Bytes {
    fn eq(&self, other: &String) -> bool {
        *self == &other[..]
    }
}

impl PartialOrd<String> for Bytes {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for String {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for String {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Bytes> for &[u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &[u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<Bytes> for &str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
where
    Bytes: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
where
    Bytes: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(&**other)
    }
}

impl Default for Bytes {
    #[inline]
    fn default() -> Bytes {
        Bytes::new()
    }
}

impl From<&'static [u8]> for Bytes {
    fn from(slice: &'static [u8]) -> Bytes {
        Bytes::from_static(slice)
    }
}

impl From<&'static str> for Bytes {
    fn from(slice: &'static str) -> Bytes {
        Bytes::from_static(slice.as_bytes())
    }
}

impl From<Vec<u8>> for Bytes {
    fn from(vec: Vec<u8>) -> Bytes {
        if vec.is_empty() {
            return Bytes::new();
        }

        let slice = vec.into_boxed_slice();
        let len = slice.len();
        let ptr = Box::into_raw(slice) as *mut u8;

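        // If the buffer pointer is even, its low bit is free to carry the
        // KIND_VEC tag directly inside `data`. An odd pointer already has the
        // low bit set, so it is stored untagged and PROMOTABLE_ODD_VTABLE
        // knows not to mask it off.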
        if ptr as usize & 0x1 == 0 {
            let data = ptr as usize | KIND_VEC;
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(data as *mut _),
                vtable: &PROMOTABLE_EVEN_VTABLE,
            }
        } else {
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(ptr as *mut _),
                vtable: &PROMOTABLE_ODD_VTABLE,
            }
        }
    }
}

impl From<String> for Bytes {
    fn from(s: String) -> Bytes {
        Bytes::from(s.into_bytes())
    }
}

impl fmt::Debug for Vtable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Vtable")
            .field("clone", &(self.clone as *const ()))
            .field("drop", &(self.drop as *const ()))
            .finish()
    }
}

const STATIC_VTABLE: Vtable = Vtable {
    clone: static_clone,
    drop: static_drop,
};

unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let slice = slice::from_raw_parts(ptr, len);
    Bytes::from_static(slice)
}

unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
    // Nothing to drop for `&'static [u8]`-backed instances.
}

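// Vtables for `Bytes` created from a `Vec<u8>`. Such a handle starts out
// owning the boxed slice directly (KIND_VEC). On the first clone it is
// "promoted": the buffer moves into a reference-counted `Shared` allocation
// and `data` is switched to point at it (KIND_ARC). The EVEN variant is used
// when the buffer pointer's low bit is 0, so that bit can carry the KIND_VEC
// tag; the ODD variant stores the pointer untagged.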
static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
    clone: promotable_even_clone,
    drop: promotable_even_drop,
};

static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
    clone: promotable_odd_clone,
    drop: promotable_odd_drop,
};

unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared as _, ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        let buf = (shared as usize & !KIND_MASK) as *mut u8;
        shallow_clone_vec(data, shared, buf, ptr, len)
    }
}

unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared as *mut Shared);
        } else {
            debug_assert_eq!(kind, KIND_VEC);
            let buf = (shared as usize & !KIND_MASK) as *mut u8;
            drop(rebuild_boxed_slice(buf, ptr, len));
        }
    });
}

unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared as _, ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        shallow_clone_vec(data, shared, shared as *mut u8, ptr, len)
    }
}

unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared as *mut Shared);
        } else {
            debug_assert_eq!(kind, KIND_VEC);

            drop(rebuild_boxed_slice(shared as *mut u8, ptr, len));
        }
    });
}

unsafe fn rebuild_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) -> Box<[u8]> {
    let cap = (offset as usize - buf as usize) + len;
    Box::from_raw(slice::from_raw_parts_mut(buf, cap))
}

struct Shared {
    // Holds the buffer alive; dropping the `Shared` drops the `Vec`.
    _vec: Vec<u8>,
    // Number of `Bytes` handles referencing this allocation.
    ref_cnt: AtomicUsize,
}

// `Shared` must be at least 2-byte aligned so the low bit of a `*mut Shared`
// is always 0 and can double as the KIND_ARC/KIND_VEC tag.
const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_clone,
    drop: shared_drop,
};

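// The low bit of `data` encodes the storage kind: 0 (KIND_ARC) means `data`
// points at a `Shared`, 1 (KIND_VEC) means the handle still owns a plain
// boxed slice that has not been promoted yet.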
const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;

unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed);
    shallow_clone_arc(shared as _, ptr, len)
}

unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(*shared as *mut Shared);
    });
}

unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);

    if old_size > usize::MAX >> 1 {
        crate::abort();
    }

    Bytes {
        ptr,
        len,
        data: AtomicPtr::new(shared as _),
        vtable: &SHARED_VTABLE,
    }
}

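// First clone of a Vec-backed `Bytes`: move the boxed slice into a
// ref-counted `Shared` (the count starts at 2, covering the existing handle
// and the new clone) and publish it with a compare_exchange on `data`. If a
// concurrent clone won the race, the speculative `Shared` box is freed, its
// contents are forgotten so the buffer itself is not dropped, and the
// already-published `Shared` is used instead.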
#[cold]
unsafe fn shallow_clone_vec(
    atom: &AtomicPtr<()>,
    ptr: *const (),
    buf: *mut u8,
    offset: *const u8,
    len: usize,
) -> Bytes {
    let vec = rebuild_boxed_slice(buf, offset, len).into_vec();
    let shared = Box::new(Shared {
        _vec: vec,
        ref_cnt: AtomicUsize::new(2),
    });

    let shared = Box::into_raw(shared);

    debug_assert!(
        0 == (shared as usize & KIND_MASK),
        "internal: Box<Shared> should have an aligned pointer",
    );

    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
        Ok(actual) => {
            debug_assert!(actual as usize == ptr as usize);
            Bytes {
                ptr: offset,
                len,
                data: AtomicPtr::new(shared as _),
                vtable: &SHARED_VTABLE,
            }
        }
        Err(actual) => {
            let shared = Box::from_raw(shared);
            mem::forget(*shared);

            shallow_clone_arc(actual as _, offset, len)
        }
    }
}

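// Drops one reference to a `Shared`. The decrement uses `Release` so that
// earlier uses of the buffer happen-before the final deallocation; the last
// handle (the one that sees the count go from 1 to 0) issues an `Acquire`
// fence before freeing, pairing with those releases.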
unsafe fn release_shared(ptr: *mut Shared) {
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    atomic::fence(Ordering::Acquire);

    // This was the last handle: free the `Shared` and the buffer it owns.
    drop(Box::from_raw(ptr));
}

fn _split_to_must_use() {}

fn _split_off_must_use() {}

#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::Bytes;

    #[test]
    fn bytes_cloning_vec() {
        loom::model(|| {
            let a = Bytes::from(b"abcdefgh".to_vec());
            let addr = a.as_ptr() as usize;

            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}