1use std::{
2 fmt::Debug,
3 mem::{ManuallyDrop, MaybeUninit},
4 ops::{Deref, DerefMut},
5};
6
/// Identifies which allocation strategy backs a [`Buffer`], so that
/// `Drop` and `Buffer::into_vec` release the memory through the matching
/// mechanism.
#[derive(Copy, Clone)]
pub(crate) enum Allocator {
    /// Heap memory obtained from `Vec` (the global allocator).
    Default,
    /// Memory obtained via `mmap` on a file descriptor (zero-copy path).
    #[cfg(any(target_os = "linux", target_os = "android"))]
    Mmap,
}
13
/// An owned, fixed-capacity byte buffer whose backing memory is either a
/// `Vec` allocation or (on Linux/Android) a shared `mmap` region.
///
/// Invariant: only the first `len` bytes of the allocation are
/// initialized; `Deref`/`DerefMut` expose exactly that prefix.
pub struct Buffer {
    // Base of the allocation. Valid for `capacity` bytes; the first `len`
    // bytes are initialized.
    pub(crate) ptr: *mut u8,

    // Number of initialized bytes (the readable length). Always <= capacity.
    pub(crate) len: u32,

    // Length the caller asked for; may differ from `len` (bytes actually
    // written so far) and from `capacity` (bytes allocated).
    pub(crate) requested_len: u32,

    // Total size of the allocation in bytes.
    pub(crate) capacity: u32,

    // How `ptr` was obtained — determines how `Drop` releases it.
    pub(crate) allocator: Allocator,
}
46
impl Buffer {
    /// Allocates an uninitialized buffer able to hold `requested_len` bytes.
    ///
    /// The buffer starts logically empty (`len == 0`); use the `extend_*`
    /// methods to append data.
    ///
    /// # Panics
    /// Panics if `requested_len` (or the resulting capacity) does not fit
    /// in a `u32`.
    #[inline]
    pub fn new(requested_len: usize) -> Self {
        let len_u32 = requested_len.try_into().expect("length overflow");
        // ManuallyDrop: ownership of the allocation is transferred to the
        // Buffer; `Drop`/`into_vec` reconstruct the Vec to release it.
        let mut vec = ManuallyDrop::new(Vec::with_capacity(requested_len));
        Buffer {
            ptr: vec.as_mut_ptr(),
            len: 0,
            requested_len: len_u32,
            // Vec may over-allocate, so record its actual capacity — Drop
            // must pass the exact capacity back to Vec::from_raw_parts.
            capacity: vec.capacity().try_into().expect("capacity overflow"),
            allocator: Allocator::Default,
        }
    }

    /// Maps `len` bytes of `fd` into memory as a shared, read/write buffer.
    ///
    /// The buffer starts logically empty (`len == 0`); the mapping is
    /// released by `Drop` via `munmap`.
    ///
    /// # Errors
    /// Returns the `mmap` errno on failure.
    ///
    /// # Panics
    /// Panics if `len` does not fit in a `u32`.
    #[cfg(any(target_os = "linux", target_os = "android"))]
    pub(crate) fn mmap(
        fd: &std::os::unix::prelude::OwnedFd,
        len: usize,
    ) -> Result<Buffer, rustix::io::Errno> {
        let len_u32 = len.try_into().expect("length overflow");

        // SAFETY: a null hint lets the kernel choose the placement; the
        // mapping is unmapped in Drop with the same length (`capacity`).
        let ptr = unsafe {
            rustix::mm::mmap(
                std::ptr::null_mut(),
                len,
                rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
                rustix::mm::MapFlags::SHARED,
                fd,
                0,
            )
        }?;

        Ok(Buffer {
            ptr: ptr as *mut u8,
            len: 0,
            requested_len: len_u32,
            capacity: len_u32,
            allocator: Allocator::Mmap,
        })
    }

    /// Number of initialized bytes.
    #[inline]
    pub fn len(&self) -> usize {
        self.len as usize
    }

    /// Whether no bytes have been written yet.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Length originally requested by the caller (may differ from `len`).
    #[inline]
    pub fn requested_len(&self) -> usize {
        self.requested_len as usize
    }

    /// Overrides the requested length.
    ///
    /// # Panics
    /// Panics if `len` exceeds the buffer's capacity or does not fit in a
    /// `u32`.
    #[inline]
    pub fn set_requested_len(&mut self, len: usize) {
        assert!(len <= self.capacity as usize, "length exceeds capacity");
        self.requested_len = len.try_into().expect("requested_len overflow");
    }

    /// Total size of the allocation in bytes.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.capacity as usize
    }

    /// Number of bytes that can still be appended without exceeding
    /// capacity.
    #[inline]
    pub fn remaining_capacity(&self) -> usize {
        self.capacity() - self.len()
    }

    /// Resets the buffer to empty without touching the allocation.
    #[inline]
    pub fn clear(&mut self) {
        self.len = 0;
    }

    /// Appends `len` copies of `value` and returns a mutable slice over
    /// the newly written region.
    ///
    /// # Panics
    /// Panics if `len` exceeds the remaining capacity.
    pub fn extend_fill(&mut self, len: usize, value: u8) -> &mut [u8] {
        assert!(len <= self.remaining_capacity(), "length exceeds capacity");
        // SAFETY: the assert above guarantees the write ends within the
        // allocation's `capacity` bytes.
        unsafe {
            std::ptr::write_bytes(self.ptr.add(self.len()), value, len);
        }
        // Cast cannot truncate: len <= remaining_capacity <= u32::MAX.
        self.len += len as u32;
        // SAFETY: the `len` bytes ending at the new length were just
        // initialized by write_bytes above.
        unsafe { std::slice::from_raw_parts_mut(self.ptr.add(self.len() - len), len) }
    }

    /// Appends the contents of `slice`.
    ///
    /// # Panics
    /// Panics if `slice.len()` exceeds the remaining capacity.
    pub fn extend_from_slice(&mut self, slice: &[u8]) {
        assert!(
            slice.len() <= self.remaining_capacity(),
            "length exceeds capacity"
        );
        // SAFETY: the assert bounds the copy within the allocation, and
        // `slice` cannot overlap our uniquely owned buffer.
        unsafe {
            std::ptr::copy_nonoverlapping(slice.as_ptr(), self.ptr.add(self.len()), slice.len());
        }
        // Cast cannot truncate: slice.len() <= remaining_capacity <= u32::MAX.
        self.len += slice.len() as u32;
    }

    /// Whether this buffer avoids a copy on the read path (i.e. it is not
    /// backed by a plain heap allocation).
    pub fn is_zero_copy(&self) -> bool {
        !matches!(self.allocator, Allocator::Default)
    }

    /// Consumes the buffer and returns its initialized bytes as a `Vec`.
    ///
    /// For the default allocator this hands the allocation over without
    /// copying; an mmap-backed buffer is copied.
    pub fn into_vec(self) -> Vec<u8> {
        match self.allocator {
            Allocator::Default => {
                // Prevent Drop from freeing the allocation we hand over.
                let buf = ManuallyDrop::new(self);
                // SAFETY: ptr/len/capacity originate from a Vec<u8> (see
                // `new` and the From impls) and are passed back unchanged.
                unsafe { Vec::from_raw_parts(buf.ptr, buf.len as usize, buf.capacity as usize) }
            }
            // On targets without Mmap the arm above is exhaustive.
            #[allow(unreachable_patterns)]
            _ => self[..].to_vec(),
        }
    }
}
203
// SAFETY: `Buffer` uniquely owns the memory behind `ptr`, and all access
// from other threads goes through `&`/`&mut` borrows, so the usual
// aliasing rules apply. NOTE(review): the mmap path uses MAP_SHARED, so an
// external writer to the same fd could race with readers — assumed to be
// controlled by the caller; confirm.
unsafe impl Send for Buffer {}
unsafe impl Sync for Buffer {}
206
207impl From<Vec<u8>> for Buffer {
211 fn from(vec: Vec<u8>) -> Self {
212 let mut vec = ManuallyDrop::new(vec);
213 Buffer {
214 ptr: vec.as_mut_ptr(),
215 len: vec.len().try_into().expect("len overflow"),
216 requested_len: vec.len().try_into().expect("len overflow"),
217 capacity: vec.capacity().try_into().expect("capacity overflow"),
218 allocator: Allocator::Default,
219 }
220 }
221}
222
223impl From<&[u8]> for Buffer {
226 fn from(slice: &[u8]) -> Self {
227 Self::from(slice.to_vec())
228 }
229}
230
231impl<const N: usize> From<[u8; N]> for Buffer {
234 fn from(array: [u8; N]) -> Self {
235 Self::from(array.to_vec())
236 }
237}
238
/// Takes over the allocation of a `Vec<MaybeUninit<u8>>` without copying.
///
/// The buffer starts logically empty (`len == 0`) because the vector's
/// contents may be uninitialized; the vector's length is retained only as
/// `requested_len`.
impl From<Vec<MaybeUninit<u8>>> for Buffer {
    fn from(vec: Vec<MaybeUninit<u8>>) -> Self {
        // Suppress the Vec's destructor; the Buffer's Drop frees the
        // allocation instead.
        let mut vec = ManuallyDrop::new(vec);
        Buffer {
            // MaybeUninit<u8> has the same layout as u8, so casting the
            // pointer is sound; bytes become readable only once written.
            ptr: vec.as_mut_ptr().cast(),
            len: 0,
            requested_len: vec.len().try_into().expect("len overflow"),
            capacity: vec.capacity().try_into().expect("capacity overflow"),
            allocator: Allocator::Default,
        }
    }
}
254
impl Deref for Buffer {
    type Target = [u8];

    /// Borrows the initialized prefix (`len` bytes) as a byte slice.
    fn deref(&self) -> &[u8] {
        // SAFETY: the first `len` bytes are always initialized — `len`
        // only grows via the `extend_*` methods, which write the bytes
        // before advancing it.
        unsafe { std::slice::from_raw_parts(self.ptr, self.len as usize) }
    }
}
262
impl DerefMut for Buffer {
    /// Mutably borrows the initialized prefix (`len` bytes).
    fn deref_mut(&mut self) -> &mut [u8] {
        // SAFETY: the first `len` bytes are initialized, and `&mut self`
        // guarantees exclusive access to the allocation.
        unsafe { std::slice::from_raw_parts_mut(self.ptr, self.len as usize) }
    }
}
268
269impl Debug for Buffer {
270 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
271 f.debug_struct("Buffer")
272 .field("len", &self.len)
273 .field("requested_len", &self.requested_len)
274 .field("data", &format_args!("{:02x?}", &self[..]))
275 .finish()
276 }
277}
278
impl Drop for Buffer {
    // Releases the backing memory through whichever allocator produced it.
    fn drop(&mut self) {
        match self.allocator {
            Allocator::Default => unsafe {
                // SAFETY: ptr/len/capacity originate from a Vec<u8>
                // allocation this Buffer owns exclusively (see `new` and
                // the From impls); len <= capacity is an invariant.
                drop(Vec::from_raw_parts(
                    self.ptr,
                    self.len as usize,
                    self.capacity as usize,
                ));
            },
            #[cfg(any(target_os = "linux", target_os = "android"))]
            Allocator::Mmap => unsafe {
                // SAFETY: ptr/capacity are exactly the address and length
                // returned by the mmap call in `Buffer::mmap`.
                // NOTE(review): munmap fails only on invalid arguments, so
                // this unwrap should be unreachable — but panicking inside
                // drop aborts if already unwinding; confirm acceptable.
                rustix::mm::munmap(self.ptr as *mut _, self.capacity as usize).unwrap();
            },
        }
    }
}