ktls_core/utils.rs

//! Utilities

use std::fmt;
use std::mem::MaybeUninit;
use std::num::NonZeroUsize;

#[derive(Clone, Default)]
/// A simple buffer with a read offset.
pub struct Buffer {
    /// The inner buffer data.
    inner: Vec<u8>,

    /// The number of initialized but unfilled bytes in the inner buffer.
    unfilled_initialized: usize,

    /// Read offset of the buffer.
    offset: usize,
}

impl fmt::Debug for Buffer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Buffer")
            .field("len", &self.inner.len())
            .field("capacity", &self.inner.capacity())
            .field("unfilled_initialized", &self.unfilled_initialized)
            .field("offset", &self.offset)
            .finish()
    }
}

impl From<Vec<u8>> for Buffer {
    #[inline]
    fn from(buffer: Vec<u8>) -> Self {
        Self::new(buffer)
    }
}

impl Buffer {
    #[inline]
    #[must_use]
    /// Creates a new [`Buffer`] from the given byte vector.
    pub fn new(buffer: Vec<u8>) -> Self {
        Self {
            inner: buffer,
            unfilled_initialized: 0,
            offset: 0,
        }
    }

    #[must_use]
    /// Creates an empty [`Buffer`].
    pub const fn empty() -> Self {
        Self {
            inner: Vec::new(),
            unfilled_initialized: 0,
            offset: 0,
        }
    }

    #[track_caller]
    /// Reads the unread part of the buffer with the provided closure `f`, and
    /// advances the read offset by the number of bytes read.
    ///
    /// Returns the number of bytes read by `f`, or `None` if there was nothing
    /// to read or `f` read zero bytes.
    ///
    /// # Panics
    ///
    /// Panics if the closure reports reading more bytes than are available.
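    ///
    /// # Examples
    ///
    /// A minimal usage sketch (marked `ignore` so it is not compiled as a
    /// doctest; it assumes [`Buffer`] and `NonZeroUsize` are already in scope):
    ///
    /// ```ignore
    /// let mut buffer = Buffer::new(b"hello world".to_vec());
    ///
    /// // Read up to the first space; `read` reports how many bytes the
    /// // closure consumed and advances the offset accordingly.
    /// let n = buffer.read(|unread| unread.iter().take_while(|&&b| b != b' ').count());
    /// assert_eq!(n.map(NonZeroUsize::get), Some(5));
    /// assert_eq!(buffer.unread(), b" world");
    ///
    /// // A closure that reads nothing leaves the offset unchanged.
    /// assert_eq!(buffer.read(|_| 0), None);
    /// ```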
    pub fn read<F>(&mut self, f: F) -> Option<NonZeroUsize>
    where
        F: FnOnce(&[u8]) -> usize,
    {
        if self.inner.is_empty() {
            // Empty buffer, nothing to read.

            return None;
        }

        let Some((_, unread)) = self.inner.split_at_checked(self.offset) else {
            unreachable!(
                "The offset is always within the buffer length, but it is not: offset = {}, len = \
                 {}",
                self.offset,
                self.inner.len()
            );
        };

        if unread.is_empty() {
            // All data has been read, reset the buffer.
            self.reset();

            return None;
        }

        let has_read = NonZeroUsize::new(f(unread));

        match has_read {
            Some(n) if n.get() <= unread.len() => {
                // Advance the read offset; the guard above ensures it stays
                // within the buffer length.
                self.offset = self.offset.saturating_add(n.get());
            }
            Some(n) => panic!(
                "The closure read more bytes than available: read = {}, available = {}",
                n,
                unread.len()
            ),
            None => {}
        }

        has_read
    }

    #[inline]
    #[must_use]
    /// Returns the unread part of the buffer as a byte slice.
    pub fn unread(&self) -> &[u8] {
        &self.inner[self.offset..]
    }

    #[inline]
    /// Drains the unread data, resets the buffer, and returns the drained
    /// bytes, or `None` if there is nothing unread.
    pub fn drain(&mut self) -> Option<Vec<u8>> {
        if self.unread().is_empty() {
            None
        } else {
            let drained = self.unread().to_vec();

            // Reset the buffer after draining.
            self.reset();

            Some(drained)
        }
    }

    #[inline]
    /// Reserves capacity for at least `additional` more bytes.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.inner.reserve(additional);
    }

    #[inline]
    /// Returns a mutable reference to the unfilled part of the buffer without
    /// ensuring that it has been fully initialized.
    ///
    /// Calling this resets the count of initialized but unfilled bytes to
    /// zero.
    pub(crate) fn unfilled_mut(&mut self) -> &mut [MaybeUninit<u8>] {
        // Handing out the spare capacity invalidates any previous
        // initialization bookkeeping.
        self.unfilled_initialized = 0;

        self.inner.spare_capacity_mut()
    }

    #[inline]
    /// Returns a reference to the unfilled but initialized part of the buffer.
    pub(crate) fn unfilled_initialized(&self) -> &[u8] {
        #[allow(unsafe_code)]
        // SAFETY: `unfilled_initialized` bytes immediately past the filled
        // length have been initialized (tracked by `assume_init_additional`)
        // and lie within the allocated capacity.
        unsafe {
            std::slice::from_raw_parts(
                self.inner.as_ptr().add(self.inner.len()),
                self.unfilled_initialized,
            )
        }
    }

    #[allow(unsafe_code)]
    #[inline]
    /// Marks an additional `cnt` bytes of the spare capacity of the inner
    /// buffer as initialized.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the next `cnt` bytes of the spare capacity
    /// (beyond the bytes already marked as initialized) have been initialized,
    /// and that `self.unfilled_initialized + cnt` does not exceed the length
    /// of the spare capacity.
    ///
    /// It is recommended to call this after acquiring the spare capacity via
    /// [`Self::unfilled_mut`] and writing to it, to record how many bytes were
    /// initialized.
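    ///
    /// # Examples
    ///
    /// A sketch of the intended call sequence (marked `ignore`, not compiled;
    /// the byte values are illustrative only):
    ///
    /// ```ignore
    /// let mut buffer = Buffer::empty();
    /// buffer.reserve(4);
    ///
    /// // Write into the spare capacity...
    /// let unfilled = buffer.unfilled_mut();
    /// unfilled[0].write(1);
    /// unfilled[1].write(2);
    ///
    /// // ...then record how many bytes were initialized and mark them as
    /// // filled.
    /// unsafe { buffer.assume_init_additional(2) };
    /// buffer.set_filled_all();
    /// assert_eq!(buffer.unread(), &[1, 2]);
    /// ```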
    pub(crate) unsafe fn assume_init_additional(&mut self, cnt: usize) {
        let unfilled_initialized = self.unfilled_initialized + cnt;

        debug_assert!(self.inner.len() + unfilled_initialized <= self.inner.capacity());

        self.unfilled_initialized = unfilled_initialized;
    }

    #[inline]
    /// Marks all initialized spare capacity as filled.
    pub(crate) fn set_filled_all(&mut self) {
        let initialized = self.inner.len() + self.unfilled_initialized;

        debug_assert!(initialized <= self.inner.capacity());

        #[allow(unsafe_code)]
        // SAFETY: The first `initialized` bytes of the allocation are
        // initialized (the filled length plus the tracked initialized spare
        // capacity), so the new length is valid.
        unsafe {
            self.inner.set_len(initialized);
        }

        // The previously initialized spare capacity is now part of the filled
        // region, so no initialized but unfilled bytes remain.
        self.unfilled_initialized = 0;
    }

    #[inline]
    /// Resets the buffer, clearing the inner data, releasing capacity in
    /// excess of 64 KiB, and resetting the read offset.
    fn reset(&mut self) {
        self.inner.truncate(0);
        // Release excess capacity, keeping around 64 KiB for reuse.
        self.inner.shrink_to(65536);
        self.offset = 0;
    }
}
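
// A small test module sketching how `Buffer` is expected to be used; it only
// exercises behaviour visible in this file, and the byte values are
// illustrative.
#[cfg(test)]
mod tests {
    use super::Buffer;

    #[test]
    fn read_then_drain_returns_only_unread_data() {
        let mut buffer = Buffer::new(b"hello world".to_vec());

        // Consume the first six bytes ("hello "), leaving "world" unread.
        assert!(buffer.read(|_| 6).is_some());
        assert_eq!(buffer.unread(), b"world");

        // Draining hands back only the unread part and resets the buffer.
        assert_eq!(buffer.drain(), Some(b"world".to_vec()));
        assert_eq!(buffer.drain(), None);
        assert!(buffer.unread().is_empty());
    }
}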