use crate::{DrainChunks, IntoChunks};
use bytes::{Buf, BufMut, Bytes, BytesMut};
use std::collections::VecDeque;
use std::io::IoSlice;
use std::mem::MaybeUninit;

const DEFAULT_CHUNK_SIZE: usize = 4096;
const INITIAL_CHUNKS_CAPACITY: usize = 1;

/// A non-contiguous buffer for efficient serialization of data structures.
///
/// A `ChunkedBytes` container has a staging buffer to coalesce small byte
/// sequences of source data, and a queue of byte chunks split off the staging
/// buffer that can be incrementally consumed by an output API such as an
/// object implementing `AsyncWrite`. Once the number of bytes in the staging
/// buffer reaches a certain configured chunk size, the buffer content is
/// split off to form a new chunk.
///
/// Refer to the documentation on the methods available for `ChunkedBytes`,
/// including the methods of traits `Buf` and `BufMut`, for details on working
/// with this container.
#[derive(Debug)]
pub struct ChunkedBytes {
    staging: BytesMut,
    chunks: VecDeque<Bytes>,
    chunk_size: usize,
}

impl Default for ChunkedBytes {
    #[inline]
    fn default() -> Self {
        ChunkedBytes {
            staging: BytesMut::new(),
            chunks: VecDeque::with_capacity(INITIAL_CHUNKS_CAPACITY),
            chunk_size: DEFAULT_CHUNK_SIZE,
        }
    }
}

impl ChunkedBytes {
    /// Creates a new `ChunkedBytes` container with the preferred chunk size
    /// set to a default value.
    #[inline]
    pub fn new() -> Self {
        Default::default()
    }

    /// Creates a new `ChunkedBytes` container with the given chunk size
    /// to prefer.
    #[inline]
    pub fn with_preferred_chunk_size(chunk_size: usize) -> Self {
        ChunkedBytes {
            chunk_size,
            ..Default::default()
        }
    }

    /// Returns the size this `ChunkedBytes` container uses as the threshold
    /// for splitting off complete chunks.
    ///
    /// Note that the size of produced chunks may be larger than the
    /// configured value due to the allocation strategy used internally by
    /// the implementation. Chunks may also be smaller than the threshold if
    /// writing with `BufMut` methods has been mixed with use of the
    /// `push_chunk` method, or the `flush` method has been called directly.
    #[inline]
    pub fn preferred_chunk_size(&self) -> usize {
        self.chunk_size
    }

    /// Returns true if the `ChunkedBytes` container has no complete chunks
    /// and the staging buffer is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.chunks.is_empty() && self.staging.is_empty()
    }

    /// Splits any bytes that are currently in the staging buffer into a new
    /// complete chunk. If the staging buffer is empty, this method does
    /// nothing.
    ///
    /// Most users should not need to call this method. It is called
    /// internally when needed by the methods that advance the writing
    /// position.
    #[inline]
    pub fn flush(&mut self) {
        if !self.staging.is_empty() {
            let bytes = self.staging.split().freeze();
            self.chunks.push_back(bytes)
        }
    }

    /// Appends a `Bytes` slice to the container without copying the data.
    ///
    /// If there are any bytes currently in the staging buffer, they are split
    /// to form a complete chunk. Next, the given slice is appended as the
    /// next chunk.
    #[inline]
    pub fn push_chunk(&mut self, chunk: Bytes) {
        if !chunk.is_empty() {
            self.flush();
            self.chunks.push_back(chunk);
        }
    }

    /// Returns an iterator that removes complete chunks from the
    /// `ChunkedBytes` container and yields the removed chunks as `Bytes`
    /// slice handles. This does not include bytes in the staging buffer.
    ///
    /// The chunks are removed even if the iterator is dropped without being
    /// consumed until the end. It is unspecified how many chunks are removed
    /// if the `DrainChunks` value is not dropped, but the borrow it holds
    /// expires (e.g. due to `std::mem::forget`).
    #[inline]
    pub fn drain_chunks(&mut self) -> DrainChunks<'_> {
        DrainChunks::new(self.chunks.drain(..))
    }

    /// Consumes the `ChunkedBytes` container to produce an iterator over
    /// its chunks. If there are bytes in the staging buffer, they are yielded
    /// as the last chunk.
    ///
    /// The memory allocated for `IntoChunks` may be slightly more than the
    /// `ChunkedBytes` container it consumes. This is an infrequent side effect
    /// of making the internal state efficient in general for iteration.
    #[inline]
    pub fn into_chunks(mut self) -> IntoChunks {
        if !self.staging.is_empty() {
            self.chunks.push_back(self.staging.freeze());
        }
        IntoChunks::new(self.chunks.into_iter())
    }
}
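// A minimal sketch of the staging/queue interplay described above, driven
// through the bytes 0.5 `BufMut` API; the module and test names here are
// illustrative only.
#[cfg(test)]
mod push_chunk_sketch {
    use super::*;
    use bytes::{BufMut, Bytes};

    #[test]
    fn push_chunk_flushes_staged_bytes_first() {
        let mut buf = ChunkedBytes::with_preferred_chunk_size(8);

        // Small writes stay in the staging buffer: no complete chunk yet,
        // but the container is no longer empty.
        buf.put_slice(b"head");
        assert!(!buf.is_empty());

        // Appending a `Bytes` handle first splits the staged bytes into
        // their own chunk, then enqueues the pushed chunk without copying.
        buf.push_chunk(Bytes::from_static(b"payload"));

        let chunks: Vec<Bytes> = buf.drain_chunks().collect();
        assert_eq!(
            chunks,
            vec![Bytes::from_static(b"head"), Bytes::from_static(b"payload")]
        );
        assert!(buf.is_empty());
    }
}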
impl BufMut for ChunkedBytes {
    #[inline]
    fn remaining_mut(&self) -> usize {
        self.staging.remaining_mut()
    }

    /// Advances the writing position in the staging buffer.
    ///
    /// If the number of bytes accumulated in the staging buffer reaches
    /// or exceeds the preferred chunk size, the bytes are split off
    /// to form a new complete chunk.
    ///
    /// # Panics
    ///
    /// This function may panic if `cnt > self.remaining_mut()`.
    #[inline]
    unsafe fn advance_mut(&mut self, cnt: usize) {
        self.staging.advance_mut(cnt);
        if self.staging.len() >= self.chunk_size {
            self.flush();
        }
    }

    /// Returns a mutable slice of unwritten bytes available in
    /// the staging buffer, starting at the current writing position.
    ///
    /// The length of the slice may be larger than the preferred chunk
    /// size due to the allocation strategy used internally by
    /// the implementation.
    #[inline]
    fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
        if self.staging.len() == self.staging.capacity() {
            self.flush();
            self.staging.reserve(self.chunk_size);
        }
        self.staging.bytes_mut()
    }
}
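// A small sketch of the splitting behaviour documented on `advance_mut`:
// once writes driven through the provided `put_slice` method accumulate at
// least the preferred chunk size, the staged bytes become a complete chunk.
// Exact chunk boundaries depend on the allocation strategy, so only counts
// and totals are asserted; the module name is illustrative only.
#[cfg(test)]
mod staging_split_sketch {
    use super::*;
    use bytes::{Buf, BufMut};

    #[test]
    fn staging_splits_at_preferred_chunk_size() {
        let mut buf = ChunkedBytes::with_preferred_chunk_size(4);

        // Fewer bytes than the preferred chunk size: no complete chunk yet.
        buf.put_slice(b"abc");
        assert_eq!(buf.drain_chunks().count(), 0);
        assert_eq!(buf.remaining(), 3);

        // Crossing the threshold splits the staged bytes off into at least
        // one complete chunk; all five written bytes remain readable.
        buf.put_slice(b"de");
        assert_eq!(buf.remaining(), 5);
        assert!(buf.drain_chunks().count() >= 1);
    }
}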
impl Buf for ChunkedBytes {
    fn remaining(&self) -> usize {
        self.chunks
            .iter()
            .fold(self.staging.len(), |sum, chunk| sum + chunk.len())
    }

    #[inline]
    fn has_remaining(&self) -> bool {
        !self.is_empty()
    }

    /// Returns a slice of the bytes in the first extant complete chunk,
    /// or the bytes in the staging buffer if there are no unconsumed chunks.
    ///
    /// It is more efficient to use `bytes_vectored` to gather all the disjoint
    /// slices for vectored output, as is done in many specialized
    /// implementations of the `AsyncWrite::poll_write_buf` method.
    #[inline]
    fn bytes(&self) -> &[u8] {
        if let Some(chunk) = self.chunks.front() {
            chunk
        } else {
            &self.staging
        }
    }

    /// Advances the reading position by `cnt`, dropping the `Bytes` references
    /// to any complete chunks that the position has been advanced past
    /// and then advancing the starting position of the first remaining chunk.
    /// If there are no complete chunks left, the reading position is advanced
    /// in the staging buffer, effectively removing the consumed bytes.
    ///
    /// # Panics
    ///
    /// This function may panic when `cnt > self.remaining()`.
    fn advance(&mut self, mut cnt: usize) {
        loop {
            match self.chunks.front_mut() {
                None => {
                    self.staging.advance(cnt);
                    return;
                }
                Some(chunk) => {
                    let len = chunk.len();
                    if cnt < len {
                        chunk.advance(cnt);
                        return;
                    } else {
                        cnt -= len;
                        self.chunks.pop_front();
                    }
                }
            }
        }
    }

    /// Fills `dst` sequentially with the slice views of the chunks, then
    /// the bytes in the staging buffer if any remain and there is
    /// another unfilled entry left in `dst`. Returns the number of `IoSlice`
    /// entries filled.
    fn bytes_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
        let n = {
            let zipped = dst.iter_mut().zip(self.chunks.iter());
            let len = zipped.len();
            for (io_slice, chunk) in zipped {
                *io_slice = IoSlice::new(chunk);
            }
            len
        };

        if n < dst.len() && !self.staging.is_empty() {
            dst[n] = IoSlice::new(&self.staging);
            n + 1
        } else {
            n
        }
    }

    fn to_bytes(&mut self) -> Bytes {
        match self.chunks.pop_front() {
            None => self.staging.split().freeze(),
            Some(chunk) => {
                if self.is_empty() {
                    return chunk;
                }
                let cap = chunk.len() + self.remaining();
                let mut buf = BytesMut::with_capacity(cap);
                buf.put(chunk);
                while let Some(chunk) = self.chunks.pop_front() {
                    buf.put(chunk);
                }
                buf.put(self.staging.split());
                buf.freeze()
            }
        }
    }
}
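// A sketch of gathering the disjoint slices for vectored output through
// `bytes_vectored`: complete chunks are filled in first, then the staging
// buffer if it is non-empty and `dst` still has room. The module name is
// illustrative only.
#[cfg(test)]
mod vectored_output_sketch {
    use super::*;
    use bytes::{Buf, BufMut, Bytes};
    use std::io::IoSlice;

    #[test]
    fn gathers_chunks_then_staging() {
        let mut buf = ChunkedBytes::with_preferred_chunk_size(16);
        buf.push_chunk(Bytes::from_static(b"first"));
        buf.push_chunk(Bytes::from_static(b"second"));
        buf.put_slice(b"staged");

        // Three slices are expected: two complete chunks and the staging
        // buffer, leaving the fourth entry untouched.
        let mut iovs = vec![IoSlice::new(&[]); 4];
        let filled = buf.bytes_vectored(&mut iovs);
        assert_eq!(filled, 3);
        assert_eq!(&iovs[0][..], &b"first"[..]);
        assert_eq!(&iovs[1][..], &b"second"[..]);
        assert_eq!(&iovs[2][..], &b"staged"[..]);
    }
}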