chunked_bytes/loosely.rs
//! Buffer with a loose adherence to the preferred chunk size.

use super::chunked::Inner;
use crate::{DrainChunks, IntoChunks};

use bytes::buf::{Buf, BufMut, UninitSlice};
use bytes::Bytes;

use std::fmt;
use std::io::IoSlice;

/// A non-contiguous buffer for efficient serialization of data structures.
///
/// A `ChunkedBytes` container has a staging buffer to coalesce small byte
/// sequences of source data, and a queue of byte chunks split off from the
/// staging buffer that can be incrementally consumed by an output API such
/// as an object implementing `AsyncWrite`. Once the number of bytes in the
/// staging buffer reaches the preferred chunk size, the buffer content is
/// split off to form a new chunk.
///
/// This variant of the container does not enforce an upper limit on the size
/// of contiguous chunks, being optimized for performance. If your application
/// needs the sizes of the produced chunks to be capped,
/// use `strictly::ChunkedBytes` instead.
///
/// Refer to the documentation on the methods available for `ChunkedBytes`,
/// including the methods of the traits `Buf` and `BufMut`, for details on
/// working with this container.
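///
/// # Examples
///
/// A minimal usage sketch: data is written through the `BufMut` interface and
/// read back through `Buf`. The `chunked_bytes::loosely` import path and the
/// small chunk size hint are assumptions made for illustration only.
///
/// ```
/// use bytes::{Buf, BufMut};
/// use chunked_bytes::loosely::ChunkedBytes;
///
/// let mut buf = ChunkedBytes::with_chunk_size_hint(8);
/// buf.put_slice(b"hello, ");
/// buf.put_slice(b"world");
///
/// assert_eq!(buf.remaining(), 12);
/// let len = buf.remaining();
/// let bytes = buf.copy_to_bytes(len);
/// assert_eq!(&bytes[..], &b"hello, world"[..]);
/// ```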
#[derive(Debug, Default)]
pub struct ChunkedBytes {
    inner: Inner,
}

impl ChunkedBytes {
    /// Creates a new `ChunkedBytes` container with the preferred chunk size
    /// set to a default value.
    #[inline]
    pub fn new() -> Self {
        Default::default()
    }

    /// Creates a new `ChunkedBytes` container with the given chunk size
    /// to prefer.
    #[inline]
    pub fn with_chunk_size_hint(chunk_size: usize) -> Self {
        ChunkedBytes {
            inner: Inner::with_chunk_size(chunk_size),
        }
    }

    /// The fully detailed constructor for `ChunkedBytes`.
    /// The preferred chunk size is given in `chunk_size`. An upper estimate
    /// of the number of chunks this container is expected to hold at any
    /// moment in time should be given in `chunking_capacity`.
    /// More chunks can still be held, but this may cause reallocations of
    /// internal data structures.
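    ///
    /// # Examples
    ///
    /// A hypothetical profile for a producer expected to hold around 16
    /// chunks of roughly 4 KiB each; the numbers are illustrative only, and
    /// the `chunked_bytes::loosely` import path is assumed.
    ///
    /// ```
    /// use chunked_bytes::loosely::ChunkedBytes;
    ///
    /// let buf = ChunkedBytes::with_profile(4096, 16);
    /// assert!(buf.is_empty());
    /// ```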
    #[inline]
    pub fn with_profile(chunk_size: usize, chunking_capacity: usize) -> Self {
        ChunkedBytes {
            inner: Inner::with_profile(chunk_size, chunking_capacity),
        }
    }

    /// Returns the size this `ChunkedBytes` container uses as the threshold
    /// for splitting off complete chunks.
    ///
    /// Note that the size of produced chunks may be larger or smaller than the
    /// configured value, due to the allocation strategy used internally by
    /// the implementation and also depending on the pattern of usage.
    #[inline]
    pub fn chunk_size_hint(&self) -> usize {
        self.inner.chunk_size()
    }

    /// Returns true if the `ChunkedBytes` container has no complete chunks
    /// and the staging buffer is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    #[cfg(test)]
    pub fn staging_capacity(&self) -> usize {
        self.inner.staging_capacity()
    }

    /// Splits any bytes that are currently in the staging buffer into a new
    /// complete chunk.
    /// If the staging buffer is empty, this method does nothing.
    ///
    /// Most users should not need to call this method. It is called
    /// internally when needed by the methods that advance the writing
    /// position.
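    ///
    /// # Examples
    ///
    /// A small sketch (the `chunked_bytes::loosely` path is assumed) showing
    /// that staged bytes only become a complete chunk after a flush:
    ///
    /// ```
    /// use bytes::BufMut;
    /// use chunked_bytes::loosely::ChunkedBytes;
    ///
    /// let mut buf = ChunkedBytes::new();
    /// buf.put_slice(b"abc");
    /// // The bytes are still in the staging buffer, so no chunk is drained.
    /// assert_eq!(buf.drain_chunks().count(), 0);
    /// buf.flush();
    /// // Now the staged bytes form a complete chunk.
    /// assert_eq!(buf.drain_chunks().count(), 1);
    /// ```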
    #[inline]
    pub fn flush(&mut self) {
        self.inner.flush()
    }

    /// Appends a `Bytes` slice to the container without copying the data.
    ///
    /// If `chunk` is empty, this method does nothing. Otherwise,
    /// if there are any bytes currently in the staging buffer, they are split
    /// off to form a complete chunk. Then the given slice is appended as the
    /// next chunk.
    ///
    /// # Performance Notes
    ///
    /// For a small slice originating from a buffer that is not split
    /// or shared between other `Bytes` instances, copying the bytes with
    /// `BufMut::put_slice` may be cheaper than the atomic reference counting
    /// induced by this method.
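    ///
    /// # Examples
    ///
    /// A sketch (the `chunked_bytes::loosely` path is assumed) of mixing
    /// copied bytes with a shared `Bytes` slice; the staged bytes are flushed
    /// into their own chunk before the slice is appended:
    ///
    /// ```
    /// use bytes::{BufMut, Bytes};
    /// use chunked_bytes::loosely::ChunkedBytes;
    ///
    /// let mut buf = ChunkedBytes::new();
    /// buf.put_slice(b"header: ");
    /// buf.put_bytes(Bytes::from_static(b"payload"));
    ///
    /// let chunks: Vec<Bytes> = buf.drain_chunks().collect();
    /// assert_eq!(chunks.len(), 2);
    /// assert_eq!(&chunks[0][..], &b"header: "[..]);
    /// assert_eq!(&chunks[1][..], &b"payload"[..]);
    /// ```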
    #[inline]
    pub fn put_bytes(&mut self, chunk: Bytes) {
        if !chunk.is_empty() {
            self.flush();
            self.inner.push_chunk(chunk);
        }
    }

    /// Returns an iterator that removes complete chunks from the
    /// `ChunkedBytes` container and yields the removed chunks as `Bytes`
    /// slice handles. This does not include bytes in the staging buffer.
    ///
    /// The chunks are removed even if the iterator is dropped without being
    /// consumed until the end. It is unspecified how many chunks are removed
    /// if the `DrainChunks` value is not dropped, but the borrow it holds
    /// expires (e.g. due to `std::mem::forget`).
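    ///
    /// # Examples
    ///
    /// A sketch (the `chunked_bytes::loosely` path is assumed) of draining
    /// the complete chunks while the staged bytes stay in the container:
    ///
    /// ```
    /// use bytes::{Buf, BufMut, Bytes};
    /// use chunked_bytes::loosely::ChunkedBytes;
    ///
    /// let mut buf = ChunkedBytes::new();
    /// buf.put_bytes(Bytes::from_static(b"chunked"));
    /// buf.put_slice(b"staged");
    ///
    /// let drained: Vec<Bytes> = buf.drain_chunks().collect();
    /// assert_eq!(drained.len(), 1);
    /// // The staged bytes remain available for reading or further writing.
    /// assert_eq!(buf.remaining(), "staged".len());
    /// ```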
    #[inline]
    pub fn drain_chunks(&mut self) -> DrainChunks<'_> {
        self.inner.drain_chunks()
    }

    /// Consumes the `ChunkedBytes` container to produce an iterator over
    /// its chunks. If there are bytes in the staging buffer, they are yielded
    /// as the last chunk.
    ///
    /// The memory allocated for `IntoChunks` may be slightly more than the
    /// `ChunkedBytes` container it consumes. This is an infrequent side effect
    /// of making the internal state efficient in general for iteration.
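    ///
    /// # Examples
    ///
    /// A sketch (the `chunked_bytes::loosely` path is assumed) showing that,
    /// unlike `drain_chunks`, the consuming iterator also yields the staged
    /// bytes as the last chunk:
    ///
    /// ```
    /// use bytes::{BufMut, Bytes};
    /// use chunked_bytes::loosely::ChunkedBytes;
    ///
    /// let mut buf = ChunkedBytes::new();
    /// buf.put_bytes(Bytes::from_static(b"complete chunk"));
    /// buf.put_slice(b"staged bytes");
    ///
    /// let chunks: Vec<Bytes> = buf.into_chunks().collect();
    /// assert_eq!(chunks.len(), 2);
    /// assert_eq!(&chunks[1][..], &b"staged bytes"[..]);
    /// ```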
    #[inline]
    pub fn into_chunks(self) -> IntoChunks {
        self.inner.into_chunks()
    }
}

unsafe impl BufMut for ChunkedBytes {
    #[inline]
    fn remaining_mut(&self) -> usize {
        self.inner.remaining_mut()
    }

    #[inline]
    unsafe fn advance_mut(&mut self, cnt: usize) {
        self.inner.advance_mut(cnt);
    }

    /// Returns a mutable slice of unwritten bytes available in
    /// the staging buffer, starting at the current writing position.
    ///
    /// The length of the slice may be larger than the preferred chunk
    /// size due to the allocation strategy used internally by
    /// the implementation.
    #[inline]
    fn chunk_mut(&mut self) -> &mut UninitSlice {
        if self.inner.staging_len() == self.inner.staging_capacity() {
            self.inner.reserve_staging();
        }
        self.inner.chunk_mut()
    }
}

impl Buf for ChunkedBytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.inner.remaining()
    }

    #[inline]
    fn has_remaining(&self) -> bool {
        !self.is_empty()
    }

    /// Returns a slice of the bytes in the first extant complete chunk,
    /// or the bytes in the staging buffer if there are no unconsumed chunks.
    ///
    /// It is more efficient to use `chunks_vectored` to gather all the
    /// disjoint slices for vectored output.
    #[inline]
    fn chunk(&self) -> &[u8] {
        self.inner.chunk()
    }

    /// Advances the reading position by `cnt`, dropping the `Bytes` references
    /// to any complete chunks that the position has been advanced past
    /// and then advancing the starting position of the first remaining chunk.
    /// If there are no complete chunks left, the reading position is advanced
    /// in the staging buffer, effectively removing the consumed bytes.
    ///
    /// # Panics
    ///
    /// This function may panic when `cnt > self.remaining()`.
    #[inline]
    fn advance(&mut self, cnt: usize) {
        let _ = self.inner.advance(cnt);
    }

    /// Fills `dst` sequentially with the slice views of the chunks, then
    /// the bytes in the staging buffer if any remain and there is
    /// another unfilled entry left in `dst`. Returns the number of `IoSlice`
    /// entries filled.
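    ///
    /// # Examples
    ///
    /// A sketch (the `chunked_bytes::loosely` path is assumed) of gathering
    /// the disjoint slices for a vectored write:
    ///
    /// ```
    /// use bytes::{Buf, BufMut, Bytes};
    /// use chunked_bytes::loosely::ChunkedBytes;
    /// use std::io::IoSlice;
    ///
    /// let mut buf = ChunkedBytes::new();
    /// buf.put_bytes(Bytes::from_static(b"first"));
    /// buf.put_slice(b"second");
    ///
    /// let mut slices = [IoSlice::new(&[]); 4];
    /// let filled = buf.chunks_vectored(&mut slices);
    /// assert_eq!(filled, 2);
    /// assert_eq!(&slices[0][..], &b"first"[..]);
    /// assert_eq!(&slices[1][..], &b"second"[..]);
    /// ```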
    #[inline]
    fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
        self.inner.chunks_vectored(dst)
    }

    #[inline]
    fn copy_to_bytes(&mut self, len: usize) -> Bytes {
        self.inner.copy_to_bytes(len)
    }
}

impl fmt::Write for ChunkedBytes {
    #[inline]
    fn write_str(&mut self, s: &str) -> fmt::Result {
        if self.remaining_mut() >= s.len() {
            self.put_slice(s.as_bytes());
            Ok(())
        } else {
            Err(fmt::Error)
        }
    }

    // The default implementation delegates to
    // `fmt::write(&mut self as &mut dyn fmt::Write, args)`, which adds an
    // extra level of reference indirection; passing `self` directly here
    // avoids that.
    #[inline]
    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
        fmt::write(self, args)
    }
}