bytesbuf/
buf.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use std::any::type_name;
use std::mem::{self, MaybeUninit};
use std::num::NonZero;

use smallvec::SmallVec;

use crate::mem::{Block, BlockSize, Memory};
use crate::{BytesBufWrite, BytesView, MAX_INLINE_SPANS, MemoryGuard, Span, SpanBuilder};

/// Assembles byte sequences, exposing them as [`BytesView`]s.
///
/// The buffer owns some memory capacity into which it allows you to write a sequence of bytes that
/// you can thereafter extract as one or more [`BytesView`]s over immutable data. Mutation of the
/// buffer contents is append-only - once data has been written into the buffer, it cannot be modified.
///
/// Capacity must be reserved in advance (e.g. via [`reserve()`]) before you can write data into the buffer.
/// The exception to this is appending an existing [`BytesView`] via [`put_bytes()`], because
/// appending a [`BytesView`] is a zero-copy operation that reuses the view's existing memory capacity.
///
/// # Memory capacity
///
/// A single `BytesBuf` can use memory capacity from any [memory provider], including a
/// mix of different memory providers. All methods that extend the memory capacity require the caller
/// to provide a reference to the memory provider to use.
///
/// To understand how to obtain access to a memory provider, see [Producing Byte Sequences].
///
/// When data is extracted from the buffer by consuming it (via [`consume()`] or [`consume_all()`]),
/// ownership of the used memory capacity is transferred to the returned [`BytesView`]. Any leftover
/// memory capacity remains in the buffer, ready to receive further writes.
///
/// # Conceptual design
///
/// The memory capacity owned by a `BytesBuf` can be viewed as two regions:
///
/// * Filled memory - data has been written into this memory but this data has not yet been consumed as a
///   [`BytesView`]. Nevertheless, this data may already be in use because it may have been exposed via
///   [`peek()`], which does not consume it from the buffer. Memory capacity is removed from this region
///   when bytes are consumed from the buffer.
/// * Available memory - no data has been written into this memory. Calling any of the write methods on
///   `BytesBuf` will write data to the start of this region and transfer the affected capacity to the
///   filled memory region.
///
/// Existing [`BytesView`]s can be appended to the `BytesBuf` via [`put_bytes()`] without
/// consuming capacity, as each appended [`BytesView`] brings its own backing memory capacity.
#[doc = include_str!("../doc/snippets/sequence_memory_layout.md")]
///
/// # Example
///
/// ```
/// # use bytesbuf::mem::{GlobalPool, Memory};
/// use bytesbuf::BytesBuf;
///
/// const HEADER_MAGIC: &[u8] = b"HDR\x00";
///
/// # let memory = GlobalPool::new();
/// let mut buf = memory.reserve(64);
///
/// // Build a message from various pieces.
/// buf.put_slice(HEADER_MAGIC);
/// buf.put_num_be(1_u16); // Version
/// buf.put_num_be(42_u32); // Payload length
/// buf.put_num_be(0xDEAD_BEEF_u64); // Checksum
///
/// // Consume the buffered data as an immutable BytesView.
/// let message = buf.consume_all();
/// assert_eq!(message.len(), 18);
/// ```
///
/// [memory provider]: crate::mem::Memory
/// [`reserve()`]: Self::reserve
/// [`put_bytes()`]: Self::put_bytes
/// [`consume()`]: Self::consume
/// [`consume_all()`]: Self::consume_all
/// [`peek()`]: Self::peek
/// [Producing Byte Sequences]: crate#producing-byte-sequences
#[derive(Default)]
pub struct BytesBuf {
    // The frozen spans are at the front of the sequence being built and have already become
    // immutable (or already arrived in that form). They will be consumed first.
    //
    // Optimization: we might get slightly better performance by using a stack-preferring queue
    // here instead. No suitable crate was found at the time of writing; we may need to invent one.
    frozen_spans: SmallVec<[Span; MAX_INLINE_SPANS]>,

    // The span builders contain "potential spans" that have not yet been materialized/frozen.
    // The first item may be partially filled with data, with the others being spare capacity.
    //
    // Exception: a vectored write may write to any number of span builders concurrently, but when
    // the vectored write is committed we immediately restore the above situation (with only
    // the first span builder potentially containing data).
    //
    // When the capacity of a span builder is exhausted, we transform any data in it into a span
    // and move it to `frozen_spans`.
    //
    // Partially filled span builders may be split into a span and a builder over the remaining
    // memory. This happens on demand when the buffer needs to emit data from part of
    // a span builder's memory region.
    //
    // Note that we do not require the span builders to be of the same capacity.
    //
    // We store the span builders in reverse order - the logically first span builder (which we
    // may have partially filled with content) is the last one in this collection.
    //
    // Optimization: we might get slightly better performance by using a stack-preferring queue
    // here instead. No suitable crate was found at the time of writing; we may need to invent one.
    span_builders_reversed: SmallVec<[SpanBuilder; MAX_INLINE_SPANS]>,

    /// Length of the filled memory in this buffer.
    ///
    /// We cache this to avoid recalculating it every time we need this information.
    len: usize,

    /// Length of the data contained in the frozen spans. The total `len` is this plus whatever
    /// may be partially filled in the (logically) first span builder.
    ///
    /// We cache this to avoid recalculating it every time we need this information.
    frozen: usize,

    /// Available capacity that can accept additional data into it.
    /// The total capacity is `len + available`.
    ///
    /// We cache this to avoid recalculating it every time we need this information.
    available: usize,
}

impl BytesBuf {
    /// Creates an instance without any memory capacity.
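    ///
    /// # Example
    ///
    /// ```
    /// use bytesbuf::BytesBuf;
    ///
    /// let buf = BytesBuf::new();
    /// assert_eq!(buf.capacity(), 0);
    /// assert!(buf.is_empty());
    /// ```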
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Creates an instance that owns the provided memory blocks.
    ///
    /// This is the API used by memory providers to issue rented memory capacity to callers.
    /// Unless you are implementing a memory provider, you will not need to call this function.
    /// Instead, use either [`Memory::reserve()`] or [`BytesBuf::reserve()`].
    ///
    /// # Blocks are unordered
    ///
    /// There is no guarantee that the `BytesBuf` uses the blocks in the order provided to
    /// this function. Blocks may be used in any order.
    ///
    /// [`Memory::reserve()`]: Memory::reserve
    /// [`BytesBuf::reserve()`]: Self::reserve
    #[must_use]
    pub fn from_blocks<I>(blocks: I) -> Self
    where
        I: IntoIterator<Item = Block>,
    {
        Self::from_span_builders(blocks.into_iter().map(Block::into_span_builder))
    }

    pub(crate) fn from_span_builders<I>(span_builders: I) -> Self
    where
        I: IntoIterator<Item = SpanBuilder>,
    {
        let span_builders: SmallVec<[SpanBuilder; MAX_INLINE_SPANS]> = span_builders.into_iter().collect();

        let available = span_builders.iter().map(SpanBuilder::remaining_capacity).sum();

        Self {
            frozen_spans: SmallVec::new_const(),
            // We do not expect the order in which we use the span builders to matter,
            // so we do not reverse them here before storing.
            span_builders_reversed: span_builders,
            len: 0,
            frozen: 0,
            available,
        }
    }

    /// Adds enough memory capacity to accommodate at least `additional_bytes` of content.
    ///
    /// After this call, [`remaining_capacity()`] will be at least `additional_bytes`.
    ///
    /// The memory provider may provide more capacity than requested - `additional_bytes` is only a lower bound.
    ///
    /// # Example
    ///
    /// ```
    /// use bytesbuf::BytesBuf;
    /// # use bytesbuf::mem::GlobalPool;
    ///
    /// # let memory = GlobalPool::new();
    /// let mut buf = BytesBuf::new();
    ///
    /// // Must reserve capacity before writing.
    /// buf.reserve(16, &memory);
    /// assert!(buf.remaining_capacity() >= 16);
    ///
    /// buf.put_num_be(0x1234_5678_u32);
    ///
    /// // Can reserve more capacity at any time.
    /// buf.reserve(100, &memory);
    /// assert!(buf.remaining_capacity() >= 100);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the resulting total buffer capacity would be greater than `usize::MAX`.
    ///
    /// [`remaining_capacity()`]: Self::remaining_capacity
    pub fn reserve(&mut self, additional_bytes: usize, memory_provider: &impl Memory) {
        let bytes_needed = additional_bytes.saturating_sub(self.remaining_capacity());

        let Some(bytes_needed) = NonZero::new(bytes_needed) else {
            return;
        };

        self.extend_capacity_by_at_least(bytes_needed, memory_provider);
    }

    fn extend_capacity_by_at_least(&mut self, bytes: NonZero<usize>, memory_provider: &impl Memory) {
        let additional_memory = memory_provider.reserve(bytes.get());

        // For extra paranoia: we expect the memory provider to return an empty buffer
        // with at least the requested capacity.
        debug_assert!(additional_memory.capacity() >= bytes.get());
        debug_assert!(additional_memory.is_empty());

        self.available = self
            .available
            .checked_add(additional_memory.capacity())
            .expect("buffer capacity cannot exceed usize::MAX");

        // We put the new ones in front (existing content needs to stay at the end).
        self.span_builders_reversed.insert_many(0, additional_memory.span_builders_reversed);
    }

    /// Appends the contents of an existing [`BytesView`] to the end of the buffer.
    ///
    /// Memory capacity of the existing [`BytesView`] is reused without copying.
    ///
    /// This is a private API to keep the nitty-gritty of span bookkeeping contained in this file
    /// while the public API lives in another file for ease of maintenance. The equivalent
    /// public API is `put_bytes()`.
    ///
    /// # Panics
    ///
    /// Panics if the resulting total buffer capacity would be greater than `usize::MAX`.
    pub(crate) fn append(&mut self, bytes: BytesView) {
        if bytes.is_empty() {
            return;
        }

        let bytes_len = bytes.len();

        // Only the first span builder may hold unfrozen data (the rest are for spare capacity).
        let total_unfrozen_bytes = NonZero::new(self.span_builders_reversed.last().map_or(0, SpanBuilder::len));

        if let Some(total_unfrozen_bytes) = total_unfrozen_bytes {
            // If there is any unfrozen data, we freeze it now to ensure we append after all
            // existing data already in the buffer.
            self.freeze_from_first(total_unfrozen_bytes);

            // Debug build paranoia: nothing remains in the span builder, right?
            debug_assert!(self.span_builders_reversed.last().map_or(0, SpanBuilder::len) == 0);
        }

        // We do this first so if we do panic, we have not performed any incomplete operations.
        // The freezing above is safe even if we panic here - freezing is an atomic operation.
        self.len = self.len.checked_add(bytes_len).expect("buffer capacity cannot exceed usize::MAX");

        // Any appended BytesView is frozen by definition, as contents of a BytesView are immutable.
        // This cannot wrap because we verified `len` is in-bounds and `frozen <= len` is a type invariant.
        self.frozen = self.frozen.wrapping_add(bytes_len);

        self.frozen_spans.extend(bytes.into_spans_reversed().into_iter().rev());
    }

    /// Peeks at the contents of the filled bytes region.
    ///
    /// The returned [`BytesView`] covers all data in the buffer but does not consume any of the data.
    ///
    /// Functionally similar to [`consume_all()`] except all the data remains in the
    /// buffer and can still be consumed later.
    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use bytesbuf::mem::Memory;
    ///
    /// let mut buf = memory.reserve(16);
    /// buf.put_num_be(0x1234_u16);
    /// buf.put_num_be(0x5678_u16);
    ///
    /// // Peek at the data without consuming it.
    /// let mut peeked = buf.peek();
    /// assert_eq!(peeked.get_num_be::<u16>(), 0x1234);
    /// assert_eq!(peeked.get_num_be::<u16>(), 0x5678);
    ///
    /// // Despite consuming from peeked, the buffer still contains all data.
    /// assert_eq!(buf.len(), 4);
    ///
    /// let consumed = buf.consume_all();
    /// assert_eq!(consumed.len(), 4);
    /// ```
    ///
    /// [`consume_all()`]: Self::consume_all
    #[must_use]
    pub fn peek(&self) -> BytesView {
        // Build a list of all spans to include in the result, in reverse order for efficient construction.
        let mut result_spans_reversed: SmallVec<[Span; MAX_INLINE_SPANS]> = SmallVec::new();

        // Add any filled data from the first (potentially partially filled) span builder.
        if let Some(first_builder) = self.span_builders_reversed.last() {
            // We only peek at the span builder as well, to avoid freezing it. Freezing has
            // security/performance implications, and the motivating idea behind peeking is to
            // verify the contents are ready for processing before we commit to freezing them.
            let span = first_builder.peek();

            // It might just be empty - that's also fine.
            if !span.is_empty() {
                result_spans_reversed.push(span);
            }
        }

        // Add all the frozen spans. They are stored in content order in our storage,
        // so we reverse them when adding to the result_spans_reversed collection.
        result_spans_reversed.extend(self.frozen_spans.iter().rev().cloned());

        BytesView::from_spans_reversed(result_spans_reversed)
    }

    /// How many bytes of data are in the buffer, ready to be consumed.
    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use bytesbuf::mem::Memory;
    ///
    /// let mut buf = memory.reserve(32);
    /// assert_eq!(buf.len(), 0);
    ///
    /// buf.put_num_be(0x1234_5678_u32);
    /// assert_eq!(buf.len(), 4);
    ///
    /// buf.put_slice(*b"Hello");
    /// assert_eq!(buf.len(), 9);
    ///
    /// _ = buf.consume(4);
    /// assert_eq!(buf.len(), 5);
    /// ```
    #[must_use]
    #[cfg_attr(debug_assertions, expect(clippy::missing_panics_doc, reason = "only unreachable panics"))]
    pub fn len(&self) -> usize {
        #[cfg(debug_assertions)]
        assert_eq!(self.len, self.calculate_len());

        self.len
    }

    #[cfg(debug_assertions)]
    fn calculate_len(&self) -> usize {
        let frozen_len = self.frozen_spans.iter().map(|x| x.len() as usize).sum::<usize>();
        let unfrozen_len = self.span_builders_reversed.last().map_or(0, SpanBuilder::len) as usize;

        // Will not overflow - `capacity <= usize::MAX` is a type invariant and `len <= capacity`.
        frozen_len.wrapping_add(unfrozen_len)
    }

    /// Whether the buffer is empty (contains no data).
    ///
    /// This does not imply that the buffer has no remaining memory capacity.
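    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use bytesbuf::mem::Memory;
    ///
    /// let mut buf = memory.reserve(8);
    /// assert!(buf.is_empty()); // No data yet, despite reserved capacity.
    ///
    /// buf.put_num_be(1_u16);
    /// assert!(!buf.is_empty());
    /// ```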
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// The total capacity of the buffer.
    ///
    /// This is the total length of the filled bytes and the available bytes regions.
    ///
    /// # Example
    ///
    /// ```
    /// use bytesbuf::BytesBuf;
    /// # use bytesbuf::mem::GlobalPool;
    ///
    /// # let memory = GlobalPool::new();
    /// let mut buf = BytesBuf::new();
    /// assert_eq!(buf.capacity(), 0);
    ///
    /// buf.reserve(100, &memory);
    /// let initial_capacity = buf.capacity();
    /// assert!(initial_capacity >= 100);
    ///
    /// // Writing does not change capacity.
    /// buf.put_slice(*b"Hello");
    /// assert_eq!(buf.capacity(), initial_capacity);
    ///
    /// // Consuming reduces capacity (memory is transferred to the BytesView).
    /// _ = buf.consume(5);
    /// assert!(buf.capacity() < initial_capacity);
    /// ```
    #[must_use]
    pub fn capacity(&self) -> usize {
        // Will not overflow - `capacity <= usize::MAX` is a type invariant.
        self.len().wrapping_add(self.remaining_capacity())
    }

    /// How many more bytes can be written into the buffer
    /// before its memory capacity is exhausted.
    ///
    /// # Example
    ///
    /// ```
    /// use bytesbuf::BytesBuf;
    /// # use bytesbuf::mem::GlobalPool;
    ///
    /// # let memory = GlobalPool::new();
    /// let mut buf = BytesBuf::new();
    ///
    /// buf.reserve(100, &memory);
    /// let initial_remaining = buf.remaining_capacity();
    /// assert!(initial_remaining >= 100);
    ///
    /// // Writing reduces remaining capacity.
    /// buf.put_slice(*b"Hello");
    /// assert_eq!(buf.remaining_capacity(), initial_remaining - 5);
    ///
    /// // Reserving more increases remaining capacity.
    /// buf.reserve(200, &memory);
    /// assert!(buf.remaining_capacity() >= 200);
    ///
    /// // Consuming buffered data does NOT affect remaining capacity.
    /// let remaining_before_consume = buf.remaining_capacity();
    /// _ = buf.consume(5);
    /// assert_eq!(buf.remaining_capacity(), remaining_before_consume);
    /// ```
    #[cfg_attr(test, mutants::skip)] // Lying about buffer sizes is an easy way to create infinite loops.
    pub fn remaining_capacity(&self) -> usize {
        // The remaining capacity is the sum of the remaining capacity of all span builders.
        debug_assert_eq!(
            self.available,
            self.span_builders_reversed
                .iter()
                .map(SpanBuilder::remaining_capacity)
                .sum::<usize>()
        );

        self.available
    }

    /// Consumes `len` bytes from the beginning of the buffer.
    ///
    /// The consumed bytes and the memory capacity that backs them are removed from the buffer.
    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use bytesbuf::mem::Memory;
    ///
    /// let mut buf = memory.reserve(32);
    ///
    /// buf.put_num_be(0x1111_u16);
    /// buf.put_num_be(0x2222_u16);
    ///
    /// // Consume the first part.
    /// let mut first = buf.consume(2);
    /// assert_eq!(first.get_num_be::<u16>(), 0x1111);
    ///
    /// // Write more data.
    /// buf.put_num_be(0x3333_u16);
    ///
    /// // Consume the remaining data.
    /// let mut rest = buf.consume(4);
    /// assert_eq!(rest.get_num_be::<u16>(), 0x2222);
    /// assert_eq!(rest.get_num_be::<u16>(), 0x3333);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the buffer does not contain at least `len` bytes.
    pub fn consume(&mut self, len: usize) -> BytesView {
        self.consume_checked(len)
            .expect("attempted to consume more bytes than available in buffer")
    }

    /// Consumes `len` bytes from the beginning of the buffer.
    ///
    /// Returns `None` if the buffer does not contain at least `len` bytes.
    ///
    /// The consumed bytes and the memory capacity that backs them are removed from the buffer.
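    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use bytesbuf::mem::Memory;
    ///
    /// let mut buf = memory.reserve(16);
    /// buf.put_num_be(0x1234_u16);
    ///
    /// // Asking for more than is buffered returns None instead of panicking.
    /// assert!(buf.consume_checked(4).is_none());
    ///
    /// let mut bytes = buf.consume_checked(2).expect("2 bytes are buffered");
    /// assert_eq!(bytes.get_num_be::<u16>(), 0x1234);
    /// ```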
    #[expect(clippy::missing_panics_doc, reason = "only unreachable panics")]
    pub fn consume_checked(&mut self, len: usize) -> Option<BytesView> {
        if len > self.len() {
            return None;
        }

        self.ensure_frozen(len);

        let manifest = self.prepare_consume(len);

        // We build the result spans collection up in storage order.
        // The first piece of content goes last into the result spans.
        let mut result_spans_reversed: SmallVec<[Span; MAX_INLINE_SPANS]> = SmallVec::with_capacity(manifest.required_spans_capacity());

        // The content-order last span goes first into the result, so if we have a partial
        // span, shove it in there first. The fully detached spans get processed together.
        if manifest.consume_partial_span_bytes != 0 {
            // We also need some bytes from the first frozen span that now remains
            // but not the entire frozen span.
            let partially_consumed_frozen_span = self
                .frozen_spans
                .get_mut(manifest.detach_complete_frozen_spans)
                .expect("guarded by ensure_frozen()");

            let take = partially_consumed_frozen_span.slice(0..manifest.consume_partial_span_bytes);
            result_spans_reversed.push(take);

            // SAFETY: We must guarantee that we do not try to advance out of bounds. This is guaranteed
            // by the manifest calculation, the job of which is to determine the right in-bounds value.
            unsafe { partially_consumed_frozen_span.advance(manifest.consume_partial_span_bytes as usize) };
        }

        // We extend the result spans with the (storage-order) fully detached spans.
        // BytesBuf stores the frozen spans in content order, so we must reverse.
        result_spans_reversed.extend(self.frozen_spans.drain(..manifest.detach_complete_frozen_spans).rev());

        // Will not wrap because we verified bounds above.
        self.len = self.len.wrapping_sub(len);

        // Will not wrap because all consumed data must first have been frozen,
        // which we guarantee via ensure_frozen() above.
        self.frozen = self.frozen.wrapping_sub(len);

        Some(BytesView::from_spans_reversed(result_spans_reversed))
    }

    fn prepare_consume(&self, mut len: usize) -> ConsumeManifest {
        debug_assert!(len <= self.frozen);

        let mut detach_complete_frozen_spans: usize = 0;

        for span in &self.frozen_spans {
            let span_len = span.len();

            if span_len as usize <= len {
                // Will not wrap because a type invariant is `capacity <= usize::MAX`, so if
                // capacity is in-bounds, the number of spans could not possibly be greater.
                detach_complete_frozen_spans = detach_complete_frozen_spans.wrapping_add(1);

                len = len
                    .checked_sub(span_len as usize)
                    .expect("somehow ended up with negative bytes remaining - algorithm defect");

                if len != 0 {
                    // We will consume this whole span and need more - go to the next one.
                    continue;
                }
            }

            // This span satisfied our needs, either in full or in part.
            break;
        }

        ConsumeManifest {
            detach_complete_frozen_spans,
            // If any `len` was left, it was not a full span.
            consume_partial_span_bytes: len.try_into().expect("we are supposed to have less than one memory block worth of data remaining but its length does not fit into a single memory block - algorithm defect"),
        }
    }

    /// Consumes all bytes in the buffer.
    ///
    /// The consumed bytes and the memory capacity that backs them are removed from the buffer.
    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use bytesbuf::mem::Memory;
    ///
    /// let mut buf = memory.reserve(32);
    /// buf.put_slice(*b"Hello, ");
    /// buf.put_slice(*b"world!");
    /// buf.put_num_be(0x2121_u16); // "!!"
    ///
    /// let message = buf.consume_all();
    ///
    /// assert_eq!(message, b"Hello, world!!!");
    /// assert!(buf.is_empty());
    /// ```
    pub fn consume_all(&mut self) -> BytesView {
        // SAFETY: Consuming len() bytes from self cannot possibly be out of bounds.
        unsafe { self.consume_checked(self.len()).unwrap_unchecked() }
    }

    /// Consumes `len` bytes from the first span builder and moves them to the frozen spans list as a new span.
    fn freeze_from_first(&mut self, len: NonZero<BlockSize>) {
        let span_builder = self
            .span_builders_reversed
            .last_mut()
            .expect("there must be at least one span builder for it to be possible to freeze bytes");

        debug_assert!(len.get() <= span_builder.len());

        let span = span_builder.consume(len);
        self.frozen_spans.push(span);

        if span_builder.remaining_capacity() == 0 {
            // No more capacity left in this builder, so drop it.
            self.span_builders_reversed.pop();
        }

        self.frozen = self
            .frozen
            .checked_add(len.get() as usize)
            .expect("usize overflow should be impossible here because the buffer capacity would exceed virtual memory size");
    }

    /// Ensures that the frozen spans list contains at least `len` bytes of data, freezing
    /// additional data from the span builders if necessary.
    ///
    /// # Panics
    ///
    /// Panics if there is not enough data in the span builders to fulfill the request.
    fn ensure_frozen(&mut self, len: usize) {
        let must_freeze_bytes: BlockSize = len
            .saturating_sub(self.frozen)
            .try_into()
            .expect("requested to freeze more bytes from the first block than can actually fit into one block");

        let Some(must_freeze_bytes) = NonZero::new(must_freeze_bytes) else {
            return;
        };

        // We only need to freeze from the first span builder because a type invariant is that
        // only the first span builder may contain data. The others are just spare capacity.
        self.freeze_from_first(must_freeze_bytes);
    }

    /// The first slice of memory in the remaining capacity of the buffer.
    ///
    /// This allows you to manually write into the buffer instead of using the various
    /// provided convenience methods. Only the first slice of the remaining capacity is
    /// exposed at any given time by this API.
    ///
    /// After writing data to the start of this slice, call [`advance()`] to indicate
    /// how many bytes have been filled with data. The next call to `first_unfilled_slice()`
    /// will return the next slice of memory you can write into. This slice must be
    /// completely filled before the next slice is exposed (a partial fill will simply
    /// return the remaining range from the same slice in the next call).
    ///
    /// To write to multiple slices concurrently, use [`begin_vectored_write()`].
    #[doc = include_str!("../doc/snippets/sequence_memory_layout.md")]
    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use bytesbuf::mem::Memory;
    ///
    /// let mut buf = memory.reserve(64);
    /// let data_to_write: &[u8] = b"0123456789";
    ///
    /// // Write data without assuming the length of first_unfilled_slice().
    /// let mut written = 0;
    ///
    /// while written < data_to_write.len() {
    ///     let dst = buf.first_unfilled_slice();
    ///
    ///     let bytes_to_write = dst.len().min(data_to_write.len() - written);
    ///
    ///     for i in 0..bytes_to_write {
    ///         dst[i].write(data_to_write[written + i]);
    ///     }
    ///
    ///     // SAFETY: We just initialized `bytes_to_write` bytes.
    ///     unsafe {
    ///         buf.advance(bytes_to_write);
    ///     }
    ///     written += bytes_to_write;
    /// }
    ///
    /// assert_eq!(buf.consume_all(), b"0123456789");
    /// ```
    ///
    /// [`advance()`]: Self::advance
    /// [`begin_vectored_write()`]: Self::begin_vectored_write
    pub fn first_unfilled_slice(&mut self) -> &mut [MaybeUninit<u8>] {
        if let Some(last) = self.span_builders_reversed.last_mut() {
            last.unfilled_slice_mut()
        } else {
            // We are required to always return something, even if we have no span builders!
            &mut []
        }
    }

    /// Signals that `count` bytes have been written to the start of [`first_unfilled_slice()`].
    ///
    /// The next call to [`first_unfilled_slice()`] will return the next slice of memory that
    /// can be filled with data.
    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use bytesbuf::mem::Memory;
    ///
    /// let mut buf = memory.reserve(64);
    /// let data_to_write: &[u8] = b"0123456789";
    ///
    /// // Write data without assuming the length of first_unfilled_slice().
    /// let mut written = 0;
    ///
    /// while written < data_to_write.len() {
    ///     let dst = buf.first_unfilled_slice();
    ///
    ///     let bytes_to_write = dst.len().min(data_to_write.len() - written);
    ///
    ///     for i in 0..bytes_to_write {
    ///         dst[i].write(data_to_write[written + i]);
    ///     }
    ///
    ///     // SAFETY: We just initialized `bytes_to_write` bytes.
    ///     unsafe {
    ///         buf.advance(bytes_to_write);
    ///     }
    ///     written += bytes_to_write;
    /// }
    ///
    /// assert_eq!(buf.consume_all(), b"0123456789");
    /// ```
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `count` bytes from the beginning of [`first_unfilled_slice()`]
    /// have been initialized.
    ///
    /// [`first_unfilled_slice()`]: Self::first_unfilled_slice
    #[expect(clippy::missing_panics_doc, reason = "only unreachable panics")]
    pub unsafe fn advance(&mut self, count: usize) {
        if count == 0 {
            return;
        }

        // The write head can only be advanced (via this method) up to the end of the first slice, no further.
        // This is guaranteed by our safety requirements, so we only assert this in debug builds for extra validation.
        debug_assert!(count <= self.span_builders_reversed.last().map_or(0, SpanBuilder::remaining_capacity));

        let span_builder = self
            .span_builders_reversed
            .last_mut()
            .expect("there must be at least one span builder if we wrote nonzero bytes");

        // SAFETY: We simply rely on the caller's safety promises here, "forwarding" them.
        unsafe { span_builder.advance(count) };

        if span_builder.remaining_capacity() == 0 {
            // The span builder is full, so we need to freeze it and move it to the frozen spans.
            let len = NonZero::new(span_builder.len())
                .expect("there is no capacity left in the span builder so there must be at least one byte to consume unless we somehow left an empty span builder in the queue");

            self.freeze_from_first(len);

            // Debug build paranoia: no full span builder remains after the freeze, right?
            debug_assert!(
                self.span_builders_reversed
                    .last()
                    .map_or(usize::MAX, SpanBuilder::remaining_capacity)
                    > 0
            );
        }

        self.len = self
            .len
            .checked_add(count)
            .expect("usize overflow should be impossible here because the buffer capacity would exceed virtual memory size");

        self.available = self
            .available
            .checked_sub(count)
            .expect("guarded by assertion above - we must have at least this much capacity still available");
    }

    /// Concurrently writes data into all the byte slices that make up the buffer.
    ///
    /// The vectored write takes exclusive ownership of the buffer for the duration of the operation
    /// and allows individual slices of the remaining capacity to be filled concurrently, up to an
    /// optional limit of `max_len` bytes.
    ///
    /// Some I/O operations are naturally limited to a maximum number of bytes that can be
    /// transferred, so the length limit here allows you to project a restricted view of the
    /// available capacity without having to limit the true capacity of the buffer.
    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use std::ptr;
    ///
    /// use bytesbuf::mem::Memory;
    ///
    /// let mut buf = memory.reserve(64);
    /// let capacity = buf.remaining_capacity();
    ///
    /// let mut vectored = buf.begin_vectored_write(None);
    /// let mut slices: Vec<_> = vectored.iter_slices_mut().collect();
    ///
    /// // Fill all slices with 0xAE bytes.
    /// // In practice, these could be filled concurrently by vectored I/O APIs.
    /// let mut total_written = 0;
    /// for slice in &mut slices {
    ///     // SAFETY: Writing valid u8 values to the entire slice.
    ///     unsafe {
    ///         ptr::write_bytes(slice.as_mut_ptr(), 0xAE, slice.len());
    ///     }
    ///     total_written += slice.len();
    /// }
    ///
    /// // SAFETY: We initialized `total_written` bytes sequentially.
    /// unsafe {
    ///     vectored.commit(total_written);
    /// }
    ///
    /// assert_eq!(buf.len(), capacity);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `max_len` is greater than the remaining capacity of the buffer.
    pub fn begin_vectored_write(&mut self, max_len: Option<usize>) -> BytesBufVectoredWrite<'_> {
        self.begin_vectored_write_checked(max_len)
            .expect("attempted to begin a vectored write with a max_len that was greater than the remaining capacity")
    }

    /// Concurrently writes data into all the byte slices that make up the buffer.
    ///
    /// The vectored write takes exclusive ownership of the buffer for the duration of the operation
    /// and allows individual slices of the remaining capacity to be filled concurrently, up to an
    /// optional limit of `max_len` bytes.
    ///
    /// Some I/O operations are naturally limited to a maximum number of bytes that can be
    /// transferred, so the length limit here allows you to project a restricted view of the
    /// available capacity without having to limit the true capacity of the buffer.
    ///
    /// # Returns
    ///
    /// Returns `None` if `max_len` is greater than the remaining capacity of the buffer.
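    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use bytesbuf::mem::Memory;
    ///
    /// let mut buf = memory.reserve(16);
    ///
    /// // A limit beyond the remaining capacity yields None instead of panicking.
    /// let too_big = buf.remaining_capacity() + 1;
    /// assert!(buf.begin_vectored_write_checked(Some(too_big)).is_none());
    ///
    /// // A limit within the remaining capacity succeeds.
    /// let vectored = buf.begin_vectored_write_checked(Some(4)).expect("within capacity");
    /// drop(vectored); // Dropping without commit aborts the vectored write.
    /// ```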
    pub fn begin_vectored_write_checked(&mut self, max_len: Option<usize>) -> Option<BytesBufVectoredWrite<'_>> {
        if let Some(max_len) = max_len
            && max_len > self.remaining_capacity()
        {
            return None;
        }

        Some(BytesBufVectoredWrite { buf: self, max_len })
    }

    fn iter_available_capacity(&mut self, max_len: Option<usize>) -> BytesBufRemaining<'_> {
        let next_span_builder_index = if self.span_builders_reversed.is_empty() { None } else { Some(0) };

        BytesBufRemaining {
            buf: self,
            next_span_builder_index,
            max_len,
        }
    }

    /// Extends the lifetime of the memory capacity backing this buffer.
    ///
    /// This can be useful when unsafe code is used to reference the contents of a `BytesBuf` and it
    /// is possible to reach a condition where the `BytesBuf` itself no longer exists, even though
    /// the contents are referenced (e.g. because the remaining references are in non-Rust code).
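    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use bytesbuf::mem::Memory;
    ///
    /// let mut buf = memory.reserve(16);
    /// buf.put_num_be(0xABCD_u16);
    ///
    /// // The guard keeps the backing memory alive independently of the buffer.
    /// let guard = buf.extend_lifetime();
    /// drop(buf);
    /// drop(guard);
    /// ```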
    pub fn extend_lifetime(&self) -> MemoryGuard {
        MemoryGuard::new(
            self.span_builders_reversed
                .iter()
                .map(SpanBuilder::block)
                .map(Clone::clone)
                .chain(self.frozen_spans.iter().map(Span::block_ref).map(Clone::clone)),
        )
    }

    /// Exposes the instance through the [`Write`][std::io::Write] trait.
    ///
    /// The memory capacity of the `BytesBuf` will be automatically extended on demand
    /// with additional capacity from the supplied memory provider.
    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use std::io::Write;
    ///
    /// use bytesbuf::mem::Memory;
    ///
    /// let mut buf = memory.reserve(32);
    /// {
    ///     let mut writer = buf.as_write(&memory);
    ///     writer.write_all(b"Hello, ")?;
    ///     writer.write_all(b"world!")?;
    /// }
    ///
    /// assert_eq!(buf.consume_all(), b"Hello, world!");
    /// # Ok::<(), std::io::Error>(())
    /// ```
    #[inline]
    pub fn as_write<M: Memory>(&mut self, memory: &M) -> impl std::io::Write {
        BytesBufWrite::new(self, memory)
    }
}

impl std::fmt::Debug for BytesBuf {
    #[cfg_attr(test, mutants::skip)] // We have no API contract here.
    #[cfg_attr(coverage_nightly, coverage(off))] // We have no API contract here.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let frozen_spans = self.frozen_spans.iter().map(|x| x.len().to_string()).collect::<Vec<_>>().join(", ");

        let span_builders = self
            .span_builders_reversed
            .iter()
            .rev()
            .map(|x| {
                if x.is_empty() {
                    x.remaining_capacity().to_string()
                } else {
                    format!("{} + {}", x.len(), x.remaining_capacity())
                }
            })
            .collect::<Vec<_>>()
            .join(", ");

        f.debug_struct(type_name::<Self>())
            .field("len", &self.len)
            .field("frozen", &self.frozen)
            .field("available", &self.available)
            .field("frozen_spans", &frozen_spans)
            .field("span_builders", &span_builders)
            .finish()
    }
}

/// A prepared "consume bytes" operation, identifying what must be done to perform the operation.
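///
/// For example, consuming 25 bytes when the frozen spans have lengths `[10, 10, 10]`
/// yields `detach_complete_frozen_spans: 2` and `consume_partial_span_bytes: 5`:
/// the first two spans are detached whole, and 5 bytes are sliced off the front of the third.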
#[derive(Debug, Clone, Copy)]
struct ConsumeManifest {
    /// How many frozen spans are to be fully detached from the front of the collection.
    detach_complete_frozen_spans: usize,

    /// How many bytes of data to consume from the first remaining frozen span.
    /// Any remainder is left within that span - the span itself is not detached.
    consume_partial_span_bytes: BlockSize,
}

impl ConsumeManifest {
    const fn required_spans_capacity(&self) -> usize {
        if self.consume_partial_span_bytes != 0 {
            // This will not wrap because a type invariant is `capacity <= usize::MAX`, so if
            // capacity is already in-bounds, the count of spans certainly is not a greater number.
            self.detach_complete_frozen_spans.wrapping_add(1)
        } else {
            self.detach_complete_frozen_spans
        }
    }
}

/// Coordinates concurrent write operations into a buffer's memory capacity.
///
/// The operation takes exclusive ownership of the `BytesBuf`. During the vectored write,
/// the remaining capacity of the `BytesBuf` is exposed as `MaybeUninit<u8>` slices
/// that must, by the end of the operation, be filled sequentially and in order, without gaps,
/// in any desired amount (from 0 bytes written to all slices filled).
///
/// All slices may be written to concurrently and/or in any order - consistency of the contents
/// is only required at the moment the write is committed.
///
/// The capacity exposed during the operation can optionally be limited to `max_len` bytes.
///
/// The operation is completed by calling `.commit()` on the instance, after which the operation is
/// consumed and the exclusive ownership of the `BytesBuf` is released.
///
/// If the instance is dropped without committing, the operation is aborted and all remaining capacity
/// is left in a potentially uninitialized state.
#[derive(Debug)]
pub struct BytesBufVectoredWrite<'a> {
    buf: &'a mut BytesBuf,
    max_len: Option<usize>,
}

impl BytesBufVectoredWrite<'_> {
    /// Iterates over the slices of available capacity of the buffer,
    /// allowing them to be filled with data.
    ///
    /// The slices returned from this iterator have the lifetime of the vectored
    /// write operation itself, allowing them to be mutated concurrently.
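    ///
    /// See [`BytesBuf::begin_vectored_write()`] for a complete usage example.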
    pub fn iter_slices_mut(&mut self) -> BytesBufRemaining<'_> {
        self.buf.iter_available_capacity(self.max_len)
    }

    /// Extends the lifetime of the memory capacity backing this buffer.
    ///
    /// This can be useful when unsafe code is used to reference the contents of a `BytesBuf` and it
    /// is possible to reach a condition where the `BytesBuf` itself no longer exists, even though
    /// the contents are referenced (e.g. because the remaining references are in non-Rust code).
    pub fn extend_lifetime(&self) -> MemoryGuard {
        self.buf.extend_lifetime()
    }

    /// Completes the vectored write operation, committing `bytes_written` bytes of data
    /// that fill the provided slices sequentially and completely from the start.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `bytes_written` bytes of data have actually been written
    /// into the slices of memory returned from `iter_slices_mut()`, sequentially from the start.
    #[expect(clippy::missing_panics_doc, reason = "only unreachable panics")]
    pub unsafe fn commit(self, bytes_written: usize) {
        debug_assert!(bytes_written <= self.buf.remaining_capacity());

        if let Some(max_len) = self.max_len {
            debug_assert!(bytes_written <= max_len);
        }

        // Ordinarily, we have a type invariant that only the first span builder may contain data,
        // with the others being spare capacity. For the duration of a vectored write, this
        // invariant is suspended (because the vectored write has an exclusive reference which makes
        // the suspension of this invariant invisible to any other caller). We must now restore this
        // invariant. We do this by advancing the write head slice by slice, triggering the normal
        // freezing logic as we go (to avoid implementing two versions of the same logic), until we
        // have run out of written bytes to commit.

        let mut bytes_remaining = bytes_written;

        while bytes_remaining > 0 {
            let span_builder = self
                .buf
                .span_builders_reversed
                .last_mut()
                .expect("there must be at least one span builder because we still have filled capacity remaining to freeze");

            let bytes_available = span_builder.remaining_capacity();
            let bytes_to_commit = bytes_available.min(bytes_remaining);

            // SAFETY: We forward the promise from our own safety requirements to guarantee that
            // the specified number of bytes really has been written.
            unsafe { self.buf.advance(bytes_to_commit) };

            bytes_remaining = bytes_remaining
                .checked_sub(bytes_to_commit)
                .expect("we somehow advanced the write head more than the count of written bytes");
        }
    }
}

/// Exposes the remaining memory capacity of a `BytesBuf` for concurrent writes.
///
/// This is used during a vectored write operation, iterating over a sequence
/// of `MaybeUninit<u8>` slices that the caller can concurrently write into.
///
/// The slices may be mutated for as long as the vectored write operation exists.
#[derive(Debug)]
pub struct BytesBufRemaining<'a> {
    buf: &'a mut BytesBuf,
    next_span_builder_index: Option<usize>,

    // Self-imposed constraint on how much of the available capacity is made visible through
    // this iterator. This can be useful to limit the amount of data that can be written into
    // a `BytesBuf` during a vectored write operation without having to limit the
    // actual capacity of the `BytesBuf`.
    max_len: Option<usize>,
}

impl<'a> Iterator for BytesBufRemaining<'a> {
    type Item = &'a mut [MaybeUninit<u8>];

    #[cfg_attr(test, mutants::skip)] // This gets mutated into an infinite loop, which is not very helpful.
    fn next(&mut self) -> Option<Self::Item> {
        let next_span_builder_index = self.next_span_builder_index?;

        self.next_span_builder_index = Some(
            // Will not overflow because `capacity <= usize::MAX` is a type invariant,
            // so the count of span builders certainly cannot be greater.
            next_span_builder_index.wrapping_add(1),
        );
        if self.next_span_builder_index == Some(self.buf.span_builders_reversed.len()) {
            self.next_span_builder_index = None;
        }

        // The iterator iterates through things in content order but we need to access
        // the span builders in storage order.
        let next_span_builder_index_storage_order = self
            .buf
            .span_builders_reversed
            .len()
            // Will not overflow because `capacity <= usize::MAX` is a type invariant,
            // so the count of span builders certainly cannot be greater.
            .wrapping_sub(next_span_builder_index + 1);

        let span_builder = self
            .buf
            .span_builders_reversed
            .get_mut(next_span_builder_index_storage_order)
            .expect("iterator cursor referenced a span builder that does not exist");

        let uninit_slice_mut = span_builder.unfilled_slice_mut();

        // SAFETY: There is nothing Rust can do to promise the reference we return is valid for 'a
        // but we can make such a promise ourselves. In essence, returning the references with
        // lifetime 'a will extend the exclusive borrow of the `BytesBuf` until all returned chunk
        // references are dropped, even if the iterator itself is dropped earlier. We can do this
        // because we know that accessing the chunks requires a reference to the `BytesBuf`,
        // so as long as a chunk reference exists, access via the `BytesBuf` is blocked.
        let uninit_slice_mut = unsafe { mem::transmute::<&mut [MaybeUninit<u8>], &'a mut [MaybeUninit<u8>]>(&mut *uninit_slice_mut) };

        let uninit_slice_mut = if let Some(max_len) = self.max_len {
            // Limit the visible range of the slice if we have a size limit.
            // If the limit truncates the slice below its full size, we will
            // also terminate the iteration.
            let constrained_len = uninit_slice_mut.len().min(max_len);

            let adjusted_slice = uninit_slice_mut.get_mut(..constrained_len).expect("guarded by min() above");

            // Will not wrap because it is guarded by min() above.
            self.max_len = Some(max_len.wrapping_sub(constrained_len));

            if self.max_len == Some(0) {
                // Even if there are more span builders, we have returned all the capacity
                // we are allowed to return, so pretend there is nothing more to return.
                self.next_span_builder_index = None;
            }

            adjusted_slice
        } else {
            uninit_slice_mut
        };

        Some(uninit_slice_mut)
    }
}

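/// Converting a [`BytesView`] into a `BytesBuf` reuses the view's backing memory
/// without copying - equivalent to appending the view to an empty buffer.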
1140impl From<BytesView> for BytesBuf {
1141    fn from(value: BytesView) -> Self {
1142        let mut buf = Self::new();
1143        buf.append(value);
1144        buf
1145    }
1146}
1147
1148#[cfg_attr(coverage_nightly, coverage(off))]
1149#[cfg(test)]
1150mod tests {
1151    #![allow(clippy::indexing_slicing, reason = "Fine in test code, we prefer panic on error")]
1152
1153    use std::pin::pin;
1154
1155    use new_zealand::nz;
1156    use static_assertions::assert_impl_all;
1157    use testing_aids::assert_panic;
1158
1159    use super::*;
1160    use crate::mem::GlobalPool;
1161    use crate::mem::testing::{FixedBlockMemory, TestMemoryBlock};
1162
1163    const U64_SIZE: usize = size_of::<u64>();
1164    const TWO_U64_SIZE: usize = size_of::<u64>() + size_of::<u64>();
1165    const THREE_U64_SIZE: usize = size_of::<u64>() + size_of::<u64>() + size_of::<u64>();
1166
1167    assert_impl_all!(BytesBuf: Send, Sync);
1168
1169    #[test]
1170    fn smoke_test() {
1171        let memory = FixedBlockMemory::new(nz!(1234));
1172
1173        let min_length = 1000;
1174
1175        let mut buf = memory.reserve(min_length);
1176
1177        assert!(buf.capacity() >= min_length);
1178        assert!(buf.remaining_capacity() >= min_length);
1179        assert_eq!(buf.capacity(), buf.remaining_capacity());
1180        assert_eq!(buf.len(), 0);
1181        assert!(buf.is_empty());
1182
1183        buf.put_num_ne(1234_u64);
1184        buf.put_num_ne(5678_u64);
1185        buf.put_num_ne(1234_u64);
1186        buf.put_num_ne(5678_u64);
1187
1188        assert_eq!(buf.len(), 32);
1189        assert!(!buf.is_empty());
1190
1191        // SAFETY: Writing 0 bytes is always valid.
1192        unsafe {
1193            buf.advance(0);
1194        }
1195
1196        let mut first_two = buf.consume(TWO_U64_SIZE);
1197        let mut second_two = buf.consume(TWO_U64_SIZE);
1198
1199        assert_eq!(first_two.len(), 16);
1200        assert_eq!(second_two.len(), 16);
1201        assert_eq!(buf.len(), 0);
1202
1203        assert_eq!(first_two.get_num_ne::<u64>(), 1234);
1204        assert_eq!(first_two.get_num_ne::<u64>(), 5678);
1205
1206        assert_eq!(second_two.get_num_ne::<u64>(), 1234);
1207        assert_eq!(second_two.get_num_ne::<u64>(), 5678);
1208
1209        buf.put_num_ne(1111_u64);
1210
1211        assert_eq!(buf.len(), 8);
1212
1213        let mut last = buf.consume(U64_SIZE);
1214
1215        assert_eq!(last.len(), 8);
1216        assert_eq!(buf.len(), 0);
1217
1218        assert_eq!(last.get_num_ne::<u64>(), 1111);
1219
1220        assert!(buf.consume_checked(1).is_none());
1221        assert!(buf.consume_all().is_empty());
1222    }
1223
1224    #[test]
1225    fn extend_capacity() {
1226        let mut buf = BytesBuf::new();
1227
1228        assert_eq!(buf.capacity(), 0);
1229        assert_eq!(buf.remaining_capacity(), 0);
1230
1231        let memory = FixedBlockMemory::new(nz!(100));
1232
1233        // Have 0, desired 10, requesting 10, will get 100.
1234        buf.reserve(10, &memory);
1235
1236        assert_eq!(buf.capacity(), 100);
1237        assert_eq!(buf.remaining_capacity(), 100);
1238
1239        // Write 10 bytes of data just to verify that it does not affect "capacity" logic.
1240        buf.put_num_ne(1234_u64);
1241        buf.put_num_ne(5678_u16);
1242
1243        assert_eq!(buf.len(), 10);
1244        assert_eq!(buf.remaining_capacity(), 90);
1245        assert_eq!(buf.capacity(), 100);
1246
1247        // Have 100, desired 10+140=150, requesting 50, will get another 100 for a total of 200.
1248        buf.reserve(140, &memory);
1249
1250        assert_eq!(buf.len(), 10);
1251        assert_eq!(buf.remaining_capacity(), 190);
1252        assert_eq!(buf.capacity(), 200);
1253
1254        // Have 200, desired 10+200=210, 210-200=10, will get another 100.
1255        buf.reserve(200, &memory);
1256
1257        assert_eq!(buf.len(), 10);
1258        assert_eq!(buf.remaining_capacity(), 290);
1259        assert_eq!(buf.capacity(), 300);
1260    }
1261
1262    #[test]
1263    fn append_existing_view() {
1264        let memory = FixedBlockMemory::new(nz!(1234));
1265
1266        let min_length = 1000;
1267
1268        // This one we use to prepare some data to append.
1269        let mut payload_buffer = memory.reserve(min_length);
1270
1271        // This is where we append the data to.
1272        let mut target_buffer = memory.reserve(min_length);
1273
1274        // First we make a couple pieces to append.
1275        payload_buffer.put_num_ne(1111_u64);
1276        payload_buffer.put_num_ne(2222_u64);
1277        payload_buffer.put_num_ne(3333_u64);
1278        payload_buffer.put_num_ne(4444_u64);
1279
1280        let payload1 = payload_buffer.consume(TWO_U64_SIZE);
1281        let payload2 = payload_buffer.consume(TWO_U64_SIZE);
1282
1283        // Then we prefill some data to start us off.
1284        target_buffer.put_num_ne(5555_u64);
1285        target_buffer.put_num_ne(6666_u64);
1286
1287        // Consume a little just for extra complexity.
1288        let _ = target_buffer.consume(U64_SIZE);
1289
1290        // Append the payloads.
1291        target_buffer.put_bytes(payload1);
1292        target_buffer.put_bytes(payload2);
1293
1294        // Appending an empty byte sequence does nothing.
1295        target_buffer.put_bytes(BytesView::default());
1296
1297        // Add some custom data at the end.
1298        target_buffer.put_num_ne(7777_u64);
1299
1300        assert_eq!(target_buffer.len(), 48);
1301
1302        let mut result = target_buffer.consume(48);
1303
1304        assert_eq!(result.get_num_ne::<u64>(), 6666);
1305        assert_eq!(result.get_num_ne::<u64>(), 1111);
1306        assert_eq!(result.get_num_ne::<u64>(), 2222);
1307        assert_eq!(result.get_num_ne::<u64>(), 3333);
1308        assert_eq!(result.get_num_ne::<u64>(), 4444);
1309        assert_eq!(result.get_num_ne::<u64>(), 7777);
1310    }
1311
1312    #[test]
1313    fn consume_all_mixed() {
1314        let mut buf = BytesBuf::new();
1315        let memory = FixedBlockMemory::new(nz!(8));
1316
1317        // Reserve some capacity and add initial data.
1318        buf.reserve(16, &memory);
1319        buf.put_num_ne(1111_u64);
1320        buf.put_num_ne(2222_u64);
1321
1322        // Consume some data (the 1111).
1323        let _ = buf.consume(8);
1324
1325        // Append a sequence (the 3333).
1326        let mut append_buf = BytesBuf::new();
1327        append_buf.reserve(8, &memory);
1328        append_buf.put_num_ne(3333_u64);
1329        let reused_bytes_to_append = append_buf.consume_all();
1330        buf.put_bytes(reused_bytes_to_append);
1331
1332        // Add more data (the 4444).
1333        buf.reserve(8, &memory);
1334        buf.put_num_ne(4444_u64);
1335
1336        // Consume all data and validate we got all the pieces.
1337        let mut result = buf.consume_all();
1338
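        // Worked total: 2222 (8 bytes) + appended 3333 (8 bytes) + 4444 (8 bytes) = 24.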
1339        assert_eq!(result.len(), 24);
1340        assert_eq!(result.get_num_ne::<u64>(), 2222);
1341        assert_eq!(result.get_num_ne::<u64>(), 3333);
1342        assert_eq!(result.get_num_ne::<u64>(), 4444);
1343    }
1344
1345    #[test]
1346    #[expect(clippy::cognitive_complexity, reason = "test code")]
1347    fn peek_basic() {
1348        let mut buf = BytesBuf::new();
1349
1350        assert_eq!(buf.capacity(), 0);
1351        assert_eq!(buf.remaining_capacity(), 0);
1352
1353        let memory = FixedBlockMemory::new(nz!(10));
1354
1355        // Peeking an empty buffer is fine - it just yields an empty BytesView.
1356        let peeked = buf.peek();
1357        assert_eq!(peeked.len(), 0);
1358
1359        buf.reserve(100, &memory);
1360
1361        assert_eq!(buf.capacity(), 100);
1362
1363        buf.put_num_ne(1111_u64);
1364
1365        // We have 0 frozen spans and 10 span builders,
1366        // the first of which has 8 bytes of filled content.
1367        let mut peeked = buf.peek();
1368        assert_eq!(peeked.first_slice().len(), 8);
1369        assert_eq!(peeked.get_num_ne::<u64>(), 1111);
1370        assert_eq!(peeked.len(), 0);
1371
1372        buf.put_num_ne(2222_u64);
1373        buf.put_num_ne(3333_u64);
1374        buf.put_num_ne(4444_u64);
1375        buf.put_num_ne(5555_u64);
1376        buf.put_num_ne(6666_u64);
1377        buf.put_num_ne(7777_u64);
1378        buf.put_num_ne(8888_u64);
1379        // These will cross a span boundary so we can also observe
1380        // crossing that boundary during peeking.
1381        buf.put_byte_repeated(9, 8);
1382
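        // Worked total: eight u64 writes (64 bytes) + 8 repeated bytes = 72.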
1383        assert_eq!(buf.len(), 72);
1384        assert_eq!(buf.capacity(), 100);
1385        assert_eq!(buf.remaining_capacity(), 28);
1386
1387        // We should have 7 frozen spans and 3 span builders,
1388        // the first of which has 2 bytes of filled content.
1389        let mut peeked = buf.peek();
1390
1391        assert_eq!(peeked.len(), 72);
1392
1393        // This should be the first frozen span of 10 bytes.
1394        assert_eq!(peeked.first_slice().len(), 10);
1395
1396        assert_eq!(peeked.get_num_ne::<u64>(), 1111);
1397        assert_eq!(peeked.get_num_ne::<u64>(), 2222);
1398
1399        // The length of the buffer does not change just because we peek at its data.
1400        assert_eq!(buf.len(), 72);
1401
1402        // We consumed 16 bytes from the peeked view, so we should be looking at the remaining 4 bytes in the 2nd span.
1403        assert_eq!(peeked.first_slice().len(), 4);
1404
1405        assert_eq!(peeked.get_num_ne::<u64>(), 3333);
1406        assert_eq!(peeked.get_num_ne::<u64>(), 4444);
1407        assert_eq!(peeked.get_num_ne::<u64>(), 5555);
1408        assert_eq!(peeked.get_num_ne::<u64>(), 6666);
1409        assert_eq!(peeked.get_num_ne::<u64>(), 7777);
1410        assert_eq!(peeked.get_num_ne::<u64>(), 8888);
1411
1412        for _ in 0..8 {
1413            assert_eq!(peeked.get_byte(), 9);
1414        }
1415
1416        assert_eq!(peeked.len(), 0);
1417        assert_eq!(peeked.first_slice().len(), 0);
1418
1419        // Fill up the remaining 28 bytes so the buffer's entire capacity is filled.
1420        buf.put_byte_repeated(88, 28);
1421
1422        let mut peeked = buf.peek();
1423        peeked.advance(72);
1424
1425        assert_eq!(peeked.len(), 28);
1426
1427        for _ in 0..28 {
1428            assert_eq!(peeked.get_byte(), 88);
1429        }
1430    }
1431
1432    #[test]
1433    fn consume_part_of_frozen_span() {
1434        let mut buf = BytesBuf::new();
1435
1436        assert_eq!(buf.capacity(), 0);
1437        assert_eq!(buf.remaining_capacity(), 0);
1438
1439        let memory = FixedBlockMemory::new(nz!(10));
1440
1441        buf.reserve(100, &memory);
1442
1443        assert_eq!(buf.capacity(), 100);
1444
1445        buf.put_num_ne(1111_u64);
1446        // This freezes the first span of 10, as we filled it all up.
1447        buf.put_num_ne(2222_u64);
1448
1449        let mut first8 = buf.consume(U64_SIZE);
1450        assert_eq!(first8.get_num_ne::<u64>(), 1111);
1451        assert!(first8.is_empty());
1452
1453        buf.put_num_ne(3333_u64);
1454
1455        let mut second16 = buf.consume(16);
1456        assert_eq!(second16.get_num_ne::<u64>(), 2222);
1457        assert_eq!(second16.get_num_ne::<u64>(), 3333);
1458        assert!(second16.is_empty());
1459    }
1460
1461    #[test]
1462    fn empty_buffer() {
1463        let mut buf = BytesBuf::new();
1464        assert!(buf.is_empty());
1465        assert!(buf.peek().is_empty());
1466        assert_eq!(0, buf.first_unfilled_slice().len());
1467
1468        let consumed = buf.consume(0);
1469        assert!(consumed.is_empty());
1470
1471        let consumed = buf.consume_all();
1472        assert!(consumed.is_empty());
1473    }
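
    // A minimal sketch of an adjacent case, assuming (by analogy with empty_buffer above)
    // that consuming zero bytes from a non-empty buffer is also valid: it should yield an
    // empty view and leave the buffered data untouched.
    #[test]
    fn consume_zero_from_nonempty() {
        let memory = FixedBlockMemory::new(nz!(8));
        let mut buf = BytesBuf::new();

        // Capacity: 0 -> 8 (1x8)
        buf.reserve(U64_SIZE, &memory);
        buf.put_num_ne(1234_u64);

        let consumed = buf.consume(0);
        assert!(consumed.is_empty());

        // The buffered data is still all there.
        assert_eq!(buf.len(), 8);
    }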
1474
1475    #[test]
1476    fn iter_available_empty_with_capacity() {
1477        let mut buf = BytesBuf::new();
1478
1479        assert_eq!(buf.capacity(), 0);
1480        assert_eq!(buf.remaining_capacity(), 0);
1481
1482        let memory = FixedBlockMemory::new(nz!(100));
1483
1484        // Capacity: 0 -> 1000 (10x100)
1485        buf.reserve(1000, &memory);
1486
1487        assert_eq!(buf.capacity(), 1000);
1488        assert_eq!(buf.remaining_capacity(), 1000);
1489
1490        let iter = buf.iter_available_capacity(None);
1491
1492        // Demonstrating that we can hold all the slices at once, not just access them one by one.
1493        let slices = iter.collect::<Vec<_>>();
1494
1495        assert_eq!(slices.len(), 10);
1496
1497        for slice in slices {
1498            assert_eq!(slice.len(), 100);
1499        }
1500
1501        // After we have dropped all slice references, it is again legal to access the buffer.
1502        // While slice references still exist, the borrow checker blocks such access.
1503        buf.reserve(100, &memory);
1504    }
1505
1506    #[test]
1507    fn iter_available_nonempty() {
1508        let mut buf = BytesBuf::new();
1509
1510        assert_eq!(buf.capacity(), 0);
1511        assert_eq!(buf.remaining_capacity(), 0);
1512
1513        let memory = FixedBlockMemory::new(nz!(8));
1514
1515        // Capacity: 0 -> 16 (2x8)
1516        buf.reserve(TWO_U64_SIZE, &memory);
1517
1518        assert_eq!(buf.capacity(), 16);
1519        assert_eq!(buf.remaining_capacity(), 16);
1520
1521        // We write a u64 - this fills half the capacity and should result in
1522        // the first span builder being frozen and the second remaining in its entirety.
1523        buf.put_num_ne(1234_u64);
1524
1525        assert_eq!(buf.len(), 8);
1526        assert_eq!(buf.remaining_capacity(), 8);
1527
1528        let available_slices = buf.iter_available_capacity(None).collect::<Vec<_>>();
1529        assert_eq!(available_slices.len(), 1);
1530        assert_eq!(available_slices[0].len(), 8);
1531
1532        // We write a u32 - this fills half the remaining capacity, which results
1533        // in a half-filled span builder remaining in the buffer.
1534        buf.put_num_ne(5678_u32);
1535
1536        assert_eq!(buf.len(), 12);
1537        assert_eq!(buf.remaining_capacity(), 4);
1538
1539        let available_slices = buf.iter_available_capacity(None).collect::<Vec<_>>();
1540        assert_eq!(available_slices.len(), 1);
1541        assert_eq!(available_slices[0].len(), 4);
1542
1543        // We write a final u32 to use up all the capacity.
1544        buf.put_num_ne(9012_u32);
1545
1546        assert_eq!(buf.len(), 16);
1547        assert_eq!(buf.remaining_capacity(), 0);
1548
1549        assert_eq!(buf.iter_available_capacity(None).count(), 0);
1550    }
1551
1552    #[test]
1553    fn iter_available_empty_no_capacity() {
1554        let mut buf = BytesBuf::new();
1555
1556        assert_eq!(buf.capacity(), 0);
1557        assert_eq!(buf.remaining_capacity(), 0);
1558        assert_eq!(buf.iter_available_capacity(None).count(), 0);
1559    }
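
    // A minimal sketch of the capped variant, assuming the Option argument of
    // iter_available_capacity() limits the total visible capacity the same way
    // begin_vectored_write(Some(..)) does below, truncating the final slice.
    #[test]
    fn iter_available_with_max_len() {
        let mut buf = BytesBuf::new();
        let memory = FixedBlockMemory::new(nz!(8));

        // Capacity: 0 -> 16 (2x8)
        buf.reserve(TWO_U64_SIZE, &memory);

        let slices = buf.iter_available_capacity(Some(10)).collect::<Vec<_>>();

        // 10 visible bytes: the first full 8-byte slice plus 2 bytes of the second.
        assert_eq!(slices.len(), 2);
        assert_eq!(slices[0].len(), 8);
        assert_eq!(slices[1].len(), 2);
    }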
1560
1561    #[test]
1562    fn vectored_write_zero() {
1563        let mut buf = BytesBuf::new();
1564
1565        assert_eq!(buf.capacity(), 0);
1566        assert_eq!(buf.remaining_capacity(), 0);
1567
1568        let memory = FixedBlockMemory::new(nz!(8));
1569
1570        // Capacity: 0 -> 16 (2x8)
1571        buf.reserve(TWO_U64_SIZE, &memory);
1572
1573        assert_eq!(buf.capacity(), 16);
1574        assert_eq!(buf.remaining_capacity(), 16);
1575
1576        let vectored_write = buf.begin_vectored_write(None);
1577
1578        // SAFETY: Yes, we really wrote 0 bytes.
1579        unsafe {
1580            vectored_write.commit(0);
1581        }
1582
1583        assert_eq!(buf.capacity(), 16);
1584        assert_eq!(buf.remaining_capacity(), 16);
1585    }
1586
1587    #[test]
1588    fn vectored_write_one_slice() {
1589        let mut buf = BytesBuf::new();
1590
1591        assert_eq!(buf.capacity(), 0);
1592        assert_eq!(buf.remaining_capacity(), 0);
1593
1594        let memory = FixedBlockMemory::new(nz!(8));
1595
1596        // Capacity: 0 -> 8 (1x8)
1597        buf.reserve(U64_SIZE, &memory);
1598
1599        assert_eq!(buf.capacity(), 8);
1600        assert_eq!(buf.remaining_capacity(), 8);
1601
1602        let mut vectored_write = buf.begin_vectored_write(None);
1603
1604        let mut slices = vectored_write.iter_slices_mut().collect::<Vec<_>>();
1605        assert_eq!(slices.len(), 1);
1606        assert_eq!(slices[0].len(), 8);
1607
1608        write_copy_of_slice(slices[0], &0x3333_3333_3333_3333_u64.to_ne_bytes());
1609
1610        // SAFETY: Yes, we really wrote 8 bytes.
1611        unsafe {
1612            vectored_write.commit(8);
1613        }
1614
1615        assert_eq!(buf.len(), 8);
1616        assert_eq!(buf.remaining_capacity(), 0);
1617        assert_eq!(buf.capacity(), 8);
1618
1619        let mut result = buf.consume(U64_SIZE);
1620        assert_eq!(result.get_num_ne::<u64>(), 0x3333_3333_3333_3333);
1621    }
1622
1623    #[test]
1624    fn vectored_write_multiple_slices() {
1625        let mut buf = BytesBuf::new();
1626
1627        assert_eq!(buf.capacity(), 0);
1628        assert_eq!(buf.remaining_capacity(), 0);
1629
1630        let memory = FixedBlockMemory::new(nz!(8));
1631
1632        // Capacity: 0 -> 24 (3x8)
1633        buf.reserve(THREE_U64_SIZE, &memory);
1634
1635        assert_eq!(buf.capacity(), 24);
1636        assert_eq!(buf.remaining_capacity(), 24);
1637
1638        let mut vectored_write = buf.begin_vectored_write(None);
1639
1640        let mut slices = vectored_write.iter_slices_mut().collect::<Vec<_>>();
1641        assert_eq!(slices.len(), 3);
1642        assert_eq!(slices[0].len(), 8);
1643        assert_eq!(slices[1].len(), 8);
1644        assert_eq!(slices[2].len(), 8);
1645
1646        // We fill 12 bytes, leaving the middle chunk split in half between filled and available.
1647
1648        write_copy_of_slice(slices[0], &0x3333_3333_3333_3333_u64.to_ne_bytes());
1649        write_copy_of_slice(slices[1], &0x4444_4444_u32.to_ne_bytes());
1650
1651        // SAFETY: Yes, we really wrote 12 bytes.
1652        unsafe {
1653            vectored_write.commit(12);
1654        }
1655
1656        assert_eq!(buf.len(), 12);
1657        assert_eq!(buf.remaining_capacity(), 12);
1658        assert_eq!(buf.capacity(), 24);
1659
1660        let mut vectored_write = buf.begin_vectored_write(None);
1661
1662        let mut slices = vectored_write.iter_slices_mut().collect::<Vec<_>>();
1663        assert_eq!(slices.len(), 2);
1664        assert_eq!(slices[0].len(), 4);
1665        assert_eq!(slices[1].len(), 8);
1666
1667        // We fill the remaining 12 bytes.
1668
1669        write_copy_of_slice(slices[0], &0x5555_5555_u32.to_ne_bytes());
1670        write_copy_of_slice(slices[1], &0x6666_6666_6666_6666_u64.to_ne_bytes());
1671
1672        // SAFETY: Yes, we really wrote 12 bytes.
1673        unsafe {
1674            vectored_write.commit(12);
1675        }
1676
1677        assert_eq!(buf.len(), 24);
1678        assert_eq!(buf.remaining_capacity(), 0);
1679        assert_eq!(buf.capacity(), 24);
1680
1681        let mut result = buf.consume(THREE_U64_SIZE);
1682        assert_eq!(result.get_num_ne::<u64>(), 0x3333_3333_3333_3333);
1683        assert_eq!(result.get_num_ne::<u32>(), 0x4444_4444);
1684        assert_eq!(result.get_num_ne::<u32>(), 0x5555_5555);
1685        assert_eq!(result.get_num_ne::<u64>(), 0x6666_6666_6666_6666);
1686    }
1687
1688    #[test]
1689    fn vectored_write_max_len() {
1690        let mut buf = BytesBuf::new();
1691
1692        assert_eq!(buf.capacity(), 0);
1693        assert_eq!(buf.remaining_capacity(), 0);
1694
1695        let memory = FixedBlockMemory::new(nz!(8));
1696
1697        // Capacity: 0 -> 24 (3x8)
1698        buf.reserve(THREE_U64_SIZE, &memory);
1699
1700        assert_eq!(buf.capacity(), 24);
1701        assert_eq!(buf.remaining_capacity(), 24);
1702
1703        // We limit to 13 bytes of visible capacity, of which we will fill 12.
1704        let mut vectored_write = buf.begin_vectored_write(Some(13));
1705
1706        let mut slices = vectored_write.iter_slices_mut().collect::<Vec<_>>();
1707        assert_eq!(slices.len(), 2);
1708        assert_eq!(slices[0].len(), 8);
1709        assert_eq!(slices[1].len(), 5);
1710
1711        // We fill 12 bytes, leaving the middle chunk split in half between filled and available.
1712
1713        write_copy_of_slice(slices[0], &0x3333_3333_3333_3333_u64.to_ne_bytes());
1714        write_copy_of_slice(slices[1], &0x4444_4444_u32.to_ne_bytes());
1715
1716        // SAFETY: Yes, we really wrote 12 bytes.
1717        unsafe {
1718            vectored_write.commit(12);
1719        }
1720
1721        assert_eq!(buf.len(), 12);
1722        assert_eq!(buf.remaining_capacity(), 12);
1723        assert_eq!(buf.capacity(), 24);
1724
1725        // There are 12 bytes remaining and we set max_len to exactly cover those 12.
1726        let mut vectored_write = buf.begin_vectored_write(Some(12));
1727
1728        let mut slices = vectored_write.iter_slices_mut().collect::<Vec<_>>();
1729        assert_eq!(slices.len(), 2);
1730        assert_eq!(slices[0].len(), 4);
1731        assert_eq!(slices[1].len(), 8);
1732
1733        write_copy_of_slice(slices[0], &0x5555_5555_u32.to_ne_bytes());
1734        write_copy_of_slice(slices[1], &0x6666_6666_6666_6666_u64.to_ne_bytes());
1735
1736        // SAFETY: Yes, we really wrote 12 bytes.
1737        unsafe {
1738            vectored_write.commit(12);
1739        }
1740
1741        assert_eq!(buf.len(), 24);
1742        assert_eq!(buf.remaining_capacity(), 0);
1743        assert_eq!(buf.capacity(), 24);
1744
1745        let mut result = buf.consume(THREE_U64_SIZE);
1746        assert_eq!(result.get_num_ne::<u64>(), 0x3333_3333_3333_3333);
1747        assert_eq!(result.get_num_ne::<u32>(), 0x4444_4444);
1748        assert_eq!(result.get_num_ne::<u32>(), 0x5555_5555);
1749        assert_eq!(result.get_num_ne::<u64>(), 0x6666_6666_6666_6666);
1750    }
1751
1752    #[test]
1753    fn vectored_write_max_len_overflow() {
1754        let mut buf = BytesBuf::new();
1755
1756        let memory = FixedBlockMemory::new(nz!(8));
1757
1758        // Capacity: 0 -> 24 (3x8)
1759        buf.reserve(THREE_U64_SIZE, &memory);
1760
1761        assert_eq!(buf.capacity(), 24);
1762        assert_eq!(buf.remaining_capacity(), 24);
1763
1764        // We ask for 25 bytes of capacity but there are only 24 available. Oops!
1765        assert_panic!(buf.begin_vectored_write(Some(25)));
1766    }
1767
1768    #[test]
1769    fn vectored_write_overcommit() {
1770        let mut buf = BytesBuf::new();
1771
1772        assert_eq!(buf.capacity(), 0);
1773        assert_eq!(buf.remaining_capacity(), 0);
1774
1775        let memory = FixedBlockMemory::new(nz!(8));
1776
1777        // Capacity: 0 -> 16 (2x8)
1778        buf.reserve(TWO_U64_SIZE, &memory);
1779
1780        assert_eq!(buf.capacity(), 16);
1781        assert_eq!(buf.remaining_capacity(), 16);
1782
1783        let vectored_write = buf.begin_vectored_write(None);
1784
1785        assert_panic!(
1786            // SAFETY: Intentionally lying here to trigger a panic.
1787            unsafe {
1788                vectored_write.commit(17);
1789            }
1790        );
1791    }
1792
1793    #[test]
1794    fn vectored_write_abort() {
1795        let mut buf = BytesBuf::new();
1796
1797        assert_eq!(buf.capacity(), 0);
1798        assert_eq!(buf.remaining_capacity(), 0);
1799
1800        let memory = FixedBlockMemory::new(nz!(8));
1801
1802        // Capacity: 0 -> 8 (1x8)
1803        buf.reserve(U64_SIZE, &memory);
1804
1805        assert_eq!(buf.capacity(), 8);
1806        assert_eq!(buf.remaining_capacity(), 8);
1807
1808        let mut vectored_write = buf.begin_vectored_write(None);
1809
1810        let mut slices = vectored_write.iter_slices_mut().collect::<Vec<_>>();
1811        assert_eq!(slices.len(), 1);
1812        assert_eq!(slices[0].len(), 8);
1813
1814        write_copy_of_slice(slices[0], &0x3333_3333_3333_3333_u64.to_ne_bytes());
1815
1816        // Actually never mind - we drop it here.
1817        #[expect(clippy::drop_non_drop, reason = "Just being explicit for illustration")]
1818        drop(vectored_write);
1819
1820        assert_eq!(buf.len(), 0);
1821        assert_eq!(buf.remaining_capacity(), 8);
1822        assert_eq!(buf.capacity(), 8);
1823    }
1824
1825    #[test]
1826    fn extend_lifetime_references_all_blocks() {
1827        // Here we need to detect when a block is released (i.e. its ref count drops to zero).
1828
1829        // SAFETY: We are not allowed to drop this until all BlockRef instances are gone. This is fine
1830        // because it is dropped at the end of the function, after all BlockRef instances.
1831        let block1 = unsafe { TestMemoryBlock::new(nz!(8), None) };
1832        let block1 = pin!(block1);
1833
1834        // SAFETY: We are not allowed to drop this until all BlockRef instances are gone. This is fine
1835        // because it is dropped at the end of the function, after all BlockRef instances.
1836        let block2 = unsafe { TestMemoryBlock::new(nz!(8), None) };
1837        let block2 = pin!(block2);
1838
1839        let guard = {
1840            // SAFETY: We guarantee exclusive access to the memory capacity.
1841            let block1 = unsafe { block1.as_ref().to_block() };
1842            // SAFETY: We guarantee exclusive access to the memory capacity.
1843            let block2 = unsafe { block2.as_ref().to_block() };
1844
1845            let mut buf = BytesBuf::from_blocks([block1, block2]);
1846
1847            // Freezes first span of 8, retains one span builder.
1848            buf.put_num_ne(1234_u64);
1849
1850            assert_eq!(buf.frozen_spans.len(), 1);
1851            assert_eq!(buf.span_builders_reversed.len(), 1);
1852
1853            buf.extend_lifetime()
1854        };
1855
1856        // The BytesBuf was destroyed and all BlockRefs it was holding are gone.
1857        // However, the lifetime guard is still alive and has a BlockRef.
1858
1859        assert_eq!(block1.ref_count(), 1);
1860        assert_eq!(block2.ref_count(), 1);
1861
1862        drop(guard);
1863
1864        // And now they should all be dead.
1865        assert_eq!(block1.ref_count(), 0);
1866        assert_eq!(block2.ref_count(), 0);
1867    }
1868
1869    #[test]
1870    fn extend_lifetime_during_vectored_write_references_all_blocks() {
1871        // Here we need to detect when a block is released (i.e. its ref count drops to zero).
1872
1873        // SAFETY: We are not allowed to drop this until all BlockRef instances are gone. This is fine
1874        // because it is dropped at the end of the function, after all BlockRef instances.
1875        let block1 = unsafe { TestMemoryBlock::new(nz!(8), None) };
1876        let block1 = pin!(block1);
1877
1878        // SAFETY: We are not allowed to drop this until all BlockRef instances are gone. This is fine
1879        // because it is dropped at the end of the function, after all BlockRef instances.
1880        let block2 = unsafe { TestMemoryBlock::new(nz!(8), None) };
1881        let block2 = pin!(block2);
1882
1883        let guard = {
1884            // SAFETY: We guarantee exclusive access to the memory capacity.
1885            let block1 = unsafe { block1.as_ref().to_block() };
1886            // SAFETY: We guarantee exclusive access to the memory capacity.
1887            let block2 = unsafe { block2.as_ref().to_block() };
1888
1889            let mut buf = BytesBuf::from_blocks([block1, block2]);
1890
1891            // Freezes first span of 8, retains one span builder.
1892            buf.put_num_ne(1234_u64);
1893
1894            assert_eq!(buf.frozen_spans.len(), 1);
1895            assert_eq!(buf.span_builders_reversed.len(), 1);
1896
1897            let vectored_write = buf.begin_vectored_write(None);
1898
1899            vectored_write.extend_lifetime()
1900        };
1901
1902        // The BytesBuf was destroyed and all BlockRefs it was holding are gone.
1903        // However, the lifetime guard is still alive and has a BlockRef.
1904
1905        assert_eq!(block1.ref_count(), 1);
1906        assert_eq!(block2.ref_count(), 1);
1907
1908        drop(guard);
1909
1910        // And now they should all be dead.
1911        assert_eq!(block1.ref_count(), 0);
1912        assert_eq!(block2.ref_count(), 0);
1913    }
1914
1915    #[test]
1916    fn from_view() {
1917        let memory = GlobalPool::new();
1918
1919        let view1 = BytesView::copied_from_slice(b"bla bla bla", &memory);
1920
1921        let mut buf: BytesBuf = view1.clone().into();
1922
1923        let view2 = buf.consume_all();
1924
1925        assert_eq!(view1, view2);
1926    }
1927
1928    #[test]
1929    fn consume_manifest_correctly_calculated() {
1930        let memory = FixedBlockMemory::new(nz!(10));
1931
1932        let mut buf = BytesBuf::new();
1933        buf.reserve(100, &memory);
1934
1935        // 32 bytes of data: three full 10-byte spans plus 2 bytes that spill into a fourth.
1936        buf.put_num_ne(1111_u64);
1937        buf.put_num_ne(1111_u64);
1938        buf.put_num_ne(1111_u64);
1939        buf.put_num_ne(1111_u64);
1940
1941        // Freeze it all - a precondition to consuming is to freeze everything first.
1942        buf.ensure_frozen(32);
1943
1944        let consume8 = buf.prepare_consume(8);
1945
1946        assert_eq!(consume8.detach_complete_frozen_spans, 0);
1947        assert_eq!(consume8.consume_partial_span_bytes, 8);
1948        assert_eq!(consume8.required_spans_capacity(), 1);
1949
1950        let consume10 = buf.prepare_consume(10);
1951
1952        assert_eq!(consume10.detach_complete_frozen_spans, 1);
1953        assert_eq!(consume10.consume_partial_span_bytes, 0);
1954        assert_eq!(consume10.required_spans_capacity(), 1);
1955
1956        let consume11 = buf.prepare_consume(11);
1957
1958        assert_eq!(consume11.detach_complete_frozen_spans, 1);
1959        assert_eq!(consume11.consume_partial_span_bytes, 1);
1960        assert_eq!(consume11.required_spans_capacity(), 2);
1961
1962        let consume30 = buf.prepare_consume(30);
1963
1964        assert_eq!(consume30.detach_complete_frozen_spans, 3);
1965        assert_eq!(consume30.consume_partial_span_bytes, 0);
1966        assert_eq!(consume30.required_spans_capacity(), 3);
1967
1968        let consume31 = buf.prepare_consume(31);
1969
1970        assert_eq!(consume31.detach_complete_frozen_spans, 3);
1971        assert_eq!(consume31.consume_partial_span_bytes, 1);
1972        assert_eq!(consume31.required_spans_capacity(), 4);
1973
1974        let consume32 = buf.prepare_consume(32);
1975
1976        // Note that even though our memory comes in blocks of 10, there are only 2 bytes
1977        // in the last frozen span, for a total frozen of 10 + 10 + 10 + 2. We consume it all.
1978        // Frozen spans do not have to be full memory blocks!
1979        assert_eq!(consume32.detach_complete_frozen_spans, 4);
1980        assert_eq!(consume32.consume_partial_span_bytes, 0);
1981        assert_eq!(consume32.required_spans_capacity(), 4);
1982    }
1983
1984    #[test]
1985    fn size_change_detector() {
1986        // The point of this is not to say that we expect it to have a specific size but to allow
1987        // us to easily detect when the size changes and (if we choose to) bless the change.
1988        // We assume 64-bit pointers - any support for 32-bit is a problem for the future.
1989        assert_eq!(size_of::<BytesBuf>(), 552);
1990    }
1991
1992    #[test]
1993    fn peek_empty_builder() {
1994        let buf = BytesBuf::new();
1995        let peeked = buf.peek();
1996
1997        assert!(peeked.is_empty());
1998        assert_eq!(peeked.len(), 0);
1999    }
2000
2001    #[test]
2002    fn peek_with_frozen_spans_only() {
2003        let memory = FixedBlockMemory::new(nz!(8));
2004        let mut buf = BytesBuf::new();
2005
2006        buf.reserve(16, &memory);
2007        buf.put_num_ne(0x1111_1111_1111_1111_u64);
2008        buf.put_num_ne(0x2222_2222_2222_2222_u64);
2009        // Two u64 writes fill both 8-byte blocks exactly, so both spans are now frozen.
2010        assert_eq!(buf.len(), 16);
2011
2012        let mut peeked = buf.peek();
2013
2014        assert_eq!(peeked.len(), 16);
2015        assert_eq!(peeked.get_num_ne::<u64>(), 0x1111_1111_1111_1111);
2016        assert_eq!(peeked.get_num_ne::<u64>(), 0x2222_2222_2222_2222);
2017
2018        // Original builder still has the data
2019        assert_eq!(buf.len(), 16);
2020    }
2021
2022    #[test]
2023    fn peek_with_partially_filled_span_builder() {
2024        let memory = FixedBlockMemory::new(nz!(20));
2025        let mut buf = BytesBuf::new();
2026
2027        buf.reserve(20, &memory);
2028        buf.put_num_ne(0x3333_3333_3333_3333_u64);
2029        buf.put_num_ne(0x4444_u16);
2030        // We have 10 bytes filled in a 20-byte block, so the span builder remains partially filled.
2031        assert_eq!(buf.len(), 10);
2032
2033        let mut peeked = buf.peek();
2034
2035        assert_eq!(peeked.len(), 10);
2036        assert_eq!(peeked.get_num_ne::<u64>(), 0x3333_3333_3333_3333);
2037        assert_eq!(peeked.get_num_ne::<u16>(), 0x4444);
2038
2039        // Original builder still has the data
2040        assert_eq!(buf.len(), 10);
2041    }
2042
2043    #[test]
2044    fn peek_preserves_capacity_of_partial_span_builder() {
2045        let memory = FixedBlockMemory::new(nz!(20));
2046        let mut buf = BytesBuf::new();
2047
2048        buf.reserve(20, &memory);
2049        buf.put_num_ne(0x5555_5555_5555_5555_u64);
2050
2051        // We have 8 bytes filled and 12 bytes remaining capacity
2052        assert_eq!(buf.len(), 8);
2053        assert_eq!(buf.remaining_capacity(), 12);
2054
2055        let mut peeked = buf.peek();
2056
2057        assert_eq!(peeked.len(), 8);
2058        assert_eq!(peeked.get_num_ne::<u64>(), 0x5555_5555_5555_5555);
2059
2060        // CRITICAL TEST: Capacity should be preserved
2061        assert_eq!(buf.len(), 8);
2062        assert_eq!(buf.remaining_capacity(), 12);
2063
2064        // We should still be able to write more data
2065        buf.put_num_ne(0x6666_6666_u32);
2066        assert_eq!(buf.len(), 12);
2067        assert_eq!(buf.remaining_capacity(), 8);
2068
2069        // And we can peek again to see the updated data
2070        let mut peeked2 = buf.peek();
2071        assert_eq!(peeked2.len(), 12);
2072        assert_eq!(peeked2.get_num_ne::<u64>(), 0x5555_5555_5555_5555);
2073        assert_eq!(peeked2.get_num_ne::<u32>(), 0x6666_6666);
2074    }
2075
2076    #[test]
2077    fn peek_with_mixed_frozen_and_unfrozen() {
2078        let memory = FixedBlockMemory::new(nz!(10));
2079        let mut buf = BytesBuf::new();
2080
2081        buf.reserve(30, &memory);
2082
2083        // Fill first block completely (10 bytes) - will be frozen
2084        buf.put_num_ne(0x1111_1111_1111_1111_u64);
2085        buf.put_num_ne(0x2222_u16);
2086
2087        // Fill second block completely (10 bytes) - will be frozen
2088        buf.put_num_ne(0x3333_3333_3333_3333_u64);
2089        buf.put_num_ne(0x4444_u16);
2090
2091        // Partially fill third block (only 4 bytes) - will remain unfrozen
2092        buf.put_num_ne(0x5555_5555_u32);
2093
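        // Worked total: (8 + 2) + (8 + 2) + 4 = 24 bytes across two frozen spans and one span builder.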
2094        assert_eq!(buf.len(), 24);
2095        assert_eq!(buf.remaining_capacity(), 6);
2096
2097        let mut peeked = buf.peek();
2098
2099        assert_eq!(peeked.len(), 24);
2100        assert_eq!(peeked.get_num_ne::<u64>(), 0x1111_1111_1111_1111);
2101        assert_eq!(peeked.get_num_ne::<u16>(), 0x2222);
2102        assert_eq!(peeked.get_num_ne::<u64>(), 0x3333_3333_3333_3333);
2103        assert_eq!(peeked.get_num_ne::<u16>(), 0x4444);
2104        assert_eq!(peeked.get_num_ne::<u32>(), 0x5555_5555);
2105        // Original builder still has all the data and capacity
2106        assert_eq!(buf.len(), 24);
2107        assert_eq!(buf.remaining_capacity(), 6);
2108    }
2109
2110    #[test]
2111    fn peek_then_consume() {
2112        let memory = FixedBlockMemory::new(nz!(20));
2113        let mut buf = BytesBuf::new();
2114
2115        buf.reserve(20, &memory);
2116        buf.put_num_ne(0x7777_7777_7777_7777_u64);
2117        buf.put_num_ne(0x8888_8888_u32);
2118        assert_eq!(buf.len(), 12);
2119
2120        // Peek at the data
2121        let mut peeked = buf.peek();
2122        assert_eq!(peeked.len(), 12);
2123        assert_eq!(peeked.get_num_ne::<u64>(), 0x7777_7777_7777_7777);
2124
2125        // Original builder still has the data
2126        assert_eq!(buf.len(), 12);
2127
2128        // Now consume some of it
2129        let mut consumed = buf.consume(8);
2130        assert_eq!(consumed.get_num_ne::<u64>(), 0x7777_7777_7777_7777);
2131
2132        // Builder should have less data now
2133        assert_eq!(buf.len(), 4);
2134
2135        // Peek again should show the remaining data
2136        let mut peeked2 = buf.peek();
2137        assert_eq!(peeked2.len(), 4);
2138        assert_eq!(peeked2.get_num_ne::<u32>(), 0x8888_8888);
2139    }
2140
2141    #[test]
2142    fn peek_multiple_times() {
2143        let memory = FixedBlockMemory::new(nz!(20));
2144        let mut buf = BytesBuf::new();
2145
2146        buf.reserve(20, &memory);
2147        buf.put_num_ne(0xAAAA_AAAA_AAAA_AAAA_u64);
2148
2149        // Peek multiple times - each should work independently
2150        let mut peeked1 = buf.peek();
2151        let mut peeked2 = buf.peek();
2152
2153        assert_eq!(peeked1.get_num_ne::<u64>(), 0xAAAA_AAAA_AAAA_AAAA);
2154        assert_eq!(peeked2.get_num_ne::<u64>(), 0xAAAA_AAAA_AAAA_AAAA);
2155
2156        // Original builder still intact
2157        assert_eq!(buf.len(), 8);
2158    }
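
    // A minimal sketch, assuming that after consume_all() the buffer behaves like a
    // freshly emptied one, so a subsequent peek yields an empty view.
    #[test]
    fn peek_after_consume_all() {
        let memory = FixedBlockMemory::new(nz!(20));
        let mut buf = BytesBuf::new();

        buf.reserve(20, &memory);
        buf.put_num_ne(0xBBBB_BBBB_BBBB_BBBB_u64);

        let consumed = buf.consume_all();
        assert_eq!(consumed.len(), 8);

        assert!(buf.is_empty());
        assert!(buf.peek().is_empty());
    }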
2159
2160    // Polyfill for the unstable write_copy_of_slice on [MaybeUninit<u8>], to be stabilized soon: https://github.com/rust-lang/rust/issues/79995
2161    fn write_copy_of_slice(dst: &mut [MaybeUninit<u8>], src: &[u8]) {
2162        assert!(dst.len() >= src.len());
2163
2164        // SAFETY: We have verified that dst is large enough.
2165        unsafe {
2166            src.as_ptr().copy_to_nonoverlapping(dst.as_mut_ptr().cast(), src.len());
2167        }
2168    }
2169}