// bytesbuf/view.rs
1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4use std::marker::PhantomData;
5use std::num::NonZero;
6use std::ops::{Bound, RangeBounds};
7use std::{iter, mem};
8
9use nm::{Event, Magnitude};
10use smallvec::SmallVec;
11
12use crate::mem::{BlockMeta, BlockSize, Memory};
13use crate::{BytesViewReader, MAX_INLINE_SPANS, MemoryGuard, Span};
14
/// A view over a sequence of immutable bytes.
///
/// Only the contents are immutable - the view itself can be mutated in terms of progressively
/// marking the byte sequence as consumed until the view becomes empty.
///
/// # Creating a `BytesView`
///
/// Instances can be created in different ways:
///
/// * Use a [`BytesBuf`] to build the byte sequence piece by piece, consuming the buffered data as a new `BytesView` when finished.
/// * Clone an existing `BytesView` with [`clone()`].
/// * Take a range of bytes from an existing `BytesView` via [`range()`].
/// * Combine multiple `BytesView` instances into one via [`from_views()`], [`concat()`] or [`append()`].
/// * Copy data from a `&[u8]` using [`copied_from_slice()`].
///
/// Some of these methods may require you to first [obtain access to a memory provider].
#[doc = include_str!("../doc/snippets/sequence_memory_layout.md")]
///
/// # Example
///
/// ```
/// # let memory = bytesbuf::mem::GlobalPool::new();
/// use bytesbuf::BytesView;
///
/// let mut view = BytesView::copied_from_slice(b"Hello!", &memory);
///
/// // Read bytes one at a time until the view is empty.
/// while !view.is_empty() {
///     let byte = view.get_byte();
///     println!("Read byte: {byte}");
/// }
///
/// assert!(view.is_empty());
/// ```
///
/// [`BytesBuf`]: crate::BytesBuf
/// [`copied_from_slice()`]: Self::copied_from_slice
/// [`concat()`]: Self::concat
/// [`append()`]: Self::append
/// [`range()`]: Self::range
/// [`from_views()`]: Self::from_views
/// [`clone()`]: Self::clone
/// [obtain access to a memory provider]: crate#producing-byte-sequences
#[derive(Clone, Debug)]
pub struct BytesView {
    /// The spans of the byte sequence, stored in reverse order for efficient consumption
    /// by popping items off the end of the collection.
    pub(crate) spans_reversed: SmallVec<[Span; MAX_INLINE_SPANS]>,

    /// We cache the length so we do not have to recalculate it every time it is queried.
    /// Invariant: always equals the sum of the lengths of `spans_reversed`.
    len: usize,
}
67
68impl BytesView {
69    /// Creates a view over a zero-sized byte sequence.
70    ///
71    /// Use a [`BytesBuf`] to create a view over some actual data.
72    ///
73    /// [`BytesBuf`]: crate::BytesBuf
74    #[cfg_attr(test, mutants::skip)] // Generates no-op mutations, not useful.
75    #[must_use]
76    pub const fn new() -> Self {
77        Self {
78            spans_reversed: SmallVec::new_const(),
79            len: 0,
80        }
81    }
82
83    pub(crate) fn from_spans_reversed(spans_reversed: SmallVec<[Span; MAX_INLINE_SPANS]>) -> Self {
84        #[cfg(debug_assertions)]
85        spans_reversed.iter().for_each(|span| assert!(!span.is_empty()));
86
87        // We can use this to fine-tune the inline span count once we have real-world data.
88        VIEW_CREATED_SPANS.with(|x| x.observe(spans_reversed.len()));
89
90        let len = spans_reversed.iter().fold(0_usize, |acc, span: &Span| {
91            acc.checked_add(span.len() as usize)
92                .expect("attempted to create a BytesView larger than usize::MAX bytes")
93        });
94
95        Self { spans_reversed, len }
96    }
97
98    /// (For testing) Concatenates a number of spans, yielding a view that combines the spans.
99    ///
100    /// Later changes made to the input spans will not be reflected in the resulting view.
101    #[cfg(test)]
102    pub(crate) fn from_spans<I>(spans: I) -> Self
103    where
104        I: IntoIterator<Item = Span>,
105        <I as IntoIterator>::IntoIter: iter::DoubleEndedIterator,
106    {
107        let spans_reversed = spans.into_iter().rev().collect::<SmallVec<_>>();
108
109        Self::from_spans_reversed(spans_reversed)
110    }
111
112    /// Concatenates a number of existing byte sequences, yielding a combined view.
113    ///
114    /// # Example
115    ///
116    /// ```
117    /// # let memory = bytesbuf::mem::GlobalPool::new();
118    /// use bytesbuf::BytesView;
119    ///
120    /// let header = BytesView::copied_from_slice(b"HTTP/1.1 ", &memory);
121    /// let status = BytesView::copied_from_slice(b"200 ", &memory);
122    /// let message = BytesView::copied_from_slice(b"OK", &memory);
123    ///
124    /// let response_line = BytesView::from_views([header, status, message]);
125    ///
126    /// assert_eq!(response_line.len(), 15);
127    /// assert_eq!(response_line, b"HTTP/1.1 200 OK");
128    /// ```
129    ///
130    /// # Panics
131    ///
132    /// Panics if the resulting view would be larger than `usize::MAX` bytes.
133    pub fn from_views<I>(views: I) -> Self
134    where
135        I: IntoIterator<Item = Self>,
136        <I as IntoIterator>::IntoIter: iter::DoubleEndedIterator,
137    {
138        // Note that this requires the SmallVec to resize on the fly because thanks to the
139        // two-level mapping here, there is no usable size hint that lets it know the size in
140        // advance. If we had the span count here, we could avoid some allocations.
141
142        // For a given input ABC123.
143        let spans_reversed: SmallVec<_> = views
144            .into_iter()
145            // We first reverse the views: 123ABC.
146            .rev()
147            // And from inside each view we take the reversed spans: 321CBA.
148            .flat_map(|view| view.spans_reversed)
149            // Which become our final SmallVec of spans. Great success!
150            .collect();
151
152        Self::from_spans_reversed(spans_reversed)
153    }
154
155    /// Creates a `BytesView` by copying the contents of a `&[u8]`.
156    ///
157    /// # Example
158    ///
159    /// ```
160    /// # struct TcpConnection;
161    /// # impl TcpConnection {
162    /// #     fn memory(&self) -> impl bytesbuf::mem::Memory { bytesbuf::mem::GlobalPool::new() }
163    /// # }
164    /// # let tcp_connection = TcpConnection;
165    /// use bytesbuf::BytesView;
166    ///
167    /// const CONTENT_TYPE_KEY: &[u8] = b"Content-Type: ";
168    ///
169    /// let header_key = BytesView::copied_from_slice(CONTENT_TYPE_KEY, &tcp_connection.memory());
170    ///
171    /// assert_eq!(header_key.len(), 14);
172    /// ```
173    ///
174    /// # Reusing without copying
175    ///
176    /// There is intentionally no mechanism in the `bytesbuf` crate to reference an existing
177    /// `&[u8]` without copying the contents, even if it has a `'static` lifetime.
178    ///
179    /// The purpose of this limitation is to discourage accidentally involving arbitrary
180    /// memory in high-performance I/O workflows. For efficient I/O processing, data must
181    /// be stored in memory configured according to the needs of the consuming I/O endpoint,
182    /// which is not the case for an arbitrary `&'static [u8]`.
183    ///
184    /// To reuse memory allocations, you need to reuse `BytesView` instances themselves.
185    /// See the `bb_reuse.rs` example in the `bytesbuf` crate for a detailed example.
186    #[must_use]
187    pub fn copied_from_slice(bytes: &[u8], memory: &impl Memory) -> Self {
188        let mut buf = memory.reserve(bytes.len());
189        buf.put_slice(bytes);
190        buf.consume_all()
191    }
192
193    pub(crate) fn into_spans_reversed(self) -> SmallVec<[Span; MAX_INLINE_SPANS]> {
194        self.spans_reversed
195    }
196
197    /// The number of bytes exposed through the view.
198    ///
199    /// Consuming bytes from the view reduces its length.
200    ///
201    /// # Example
202    ///
203    /// ```
204    /// # let memory = bytesbuf::mem::GlobalPool::new();
205    /// use bytesbuf::BytesView;
206    ///
207    /// let mut view = BytesView::copied_from_slice(b"Hello", &memory);
208    /// assert_eq!(view.len(), 5);
209    ///
210    /// _ = view.get_byte();
211    /// assert_eq!(view.len(), 4);
212    ///
213    /// _ = view.get_num_le::<u16>();
214    /// assert_eq!(view.len(), 2);
215    /// ```
216    #[cfg_attr(test, mutants::skip)] // Mutating this can cause infinite loops.
217    #[must_use]
218    pub fn len(&self) -> usize {
219        // Sanity check.
220        debug_assert_eq!(self.len, self.spans_reversed.iter().map(|x| x.len() as usize).sum::<usize>());
221
222        self.len
223    }
224
225    /// Whether the view is of a zero-sized byte sequence.
226    #[cfg_attr(test, mutants::skip)] // Mutating this can cause infinite loops.
227    #[must_use]
228    pub fn is_empty(&self) -> bool {
229        self.len() == 0
230    }
231
232    /// Extends the lifetime of the memory capacity backing this view.
233    ///
234    /// This can be useful when unsafe code is used to reference the contents of a `BytesView` and it
235    /// is possible to reach a condition where the `BytesView` itself no longer exists, even though
236    /// the contents are referenced (e.g. because the remaining references are in non-Rust code).
237    pub fn extend_lifetime(&self) -> MemoryGuard {
238        MemoryGuard::new(self.spans_reversed.iter().map(Span::block_ref).map(Clone::clone))
239    }
240
241    /// Returns a range of the byte sequence.
242    ///
243    /// The bounds logic only considers data currently present in the view.
244    /// Any data already consumed is not considered part of the view.
245    ///
246    /// # Example
247    ///
248    /// ```
249    /// # let memory = bytesbuf::mem::GlobalPool::new();
250    /// use bytesbuf::BytesView;
251    ///
252    /// let view = BytesView::copied_from_slice(b"Hello, world!", &memory);
253    ///
254    /// assert_eq!(view.range(0..5), b"Hello");
255    /// assert_eq!(view.range(7..), b"world!");
256    /// assert_eq!(view.range(..5), b"Hello");
257    /// assert_eq!(view.range(..), b"Hello, world!");
258    /// ```
259    ///
260    /// # Panics
261    ///
262    /// Panics if the provided range is outside the bounds of the view.
263    #[must_use]
264    pub fn range<R>(&self, range: R) -> Self
265    where
266        R: RangeBounds<usize>,
267    {
268        self.range_checked(range).expect("provided range out of view bounds")
269    }
270
    /// Returns a range of the byte sequence or `None` if out of bounds.
    ///
    /// The bounds logic only considers data currently present in the view.
    /// Any data already consumed is not considered part of the view.
    #[must_use]
    #[expect(clippy::missing_panics_doc, reason = "only unreachable panics")]
    #[expect(clippy::too_many_lines, reason = "acceptable for now")]
    #[cfg_attr(test, mutants::skip)] // Mutations include impossible conditions that we cannot test as well as mutations that are functionally equivalent.
    pub fn range_checked<R>(&self, range: R) -> Option<Self>
    where
        R: RangeBounds<usize>,
    {
        // Normalize the start bound into "number of bytes before the range begins".
        // Arithmetic overflow here means the range is unrepresentable -> None.
        let bytes_until_range = match range.start_bound() {
            Bound::Included(&x) => x,
            Bound::Excluded(&x) => x.checked_add(1)?,
            Bound::Unbounded => 0,
        };

        // Normalize the end bound into "number of bytes inside the range".
        // Underflow (end before start) or overflow likewise yields None.
        let bytes_in_range = match range.end_bound() {
            Bound::Included(&x) => x.checked_add(1)?.checked_sub(bytes_until_range)?,
            Bound::Excluded(&x) => x.checked_sub(bytes_until_range)?,
            Bound::Unbounded => self.len().checked_sub(bytes_until_range)?,
        };

        let required_len = bytes_until_range
            .checked_add(bytes_in_range)
            .expect("overflowing usize is impossible because we are calculating offset into usize-bounded range");

        if required_len > self.len() {
            // Did not have enough data to cover the range.
            return None;
        }

        if bytes_in_range == 0 {
            // Empty sequence is empty.
            return Some(Self::new());
        }

        // Take the spans from the end of our spans_reversed (the logical beginning), while taking
        // bytes in each span from the beginning of the span. We implement this in two passes:
        // 1. Identify relevant range of spans. The idea is that our slice may just be a tiny
        //    subset of the entire sequence and we should not be processing parts of the sequence
        //    that do not matter (either because they are before the slice or after it).
        // 2. Within the relevant spans, skip to the relevant bytes, take them, and ignore the rest.
        //    This may range across any number of spans, though due to the pre-filtering in step 1
        //    we know that we only need to skip the head/tail in the first and last span.

        // Our accounting is all "logical", content-based.
        // These are the outputs from the first pass.
        let mut spans_until_range: usize = 0;
        let mut spans_in_range: usize = 0;
        let mut bytes_to_skip_in_first_relevant_span: BlockSize = 0;
        let mut bytes_to_leave_in_last_relevant_span: BlockSize = 0;

        // Pass 1: walk spans in logical order, counting how many whole spans precede
        // the range and how many spans the range touches.
        {
            let mut pass1_bytes_until_range = bytes_until_range;
            let mut pass1_bytes_in_range = bytes_in_range;

            for span in self.spans_reversed.iter().rev() {
                let bytes_in_span = span.len();
                let bytes_in_span_usize = bytes_in_span as usize;

                if pass1_bytes_until_range > 0 && bytes_in_span_usize <= pass1_bytes_until_range {
                    // This entire span is uninteresting for us - skip.
                    spans_until_range = spans_until_range
                        .checked_add(1)
                        .expect("overflowing usize is impossible because we are calculating chunks within usize-bounded range");
                    pass1_bytes_until_range = pass1_bytes_until_range
                        .checked_sub(bytes_in_span_usize)
                        .expect("somehow ended up with negative bytes remaining until range start - only possible if the math is wrong");
                    continue;
                }

                // If we got to this point, it is an interesting span.

                // If this is the last span, we need to account for the bytes we are leaving behind.
                bytes_to_leave_in_last_relevant_span = bytes_in_span;

                // If we are at this point, pass1_bytes_until_range is either zero or points to some
                // position within this span, so it is now `BlockSize` bounded.
                let pass1_bytes_until_range_block_size = pass1_bytes_until_range.try_into().expect("we are supposedly indicating a position inside a span but the offset is larger than a memory block range - algorithm error");

                // We may still have some prefix to remove, so not every byte is relevant.
                if pass1_bytes_until_range != 0 {
                    bytes_to_skip_in_first_relevant_span = pass1_bytes_until_range_block_size;

                    // The first span might also be the last span.
                    bytes_to_leave_in_last_relevant_span = bytes_to_leave_in_last_relevant_span
                        .checked_sub(bytes_to_skip_in_first_relevant_span)
                        .expect("somehow ended up with negative bytes remaining in span - only possible if the math is wrong");
                }

                #[expect(
                    clippy::cast_possible_truncation,
                    reason = "the usize never contains a value outside bounds of BlockSize - guarded by min()"
                )]
                let relevant_bytes_in_span = ((bytes_in_span
                    .checked_sub(pass1_bytes_until_range_block_size)
                    .expect("somehow ended up with negative bytes remaining in span - only possible if the math is wrong")
                    as usize)
                    .min(pass1_bytes_in_range)) as BlockSize;

                bytes_to_leave_in_last_relevant_span = bytes_to_leave_in_last_relevant_span
                    .checked_sub(relevant_bytes_in_span)
                    .expect("somehow ended up with negative bytes remaining in span - only possible if the math is wrong");

                // Whatever happened, we have reached the relevant range now.
                spans_in_range = spans_in_range
                    .checked_add(1)
                    .expect("overflowing usize is impossible because we are calculating chunks within usize-bounded range");

                pass1_bytes_until_range = 0;

                pass1_bytes_in_range = pass1_bytes_in_range
                    .checked_sub(relevant_bytes_in_span as usize)
                    .expect("somehow ended up with negative bytes remaining in range - only possible if the math is wrong");

                if pass1_bytes_in_range == 0 {
                    // We have reached the end of the range - remaining spans are not interesting.
                    break;
                }
            }
        }

        let relevant_spans = self.spans_reversed.iter().rev().skip(spans_until_range).take(spans_in_range);

        let mut bytes_remaining_in_range = bytes_in_range;

        // Pass 2:
        // We skip bytes_to_skip_in_first_relevant_span.
        // Then we take until bytes_remaining_in_range runs out.
        // The end. We know that every span is relevant now.

        // NB! We have to for-iterate over the relevant spans and not blindly use .map() because
        // .map() is lazy and may be evaluated in a completely different order from what we would
        // expect "logically". The easiest way for us to control iteration order is to for-loop.
        let mut slice_spans = SmallVec::with_capacity(spans_in_range);

        // These are in REVERSE ORDER, same as we use in storage. So we start with the last span.
        for span in relevant_spans.rev() {
            let mut bytes_to_even_consider = span.len();

            // If this is nonzero, we must be looking at the last relevant span.
            if bytes_to_leave_in_last_relevant_span > 0 {
                bytes_to_even_consider = bytes_to_even_consider
                    .checked_sub(bytes_to_leave_in_last_relevant_span)
                    .expect("somehow ended up with negative bytes remaining in span - only possible if the math is wrong");

                bytes_to_leave_in_last_relevant_span = 0;
            }

            #[expect(
                clippy::cast_possible_truncation,
                reason = "the usize never contains a value outside bounds of BlockSize - guarded by min()"
            )]
            let mut max_take_bytes = (bytes_to_even_consider as usize).min(bytes_remaining_in_range) as BlockSize;

            // Now if this is the first logical span (last in our iteration), we need to skip
            // some from the start. The key challenge here is - how do we know it is the first?
            // Simply put - it is the first if it can supply all the remaining bytes.
            let is_first_span = bytes_remaining_in_range <= max_take_bytes as usize;

            if is_first_span && bytes_to_skip_in_first_relevant_span > 0 {
                let remainder_in_span = bytes_to_even_consider
                    .checked_sub(bytes_to_skip_in_first_relevant_span)
                    .expect("somehow ended up with negative bytes remaining in span - only possible if the math is wrong");

                max_take_bytes = max_take_bytes.min(remainder_in_span);

                bytes_remaining_in_range = bytes_remaining_in_range
                    .checked_sub(max_take_bytes as usize)
                    .expect("somehow ended up with negative bytes remaining - only possible if the math is wrong");

                let start = bytes_to_skip_in_first_relevant_span;
                let end = bytes_to_skip_in_first_relevant_span
                    .checked_add(max_take_bytes)
                    .expect("overflowing usize is impossible because we are calculating slice within usize-bounded range");

                bytes_to_skip_in_first_relevant_span = 0;

                slice_spans.push(span.slice(start..end));
            } else {
                bytes_remaining_in_range = bytes_remaining_in_range
                    .checked_sub(max_take_bytes as usize)
                    .expect("somehow ended up with negative bytes remaining - only possible if the math is wrong");

                slice_spans.push(span.slice(0..max_take_bytes));
            }
        }

        Some(Self {
            spans_reversed: slice_spans,
            len: bytes_in_range,
        })
    }
465
466    /// Executes a function `f` on each slice, consuming them all.
467    ///
468    /// The slices that make up the view are iterated in order,
469    /// providing each to `f`. The view becomes empty after this.
470    ///
471    /// # Example
472    ///
473    /// ```
474    /// # let memory = bytesbuf::mem::GlobalPool::new();
475    /// use bytesbuf::BytesView;
476    ///
477    /// // Create a multi-slice view by concatenating independent views.
478    /// # let part1 = BytesView::copied_from_slice(b"Hello", &memory);
479    /// # let part2 = BytesView::copied_from_slice(b", ", &memory);
480    /// # let part3 = BytesView::copied_from_slice(b"world!", &memory);
481    /// let mut view = BytesView::from_views([part1, part2, part3]);
482    ///
483    /// view.consume_all_slices(|slice| {
484    ///     println!("Slice of {} bytes: {:?}", slice.len(), slice);
485    /// });
486    ///
487    /// assert!(view.is_empty());
488    /// ```
489    pub fn consume_all_slices<F>(&mut self, mut f: F)
490    where
491        F: FnMut(&[u8]),
492    {
493        // TODO: This fn could just be .into_iter() - we have no real
494        // need for the "consume pattern" here. Iterators are more idiomatic.
495        while !self.is_empty() {
496            let slice = self.first_slice();
497            f(slice);
498            self.advance(slice.len());
499        }
500    }
501
502    /// References the first slice of bytes in the byte sequence.
503    ///
504    /// Returns an empty slice if the view is over a zero-sized byte sequence.
505    #[doc = include_str!("../doc/snippets/sequence_memory_layout.md")]
506    ///
507    /// # Example
508    ///
509    /// ```
510    /// # let memory = bytesbuf::mem::GlobalPool::new();
511    /// use bytesbuf::BytesView;
512    ///
513    /// let mut view = BytesView::copied_from_slice(b"0123456789ABCDEF", &memory);
514    ///
515    /// // Read the first 10 bytes without assuming the length of first_slice().
516    /// let mut ten_bytes = Vec::with_capacity(10);
517    ///
518    /// while ten_bytes.len() < 10 {
519    ///     let slice = view.first_slice();
520    ///
521    ///     let bytes_to_take = slice.len().min(10 - ten_bytes.len());
522    ///
523    ///     ten_bytes.extend_from_slice(&slice[..bytes_to_take]);
524    ///     view.advance(bytes_to_take);
525    /// }
526    ///
527    /// assert_eq!(ten_bytes, b"0123456789");
528    /// ```
529    #[cfg_attr(test, mutants::skip)] // Mutating this can cause infinite loops.
530    #[must_use]
531    pub fn first_slice(&self) -> &[u8] {
532        self.spans_reversed.last().map_or::<&[u8], _>(&[], |span| span)
533    }
534
    /// Iterates over all the slices that make up this view, together with their metadata.
    ///
    /// Each item is a tuple of `(data, meta)` where `data` is a byte slice and `meta` is the
    /// optional metadata of the memory block backing that slice.
    #[doc = include_str!("../doc/snippets/sequence_memory_layout.md")]
    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// # struct PageAlignedMemory;
    /// use bytesbuf::BytesView;
    ///
    /// # let part1 = BytesView::copied_from_slice(b"Hello", &memory);
    /// # let part2 = BytesView::copied_from_slice(b"World", &memory);
    /// let view = BytesView::from_views([part1, part2]);
    ///
    /// for (data, meta) in view.slices() {
    ///     let is_page_aligned = meta.is_some_and(|m| m.is::<PageAlignedMemory>());
    ///     println!(
    ///         "Slice of {} bytes (page-aligned: {is_page_aligned})",
    ///         data.len()
    ///     );
    /// }
    /// ```
    ///
    /// See the stand-alone example `bb_optimal_path.rs` in the `bytesbuf` crate for
    /// a more detailed example of how to make use of the slice metadata.
    pub fn slices(&self) -> BytesViewSlices<'_> {
        // The iterator borrows this view for its entire lifetime; the view is not consumed.
        BytesViewSlices::new(self)
    }
566
567    /// Inspects the metadata of the [`first_slice()`].
568    ///
569    /// `None` if there is no metadata associated with the first slice or
570    /// if the view is over a zero-sized byte sequence.
571    ///
572    /// # Example
573    ///
574    /// ```
575    /// # let memory = bytesbuf::mem::GlobalPool::new();
576    /// # struct PageAlignedMemory;
577    /// use bytesbuf::BytesView;
578    ///
579    /// let view = BytesView::copied_from_slice(b"Hello", &memory);
580    ///
581    /// let is_page_aligned = view
582    ///     .first_slice_meta()
583    ///     .is_some_and(|meta| meta.is::<PageAlignedMemory>());
584    ///
585    /// println!("First slice is page-aligned: {is_page_aligned}");
586    /// ```
587    ///
588    /// See the stand-alone example `bb_optimal_path.rs` in the `bytesbuf` crate for
589    /// a more detailed example of how to make use of the slice metadata.
590    ///
591    /// [`first_slice()`]: Self::first_slice
592    #[must_use]
593    pub fn first_slice_meta(&self) -> Option<&dyn BlockMeta> {
594        self.spans_reversed.last().and_then(|span| span.block_ref().meta())
595    }
596
    /// Removes the first `count` bytes from the front of the view.
    ///
    /// The consumed bytes are dropped from the view, moving any remaining bytes to the front.
    ///
    /// If permitted by memory layout considerations and reference counts, the memory capacity
    /// backing the dropped bytes is released back to the memory provider.
    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use bytesbuf::BytesView;
    ///
    /// let mut view = BytesView::copied_from_slice(b"0123456789ABCDEF", &memory);
    ///
    /// // Read the first 10 bytes without assuming the length of first_slice().
    /// let mut ten_bytes = Vec::with_capacity(10);
    ///
    /// while ten_bytes.len() < 10 {
    ///     let slice = view.first_slice();
    ///
    ///     let bytes_to_take = slice.len().min(10 - ten_bytes.len());
    ///
    ///     ten_bytes.extend_from_slice(&slice[..bytes_to_take]);
    ///     view.advance(bytes_to_take);
    /// }
    ///
    /// assert_eq!(ten_bytes, b"0123456789");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `count` is greater than the number of bytes remaining.
    #[cfg_attr(test, mutants::skip)] // Mutating this can cause infinite loops.
    pub fn advance(&mut self, mut count: usize) {
        // Update the cached length first; this doubles as the bounds check for `count`.
        self.len = self.len.checked_sub(count).expect("attempted to advance past end of the view");

        // Walk spans from the logical front (the end of reverse-order storage),
        // dropping whole spans or trimming the final partially-consumed one.
        while count > 0 {
            let front = self
                .spans_reversed
                .last_mut()
                .expect("logic error - ran out of spans before advancing over their contents");
            let span_len = front.len() as usize;

            if count < span_len {
                // The remaining count ends inside this span - trim it in place and stop.
                // SAFETY: We must guarantee we advance in-bounds. The if statement guarantees that.
                unsafe {
                    front.advance(count);
                }
                break;
            }

            // This span is consumed entirely - drop it (potentially releasing its memory).
            self.spans_reversed.pop();
            // Will never overflow because we already handled the count < span_len case.
            count = count.wrapping_sub(span_len);
        }
    }
654
655    /// Appends another view to the end of this one.
656    ///
657    /// This is a zero-copy operation, reusing the memory capacity of the other view.
658    ///
659    /// # Example
660    ///
661    /// ```
662    /// # let memory = bytesbuf::mem::GlobalPool::new();
663    /// use bytesbuf::BytesView;
664    ///
665    /// let mut greeting = BytesView::copied_from_slice(b"Hello, ", &memory);
666    /// let name = BytesView::copied_from_slice(b"world!", &memory);
667    ///
668    /// greeting.append(name);
669    ///
670    /// assert_eq!(greeting, b"Hello, world!");
671    /// ```
672    ///
673    /// # Panics
674    ///
675    /// Panics if the resulting view would be larger than `usize::MAX` bytes.
676    pub fn append(&mut self, other: Self) {
677        self.len = self
678            .len
679            .checked_add(other.len)
680            .expect("attempted to create a BytesView larger than usize::MAX bytes");
681
682        self.spans_reversed.insert_many(0, other.spans_reversed);
683    }
684
685    /// Returns a new view that concatenates this view with another.
686    ///
687    /// This is a zero-copy operation, reusing the memory capacity of the other view.
688    ///
689    /// # Example
690    ///
691    /// ```
692    /// # let memory = bytesbuf::mem::GlobalPool::new();
693    /// use bytesbuf::BytesView;
694    ///
695    /// let greeting = BytesView::copied_from_slice(b"Hello, ", &memory);
696    /// let name = BytesView::copied_from_slice(b"world!", &memory);
697    ///
698    /// let message = greeting.concat(name);
699    ///
700    /// // Original view is unchanged.
701    /// assert_eq!(greeting, b"Hello, ");
702    /// // New view contains the concatenation.
703    /// assert_eq!(message, b"Hello, world!");
704    /// ```
705    ///
706    /// # Panics
707    ///
708    /// Panics if the resulting view would be larger than `usize::MAX` bytes.
709    #[must_use]
710    pub fn concat(&self, other: Self) -> Self {
711        let mut new_view = self.clone();
712        new_view.append(other);
713        new_view
714    }
715
    /// Exposes the instance through the [`Read`][std::io::Read] trait.
    ///
    /// # Example
    ///
    /// ```
    /// # let memory = bytesbuf::mem::GlobalPool::new();
    /// use std::io::Read;
    ///
    /// use bytesbuf::BytesView;
    ///
    /// let mut view = BytesView::copied_from_slice(b"Hello, world!", &memory);
    /// let mut reader = view.as_read();
    ///
    /// let mut buffer = [0u8; 5];
    /// let bytes_read = reader.read(&mut buffer)?;
    ///
    /// assert_eq!(bytes_read, 5);
    /// assert_eq!(&buffer, b"Hello");
    /// # Ok::<(), std::io::Error>(())
    /// ```
    #[must_use]
    pub fn as_read(&mut self) -> impl std::io::Read {
        // NOTE: the returned reader holds a mutable borrow of this view for its lifetime,
        // so the view cannot be used directly until the reader is dropped.
        BytesViewReader::new(self)
    }
740}
741
impl Default for BytesView {
    /// The default view is empty, identical to [`BytesView::new()`].
    fn default() -> Self {
        Self::new()
    }
}
747
748impl PartialEq for BytesView {
749    fn eq(&self, other: &Self) -> bool {
750        // We do not care about the structure, only the contents.
751        if self.len() != other.len() {
752            return false;
753        }
754
755        let mut remaining_bytes = self.len();
756
757        // The two views may have differently sized spans, so we only compare in steps
758        // of the smallest span size offered by either view.
759
760        // We clone the views to create windows that we slide over the contents.
761        let mut self_view = self.clone();
762        let mut other_view = other.clone();
763
764        while remaining_bytes > 0 {
765            let self_slice = self_view.first_slice();
766            let other_slice = other_view.first_slice();
767
768            let comparison_len = NonZero::new(self_slice.len().min(other_slice.len()))
769                .expect("both views said there are remaining bytes but we got an empty slice from at least one of them");
770
771            let self_slice = self_slice.get(..comparison_len.get()).expect("already checked that remaining > 0");
772            let other_slice = other_slice.get(..comparison_len.get()).expect("already checked that remaining > 0");
773
774            if self_slice != other_slice {
775                // Something is different. That is enough for a determination.
776                return false;
777            }
778
779            // Advance both views by the same amount.
780            self_view.advance(comparison_len.get());
781            other_view.advance(comparison_len.get());
782
783            remaining_bytes = remaining_bytes
784                .checked_sub(comparison_len.get())
785                .expect("impossible to consume more bytes from the sequences than are remaining");
786        }
787
788        debug_assert_eq!(remaining_bytes, 0);
789        debug_assert_eq!(self_view.len(), 0);
790        debug_assert_eq!(other_view.len(), 0);
791
792        true
793    }
794}
795
796impl PartialEq<&[u8]> for BytesView {
797    fn eq(&self, other: &&[u8]) -> bool {
798        let mut other = *other;
799
800        // We do not care about the structure, only the contents.
801
802        if self.len() != other.len() {
803            return false;
804        }
805
806        let mut remaining_bytes = self.len();
807
808        // We clone the sequence to create a temporary view that we slide over the contents.
809        let mut self_view = self.clone();
810
811        while remaining_bytes > 0 {
812            let self_slice = self_view.first_slice();
813            let slice_size = NonZero::new(self_slice.len())
814                .expect("both sides of the comparison said there are remaining bytes but we got an empty slice from at least one of them");
815
816            let self_slice = self_slice.get(..slice_size.get()).expect("already checked that remaining > 0");
817            let other_slice = other.get(..slice_size.get()).expect("already checked that remaining > 0");
818
819            if self_slice != other_slice {
820                // Something is different. That is enough for a determination.
821                return false;
822            }
823
824            // Advance the sequence by the same amount.
825            self_view.advance(slice_size.get());
826            other = other.get(slice_size.get()..).expect("guarded by min() above");
827
828            remaining_bytes = remaining_bytes
829                .checked_sub(slice_size.get())
830                .expect("impossible to consume more bytes from the sequences than are remaining");
831        }
832
833        debug_assert_eq!(remaining_bytes, 0);
834        debug_assert_eq!(self_view.len(), 0);
835        debug_assert_eq!(other.len(), 0);
836
837        true
838    }
839}
840
impl PartialEq<BytesView> for &[u8] {
    fn eq(&self, other: &BytesView) -> bool {
        // Equality is symmetric - delegate to the `BytesView == &[u8]` implementation.
        other.eq(self)
    }
}
846
impl<const LEN: usize> PartialEq<&[u8; LEN]> for BytesView {
    fn eq(&self, other: &&[u8; LEN]) -> bool {
        // Compare against fixed-size arrays by viewing them as slices.
        self.eq(&other.as_slice())
    }
}
852
impl<const LEN: usize> PartialEq<BytesView> for &[u8; LEN] {
    fn eq(&self, other: &BytesView) -> bool {
        // Symmetric counterpart of `BytesView == &[u8; LEN]`, via the slice comparison.
        other.eq(&self.as_slice())
    }
}
858
/// Iterator over the slices of a [`BytesView`] and their metadata.
///
/// Returned by [`BytesView::slices()`] and provides each slice together with its
/// associated memory block metadata.
#[must_use]
#[derive(Debug)]
pub struct BytesViewSlices<'s> {
    // This starts off as a clone of the view, just for ease of implementation.
    // We consume the parts of the view we have already iterated over, so this
    // clone shrinks toward empty as iteration progresses.
    view: BytesView,

    // We keep a reference to the view we are iterating over, even though
    // the current implementation does not use it (because a future one might).
    // It also anchors the 's lifetime that next() re-stamps onto its items.
    _parent: PhantomData<&'s BytesView>,
}
874
impl<'s> BytesViewSlices<'s> {
    /// Creates a slice iterator over `view`.
    ///
    /// The view is cloned, so iterating never consumes bytes from the caller's view.
    pub(crate) fn new(view: &'s BytesView) -> Self {
        Self {
            view: view.clone(),
            _parent: PhantomData,
        }
    }
}
883
884impl<'s> Iterator for BytesViewSlices<'s> {
885    type Item = (&'s [u8], Option<&'s dyn BlockMeta>);
886
887    #[cfg_attr(test, mutants::skip)] // Mutating this can cause infinite loops.
888    fn next(&mut self) -> Option<Self::Item> {
889        if self.view.is_empty() {
890            return None;
891        }
892
893        let slice = self.view.first_slice();
894        let meta = self.view.first_slice_meta();
895
896        // SAFETY: It is normally not possible to return a self-reference from an iterator because
897        // next() only has an implicit lifetime for `&self`, which cannot be named in `Item`.
898        // However, we can take advantage of the fact that a `BlockRef` implementation is required
899        // to guarantee that both the data and the metadata live as long as any clone of the memory
900        // block. Because the iterator has borrowed the parent `BytesView` we know that the memory
901        // block must live for as long as the iterator lives.
902        //
903        // Therefore we can just re-stamp the return values with the 's lifetime to indicate that
904        // they are valid for as long as the iterator has borrowed the parent BytesView for.
905        let slice_with_s = unsafe { mem::transmute::<&[u8], &'s [u8]>(slice) };
906        // SAFETY: Same reasoning as above - metadata lives as long as any clone of the block.
907        let meta_with_s = unsafe { mem::transmute::<Option<&dyn BlockMeta>, Option<&'s dyn BlockMeta>>(meta) };
908
909        // Seek forward to the next chunk before we return.
910        self.view.advance(self.view.first_slice().len());
911
912        Some((slice_with_s, meta_with_s))
913    }
914}
915
// Histogram bucket boundaries for the span count recorded by VIEW_CREATED_SPANS below.
const SPAN_COUNT_BUCKETS: &[Magnitude] = &[0, 1, 2, 4, 8, 16, 32];
917
thread_local! {
    // Per-thread metric event; given its name and histogram buckets it tracks how many
    // spans newly created views contain (observation site is elsewhere in this file).
    static VIEW_CREATED_SPANS: Event = Event::builder()
        .name("bytesbuf_view_created_spans")
        .histogram(SPAN_COUNT_BUCKETS)
        .build();
}
924
925#[cfg_attr(coverage_nightly, coverage(off))]
926#[cfg(test)]
927mod tests {
928    #![allow(
929        clippy::indexing_slicing,
930        clippy::needless_range_loop,
931        clippy::arithmetic_side_effects,
932        reason = "This is all fine in test code"
933    )]
934
935    use std::pin::pin;
936    use std::thread;
937
938    use new_zealand::nz;
939    use static_assertions::{assert_impl_all, assert_not_impl_any};
940    use testing_aids::assert_panic;
941
942    use super::*;
943    use crate::BytesBuf;
944    use crate::mem::testing::{TestMemoryBlock, TransparentMemory, std_alloc_block};
945
    // Views must be freely transferable and shareable across threads.
    assert_impl_all!(BytesView: Send, Sync);

    // BytesView intentionally does not implement From<&[u8]> because creating a view
    // requires a memory provider to ensure optimal memory configuration. Users should
    // call `BytesView::copied_from_slice()` instead, which makes the memory provider
    // requirement explicit.
    assert_not_impl_any!(BytesView: From<&'static [u8]>);
953
    #[test]
    fn smoke_test() {
        // End to end: carve one 10-byte block into three spans, build a view over
        // them, then read the data back across the span boundaries.
        let mut span_builder = std_alloc_block::allocate(nz!(10)).into_span_builder();

        span_builder.put_slice(&1234_u64.to_ne_bytes());
        span_builder.put_slice(&16_u16.to_ne_bytes());

        let span1 = span_builder.consume(nz!(4));
        let span2 = span_builder.consume(nz!(3));
        let span3 = span_builder.consume(nz!(3));

        assert_eq!(0, span_builder.remaining_capacity());
        assert_eq!(span1.len(), 4);
        assert_eq!(span2.len(), 3);
        assert_eq!(span3.len(), 3);

        let mut view = BytesView::from_spans(vec![span1, span2, span3]);

        assert!(!view.is_empty());
        assert_eq!(10, view.len());

        // first_slice() only exposes the first span, not the whole view.
        let slice = view.first_slice();
        assert_eq!(4, slice.len());

        // We read 8 bytes here, so should land straight inside span3.
        assert_eq!(view.get_num_ne::<u64>(), 1234);

        assert_eq!(2, view.len());

        let slice = view.first_slice();
        assert_eq!(2, slice.len());

        assert_eq!(view.get_num_ne::<u16>(), 16);

        assert_eq!(0, view.len());
        assert!(view.is_empty());
    }
991
    #[test]
    fn oob_is_panic() {
        // Reading more bytes than the view holds must panic instead of returning garbage.
        let mut span_builder = std_alloc_block::allocate(nz!(10)).into_span_builder();

        span_builder.put_slice(&1234_u64.to_ne_bytes());
        span_builder.put_slice(&16_u16.to_ne_bytes());

        let span1 = span_builder.consume(nz!(4));
        let span2 = span_builder.consume(nz!(3));
        let span3 = span_builder.consume(nz!(3));

        let mut view = BytesView::from_spans(vec![span1, span2, span3]);

        assert_eq!(10, view.len());

        assert_eq!(view.get_num_ne::<u64>(), 1234);
        assert_panic!(_ = view.get_num_ne::<u32>()); // Reads 4 but only has 2 remaining.
    }
1010
    #[test]
    fn extend_lifetime_references_all_blocks() {
        // extend_lifetime() must hold a reference to EVERY block backing the view,
        // keeping all of them alive even after the view itself is dropped.

        // We need to detect here whether a block is being released (i.e. ref count goes to zero).

        // SAFETY: We are not allowed to drop this until all BlockRef are gone. This is fine
        // because it is dropped at the end of the function, after all BlockRef instances.
        let block1 = unsafe { TestMemoryBlock::new(nz!(8), None) };
        let block1 = pin!(block1);

        // SAFETY: We are not allowed to drop this until all BlockRef are gone. This is fine
        // because it is dropped at the end of the function, after all BlockRef instances.
        let block2 = unsafe { TestMemoryBlock::new(nz!(8), None) };
        let block2 = pin!(block2);

        let guard = {
            // SAFETY: We guarantee exclusive access to the memory capacity.
            let mut span_builder1 = unsafe { block1.as_ref().to_block() }.into_span_builder();
            // SAFETY: We guarantee exclusive access to the memory capacity.
            let mut span_builder2 = unsafe { block2.as_ref().to_block() }.into_span_builder();

            span_builder1.put_slice(&1234_u64.to_ne_bytes());
            span_builder2.put_slice(&1234_u64.to_ne_bytes());

            let span1 = span_builder1.consume(nz!(8));
            let span2 = span_builder2.consume(nz!(8));

            let view = BytesView::from_spans(vec![span1, span2]);

            view.extend_lifetime()
        };

        // The sequence was destroyed and all BlockRefs it was holding are gone.
        // However, the lifetime guard is still alive and has a BlockRef.

        assert_eq!(block1.ref_count(), 1);
        assert_eq!(block2.ref_count(), 1);

        drop(guard);

        // And now they should all be dead.
        assert_eq!(block1.ref_count(), 0);
        assert_eq!(block2.ref_count(), 0);
    }
1054
1055    #[test]
1056    fn from_views() {
1057        let mut span_builder = std_alloc_block::allocate(nz!(100)).into_span_builder();
1058
1059        span_builder.put_slice(&1234_u64.to_ne_bytes());
1060        span_builder.put_slice(&5678_u64.to_ne_bytes());
1061
1062        let span1 = span_builder.consume(nz!(8));
1063        let span2 = span_builder.consume(nz!(8));
1064
1065        let view1 = BytesView::from_spans(vec![span1]);
1066        let view2 = BytesView::from_spans(vec![span2]);
1067
1068        let mut combined_view = BytesView::from_views(vec![view1, view2]);
1069
1070        assert_eq!(16, combined_view.len());
1071
1072        assert_eq!(combined_view.get_num_ne::<u64>(), 1234);
1073        assert_eq!(combined_view.get_num_ne::<u64>(), 5678);
1074    }
1075
1076    #[test]
1077    fn empty_view() {
1078        let view = BytesView::default();
1079
1080        assert!(view.is_empty());
1081        assert_eq!(0, view.len());
1082        assert_eq!(0, view.first_slice().len());
1083    }
1084
    #[test]
    fn slice_from_single_span_view() {
        // A very simple view to start with, consisting of just one 100 byte span.
        let span_builder = std_alloc_block::allocate(nz!(100)).into_span_builder();

        let mut buf = BytesBuf::from_span_builders([span_builder]);

        for i in 0..100 {
            buf.put_byte(i);
        }

        let view = buf.consume_all();

        let mut sliced_view = view.range(50..55);

        // The sub-view is independent - the parent keeps its full length.
        assert_eq!(5, sliced_view.len());
        assert_eq!(100, view.len());

        assert_eq!(50, sliced_view.get_byte());

        // Consuming from the sub-view does not consume from the parent.
        assert_eq!(4, sliced_view.len());
        assert_eq!(100, view.len());

        assert_eq!(51, sliced_view.get_byte());
        assert_eq!(52, sliced_view.get_byte());
        assert_eq!(53, sliced_view.get_byte());
        assert_eq!(54, sliced_view.get_byte());

        assert_eq!(0, sliced_view.len());

        // Ranges reaching past the end are rejected by the checked API.
        assert!(view.range_checked(0..101).is_none());
        assert!(view.range_checked(100..101).is_none());
        assert!(view.range_checked(101..101).is_none());
    }
1119
    #[test]
    fn slice_from_multi_span_view() {
        const SPAN_SIZE: NonZero<BlockSize> = nz!(10);

        // A multi-span view, 10 bytes x10.
        let span_builders = iter::repeat_with(|| std_alloc_block::allocate(SPAN_SIZE).into_span_builder())
            .take(10)
            .collect::<Vec<_>>();

        let mut buf = BytesBuf::from_span_builders(span_builders);

        for i in 0..100 {
            buf.put_byte(i);
        }

        let view = buf.consume_all();

        // Range entirely within the first span.
        let mut first5 = view.range(0..5);
        assert_eq!(5, first5.len());
        assert_eq!(100, view.len());
        assert_eq!(0, first5.get_byte());

        // Range entirely within the last span.
        let mut last5 = view.range(95..100);
        assert_eq!(5, last5.len());
        assert_eq!(100, view.len());
        assert_eq!(95, last5.get_byte());

        // Range straddling a span boundary (spans are 10 bytes each).
        let mut middle5 = view.range(49..54);
        assert_eq!(5, middle5.len());
        assert_eq!(100, view.len());
        assert_eq!(49, middle5.get_byte());
        assert_eq!(50, middle5.get_byte());
        assert_eq!(51, middle5.get_byte());
        assert_eq!(52, middle5.get_byte());
        assert_eq!(53, middle5.get_byte());

        // Ranges reaching past the end are rejected by the checked API.
        assert!(view.range_checked(0..101).is_none());
        assert!(view.range_checked(100..101).is_none());
        assert!(view.range_checked(101..101).is_none());
    }
1160
    #[test]
    fn slice_indexing_kinds() {
        // Every supported range syntax must select the expected bytes.
        let span_builder = std_alloc_block::allocate(nz!(10)).into_span_builder();

        let mut buf = BytesBuf::from_span_builders([span_builder]);
        buf.put_byte(0);
        buf.put_byte(1);
        buf.put_byte(2);
        buf.put_byte(3);
        buf.put_byte(4);
        buf.put_byte(5);

        let data = buf.consume_all();

        // Half-open range.
        let mut middle_four = data.range(1..5);
        assert_eq!(4, middle_four.len());
        assert_eq!(1, middle_four.get_byte());
        assert_eq!(2, middle_four.get_byte());
        assert_eq!(3, middle_four.get_byte());
        assert_eq!(4, middle_four.get_byte());

        // Inclusive-end range selecting the same bytes.
        let mut middle_four = data.range(1..=4);
        assert_eq!(4, middle_four.len());
        assert_eq!(1, middle_four.get_byte());
        assert_eq!(2, middle_four.get_byte());
        assert_eq!(3, middle_four.get_byte());
        assert_eq!(4, middle_four.get_byte());

        // Open-ended range to the end of the view.
        let mut last_two = data.range(4..);
        assert_eq!(2, last_two.len());
        assert_eq!(4, last_two.get_byte());
        assert_eq!(5, last_two.get_byte());

        // Range from the start, exclusive end.
        let mut first_two = data.range(..2);
        assert_eq!(2, first_two.len());
        assert_eq!(0, first_two.get_byte());
        assert_eq!(1, first_two.get_byte());

        // Range from the start, inclusive end.
        let mut first_two = data.range(..=1);
        assert_eq!(2, first_two.len());
        assert_eq!(0, first_two.get_byte());
        assert_eq!(1, first_two.get_byte());
    }
1204
    #[test]
    fn slice_checked_with_excluded_start_bound() {
        // range_checked() must handle Bound::Excluded start bounds, which cannot
        // be expressed with range-literal syntax.
        let span_builder = std_alloc_block::allocate(nz!(100)).into_span_builder();

        let mut buf = BytesBuf::from_span_builders([span_builder]);
        buf.put_byte(0);
        buf.put_byte(1);
        buf.put_byte(2);
        buf.put_byte(3);
        buf.put_byte(4);
        buf.put_byte(5);
        buf.put_byte(6);
        buf.put_byte(7);
        buf.put_byte(8);

        let view = buf.consume_all();

        // Test with excluded start bound: (Bound::Excluded(1), Bound::Excluded(5))
        // This should be equivalent to 2..5 (items at indices 2, 3, 4)
        let sliced = view.range_checked((Bound::Excluded(1), Bound::Excluded(5)));
        assert!(sliced.is_some());
        let mut sliced = sliced.unwrap();
        assert_eq!(3, sliced.len());
        assert_eq!(2, sliced.get_byte());
        assert_eq!(3, sliced.get_byte());
        assert_eq!(4, sliced.get_byte());

        // Test edge case: excluded start at the last valid index returns empty sequence
        let sliced = view.range_checked((Bound::Excluded(8), Bound::Unbounded));
        assert!(sliced.is_some());
        assert_eq!(0, sliced.unwrap().len());

        // Test edge case: excluded start that would overflow when adding 1
        let sliced = view.range_checked((Bound::Excluded(usize::MAX), Bound::Unbounded));
        assert!(sliced.is_none());
    }
1241
    #[test]
    fn slice_oob_is_panic() {
        let span_builder = std_alloc_block::allocate(nz!(1000)).into_span_builder();

        let mut buf = BytesBuf::from_span_builders([span_builder]);
        buf.put_byte_repeated(0, 100);

        let view = buf.consume_all();

        // The view holds 100 bytes; the unchecked range() API must panic for any
        // range that starts or ends beyond that, regardless of range syntax.
        assert_panic!(_ = view.range(0..101));
        assert_panic!(_ = view.range(0..=100));
        assert_panic!(_ = view.range(100..=100));
        assert_panic!(_ = view.range(100..101));
        assert_panic!(_ = view.range(101..));
        assert_panic!(_ = view.range(101..101));
        assert_panic!(_ = view.range(101..=101));
    }
1259
    #[test]
    fn slice_at_boundary_is_not_panic() {
        let span_builder = std_alloc_block::allocate(nz!(100)).into_span_builder();

        let mut buf = BytesBuf::from_span_builders([span_builder]);
        buf.put_byte_repeated(0, 100);

        let view = buf.consume_all();

        // Ranges that touch but do not cross the view's boundaries are all valid.
        assert_eq!(0, view.range(0..0).len());
        assert_eq!(1, view.range(0..=0).len());
        assert_eq!(0, view.range(..0).len());
        assert_eq!(1, view.range(..=0).len());
        assert_eq!(0, view.range(100..100).len());
        assert_eq!(0, view.range(99..99).len());
        assert_eq!(1, view.range(99..=99).len());
        assert_eq!(1, view.range(99..).len());
        assert_eq!(100, view.range(..).len());
    }
1279
1280    #[test]
1281    fn slice_empty_is_empty_if_not_oob() {
1282        let span_builder = std_alloc_block::allocate(nz!(100)).into_span_builder();
1283
1284        let mut buf = BytesBuf::from_span_builders([span_builder]);
1285
1286        for i in 0..100 {
1287            buf.put_byte(i);
1288        }
1289
1290        let view = buf.consume_all();
1291
1292        let sub_view = view.range(50..50);
1293        assert_eq!(0, sub_view.len());
1294
1295        // 100 is the index at the end of the view - still in-bounds, if at edge.
1296        let sub_view = view.range(100..100);
1297        assert_eq!(0, sub_view.len());
1298        assert!(view.range_checked(101..101).is_none());
1299    }
1300
    #[test]
    fn consume_all_slices() {
        const SPAN_SIZE: NonZero<BlockSize> = nz!(10);

        // A multi-span sequence, 10 bytes x10.
        let span_builders = iter::repeat_with(|| std_alloc_block::allocate(SPAN_SIZE).into_span_builder())
            .take(10)
            .collect::<Vec<_>>();

        let mut buf = BytesBuf::from_span_builders(span_builders);

        for i in 0..100 {
            buf.put_byte(i);
        }

        let mut view = buf.consume_all();

        let mut slice_index = 0;
        let mut bytes_consumed = 0;

        // The callback must be invoked once per span, in order, with the span's full contents.
        view.consume_all_slices(|slice| {
            assert_eq!(slice.len(), 10);
            bytes_consumed += slice.len();

            for i in 0..10 {
                assert_eq!(slice_index * 10 + i, slice[i] as usize);
            }

            slice_index += 1;
        });

        assert_eq!(bytes_consumed, 100);

        // Everything was consumed above, so a second call must not invoke the callback.
        view.consume_all_slices(|_| unreachable!("view should now be empty"));
    }
1336
    #[test]
    fn multithreaded_usage() {
        // A view must be movable to another thread (Send) and consumable there.
        fn post_to_another_thread(view: BytesView) {
            thread::spawn(move || {
                let mut view = view;
                assert_eq!(view.get_byte(), b'H');
                assert_eq!(view.get_byte(), b'e');
                assert_eq!(view.get_byte(), b'l');
                assert_eq!(view.get_byte(), b'l');
                assert_eq!(view.get_byte(), b'o');
            })
            .join()
            .unwrap();
        }

        let memory = TransparentMemory::new();
        let view = BytesView::copied_from_slice(b"Hello, world!", &memory);

        post_to_another_thread(view);
    }
1357
1358    #[test]
1359    fn slices_iterator() {
1360        let memory = TransparentMemory::new();
1361        let segment1 = BytesView::copied_from_slice(b"Hello, world!", &memory);
1362        let segment2 = BytesView::copied_from_slice(b"Hello, another world!", &memory);
1363
1364        let view = BytesView::from_views(vec![segment1.clone(), segment2.clone()]);
1365
1366        let slices: Vec<_> = view.slices().collect();
1367
1368        assert_eq!(slices.len(), 2);
1369        assert_eq!(slices[0].0.len(), segment1.len());
1370        assert_eq!(slices[1].0.len(), segment2.len());
1371    }
1372
1373    #[test]
1374    fn slices_iterator_empty() {
1375        let view = BytesView::new();
1376        assert_eq!(view.slices().count(), 0);
1377    }
1378
    #[test]
    fn eq_view() {
        let memory = TransparentMemory::new();

        // Identical contents compare equal.
        let view1 = BytesView::copied_from_slice(b"Hello, world!", &memory);
        let view2 = BytesView::copied_from_slice(b"Hello, world!", &memory);

        assert_eq!(view1, view2);

        // Same length, different contents.
        let view3 = BytesView::copied_from_slice(b"Jello, world!", &memory);

        assert_ne!(view1, view3);

        // Same prefix, different length.
        let view4 = BytesView::copied_from_slice(b"Hello, world! ", &memory);

        assert_ne!(view1, view4);

        // Equality ignores span structure: a multi-part view with the same
        // contents still compares equal.
        let view5_part1 = BytesView::copied_from_slice(b"Hello, ", &memory);
        let view5_part2 = BytesView::copied_from_slice(b"world!", &memory);
        let view5 = BytesView::from_views([view5_part1, view5_part2]);

        assert_eq!(view1, view5);
        assert_ne!(view5, view3);

        // A strict prefix is not equal.
        let view6 = BytesView::copied_from_slice(b"Hello, ", &memory);

        assert_ne!(view1, view6);
        assert_ne!(view5, view6);
    }
1408
    #[test]
    fn eq_slice() {
        // Views compare against &[u8] in both directions, ignoring span structure.
        let memory = TransparentMemory::new();

        let view1 = BytesView::copied_from_slice(b"Hello, world!", &memory);

        assert_eq!(view1, b"Hello, world!".as_slice());
        assert_ne!(view1, b"Jello, world!".as_slice());
        assert_ne!(view1, b"Hello, world! ".as_slice());

        assert_eq!(b"Hello, world!".as_slice(), view1);
        assert_ne!(b"Jello, world!".as_slice(), view1);
        assert_ne!(b"Hello, world! ".as_slice(), view1);

        // Same checks against a view composed of multiple parts.
        let view2_part1 = BytesView::copied_from_slice(b"Hello, ", &memory);
        let view2_part2 = BytesView::copied_from_slice(b"world!", &memory);
        let view2 = BytesView::from_views([view2_part1, view2_part2]);

        assert_eq!(view2, b"Hello, world!".as_slice());
        assert_ne!(view2, b"Jello, world!".as_slice());
        assert_ne!(view2, b"Hello, world! ".as_slice());
        assert_ne!(view2, b"Hello, ".as_slice());

        assert_eq!(b"Hello, world!".as_slice(), view2);
        assert_ne!(b"Jello, world!".as_slice(), view2);
        assert_ne!(b"Hello, world! ".as_slice(), view2);
        assert_ne!(b"Hello, ".as_slice(), view2);
    }
1437
    #[test]
    fn eq_array() {
        // Views compare against &[u8; LEN] arrays in both directions, too.
        let memory = TransparentMemory::new();

        let view1 = BytesView::copied_from_slice(b"Hello, world!", &memory);

        assert_eq!(view1, b"Hello, world!");
        assert_ne!(view1, b"Jello, world!");
        assert_ne!(view1, b"Hello, world! ");

        assert_eq!(b"Hello, world!", view1);
        assert_ne!(b"Jello, world!", view1);
        assert_ne!(b"Hello, world! ", view1);

        // Same checks against a view composed of multiple parts.
        let view2_part1 = BytesView::copied_from_slice(b"Hello, ", &memory);
        let view2_part2 = BytesView::copied_from_slice(b"world!", &memory);
        let view2 = BytesView::from_views([view2_part1, view2_part2]);

        assert_eq!(view2, b"Hello, world!");
        assert_ne!(view2, b"Jello, world!");
        assert_ne!(view2, b"Hello, world! ");
        assert_ne!(view2, b"Hello, ");

        assert_eq!(b"Hello, world!", view2);
        assert_ne!(b"Jello, world!", view2);
        assert_ne!(b"Hello, world! ", view2);
        assert_ne!(b"Hello, ", view2);
    }
1466
    #[test]
    fn meta_none() {
        // Slices from blocks without metadata report None for the metadata half.
        let memory = TransparentMemory::new();

        let view1 = BytesView::copied_from_slice(b"Hello, ", &memory);
        let view2 = BytesView::copied_from_slice(b"world!", &memory);

        let view = BytesView::from_views([view1, view2]);

        let mut slices_iter = view.slices();

        // We have two chunks, both without metadata.
        let (data1, meta1) = slices_iter.next().expect("should have first slice");
        assert!(!data1.is_empty());
        assert!(meta1.is_none());

        let (data2, meta2) = slices_iter.next().expect("should have second slice");
        assert!(!data2.is_empty());
        assert!(meta2.is_none());

        assert!(slices_iter.next().is_none());
    }
1489
    #[test]
    fn meta_some() {
        // Two distinct marker metadata types so we can tell the blocks apart below.
        #[derive(Debug)]
        struct GreenMeta;
        #[derive(Debug)]
        struct BlueMeta;

        impl BlockMeta for GreenMeta {}
        impl BlockMeta for BlueMeta {}

        // SAFETY: We are not allowed to drop this until all BlockRef are gone. This is fine
        // because it is dropped at the end of the function, after all BlockRef instances.
        let block1 = unsafe { TestMemoryBlock::new(nz!(100), Some(Box::new(GreenMeta {}))) };
        let block1 = pin!(block1);

        // SAFETY: We are not allowed to drop this until all BlockRef are gone. This is fine
        // because it is dropped at the end of the function, after all BlockRef instances.
        let block2 = unsafe { TestMemoryBlock::new(nz!(100), Some(Box::new(BlueMeta {}))) };
        let block2 = pin!(block2);

        // SAFETY: We guarantee exclusive access to the memory capacity.
        let block1 = unsafe { block1.as_ref().to_block() };
        // SAFETY: We guarantee exclusive access to the memory capacity.
        let block2 = unsafe { block2.as_ref().to_block() };

        let mut buf = BytesBuf::from_blocks([block1, block2]);

        // Add enough bytes to make use of both blocks.
        buf.put_byte_repeated(123, 166);

        let view = buf.consume_all();

        let mut slices_iter = view.slices();

        // NB! There is no requirement that the BytesBuf use the blocks in the order we gave
        // them in. We use white-box knowledge here to know that it actually reverses the order.
        // This behavior may change in a future version - be ready to change the test if so.

        let (data1, meta1) = slices_iter.next().expect("should have first block");
        assert!(!data1.is_empty());
        assert!(meta1.is_some());
        assert!(meta1.unwrap().is::<BlueMeta>());
        assert!(!meta1.unwrap().is::<GreenMeta>());

        let (data2, meta2) = slices_iter.next().expect("should have second block");
        assert!(!data2.is_empty());
        assert!(meta2.is_some());
        assert!(meta2.unwrap().is::<GreenMeta>());
        assert!(!meta2.unwrap().is::<BlueMeta>());

        assert!(slices_iter.next().is_none(), "should have no more slices");
    }
1542
1543    #[test]
1544    fn append_single_span() {
1545        let memory = TransparentMemory::new();
1546
1547        // Create two single-span views.
1548        let mut view1 = BytesView::copied_from_slice(b"Hello, ", &memory);
1549        let view2 = BytesView::copied_from_slice(b"world!", &memory);
1550
1551        assert_eq!(view1.len(), 7);
1552        assert_eq!(view2.len(), 6);
1553
1554        view1.append(view2);
1555
1556        assert_eq!(view1.len(), 13);
1557        assert_eq!(view1, b"Hello, world!");
1558    }
1559
    #[test]
    fn append_multi_span() {
        // Appending must preserve the order of every span from both views.
        let memory = TransparentMemory::new();

        // Create two multi-span views (2 spans each)
        let view1_part1 = BytesView::copied_from_slice(b"AAA", &memory);
        let view1_part2 = BytesView::copied_from_slice(b"BBB", &memory);
        let mut view1 = BytesView::from_views([view1_part1, view1_part2]);

        let view2_part1 = BytesView::copied_from_slice(b"CCC", &memory);
        let view2_part2 = BytesView::copied_from_slice(b"DDD", &memory);
        let view2 = BytesView::from_views([view2_part1, view2_part2]);

        assert_eq!(view1.len(), 6);
        assert_eq!(view2.len(), 6);

        view1.append(view2);

        assert_eq!(view1.len(), 12);
        assert_eq!(view1, b"AAABBBCCCDDD");
    }
1581
1582    #[test]
1583    fn append_empty_view() {
1584        let memory = TransparentMemory::new();
1585
1586        let mut view1 = BytesView::copied_from_slice(b"Hello", &memory);
1587        let view2 = BytesView::new();
1588
1589        view1.append(view2);
1590        assert_eq!(view1.len(), 5);
1591        assert_eq!(view1, b"Hello");
1592
1593        let mut view3 = BytesView::new();
1594        let view4 = BytesView::copied_from_slice(b"world", &memory);
1595
1596        view3.append(view4);
1597        assert_eq!(view3.len(), 5);
1598        assert_eq!(view3, b"world");
1599    }
1600
1601    #[test]
1602    fn concat_single_span() {
1603        let memory = TransparentMemory::new();
1604
1605        // Create two single-span views
1606        let view1 = BytesView::copied_from_slice(b"Hello, ", &memory);
1607        let view2 = BytesView::copied_from_slice(b"world!", &memory);
1608
1609        assert_eq!(view1.len(), 7);
1610        assert_eq!(view2.len(), 6);
1611
1612        let view3 = view1.concat(view2);
1613
1614        // Original view unchanged
1615        assert_eq!(view1.len(), 7);
1616        assert_eq!(view1, b"Hello, ");
1617
1618        // New view contains combined data
1619        assert_eq!(view3.len(), 13);
1620        assert_eq!(view3, b"Hello, world!");
1621    }
1622
1623    #[test]
1624    fn concat_multi_span() {
1625        let memory = TransparentMemory::new();
1626
1627        // Create two multi-span views (2 spans each)
1628        let view1_part1 = BytesView::copied_from_slice(b"AAA", &memory);
1629        let view1_part2 = BytesView::copied_from_slice(b"BBB", &memory);
1630        let view1 = BytesView::from_views([view1_part1, view1_part2]);
1631
1632        let view2_part1 = BytesView::copied_from_slice(b"CCC", &memory);
1633        let view2_part2 = BytesView::copied_from_slice(b"DDD", &memory);
1634        let view2 = BytesView::from_views([view2_part1, view2_part2]);
1635
1636        assert_eq!(view1.len(), 6);
1637        assert_eq!(view2.len(), 6);
1638
1639        let view3 = view1.concat(view2);
1640
1641        // Original view unchanged
1642        assert_eq!(view1.len(), 6);
1643        assert_eq!(view1, b"AAABBB");
1644
1645        // New view contains combined data
1646        assert_eq!(view3.len(), 12);
1647        assert_eq!(view3, b"AAABBBCCCDDD");
1648    }
1649
1650    #[test]
1651    fn concat_empty_views() {
1652        let memory = TransparentMemory::new();
1653
1654        let view1 = BytesView::copied_from_slice(b"Hello", &memory);
1655        let view2 = BytesView::new();
1656
1657        let view3 = view1.concat(view2);
1658        assert_eq!(view3.len(), 5);
1659        assert_eq!(view3, b"Hello");
1660
1661        let view4 = BytesView::new();
1662        let view5 = BytesView::copied_from_slice(b"world", &memory);
1663
1664        let view6 = view4.concat(view5);
1665        assert_eq!(view6.len(), 5);
1666        assert_eq!(view6, b"world");
1667    }
1668
1669    #[test]
1670    fn size_change_detector() {
1671        // The point of this is not to say that we expect it to have a specific size but to allow
1672        // us to easily detect when the size changes and (if we choose to) bless the change.
1673        // We assume 64-bit pointers - any support for 32-bit is problem for the future.
1674        assert_eq!(size_of::<BytesView>(), 272);
1675    }
1676}