
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![cfg_attr(coverage_nightly, feature(coverage_attribute))]

//! Manipulate sequences of bytes for efficient I/O.
//!
//! A [`BytesView`] is a view over a logical sequence of zero or more bytes
//! stored in memory, similar to a slice `&[u8]` but with some key differences:
//!
//! * The bytes in a byte sequence are not required to be consecutive in memory.
//! * The bytes in a byte sequence are always immutable, even if you own the [`BytesView`].
//!
//! In practical terms, you may think of a byte sequence as a `Vec<Vec<u8>>` whose contents are
//! treated as one logical sequence of bytes. The types in this crate provide a way to work with
//! byte sequences using an API that is reasonably convenient while also being compatible with
//! the requirements of high-performance zero-copy I/O operations.
//!
//! # Consuming Byte Sequences
//!
//! The standard model for using bytes of data from a [`BytesView`] is to consume them via the
//! [`bytes::buf::Buf`][17] trait, which is implemented by [`BytesView`].
//!
//! There are many helper methods on this trait that will read bytes from the beginning of the
//! sequence and simultaneously remove the read bytes from the sequence, shrinking it to only
//! the remaining bytes.
//!
//! ```
//! # let memory = bytesbuf::GlobalPool::new();
//! # let message = BytesView::copied_from_slice(b"1234123412341234", &memory);
//! use bytes::Buf;
//! use bytesbuf::BytesView;
//!
//! fn consume_message(mut message: BytesView) {
//!     // We read the message and calculate the sum of all the words in it.
//!     let mut sum: u64 = 0;
//!
//!     while message.has_remaining() {
//!         let word = message.get_u64();
//!         sum = sum.saturating_add(word);
//!     }
//!
//!     println!("Message received. The sum of all words in the message is {sum}.");
//! }
//! # consume_message(message);
//! ```
//!
//! If the helper methods are not sufficient, you can access the contents via byte slices using the
//! more fundamental methods of the [`bytes::buf::Buf`][17] trait such as:
//!
//! * [`chunk()`][21], which returns a slice of bytes from the beginning of the sequence. The
//!   length of this slice is determined by the inner structure of the byte sequence and it may not
//!   contain all the bytes in the sequence.
//! * [`advance()`][22], which removes bytes from the beginning of the sequence, advancing the
//!   head to a new position. When you advance past the slice returned by `chunk()`, the next
//!   call to `chunk()` will return a new slice of bytes starting from the new head position.
//! * [`chunks_vectored()`][23], which returns multiple slices of bytes from the beginning of the
//!   sequence. This can be desirable for advanced access models that can consume multiple
//!   chunks of data at the same time.
//!
//! ```
//! # let memory = bytesbuf::GlobalPool::new();
//! # let mut sequence = BytesView::copied_from_slice(b"1234123412341234", &memory);
//! use bytes::Buf;
//! use bytesbuf::BytesView;
//!
//! let len = sequence.len();
//! let mut chunk_lengths = Vec::new();
//!
//! while sequence.has_remaining() {
//!     let chunk = sequence.chunk();
//!     chunk_lengths.push(chunk.len());
//!
//!     // We have completed processing this chunk, all we wanted was to know its length.
//!     sequence.advance(chunk.len());
//! }
//!
//! println!("Inspected a sequence of {len} bytes with chunk lengths: {chunk_lengths:?}");
//! ```
//!
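//! If you want to gather several chunks at once, for example to hand them to a vectored I/O
//! call, use `chunks_vectored()`. Below is a minimal sketch; how many of the provided `IoSlice`
//! entries are actually filled depends on the internal structure of the sequence.
//!
//! ```
//! # let memory = bytesbuf::GlobalPool::new();
//! # let sequence = BytesView::copied_from_slice(b"1234123412341234", &memory);
//! use std::io::IoSlice;
//!
//! use bytes::Buf;
//! use bytesbuf::BytesView;
//!
//! // Gather up to four slices describing the front of the sequence, without copying any bytes.
//! let mut slices = [IoSlice::new(&[]); 4];
//! let filled = sequence.chunks_vectored(&mut slices);
//!
//! // The gathered slices never describe more bytes than the sequence contains.
//! let gathered: usize = slices[..filled].iter().map(|slice| slice.len()).sum();
//! assert!(gathered <= sequence.len());
//! ```
//!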
//! To reuse a byte sequence, clone it before consuming the contents. This is a cheap
//! zero-copy operation.
//!
//! ```
//! # let memory = bytesbuf::GlobalPool::new();
//! # let mut sequence = BytesView::copied_from_slice(b"1234123412341234", &memory);
//! use bytes::Buf;
//! use bytesbuf::BytesView;
//!
//! assert_eq!(sequence.len(), 16);
//!
//! let mut sequence_clone = sequence.clone();
//! assert_eq!(sequence_clone.len(), 16);
//!
//! _ = sequence_clone.get_u64();
//! assert_eq!(sequence_clone.len(), 8);
//!
//! // Operations on the clone have no effect on the original sequence.
//! assert_eq!(sequence.len(), 16);
//! ```
//!
//! # Producing Byte Sequences
//!
//! For creating a byte sequence, you first need some memory capacity to put the bytes into. This
//! means you need a memory provider, which is a type that implements the [`Memory`] trait.
//!
//! Obtaining a memory provider is generally straightforward. Simply use the first matching option
//! from the following list:
//!
//! 1. If you are creating byte sequences for the purpose of submitting them to a specific
//!    object of a known type (e.g. writing them to a network connection), the target type will
//!    typically implement the [`HasMemory`] trait, which gives you a suitable memory
//!    provider instance via [`HasMemory::memory()`][25]. Use it - this memory provider will
//!    give you memory with the configuration that is optimal for delivering bytes to that
//!    specific instance.
//! 1. If you are creating byte sequences as part of usage-neutral data processing, obtain an
//!    instance of [`GlobalPool`]. In a typical web application, this is a service exposed by
//!    the application framework. In a different context (e.g. example or test code with no
//!    framework), you can create your own instance via `GlobalPool::new()`.
//!
//! Once you have a memory provider, you can reserve memory from it by calling
//! [`Memory::reserve()`][14]. This returns a [`BytesBuf`] with the requested
//! memory capacity.
//!
//! ```
//! # struct Connection {}
//! # impl Connection { fn memory(&self) -> impl Memory { bytesbuf::GlobalPool::new() } }
//! # let connection = Connection {};
//! use bytesbuf::Memory;
//!
//! let memory = connection.memory();
//!
//! let mut sequence_builder = memory.reserve(100);
//! ```
//!
//! Now that you have the memory capacity and a [`BytesBuf`], you can fill the memory
//! capacity with bytes of data. The standard pattern for this is to use the
//! [`bytes::buf::BufMut`][20] trait, which is implemented by [`BytesBuf`].
//!
//! Helper methods on this trait allow you to write bytes to the sequence builder up to the
//! extent of the reserved memory capacity.
//!
//! ```
//! # struct Connection {}
//! # impl Connection { fn memory(&self) -> impl Memory { bytesbuf::GlobalPool::new() } }
//! # let connection = Connection {};
//! use bytes::buf::BufMut;
//! use bytesbuf::Memory;
//!
//! let memory = connection.memory();
//!
//! let mut sequence_builder = memory.reserve(100);
//!
//! sequence_builder.put_u64(1234);
//! sequence_builder.put_u64(5678);
//! sequence_builder.put(b"Hello, world!".as_slice());
//! ```
//!
//! If the helper methods are not sufficient, you can append contents via mutable byte slices
//! using the more fundamental methods of the [`bytes::buf::BufMut`][20] trait such as:
//!
//! * [`chunk_mut()`][24], which returns a mutable slice of bytes from the beginning of the
//!   sequence builder's unused capacity. The length of this slice is determined by the inner
//!   structure of the sequence builder and it may not contain all the capacity that has been
//!   reserved.
//! * [`advance_mut()`][28], which declares that a number of bytes from the beginning of the
//!   unused capacity have been initialized with data and are no longer unused. This will
//!   mark these bytes as valid for reading and advance `chunk_mut()` to the next slice if the
//!   current one has been completely filled.
//!
//! See `examples/mem_chunk_write.rs` for an example of how to use these methods.
//!
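//! A condensed sketch of the same pattern is shown below. It assumes the first chunk of unused
//! capacity is at least two bytes long, which a small reservation such as this one normally
//! satisfies.
//!
//! ```
//! # let memory = bytesbuf::GlobalPool::new();
//! use bytes::buf::BufMut;
//! use bytesbuf::Memory;
//!
//! let mut sequence_builder = memory.reserve(16);
//!
//! // Write two bytes directly into the first chunk of unused capacity.
//! let chunk = sequence_builder.chunk_mut();
//! chunk.write_byte(0, 0x12);
//! chunk.write_byte(1, 0x34);
//!
//! // SAFETY: The first two bytes of the chunk were initialized above.
//! unsafe { sequence_builder.advance_mut(2) };
//!
//! let sequence = sequence_builder.consume_all();
//! assert_eq!(sequence.len(), 2);
//! ```
//!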
//! If you do not know exactly how much memory you need in advance, you can extend the sequence
//! builder capacity on demand if you run out by calling [`BytesBuf::reserve()`][13],
//! which will reserve more memory capacity. You can use [`bytes::buf::BufMut::remaining_mut()`][26]
//! on the sequence builder to identify how much unused memory capacity is available for writing.
//!
//! ```
//! # struct Connection {}
//! # impl Connection { fn memory(&self) -> impl Memory { bytesbuf::GlobalPool::new() } }
//! # let connection = Connection {};
//! use bytes::buf::BufMut;
//! use bytesbuf::Memory;
//!
//! let memory = connection.memory();
//!
//! let mut sequence_builder = memory.reserve(100);
//!
//! // .. write some data into the sequence builder ..
//!
//! // We discover that we need 80 additional bytes of memory! No problem.
//! sequence_builder.reserve(80, &memory);
//!
//! // Remember that a memory provider is always allowed to provide more memory than requested.
//! assert!(sequence_builder.capacity() >= 100 + 80);
//! assert!(sequence_builder.remaining_mut() >= 80);
//! ```
//!
//! When you have filled the memory capacity with the bytes you wanted to write, you can consume
//! the data in the sequence builder, turning it into a [`BytesView`] of immutable bytes.
//!
//! ```
//! # struct Connection {}
//! # impl Connection { fn memory(&self) -> impl Memory { bytesbuf::GlobalPool::new() } }
//! # let connection = Connection {};
//! use bytes::buf::BufMut;
//! use bytesbuf::Memory;
//!
//! let memory = connection.memory();
//!
//! let mut sequence_builder = memory.reserve(100);
//!
//! sequence_builder.put_u64(1234);
//! sequence_builder.put_u64(5678);
//! sequence_builder.put(b"Hello, world!".as_slice());
//!
//! let message = sequence_builder.consume_all();
//! ```
//!
//! This can be done piece by piece, and you can continue writing to the sequence builder
//! after consuming some already written bytes.
//!
//! ```
//! # struct Connection {}
//! # impl Connection { fn memory(&self) -> impl Memory { bytesbuf::GlobalPool::new() } }
//! # let connection = Connection {};
//! use bytes::buf::BufMut;
//! use bytesbuf::Memory;
//!
//! let memory = connection.memory();
//!
//! let mut sequence_builder = memory.reserve(100);
//!
//! sequence_builder.put_u64(1234);
//! sequence_builder.put_u64(5678);
//!
//! let first_8_bytes = sequence_builder.consume(8);
//! let second_8_bytes = sequence_builder.consume(8);
//!
//! sequence_builder.put(b"Hello, world!".as_slice());
//!
//! let final_contents = sequence_builder.consume_all();
//! ```
//!
//! If you already have a [`BytesView`] that you want to write into a [`BytesBuf`], call
//! [`BytesBuf::append()`][15]. This is a highly efficient zero-copy operation
//! that reuses the memory capacity of the sequence you are appending.
//!
//! ```
//! # struct Connection {}
//! # impl Connection { fn memory(&self) -> impl Memory { bytesbuf::GlobalPool::new() } }
//! # let connection = Connection {};
//! use bytes::buf::BufMut;
//! use bytesbuf::Memory;
//!
//! let memory = connection.memory();
//!
//! let mut header_builder = memory.reserve(16);
//! header_builder.put_u64(1234);
//! let header = header_builder.consume_all();
//!
//! let mut sequence_builder = memory.reserve(128);
//! sequence_builder.append(header);
//! sequence_builder.put(b"Hello, world!".as_slice());
//! ```
//!
//! Note that there is no requirement that the memory capacity of the sequence builder and the
//! memory capacity of the sequence being appended come from the same memory provider. It is valid
//! to mix and match memory from different providers, though this may disable some optimizations.
//!
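//! For example, a sequence whose memory came from the global pool can be appended to a builder
//! whose capacity came from a different provider. The sketch below mixes [`GlobalPool`] with
//! [`TransparentTestMemory`]:
//!
//! ```
//! use bytes::buf::BufMut;
//! use bytesbuf::{GlobalPool, Memory, TransparentTestMemory};
//!
//! let pool = GlobalPool::new();
//! let test_memory = TransparentTestMemory::new();
//!
//! // The header uses memory from one provider...
//! let mut header_builder = pool.reserve(8);
//! header_builder.put_u64(1234);
//! let header = header_builder.consume_all();
//!
//! // ...and is appended, zero-copy, to a builder whose capacity came from another provider.
//! let mut sequence_builder = test_memory.reserve(64);
//! sequence_builder.append(header);
//! sequence_builder.put(b"Hello, world!".as_slice());
//!
//! let message = sequence_builder.consume_all();
//! assert_eq!(message.len(), 8 + 13);
//! ```
//!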
//! # Implementing APIs that Consume Byte Sequences
//!
//! If you are implementing a type that accepts byte sequences, you should implement the
//! [`HasMemory`] trait to make it possible for the caller to use optimally
//! configured memory.
//!
//! Even if your type cannot take advantage of memory-configuration-dependent optimizations
//! today, it may become capable of doing so in the future, or it may (now or later) pass the
//! data to another type that implements [`HasMemory`] and can take advantage of such
//! optimizations. Therefore, it is best to implement this trait on all types that accept
//! byte sequences.
//!
//! The recommended implementation strategy for [`HasMemory`] is as follows:
//!
//! * If your type always passes the data to another type that implements [`HasMemory`],
//!   simply forward the memory provider from the other type.
//! * If your type can take advantage of optimizations enabled by specific memory configurations
//!   (e.g. because it uses operating system APIs that unlock better performance when the memory
//!   is appropriately configured), return a memory provider that performs the necessary
//!   configuration.
//! * If your type neither passes the data to another type that implements [`HasMemory`]
//!   nor can take advantage of optimizations enabled by specific memory configurations, obtain
//!   an instance of [`GlobalPool`] as a dependency and return it as the memory provider.
//!
//! Example of forwarding the memory provider (see `examples/mem_has_provider_forwarding.rs`
//! for full code):
//!
//! ```
//! use bytesbuf::{HasMemory, MemoryShared, BytesView};
//!
//! /// Counts the number of 0x00 bytes in a sequence before
//! /// writing that sequence to a network connection.
//! ///
//! /// # Implementation strategy for `HasMemory`
//! ///
//! /// This type merely inspects a byte sequence before passing it on. This means that it does not
//! /// have a preference of its own for how that memory should be configured.
//! ///
//! /// However, the thing it passes the sequence to (the `Connection` type) may have a preference,
//! /// so we forward the memory provider of the `Connection` type as our own. The caller can then
//! /// use memory optimal for submission to the `Connection` instance.
//! #[derive(Debug)]
//! struct ConnectionZeroCounter {
//!     connection: Connection,
//! }
//!
//! impl ConnectionZeroCounter {
//!     pub fn new(connection: Connection) -> Self {
//!         Self {
//!             connection,
//!         }
//!     }
//!
//!     pub fn write(&mut self, sequence: BytesView) {
//!         // TODO: Count zeros.
//!
//!         self.connection.write(sequence);
//!     }
//! }
//!
//! impl HasMemory for ConnectionZeroCounter {
//!     fn memory(&self) -> impl MemoryShared {
//!         // We forward the memory provider of the connection, so that the caller can use
//!         // memory optimal for submission to the connection.
//!         self.connection.memory()
//!     }
//! }
//! # #[derive(Debug)] struct Connection;
//! # impl Connection { fn write(&mut self, mut _message: BytesView) {} }
//! # impl HasMemory for Connection { fn memory(&self) -> impl MemoryShared { bytesbuf::TransparentTestMemory::new() } }
//! ```
//!
//! Example of returning a memory provider that performs configuration for optimal memory (see
//! `examples/mem_has_provider_optimizing.rs` for full code):
//!
//! ```
//! use bytesbuf::{CallbackMemory, HasMemory, MemoryShared, BytesView};
//!
//! /// # Implementation strategy for `HasMemory`
//! ///
//! /// This type can benefit from optimal performance if specifically configured memory is used and
//! /// the memory is reserved from the I/O memory pool. It uses the I/O context to reserve memory,
//! /// providing a usage-specific configuration when reserving memory capacity.
//! ///
//! /// A delegating memory provider is used to attach the configuration to each memory reservation.
//! #[derive(Debug)]
//! struct UdpConnection {
//!     io_context: IoContext,
//! }
//!
//! impl UdpConnection {
//!     pub fn new(io_context: IoContext) -> Self {
//!         Self { io_context }
//!     }
//! }
//!
//! /// Represents the optimal memory configuration for a UDP connection when reserving I/O memory.
//! const UDP_CONNECTION_OPTIMAL_MEMORY_CONFIGURATION: MemoryConfiguration = MemoryConfiguration {
//!     requires_page_alignment: false,
//!     zero_memory_on_release: false,
//!     requires_registered_memory: true,
//! };
//!
//! impl HasMemory for UdpConnection {
//!     fn memory(&self) -> impl MemoryShared {
//!         CallbackMemory::new({
//!             // Cloning is cheap, as it is a service that shares resources between clones.
//!             let io_context = self.io_context.clone();
//!
//!             move |min_len| {
//!                 io_context.reserve_io_memory(min_len, UDP_CONNECTION_OPTIMAL_MEMORY_CONFIGURATION)
//!             }
//!         })
//!     }
//! }
//!
//! # use bytesbuf::BytesBuf;
//! # #[derive(Clone, Debug)]
//! # struct IoContext;
//! # impl IoContext {
//! #     pub fn reserve_io_memory(
//! #         &self,
//! #         min_len: usize,
//! #         _memory_configuration: MemoryConfiguration,
//! #     ) -> BytesBuf {
//! #         todo!()
//! #     }
//! # }
//! # struct MemoryConfiguration { requires_page_alignment: bool, zero_memory_on_release: bool, requires_registered_memory: bool }
//! ```
//!
//! Example of returning a usage-neutral memory provider (see `examples/mem_has_provider_neutral.rs` for
//! full code):
//!
//! ```
//! use bytesbuf::{GlobalPool, HasMemory, MemoryShared};
//!
//! /// Calculates a checksum for a given byte sequence.
//! ///
//! /// # Implementation strategy for `HasMemory`
//! ///
//! /// This type does not benefit from any specific memory configuration - it consumes bytes no
//! /// matter what sort of memory they are in. It also does not pass the bytes to some other type.
//! ///
//! /// Therefore, we simply use `GlobalPool` as the memory provider we publish, as this is
//! /// the default choice when there is no specific provider to prefer.
//! #[derive(Debug)]
//! struct ChecksumCalculator {
//!     // The application logic must provide this - it is our dependency.
//!     memory_provider: GlobalPool,
//! }
//!
//! impl ChecksumCalculator {
//!     pub fn new(memory_provider: GlobalPool) -> Self {
//!         Self { memory_provider }
//!     }
//! }
//!
//! impl HasMemory for ChecksumCalculator {
//!     fn memory(&self) -> impl MemoryShared {
//!         // Cloning a memory provider is a cheap operation, as clones reuse resources.
//!         self.memory_provider.clone()
//!     }
//! }
//! ```
//!
//! It is generally expected that all APIs work with byte sequences using memory from any provider.
//! It is true that in some cases this may be impossible (e.g. because you are interacting directly
//! with a device driver that requires the data to be in a specific physical memory module) but
//! these cases will be rare and must be explicitly documented.
//!
//! If your type can take advantage of optimizations enabled by specific memory configurations,
//! it needs to determine whether a byte sequence actually uses the desired memory configuration.
//! This can be done by inspecting the provided byte sequence and the memory metadata it exposes.
//! If the metadata indicates a suitable configuration, the optimal implementation can be used.
//! Otherwise, the implementation can fall back to a generic implementation that works with any
//! byte sequence.
//!
//! Example of identifying whether a byte sequence uses the optimal memory configuration (see
//! `examples/mem_optimal_path.rs` for full code):
//!
//! ```
//! # struct Foo;
//! use bytesbuf::BytesView;
//!
//! # impl Foo {
//! pub fn write(&mut self, message: BytesView) {
//!     // We now need to identify whether the message actually uses memory that allows us to
//!     // use the optimal I/O path. There is no requirement that the data passed to us contains
//!     // only memory with our preferred configuration.
//!
//!     let use_optimal_path = message.iter_chunk_metas().all(|meta| {
//!         // If there is no metadata, the memory is not I/O memory.
//!         meta.is_some_and(|meta| {
//!             // If the type of metadata does not match the metadata
//!             // exposed by the I/O memory provider, the memory is not I/O memory.
//!             let Some(io_memory_configuration) = meta.downcast_ref::<MemoryConfiguration>()
//!             else {
//!                 return false;
//!             };
//!
//!             // If the memory is I/O memory but is not pre-registered
//!             // with the operating system, we cannot use the optimal path.
//!             io_memory_configuration.requires_registered_memory
//!         })
//!     });
//!
//!     if use_optimal_path {
//!         self.write_optimal(message);
//!     } else {
//!         self.write_fallback(message);
//!     }
//! }
//! # fn write_optimal(&mut self, _message: BytesView) { }
//! # fn write_fallback(&mut self, _message: BytesView) { }
//! # }
//! # struct MemoryConfiguration { requires_registered_memory: bool }
//! ```
//!
//! Note that there is no requirement that a byte sequence consists of homogeneous memory. Different
//! parts of the byte sequence may come from different memory providers, so all chunks must be
//! checked for compatibility.
//!
//! # Compatibility with the `bytes` Crate
//!
//! The popular [`Bytes`][18] type from the `bytes` crate is often used in the Rust ecosystem to
//! represent simple byte buffers of consecutive bytes. For compatibility with this commonly used
//! type, this crate offers conversion methods to translate between [`BytesView`] and [`Bytes`][18]:
//!
//! * [`BytesView::into_bytes()`][16] converts a [`BytesView`] into a [`Bytes`][18] instance. This
//!   is not always zero-copy because a byte sequence is not guaranteed to be consecutive in memory.
//!   You are discouraged from using this method in any performance-relevant logic path.
//! * See `Work Item 5861368: BytesView::into_bytes_iter()`
//! * `BytesView::from(Bytes)` or `let s: BytesView = bytes.into()` converts a [`Bytes`][18] instance
//!   into a [`BytesView`]. This is an efficient zero-copy operation that reuses the memory of the
//!   `Bytes` instance.
//!
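//! A short sketch of both conversions:
//!
//! ```
//! use bytes::Bytes;
//! use bytesbuf::BytesView;
//!
//! // Zero-copy conversion from `Bytes` into a `BytesView`.
//! let bytes = Bytes::from_static(b"Hello, world!");
//! let view: BytesView = bytes.into();
//! assert_eq!(view.len(), 13);
//!
//! // Converting back to `Bytes` may copy, because the view is not guaranteed
//! // to be consecutive in memory.
//! let roundtripped = view.into_bytes();
//! assert_eq!(&roundtripped[..], b"Hello, world!");
//! ```
//!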
//! # Static Data
//!
//! You may have static data in your logic, such as the names/prefixes of request/response headers:
//!
//! ```
//! const HEADER_PREFIX: &[u8] = b"Unix-Milliseconds: ";
//! ```
//!
//! Optimal processing of static data requires satisfying multiple requirements:
//!
//! * We want zero-copy processing when consuming this data.
//! * We want to use memory that is optimally configured for the context in which the data is
//!   consumed (e.g. network connection, file, etc).
//!
//! The standard pattern here is to use [`OnceLock`][27] to lazily initialize a [`BytesView`] from
//! the static data on first use, using memory from a memory provider that is optimal for the
//! intended usage.
//!
//! ```
//! use std::sync::OnceLock;
//!
//! use bytesbuf::BytesView;
//!
//! const HEADER_PREFIX: &[u8] = b"Unix-Milliseconds: ";
//!
//! // We transform the static data into a BytesView on first use, via OnceLock.
//! //
//! // You are expected to reuse this variable as long as the context does not change.
//! // For example, it is typically fine to share this across multiple network connections
//! // because they all likely use the same memory configuration. However, writing to files
//! // may require a different memory configuration for optimality, so you would need a different
//! // `BytesView` for that. Such details will typically be documented in the API documentation
//! // of the type that consumes the `BytesView` (e.g. a network connection or a file writer).
//! let header_prefix = OnceLock::<BytesView>::new();
//!
//! for _ in 0..10 {
//!     let mut connection = Connection::accept();
//!
//!     // The static data is transformed into a BytesView on first use,
//!     // using memory optimally configured for a network connection.
//!     let header_prefix = header_prefix
//!         .get_or_init(|| BytesView::copied_from_slice(HEADER_PREFIX, &connection.memory()));
//!
//!     // Now we can use the `header_prefix` BytesView in the connection logic.
//!     // Cloning a BytesView is a cheap zero-copy operation.
//!     connection.write(header_prefix.clone());
//! }
//! # struct Connection;
//! # impl Connection {
//! #     fn accept() -> Self { Connection }
//! #     fn memory(&self) -> impl bytesbuf::Memory { bytesbuf::GlobalPool::new() }
//! #     fn write(&self, _sequence: BytesView) {}
//! # }
//! ```
//!
//! Different usages (e.g. file vs network) may require differently configured memory for optimal
//! performance, so you may need a different `BytesView` if the same static data is to be used
//! in different contexts.
//!
//! # Testing
//!
//! For testing purposes, this crate exposes some special-purpose memory providers that are not
//! optimized for real-world usage but may be useful to test corner cases of byte sequence
//! processing in your code:
//!
//! * [`TransparentTestMemory`] - a memory provider that does not add any value, just uses memory
//!   from the Rust global allocator.
//! * [`FixedBlockTestMemory`] - a variation of the transparent memory provider that limits
//!   each consecutive memory block to a fixed size. This is useful for testing scenarios where
//!   you want to ensure that your code works well even if a byte sequence consists of
//!   non-consecutive memory. You can go down to as low as 1 byte per block!
//!
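//! For example, the sketch below uses a fixed block size to exercise code against non-consecutive
//! memory. The `FixedBlockTestMemory::new(4)` constructor shown here is an assumption made for
//! illustration; consult the type's documentation for the exact way to configure the block size.
//!
//! ```
//! use bytes::Buf;
//! use bytesbuf::{BytesView, FixedBlockTestMemory};
//!
//! // Assumed constructor: a test provider whose blocks are at most 4 bytes each.
//! let memory = FixedBlockTestMemory::new(4);
//!
//! // 16 bytes copied through this provider end up spread across several blocks.
//! let mut sequence = BytesView::copied_from_slice(b"1234123412341234", &memory);
//!
//! while sequence.has_remaining() {
//!     // Each chunk is at most 4 bytes long, so code under test must handle short chunks.
//!     let chunk_len = sequence.chunk().len();
//!     assert!(chunk_len <= 4);
//!     sequence.advance(chunk_len);
//! }
//! ```
//!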
//! [13]: BytesBuf::reserve
//! [14]: Memory::reserve
//! [15]: BytesBuf::append
//! [16]: BytesView::into_bytes
//! [17]: https://docs.rs/bytes/latest/bytes/buf/trait.Buf.html
//! [18]: https://docs.rs/bytes/latest/bytes/struct.Bytes.html
//! [20]: https://docs.rs/bytes/latest/bytes/buf/trait.BufMut.html
//! [21]: https://docs.rs/bytes/latest/bytes/buf/trait.Buf.html#method.chunk
//! [22]: https://docs.rs/bytes/latest/bytes/buf/trait.Buf.html#method.advance
//! [23]: https://docs.rs/bytes/latest/bytes/buf/trait.Buf.html#method.chunks_vectored
//! [24]: https://docs.rs/bytes/latest/bytes/buf/trait.BufMut.html#method.chunk_mut
//! [25]: HasMemory::memory
//! [26]: https://docs.rs/bytes/latest/bytes/buf/trait.BufMut.html#method.remaining_mut
//! [27]: std::sync::OnceLock
//! [28]: https://docs.rs/bytes/latest/bytes/buf/trait.BufMut.html#tymethod.advance_mut

#![doc(html_logo_url = "https://media.githubusercontent.com/media/microsoft/oxidizer/refs/heads/main/crates/bytesbuf/logo.png")]
#![doc(html_favicon_url = "https://media.githubusercontent.com/media/microsoft/oxidizer/refs/heads/main/crates/bytesbuf/favicon.ico")]

mod block;
mod block_ref;
mod buf;
mod bytes;
mod callback_memory;
mod constants;
mod fixed_block;
mod global;
mod has_memory;
mod memory;
mod memory_guard;
mod memory_shared;
mod opaque_memory;
mod slice;
mod span;
mod span_builder;
mod transparent;
mod vec;
mod view;
mod write_adapter;

pub use block::*;
pub use block_ref::*;
pub use buf::*;
pub use callback_memory::*;
pub use constants::*;
pub use fixed_block::*;
pub use global::*;
pub use has_memory::*;
pub use memory::*;
pub use memory_guard::*;
pub use memory_shared::*;
pub use opaque_memory::*;
pub(crate) use span::*;
pub(crate) use span_builder::*;
pub use transparent::*;
pub use view::*;
pub(crate) use write_adapter::*;

#[cfg(test)]
mod testing;

pub(crate) mod std_alloc_block;