block_buffer/
lib.rs

//! Fixed size buffer for block processing of data.
//!
//! # Examples
//! ```
//! use block_buffer::{EagerBuffer, array::typenum::U4};
//!
//! let mut buf = EagerBuffer::<U4>::default();
//!
//! let mut accum = Vec::new();
//! let msg1: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
//! let msg2: &[u8] = &[10, 11, 12];
//!
//! buf.digest_blocks(msg1, |blocks| accum.extend_from_slice(blocks));
//! buf.digest_blocks(msg2, |blocks| accum.extend_from_slice(blocks));
//!
//! assert_eq!(accum.len(), 3);
//! assert_eq!(accum[0], [0, 1, 2, 3]);
//! assert_eq!(accum[1], [4, 5, 6, 7]);
//! assert_eq!(accum[2], [8, 9, 10, 11]);
//!
//! let padded_block = buf.pad_with_zeros();
//! assert_eq!(padded_block, [12, 0, 0, 0]);
//! ```
//!
//! Note that the block size used with buffers MUST be bigger than zero and smaller than 256.
//! You will get a compilation error with an invalid block size:
//!
//! ```compile_fail
//! use block_buffer::{EagerBuffer, array::typenum::U0};
//! let buf = EagerBuffer::<U0>::default();
//! ```
//! ```compile_fail
//! use block_buffer::{EagerBuffer, array::typenum::U256};
//! let buf = EagerBuffer::<U256>::default();
//! ```
#![no_std]
#![doc(
    html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
    html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg"
)]
#![warn(missing_docs)]

pub use hybrid_array as array;

use array::{Array, ArraySize, typenum::Sum};
use core::{fmt, mem::MaybeUninit, ptr, slice};

#[cfg(feature = "zeroize")]
use zeroize::{Zeroize, ZeroizeOnDrop};

mod read;
mod sealed;

pub use read::ReadBuffer;

/// Trait for buffer kinds.
pub trait BufferKind: sealed::Sealed {}
/// Eager block buffer kind, which guarantees that buffer position
/// always lies in the range of `0..BlockSize`.
#[derive(Copy, Clone, Debug, Default)]
pub struct Eager {}

/// Lazy block buffer kind, which guarantees that buffer position
/// always lies in the range of `0..=BlockSize`.
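///
/// Unlike [`Eager`], a lazy buffer keeps a final complete block buffered instead
/// of compressing it right away, as needed by hashes that treat the final block
/// specially. A minimal sketch of the difference (the `U4` block size is chosen
/// purely for illustration):
///
/// ```
/// use block_buffer::{EagerBuffer, LazyBuffer, array::typenum::U4};
///
/// let mut n = 0;
/// let mut eager = EagerBuffer::<U4>::default();
/// eager.digest_blocks(&[0; 8], |blocks| n += blocks.len());
/// assert_eq!(n, 2); // both complete blocks are compressed
///
/// let mut n = 0;
/// let mut lazy = LazyBuffer::<U4>::default();
/// lazy.digest_blocks(&[0; 8], |blocks| n += blocks.len());
/// assert_eq!(n, 1); // the last complete block stays in the buffer
/// assert_eq!(lazy.get_pos(), 4);
/// ```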
#[derive(Copy, Clone, Debug, Default)]
pub struct Lazy {}

impl BufferKind for Eager {}

impl BufferKind for Lazy {}

/// Eager block buffer.
pub type EagerBuffer<B> = BlockBuffer<B, Eager>;
/// Lazy block buffer.
pub type LazyBuffer<B> = BlockBuffer<B, Lazy>;

/// Block buffer error.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub struct Error;

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.write_str("Block buffer error")
    }
}

/// Buffer for block processing of data.
pub struct BlockBuffer<BS: ArraySize, K: BufferKind> {
    buffer: MaybeUninit<Array<u8, BS>>,
    pos: K::Pos,
}

impl<BS: ArraySize, K: BufferKind> BlockBuffer<BS, K> {
    /// This associated constant is used to assert block size correctness at compile time.
    const BLOCK_SIZE_ASSERT: bool = {
        if BS::USIZE == 0 {
            panic!("Block size cannot be equal to zero!");
        }
        if BS::USIZE > 255 {
            panic!("Block size cannot be bigger than 255!");
        }
        true
    };
}

impl<BS: ArraySize, K: BufferKind> Default for BlockBuffer<BS, K> {
    #[inline]
    fn default() -> Self {
        assert!(Self::BLOCK_SIZE_ASSERT);
        let mut buffer = MaybeUninit::uninit();
        let mut pos = Default::default();
        K::set_pos(&mut buffer, &mut pos, 0);
        Self { buffer, pos }
    }
}

impl<BS: ArraySize, K: BufferKind> Clone for BlockBuffer<BS, K> {
    #[inline]
    fn clone(&self) -> Self {
        // SAFETY: `BlockBuffer` contains only plain data and no owned resources,
        // so a bitwise copy via `ptr::read` is valid. The `Drop` impl below only
        // zeroizes memory, so duplicating the value cannot lead to a double-free.
        unsafe { ptr::read(self) }
    }
}

impl<BS: ArraySize, K: BufferKind> fmt::Debug for BlockBuffer<BS, K> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.debug_struct(K::NAME)
            .field("pos", &self.get_pos())
            .field("block_size", &BS::USIZE)
            .field("data", &self.get_data())
            .finish()
    }
}

impl<BS: ArraySize, K: BufferKind> BlockBuffer<BS, K> {
    /// Create a new buffer from the provided slice.
    ///
    /// # Panics
    /// If the slice length is not valid for the used buffer kind.
    #[inline(always)]
    pub fn new(buf: &[u8]) -> Self {
        Self::try_new(buf).unwrap()
    }

    /// Create a new buffer from the provided slice.
    ///
    /// Returns an error if the slice length is not valid for the used buffer kind.
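    ///
    /// A minimal sketch of the length check (the `U4` block size is chosen
    /// purely for illustration):
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, array::typenum::U4};
    ///
    /// // Lengths in `0..4` are valid for an eager buffer...
    /// assert!(EagerBuffer::<U4>::try_new(&[1, 2, 3]).is_ok());
    /// // ...but a full block is not, since the eager invariant is `pos < BlockSize`.
    /// assert!(EagerBuffer::<U4>::try_new(&[1, 2, 3, 4]).is_err());
    /// ```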
    #[inline(always)]
    pub fn try_new(buf: &[u8]) -> Result<Self, Error> {
        assert!(Self::BLOCK_SIZE_ASSERT);
        if !K::invariant(buf.len(), BS::USIZE) {
            return Err(Error);
        }
        let mut res = Self::default();
        // SAFETY: we have checked that buffer length satisfies the buffer kind invariant
        unsafe {
            res.set_data_unchecked(buf);
        }
        Ok(res)
    }

    /// Digest data in `input` in blocks of size `BlockSize` using
    /// the `compress` function, which accepts a slice of blocks.
    #[inline]
    pub fn digest_blocks(&mut self, mut input: &[u8], mut compress: impl FnMut(&[Array<u8, BS>])) {
        let pos = self.get_pos();
        // NOTE: using `self.remaining()` here instead of computing
        // the remainder manually for some reason prevents panic elimination
        let rem = self.size() - pos;
        let n = input.len();
        // Note that checking the condition `pos + n < BlockSize` is
        // equivalent to checking `n < rem`, where `rem` is equal to
        // `BlockSize - pos`. Using the latter allows us to work around
        // the compiler accounting for a possible overflow of `pos + n`,
        // which would result in it inserting unreachable panic branches.
        // Using `unreachable_unchecked` in `get_pos`, we convince the
        // compiler that `BlockSize - pos` never underflows.
        if K::invariant(n, rem) {
            // SAFETY: we have checked that the length of `input` fits into
            // the remaining bytes of `buffer`, so we can safely write the data
            // into them and update the cursor position.
            unsafe {
                let buf_ptr = self.buffer.as_mut_ptr().cast::<u8>().add(pos);
                ptr::copy_nonoverlapping(input.as_ptr(), buf_ptr, input.len());
                self.set_pos_unchecked(pos + input.len());
            }
            return;
        }
        if pos != 0 {
            let (left, right) = input.split_at(rem);
            input = right;
            // SAFETY: the length of `left` is equal to the number of remaining bytes
            // in `buffer`, so we can copy data into it and process `buffer` as a
            // fully initialized block.
            let block = unsafe {
                let buf_ptr = self.buffer.as_mut_ptr().cast::<u8>().add(pos);
                ptr::copy_nonoverlapping(left.as_ptr(), buf_ptr, left.len());
                self.buffer.assume_init_ref()
            };
            compress(slice::from_ref(block));
        }

        let (blocks, leftover) = K::split_blocks(input);
        if !blocks.is_empty() {
            compress(blocks);
        }

        // SAFETY: `leftover` satisfies the buffer kind invariant: it is strictly
        // smaller than the block size for eager buffers and not bigger than the
        // block size for lazy ones, so it satisfies the method's safety
        // requirements for all buffer kinds
        unsafe {
            self.set_data_unchecked(leftover);
        }
    }

    /// Reset the buffer by setting the cursor position to zero.
    #[inline(always)]
    pub fn reset(&mut self) {
        // SAFETY: 0 is always a valid position
        unsafe {
            self.set_pos_unchecked(0);
        }
    }

    /// Pad the remaining data with zeros and return the resulting block.
    #[inline(always)]
    pub fn pad_with_zeros(&mut self) -> Array<u8, BS> {
        let mut res = Array::<u8, BS>::default();
        let data = self.get_data();
        res[..data.len()].copy_from_slice(data);
        self.reset();
        res
    }

    /// Return current cursor position.
    #[inline(always)]
    pub fn get_pos(&self) -> usize {
        let pos = K::get_pos(&self.buffer, &self.pos);
        if !K::invariant(pos, BS::USIZE) {
            debug_assert!(false);
            // SAFETY: `pos` never breaks the invariant
            unsafe {
                core::hint::unreachable_unchecked();
            }
        }
        pos
    }

    /// Return slice of data stored inside the buffer.
    #[inline(always)]
    pub fn get_data(&self) -> &[u8] {
        // SAFETY: the `buffer` field is properly initialized up to `self.get_pos()`.
        // `get_pos` never returns position bigger than buffer size.
        unsafe { slice::from_raw_parts(self.buffer.as_ptr().cast(), self.get_pos()) }
    }

    /// Set buffer content and cursor position.
    ///
    /// # Panics
    /// If `pos` does not satisfy the invariant of the used buffer kind, i.e. if it's
    /// bigger than or equal to the block size for eager buffers, or bigger than the
    /// block size for lazy buffers.
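    ///
    /// A minimal sketch (the `U4` block size is chosen purely for illustration):
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, array::typenum::U4};
    ///
    /// let mut buf = EagerBuffer::<U4>::default();
    /// buf.set([1u8, 2, 3, 4].into(), 2);
    /// // Only the first `pos` bytes are treated as buffered data.
    /// assert_eq!(buf.get_data(), &[1, 2]);
    /// ```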
    #[inline]
    pub fn set(&mut self, buf: Array<u8, BS>, pos: usize) {
        assert!(K::invariant(pos, BS::USIZE));
        self.buffer = MaybeUninit::new(buf);
        // SAFETY: we have asserted that `pos` satisfies the invariant and
        // the `buffer` field is fully initialized
        unsafe {
            self.set_pos_unchecked(pos);
        }
    }

    /// Return size of the internal buffer in bytes.
    #[inline(always)]
    pub fn size(&self) -> usize {
        BS::USIZE
    }

    /// Return number of remaining bytes in the internal buffer.
    #[inline(always)]
    pub fn remaining(&self) -> usize {
        self.size() - self.get_pos()
    }

    /// Set buffer position.
    ///
    /// # Safety
    /// Bytes in the range of `0..pos` in the `buffer` field must be properly initialized.
    ///
    /// `pos` must satisfy the invariant of the buffer kind, i.e. for eager hashes it
    /// must be strictly smaller than the block size, and for lazy hashes it must be
    /// smaller than or equal to the block size.
    #[inline(always)]
    unsafe fn set_pos_unchecked(&mut self, pos: usize) {
        debug_assert!(K::invariant(pos, BS::USIZE));
        K::set_pos(&mut self.buffer, &mut self.pos, pos)
    }

    /// Set buffer data.
    ///
    /// # Safety
    /// The length of `buf` must satisfy the invariant of the buffer kind, i.e. for eager
    /// hashes it must be strictly smaller than the block size, and for lazy hashes it
    /// must be smaller than or equal to the block size.
    #[inline(always)]
    unsafe fn set_data_unchecked(&mut self, buf: &[u8]) {
        unsafe {
            self.set_pos_unchecked(buf.len());
            let dst_ptr: *mut u8 = self.buffer.as_mut_ptr().cast();
            ptr::copy_nonoverlapping(buf.as_ptr(), dst_ptr, buf.len());
        }
    }
}

/// Size of a serialized `BlockBuffer` in bytes.
pub type SerializedBufferSize<BS, K> = Sum<BS, <K as sealed::Sealed>::Overhead>;
/// `BlockBuffer` serialized as a byte array.
pub type SerializedBuffer<BS, K> = Array<u8, SerializedBufferSize<BS, K>>;

impl<BS: ArraySize, K: BufferKind> BlockBuffer<BS, K>
where
    BS: core::ops::Add<K::Overhead>,
    Sum<BS, K::Overhead>: ArraySize,
{
    /// Serialize the buffer into a byte array.
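    ///
    /// The serialized form starts with one position byte, followed by the
    /// buffered data padded with zeros. A minimal round-trip sketch (the `U4`
    /// block size is chosen purely for illustration; the eager buffer is
    /// assumed to serialize to five bytes here):
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, array::typenum::U4};
    ///
    /// let mut buf = EagerBuffer::<U4>::default();
    /// buf.digest_blocks(&[1, 2], |_| {});
    ///
    /// let ser = buf.serialize();
    /// assert_eq!(ser.as_slice(), &[2, 1, 2, 0, 0]);
    ///
    /// let de = EagerBuffer::<U4>::deserialize(&ser).unwrap();
    /// assert_eq!(de.get_data(), &[1, 2]);
    /// ```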
    pub fn serialize(&self) -> SerializedBuffer<BS, K> {
        let mut buf = SerializedBuffer::<BS, K>::default();
        let data = self.get_data();
        let (pos, block) = buf.split_at_mut(1);
        pos[0] = u8::try_from(data.len()).expect("buffer size is smaller than 256");
        block[..data.len()].copy_from_slice(data);
        buf
    }

    /// Deserialize a buffer from a byte array.
    pub fn deserialize(buf: &SerializedBuffer<BS, K>) -> Result<Self, Error> {
        let (pos, block) = buf.split_at(1);
        let pos = usize::from(pos[0]);

        if !<K as sealed::Sealed>::invariant(pos, BS::USIZE) {
            return Err(Error);
        }

        let (data, tail) = block.split_at(pos);

        // Reject non-canonical serializations with non-zero padding bytes
        if tail.iter().any(|&b| b != 0) {
            return Err(Error);
        }

        let mut res = Self::default();
        // SAFETY: we have checked above that `pos` (i.e. the length of `data`)
        // satisfies the buffer kind invariant
        unsafe { res.set_data_unchecked(data) };
        Ok(res)
    }
}

impl<BS: ArraySize> BlockBuffer<BS, Eager> {
    /// Compress the remaining data after padding it with `delim`, zeros, and
    /// the `suffix` bytes. If there is not enough unused space, `compress`
    /// will be called twice.
    ///
    /// # Panics
    /// If the suffix length is bigger than the block size.
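    ///
    /// A minimal sketch of both cases (the `U4` block size and byte values are
    /// chosen purely for illustration):
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, array::typenum::U4};
    ///
    /// // Enough space: delimiter, zero padding, and suffix fit into one block.
    /// let mut buf = EagerBuffer::<U4>::default();
    /// buf.digest_blocks(&[42], |_| {});
    /// let mut out = Vec::new();
    /// buf.digest_pad(0x80, &[7], |block| out.push(*block));
    /// assert_eq!(out, [[42, 0x80, 0, 7]]);
    ///
    /// // Not enough space: `compress` is called twice.
    /// let mut buf = EagerBuffer::<U4>::default();
    /// buf.digest_blocks(&[1, 2, 3], |_| {});
    /// let mut out = Vec::new();
    /// buf.digest_pad(0x80, &[7], |block| out.push(*block));
    /// assert_eq!(out, [[1, 2, 3, 0x80], [0, 0, 0, 7]]);
    /// ```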
    #[inline(always)]
    pub fn digest_pad(
        &mut self,
        delim: u8,
        suffix: &[u8],
        mut compress: impl FnMut(&Array<u8, BS>),
    ) {
        if suffix.len() > BS::USIZE {
            panic!("suffix is too long");
        }
        let pos = self.get_pos();
        let mut buf = self.pad_with_zeros();
        buf[pos] = delim;

        // Offset at which the suffix starts within a block
        let n = self.size() - suffix.len();
        if self.size() - pos - 1 < suffix.len() {
            // The remaining data, delimiter, and suffix do not fit into one
            // block, so the suffix goes into a second block
            compress(&buf);
            buf.fill(0);
            buf[n..].copy_from_slice(suffix);
            compress(&buf);
        } else {
            buf[n..].copy_from_slice(suffix);
            compress(&buf);
        }
        self.reset();
    }

    /// Pad the message with 0x80, zeros, and the 64-bit message length using
    /// big-endian byte order.
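    ///
    /// A minimal sketch of the Merkle–Damgård-style padding this produces (the
    /// `U8` block size is chosen purely for illustration; real hash functions
    /// use larger blocks and typically pass the length in bits):
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, array::typenum::U8};
    ///
    /// let mut buf = EagerBuffer::<U8>::default();
    /// let mut out = Vec::new();
    /// buf.len64_padding_be(24, |block| out.push(*block));
    /// // The delimiter goes right after the data; the big-endian length
    /// // occupies the last eight bytes.
    /// assert_eq!(out, [[0x80, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 24]]);
    /// ```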
    #[inline]
    pub fn len64_padding_be(&mut self, data_len: u64, compress: impl FnMut(&Array<u8, BS>)) {
        self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
    }

    /// Pad the message with 0x80, zeros, and the 64-bit message length using
    /// little-endian byte order.
    #[inline]
    pub fn len64_padding_le(&mut self, data_len: u64, compress: impl FnMut(&Array<u8, BS>)) {
        self.digest_pad(0x80, &data_len.to_le_bytes(), compress);
    }

    /// Pad the message with 0x80, zeros, and the 128-bit message length using
    /// big-endian byte order.
    #[inline]
    pub fn len128_padding_be(&mut self, data_len: u128, compress: impl FnMut(&Array<u8, BS>)) {
        self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
    }
}

#[cfg(feature = "zeroize")]
impl<BS: ArraySize, K: BufferKind> Zeroize for BlockBuffer<BS, K> {
    #[inline]
    fn zeroize(&mut self) {
        self.buffer.zeroize();
        self.pos.zeroize();
    }
}

impl<BS: ArraySize, K: BufferKind> Drop for BlockBuffer<BS, K> {
    #[inline]
    fn drop(&mut self) {
        #[cfg(feature = "zeroize")]
        self.zeroize();
    }
}

#[cfg(feature = "zeroize")]
impl<BS: ArraySize, K: BufferKind> ZeroizeOnDrop for BlockBuffer<BS, K> {}