pub struct BlockBuffer<BS: BlockSizes, K: BufferKind> { /* private fields */ }

Buffer for block processing of data.

Implementations§

new

Create a new buffer from a slice.

Panics

Panics if the slice length is not valid for the used buffer kind.

try_new

Create a new buffer from a slice.

Returns an error if the slice length is not valid for the used buffer kind.
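
A minimal usage sketch for these constructors. The typenum import path goes through the crate's generic_array re-export, and the exact length bound for the eager kind (data strictly shorter than the block size) is an assumption spelled out in the comments; treat both as illustrative rather than normative.

use block_buffer::{BlockBuffer, Eager};
// Assumption: the crate re-exports `generic_array` (and through it `typenum`).
use block_buffer::generic_array::typenum::U8;

// `new` copies the slice into the buffer and sets the cursor to its length.
let buf = BlockBuffer::<U8, Eager>::new(b"abc");
assert_eq!(buf.get_pos(), 3);
assert_eq!(buf.get_data(), b"abc");

// `try_new` is the non-panicking variant.
assert!(BlockBuffer::<U8, Eager>::try_new(b"abc").is_ok());
// Assumption: for the eager kind the stored data must be strictly shorter
// than the block size, so a full 8-byte slice is rejected.
assert!(BlockBuffer::<U8, Eager>::try_new(&[0u8; 8]).is_err());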

Examples found in repository: src/lib.rs (line 97)
    pub fn new(buf: &[u8]) -> Self {
        Self::try_new(buf).unwrap()
    }

digest_blocks

Digest data in input in blocks of size BlockSize using the compress function, which accepts a slice of blocks.
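
As a hedged sketch (same assumed imports as in the constructor example above, with an arbitrary 4-byte block size): full blocks are handed to the closure, while leftover bytes stay buffered.

use block_buffer::{BlockBuffer, Eager};
use block_buffer::generic_array::typenum::U4;

let mut buf = BlockBuffer::<U4, Eager>::default();
let mut out = Vec::new();

// Ten input bytes yield two full 4-byte blocks for the closure;
// the two leftover bytes remain in the internal buffer.
buf.digest_blocks(b"abcdefghij", |blocks| {
    for block in blocks {
        out.extend_from_slice(block.as_slice());
    }
});
assert_eq!(out, b"abcdefgh".to_vec());
assert_eq!(buf.get_data(), b"ij");
assert_eq!(buf.get_pos(), 2);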

reset

Reset the buffer by setting the cursor position to zero.

pad_with_zeros

Pad the remaining data with zeros and return the resulting block.

get_pos

Return the current cursor position.
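
A small sketch tying these cursor helpers together (same assumed imports as above). Note that pad_with_zeros resets the cursor, as the source listing below shows.

use block_buffer::{BlockBuffer, Eager};
use block_buffer::generic_array::typenum::U4;

let mut buf = BlockBuffer::<U4, Eager>::new(b"ab");
assert_eq!(buf.get_pos(), 2);

// The rest of the block is zeroed and the full block is returned.
let block = buf.pad_with_zeros();
assert_eq!(block.as_slice(), &[b'a', b'b', 0, 0]);

// The cursor is back at zero afterwards; `reset` does the same
// without producing a block.
assert_eq!(buf.get_pos(), 0);
buf.reset();
assert_eq!(buf.get_pos(), 0);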

Examples found in repository: src/lib.rs (line 119)
    pub fn digest_blocks(&mut self, mut input: &[u8], mut compress: impl FnMut(&[Block<Self>])) {
        let pos = self.get_pos();
        // using `self.remaining()` for some reason
        // prevents panic elimination
        let rem = self.size() - pos;
        let n = input.len();
        // Note that checking condition `pos + n < BlockSize` is
        // equivalent to checking `n < rem`, where `rem` is equal
        // to `BlockSize - pos`. Using the latter allows us to work
        // around compiler accounting for possible overflow of
        // `pos + n` which results in it inserting unreachable
        // panic branches. Using `unreachable_unchecked` in `get_pos`
        // we convince compiler that `BlockSize - pos` never underflows.
        if K::invariant(n, rem) {
            // double slicing allows to remove panic branches
            self.buffer[pos..][..n].copy_from_slice(input);
            self.set_pos_unchecked(pos + n);
            return;
        }
        if pos != 0 {
            let (left, right) = input.split_at(rem);
            input = right;
            self.buffer[pos..].copy_from_slice(left);
            compress(slice::from_ref(&self.buffer));
        }

        let (blocks, leftover) = K::split_blocks(input);
        if !blocks.is_empty() {
            compress(blocks);
        }

        let n = leftover.len();
        self.buffer[..n].copy_from_slice(leftover);
        self.set_pos_unchecked(n);
    }

    /// Reset buffer by setting cursor position to zero.
    #[inline(always)]
    pub fn reset(&mut self) {
        self.set_pos_unchecked(0);
    }

    /// Pad remaining data with zeros and return resulting block.
    #[inline(always)]
    pub fn pad_with_zeros(&mut self) -> Block<Self> {
        let pos = self.get_pos();
        let mut res = self.buffer.clone();
        res[pos..].iter_mut().for_each(|b| *b = 0);
        self.set_pos_unchecked(0);
        res
    }

    /// Return current cursor position.
    #[inline(always)]
    pub fn get_pos(&self) -> usize {
        let pos = K::get_pos(&self.buffer, &self.pos);
        if !K::invariant(pos, BS::USIZE) {
            debug_assert!(false);
            // SAFETY: `pos` never breaks the invariant
            unsafe {
                core::hint::unreachable_unchecked();
            }
        }
        pos
    }

    /// Return slice of data stored inside the buffer.
    #[inline(always)]
    pub fn get_data(&self) -> &[u8] {
        &self.buffer[..self.get_pos()]
    }

    /// Set buffer content and cursor position.
    ///
    /// # Panics
    /// If `pos` is bigger or equal to block size.
    #[inline]
    pub fn set(&mut self, buf: Block<Self>, pos: usize) {
        assert!(K::invariant(pos, BS::USIZE));
        self.buffer = buf;
        self.set_pos_unchecked(pos);
    }

    /// Return size of the internal buffer in bytes.
    #[inline(always)]
    pub fn size(&self) -> usize {
        BS::USIZE
    }

    /// Return number of remaining bytes in the internal buffer.
    #[inline(always)]
    pub fn remaining(&self) -> usize {
        self.size() - self.get_pos()
    }

    #[inline(always)]
    fn set_pos_unchecked(&mut self, pos: usize) {
        debug_assert!(K::invariant(pos, BS::USIZE));
        K::set_pos(&mut self.buffer, &mut self.pos, pos)
    }
}

impl<BS: BlockSizes> BlockBuffer<BS, Eager> {
    /// Compress remaining data after padding it with `delim`, zeros and
    /// the `suffix` bytes. If there is not enough unused space, `compress`
    /// will be called twice.
    ///
    /// # Panics
    /// If suffix length is bigger than block size.
    #[inline(always)]
    pub fn digest_pad(&mut self, delim: u8, suffix: &[u8], mut compress: impl FnMut(&Block<Self>)) {
        if suffix.len() > BS::USIZE {
            panic!("suffix is too long");
        }
        let pos = self.get_pos();
        self.buffer[pos] = delim;
        for b in &mut self.buffer[pos + 1..] {
            *b = 0;
        }

        let n = self.size() - suffix.len();
        if self.size() - pos - 1 < suffix.len() {
            compress(&self.buffer);
            let mut block = Block::<Self>::default();
            block[n..].copy_from_slice(suffix);
            compress(&block);
        } else {
            self.buffer[n..].copy_from_slice(suffix);
            compress(&self.buffer);
        }
        self.set_pos_unchecked(0)
    }

    /// Pad message with 0x80, zeros and 64-bit message length using
    /// big-endian byte order.
    #[inline]
    pub fn len64_padding_be(&mut self, data_len: u64, compress: impl FnMut(&Block<Self>)) {
        self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
    }

    /// Pad message with 0x80, zeros and 64-bit message length using
    /// little-endian byte order.
    #[inline]
    pub fn len64_padding_le(&mut self, data_len: u64, compress: impl FnMut(&Block<Self>)) {
        self.digest_pad(0x80, &data_len.to_le_bytes(), compress);
    }

    /// Pad message with 0x80, zeros and 128-bit message length using
    /// big-endian byte order.
    #[inline]
    pub fn len128_padding_be(&mut self, data_len: u128, compress: impl FnMut(&Block<Self>)) {
        self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
    }

    /// Serialize buffer into a byte array.
    #[inline]
    pub fn serialize(&self) -> Block<Self> {
        let mut res = self.buffer.clone();
        let pos = self.get_pos();
        // zeroize "garbage" data
        for b in res[pos..BS::USIZE - 1].iter_mut() {
            *b = 0;
        }
        res
    }

get_data

Return a slice of the data stored inside the buffer.

Examples found in repository: src/lib.rs (line 311)
    pub fn serialize(&self) -> BlockP1<BS>
    where
        BS: Add<B1>,
        Add1<BS>: ArrayLength<u8>,
    {
        let mut res = BlockP1::<BS>::default();
        res[0] = self.pos;
        let data = self.get_data();
        res[1..][..data.len()].copy_from_slice(data);
        res
    }

set

Set the buffer content and cursor position.

Panics

Panics if pos is greater than or equal to the block size.
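
A hedged sketch of set (same assumed imports as above); the block value is taken from pad_with_zeros here purely to obtain a value of the right block type without naming it.

use block_buffer::{BlockBuffer, Eager};
use block_buffer::generic_array::typenum::U4;

let mut buf = BlockBuffer::<U4, Eager>::default();
// An all-zero block of the correct type, produced from the (empty) buffer.
let block = buf.pad_with_zeros();

// Install the block as the buffer content with the cursor at position 2.
buf.set(block, 2);
assert_eq!(buf.get_pos(), 2);
assert_eq!(buf.get_data(), &[0u8, 0]);

// `buf.set(block, 4)` would panic for this eager buffer, because the
// position must stay below the block size.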

size

Return the size of the internal buffer in bytes.

Examples found in repository: src/lib.rs (line 122), in the digest_blocks listing shown above.

remaining

Return the number of remaining bytes in the internal buffer.
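
A quick sketch relating size, get_pos, and remaining (same assumed imports as above):

use block_buffer::{BlockBuffer, Eager};
use block_buffer::generic_array::typenum::U8;

let buf = BlockBuffer::<U8, Eager>::new(b"abc");
assert_eq!(buf.size(), 8);      // block size in bytes
assert_eq!(buf.get_pos(), 3);   // bytes currently buffered
assert_eq!(buf.remaining(), 5); // free space left in the block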

digest_pad

Compress the remaining data after padding it with delim, zeros, and the suffix bytes. If there is not enough unused space, compress will be called twice.

Panics

Panics if the suffix length is bigger than the block size.
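
A minimal sketch of digest_pad with an empty suffix, so that the padded data fits into the current block and compress runs only once (same assumed imports as above; the expected bytes follow from the source listing below).

use block_buffer::{BlockBuffer, Eager};
use block_buffer::generic_array::typenum::U4;

let mut buf = BlockBuffer::<U4, Eager>::new(b"abc");
let mut seen = Vec::new();

// Delimiter 0x80, then zero padding, then the (empty) suffix.
buf.digest_pad(0x80, &[], |block| seen.push(block.clone()));

// Everything fits into one block, so `compress` is called exactly once.
assert_eq!(seen.len(), 1);
assert_eq!(seen[0].as_slice(), &[b'a', b'b', b'c', 0x80]);
assert_eq!(buf.get_pos(), 0);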

Examples found in repository: src/lib.rs (line 255)
    pub fn len64_padding_be(&mut self, data_len: u64, compress: impl FnMut(&Block<Self>)) {
        self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
    }

    /// Pad message with 0x80, zeros and 64-bit message length using
    /// little-endian byte order.
    #[inline]
    pub fn len64_padding_le(&mut self, data_len: u64, compress: impl FnMut(&Block<Self>)) {
        self.digest_pad(0x80, &data_len.to_le_bytes(), compress);
    }

    /// Pad message with 0x80, zeros and 128-bit message length using
    /// big-endian byte order.
    #[inline]
    pub fn len128_padding_be(&mut self, data_len: u128, compress: impl FnMut(&Block<Self>)) {
        self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
    }

len64_padding_be

Pad the message with 0x80, zeros, and the 64-bit message length in big-endian byte order.

len64_padding_le

Pad the message with 0x80, zeros, and the 64-bit message length in little-endian byte order.

len128_padding_be

Pad the message with 0x80, zeros, and the 128-bit message length in big-endian byte order.
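
For instance, a hedged sketch of finishing a short message with len64_padding_be (same assumed imports as above). With an 8-byte block and a 5-byte message the length suffix does not fit after the delimiter, so compress is called twice, as documented for digest_pad.

use block_buffer::{BlockBuffer, Eager};
use block_buffer::generic_array::typenum::U8;

let msg = b"hello";
let mut buf = BlockBuffer::<U8, Eager>::default();
let mut compressed = Vec::new();

buf.digest_blocks(msg, |blocks| {
    for block in blocks {
        compressed.extend_from_slice(block.as_slice());
    }
});

// MD-style finalization: 0x80, zero padding, then the big-endian bit length.
buf.len64_padding_be((msg.len() as u64) * 8, |block| {
    compressed.extend_from_slice(block.as_slice());
});

// "hello" leaves only two free bytes after the 0x80 delimiter, so the
// 8-byte length goes into a second block: two blocks are compressed in total.
assert_eq!(compressed.len(), 16);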

serialize (eager kind)

Serialize the buffer into a byte array.

deserialize (eager kind)

Deserialize the buffer from a byte array.

serialize (lazy kind)

Serialize the buffer into a byte array.

deserialize (lazy kind)

Deserialize the buffer from a byte array.

Trait Implementations§

Size of the block in bytes.
Return block size in bytes.
Returns a copy of the value.
Performs copy-assignment from source.
Formats the value using the given formatter.
Returns the “default value” for a type.

Auto Trait Implementations§

Blanket Implementations§

Gets the TypeId of self.
Immutably borrows from an owned value.
Mutably borrows from an owned value.

Returns the argument unchanged.

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Should always be Self
The type returned in the event of a conversion error.
Performs the conversion.
The type returned in the event of a conversion error.
Performs the conversion.