iroh_blake3/lib.rs
1//! The official Rust implementation of the [BLAKE3] cryptographic hash
2//! function.
3//!
4//! # Examples
5//!
6//! ```
7//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
8//! // Hash an input all at once.
9//! let hash1 = iroh_blake3::hash(b"foobarbaz");
10//!
11//! // Hash an input incrementally.
12//! let mut hasher = iroh_blake3::Hasher::new();
13//! hasher.update(b"foo");
14//! hasher.update(b"bar");
15//! hasher.update(b"baz");
16//! let hash2 = hasher.finalize();
17//! assert_eq!(hash1, hash2);
18//!
19//! // Extended output. OutputReader also implements Read and Seek.
20//! # #[cfg(feature = "std")] {
21//! let mut output = [0; 1000];
22//! let mut output_reader = hasher.finalize_xof();
23//! output_reader.fill(&mut output);
24//! assert_eq!(hash1, output[..32]);
25//! # }
26//!
27//! // Print a hash as hex.
28//! println!("{}", hash1);
29//! # Ok(())
30//! # }
31//! ```
32//!
33//! # Cargo Features
34//!
35//! The `std` feature (the only feature enabled by default) is required for
36//! implementations of the [`Write`] and [`Seek`] traits, and also for runtime
37//! CPU feature detection on x86. If this feature is disabled, the only way to
38//! use the x86 SIMD implementations is to enable the corresponding instruction
39//! sets globally, with e.g. `RUSTFLAGS="-C target-cpu=native"`. The resulting
40//! binary will not be portable to other machines.
41//!
42//! The `rayon` feature (disabled by default, but enabled for [docs.rs]) adds
43//! the [`Hasher::update_rayon`] method, for multithreaded hashing. However,
44//! even if this feature is enabled, all other APIs remain single-threaded.
45//!
46//! The NEON implementation is enabled by default for AArch64 but requires the
47//! `neon` feature for other ARM targets. Not all ARMv7 CPUs support NEON, and
48//! enabling this feature will produce a binary that's not portable to CPUs
49//! without NEON support.
50//!
51//! The `traits-preview` feature enables implementations of traits from the
52//! RustCrypto [`digest`] crate, and re-exports that crate as
53//! `traits::digest`. However, the traits aren't stable, and they're expected to
54//! change in incompatible ways before that crate reaches 1.0. For that reason,
55//! this crate makes no SemVer guarantees for this feature, and callers who use
56//! it should expect breaking changes between patch versions. (The "-preview"
57//! feature name follows the conventions of the RustCrypto [`signature`] crate.)
58//!
59//! [`Hasher::update_rayon`]: struct.Hasher.html#method.update_rayon
60//! [BLAKE3]: https://blake3.io
61//! [Rayon]: https://github.com/rayon-rs/rayon
62//! [docs.rs]: https://docs.rs/
63//! [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
64//! [`Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
65//! [`digest`]: https://crates.io/crates/digest
66//! [`signature`]: https://crates.io/crates/signature
67
68#![cfg_attr(not(feature = "std"), no_std)]
69
70#[cfg(feature = "zeroize")]
71extern crate zeroize_crate as zeroize; // Needed because `zeroize::Zeroize` assumes the crate is named `zeroize`.
72
73#[cfg(test)]
74mod test;
75
76// The guts module is for incremental use cases like the `bao` crate that need
77// to explicitly compute chunk and parent chaining values. It is semi-stable
78// and likely to keep working, but largely undocumented and not intended for
79// widespread use.
80#[doc(hidden)]
81pub mod guts;
82
83/// Undocumented and unstable, for benchmarks only.
84#[doc(hidden)]
85pub mod platform;
86
87// Platform-specific implementations of the compression function. These
88// BLAKE3-specific cfg flags are set in build.rs.
89#[cfg(blake3_avx2_rust)]
90#[path = "rust_avx2.rs"]
91mod avx2;
92#[cfg(blake3_avx2_ffi)]
93#[path = "ffi_avx2.rs"]
94mod avx2;
95#[cfg(blake3_avx512_ffi)]
96#[path = "ffi_avx512.rs"]
97mod avx512;
98#[cfg(blake3_neon)]
99#[path = "ffi_neon.rs"]
100mod neon;
101mod portable;
102#[cfg(blake3_sse2_rust)]
103#[path = "rust_sse2.rs"]
104mod sse2;
105#[cfg(blake3_sse2_ffi)]
106#[path = "ffi_sse2.rs"]
107mod sse2;
108#[cfg(blake3_sse41_rust)]
109#[path = "rust_sse41.rs"]
110mod sse41;
111#[cfg(blake3_sse41_ffi)]
112#[path = "ffi_sse41.rs"]
113mod sse41;
114
115#[cfg(feature = "traits-preview")]
116pub mod traits;
117
118mod join;
119
120use arrayref::{array_mut_ref, array_ref};
121use arrayvec::{ArrayString, ArrayVec};
122use core::cmp;
123use core::fmt;
124use platform::{Platform, MAX_SIMD_DEGREE, MAX_SIMD_DEGREE_OR_2};
125
126/// The number of bytes in a [`Hash`](struct.Hash.html), 32.
127pub const OUT_LEN: usize = 32;
128
129/// The number of bytes in a key, 32.
130pub const KEY_LEN: usize = 32;
131
132const MAX_DEPTH: usize = 54; // 2^54 * CHUNK_LEN = 2^64
133use guts::{BLOCK_LEN, CHUNK_LEN};
134
135// While iterating the compression function within a chunk, the CV is
136// represented as words, to avoid doing two extra endianness conversions for
137// each compression in the portable implementation. But the hash_many interface
// needs to hash both input bytes and parent nodes, so it's better for its
139// output CVs to be represented as bytes.
140type CVWords = [u32; 8];
141type CVBytes = [u8; 32]; // little-endian
142
143const IV: &CVWords = &[
144 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
145];
146
147const MSG_SCHEDULE: [[usize; 16]; 7] = [
148 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
149 [2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8],
150 [3, 4, 10, 12, 13, 2, 7, 14, 6, 5, 9, 0, 11, 15, 8, 1],
151 [10, 7, 12, 9, 14, 3, 13, 15, 4, 0, 11, 2, 5, 8, 1, 6],
152 [12, 13, 9, 11, 15, 10, 14, 8, 7, 2, 5, 3, 0, 1, 6, 4],
153 [9, 14, 11, 5, 8, 12, 15, 1, 13, 3, 0, 10, 2, 6, 4, 7],
154 [11, 15, 5, 0, 1, 9, 8, 6, 14, 10, 2, 12, 3, 4, 7, 13],
155];
156
157// These are the internal flags that we use to domain separate root/non-root,
// chunk/parent, and chunk beginning/middle/end. They are set in the low bits
// of the block flags word in the compression function, starting with
// CHUNK_START at bit 0.
161const CHUNK_START: u8 = 1 << 0;
162const CHUNK_END: u8 = 1 << 1;
163const PARENT: u8 = 1 << 2;
164const ROOT: u8 = 1 << 3;
165const KEYED_HASH: u8 = 1 << 4;
166const DERIVE_KEY_CONTEXT: u8 = 1 << 5;
167const DERIVE_KEY_MATERIAL: u8 = 1 << 6;
168
169#[inline]
170fn counter_low(counter: u64) -> u32 {
171 counter as u32
172}
173
174#[inline]
175fn counter_high(counter: u64) -> u32 {
176 (counter >> 32) as u32
177}
178
179/// An output of the default size, 32 bytes, which provides constant-time
180/// equality checking.
181///
182/// `Hash` implements [`From`] and [`Into`] for `[u8; 32]`, and it provides
183/// [`from_bytes`] and [`as_bytes`] for explicit conversions between itself and
184/// `[u8; 32]`. However, byte arrays and slices don't provide constant-time
185/// equality checking, which is often a security requirement in software that
186/// handles private data. `Hash` doesn't implement [`Deref`] or [`AsRef`], to
187/// avoid situations where a type conversion happens implicitly and the
188/// constant-time property is accidentally lost.
189///
190/// `Hash` provides the [`to_hex`] and [`from_hex`] methods for converting to
191/// and from hexadecimal. It also implements [`Display`] and [`FromStr`].
192///
193/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
194/// [`Into`]: https://doc.rust-lang.org/std/convert/trait.Into.html
195/// [`as_bytes`]: #method.as_bytes
196/// [`from_bytes`]: #method.from_bytes
197/// [`Deref`]: https://doc.rust-lang.org/stable/std/ops/trait.Deref.html
198/// [`AsRef`]: https://doc.rust-lang.org/std/convert/trait.AsRef.html
199/// [`to_hex`]: #method.to_hex
200/// [`from_hex`]: #method.from_hex
201/// [`Display`]: https://doc.rust-lang.org/std/fmt/trait.Display.html
202/// [`FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html
203#[cfg_attr(feature = "zeroize", derive(zeroize::Zeroize))]
204#[derive(Clone, Copy, Hash)]
205pub struct Hash([u8; OUT_LEN]);
206
207impl Hash {
208 /// The raw bytes of the `Hash`. Note that byte arrays don't provide
209 /// constant-time equality checking, so if you need to compare hashes,
210 /// prefer the `Hash` type.
211 #[inline]
212 pub const fn as_bytes(&self) -> &[u8; OUT_LEN] {
213 &self.0
214 }
215
216 /// Create a `Hash` from its raw bytes representation.
217 pub const fn from_bytes(bytes: [u8; OUT_LEN]) -> Self {
218 Self(bytes)
219 }
220
221 /// Encode a `Hash` in lowercase hexadecimal.
222 ///
223 /// The returned [`ArrayString`] is a fixed size and doesn't allocate memory
224 /// on the heap. Note that [`ArrayString`] doesn't provide constant-time
225 /// equality checking, so if you need to compare hashes, prefer the `Hash`
226 /// type.
227 ///
228 /// [`ArrayString`]: https://docs.rs/arrayvec/0.5.1/arrayvec/struct.ArrayString.html
229 pub fn to_hex(&self) -> ArrayString<{ 2 * OUT_LEN }> {
230 let mut s = ArrayString::new();
231 let table = b"0123456789abcdef";
232 for &b in self.0.iter() {
233 s.push(table[(b >> 4) as usize] as char);
234 s.push(table[(b & 0xf) as usize] as char);
235 }
236 s
237 }
238
239 /// Decode a `Hash` from hexadecimal. Both uppercase and lowercase ASCII
240 /// bytes are supported.
241 ///
242 /// Any byte outside the ranges `'0'...'9'`, `'a'...'f'`, and `'A'...'F'`
243 /// results in an error. An input length other than 64 also results in an
244 /// error.
245 ///
246 /// Note that `Hash` also implements `FromStr`, so `Hash::from_hex("...")`
247 /// is equivalent to `"...".parse()`.
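    ///
    /// # Example
    ///
    /// A minimal round-trip sketch using [`to_hex`](#method.to_hex) and
    /// `from_hex`:
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let hash = iroh_blake3::hash(b"foo");
    /// let hex = hash.to_hex();
    /// let parsed = iroh_blake3::Hash::from_hex(hex.as_str())?;
    /// assert_eq!(hash, parsed);
    /// # Ok(())
    /// # }
    /// ```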
248 pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, HexError> {
249 fn hex_val(byte: u8) -> Result<u8, HexError> {
250 match byte {
251 b'A'..=b'F' => Ok(byte - b'A' + 10),
252 b'a'..=b'f' => Ok(byte - b'a' + 10),
253 b'0'..=b'9' => Ok(byte - b'0'),
254 _ => Err(HexError(HexErrorInner::InvalidByte(byte))),
255 }
256 }
257 let hex_bytes: &[u8] = hex.as_ref();
258 if hex_bytes.len() != OUT_LEN * 2 {
259 return Err(HexError(HexErrorInner::InvalidLen(hex_bytes.len())));
260 }
261 let mut hash_bytes: [u8; OUT_LEN] = [0; OUT_LEN];
262 for i in 0..OUT_LEN {
263 hash_bytes[i] = 16 * hex_val(hex_bytes[2 * i])? + hex_val(hex_bytes[2 * i + 1])?;
264 }
265 Ok(Hash::from(hash_bytes))
266 }
267}
268
269impl From<[u8; OUT_LEN]> for Hash {
270 #[inline]
271 fn from(bytes: [u8; OUT_LEN]) -> Self {
272 Self::from_bytes(bytes)
273 }
274}
275
276impl From<Hash> for [u8; OUT_LEN] {
277 #[inline]
278 fn from(hash: Hash) -> Self {
279 hash.0
280 }
281}
282
283impl core::str::FromStr for Hash {
284 type Err = HexError;
285
286 fn from_str(s: &str) -> Result<Self, Self::Err> {
287 Hash::from_hex(s)
288 }
289}
290
291/// This implementation is constant-time.
292impl PartialEq for Hash {
293 #[inline]
294 fn eq(&self, other: &Hash) -> bool {
295 constant_time_eq::constant_time_eq_32(&self.0, &other.0)
296 }
297}
298
299/// This implementation is constant-time.
300impl PartialEq<[u8; OUT_LEN]> for Hash {
301 #[inline]
302 fn eq(&self, other: &[u8; OUT_LEN]) -> bool {
303 constant_time_eq::constant_time_eq_32(&self.0, other)
304 }
305}
306
307/// This implementation is constant-time if the target is 32 bytes long.
308impl PartialEq<[u8]> for Hash {
309 #[inline]
310 fn eq(&self, other: &[u8]) -> bool {
311 constant_time_eq::constant_time_eq(&self.0, other)
312 }
313}
314
315impl Eq for Hash {}
316
317impl fmt::Display for Hash {
318 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
319 // Formatting field as `&str` to reduce code size since the `Debug`
320 // dynamic dispatch table for `&str` is likely needed elsewhere already,
321 // but that for `ArrayString<[u8; 64]>` is not.
322 let hex = self.to_hex();
323 let hex: &str = hex.as_str();
324
325 f.write_str(hex)
326 }
327}
328
329impl fmt::Debug for Hash {
330 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
331 // Formatting field as `&str` to reduce code size since the `Debug`
332 // dynamic dispatch table for `&str` is likely needed elsewhere already,
333 // but that for `ArrayString<[u8; 64]>` is not.
334 let hex = self.to_hex();
335 let hex: &str = hex.as_str();
336
337 f.debug_tuple("Hash").field(&hex).finish()
338 }
339}
340
341/// The error type for [`Hash::from_hex`].
342///
343/// The `.to_string()` representation of this error currently distinguishes between bad length
344/// errors and bad character errors. This is to help with logging and debugging, but it isn't a
345/// stable API detail, and it may change at any time.
346#[derive(Clone, Debug)]
347pub struct HexError(HexErrorInner);
348
349#[derive(Clone, Debug)]
350enum HexErrorInner {
351 InvalidByte(u8),
352 InvalidLen(usize),
353}
354
355impl fmt::Display for HexError {
356 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
357 match self.0 {
358 HexErrorInner::InvalidByte(byte) => {
359 if byte < 128 {
360 write!(f, "invalid hex character: {:?}", byte as char)
361 } else {
362 write!(f, "invalid hex character: 0x{:x}", byte)
363 }
364 }
365 HexErrorInner::InvalidLen(len) => {
366 write!(f, "expected 64 hex bytes, received {}", len)
367 }
368 }
369 }
370}
371
372#[cfg(feature = "std")]
373impl std::error::Error for HexError {}
374
375// Each chunk or parent node can produce either a 32-byte chaining value or, by
376// setting the ROOT flag, any number of final output bytes. The Output struct
377// captures the state just prior to choosing between those two possibilities.
378#[cfg_attr(feature = "zeroize", derive(zeroize::Zeroize))]
379#[derive(Clone)]
380struct Output {
381 input_chaining_value: CVWords,
382 block: [u8; 64],
383 block_len: u8,
384 counter: u64,
385 flags: u8,
386 #[cfg_attr(feature = "zeroize", zeroize(skip))]
387 platform: Platform,
388}
389
390impl Output {
391 fn chaining_value(&self) -> CVBytes {
392 let mut cv = self.input_chaining_value;
393 self.platform.compress_in_place(
394 &mut cv,
395 &self.block,
396 self.block_len,
397 self.counter,
398 self.flags,
399 );
400 platform::le_bytes_from_words_32(&cv)
401 }
402
403 fn root_hash(&self) -> Hash {
404 debug_assert_eq!(self.counter, 0);
405 let mut cv = self.input_chaining_value;
406 self.platform
407 .compress_in_place(&mut cv, &self.block, self.block_len, 0, self.flags | ROOT);
408 Hash(platform::le_bytes_from_words_32(&cv))
409 }
410
411 fn root_output_block(&self) -> [u8; 2 * OUT_LEN] {
412 self.platform.compress_xof(
413 &self.input_chaining_value,
414 &self.block,
415 self.block_len,
416 self.counter,
417 self.flags | ROOT,
418 )
419 }
420}
421
422#[derive(Clone)]
423#[cfg_attr(feature = "zeroize", derive(zeroize::Zeroize))]
424struct ChunkState {
425 cv: CVWords,
426 chunk_counter: u64,
427 buf: [u8; BLOCK_LEN],
428 buf_len: u8,
429 blocks_compressed: u8,
430 flags: u8,
431 #[cfg_attr(feature = "zeroize", zeroize(skip))]
432 platform: Platform,
433}
434
435impl ChunkState {
436 fn new(key: &CVWords, chunk_counter: u64, flags: u8, platform: Platform) -> Self {
437 Self {
438 cv: *key,
439 chunk_counter,
440 buf: [0; BLOCK_LEN],
441 buf_len: 0,
442 blocks_compressed: 0,
443 flags,
444 platform,
445 }
446 }
447
448 fn len(&self) -> usize {
449 BLOCK_LEN * self.blocks_compressed as usize + self.buf_len as usize
450 }
451
452 fn fill_buf(&mut self, input: &mut &[u8]) {
453 let want = BLOCK_LEN - self.buf_len as usize;
454 let take = cmp::min(want, input.len());
455 self.buf[self.buf_len as usize..][..take].copy_from_slice(&input[..take]);
456 self.buf_len += take as u8;
457 *input = &input[take..];
458 }
459
460 fn start_flag(&self) -> u8 {
461 if self.blocks_compressed == 0 {
462 CHUNK_START
463 } else {
464 0
465 }
466 }
467
468 // Try to avoid buffering as much as possible, by compressing directly from
469 // the input slice when full blocks are available.
470 fn update(&mut self, mut input: &[u8]) -> &mut Self {
471 if self.buf_len > 0 {
472 self.fill_buf(&mut input);
473 if !input.is_empty() {
474 debug_assert_eq!(self.buf_len as usize, BLOCK_LEN);
475 let block_flags = self.flags | self.start_flag(); // borrowck
476 self.platform.compress_in_place(
477 &mut self.cv,
478 &self.buf,
479 BLOCK_LEN as u8,
480 self.chunk_counter,
481 block_flags,
482 );
483 self.buf_len = 0;
484 self.buf = [0; BLOCK_LEN];
485 self.blocks_compressed += 1;
486 }
487 }
488
489 while input.len() > BLOCK_LEN {
490 debug_assert_eq!(self.buf_len, 0);
491 let block_flags = self.flags | self.start_flag(); // borrowck
492 self.platform.compress_in_place(
493 &mut self.cv,
494 array_ref!(input, 0, BLOCK_LEN),
495 BLOCK_LEN as u8,
496 self.chunk_counter,
497 block_flags,
498 );
499 self.blocks_compressed += 1;
500 input = &input[BLOCK_LEN..];
501 }
502
503 self.fill_buf(&mut input);
504 debug_assert!(input.is_empty());
505 debug_assert!(self.len() <= CHUNK_LEN);
506 self
507 }
508
509 fn output(&self) -> Output {
510 let block_flags = self.flags | self.start_flag() | CHUNK_END;
511 Output {
512 input_chaining_value: self.cv,
513 block: self.buf,
514 block_len: self.buf_len,
515 counter: self.chunk_counter,
516 flags: block_flags,
517 platform: self.platform,
518 }
519 }
520}
521
522// Don't derive(Debug), because the state may be secret.
523impl fmt::Debug for ChunkState {
524 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
525 f.debug_struct("ChunkState")
526 .field("len", &self.len())
527 .field("chunk_counter", &self.chunk_counter)
528 .field("flags", &self.flags)
529 .field("platform", &self.platform)
530 .finish()
531 }
532}
533
534// IMPLEMENTATION NOTE
535// ===================
536// The recursive function compress_subtree_wide(), implemented below, is the
537// basis of high-performance BLAKE3. We use it both for all-at-once hashing,
538// and for the incremental input with Hasher (though we have to be careful with
539// subtree boundaries in the incremental case). compress_subtree_wide() applies
540// several optimizations at the same time:
541// - Multithreading with Rayon.
542// - Parallel chunk hashing with SIMD.
543// - Parallel parent hashing with SIMD. Note that while SIMD chunk hashing
544// maxes out at MAX_SIMD_DEGREE*CHUNK_LEN, parallel parent hashing continues
//   to benefit from larger inputs, because more levels of the tree can
546// use full-width SIMD vectors for parent hashing. Without parallel parent
547// hashing, we lose about 10% of overall throughput on AVX2 and AVX-512.
548
549/// Undocumented and unstable, for benchmarks only.
550#[doc(hidden)]
551#[derive(Clone, Copy)]
552pub enum IncrementCounter {
553 Yes,
554 No,
555}
556
557impl IncrementCounter {
558 #[inline]
559 fn yes(&self) -> bool {
560 match self {
561 IncrementCounter::Yes => true,
562 IncrementCounter::No => false,
563 }
564 }
565}
566
567// The largest power of two less than or equal to `n`, used for left_len()
568// immediately below, and also directly in Hasher::update().
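// For example (illustrative): largest_power_of_two_leq(6) == 4, and
// largest_power_of_two_leq(8) == 8.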
569fn largest_power_of_two_leq(n: usize) -> usize {
570 ((n / 2) + 1).next_power_of_two()
571}
572
573// Given some input larger than one chunk, return the number of bytes that
574// should go in the left subtree. This is the largest power-of-2 number of
575// chunks that leaves at least 1 byte for the right subtree.
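// For example (illustrative): with CHUNK_LEN = 1024, an input of 3073 bytes
// has 3 full chunks, so the left subtree gets 2 * CHUNK_LEN = 2048 bytes and
// the right subtree gets the remaining 1025 bytes.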
576fn left_len(content_len: usize) -> usize {
577 debug_assert!(content_len > CHUNK_LEN);
578 // Subtract 1 to reserve at least one byte for the right side.
579 let full_chunks = (content_len - 1) / CHUNK_LEN;
580 largest_power_of_two_leq(full_chunks) * CHUNK_LEN
581}
582
583// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
584// on a single thread. Write out the chunk chaining values and return the
585// number of chunks hashed. These chunks are never the root and never empty;
586// those cases use a different codepath.
587fn compress_chunks_parallel(
588 input: &[u8],
589 key: &CVWords,
590 chunk_counter: u64,
591 flags: u8,
592 platform: Platform,
593 out: &mut [u8],
594) -> usize {
595 debug_assert!(!input.is_empty(), "empty chunks below the root");
596 debug_assert!(input.len() <= MAX_SIMD_DEGREE * CHUNK_LEN);
597
598 let mut chunks_exact = input.chunks_exact(CHUNK_LEN);
599 let mut chunks_array = ArrayVec::<&[u8; CHUNK_LEN], MAX_SIMD_DEGREE>::new();
600 for chunk in &mut chunks_exact {
601 chunks_array.push(array_ref!(chunk, 0, CHUNK_LEN));
602 }
603 platform.hash_many(
604 &chunks_array,
605 key,
606 chunk_counter,
607 IncrementCounter::Yes,
608 flags,
609 CHUNK_START,
610 CHUNK_END,
611 out,
612 );
613
614 // Hash the remaining partial chunk, if there is one. Note that the empty
615 // chunk (meaning the empty message) is a different codepath.
616 let chunks_so_far = chunks_array.len();
617 if !chunks_exact.remainder().is_empty() {
618 let counter = chunk_counter + chunks_so_far as u64;
619 let mut chunk_state = ChunkState::new(key, counter, flags, platform);
620 chunk_state.update(chunks_exact.remainder());
621 *array_mut_ref!(out, chunks_so_far * OUT_LEN, OUT_LEN) =
622 chunk_state.output().chaining_value();
623 chunks_so_far + 1
624 } else {
625 chunks_so_far
626 }
627}
628
629// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
630// on a single thread. Write out the parent chaining values and return the
631// number of parents hashed. (If there's an odd input chaining value left over,
632// return it as an additional output.) These parents are never the root and
633// never empty; those cases use a different codepath.
634fn compress_parents_parallel(
635 child_chaining_values: &[u8],
636 key: &CVWords,
637 flags: u8,
638 platform: Platform,
639 out: &mut [u8],
640) -> usize {
641 debug_assert_eq!(child_chaining_values.len() % OUT_LEN, 0, "wacky hash bytes");
642 let num_children = child_chaining_values.len() / OUT_LEN;
643 debug_assert!(num_children >= 2, "not enough children");
644 debug_assert!(num_children <= 2 * MAX_SIMD_DEGREE_OR_2, "too many");
645
646 let mut parents_exact = child_chaining_values.chunks_exact(BLOCK_LEN);
647 // Use MAX_SIMD_DEGREE_OR_2 rather than MAX_SIMD_DEGREE here, because of
648 // the requirements of compress_subtree_wide().
649 let mut parents_array = ArrayVec::<&[u8; BLOCK_LEN], MAX_SIMD_DEGREE_OR_2>::new();
650 for parent in &mut parents_exact {
651 parents_array.push(array_ref!(parent, 0, BLOCK_LEN));
652 }
653 platform.hash_many(
654 &parents_array,
655 key,
656 0, // Parents always use counter 0.
657 IncrementCounter::No,
658 flags | PARENT,
659 0, // Parents have no start flags.
660 0, // Parents have no end flags.
661 out,
662 );
663
664 // If there's an odd child left over, it becomes an output.
665 let parents_so_far = parents_array.len();
666 if !parents_exact.remainder().is_empty() {
667 out[parents_so_far * OUT_LEN..][..OUT_LEN].copy_from_slice(parents_exact.remainder());
668 parents_so_far + 1
669 } else {
670 parents_so_far
671 }
672}
673
674// The wide helper function returns (writes out) an array of chaining values
675// and returns the length of that array. The number of chaining values returned
676// is the dynamically detected SIMD degree, at most MAX_SIMD_DEGREE. Or fewer,
677// if the input is shorter than that many chunks. The reason for maintaining a
678// wide array of chaining values going back up the tree, is to allow the
679// implementation to hash as many parents in parallel as possible.
680//
681// As a special case when the SIMD degree is 1, this function will still return
682// at least 2 outputs. This guarantees that this function doesn't perform the
683// root compression. (If it did, it would use the wrong flags, and also we
684// wouldn't be able to implement extendable output.) Note that this function is
685// not used when the whole input is only 1 chunk long; that's a different
686// codepath.
687//
688// Why not just have the caller split the input on the first update(), instead
689// of implementing this special rule? Because we don't want to limit SIMD or
690// multithreading parallelism for that update().
691fn compress_subtree_wide<J: join::Join>(
692 input: &[u8],
693 key: &CVWords,
694 chunk_counter: u64,
695 flags: u8,
696 platform: Platform,
697 out: &mut [u8],
698) -> usize {
699 // Note that the single chunk case does *not* bump the SIMD degree up to 2
700 // when it is 1. This allows Rayon the option of multithreading even the
701 // 2-chunk case, which can help performance on smaller platforms.
702 if input.len() <= platform.simd_degree() * CHUNK_LEN {
703 return compress_chunks_parallel(input, key, chunk_counter, flags, platform, out);
704 }
705
706 // With more than simd_degree chunks, we need to recurse. Start by dividing
707 // the input into left and right subtrees. (Note that this is only optimal
708 // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
709 // of 3 or something, we'll need a more complicated strategy.)
710 debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2");
711 let (left, right) = input.split_at(left_len(input.len()));
712 let right_chunk_counter = chunk_counter + (left.len() / CHUNK_LEN) as u64;
713
714 // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
715 // account for the special case of returning 2 outputs when the SIMD degree
716 // is 1.
717 let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
718 let degree = if left.len() == CHUNK_LEN {
719 // The "simd_degree=1 and we're at the leaf nodes" case.
720 debug_assert_eq!(platform.simd_degree(), 1);
721 1
722 } else {
723 cmp::max(platform.simd_degree(), 2)
724 };
725 let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN);
726
727 // Recurse! For update_rayon(), this is where we take advantage of RayonJoin and use multiple
728 // threads.
729 let (left_n, right_n) = J::join(
730 || compress_subtree_wide::<J>(left, key, chunk_counter, flags, platform, left_out),
731 || compress_subtree_wide::<J>(right, key, right_chunk_counter, flags, platform, right_out),
732 );
733
734 // The special case again. If simd_degree=1, then we'll have left_n=1 and
735 // right_n=1. Rather than compressing them into a single output, return
736 // them directly, to make sure we always have at least two outputs.
737 debug_assert_eq!(left_n, degree);
738 debug_assert!(right_n >= 1 && right_n <= left_n);
739 if left_n == 1 {
740 out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]);
741 return 2;
742 }
743
744 // Otherwise, do one layer of parent node compression.
745 let num_children = left_n + right_n;
746 compress_parents_parallel(
747 &cv_array[..num_children * OUT_LEN],
748 key,
749 flags,
750 platform,
751 out,
752 )
753}
754
755// Hash a subtree with compress_subtree_wide(), and then condense the resulting
756// list of chaining values down to a single parent node. Don't compress that
757// last parent node, however. Instead, return its message bytes (the
758// concatenated chaining values of its children). This is necessary when the
759// first call to update() supplies a complete subtree, because the topmost
760// parent node of that subtree could end up being the root. It's also necessary
761// for extended output in the general case.
762//
763// As with compress_subtree_wide(), this function is not used on inputs of 1
764// chunk or less. That's a different codepath.
765fn compress_subtree_to_parent_node<J: join::Join>(
766 input: &[u8],
767 key: &CVWords,
768 chunk_counter: u64,
769 flags: u8,
770 platform: Platform,
771) -> [u8; BLOCK_LEN] {
772 debug_assert!(input.len() > CHUNK_LEN);
773 let mut cv_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
774 let mut num_cvs =
775 compress_subtree_wide::<J>(input, &key, chunk_counter, flags, platform, &mut cv_array);
776 debug_assert!(num_cvs >= 2);
777
778 // If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
779 // compress_subtree_wide() returns more than 2 chaining values. Condense
780 // them into 2 by forming parent nodes repeatedly.
781 let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2];
782 while num_cvs > 2 {
783 let cv_slice = &cv_array[..num_cvs * OUT_LEN];
784 num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array);
785 cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]);
786 }
787 *array_ref!(cv_array, 0, 2 * OUT_LEN)
788}
789
790// Hash a complete input all at once. Unlike compress_subtree_wide() and
791// compress_subtree_to_parent_node(), this function handles the 1 chunk case.
792fn hash_all_at_once<J: join::Join>(input: &[u8], key: &CVWords, flags: u8) -> Output {
793 let platform = Platform::detect();
794
795 // If the whole subtree is one chunk, hash it directly with a ChunkState.
796 if input.len() <= CHUNK_LEN {
797 return ChunkState::new(key, 0, flags, platform)
798 .update(input)
799 .output();
800 }
801
802 // Otherwise construct an Output object from the parent node returned by
803 // compress_subtree_to_parent_node().
804 Output {
805 input_chaining_value: *key,
806 block: compress_subtree_to_parent_node::<J>(input, key, 0, flags, platform),
807 block_len: BLOCK_LEN as u8,
808 counter: 0,
809 flags: flags | PARENT,
810 platform,
811 }
812}
813
814/// The default hash function.
815///
816/// For an incremental version that accepts multiple writes, see
817/// [`Hasher::update`].
818///
819/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`] and
820/// [`OutputReader`].
821///
822/// This function is always single-threaded. For multithreading support, see
823/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
824pub fn hash(input: &[u8]) -> Hash {
825 hash_all_at_once::<join::SerialJoin>(input, IV, 0).root_hash()
826}
827
828/// The keyed hash function.
829///
830/// This is suitable for use as a message authentication code, for example to
831/// replace an HMAC instance. In that use case, the constant-time equality
832/// checking provided by [`Hash`](struct.Hash.html) is almost always a security
833/// requirement, and callers need to be careful not to compare MACs as raw
834/// bytes.
835///
836/// For output sizes other than 32 bytes, see [`Hasher::new_keyed`],
837/// [`Hasher::finalize_xof`], and [`OutputReader`].
838///
839/// This function is always single-threaded. For multithreading support, see
840/// [`Hasher::new_keyed`] and
841/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
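///
/// # Example
///
/// A minimal sketch; the all-zero key below is only a placeholder, and real
/// keys should be secret and uniformly random:
///
/// ```
/// let key = [0u8; iroh_blake3::KEY_LEN];
/// let mac1 = iroh_blake3::keyed_hash(&key, b"message");
/// let mac2 = iroh_blake3::keyed_hash(&key, b"message");
/// assert_eq!(mac1, mac2); // `Hash` equality is constant-time
/// ```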
842pub fn keyed_hash(key: &[u8; KEY_LEN], input: &[u8]) -> Hash {
843 let key_words = platform::words_from_le_bytes_32(key);
844 hash_all_at_once::<join::SerialJoin>(input, &key_words, KEYED_HASH).root_hash()
845}
846
847/// The key derivation function.
848///
849/// Given cryptographic key material of any length and a context string of any
850/// length, this function outputs a 32-byte derived subkey. **The context string
851/// should be hardcoded, globally unique, and application-specific.** A good
852/// default format for such strings is `"[application] [commit timestamp]
853/// [purpose]"`, e.g., `"example.com 2019-12-25 16:18:03 session tokens v1"`.
854///
855/// Key derivation is important when you want to use the same key in multiple
856/// algorithms or use cases. Using the same key with different cryptographic
857/// algorithms is generally forbidden, and deriving a separate subkey for each
858/// use case protects you from bad interactions. Derived keys also mitigate the
859/// damage from one part of your application accidentally leaking its key.
860///
861/// As a rare exception to that general rule, however, it is possible to use
862/// `derive_key` itself with key material that you are already using with
863/// another algorithm. You might need to do this if you're adding features to
864/// an existing application, which does not yet use key derivation internally.
865/// However, you still must not share key material with algorithms that forbid
866/// key reuse entirely, like a one-time pad. For more on this, see sections 6.2
867/// and 7.8 of the [BLAKE3 paper](https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf).
868///
869/// Note that BLAKE3 is not a password hash, and **`derive_key` should never be
870/// used with passwords.** Instead, use a dedicated password hash like
871/// [Argon2]. Password hashes are entirely different from generic hash
872/// functions, with opposite design requirements.
873///
874/// For output sizes other than 32 bytes, see [`Hasher::new_derive_key`],
875/// [`Hasher::finalize_xof`], and [`OutputReader`].
876///
877/// This function is always single-threaded. For multithreading support, see
878/// [`Hasher::new_derive_key`] and
879/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
880///
881/// [Argon2]: https://en.wikipedia.org/wiki/Argon2
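///
/// # Example
///
/// A minimal sketch, reusing the example context string from above:
///
/// ```
/// const CONTEXT: &str = "example.com 2019-12-25 16:18:03 session tokens v1";
/// let input_key_material = b"some input keying material";
/// let subkey: [u8; 32] = iroh_blake3::derive_key(CONTEXT, input_key_material);
/// // The same context and key material always derive the same subkey.
/// assert_eq!(subkey, iroh_blake3::derive_key(CONTEXT, input_key_material));
/// ```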
882pub fn derive_key(context: &str, key_material: &[u8]) -> [u8; OUT_LEN] {
883 let context_key =
884 hash_all_at_once::<join::SerialJoin>(context.as_bytes(), IV, DERIVE_KEY_CONTEXT)
885 .root_hash();
886 let context_key_words = platform::words_from_le_bytes_32(context_key.as_bytes());
887 hash_all_at_once::<join::SerialJoin>(key_material, &context_key_words, DERIVE_KEY_MATERIAL)
888 .root_hash()
889 .0
890}
891
892fn parent_node_output(
893 left_child: &CVBytes,
894 right_child: &CVBytes,
895 key: &CVWords,
896 flags: u8,
897 platform: Platform,
898) -> Output {
899 let mut block = [0; BLOCK_LEN];
900 block[..32].copy_from_slice(left_child);
901 block[32..].copy_from_slice(right_child);
902 Output {
903 input_chaining_value: *key,
904 block,
905 block_len: BLOCK_LEN as u8,
906 counter: 0,
907 flags: flags | PARENT,
908 platform,
909 }
910}
911
912/// An incremental hash state that can accept any number of writes.
913///
914/// When the `traits-preview` Cargo feature is enabled, this type implements
915/// several commonly used traits from the
916/// [`digest`](https://crates.io/crates/digest) crate. However, those
917/// traits aren't stable, and they're expected to change in incompatible ways
918/// before that crate reaches 1.0. For that reason, this crate makes no SemVer
919/// guarantees for this feature, and callers who use it should expect breaking
920/// changes between patch versions.
921///
922/// When the `rayon` Cargo feature is enabled, the
923/// [`update_rayon`](#method.update_rayon) method is available for multithreaded
924/// hashing.
925///
926/// **Performance note:** The [`update`](#method.update) method can't take full
927/// advantage of SIMD optimizations if its input buffer is too small or oddly
928/// sized. Using a 16 KiB buffer, or any multiple of that, enables all currently
929/// supported SIMD instruction sets.
930///
931/// # Examples
932///
933/// ```
934/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
935/// // Hash an input incrementally.
936/// let mut hasher = iroh_blake3::Hasher::new();
937/// hasher.update(b"foo");
938/// hasher.update(b"bar");
939/// hasher.update(b"baz");
940/// assert_eq!(hasher.finalize(), iroh_blake3::hash(b"foobarbaz"));
941///
942/// // Extended output. OutputReader also implements Read and Seek.
943/// # #[cfg(feature = "std")] {
944/// let mut output = [0; 1000];
945/// let mut output_reader = hasher.finalize_xof();
946/// output_reader.fill(&mut output);
947/// assert_eq!(&output[..32], iroh_blake3::hash(b"foobarbaz").as_bytes());
948/// # }
949/// # Ok(())
950/// # }
951/// ```
952#[derive(Clone)]
953#[cfg_attr(feature = "zeroize", derive(zeroize::Zeroize))]
954pub struct Hasher {
955 key: CVWords,
956 chunk_state: ChunkState,
957 // The stack size is MAX_DEPTH + 1 because we do lazy merging. For example,
958 // with 7 chunks, we have 3 entries in the stack. Adding an 8th chunk
959 // requires a 4th entry, rather than merging everything down to 1, because
960 // we don't know whether more input is coming. This is different from how
961 // the reference implementation does things.
962 cv_stack: ArrayVec<CVBytes, { MAX_DEPTH + 1 }>,
963}
964
965impl Hasher {
966 fn new_internal(key: &CVWords, flags: u8) -> Self {
967 Self {
968 key: *key,
969 chunk_state: ChunkState::new(key, 0, flags, Platform::detect()),
970 cv_stack: ArrayVec::new(),
971 }
972 }
973
974 /// Construct a new `Hasher` for the regular hash function.
975 pub fn new() -> Self {
976 Self::new_internal(IV, 0)
977 }
978
    /// Construct a new `Hasher` whose chunk counter starts at `start_chunk` instead of 0.
980 fn new_with_start_chunk(start_chunk: u64) -> Self {
981 Self {
982 key: *IV,
983 chunk_state: ChunkState::new(IV, start_chunk, 0, Platform::detect()),
984 cv_stack: ArrayVec::new(),
985 }
986 }
987
988 /// Construct a new `Hasher` for the keyed hash function. See
989 /// [`keyed_hash`].
990 ///
991 /// [`keyed_hash`]: fn.keyed_hash.html
992 pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self {
993 let key_words = platform::words_from_le_bytes_32(key);
994 Self::new_internal(&key_words, KEYED_HASH)
995 }
996
997 /// Construct a new `Hasher` for the key derivation function. See
998 /// [`derive_key`]. The context string should be hardcoded, globally
999 /// unique, and application-specific.
1000 ///
1001 /// [`derive_key`]: fn.derive_key.html
1002 pub fn new_derive_key(context: &str) -> Self {
1003 let context_key =
1004 hash_all_at_once::<join::SerialJoin>(context.as_bytes(), IV, DERIVE_KEY_CONTEXT)
1005 .root_hash();
1006 let context_key_words = platform::words_from_le_bytes_32(context_key.as_bytes());
1007 Self::new_internal(&context_key_words, DERIVE_KEY_MATERIAL)
1008 }
1009
1010 /// Reset the `Hasher` to its initial state.
1011 ///
1012 /// This is functionally the same as overwriting the `Hasher` with a new
1013 /// one, using the same key or context string if any.
1014 pub fn reset(&mut self) -> &mut Self {
1015 self.chunk_state = ChunkState::new(
1016 &self.key,
1017 0,
1018 self.chunk_state.flags,
1019 self.chunk_state.platform,
1020 );
1021 self.cv_stack.clear();
1022 self
1023 }
1024
1025 // As described in push_cv() below, we do "lazy merging", delaying merges
1026 // until right before the next CV is about to be added. This is different
1027 // from the reference implementation. Another difference is that we aren't
1028 // always merging 1 chunk at a time. Instead, each CV might represent any
1029 // power-of-two number of chunks, as long as the smaller-above-larger stack
1030 // order is maintained. Instead of the "count the trailing 0-bits"
1031 // algorithm described in the spec, we use a "count the total number of
1032 // 1-bits" variant that doesn't require us to retain the subtree size of
1033 // the CV on top of the stack. The principle is the same: each CV that
1034 // should remain in the stack is represented by a 1-bit in the total number
1035 // of chunks (or bytes) so far.
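    // For example (illustrative): with a total of 6 chunks (binary 110), at
    // most two CVs remain on the stack, one covering a 4-chunk subtree and one
    // covering a 2-chunk subtree, matching the two 1-bits in the total.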
1036 fn merge_cv_stack(&mut self, total_len: u64) {
1037 let post_merge_stack_len = total_len.count_ones() as usize;
1038 while self.cv_stack.len() > post_merge_stack_len {
1039 let right_child = self.cv_stack.pop().unwrap();
1040 let left_child = self.cv_stack.pop().unwrap();
1041 let parent_output = parent_node_output(
1042 &left_child,
1043 &right_child,
1044 &self.key,
1045 self.chunk_state.flags,
1046 self.chunk_state.platform,
1047 );
1048 self.cv_stack.push(parent_output.chaining_value());
1049 }
1050 }
1051
1052 // In reference_impl.rs, we merge the new CV with existing CVs from the
1053 // stack before pushing it. We can do that because we know more input is
1054 // coming, so we know none of the merges are root.
1055 //
1056 // This setting is different. We want to feed as much input as possible to
1057 // compress_subtree_wide(), without setting aside anything for the
1058 // chunk_state. If the user gives us 64 KiB, we want to parallelize over
1059 // all 64 KiB at once as a single subtree, if at all possible.
1060 //
1061 // This leads to two problems:
1062 // 1) This 64 KiB input might be the only call that ever gets made to
1063 // update. In this case, the root node of the 64 KiB subtree would be
1064 // the root node of the whole tree, and it would need to be ROOT
1065 // finalized. We can't compress it until we know.
1066 // 2) This 64 KiB input might complete a larger tree, whose root node is
1067 // similarly going to be the the root of the whole tree. For example,
1068 // maybe we have 196 KiB (that is, 128 + 64) hashed so far. We can't
1069 // compress the node at the root of the 256 KiB subtree until we know
1070 // how to finalize it.
1071 //
1072 // The second problem is solved with "lazy merging". That is, when we're
1073 // about to add a CV to the stack, we don't merge it with anything first,
1074 // as the reference impl does. Instead we do merges using the *previous* CV
1075 // that was added, which is sitting on top of the stack, and we put the new
1076 // CV (unmerged) on top of the stack afterwards. This guarantees that we
1077 // never merge the root node until finalize().
1078 //
1079 // Solving the first problem requires an additional tool,
1080 // compress_subtree_to_parent_node(). That function always returns the top
1081 // *two* chaining values of the subtree it's compressing. We then do lazy
1082 // merging with each of them separately, so that the second CV will always
1083 // remain unmerged. (That also helps us support extendable output when
1084 // we're hashing an input all-at-once.)
1085 fn push_cv(&mut self, new_cv: &CVBytes, chunk_counter: u64) {
1086 self.merge_cv_stack(chunk_counter);
1087 self.cv_stack.push(*new_cv);
1088 }
1089
1090 /// Add input bytes to the hash state. You can call this any number of
1091 /// times.
1092 ///
1093 /// This method is always single-threaded. For multithreading support, see
1094 /// [`update_rayon`](#method.update_rayon) below (enabled with the `rayon`
1095 /// Cargo feature).
1096 ///
1097 /// Note that the degree of SIMD parallelism that `update` can use is
1098 /// limited by the size of this input buffer. The 8 KiB buffer currently
1099 /// used by [`std::io::copy`] is enough to leverage AVX2, for example, but
1100 /// not enough to leverage AVX-512. A 16 KiB buffer is large enough to
1101 /// leverage all currently supported SIMD instruction sets.
1102 ///
1103 /// [`std::io::copy`]: https://doc.rust-lang.org/std/io/fn.copy.html
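    ///
    /// # Example
    ///
    /// A minimal sketch that drives `update` through the `std::io::Write`
    /// impl below, via `std::io::copy` (requires the `std` feature, which is
    /// enabled by default):
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// # #[cfg(feature = "std")] {
    /// let mut hasher = iroh_blake3::Hasher::new();
    /// std::io::copy(&mut &b"some input"[..], &mut hasher)?;
    /// assert_eq!(hasher.finalize(), iroh_blake3::hash(b"some input"));
    /// # }
    /// # Ok(())
    /// # }
    /// ```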
1104 pub fn update(&mut self, input: &[u8]) -> &mut Self {
1105 self.update_with_join::<join::SerialJoin>(input)
1106 }
1107
1108 /// Identical to [`update`](Hasher::update), but using Rayon-based
1109 /// multithreading internally.
1110 ///
1111 /// This method is gated by the `rayon` Cargo feature, which is disabled by
1112 /// default but enabled on [docs.rs](https://docs.rs).
1113 ///
1114 /// To get any performance benefit from multithreading, the input buffer
1115 /// needs to be large. As a rule of thumb on x86_64, `update_rayon` is
1116 /// _slower_ than `update` for inputs under 128 KiB. That threshold varies
1117 /// quite a lot across different processors, and it's important to benchmark
1118 /// your specific use case.
1119 ///
1120 /// Memory mapping an entire input file is a simple way to take advantage of
1121 /// multithreading without needing to carefully tune your buffer size or
1122 /// offload IO. However, on spinning disks where random access is expensive,
1123 /// that approach can lead to disk thrashing and terrible IO performance.
1124 /// Note that OS page caching can mask this problem, in which case it might
1125 /// only appear for files larger than available RAM. Again, benchmarking
1126 /// your specific use case is important.
1127 #[cfg(feature = "rayon")]
1128 pub fn update_rayon(&mut self, input: &[u8]) -> &mut Self {
1129 self.update_with_join::<join::RayonJoin>(input)
1130 }
1131
1132 fn update_with_join<J: join::Join>(&mut self, mut input: &[u8]) -> &mut Self {
1133 // If we have some partial chunk bytes in the internal chunk_state, we
1134 // need to finish that chunk first.
1135 if self.chunk_state.len() > 0 {
1136 let want = CHUNK_LEN - self.chunk_state.len();
1137 let take = cmp::min(want, input.len());
1138 self.chunk_state.update(&input[..take]);
1139 input = &input[take..];
1140 if !input.is_empty() {
1141 // We've filled the current chunk, and there's more input
1142 // coming, so we know it's not the root and we can finalize it.
1143 // Then we'll proceed to hashing whole chunks below.
1144 debug_assert_eq!(self.chunk_state.len(), CHUNK_LEN);
1145 let chunk_cv = self.chunk_state.output().chaining_value();
1146 self.push_cv(&chunk_cv, self.chunk_state.chunk_counter);
1147 self.chunk_state = ChunkState::new(
1148 &self.key,
1149 self.chunk_state.chunk_counter + 1,
1150 self.chunk_state.flags,
1151 self.chunk_state.platform,
1152 );
1153 } else {
1154 return self;
1155 }
1156 }
1157
1158 // Now the chunk_state is clear, and we have more input. If there's
1159 // more than a single chunk (so, definitely not the root chunk), hash
1160 // the largest whole subtree we can, with the full benefits of SIMD and
1161 // multithreading parallelism. Two restrictions:
1162 // - The subtree has to be a power-of-2 number of chunks. Only subtrees
1163 // along the right edge can be incomplete, and we don't know where
1164 // the right edge is going to be until we get to finalize().
1165 // - The subtree must evenly divide the total number of chunks up until
1166 // this point (if total is not 0). If the current incomplete subtree
1167 // is only waiting for 1 more chunk, we can't hash a subtree of 4
1168 // chunks. We have to complete the current subtree first.
1169 // Because we might need to break up the input to form powers of 2, or
1170 // to evenly divide what we already have, this part runs in a loop.
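        // For example (illustrative): with 3 chunks already hashed and 8 more
        // chunks of input, this loop hashes subtrees of 1, 4, and 2 chunks in
        // turn, and the final chunk goes into the chunk_state below.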
1171 while input.len() > CHUNK_LEN {
1172 debug_assert_eq!(self.chunk_state.len(), 0, "no partial chunk data");
1173 debug_assert_eq!(CHUNK_LEN.count_ones(), 1, "power of 2 chunk len");
1174 let mut subtree_len = largest_power_of_two_leq(input.len());
1175 let count_so_far = self.chunk_state.chunk_counter * CHUNK_LEN as u64;
1176 // Shrink the subtree_len until it evenly divides the count so far.
1177 // We know that subtree_len itself is a power of 2, so we can use a
1178 // bitmasking trick instead of an actual remainder operation. (Note
1179 // that if the caller consistently passes power-of-2 inputs of the
1180 // same size, as is hopefully typical, this loop condition will
1181 // always fail, and subtree_len will always be the full length of
1182 // the input.)
1183 //
1184 // An aside: We don't have to shrink subtree_len quite this much.
1185 // For example, if count_so_far is 1, we could pass 2 chunks to
1186 // compress_subtree_to_parent_node. Since we'll get 2 CVs back,
1187 // we'll still get the right answer in the end, and we might get to
1188 // use 2-way SIMD parallelism. The problem with this optimization,
            // use 2-way SIMD parallelism. The problem with this optimization
1190 // number of chunks will remain odd, and we'll never graduate to
1191 // higher degrees of parallelism. See
1192 // https://github.com/BLAKE3-team/BLAKE3/issues/69.
1193 while (subtree_len - 1) as u64 & count_so_far != 0 {
1194 subtree_len /= 2;
1195 }
1196 // The shrunken subtree_len might now be 1 chunk long. If so, hash
1197 // that one chunk by itself. Otherwise, compress the subtree into a
1198 // pair of CVs.
1199 let subtree_chunks = (subtree_len / CHUNK_LEN) as u64;
1200 if subtree_len <= CHUNK_LEN {
1201 debug_assert_eq!(subtree_len, CHUNK_LEN);
1202 self.push_cv(
1203 &ChunkState::new(
1204 &self.key,
1205 self.chunk_state.chunk_counter,
1206 self.chunk_state.flags,
1207 self.chunk_state.platform,
1208 )
1209 .update(&input[..subtree_len])
1210 .output()
1211 .chaining_value(),
1212 self.chunk_state.chunk_counter,
1213 );
1214 } else {
1215 // This is the high-performance happy path, though getting here
1216 // depends on the caller giving us a long enough input.
1217 let cv_pair = compress_subtree_to_parent_node::<J>(
1218 &input[..subtree_len],
1219 &self.key,
1220 self.chunk_state.chunk_counter,
1221 self.chunk_state.flags,
1222 self.chunk_state.platform,
1223 );
1224 let left_cv = array_ref!(cv_pair, 0, 32);
1225 let right_cv = array_ref!(cv_pair, 32, 32);
1226 // Push the two CVs we received into the CV stack in order. Because
1227 // the stack merges lazily, this guarantees we aren't merging the
1228 // root.
1229 self.push_cv(left_cv, self.chunk_state.chunk_counter);
1230 self.push_cv(
1231 right_cv,
1232 self.chunk_state.chunk_counter + (subtree_chunks / 2),
1233 );
1234 }
1235 self.chunk_state.chunk_counter += subtree_chunks;
1236 input = &input[subtree_len..];
1237 }
1238
1239 // What remains is 1 chunk or less. Add it to the chunk state.
1240 debug_assert!(input.len() <= CHUNK_LEN);
1241 if !input.is_empty() {
1242 self.chunk_state.update(input);
1243 // Having added some input to the chunk_state, we know what's in
1244 // the CV stack won't become the root node, and we can do an extra
1245 // merge. This simplifies finalize().
1246 self.merge_cv_stack(self.chunk_state.chunk_counter);
1247 }
1248
1249 self
1250 }
1251
1252 fn final_output(&self) -> Output {
1253 // If the current chunk is the only chunk, that makes it the root node
1254 // also. Convert it directly into an Output. Otherwise, we need to
1255 // merge subtrees below.
1256 if self.cv_stack.is_empty() {
1257 // debug_assert_eq!(self.chunk_state.chunk_counter, 0);
1258 return self.chunk_state.output();
1259 }
1260
1261 // If there are any bytes in the ChunkState, finalize that chunk and
1262 // merge its CV with everything in the CV stack. In that case, the work
1263 // we did at the end of update() above guarantees that the stack
1264 // doesn't contain any unmerged subtrees that need to be merged first.
1265 // (This is important, because if there were two chunk hashes sitting
1266 // on top of the stack, they would need to merge with each other, and
1267 // merging a new chunk hash into them would be incorrect.)
1268 //
1269 // If there are no bytes in the ChunkState, we'll merge what's already
1270 // in the stack. In this case it's fine if there are unmerged chunks on
1271 // top, because we'll merge them with each other. Note that the case of
1272 // the empty chunk is taken care of above.
1273 let mut output: Output;
1274 let mut num_cvs_remaining = self.cv_stack.len();
1275 if self.chunk_state.len() > 0 {
1276 // debug_assert_eq!(
1277 // self.cv_stack.len(),
1278 // self.chunk_state.chunk_counter.count_ones() as usize,
1279 // "cv stack does not need a merge"
1280 // );
1281 output = self.chunk_state.output();
1282 } else {
1283 debug_assert!(self.cv_stack.len() >= 2);
1284 output = parent_node_output(
1285 &self.cv_stack[num_cvs_remaining - 2],
1286 &self.cv_stack[num_cvs_remaining - 1],
1287 &self.key,
1288 self.chunk_state.flags,
1289 self.chunk_state.platform,
1290 );
1291 num_cvs_remaining -= 2;
1292 }
1293 while num_cvs_remaining > 0 {
1294 output = parent_node_output(
1295 &self.cv_stack[num_cvs_remaining - 1],
1296 &output.chaining_value(),
1297 &self.key,
1298 self.chunk_state.flags,
1299 self.chunk_state.platform,
1300 );
1301 num_cvs_remaining -= 1;
1302 }
1303 output
1304 }
1305
1306 /// Finalize the hash state and return the [`Hash`](struct.Hash.html) of
1307 /// the input.
1308 ///
1309 /// This method is idempotent. Calling it twice will give the same result.
1310 /// You can also add more input and finalize again.
1311 pub fn finalize(&self) -> Hash {
1312 self.final_output().root_hash()
1313 }
1314
1315 fn finalize_node(&self, is_root: bool) -> Hash {
1316 let output = self.final_output();
1317 if is_root {
1318 output.root_hash()
1319 } else {
1320 output.chaining_value().into()
1321 }
1322 }
1323
1324 /// Finalize the hash state and return an [`OutputReader`], which can
1325 /// supply any number of output bytes.
1326 ///
1327 /// This method is idempotent. Calling it twice will give the same result.
1328 /// You can also add more input and finalize again.
1329 ///
1330 /// [`OutputReader`]: struct.OutputReader.html
1331 pub fn finalize_xof(&self) -> OutputReader {
1332 OutputReader::new(self.final_output())
1333 }
1334
1335 /// Return the total number of bytes hashed so far.
1336 pub fn count(&self) -> u64 {
1337 self.chunk_state.chunk_counter * CHUNK_LEN as u64 + self.chunk_state.len() as u64
1338 }
1339}
1340
1341// Don't derive(Debug), because the state may be secret.
1342impl fmt::Debug for Hasher {
1343 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1344 f.debug_struct("Hasher")
1345 .field("flags", &self.chunk_state.flags)
1346 .field("platform", &self.chunk_state.platform)
1347 .finish()
1348 }
1349}
1350
1351impl Default for Hasher {
1352 #[inline]
1353 fn default() -> Self {
1354 Self::new()
1355 }
1356}
1357
1358#[cfg(feature = "std")]
1359impl std::io::Write for Hasher {
1360 /// This is equivalent to [`update`](#method.update).
1361 #[inline]
1362 fn write(&mut self, input: &[u8]) -> std::io::Result<usize> {
1363 self.update(input);
1364 Ok(input.len())
1365 }
1366
1367 #[inline]
1368 fn flush(&mut self) -> std::io::Result<()> {
1369 Ok(())
1370 }
1371}
1372
1373/// An incremental reader for extended output, returned by
1374/// [`Hasher::finalize_xof`](struct.Hasher.html#method.finalize_xof).
1375///
1376/// Shorter BLAKE3 outputs are prefixes of longer ones, and explicitly requesting a short output is
1377/// equivalent to truncating the default-length output. Note that this is a difference between
1378/// BLAKE2 and BLAKE3.
1379///
1380/// # Security notes
1381///
1382/// Outputs shorter than the default length of 32 bytes (256 bits) provide less security. An N-bit
1383/// BLAKE3 output is intended to provide N bits of first and second preimage resistance and N/2
1384/// bits of collision resistance, for any N up to 256. Longer outputs don't provide any additional
1385/// security.
1386///
1387/// Avoid relying on the secrecy of the output offset, that is, the number of output bytes read or
1388/// the arguments to [`seek`](struct.OutputReader.html#method.seek) or
1389/// [`set_position`](struct.OutputReader.html#method.set_position). [_Block-Cipher-Based Tree
1390/// Hashing_ by Aldo Gunsing](https://eprint.iacr.org/2022/283) shows that an attacker who knows
1391/// both the message and the key (if any) can easily determine the offset of an extended output.
1392/// For comparison, AES-CTR has a similar property: if you know the key, you can decrypt a block
1393/// from an unknown position in the output stream to recover its block index. Callers with strong
1394/// secret keys aren't affected in practice, but secret offsets are a [design
1395/// smell](https://en.wikipedia.org/wiki/Design_smell) in any case.
1396#[cfg_attr(feature = "zeroize", derive(zeroize::Zeroize))]
1397#[derive(Clone)]
1398pub struct OutputReader {
1399 inner: Output,
1400 position_within_block: u8,
1401}
1402
1403impl OutputReader {
1404 fn new(inner: Output) -> Self {
1405 Self {
1406 inner,
1407 position_within_block: 0,
1408 }
1409 }
1410
1411 /// Fill a buffer with output bytes and advance the position of the
1412 /// `OutputReader`. This is equivalent to [`Read::read`], except that it
1413 /// doesn't return a `Result`. Both methods always fill the entire buffer.
1414 ///
1415 /// Note that `OutputReader` doesn't buffer output bytes internally, so
1416 /// calling `fill` repeatedly with a short-length or odd-length slice will
1417 /// end up performing the same compression multiple times. If you're
1418 /// reading output in a loop, prefer a slice length that's a multiple of
1419 /// 64.
1420 ///
1421 /// The maximum output size of BLAKE3 is 2<sup>64</sup>-1 bytes. If you try
1422 /// to extract more than that, for example by seeking near the end and
1423 /// reading further, the behavior is unspecified.
1424 ///
1425 /// [`Read::read`]: #method.read
1426 pub fn fill(&mut self, mut buf: &mut [u8]) {
1427 while !buf.is_empty() {
1428 let block: [u8; BLOCK_LEN] = self.inner.root_output_block();
1429 let output_bytes = &block[self.position_within_block as usize..];
1430 let take = cmp::min(buf.len(), output_bytes.len());
1431 buf[..take].copy_from_slice(&output_bytes[..take]);
1432 buf = &mut buf[take..];
1433 self.position_within_block += take as u8;
1434 if self.position_within_block == BLOCK_LEN as u8 {
1435 self.inner.counter += 1;
1436 self.position_within_block = 0;
1437 }
1438 }
1439 }
1440
1441 /// Return the current read position in the output stream. This is
1442 /// equivalent to [`Seek::stream_position`], except that it doesn't return
1443 /// a `Result`. The position of a new `OutputReader` starts at 0, and each
1444 /// call to [`fill`] or [`Read::read`] moves the position forward by the
1445 /// number of bytes read.
1446 ///
1447 /// [`Seek::stream_position`]: #method.stream_position
1448 /// [`fill`]: #method.fill
1449 /// [`Read::read`]: #method.read
1450 pub fn position(&self) -> u64 {
1451 self.inner.counter * BLOCK_LEN as u64 + self.position_within_block as u64
1452 }
1453
1454 /// Seek to a new read position in the output stream. This is equivalent to
1455 /// calling [`Seek::seek`] with [`SeekFrom::Start`], except that it doesn't
1456 /// return a `Result`.
1457 ///
1458 /// [`Seek::seek`]: #method.seek
1459 /// [`SeekFrom::Start`]: https://doc.rust-lang.org/std/io/enum.SeekFrom.html
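    ///
    /// # Example
    ///
    /// A minimal sketch showing that seeking forward is equivalent to skipping
    /// output bytes:
    ///
    /// ```
    /// let mut full = [0u8; 128];
    /// iroh_blake3::Hasher::new().update(b"foo").finalize_xof().fill(&mut full);
    ///
    /// let mut reader = iroh_blake3::Hasher::new().update(b"foo").finalize_xof();
    /// reader.set_position(64);
    /// let mut tail = [0u8; 64];
    /// reader.fill(&mut tail);
    /// assert_eq!(&full[64..], &tail[..]);
    /// ```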
1460 pub fn set_position(&mut self, position: u64) {
1461 self.position_within_block = (position % BLOCK_LEN as u64) as u8;
1462 self.inner.counter = position / BLOCK_LEN as u64;
1463 }
1464}
1465
1466// Don't derive(Debug), because the state may be secret.
1467impl fmt::Debug for OutputReader {
1468 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1469 f.debug_struct("OutputReader")
1470 .field("position", &self.position())
1471 .finish()
1472 }
1473}
1474
1475#[cfg(feature = "std")]
1476impl std::io::Read for OutputReader {
1477 #[inline]
1478 fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
1479 self.fill(buf);
1480 Ok(buf.len())
1481 }
1482}
1483
1484#[cfg(feature = "std")]
1485impl std::io::Seek for OutputReader {
1486 fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
1487 let max_position = u64::max_value() as i128;
1488 let target_position: i128 = match pos {
1489 std::io::SeekFrom::Start(x) => x as i128,
1490 std::io::SeekFrom::Current(x) => self.position() as i128 + x as i128,
1491 std::io::SeekFrom::End(_) => {
1492 return Err(std::io::Error::new(
1493 std::io::ErrorKind::InvalidInput,
1494 "seek from end not supported",
1495 ));
1496 }
1497 };
1498 if target_position < 0 {
1499 return Err(std::io::Error::new(
1500 std::io::ErrorKind::InvalidInput,
1501 "seek before start",
1502 ));
1503 }
1504 self.set_position(cmp::min(target_position, max_position) as u64);
1505 Ok(self.position())
1506 }
1507}