Coverage Report

Created: 2025-10-10 07:11

/rust/registry/src/index.crates.io-1949cf8c6b5b557f/blake3-1.7.0/src/lib.rs
//! The official Rust implementation of the [BLAKE3] cryptographic hash
//! function.
//!
//! # Examples
//!
//! ```
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Hash an input all at once.
//! let hash1 = blake3::hash(b"foobarbaz");
//!
//! // Hash an input incrementally.
//! let mut hasher = blake3::Hasher::new();
//! hasher.update(b"foo");
//! hasher.update(b"bar");
//! hasher.update(b"baz");
//! let hash2 = hasher.finalize();
//! assert_eq!(hash1, hash2);
//!
//! // Extended output. OutputReader also implements Read and Seek.
//! # #[cfg(feature = "std")] {
//! let mut output = [0; 1000];
//! let mut output_reader = hasher.finalize_xof();
//! output_reader.fill(&mut output);
//! assert_eq!(hash1, output[..32]);
//! # }
//!
//! // Print a hash as hex.
//! println!("{}", hash1);
//! # Ok(())
//! # }
//! ```
//!
//! # Cargo Features
//!
//! The `std` feature (the only feature enabled by default) is required for
//! implementations of the [`Write`] and [`Seek`] traits, the
//! [`update_reader`](Hasher::update_reader) helper method, and runtime CPU
//! feature detection on x86. If this feature is disabled, the only way to use
//! the x86 SIMD implementations is to enable the corresponding instruction sets
//! globally, with e.g. `RUSTFLAGS="-C target-cpu=native"`. The resulting binary
//! will not be portable to other machines.
//!
//! The `rayon` feature (disabled by default, but enabled for [docs.rs]) adds
//! the [`update_rayon`](Hasher::update_rayon) and (in combination with `mmap`
//! below) [`update_mmap_rayon`](Hasher::update_mmap_rayon) methods, for
//! multithreaded hashing. However, even if this feature is enabled, all other
//! APIs remain single-threaded.
//!
//! The `mmap` feature (disabled by default, but enabled for [docs.rs]) adds the
//! [`update_mmap`](Hasher::update_mmap) and (in combination with `rayon` above)
//! [`update_mmap_rayon`](Hasher::update_mmap_rayon) helper methods for
//! memory-mapped IO.
//!
//! The `zeroize` feature (disabled by default, but enabled for [docs.rs])
//! implements
//! [`Zeroize`](https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html) for
//! this crate's types.
//!
//! The `serde` feature (disabled by default, but enabled for [docs.rs]) implements
//! [`serde::Serialize`](https://docs.rs/serde/latest/serde/trait.Serialize.html) and
//! [`serde::Deserialize`](https://docs.rs/serde/latest/serde/trait.Deserialize.html)
//! for [`Hash`](struct@Hash).
//!
//! The NEON implementation is enabled by default for AArch64 but requires the
//! `neon` feature for other ARM targets. Not all ARMv7 CPUs support NEON, and
//! enabling this feature will produce a binary that's not portable to CPUs
//! without NEON support.
//!
//! The `traits-preview` feature enables implementations of traits from the
//! RustCrypto [`digest`] crate, and re-exports that crate as `traits::digest`.
//! However, the traits aren't stable, and they're expected to change in
//! incompatible ways before that crate reaches 1.0. For that reason, this crate
//! makes no SemVer guarantees for this feature, and callers who use it should
//! expect breaking changes between patch versions. (The "-preview" feature name
//! follows the conventions of the RustCrypto [`signature`] crate.)
//!
//! [`Hasher::update_rayon`]: struct.Hasher.html#method.update_rayon
//! [BLAKE3]: https://blake3.io
//! [Rayon]: https://github.com/rayon-rs/rayon
//! [docs.rs]: https://docs.rs/
//! [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
//! [`Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
//! [`digest`]: https://crates.io/crates/digest
//! [`signature`]: https://crates.io/crates/signature
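
// An added sketch (editor's illustration, not upstream documentation): with
// the `mmap` and `rayon` features described above enabled, a large file can
// be hashed with memory-mapped, multithreaded IO along these lines:
//
//     let mut hasher = blake3::Hasher::new();
//     hasher.update_mmap_rayon("some_big_file.bin")?;
//     println!("{}", hasher.finalize());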
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(test)]
mod test;

// The guts module is for incremental use cases like the `bao` crate that need
// to explicitly compute chunk and parent chaining values. It is semi-stable
// and likely to keep working, but largely undocumented and not intended for
// widespread use.
#[doc(hidden)]
pub mod guts;

/// Undocumented and unstable, for benchmarks only.
#[doc(hidden)]
pub mod platform;

// Platform-specific implementations of the compression function. These
// BLAKE3-specific cfg flags are set in build.rs.
#[cfg(blake3_avx2_rust)]
#[path = "rust_avx2.rs"]
mod avx2;
#[cfg(blake3_avx2_ffi)]
#[path = "ffi_avx2.rs"]
mod avx2;
#[cfg(blake3_avx512_ffi)]
#[path = "ffi_avx512.rs"]
mod avx512;
#[cfg(blake3_neon)]
#[path = "ffi_neon.rs"]
mod neon;
mod portable;
#[cfg(blake3_sse2_rust)]
#[path = "rust_sse2.rs"]
mod sse2;
#[cfg(blake3_sse2_ffi)]
#[path = "ffi_sse2.rs"]
mod sse2;
#[cfg(blake3_sse41_rust)]
#[path = "rust_sse41.rs"]
mod sse41;
#[cfg(blake3_sse41_ffi)]
#[path = "ffi_sse41.rs"]
mod sse41;

#[cfg(blake3_wasm32_simd)]
#[path = "wasm32_simd.rs"]
mod wasm32_simd;

#[cfg(feature = "traits-preview")]
pub mod traits;

mod io;
mod join;

use arrayref::{array_mut_ref, array_ref};
use arrayvec::{ArrayString, ArrayVec};
use core::cmp;
use core::fmt;
use platform::{Platform, MAX_SIMD_DEGREE, MAX_SIMD_DEGREE_OR_2};
#[cfg(feature = "zeroize")]
use zeroize::Zeroize;

/// The number of bytes in a [`Hash`](struct.Hash.html), 32.
pub const OUT_LEN: usize = 32;

/// The number of bytes in a key, 32.
pub const KEY_LEN: usize = 32;

const MAX_DEPTH: usize = 54; // 2^54 * CHUNK_LEN = 2^64
use guts::{BLOCK_LEN, CHUNK_LEN};

// While iterating the compression function within a chunk, the CV is
// represented as words, to avoid doing two extra endianness conversions for
// each compression in the portable implementation. But the hash_many interface
// needs to hash both input bytes and parent nodes, so it's better for its
// output CVs to be represented as bytes.
type CVWords = [u32; 8];
type CVBytes = [u8; 32]; // little-endian

const IV: &CVWords = &[
    0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
];

const MSG_SCHEDULE: [[usize; 16]; 7] = [
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
    [2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8],
    [3, 4, 10, 12, 13, 2, 7, 14, 6, 5, 9, 0, 11, 15, 8, 1],
    [10, 7, 12, 9, 14, 3, 13, 15, 4, 0, 11, 2, 5, 8, 1, 6],
    [12, 13, 9, 11, 15, 10, 14, 8, 7, 2, 5, 3, 0, 1, 6, 4],
    [9, 14, 11, 5, 8, 12, 15, 1, 13, 3, 0, 10, 2, 6, 4, 7],
    [11, 15, 5, 0, 1, 9, 8, 6, 14, 10, 2, 12, 3, 4, 7, 13],
];

// These are the internal flags that we use to domain separate root/non-root,
// chunk/parent, and chunk beginning/middle/end. These get set at the low end
// of the block flags word in the compression function, so their values start
// low and go up.
const CHUNK_START: u8 = 1 << 0;
const CHUNK_END: u8 = 1 << 1;
const PARENT: u8 = 1 << 2;
const ROOT: u8 = 1 << 3;
const KEYED_HASH: u8 = 1 << 4;
const DERIVE_KEY_CONTEXT: u8 = 1 << 5;
const DERIVE_KEY_MATERIAL: u8 = 1 << 6;
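
// For illustration (an added note, not from the upstream source): hashing a
// message that fits in a single block compresses that one block with
// CHUNK_START | CHUNK_END, plus ROOT at finalization, since the block both
// begins and ends its chunk and that chunk is the root of the one-node tree.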

#[inline]
fn counter_low(counter: u64) -> u32 {
    counter as u32
}

#[inline]
fn counter_high(counter: u64) -> u32 {
    (counter >> 32) as u32
}

/// An output of the default size, 32 bytes, which provides constant-time
/// equality checking.
///
/// `Hash` implements [`From`] and [`Into`] for `[u8; 32]`, and it provides
/// [`from_bytes`] and [`as_bytes`] for explicit conversions between itself and
/// `[u8; 32]`. However, byte arrays and slices don't provide constant-time
/// equality checking, which is often a security requirement in software that
/// handles private data. `Hash` doesn't implement [`Deref`] or [`AsRef`], to
/// avoid situations where a type conversion happens implicitly and the
/// constant-time property is accidentally lost.
///
/// `Hash` provides the [`to_hex`] and [`from_hex`] methods for converting to
/// and from hexadecimal. It also implements [`Display`] and [`FromStr`].
///
/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
/// [`Into`]: https://doc.rust-lang.org/std/convert/trait.Into.html
/// [`as_bytes`]: #method.as_bytes
/// [`from_bytes`]: #method.from_bytes
/// [`Deref`]: https://doc.rust-lang.org/stable/std/ops/trait.Deref.html
/// [`AsRef`]: https://doc.rust-lang.org/std/convert/trait.AsRef.html
/// [`to_hex`]: #method.to_hex
/// [`from_hex`]: #method.from_hex
/// [`Display`]: https://doc.rust-lang.org/std/fmt/trait.Display.html
/// [`FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
#[derive(Clone, Copy, Hash)]
pub struct Hash([u8; OUT_LEN]);

impl Hash {
    /// The raw bytes of the `Hash`. Note that byte arrays don't provide
    /// constant-time equality checking, so if you need to compare hashes,
    /// prefer the `Hash` type.
    #[inline]
    pub const fn as_bytes(&self) -> &[u8; OUT_LEN] {
        &self.0
    }

    /// Create a `Hash` from its raw bytes representation.
    pub const fn from_bytes(bytes: [u8; OUT_LEN]) -> Self {
        Self(bytes)
    }

    /// Create a `Hash` from its raw bytes representation as a slice.
    ///
    /// Returns an error if the slice is not exactly 32 bytes long.
    pub fn from_slice(bytes: &[u8]) -> Result<Self, core::array::TryFromSliceError> {
        Ok(Self::from_bytes(bytes.try_into()?))
    }

    /// Encode a `Hash` in lowercase hexadecimal.
    ///
    /// The returned [`ArrayString`] is a fixed size and doesn't allocate memory
    /// on the heap. Note that [`ArrayString`] doesn't provide constant-time
    /// equality checking, so if you need to compare hashes, prefer the `Hash`
    /// type.
    ///
    /// [`ArrayString`]: https://docs.rs/arrayvec/0.5.1/arrayvec/struct.ArrayString.html
    pub fn to_hex(&self) -> ArrayString<{ 2 * OUT_LEN }> {
        let mut s = ArrayString::new();
        let table = b"0123456789abcdef";
        for &b in self.0.iter() {
            s.push(table[(b >> 4) as usize] as char);
            s.push(table[(b & 0xf) as usize] as char);
        }
        s
    }

    /// Decode a `Hash` from hexadecimal. Both uppercase and lowercase ASCII
    /// bytes are supported.
    ///
    /// Any byte outside the ranges `'0'...'9'`, `'a'...'f'`, and `'A'...'F'`
    /// results in an error. An input length other than 64 also results in an
    /// error.
    ///
    /// Note that `Hash` also implements `FromStr`, so `Hash::from_hex("...")`
    /// is equivalent to `"...".parse()`.
    pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, HexError> {
        fn hex_val(byte: u8) -> Result<u8, HexError> {
            match byte {
                b'A'..=b'F' => Ok(byte - b'A' + 10),
                b'a'..=b'f' => Ok(byte - b'a' + 10),
                b'0'..=b'9' => Ok(byte - b'0'),
                _ => Err(HexError(HexErrorInner::InvalidByte(byte))),
            }
        }
        let hex_bytes: &[u8] = hex.as_ref();
        if hex_bytes.len() != OUT_LEN * 2 {
            return Err(HexError(HexErrorInner::InvalidLen(hex_bytes.len())));
        }
        let mut hash_bytes: [u8; OUT_LEN] = [0; OUT_LEN];
        for i in 0..OUT_LEN {
            hash_bytes[i] = 16 * hex_val(hex_bytes[2 * i])? + hex_val(hex_bytes[2 * i + 1])?;
        }
        Ok(Hash::from(hash_bytes))
    }
}
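
// An added sketch (editor's example, not part of the upstream source): a
// test-style exercise of the hex round-trip and constant-time comparisons
// documented above, using only the public APIs of this module.
#[cfg(test)]
#[test]
fn hex_round_trip_sketch() {
    let h = hash(b"foo");
    // to_hex() and from_hex() invert each other, and FromStr matches from_hex().
    let parsed = Hash::from_hex(h.to_hex().as_str()).unwrap();
    assert_eq!(h, parsed);
    let also_parsed: Hash = h.to_hex().as_str().parse().unwrap();
    assert_eq!(h, also_parsed);
    // Comparing a Hash against a byte array goes through the constant-time
    // PartialEq impls below.
    assert_eq!(h, *h.as_bytes());
}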

impl From<[u8; OUT_LEN]> for Hash {
    #[inline]
    fn from(bytes: [u8; OUT_LEN]) -> Self {
        Self::from_bytes(bytes)
    }
}

impl From<Hash> for [u8; OUT_LEN] {
    #[inline]
    fn from(hash: Hash) -> Self {
        hash.0
    }
}

impl core::str::FromStr for Hash {
    type Err = HexError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Hash::from_hex(s)
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for Hash {
    fn zeroize(&mut self) {
        // Destructuring to trigger compile error as a reminder to update this impl.
        let Self(bytes) = self;
        bytes.zeroize();
    }
}

/// This implementation is constant-time.
impl PartialEq for Hash {
    #[inline]
    fn eq(&self, other: &Hash) -> bool {
        constant_time_eq::constant_time_eq_32(&self.0, &other.0)
    }
}

/// This implementation is constant-time.
impl PartialEq<[u8; OUT_LEN]> for Hash {
    #[inline]
    fn eq(&self, other: &[u8; OUT_LEN]) -> bool {
        constant_time_eq::constant_time_eq_32(&self.0, other)
    }
}

/// This implementation is constant-time if the target is 32 bytes long.
impl PartialEq<[u8]> for Hash {
    #[inline]
    fn eq(&self, other: &[u8]) -> bool {
        constant_time_eq::constant_time_eq(&self.0, other)
    }
}

impl Eq for Hash {}

impl fmt::Display for Hash {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Formatting field as `&str` to reduce code size since the `Debug`
        // dynamic dispatch table for `&str` is likely needed elsewhere already,
        // but that for `ArrayString<[u8; 64]>` is not.
        let hex = self.to_hex();
        let hex: &str = hex.as_str();

        f.write_str(hex)
    }
}

impl fmt::Debug for Hash {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Formatting field as `&str` to reduce code size since the `Debug`
        // dynamic dispatch table for `&str` is likely needed elsewhere already,
        // but that for `ArrayString<[u8; 64]>` is not.
        let hex = self.to_hex();
        let hex: &str = hex.as_str();

        f.debug_tuple("Hash").field(&hex).finish()
    }
}

/// The error type for [`Hash::from_hex`].
///
/// The `.to_string()` representation of this error currently distinguishes between bad length
/// errors and bad character errors. This is to help with logging and debugging, but it isn't a
/// stable API detail, and it may change at any time.
#[derive(Clone, Debug)]
pub struct HexError(HexErrorInner);

#[derive(Clone, Debug)]
enum HexErrorInner {
    InvalidByte(u8),
    InvalidLen(usize),
}

impl fmt::Display for HexError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.0 {
            HexErrorInner::InvalidByte(byte) => {
                if byte < 128 {
                    write!(f, "invalid hex character: {:?}", byte as char)
                } else {
                    write!(f, "invalid hex character: 0x{:x}", byte)
                }
            }
            HexErrorInner::InvalidLen(len) => {
                write!(f, "expected 64 hex bytes, received {}", len)
            }
        }
    }
}

#[cfg(feature = "std")]
impl std::error::Error for HexError {}

// Each chunk or parent node can produce either a 32-byte chaining value or, by
// setting the ROOT flag, any number of final output bytes. The Output struct
// captures the state just prior to choosing between those two possibilities.
#[derive(Clone)]
struct Output {
    input_chaining_value: CVWords,
    block: [u8; 64],
    block_len: u8,
    counter: u64,
    flags: u8,
    platform: Platform,
}

impl Output {
    fn chaining_value(&self) -> CVBytes {
        let mut cv = self.input_chaining_value;
        self.platform.compress_in_place(
            &mut cv,
            &self.block,
            self.block_len,
            self.counter,
            self.flags,
        );
        platform::le_bytes_from_words_32(&cv)
    }

    fn root_hash(&self) -> Hash {
        debug_assert_eq!(self.counter, 0);
        let mut cv = self.input_chaining_value;
        self.platform
            .compress_in_place(&mut cv, &self.block, self.block_len, 0, self.flags | ROOT);
        Hash(platform::le_bytes_from_words_32(&cv))
    }

    fn root_output_block(&self) -> [u8; 2 * OUT_LEN] {
        self.platform.compress_xof(
            &self.input_chaining_value,
            &self.block,
            self.block_len,
            self.counter,
            self.flags | ROOT,
        )
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for Output {
    fn zeroize(&mut self) {
        // Destructuring to trigger compile error as a reminder to update this impl.
        let Self {
            input_chaining_value,
            block,
            block_len,
            counter,
            flags,
            platform: _,
        } = self;

        input_chaining_value.zeroize();
        block.zeroize();
        block_len.zeroize();
        counter.zeroize();
        flags.zeroize();
    }
}

#[derive(Clone)]
struct ChunkState {
    cv: CVWords,
    chunk_counter: u64,
    buf: [u8; BLOCK_LEN],
    buf_len: u8,
    blocks_compressed: u8,
    flags: u8,
    platform: Platform,
}

impl ChunkState {
    fn new(key: &CVWords, chunk_counter: u64, flags: u8, platform: Platform) -> Self {
        Self {
            cv: *key,
            chunk_counter,
            buf: [0; BLOCK_LEN],
            buf_len: 0,
            blocks_compressed: 0,
            flags,
            platform,
        }
    }

    fn len(&self) -> usize {
        BLOCK_LEN * self.blocks_compressed as usize + self.buf_len as usize
    }

    fn fill_buf(&mut self, input: &mut &[u8]) {
        let want = BLOCK_LEN - self.buf_len as usize;
        let take = cmp::min(want, input.len());
        self.buf[self.buf_len as usize..][..take].copy_from_slice(&input[..take]);
        self.buf_len += take as u8;
        *input = &input[take..];
    }

    fn start_flag(&self) -> u8 {
        if self.blocks_compressed == 0 {
            CHUNK_START
        } else {
            0
        }
    }

    // Try to avoid buffering as much as possible, by compressing directly from
    // the input slice when full blocks are available.
    fn update(&mut self, mut input: &[u8]) -> &mut Self {
        if self.buf_len > 0 {
            self.fill_buf(&mut input);
            if !input.is_empty() {
                debug_assert_eq!(self.buf_len as usize, BLOCK_LEN);
                let block_flags = self.flags | self.start_flag(); // borrowck
                self.platform.compress_in_place(
                    &mut self.cv,
                    &self.buf,
                    BLOCK_LEN as u8,
                    self.chunk_counter,
                    block_flags,
                );
                self.buf_len = 0;
                self.buf = [0; BLOCK_LEN];
                self.blocks_compressed += 1;
            }
        }

        while input.len() > BLOCK_LEN {
            debug_assert_eq!(self.buf_len, 0);
            let block_flags = self.flags | self.start_flag(); // borrowck
            self.platform.compress_in_place(
                &mut self.cv,
                array_ref!(input, 0, BLOCK_LEN),
                BLOCK_LEN as u8,
                self.chunk_counter,
                block_flags,
            );
            self.blocks_compressed += 1;
            input = &input[BLOCK_LEN..];
        }

        self.fill_buf(&mut input);
        debug_assert!(input.is_empty());
        debug_assert!(self.len() <= CHUNK_LEN);
        self
    }

    fn output(&self) -> Output {
        let block_flags = self.flags | self.start_flag() | CHUNK_END;
        Output {
            input_chaining_value: self.cv,
            block: self.buf,
            block_len: self.buf_len,
            counter: self.chunk_counter,
            flags: block_flags,
            platform: self.platform,
        }
    }
}

// Don't derive(Debug), because the state may be secret.
impl fmt::Debug for ChunkState {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("ChunkState")
            .field("len", &self.len())
            .field("chunk_counter", &self.chunk_counter)
            .field("flags", &self.flags)
            .field("platform", &self.platform)
            .finish()
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for ChunkState {
    fn zeroize(&mut self) {
        // Destructuring to trigger compile error as a reminder to update this impl.
        let Self {
            cv,
            chunk_counter,
            buf,
            buf_len,
            blocks_compressed,
            flags,
            platform: _,
        } = self;

        cv.zeroize();
        chunk_counter.zeroize();
        buf.zeroize();
        buf_len.zeroize();
        blocks_compressed.zeroize();
        flags.zeroize();
    }
}

// IMPLEMENTATION NOTE
// ===================
// The recursive function compress_subtree_wide(), implemented below, is the
// basis of high-performance BLAKE3. We use it both for all-at-once hashing,
// and for the incremental input with Hasher (though we have to be careful with
// subtree boundaries in the incremental case). compress_subtree_wide() applies
// several optimizations at the same time:
// - Multithreading with Rayon.
// - Parallel chunk hashing with SIMD.
// - Parallel parent hashing with SIMD. Note that while SIMD chunk hashing
//   maxes out at MAX_SIMD_DEGREE*CHUNK_LEN, parallel parent hashing continues
//   to benefit from larger inputs, because more levels of the tree can use
//   full-width SIMD vectors for parent hashing. Without parallel parent
//   hashing, we lose about 10% of overall throughput on AVX2 and AVX-512.

/// Undocumented and unstable, for benchmarks only.
#[doc(hidden)]
#[derive(Clone, Copy)]
pub enum IncrementCounter {
    Yes,
    No,
}

impl IncrementCounter {
    #[inline]
    fn yes(&self) -> bool {
        match self {
            IncrementCounter::Yes => true,
            IncrementCounter::No => false,
        }
    }
}

// The largest power of two less than or equal to `n`, used for left_len()
// immediately below, and also directly in Hasher::update().
fn largest_power_of_two_leq(n: usize) -> usize {
    ((n / 2) + 1).next_power_of_two()
}

// Given some input larger than one chunk, return the number of bytes that
// should go in the left subtree. This is the largest power-of-2 number of
// chunks that leaves at least 1 byte for the right subtree.
fn left_len(content_len: usize) -> usize {
    debug_assert!(content_len > CHUNK_LEN);
    // Subtract 1 to reserve at least one byte for the right side.
    let full_chunks = (content_len - 1) / CHUNK_LEN;
    largest_power_of_two_leq(full_chunks) * CHUNK_LEN
}
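
// An added sketch (editor's example, not part of the upstream source): worked
// values for the two helpers above, checked against the definitions in their
// comments.
#[cfg(test)]
#[test]
fn left_len_sketch() {
    assert_eq!(largest_power_of_two_leq(1), 1);
    assert_eq!(largest_power_of_two_leq(6), 4);
    assert_eq!(largest_power_of_two_leq(8), 8);
    // 2 chunks plus 1 byte: both chunks go left, leaving 1 byte on the right.
    assert_eq!(left_len(2 * CHUNK_LEN + 1), 2 * CHUNK_LEN);
    // Exactly 4 chunks: one byte is reserved for the right side, so the
    // largest power-of-2 number of full chunks on the left is 2.
    assert_eq!(left_len(4 * CHUNK_LEN), 2 * CHUNK_LEN);
}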

// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
// on a single thread. Write out the chunk chaining values and return the
// number of chunks hashed. These chunks are never the root and never empty;
// those cases use a different codepath.
fn compress_chunks_parallel(
    input: &[u8],
    key: &CVWords,
    chunk_counter: u64,
    flags: u8,
    platform: Platform,
    out: &mut [u8],
) -> usize {
    debug_assert!(!input.is_empty(), "empty chunks below the root");
    debug_assert!(input.len() <= MAX_SIMD_DEGREE * CHUNK_LEN);

    let mut chunks_exact = input.chunks_exact(CHUNK_LEN);
    let mut chunks_array = ArrayVec::<&[u8; CHUNK_LEN], MAX_SIMD_DEGREE>::new();
    for chunk in &mut chunks_exact {
        chunks_array.push(array_ref!(chunk, 0, CHUNK_LEN));
    }
    platform.hash_many(
        &chunks_array,
        key,
        chunk_counter,
        IncrementCounter::Yes,
        flags,
        CHUNK_START,
        CHUNK_END,
        out,
    );

    // Hash the remaining partial chunk, if there is one. Note that the empty
    // chunk (meaning the empty message) is a different codepath.
    let chunks_so_far = chunks_array.len();
    if !chunks_exact.remainder().is_empty() {
        let counter = chunk_counter + chunks_so_far as u64;
        let mut chunk_state = ChunkState::new(key, counter, flags, platform);
        chunk_state.update(chunks_exact.remainder());
        *array_mut_ref!(out, chunks_so_far * OUT_LEN, OUT_LEN) =
            chunk_state.output().chaining_value();
        chunks_so_far + 1
    } else {
        chunks_so_far
    }
}

// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
// on a single thread. Write out the parent chaining values and return the
// number of parents hashed. (If there's an odd input chaining value left over,
// return it as an additional output.) These parents are never the root and
// never empty; those cases use a different codepath.
fn compress_parents_parallel(
    child_chaining_values: &[u8],
    key: &CVWords,
    flags: u8,
    platform: Platform,
    out: &mut [u8],
) -> usize {
    debug_assert_eq!(child_chaining_values.len() % OUT_LEN, 0, "wacky hash bytes");
    let num_children = child_chaining_values.len() / OUT_LEN;
    debug_assert!(num_children >= 2, "not enough children");
    debug_assert!(num_children <= 2 * MAX_SIMD_DEGREE_OR_2, "too many");

    let mut parents_exact = child_chaining_values.chunks_exact(BLOCK_LEN);
    // Use MAX_SIMD_DEGREE_OR_2 rather than MAX_SIMD_DEGREE here, because of
    // the requirements of compress_subtree_wide().
    let mut parents_array = ArrayVec::<&[u8; BLOCK_LEN], MAX_SIMD_DEGREE_OR_2>::new();
    for parent in &mut parents_exact {
        parents_array.push(array_ref!(parent, 0, BLOCK_LEN));
    }
    platform.hash_many(
        &parents_array,
        key,
        0, // Parents always use counter 0.
        IncrementCounter::No,
        flags | PARENT,
        0, // Parents have no start flags.
        0, // Parents have no end flags.
        out,
    );

    // If there's an odd child left over, it becomes an output.
    let parents_so_far = parents_array.len();
    if !parents_exact.remainder().is_empty() {
        out[parents_so_far * OUT_LEN..][..OUT_LEN].copy_from_slice(parents_exact.remainder());
        parents_so_far + 1
    } else {
        parents_so_far
    }
}

// The wide helper function writes out an array of chaining values and returns
// the length of that array. The number of chaining values returned is the
// dynamically detected SIMD degree, at most MAX_SIMD_DEGREE. Or fewer, if the
// input is shorter than that many chunks. The reason for maintaining a wide
// array of chaining values going back up the tree is to allow the
// implementation to hash as many parents in parallel as possible.
//
// As a special case when the SIMD degree is 1, this function will still return
// at least 2 outputs. This guarantees that this function doesn't perform the
// root compression. (If it did, it would use the wrong flags, and also we
// wouldn't be able to implement extendable output.) Note that this function is
// not used when the whole input is only 1 chunk long; that's a different
// codepath.
//
// Why not just have the caller split the input on the first update(), instead
// of implementing this special rule? Because we don't want to limit SIMD or
// multithreading parallelism for that update().
fn compress_subtree_wide<J: join::Join>(
    input: &[u8],
    key: &CVWords,
    chunk_counter: u64,
    flags: u8,
    platform: Platform,
    out: &mut [u8],
) -> usize {
    // Note that the single chunk case does *not* bump the SIMD degree up to 2
    // when it is 1. This allows Rayon the option of multithreading even the
    // 2-chunk case, which can help performance on smaller platforms.
    if input.len() <= platform.simd_degree() * CHUNK_LEN {
        return compress_chunks_parallel(input, key, chunk_counter, flags, platform, out);
    }

    // With more than simd_degree chunks, we need to recurse. Start by dividing
    // the input into left and right subtrees. (Note that this is only optimal
    // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
    // of 3 or something, we'll need a more complicated strategy.)
    debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2");
    let (left, right) = input.split_at(left_len(input.len()));
    let right_chunk_counter = chunk_counter + (left.len() / CHUNK_LEN) as u64;

    // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
    // account for the special case of returning 2 outputs when the SIMD degree
    // is 1.
    let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
    let degree = if left.len() == CHUNK_LEN {
        // The "simd_degree=1 and we're at the leaf nodes" case.
        debug_assert_eq!(platform.simd_degree(), 1);
        1
    } else {
        cmp::max(platform.simd_degree(), 2)
    };
    let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN);

    // Recurse! For update_rayon(), this is where we take advantage of RayonJoin and use multiple
    // threads.
    let (left_n, right_n) = J::join(
        || compress_subtree_wide::<J>(left, key, chunk_counter, flags, platform, left_out),
        || compress_subtree_wide::<J>(right, key, right_chunk_counter, flags, platform, right_out),
    );

    // The special case again. If simd_degree=1, then we'll have left_n=1 and
    // right_n=1. Rather than compressing them into a single output, return
    // them directly, to make sure we always have at least two outputs.
    debug_assert_eq!(left_n, degree);
    debug_assert!(right_n >= 1 && right_n <= left_n);
    if left_n == 1 {
        out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]);
        return 2;
    }

    // Otherwise, do one layer of parent node compression.
    let num_children = left_n + right_n;
    compress_parents_parallel(
        &cv_array[..num_children * OUT_LEN],
        key,
        flags,
        platform,
        out,
    )
}

// Hash a subtree with compress_subtree_wide(), and then condense the resulting
// list of chaining values down to a single parent node. Don't compress that
// last parent node, however. Instead, return its message bytes (the
// concatenated chaining values of its children). This is necessary when the
// first call to update() supplies a complete subtree, because the topmost
// parent node of that subtree could end up being the root. It's also necessary
// for extended output in the general case.
//
// As with compress_subtree_wide(), this function is not used on inputs of 1
// chunk or less. That's a different codepath.
fn compress_subtree_to_parent_node<J: join::Join>(
    input: &[u8],
    key: &CVWords,
    chunk_counter: u64,
    flags: u8,
    platform: Platform,
) -> [u8; BLOCK_LEN] {
    debug_assert!(input.len() > CHUNK_LEN);
    let mut cv_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
    let mut num_cvs =
        compress_subtree_wide::<J>(input, &key, chunk_counter, flags, platform, &mut cv_array);
    debug_assert!(num_cvs >= 2);

    // If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
    // compress_subtree_wide() returns more than 2 chaining values. Condense
    // them into 2 by forming parent nodes repeatedly.
    let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2];
    while num_cvs > 2 {
        let cv_slice = &cv_array[..num_cvs * OUT_LEN];
        num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array);
        cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]);
    }
    *array_ref!(cv_array, 0, 2 * OUT_LEN)
}

// Hash a complete input all at once. Unlike compress_subtree_wide() and
// compress_subtree_to_parent_node(), this function handles the 1 chunk case.
fn hash_all_at_once<J: join::Join>(input: &[u8], key: &CVWords, flags: u8) -> Output {
    let platform = Platform::detect();

    // If the whole subtree is one chunk, hash it directly with a ChunkState.
    if input.len() <= CHUNK_LEN {
        return ChunkState::new(key, 0, flags, platform)
            .update(input)
            .output();
    }

    // Otherwise construct an Output object from the parent node returned by
    // compress_subtree_to_parent_node().
    Output {
        input_chaining_value: *key,
        block: compress_subtree_to_parent_node::<J>(input, key, 0, flags, platform),
        block_len: BLOCK_LEN as u8,
        counter: 0,
        flags: flags | PARENT,
        platform,
    }
}

/// The default hash function.
///
/// For an incremental version that accepts multiple writes, see [`Hasher::new`],
/// [`Hasher::update`], and [`Hasher::finalize`]. These two lines are equivalent:
///
/// ```
/// let hash = blake3::hash(b"foo");
/// # let hash1 = hash;
///
/// let hash = blake3::Hasher::new().update(b"foo").finalize();
/// # let hash2 = hash;
/// # assert_eq!(hash1, hash2);
/// ```
///
/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`] and
/// [`OutputReader`].
///
/// This function is always single-threaded. For multithreading support, see
/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
pub fn hash(input: &[u8]) -> Hash {
    hash_all_at_once::<join::SerialJoin>(input, IV, 0).root_hash()
}

/// The keyed hash function.
///
/// This is suitable for use as a message authentication code, for example to
/// replace an HMAC instance. In that use case, the constant-time equality
/// checking provided by [`Hash`](struct.Hash.html) is almost always a security
/// requirement, and callers need to be careful not to compare MACs as raw
/// bytes.
///
/// For an incremental version that accepts multiple writes, see [`Hasher::new_keyed`],
/// [`Hasher::update`], and [`Hasher::finalize`]. These two lines are equivalent:
///
/// ```
/// # const KEY: &[u8; 32] = &[0; 32];
/// let mac = blake3::keyed_hash(KEY, b"foo");
/// # let mac1 = mac;
///
/// let mac = blake3::Hasher::new_keyed(KEY).update(b"foo").finalize();
/// # let mac2 = mac;
/// # assert_eq!(mac1, mac2);
/// ```
///
/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`], and [`OutputReader`].
///
/// This function is always single-threaded. For multithreading support, see
/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
pub fn keyed_hash(key: &[u8; KEY_LEN], input: &[u8]) -> Hash {
    let key_words = platform::words_from_le_bytes_32(key);
    hash_all_at_once::<join::SerialJoin>(input, &key_words, KEYED_HASH).root_hash()
}

/// The key derivation function.
///
/// Given cryptographic key material of any length and a context string of any
/// length, this function outputs a 32-byte derived subkey. **The context string
/// should be hardcoded, globally unique, and application-specific.** A good
/// default format for such strings is `"[application] [commit timestamp]
/// [purpose]"`, e.g., `"example.com 2019-12-25 16:18:03 session tokens v1"`.
///
/// Key derivation is important when you want to use the same key in multiple
/// algorithms or use cases. Using the same key with different cryptographic
/// algorithms is generally forbidden, and deriving a separate subkey for each
/// use case protects you from bad interactions. Derived keys also mitigate the
/// damage from one part of your application accidentally leaking its key.
///
/// As a rare exception to that general rule, however, it is possible to use
/// `derive_key` itself with key material that you are already using with
/// another algorithm. You might need to do this if you're adding features to
/// an existing application that does not yet use key derivation internally.
/// However, you still must not share key material with algorithms that forbid
/// key reuse entirely, like a one-time pad. For more on this, see sections 6.2
/// and 7.8 of the [BLAKE3 paper](https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf).
///
/// Note that BLAKE3 is not a password hash, and **`derive_key` should never be
/// used with passwords.** Instead, use a dedicated password hash like
/// [Argon2]. Password hashes are entirely different from generic hash
/// functions, with opposite design requirements.
///
/// For an incremental version that accepts multiple writes, see [`Hasher::new_derive_key`],
/// [`Hasher::update`], and [`Hasher::finalize`]. These two statements are equivalent:
///
/// ```
/// # const CONTEXT: &str = "example.com 2019-12-25 16:18:03 session tokens v1";
/// let key = blake3::derive_key(CONTEXT, b"key material, not a password");
/// # let key1 = key;
///
/// let key: [u8; 32] = blake3::Hasher::new_derive_key(CONTEXT)
///     .update(b"key material, not a password")
///     .finalize()
///     .into();
/// # let key2 = key;
/// # assert_eq!(key1, key2);
/// ```
///
/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`], and [`OutputReader`].
///
/// This function is always single-threaded. For multithreading support, see
/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
///
/// [Argon2]: https://en.wikipedia.org/wiki/Argon2
pub fn derive_key(context: &str, key_material: &[u8]) -> [u8; OUT_LEN] {
    let context_key =
        hash_all_at_once::<join::SerialJoin>(context.as_bytes(), IV, DERIVE_KEY_CONTEXT)
            .root_hash();
    let context_key_words = platform::words_from_le_bytes_32(context_key.as_bytes());
    hash_all_at_once::<join::SerialJoin>(key_material, &context_key_words, DERIVE_KEY_MATERIAL)
        .root_hash()
        .0
}

fn parent_node_output(
    left_child: &CVBytes,
    right_child: &CVBytes,
    key: &CVWords,
    flags: u8,
    platform: Platform,
) -> Output {
    let mut block = [0; BLOCK_LEN];
    block[..32].copy_from_slice(left_child);
    block[32..].copy_from_slice(right_child);
    Output {
        input_chaining_value: *key,
        block,
        block_len: BLOCK_LEN as u8,
        counter: 0,
        flags: flags | PARENT,
        platform,
    }
}

/// An incremental hash state that can accept any number of writes.
///
/// The `rayon` and `mmap` Cargo features enable additional methods on this
/// type related to multithreading and memory-mapped IO.
///
/// When the `traits-preview` Cargo feature is enabled, this type implements
/// several commonly used traits from the
/// [`digest`](https://crates.io/crates/digest) crate. However, those
/// traits aren't stable, and they're expected to change in incompatible ways
/// before that crate reaches 1.0. For that reason, this crate makes no SemVer
/// guarantees for this feature, and callers who use it should expect breaking
/// changes between patch versions.
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// // Hash an input incrementally.
/// let mut hasher = blake3::Hasher::new();
/// hasher.update(b"foo");
/// hasher.update(b"bar");
/// hasher.update(b"baz");
/// assert_eq!(hasher.finalize(), blake3::hash(b"foobarbaz"));
///
/// // Extended output. OutputReader also implements Read and Seek.
/// # #[cfg(feature = "std")] {
/// let mut output = [0; 1000];
/// let mut output_reader = hasher.finalize_xof();
/// output_reader.fill(&mut output);
/// assert_eq!(&output[..32], blake3::hash(b"foobarbaz").as_bytes());
/// # }
/// # Ok(())
/// # }
/// ```
#[derive(Clone)]
pub struct Hasher {
    key: CVWords,
    chunk_state: ChunkState,
    // The stack size is MAX_DEPTH + 1 because we do lazy merging. For example,
    // with 7 chunks, we have 3 entries in the stack. Adding an 8th chunk
    // requires a 4th entry, rather than merging everything down to 1, because
    // we don't know whether more input is coming. This is different from how
    // the reference implementation does things.
    cv_stack: ArrayVec<CVBytes, { MAX_DEPTH + 1 }>,
}

impl Hasher {
    fn new_internal(key: &CVWords, flags: u8) -> Self {
        Self {
            key: *key,
            chunk_state: ChunkState::new(key, 0, flags, Platform::detect()),
            cv_stack: ArrayVec::new(),
        }
    }

    /// Construct a new `Hasher` for the regular hash function.
    pub fn new() -> Self {
        Self::new_internal(IV, 0)
    }

    /// Construct a new `Hasher` for the keyed hash function. See
    /// [`keyed_hash`].
    ///
    /// [`keyed_hash`]: fn.keyed_hash.html
    pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self {
        let key_words = platform::words_from_le_bytes_32(key);
        Self::new_internal(&key_words, KEYED_HASH)
    }

    /// Construct a new `Hasher` for the key derivation function. See
    /// [`derive_key`]. The context string should be hardcoded, globally
    /// unique, and application-specific.
    ///
    /// [`derive_key`]: fn.derive_key.html
    pub fn new_derive_key(context: &str) -> Self {
        let context_key =
            hash_all_at_once::<join::SerialJoin>(context.as_bytes(), IV, DERIVE_KEY_CONTEXT)
                .root_hash();
        let context_key_words = platform::words_from_le_bytes_32(context_key.as_bytes());
        Self::new_internal(&context_key_words, DERIVE_KEY_MATERIAL)
    }

    /// Reset the `Hasher` to its initial state.
    ///
    /// This is functionally the same as overwriting the `Hasher` with a new
    /// one, using the same key or context string if any.
    pub fn reset(&mut self) -> &mut Self {
        self.chunk_state = ChunkState::new(
            &self.key,
            0,
            self.chunk_state.flags,
            self.chunk_state.platform,
        );
        self.cv_stack.clear();
        self
    }

    // As described in push_cv() below, we do "lazy merging", delaying merges
    // until right before the next CV is about to be added. This is different
    // from the reference implementation. Another difference is that we aren't
    // always merging 1 chunk at a time. Instead, each CV might represent any
    // power-of-two number of chunks, as long as the smaller-above-larger stack
    // order is maintained. Instead of the "count the trailing 0-bits"
    // algorithm described in the spec, we use a "count the total number of
    // 1-bits" variant that doesn't require us to retain the subtree size of
    // the CV on top of the stack. The principle is the same: each CV that
    // should remain in the stack is represented by a 1-bit in the total number
    // of chunks (or bytes) so far.
    fn merge_cv_stack(&mut self, total_len: u64) {
        let post_merge_stack_len = total_len.count_ones() as usize;
        while self.cv_stack.len() > post_merge_stack_len {
            let right_child = self.cv_stack.pop().unwrap();
            let left_child = self.cv_stack.pop().unwrap();
            let parent_output = parent_node_output(
                &left_child,
                &right_child,
                &self.key,
                self.chunk_state.flags,
                self.chunk_state.platform,
            );
            self.cv_stack.push(parent_output.chaining_value());
        }
    }
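
    // A worked example (added note, not from the upstream source): after 7
    // chunks the total is 0b111, so count_ones() == 3 and the stack holds
    // three unmerged CVs covering 4, 2, and 1 chunks. Once a later call sees
    // a total of 8 chunks (0b1000), count_ones() == 1, and the loop above
    // merges those CVs down to a single 8-chunk CV.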

    // In reference_impl.rs, we merge the new CV with existing CVs from the
    // stack before pushing it. We can do that because we know more input is
    // coming, so we know none of the merges are root.
    //
    // This setting is different. We want to feed as much input as possible to
    // compress_subtree_wide(), without setting aside anything for the
    // chunk_state. If the user gives us 64 KiB, we want to parallelize over
    // all 64 KiB at once as a single subtree, if at all possible.
    //
    // This leads to two problems:
    // 1) This 64 KiB input might be the only call that ever gets made to
    //    update. In this case, the root node of the 64 KiB subtree would be
    //    the root node of the whole tree, and it would need to be ROOT
    //    finalized. We can't compress it until we know.
    // 2) This 64 KiB input might complete a larger tree, whose root node is
    //    similarly going to be the root of the whole tree. For example,
    //    maybe we have 192 KiB (that is, 128 + 64) hashed so far. We can't
    //    compress the node at the root of the 256 KiB subtree until we know
    //    how to finalize it.
    //
    // The second problem is solved with "lazy merging". That is, when we're
    // about to add a CV to the stack, we don't merge it with anything first,
    // as the reference impl does. Instead we do merges using the *previous* CV
    // that was added, which is sitting on top of the stack, and we put the new
    // CV (unmerged) on top of the stack afterwards. This guarantees that we
    // never merge the root node until finalize().
    //
    // Solving the first problem requires an additional tool,
    // compress_subtree_to_parent_node(). That function always returns the top
    // *two* chaining values of the subtree it's compressing. We then do lazy
    // merging with each of them separately, so that the second CV will always
    // remain unmerged. (That also helps us support extendable output when
    // we're hashing an input all-at-once.)
    fn push_cv(&mut self, new_cv: &CVBytes, chunk_counter: u64) {
        self.merge_cv_stack(chunk_counter);
        self.cv_stack.push(*new_cv);
    }

    /// Add input bytes to the hash state. You can call this any number of times.
    ///
    /// This method is always single-threaded. For multithreading support, see
    /// [`update_rayon`](#method.update_rayon) (enabled with the `rayon` Cargo feature).
    ///
    /// Note that the degree of SIMD parallelism that `update` can use is limited by the size of
    /// this input buffer. See [`update_reader`](#method.update_reader).
    pub fn update(&mut self, input: &[u8]) -> &mut Self {
        self.update_with_join::<join::SerialJoin>(input)
    }

    fn update_with_join<J: join::Join>(&mut self, mut input: &[u8]) -> &mut Self {
        // If we have some partial chunk bytes in the internal chunk_state, we
        // need to finish that chunk first.
        if self.chunk_state.len() > 0 {
            let want = CHUNK_LEN - self.chunk_state.len();
            let take = cmp::min(want, input.len());
            self.chunk_state.update(&input[..take]);
            input = &input[take..];
            if !input.is_empty() {
                // We've filled the current chunk, and there's more input
                // coming, so we know it's not the root and we can finalize it.
                // Then we'll proceed to hashing whole chunks below.
                debug_assert_eq!(self.chunk_state.len(), CHUNK_LEN);
                let chunk_cv = self.chunk_state.output().chaining_value();
                self.push_cv(&chunk_cv, self.chunk_state.chunk_counter);
                self.chunk_state = ChunkState::new(
                    &self.key,
                    self.chunk_state.chunk_counter + 1,
                    self.chunk_state.flags,
                    self.chunk_state.platform,
                );
            } else {
                return self;
            }
        }

        // Now the chunk_state is clear, and we have more input. If there's
        // more than a single chunk (so, definitely not the root chunk), hash
        // the largest whole subtree we can, with the full benefits of SIMD and
        // multithreading parallelism. Two restrictions:
        // - The subtree has to be a power-of-2 number of chunks. Only subtrees
        //   along the right edge can be incomplete, and we don't know where
        //   the right edge is going to be until we get to finalize().
        // - The subtree must evenly divide the total number of chunks up until
        //   this point (if total is not 0). If the current incomplete subtree
        //   is only waiting for 1 more chunk, we can't hash a subtree of 4
        //   chunks. We have to complete the current subtree first.
        // Because we might need to break up the input to form powers of 2, or
        // to evenly divide what we already have, this part runs in a loop.
        while input.len() > CHUNK_LEN {
            debug_assert_eq!(self.chunk_state.len(), 0, "no partial chunk data");
            debug_assert_eq!(CHUNK_LEN.count_ones(), 1, "power of 2 chunk len");
            let mut subtree_len = largest_power_of_two_leq(input.len());
            let count_so_far = self.chunk_state.chunk_counter * CHUNK_LEN as u64;
            // Shrink the subtree_len until it evenly divides the count so far.
            // We know that subtree_len itself is a power of 2, so we can use a
            // bitmasking trick instead of an actual remainder operation. (Note
            // that if the caller consistently passes power-of-2 inputs of the
            // same size, as is hopefully typical, this loop condition will
            // always fail, and subtree_len will always be the full length of
            // the input.)
            //
            // An aside: We don't have to shrink subtree_len quite this much.
            // For example, if count_so_far is 1, we could pass 2 chunks to
            // compress_subtree_to_parent_node. Since we'll get 2 CVs back,
            // we'll still get the right answer in the end, and we might get to
            // use 2-way SIMD parallelism. The problem with this optimization
            // is that it gets us stuck always hashing 2 chunks. The total
            // number of chunks will remain odd, and we'll never graduate to
            // higher degrees of parallelism. See
            // https://github.com/BLAKE3-team/BLAKE3/issues/69.
            while (subtree_len - 1) as u64 & count_so_far != 0 {
                subtree_len /= 2;
            }
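            // A worked example (added note, not from the upstream source):
            // with 3 chunks hashed so far (count_so_far = 3 * CHUNK_LEN) and 4
            // chunks of new input, subtree_len shrinks 4 -> 2 -> 1 chunks,
            // because a 4- or 2-chunk subtree starting at chunk 3 would not be
            // aligned to a subtree boundary. The single chunk completes the
            // pending 4-chunk subtree first.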
1261
            // The shrunken subtree_len might now be 1 chunk long. If so, hash
            // that one chunk by itself. Otherwise, compress the subtree into a
            // pair of CVs.
            let subtree_chunks = (subtree_len / CHUNK_LEN) as u64;
            if subtree_len <= CHUNK_LEN {
                debug_assert_eq!(subtree_len, CHUNK_LEN);
                self.push_cv(
                    &ChunkState::new(
                        &self.key,
                        self.chunk_state.chunk_counter,
                        self.chunk_state.flags,
                        self.chunk_state.platform,
                    )
                    .update(&input[..subtree_len])
                    .output()
                    .chaining_value(),
                    self.chunk_state.chunk_counter,
                );
            } else {
                // This is the high-performance happy path, though getting here
                // depends on the caller giving us a long enough input.
                let cv_pair = compress_subtree_to_parent_node::<J>(
                    &input[..subtree_len],
                    &self.key,
                    self.chunk_state.chunk_counter,
                    self.chunk_state.flags,
                    self.chunk_state.platform,
                );
                let left_cv = array_ref!(cv_pair, 0, 32);
                let right_cv = array_ref!(cv_pair, 32, 32);
                // Push the two CVs we received into the CV stack in order. Because
                // the stack merges lazily, this guarantees we aren't merging the
                // root.
                self.push_cv(left_cv, self.chunk_state.chunk_counter);
                self.push_cv(
                    right_cv,
                    self.chunk_state.chunk_counter + (subtree_chunks / 2),
                );
            }
            self.chunk_state.chunk_counter += subtree_chunks;
            input = &input[subtree_len..];
        }

        // What remains is 1 chunk or less. Add it to the chunk state.
        debug_assert!(input.len() <= CHUNK_LEN);
        if !input.is_empty() {
            self.chunk_state.update(input);
            // Having added some input to the chunk_state, we know what's in
            // the CV stack won't become the root node, and we can do an extra
            // merge. This simplifies finalize().
            self.merge_cv_stack(self.chunk_state.chunk_counter);
        }

        self
    }
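The bitmasking trick in the shrink loop above deserves a standalone illustration: for any power-of-two `n`, `count % n == 0` exactly when `(n - 1) & count == 0`, because `n - 1` is a mask of the low bits that any multiple of `n` must have clear. A minimal sketch; `divides_evenly` is an illustrative name, not part of the crate:

```
fn divides_evenly(n: u64, count: u64) -> bool {
    debug_assert!(n.is_power_of_two());
    (n - 1) & count == 0
}

fn main() {
    assert!(divides_evenly(4, 8)); // 8 % 4 == 0
    assert!(!divides_evenly(4, 6)); // 6 % 4 != 0
    // Mirrors the loop: shrink until the subtree evenly divides the count so far.
    let (mut subtree_len, count_so_far) = (16u64, 12u64);
    while (subtree_len - 1) & count_so_far != 0 {
        subtree_len /= 2;
    }
    assert_eq!(subtree_len, 4); // 12 = 8 + 4, so the next subtree is 4 chunks.
}
```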
    fn final_output(&self) -> Output {
        // If the current chunk is the only chunk, that makes it the root node
        // also. Convert it directly into an Output. Otherwise, we need to
        // merge subtrees below.
        if self.cv_stack.is_empty() {
            debug_assert_eq!(self.chunk_state.chunk_counter, 0);
            return self.chunk_state.output();
        }

        // If there are any bytes in the ChunkState, finalize that chunk and
        // merge its CV with everything in the CV stack. In that case, the work
        // we did at the end of update() above guarantees that the stack
        // doesn't contain any unmerged subtrees that need to be merged first.
        // (This is important, because if there were two chunk hashes sitting
        // on top of the stack, they would need to merge with each other, and
        // merging a new chunk hash into them would be incorrect.)
        //
        // If there are no bytes in the ChunkState, we'll merge what's already
        // in the stack. In this case it's fine if there are unmerged chunks on
        // top, because we'll merge them with each other. Note that the case of
        // the empty chunk is taken care of above.
        let mut output: Output;
        let mut num_cvs_remaining = self.cv_stack.len();
        if self.chunk_state.len() > 0 {
            debug_assert_eq!(
                self.cv_stack.len(),
                self.chunk_state.chunk_counter.count_ones() as usize,
                "cv stack does not need a merge"
            );
            output = self.chunk_state.output();
        } else {
            debug_assert!(self.cv_stack.len() >= 2);
            output = parent_node_output(
                &self.cv_stack[num_cvs_remaining - 2],
                &self.cv_stack[num_cvs_remaining - 1],
                &self.key,
                self.chunk_state.flags,
                self.chunk_state.platform,
            );
            num_cvs_remaining -= 2;
        }
        while num_cvs_remaining > 0 {
            output = parent_node_output(
                &self.cv_stack[num_cvs_remaining - 1],
                &output.chaining_value(),
                &self.key,
                self.chunk_state.flags,
                self.chunk_state.platform,
            );
            num_cvs_remaining -= 1;
        }
        output
    }
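The debug assertion in the first branch encodes the CV stack's binary-counter invariant: after the extra merge at the end of update(), the stack holds exactly one subtree CV per set bit in the chunk counter, because each unmerged subtree covers a power-of-two run of chunks. A standalone sketch of just that arithmetic; the function name is illustrative, not the crate's:

```
fn expected_stack_height(chunk_counter: u64) -> usize {
    chunk_counter.count_ones() as usize
}

fn main() {
    assert_eq!(expected_stack_height(0b110), 2); // 6 chunks: a 4-chunk and a 2-chunk subtree
    assert_eq!(expected_stack_height(0b1000), 1); // 8 chunks: one complete 8-chunk subtree
}
```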
    /// Finalize the hash state and return the [`Hash`](struct.Hash.html) of
    /// the input.
    ///
    /// This method is idempotent. Calling it twice will give the same result.
    /// You can also add more input and finalize again.
    pub fn finalize(&self) -> Hash {
        self.final_output().root_hash()
    }
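A short usage sketch of the idempotence described above, using only public API:

```
fn main() {
    let mut hasher = blake3::Hasher::new();
    hasher.update(b"foo");
    let first = hasher.finalize();
    // Finalizing consumes nothing; calling it again gives the same Hash.
    assert_eq!(first, hasher.finalize());
    // Adding more input and finalizing again matches hashing all at once.
    hasher.update(b"bar");
    assert_eq!(hasher.finalize(), blake3::hash(b"foobar"));
}
```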
    /// Finalize the hash state and return an [`OutputReader`], which can
    /// supply any number of output bytes.
    ///
    /// This method is idempotent. Calling it twice will give the same result.
    /// You can also add more input and finalize again.
    ///
    /// [`OutputReader`]: struct.OutputReader.html
    pub fn finalize_xof(&self) -> OutputReader {
        OutputReader::new(self.final_output())
    }
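A companion sketch for `finalize_xof`: the extended output always begins with the default 32-byte hash.

```
fn main() {
    let mut hasher = blake3::Hasher::new();
    hasher.update(b"example input");
    let mut output = [0u8; 96];
    hasher.finalize_xof().fill(&mut output);
    // The first 32 bytes of extended output equal finalize().
    assert_eq!(hasher.finalize(), output[..32]);
}
```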
    /// Return the total number of bytes hashed so far.
    pub fn count(&self) -> u64 {
        self.chunk_state.chunk_counter * CHUNK_LEN as u64 + self.chunk_state.len() as u64
    }
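A quick sketch of `count`, which tracks bytes across all updates:

```
fn main() {
    let mut hasher = blake3::Hasher::new();
    assert_eq!(hasher.count(), 0);
    hasher.update(b"hello");
    hasher.update(b" world");
    assert_eq!(hasher.count(), 11); // 5 + 6 bytes hashed so far
}
```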
    /// As [`update`](Hasher::update), but reading from a
    /// [`std::io::Read`](https://doc.rust-lang.org/std/io/trait.Read.html) implementation.
    ///
    /// [`Hasher`] implements
    /// [`std::io::Write`](https://doc.rust-lang.org/std/io/trait.Write.html), so it's possible to
    /// use [`std::io::copy`](https://doc.rust-lang.org/std/io/fn.copy.html) to update a [`Hasher`]
    /// from any reader. Unfortunately, this standard approach can limit performance, because
    /// `copy` currently uses an internal 8 KiB buffer that isn't big enough to take advantage of
    /// all SIMD instruction sets. (In particular, [AVX-512](https://en.wikipedia.org/wiki/AVX-512)
    /// needs a 16 KiB buffer.) `update_reader` avoids this performance problem and is slightly
    /// more convenient.
    ///
    /// The internal buffer size this method uses may change at any time, and it may be different
    /// for different targets. The only guarantee is that it will be large enough for all of this
    /// crate's SIMD implementations on the current platform.
    ///
    /// The most common implementer of
    /// [`std::io::Read`](https://doc.rust-lang.org/std/io/trait.Read.html) might be
    /// [`std::fs::File`](https://doc.rust-lang.org/std/fs/struct.File.html), but note that memory
    /// mapping can be faster than this method for hashing large files. See
    /// [`update_mmap`](Hasher::update_mmap) and [`update_mmap_rayon`](Hasher::update_mmap_rayon),
    /// which require the `mmap` and (for the latter) `rayon` Cargo features.
    ///
    /// This method requires the `std` Cargo feature, which is enabled by default.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use std::fs::File;
    /// # use std::io;
    /// # fn main() -> io::Result<()> {
    /// // Hash standard input.
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_reader(std::io::stdin().lock())?;
    /// println!("{}", hasher.finalize());
    /// # Ok(())
    /// # }
    /// ```
    #[cfg(feature = "std")]
    pub fn update_reader(&mut self, reader: impl std::io::Read) -> std::io::Result<&mut Self> {
        io::copy_wide(reader, self)?;
        Ok(self)
    }
    /// As [`update`](Hasher::update), but using Rayon-based multithreading
    /// internally.
    ///
    /// This method is gated by the `rayon` Cargo feature, which is disabled by
    /// default but enabled on [docs.rs](https://docs.rs).
    ///
    /// To get any performance benefit from multithreading, the input buffer
    /// needs to be large. As a rule of thumb on x86_64, `update_rayon` is
    /// _slower_ than `update` for inputs under 128 KiB. That threshold varies
    /// quite a lot across different processors, and it's important to benchmark
    /// your specific use case. See also the performance warning associated with
    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon).
    ///
    /// If you already have a large buffer in memory, and you want to hash it
    /// with multiple threads, this method is a good option. However, reading a
    /// file into memory just to call this method can be a performance mistake,
    /// both because it requires lots of memory and because single-threaded
    /// reads can be slow. For hashing whole files, see
    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon), which is gated by both
    /// the `rayon` and `mmap` Cargo features.
    #[cfg(feature = "rayon")]
    pub fn update_rayon(&mut self, input: &[u8]) -> &mut Self {
        self.update_with_join::<join::RayonJoin>(input)
    }
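Unlike the neighboring methods, `update_rayon` carries no doc example, so here is a minimal sketch. It assumes the optional `rayon` Cargo feature is enabled, and the 1 MiB buffer size is an arbitrary stand-in for "large input":

```
fn main() {
    // A large in-memory buffer is the case where multithreading can pay off.
    let input = vec![0xab_u8; 1 << 20]; // 1 MiB of arbitrary data
    let mut hasher = blake3::Hasher::new();
    hasher.update_rayon(&input);
    // The result is identical to single-threaded hashing.
    assert_eq!(hasher.finalize(), blake3::hash(&input));
}
```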
    /// As [`update`](Hasher::update), but reading the contents of a file using memory mapping.
    ///
    /// Not all files can be memory mapped, and memory mapping small files can be slower than
    /// reading them the usual way. In those cases, this method will fall back to standard file IO.
    /// The heuristic for whether to use memory mapping is currently very simple (file size >=
    /// 16 KiB), and it might change at any time.
    ///
    /// Like [`update`](Hasher::update), this method is single-threaded. In this author's
    /// experience, memory mapping improves single-threaded performance by ~10% for large files
    /// that are already in cache. This probably varies between platforms, and as always it's a
    /// good idea to benchmark your own use case. In comparison, the multithreaded
    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon) method can have a much larger impact on
    /// performance.
    ///
    /// There's a correctness reason that this method takes
    /// [`Path`](https://doc.rust-lang.org/stable/std/path/struct.Path.html) instead of
    /// [`File`](https://doc.rust-lang.org/std/fs/struct.File.html): reading from a memory-mapped
    /// file ignores the seek position of the original file handle (it neither respects the current
    /// position nor updates the position). This difference in behavior would've caused
    /// `update_mmap` and [`update_reader`](Hasher::update_reader) to give different answers and
    /// have different side effects in some cases. Taking a
    /// [`Path`](https://doc.rust-lang.org/stable/std/path/struct.Path.html) avoids this problem by
    /// making it clear that a new [`File`](https://doc.rust-lang.org/std/fs/struct.File.html) is
    /// opened internally.
    ///
    /// This method requires the `mmap` Cargo feature, which is disabled by default but enabled on
    /// [docs.rs](https://docs.rs).
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use std::io;
    /// # use std::path::Path;
    /// # fn main() -> io::Result<()> {
    /// let path = Path::new("file.dat");
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_mmap(path)?;
    /// println!("{}", hasher.finalize());
    /// # Ok(())
    /// # }
    /// ```
    #[cfg(feature = "mmap")]
    pub fn update_mmap(&mut self, path: impl AsRef<std::path::Path>) -> std::io::Result<&mut Self> {
        let file = std::fs::File::open(path.as_ref())?;
        if let Some(mmap) = io::maybe_mmap_file(&file)? {
            self.update(&mmap);
        } else {
            io::copy_wide(&file, self)?;
        }
        Ok(self)
    }
    /// As [`update_rayon`](Hasher::update_rayon), but reading the contents of a file using
    /// memory mapping. This is the default behavior of `b3sum`.
    ///
    /// For large files that are likely to be in cache, this can be much faster than
    /// single-threaded hashing. When benchmarks report that BLAKE3 is 10x or 20x faster than other
    /// cryptographic hashes, this is usually what they're measuring. However...
    ///
    /// **Performance Warning:** There are cases where multithreading hurts performance. The worst
    /// case is [a large file on a spinning disk](https://github.com/BLAKE3-team/BLAKE3/issues/31),
    /// where simultaneous reads from multiple threads can cause "thrashing" (i.e. the disk spends
    /// more time seeking around than reading data). Windows tends to be somewhat worse about this,
    /// in part because it's less likely than Linux to keep very large files in cache. More
    /// generally, if your CPU cores are already busy, then multithreading will add overhead
    /// without improving performance. If your code runs in different environments that you don't
    /// control and can't measure, then unfortunately there's no one-size-fits-all answer for
    /// whether multithreading is a good idea.
    ///
    /// The memory mapping behavior of this function is the same as
    /// [`update_mmap`](Hasher::update_mmap), and the heuristic for when to fall back to standard
    /// file IO might change at any time.
    ///
    /// This method requires both the `mmap` and `rayon` Cargo features, which are disabled by
    /// default but enabled on [docs.rs](https://docs.rs).
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use std::io;
    /// # use std::path::Path;
    /// # fn main() -> io::Result<()> {
    /// # #[cfg(feature = "rayon")]
    /// # {
    /// let path = Path::new("big_file.dat");
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_mmap_rayon(path)?;
    /// println!("{}", hasher.finalize());
    /// # }
    /// # Ok(())
    /// # }
    /// ```
    #[cfg(feature = "mmap")]
    #[cfg(feature = "rayon")]
    pub fn update_mmap_rayon(
        &mut self,
        path: impl AsRef<std::path::Path>,
    ) -> std::io::Result<&mut Self> {
        let file = std::fs::File::open(path.as_ref())?;
        if let Some(mmap) = io::maybe_mmap_file(&file)? {
            self.update_rayon(&mmap);
        } else {
            io::copy_wide(&file, self)?;
        }
        Ok(self)
    }
}
// Don't derive(Debug), because the state may be secret.
impl fmt::Debug for Hasher {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Hasher")
            .field("flags", &self.chunk_state.flags)
            .field("platform", &self.chunk_state.platform)
            .finish()
    }
}
impl Default for Hasher {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(feature = "std")]
1591
impl std::io::Write for Hasher {
1592
    /// This is equivalent to [`update`](#method.update).
1593
    #[inline]
1594
0
    fn write(&mut self, input: &[u8]) -> std::io::Result<usize> {
1595
0
        self.update(input);
1596
0
        Ok(input.len())
1597
0
    }
1598
1599
    #[inline]
1600
0
    fn flush(&mut self) -> std::io::Result<()> {
1601
0
        Ok(())
1602
0
    }
1603
}
1604
1605
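Because `Hasher` implements `Write`, `std::io::copy` works out of the box. A small sketch, subject to the 8 KiB `copy` buffer caveat noted in the `update_reader` docs above:

```
use std::io::Write;

fn main() -> std::io::Result<()> {
    let mut hasher = blake3::Hasher::new();
    // Write-based updating is equivalent to calling update().
    hasher.write_all(b"fed via the Write trait")?;
    // Or drive the Hasher from any reader with std::io::copy.
    let mut reader = &b"fed via io::copy"[..];
    std::io::copy(&mut reader, &mut hasher)?;
    println!("{}", hasher.finalize());
    Ok(())
}
```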
#[cfg(feature = "zeroize")]
1606
impl Zeroize for Hasher {
1607
    fn zeroize(&mut self) {
1608
        // Destructuring to trigger compile error as a reminder to update this impl.
1609
        let Self {
1610
            key,
1611
            chunk_state,
1612
            cv_stack,
1613
        } = self;
1614
1615
        key.zeroize();
1616
        chunk_state.zeroize();
1617
        cv_stack.zeroize();
1618
    }
1619
}
1620
1621
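A brief usage sketch of the `zeroize` integration. It assumes the optional `zeroize` Cargo feature is enabled and that the caller depends on the `zeroize` crate for the trait import; the 42-byte key value is arbitrary:

```
use zeroize::Zeroize;

fn main() {
    let mut hasher = blake3::Hasher::new_keyed(&[42u8; 32]);
    hasher.update(b"secret input");
    let hash = hasher.finalize();
    // Wipe the key and internal chaining values from memory.
    hasher.zeroize();
    println!("{}", hash);
}
```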
/// An incremental reader for extended output, returned by
/// [`Hasher::finalize_xof`](struct.Hasher.html#method.finalize_xof).
///
/// Shorter BLAKE3 outputs are prefixes of longer ones, and explicitly requesting a short output is
/// equivalent to truncating the default-length output. Note that this is a difference between
/// BLAKE2 and BLAKE3.
///
/// # Security notes
///
/// Outputs shorter than the default length of 32 bytes (256 bits) provide less security. An N-bit
/// BLAKE3 output is intended to provide N bits of first and second preimage resistance and N/2
/// bits of collision resistance, for any N up to 256. Longer outputs don't provide any additional
/// security.
///
/// Avoid relying on the secrecy of the output offset, that is, the number of output bytes read or
/// the arguments to [`seek`](struct.OutputReader.html#method.seek) or
/// [`set_position`](struct.OutputReader.html#method.set_position). [_Block-Cipher-Based Tree
/// Hashing_ by Aldo Gunsing](https://eprint.iacr.org/2022/283) shows that an attacker who knows
/// both the message and the key (if any) can easily determine the offset of an extended output.
/// For comparison, AES-CTR has a similar property: if you know the key, you can decrypt a block
/// from an unknown position in the output stream to recover its block index. Callers with strong
/// secret keys aren't affected in practice, but secret offsets are a [design
/// smell](https://en.wikipedia.org/wiki/Design_smell) in any case.
#[derive(Clone)]
pub struct OutputReader {
    inner: Output,
    position_within_block: u8,
}
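A short sketch of the prefix property described above: a 16-byte output equals the first 16 bytes of a 64-byte output from the same input.

```
fn main() {
    let mut hasher = blake3::Hasher::new();
    hasher.update(b"prefix property demo");
    let mut short = [0u8; 16];
    let mut long = [0u8; 64];
    hasher.finalize_xof().fill(&mut short);
    hasher.finalize_xof().fill(&mut long);
    // Shorter outputs are prefixes of longer ones.
    assert_eq!(short, long[..16]);
}
```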
impl OutputReader {
    fn new(inner: Output) -> Self {
        Self {
            inner,
            position_within_block: 0,
        }
    }
    // This helper function handles both the case where the output buffer is
    // shorter than one block, and the case where our position_within_block is
    // non-zero.
    fn fill_one_block(&mut self, buf: &mut &mut [u8]) {
        let output_block: [u8; BLOCK_LEN] = self.inner.root_output_block();
        let output_bytes = &output_block[self.position_within_block as usize..];
        let take = cmp::min(buf.len(), output_bytes.len());
        buf[..take].copy_from_slice(&output_bytes[..take]);
        self.position_within_block += take as u8;
        if self.position_within_block == BLOCK_LEN as u8 {
            self.inner.counter += 1;
            self.position_within_block = 0;
        }
        // Advance the dest buffer. mem::take() is a borrowck workaround.
        *buf = &mut core::mem::take(buf)[take..];
    }
    /// Fill a buffer with output bytes and advance the position of the
    /// `OutputReader`. This is equivalent to [`Read::read`], except that it
    /// doesn't return a `Result`. Both methods always fill the entire buffer.
    ///
    /// Note that `OutputReader` doesn't buffer output bytes internally, so
    /// calling `fill` repeatedly with a short-length or odd-length slice will
    /// end up performing the same compression multiple times. If you're
    /// reading output in a loop, prefer a slice length that's a multiple of
    /// 64.
    ///
    /// The maximum output size of BLAKE3 is 2<sup>64</sup>-1 bytes. If you try
    /// to extract more than that, for example by seeking near the end and
    /// reading further, the behavior is unspecified.
    ///
    /// [`Read::read`]: #method.read
    pub fn fill(&mut self, mut buf: &mut [u8]) {
        if buf.is_empty() {
            return;
        }

        // If we're partway through a block, try to get to a block boundary.
        if self.position_within_block != 0 {
            self.fill_one_block(&mut buf);
        }

        let full_blocks = buf.len() / BLOCK_LEN;
        let full_blocks_len = full_blocks * BLOCK_LEN;
        if full_blocks > 0 {
            debug_assert_eq!(0, self.position_within_block);
            self.inner.platform.xof_many(
                &self.inner.input_chaining_value,
                &self.inner.block,
                self.inner.block_len,
                self.inner.counter,
                self.inner.flags | ROOT,
                &mut buf[..full_blocks_len],
            );
            self.inner.counter += full_blocks as u64;
            buf = &mut buf[full_blocks * BLOCK_LEN..];
        }

        if !buf.is_empty() {
            debug_assert!(buf.len() < BLOCK_LEN);
            self.fill_one_block(&mut buf);
            debug_assert!(buf.is_empty());
        }
    }
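Following the advice in the doc comment above, a sketch that streams extended output in 64-byte steps, the block size that avoids repeating compressions:

```
fn main() {
    let mut reader = blake3::Hasher::new().update(b"stream me").finalize_xof();
    let mut stream = Vec::new();
    let mut block = [0u8; 64]; // a multiple of 64 avoids recompressing blocks
    for _ in 0..4 {
        reader.fill(&mut block);
        stream.extend_from_slice(&block);
    }
    assert_eq!(stream.len(), 256);
    assert_eq!(reader.position(), 256);
}
```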
    /// Return the current read position in the output stream. This is
    /// equivalent to [`Seek::stream_position`], except that it doesn't return
    /// a `Result`. The position of a new `OutputReader` starts at 0, and each
    /// call to [`fill`] or [`Read::read`] moves the position forward by the
    /// number of bytes read.
    ///
    /// [`Seek::stream_position`]: #method.stream_position
    /// [`fill`]: #method.fill
    /// [`Read::read`]: #method.read
    pub fn position(&self) -> u64 {
        self.inner.counter * BLOCK_LEN as u64 + self.position_within_block as u64
    }
    /// Seek to a new read position in the output stream. This is equivalent to
    /// calling [`Seek::seek`] with [`SeekFrom::Start`], except that it doesn't
    /// return a `Result`.
    ///
    /// [`Seek::seek`]: #method.seek
    /// [`SeekFrom::Start`]: https://doc.rust-lang.org/std/io/enum.SeekFrom.html
    pub fn set_position(&mut self, position: u64) {
        self.position_within_block = (position % BLOCK_LEN as u64) as u8;
        self.inner.counter = position / BLOCK_LEN as u64;
    }
}
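A small sketch tying `set_position` and `position` together: seeking to an arbitrary offset and reading reproduces the same bytes as reading from the start.

```
fn main() {
    let mut hasher = blake3::Hasher::new();
    hasher.update(b"seekable output");
    let mut head = [0u8; 200];
    hasher.finalize_xof().fill(&mut head);

    // Re-read bytes 100..200 by seeking instead of re-reading from 0.
    let mut reader = hasher.finalize_xof();
    reader.set_position(100);
    let mut tail = [0u8; 100];
    reader.fill(&mut tail);
    assert_eq!(tail[..], head[100..]);
    assert_eq!(reader.position(), 200);
}
```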
// Don't derive(Debug), because the state may be secret.
impl fmt::Debug for OutputReader {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("OutputReader")
            .field("position", &self.position())
            .finish()
    }
}
#[cfg(feature = "std")]
1758
impl std::io::Read for OutputReader {
1759
    #[inline]
1760
0
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
1761
0
        self.fill(buf);
1762
0
        Ok(buf.len())
1763
0
    }
1764
}
1765
1766
#[cfg(feature = "std")]
1767
impl std::io::Seek for OutputReader {
1768
0
    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
1769
0
        let max_position = u64::max_value() as i128;
1770
0
        let target_position: i128 = match pos {
1771
0
            std::io::SeekFrom::Start(x) => x as i128,
1772
0
            std::io::SeekFrom::Current(x) => self.position() as i128 + x as i128,
1773
            std::io::SeekFrom::End(_) => {
1774
0
                return Err(std::io::Error::new(
1775
0
                    std::io::ErrorKind::InvalidInput,
1776
0
                    "seek from end not supported",
1777
0
                ));
1778
            }
1779
        };
1780
0
        if target_position < 0 {
1781
0
            return Err(std::io::Error::new(
1782
0
                std::io::ErrorKind::InvalidInput,
1783
0
                "seek before start",
1784
0
            ));
1785
0
        }
1786
0
        self.set_position(cmp::min(target_position, max_position) as u64);
1787
0
        Ok(self.position())
1788
0
    }
1789
}
1790
1791
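Since `OutputReader` implements both `Read` and `Seek` (with `SeekFrom::End` rejected, as shown above), the standard trait methods compose as expected; a brief sketch assuming the default `std` feature:

```
use std::io::{Read, Seek, SeekFrom};

fn main() -> std::io::Result<()> {
    let mut hasher = blake3::Hasher::new();
    hasher.update(b"trait-based access");
    let mut reader = hasher.finalize_xof();

    let mut buf = [0u8; 32];
    reader.seek(SeekFrom::Start(64))?;
    reader.read_exact(&mut buf)?;
    assert_eq!(reader.stream_position()?, 96);

    // Relative seeks work too; seeking from the end is an error.
    reader.seek(SeekFrom::Current(-32))?;
    assert!(reader.seek(SeekFrom::End(0)).is_err());
    Ok(())
}
```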
#[cfg(feature = "zeroize")]
1792
impl Zeroize for OutputReader {
1793
    fn zeroize(&mut self) {
1794
        // Destructuring to trigger compile error as a reminder to update this impl.
1795
        let Self {
1796
            inner,
1797
            position_within_block,
1798
        } = self;
1799
1800
        inner.zeroize();
1801
        position_within_block.zeroize();
1802
    }
1803
}