blake3/lib.rs

//! The official Rust implementation of the [BLAKE3] cryptographic hash
//! function.
//!
//! # Examples
//!
//! ```
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Hash an input all at once.
//! let hash1 = blake3::hash(b"foobarbaz");
//!
//! // Hash an input incrementally.
//! let mut hasher = blake3::Hasher::new();
//! hasher.update(b"foo");
//! hasher.update(b"bar");
//! hasher.update(b"baz");
//! let hash2 = hasher.finalize();
//! assert_eq!(hash1, hash2);
//!
//! // Extended output. OutputReader also implements Read and Seek.
//! # #[cfg(feature = "std")] {
//! let mut output = [0; 1000];
//! let mut output_reader = hasher.finalize_xof();
//! output_reader.fill(&mut output);
//! assert_eq!(hash1, output[..32]);
//! # }
//!
//! // Print a hash as hex.
//! println!("{}", hash1);
//! # Ok(())
//! # }
//! ```
//!
//! # Cargo Features
//!
//! The `std` feature (the only feature enabled by default) is required for
//! implementations of the [`Write`] and [`Seek`] traits, the
//! [`update_reader`](Hasher::update_reader) helper method, and runtime CPU
//! feature detection on x86. If this feature is disabled, the only way to use
//! the x86 SIMD implementations is to enable the corresponding instruction sets
//! globally, with e.g. `RUSTFLAGS="-C target-cpu=native"`. The resulting binary
//! will not be portable to other machines.
//!
//! The `rayon` feature (disabled by default, but enabled for [docs.rs]) adds
//! the [`update_rayon`](Hasher::update_rayon) and (in combination with `mmap`
//! below) [`update_mmap_rayon`](Hasher::update_mmap_rayon) methods, for
//! multithreaded hashing. However, even if this feature is enabled, all other
//! APIs remain single-threaded.
//!
//! The `mmap` feature (disabled by default, but enabled for [docs.rs]) adds the
//! [`update_mmap`](Hasher::update_mmap) and (in combination with `rayon` above)
//! [`update_mmap_rayon`](Hasher::update_mmap_rayon) helper methods for
//! memory-mapped IO.
//!
//! The `zeroize` feature (disabled by default, but enabled for [docs.rs])
//! implements
//! [`Zeroize`](https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html) for
//! this crate's types.
//!
//! The `serde` feature (disabled by default, but enabled for [docs.rs]) implements
//! [`serde::Serialize`](https://docs.rs/serde/latest/serde/trait.Serialize.html) and
//! [`serde::Deserialize`](https://docs.rs/serde/latest/serde/trait.Deserialize.html)
//! for [`Hash`](struct@Hash).
//!
//! The NEON implementation is enabled by default for AArch64 but requires the
//! `neon` feature for other ARM targets. Not all ARMv7 CPUs support NEON, and
//! enabling this feature will produce a binary that's not portable to CPUs
//! without NEON support.
//!
//! The `traits-preview` feature enables implementations of traits from the
//! RustCrypto [`digest`] crate, and re-exports that crate as `traits::digest`.
//! However, the traits aren't stable, and they're expected to change in
//! incompatible ways before that crate reaches 1.0. For that reason, this crate
//! makes no SemVer guarantees for this feature, and callers who use it should
//! expect breaking changes between patch versions. (The "-preview" feature name
//! follows the conventions of the RustCrypto [`signature`] crate.)
//!
//! [`Hasher::update_rayon`]: struct.Hasher.html#method.update_rayon
//! [BLAKE3]: https://blake3.io
//! [Rayon]: https://github.com/rayon-rs/rayon
//! [docs.rs]: https://docs.rs/
//! [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
//! [`Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
//! [`digest`]: https://crates.io/crates/digest
//! [`signature`]: https://crates.io/crates/signature

#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(test)]
mod test;

// The guts module is for incremental use cases like the `bao` crate that need
// to explicitly compute chunk and parent chaining values. It is semi-stable
// and likely to keep working, but largely undocumented and not intended for
// widespread use.
#[doc(hidden)]
pub mod guts;

/// Undocumented and unstable, for benchmarks only.
#[doc(hidden)]
pub mod platform;

// Platform-specific implementations of the compression function. These
// BLAKE3-specific cfg flags are set in build.rs.
#[cfg(blake3_avx2_rust)]
#[path = "rust_avx2.rs"]
mod avx2;
#[cfg(blake3_avx2_ffi)]
#[path = "ffi_avx2.rs"]
mod avx2;
#[cfg(blake3_avx512_ffi)]
#[path = "ffi_avx512.rs"]
mod avx512;
#[cfg(blake3_neon)]
#[path = "ffi_neon.rs"]
mod neon;
mod portable;
#[cfg(blake3_sse2_rust)]
#[path = "rust_sse2.rs"]
mod sse2;
#[cfg(blake3_sse2_ffi)]
#[path = "ffi_sse2.rs"]
mod sse2;
#[cfg(blake3_sse41_rust)]
#[path = "rust_sse41.rs"]
mod sse41;
#[cfg(blake3_sse41_ffi)]
#[path = "ffi_sse41.rs"]
mod sse41;

#[cfg(blake3_wasm32_simd)]
#[path = "wasm32_simd.rs"]
mod wasm32_simd;

#[cfg(feature = "traits-preview")]
pub mod traits;

mod io;
mod join;

use arrayref::{array_mut_ref, array_ref};
use arrayvec::{ArrayString, ArrayVec};
use core::cmp;
use core::fmt;
use platform::{Platform, MAX_SIMD_DEGREE, MAX_SIMD_DEGREE_OR_2};
#[cfg(feature = "zeroize")]
use zeroize::Zeroize;

/// The number of bytes in a [`Hash`](struct.Hash.html), 32.
pub const OUT_LEN: usize = 32;

/// The number of bytes in a key, 32.
pub const KEY_LEN: usize = 32;

const MAX_DEPTH: usize = 54; // 2^54 * CHUNK_LEN = 2^64
use guts::{BLOCK_LEN, CHUNK_LEN};

// While iterating the compression function within a chunk, the CV is
// represented as words, to avoid doing two extra endianness conversions for
// each compression in the portable implementation. But the hash_many interface
// needs to hash both input bytes and parent nodes, so it's better for its
// output CVs to be represented as bytes.
type CVWords = [u32; 8];
type CVBytes = [u8; 32]; // little-endian

const IV: &CVWords = &[
    0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
];

const MSG_SCHEDULE: [[usize; 16]; 7] = [
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
    [2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8],
    [3, 4, 10, 12, 13, 2, 7, 14, 6, 5, 9, 0, 11, 15, 8, 1],
    [10, 7, 12, 9, 14, 3, 13, 15, 4, 0, 11, 2, 5, 8, 1, 6],
    [12, 13, 9, 11, 15, 10, 14, 8, 7, 2, 5, 3, 0, 1, 6, 4],
    [9, 14, 11, 5, 8, 12, 15, 1, 13, 3, 0, 10, 2, 6, 4, 7],
    [11, 15, 5, 0, 1, 9, 8, 6, 14, 10, 2, 12, 3, 4, 7, 13],
];

// These are the internal flags that we use to domain separate root/non-root,
// chunk/parent, and chunk beginning/middle/end. These get set in the low bits
// of the block flags word in the compression function, so their values start
// low and go up.
const CHUNK_START: u8 = 1 << 0;
const CHUNK_END: u8 = 1 << 1;
const PARENT: u8 = 1 << 2;
const ROOT: u8 = 1 << 3;
const KEYED_HASH: u8 = 1 << 4;
const DERIVE_KEY_CONTEXT: u8 = 1 << 5;
const DERIVE_KEY_MATERIAL: u8 = 1 << 6;
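// For example (an illustrative combination, not part of the original
// comments): in the default hash mode, a root chunk whose input fits in a
// single block compresses that block with CHUNK_START | CHUNK_END | ROOT.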

#[inline]
fn counter_low(counter: u64) -> u32 {
    counter as u32
}

#[inline]
fn counter_high(counter: u64) -> u32 {
    (counter >> 32) as u32
}

/// An output of the default size, 32 bytes, which provides constant-time
/// equality checking.
///
/// `Hash` implements [`From`] and [`Into`] for `[u8; 32]`, and it provides
/// [`from_bytes`] and [`as_bytes`] for explicit conversions between itself and
/// `[u8; 32]`. However, byte arrays and slices don't provide constant-time
/// equality checking, which is often a security requirement in software that
/// handles private data. `Hash` doesn't implement [`Deref`] or [`AsRef`], to
/// avoid situations where a type conversion happens implicitly and the
/// constant-time property is accidentally lost.
///
/// `Hash` provides the [`to_hex`] and [`from_hex`] methods for converting to
/// and from hexadecimal. It also implements [`Display`] and [`FromStr`].
///
/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
/// [`Into`]: https://doc.rust-lang.org/std/convert/trait.Into.html
/// [`as_bytes`]: #method.as_bytes
/// [`from_bytes`]: #method.from_bytes
/// [`Deref`]: https://doc.rust-lang.org/stable/std/ops/trait.Deref.html
/// [`AsRef`]: https://doc.rust-lang.org/std/convert/trait.AsRef.html
/// [`to_hex`]: #method.to_hex
/// [`from_hex`]: #method.from_hex
/// [`Display`]: https://doc.rust-lang.org/std/fmt/trait.Display.html
/// [`FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html
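///
/// # Examples
///
/// A short round trip through the conversions described above (the input
/// bytes here are arbitrary):
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let hash = blake3::hash(b"foo");
/// // Explicit conversions to and from [u8; 32].
/// let bytes: [u8; 32] = hash.into();
/// assert_eq!(hash, blake3::Hash::from_bytes(bytes));
/// // Hex round trip via Display and FromStr.
/// let parsed: blake3::Hash = hash.to_string().parse()?;
/// assert_eq!(hash, parsed);
/// # Ok(())
/// # }
/// ```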
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
#[derive(Clone, Copy, Hash)]
pub struct Hash([u8; OUT_LEN]);

impl Hash {
    /// The raw bytes of the `Hash`. Note that byte arrays don't provide
    /// constant-time equality checking, so if you need to compare hashes,
    /// prefer the `Hash` type.
    #[inline]
    pub const fn as_bytes(&self) -> &[u8; OUT_LEN] {
        &self.0
    }

    /// Create a `Hash` from its raw bytes representation.
    pub const fn from_bytes(bytes: [u8; OUT_LEN]) -> Self {
        Self(bytes)
    }

    /// Create a `Hash` from its raw bytes representation as a slice.
    ///
    /// Returns an error if the slice is not exactly 32 bytes long.
    pub fn from_slice(bytes: &[u8]) -> Result<Self, core::array::TryFromSliceError> {
        Ok(Self::from_bytes(bytes.try_into()?))
    }

    /// Encode a `Hash` in lowercase hexadecimal.
    ///
    /// The returned [`ArrayString`] is a fixed size and doesn't allocate memory
    /// on the heap. Note that [`ArrayString`] doesn't provide constant-time
    /// equality checking, so if you need to compare hashes, prefer the `Hash`
    /// type.
    ///
    /// [`ArrayString`]: https://docs.rs/arrayvec/0.5.1/arrayvec/struct.ArrayString.html
    pub fn to_hex(&self) -> ArrayString<{ 2 * OUT_LEN }> {
        let mut s = ArrayString::new();
        let table = b"0123456789abcdef";
        for &b in self.0.iter() {
            s.push(table[(b >> 4) as usize] as char);
            s.push(table[(b & 0xf) as usize] as char);
        }
        s
    }

    /// Decode a `Hash` from hexadecimal. Both uppercase and lowercase ASCII
    /// bytes are supported.
    ///
    /// Any byte outside the ranges `'0'...'9'`, `'a'...'f'`, and `'A'...'F'`
    /// results in an error. An input length other than 64 also results in an
    /// error.
    ///
    /// Note that `Hash` also implements `FromStr`, so `Hash::from_hex("...")`
    /// is equivalent to `"...".parse()`.
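    ///
    /// A small sketch of the round trip through `to_hex` and `from_hex`
    /// (the input bytes are arbitrary):
    ///
    /// ```
    /// let hash = blake3::hash(b"foo");
    /// let hex = hash.to_hex();
    /// assert_eq!(blake3::Hash::from_hex(hex.as_str()).unwrap(), hash);
    /// ```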
    pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, HexError> {
        fn hex_val(byte: u8) -> Result<u8, HexError> {
            match byte {
                b'A'..=b'F' => Ok(byte - b'A' + 10),
                b'a'..=b'f' => Ok(byte - b'a' + 10),
                b'0'..=b'9' => Ok(byte - b'0'),
                _ => Err(HexError(HexErrorInner::InvalidByte(byte))),
            }
        }
        let hex_bytes: &[u8] = hex.as_ref();
        if hex_bytes.len() != OUT_LEN * 2 {
            return Err(HexError(HexErrorInner::InvalidLen(hex_bytes.len())));
        }
        let mut hash_bytes: [u8; OUT_LEN] = [0; OUT_LEN];
        for i in 0..OUT_LEN {
            hash_bytes[i] = 16 * hex_val(hex_bytes[2 * i])? + hex_val(hex_bytes[2 * i + 1])?;
        }
        Ok(Hash::from(hash_bytes))
    }
}

impl From<[u8; OUT_LEN]> for Hash {
    #[inline]
    fn from(bytes: [u8; OUT_LEN]) -> Self {
        Self::from_bytes(bytes)
    }
}

impl From<Hash> for [u8; OUT_LEN] {
    #[inline]
    fn from(hash: Hash) -> Self {
        hash.0
    }
}

impl core::str::FromStr for Hash {
    type Err = HexError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Hash::from_hex(s)
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for Hash {
    fn zeroize(&mut self) {
        // Destructuring to trigger compile error as a reminder to update this impl.
        let Self(bytes) = self;
        bytes.zeroize();
    }
}

/// This implementation is constant-time.
impl PartialEq for Hash {
    #[inline]
    fn eq(&self, other: &Hash) -> bool {
        constant_time_eq::constant_time_eq_32(&self.0, &other.0)
    }
}

/// This implementation is constant-time.
impl PartialEq<[u8; OUT_LEN]> for Hash {
    #[inline]
    fn eq(&self, other: &[u8; OUT_LEN]) -> bool {
        constant_time_eq::constant_time_eq_32(&self.0, other)
    }
}

/// This implementation is constant-time if the target is 32 bytes long.
impl PartialEq<[u8]> for Hash {
    #[inline]
    fn eq(&self, other: &[u8]) -> bool {
        constant_time_eq::constant_time_eq(&self.0, other)
    }
}

impl Eq for Hash {}

impl fmt::Display for Hash {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Formatting field as `&str` to reduce code size since the `Debug`
        // dynamic dispatch table for `&str` is likely needed elsewhere already,
        // but that for `ArrayString<[u8; 64]>` is not.
        let hex = self.to_hex();
        let hex: &str = hex.as_str();

        f.write_str(hex)
    }
}

impl fmt::Debug for Hash {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Formatting field as `&str` to reduce code size since the `Debug`
        // dynamic dispatch table for `&str` is likely needed elsewhere already,
        // but that for `ArrayString<[u8; 64]>` is not.
        let hex = self.to_hex();
        let hex: &str = hex.as_str();

        f.debug_tuple("Hash").field(&hex).finish()
    }
}

/// The error type for [`Hash::from_hex`].
///
/// The `.to_string()` representation of this error currently distinguishes between bad length
/// errors and bad character errors. This is to help with logging and debugging, but it isn't a
/// stable API detail, and it may change at any time.
#[derive(Clone, Debug)]
pub struct HexError(HexErrorInner);

#[derive(Clone, Debug)]
enum HexErrorInner {
    InvalidByte(u8),
    InvalidLen(usize),
}

impl fmt::Display for HexError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.0 {
            HexErrorInner::InvalidByte(byte) => {
                if byte < 128 {
                    write!(f, "invalid hex character: {:?}", byte as char)
                } else {
                    write!(f, "invalid hex character: 0x{:x}", byte)
                }
            }
            HexErrorInner::InvalidLen(len) => {
                write!(f, "expected 64 hex bytes, received {}", len)
            }
        }
    }
}

#[cfg(feature = "std")]
impl std::error::Error for HexError {}

// Each chunk or parent node can produce either a 32-byte chaining value or, by
// setting the ROOT flag, any number of final output bytes. The Output struct
// captures the state just prior to choosing between those two possibilities.
#[derive(Clone)]
struct Output {
    input_chaining_value: CVWords,
    block: [u8; 64],
    block_len: u8,
    counter: u64,
    flags: u8,
    platform: Platform,
}

impl Output {
    fn chaining_value(&self) -> CVBytes {
        let mut cv = self.input_chaining_value;
        self.platform.compress_in_place(
            &mut cv,
            &self.block,
            self.block_len,
            self.counter,
            self.flags,
        );
        platform::le_bytes_from_words_32(&cv)
    }

    fn root_hash(&self) -> Hash {
        debug_assert_eq!(self.counter, 0);
        let mut cv = self.input_chaining_value;
        self.platform
            .compress_in_place(&mut cv, &self.block, self.block_len, 0, self.flags | ROOT);
        Hash(platform::le_bytes_from_words_32(&cv))
    }

    fn root_output_block(&self) -> [u8; 2 * OUT_LEN] {
        self.platform.compress_xof(
            &self.input_chaining_value,
            &self.block,
            self.block_len,
            self.counter,
            self.flags | ROOT,
        )
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for Output {
    fn zeroize(&mut self) {
        // Destructuring to trigger compile error as a reminder to update this impl.
        let Self {
            input_chaining_value,
            block,
            block_len,
            counter,
            flags,
            platform: _,
        } = self;

        input_chaining_value.zeroize();
        block.zeroize();
        block_len.zeroize();
        counter.zeroize();
        flags.zeroize();
    }
}

#[derive(Clone)]
struct ChunkState {
    cv: CVWords,
    chunk_counter: u64,
    buf: [u8; BLOCK_LEN],
    buf_len: u8,
    blocks_compressed: u8,
    flags: u8,
    platform: Platform,
}

impl ChunkState {
    fn new(key: &CVWords, chunk_counter: u64, flags: u8, platform: Platform) -> Self {
        Self {
            cv: *key,
            chunk_counter,
            buf: [0; BLOCK_LEN],
            buf_len: 0,
            blocks_compressed: 0,
            flags,
            platform,
        }
    }

    fn len(&self) -> usize {
        BLOCK_LEN * self.blocks_compressed as usize + self.buf_len as usize
    }

    fn fill_buf(&mut self, input: &mut &[u8]) {
        let want = BLOCK_LEN - self.buf_len as usize;
        let take = cmp::min(want, input.len());
        self.buf[self.buf_len as usize..][..take].copy_from_slice(&input[..take]);
        self.buf_len += take as u8;
        *input = &input[take..];
    }

    fn start_flag(&self) -> u8 {
        if self.blocks_compressed == 0 {
            CHUNK_START
        } else {
            0
        }
    }

    // Try to avoid buffering as much as possible, by compressing directly from
    // the input slice when full blocks are available.
    fn update(&mut self, mut input: &[u8]) -> &mut Self {
        if self.buf_len > 0 {
            self.fill_buf(&mut input);
            if !input.is_empty() {
                debug_assert_eq!(self.buf_len as usize, BLOCK_LEN);
                let block_flags = self.flags | self.start_flag(); // borrowck
                self.platform.compress_in_place(
                    &mut self.cv,
                    &self.buf,
                    BLOCK_LEN as u8,
                    self.chunk_counter,
                    block_flags,
                );
                self.buf_len = 0;
                self.buf = [0; BLOCK_LEN];
                self.blocks_compressed += 1;
            }
        }

        while input.len() > BLOCK_LEN {
            debug_assert_eq!(self.buf_len, 0);
            let block_flags = self.flags | self.start_flag(); // borrowck
            self.platform.compress_in_place(
                &mut self.cv,
                array_ref!(input, 0, BLOCK_LEN),
                BLOCK_LEN as u8,
                self.chunk_counter,
                block_flags,
            );
            self.blocks_compressed += 1;
            input = &input[BLOCK_LEN..];
        }

        self.fill_buf(&mut input);
        debug_assert!(input.is_empty());
        debug_assert!(self.len() <= CHUNK_LEN);
        self
    }

    fn output(&self) -> Output {
        let block_flags = self.flags | self.start_flag() | CHUNK_END;
        Output {
            input_chaining_value: self.cv,
            block: self.buf,
            block_len: self.buf_len,
            counter: self.chunk_counter,
            flags: block_flags,
            platform: self.platform,
        }
    }
}

// Don't derive(Debug), because the state may be secret.
impl fmt::Debug for ChunkState {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("ChunkState")
            .field("len", &self.len())
            .field("chunk_counter", &self.chunk_counter)
            .field("flags", &self.flags)
            .field("platform", &self.platform)
            .finish()
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for ChunkState {
    fn zeroize(&mut self) {
        // Destructuring to trigger compile error as a reminder to update this impl.
        let Self {
            cv,
            chunk_counter,
            buf,
            buf_len,
            blocks_compressed,
            flags,
            platform: _,
        } = self;

        cv.zeroize();
        chunk_counter.zeroize();
        buf.zeroize();
        buf_len.zeroize();
        blocks_compressed.zeroize();
        flags.zeroize();
    }
}

// IMPLEMENTATION NOTE
// ===================
// The recursive function compress_subtree_wide(), implemented below, is the
// basis of high-performance BLAKE3. We use it both for all-at-once hashing,
// and for the incremental input with Hasher (though we have to be careful with
// subtree boundaries in the incremental case). compress_subtree_wide() applies
// several optimizations at the same time:
// - Multithreading with Rayon.
// - Parallel chunk hashing with SIMD.
// - Parallel parent hashing with SIMD. Note that while SIMD chunk hashing
//   maxes out at MAX_SIMD_DEGREE*CHUNK_LEN, parallel parent hashing continues
//   to benefit from larger inputs, because more levels of the tree can use
//   full-width SIMD vectors for parent hashing. Without parallel parent
//   hashing, we lose about 10% of overall throughput on AVX2 and AVX-512.

/// Undocumented and unstable, for benchmarks only.
#[doc(hidden)]
#[derive(Clone, Copy)]
pub enum IncrementCounter {
    Yes,
    No,
}

impl IncrementCounter {
    #[inline]
    fn yes(&self) -> bool {
        match self {
            IncrementCounter::Yes => true,
            IncrementCounter::No => false,
        }
    }
}

// The largest power of two less than or equal to `n`, used for left_len()
// immediately below, and also directly in Hasher::update().
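// For example (illustrative values): largest_power_of_two_leq(1) == 1,
// largest_power_of_two_leq(4) == 4, and largest_power_of_two_leq(5) == 4.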
fn largest_power_of_two_leq(n: usize) -> usize {
    ((n / 2) + 1).next_power_of_two()
}

// Given some input larger than one chunk, return the number of bytes that
// should go in the left subtree. This is the largest power-of-2 number of
// chunks that leaves at least 1 byte for the right subtree.
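// For example (illustrative, with CHUNK_LEN = 1024): left_len(2049) computes
// full_chunks = 2, so the left subtree gets 2 * CHUNK_LEN = 2048 bytes and
// the right subtree gets the remaining 1 byte.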
fn left_len(content_len: usize) -> usize {
    debug_assert!(content_len > CHUNK_LEN);
    // Subtract 1 to reserve at least one byte for the right side.
    let full_chunks = (content_len - 1) / CHUNK_LEN;
    largest_power_of_two_leq(full_chunks) * CHUNK_LEN
}

// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
// on a single thread. Write out the chunk chaining values and return the
// number of chunks hashed. These chunks are never the root and never empty;
// those cases use a different codepath.
fn compress_chunks_parallel(
    input: &[u8],
    key: &CVWords,
    chunk_counter: u64,
    flags: u8,
    platform: Platform,
    out: &mut [u8],
) -> usize {
    debug_assert!(!input.is_empty(), "empty chunks below the root");
    debug_assert!(input.len() <= MAX_SIMD_DEGREE * CHUNK_LEN);

    let mut chunks_exact = input.chunks_exact(CHUNK_LEN);
    let mut chunks_array = ArrayVec::<&[u8; CHUNK_LEN], MAX_SIMD_DEGREE>::new();
    for chunk in &mut chunks_exact {
        chunks_array.push(array_ref!(chunk, 0, CHUNK_LEN));
    }
    platform.hash_many(
        &chunks_array,
        key,
        chunk_counter,
        IncrementCounter::Yes,
        flags,
        CHUNK_START,
        CHUNK_END,
        out,
    );

    // Hash the remaining partial chunk, if there is one. Note that the empty
    // chunk (meaning the empty message) is a different codepath.
    let chunks_so_far = chunks_array.len();
    if !chunks_exact.remainder().is_empty() {
        let counter = chunk_counter + chunks_so_far as u64;
        let mut chunk_state = ChunkState::new(key, counter, flags, platform);
        chunk_state.update(chunks_exact.remainder());
        *array_mut_ref!(out, chunks_so_far * OUT_LEN, OUT_LEN) =
            chunk_state.output().chaining_value();
        chunks_so_far + 1
    } else {
        chunks_so_far
    }
}

// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
// on a single thread. Write out the parent chaining values and return the
// number of parents hashed. (If there's an odd input chaining value left over,
// return it as an additional output.) These parents are never the root and
// never empty; those cases use a different codepath.
fn compress_parents_parallel(
    child_chaining_values: &[u8],
    key: &CVWords,
    flags: u8,
    platform: Platform,
    out: &mut [u8],
) -> usize {
    debug_assert_eq!(child_chaining_values.len() % OUT_LEN, 0, "wacky hash bytes");
    let num_children = child_chaining_values.len() / OUT_LEN;
    debug_assert!(num_children >= 2, "not enough children");
    debug_assert!(num_children <= 2 * MAX_SIMD_DEGREE_OR_2, "too many");

    let mut parents_exact = child_chaining_values.chunks_exact(BLOCK_LEN);
    // Use MAX_SIMD_DEGREE_OR_2 rather than MAX_SIMD_DEGREE here, because of
    // the requirements of compress_subtree_wide().
    let mut parents_array = ArrayVec::<&[u8; BLOCK_LEN], MAX_SIMD_DEGREE_OR_2>::new();
    for parent in &mut parents_exact {
        parents_array.push(array_ref!(parent, 0, BLOCK_LEN));
    }
    platform.hash_many(
        &parents_array,
        key,
        0, // Parents always use counter 0.
        IncrementCounter::No,
        flags | PARENT,
        0, // Parents have no start flags.
        0, // Parents have no end flags.
        out,
    );

    // If there's an odd child left over, it becomes an output.
    let parents_so_far = parents_array.len();
    if !parents_exact.remainder().is_empty() {
        out[parents_so_far * OUT_LEN..][..OUT_LEN].copy_from_slice(parents_exact.remainder());
        parents_so_far + 1
    } else {
        parents_so_far
    }
}

// The wide helper function returns (writes out) an array of chaining values
// and returns the length of that array. The number of chaining values returned
// is the dynamically detected SIMD degree, at most MAX_SIMD_DEGREE. Or fewer,
// if the input is shorter than that many chunks. The reason for maintaining a
// wide array of chaining values going back up the tree, is to allow the
// implementation to hash as many parents in parallel as possible.
//
// As a special case when the SIMD degree is 1, this function will still return
// at least 2 outputs. This guarantees that this function doesn't perform the
// root compression. (If it did, it would use the wrong flags, and also we
// wouldn't be able to implement extendable output.) Note that this function is
// not used when the whole input is only 1 chunk long; that's a different
// codepath.
//
// Why not just have the caller split the input on the first update(), instead
// of implementing this special rule? Because we don't want to limit SIMD or
// multithreading parallelism for that update().
fn compress_subtree_wide<J: join::Join>(
    input: &[u8],
    key: &CVWords,
    chunk_counter: u64,
    flags: u8,
    platform: Platform,
    out: &mut [u8],
) -> usize {
    // Note that the single chunk case does *not* bump the SIMD degree up to 2
    // when it is 1. This allows Rayon the option of multithreading even the
    // 2-chunk case, which can help performance on smaller platforms.
    if input.len() <= platform.simd_degree() * CHUNK_LEN {
        return compress_chunks_parallel(input, key, chunk_counter, flags, platform, out);
    }

    // With more than simd_degree chunks, we need to recurse. Start by dividing
    // the input into left and right subtrees. (Note that this is only optimal
    // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
    // of 3 or something, we'll need a more complicated strategy.)
    debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2");
    let (left, right) = input.split_at(left_len(input.len()));
    let right_chunk_counter = chunk_counter + (left.len() / CHUNK_LEN) as u64;

    // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
    // account for the special case of returning 2 outputs when the SIMD degree
    // is 1.
    let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
    let degree = if left.len() == CHUNK_LEN {
        // The "simd_degree=1 and we're at the leaf nodes" case.
        debug_assert_eq!(platform.simd_degree(), 1);
        1
    } else {
        cmp::max(platform.simd_degree(), 2)
    };
    let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN);

    // Recurse! For update_rayon(), this is where we take advantage of RayonJoin and use multiple
    // threads.
    let (left_n, right_n) = J::join(
        || compress_subtree_wide::<J>(left, key, chunk_counter, flags, platform, left_out),
        || compress_subtree_wide::<J>(right, key, right_chunk_counter, flags, platform, right_out),
    );

    // The special case again. If simd_degree=1, then we'll have left_n=1 and
    // right_n=1. Rather than compressing them into a single output, return
    // them directly, to make sure we always have at least two outputs.
    debug_assert_eq!(left_n, degree);
    debug_assert!(right_n >= 1 && right_n <= left_n);
    if left_n == 1 {
        out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]);
        return 2;
    }

    // Otherwise, do one layer of parent node compression.
    let num_children = left_n + right_n;
    compress_parents_parallel(
        &cv_array[..num_children * OUT_LEN],
        key,
        flags,
        platform,
        out,
    )
}

// Hash a subtree with compress_subtree_wide(), and then condense the resulting
// list of chaining values down to a single parent node. Don't compress that
// last parent node, however. Instead, return its message bytes (the
// concatenated chaining values of its children). This is necessary when the
// first call to update() supplies a complete subtree, because the topmost
// parent node of that subtree could end up being the root. It's also necessary
// for extended output in the general case.
//
// As with compress_subtree_wide(), this function is not used on inputs of 1
// chunk or less. That's a different codepath.
fn compress_subtree_to_parent_node<J: join::Join>(
    input: &[u8],
    key: &CVWords,
    chunk_counter: u64,
    flags: u8,
    platform: Platform,
) -> [u8; BLOCK_LEN] {
    debug_assert!(input.len() > CHUNK_LEN);
    let mut cv_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
    let mut num_cvs =
        compress_subtree_wide::<J>(input, &key, chunk_counter, flags, platform, &mut cv_array);
    debug_assert!(num_cvs >= 2);

    // If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
    // compress_subtree_wide() returns more than 2 chaining values. Condense
    // them into 2 by forming parent nodes repeatedly.
    let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2];
    while num_cvs > 2 {
        let cv_slice = &cv_array[..num_cvs * OUT_LEN];
        num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array);
        cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]);
    }
    *array_ref!(cv_array, 0, 2 * OUT_LEN)
}

// Hash a complete input all at once. Unlike compress_subtree_wide() and
// compress_subtree_to_parent_node(), this function handles the 1 chunk case.
fn hash_all_at_once<J: join::Join>(input: &[u8], key: &CVWords, flags: u8) -> Output {
    let platform = Platform::detect();

    // If the whole subtree is one chunk, hash it directly with a ChunkState.
    if input.len() <= CHUNK_LEN {
        return ChunkState::new(key, 0, flags, platform)
            .update(input)
            .output();
    }

    // Otherwise construct an Output object from the parent node returned by
    // compress_subtree_to_parent_node().
    Output {
        input_chaining_value: *key,
        block: compress_subtree_to_parent_node::<J>(input, key, 0, flags, platform),
        block_len: BLOCK_LEN as u8,
        counter: 0,
        flags: flags | PARENT,
        platform,
    }
}

/// The default hash function.
///
/// For an incremental version that accepts multiple writes, see [`Hasher::new`],
/// [`Hasher::update`], and [`Hasher::finalize`]. These two lines are equivalent:
///
/// ```
/// let hash = blake3::hash(b"foo");
/// # let hash1 = hash;
///
/// let hash = blake3::Hasher::new().update(b"foo").finalize();
/// # let hash2 = hash;
/// # assert_eq!(hash1, hash2);
/// ```
///
/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`] and
/// [`OutputReader`].
///
/// This function is always single-threaded. For multithreading support, see
/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
pub fn hash(input: &[u8]) -> Hash {
    hash_all_at_once::<join::SerialJoin>(input, IV, 0).root_hash()
}

/// The keyed hash function.
///
/// This is suitable for use as a message authentication code, for example to
/// replace an HMAC instance. In that use case, the constant-time equality
/// checking provided by [`Hash`](struct.Hash.html) is almost always a security
/// requirement, and callers need to be careful not to compare MACs as raw
/// bytes.
///
/// For an incremental version that accepts multiple writes, see [`Hasher::new_keyed`],
/// [`Hasher::update`], and [`Hasher::finalize`]. These two lines are equivalent:
///
/// ```
/// # const KEY: &[u8; 32] = &[0; 32];
/// let mac = blake3::keyed_hash(KEY, b"foo");
/// # let mac1 = mac;
///
/// let mac = blake3::Hasher::new_keyed(KEY).update(b"foo").finalize();
/// # let mac2 = mac;
/// # assert_eq!(mac1, mac2);
/// ```
///
/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`], and [`OutputReader`].
///
/// This function is always single-threaded. For multithreading support, see
/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
pub fn keyed_hash(key: &[u8; KEY_LEN], input: &[u8]) -> Hash {
    let key_words = platform::words_from_le_bytes_32(key);
    hash_all_at_once::<join::SerialJoin>(input, &key_words, KEYED_HASH).root_hash()
}

/// The key derivation function.
///
/// Given cryptographic key material of any length and a context string of any
/// length, this function outputs a 32-byte derived subkey. **The context string
/// should be hardcoded, globally unique, and application-specific.** A good
/// default format for such strings is `"[application] [commit timestamp]
/// [purpose]"`, e.g., `"example.com 2019-12-25 16:18:03 session tokens v1"`.
///
/// Key derivation is important when you want to use the same key in multiple
/// algorithms or use cases. Using the same key with different cryptographic
/// algorithms is generally forbidden, and deriving a separate subkey for each
/// use case protects you from bad interactions. Derived keys also mitigate the
/// damage from one part of your application accidentally leaking its key.
///
/// As a rare exception to that general rule, however, it is possible to use
/// `derive_key` itself with key material that you are already using with
/// another algorithm. You might need to do this if you're adding features to
/// an existing application, which does not yet use key derivation internally.
/// However, you still must not share key material with algorithms that forbid
/// key reuse entirely, like a one-time pad. For more on this, see sections 6.2
/// and 7.8 of the [BLAKE3 paper](https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf).
///
/// Note that BLAKE3 is not a password hash, and **`derive_key` should never be
/// used with passwords.** Instead, use a dedicated password hash like
/// [Argon2]. Password hashes are entirely different from generic hash
/// functions, with opposite design requirements.
///
/// For an incremental version that accepts multiple writes, see [`Hasher::new_derive_key`],
/// [`Hasher::update`], and [`Hasher::finalize`]. These two statements are equivalent:
///
/// ```
/// # const CONTEXT: &str = "example.com 2019-12-25 16:18:03 session tokens v1";
/// let key = blake3::derive_key(CONTEXT, b"key material, not a password");
/// # let key1 = key;
///
/// let key: [u8; 32] = blake3::Hasher::new_derive_key(CONTEXT)
///     .update(b"key material, not a password")
///     .finalize()
///     .into();
/// # let key2 = key;
/// # assert_eq!(key1, key2);
/// ```
///
/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`], and [`OutputReader`].
///
/// This function is always single-threaded. For multithreading support, see
/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
///
/// [Argon2]: https://en.wikipedia.org/wiki/Argon2
pub fn derive_key(context: &str, key_material: &[u8]) -> [u8; OUT_LEN] {
    let context_key =
        hash_all_at_once::<join::SerialJoin>(context.as_bytes(), IV, DERIVE_KEY_CONTEXT)
            .root_hash();
    let context_key_words = platform::words_from_le_bytes_32(context_key.as_bytes());
    hash_all_at_once::<join::SerialJoin>(key_material, &context_key_words, DERIVE_KEY_MATERIAL)
        .root_hash()
        .0
}

fn parent_node_output(
    left_child: &CVBytes,
    right_child: &CVBytes,
    key: &CVWords,
    flags: u8,
    platform: Platform,
) -> Output {
    let mut block = [0; BLOCK_LEN];
    block[..32].copy_from_slice(left_child);
    block[32..].copy_from_slice(right_child);
    Output {
        input_chaining_value: *key,
        block,
        block_len: BLOCK_LEN as u8,
        counter: 0,
        flags: flags | PARENT,
        platform,
    }
}

/// An incremental hash state that can accept any number of writes.
///
/// The `rayon` and `mmap` Cargo features enable additional methods on this
/// type related to multithreading and memory-mapped IO.
///
/// When the `traits-preview` Cargo feature is enabled, this type implements
/// several commonly used traits from the
/// [`digest`](https://crates.io/crates/digest) crate. However, those
/// traits aren't stable, and they're expected to change in incompatible ways
/// before that crate reaches 1.0. For that reason, this crate makes no SemVer
/// guarantees for this feature, and callers who use it should expect breaking
/// changes between patch versions.
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// // Hash an input incrementally.
/// let mut hasher = blake3::Hasher::new();
/// hasher.update(b"foo");
/// hasher.update(b"bar");
/// hasher.update(b"baz");
/// assert_eq!(hasher.finalize(), blake3::hash(b"foobarbaz"));
///
/// // Extended output. OutputReader also implements Read and Seek.
/// # #[cfg(feature = "std")] {
/// let mut output = [0; 1000];
/// let mut output_reader = hasher.finalize_xof();
/// output_reader.fill(&mut output);
/// assert_eq!(&output[..32], blake3::hash(b"foobarbaz").as_bytes());
/// # }
/// # Ok(())
/// # }
/// ```
#[derive(Clone)]
pub struct Hasher {
    key: CVWords,
    chunk_state: ChunkState,
    // The stack size is MAX_DEPTH + 1 because we do lazy merging. For example,
    // with 7 chunks, we have 3 entries in the stack. Adding an 8th chunk
    // requires a 4th entry, rather than merging everything down to 1, because
    // we don't know whether more input is coming. This is different from how
    // the reference implementation does things.
    cv_stack: ArrayVec<CVBytes, { MAX_DEPTH + 1 }>,
}

impl Hasher {
    fn new_internal(key: &CVWords, flags: u8) -> Self {
        Self {
            key: *key,
            chunk_state: ChunkState::new(key, 0, flags, Platform::detect()),
            cv_stack: ArrayVec::new(),
        }
    }

    /// Construct a new `Hasher` for the regular hash function.
    pub fn new() -> Self {
        Self::new_internal(IV, 0)
    }

    /// Construct a new `Hasher` for the keyed hash function. See
    /// [`keyed_hash`].
    ///
    /// [`keyed_hash`]: fn.keyed_hash.html
    pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self {
        let key_words = platform::words_from_le_bytes_32(key);
        Self::new_internal(&key_words, KEYED_HASH)
    }

    /// Construct a new `Hasher` for the key derivation function. See
    /// [`derive_key`]. The context string should be hardcoded, globally
    /// unique, and application-specific.
    ///
    /// [`derive_key`]: fn.derive_key.html
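    ///
    /// A minimal sketch, reusing the example context string from
    /// [`derive_key`]:
    ///
    /// ```
    /// const CONTEXT: &str = "example.com 2019-12-25 16:18:03 session tokens v1";
    /// let mut hasher = blake3::Hasher::new_derive_key(CONTEXT);
    /// hasher.update(b"key material, not a password");
    /// let derived_key: [u8; 32] = hasher.finalize().into();
    /// assert_eq!(
    ///     derived_key,
    ///     blake3::derive_key(CONTEXT, b"key material, not a password"),
    /// );
    /// ```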
    pub fn new_derive_key(context: &str) -> Self {
        let context_key =
            hash_all_at_once::<join::SerialJoin>(context.as_bytes(), IV, DERIVE_KEY_CONTEXT)
                .root_hash();
        let context_key_words = platform::words_from_le_bytes_32(context_key.as_bytes());
        Self::new_internal(&context_key_words, DERIVE_KEY_MATERIAL)
    }

    /// Reset the `Hasher` to its initial state.
    ///
    /// This is functionally the same as overwriting the `Hasher` with a new
    /// one, using the same key or context string if any.
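    ///
    /// A small sketch of that equivalence (the input bytes are arbitrary):
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"foo");
    /// hasher.reset();
    /// hasher.update(b"bar");
    /// assert_eq!(hasher.finalize(), blake3::hash(b"bar"));
    /// ```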
    pub fn reset(&mut self) -> &mut Self {
        self.chunk_state = ChunkState::new(
            &self.key,
            0,
            self.chunk_state.flags,
            self.chunk_state.platform,
        );
        self.cv_stack.clear();
        self
    }

    // As described in push_cv() below, we do "lazy merging", delaying merges
    // until right before the next CV is about to be added. This is different
    // from the reference implementation. Another difference is that we aren't
    // always merging 1 chunk at a time. Instead, each CV might represent any
    // power-of-two number of chunks, as long as the smaller-above-larger stack
    // order is maintained. Instead of the "count the trailing 0-bits"
    // algorithm described in the spec, we use a "count the total number of
    // 1-bits" variant that doesn't require us to retain the subtree size of
    // the CV on top of the stack. The principle is the same: each CV that
    // should remain in the stack is represented by a 1-bit in the total number
    // of chunks (or bytes) so far.
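    // For example (illustrative): after 6 total chunks (0b110), the stack
    // holds two CVs, one for a 4-chunk subtree and one for a 2-chunk subtree,
    // matching the two 1-bits in the total.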
    fn merge_cv_stack(&mut self, total_len: u64) {
        let post_merge_stack_len = total_len.count_ones() as usize;
        while self.cv_stack.len() > post_merge_stack_len {
            let right_child = self.cv_stack.pop().unwrap();
            let left_child = self.cv_stack.pop().unwrap();
            let parent_output = parent_node_output(
                &left_child,
                &right_child,
                &self.key,
                self.chunk_state.flags,
                self.chunk_state.platform,
            );
            self.cv_stack.push(parent_output.chaining_value());
        }
    }

    // In reference_impl.rs, we merge the new CV with existing CVs from the
    // stack before pushing it. We can do that because we know more input is
    // coming, so we know none of the merges are root.
    //
    // This setting is different. We want to feed as much input as possible to
    // compress_subtree_wide(), without setting aside anything for the
    // chunk_state. If the user gives us 64 KiB, we want to parallelize over
    // all 64 KiB at once as a single subtree, if at all possible.
    //
    // This leads to two problems:
    // 1) This 64 KiB input might be the only call that ever gets made to
    //    update. In this case, the root node of the 64 KiB subtree would be
    //    the root node of the whole tree, and it would need to be ROOT
    //    finalized. We can't compress it until we know.
    // 2) This 64 KiB input might complete a larger tree, whose root node is
    //    similarly going to be the root of the whole tree. For example,
    //    maybe we have 192 KiB (that is, 128 + 64) hashed so far. We can't
1165    //    compress the node at the root of the 256 KiB subtree until we know
1166    //    how to finalize it.
1167    //
1168    // The second problem is solved with "lazy merging". That is, when we're
1169    // about to add a CV to the stack, we don't merge it with anything first,
1170    // as the reference impl does. Instead we do merges using the *previous* CV
1171    // that was added, which is sitting on top of the stack, and we put the new
1172    // CV (unmerged) on top of the stack afterwards. This guarantees that we
1173    // never merge the root node until finalize().
1174    //
1175    // Solving the first problem requires an additional tool,
1176    // compress_subtree_to_parent_node(). That function always returns the top
1177    // *two* chaining values of the subtree it's compressing. We then do lazy
1178    // merging with each of them separately, so that the second CV will always
1179    // remain unmerged. (That also helps us support extendable output when
1180    // we're hashing an input all-at-once.)
1181    fn push_cv(&mut self, new_cv: &CVBytes, chunk_counter: u64) {
1182        self.merge_cv_stack(chunk_counter);
1183        self.cv_stack.push(*new_cv);
1184    }
1185
1186    /// Add input bytes to the hash state. You can call this any number of times.
1187    ///
1188    /// This method is always single-threaded. For multithreading support, see
1189    /// [`update_rayon`](#method.update_rayon) (enabled with the `rayon` Cargo feature).
1190    ///
1191    /// Note that the degree of SIMD parallelism that `update` can use is limited by the size of
1192    /// this input buffer. See [`update_reader`](#method.update_reader).
1193    pub fn update(&mut self, input: &[u8]) -> &mut Self {
1194        self.update_with_join::<join::SerialJoin>(input)
1195    }
1196
1197    fn update_with_join<J: join::Join>(&mut self, mut input: &[u8]) -> &mut Self {
1198        // If we have some partial chunk bytes in the internal chunk_state, we
1199        // need to finish that chunk first.
1200        if self.chunk_state.len() > 0 {
1201            let want = CHUNK_LEN - self.chunk_state.len();
1202            let take = cmp::min(want, input.len());
1203            self.chunk_state.update(&input[..take]);
1204            input = &input[take..];
1205            if !input.is_empty() {
1206                // We've filled the current chunk, and there's more input
1207                // coming, so we know it's not the root and we can finalize it.
1208                // Then we'll proceed to hashing whole chunks below.
1209                debug_assert_eq!(self.chunk_state.len(), CHUNK_LEN);
1210                let chunk_cv = self.chunk_state.output().chaining_value();
1211                self.push_cv(&chunk_cv, self.chunk_state.chunk_counter);
1212                self.chunk_state = ChunkState::new(
1213                    &self.key,
1214                    self.chunk_state.chunk_counter + 1,
1215                    self.chunk_state.flags,
1216                    self.chunk_state.platform,
1217                );
1218            } else {
1219                return self;
1220            }
1221        }
1222
1223        // Now the chunk_state is clear, and we have more input. If there's
1224        // more than a single chunk (so, definitely not the root chunk), hash
1225        // the largest whole subtree we can, with the full benefits of SIMD and
1226        // multithreading parallelism. Two restrictions:
1227        // - The subtree has to be a power-of-2 number of chunks. Only subtrees
1228        //   along the right edge can be incomplete, and we don't know where
1229        //   the right edge is going to be until we get to finalize().
1230        // - The subtree must evenly divide the total number of chunks up until
1231        //   this point (if total is not 0). If the current incomplete subtree
1232        //   is only waiting for 1 more chunk, we can't hash a subtree of 4
1233        //   chunks. We have to complete the current subtree first.
1234        // Because we might need to break up the input to form powers of 2, or
1235        // to evenly divide what we already have, this part runs in a loop.
1236        while input.len() > CHUNK_LEN {
1237            debug_assert_eq!(self.chunk_state.len(), 0, "no partial chunk data");
1238            debug_assert_eq!(CHUNK_LEN.count_ones(), 1, "power of 2 chunk len");
1239            let mut subtree_len = largest_power_of_two_leq(input.len());
1240            let count_so_far = self.chunk_state.chunk_counter * CHUNK_LEN as u64;
1241            // Shrink the subtree_len until it evenly divides the count so far.
1242            // We know that subtree_len itself is a power of 2, so we can use a
1243            // bitmasking trick instead of an actual remainder operation. (Note
1244            // that if the caller consistently passes power-of-2 inputs of the
1245            // same size, as is hopefully typical, this loop condition will
1246            // always fail, and subtree_len will always be the full length of
1247            // the input.)
1248            //
1249            // An aside: We don't have to shrink subtree_len quite this much.
1250            // For example, if count_so_far is 1, we could pass 2 chunks to
1251            // compress_subtree_to_parent_node. Since we'll get 2 CVs back,
1252            // we'll still get the right answer in the end, and we might get to
1253            // use 2-way SIMD parallelism. The problem with this optimization,
1254            // is that it gets us stuck always hashing 2 chunks. The total
1255            // number of chunks will remain odd, and we'll never graduate to
1256            // higher degrees of parallelism. See
1257            // https://github.com/BLAKE3-team/BLAKE3/issues/69.
1258            while (subtree_len - 1) as u64 & count_so_far != 0 {
1259                subtree_len /= 2;
1260            }
1261            // The shrunken subtree_len might now be 1 chunk long. If so, hash
1262            // that one chunk by itself. Otherwise, compress the subtree into a
1263            // pair of CVs.
1264            let subtree_chunks = (subtree_len / CHUNK_LEN) as u64;
1265            if subtree_len <= CHUNK_LEN {
1266                debug_assert_eq!(subtree_len, CHUNK_LEN);
1267                self.push_cv(
1268                    &ChunkState::new(
1269                        &self.key,
1270                        self.chunk_state.chunk_counter,
1271                        self.chunk_state.flags,
1272                        self.chunk_state.platform,
1273                    )
1274                    .update(&input[..subtree_len])
1275                    .output()
1276                    .chaining_value(),
1277                    self.chunk_state.chunk_counter,
1278                );
1279            } else {
1280                // This is the high-performance happy path, though getting here
1281                // depends on the caller giving us a long enough input.
1282                let cv_pair = compress_subtree_to_parent_node::<J>(
1283                    &input[..subtree_len],
1284                    &self.key,
1285                    self.chunk_state.chunk_counter,
1286                    self.chunk_state.flags,
1287                    self.chunk_state.platform,
1288                );
1289                let left_cv = array_ref!(cv_pair, 0, 32);
1290                let right_cv = array_ref!(cv_pair, 32, 32);
1291                // Push the two CVs we received into the CV stack in order. Because
1292                // the stack merges lazily, this guarantees we aren't merging the
1293                // root.
1294                self.push_cv(left_cv, self.chunk_state.chunk_counter);
1295                self.push_cv(
1296                    right_cv,
1297                    self.chunk_state.chunk_counter + (subtree_chunks / 2),
1298                );
1299            }
1300            self.chunk_state.chunk_counter += subtree_chunks;
1301            input = &input[subtree_len..];
1302        }
1303
1304        // What remains is 1 chunk or less. Add it to the chunk state.
1305        debug_assert!(input.len() <= CHUNK_LEN);
1306        if !input.is_empty() {
1307            self.chunk_state.update(input);
1308            // Having added some input to the chunk_state, we know what's in
1309            // the CV stack won't become the root node, and we can do an extra
1310            // merge. This simplifies finalize().
1311            self.merge_cv_stack(self.chunk_state.chunk_counter);
1312        }
1313
1314        self
1315    }

    fn final_output(&self) -> Output {
        // If the current chunk is the only chunk, that makes it the root node
        // also. Convert it directly into an Output. Otherwise, we need to
        // merge subtrees below.
        if self.cv_stack.is_empty() {
            debug_assert_eq!(self.chunk_state.chunk_counter, 0);
            return self.chunk_state.output();
        }

        // If there are any bytes in the ChunkState, finalize that chunk and
        // merge its CV with everything in the CV stack. In that case, the work
        // we did at the end of update() above guarantees that the stack
        // doesn't contain any unmerged subtrees that need to be merged first.
        // (This is important, because if there were two chunk hashes sitting
        // on top of the stack, they would need to merge with each other, and
        // merging a new chunk hash into them would be incorrect.)
        //
        // If there are no bytes in the ChunkState, we'll merge what's already
        // in the stack. In this case it's fine if there are unmerged chunks on
        // top, because we'll merge them with each other. Note that the case of
        // the empty chunk is taken care of above.
        let mut output: Output;
        let mut num_cvs_remaining = self.cv_stack.len();
        if self.chunk_state.len() > 0 {
            debug_assert_eq!(
                self.cv_stack.len(),
                self.chunk_state.chunk_counter.count_ones() as usize,
                "cv stack does not need a merge"
            );
            output = self.chunk_state.output();
        } else {
            debug_assert!(self.cv_stack.len() >= 2);
            output = parent_node_output(
                &self.cv_stack[num_cvs_remaining - 2],
                &self.cv_stack[num_cvs_remaining - 1],
                &self.key,
                self.chunk_state.flags,
                self.chunk_state.platform,
            );
            num_cvs_remaining -= 2;
        }
        while num_cvs_remaining > 0 {
            output = parent_node_output(
                &self.cv_stack[num_cvs_remaining - 1],
                &output.chaining_value(),
                &self.key,
                self.chunk_state.flags,
                self.chunk_state.platform,
            );
            num_cvs_remaining -= 1;
        }
        output
    }

    /// Finalize the hash state and return the [`Hash`](struct.Hash.html) of
    /// the input.
    ///
    /// This method is idempotent. Calling it twice will give the same result.
    /// You can also add more input and finalize again.
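    ///
    /// # Example
    ///
    /// A minimal sketch of the idempotence described above:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"foo");
    /// // Finalize twice; the results are identical.
    /// let hash1 = hasher.finalize();
    /// assert_eq!(hash1, hasher.finalize());
    /// // Add more input and finalize again.
    /// hasher.update(b"bar");
    /// assert_eq!(hasher.finalize(), blake3::hash(b"foobar"));
    /// ```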
    pub fn finalize(&self) -> Hash {
        self.final_output().root_hash()
    }

    /// Finalize the hash state and return an [`OutputReader`], which can
    /// supply any number of output bytes.
    ///
    /// This method is idempotent. Calling it twice will give the same result.
    /// You can also add more input and finalize again.
    ///
    /// [`OutputReader`]: struct.OutputReader.html
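    ///
    /// # Example
    ///
    /// A short sketch showing that extended output begins with the default
    /// 32-byte hash:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"hello");
    /// let mut output = [0u8; 64];
    /// hasher.finalize_xof().fill(&mut output);
    /// assert_eq!(hasher.finalize(), output[..32]);
    /// ```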
    pub fn finalize_xof(&self) -> OutputReader {
        OutputReader::new(self.final_output())
    }

    /// Return the total number of bytes hashed so far.
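    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// assert_eq!(hasher.count(), 0);
    /// hasher.update(&[0u8; 3000]);
    /// assert_eq!(hasher.count(), 3000);
    /// ```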
    pub fn count(&self) -> u64 {
        self.chunk_state.chunk_counter * CHUNK_LEN as u64 + self.chunk_state.len() as u64
    }

    /// As [`update`](Hasher::update), but reading from a
    /// [`std::io::Read`](https://doc.rust-lang.org/std/io/trait.Read.html) implementation.
    ///
    /// [`Hasher`] implements
    /// [`std::io::Write`](https://doc.rust-lang.org/std/io/trait.Write.html), so it's possible to
    /// use [`std::io::copy`](https://doc.rust-lang.org/std/io/fn.copy.html) to update a [`Hasher`]
    /// from any reader. Unfortunately, this standard approach can limit performance, because
    /// `copy` currently uses an internal 8 KiB buffer that isn't big enough to take advantage of
    /// all SIMD instruction sets. (In particular, [AVX-512](https://en.wikipedia.org/wiki/AVX-512)
    /// needs a 16 KiB buffer.) `update_reader` avoids this performance problem and is slightly
    /// more convenient.
    ///
    /// The internal buffer size this method uses may change at any time, and it may be different
    /// for different targets. The only guarantee is that it will be large enough for all of this
    /// crate's SIMD implementations on the current platform.
    ///
    /// The most common implementer of
    /// [`std::io::Read`](https://doc.rust-lang.org/std/io/trait.Read.html) might be
    /// [`std::fs::File`](https://doc.rust-lang.org/std/fs/struct.File.html), but note that memory
    /// mapping can be faster than this method for hashing large files. See
    /// [`update_mmap`](Hasher::update_mmap) and [`update_mmap_rayon`](Hasher::update_mmap_rayon),
    /// which require the `mmap` and (for the latter) `rayon` Cargo features.
    ///
    /// This method requires the `std` Cargo feature, which is enabled by default.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use std::io;
    /// # fn main() -> io::Result<()> {
    /// // Hash standard input.
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_reader(std::io::stdin().lock())?;
    /// println!("{}", hasher.finalize());
    /// # Ok(())
    /// # }
    /// ```
    #[cfg(feature = "std")]
    pub fn update_reader(&mut self, reader: impl std::io::Read) -> std::io::Result<&mut Self> {
        io::copy_wide(reader, self)?;
        Ok(self)
    }

    /// As [`update`](Hasher::update), but using Rayon-based multithreading
    /// internally.
    ///
    /// This method is gated by the `rayon` Cargo feature, which is disabled by
    /// default but enabled on [docs.rs](https://docs.rs).
    ///
    /// To get any performance benefit from multithreading, the input buffer
    /// needs to be large. As a rule of thumb on x86_64, `update_rayon` is
    /// _slower_ than `update` for inputs under 128 KiB. That threshold varies
    /// quite a lot across different processors, and it's important to benchmark
    /// your specific use case. See also the performance warning associated with
    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon).
    ///
    /// If you already have a large buffer in memory, and you want to hash it
    /// with multiple threads, this method is a good option. However, reading a
    /// file into memory just to call this method can be a performance mistake,
    /// both because it requires lots of memory and because single-threaded
    /// reads can be slow. For hashing whole files, see
    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon), which is gated by both
    /// the `rayon` and `mmap` Cargo features.
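    ///
    /// # Example
    ///
    /// A minimal sketch, hashing a large in-memory buffer. The result is the
    /// same as single-threaded hashing.
    ///
    /// ```
    /// let big_input = vec![0u8; 1_000_000];
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_rayon(&big_input);
    /// assert_eq!(hasher.finalize(), blake3::hash(&big_input));
    /// ```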
    #[cfg(feature = "rayon")]
    pub fn update_rayon(&mut self, input: &[u8]) -> &mut Self {
        self.update_with_join::<join::RayonJoin>(input)
    }

    /// As [`update`](Hasher::update), but reading the contents of a file using memory mapping.
    ///
    /// Not all files can be memory mapped, and memory mapping small files can be slower than
    /// reading them the usual way. In those cases, this method will fall back to standard file IO.
    /// The heuristic for whether to use memory mapping is currently very simple (file size >=
    /// 16 KiB), and it might change at any time.
    ///
    /// Like [`update`](Hasher::update), this method is single-threaded. In this author's
    /// experience, memory mapping improves single-threaded performance by ~10% for large files
    /// that are already in cache. This probably varies between platforms, and as always it's a
    /// good idea to benchmark your own use case. In comparison, the multithreaded
    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon) method can have a much larger impact on
    /// performance.
    ///
    /// There's a correctness reason that this method takes
    /// [`Path`](https://doc.rust-lang.org/stable/std/path/struct.Path.html) instead of
    /// [`File`](https://doc.rust-lang.org/std/fs/struct.File.html): reading from a memory-mapped
    /// file ignores the seek position of the original file handle (it neither respects the current
    /// position nor updates the position). This difference in behavior would've caused
    /// `update_mmap` and [`update_reader`](Hasher::update_reader) to give different answers and
    /// have different side effects in some cases. Taking a
    /// [`Path`](https://doc.rust-lang.org/stable/std/path/struct.Path.html) avoids this problem by
    /// making it clear that a new [`File`](https://doc.rust-lang.org/std/fs/struct.File.html) is
    /// opened internally.
    ///
    /// This method requires the `mmap` Cargo feature, which is disabled by default but enabled on
    /// [docs.rs](https://docs.rs).
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use std::io;
    /// # use std::path::Path;
    /// # fn main() -> io::Result<()> {
    /// let path = Path::new("file.dat");
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_mmap(path)?;
    /// println!("{}", hasher.finalize());
    /// # Ok(())
    /// # }
    /// ```
    #[cfg(feature = "mmap")]
    pub fn update_mmap(&mut self, path: impl AsRef<std::path::Path>) -> std::io::Result<&mut Self> {
        let file = std::fs::File::open(path.as_ref())?;
        if let Some(mmap) = io::maybe_mmap_file(&file)? {
            self.update(&mmap);
        } else {
            io::copy_wide(&file, self)?;
        }
        Ok(self)
    }

    /// As [`update_rayon`](Hasher::update_rayon), but reading the contents of a file using
    /// memory mapping. This is the default behavior of `b3sum`.
    ///
    /// For large files that are likely to be in cache, this can be much faster than
    /// single-threaded hashing. When benchmarks report that BLAKE3 is 10x or 20x faster than other
    /// cryptographic hashes, this is usually what they're measuring. However...
    ///
    /// **Performance Warning:** There are cases where multithreading hurts performance. The worst
    /// case is [a large file on a spinning disk](https://github.com/BLAKE3-team/BLAKE3/issues/31),
    /// where simultaneous reads from multiple threads can cause "thrashing" (i.e. the disk spends
    /// more time seeking around than reading data). Windows tends to be somewhat worse about this,
    /// in part because it's less likely than Linux to keep very large files in cache. More
    /// generally, if your CPU cores are already busy, then multithreading will add overhead
    /// without improving performance. If your code runs in different environments that you don't
    /// control and can't measure, then unfortunately there's no one-size-fits-all answer for
    /// whether multithreading is a good idea.
    ///
    /// The memory mapping behavior of this function is the same as
    /// [`update_mmap`](Hasher::update_mmap), and the heuristic for when to fall back to standard
    /// file IO might change at any time.
    ///
    /// This method requires both the `mmap` and `rayon` Cargo features, which are disabled by
    /// default but enabled on [docs.rs](https://docs.rs).
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use std::io;
    /// # use std::path::Path;
    /// # fn main() -> io::Result<()> {
    /// # #[cfg(feature = "rayon")]
    /// # {
    /// let path = Path::new("big_file.dat");
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_mmap_rayon(path)?;
    /// println!("{}", hasher.finalize());
    /// # }
    /// # Ok(())
    /// # }
    /// ```
    #[cfg(feature = "mmap")]
    #[cfg(feature = "rayon")]
    pub fn update_mmap_rayon(
        &mut self,
        path: impl AsRef<std::path::Path>,
    ) -> std::io::Result<&mut Self> {
        let file = std::fs::File::open(path.as_ref())?;
        if let Some(mmap) = io::maybe_mmap_file(&file)? {
            self.update_rayon(&mmap);
        } else {
            io::copy_wide(&file, self)?;
        }
        Ok(self)
    }
}

// Don't derive(Debug), because the state may be secret.
impl fmt::Debug for Hasher {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Hasher")
            .field("flags", &self.chunk_state.flags)
            .field("platform", &self.chunk_state.platform)
            .finish()
    }
}

impl Default for Hasher {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(feature = "std")]
impl std::io::Write for Hasher {
    /// This is equivalent to [`update`](#method.update).
    #[inline]
    fn write(&mut self, input: &[u8]) -> std::io::Result<usize> {
        self.update(input);
        Ok(input.len())
    }

    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for Hasher {
    fn zeroize(&mut self) {
        // Destructuring to trigger compile error as a reminder to update this impl.
        let Self {
            key,
            chunk_state,
            cv_stack,
        } = self;

        key.zeroize();
        chunk_state.zeroize();
        cv_stack.zeroize();
    }
}

/// An incremental reader for extended output, returned by
/// [`Hasher::finalize_xof`](struct.Hasher.html#method.finalize_xof).
///
/// Shorter BLAKE3 outputs are prefixes of longer ones, and explicitly requesting a short output is
/// equivalent to truncating the default-length output. Note that this is a difference between
/// BLAKE2 and BLAKE3.
///
/// # Security notes
///
/// Outputs shorter than the default length of 32 bytes (256 bits) provide less security. An N-bit
/// BLAKE3 output is intended to provide N bits of first and second preimage resistance and N/2
/// bits of collision resistance, for any N up to 256. Longer outputs don't provide any additional
/// security.
///
/// Avoid relying on the secrecy of the output offset, that is, the number of output bytes read or
/// the arguments to [`seek`](struct.OutputReader.html#method.seek) or
/// [`set_position`](struct.OutputReader.html#method.set_position). [_Block-Cipher-Based Tree
/// Hashing_ by Aldo Gunsing](https://eprint.iacr.org/2022/283) shows that an attacker who knows
/// both the message and the key (if any) can easily determine the offset of an extended output.
/// For comparison, AES-CTR has a similar property: if you know the key, you can decrypt a block
/// from an unknown position in the output stream to recover its block index. Callers with strong
/// secret keys aren't affected in practice, but secret offsets are a [design
/// smell](https://en.wikipedia.org/wiki/Design_smell) in any case.
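///
/// # Example
///
/// A short sketch of the `Read` and `Seek` implementations (`std` feature),
/// which mirror [`fill`](#method.fill) and [`set_position`](#method.set_position):
///
/// ```
/// # #[cfg(feature = "std")] {
/// use std::io::{Read, Seek, SeekFrom};
/// let mut reader = blake3::Hasher::new().finalize_xof();
/// reader.seek(SeekFrom::Start(1000)).unwrap();
/// let mut buf = [0u8; 32];
/// reader.read_exact(&mut buf).unwrap();
/// assert_eq!(reader.position(), 1032);
/// # }
/// ```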
#[derive(Clone)]
pub struct OutputReader {
    inner: Output,
    position_within_block: u8,
}

impl OutputReader {
    fn new(inner: Output) -> Self {
        Self {
            inner,
            position_within_block: 0,
        }
    }

    // This helper function handles both the case where the output buffer is
    // shorter than one block, and the case where our position_within_block is
    // non-zero.
    fn fill_one_block(&mut self, buf: &mut &mut [u8]) {
        let output_block: [u8; BLOCK_LEN] = self.inner.root_output_block();
        let output_bytes = &output_block[self.position_within_block as usize..];
        let take = cmp::min(buf.len(), output_bytes.len());
        buf[..take].copy_from_slice(&output_bytes[..take]);
        self.position_within_block += take as u8;
        if self.position_within_block == BLOCK_LEN as u8 {
            self.inner.counter += 1;
            self.position_within_block = 0;
        }
        // Advance the dest buffer. mem::take() is a borrowck workaround.
        *buf = &mut core::mem::take(buf)[take..];
    }

    /// Fill a buffer with output bytes and advance the position of the
    /// `OutputReader`. This is equivalent to [`Read::read`], except that it
    /// doesn't return a `Result`. Both methods always fill the entire buffer.
    ///
    /// Note that `OutputReader` doesn't buffer output bytes internally, so
    /// calling `fill` repeatedly with a short-length or odd-length slice will
    /// end up performing the same compression multiple times. If you're
    /// reading output in a loop, prefer a slice length that's a multiple of
    /// 64.
    ///
    /// The maximum output size of BLAKE3 is 2<sup>64</sup>-1 bytes. If you try
    /// to extract more than that, for example by seeking near the end and
    /// reading further, the behavior is unspecified.
    ///
    /// [`Read::read`]: #method.read
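    ///
    /// # Example
    ///
    /// A minimal sketch, reading output in 64-byte steps so that each block is
    /// compressed only once:
    ///
    /// ```
    /// let mut reader = blake3::Hasher::new().finalize_xof();
    /// let mut block = [0u8; 64];
    /// for _ in 0..10 {
    ///     reader.fill(&mut block);
    ///     // ... use the 64 bytes in `block` ...
    /// }
    /// assert_eq!(reader.position(), 640);
    /// ```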
    pub fn fill(&mut self, mut buf: &mut [u8]) {
        if buf.is_empty() {
            return;
        }

        // If we're partway through a block, try to get to a block boundary.
        if self.position_within_block != 0 {
            self.fill_one_block(&mut buf);
        }

        let full_blocks = buf.len() / BLOCK_LEN;
        let full_blocks_len = full_blocks * BLOCK_LEN;
        if full_blocks > 0 {
            debug_assert_eq!(0, self.position_within_block);
            self.inner.platform.xof_many(
                &self.inner.input_chaining_value,
                &self.inner.block,
                self.inner.block_len,
                self.inner.counter,
                self.inner.flags | ROOT,
                &mut buf[..full_blocks_len],
            );
            self.inner.counter += full_blocks as u64;
            buf = &mut buf[full_blocks_len..];
        }

        if !buf.is_empty() {
            debug_assert!(buf.len() < BLOCK_LEN);
            self.fill_one_block(&mut buf);
            debug_assert!(buf.is_empty());
        }
    }

    /// Return the current read position in the output stream. This is
    /// equivalent to [`Seek::stream_position`], except that it doesn't return
    /// a `Result`. The position of a new `OutputReader` starts at 0, and each
    /// call to [`fill`] or [`Read::read`] moves the position forward by the
    /// number of bytes read.
    ///
    /// [`Seek::stream_position`]: #method.stream_position
    /// [`fill`]: #method.fill
    /// [`Read::read`]: #method.read
    pub fn position(&self) -> u64 {
        self.inner.counter * BLOCK_LEN as u64 + self.position_within_block as u64
    }

    /// Seek to a new read position in the output stream. This is equivalent to
    /// calling [`Seek::seek`] with [`SeekFrom::Start`], except that it doesn't
    /// return a `Result`.
    ///
    /// [`Seek::seek`]: #method.seek
    /// [`SeekFrom::Start`]: https://doc.rust-lang.org/std/io/enum.SeekFrom.html
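    ///
    /// # Example
    ///
    /// A minimal sketch: seeking to an offset produces the same bytes as
    /// reading and discarding that many bytes.
    ///
    /// ```
    /// let mut reader1 = blake3::Hasher::new().finalize_xof();
    /// let mut reader2 = reader1.clone();
    /// let mut buf1 = [0u8; 200];
    /// reader1.fill(&mut buf1);
    /// reader2.set_position(100);
    /// let mut buf2 = [0u8; 100];
    /// reader2.fill(&mut buf2);
    /// assert_eq!(buf1[100..], buf2[..]);
    /// ```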
    pub fn set_position(&mut self, position: u64) {
        self.position_within_block = (position % BLOCK_LEN as u64) as u8;
        self.inner.counter = position / BLOCK_LEN as u64;
    }
}

// Don't derive(Debug), because the state may be secret.
impl fmt::Debug for OutputReader {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("OutputReader")
            .field("position", &self.position())
            .finish()
    }
}

#[cfg(feature = "std")]
impl std::io::Read for OutputReader {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        self.fill(buf);
        Ok(buf.len())
    }
}

#[cfg(feature = "std")]
impl std::io::Seek for OutputReader {
    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
        let max_position = u64::MAX as i128;
        let target_position: i128 = match pos {
            std::io::SeekFrom::Start(x) => x as i128,
            std::io::SeekFrom::Current(x) => self.position() as i128 + x as i128,
            std::io::SeekFrom::End(_) => {
                return Err(std::io::Error::new(
                    std::io::ErrorKind::InvalidInput,
                    "seek from end not supported",
                ));
            }
        };
        if target_position < 0 {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "seek before start",
            ));
        }
        self.set_position(cmp::min(target_position, max_position) as u64);
        Ok(self.position())
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for OutputReader {
    fn zeroize(&mut self) {
        // Destructuring to trigger compile error as a reminder to update this impl.
        let Self {
            inner,
            position_within_block,
        } = self;

        inner.zeroize();
        position_within_block.zeroize();
    }
}