block_buffer/
lib.rs

//! Fixed size buffer for block processing of data.
//!
//! # Examples
//! ```
//! use block_buffer::{EagerBuffer, array::typenum::U4};
//!
//! let mut buf = EagerBuffer::<U4>::default();
//!
//! let mut accum = Vec::new();
//! let msg1: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
//! let msg2: &[u8] = &[10, 11, 12];
//!
//! buf.digest_blocks(msg1, |blocks| accum.extend_from_slice(blocks));
//! buf.digest_blocks(msg2, |blocks| accum.extend_from_slice(blocks));
//!
//! assert_eq!(accum.len(), 3);
//! assert_eq!(accum[0], [0, 1, 2, 3]);
//! assert_eq!(accum[1], [4, 5, 6, 7]);
//! assert_eq!(accum[2], [8, 9, 10, 11]);
//!
//! let padded_block = buf.pad_with_zeros();
//! assert_eq!(padded_block, [12, 0, 0, 0]);
//! ```
//!
//! Note that the block size used with buffers MUST be bigger than zero and smaller than 256.
//! You will get a compilation error when using an invalid block size:
//!
//! ```compile_fail
//! use block_buffer::{EagerBuffer, array::typenum::U0};
//! let buf = EagerBuffer::<U0>::default();
//! ```
//! ```compile_fail
//! use block_buffer::{EagerBuffer, array::typenum::U256};
//! let buf = EagerBuffer::<U256>::default();
//! ```
#![no_std]
#![doc(
    html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
    html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg"
)]
#![warn(missing_docs, rust_2018_idioms)]

pub use hybrid_array as array;

use array::{
    Array, ArraySize,
    typenum::{Add1, B1},
};
use core::{fmt, mem::MaybeUninit, ops::Add, ptr, slice};

#[cfg(feature = "zeroize")]
use zeroize::Zeroize;

mod read;
mod sealed;

pub use read::ReadBuffer;

/// Trait for buffer kinds.
pub trait BufferKind: sealed::Sealed {}

/// Eager block buffer kind, which guarantees that buffer position
/// always lies in the range of `0..BlockSize`.
#[derive(Copy, Clone, Debug, Default)]
pub struct Eager {}

/// Lazy block buffer kind, which guarantees that buffer position
/// always lies in the range of `0..=BlockSize`.
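///
/// # Examples
///
/// A small illustrative sketch of the difference from the eager kind (block size
/// `U4` is chosen only for the example): after absorbing exactly one block, a lazy
/// buffer keeps it buffered (position equal to the block size), while an eager
/// buffer compresses it immediately.
///
/// ```
/// use block_buffer::{EagerBuffer, LazyBuffer, array::typenum::U4};
///
/// let mut eager = EagerBuffer::<U4>::default();
/// let mut eager_blocks = 0;
/// eager.digest_blocks(&[0; 4], |blocks| eager_blocks += blocks.len());
/// assert_eq!((eager_blocks, eager.get_pos()), (1, 0));
///
/// let mut lazy = LazyBuffer::<U4>::default();
/// let mut lazy_blocks = 0;
/// lazy.digest_blocks(&[0; 4], |blocks| lazy_blocks += blocks.len());
/// assert_eq!((lazy_blocks, lazy.get_pos()), (0, 4));
/// ```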
#[derive(Copy, Clone, Debug, Default)]
pub struct Lazy {}

impl BufferKind for Eager {}

impl BufferKind for Lazy {}

/// Eager block buffer.
pub type EagerBuffer<B> = BlockBuffer<B, Eager>;
/// Lazy block buffer.
pub type LazyBuffer<B> = BlockBuffer<B, Lazy>;

/// Block buffer error.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub struct Error;

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.write_str("Block buffer error")
    }
}

/// Buffer for block processing of data.
pub struct BlockBuffer<BS: ArraySize, K: BufferKind> {
    buffer: MaybeUninit<Array<u8, BS>>,
    pos: K::Pos,
}

impl<BS: ArraySize, K: BufferKind> BlockBuffer<BS, K> {
    /// This associated constant is used to assert block size correctness at compile time.
    const BLOCK_SIZE_ASSERT: bool = {
        if BS::USIZE == 0 {
            panic!("Block size can not be equal to zero!");
        }
        if BS::USIZE > 255 {
            panic!("Block size can not be bigger than 255!");
        }
        true
    };
}

impl<BS: ArraySize, K: BufferKind> Default for BlockBuffer<BS, K> {
    #[inline]
    fn default() -> Self {
        assert!(Self::BLOCK_SIZE_ASSERT);
        let mut buffer = MaybeUninit::uninit();
        let mut pos = Default::default();
        K::set_pos(&mut buffer, &mut pos, 0);
        Self { buffer, pos }
    }
}

impl<BS: ArraySize, K: BufferKind> Clone for BlockBuffer<BS, K> {
    #[inline]
    fn clone(&self) -> Self {
        // SAFETY: `BlockBuffer` does not implement `Drop` (i.e. it could be a `Copy` type),
        // so we can safely clone it using `ptr::read`.
        unsafe { ptr::read(self) }
    }
}

impl<BS: ArraySize, K: BufferKind> fmt::Debug for BlockBuffer<BS, K> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.debug_struct(K::NAME)
            .field("pos", &self.get_pos())
            .field("block_size", &BS::USIZE)
            .field("data", &self.get_data())
            .finish()
    }
}

impl<BS: ArraySize, K: BufferKind> BlockBuffer<BS, K> {
    /// Create new buffer from slice.
    ///
    /// # Panics
    /// If the slice length is not valid for the used buffer kind.
    #[inline(always)]
    pub fn new(buf: &[u8]) -> Self {
        Self::try_new(buf).unwrap()
    }

    /// Create new buffer from slice.
    ///
    /// Returns an error if the slice length is not valid for the used buffer kind.
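    ///
    /// # Examples
    ///
    /// A small illustrative sketch (block size `U4` is chosen only for the example):
    /// an eager buffer can hold at most `BlockSize - 1` bytes, while a lazy buffer
    /// can hold a full block.
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, LazyBuffer, array::typenum::U4};
    ///
    /// assert!(EagerBuffer::<U4>::try_new(&[1, 2, 3]).is_ok());
    /// assert!(EagerBuffer::<U4>::try_new(&[1, 2, 3, 4]).is_err());
    /// assert!(LazyBuffer::<U4>::try_new(&[1, 2, 3, 4]).is_ok());
    /// ```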
    #[inline(always)]
    pub fn try_new(buf: &[u8]) -> Result<Self, Error> {
        assert!(Self::BLOCK_SIZE_ASSERT);
        if !K::invariant(buf.len(), BS::USIZE) {
            return Err(Error);
        }
        let mut res = Self::default();
        // SAFETY: we have checked that the buffer length satisfies the buffer kind invariant
        unsafe {
            res.set_data_unchecked(buf);
        }
        Ok(res)
    }

    /// Digest data in `input` in blocks of size `BlockSize` using
    /// the `compress` function, which accepts a slice of blocks.
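    ///
    /// # Examples
    ///
    /// A small illustrative sketch (block size `U4` is chosen only for the example):
    /// only complete blocks are passed to `compress`; the remaining tail is buffered.
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, array::typenum::U4};
    ///
    /// let mut buf = EagerBuffer::<U4>::default();
    /// let mut n_blocks = 0;
    /// buf.digest_blocks(&[0; 6], |blocks| n_blocks += blocks.len());
    /// assert_eq!((n_blocks, buf.get_pos()), (1, 2));
    /// ```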
    #[inline]
    pub fn digest_blocks(&mut self, mut input: &[u8], mut compress: impl FnMut(&[Array<u8, BS>])) {
        let pos = self.get_pos();
        // using `self.remaining()` for some reason
        // prevents panic elimination
        let rem = self.size() - pos;
        let n = input.len();
        // Note that checking the condition `pos + n < BlockSize` is
        // equivalent to checking `n < rem`, where `rem` is equal
        // to `BlockSize - pos`. Using the latter allows us to work
        // around the compiler accounting for possible overflow of
        // `pos + n`, which would result in it inserting unreachable
        // panic branches. Using `unreachable_unchecked` in `get_pos`
        // we convince the compiler that `BlockSize - pos` never underflows.
        if K::invariant(n, rem) {
            // SAFETY: we have checked that the length of `input` fits into the number
            // of remaining bytes in `buffer`, so we can safely write data into them
            // and update the cursor position.
            unsafe {
                let buf_ptr = self.buffer.as_mut_ptr().cast::<u8>().add(pos);
                ptr::copy_nonoverlapping(input.as_ptr(), buf_ptr, input.len());
                self.set_pos_unchecked(pos + input.len());
            }
            return;
        }
        if pos != 0 {
            let (left, right) = input.split_at(rem);
            input = right;
            // SAFETY: the length of `left` is equal to the number of remaining bytes in
            // `buffer`, so we can copy data into it and process `buffer` as a fully
            // initialized block.
            let block = unsafe {
                let buf_ptr = self.buffer.as_mut_ptr().cast::<u8>().add(pos);
                ptr::copy_nonoverlapping(left.as_ptr(), buf_ptr, left.len());
                self.buffer.assume_init_ref()
            };
            compress(slice::from_ref(block));
        }

        let (blocks, leftover) = K::split_blocks(input);
        if !blocks.is_empty() {
            compress(blocks);
        }

        // SAFETY: the length of `leftover` always satisfies the invariant of the used
        // buffer kind (strictly smaller than block size for eager buffers and smaller
        // or equal to block size for lazy buffers), so it satisfies the method's
        // safety requirements.
        unsafe {
            self.set_data_unchecked(leftover);
        }
    }

    /// Reset buffer by setting cursor position to zero.
    #[inline(always)]
    pub fn reset(&mut self) {
        // SAFETY: 0 is always a valid position
        unsafe {
            self.set_pos_unchecked(0);
        }
    }

    /// Pad remaining data with zeros and return the resulting block.
    #[inline(always)]
    pub fn pad_with_zeros(&mut self) -> Array<u8, BS> {
        let mut res = Array::<u8, BS>::default();
        let data = self.get_data();
        res[..data.len()].copy_from_slice(data);
        self.reset();
        res
    }

    /// Return current cursor position.
    #[inline(always)]
    pub fn get_pos(&self) -> usize {
        let pos = K::get_pos(&self.buffer, &self.pos);
        if !K::invariant(pos, BS::USIZE) {
            debug_assert!(false);
            // SAFETY: `pos` never breaks the invariant
            unsafe {
                core::hint::unreachable_unchecked();
            }
        }
        pos
    }

    /// Return slice of data stored inside the buffer.
    #[inline(always)]
    pub fn get_data(&self) -> &[u8] {
        // SAFETY: the `buffer` field is properly initialized up to `self.get_pos()`
        // and `get_pos` never returns a position bigger than the buffer size.
        unsafe { slice::from_raw_parts(self.buffer.as_ptr().cast(), self.get_pos()) }
    }

    /// Set buffer content and cursor position.
    ///
    /// # Panics
    /// If `pos` does not satisfy the invariant of the used buffer kind, i.e. if it's
    /// bigger or equal to the block size for eager buffers, or bigger than the block
    /// size for lazy buffers.
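    ///
    /// # Examples
    ///
    /// A small illustrative sketch (block size `U4` is chosen only for the example):
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, array::{Array, typenum::U4}};
    ///
    /// let mut block = Array::<u8, U4>::default();
    /// block[..2].copy_from_slice(&[1, 2]);
    ///
    /// let mut buf = EagerBuffer::<U4>::default();
    /// buf.set(block, 2);
    /// assert_eq!(buf.get_pos(), 2);
    /// assert_eq!(buf.get_data(), [1u8, 2]);
    /// ```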
    #[inline]
    pub fn set(&mut self, buf: Array<u8, BS>, pos: usize) {
        assert!(K::invariant(pos, BS::USIZE));
        self.buffer = MaybeUninit::new(buf);
        // SAFETY: we have asserted that `pos` satisfies the invariant and
        // the `buffer` field is fully initialized
        unsafe {
            self.set_pos_unchecked(pos);
        }
    }

    /// Return size of the internal buffer in bytes.
    #[inline(always)]
    pub fn size(&self) -> usize {
        BS::USIZE
    }

    /// Return number of remaining bytes in the internal buffer.
    #[inline(always)]
    pub fn remaining(&self) -> usize {
        self.size() - self.get_pos()
    }

    /// Set buffer position.
    ///
    /// # Safety
    /// Bytes in the range of `0..pos` in the `buffer` field must be properly initialized.
    ///
    /// `pos` must satisfy the invariant of the used buffer kind, i.e. for eager buffers
    /// it must be strictly smaller than block size and for lazy buffers it must be
    /// smaller or equal to block size.
    #[inline(always)]
    unsafe fn set_pos_unchecked(&mut self, pos: usize) {
        debug_assert!(K::invariant(pos, BS::USIZE));
        K::set_pos(&mut self.buffer, &mut self.pos, pos)
    }

    /// Set buffer data.
    ///
    /// # Safety
    /// Length of `buf` must satisfy the invariant of the used buffer kind, i.e. for eager
    /// buffers it must be strictly smaller than block size and for lazy buffers it must
    /// be smaller or equal to block size.
    #[inline(always)]
    unsafe fn set_data_unchecked(&mut self, buf: &[u8]) {
        unsafe {
            self.set_pos_unchecked(buf.len());
            let dst_ptr: *mut u8 = self.buffer.as_mut_ptr().cast();
            ptr::copy_nonoverlapping(buf.as_ptr(), dst_ptr, buf.len());
        }
    }
}

impl<BS: ArraySize> BlockBuffer<BS, Eager> {
    /// Compress remaining data after padding it with `delim`, zeros and
    /// the `suffix` bytes. If there is not enough unused space, `compress`
    /// will be called twice.
    ///
    /// # Panics
    /// If the suffix length is bigger than the block size.
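    ///
    /// # Examples
    ///
    /// A small illustrative sketch (block size `U4` and suffix `[0xAA]` are chosen
    /// only for the example):
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, array::typenum::U4};
    ///
    /// let mut buf = EagerBuffer::<U4>::default();
    /// buf.digest_blocks(&[1, 2], |_| {});
    ///
    /// let mut out = Vec::new();
    /// buf.digest_pad(0x80, &[0xAA], |block| out.push(block.clone()));
    ///
    /// assert_eq!(out.len(), 1);
    /// assert_eq!(out[0], [1, 2, 0x80, 0xAA]);
    /// ```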
    #[inline(always)]
    pub fn digest_pad(
        &mut self,
        delim: u8,
        suffix: &[u8],
        mut compress: impl FnMut(&Array<u8, BS>),
    ) {
        if suffix.len() > BS::USIZE {
            panic!("suffix is too long");
        }
        let pos = self.get_pos();
        let mut buf = self.pad_with_zeros();
        buf[pos] = delim;

        let n = self.size() - suffix.len();
        if self.size() - pos - 1 < suffix.len() {
            compress(&buf);
            buf.fill(0);
            buf[n..].copy_from_slice(suffix);
            compress(&buf);
        } else {
            buf[n..].copy_from_slice(suffix);
            compress(&buf);
        }
        self.reset();
    }

    /// Pad message with 0x80, zeros and 64-bit message length using
    /// big-endian byte order.
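    ///
    /// # Examples
    ///
    /// A small illustrative sketch (an 8-byte block is chosen only for the example;
    /// real hash functions use larger blocks):
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, array::typenum::U8};
    ///
    /// let mut buf = EagerBuffer::<U8>::default();
    /// buf.digest_blocks(&[42], |_| {});
    ///
    /// let mut out = Vec::new();
    /// buf.len64_padding_be(10, |block| out.push(block.clone()));
    ///
    /// // 0x80 delimiter and zero padding, then the big-endian encoded `data_len`
    /// assert_eq!(out.len(), 2);
    /// assert_eq!(out[0], [42, 0x80, 0, 0, 0, 0, 0, 0]);
    /// assert_eq!(out[1], [0, 0, 0, 0, 0, 0, 0, 10]);
    /// ```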
    #[inline]
    pub fn len64_padding_be(&mut self, data_len: u64, compress: impl FnMut(&Array<u8, BS>)) {
        self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
    }

    /// Pad message with 0x80, zeros and 64-bit message length using
    /// little-endian byte order.
    #[inline]
    pub fn len64_padding_le(&mut self, data_len: u64, compress: impl FnMut(&Array<u8, BS>)) {
        self.digest_pad(0x80, &data_len.to_le_bytes(), compress);
    }

    /// Pad message with 0x80, zeros and 128-bit message length using
    /// big-endian byte order.
    #[inline]
    pub fn len128_padding_be(&mut self, data_len: u128, compress: impl FnMut(&Array<u8, BS>)) {
        self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
    }

    /// Serialize buffer into a byte array.
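    ///
    /// The returned array stores the buffered data followed by zeros, with the
    /// cursor position in the last byte.
    ///
    /// # Examples
    ///
    /// A small illustrative round-trip sketch (block size `U4` is chosen only for
    /// the example):
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, array::typenum::U4};
    ///
    /// let mut buf = EagerBuffer::<U4>::default();
    /// buf.digest_blocks(&[1, 2], |_| {});
    ///
    /// let ser = buf.serialize();
    /// assert_eq!(ser, [1, 2, 0, 2]);
    ///
    /// let de = EagerBuffer::<U4>::deserialize(&ser).unwrap();
    /// assert_eq!(de.get_data(), [1u8, 2]);
    /// ```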
    #[inline]
    pub fn serialize(&self) -> Array<u8, BS> {
        let mut res = Array::<u8, BS>::default();
        let data = self.get_data();
        res[..data.len()].copy_from_slice(data);
        // for the eager kind the position is always smaller than the block size,
        // so the last byte is never occupied by data and can store the position
        res[BS::USIZE - 1] = data.len() as u8;
        res
    }

    /// Deserialize buffer from a byte array.
    #[inline]
    pub fn deserialize(buffer: &Array<u8, BS>) -> Result<Self, Error> {
        // the position is stored in the last byte of the serialized block
        let pos = buffer[BS::USIZE - 1] as usize;
        if !<Eager as sealed::Sealed>::invariant(pos, BS::USIZE) {
            return Err(Error);
        }
        // bytes between the buffered data and the position byte must be zero
        if buffer[pos..BS::USIZE - 1].iter().any(|&b| b != 0) {
            return Err(Error);
        }
        Ok(Self {
            buffer: MaybeUninit::new(buffer.clone()),
            pos: Default::default(),
        })
    }
}

impl<BS: ArraySize> BlockBuffer<BS, Lazy> {
    /// Serialize buffer into a byte array.
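    ///
    /// The serialized form is one byte longer than the block: the cursor position is
    /// stored in the first byte, followed by the buffered data and zeros.
    ///
    /// # Examples
    ///
    /// A small illustrative round-trip sketch (block size `U4` is chosen only for
    /// the example):
    ///
    /// ```
    /// use block_buffer::{LazyBuffer, array::typenum::U4};
    ///
    /// let mut buf = LazyBuffer::<U4>::default();
    /// buf.digest_blocks(&[1, 2, 3, 4], |_| {});
    ///
    /// let ser = buf.serialize();
    /// assert_eq!(ser, [4, 1, 2, 3, 4]);
    ///
    /// let de = LazyBuffer::<U4>::deserialize(&ser).unwrap();
    /// assert_eq!(de.get_data(), [1u8, 2, 3, 4]);
    /// ```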
    #[inline]
    pub fn serialize(&self) -> Array<u8, Add1<BS>>
    where
        BS: Add<B1>,
        Add1<BS>: ArraySize,
    {
        let mut res = Array::<u8, Add1<BS>>::default();
        res[0] = self.pos;
        let data = self.get_data();
        res[1..][..data.len()].copy_from_slice(data);
        res
    }

    /// Deserialize buffer from a byte array.
    #[inline]
    pub fn deserialize(buffer: &Array<u8, Add1<BS>>) -> Result<Self, Error>
    where
        BS: Add<B1>,
        Add1<BS>: ArraySize,
    {
        let pos = buffer[0];
        if !<Lazy as sealed::Sealed>::invariant(pos as usize, BS::USIZE) {
            return Err(Error);
        }
        if buffer[1..][pos as usize..].iter().any(|&b| b != 0) {
            return Err(Error);
        }
        let buf = Array::try_from(&buffer[1..]).expect("slice has correct length");
        Ok(Self {
            buffer: MaybeUninit::new(buf),
            pos,
        })
    }
}

#[cfg(feature = "zeroize")]
impl<BS: ArraySize, K: BufferKind> Zeroize for BlockBuffer<BS, K> {
    #[inline]
    fn zeroize(&mut self) {
        self.buffer.zeroize();
        self.pos.zeroize();
    }
}