gix_pack/data/input/bytes_to_entries.rs

use std::{fs, io};

use gix_features::{hash::Hasher, zlib::Decompress};
use gix_hash::ObjectId;

use crate::data::input;

/// An iterator over [`Entries`][input::Entry] in a byte stream.
///
/// The iterator is used as part of [`Bundle::write_to_directory(…)`][crate::Bundle::write_to_directory()].
pub struct BytesToEntriesIter<BR> {
    read: BR,
    decompressor: Decompress,
    offset: u64,
    had_error: bool,
    version: crate::data::Version,
    objects_left: u32,
    hash: Option<Hasher>,
    mode: input::Mode,
    compressed: input::EntryDataMode,
    compressed_buf: Option<Vec<u8>>,
    hash_len: usize,
    object_hash: gix_hash::Kind,
}

/// Access
impl<BR> BytesToEntriesIter<BR> {
    /// The pack version currently being iterated
    pub fn version(&self) -> crate::data::Version {
        self.version
    }

    /// The kind of iteration
    pub fn mode(&self) -> input::Mode {
        self.mode
    }
}

/// Initialization
impl<BR> BytesToEntriesIter<BR>
where
    BR: io::BufRead,
{
    /// Obtain an iterator from a `read` stream to a pack data file and configure it using `mode` and `compressed`.
    /// `object_hash` specifies which hash is used for objects in ref-delta entries.
    ///
    /// Note that `read` is expected to be positioned at the beginning of a valid pack data file with a header, entries and a trailer.
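    ///
    /// A sketch of typical usage, with `some.pack` standing in for an actual pack data file
    /// (hence marked `ignore` rather than compiled as a doc-test):
    ///
    /// ```ignore
    /// use std::{fs, io};
    ///
    /// let read = io::BufReader::new(fs::File::open("some.pack")?);
    /// let entries = BytesToEntriesIter::new_from_header(
    ///     read,
    ///     input::Mode::Verify,
    ///     input::EntryDataMode::KeepAndCrc32,
    ///     gix_hash::Kind::Sha1,
    /// )?;
    /// for entry in entries {
    ///     let entry = entry?;
    ///     // entry.pack_offset, entry.decompressed_size and entry.crc32 are now available
    /// }
    /// ```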
    pub fn new_from_header(
        mut read: BR,
        mode: input::Mode,
        compressed: input::EntryDataMode,
        object_hash: gix_hash::Kind,
    ) -> Result<BytesToEntriesIter<BR>, input::Error> {
        let mut header_data = [0u8; 12];
        read.read_exact(&mut header_data)?;

        let (version, num_objects) = crate::data::header::decode(&header_data)?;
        assert_eq!(
            version,
            crate::data::Version::V2,
            "let's stop here if we see undocumented pack formats"
        );
        Ok(BytesToEntriesIter {
            read,
            decompressor: Decompress::new(true),
            compressed,
            offset: 12,
            had_error: false,
            version,
            objects_left: num_objects,
            hash: (mode != input::Mode::AsIs).then(|| {
                let mut hash = gix_features::hash::hasher(object_hash);
                hash.update(&header_data);
                hash
            }),
            mode,
            compressed_buf: None,
            hash_len: object_hash.len_in_bytes(),
            object_hash,
        })
    }

    fn next_inner(&mut self) -> Result<input::Entry, input::Error> {
        self.objects_left -= 1; // even an error counts as an object

        // Read header
        let entry = match self.hash.as_mut() {
            Some(hash) => {
                let mut read = read_and_pass_to(
                    &mut self.read,
                    HashWrite {
                        inner: io::sink(),
                        hash,
                    },
                );
                crate::data::Entry::from_read(&mut read, self.offset, self.hash_len)
            }
            None => crate::data::Entry::from_read(&mut self.read, self.offset, self.hash_len),
        }
        .map_err(input::Error::from)?;

        // Decompress the object to learn its compressed size (and collect its compressed bytes on the way)
        let compressed_buf = self.compressed_buf.take().unwrap_or_else(|| Vec::with_capacity(4096));
        self.decompressor.reset(true);
        let mut decompressed_reader = DecompressRead {
            inner: read_and_pass_to(
                &mut self.read,
                if self.compressed.keep() {
                    Vec::with_capacity(entry.decompressed_size as usize)
                } else {
                    compressed_buf
                },
            ),
            decompressor: &mut self.decompressor,
        };

        let bytes_copied = io::copy(&mut decompressed_reader, &mut io::sink())?;
        if bytes_copied != entry.decompressed_size {
            return Err(input::Error::IncompletePack {
                actual: bytes_copied,
                expected: entry.decompressed_size,
            });
        }

        let pack_offset = self.offset;
        let compressed_size = decompressed_reader.decompressor.total_in();
        self.offset += entry.header_size() as u64 + compressed_size;

        let mut compressed = decompressed_reader.inner.write;
        debug_assert_eq!(
            compressed_size,
            compressed.len() as u64,
            "we must track exactly the same amount of bytes as read by the decompressor"
        );
        if let Some(hash) = self.hash.as_mut() {
            hash.update(&compressed);
        }

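        // The CRC32 covers the re-serialized entry header followed by the compressed object data.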
        let crc32 = if self.compressed.crc32() {
            let mut header_buf = [0u8; 12 + gix_hash::Kind::longest().len_in_bytes()];
            let header_len = entry.header.write_to(bytes_copied, &mut header_buf.as_mut())?;
            let state = gix_features::hash::crc32_update(0, &header_buf[..header_len]);
            Some(gix_features::hash::crc32_update(state, &compressed))
        } else {
            None
        };

        let compressed = if self.compressed.keep() {
            Some(compressed)
        } else {
            compressed.clear();
            self.compressed_buf = Some(compressed);
            None
        };

        // The last object gets the trailer (which is potentially verified)
        let trailer = self.try_read_trailer()?;
        Ok(input::Entry {
            header: entry.header,
            header_size: entry.header_size() as u16,
            compressed,
            compressed_size,
            crc32,
            pack_offset,
            decompressed_size: bytes_copied,
            trailer,
        })
    }

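    /// Read the trailing pack checksum once all objects have been consumed, verifying it against the
    /// hash computed while reading if one is kept. In `Restore` mode the computed hash is used as the
    /// trailer instead, and while objects remain it is the hash over everything seen so far.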
    fn try_read_trailer(&mut self) -> Result<Option<ObjectId>, input::Error> {
        Ok(if self.objects_left == 0 {
            let mut id = gix_hash::ObjectId::null(self.object_hash);
            if let Err(err) = self.read.read_exact(id.as_mut_slice()) {
                if self.mode != input::Mode::Restore {
                    return Err(err.into());
                }
            }

            if let Some(hash) = self.hash.take() {
                let actual_id = gix_hash::ObjectId::from(hash.digest());
                if self.mode == input::Mode::Restore {
                    id = actual_id;
                }
                if id != actual_id {
                    return Err(input::Error::ChecksumMismatch {
                        actual: actual_id,
                        expected: id,
                    });
                }
            }
            Some(id)
        } else if self.mode == input::Mode::Restore {
            let hash = self.hash.clone().expect("in restore mode a hash is set");
            Some(gix_hash::ObjectId::from(hash.digest()))
        } else {
            None
        })
    }
}

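/// Wrap `read` so that every byte read from it is also written to `to`.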
fn read_and_pass_to<R: io::Read, W: io::Write>(read: &mut R, to: W) -> PassThrough<&mut R, W> {
    PassThrough { read, write: to }
}

impl<R> Iterator for BytesToEntriesIter<R>
where
    R: io::BufRead,
{
    type Item = Result<input::Entry, input::Error>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.had_error || self.objects_left == 0 {
            return None;
        }
        let result = self.next_inner();
        self.had_error = result.is_err();
        if self.had_error {
            self.objects_left = 0;
        }
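        // In restore mode an error ends the iteration silently, as everything recovered so far has already been yielded.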
        if self.mode == input::Mode::Restore && self.had_error {
            None
        } else {
            Some(result)
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.objects_left as usize, Some(self.objects_left as usize))
    }
}

impl<R> std::iter::ExactSizeIterator for BytesToEntriesIter<R> where R: io::BufRead {}

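/// A reader that forwards every byte it reads or consumes to a writer,
/// used to tee pack bytes into a hasher or a buffer of compressed bytes.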
struct PassThrough<R, W> {
    read: R,
    write: W,
}

impl<R, W> io::BufRead for PassThrough<R, W>
where
    Self: io::Read,
    R: io::BufRead,
    W: io::Write,
{
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        self.read.fill_buf()
    }

    fn consume(&mut self, amt: usize) {
        let buf = self
            .read
            .fill_buf()
            .expect("cannot fail as fill_buf() was called before and this call does nothing new");
        self.write
            .write_all(&buf[..amt])
            .expect("a write to never fail - should be a memory buffer");
        self.read.consume(amt);
    }
}

impl<R, W> io::Read for PassThrough<R, W>
where
    W: io::Write,
    R: io::Read,
{
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let bytes_read = self.read.read(buf)?;
        self.write.write_all(&buf[..bytes_read])?;
        Ok(bytes_read)
    }
}

impl crate::data::File {
    /// Returns an iterator over [`Entries`][crate::data::input::Entry], without making use of the memory mapping.
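    ///
    /// A minimal usage sketch, assuming `pack` is a [`crate::data::File`] (marked `ignore`, not compiled as a doc-test):
    ///
    /// ```ignore
    /// for entry in pack.streaming_iter()? {
    ///     let entry = entry?;
    ///     // each entry carries its header, pack offset, CRC32 and, in this mode, the compressed bytes
    /// }
    /// ```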
    pub fn streaming_iter(&self) -> Result<BytesToEntriesIter<impl io::BufRead>, input::Error> {
        let reader = io::BufReader::with_capacity(4096 * 8, fs::File::open(&self.path)?);
        BytesToEntriesIter::new_from_header(
            reader,
            input::Mode::Verify,
            input::EntryDataMode::KeepAndCrc32,
            self.object_hash,
        )
    }
}

/// A reader that inflates zlib-compressed data obtained from an inner reader.
///
/// The boxed variant is faster for what we do (moving the decompressor in and out a lot).
pub struct DecompressRead<'a, R> {
    /// The reader from which bytes should be decompressed.
    pub inner: R,
    /// The decompressor doing all the work.
    pub decompressor: &'a mut Decompress,
}

impl<R> io::Read for DecompressRead<'_, R>
where
    R: io::BufRead,
{
    fn read(&mut self, into: &mut [u8]) -> io::Result<usize> {
        gix_features::zlib::stream::inflate::read(&mut self.inner, self.decompressor, into)
    }
}

/// A utility to automatically generate a hash while writing into an inner writer.
pub struct HashWrite<'a, T> {
    /// The hash implementation.
    pub hash: &'a mut Hasher,
    /// The inner writer.
    pub inner: T,
}

impl<T> std::io::Write for HashWrite<'_, T>
where
    T: std::io::Write,
{
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let written = self.inner.write(buf)?;
        self.hash.update(&buf[..written]);
        Ok(written)
    }

    fn flush(&mut self) -> std::io::Result<()> {
        self.inner.flush()
    }
}