use std::sync::atomic::{AtomicBool, Ordering};

use gix_features::{parallel, progress::DynNestedProgress};

use super::Error;
use crate::{
    cache::delta::traverse,
    index::{self, traverse::Outcome, util::index_entries_sorted_by_offset_ascending},
};

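/// Traversal options for [`index::File::traverse_with_index()`].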
#[derive(Default)]
pub struct Options {
    /// If `Some`, use at most the given number of threads; otherwise the thread count is derived from the available logical cores.
    pub thread_limit: Option<usize>,
    /// The kinds of safety checks to perform while traversing.
    pub check: crate::index::traverse::SafetyCheck,
}

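/// The progress ids used in [`index::File::traverse_with_index()`].
///
/// Use these to identify the progress of interest when selecting from the progress tree.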
#[derive(Debug, Copy, Clone)]
pub enum ProgressId {
    /// The number of bytes hashed while verifying the pack data file.
    HashPackDataBytes,
    /// The number of bytes hashed while verifying the pack index file.
    HashPackIndexBytes,
    /// The number of index entries collected and sorted by their pack offset.
    CollectSortedIndexEntries,
    /// The number of objects added to the tree of delta relationships.
    TreeFromOffsetsObjects,
    /// The number of objects that were decoded.
    DecodedObjects,
    /// The number of bytes that were decoded.
    DecodedBytes,
}

impl From<ProgressId> for gix_features::progress::Id {
    fn from(v: ProgressId) -> Self {
        match v {
            ProgressId::HashPackDataBytes => *b"PTHP",
            ProgressId::HashPackIndexBytes => *b"PTHI",
            ProgressId::CollectSortedIndexEntries => *b"PTCE",
            ProgressId::TreeFromOffsetsObjects => *b"PTDI",
            ProgressId::DecodedObjects => *b"PTRO",
            ProgressId::DecodedBytes => *b"PTDB",
        }
    }
}

impl index::File {
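    /// Iterate through all _decoded objects_ in the given `pack` and handle them with `processor`,
    /// using an index of delta relationships so each base object is decoded only once, at the cost of memory.
    ///
    /// Pack and index checksums are verified in parallel to the traversal, as configured by `check`,
    /// while `should_interrupt` allows aborting the operation from another thread.
    ///
    /// A sketch of typical usage follows; the file paths, the `Discard` progress and the no-op
    /// processor are illustrative assumptions, hence the example is not compiled:
    ///
    /// ```ignore
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let index = gix_pack::index::File::at("pack.idx", gix_hash::Kind::Sha1)?;
    /// let pack = gix_pack::data::File::at("pack.pack", gix_hash::Kind::Sha1)?;
    /// let outcome = index.traverse_with_index(
    ///     &pack,
    ///     |_kind, _data, _entry, _progress| Ok::<_, std::io::Error>(()),
    ///     &mut gix_features::progress::Discard,
    ///     &AtomicBool::new(false),
    ///     Default::default(),
    /// )?;
    /// ```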
    pub fn traverse_with_index<Processor, E>(
        &self,
        pack: &crate::data::File,
        mut processor: Processor,
        progress: &mut dyn DynNestedProgress,
        should_interrupt: &AtomicBool,
        Options { check, thread_limit }: Options,
    ) -> Result<Outcome, Error<E>>
    where
        Processor: FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn gix_features::progress::Progress) -> Result<(), E>
            + Send
            + Clone,
        E: std::error::Error + Send + Sync + 'static,
    {
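        // Run checksum verification and object traversal concurrently; either side failing
        // surfaces as an error when both results are joined below.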
        let (verify_result, traversal_result) = parallel::join(
            {
                let mut pack_progress = progress.add_child_with_id(
                    format!(
                        "Hash of pack '{}'",
                        pack.path().file_name().expect("pack has filename").to_string_lossy()
                    ),
                    ProgressId::HashPackDataBytes.into(),
                );
                let mut index_progress = progress.add_child_with_id(
                    format!(
                        "Hash of index '{}'",
                        self.path.file_name().expect("index has filename").to_string_lossy()
                    ),
                    ProgressId::HashPackIndexBytes.into(),
                );
                move || {
                    let res =
                        self.possibly_verify(pack, check, &mut pack_progress, &mut index_progress, should_interrupt);
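                    // Stop the traversal early if verification failed - its results wouldn't be trustworthy.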
                    if res.is_err() {
                        should_interrupt.store(true, Ordering::SeqCst);
                    }
                    res
                }
            },
            || -> Result<_, Error<_>> {
                let sorted_entries = index_entries_sorted_by_offset_ascending(
                    self,
                    &mut progress.add_child_with_id(
                        "collecting sorted index".into(),
                        ProgressId::CollectSortedIndexEntries.into(),
                    ),
                );
                // Build a tree of delta relationships so each base object is decoded once and shared by its children.
                let tree = crate::cache::delta::Tree::from_offsets_in_pack(
                    pack.path(),
                    sorted_entries.into_iter().map(Entry::from),
                    &|e| e.index_entry.pack_offset,
                    &|id| self.lookup(id).map(|idx| self.pack_offset_at_index(idx)),
                    &mut progress.add_child_with_id("indexing".into(), ProgressId::TreeFromOffsetsObjects.into()),
                    should_interrupt,
                    self.object_hash,
                )?;
                let mut outcome = digest_statistics(tree.traverse(
                    |slice, pack| pack.entry_slice(slice),
                    pack,
                    pack.pack_end() as u64,
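                    // Called for each decoded object: record statistics on its node, then hand the
                    // fully decoded bytes to the caller's `processor`.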
                    move |data,
                          progress,
                          traverse::Context {
                              entry: pack_entry,
                              entry_end,
                              decompressed: bytes,
                              level,
                          }| {
                        let object_kind = pack_entry.header.as_kind().expect("non-delta object");
                        data.level = level;
                        data.decompressed_size = pack_entry.decompressed_size;
                        data.object_kind = object_kind;
                        data.compressed_size = entry_end - pack_entry.data_offset;
                        data.object_size = bytes.len() as u64;
                        let result = index::traverse::process_entry(
                            check,
                            object_kind,
                            bytes,
                            &data.index_entry,
                            || {
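                                // Compute the CRC32 over the entry's compressed bytes on demand,
                                // for comparison with the value recorded in the index.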
                                gix_features::hash::crc32(
                                    pack.entry_slice(data.index_entry.pack_offset..entry_end)
                                        .expect("slice pointing into the pack (by now data is verified)"),
                                )
                            },
                            progress,
                            &mut processor,
                        );
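                        // Depending on the safety-check level, decode errors may be logged and
                        // tolerated instead of aborting the entire traversal.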
                        match result {
                            Err(err @ Error::PackDecode { .. }) if !check.fatal_decode_error() => {
                                progress.info(format!("Ignoring decode error: {err}"));
                                Ok(())
                            }
                            res => res,
                        }
                    },
                    traverse::Options {
                        object_progress: Box::new(
                            progress.add_child_with_id("Resolving".into(), ProgressId::DecodedObjects.into()),
                        ),
                        size_progress:
                            &mut progress.add_child_with_id("Decoding".into(), ProgressId::DecodedBytes.into()),
                        thread_limit,
                        should_interrupt,
                        object_hash: self.object_hash,
                    },
                )?);
                outcome.pack_size = pack.data_len() as u64;
                Ok(outcome)
            },
        );
        Ok(Outcome {
            actual_index_checksum: verify_result?,
            statistics: traversal_result?,
        })
    }
}

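/// An index entry enriched with statistics gathered while the object was decoded during traversal.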
struct Entry {
    index_entry: crate::index::Entry,
    object_kind: gix_object::Kind,
    object_size: u64,
    decompressed_size: u64,
    compressed_size: u64,
    level: u16,
}

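/// Create an `Entry` with placeholder statistics - every field except `index_entry` is
/// overwritten once the object is actually decoded during traversal.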
impl From<crate::index::Entry> for Entry {
    fn from(index_entry: crate::index::Entry) -> Self {
        Entry {
            index_entry,
            level: 0,
            object_kind: gix_object::Kind::Tree,
            object_size: 0,
            decompressed_size: 0,
            compressed_size: 0,
        }
    }
}

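/// Fold the per-object data of all tree nodes into overall traversal statistics,
/// including totals, per-chain-length counts, and averages.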
fn digest_statistics(traverse::Outcome { roots, children }: traverse::Outcome<Entry>) -> index::traverse::Statistics {
    let mut res = index::traverse::Statistics::default();
    let average = &mut res.average;
    for item in roots.iter().chain(children.iter()) {
        res.total_compressed_entries_size += item.data.compressed_size;
        res.total_decompressed_entries_size += item.data.decompressed_size;
        res.total_object_size += item.data.object_size;
        *res.objects_per_chain_length
            .entry(u32::from(item.data.level))
            .or_insert(0) += 1;

        average.decompressed_size += item.data.decompressed_size;
        average.compressed_size += item.data.compressed_size as usize;
        average.object_size += item.data.object_size;
        average.num_deltas += u32::from(item.data.level);
        use gix_object::Kind::*;
        match item.data.object_kind {
            Blob => res.num_blobs += 1,
            Tree => res.num_trees += 1,
            Tag => res.num_tags += 1,
            Commit => res.num_commits += 1,
        }
    }

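    // Averages assume at least one object in the pack; an empty pack would make these integer divisions panic.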
    let num_nodes = roots.len() + children.len();
    average.decompressed_size /= num_nodes as u64;
    average.compressed_size /= num_nodes;
    average.object_size /= num_nodes as u64;
    average.num_deltas /= num_nodes as u32;

    res
}