1use std::collections::{HashMap, HashSet};
4use std::io::Write;
5
6use sha1::{Digest, Sha1};
7
8use crate::bloom::{BloomBuildOutcome, BloomFilterSettings};
9use crate::commit_graph_file::CommitGraphChain;
10use crate::objects::{parse_commit, ObjectId, ObjectKind};
11use crate::odb::Odb;
12
// File header: signature "CGPH", format version 1, SHA-1 (20-byte) object ids.
const SIGNATURE: &[u8; 4] = b"CGPH";
const VERSION: u8 = 1;
const HASH_VERSION_SHA1: u8 = 1;
const HASH_LEN: usize = 20;

// Chunk identifiers: big-endian ASCII four-character codes.
const CHUNK_OID_FANOUT: u32 = 0x4f49_4446; // "OIDF"
const CHUNK_OID_LOOKUP: u32 = 0x4f49_444c; // "OIDL"
const CHUNK_COMMIT_DATA: u32 = 0x4344_4154; // "CDAT"
const CHUNK_GENERATION_DATA: u32 = 0x4744_4132; // "GDA2"
const CHUNK_GENERATION_DATA_OVERFLOW: u32 = 0x4744_4f32; // "GDO2"
const CHUNK_EXTRA_EDGES: u32 = 0x4544_4745; // "EDGE"
const CHUNK_BLOOM_INDEXES: u32 = 0x4249_4458; // "BIDX"
const CHUNK_BLOOM_DATA: u32 = 0x4244_4154; // "BDAT"
const CHUNK_BASE: u32 = 0x4241_5345; // "BASE"

// Sentinels used in CDAT parent slots.
const PARENT_NONE: u32 = 0x7000_0000;
const GRAPH_EXTRA_EDGES_NEEDED: u32 = 0x8000_0000;
const GRAPH_LAST_EDGE: u32 = 0x8000_0000;

// Corrected-commit-date offsets wider than 31 bits spill into the GDO2 chunk;
// the GDA2 slot then stores the overflow index tagged with the high bit.
const GENERATION_NUMBER_V2_OFFSET_MAX: u64 = (1u64 << 31) - 1;
const CORRECTED_COMMIT_DATE_OFFSET_OVERFLOW: u32 = 1u32 << 31;
36
/// Minimal per-commit data needed to serialize one commit-graph entry.
#[derive(Debug, Clone)]
pub struct CommitGraphCommitInfo {
    /// Root tree object id of the commit.
    pub tree: ObjectId,
    /// Parent commit ids; the first entry is written into the CDAT first-parent slot.
    pub parents: Vec<ObjectId>,
    /// Committer timestamp (seconds since the Unix epoch, as parsed from the committer line).
    pub commit_time: u64,
}
45
46fn sha1_file_body(body: &[u8]) -> [u8; 20] {
47 let mut h = Sha1::new();
48 h.update(body);
49 h.finalize().into()
50}
51
/// Extract the Unix timestamp from a commit "committer" value.
///
/// The value has the shape `Name <email> <epoch-seconds> <tz>`; splitting from
/// the right keeps spaces inside the name from mattering. Returns 0 when the
/// timestamp field is absent or unparseable.
fn parse_commit_time(committer: &str) -> u64 {
    let mut fields = committer.rsplitn(3, ' ');
    // First field from the right is the timezone; `rsplitn` always yields at
    // least one field, so only the *second* `next()` can be `None`.
    let _tz = fields.next();
    fields
        .next()
        .and_then(|ts| ts.parse::<u64>().ok())
        .unwrap_or(0)
}
60
61pub fn load_commit_graph_commit_info(
63 odb: &Odb,
64 oid: ObjectId,
65) -> crate::error::Result<CommitGraphCommitInfo> {
66 let obj = odb.read(&oid)?;
67 if obj.kind != ObjectKind::Commit {
68 return Err(crate::error::Error::CorruptObject(format!(
69 "object {oid} is not a commit"
70 )));
71 }
72 let c = parse_commit(&obj.data)?;
73 Ok(CommitGraphCommitInfo {
74 tree: c.tree,
75 parents: c.parents.clone(),
76 commit_time: parse_commit_time(&c.committer),
77 })
78}
79
80fn compute_topo_generations(
81 sorted_oids: &[ObjectId],
82 infos: &HashMap<ObjectId, CommitGraphCommitInfo>,
83 oid_to_idx: &HashMap<ObjectId, u32>,
84) -> Vec<u32> {
85 let n = sorted_oids.len();
86 let mut gen = vec![0u32; n];
87 let mut computed = vec![false; n];
88 for i in 0..n {
89 if computed[i] {
90 continue;
91 }
92 let mut work_stack: Vec<(usize, bool)> = vec![(i, false)];
93 while let Some((idx, parents_done)) = work_stack.pop() {
94 if computed[idx] {
95 continue;
96 }
97 let oid = sorted_oids[idx];
98 let info = &infos[&oid];
99 if parents_done {
100 let mut max_parent_gen = 0u32;
101 for p in &info.parents {
102 if let Some(&pidx) = oid_to_idx.get(p) {
103 max_parent_gen = max_parent_gen.max(gen[pidx as usize]);
104 }
105 }
106 gen[idx] = max_parent_gen + 1;
107 computed[idx] = true;
108 } else {
109 let mut all_done = true;
110 for p in &info.parents {
111 if let Some(&pidx) = oid_to_idx.get(p) {
112 if !computed[pidx as usize] {
113 all_done = false;
114 }
115 }
116 }
117 if all_done {
118 let mut max_parent_gen = 0u32;
119 for p in &info.parents {
120 if let Some(&pidx) = oid_to_idx.get(p) {
121 max_parent_gen = max_parent_gen.max(gen[pidx as usize]);
122 }
123 }
124 gen[idx] = max_parent_gen + 1;
125 computed[idx] = true;
126 } else {
127 work_stack.push((idx, true));
128 for p in &info.parents {
129 if let Some(&pidx) = oid_to_idx.get(p) {
130 if !computed[pidx as usize] {
131 work_stack.push((pidx as usize, false));
132 }
133 }
134 }
135 }
136 }
137 }
138 }
139 gen
140}
141
142fn compute_corrected_generations(
143 sorted_oids: &[ObjectId],
144 infos: &HashMap<ObjectId, CommitGraphCommitInfo>,
145 oid_to_idx: &HashMap<ObjectId, u32>,
146 topo_gen: &[u32],
147) -> Vec<u64> {
148 let n = sorted_oids.len();
149 let mut gen_date = vec![0u64; n];
150 let mut computed = vec![false; n];
151 for i in 0..n {
152 if computed[i] {
153 continue;
154 }
155 let mut work_stack: Vec<(usize, bool)> = vec![(i, false)];
156 while let Some((idx, parents_done)) = work_stack.pop() {
157 if computed[idx] {
158 continue;
159 }
160 let oid = sorted_oids[idx];
161 let info = &infos[&oid];
162 let cdate = info.commit_time;
163 if parents_done {
164 let mut max_g = cdate;
165 for p in &info.parents {
166 if let Some(&pidx) = oid_to_idx.get(p) {
167 max_g = max_g.max(gen_date[pidx as usize]);
168 }
169 }
170 let topo = topo_gen[idx] as u64;
171 if max_g < topo {
172 max_g = topo;
173 }
174 gen_date[idx] = max_g + 1;
175 computed[idx] = true;
176 } else {
177 let mut all_done = true;
178 for p in &info.parents {
179 if let Some(&pidx) = oid_to_idx.get(p) {
180 if !computed[pidx as usize] {
181 all_done = false;
182 }
183 }
184 }
185 if all_done {
186 let mut max_g = cdate;
187 for p in &info.parents {
188 if let Some(&pidx) = oid_to_idx.get(p) {
189 max_g = max_g.max(gen_date[pidx as usize]);
190 }
191 }
192 let topo = topo_gen[idx] as u64;
193 if max_g < topo {
194 max_g = topo;
195 }
196 gen_date[idx] = max_g + 1;
197 computed[idx] = true;
198 } else {
199 work_stack.push((idx, true));
200 for p in &info.parents {
201 if let Some(&pidx) = oid_to_idx.get(p) {
202 if !computed[pidx as usize] {
203 work_stack.push((pidx as usize, false));
204 }
205 }
206 }
207 }
208 }
209 }
210 }
211 gen_date
212}
213
214fn resolve_parent_edge(
215 parent: ObjectId,
216 oid_to_idx: &HashMap<ObjectId, u32>,
217 base_count: u32,
218 chain: Option<&CommitGraphChain>,
219) -> u32 {
220 if let Some(&idx) = oid_to_idx.get(&parent) {
221 return idx + base_count;
222 }
223 if let Some(c) = chain {
224 if let Some(gpos) = c.global_position(&parent) {
225 return gpos;
226 }
227 }
228 PARENT_NONE
229}
230
/// Counters describing changed-path Bloom filter work performed while
/// writing one commit-graph layer.
#[derive(Debug, Default, Clone, Copy)]
pub struct BloomWriteStats {
    /// Filters actually computed for commits in this layer.
    pub filter_computed: u32,
    /// Filters skipped because the `max_new_filters` budget was exhausted.
    pub filter_not_computed: u32,
    /// Computed filters that came back truncated-to-empty.
    pub filter_trunc_empty: u32,
    /// Computed filters truncated because the diff was too large.
    pub filter_trunc_large: u32,
    /// NOTE(review): never incremented in this file — presumably bumped by a
    /// caller when reusing/upgrading filters from an existing graph; confirm.
    pub filter_upgraded: u32,
}
240
241pub fn build_commit_graph_bytes(
243 sorted_oids: &[ObjectId],
244 infos: &HashMap<ObjectId, CommitGraphCommitInfo>,
245 odb: &Odb,
246 changed_paths: bool,
247 bloom_settings: &BloomFilterSettings,
248 base_chain: Option<&CommitGraphChain>,
249 base_graph_hashes: &[[u8; 20]],
250 max_new_filters: Option<u32>,
251) -> crate::error::Result<(Vec<u8>, BloomWriteStats)> {
252 let base_count: u32 = base_chain.map(CommitGraphChain::total_commits).unwrap_or(0);
253
254 let oid_to_idx: HashMap<ObjectId, u32> = sorted_oids
255 .iter()
256 .enumerate()
257 .map(|(i, o)| (*o, i as u32))
258 .collect();
259
260 let topo = compute_topo_generations(sorted_oids, infos, &oid_to_idx);
261 let gen_date = compute_corrected_generations(sorted_oids, infos, &oid_to_idx, &topo);
262
263 let mut gda2: Vec<u8> = Vec::with_capacity(sorted_oids.len() * 4);
264 let mut generation_overflow: Vec<u8> = Vec::new();
265 let mut overflow_count: u32 = 0;
266 for (i, oid) in sorted_oids.iter().enumerate() {
267 let info = &infos[oid];
268 let offset_raw = gen_date[i].saturating_sub(info.commit_time);
269 if offset_raw > GENERATION_NUMBER_V2_OFFSET_MAX {
270 let marker = CORRECTED_COMMIT_DATE_OFFSET_OVERFLOW | overflow_count;
271 overflow_count = overflow_count.wrapping_add(1);
272 gda2.extend_from_slice(&marker.to_be_bytes());
273 generation_overflow.extend_from_slice(&((offset_raw >> 32) as u32).to_be_bytes());
274 generation_overflow.extend_from_slice(&((offset_raw as u32).to_be_bytes()));
275 } else {
276 gda2.extend_from_slice(&(offset_raw as u32).to_be_bytes());
277 }
278 }
279
280 let mut extra_edges: Vec<u8> = Vec::new();
281
282 let mut cdat: Vec<u8> = Vec::with_capacity(sorted_oids.len() * (HASH_LEN + 16));
283 for (i, oid) in sorted_oids.iter().enumerate() {
284 let info = &infos[oid];
285 cdat.extend_from_slice(info.tree.as_bytes());
286
287 let p1 = info
288 .parents
289 .first()
290 .map(|p| resolve_parent_edge(*p, &oid_to_idx, base_count, base_chain))
291 .unwrap_or(PARENT_NONE);
292 cdat.extend_from_slice(&p1.to_be_bytes());
293
294 let p2 = if info.parents.len() <= 1 {
295 PARENT_NONE
296 } else if info.parents.len() == 2 {
297 resolve_parent_edge(info.parents[1], &oid_to_idx, base_count, base_chain)
298 } else {
299 let start_u32 = (extra_edges.len() / 4) as u32;
300 for (j, p) in info.parents.iter().enumerate().skip(1) {
301 let mut ev = resolve_parent_edge(*p, &oid_to_idx, base_count, base_chain);
302 if j + 1 == info.parents.len() {
303 ev |= GRAPH_LAST_EDGE;
304 }
305 extra_edges.extend_from_slice(&ev.to_be_bytes());
306 }
307 GRAPH_EXTRA_EDGES_NEEDED | start_u32
308 };
309 cdat.extend_from_slice(&p2.to_be_bytes());
310
311 let topo = topo[i];
312 let date = info.commit_time;
313 let packed = (topo << 2) | (((date >> 32) & 0x3) as u32);
314 cdat.extend_from_slice(&packed.to_be_bytes());
315 cdat.extend_from_slice(&((date & 0xFFFF_FFFF) as u32).to_be_bytes());
316 }
317
318 let mut fanout = vec![0u8; 256 * 4];
319 let mut counts = [0u32; 256];
320 for oid in sorted_oids {
321 counts[oid.as_bytes()[0] as usize] += 1;
322 }
323 let mut cum = 0u32;
324 for i in 0..256 {
325 cum += counts[i];
326 fanout[i * 4..i * 4 + 4].copy_from_slice(&cum.to_be_bytes());
327 }
328
329 let mut oid_lookup = Vec::with_capacity(sorted_oids.len() * HASH_LEN);
330 for oid in sorted_oids {
331 oid_lookup.extend_from_slice(oid.as_bytes());
332 }
333
334 let mut bloom_stats = BloomWriteStats::default();
335 let max_new = max_new_filters.unwrap_or(u32::MAX);
336 let (bidx, bdat, bloom_total_payload) = if changed_paths {
337 let mut indexes: Vec<u32> = Vec::with_capacity(sorted_oids.len());
338 let mut data_payload = Vec::new();
339 let mut cur = 0u32;
340 for oid in sorted_oids {
341 let info = &infos[oid];
342 let compute = bloom_stats.filter_computed < max_new;
343 let (bytes, outcome) = if compute {
344 crate::commit_graph_file::bloom_filter_for_commit_write(
345 odb,
346 &info.parents,
347 info.tree,
348 bloom_settings,
349 )?
350 } else {
351 (Vec::new(), BloomBuildOutcome::Normal)
352 };
353 if compute {
354 bloom_stats.filter_computed += 1;
355 match outcome {
356 BloomBuildOutcome::Normal => {}
357 BloomBuildOutcome::TruncatedLarge => bloom_stats.filter_trunc_large += 1,
358 BloomBuildOutcome::TruncatedEmpty => bloom_stats.filter_trunc_empty += 1,
359 }
360 } else {
361 bloom_stats.filter_not_computed += 1;
362 }
363 cur += bytes.len() as u32;
364 indexes.push(cur);
365 data_payload.extend_from_slice(&bytes);
366 }
367 let mut bdat_chunk = Vec::with_capacity(12 + data_payload.len());
368 bdat_chunk.extend_from_slice(&bloom_settings.hash_version.to_be_bytes());
369 bdat_chunk.extend_from_slice(&bloom_settings.num_hashes.to_be_bytes());
370 bdat_chunk.extend_from_slice(&bloom_settings.bits_per_entry.to_be_bytes());
371 bdat_chunk.extend_from_slice(&data_payload);
372 let mut bidx_bytes = Vec::with_capacity(indexes.len() * 4);
373 for v in indexes {
374 bidx_bytes.extend_from_slice(&v.to_be_bytes());
375 }
376 (bidx_bytes, bdat_chunk, data_payload.len())
377 } else {
378 (Vec::new(), Vec::new(), 0)
379 };
380
381 let _ = bloom_total_payload;
382
383 let mut chunks: Vec<(u32, Vec<u8>)> = Vec::new();
384 chunks.push((CHUNK_OID_FANOUT, fanout));
385 chunks.push((CHUNK_OID_LOOKUP, oid_lookup));
386 chunks.push((CHUNK_COMMIT_DATA, cdat));
387 chunks.push((CHUNK_GENERATION_DATA, gda2));
388 if !generation_overflow.is_empty() {
389 chunks.push((CHUNK_GENERATION_DATA_OVERFLOW, generation_overflow));
390 }
391 if !extra_edges.is_empty() {
392 chunks.push((CHUNK_EXTRA_EDGES, extra_edges));
393 }
394 if changed_paths {
395 chunks.push((CHUNK_BLOOM_INDEXES, bidx));
396 chunks.push((CHUNK_BLOOM_DATA, bdat));
397 }
398 if !base_graph_hashes.is_empty() {
399 let mut base_chunk = Vec::new();
400 for h in base_graph_hashes {
401 base_chunk.extend_from_slice(h);
402 }
403 chunks.push((CHUNK_BASE, base_chunk));
404 }
405
406 let num_chunks = chunks.len() as u8;
407 let header_size = 8u64;
408 let toc_size = (num_chunks as u64 + 1) * 12;
409 let mut offsets = Vec::with_capacity(chunks.len());
410 let mut cur = header_size + toc_size;
411 for (_, data) in &chunks {
412 offsets.push(cur);
413 cur += data.len() as u64;
414 }
415 let end_offset = cur;
416
417 let mut out = Vec::with_capacity(end_offset as usize + HASH_LEN);
418 out.write_all(SIGNATURE)?;
419 let base_layers = base_graph_hashes.len() as u8;
420 out.write_all(&[VERSION, HASH_VERSION_SHA1, num_chunks, base_layers])?;
421 for i in 0..chunks.len() {
422 out.write_all(&chunks[i].0.to_be_bytes())?;
423 out.write_all(&offsets[i].to_be_bytes())?;
424 }
425 out.write_all(&[0u8; 4])?;
426 out.write_all(&end_offset.to_be_bytes())?;
427 for (_, data) in &chunks {
428 out.write_all(data)?;
429 }
430
431 let checksum = sha1_file_body(&out);
432 out.write_all(&checksum)?;
433 Ok((out, bloom_stats))
434}
435
436pub fn collect_reachable_commit_oids(
438 git_dir: &std::path::Path,
439 odb: &Odb,
440) -> crate::error::Result<HashSet<ObjectId>> {
441 use std::fs;
442 let mut commits: HashSet<ObjectId> = HashSet::new();
443 let mut stack: Vec<ObjectId> = Vec::new();
444
445 fn collect_ref_tips(
446 git_dir: &std::path::Path,
447 dir: &std::path::Path,
448 stack: &mut Vec<ObjectId>,
449 ) -> crate::error::Result<()> {
450 if !dir.exists() {
451 return Ok(());
452 }
453 for entry in fs::read_dir(dir)? {
454 let entry = entry?;
455 let path = entry.path();
456 if path.is_dir() {
457 collect_ref_tips(git_dir, &path, stack)?;
458 } else if let Ok(content) = fs::read_to_string(&path) {
459 if let Ok(oid) = ObjectId::from_hex(content.trim()) {
460 stack.push(oid);
461 }
462 }
463 }
464 Ok(())
465 }
466
467 let refs_dir = git_dir.join("refs");
468 collect_ref_tips(git_dir, &refs_dir, &mut stack)?;
469
470 let packed_refs = git_dir.join("packed-refs");
471 if packed_refs.exists() {
472 if let Ok(content) = fs::read_to_string(&packed_refs) {
473 for line in content.lines() {
474 if line.starts_with('#') || line.starts_with('^') {
475 continue;
476 }
477 if let Some(hex) = line.split_whitespace().next() {
478 if let Ok(oid) = ObjectId::from_hex(hex) {
479 stack.push(oid);
480 }
481 }
482 }
483 }
484 }
485
486 let head_path = git_dir.join("HEAD");
487 if head_path.exists() {
488 let head = fs::read_to_string(&head_path)?;
489 let head = head.trim();
490 if let Some(refpath) = head.strip_prefix("ref: ") {
491 let full = git_dir.join(refpath);
492 if full.exists() {
493 if let Ok(content) = fs::read_to_string(&full) {
494 if let Ok(oid) = ObjectId::from_hex(content.trim()) {
495 stack.push(oid);
496 }
497 }
498 }
499 } else if let Ok(oid) = ObjectId::from_hex(head) {
500 stack.push(oid);
501 }
502 }
503
504 while let Some(oid) = stack.pop() {
505 if commits.contains(&oid) {
506 continue;
507 }
508 let obj = match odb.read(&oid) {
509 Ok(o) => o,
510 Err(_) => continue,
511 };
512 if obj.kind != ObjectKind::Commit {
513 if obj.kind == ObjectKind::Tag {
514 if let Ok(text) = std::str::from_utf8(&obj.data) {
515 for line in text.lines() {
516 if let Some(rest) = line.strip_prefix("object ") {
517 if let Ok(target) = ObjectId::from_hex(rest.trim()) {
518 stack.push(target);
519 }
520 }
521 }
522 }
523 }
524 continue;
525 }
526 let commit = parse_commit(&obj.data)?;
527 for parent in &commit.parents {
528 stack.push(*parent);
529 }
530 commits.insert(oid);
531 }
532
533 Ok(commits)
534}