// git_pack/data/input/entries_to_bytes.rs
use std::iter::Peekable;

use git_features::hash;

use crate::data::input;
/// An iterator which consumes [`input::Entry`] items and writes them,
/// re-encoded, as a data pack into `output`.
pub struct EntriesToBytesIter<I: Iterator, W> {
    /// The input entries; peekable so the final entry can be detected.
    pub input: Peekable<I>,
    /// Where the pack bytes go; per the `impl` bounds it must support
    /// Read + Write + Seek, as the header is patched and re-hashed at the end.
    output: W,
    /// The hash over all written bytes; `None` until iteration completed.
    trailer: Option<git_hash::ObjectId>,
    /// The pack version to write (currently only V2 is accepted).
    data_version: crate::data::Version,
    /// Number of entries written so far; patched into the header when done.
    num_entries: u32,
    /// Set once the trailing digest has been written; stops iteration.
    is_done: bool,
    /// The kind of object hash to compute over the pack (currently only Sha1).
    object_hash: git_hash::Kind,
}
29
30impl<I, W> EntriesToBytesIter<I, W>
31where
32 I: Iterator<Item = Result<input::Entry, input::Error>>,
33 W: std::io::Read + std::io::Write + std::io::Seek,
34{
35 pub fn new(input: I, output: W, version: crate::data::Version, object_hash: git_hash::Kind) -> Self {
44 assert!(
45 matches!(version, crate::data::Version::V2),
46 "currently only pack version 2 can be written",
47 );
48 assert!(
49 matches!(object_hash, git_hash::Kind::Sha1),
50 "currently only Sha1 is supported, right now we don't know how other hashes are encoded",
51 );
52 EntriesToBytesIter {
53 input: input.peekable(),
54 output,
55 object_hash,
56 num_entries: 0,
57 trailer: None,
58 data_version: version,
59 is_done: false,
60 }
61 }
62
63 pub fn digest(&self) -> Option<git_hash::ObjectId> {
66 self.trailer
67 }
68
69 fn next_inner(&mut self, entry: input::Entry) -> Result<input::Entry, input::Error> {
70 if self.num_entries == 0 {
71 let header_bytes = crate::data::header::encode(self.data_version, 0);
72 self.output.write_all(&header_bytes[..])?;
73 }
74 self.num_entries += 1;
75 entry.header.write_to(entry.decompressed_size, &mut self.output)?;
76 std::io::copy(
77 &mut entry
78 .compressed
79 .as_deref()
80 .expect("caller must configure generator to keep compressed bytes"),
81 &mut self.output,
82 )?;
83 Ok(entry)
84 }
85
86 fn write_header_and_digest(&mut self, last_entry: Option<&mut input::Entry>) -> Result<(), input::Error> {
87 let header_bytes = crate::data::header::encode(self.data_version, self.num_entries);
88 let num_bytes_written = if last_entry.is_some() {
89 self.output.stream_position()?
90 } else {
91 header_bytes.len() as u64
92 };
93 self.output.rewind()?;
94 self.output.write_all(&header_bytes[..])?;
95 self.output.flush()?;
96
97 self.output.rewind()?;
98 let interrupt_never = std::sync::atomic::AtomicBool::new(false);
99 let digest = hash::bytes(
100 &mut self.output,
101 num_bytes_written as usize,
102 self.object_hash,
103 &mut git_features::progress::Discard,
104 &interrupt_never,
105 )?;
106 self.output.write_all(digest.as_slice())?;
107 self.output.flush()?;
108
109 self.is_done = true;
110 if let Some(last_entry) = last_entry {
111 last_entry.trailer = Some(digest);
112 }
113 self.trailer = Some(digest);
114 Ok(())
115 }
116}
117
118impl<I, W> Iterator for EntriesToBytesIter<I, W>
119where
120 I: Iterator<Item = Result<input::Entry, input::Error>>,
121 W: std::io::Read + std::io::Write + std::io::Seek,
122{
123 type Item = Result<input::Entry, input::Error>;
125
126 fn next(&mut self) -> Option<Self::Item> {
127 if self.is_done {
128 return None;
129 }
130
131 match self.input.next() {
132 Some(res) => Some(match res {
133 Ok(entry) => self.next_inner(entry).and_then(|mut entry| {
134 if self.input.peek().is_none() {
135 self.write_header_and_digest(Some(&mut entry)).map(|_| entry)
136 } else {
137 Ok(entry)
138 }
139 }),
140 Err(err) => {
141 self.is_done = true;
142 Err(err)
143 }
144 }),
145 None => match self.write_header_and_digest(None) {
146 Ok(_) => None,
147 Err(err) => Some(Err(err)),
148 },
149 }
150 }
151
152 fn size_hint(&self) -> (usize, Option<usize>) {
153 self.input.size_hint()
154 }
155}