Function git_pack::data::header::encode

pub fn encode(version: Version, num_objects: u32) -> [u8; 12]

Write a pack data header for the given version with num_objects, returning the encoded 12-byte buffer.
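
For illustration, a minimal sketch of calling encode directly. The assertions assume the on-disk pack header layout (the b"PACK" magic, a big-endian version number, then a big-endian object count) and a Version::V2 pack:

use git_pack::data::{header, Version};

// Encode a header announcing a v2 pack that will contain 42 objects.
let bytes = header::encode(Version::V2, 42);
assert_eq!(&bytes[..4], b"PACK"); // 4-byte magic
assert_eq!(u32::from_be_bytes(bytes[4..8].try_into().unwrap()), 2); // pack version
assert_eq!(u32::from_be_bytes(bytes[8..12].try_into().unwrap()), 42); // object count

As the repository examples below show, the returned buffer is written at offset 0 of a pack stream, sometimes first with a placeholder count that is rewritten once the real number of entries is known.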

Examples found in repository
src/data/input/entries_to_bytes.rs (line 71)
    fn next_inner(&mut self, entry: input::Entry) -> Result<input::Entry, input::Error> {
        if self.num_entries == 0 {
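            // Write a placeholder header claiming zero objects; write_header_and_digest()
            // rewrites it with the real count once all entries have been seen.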
            let header_bytes = crate::data::header::encode(self.data_version, 0);
            self.output.write_all(&header_bytes[..])?;
        }
        self.num_entries += 1;
        entry.header.write_to(entry.decompressed_size, &mut self.output)?;
        std::io::copy(
            &mut entry
                .compressed
                .as_deref()
                .expect("caller must configure generator to keep compressed bytes"),
            &mut self.output,
        )?;
        Ok(entry)
    }

    fn write_header_and_digest(&mut self, last_entry: Option<&mut input::Entry>) -> Result<(), input::Error> {
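        // Re-encode the header with the final entry count, then overwrite the placeholder at offset 0.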
        let header_bytes = crate::data::header::encode(self.data_version, self.num_entries);
        let num_bytes_written = if last_entry.is_some() {
            self.output.stream_position()?
        } else {
            header_bytes.len() as u64
        };
        self.output.seek(std::io::SeekFrom::Start(0))?;
        self.output.write_all(&header_bytes[..])?;
        self.output.flush()?;

        self.output.seek(std::io::SeekFrom::Start(0))?;
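        // Re-read everything written so far to compute the trailing pack checksum.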
        let interrupt_never = std::sync::atomic::AtomicBool::new(false);
        let digest = hash::bytes(
            &mut self.output,
            num_bytes_written as usize,
            self.object_hash,
            &mut git_features::progress::Discard,
            &interrupt_never,
        )?;
        self.output.write_all(digest.as_slice())?;
        self.output.flush()?;

        self.is_done = true;
        if let Some(last_entry) = last_entry {
            last_entry.trailer = Some(digest);
        }
        self.trailer = Some(digest);
        Ok(())
    }
More examples
src/data/output/bytes.rs (line 99)
    fn next_inner(&mut self) -> Result<u64, Error<E>> {
        let previous_written = self.written;
        if let Some((version, num_entries)) = self.header_info.take() {
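            // Here the entry count is known up front, so the header is written once and never rewritten.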
            let header_bytes = crate::data::header::encode(version, num_entries);
            self.output.write_all(&header_bytes[..])?;
            self.written += header_bytes.len() as u64;
        }
        match self.input.next() {
            Some(entries) => {
                for entry in entries.map_err(Error::Input)? {
                    if entry.is_invalid() {
                        self.pack_offsets_and_validity.push((0, false));
                        continue;
                    };
                    self.pack_offsets_and_validity.push((self.written, true));
                    let header = entry.to_entry_header(self.entry_version, |index| {
                        let (base_offset, is_valid_object) = self.pack_offsets_and_validity[index];
                        if !is_valid_object {
                        unreachable!("if you see this the object database is corrupt as a delta refers to a non-existing object")
                        }
                        self.written - base_offset
                    });
                    self.written += header.write_to(entry.decompressed_size as u64, &mut self.output)? as u64;
                    self.written += std::io::copy(&mut &*entry.compressed_data, &mut self.output)?;
                }
            }
            None => {
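                // Input exhausted: finalize the running hash over all bytes written so far
                // and append it as the pack trailer.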
                let digest = self.output.hash.clone().digest();
                self.output.inner.write_all(&digest[..])?;
                self.written += digest.len() as u64;
                self.output.inner.flush()?;
                self.is_done = true;
                self.trailer = Some(git_hash::ObjectId::from(digest));
            }
        };
        Ok(self.written - previous_written)
    }
src/index/write/mod.rs (line 194)
    pub fn write_data_iter_to_stream<F, F2>(
        version: crate::index::Version,
        make_resolver: F,
        entries: impl Iterator<Item = Result<crate::data::input::Entry, crate::data::input::Error>>,
        thread_limit: Option<usize>,
        mut root_progress: impl Progress,
        out: impl io::Write,
        should_interrupt: &AtomicBool,
        object_hash: git_hash::Kind,
        pack_version: crate::data::Version,
    ) -> Result<Outcome, Error>
    where
        F: FnOnce() -> io::Result<F2>,
        F2: for<'r> Fn(crate::data::EntryRange, &'r mut Vec<u8>) -> Option<()> + Send + Clone,
    {
        if version != crate::index::Version::default() {
            return Err(Error::Unsupported(version));
        }
        let mut num_objects: usize = 0;
        let mut last_seen_trailer = None;
        let anticipated_num_objects = entries.size_hint().1.unwrap_or_else(|| entries.size_hint().0);
        let mut tree = Tree::with_capacity(anticipated_num_objects)?;
        let indexing_start = std::time::Instant::now();

        root_progress.init(Some(4), progress::steps());
        let mut objects_progress = root_progress.add_child_with_id("indexing", *b"IWIO"); /* Index Write Index Objects */
        objects_progress.init(entries.size_hint().1, progress::count("objects"));
        let mut decompressed_progress = root_progress.add_child_with_id("decompressing", *b"IWDB"); /* Index Write Decompressed Bytes */
        decompressed_progress.init(None, progress::bytes());
        let mut pack_entries_end: u64 = 0;

        for entry in entries {
            let crate::data::input::Entry {
                header,
                pack_offset,
                crc32,
                header_size,
                compressed: _,
                compressed_size,
                decompressed_size,
                trailer,
            } = entry?;

            decompressed_progress.inc_by(decompressed_size as usize);

            let entry_len = header_size as u64 + compressed_size;
            pack_entries_end = pack_offset + entry_len;

            let crc32 = crc32.expect("crc32 to be computed by the iterator. Caller assures correct configuration.");

            use crate::data::entry::Header::*;
            match header {
                Tree | Blob | Commit | Tag => {
                    tree.add_root(
                        pack_offset,
                        TreeEntry {
                            id: object_hash.null(),
                            crc32,
                        },
                    )?;
                }
                RefDelta { .. } => return Err(Error::IteratorInvariantNoRefDelta),
                OfsDelta { base_distance } => {
                    let base_pack_offset =
                        crate::data::entry::Header::verified_base_pack_offset(pack_offset, base_distance).ok_or(
                            Error::IteratorInvariantBaseOffset {
                                pack_offset,
                                distance: base_distance,
                            },
                        )?;
                    tree.add_child(
                        base_pack_offset,
                        pack_offset,
                        TreeEntry {
                            id: object_hash.null(),
                            crc32,
                        },
                    )?;
                }
            };
            last_seen_trailer = trailer;
            num_objects += 1;
            objects_progress.inc();
        }
        if num_objects != anticipated_num_objects {
            objects_progress.info(format!(
                "{} objects were resolved into {} objects during thin-pack resolution",
                anticipated_num_objects, num_objects
            ));
        }
        let num_objects: u32 = num_objects
            .try_into()
            .map_err(|_| Error::IteratorInvariantTooManyObjects(num_objects))?;

        objects_progress.show_throughput(indexing_start);
        decompressed_progress.show_throughput(indexing_start);
        drop(objects_progress);
        drop(decompressed_progress);

        root_progress.inc();

        let resolver = make_resolver()?;
        let sorted_pack_offsets_by_oid = {
            let traverse::Outcome { roots, children } = tree.traverse(
                resolver,
                pack_entries_end,
                || (),
                |data,
                 _progress,
                 traverse::Context {
                     entry,
                     decompressed: bytes,
                     ..
                 }| {
                    modify_base(data, entry, bytes, version.hash());
                    Ok::<_, Error>(())
                },
                traverse::Options {
                    object_progress: root_progress.add_child_with_id("Resolving", *b"IWRO"), /* Index Write Resolve Objects */
                    size_progress: root_progress.add_child_with_id("Decoding", *b"IWDB"), /* Index Write Decode Bytes */
                    thread_limit,
                    should_interrupt,
                    object_hash,
                },
            )?;
            root_progress.inc();

            let mut items = roots;
            items.extend(children);
            {
                let _progress = root_progress.add_child_with_id("sorting by id", *b"info");
                items.sort_by_key(|e| e.data.id);
            }

            root_progress.inc();
            items
        };

        let pack_hash = match last_seen_trailer {
            Some(ph) => ph,
            None if num_objects == 0 => {
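                // An empty pack has no trailer to inherit; its hash is the digest of just the 12-byte header.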
                let header = crate::data::header::encode(pack_version, 0);
                let mut hasher = git_features::hash::hasher(object_hash);
                hasher.update(&header);
                git_hash::ObjectId::from(hasher.digest())
            }
            None => return Err(Error::IteratorInvariantTrailer),
        };
        let index_hash = encode::write_to(
            out,
            sorted_pack_offsets_by_oid,
            &pack_hash,
            version,
            root_progress.add_child_with_id("writing index file", *b"IWBW"), /* Index Write Bytes Written */
        )?;
        root_progress.show_throughput_with(
            indexing_start,
            num_objects as usize,
            progress::count("objects").expect("unit always set"),
            progress::MessageLevel::Success,
        );
        Ok(Outcome {
            index_version: version,
            index_hash,
            data_hash: pack_hash,
            num_objects,
        })
    }