// kwui_cli/packager.rs
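//! Packs and unpacks `kwui` resource archives.
//!
//! Archive layout, as written by `write_header_data` and read back by
//! `read_header`: a 4-byte magic, a `u16` version, file flags, the solid
//! chunk size, directory and file counts, then three tables (ternary
//! search tree nodes mapping destination paths to item indices, items
//! giving offset/length into the uncompressed data stream, and chunks
//! describing how that stream is compressed), followed by the chunk
//! data itself. All multi-byte fields are little-endian.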

use crate::file_format::*;
use anyhow;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use lzf::{compress as lzf_compress, decompress as lzf_decompress};
use sha1::{Digest, Sha1};
use size::Size;
use std::{
    fs::File,
    io::{BufRead, BufReader, Read, Seek, SeekFrom, Write},
    path::PathBuf,
};

#[cfg(windows)]
use std::os::windows::fs::MetadataExt;

#[cfg(unix)]
use std::os::unix::fs::MetadataExt;

const SOLID_CHUNK_SIZE: usize = 256 << 20;

#[derive(Clone)]
pub struct PackItem {
    pub src: String,
    pub dst: String,
}

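/// Packs the given files and directories into a single archive at
/// `output_file`. Files are deduplicated by length and SHA-1 digest, and
/// each unique file becomes one chunk, stored LZF-compressed when the
/// compressed form is smaller than 80% of the original.
///
/// A hypothetical invocation (the paths are illustrative):
///
/// ```ignore
/// pack(
///     "assets.pkg",
///     vec![PackItem { src: "res/icon.png".into(), dst: "/icon.png".into() }],
///     vec!["/".into()],
/// )?;
/// ```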
pub fn pack(
    output_file: &str,
    file_items: Vec<PackItem>,
    dir_items: Vec<String>,
) -> anyhow::Result<()> {
    let mut f = File::create(output_file)?;

    let header = write_header_data(&mut f, false, file_items.clone(), dir_items.clone())?;

    let mut uncompressed_buffer: Vec<u8> = Vec::new();
    let mut filler = BufferFiller::new(header.items.clone());
    let mut chunks: Vec<Chunk> = Vec::with_capacity(header.items.len());
    for item in header.items.iter() {
        if (item.flags & u16::from(ItemFlag::Dir)) != 0 {
            continue;
        }
        uncompressed_buffer.resize(item.length as usize, 0);
        let uncompressed_size = filler.fill(&mut uncompressed_buffer);
        if uncompressed_size != uncompressed_buffer.len() {
            anyhow::bail!("BufferFiller::fill error.");
        }
        let mut compressed = false;
        if need_compress(&item.fname) {
            match lzf_compress(&uncompressed_buffer[0..uncompressed_size]) {
                Ok(compressed_buffer) => {
                    if compressed_buffer.len() < uncompressed_size * 4 / 5 {
                        println!(
                            "Compress chunk [{}]->[{}]",
                            Size::from_bytes(uncompressed_size),
                            Size::from_bytes(compressed_buffer.len()),
                        );
                        chunks.push(Chunk {
                            length: uncompressed_size,
                            compressed_length: compressed_buffer.len(),
                            algorithm: AlgorithmType::Lzf,
                            flags: 0,
                        });
                        f.write_all(&compressed_buffer)?;
                        compressed = true;
                    }
                }
                Err(e) => {
                    println!("Compress warning [{}], {}", item.fname, e);
                }
            }
        }
        if !compressed {
            chunks.push(Chunk {
                length: uncompressed_size,
                compressed_length: uncompressed_size,
                algorithm: AlgorithmType::Store,
                flags: 0,
            });
            f.write_all(&uncompressed_buffer[0..uncompressed_size])?;
        }
    }
    let (new_total_size, total_compressed) = chunks.iter().fold((0, 0), |acc, x| {
        (acc.0 + x.length, acc.1 + x.compressed_length)
    });

    if header.total_size != new_total_size {
        anyhow::bail!(
            "total_size mismatch {} / {}",
            header.total_size,
            new_total_size
        );
    }
    if header.total_chunks != chunks.len() {
        anyhow::bail!(
            "total_chunks mismatch {} / {}",
            header.total_chunks,
            chunks.len()
        );
    }

    println!(
        "Total compressed [{}] -> [{}]",
        Size::from_bytes(header.total_size),
        Size::from_bytes(total_compressed),
    );

    update_header_data(&mut f, &header, &chunks)?;
    f.flush()?;

    Ok(())
}

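/// Streams the source files of the item table into caller-supplied
/// buffers, in item order, so `pack` can process one item at a time
/// without holding every source file in memory.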
struct BufferFiller {
    items: Vec<Item>,
    curr_index: usize,
    curr_offset: usize,
    curr_file: Option<File>,
}

impl BufferFiller {
    fn new(items: Vec<Item>) -> Self {
        Self {
            items,
            curr_index: 0,
            curr_offset: 0,
            curr_file: None,
        }
    }
    fn fill(&mut self, buf: &mut [u8]) -> usize {
        let mut start = 0;
        let end = buf.len();
        while start < end {
            self.advance();
            if self.curr_index >= self.items.len() {
                break;
            }
            if self.curr_file.is_none() {
                break;
            }
            if let Some(ref mut f) = self.curr_file {
                if let Ok(n) = f.read(&mut buf[start..]) {
                    start += n;
                    self.curr_offset += n;
                    if n == 0 {
                        break;
                    }
                } else {
                    break;
                }
            }
        }
        start
    }
    fn advance(&mut self) {
        while self.curr_index < self.items.len()
            && self.curr_offset >= self.items[self.curr_index].length as usize
        {
            self.curr_file = None;
            self.curr_offset = 0;
            self.curr_index += 1;
        }
        if self.curr_index < self.items.len() && self.curr_file.is_none() {
            self.curr_file
                .replace(File::open(&self.items[self.curr_index].fname).unwrap());
        }
    }
}

struct Entry {
    src: String,
    dst: String,
    item_index: usize,
}

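/// A ternary search tree over destination paths encoded as UTF-16.
/// Every key ends in a NUL node whose `eqkid` holds the associated item
/// index; child links are `u16` indices into `nodes` (`u16::MAX` means
/// "no child"), which makes the tree directly serializable to the
/// archive's node table.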
struct LookupTable {
    nodes: Vec<Node>,
}

impl Entry {
    fn is_dir(&self) -> bool {
        self.item_index == 0
    }
}

impl Node {
    fn new() -> Self {
        Self {
            ch: 0,
            lokid: u16::MAX,
            eqkid: u16::MAX,
            hikid: u16::MAX,
        }
    }
}

enum ParentNid {
    None,
    Lokid(usize),
    Eqkid(usize),
    Hikid(usize),
}

impl ParentNid {
    fn update(&self, nodes: &mut [Node], nid: usize) {
        match self {
            ParentNid::None => (),
            ParentNid::Lokid(pnid) => nodes[*pnid].lokid = nid as _,
            ParentNid::Eqkid(pnid) => nodes[*pnid].eqkid = nid as _,
            ParentNid::Hikid(pnid) => nodes[*pnid].hikid = nid as _,
        }
    }
}

struct LookupTableIterator {
    index: usize,
    items: Vec<(String, u16)>,
}

impl Iterator for LookupTableIterator {
    type Item = (String, u16);

    fn next(&mut self) -> Option<Self::Item> {
        if self.index >= self.items.len() {
            return None;
        }
        self.index += 1;
        Some(self.items[self.index - 1].clone())
    }
}

impl LookupTable {
    fn new() -> Self {
        Self { nodes: Vec::new() }
    }
    fn iter(&self) -> LookupTableIterator {
        let mut items = Vec::<(String, u16)>::new();
        let mut stack = Vec::<ParentNid>::new();
        self.build_items(&mut items, &mut stack, 0);
        LookupTableIterator { index: 0, items }
    }
    fn build_items(
        &self,
        items: &mut Vec<(String, u16)>,
        stack: &mut Vec<ParentNid>,
        current: usize,
    ) {
        if current >= self.nodes.len() {
            return;
        }
        if self.nodes[current].ch == 0 {
            let mut u16str = Vec::<u16>::with_capacity(stack.len());
            for pnid in stack.iter() {
                if let ParentNid::Eqkid(idx) = pnid {
                    u16str.push(self.nodes[*idx].ch);
                }
            }
            let key = String::from_utf16_lossy(&u16str);
            let value = self.nodes[current].eqkid;
            items.push((key, value));
        }
        stack.push(ParentNid::Lokid(current));
        self.build_items(items, stack, self.nodes[current].lokid as _);
        stack.pop();

        if self.nodes[current].ch != 0 {
            stack.push(ParentNid::Eqkid(current));
            self.build_items(items, stack, self.nodes[current].eqkid as _);
            stack.pop();
        }

        stack.push(ParentNid::Hikid(current));
        self.build_items(items, stack, self.nodes[current].hikid as _);
        stack.pop();
    }
    fn insert(&mut self, s: &str, v: u16) -> u16 {
        let mut s = s.encode_utf16().collect::<Vec<_>>();
        s.push(0);

        let mut sidx = 0usize;
        let mut nid = 0usize;
        let mut pnid = ParentNid::None;
        while nid < self.nodes.len() {
            let sch = s[sidx];
            let node = &self.nodes[nid];
            if sch < node.ch {
                pnid = ParentNid::Lokid(nid);
                nid = node.lokid as _;
            } else if sch > node.ch {
                pnid = ParentNid::Hikid(nid);
                nid = node.hikid as _;
            } else {
                pnid = ParentNid::Eqkid(nid);
                nid = node.eqkid as _;
                sidx += 1;
                if sch == 0 {
                    return node.eqkid;
                }
            }
        }

        for sch in &s[sidx..] {
            let nid = self.nodes.len();
            pnid.update(&mut self.nodes, nid);

            let mut node = Node::new();
            node.ch = *sch;
            if *sch == 0 {
                node.eqkid = v;
            }
            self.nodes.push(node);
            pnid = ParentNid::Eqkid(nid);
        }
        0
    }
}

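/// Hashes every source file and builds the entry and item tables.
/// All directories share item 0; a file whose length and SHA-1 digest
/// match an existing item is recorded as a duplicate by bumping that
/// item's reference count. Returns the entries, the items, and the
/// number of unique files.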
fn build_entry_and_item(
    mut file_items: Vec<PackItem>,
    dir_items: Vec<String>,
) -> anyhow::Result<(Vec<Entry>, Vec<Item>, usize)> {
    let mut entries: Vec<Entry> = Vec::new();
    let mut items: Vec<Item> = Vec::new();

    // process dir_items
    items.push(Item {
        fname: String::new(),
        digest: empty_sha1(),
        reference: dir_items.len() as _,
        flags: ItemFlag::Dir as _,
        offset: 0,
        length: 0,
    });
    for d in dir_items.iter() {
        entries.push(Entry {
            src: d.clone(),
            dst: d.clone(),
            item_index: 0,
        });
    }

    // process file_items

    // sort for solid
    file_items.sort_by_key(solid_sort_key);

    // dedup
    let mut total_dups = 0;
    let mut total_file_items = 0;
    let mut offset = 0;
    for f in file_items.iter() {
        println!("pack: scan_file {}", f.src);
        let (length, digest) = scan_file(&f.src)?;
        let item_idx = find_item(&items, length as _, &digest);
        if let Some(item_idx) = item_idx {
            items[item_idx].reference += 1;
            total_dups += 1;
            println!("find dup [{}] and [{}]", items[item_idx].fname, f.src);
            entries.push(Entry {
                src: f.src.clone(),
                dst: f.dst.clone(),
                item_index: item_idx,
            });
        } else {
            items.push(Item {
                fname: f.src.clone(),
                digest,
                reference: 1,
                flags: 0,
                offset,
                length: length as usize,
            });
            entries.push(Entry {
                src: f.src.clone(),
                dst: f.dst.clone(),
                item_index: items.len() - 1,
            });
            offset += length as usize;
            total_file_items += 1;
        }
    }
    println!("total_dups {}", total_dups);
    Ok((entries, items, total_file_items))
}

fn empty_sha1() -> [u8; 20] {
    let mut hasher = Sha1::new();
    hasher.update(b"");
    hasher.finalize().into()
}

fn find_item(items: &[Item], length: usize, digest: &[u8; 20]) -> Option<usize> {
    for (idx, item) in items.iter().enumerate() {
        if idx == 0 {
            continue;
        }
        if item.length == length && item.digest == *digest {
            return Some(idx);
        }
    }
    None
}

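/// Returns a file's length and SHA-1 digest, hashing through a
/// `BufReader` so the whole file is never resident in memory.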
fn scan_file(fpath: &str) -> anyhow::Result<(u64, [u8; 20])> {
    let f = File::open(fpath)?;

    #[cfg(windows)]
    let file_len = f.metadata()?.file_size();
    #[cfg(unix)]
    let file_len = f.metadata()?.size();

    let mut reader = BufReader::new(f);
    let mut hasher = Sha1::new();
    loop {
        let length = {
            let buffer = reader.fill_buf()?;
            hasher.update(buffer);
            buffer.len()
        };
        if length == 0 {
            break;
        }
        reader.consume(length);
    }
    let digest = hasher.finalize().into();
    Ok((file_len, digest))
}

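/// Sort key used to group similar files together before packing
/// (executables first, then by extension, file name, and full path),
/// which should help compression ratios for solid archives.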
fn solid_sort_key(p: &PackItem) -> (i32, String, String, String) {
    let src = p.src.to_lowercase();
    let mut prio: i32 = 0;
    let path = PathBuf::from(&src);
    let file_name = path
        .file_name()
        .map(|s| s.to_string_lossy().to_string())
        .unwrap_or_default();
    if let Some(idx) = src.rfind('.') {
        let suffix = &src[(idx + 1)..];
        if suffix == "exe" || suffix == "dll" || suffix == "ocx" || suffix == "sys" {
            prio = -1;
        }
        (prio, suffix.to_string(), file_name, src)
    } else {
        (prio, "zzz".to_string(), file_name, src)
    }
}

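/// Extracts every entry of `input_file` into `target_dir`. Entries are
/// processed in item-index order, so a duplicate file can be materialized
/// by copying the previously extracted path instead of re-reading the
/// archive.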
pub fn unpack(input_file: String, target_dir: PathBuf) -> anyhow::Result<()> {
    let mut f = File::open(&input_file)?;
    let header = read_header(&mut f)?;

    let mut lookup_table = LookupTable::new();
    lookup_table.nodes = header.nodes.clone();

    println!("dir_count={} file_count={}", header.dir_count, header.file_count);
    println!(
        "node_count={} item_count={} chunk_count={}",
        header.nodes.len(),
        header.items.len(),
        header.chunks.len()
    );

    let mut work_items = lookup_table.iter().collect::<Vec<_>>();
    work_items.sort_by_key(|(_k, idx)| *idx);

    // (item_index, dst)
    let mut last_item: Option<(u16, PathBuf)> = None;
    let mut bw = BufferWriter::new(&mut f, header.chunk_size, header.chunks);
    for (dst_path, item_idx) in work_items.iter() {
        println!("extract [{}]...", dst_path);
        let dst = target_dir.join(&dst_path[1..]);
        if *item_idx == 0 {
            std::fs::create_dir_all(dst)?;
        } else {
            if let Some((last_idx, last_dst)) = last_item.as_ref() {
                if last_idx == item_idx {
                    println!(
                        "extract dup [{}]->[{}]",
                        last_dst.to_string_lossy(),
                        dst.to_string_lossy()
                    );
                    std::fs::copy(last_dst, dst)?;
                    continue;
                }
            }
            let mut f = File::create(&dst)?;
            let length = header.items[*item_idx as usize].length as usize;
            bw.write(&mut f, length)?;
            last_item.replace((*item_idx, dst));
        }
    }

    Ok(())
}

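/// Prints the archive's summary counts and each entry's destination
/// path without extracting any data.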
pub fn list(input_file: String) -> anyhow::Result<()> {
    let mut f = File::open(&input_file)?;
    let header = read_header(&mut f)?;

    let mut lookup_table = LookupTable::new();
    lookup_table.nodes = header.nodes.clone();

    if (header.flags & u16::from(FileFlag::Solid)) != 0 {
        print!("solid archive ");
    } else {
        print!("archive ");
    }
    println!("dir_count={} file_count={}", header.dir_count, header.file_count);
    println!(
        "node_count={} item_count={} chunk_count={}",
        header.nodes.len(),
        header.items.len(),
        header.chunks.len()
    );

    let mut work_items = lookup_table.iter().collect::<Vec<_>>();
    work_items.sort_by_key(|(_k, idx)| *idx);

    for (i, (dst_path, _item_idx)) in work_items.iter().enumerate() {
        println!("#{}: {}", i + 1, dst_path);
    }

    Ok(())
}

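/// Streams decompressed item data out of the archive. Chunks are fetched
/// and decoded lazily; `write` copies the requested number of decoded
/// bytes into the output, crossing chunk boundaries as needed.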
struct BufferWriter<'r, R: std::io::Read> {
    reader: &'r mut R,
    chunk_size: usize,
    chunks: Vec<Chunk>,
    curr_index: usize,
    curr_offset: usize,
    curr_buf: Vec<u8>,
    fetched: bool,
}

impl<'r, R: std::io::Read> BufferWriter<'r, R> {
    fn new(reader: &'r mut R, chunk_size: usize, chunks: Vec<Chunk>) -> Self {
        Self {
            reader,
            chunk_size,
            chunks,
            curr_index: 0,
            curr_offset: 0,
            curr_buf: Vec::with_capacity(chunk_size),
            fetched: false,
        }
    }
    fn write<W: std::io::Write>(
        &mut self,
        writer: &mut W,
        length: usize,
    ) -> std::io::Result<usize> {
        let mut remain = length;
        while remain > 0 {
            self.fetch()?;
            let n = std::cmp::min(remain, self.curr_buf.len() - self.curr_offset);
            if n == 0 {
                return Err(std::io::ErrorKind::InvalidData.into());
            }
            writer.write_all(&self.curr_buf[self.curr_offset..(self.curr_offset + n)])?;
            self.curr_offset += n;
            remain -= n;
        }
        Ok(length)
    }
    fn fetch_chunk(&mut self, chunk: Chunk) -> std::io::Result<()> {
        self.curr_buf.resize(chunk.length, 0);
        let mut compressed = vec![0u8; chunk.compressed_length];
        self.reader.read_exact(&mut compressed)?;

        if chunk.algorithm == AlgorithmType::Store {
            self.curr_buf.copy_from_slice(&compressed);
        } else if chunk.algorithm == AlgorithmType::Lzf {
            self.curr_buf = lzf_decompress(&compressed, self.curr_buf.len())
                .map_err(|e| std::io::Error::other(format!("lzf_decompress error {}", e)))?;
        } else {
            return Err(std::io::Error::other("invalid chunk compression algorithm"));
        }
        Ok(())
    }
    fn fetch(&mut self) -> std::io::Result<()> {
        if !self.fetched {
            self.fetched = true;
            if let Some(chunk) = self.chunks.first() {
                self.fetch_chunk(chunk.clone())?;
            }
        }
        while self.curr_offset >= self.curr_buf.len() {
            self.curr_index += 1;
            self.curr_offset = 0;
            self.curr_buf.clear();
            if self.curr_index >= self.chunks.len() {
                break;
            }
            self.fetch_chunk(self.chunks[self.curr_index].clone())?;
        }
        Ok(())
    }
}

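/// Reads and validates the archive header, returning the node, item, and
/// chunk tables. Item file names and digests are not stored on disk, so
/// those fields come back empty.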
fn read_header(f: &mut File) -> anyhow::Result<Header> {
    let mut magic = [0u8; 4];
    f.read_exact(&mut magic)?;
    if magic != FILE_MAGIC {
        anyhow::bail!("invalid file magic");
    }
    let version = f.read_u16::<LittleEndian>()?;
    if version != FILE_VERSION {
        anyhow::bail!("invalid file version {}", version);
    }
    let flags = f.read_u16::<LittleEndian>()?;
    let chunk_size = f.read_u32::<LittleEndian>()? as usize;
    let dir_count = f.read_u32::<LittleEndian>()? as usize;
    let file_count = f.read_u32::<LittleEndian>()? as usize;

    let node_count = f.read_u32::<LittleEndian>()? as usize;
    let mut nodes = Vec::with_capacity(node_count);
    for _ in 0..node_count {
        let ch = f.read_u16::<LittleEndian>()?;
        let lokid = f.read_u16::<LittleEndian>()?;
        let eqkid = f.read_u16::<LittleEndian>()?;
        let hikid = f.read_u16::<LittleEndian>()?;
        nodes.push(Node {
            ch,
            lokid,
            eqkid,
            hikid,
        });
    }

    let item_count = f.read_u32::<LittleEndian>()? as usize;
    let mut items = Vec::<Item>::with_capacity(item_count);
    for _ in 0..item_count {
        let reference = f.read_u16::<LittleEndian>()?;
        let flags = f.read_u16::<LittleEndian>()?;
        let offset = f.read_u32::<LittleEndian>()? as _;
        let length = f.read_u32::<LittleEndian>()? as _;
        items.push(Item {
            digest: [0u8; 20],
            fname: String::new(),
            reference,
            flags,
            offset,
            length,
        });
    }

    let chunk_count = f.read_u32::<LittleEndian>()? as usize;
    let mut chunks = Vec::<Chunk>::with_capacity(chunk_count);
    for _ in 0..chunk_count {
        let algorithm = f.read_u16::<LittleEndian>()?.try_into()?;
        let flags = f.read_u16::<LittleEndian>()?;
        let length = f.read_u32::<LittleEndian>()? as usize;
        let compressed_length = f.read_u32::<LittleEndian>()? as usize;
        chunks.push(Chunk {
            algorithm,
            flags,
            length,
            compressed_length,
        });
    }

    Ok(Header {
        magic,
        version,
        flags,
        chunk_size,
        dir_count,
        file_count,
        nodes,
        items,
        chunks,
    })
}

struct HeaderData {
    total_size: usize,
    total_chunks: usize,
    items: Vec<Item>,
    nodes_file_offset: u64,
    items_file_offset: u64,
    chunks_file_offset: u64,
    data_file_offset: u64,
}

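/// Writes the complete header with a zeroed placeholder chunk table;
/// `pack` patches that table via `update_header_data` once the real
/// compressed sizes are known. The returned offsets record where each
/// table lives so it can be rewritten in place.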
fn write_header_data(
    f: &mut File,
    solid: bool,
    file_items: Vec<PackItem>,
    dir_items: Vec<String>,
) -> anyhow::Result<HeaderData> {
    let chunk_size: usize = if solid { SOLID_CHUNK_SIZE } else { 0 };
    f.write_all(&FILE_MAGIC)?;
    f.write_u16::<LittleEndian>(FILE_VERSION)?;
    let file_flags: u16 = if solid { FileFlag::Solid.into() } else { 0 };
    f.write_u16::<LittleEndian>(file_flags)?;
    f.write_u32::<LittleEndian>(chunk_size as _)?;
    f.write_u32::<LittleEndian>(dir_items.len() as _)?;
    f.write_u32::<LittleEndian>(file_items.len() as _)?;

    let (entries, items, num_file_items) = build_entry_and_item(file_items, dir_items)?;
    let mut tbl = LookupTable::new();
    for e in entries.iter() {
        tbl.insert(&e.dst, e.item_index as _);
    }
    println!("lookupTbl len={}", tbl.nodes.len());
    let nodes_file_offset = f.stream_position()?;
    f.write_u32::<LittleEndian>(tbl.nodes.len() as u32)?;
    for node in tbl.nodes.iter() {
        f.write_u16::<LittleEndian>(node.ch)?;
        f.write_u16::<LittleEndian>(node.lokid)?;
        f.write_u16::<LittleEndian>(node.eqkid)?;
        f.write_u16::<LittleEndian>(node.hikid)?;
    }

    println!("item len={}", items.len());
    let items_file_offset = f.stream_position()?;
    f.write_u32::<LittleEndian>(items.len() as u32)?;
    for item in items.iter() {
        f.write_u16::<LittleEndian>(item.reference)?;
        f.write_u16::<LittleEndian>(item.flags)?;
        f.write_u32::<LittleEndian>(item.offset as _)?;
        f.write_u32::<LittleEndian>(item.length as _)?;
    }

    let total_size = items.iter().fold(0, |acc, x| acc + x.length);
    let total_chunks = if solid {
        (total_size + SOLID_CHUNK_SIZE - 1) / SOLID_CHUNK_SIZE
    } else {
        num_file_items
    };
    let chunks = vec![
        Chunk {
            algorithm: AlgorithmType::Store,
            flags: 0,
            length: 0,
            compressed_length: 0,
        };
        total_chunks
    ];
    println!("chunkTbl len={}", chunks.len());
    let chunks_file_offset = f.stream_position()?;
    f.write_u32::<LittleEndian>(chunks.len() as u32)?;
    for chunk in chunks.iter() {
        f.write_u16::<LittleEndian>(chunk.algorithm as _)?;
        f.write_u16::<LittleEndian>(chunk.flags)?;
        f.write_u32::<LittleEndian>(chunk.length as _)?;
        f.write_u32::<LittleEndian>(chunk.compressed_length as _)?;
    }

    let data_file_offset = f.stream_position()?;

    Ok(HeaderData {
        total_size,
        total_chunks,
        items,
        nodes_file_offset,
        items_file_offset,
        chunks_file_offset,
        data_file_offset,
    })
}

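/// Seeks back to the chunk table written by `write_header_data` and
/// overwrites it with the final per-chunk algorithms and sizes.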
fn update_header_data(f: &mut File, header: &HeaderData, chunks: &[Chunk]) -> anyhow::Result<()> {
    f.seek(SeekFrom::Start(header.chunks_file_offset))?;
    f.write_u32::<LittleEndian>(chunks.len() as u32)?;
    for chunk in chunks.iter() {
        f.write_u16::<LittleEndian>(chunk.algorithm as _)?;
        f.write_u16::<LittleEndian>(chunk.flags)?;
        f.write_u32::<LittleEndian>(chunk.length as _)?;
        f.write_u32::<LittleEndian>(chunk.compressed_length as _)?;
    }

    Ok(())
}

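/// Skips LZF for image formats that are already compressed, since
/// recompressing them rarely pays off.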
fn need_compress(fname: &str) -> bool {
    let fname = fname.to_lowercase();
    let is_image = fname.ends_with(".png")
        || fname.ends_with(".gif")
        || fname.ends_with(".jpg")
        || fname.ends_with(".jpeg");
    !is_image
}
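
// A minimal round-trip sketch for `LookupTable`, using only types defined
// in this module; the paths and item indices below are hypothetical test
// data, not values taken from a real archive.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn lookup_table_roundtrip() {
        let mut tbl = LookupTable::new();
        // Item index 0 is reserved for directories by `build_entry_and_item`,
        // so file entries start at 1.
        tbl.insert("/a.txt", 1);
        tbl.insert("/b/c.txt", 2);

        // Keys inserted into the ternary search tree should come back out
        // of `iter()` paired with the values they were inserted with.
        let mut got = tbl.iter().collect::<Vec<_>>();
        got.sort_by_key(|(_path, idx)| *idx);
        assert_eq!(
            got,
            vec![("/a.txt".to_string(), 1), ("/b/c.txt".to_string(), 2)]
        );
    }
}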