1use std::collections::HashMap;
8use std::ffi::{OsStr, OsString};
9use std::fs::File;
10use std::io::{Seek, SeekFrom};
11use std::ops::Deref;
12use std::os::unix::ffi::OsStrExt;
13use std::path::{Path, PathBuf};
14use std::sync::Arc;
15
16use anyhow::{anyhow, bail, Context, Error, Result};
17use base64::Engine;
18use nix::NixPath;
19use nydus_rafs::metadata::chunk::ChunkWrapper;
20use nydus_rafs::metadata::inode::{InodeWrapper, RafsInodeFlags, RafsV6Inode};
21use nydus_rafs::metadata::layout::v5::RafsV5ChunkInfo;
22use nydus_rafs::metadata::layout::RafsXAttrs;
23use nydus_rafs::metadata::RafsVersion;
24use nydus_storage::device::BlobChunkFlags;
25use nydus_storage::{RAFS_MAX_CHUNKS_PER_BLOB, RAFS_MAX_CHUNK_SIZE};
26use nydus_utils::compact::makedev;
27use nydus_utils::compress::{self, compute_compressed_gzip_size};
28use nydus_utils::digest::{self, DigestData, RafsDigest};
29use nydus_utils::{lazy_drop, root_tracer, timing_tracer, try_round_up_4k, ByteSize};
30use serde::{Deserialize, Serialize};
31
32use crate::core::context::{Artifact, NoopArtifactWriter};
33
34use super::core::blob::Blob;
35use super::core::context::{
36 ArtifactWriter, BlobManager, BootstrapManager, BuildContext, BuildOutput,
37};
38use super::core::node::{ChunkSource, Node, NodeChunk, NodeInfo};
39use super::{
40 build_bootstrap, dump_bootstrap, finalize_blob, Bootstrap, Builder, TarBuilder, Tree, TreeNode,
41};
42
/// One entry deserialized from an eStargz TOC index (`stargz.index.json`).
///
/// Field names follow the eStargz TOC JSON schema; `#[serde(rename)]` maps the
/// camelCase JSON keys onto snake_case Rust fields.
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
struct TocEntry {
    /// Path of the entry inside the tar archive; `normalize()` rewrites it to
    /// an absolute path rooted at "/".
    pub name: PathBuf,

    /// Entry type: "dir", "reg", "symlink", "hardlink", "chunk", "block",
    /// "char" or "fifo" (see the `is_*()` predicates below).
    #[serde(rename = "type")]
    pub toc_type: String,

    /// Uncompressed file size; only meaningful for regular files (see `size()`).
    #[serde(default)]
    pub size: u64,

    /// Link target for "symlink"/"hardlink" entries; `normalize()` rejects an
    /// empty target for those types and makes hardlink targets absolute.
    #[serde(default, rename = "linkName")]
    pub link_name: PathBuf,

    /// Permission bits from the tar header; the file-type bits are derived
    /// from `toc_type` by `Self::mode()`.
    #[serde(default)]
    pub mode: u32,

    /// Owner user id from the tar header.
    #[serde(default)]
    pub uid: u32,

    /// Owner group id from the tar header.
    #[serde(default)]
    pub gid: u32,

    /// Owner user name from the tar header; not read by this builder.
    #[serde(default, rename = "userName")]
    pub uname: String,

    /// Owner group name from the tar header; not read by this builder.
    #[serde(default, rename = "groupName")]
    pub gname: String,

    /// Device major number; combined with `dev_minor` by `rdev()` for
    /// block/char device entries.
    #[serde(default, rename = "devMajor")]
    pub dev_major: u64,

    /// Device minor number (see `rdev()`).
    #[serde(default, rename = "devMinor")]
    pub dev_minor: u64,

    /// Extended attributes; values are base64-encoded strings (decoded in
    /// `StargzBuilder::parse_entry()`).
    #[serde(default)]
    pub xattrs: HashMap<String, String>,

    /// Content digest of the entry; `normalize()` requires it to be non-empty
    /// for regular files and chunks. Not parsed by this builder.
    #[serde(default)]
    pub digest: String,

    /// Offset of the entry's compressed data in the blob; used as the chunk's
    /// compressed offset when building the tree.
    #[serde(default)]
    pub offset: u64,

    /// Offset of this chunk within the file's uncompressed content; must be
    /// chunk-size aligned (validated in `get_content_size()`).
    #[serde(default, rename = "chunkOffset")]
    pub chunk_offset: u64,

    /// Uncompressed size of this chunk; zero marks the last chunk of a file
    /// (its size is derived from the owning regular file entry).
    #[serde(default, rename = "chunkSize")]
    pub chunk_size: u64,

    /// Chunk content digest in "sha256:<64 hex digits>" form (see `block_id()`).
    #[serde(default, rename = "chunkDigest")]
    pub chunk_digest: String,

    /// "innerOffset" field of the eStargz TOC schema; not read by this builder.
    #[serde(default, rename = "innerOffset")]
    pub inner_offset: u64,
}
179
180impl TocEntry {
181 pub fn is_dir(&self) -> bool {
183 self.toc_type.as_str() == "dir"
184 }
185
186 pub fn is_reg(&self) -> bool {
188 self.toc_type.as_str() == "reg"
189 }
190
191 pub fn is_symlink(&self) -> bool {
193 self.toc_type.as_str() == "symlink"
194 }
195
196 pub fn is_hardlink(&self) -> bool {
198 self.toc_type.as_str() == "hardlink"
199 }
200
201 pub fn is_chunk(&self) -> bool {
203 self.toc_type.as_str() == "chunk"
204 }
205
206 pub fn is_blockdev(&self) -> bool {
208 self.toc_type.as_str() == "block"
209 }
210
211 pub fn is_chardev(&self) -> bool {
213 self.toc_type.as_str() == "char"
214 }
215
216 pub fn is_fifo(&self) -> bool {
218 self.toc_type.as_str() == "fifo"
219 }
220
221 pub fn is_special(&self) -> bool {
223 self.is_blockdev() || self.is_chardev() || self.is_fifo()
224 }
225
226 pub fn is_supported(&self) -> bool {
227 self.is_dir() || self.is_reg() || self.is_symlink() || self.is_hardlink() || self.is_chunk()
228 }
229
230 pub fn has_xattr(&self) -> bool {
232 !self.xattrs.is_empty()
233 }
234
235 pub fn mode(&self) -> u32 {
237 let mut mode = 0;
238 if self.is_dir() {
239 mode |= libc::S_IFDIR;
240 } else if self.is_reg() || self.is_hardlink() {
241 mode |= libc::S_IFREG;
242 } else if self.is_symlink() {
243 mode |= libc::S_IFLNK;
244 } else if self.is_blockdev() {
245 mode |= libc::S_IFBLK;
246 } else if self.is_chardev() {
247 mode |= libc::S_IFCHR;
248 } else if self.is_fifo() {
249 mode |= libc::S_IFIFO;
250 }
251
252 self.mode & !libc::S_IFMT as u32 | mode as u32
253 }
254
255 pub fn rdev(&self) -> u32 {
257 if self.is_special() {
258 makedev(self.dev_major, self.dev_minor) as u32
259 } else {
260 u32::MAX
261 }
262 }
263
264 pub fn size(&self) -> u64 {
266 if self.is_reg() {
267 self.size
268 } else {
269 0
270 }
271 }
272
273 pub fn name(&self) -> Result<&OsStr> {
277 let name = if self.name == Path::new("/") {
278 OsStr::new("/")
279 } else {
280 self.name
281 .file_name()
282 .ok_or_else(|| anyhow!("stargz: invalid entry name {}", self.name.display()))?
283 };
284 Ok(name)
285 }
286
287 pub fn path(&self) -> &Path {
291 &self.name
292 }
293
294 pub fn hardlink_link_path(&self) -> &Path {
298 assert!(self.is_hardlink());
299 &self.link_name
300 }
301
302 pub fn symlink_link_path(&self) -> &Path {
304 assert!(self.is_symlink());
305 &self.link_name
306 }
307
308 pub fn block_id(&self) -> Result<RafsDigest> {
309 if self.chunk_digest.len() != 71 || !self.chunk_digest.starts_with("sha256:") {
310 bail!("stargz: invalid chunk digest {}", self.chunk_digest);
311 }
312 match hex::decode(&self.chunk_digest[7..]) {
313 Err(_e) => bail!("stargz: invalid chunk digest {}", self.chunk_digest),
314 Ok(v) => {
315 let mut data = DigestData::default();
316 data.copy_from_slice(&v[..32]);
317 Ok(RafsDigest { data })
318 }
319 }
320 }
321
322 fn normalize(&mut self) -> Result<()> {
323 if self.name.is_empty() {
324 bail!("stargz: invalid TocEntry with empty name");
325 }
326 self.name = PathBuf::from("/").join(&self.name);
327
328 if !self.is_supported() && !self.is_special() {
329 bail!("stargz: invalid type {} for TocEntry", self.toc_type);
330 }
331
332 if (self.is_symlink() || self.is_hardlink()) && self.link_name.is_empty() {
333 bail!("stargz: empty link target");
334 }
335 if self.is_hardlink() {
336 self.link_name = PathBuf::from("/").join(&self.link_name);
337 }
338
339 if (self.is_reg() || self.is_chunk())
340 && (self.digest.is_empty() || self.chunk_digest.is_empty())
341 {
342 bail!("stargz: missing digest or chunk digest");
343 }
344
345 if self.is_chunk() && self.chunk_offset == 0 {
346 bail!("stargz: chunk offset is zero");
347 }
348
349 Ok(())
350 }
351}
352
/// Top-level structure of an eStargz TOC index file (`stargz.index.json`).
#[derive(Deserialize, Debug, Clone, Default)]
struct TocIndex {
    /// TOC format version; only version 1 is accepted by `load()`.
    pub version: u32,
    /// All TOC entries as deserialized from the JSON array.
    pub entries: Vec<TocEntry>,
}
358
359impl TocIndex {
360 fn load(path: &Path, offset: u64) -> Result<TocIndex> {
361 let mut index_file = File::open(path)
362 .with_context(|| format!("stargz: failed to open index file {:?}", path))?;
363 let pos = index_file
364 .seek(SeekFrom::Start(offset))
365 .context("stargz: failed to seek to start of TOC")?;
366 if pos != offset {
367 bail!("stargz: failed to seek file position to start of TOC");
368 }
369 let mut toc_index: TocIndex = serde_json::from_reader(index_file).with_context(|| {
370 format!(
371 "stargz: failed to deserialize stargz TOC index file {:?}",
372 path
373 )
374 })?;
375
376 if toc_index.version != 1 {
377 return Err(Error::msg(format!(
378 "stargz: unsupported index version {}",
379 toc_index.version
380 )));
381 }
382
383 for entry in toc_index.entries.iter_mut() {
384 entry.normalize()?;
385 }
386
387 Ok(toc_index)
388 }
389}
390
/// Builder converting an eStargz TOC index into a RAFS filesystem image.
pub struct StargzBuilder {
    // Total size of the compressed blob; used as the upper bound when
    // estimating the compressed size of the last chunk in `fix_chunk_info()`.
    blob_size: u64,
    // Shared tar-to-RAFS helper (inode allocation, tree insertion, layer index).
    builder: TarBuilder,
    // Per-file chunk info: absolute file path -> (file size, chunk list).
    file_chunk_map: HashMap<PathBuf, (u64, Vec<NodeChunk>)>,
    // Hardlink path -> target tree node, resolved later in `fix_nodes()`.
    hardlink_map: HashMap<PathBuf, TreeNode>,
    // Running allocation cursor for uncompressed chunk offsets.
    uncompressed_offset: u64,
}
399
400impl StargzBuilder {
401 pub fn new(blob_size: u64, ctx: &BuildContext) -> Self {
403 Self {
404 blob_size,
405 builder: TarBuilder::new(ctx.explicit_uidgid, 0, ctx.fs_version),
406 file_chunk_map: HashMap::new(),
407 hardlink_map: HashMap::new(),
408 uncompressed_offset: 0,
409 }
410 }
411
412 fn build_tree(&mut self, ctx: &mut BuildContext, layer_idx: u16) -> Result<Tree> {
413 let toc_index = TocIndex::load(&ctx.source_path, 0)?;
414 if toc_index.version != 1 {
415 bail!("stargz: TOC version {} is unsupported", toc_index.version);
416 } else if toc_index.entries.is_empty() {
417 bail!("stargz: TOC array is empty");
418 }
419
420 self.builder.layer_idx = layer_idx;
421 let root = self.builder.create_directory(&[OsString::from("/")])?;
422 let mut tree = Tree::new(root);
423
424 let mut last_reg_entry: Option<&TocEntry> = None;
426 for entry in toc_index.entries.iter() {
427 let path = entry.path();
428
429 if !entry.is_supported() {
431 warn!(
432 "stargz: unsupported {} with type {}",
433 path.display(),
434 entry.toc_type
435 );
436 continue;
437 } else if self.builder.is_stargz_special_files(path) {
438 continue;
440 }
441
442 let uncompress_size = Self::get_content_size(ctx, entry, &mut last_reg_entry)?;
444 if (entry.is_reg() || entry.is_chunk()) && uncompress_size != 0 {
445 let block_id = entry
446 .block_id()
447 .context("stargz: failed to get chunk digest")?;
448 let chunk_info = ChunkWrapper::V6(RafsV5ChunkInfo {
450 block_id,
451 blob_index: 0,
452 flags: BlobChunkFlags::COMPRESSED,
453 compressed_size: 0,
454 uncompressed_size: uncompress_size as u32,
455 compressed_offset: entry.offset as u64,
456 uncompressed_offset: self.uncompressed_offset,
457 file_offset: entry.chunk_offset as u64,
458 index: 0,
459 crc32: 0,
460 });
461 let chunk = NodeChunk {
462 source: ChunkSource::Build,
463 inner: Arc::new(chunk_info),
464 };
465
466 if let Some((size, chunks)) = self.file_chunk_map.get_mut(path) {
467 chunks.push(chunk);
468 if entry.is_reg() {
469 *size = entry.size;
470 }
471 } else if entry.is_reg() {
472 self.file_chunk_map
473 .insert(path.to_path_buf(), (entry.size, vec![chunk]));
474 } else {
475 bail!("stargz: file chunk lacks of corresponding head regular file entry");
476 }
477
478 let aligned_chunk_size = if ctx.aligned_chunk {
479 try_round_up_4k(uncompress_size).unwrap()
481 } else {
482 uncompress_size
483 };
484 self.uncompressed_offset += aligned_chunk_size;
485 }
486
487 if !entry.is_chunk() && !self.builder.is_stargz_special_files(path) {
488 self.parse_entry(&mut tree, entry, path)?;
489 }
490 }
491
492 for (size, ref mut chunks) in self.file_chunk_map.values_mut() {
493 Self::sort_and_validate_chunks(chunks, *size)?;
494 }
495
496 Ok(tree)
497 }
498
499 fn get_content_size<'a>(
501 ctx: &mut BuildContext,
502 entry: &'a TocEntry,
503 last_reg_entry: &mut Option<&'a TocEntry>,
504 ) -> Result<u64> {
505 if entry.is_reg() {
506 if entry.chunk_offset == 0 && entry.chunk_size == 0 {
508 Ok(entry.size)
509 } else if entry.chunk_offset % ctx.chunk_size as u64 != 0 {
510 bail!(
511 "stargz: chunk offset (0x{:x}) is not aligned to 0x{:x}",
512 entry.chunk_offset,
513 ctx.chunk_size
514 );
515 } else if entry.chunk_size != ctx.chunk_size as u64 {
516 bail!("stargz: first chunk size is not 0x{:x}", ctx.chunk_size);
517 } else {
518 *last_reg_entry = Some(entry);
519 Ok(entry.chunk_size)
520 }
521 } else if entry.is_chunk() {
522 if entry.chunk_offset % ctx.chunk_size as u64 != 0 {
523 bail!(
524 "stargz: chunk offset (0x{:x}) is not aligned to 0x{:x}",
525 entry.chunk_offset,
526 ctx.chunk_size
527 );
528 } else if entry.chunk_size == 0 {
529 if let Some(reg_entry) = last_reg_entry {
531 let size = reg_entry.size - entry.chunk_offset;
532 if size > ctx.chunk_size as u64 {
533 bail!(
534 "stargz: size of last chunk 0x{:x} is bigger than chunk size 0x {:x}",
535 size,
536 ctx.chunk_size
537 );
538 }
539 *last_reg_entry = None;
540 Ok(size)
541 } else {
542 bail!("stargz: tailer chunk lacks of corresponding head chunk");
543 }
544 } else if entry.chunk_size != ctx.chunk_size as u64 {
545 bail!(
546 "stargz: chunk size 0x{:x} is not 0x{:x}",
547 entry.chunk_size,
548 ctx.chunk_size
549 );
550 } else {
551 Ok(entry.chunk_size)
552 }
553 } else {
554 Ok(0)
555 }
556 }
557
558 fn parse_entry(&mut self, tree: &mut Tree, entry: &TocEntry, path: &Path) -> Result<()> {
559 let name_size = entry.name()?.byte_size() as u16;
560 let uid = if self.builder.explicit_uidgid {
561 entry.uid
562 } else {
563 0
564 };
565 let gid = if self.builder.explicit_uidgid {
566 entry.gid
567 } else {
568 0
569 };
570 let mut file_size = entry.size();
571 let mut flags = RafsInodeFlags::default();
572
573 let (symlink, symlink_size) = if entry.is_symlink() {
575 let symlink_link_path = entry.symlink_link_path();
576 let symlink_size = symlink_link_path.as_os_str().byte_size() as u16;
577 file_size = symlink_size.into();
578 flags |= RafsInodeFlags::SYMLINK;
579 (Some(symlink_link_path.as_os_str().to_owned()), symlink_size)
580 } else {
581 (None, 0)
582 };
583
584 let ino = if entry.is_hardlink() {
586 let link_path = entry.hardlink_link_path();
587 let link_path = link_path.components().as_path();
588 let targets = Node::generate_target_vec(link_path);
589 assert!(!targets.is_empty());
590 let mut tmp_tree: &Tree = tree;
591 for name in &targets[1..] {
592 match tmp_tree.get_child_idx(name.as_bytes()) {
593 Some(idx) => tmp_tree = &tmp_tree.children[idx],
594 None => {
595 bail!(
596 "stargz: unknown target {} for hardlink {}",
597 link_path.display(),
598 path.display(),
599 );
600 }
601 }
602 }
603
604 let mut tmp_node = tmp_tree.borrow_mut_node();
605 if !tmp_node.is_reg() {
606 bail!(
607 "stargz: target {} for hardlink {} is not a regular file",
608 link_path.display(),
609 path.display()
610 );
611 }
612 self.hardlink_map
613 .insert(path.to_path_buf(), tmp_tree.node.clone());
614 flags |= RafsInodeFlags::HARDLINK;
615 tmp_node.inode.set_has_hardlink(true);
616 tmp_node.inode.ino()
617 } else {
618 self.builder.next_ino()
619 };
620
621 let mut xattrs = RafsXAttrs::new();
623 if entry.has_xattr() {
624 for (name, value) in entry.xattrs.iter() {
625 flags |= RafsInodeFlags::XATTR;
626 let value = base64::engine::general_purpose::STANDARD
627 .decode(value)
628 .with_context(|| {
629 format!(
630 "stargz: failed to parse xattr {:?} for entry {:?}",
631 path, name
632 )
633 })?;
634 xattrs.add(OsString::from(name), value)?;
635 }
636 }
637
638 let mut inode = InodeWrapper::V6(RafsV6Inode {
639 i_ino: ino,
640 i_projid: 0,
641 i_uid: uid,
642 i_gid: gid,
643 i_mode: entry.mode(),
644 i_size: file_size,
645 i_nlink: 1,
646 i_blocks: 0,
647 i_flags: flags,
648 i_child_count: 0,
649 i_name_size: name_size,
650 i_symlink_size: symlink_size,
651 i_rdev: entry.rdev(),
652 i_mtime: 0,
654 i_mtime_nsec: 0,
655 });
656 inode.set_has_xattr(!xattrs.is_empty());
657
658 let source = PathBuf::from("/");
659 let target = Node::generate_target(&path, &source);
660 let target_vec = Node::generate_target_vec(&target);
661 let info = NodeInfo {
662 explicit_uidgid: self.builder.explicit_uidgid,
663 src_ino: ino,
664 src_dev: u64::MAX,
665 rdev: entry.rdev() as u64,
666 source,
667 target,
668 path: path.to_path_buf(),
669 target_vec,
670 symlink,
671 xattrs,
672 v6_force_extended_inode: false,
673 };
674 let node = Node::new(inode, info, self.builder.layer_idx);
675
676 self.builder.insert_into_tree(tree, node)
677 }
678
679 fn sort_and_validate_chunks(chunks: &mut [NodeChunk], size: u64) -> Result<()> {
680 if chunks.len() > RAFS_MAX_CHUNKS_PER_BLOB as usize {
681 bail!("stargz: file has two many chunks");
682 }
683
684 if chunks.len() > 1 {
685 chunks.sort_unstable_by_key(|v| v.inner.file_offset());
686 for idx in 0..chunks.len() - 2 {
687 let curr = &chunks[idx].inner;
688 let pos = curr
689 .file_offset()
690 .checked_add(curr.uncompressed_size() as u64);
691 match pos {
692 Some(pos) => {
693 if pos != chunks[idx + 1].inner.file_offset() {
694 bail!("stargz: unexpected holes between data chunks");
695 }
696 }
697 None => {
698 bail!(
699 "stargz: invalid chunk offset 0x{:x} or size 0x{:x}",
700 curr.file_offset(),
701 curr.uncompressed_size()
702 )
703 }
704 }
705 }
706 }
707
708 if !chunks.is_empty() {
709 let last = &chunks[chunks.len() - 1];
710 if last.inner.file_offset() + last.inner.uncompressed_size() as u64 != size {
711 bail!("stargz: file size and sum of chunk size doesn't match");
712 }
713 } else if size != 0 {
714 bail!("stargz: file size and sum of chunk size doesn't match");
715 }
716
717 Ok(())
718 }
719
720 fn fix_chunk_info(&mut self, ctx: &mut BuildContext, blob_mgr: &mut BlobManager) -> Result<()> {
721 let mut blob_chunks: Vec<&mut NodeChunk> = Vec::with_capacity(10240);
733 for (_, chunks) in self.file_chunk_map.values_mut() {
734 for chunk in chunks.iter_mut() {
735 blob_chunks.push(chunk);
736 }
737 }
738 blob_chunks.sort_unstable_by(|a, b| {
739 a.inner
740 .uncompressed_offset()
741 .cmp(&b.inner.uncompressed_offset())
742 });
743 if blob_chunks.is_empty() {
744 return Ok(());
745 }
746
747 let (blob_index, blob_ctx) = blob_mgr.get_or_create_current_blob(ctx)?;
749 let chunk_count = blob_chunks.len();
750 let mut compressed_blob_size = 0u64;
751 for idx in 0..chunk_count {
752 let curr = blob_chunks[idx].inner.compressed_offset();
753 let next = if idx == chunk_count - 1 {
754 self.blob_size
755 } else {
756 blob_chunks[idx + 1].inner.compressed_offset()
757 };
758 if curr >= next {
759 bail!("stargz: compressed offset is out of order");
760 } else if next - curr > RAFS_MAX_CHUNK_SIZE {
761 bail!("stargz: compressed size is too big");
762 }
763
764 let mut chunk = blob_chunks[idx].inner.deref().clone();
765 let uncomp_size = chunk.uncompressed_size() as usize;
766 let max_size = (next - curr) as usize;
767 let max_gzip_size = compute_compressed_gzip_size(uncomp_size, max_size);
768 let chunk_index = blob_ctx.alloc_chunk_index()?;
769 chunk.set_index(chunk_index);
770 chunk.set_blob_index(blob_index);
771 chunk.set_compressed_size(max_gzip_size as u32);
772 blob_ctx.add_chunk_meta_info(&chunk, None)?;
773 compressed_blob_size = std::cmp::max(
774 compressed_blob_size,
775 chunk.compressed_offset() + chunk.compressed_size() as u64,
776 );
777 assert_eq!(Arc::strong_count(&blob_chunks[idx].inner), 1);
778 blob_chunks[idx].inner = Arc::new(chunk);
779 }
780
781 blob_ctx.uncompressed_blob_size = self.uncompressed_offset;
782 blob_ctx.compressed_blob_size = compressed_blob_size;
783
784 Ok(())
785 }
786
787 fn fix_nodes(&mut self, bootstrap: &mut Bootstrap) -> Result<()> {
788 bootstrap
789 .tree
790 .walk_bfs(true, &mut |n| {
791 let mut node = n.borrow_mut_node();
792 let node_path = node.path();
793 if let Some((size, ref mut chunks)) = self.file_chunk_map.get_mut(node_path) {
794 node.inode.set_size(*size);
795 node.inode.set_child_count(chunks.len() as u32);
796 node.chunks = chunks.to_vec();
797 }
798
799 Ok(())
800 })
801 .context("stargz: failed to update chunk info array for nodes")?;
802
803 for (k, v) in self.hardlink_map.iter() {
804 match bootstrap.tree.get_node(k) {
805 Some(t) => {
806 let mut node = t.borrow_mut_node();
807 let target = v.borrow();
808 node.inode.set_size(target.inode.size());
809 node.inode.set_child_count(target.inode.child_count());
810 node.chunks = target.chunks.clone();
811 node.set_xattr(target.info.xattrs.clone());
812 }
813 None => bail!(
814 "stargz: failed to get target node for hardlink {}",
815 k.display()
816 ),
817 }
818 }
819
820 Ok(())
821 }
822}
823
impl Builder for StargzBuilder {
    /// Run the full stargz-to-RAFS conversion: build the tree and bootstrap,
    /// fix up chunk/node metadata, then dump the blob and bootstrap artifacts.
    fn build(
        &mut self,
        ctx: &mut BuildContext,
        bootstrap_mgr: &mut BootstrapManager,
        blob_mgr: &mut BlobManager,
    ) -> Result<BuildOutput> {
        // Stargz conversion only supports RAFS v6 with gzip compression and
        // sha256 digests; reject anything else up front.
        if ctx.fs_version != RafsVersion::V6 {
            bail!(
                "stargz: unsupported filesystem version {:?}",
                ctx.fs_version
            );
        } else if ctx.compressor != compress::Algorithm::GZip {
            bail!("stargz: invalid compression algorithm {:?}", ctx.compressor);
        } else if ctx.digester != digest::Algorithm::Sha256 {
            bail!("stargz: invalid digest algorithm {:?}", ctx.digester);
        }
        // Write blob data to the configured storage, or discard it when no
        // blob storage is configured.
        let mut blob_writer: Box<dyn Artifact> = if let Some(blob_stor) = ctx.blob_storage.clone() {
            Box::new(ArtifactWriter::new(blob_stor)?)
        } else {
            Box::<NoopArtifactWriter>::default()
        };
        let mut bootstrap_ctx = bootstrap_mgr.create_ctx()?;
        let layer_idx = u16::from(bootstrap_ctx.layered);

        // Phase 1: parse the TOC into a filesystem tree.
        let tree = timing_tracer!({ self.build_tree(ctx, layer_idx) }, "build_tree")?;

        // Phase 2: build the bootstrap (metadata) from the tree.
        let mut bootstrap = timing_tracer!(
            { build_bootstrap(ctx, bootstrap_mgr, &mut bootstrap_ctx, blob_mgr, tree) },
            "build_bootstrap"
        )?;

        // Phase 3: finalize chunk metadata and propagate it onto the nodes.
        self.fix_chunk_info(ctx, blob_mgr)?;
        self.fix_nodes(&mut bootstrap)?;

        // Phase 4: dump blob data followed by the blob meta.
        timing_tracer!(
            { Blob::dump(ctx, blob_mgr, blob_writer.as_mut()) },
            "dump_blob"
        )?;

        if let Some((_, blob_ctx)) = blob_mgr.get_current_blob() {
            Blob::dump_meta_data(ctx, blob_ctx, blob_writer.as_mut())?;
        }

        // Phase 5: dump the bootstrap and finalize the blob. When the
        // bootstrap is inlined into the blob it must be dumped before the
        // blob is finalized; otherwise the blob is finalized first.
        if ctx.blob_inline_meta {
            timing_tracer!(
                {
                    dump_bootstrap(
                        ctx,
                        bootstrap_mgr,
                        &mut bootstrap_ctx,
                        &mut bootstrap,
                        blob_mgr,
                        blob_writer.as_mut(),
                    )
                },
                "dump_bootstrap"
            )?;
            finalize_blob(ctx, blob_mgr, blob_writer.as_mut())?;
        } else {
            finalize_blob(ctx, blob_mgr, blob_writer.as_mut())?;
            timing_tracer!(
                {
                    dump_bootstrap(
                        ctx,
                        bootstrap_mgr,
                        &mut bootstrap_ctx,
                        &mut bootstrap,
                        blob_mgr,
                        blob_writer.as_mut(),
                    )
                },
                "dump_bootstrap"
            )?;
        }

        lazy_drop(bootstrap_ctx);

        BuildOutput::new(blob_mgr, None, &bootstrap_mgr.bootstrap_storage, &None)
    }
}
910
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        attributes::Attributes, ArtifactStorage, ConversionType, Features, Prefetch, WhiteoutSpec,
    };

    // End-to-end conversion of the sample TOC fixture: verifies the produced
    // blob digest, blob size and bootstrap path are stable.
    #[test]
    fn test_build_stargz_toc() {
        let tmp_dir = vmm_sys_util::tempdir::TempDir::new().unwrap();
        let mut tmp_dir = tmp_dir.as_path().to_path_buf();
        let root_dir = &std::env::var("CARGO_MANIFEST_DIR").expect("$CARGO_MANIFEST_DIR");
        let source_path =
            PathBuf::from(root_dir).join("../tests/texture/stargz/estargz_sample.json");
        let prefetch = Prefetch::default();
        // Build context must use V6 + gzip + sha256, as required by the builder.
        let mut ctx = BuildContext::new(
            "".to_string(),
            true,
            0,
            compress::Algorithm::GZip,
            digest::Algorithm::Sha256,
            true,
            WhiteoutSpec::Oci,
            ConversionType::EStargzIndexToRef,
            source_path,
            prefetch,
            Some(ArtifactStorage::FileDir((tmp_dir.clone(), String::new()))),
            None,
            false,
            Features::new(),
            false,
            Attributes::default(),
        );
        ctx.fs_version = RafsVersion::V6;
        ctx.conversion_type = ConversionType::EStargzToRafs;
        let mut bootstrap_mgr = BootstrapManager::new(
            Some(ArtifactStorage::FileDir((tmp_dir.clone(), String::new()))),
            None,
        );
        let mut blob_mgr = BlobManager::new(digest::Algorithm::Sha256, false);
        let mut builder = StargzBuilder::new(0x1000000, &ctx);

        let builder = builder.build(&mut ctx, &mut bootstrap_mgr, &mut blob_mgr);
        assert!(builder.is_ok());
        let builder = builder.unwrap();
        // Pin the exact blob digest / size produced from the fixture.
        assert_eq!(
            builder.blobs,
            vec![String::from(
                "bd4eff3fe6f5a352457c076d2133583e43db895b4af08d717b3fbcaeca89834e"
            )]
        );
        assert_eq!(builder.blob_size, Some(4128));
        tmp_dir.push("e60676aef5cc0d5caca9f4c8031f5b0c8392a0611d44c8e1bbc46dbf7fe7bfef");
        assert_eq!(
            builder.bootstrap_path.unwrap(),
            tmp_dir.to_str().unwrap().to_string()
        )
    }

    // Unit coverage for TocEntry predicates, mode/size/rdev derivation,
    // name/path handling, chunk digest parsing and normalize() validation.
    #[test]
    fn test_toc_entry() {
        let root_dir = &std::env::var("CARGO_MANIFEST_DIR").expect("$CARGO_MANIFEST_DIR");
        let source_path = PathBuf::from(root_dir).join("../tests/texture/tar/all-entry-type.tar");

        let mut entry = TocEntry {
            name: source_path,
            toc_type: "".to_string(),
            size: 0x10,
            link_name: PathBuf::from("link_name"),
            mode: 0,
            uid: 1,
            gid: 1,
            uname: "user_name".to_string(),
            gname: "group_name".to_string(),
            dev_major: 255,
            dev_minor: 33,
            xattrs: Default::default(),
            digest: Default::default(),
            offset: 0,
            chunk_offset: 0,
            chunk_size: 0,
            chunk_digest: "sha256:".to_owned(),
            inner_offset: 0,
        };
        // Build a syntactically valid 71-char chunk digest: "sha256:" + 64 hex chars.
        entry.chunk_digest.extend(vec!['a'; 64].iter());

        entry.toc_type = "dir".to_owned();
        assert!(entry.is_dir());
        assert!(entry.is_supported());
        assert_eq!(entry.mode(), libc::S_IFDIR as u32);
        assert_eq!(entry.rdev(), u32::MAX);

        // Unknown type string must not be treated as a regular file.
        entry.toc_type = "req".to_owned();
        assert!(!entry.is_reg());
        entry.toc_type = "reg".to_owned();
        assert!(entry.is_reg());
        assert!(entry.is_supported());
        assert_eq!(entry.mode(), libc::S_IFREG as u32);
        assert_eq!(entry.size(), 0x10);

        entry.toc_type = "symlink".to_owned();
        assert!(entry.is_symlink());
        assert!(entry.is_supported());
        assert_eq!(entry.mode(), libc::S_IFLNK as u32);
        assert_eq!(entry.symlink_link_path(), Path::new("link_name"));
        assert!(entry.normalize().is_ok());

        entry.toc_type = "hardlink".to_owned();
        assert!(entry.is_supported());
        assert!(entry.is_hardlink());
        assert_eq!(entry.mode(), libc::S_IFREG as u32);
        assert_eq!(entry.hardlink_link_path(), Path::new("link_name"));
        assert!(entry.normalize().is_ok());

        entry.toc_type = "chunk".to_owned();
        assert!(entry.is_supported());
        assert!(entry.is_chunk());
        assert_eq!(entry.mode(), 0);
        assert_eq!(entry.size(), 0);
        // Chunk with zero chunk_offset is rejected by normalize().
        assert!(entry.normalize().is_err());

        entry.toc_type = "block".to_owned();
        assert!(entry.is_special());
        assert!(entry.is_blockdev());
        assert_eq!(entry.mode(), libc::S_IFBLK as u32);

        entry.toc_type = "char".to_owned();
        assert!(entry.is_special());
        assert!(entry.is_chardev());
        assert_eq!(entry.mode(), libc::S_IFCHR as u32);
        assert_ne!(entry.size(), 0x10);

        entry.toc_type = "fifo".to_owned();
        assert!(entry.is_fifo());
        assert!(entry.is_special());
        assert_eq!(entry.mode(), libc::S_IFIFO as u32);
        // makedev(255, 33) == 65313.
        assert_eq!(entry.rdev(), 65313);

        assert_eq!(entry.name().unwrap().to_str(), Some("all-entry-type.tar"));
        entry.name = PathBuf::from("/");
        assert_eq!(entry.name().unwrap().to_str(), Some("/"));
        assert_ne!(entry.path(), Path::new("all-entry-type.tar"));

        assert_eq!(entry.block_id().unwrap().data, [0xaa as u8; 32]);

        // Empty name must be rejected by normalize().
        entry.name = PathBuf::from("");
        assert!(entry.normalize().is_err());
    }
}