1use crate::{RunContext, Runnable, util::is_mounted};
2use anyhow::{Context, Result, bail};
3use btrfs_disk::{
4 items::{
5 CompressionType, DirItem, FileExtentBody, FileExtentItem,
6 FileExtentType, FileType, InodeItem, RootItem,
7 },
8 raw, reader,
9 superblock::SUPER_MIRROR_MAX,
10 tree::{DiskKey, KeyType, TreeBlock},
11};
12use clap::Parser;
13use regex_lite::Regex;
14use std::{
15 collections::HashMap,
16 fs::{self, File, OpenOptions},
17 io::{self, Read, Seek, Write},
18 os::unix::fs::symlink,
19 path::{Path, PathBuf},
20};
21
#[derive(Parser, Debug)]
// Recover files from an unmounted (possibly damaged) btrfs device or image,
// in the spirit of `btrfs restore`: trees are walked read-only and restored
// contents are written under a destination directory.
#[allow(clippy::doc_markdown, clippy::struct_excessive_bools)]
pub struct RestoreCommand {
    // Device or image file holding the btrfs filesystem (must be unmounted).
    device: PathBuf,

    // Destination directory; required for everything except --list-roots.
    path: Option<PathBuf>,

    // Print what would be restored without writing anything.
    #[clap(short = 'D', long = "dry-run")]
    dry_run: bool,

    // Keep going past per-file/per-block errors, counting them instead of
    // aborting the restore.
    #[clap(short = 'i', long)]
    ignore_errors: bool,

    // Overwrite files that already exist in the destination.
    #[clap(short = 'o', long)]
    overwrite: bool,

    // Restore owner, permission bits, and timestamps from the on-disk inodes.
    #[clap(short = 'm', long)]
    metadata: bool,

    // Recreate symlinks (skipped unless given).
    #[clap(short = 'S', long)]
    symlink: bool,

    // Also restore subvolumes/snapshots.
    #[clap(short = 's', long)]
    snapshots: bool,

    // Restore extended attributes (via lsetxattr).
    #[clap(short = 'x', long)]
    xattr: bool,

    // Only restore entries whose "/dir/file"-style in-filesystem path
    // matches this regex.
    #[clap(long)]
    path_regex: Option<String>,

    // Make --path-regex matching case-insensitive (prepends "(?i)").
    #[clap(short = 'c', long)]
    ignore_case: bool,

    // Use the first objectid that owns directory entries as the restore
    // root instead of the subvolume's first free objectid.
    #[clap(short = 'd', long)]
    find_dir: bool,

    // List the root tree's ROOT_ITEMs and exit.
    #[clap(short = 'l', long)]
    list_roots: bool,

    // Increase verbosity (repeatable).
    #[clap(short = 'v', long, action = clap::ArgAction::Count)]
    verbose: u8,

    // Byte offset of the FS tree root block, bypassing tree-root lookup.
    #[clap(short = 'f', long)]
    fs_location: Option<u64>,

    // Objectid of the subvolume tree to restore (defaults to FS_TREE).
    #[clap(short = 'r', long)]
    root: Option<u64>,

    // Byte offset of the root tree block used by --list-roots.
    #[clap(short = 't', long)]
    tree_location: Option<u64>,

    // Superblock mirror index to use instead of auto-probing all mirrors.
    #[clap(short = 'u', long = "super")]
    super_mirror: Option<u64>,
}
102
103impl Runnable for RestoreCommand {
104 fn supports_dry_run(&self) -> bool {
105 true
106 }
107
108 #[allow(clippy::too_many_lines)]
109 fn run(&self, _ctx: &RunContext) -> Result<()> {
110 if let Some(m) = self.super_mirror
111 && m >= u64::from(SUPER_MIRROR_MAX)
112 {
113 bail!(
114 "super mirror index {m} is out of range (max {})",
115 SUPER_MIRROR_MAX - 1
116 );
117 }
118
119 if is_mounted(&self.device) {
120 bail!(
121 "'{}' is mounted, refusing to restore (unmount first)",
122 self.device.display()
123 );
124 }
125
126 let file = File::open(&self.device).with_context(|| {
127 format!("cannot open '{}'", self.device.display())
128 })?;
129
130 let mut open = if let Some(m) = self.super_mirror {
132 #[allow(clippy::cast_possible_truncation)] reader::filesystem_open_mirror(file, m as u32)
134 .context("failed to open filesystem")?
135 } else {
136 let mut result = None;
137 for mirror in 0..SUPER_MIRROR_MAX {
138 match reader::filesystem_open_mirror(file.try_clone()?, mirror)
139 {
140 Ok(o) => {
141 if mirror > 0 {
142 eprintln!(
143 "using superblock mirror {mirror} \
144 (primary was damaged)"
145 );
146 }
147 result = Some(o);
148 break;
149 }
150 Err(e) => {
151 eprintln!(
152 "warning: superblock mirror {mirror} \
153 failed: {e}"
154 );
155 }
156 }
157 }
158 result.context("all superblock mirrors failed")?
159 };
160
161 if self.list_roots {
162 let root_bytenr =
163 self.tree_location.unwrap_or(open.superblock.root);
164 return list_roots(&mut open.reader, root_bytenr);
165 }
166
167 let output_path = self.path.as_ref().ok_or_else(|| {
168 anyhow::anyhow!(
169 "destination path is required (unless --list-roots)"
170 )
171 })?;
172
173 let path_regex = self
175 .path_regex
176 .as_ref()
177 .map(|pat| {
178 let full = if self.ignore_case {
179 format!("(?i){pat}")
180 } else {
181 pat.clone()
182 };
183 Regex::new(&full)
184 .with_context(|| format!("invalid regex '{pat}'"))
185 })
186 .transpose()?;
187
188 let fs_tree_oid =
190 self.root.unwrap_or(u64::from(raw::BTRFS_FS_TREE_OBJECTID));
191
192 let fs_root_bytenr = if let Some(loc) = self.fs_location {
194 loc
195 } else {
196 open.tree_roots
197 .get(&fs_tree_oid)
198 .map(|(bytenr, _)| *bytenr)
199 .with_context(|| {
200 format!("tree root for objectid {fs_tree_oid} not found")
201 })?
202 };
203
204 let mut block_reader = open.reader;
205
206 let opts = RestoreOpts {
207 dry_run: self.dry_run,
208 overwrite: self.overwrite,
209 metadata: self.metadata,
210 symlinks: self.symlink,
211 snapshots: self.snapshots,
212 xattr: self.xattr,
213 ignore_errors: self.ignore_errors,
214 verbose: self.verbose,
215 path_regex: path_regex.as_ref(),
216 tree_roots: &open.tree_roots,
217 };
218
219 let mut total_errors = 0;
220
221 let items = collect_fs_tree_items(
223 &mut block_reader,
224 fs_root_bytenr,
225 self.ignore_errors,
226 )?;
227
228 let root_ino = if self.find_dir {
230 let oid = find_first_dir(&items)?;
231 println!("Using objectid {oid} for first dir");
232 oid
233 } else {
234 u64::from(raw::BTRFS_FIRST_FREE_OBJECTID)
235 };
236
237 if !opts.dry_run {
238 fs::create_dir_all(output_path).with_context(|| {
239 format!(
240 "failed to create output directory '{}'",
241 output_path.display()
242 )
243 })?;
244 }
245
246 restore_dir(
247 &mut block_reader,
248 &items,
249 root_ino,
250 output_path,
251 &opts,
252 &mut total_errors,
253 "",
254 )?;
255
256 if self.snapshots {
260 for (&oid, &(bytenr, _)) in &open.tree_roots {
261 #[allow(clippy::cast_sign_loss)]
262 let last_free = raw::BTRFS_LAST_FREE_OBJECTID as u64;
263 if oid >= u64::from(raw::BTRFS_FIRST_FREE_OBJECTID)
264 && oid <= last_free
265 && oid != fs_tree_oid
266 {
267 let snap_dest = output_path.join(format!("snapshot.{oid}"));
268 if snap_dest.exists() {
270 continue;
271 }
272 let snap_items = collect_fs_tree_items(
273 &mut block_reader,
274 bytenr,
275 self.ignore_errors,
276 )?;
277 if !opts.dry_run {
278 fs::create_dir_all(&snap_dest).with_context(|| {
279 format!(
280 "failed to create snapshot directory '{}'",
281 snap_dest.display()
282 )
283 })?;
284 }
285 let snap_root = u64::from(raw::BTRFS_FIRST_FREE_OBJECTID);
286 restore_dir(
287 &mut block_reader,
288 &snap_items,
289 snap_root,
290 &snap_dest,
291 &opts,
292 &mut total_errors,
293 "",
294 )?;
295 }
296 }
297 }
298
299 if total_errors > 0 {
300 eprintln!("warning: {total_errors} error(s) during restore");
301 }
302
303 Ok(())
304 }
305}
306
/// Options threaded through the restore walk, borrowed from the parsed
/// command-line arguments plus the filesystem's tree-root map.
#[allow(clippy::struct_excessive_bools)]
struct RestoreOpts<'a> {
    /// Print what would be restored without writing anything.
    dry_run: bool,
    /// Overwrite files that already exist in the destination.
    overwrite: bool,
    /// Apply owner/mode/timestamps from the on-disk inode after restoring.
    metadata: bool,
    /// Recreate symlinks.
    symlinks: bool,
    /// Descend into subvolumes/snapshots referenced by ROOT_ITEM entries.
    snapshots: bool,
    /// Restore extended attributes.
    xattr: bool,
    /// Warn-and-count instead of aborting on per-entry failures.
    ignore_errors: bool,
    /// Verbosity level (count of -v flags).
    verbose: u8,
    /// Optional filter matched against "/dir/file"-style relative paths.
    path_regex: Option<&'a Regex>,
    /// objectid -> (root block bytenr, second value unused here); used to
    /// resolve subvolume trees when restoring snapshots.
    tree_roots: &'a std::collections::BTreeMap<u64, (u64, u64)>,
}
320
/// All leaf items collected from one filesystem tree, grouped by key
/// objectid (the inode number for FS-tree items).
struct FsTreeItems {
    // objectid -> (key, raw item payload) pairs in tree traversal order.
    items: HashMap<u64, Vec<(DiskKey, Vec<u8>)>>,
}
325
326impl FsTreeItems {
327 fn get(&self, objectid: u64, key_type: KeyType) -> Vec<(&DiskKey, &[u8])> {
329 self.items
330 .get(&objectid)
331 .map(|v| {
332 v.iter()
333 .filter(|(k, _)| k.key_type == key_type)
334 .map(|(k, d)| (k, d.as_slice()))
335 .collect()
336 })
337 .unwrap_or_default()
338 }
339
340 fn has_key_type(&self, key_type: KeyType) -> Option<u64> {
342 for (oid, entries) in &self.items {
343 if entries.iter().any(|(k, _)| k.key_type == key_type) {
344 return Some(*oid);
345 }
346 }
347 None
348 }
349}
350
351fn collect_fs_tree_items<R: Read + Seek>(
353 reader: &mut reader::BlockReader<R>,
354 root_bytenr: u64,
355 ignore_errors: bool,
356) -> Result<FsTreeItems> {
357 let mut items: HashMap<u64, Vec<(DiskKey, Vec<u8>)>> = HashMap::new();
358 let mut errors = 0u64;
359 collect_items_dfs(
360 reader,
361 root_bytenr,
362 &mut items,
363 ignore_errors,
364 &mut errors,
365 )?;
366 if errors > 0 {
367 eprintln!(
368 "warning: {errors} tree block(s) could not be read during scan"
369 );
370 }
371 Ok(FsTreeItems { items })
372}
373
374fn collect_items_dfs<R: Read + Seek>(
375 reader: &mut reader::BlockReader<R>,
376 logical: u64,
377 items: &mut HashMap<u64, Vec<(DiskKey, Vec<u8>)>>,
378 ignore_errors: bool,
379 errors: &mut u64,
380) -> Result<()> {
381 let block = match reader.read_tree_block(logical) {
382 Ok(b) => b,
383 Err(e) => {
384 if ignore_errors {
385 eprintln!(
386 "warning: skipping unreadable tree block at \
387 logical {logical}: {e}"
388 );
389 *errors += 1;
390 return Ok(());
391 }
392 return Err(e).with_context(|| {
393 format!("failed to read tree block at {logical}")
394 });
395 }
396 };
397
398 match &block {
399 TreeBlock::Leaf {
400 items: leaf_items,
401 data,
402 ..
403 } => {
404 let header_size = std::mem::size_of::<raw::btrfs_header>();
405 for item in leaf_items {
406 let start = header_size + item.offset as usize;
407 let end = start + item.size as usize;
408 if end <= data.len() {
409 items
410 .entry(item.key.objectid)
411 .or_default()
412 .push((item.key, data[start..end].to_vec()));
413 }
414 }
415 }
416 TreeBlock::Node { ptrs, .. } => {
417 for ptr in ptrs {
418 collect_items_dfs(
419 reader,
420 ptr.blockptr,
421 items,
422 ignore_errors,
423 errors,
424 )?;
425 }
426 }
427 }
428
429 Ok(())
430}
431
432fn find_first_dir(items: &FsTreeItems) -> Result<u64> {
434 items
435 .has_key_type(KeyType::DirIndex)
436 .context("no directory entry found in tree")
437}
438
/// Recursively restores the directory with inode `dir_ino` into
/// `output_path`, walking its DIR_INDEX items.
///
/// `prefix` is the in-filesystem path of this directory ("" at the root)
/// and is used for `--path-regex` matching and warning messages. Failures
/// are propagated, or — with `--ignore-errors` — reported and counted in
/// `errors` before continuing with the next entry.
#[allow(clippy::too_many_lines)]
fn restore_dir<R: Read + Seek>(
    reader: &mut reader::BlockReader<R>,
    items: &FsTreeItems,
    dir_ino: u64,
    output_path: &Path,
    opts: &RestoreOpts,
    errors: &mut u64,
    prefix: &str,
) -> Result<()> {
    // DIR_INDEX items enumerate this directory's children.
    let dir_entries = items.get(dir_ino, KeyType::DirIndex);

    for (_key, data) in &dir_entries {
        let parsed = DirItem::parse_all(data);
        for entry in parsed {
            // Fall back to lossy conversion for non-UTF-8 names rather
            // than dropping the entry.
            let name = match std::str::from_utf8(&entry.name) {
                Ok(s) => s.to_string(),
                Err(_) => String::from_utf8_lossy(&entry.name).into_owned(),
            };
            let child_path = output_path.join(&name);
            let child_ino = entry.location.objectid;

            // In-filesystem path ("/a/b") used for regex filtering.
            let rel_path = if prefix.is_empty() {
                format!("/{name}")
            } else {
                format!("{prefix}/{name}")
            };

            if let Some(re) = opts.path_regex
                && !re.is_match(&rel_path)
            {
                continue;
            }

            // A ROOT_ITEM location marks a subvolume/snapshot entry rather
            // than a plain inode in this tree.
            if entry.location.key_type == KeyType::RootItem {
                if opts.snapshots {
                    let subvol_oid = entry.location.objectid;
                    if let Some(&(bytenr, _)) = opts.tree_roots.get(&subvol_oid)
                        && let Err(e) = restore_snapshot(
                            reader,
                            bytenr,
                            &child_path,
                            opts,
                            errors,
                            &rel_path,
                        )
                    {
                        if !opts.ignore_errors {
                            return Err(e);
                        }
                        eprintln!(
                            "warning: failed to restore snapshot '{}': {e}",
                            child_path.display()
                        );
                        *errors += 1;
                    }
                } else {
                    eprintln!("Skipping snapshot {name} (use -s to restore)");
                }
                continue;
            }

            match entry.file_type {
                FileType::Dir => {
                    if opts.dry_run {
                        println!("{}/", child_path.display());
                    } else {
                        if opts.verbose >= 1 {
                            eprintln!("Restoring {}/", child_path.display());
                        }
                        if let Err(e) = fs::create_dir_all(&child_path) {
                            if !opts.ignore_errors {
                                return Err(e).with_context(|| {
                                    format!(
                                        "failed to create directory '{}'",
                                        child_path.display()
                                    )
                                });
                            }
                            eprintln!(
                                "warning: failed to create '{}': {e}",
                                child_path.display()
                            );
                            *errors += 1;
                            // Cannot descend into a directory we failed to
                            // create.
                            continue;
                        }
                    }
                    // Recurse even on dry runs so the whole tree is listed.
                    restore_dir(
                        reader,
                        items,
                        child_ino,
                        &child_path,
                        opts,
                        errors,
                        &rel_path,
                    )?;
                    // Metadata is applied after the recursion — restoring
                    // children would otherwise update the directory's
                    // timestamps again.
                    if opts.metadata && !opts.dry_run {
                        apply_metadata(
                            items,
                            child_ino,
                            &child_path,
                            opts,
                            errors,
                        );
                    }
                }
                FileType::RegFile => {
                    if let Err(e) = restore_file(
                        reader,
                        items,
                        child_ino,
                        &child_path,
                        opts,
                        errors,
                    ) {
                        if !opts.ignore_errors {
                            return Err(e);
                        }
                        eprintln!(
                            "warning: failed to restore '{}': {e}",
                            child_path.display()
                        );
                        *errors += 1;
                    }
                }
                // Symlinks are only recreated when -S was given.
                FileType::Symlink if opts.symlinks => {
                    if let Err(e) =
                        restore_symlink(items, child_ino, &child_path, opts)
                    {
                        if !opts.ignore_errors {
                            return Err(e);
                        }
                        eprintln!(
                            "warning: failed to restore symlink '{}': {e}",
                            child_path.display()
                        );
                        *errors += 1;
                    }
                    if opts.metadata && !opts.dry_run {
                        apply_metadata(
                            items,
                            child_ino,
                            &child_path,
                            opts,
                            errors,
                        );
                    }
                }
                // Other entry types (fifos, devices, sockets, ...) are not
                // restored.
                _ => {}
            }

            if opts.xattr && !opts.dry_run {
                restore_xattrs(items, child_ino, &child_path, errors);
            }
        }
    }

    Ok(())
}
609
610fn restore_snapshot<R: Read + Seek>(
612 reader: &mut reader::BlockReader<R>,
613 bytenr: u64,
614 output_path: &Path,
615 opts: &RestoreOpts,
616 errors: &mut u64,
617 prefix: &str,
618) -> Result<()> {
619 let snap_items = collect_fs_tree_items(reader, bytenr, opts.ignore_errors)?;
620
621 if !opts.dry_run {
622 fs::create_dir_all(output_path).with_context(|| {
623 format!(
624 "failed to create snapshot directory '{}'",
625 output_path.display()
626 )
627 })?;
628 }
629
630 let snap_root = u64::from(raw::BTRFS_FIRST_FREE_OBJECTID);
631 restore_dir(
632 reader,
633 &snap_items,
634 snap_root,
635 output_path,
636 opts,
637 errors,
638 prefix,
639 )
640}
641
642#[allow(clippy::too_many_lines, clippy::cast_possible_truncation)]
644fn restore_file<R: Read + Seek>(
645 reader: &mut reader::BlockReader<R>,
646 items: &FsTreeItems,
647 ino: u64,
648 path: &Path,
649 opts: &RestoreOpts,
650 errors: &mut u64,
651) -> Result<()> {
652 if opts.dry_run {
653 println!("{}", path.display());
654 return Ok(());
655 }
656
657 if path.exists() && !opts.overwrite {
658 return Ok(());
659 }
660
661 if opts.verbose >= 1 {
662 eprintln!("Restoring {}", path.display());
663 }
664
665 let mut file = OpenOptions::new()
666 .write(true)
667 .create(true)
668 .truncate(true)
669 .open(path)
670 .with_context(|| format!("failed to create '{}'", path.display()))?;
671
672 let inode_size = items
674 .get(ino, KeyType::InodeItem)
675 .first()
676 .and_then(|(_, d)| InodeItem::parse(d))
677 .map(|i| i.size);
678
679 let extent_items = items.get(ino, KeyType::ExtentData);
680
681 for (key, data) in &extent_items {
682 let Some(extent) = FileExtentItem::parse(data) else {
683 continue;
684 };
685
686 if extent.extent_type == FileExtentType::Prealloc {
689 continue;
690 }
691
692 let file_offset = key.offset;
693
694 match &extent.body {
695 FileExtentBody::Inline { inline_size } => {
696 let header_len = data.len() - inline_size;
698 let inline_data = &data[header_len..];
699
700 let output = if extent.compression == CompressionType::None {
701 inline_data.to_vec()
702 } else {
703 decompress(
704 inline_data,
705 extent.ram_bytes as usize,
706 extent.compression,
707 )
708 .with_context(|| {
709 format!(
710 "failed to decompress inline extent in '{}'",
711 path.display()
712 )
713 })?
714 };
715
716 file.seek(io::SeekFrom::Start(file_offset))?;
717 file.write_all(&output).with_context(|| {
718 format!(
719 "failed to write inline extent to '{}'",
720 path.display()
721 )
722 })?;
723 }
724 FileExtentBody::Regular {
725 disk_bytenr,
726 disk_num_bytes,
727 offset,
728 num_bytes,
729 } => {
730 if *disk_bytenr == 0 {
731 continue;
733 }
734
735 if extent.compression == CompressionType::None
737 && *offset >= *disk_num_bytes
738 {
739 eprintln!(
740 "warning: bogus extent offset {} >= disk_size {} \
741 in '{}'",
742 offset,
743 disk_num_bytes,
744 path.display()
745 );
746 *errors += 1;
747 continue;
748 }
749 if *offset > extent.ram_bytes {
750 eprintln!(
751 "warning: bogus extent offset {} > ram_bytes {} \
752 in '{}'",
753 offset,
754 extent.ram_bytes,
755 path.display()
756 );
757 *errors += 1;
758 continue;
759 }
760
761 if extent.compression == CompressionType::None {
762 let data_buf = reader
764 .read_data(disk_bytenr + offset, *num_bytes as usize)
765 .with_context(|| {
766 format!(
767 "failed to read extent at logical {disk_bytenr}"
768 )
769 })?;
770
771 file.seek(io::SeekFrom::Start(file_offset))?;
772 file.write_all(&data_buf).with_context(|| {
773 format!(
774 "failed to write extent to '{}'",
775 path.display()
776 )
777 })?;
778 } else {
779 let compressed = reader
781 .read_data(*disk_bytenr, *disk_num_bytes as usize)
782 .with_context(|| {
783 format!(
784 "failed to read compressed extent at logical {disk_bytenr}"
785 )
786 })?;
787
788 let decompressed = decompress(
789 &compressed,
790 extent.ram_bytes as usize,
791 extent.compression,
792 )
793 .with_context(|| {
794 format!(
795 "failed to decompress extent in '{}'",
796 path.display()
797 )
798 })?;
799
800 let start = *offset as usize;
802 let end = start + *num_bytes as usize;
803 let slice = if end <= decompressed.len() {
804 &decompressed[start..end]
805 } else {
806 &decompressed[start..]
807 };
808
809 file.seek(io::SeekFrom::Start(file_offset))?;
810 file.write_all(slice).with_context(|| {
811 format!(
812 "failed to write extent to '{}'",
813 path.display()
814 )
815 })?;
816 }
817 }
818 }
819 }
820
821 if let Some(size) = inode_size {
824 file.set_len(size)?;
825 }
826
827 if opts.metadata {
828 drop(file);
830 apply_metadata(items, ino, path, opts, errors);
831 }
832
833 Ok(())
834}
835
836fn restore_symlink(
838 items: &FsTreeItems,
839 ino: u64,
840 path: &Path,
841 opts: &RestoreOpts,
842) -> Result<()> {
843 let extent_items = items.get(ino, KeyType::ExtentData);
844 let (_, data) = extent_items
845 .first()
846 .context("symlink has no EXTENT_DATA item")?;
847
848 let extent = FileExtentItem::parse(data)
849 .context("failed to parse symlink extent")?;
850
851 let target = match &extent.body {
852 FileExtentBody::Inline { inline_size } => {
853 let header_len = data.len() - inline_size;
854 &data[header_len..]
855 }
856 FileExtentBody::Regular { .. } => bail!("symlink extent is not inline"),
857 };
858
859 let target_str = std::str::from_utf8(target)
860 .context("symlink target is not valid UTF-8")?;
861
862 if opts.dry_run {
863 println!("{} -> {}", path.display(), target_str);
864 return Ok(());
865 }
866
867 if path.exists() && !opts.overwrite {
868 return Ok(());
869 }
870
871 if opts.verbose >= 2 {
872 eprintln!("SYMLINK: '{}' => '{}'", path.display(), target_str);
873 }
874
875 if path.exists() {
877 fs::remove_file(path).ok();
878 }
879
880 symlink(target_str, path).with_context(|| {
881 format!("failed to create symlink '{}'", path.display())
882 })?;
883
884 Ok(())
885}
886
887fn restore_xattrs(
889 items: &FsTreeItems,
890 ino: u64,
891 path: &Path,
892 errors: &mut u64,
893) {
894 let xattr_items = items.get(ino, KeyType::XattrItem);
895 for (_, data) in &xattr_items {
896 let entries = DirItem::parse_all(data);
897 for entry in entries {
898 let Ok(name) = std::str::from_utf8(&entry.name) else {
899 continue;
900 };
901 let Ok(c_path) =
902 std::ffi::CString::new(path.as_os_str().as_encoded_bytes())
903 else {
904 continue;
905 };
906 let Ok(c_name) = std::ffi::CString::new(name) else {
907 continue;
908 };
909 let ret = unsafe {
911 libc::lsetxattr(
912 c_path.as_ptr(),
913 c_name.as_ptr(),
914 entry.data.as_ptr().cast(),
915 entry.data.len(),
916 0,
917 )
918 };
919 if ret < 0 {
920 let err = io::Error::last_os_error();
921 eprintln!(
922 "warning: failed to set xattr '{name}' on '{}': {err}",
923 path.display()
924 );
925 *errors += 1;
926 }
927 }
928 }
929}
930
/// Best-effort application of ownership, permission bits, and timestamps
/// from the on-disk inode to the restored `path`.
///
/// Failures are printed and counted in `errors`; without `--ignore-errors`
/// a chown/chmod failure stops further metadata application for this path
/// (but never fails the restore — this function is infallible).
fn apply_metadata(
    items: &FsTreeItems,
    ino: u64,
    path: &Path,
    opts: &RestoreOpts,
    errors: &mut u64,
) {
    // Without a parseable INODE_ITEM there is nothing to apply.
    let inode_items = items.get(ino, KeyType::InodeItem);
    let Some((_, data)) = inode_items.first() else {
        return;
    };
    let Some(inode) = InodeItem::parse(data) else {
        return;
    };

    // Paths containing interior NUL bytes cannot be handed to libc.
    let Ok(c_path) =
        std::ffi::CString::new(path.as_os_str().as_encoded_bytes())
    else {
        return;
    };

    // SAFETY: `c_path` is a valid NUL-terminated string for the duration of
    // these calls, and `times` outlives the utimensat call.
    unsafe {
        // lchown so a symlink itself (not its target) gets the ownership.
        if libc::lchown(c_path.as_ptr(), inode.uid, inode.gid) < 0 {
            let err = io::Error::last_os_error();
            eprintln!("warning: failed to chown '{}': {err}", path.display());
            *errors += 1;
            if !opts.ignore_errors {
                return;
            }
        }
        // chmod follows symlinks, so it is skipped for links; the mask
        // keeps permission + setuid/setgid/sticky bits only.
        if !path.is_symlink()
            && libc::chmod(c_path.as_ptr(), inode.mode & 0o7777) < 0
        {
            let err = io::Error::last_os_error();
            eprintln!("warning: failed to chmod '{}': {err}", path.display());
            *errors += 1;
            if !opts.ignore_errors {
                return;
            }
        }

        // utimensat expects [atime, mtime], applied without following
        // symlinks.
        #[allow(clippy::cast_possible_wrap)]
        let times = [
            libc::timespec {
                tv_sec: inode.atime.sec as i64,
                tv_nsec: i64::from(inode.atime.nsec),
            },
            libc::timespec {
                tv_sec: inode.mtime.sec as i64,
                tv_nsec: i64::from(inode.mtime.nsec),
            },
        ];
        if libc::utimensat(
            libc::AT_FDCWD,
            c_path.as_ptr(),
            times.as_ptr(),
            libc::AT_SYMLINK_NOFOLLOW,
        ) < 0
        {
            let err = io::Error::last_os_error();
            eprintln!(
                "warning: failed to set times on '{}': {err}",
                path.display()
            );
            *errors += 1;
        }
    }
}
1002
1003fn decompress(
1005 data: &[u8],
1006 output_len: usize,
1007 compression: CompressionType,
1008) -> Result<Vec<u8>> {
1009 match compression {
1010 CompressionType::None => Ok(data.to_vec()),
1011 CompressionType::Zlib => {
1012 let mut decoder = flate2::read::ZlibDecoder::new(data);
1013 let mut out = vec![0u8; output_len];
1014 decoder
1015 .read_exact(&mut out)
1016 .context("zlib decompression failed")?;
1017 Ok(out)
1018 }
1019 CompressionType::Zstd => zstd::bulk::decompress(data, output_len)
1020 .context("zstd decompression failed"),
1021 CompressionType::Lzo => decompress_lzo(data, output_len),
1022 CompressionType::Unknown(t) => {
1023 bail!("unsupported compression type {t}")
1024 }
1025 }
1026}
1027
/// Decompresses a btrfs LZO extent.
///
/// Layout (as this parser understands it; matches the btrfs kernel writer):
/// a 4-byte little-endian total length (which includes itself), followed by
/// segments of [4-byte LE compressed length, compressed bytes]. A segment
/// length header never straddles a 4 KiB sector boundary — the writer pads
/// to the next sector instead, which the `sector_remaining` logic skips.
/// Each segment decompresses to at most one sector of output.
fn decompress_lzo(data: &[u8], output_len: usize) -> Result<Vec<u8>> {
    const SECTOR_SIZE: usize = 4096;

    if data.len() < 4 {
        bail!("LZO data too short for header");
    }
    let total_len = u32::from_le_bytes(data[0..4].try_into().unwrap()) as usize;
    if total_len > data.len() {
        bail!(
            "LZO total length {total_len} exceeds data length {}",
            data.len()
        );
    }

    let mut out = Vec::with_capacity(output_len);
    let mut pos = 4;

    while pos < total_len && out.len() < output_len {
        // If fewer than 4 bytes remain in the current sector, the next
        // segment header starts at the following sector boundary.
        let sector_remaining = SECTOR_SIZE - (pos % SECTOR_SIZE);
        if sector_remaining < 4 {
            if total_len - pos <= sector_remaining {
                // Only padding remains — done.
                break;
            }
            pos += sector_remaining;
        }

        if pos + 4 > total_len {
            bail!("LZO segment header truncated at offset {pos}");
        }
        let seg_len =
            u32::from_le_bytes(data[pos..pos + 4].try_into().unwrap()) as usize;
        pos += 4;

        if pos + seg_len > data.len() {
            bail!(
                "LZO segment data truncated at offset {pos}, \
                 need {seg_len} bytes"
            );
        }

        // A segment yields at most one sector; the final one may be
        // shorter when output_len is not sector-aligned.
        let remaining = (output_len - out.len()).min(SECTOR_SIZE);
        let mut segment_out = vec![0u8; remaining];
        lzokay::decompress::decompress(
            &data[pos..pos + seg_len],
            &mut segment_out,
        )
        .map_err(|e| {
            anyhow::anyhow!("LZO decompression failed at offset {pos}: {e:?}")
        })?;
        out.extend_from_slice(&segment_out);

        pos += seg_len;
    }

    // Defensive: never hand back more than the caller asked for.
    out.truncate(output_len);
    Ok(out)
}
1089
1090fn list_roots<R: Read + Seek>(
1092 reader: &mut reader::BlockReader<R>,
1093 root_bytenr: u64,
1094) -> Result<()> {
1095 let mut entries: Vec<(DiskKey, RootItem)> = Vec::new();
1096 collect_root_items_for_listing(reader, root_bytenr, &mut entries)?;
1097
1098 entries.sort_by_key(|(k, _)| k.objectid);
1100
1101 for (key, root_item) in &entries {
1102 println!(
1103 " tree key ({} ROOT_ITEM {}) {} level {}",
1104 key.objectid, key.offset, root_item.bytenr, root_item.level
1105 );
1106 }
1107
1108 Ok(())
1109}
1110
1111fn collect_root_items_for_listing<R: Read + Seek>(
1112 reader: &mut reader::BlockReader<R>,
1113 logical: u64,
1114 out: &mut Vec<(DiskKey, RootItem)>,
1115) -> Result<()> {
1116 let block = reader
1117 .read_tree_block(logical)
1118 .with_context(|| format!("failed to read tree block at {logical}"))?;
1119
1120 match &block {
1121 TreeBlock::Leaf {
1122 items: leaf_items,
1123 data,
1124 ..
1125 } => {
1126 let header_size = std::mem::size_of::<raw::btrfs_header>();
1127 for item in leaf_items {
1128 if item.key.key_type != KeyType::RootItem {
1129 continue;
1130 }
1131 let start = header_size + item.offset as usize;
1132 let end = start + item.size as usize;
1133 if end > data.len() {
1134 continue;
1135 }
1136 if let Some(ri) = RootItem::parse(&data[start..end]) {
1137 out.push((item.key, ri));
1138 }
1139 }
1140 }
1141 TreeBlock::Node { ptrs, .. } => {
1142 for ptr in ptrs {
1143 collect_root_items_for_listing(reader, ptr.blockptr, out)?;
1144 }
1145 }
1146 }
1147
1148 Ok(())
1149}