1use crate::{Format, Runnable, util::is_mounted};
2use anyhow::{Context, Result, bail};
3use btrfs_disk::{
4 items::{
5 CompressionType, DirItem, FileExtentBody, FileExtentItem,
6 FileExtentType, FileType, InodeItem, RootItem,
7 },
8 raw, reader,
9 superblock::SUPER_MIRROR_MAX,
10 tree::{DiskKey, KeyType, TreeBlock},
11};
12use clap::Parser;
13use regex_lite::Regex;
14use std::{
15 collections::HashMap,
16 fs::{self, File, OpenOptions},
17 io::{self, Read, Seek, Write},
18 os::unix::fs::symlink,
19 path::{Path, PathBuf},
20};
21
// Command-line arguments for `restore`: salvage files from a (possibly
// damaged) btrfs filesystem by reading its trees directly, without
// mounting.
//
// NOTE: plain `//` comments are used on purpose — `///` doc comments on a
// clap derive struct become user-visible help text, which must not change
// in a documentation-only edit.
#[derive(Parser, Debug)]
#[allow(clippy::doc_markdown, clippy::struct_excessive_bools)]
pub struct RestoreCommand {
    // Block device or image file holding the filesystem to restore from.
    device: PathBuf,

    // Destination directory; required unless --list-roots is given
    // (enforced at runtime in `run`).
    path: Option<PathBuf>,

    // Print what would be restored without writing anything.
    #[clap(short = 'D', long = "dry-run")]
    dry_run: bool,

    // Keep going past read/parse errors, counting them instead of aborting.
    #[clap(short = 'i', long)]
    ignore_errors: bool,

    // Overwrite files that already exist in the destination.
    #[clap(short = 'o', long)]
    overwrite: bool,

    // Restore owner, mode and timestamps after writing each entry.
    #[clap(short = 'm', long)]
    metadata: bool,

    // Also restore symbolic links (skipped by default).
    #[clap(short = 'S', long)]
    symlink: bool,

    // Also restore snapshots/subvolumes (skipped by default).
    #[clap(short = 's', long)]
    snapshots: bool,

    // Also restore extended attributes via lsetxattr.
    #[clap(short = 'x', long)]
    xattr: bool,

    // Only restore paths matching this regex (matched against "/a/b/c").
    #[clap(long)]
    path_regex: Option<String>,

    // Make --path-regex case-insensitive (prepends "(?i)" to the pattern).
    #[clap(short = 'c', long)]
    ignore_case: bool,

    // Instead of the fs root, start from the first objectid that has
    // DIR_INDEX items (useful when the top of the tree is damaged).
    #[clap(short = 'd', long)]
    find_dir: bool,

    // List ROOT_ITEMs found in the root tree and exit.
    #[clap(short = 'l', long)]
    list_roots: bool,

    // Verbosity; may be repeated (-v, -vv).
    #[clap(short = 'v', long, action = clap::ArgAction::Count)]
    verbose: u8,

    // Explicit logical byte offset of the fs tree root, bypassing the
    // root-tree lookup.
    #[clap(short = 'f', long)]
    fs_location: Option<u64>,

    // Objectid of the tree to restore (defaults to FS_TREE).
    #[clap(short = 'r', long)]
    root: Option<u64>,

    // Logical byte offset of the root tree, used by --list-roots instead
    // of the superblock's value.
    #[clap(short = 't', long)]
    tree_location: Option<u64>,

    // Superblock mirror index to use (must be < SUPER_MIRROR_MAX).
    #[clap(short = 'u', long = "super")]
    super_mirror: Option<u64>,
}
102
impl Runnable for RestoreCommand {
    /// Entry point for the `restore` subcommand.
    ///
    /// Opens the device, locates the requested fs tree, then walks it and
    /// materializes its contents under `self.path`. The `_format` and
    /// `_dry_run` parameters from the generic `Runnable` interface are
    /// ignored; this command uses its own `--dry-run` flag instead.
    #[allow(clippy::too_many_lines)]
    fn run(&self, _format: Format, _dry_run: bool) -> Result<()> {
        // Validate the user-supplied mirror index before touching the disk.
        if let Some(m) = self.super_mirror
            && m >= u64::from(SUPER_MIRROR_MAX)
        {
            bail!(
                "super mirror index {m} is out of range (max {})",
                SUPER_MIRROR_MAX - 1
            );
        }

        // Refuse to read from a live filesystem: the kernel may be mutating
        // the trees underneath us.
        if is_mounted(&self.device) {
            bail!(
                "'{}' is mounted, refusing to restore (unmount first)",
                self.device.display()
            );
        }

        let file = File::open(&self.device).with_context(|| {
            format!("cannot open '{}'", self.device.display())
        })?;

        // Open the filesystem. With an explicit --super, use exactly that
        // mirror; otherwise try each mirror in order until one works, so a
        // damaged primary superblock does not stop the restore.
        let mut open = if let Some(m) = self.super_mirror {
            #[allow(clippy::cast_possible_truncation)] reader::filesystem_open_mirror(file, m as u32)
                .context("failed to open filesystem")?
        } else {
            let mut result = None;
            for mirror in 0..SUPER_MIRROR_MAX {
                match reader::filesystem_open_mirror(file.try_clone()?, mirror)
                {
                    Ok(o) => {
                        if mirror > 0 {
                            eprintln!(
                                "using superblock mirror {mirror} \
                                 (primary was damaged)"
                            );
                        }
                        result = Some(o);
                        break;
                    }
                    Err(e) => {
                        eprintln!(
                            "warning: superblock mirror {mirror} \
                             failed: {e}"
                        );
                    }
                }
            }
            result.context("all superblock mirrors failed")?
        };

        // --list-roots short-circuits: print ROOT_ITEMs and exit.
        if self.list_roots {
            let root_bytenr =
                self.tree_location.unwrap_or(open.superblock.root);
            return list_roots(&mut open.reader, root_bytenr);
        }

        // Destination is only optional for --list-roots.
        let output_path = self.path.as_ref().ok_or_else(|| {
            anyhow::anyhow!(
                "destination path is required (unless --list-roots)"
            )
        })?;

        // Compile the optional path filter once; --ignore-case is
        // implemented by prefixing the inline "(?i)" flag.
        let path_regex = self
            .path_regex
            .as_ref()
            .map(|pat| {
                let full = if self.ignore_case {
                    format!("(?i){pat}")
                } else {
                    pat.clone()
                };
                Regex::new(&full)
                    .with_context(|| format!("invalid regex '{pat}'"))
            })
            .transpose()?;

        let fs_tree_oid =
            self.root.unwrap_or(u64::from(raw::BTRFS_FS_TREE_OBJECTID));

        // --fs-location overrides the root-tree lookup entirely.
        let fs_root_bytenr = if let Some(loc) = self.fs_location {
            loc
        } else {
            open.tree_roots
                .get(&fs_tree_oid)
                .map(|(bytenr, _)| *bytenr)
                .with_context(|| {
                    format!("tree root for objectid {fs_tree_oid} not found")
                })?
        };

        // Move the reader out of `open`; `open.tree_roots` stays borrowed
        // by `opts` below (partial move).
        let mut block_reader = open.reader;

        let opts = RestoreOpts {
            dry_run: self.dry_run,
            overwrite: self.overwrite,
            metadata: self.metadata,
            symlinks: self.symlink,
            snapshots: self.snapshots,
            xattr: self.xattr,
            ignore_errors: self.ignore_errors,
            verbose: self.verbose,
            path_regex: path_regex.as_ref(),
            tree_roots: &open.tree_roots,
        };

        let mut total_errors = 0;

        // One full DFS over the fs tree gathers every leaf item up front.
        let items = collect_fs_tree_items(
            &mut block_reader,
            fs_root_bytenr,
            self.ignore_errors,
        )?;

        // --find-dir: start from any objectid that has directory entries,
        // instead of assuming FIRST_FREE_OBJECTID is intact.
        let root_ino = if self.find_dir {
            let oid = find_first_dir(&items)?;
            println!("Using objectid {oid} for first dir");
            oid
        } else {
            u64::from(raw::BTRFS_FIRST_FREE_OBJECTID)
        };

        if !opts.dry_run {
            fs::create_dir_all(output_path).with_context(|| {
                format!(
                    "failed to create output directory '{}'",
                    output_path.display()
                )
            })?;
        }

        restore_dir(
            &mut block_reader,
            &items,
            root_ino,
            output_path,
            &opts,
            &mut total_errors,
            "",
        )?;

        // With -s, also restore every other subvolume tree into a
        // "snapshot.<oid>" directory. Subvolumes already restored through a
        // directory entry (via restore_dir/restore_snapshot) are skipped by
        // the exists() check.
        if self.snapshots {
            for (&oid, &(bytenr, _)) in &open.tree_roots {
                #[allow(clippy::cast_sign_loss)]
                let last_free = raw::BTRFS_LAST_FREE_OBJECTID as u64;
                if oid >= u64::from(raw::BTRFS_FIRST_FREE_OBJECTID)
                    && oid <= last_free
                    && oid != fs_tree_oid
                {
                    let snap_dest = output_path.join(format!("snapshot.{oid}"));
                    if snap_dest.exists() {
                        continue;
                    }
                    let snap_items = collect_fs_tree_items(
                        &mut block_reader,
                        bytenr,
                        self.ignore_errors,
                    )?;
                    if !opts.dry_run {
                        fs::create_dir_all(&snap_dest).with_context(|| {
                            format!(
                                "failed to create snapshot directory '{}'",
                                snap_dest.display()
                            )
                        })?;
                    }
                    let snap_root = u64::from(raw::BTRFS_FIRST_FREE_OBJECTID);
                    restore_dir(
                        &mut block_reader,
                        &snap_items,
                        snap_root,
                        &snap_dest,
                        &opts,
                        &mut total_errors,
                        "",
                    )?;
                }
            }
        }

        if total_errors > 0 {
            eprintln!("warning: {total_errors} error(s) during restore");
        }

        Ok(())
    }
}
302
/// Restore settings threaded through the recursive restore functions,
/// flattened from the CLI flags plus borrowed lookup state.
#[allow(clippy::struct_excessive_bools)]
struct RestoreOpts<'a> {
    dry_run: bool,
    overwrite: bool,
    metadata: bool,
    symlinks: bool,
    snapshots: bool,
    xattr: bool,
    ignore_errors: bool,
    // 0 = quiet, 1 = log restored entries, 2+ = extra detail (symlinks).
    verbose: u8,
    // Optional filter applied to the "/a/b/c"-style relative path.
    path_regex: Option<&'a Regex>,
    // objectid -> (root bytenr, _) map from filesystem open; used to
    // resolve subvolume trees when restoring snapshots.
    tree_roots: &'a std::collections::BTreeMap<u64, (u64, u64)>,
}
316
/// Every leaf item of one fs tree, grouped by objectid (inode number),
/// each stored as (key, raw item payload bytes).
struct FsTreeItems {
    items: HashMap<u64, Vec<(DiskKey, Vec<u8>)>>,
}
321
322impl FsTreeItems {
323 fn get(&self, objectid: u64, key_type: KeyType) -> Vec<(&DiskKey, &[u8])> {
325 self.items
326 .get(&objectid)
327 .map(|v| {
328 v.iter()
329 .filter(|(k, _)| k.key_type == key_type)
330 .map(|(k, d)| (k, d.as_slice()))
331 .collect()
332 })
333 .unwrap_or_default()
334 }
335
336 fn has_key_type(&self, key_type: KeyType) -> Option<u64> {
338 for (oid, entries) in &self.items {
339 if entries.iter().any(|(k, _)| k.key_type == key_type) {
340 return Some(*oid);
341 }
342 }
343 None
344 }
345}
346
347fn collect_fs_tree_items<R: Read + Seek>(
349 reader: &mut reader::BlockReader<R>,
350 root_bytenr: u64,
351 ignore_errors: bool,
352) -> Result<FsTreeItems> {
353 let mut items: HashMap<u64, Vec<(DiskKey, Vec<u8>)>> = HashMap::new();
354 let mut errors = 0u64;
355 collect_items_dfs(
356 reader,
357 root_bytenr,
358 &mut items,
359 ignore_errors,
360 &mut errors,
361 )?;
362 if errors > 0 {
363 eprintln!(
364 "warning: {errors} tree block(s) could not be read during scan"
365 );
366 }
367 Ok(FsTreeItems { items })
368}
369
370fn collect_items_dfs<R: Read + Seek>(
371 reader: &mut reader::BlockReader<R>,
372 logical: u64,
373 items: &mut HashMap<u64, Vec<(DiskKey, Vec<u8>)>>,
374 ignore_errors: bool,
375 errors: &mut u64,
376) -> Result<()> {
377 let block = match reader.read_tree_block(logical) {
378 Ok(b) => b,
379 Err(e) => {
380 if ignore_errors {
381 eprintln!(
382 "warning: skipping unreadable tree block at \
383 logical {logical}: {e}"
384 );
385 *errors += 1;
386 return Ok(());
387 }
388 return Err(e).with_context(|| {
389 format!("failed to read tree block at {logical}")
390 });
391 }
392 };
393
394 match &block {
395 TreeBlock::Leaf {
396 items: leaf_items,
397 data,
398 ..
399 } => {
400 let header_size = std::mem::size_of::<raw::btrfs_header>();
401 for item in leaf_items {
402 let start = header_size + item.offset as usize;
403 let end = start + item.size as usize;
404 if end <= data.len() {
405 items
406 .entry(item.key.objectid)
407 .or_default()
408 .push((item.key, data[start..end].to_vec()));
409 }
410 }
411 }
412 TreeBlock::Node { ptrs, .. } => {
413 for ptr in ptrs {
414 collect_items_dfs(
415 reader,
416 ptr.blockptr,
417 items,
418 ignore_errors,
419 errors,
420 )?;
421 }
422 }
423 }
424
425 Ok(())
426}
427
428fn find_first_dir(items: &FsTreeItems) -> Result<u64> {
430 items
431 .has_key_type(KeyType::DirIndex)
432 .context("no directory entry found in tree")
433}
434
/// Recursively restores the directory with inode `dir_ino` into
/// `output_path`, driven by the inode's DIR_INDEX items.
///
/// `prefix` is the filesystem-relative path of this directory ("" at the
/// root) and is used both for `--path-regex` matching and recursion.
/// Errors either abort (default) or are counted in `errors` and skipped
/// (`--ignore-errors`).
#[allow(clippy::too_many_lines)]
fn restore_dir<R: Read + Seek>(
    reader: &mut reader::BlockReader<R>,
    items: &FsTreeItems,
    dir_ino: u64,
    output_path: &Path,
    opts: &RestoreOpts,
    errors: &mut u64,
    prefix: &str,
) -> Result<()> {
    let dir_entries = items.get(dir_ino, KeyType::DirIndex);

    for (_key, data) in &dir_entries {
        // A single DIR_INDEX payload may hold several packed DirItems.
        let parsed = DirItem::parse_all(data);
        for entry in parsed {
            // Non-UTF-8 names are restored lossily rather than skipped.
            let name = match std::str::from_utf8(&entry.name) {
                Ok(s) => s.to_string(),
                Err(_) => String::from_utf8_lossy(&entry.name).into_owned(),
            };
            let child_path = output_path.join(&name);
            let child_ino = entry.location.objectid;

            // Absolute-style path within the filesystem, e.g. "/a/b".
            let rel_path = if prefix.is_empty() {
                format!("/{name}")
            } else {
                format!("{prefix}/{name}")
            };

            if let Some(re) = opts.path_regex
                && !re.is_match(&rel_path)
            {
                continue;
            }

            // A dir entry pointing at a ROOT_ITEM is a subvolume/snapshot
            // boundary: it lives in its own tree, not in `items`.
            if entry.location.key_type == KeyType::RootItem {
                if opts.snapshots {
                    let subvol_oid = entry.location.objectid;
                    if let Some(&(bytenr, _)) = opts.tree_roots.get(&subvol_oid)
                        && let Err(e) = restore_snapshot(
                            reader,
                            bytenr,
                            &child_path,
                            opts,
                            errors,
                            &rel_path,
                        )
                    {
                        if !opts.ignore_errors {
                            return Err(e);
                        }
                        eprintln!(
                            "warning: failed to restore snapshot '{}': {e}",
                            child_path.display()
                        );
                        *errors += 1;
                    }
                } else {
                    eprintln!("Skipping snapshot {name} (use -s to restore)");
                }
                continue;
            }

            match entry.file_type {
                FileType::Dir => {
                    if opts.dry_run {
                        println!("{}/", child_path.display());
                    } else {
                        if opts.verbose >= 1 {
                            eprintln!("Restoring {}/", child_path.display());
                        }
                        if let Err(e) = fs::create_dir_all(&child_path) {
                            if !opts.ignore_errors {
                                return Err(e).with_context(|| {
                                    format!(
                                        "failed to create directory '{}'",
                                        child_path.display()
                                    )
                                });
                            }
                            eprintln!(
                                "warning: failed to create '{}': {e}",
                                child_path.display()
                            );
                            *errors += 1;
                            continue;
                        }
                    }
                    // Recurse even in dry-run mode so children get listed.
                    restore_dir(
                        reader,
                        items,
                        child_ino,
                        &child_path,
                        opts,
                        errors,
                        &rel_path,
                    )?;
                    // Applied after recursion so restoring children can't
                    // disturb the directory's restored timestamps/mode.
                    if opts.metadata && !opts.dry_run {
                        apply_metadata(
                            items,
                            child_ino,
                            &child_path,
                            opts,
                            errors,
                        );
                    }
                }
                FileType::RegFile => {
                    if let Err(e) = restore_file(
                        reader,
                        items,
                        child_ino,
                        &child_path,
                        opts,
                        errors,
                    ) {
                        if !opts.ignore_errors {
                            return Err(e);
                        }
                        eprintln!(
                            "warning: failed to restore '{}': {e}",
                            child_path.display()
                        );
                        *errors += 1;
                    }
                }
                // Symlinks are only restored when -S was given.
                FileType::Symlink if opts.symlinks => {
                    if let Err(e) =
                        restore_symlink(items, child_ino, &child_path, opts)
                    {
                        if !opts.ignore_errors {
                            return Err(e);
                        }
                        eprintln!(
                            "warning: failed to restore symlink '{}': {e}",
                            child_path.display()
                        );
                        *errors += 1;
                    }
                    if opts.metadata && !opts.dry_run {
                        apply_metadata(
                            items,
                            child_ino,
                            &child_path,
                            opts,
                            errors,
                        );
                    }
                }
                // Other types (fifo, sockets, devices, …) are not restored.
                _ => {}
            }

            if opts.xattr && !opts.dry_run {
                restore_xattrs(items, child_ino, &child_path, errors);
            }
        }
    }

    Ok(())
}
605
606fn restore_snapshot<R: Read + Seek>(
608 reader: &mut reader::BlockReader<R>,
609 bytenr: u64,
610 output_path: &Path,
611 opts: &RestoreOpts,
612 errors: &mut u64,
613 prefix: &str,
614) -> Result<()> {
615 let snap_items = collect_fs_tree_items(reader, bytenr, opts.ignore_errors)?;
616
617 if !opts.dry_run {
618 fs::create_dir_all(output_path).with_context(|| {
619 format!(
620 "failed to create snapshot directory '{}'",
621 output_path.display()
622 )
623 })?;
624 }
625
626 let snap_root = u64::from(raw::BTRFS_FIRST_FREE_OBJECTID);
627 restore_dir(
628 reader,
629 &snap_items,
630 snap_root,
631 output_path,
632 opts,
633 errors,
634 prefix,
635 )
636}
637
/// Restores one regular file (inode `ino`) to `path` by replaying its
/// EXTENT_DATA items: inline extents are written directly, regular extents
/// are read from disk (and decompressed if needed), holes and prealloc
/// ranges are left to the final `set_len`. Bogus extents are warned about
/// and counted in `errors` rather than aborting the file.
#[allow(clippy::too_many_lines, clippy::cast_possible_truncation)]
fn restore_file<R: Read + Seek>(
    reader: &mut reader::BlockReader<R>,
    items: &FsTreeItems,
    ino: u64,
    path: &Path,
    opts: &RestoreOpts,
    errors: &mut u64,
) -> Result<()> {
    if opts.dry_run {
        println!("{}", path.display());
        return Ok(());
    }

    // Never clobber existing output unless -o was given.
    if path.exists() && !opts.overwrite {
        return Ok(());
    }

    if opts.verbose >= 1 {
        eprintln!("Restoring {}", path.display());
    }

    let mut file = OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open(path)
        .with_context(|| format!("failed to create '{}'", path.display()))?;

    // Logical file size from the inode, applied via set_len at the end so
    // trailing holes / truncated tails come out right.
    let inode_size = items
        .get(ino, KeyType::InodeItem)
        .first()
        .and_then(|(_, d)| InodeItem::parse(d))
        .map(|i| i.size);

    let extent_items = items.get(ino, KeyType::ExtentData);

    for (key, data) in &extent_items {
        // Unparseable extents are skipped silently.
        let Some(extent) = FileExtentItem::parse(data) else {
            continue;
        };

        // Preallocated-but-unwritten ranges contain no data.
        if extent.extent_type == FileExtentType::Prealloc {
            continue;
        }

        // key.offset is the byte position of this extent within the file.
        let file_offset = key.offset;

        match &extent.body {
            FileExtentBody::Inline { inline_size } => {
                // Inline payload is the tail of the raw item, after the
                // extent header.
                let header_len = data.len() - inline_size;
                let inline_data = &data[header_len..];

                let output = if extent.compression == CompressionType::None {
                    inline_data.to_vec()
                } else {
                    decompress(
                        inline_data,
                        extent.ram_bytes as usize,
                        extent.compression,
                    )
                    .with_context(|| {
                        format!(
                            "failed to decompress inline extent in '{}'",
                            path.display()
                        )
                    })?
                };

                file.seek(io::SeekFrom::Start(file_offset))?;
                file.write_all(&output).with_context(|| {
                    format!(
                        "failed to write inline extent to '{}'",
                        path.display()
                    )
                })?;
            }
            FileExtentBody::Regular {
                disk_bytenr,
                disk_num_bytes,
                offset,
                num_bytes,
            } => {
                // disk_bytenr == 0 marks a hole; set_len fills it later.
                if *disk_bytenr == 0 {
                    continue;
                }

                // Sanity-check extent bookkeeping before reading; corrupt
                // items are reported and skipped rather than trusted.
                if extent.compression == CompressionType::None
                    && *offset >= *disk_num_bytes
                {
                    eprintln!(
                        "warning: bogus extent offset {} >= disk_size {} \
                         in '{}'",
                        offset,
                        disk_num_bytes,
                        path.display()
                    );
                    *errors += 1;
                    continue;
                }
                if *offset > extent.ram_bytes {
                    eprintln!(
                        "warning: bogus extent offset {} > ram_bytes {} \
                         in '{}'",
                        offset,
                        extent.ram_bytes,
                        path.display()
                    );
                    *errors += 1;
                    continue;
                }

                if extent.compression == CompressionType::None {
                    // Uncompressed: read exactly the referenced slice of
                    // the on-disk extent (skipping `offset` bytes into it).
                    let data_buf = reader
                        .read_data(disk_bytenr + offset, *num_bytes as usize)
                        .with_context(|| {
                            format!(
                                "failed to read extent at logical {disk_bytenr}"
                            )
                        })?;

                    file.seek(io::SeekFrom::Start(file_offset))?;
                    file.write_all(&data_buf).with_context(|| {
                        format!(
                            "failed to write extent to '{}'",
                            path.display()
                        )
                    })?;
                } else {
                    // Compressed: the whole on-disk extent must be read and
                    // decompressed before the referenced window is sliced.
                    let compressed = reader
                        .read_data(*disk_bytenr, *disk_num_bytes as usize)
                        .with_context(|| {
                            format!(
                                "failed to read compressed extent at logical {disk_bytenr}"
                            )
                        })?;

                    let decompressed = decompress(
                        &compressed,
                        extent.ram_bytes as usize,
                        extent.compression,
                    )
                    .with_context(|| {
                        format!(
                            "failed to decompress extent in '{}'",
                            path.display()
                        )
                    })?;

                    let start = *offset as usize;
                    let end = start + *num_bytes as usize;
                    // NOTE(review): if decompress() ever yields fewer than
                    // `offset` bytes, `&decompressed[start..]` panics; the
                    // `offset > ram_bytes` check above bounds this only if
                    // decompress output is always ram_bytes long — confirm.
                    let slice = if end <= decompressed.len() {
                        &decompressed[start..end]
                    } else {
                        &decompressed[start..]
                    };

                    file.seek(io::SeekFrom::Start(file_offset))?;
                    file.write_all(slice).with_context(|| {
                        format!(
                            "failed to write extent to '{}'",
                            path.display()
                        )
                    })?;
                }
            }
        }
    }

    // Fix up the final size: extends sparse tails, trims overshoot.
    if let Some(size) = inode_size {
        file.set_len(size)?;
    }

    if opts.metadata {
        // Close before touching timestamps so our own writes can't bump
        // mtime afterwards.
        drop(file);
        apply_metadata(items, ino, path, opts, errors);
    }

    Ok(())
}
831
832fn restore_symlink(
834 items: &FsTreeItems,
835 ino: u64,
836 path: &Path,
837 opts: &RestoreOpts,
838) -> Result<()> {
839 let extent_items = items.get(ino, KeyType::ExtentData);
840 let (_, data) = extent_items
841 .first()
842 .context("symlink has no EXTENT_DATA item")?;
843
844 let extent = FileExtentItem::parse(data)
845 .context("failed to parse symlink extent")?;
846
847 let target = match &extent.body {
848 FileExtentBody::Inline { inline_size } => {
849 let header_len = data.len() - inline_size;
850 &data[header_len..]
851 }
852 FileExtentBody::Regular { .. } => bail!("symlink extent is not inline"),
853 };
854
855 let target_str = std::str::from_utf8(target)
856 .context("symlink target is not valid UTF-8")?;
857
858 if opts.dry_run {
859 println!("{} -> {}", path.display(), target_str);
860 return Ok(());
861 }
862
863 if path.exists() && !opts.overwrite {
864 return Ok(());
865 }
866
867 if opts.verbose >= 2 {
868 eprintln!("SYMLINK: '{}' => '{}'", path.display(), target_str);
869 }
870
871 if path.exists() {
873 fs::remove_file(path).ok();
874 }
875
876 symlink(target_str, path).with_context(|| {
877 format!("failed to create symlink '{}'", path.display())
878 })?;
879
880 Ok(())
881}
882
/// Re-applies the extended attributes stored in inode `ino`'s XATTR_ITEMs
/// to `path` via lsetxattr(2). Failures are reported and counted, never
/// fatal; entries with non-UTF-8 or NUL-containing names are skipped.
fn restore_xattrs(
    items: &FsTreeItems,
    ino: u64,
    path: &Path,
    errors: &mut u64,
) {
    let xattr_items = items.get(ino, KeyType::XattrItem);
    for (_, data) in &xattr_items {
        // Xattrs reuse the DirItem wire format: name + value per entry,
        // possibly several entries packed into one item.
        let entries = DirItem::parse_all(data);
        for entry in entries {
            let Ok(name) = std::str::from_utf8(&entry.name) else {
                continue;
            };
            // CString::new fails on interior NULs; such paths/names can't
            // be passed to the C API, so skip them.
            let Ok(c_path) =
                std::ffi::CString::new(path.as_os_str().as_encoded_bytes())
            else {
                continue;
            };
            let Ok(c_name) = std::ffi::CString::new(name) else {
                continue;
            };
            // SAFETY: both CStrings are NUL-terminated and outlive the
            // call; the value pointer/length pair describes a valid,
            // readable buffer owned by `entry.data`.
            let ret = unsafe {
                libc::lsetxattr(
                    c_path.as_ptr(),
                    c_name.as_ptr(),
                    entry.data.as_ptr().cast(),
                    entry.data.len(),
                    0,
                )
            };
            if ret < 0 {
                let err = io::Error::last_os_error();
                eprintln!(
                    "warning: failed to set xattr '{name}' on '{}': {err}",
                    path.display()
                );
                *errors += 1;
            }
        }
    }
}
926
/// Restores owner, permission bits and atime/mtime on `path` from the
/// inode's INODE_ITEM. All failures are warned about and counted; when
/// `--ignore-errors` is off, later steps are skipped after a failure
/// (but the function itself never returns an error).
fn apply_metadata(
    items: &FsTreeItems,
    ino: u64,
    path: &Path,
    opts: &RestoreOpts,
    errors: &mut u64,
) {
    // Missing or unparseable inode item: nothing to apply.
    let inode_items = items.get(ino, KeyType::InodeItem);
    let Some((_, data)) = inode_items.first() else {
        return;
    };
    let Some(inode) = InodeItem::parse(data) else {
        return;
    };

    // Paths with interior NULs cannot be handed to the C API.
    let Ok(c_path) =
        std::ffi::CString::new(path.as_os_str().as_encoded_bytes())
    else {
        return;
    };

    // SAFETY: c_path is a valid NUL-terminated string for the duration of
    // every call below; `times` is a properly initialized 2-element
    // timespec array as utimensat(2) requires.
    unsafe {
        // lchown so a symlink's own ownership is set, not its target's.
        if libc::lchown(c_path.as_ptr(), inode.uid, inode.gid) < 0 {
            let err = io::Error::last_os_error();
            eprintln!("warning: failed to chown '{}': {err}", path.display());
            *errors += 1;
            if !opts.ignore_errors {
                return;
            }
        }
        // chmod(2) follows symlinks, so skip it for symlinks entirely
        // (a link's own mode is meaningless on Linux anyway).
        if !path.is_symlink()
            && libc::chmod(c_path.as_ptr(), inode.mode & 0o7777) < 0
        {
            let err = io::Error::last_os_error();
            eprintln!("warning: failed to chmod '{}': {err}", path.display());
            *errors += 1;
            if !opts.ignore_errors {
                return;
            }
        }

        // utimensat expects [atime, mtime] in that order.
        #[allow(clippy::cast_possible_wrap)] let times = [
            libc::timespec {
                tv_sec: inode.atime.sec as i64,
                tv_nsec: i64::from(inode.atime.nsec),
            },
            libc::timespec {
                tv_sec: inode.mtime.sec as i64,
                tv_nsec: i64::from(inode.mtime.nsec),
            },
        ];
        // AT_SYMLINK_NOFOLLOW: set the link's own timestamps.
        if libc::utimensat(
            libc::AT_FDCWD,
            c_path.as_ptr(),
            times.as_ptr(),
            libc::AT_SYMLINK_NOFOLLOW,
        ) < 0
        {
            let err = io::Error::last_os_error();
            eprintln!(
                "warning: failed to set times on '{}': {err}",
                path.display()
            );
            *errors += 1;
        }
    }
}
998
999fn decompress(
1001 data: &[u8],
1002 output_len: usize,
1003 compression: CompressionType,
1004) -> Result<Vec<u8>> {
1005 match compression {
1006 CompressionType::None => Ok(data.to_vec()),
1007 CompressionType::Zlib => {
1008 let mut decoder = flate2::read::ZlibDecoder::new(data);
1009 let mut out = vec![0u8; output_len];
1010 decoder
1011 .read_exact(&mut out)
1012 .context("zlib decompression failed")?;
1013 Ok(out)
1014 }
1015 CompressionType::Zstd => zstd::bulk::decompress(data, output_len)
1016 .context("zstd decompression failed"),
1017 CompressionType::Lzo => decompress_lzo(data, output_len),
1018 CompressionType::Unknown(t) => {
1019 bail!("unsupported compression type {t}")
1020 }
1021 }
1022}
1023
/// Decompresses a btrfs LZO extent stream into `output_len` bytes.
///
/// Wire format (as handled here): a 4-byte LE total stream length, then a
/// sequence of segments, each a 4-byte LE compressed length followed by
/// that many compressed bytes. Segment length headers never straddle a
/// 4 KiB sector boundary; when fewer than 4 bytes remain in the current
/// sector, those bytes are padding and are skipped.
fn decompress_lzo(data: &[u8], output_len: usize) -> Result<Vec<u8>> {
    const SECTOR_SIZE: usize = 4096;

    if data.len() < 4 {
        bail!("LZO data too short for header");
    }
    let total_len = u32::from_le_bytes(data[0..4].try_into().unwrap()) as usize;
    if total_len > data.len() {
        bail!(
            "LZO total length {total_len} exceeds data length {}",
            data.len()
        );
    }

    let mut out = Vec::with_capacity(output_len);
    let mut pos = 4;

    while pos < total_len && out.len() < output_len {
        // Skip sector padding: a 4-byte segment header is never split
        // across a sector boundary.
        let sector_remaining = SECTOR_SIZE - (pos % SECTOR_SIZE);
        if sector_remaining < 4 {
            if total_len - pos <= sector_remaining {
                // Only padding left in the stream — done.
                break;
            }
            pos += sector_remaining;
        }

        if pos + 4 > total_len {
            bail!("LZO segment header truncated at offset {pos}");
        }
        let seg_len =
            u32::from_le_bytes(data[pos..pos + 4].try_into().unwrap()) as usize;
        pos += 4;

        if pos + seg_len > data.len() {
            bail!(
                "LZO segment data truncated at offset {pos}, \
                 need {seg_len} bytes"
            );
        }

        // Each segment decompresses to at most one sector; the final one
        // may be capped by the remaining output budget.
        let remaining = (output_len - out.len()).min(SECTOR_SIZE);
        let mut segment_out = vec![0u8; remaining];
        // NOTE(review): the actual decompressed size reported by lzokay is
        // discarded; if a segment yields fewer than `remaining` bytes the
        // zero-initialized tail of `segment_out` is still appended —
        // confirm segments always fill the buffer exactly.
        lzokay::decompress::decompress(
            &data[pos..pos + seg_len],
            &mut segment_out,
        )
        .map_err(|e| {
            anyhow::anyhow!("LZO decompression failed at offset {pos}: {e:?}")
        })?;
        out.extend_from_slice(&segment_out);

        pos += seg_len;
    }

    // Clamp in case the last sector overshot the logical extent size.
    out.truncate(output_len);
    Ok(out)
}
1085
1086fn list_roots<R: Read + Seek>(
1088 reader: &mut reader::BlockReader<R>,
1089 root_bytenr: u64,
1090) -> Result<()> {
1091 let mut entries: Vec<(DiskKey, RootItem)> = Vec::new();
1092 collect_root_items_for_listing(reader, root_bytenr, &mut entries)?;
1093
1094 entries.sort_by_key(|(k, _)| k.objectid);
1096
1097 for (key, root_item) in &entries {
1098 println!(
1099 " tree key ({} ROOT_ITEM {}) {} level {}",
1100 key.objectid, key.offset, root_item.bytenr, root_item.level
1101 );
1102 }
1103
1104 Ok(())
1105}
1106
1107fn collect_root_items_for_listing<R: Read + Seek>(
1108 reader: &mut reader::BlockReader<R>,
1109 logical: u64,
1110 out: &mut Vec<(DiskKey, RootItem)>,
1111) -> Result<()> {
1112 let block = reader
1113 .read_tree_block(logical)
1114 .with_context(|| format!("failed to read tree block at {logical}"))?;
1115
1116 match &block {
1117 TreeBlock::Leaf {
1118 items: leaf_items,
1119 data,
1120 ..
1121 } => {
1122 let header_size = std::mem::size_of::<raw::btrfs_header>();
1123 for item in leaf_items {
1124 if item.key.key_type != KeyType::RootItem {
1125 continue;
1126 }
1127 let start = header_size + item.offset as usize;
1128 let end = start + item.size as usize;
1129 if end > data.len() {
1130 continue;
1131 }
1132 if let Some(ri) = RootItem::parse(&data[start..end]) {
1133 out.push((item.key, ri));
1134 }
1135 }
1136 }
1137 TreeBlock::Node { ptrs, .. } => {
1138 for ptr in ptrs {
1139 collect_root_items_for_listing(reader, ptr.blockptr, out)?;
1140 }
1141 }
1142 }
1143
1144 Ok(())
1145}