1use crate::{Format, Runnable, util::is_mounted};
2use anyhow::{Context, Result, bail};
3use btrfs_disk::{
4 items::{
5 CompressionType, DirItem, FileExtentBody, FileExtentItem,
6 FileExtentType, FileType, InodeItem, RootItem,
7 },
8 raw, reader,
9 superblock::SUPER_MIRROR_MAX,
10 tree::{DiskKey, KeyType, TreeBlock},
11};
12use clap::Parser;
13use regex_lite::Regex;
14use std::{
15 collections::HashMap,
16 fs::{self, File, OpenOptions},
17 io::{self, Read, Seek, Write},
18 os::unix::fs::symlink,
19 path::{Path, PathBuf},
20};
21
// CLI options for the `restore` subcommand: extract files from a (possibly
// damaged, unmounted) btrfs device/image into a local directory.
//
// NOTE: comments here are intentionally `//`, not `///` — clap turns doc
// comments into --help text, and adding them would change CLI output.
#[derive(Parser, Debug)]
pub struct RestoreCommand {
    // Block device or image file containing the btrfs filesystem.
    device: PathBuf,

    // Destination directory; required unless --list-roots is given.
    path: Option<PathBuf>,

    // Print what would be restored without writing anything.
    #[clap(short = 'D', long = "dry-run")]
    dry_run: bool,

    // Warn and count failures instead of aborting on the first error.
    #[clap(short = 'i', long)]
    ignore_errors: bool,

    // Overwrite existing files (otherwise they are silently skipped).
    #[clap(short = 'o', long)]
    overwrite: bool,

    // Restore owner/mode/timestamps (lchown/chmod/utimensat).
    #[clap(short = 'm', long)]
    metadata: bool,

    // Also recreate symlinks.
    #[clap(short = 'S', long)]
    symlink: bool,

    // Also restore subvolumes/snapshots.
    #[clap(short = 's', long)]
    snapshots: bool,

    // Also restore extended attributes.
    #[clap(short = 'x', long)]
    xattr: bool,

    // Only restore paths matching this regex (matched against the
    // in-filesystem path, e.g. "/dir/file").
    #[clap(long)]
    path_regex: Option<String>,

    // Make --path-regex case-insensitive (prepends "(?i)").
    #[clap(short = 'c', long)]
    ignore_case: bool,

    // Start from the first objectid owning a DIR_INDEX item instead of
    // the filesystem root inode.
    #[clap(short = 'd', long)]
    find_dir: bool,

    // List tree roots (ROOT_ITEMs) and exit without restoring.
    #[clap(short = 'l', long)]
    list_roots: bool,

    // Verbosity; may be repeated (-v, -vv, ...).
    #[clap(short = 'v', long, action = clap::ArgAction::Count)]
    verbose: u8,

    // Logical byte offset of the FS tree root block, overriding the
    // lookup via the root tree.
    #[clap(short = 'f', long)]
    fs_location: Option<u64>,

    // Objectid of the tree to restore (default: the FS tree).
    #[clap(short = 'r', long)]
    root: Option<u64>,

    // Logical bytenr of the root tree (used by --list-roots).
    #[clap(short = 't', long)]
    tree_location: Option<u64>,

    // Superblock mirror index to read (0..SUPER_MIRROR_MAX).
    #[clap(short = 'u', long = "super")]
    super_mirror: Option<u64>,
}
101
impl Runnable for RestoreCommand {
    /// Entry point for `restore`: validates arguments, opens the device,
    /// locates the requested FS tree, and walks it into the output path.
    ///
    /// The framework-level `_format`/`_dry_run` arguments are unused; this
    /// command carries its own `--dry-run` flag.
    fn run(&self, _format: Format, _dry_run: bool) -> Result<()> {
        // Reject an out-of-range superblock mirror index up front.
        if let Some(m) = self.super_mirror
            && m >= u64::from(SUPER_MIRROR_MAX)
        {
            bail!(
                "super mirror index {m} is out of range (max {})",
                SUPER_MIRROR_MAX - 1
            );
        }

        // Never operate on a mounted filesystem.
        if is_mounted(&self.device) {
            bail!(
                "'{}' is mounted, refusing to restore (unmount first)",
                self.device.display()
            );
        }

        let file = File::open(&self.device).with_context(|| {
            format!("cannot open '{}'", self.device.display())
        })?;

        // Open the filesystem: either the explicitly requested superblock
        // mirror, or the first mirror in 0..SUPER_MIRROR_MAX that parses.
        let mut open = if let Some(m) = self.super_mirror {
            reader::filesystem_open_mirror(file, m as u32)
                .context("failed to open filesystem")?
        } else {
            let mut result = None;
            for mirror in 0..SUPER_MIRROR_MAX {
                match reader::filesystem_open_mirror(file.try_clone()?, mirror)
                {
                    Ok(o) => {
                        if mirror > 0 {
                            eprintln!(
                                "using superblock mirror {mirror} \
                                 (primary was damaged)"
                            );
                        }
                        result = Some(o);
                        break;
                    }
                    Err(e) => {
                        eprintln!(
                            "warning: superblock mirror {mirror} \
                             failed: {e}"
                        );
                    }
                }
            }
            result.context("all superblock mirrors failed")?
        };

        // --list-roots short-circuits: print ROOT_ITEMs and exit.
        if self.list_roots {
            let root_bytenr =
                self.tree_location.unwrap_or(open.superblock.root);
            return list_roots(&mut open.reader, root_bytenr);
        }

        let output_path = self.path.as_ref().ok_or_else(|| {
            anyhow::anyhow!(
                "destination path is required (unless --list-roots)"
            )
        })?;

        // Compile --path-regex, prepending (?i) when --ignore-case is set.
        let path_regex = self
            .path_regex
            .as_ref()
            .map(|pat| {
                let full = if self.ignore_case {
                    format!("(?i){pat}")
                } else {
                    pat.clone()
                };
                Regex::new(&full)
                    .with_context(|| format!("invalid regex '{pat}'"))
            })
            .transpose()?;

        // Which tree to restore: --root overrides the default FS tree.
        let fs_tree_oid =
            self.root.unwrap_or(raw::BTRFS_FS_TREE_OBJECTID as u64);

        // Where that tree's root block lives: --fs-location overrides the
        // lookup in the tree-roots map.
        let fs_root_bytenr = if let Some(loc) = self.fs_location {
            loc
        } else {
            open.tree_roots
                .get(&fs_tree_oid)
                .map(|(bytenr, _)| *bytenr)
                .with_context(|| {
                    format!("tree root for objectid {fs_tree_oid} not found")
                })?
        };

        let mut block_reader = open.reader;

        // Flatten the CLI flags into the options bundle threaded through
        // the restore walk.
        let opts = RestoreOpts {
            dry_run: self.dry_run,
            overwrite: self.overwrite,
            metadata: self.metadata,
            symlinks: self.symlink,
            snapshots: self.snapshots,
            xattr: self.xattr,
            ignore_errors: self.ignore_errors,
            verbose: self.verbose,
            path_regex: path_regex.as_ref(),
            tree_roots: &open.tree_roots,
        };

        let mut total_errors = 0;

        // Scan the entire FS tree into memory, grouped by objectid.
        let items = collect_fs_tree_items(
            &mut block_reader,
            fs_root_bytenr,
            self.ignore_errors,
        )?;

        // --find-dir: start from the first inode owning a DIR_INDEX item
        // instead of the canonical first-free objectid.
        let root_ino = if self.find_dir {
            let oid = find_first_dir(&items)?;
            println!("Using objectid {oid} for first dir");
            oid
        } else {
            raw::BTRFS_FIRST_FREE_OBJECTID as u64
        };

        if !opts.dry_run {
            fs::create_dir_all(output_path).with_context(|| {
                format!(
                    "failed to create output directory '{}'",
                    output_path.display()
                )
            })?;
        }

        restore_dir(
            &mut block_reader,
            &items,
            root_ino,
            output_path,
            &opts,
            &mut total_errors,
            "",
        )?;

        // With -s, additionally restore every subvolume tree in the
        // FS-tree objectid range under "snapshot.<oid>" directories.
        // Snapshots already reached via directory entries inside
        // restore_dir are skipped by the `snap_dest.exists()` check.
        if self.snapshots {
            for (&oid, &(bytenr, _)) in &open.tree_roots {
                if oid >= raw::BTRFS_FIRST_FREE_OBJECTID as u64
                    && oid <= raw::BTRFS_LAST_FREE_OBJECTID as u64
                    && oid != fs_tree_oid
                {
                    let snap_dest = output_path.join(format!("snapshot.{oid}"));
                    if snap_dest.exists() {
                        continue;
                    }
                    let snap_items = collect_fs_tree_items(
                        &mut block_reader,
                        bytenr,
                        self.ignore_errors,
                    )?;
                    if !opts.dry_run {
                        fs::create_dir_all(&snap_dest).with_context(|| {
                            format!(
                                "failed to create snapshot directory '{}'",
                                snap_dest.display()
                            )
                        })?;
                    }
                    let snap_root = raw::BTRFS_FIRST_FREE_OBJECTID as u64;
                    restore_dir(
                        &mut block_reader,
                        &snap_items,
                        snap_root,
                        &snap_dest,
                        &opts,
                        &mut total_errors,
                        "",
                    )?;
                }
            }
        }

        if total_errors > 0 {
            eprintln!("warning: {total_errors} error(s) during restore");
        }

        Ok(())
    }
}
297
/// Flattened copy of the CLI flags plus borrowed lookup state, threaded
/// through the whole restore walk.
struct RestoreOpts<'a> {
    dry_run: bool,       // print paths instead of writing anything
    overwrite: bool,     // replace existing files/symlinks
    metadata: bool,      // apply owner/mode/timestamps after writing
    symlinks: bool,      // recreate symlinks
    snapshots: bool,     // descend into subvolume/snapshot trees
    xattr: bool,         // restore extended attributes
    ignore_errors: bool, // warn-and-count instead of aborting
    verbose: u8,         // -v occurrence count
    // Compiled --path-regex filter, if any.
    path_regex: Option<&'a Regex>,
    // objectid -> (root bytenr, _); only the bytenr is used here —
    // NOTE(review): second tuple field's meaning not visible in this file.
    tree_roots: &'a std::collections::BTreeMap<u64, (u64, u64)>,
}
310
/// In-memory snapshot of one FS tree's leaf items, grouped by the key's
/// objectid (i.e. inode number).
struct FsTreeItems {
    // objectid -> (key, raw item payload bytes), in leaf scan order.
    items: HashMap<u64, Vec<(DiskKey, Vec<u8>)>>,
}
315
316impl FsTreeItems {
317 fn get(&self, objectid: u64, key_type: KeyType) -> Vec<(&DiskKey, &[u8])> {
319 self.items
320 .get(&objectid)
321 .map(|v| {
322 v.iter()
323 .filter(|(k, _)| k.key_type == key_type)
324 .map(|(k, d)| (k, d.as_slice()))
325 .collect()
326 })
327 .unwrap_or_default()
328 }
329
330 fn has_key_type(&self, key_type: KeyType) -> Option<u64> {
332 for (oid, entries) in &self.items {
333 if entries.iter().any(|(k, _)| k.key_type == key_type) {
334 return Some(*oid);
335 }
336 }
337 None
338 }
339}
340
341fn collect_fs_tree_items<R: Read + Seek>(
343 reader: &mut reader::BlockReader<R>,
344 root_bytenr: u64,
345 ignore_errors: bool,
346) -> Result<FsTreeItems> {
347 let mut items: HashMap<u64, Vec<(DiskKey, Vec<u8>)>> = HashMap::new();
348 let mut errors = 0u64;
349 collect_items_dfs(
350 reader,
351 root_bytenr,
352 &mut items,
353 ignore_errors,
354 &mut errors,
355 )?;
356 if errors > 0 {
357 eprintln!(
358 "warning: {errors} tree block(s) could not be read during scan"
359 );
360 }
361 Ok(FsTreeItems { items })
362}
363
/// Depth-first walk of the tree block at `logical`, appending each leaf
/// item's `(key, raw payload)` to `items`, grouped by key objectid.
///
/// With `ignore_errors`, an unreadable block is warned about and counted
/// in `errors` (its whole subtree is lost); otherwise the error aborts
/// the walk.
///
/// NOTE(review): recursion depth equals tree height (normally small), but
/// there is no cycle or depth guard — a corrupted tree with
/// self-referencing node pointers could recurse without bound.
fn collect_items_dfs<R: Read + Seek>(
    reader: &mut reader::BlockReader<R>,
    logical: u64,
    items: &mut HashMap<u64, Vec<(DiskKey, Vec<u8>)>>,
    ignore_errors: bool,
    errors: &mut u64,
) -> Result<()> {
    let block = match reader.read_tree_block(logical) {
        Ok(b) => b,
        Err(e) => {
            if ignore_errors {
                eprintln!(
                    "warning: skipping unreadable tree block at \
                     logical {logical}: {e}"
                );
                *errors += 1;
                return Ok(());
            }
            return Err(e).with_context(|| {
                format!("failed to read tree block at {logical}")
            });
        }
    };

    match &block {
        TreeBlock::Leaf {
            items: leaf_items,
            data,
            ..
        } => {
            // Item offsets are relative to the end of the block header.
            let header_size = std::mem::size_of::<raw::btrfs_header>();
            for item in leaf_items {
                let start = header_size + item.offset as usize;
                let end = start + item.size as usize;
                // Silently drop items whose payload would run past the
                // end of the block (corruption).
                if end <= data.len() {
                    items
                        .entry(item.key.objectid)
                        .or_default()
                        .push((item.key, data[start..end].to_vec()));
                }
            }
        }
        TreeBlock::Node { ptrs, .. } => {
            // Interior node: recurse into every child block pointer.
            for ptr in ptrs {
                collect_items_dfs(
                    reader,
                    ptr.blockptr,
                    items,
                    ignore_errors,
                    errors,
                )?;
            }
        }
    }

    Ok(())
}
421
422fn find_first_dir(items: &FsTreeItems) -> Result<u64> {
424 items
425 .has_key_type(KeyType::DirIndex)
426 .context("no directory entry found in tree")
427}
428
/// Recursively restores the directory with inode `dir_ino` into
/// `output_path`, driven by the tree's DIR_INDEX items.
///
/// `prefix` is the in-filesystem path of `dir_ino` (used for --path-regex
/// matching); the restore root passes "". Failures are propagated, or —
/// with --ignore-errors — warned about and counted in `errors`.
fn restore_dir<R: Read + Seek>(
    reader: &mut reader::BlockReader<R>,
    items: &FsTreeItems,
    dir_ino: u64,
    output_path: &Path,
    opts: &RestoreOpts,
    errors: &mut u64,
    prefix: &str,
) -> Result<()> {
    // DIR_INDEX items enumerate this directory's children.
    let dir_entries = items.get(dir_ino, KeyType::DirIndex);

    for (_key, data) in &dir_entries {
        let parsed = DirItem::parse_all(data);
        for entry in parsed {
            // Names are usually UTF-8; fall back to lossy decoding so odd
            // names still restore under a mangled filename.
            let name = match std::str::from_utf8(&entry.name) {
                Ok(s) => s.to_string(),
                Err(_) => String::from_utf8_lossy(&entry.name).into_owned(),
            };
            let child_path = output_path.join(&name);
            let child_ino = entry.location.objectid;

            // Path as it appeared inside the filesystem, for the regex.
            let rel_path = if prefix.is_empty() {
                format!("/{name}")
            } else {
                format!("{prefix}/{name}")
            };

            if let Some(re) = opts.path_regex
                && !re.is_match(&rel_path)
            {
                continue;
            }

            // A ROOT_ITEM location marks a subvolume/snapshot boundary:
            // the child lives in its own tree, not in `items`.
            if entry.location.key_type == KeyType::RootItem {
                if opts.snapshots {
                    let subvol_oid = entry.location.objectid;
                    if let Some(&(bytenr, _)) = opts.tree_roots.get(&subvol_oid)
                        && let Err(e) = restore_snapshot(
                            reader,
                            bytenr,
                            &child_path,
                            opts,
                            errors,
                            &rel_path,
                        )
                    {
                        if !opts.ignore_errors {
                            return Err(e);
                        }
                        eprintln!(
                            "warning: failed to restore snapshot '{}': {e}",
                            child_path.display()
                        );
                        *errors += 1;
                    }
                } else {
                    eprintln!("Skipping snapshot {} (use -s to restore)", name);
                }
                continue;
            }

            match entry.file_type {
                FileType::Dir => {
                    if opts.dry_run {
                        println!("{}/", child_path.display());
                    } else {
                        if opts.verbose >= 1 {
                            eprintln!("Restoring {}/", child_path.display());
                        }
                        if let Err(e) = fs::create_dir_all(&child_path) {
                            if !opts.ignore_errors {
                                return Err(e).with_context(|| {
                                    format!(
                                        "failed to create directory '{}'",
                                        child_path.display()
                                    )
                                });
                            }
                            eprintln!(
                                "warning: failed to create '{}': {e}",
                                child_path.display()
                            );
                            *errors += 1;
                            // Can't descend into a directory we failed to
                            // create.
                            continue;
                        }
                    }
                    restore_dir(
                        reader,
                        items,
                        child_ino,
                        &child_path,
                        opts,
                        errors,
                        &rel_path,
                    )?;
                    // Apply metadata after the recursion so writing the
                    // children doesn't clobber the directory's timestamps.
                    if opts.metadata && !opts.dry_run {
                        apply_metadata(
                            items,
                            child_ino,
                            &child_path,
                            opts,
                            errors,
                        );
                    }
                }
                FileType::RegFile => {
                    if let Err(e) = restore_file(
                        reader,
                        items,
                        child_ino,
                        &child_path,
                        opts,
                        errors,
                    ) {
                        if !opts.ignore_errors {
                            return Err(e);
                        }
                        eprintln!(
                            "warning: failed to restore '{}': {e}",
                            child_path.display()
                        );
                        *errors += 1;
                    }
                }
                FileType::Symlink if opts.symlinks => {
                    if let Err(e) =
                        restore_symlink(items, child_ino, &child_path, opts)
                    {
                        if !opts.ignore_errors {
                            return Err(e);
                        }
                        eprintln!(
                            "warning: failed to restore symlink '{}': {e}",
                            child_path.display()
                        );
                        *errors += 1;
                    }
                    if opts.metadata && !opts.dry_run {
                        apply_metadata(
                            items,
                            child_ino,
                            &child_path,
                            opts,
                            errors,
                        );
                    }
                }
                // Other types (fifos, sockets, devices, …) are skipped.
                _ => {}
            }

            if opts.xattr && !opts.dry_run {
                restore_xattrs(items, child_ino, &child_path, errors);
            }
        }
    }

    Ok(())
}
598
599fn restore_snapshot<R: Read + Seek>(
601 reader: &mut reader::BlockReader<R>,
602 bytenr: u64,
603 output_path: &Path,
604 opts: &RestoreOpts,
605 errors: &mut u64,
606 prefix: &str,
607) -> Result<()> {
608 let snap_items = collect_fs_tree_items(reader, bytenr, opts.ignore_errors)?;
609
610 if !opts.dry_run {
611 fs::create_dir_all(output_path).with_context(|| {
612 format!(
613 "failed to create snapshot directory '{}'",
614 output_path.display()
615 )
616 })?;
617 }
618
619 let snap_root = raw::BTRFS_FIRST_FREE_OBJECTID as u64;
620 restore_dir(
621 reader,
622 &snap_items,
623 snap_root,
624 output_path,
625 opts,
626 errors,
627 prefix,
628 )
629}
630
/// Restores the regular file with inode `ino` to `path` by replaying its
/// EXTENT_DATA items (inline, regular, and compressed extents), then
/// truncating to the inode's recorded size. Existing files are skipped
/// unless --overwrite.
fn restore_file<R: Read + Seek>(
    reader: &mut reader::BlockReader<R>,
    items: &FsTreeItems,
    ino: u64,
    path: &Path,
    opts: &RestoreOpts,
    errors: &mut u64,
) -> Result<()> {
    if opts.dry_run {
        println!("{}", path.display());
        return Ok(());
    }

    if path.exists() && !opts.overwrite {
        return Ok(());
    }

    if opts.verbose >= 1 {
        eprintln!("Restoring {}", path.display());
    }

    let mut file = OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open(path)
        .with_context(|| format!("failed to create '{}'", path.display()))?;

    // The inode's logical size; used at the end to trim padding written
    // by block-aligned extents (or to extend a trailing hole).
    let inode_size = items
        .get(ino, KeyType::InodeItem)
        .first()
        .and_then(|(_, d)| InodeItem::parse(d))
        .map(|i| i.size);

    let extent_items = items.get(ino, KeyType::ExtentData);

    for (key, data) in &extent_items {
        // Unparseable extents are skipped rather than fatal.
        let extent = match FileExtentItem::parse(data) {
            Some(e) => e,
            None => continue,
        };

        // Preallocated extents hold no data; the final set_len() leaves
        // them as holes.
        if extent.extent_type == FileExtentType::Prealloc {
            continue;
        }

        // The key offset is the extent's byte position within the file.
        let file_offset = key.offset;

        match &extent.body {
            FileExtentBody::Inline { inline_size } => {
                // Inline payload is the tail of the item data.
                // NOTE(review): assumes inline_size <= data.len(); a
                // corrupted item would panic on underflow here — confirm.
                let header_len = data.len() - inline_size;
                let inline_data = &data[header_len..];

                let output = if extent.compression != CompressionType::None {
                    decompress(
                        inline_data,
                        extent.ram_bytes as usize,
                        &extent.compression,
                    )
                    .with_context(|| {
                        format!(
                            "failed to decompress inline extent in '{}'",
                            path.display()
                        )
                    })?
                } else {
                    inline_data.to_vec()
                };

                file.seek(io::SeekFrom::Start(file_offset))?;
                file.write_all(&output).with_context(|| {
                    format!(
                        "failed to write inline extent to '{}'",
                        path.display()
                    )
                })?;
            }
            FileExtentBody::Regular {
                disk_bytenr,
                disk_num_bytes,
                offset,
                num_bytes,
            } => {
                // disk_bytenr == 0 marks a hole; nothing to write.
                if *disk_bytenr == 0 {
                    continue;
                }

                // Sanity-check the intra-extent offset before using it.
                if extent.compression == CompressionType::None
                    && *offset >= *disk_num_bytes
                {
                    eprintln!(
                        "warning: bogus extent offset {} >= disk_size {} \
                         in '{}'",
                        offset,
                        disk_num_bytes,
                        path.display()
                    );
                    *errors += 1;
                    continue;
                }
                if *offset > extent.ram_bytes {
                    eprintln!(
                        "warning: bogus extent offset {} > ram_bytes {} \
                         in '{}'",
                        offset,
                        extent.ram_bytes,
                        path.display()
                    );
                    *errors += 1;
                    continue;
                }

                if extent.compression != CompressionType::None {
                    // Compressed extents are read whole from disk,
                    // decompressed, then sliced by (offset, num_bytes).
                    let compressed = reader
                        .read_data(*disk_bytenr, *disk_num_bytes as usize)
                        .with_context(|| {
                            format!(
                                "failed to read compressed extent at logical {}",
                                disk_bytenr
                            )
                        })?;

                    let decompressed = decompress(
                        &compressed,
                        extent.ram_bytes as usize,
                        &extent.compression,
                    )
                    .with_context(|| {
                        format!(
                            "failed to decompress extent in '{}'",
                            path.display()
                        )
                    })?;

                    // Clamp the end to what actually decompressed.
                    // NOTE(review): if decompressed.len() < start this
                    // still panics — the ram_bytes check above only
                    // bounds `offset` against the *expected* length.
                    let start = *offset as usize;
                    let end = start + *num_bytes as usize;
                    let slice = if end <= decompressed.len() {
                        &decompressed[start..end]
                    } else {
                        &decompressed[start..]
                    };

                    file.seek(io::SeekFrom::Start(file_offset))?;
                    file.write_all(slice).with_context(|| {
                        format!(
                            "failed to write extent to '{}'",
                            path.display()
                        )
                    })?;
                } else {
                    // Uncompressed: read only the referenced byte range
                    // directly (disk_bytenr + offset).
                    let data_buf = reader
                        .read_data(disk_bytenr + offset, *num_bytes as usize)
                        .with_context(|| {
                            format!(
                                "failed to read extent at logical {}",
                                disk_bytenr
                            )
                        })?;

                    file.seek(io::SeekFrom::Start(file_offset))?;
                    file.write_all(&data_buf).with_context(|| {
                        format!(
                            "failed to write extent to '{}'",
                            path.display()
                        )
                    })?;
                }
            }
        }
    }

    // Trim block-aligned overshoot (or extend a trailing hole) to the
    // inode's recorded logical size.
    if let Some(size) = inode_size {
        file.set_len(size)?;
    }

    if opts.metadata {
        // Close the handle first so the metadata (mtime) isn't re-dirtied
        // by a pending write.
        drop(file);
        apply_metadata(items, ino, path, opts, errors);
    }

    Ok(())
}
826
827fn restore_symlink(
829 items: &FsTreeItems,
830 ino: u64,
831 path: &Path,
832 opts: &RestoreOpts,
833) -> Result<()> {
834 let extent_items = items.get(ino, KeyType::ExtentData);
835 let (_, data) = extent_items
836 .first()
837 .context("symlink has no EXTENT_DATA item")?;
838
839 let extent = FileExtentItem::parse(data)
840 .context("failed to parse symlink extent")?;
841
842 let target = match &extent.body {
843 FileExtentBody::Inline { inline_size } => {
844 let header_len = data.len() - inline_size;
845 &data[header_len..]
846 }
847 _ => bail!("symlink extent is not inline"),
848 };
849
850 let target_str = std::str::from_utf8(target)
851 .context("symlink target is not valid UTF-8")?;
852
853 if opts.dry_run {
854 println!("{} -> {}", path.display(), target_str);
855 return Ok(());
856 }
857
858 if path.exists() && !opts.overwrite {
859 return Ok(());
860 }
861
862 if opts.verbose >= 2 {
863 eprintln!("SYMLINK: '{}' => '{}'", path.display(), target_str);
864 }
865
866 if path.exists() {
868 fs::remove_file(path).ok();
869 }
870
871 symlink(target_str, path).with_context(|| {
872 format!("failed to create symlink '{}'", path.display())
873 })?;
874
875 Ok(())
876}
877
/// Best-effort restore of extended attributes for inode `ino` onto the
/// already-restored `path`. Each failure is warned about and counted in
/// `errors`; nothing is fatal here.
fn restore_xattrs(
    items: &FsTreeItems,
    ino: u64,
    path: &Path,
    errors: &mut u64,
) {
    // XATTR_ITEMs share the DirItem wire format: name + value pairs.
    let xattr_items = items.get(ino, KeyType::XattrItem);
    for (_, data) in &xattr_items {
        let entries = DirItem::parse_all(data);
        for entry in entries {
            // xattr names must be valid UTF-8 and NUL-free to cross the
            // C API boundary; otherwise the attribute is skipped.
            let name = match std::str::from_utf8(&entry.name) {
                Ok(s) => s,
                Err(_) => continue,
            };
            let c_path = match std::ffi::CString::new(
                path.as_os_str().as_encoded_bytes(),
            ) {
                Ok(p) => p,
                Err(_) => continue,
            };
            let c_name = match std::ffi::CString::new(name) {
                Ok(n) => n,
                Err(_) => continue,
            };
            // SAFETY: c_path and c_name are valid NUL-terminated strings
            // that outlive the call; the value pointer/length pair refers
            // to entry.data, which is alive for the duration of the call.
            // lsetxattr does not retain the pointers. flags=0 means
            // create-or-replace.
            let ret = unsafe {
                libc::lsetxattr(
                    c_path.as_ptr(),
                    c_name.as_ptr(),
                    entry.data.as_ptr().cast(),
                    entry.data.len(),
                    0,
                )
            };
            if ret < 0 {
                let err = io::Error::last_os_error();
                eprintln!(
                    "warning: failed to set xattr '{name}' on '{}': {err}",
                    path.display()
                );
                *errors += 1;
            }
        }
    }
}
924
/// Applies owner, permission bits, and atime/mtime from the on-disk inode
/// to the restored `path`. Failures are warned about and counted; with
/// !ignore_errors a chown/chmod failure stops further metadata steps for
/// this path (the function returns, it does not propagate an error).
fn apply_metadata(
    items: &FsTreeItems,
    ino: u64,
    path: &Path,
    opts: &RestoreOpts,
    errors: &mut u64,
) {
    // Silently skip inodes with no (parseable) INODE_ITEM.
    let inode_items = items.get(ino, KeyType::InodeItem);
    let Some((_, data)) = inode_items.first() else {
        return;
    };
    let Some(inode) = InodeItem::parse(data) else {
        return;
    };

    // Paths with interior NUL bytes can't cross the C API boundary.
    let c_path =
        match std::ffi::CString::new(path.as_os_str().as_encoded_bytes()) {
            Ok(p) => p,
            Err(_) => return,
        };

    // SAFETY: c_path is a valid NUL-terminated string for the duration of
    // every call below; `times` is a valid 2-element timespec array
    // (atime, mtime) as utimensat requires.
    unsafe {
        // lchown: set ownership without following a symlink.
        if libc::lchown(c_path.as_ptr(), inode.uid, inode.gid) < 0 {
            let err = io::Error::last_os_error();
            eprintln!("warning: failed to chown '{}': {err}", path.display());
            *errors += 1;
            if !opts.ignore_errors {
                return;
            }
        }
        // chmod follows symlinks, so skip it for symlinks (there is no
        // portable lchmod). Mask to the permission + suid/sgid/sticky bits.
        if !path.is_symlink()
            && libc::chmod(c_path.as_ptr(), inode.mode & 0o7777) < 0
        {
            let err = io::Error::last_os_error();
            eprintln!("warning: failed to chmod '{}': {err}", path.display());
            *errors += 1;
            if !opts.ignore_errors {
                return;
            }
        }

        let times = [
            libc::timespec {
                tv_sec: inode.atime.sec as i64,
                tv_nsec: inode.atime.nsec as i64,
            },
            libc::timespec {
                tv_sec: inode.mtime.sec as i64,
                tv_nsec: inode.mtime.nsec as i64,
            },
        ];
        // AT_SYMLINK_NOFOLLOW sets the timestamps on the link itself.
        if libc::utimensat(
            libc::AT_FDCWD,
            c_path.as_ptr(),
            times.as_ptr(),
            libc::AT_SYMLINK_NOFOLLOW,
        ) < 0
        {
            let err = io::Error::last_os_error();
            eprintln!(
                "warning: failed to set times on '{}': {err}",
                path.display()
            );
            *errors += 1;
        }
    }
}
995
996fn decompress(
998 data: &[u8],
999 output_len: usize,
1000 compression: &CompressionType,
1001) -> Result<Vec<u8>> {
1002 match compression {
1003 CompressionType::None => Ok(data.to_vec()),
1004 CompressionType::Zlib => {
1005 let mut decoder = flate2::read::ZlibDecoder::new(data);
1006 let mut out = vec![0u8; output_len];
1007 decoder
1008 .read_exact(&mut out)
1009 .context("zlib decompression failed")?;
1010 Ok(out)
1011 }
1012 CompressionType::Zstd => zstd::bulk::decompress(data, output_len)
1013 .context("zstd decompression failed"),
1014 CompressionType::Lzo => decompress_lzo(data, output_len),
1015 CompressionType::Unknown(t) => {
1016 bail!("unsupported compression type {t}")
1017 }
1018 }
1019}
1020
/// Decompresses a btrfs LZO-framed extent.
///
/// On-disk framing: a 4-byte little-endian total length, then a sequence
/// of segments, each a 4-byte LE compressed length followed by that many
/// compressed bytes. A segment-length header never straddles a 4 KiB
/// sector boundary; the short tail of a sector is padding and is skipped.
fn decompress_lzo(data: &[u8], output_len: usize) -> Result<Vec<u8>> {
    const SECTOR_SIZE: usize = 4096;

    if data.len() < 4 {
        bail!("LZO data too short for header");
    }
    // Total framed length, including this 4-byte header.
    let total_len = u32::from_le_bytes(data[0..4].try_into().unwrap()) as usize;
    if total_len > data.len() {
        bail!(
            "LZO total length {total_len} exceeds data length {}",
            data.len()
        );
    }

    let mut out = Vec::with_capacity(output_len);
    let mut pos = 4;

    while pos < total_len && out.len() < output_len {
        // If fewer than 4 bytes remain in the current sector, the next
        // segment header starts at the next sector boundary; skip the pad.
        let sector_remaining = SECTOR_SIZE - (pos % SECTOR_SIZE);
        if sector_remaining < 4 {
            if total_len - pos <= sector_remaining {
                // Only padding left — done.
                break;
            }
            pos += sector_remaining;
        }

        if pos + 4 > total_len {
            bail!("LZO segment header truncated at offset {pos}");
        }
        let seg_len =
            u32::from_le_bytes(data[pos..pos + 4].try_into().unwrap()) as usize;
        pos += 4;

        if pos + seg_len > data.len() {
            bail!(
                "LZO segment data truncated at offset {pos}, \
                 need {seg_len} bytes"
            );
        }

        // Each segment decompresses to at most one sector of output.
        // NOTE(review): this sizes the output buffer to exactly
        // min(SECTOR_SIZE, remaining) and appends the whole buffer —
        // assumes lzokay fills it completely (or errors on mismatch);
        // confirm behavior for a short final segment.
        let remaining = (output_len - out.len()).min(SECTOR_SIZE);
        let mut segment_out = vec![0u8; remaining];
        lzokay::decompress::decompress(
            &data[pos..pos + seg_len],
            &mut segment_out,
        )
        .map_err(|e| {
            anyhow::anyhow!("LZO decompression failed at offset {pos}: {e:?}")
        })?;
        out.extend_from_slice(&segment_out);

        pos += seg_len;
    }

    // Never return more than the caller asked for.
    out.truncate(output_len);
    Ok(out)
}
1082
1083fn list_roots<R: Read + Seek>(
1085 reader: &mut reader::BlockReader<R>,
1086 root_bytenr: u64,
1087) -> Result<()> {
1088 let mut entries: Vec<(DiskKey, RootItem)> = Vec::new();
1089 collect_root_items_for_listing(reader, root_bytenr, &mut entries)?;
1090
1091 entries.sort_by_key(|(k, _)| k.objectid);
1093
1094 for (key, root_item) in &entries {
1095 println!(
1096 " tree key ({} ROOT_ITEM {}) {} level {}",
1097 key.objectid, key.offset, root_item.bytenr, root_item.level
1098 );
1099 }
1100
1101 Ok(())
1102}
1103
1104fn collect_root_items_for_listing<R: Read + Seek>(
1105 reader: &mut reader::BlockReader<R>,
1106 logical: u64,
1107 out: &mut Vec<(DiskKey, RootItem)>,
1108) -> Result<()> {
1109 let block = reader
1110 .read_tree_block(logical)
1111 .with_context(|| format!("failed to read tree block at {logical}"))?;
1112
1113 match &block {
1114 TreeBlock::Leaf {
1115 items: leaf_items,
1116 data,
1117 ..
1118 } => {
1119 let header_size = std::mem::size_of::<raw::btrfs_header>();
1120 for item in leaf_items {
1121 if item.key.key_type != KeyType::RootItem {
1122 continue;
1123 }
1124 let start = header_size + item.offset as usize;
1125 let end = start + item.size as usize;
1126 if end > data.len() {
1127 continue;
1128 }
1129 if let Some(ri) = RootItem::parse(&data[start..end]) {
1130 out.push((item.key, ri));
1131 }
1132 }
1133 }
1134 TreeBlock::Node { ptrs, .. } => {
1135 for ptr in ptrs {
1136 collect_root_items_for_listing(reader, ptr.blockptr, out)?;
1137 }
1138 }
1139 }
1140
1141 Ok(())
1142}