1use crate::util::get_uuid;
10use crate::{
11 raw,
12 tree::{DiskKey, KeyType, ObjectId},
13 util::{raw_crc32c, write_disk_key},
14};
15use bytes::{Buf, BufMut};
16use std::{fmt, mem};
17use uuid::Uuid;
18
bitflags::bitflags! {
    /// Block-group / chunk type and replication-profile flags
    /// (`BTRFS_BLOCK_GROUP_*`).
    ///
    /// The type bits (DATA / SYSTEM / METADATA) say what the chunk stores;
    /// the profile bits (RAID*, DUP) say how it is replicated.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct BlockGroupFlags: u64 {
        const DATA = raw::BTRFS_BLOCK_GROUP_DATA as u64;
        const SYSTEM = raw::BTRFS_BLOCK_GROUP_SYSTEM as u64;
        const METADATA = raw::BTRFS_BLOCK_GROUP_METADATA as u64;
        const RAID0 = raw::BTRFS_BLOCK_GROUP_RAID0 as u64;
        const RAID1 = raw::BTRFS_BLOCK_GROUP_RAID1 as u64;
        const DUP = raw::BTRFS_BLOCK_GROUP_DUP as u64;
        const RAID10 = raw::BTRFS_BLOCK_GROUP_RAID10 as u64;
        const RAID5 = raw::BTRFS_BLOCK_GROUP_RAID5 as u64;
        const RAID6 = raw::BTRFS_BLOCK_GROUP_RAID6 as u64;
        const RAID1C3 = raw::BTRFS_BLOCK_GROUP_RAID1C3 as u64;
        const RAID1C4 = raw::BTRFS_BLOCK_GROUP_RAID1C4 as u64;

        /// Pseudo-bit for the "single" profile (no replication); not a
        /// stored chunk-type bit on disk.
        const SINGLE = raw::BTRFS_AVAIL_ALLOC_BIT_SINGLE;

        /// Pseudo-bit used in space-info reporting for the global
        /// block reserve.
        const GLOBAL_RSV = raw::BTRFS_SPACE_INFO_GLOBAL_RSV;
    }
}
47
48impl BlockGroupFlags {
49 #[must_use]
51 pub fn type_name(self) -> &'static str {
52 if self.contains(Self::GLOBAL_RSV) {
53 return "GlobalReserve";
54 }
55 let ty = self & (Self::DATA | Self::SYSTEM | Self::METADATA);
56 match ty {
57 t if t == Self::DATA => "Data",
58 t if t == Self::SYSTEM => "System",
59 t if t == Self::METADATA => "Metadata",
60 t if t == Self::DATA | Self::METADATA => "Data+Metadata",
61 _ => "unknown",
62 }
63 }
64
65 #[must_use]
67 pub fn profile_name(self) -> &'static str {
68 let profile = self
69 & (Self::RAID0
70 | Self::RAID1
71 | Self::DUP
72 | Self::RAID10
73 | Self::RAID5
74 | Self::RAID6
75 | Self::RAID1C3
76 | Self::RAID1C4
77 | Self::SINGLE);
78 match profile {
79 p if p == Self::RAID0 => "RAID0",
80 p if p == Self::RAID1 => "RAID1",
81 p if p == Self::DUP => "DUP",
82 p if p == Self::RAID10 => "RAID10",
83 p if p == Self::RAID5 => "RAID5",
84 p if p == Self::RAID6 => "RAID6",
85 p if p == Self::RAID1C3 => "RAID1C3",
86 p if p == Self::RAID1C4 => "RAID1C4",
87 _ => "single",
89 }
90 }
91}
92
bitflags::bitflags! {
    /// Per-inode flags (`BTRFS_INODE_*`) as stored in `btrfs_inode_item`.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct InodeFlags: u64 {
        const NODATASUM = raw::BTRFS_INODE_NODATASUM as u64;
        const NODATACOW = raw::BTRFS_INODE_NODATACOW as u64;
        const READONLY = raw::BTRFS_INODE_READONLY as u64;
        const NOCOMPRESS = raw::BTRFS_INODE_NOCOMPRESS as u64;
        const PREALLOC = raw::BTRFS_INODE_PREALLOC as u64;
        const SYNC = raw::BTRFS_INODE_SYNC as u64;
        const IMMUTABLE = raw::BTRFS_INODE_IMMUTABLE as u64;
        const APPEND = raw::BTRFS_INODE_APPEND as u64;
        const NODUMP = raw::BTRFS_INODE_NODUMP as u64;
        const NOATIME = raw::BTRFS_INODE_NOATIME as u64;
        const DIRSYNC = raw::BTRFS_INODE_DIRSYNC as u64;
        const COMPRESS = raw::BTRFS_INODE_COMPRESS as u64;
        const ROOT_ITEM_INIT = raw::BTRFS_INODE_ROOT_ITEM_INIT as u64;
        // Keep unknown bits instead of dropping them on round-trip.
        const _ = !0;
    }
}
114
115impl fmt::Display for InodeFlags {
116 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
117 const NAMES: &[(InodeFlags, &str)] = &[
118 (InodeFlags::NODATASUM, "NODATASUM"),
119 (InodeFlags::NODATACOW, "NODATACOW"),
120 (InodeFlags::READONLY, "READONLY"),
121 (InodeFlags::NOCOMPRESS, "NOCOMPRESS"),
122 (InodeFlags::PREALLOC, "PREALLOC"),
123 (InodeFlags::SYNC, "SYNC"),
124 (InodeFlags::IMMUTABLE, "IMMUTABLE"),
125 (InodeFlags::APPEND, "APPEND"),
126 (InodeFlags::NODUMP, "NODUMP"),
127 (InodeFlags::NOATIME, "NOATIME"),
128 (InodeFlags::DIRSYNC, "DIRSYNC"),
129 (InodeFlags::COMPRESS, "COMPRESS"),
130 (InodeFlags::ROOT_ITEM_INIT, "ROOT_ITEM_INIT"),
131 ];
132 let known: InodeFlags = NAMES
133 .iter()
134 .fold(InodeFlags::empty(), |a, &(flag, _)| a | flag);
135 let mut parts: Vec<String> = NAMES
136 .iter()
137 .filter(|&&(flag, _)| self.contains(flag))
138 .map(|&(_, name)| name.to_string())
139 .collect();
140 let unknown = *self & !known;
141 if !unknown.is_empty() {
142 parts.push(format!("UNKNOWN: 0x{:x}", unknown.bits()));
143 }
144 if parts.is_empty() {
145 write!(f, "none")
146 } else {
147 write!(f, "{}", parts.join("|"))
148 }
149 }
150}
/// A btrfs on-disk timestamp (`btrfs_timespec`).
#[derive(Debug, Clone, Copy)]
pub struct Timespec {
    /// Seconds since the Unix epoch.
    pub sec: u64,
    /// Nanosecond remainder.
    pub nsec: u32,
}
159
impl Timespec {
    /// Reads a timestamp (u64 seconds + u32 nanoseconds, little-endian)
    /// from the front of `buf`, advancing it by 12 bytes.
    fn parse(buf: &mut &[u8]) -> Self {
        Self {
            sec: buf.get_u64_le(),
            nsec: buf.get_u32_le(),
        }
    }

    /// Appends the timestamp in on-disk layout (12 bytes, little-endian).
    fn write_to(&self, buf: &mut Vec<u8>) {
        buf.put_u64_le(self.sec);
        buf.put_u32_le(self.nsec);
    }
}
174
/// File-extent compression algorithm, as encoded in `EXTENT_DATA` items.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CompressionType {
    /// No compression (raw byte 0).
    None,
    /// zlib (raw byte 1).
    Zlib,
    /// LZO (raw byte 2).
    Lzo,
    /// zstd (raw byte 3).
    Zstd,
    /// Any other value, preserved for round-tripping.
    Unknown(u8),
}
191
192impl CompressionType {
193 #[must_use]
195 pub fn from_raw(v: u8) -> Self {
196 match v {
197 0 => Self::None,
198 1 => Self::Zlib,
199 2 => Self::Lzo,
200 3 => Self::Zstd,
201 _ => Self::Unknown(v),
202 }
203 }
204
205 #[must_use]
207 pub fn name(&self) -> &'static str {
208 match self {
209 Self::None => "none",
210 Self::Zlib => "zlib",
211 Self::Lzo => "lzo",
212 Self::Zstd => "zstd",
213 Self::Unknown(_) => "unknown",
214 }
215 }
216
217 #[must_use]
219 pub fn to_raw(self) -> u8 {
220 match self {
221 Self::None => 0,
222 Self::Zlib => 1,
223 Self::Lzo => 2,
224 Self::Zstd => 3,
225 Self::Unknown(v) => v,
226 }
227 }
228}
229
/// Type of an `EXTENT_DATA` item (`BTRFS_FILE_EXTENT_*`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FileExtentType {
    /// Data stored inline in the item, after the header.
    Inline,
    /// A regular extent pointing at allocated disk space.
    Regular,
    /// A preallocated (unwritten) extent.
    Prealloc,
    /// Any other value, preserved for round-tripping.
    Unknown(u8),
}
242
impl FileExtentType {
    /// Decodes the on-disk extent-type byte.
    #[must_use]
    pub fn from_raw(v: u8) -> Self {
        // The raw constants are u32; widen the byte for comparison.
        match u32::from(v) {
            raw::BTRFS_FILE_EXTENT_INLINE => Self::Inline,
            raw::BTRFS_FILE_EXTENT_REG => Self::Regular,
            raw::BTRFS_FILE_EXTENT_PREALLOC => Self::Prealloc,
            _ => Self::Unknown(v),
        }
    }

    /// Lower-case type name ("unknown" for unrecognized bytes).
    #[must_use]
    pub fn name(&self) -> &'static str {
        match self {
            Self::Inline => "inline",
            Self::Regular => "regular",
            Self::Prealloc => "prealloc",
            Self::Unknown(_) => "unknown",
        }
    }

    /// Encodes back to the on-disk byte; `Unknown` round-trips its value.
    #[must_use]
    // Truncation is safe: the BTRFS_FILE_EXTENT_* constants fit in u8.
    #[allow(clippy::cast_possible_truncation)]
    pub fn to_raw(self) -> u8 {
        match self {
            Self::Inline => raw::BTRFS_FILE_EXTENT_INLINE as u8,
            Self::Regular => raw::BTRFS_FILE_EXTENT_REG as u8,
            Self::Prealloc => raw::BTRFS_FILE_EXTENT_PREALLOC as u8,
            Self::Unknown(v) => v,
        }
    }
}
278
/// Directory-entry file type (`BTRFS_FT_*`), as stored in dir items.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FileType {
    /// Explicit "unknown" type (raw value 0).
    Unknown,
    /// Regular file.
    RegFile,
    /// Directory.
    Dir,
    /// Character device.
    Chrdev,
    /// Block device.
    Blkdev,
    /// FIFO / named pipe.
    Fifo,
    /// Unix socket.
    Sock,
    /// Symbolic link.
    Symlink,
    /// Extended attribute entry.
    Xattr,
    /// Any unrecognized value, preserved for round-tripping.
    Other(u8),
}
303
impl FileType {
    /// Decodes the on-disk directory-entry type byte.
    #[must_use]
    pub fn from_raw(v: u8) -> Self {
        // The raw constants are u32; widen the byte for comparison.
        match u32::from(v) {
            raw::BTRFS_FT_UNKNOWN => Self::Unknown,
            raw::BTRFS_FT_REG_FILE => Self::RegFile,
            raw::BTRFS_FT_DIR => Self::Dir,
            raw::BTRFS_FT_CHRDEV => Self::Chrdev,
            raw::BTRFS_FT_BLKDEV => Self::Blkdev,
            raw::BTRFS_FT_FIFO => Self::Fifo,
            raw::BTRFS_FT_SOCK => Self::Sock,
            raw::BTRFS_FT_SYMLINK => Self::Symlink,
            raw::BTRFS_FT_XATTR => Self::Xattr,
            _ => Self::Other(v),
        }
    }

    /// Short upper-case type name.
    #[must_use]
    pub fn name(&self) -> &'static str {
        match self {
            // The explicit Unknown type and unrecognized values print
            // identically.
            Self::Unknown | Self::Other(_) => "UNKNOWN",
            Self::RegFile => "FILE",
            Self::Dir => "DIR",
            Self::Chrdev => "CHRDEV",
            Self::Blkdev => "BLKDEV",
            Self::Fifo => "FIFO",
            Self::Sock => "SOCK",
            Self::Symlink => "SYMLINK",
            Self::Xattr => "XATTR",
        }
    }
}
338
/// Parsed `btrfs_inode_item`.
#[derive(Debug, Clone)]
pub struct InodeItem {
    /// Generation (transaction id) when the inode was created.
    pub generation: u64,
    /// Transaction id of the last change.
    pub transid: u64,
    /// Logical file size in bytes.
    pub size: u64,
    /// Bytes of disk space consumed.
    pub nbytes: u64,
    /// Legacy block-group hint — presumably unused by modern kernels;
    /// TODO confirm against format docs.
    pub block_group: u64,
    /// Hard-link count.
    pub nlink: u32,
    /// Owner user id.
    pub uid: u32,
    /// Owner group id.
    pub gid: u32,
    /// POSIX mode bits (file type + permissions).
    pub mode: u32,
    /// Device number for character/block device nodes.
    pub rdev: u64,
    /// Inode flags (see [`InodeFlags`]).
    pub flags: InodeFlags,
    /// Sequence number — NOTE(review): exact semantics not visible here.
    pub sequence: u64,
    /// Last access time.
    pub atime: Timespec,
    /// Last inode-change time.
    pub ctime: Timespec,
    /// Last modification time.
    pub mtime: Timespec,
    /// Creation time.
    pub otime: Timespec,
}
378
impl InodeItem {
    /// Parses a `btrfs_inode_item`.
    ///
    /// Returns `None` when `data` is shorter than the on-disk struct.
    /// Field reads must stay in on-disk order.
    #[must_use]
    pub fn parse(data: &[u8]) -> Option<Self> {
        if data.len() < mem::size_of::<raw::btrfs_inode_item>() {
            return None;
        }
        let mut buf = data;
        Some(Self {
            generation: buf.get_u64_le(),
            transid: buf.get_u64_le(),
            size: buf.get_u64_le(),
            nbytes: buf.get_u64_le(),
            block_group: buf.get_u64_le(),
            nlink: buf.get_u32_le(),
            uid: buf.get_u32_le(),
            gid: buf.get_u32_le(),
            mode: buf.get_u32_le(),
            rdev: buf.get_u64_le(),
            flags: InodeFlags::from_bits_truncate(buf.get_u64_le()),
            sequence: buf.get_u64_le(),
            atime: {
                // Skip the 32 reserved bytes that sit between `sequence`
                // and the four timestamps.
                buf.advance(32);
                Timespec::parse(&mut buf)
            },
            ctime: Timespec::parse(&mut buf),
            mtime: Timespec::parse(&mut buf),
            otime: Timespec::parse(&mut buf),
        })
    }
}
412
/// Arguments for building a fresh `btrfs_inode_item`
/// (see [`InodeItemArgs::to_bytes`]).
pub struct InodeItemArgs {
    /// Creation generation (also written as the transid).
    pub generation: u64,
    /// Logical file size in bytes.
    pub size: u64,
    /// Bytes of disk space consumed.
    pub nbytes: u64,
    /// Hard-link count.
    pub nlink: u32,
    /// Owner user id.
    pub uid: u32,
    /// Owner group id.
    pub gid: u32,
    /// POSIX mode bits.
    pub mode: u32,
    /// Timestamp used for all four inode times.
    pub time: Timespec,
}
432
433impl InodeItemArgs {
434 #[must_use]
436 pub fn to_bytes(&self) -> Vec<u8> {
437 let mut buf = Vec::with_capacity(160);
438 buf.put_u64_le(self.generation);
439 buf.put_u64_le(self.generation); buf.put_u64_le(self.size);
441 buf.put_u64_le(self.nbytes);
442 buf.put_u64_le(0); buf.put_u32_le(self.nlink);
444 buf.put_u32_le(self.uid);
445 buf.put_u32_le(self.gid);
446 buf.put_u32_le(self.mode);
447 buf.put_u64_le(0); buf.put_u64_le(0); buf.put_u64_le(0); buf.extend_from_slice(&[0u8; 32]); for _ in 0..4 {
452 self.time.write_to(&mut buf);
453 }
454 debug_assert_eq!(buf.len(), 160);
455 buf
456 }
457}
458
/// One `btrfs_inode_ref` entry (link from an inode back to a parent
/// directory entry).
#[derive(Debug, Clone)]
pub struct InodeRef {
    /// Directory index of the entry in the parent.
    pub index: u64,
    /// Entry name (raw bytes, not necessarily UTF-8).
    pub name: Vec<u8>,
}
471
472impl InodeRef {
473 #[must_use]
475 pub fn parse_all(data: &[u8]) -> Vec<Self> {
476 let mut result = Vec::new();
477 let mut buf = data;
478 while buf.remaining() >= 10 {
479 let index = buf.get_u64_le();
480 let name_len = buf.get_u16_le() as usize;
481 if buf.remaining() < name_len {
482 break;
483 }
484 let name = buf[..name_len].to_vec();
485 buf.advance(name_len);
486 result.push(Self { index, name });
487 }
488 result
489 }
490
491 #[must_use]
495 pub fn serialize(index: u64, name: &[u8]) -> Vec<u8> {
496 let mut buf = Vec::with_capacity(10 + name.len());
497 buf.put_u64_le(index);
498 #[allow(clippy::cast_possible_truncation)]
499 buf.put_u16_le(name.len() as u16);
500 buf.extend_from_slice(name);
501 buf
502 }
503}
504
/// One `btrfs_inode_extref` entry (extended inode ref, used when the
/// regular ref item overflows).
#[derive(Debug, Clone)]
pub struct InodeExtref {
    /// Object id of the parent directory.
    pub parent: u64,
    /// Directory index of the entry in the parent.
    pub index: u64,
    /// Entry name (raw bytes, not necessarily UTF-8).
    pub name: Vec<u8>,
}
519
520impl InodeExtref {
521 #[must_use]
523 pub fn parse_all(data: &[u8]) -> Vec<Self> {
524 let mut result = Vec::new();
525 let mut buf = data;
526 while buf.remaining() >= 18 {
527 let parent = buf.get_u64_le();
528 let index = buf.get_u64_le();
529 let name_len = buf.get_u16_le() as usize;
530 if buf.remaining() < name_len {
531 break;
532 }
533 let name = buf[..name_len].to_vec();
534 buf.advance(name_len);
535 result.push(Self {
536 parent,
537 index,
538 name,
539 });
540 }
541 result
542 }
543}
544
/// One parsed `btrfs_dir_item` (directory entry or xattr).
#[derive(Debug, Clone)]
pub struct DirItem {
    /// Key of the item this entry points at (inode or root).
    pub location: DiskKey,
    /// Transaction id when the entry was created.
    pub transid: u64,
    /// File type byte, decoded.
    pub file_type: FileType,
    /// Entry name (raw bytes, not necessarily UTF-8).
    pub name: Vec<u8>,
    /// Trailing payload (non-empty for xattr items).
    pub data: Vec<u8>,
}
563
564impl DirItem {
565 #[must_use]
567 pub fn parse_all(data: &[u8]) -> Vec<Self> {
568 let mut result = Vec::new();
569 let dir_item_size = mem::size_of::<raw::btrfs_dir_item>();
570 let mut buf = data;
571
572 while buf.remaining() >= dir_item_size {
573 let location = DiskKey::parse(buf, 0);
574 buf.advance(17); let transid = buf.get_u64_le();
576 let data_len = buf.get_u16_le() as usize;
577 let name_len = buf.get_u16_le() as usize;
578 let file_type = FileType::from_raw(buf.get_u8());
579
580 if buf.remaining() < name_len + data_len {
581 break;
582 }
583 let name = buf[..name_len].to_vec();
584 buf.advance(name_len);
585 let item_data = buf[..data_len].to_vec();
586 buf.advance(data_len);
587 result.push(Self {
588 location,
589 transid,
590 file_type,
591 name,
592 data: item_data,
593 });
594 }
595 result
596 }
597
598 #[must_use]
603 pub fn serialize(
604 location: &DiskKey,
605 transid: u64,
606 file_type: u8,
607 name: &[u8],
608 ) -> Vec<u8> {
609 let mut buf = Vec::with_capacity(30 + name.len());
610 let key_off = buf.len();
611 buf.extend_from_slice(&[0u8; 17]);
612 write_disk_key(&mut buf[key_off..], 0, location);
613 buf.put_u64_le(transid);
614 buf.put_u16_le(0); #[allow(clippy::cast_possible_truncation)]
616 buf.put_u16_le(name.len() as u16);
617 buf.put_u8(file_type);
618 buf.extend_from_slice(name);
619 buf
620 }
621}
622
bitflags::bitflags! {
    /// Flags stored in `btrfs_root_item.flags`.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct RootItemFlags: u64 {
        /// Subvolume is read-only.
        const RDONLY = raw::BTRFS_ROOT_SUBVOL_RDONLY as u64;
        /// Subvolume is scheduled for deletion.
        const DEAD = raw::BTRFS_ROOT_SUBVOL_DEAD;
        // Keep unknown bits instead of dropping them on round-trip.
        const _ = !0;
    }
}
633
634impl fmt::Display for RootItemFlags {
635 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
636 if self.contains(Self::RDONLY) {
637 write!(f, "RDONLY")
638 } else {
639 write!(f, "none")
640 }
641 }
642}
643
/// Parsed `btrfs_root_item` (metadata for one tree root / subvolume).
#[derive(Debug, Clone)]
pub struct RootItem {
    /// The embedded `btrfs_inode_item`, kept as raw bytes so it can be
    /// round-tripped verbatim by [`RootItem::to_bytes`].
    pub inode_data: Vec<u8>,
    /// Transaction id of the last change to this root.
    pub generation: u64,
    /// Object id of the root directory of the subvolume.
    pub root_dirid: u64,
    /// Byte address of the tree's root node.
    pub bytenr: u64,
    /// Size limit — presumably unused on current filesystems; TODO confirm.
    pub byte_limit: u64,
    /// Bytes used by the tree.
    pub bytes_used: u64,
    /// Generation of the most recent snapshot of this root.
    pub last_snapshot: u64,
    /// Root flags (see [`RootItemFlags`]).
    pub flags: RootItemFlags,
    /// Reference count.
    pub refs: u32,
    /// Resume key for an interrupted tree deletion.
    pub drop_progress: DiskKey,
    /// Tree level the interrupted deletion had reached.
    pub drop_level: u8,
    /// Level of the tree's root node.
    pub level: u8,
    /// v2 copy of the generation; zero on old-format items.
    pub generation_v2: u64,
    /// Subvolume UUID.
    pub uuid: Uuid,
    /// UUID of the parent subvolume (for snapshots), nil otherwise.
    pub parent_uuid: Uuid,
    /// UUID recorded by receive, nil otherwise.
    pub received_uuid: Uuid,
    /// Transaction id of the last change.
    pub ctransid: u64,
    /// Transaction id when the root was created.
    pub otransid: u64,
    /// Transaction id of the send side, else zero.
    pub stransid: u64,
    /// Transaction id of the receive side, else zero.
    pub rtransid: u64,
    /// Change time.
    pub ctime: Timespec,
    /// Creation time.
    pub otime: Timespec,
    /// Send time.
    pub stime: Timespec,
    /// Receive time.
    pub rtime: Timespec,
}
705
impl RootItem {
    /// Parses a `ROOT_ITEM`.
    ///
    /// Returns `None` only when `data` cannot hold the embedded inode
    /// item plus one `u64`. Every field past the legacy portion (drop
    /// key, v2 generation, UUIDs, transids, timestamps) is
    /// bounds-checked individually and falls back to zero/nil when the
    /// item is short, so old-format root items still parse.
    #[must_use]
    #[allow(clippy::too_many_lines)]
    pub fn parse(data: &[u8]) -> Option<Self> {
        let inode_size = mem::size_of::<raw::btrfs_inode_item>();
        if data.len() < inode_size + 8 {
            return None;
        }

        // Keep the embedded inode item as raw bytes for round-tripping.
        let inode_data = data[..inode_size].to_vec();
        let mut buf = &data[inode_size..];
        let generation = buf.get_u64_le();
        let root_dirid = buf.get_u64_le();
        let bytenr = buf.get_u64_le();
        let byte_limit = buf.get_u64_le();
        let bytes_used = buf.get_u64_le();
        let last_snapshot = buf.get_u64_le();
        let flags = RootItemFlags::from_bits_truncate(buf.get_u64_le());
        let refs = buf.get_u32_le();

        // drop_progress starts 60 bytes past the embedded inode item
        // (the 7 u64s + 1 u32 read above); the disk key is 17 bytes.
        let dp_off = inode_size + 60;
        let drop_progress = if dp_off + 17 <= data.len() {
            DiskKey::parse(data, dp_off)
        } else {
            DiskKey::parse(&[0; 17], 0)
        };
        let drop_level = if dp_off + 17 < data.len() {
            data[dp_off + 17]
        } else {
            0
        };

        // Remaining fields are located via offsets into the raw struct,
        // each guarded against a short (pre-v2) item.
        let level_off = mem::offset_of!(raw::btrfs_root_item, level);
        let level = if level_off < data.len() {
            data[level_off]
        } else {
            0
        };
        let generation_v2 = if level_off + 1 + 8 <= data.len() {
            let mut b = &data[level_off + 1..];
            b.get_u64_le()
        } else {
            0
        };

        // Three consecutive 16-byte UUIDs: uuid, parent_uuid, received_uuid.
        let uuid_off = mem::offset_of!(raw::btrfs_root_item, uuid);
        let uuid = if uuid_off + 16 <= data.len() {
            let mut b = &data[uuid_off..];
            get_uuid(&mut b)
        } else {
            Uuid::nil()
        };
        let parent_uuid = if uuid_off + 32 <= data.len() {
            let mut b = &data[uuid_off + 16..];
            get_uuid(&mut b)
        } else {
            Uuid::nil()
        };
        let received_uuid = if uuid_off + 48 <= data.len() {
            let mut b = &data[uuid_off + 32..];
            get_uuid(&mut b)
        } else {
            Uuid::nil()
        };

        // Four consecutive u64 transids: ctransid, otransid, stransid, rtransid.
        let ct_off = mem::offset_of!(raw::btrfs_root_item, ctransid);
        let ctransid = if ct_off + 8 <= data.len() {
            let mut b = &data[ct_off..];
            b.get_u64_le()
        } else {
            0
        };
        let otransid = if ct_off + 16 <= data.len() {
            let mut b = &data[ct_off + 8..];
            b.get_u64_le()
        } else {
            0
        };
        let stransid = if ct_off + 24 <= data.len() {
            let mut b = &data[ct_off + 16..];
            b.get_u64_le()
        } else {
            0
        };
        let rtransid = if ct_off + 32 <= data.len() {
            let mut b = &data[ct_off + 24..];
            b.get_u64_le()
        } else {
            0
        };

        // Four consecutive timespecs: ctime, otime, stime, rtime.
        let ctime_off = mem::offset_of!(raw::btrfs_root_item, ctime);
        let ts_size = mem::size_of::<raw::btrfs_timespec>();
        let ctime = if ctime_off + ts_size <= data.len() {
            let mut b = &data[ctime_off..];
            Timespec::parse(&mut b)
        } else {
            Timespec { sec: 0, nsec: 0 }
        };
        let otime = if ctime_off + 2 * ts_size <= data.len() {
            let mut b = &data[ctime_off + ts_size..];
            Timespec::parse(&mut b)
        } else {
            Timespec { sec: 0, nsec: 0 }
        };
        let stime = if ctime_off + 3 * ts_size <= data.len() {
            let mut b = &data[ctime_off + 2 * ts_size..];
            Timespec::parse(&mut b)
        } else {
            Timespec { sec: 0, nsec: 0 }
        };
        let rtime = if ctime_off + 4 * ts_size <= data.len() {
            let mut b = &data[ctime_off + 3 * ts_size..];
            Timespec::parse(&mut b)
        } else {
            Timespec { sec: 0, nsec: 0 }
        };

        Some(Self {
            inode_data,
            generation,
            root_dirid,
            bytenr,
            byte_limit,
            bytes_used,
            last_snapshot,
            flags,
            refs,
            drop_progress,
            drop_level,
            level,
            generation_v2,
            uuid,
            parent_uuid,
            received_uuid,
            ctransid,
            otransid,
            stransid,
            rtransid,
            ctime,
            otime,
            stime,
            rtime,
        })
    }

    /// Builds a minimal root item for a newly created internal tree:
    /// zeroed embedded inode, one reference, nil UUIDs, zero timestamps.
    #[must_use]
    pub fn new_internal(generation: u64, bytenr: u64, level: u8) -> Self {
        Self {
            // 160 zero bytes stand in for the embedded btrfs_inode_item.
            inode_data: vec![0u8; 160],
            generation,
            root_dirid: 0,
            bytenr,
            byte_limit: 0,
            bytes_used: 0,
            last_snapshot: 0,
            flags: RootItemFlags::empty(),
            refs: 1,
            drop_progress: DiskKey {
                objectid: 0,
                key_type: KeyType::from_raw(0),
                offset: 0,
            },
            drop_level: 0,
            level,
            generation_v2: generation,
            uuid: Uuid::nil(),
            parent_uuid: Uuid::nil(),
            received_uuid: Uuid::nil(),
            ctransid: 0,
            otransid: 0,
            stransid: 0,
            rtransid: 0,
            ctime: Timespec { sec: 0, nsec: 0 },
            otime: Timespec { sec: 0, nsec: 0 },
            stime: Timespec { sec: 0, nsec: 0 },
            rtime: Timespec { sec: 0, nsec: 0 },
        }
    }

    /// Serializes back to the full on-disk `btrfs_root_item` layout,
    /// zero-padding the trailing reserved area.
    #[must_use]
    pub fn to_bytes(&self) -> Vec<u8> {
        let mut buf = Vec::with_capacity(496);

        // Embedded inode item, written back verbatim.
        buf.extend_from_slice(&self.inode_data);

        buf.put_u64_le(self.generation);
        buf.put_u64_le(self.root_dirid);
        buf.put_u64_le(self.bytenr);
        buf.put_u64_le(self.byte_limit);
        buf.put_u64_le(self.bytes_used);
        buf.put_u64_le(self.last_snapshot);
        buf.put_u64_le(self.flags.bits());
        buf.put_u32_le(self.refs);

        // drop_progress: reserve 17 zero bytes, then write the key over them.
        let key_off = buf.len();
        buf.extend_from_slice(&[0u8; 17]);
        write_disk_key(&mut buf[key_off..], 0, &self.drop_progress);

        buf.put_u8(self.drop_level);
        buf.put_u8(self.level);
        buf.put_u64_le(self.generation_v2);

        buf.extend_from_slice(self.uuid.as_bytes());
        buf.extend_from_slice(self.parent_uuid.as_bytes());
        buf.extend_from_slice(self.received_uuid.as_bytes());

        buf.put_u64_le(self.ctransid);
        buf.put_u64_le(self.otransid);
        buf.put_u64_le(self.stransid);
        buf.put_u64_le(self.rtransid);

        self.ctime.write_to(&mut buf);
        self.otime.write_to(&mut buf);
        self.stime.write_to(&mut buf);
        self.rtime.write_to(&mut buf);

        // Pad with zeros so the item always has the full struct size.
        buf.resize(mem::size_of::<raw::btrfs_root_item>(), 0);
        buf
    }
}
942
/// Parsed `btrfs_root_ref` / `ROOT_BACKREF` item (links a subvolume to
/// the directory entry naming it).
#[derive(Debug, Clone)]
pub struct RootRef {
    /// Object id of the directory containing the entry.
    pub dirid: u64,
    /// Directory index of the entry.
    pub sequence: u64,
    /// Entry name (raw bytes, not necessarily UTF-8).
    pub name: Vec<u8>,
}
956
957impl RootRef {
958 #[must_use]
960 pub fn parse(data: &[u8]) -> Option<Self> {
961 if data.len() < mem::size_of::<raw::btrfs_root_ref>() {
962 return None;
963 }
964 let mut buf = data;
965 let dirid = buf.get_u64_le();
966 let sequence = buf.get_u64_le();
967 let name_len = buf.get_u16_le() as usize;
968 let name_start = mem::size_of::<raw::btrfs_root_ref>();
969 let name = if name_start + name_len <= data.len() {
970 data[name_start..name_start + name_len].to_vec()
971 } else {
972 Vec::new()
973 };
974 Some(Self {
975 dirid,
976 sequence,
977 name,
978 })
979 }
980
981 #[must_use]
992 pub fn serialize(dirid: u64, sequence: u64, name: &[u8]) -> Vec<u8> {
993 let header = mem::size_of::<raw::btrfs_root_ref>();
994 let mut buf = Vec::with_capacity(header + name.len());
995 buf.put_u64_le(dirid);
996 buf.put_u64_le(sequence);
997 buf.put_u16_le(
998 u16::try_from(name.len())
999 .expect("RootRef::serialize: name length does not fit in u16"),
1000 );
1001 buf.put_slice(name);
1002 buf
1003 }
1004}
1005
/// Parsed `EXTENT_DATA` item header plus its type-specific body.
#[derive(Debug, Clone)]
pub struct FileExtentItem {
    /// Transaction id when the extent was created.
    pub generation: u64,
    /// Uncompressed (in-memory) size of the extent's data.
    pub ram_bytes: u64,
    /// Compression algorithm of the stored data.
    pub compression: CompressionType,
    /// Extent type (inline / regular / prealloc).
    pub extent_type: FileExtentType,
    /// Type-specific payload description.
    pub body: FileExtentBody,
}
1025
/// Body of an `EXTENT_DATA` item, depending on the extent type.
#[derive(Debug, Clone)]
pub enum FileExtentBody {
    /// Inline extent: data follows the header inside the item itself.
    Inline {
        /// Number of inline payload bytes after the header.
        inline_size: usize,
    },
    /// Regular or preallocated extent pointing at disk space.
    Regular {
        /// Byte address of the extent on disk (0 means a hole).
        disk_bytenr: u64,
        /// Allocated size of the on-disk extent in bytes.
        disk_num_bytes: u64,
        /// Byte offset into the on-disk extent where this file range starts.
        offset: u64,
        /// Number of bytes of the extent used by this file range.
        num_bytes: u64,
    },
}
1046
1047impl FileExtentItem {
1048 #[must_use]
1050 pub fn parse(data: &[u8]) -> Option<Self> {
1051 if data.len() < 21 {
1052 return None;
1053 }
1054 let mut buf = data;
1055 let generation = buf.get_u64_le();
1056 let ram_bytes = buf.get_u64_le();
1057 let compression = CompressionType::from_raw(buf.get_u8());
1058 buf.advance(3); let extent_type = FileExtentType::from_raw(buf.get_u8());
1060
1061 let body = if extent_type == FileExtentType::Inline {
1062 FileExtentBody::Inline {
1063 inline_size: buf.remaining(),
1064 }
1065 } else if buf.remaining() >= 32 {
1066 FileExtentBody::Regular {
1067 disk_bytenr: buf.get_u64_le(),
1068 disk_num_bytes: buf.get_u64_le(),
1069 offset: buf.get_u64_le(),
1070 num_bytes: buf.get_u64_le(),
1071 }
1072 } else {
1073 return None;
1074 };
1075
1076 Some(Self {
1077 generation,
1078 ram_bytes,
1079 compression,
1080 extent_type,
1081 body,
1082 })
1083 }
1084
1085 pub const HEADER_SIZE: usize = 21;
1088
1089 pub const REGULAR_SIZE: usize = 53;
1092
1093 #[must_use]
1098 #[allow(clippy::too_many_arguments)]
1099 pub fn to_bytes_regular(
1100 generation: u64,
1101 ram_bytes: u64,
1102 compression: CompressionType,
1103 prealloc: bool,
1104 disk_bytenr: u64,
1105 disk_num_bytes: u64,
1106 offset: u64,
1107 num_bytes: u64,
1108 ) -> Vec<u8> {
1109 let extent_type = if prealloc {
1110 FileExtentType::Prealloc
1111 } else {
1112 FileExtentType::Regular
1113 };
1114 let mut buf = Vec::with_capacity(Self::REGULAR_SIZE);
1115 buf.put_u64_le(generation);
1116 buf.put_u64_le(ram_bytes);
1117 buf.put_u8(compression.to_raw());
1118 buf.put_u8(0); buf.put_u16_le(0); buf.put_u8(extent_type.to_raw());
1121 buf.put_u64_le(disk_bytenr);
1122 buf.put_u64_le(disk_num_bytes);
1123 buf.put_u64_le(offset);
1124 buf.put_u64_le(num_bytes);
1125 debug_assert_eq!(buf.len(), Self::REGULAR_SIZE);
1126 buf
1127 }
1128
1129 #[must_use]
1135 pub fn to_bytes_inline(
1136 generation: u64,
1137 ram_bytes: u64,
1138 compression: CompressionType,
1139 data: &[u8],
1140 ) -> Vec<u8> {
1141 let mut buf = Vec::with_capacity(Self::HEADER_SIZE + data.len());
1142 buf.put_u64_le(generation);
1143 buf.put_u64_le(ram_bytes);
1144 buf.put_u8(compression.to_raw());
1145 buf.put_u8(0); buf.put_u16_le(0); buf.put_u8(FileExtentType::Inline.to_raw());
1148 debug_assert_eq!(buf.len(), Self::HEADER_SIZE);
1149 buf.extend_from_slice(data);
1150 buf
1151 }
1152}
1153
/// Computes the key offset used for `EXTENT_DATA_REF` items.
///
/// Mirrors the kernel's `hash_extent_data_ref()`: CRC32C of the root id
/// combined with CRC32C over (objectid, offset). The shift by 31 (not
/// 32) is intentional and must match the on-disk format — do not "fix" it.
#[must_use]
pub fn extent_data_ref_hash(root: u64, objectid: u64, offset: u64) -> u64 {
    let high_crc = raw_crc32c(!0u32, &root.to_le_bytes());
    let low_crc = raw_crc32c(!0u32, &objectid.to_le_bytes());
    let low_crc = raw_crc32c(low_crc, &offset.to_le_bytes());
    (u64::from(high_crc) << 31) ^ u64::from(low_crc)
}
1164
/// One inline backref stored after an extent item's header.
#[derive(Debug, Clone)]
pub enum InlineRef {
    /// Tree block referenced directly by a root.
    TreeBlockBackref {
        /// The u64 stored after the type byte (here: the root id).
        ref_offset: u64,
        /// Id of the referencing tree root.
        root: u64,
    },
    /// Tree block referenced through a parent node (full backref).
    SharedBlockBackref {
        /// The u64 stored after the type byte (here: the parent bytenr).
        ref_offset: u64,
        /// Byte address of the parent tree block.
        parent: u64,
    },
    /// Data extent referenced by (root, inode, offset).
    ExtentDataBackref {
        /// Key-style offset: the CRC hash of (root, objectid, offset).
        ref_offset: u64,
        /// Id of the referencing tree root.
        root: u64,
        /// Inode object id owning the data.
        objectid: u64,
        /// File offset of the reference.
        offset: u64,
        /// Number of references from this (root, inode, offset).
        count: u32,
    },
    /// Data extent referenced through a parent tree block.
    SharedDataBackref {
        /// The u64 stored after the type byte (here: the parent bytenr).
        ref_offset: u64,
        /// Byte address of the parent tree block.
        parent: u64,
        /// Number of references from that block.
        count: u32,
    },
    /// Owner ref (simple-quota filesystems) naming the owning root.
    ExtentOwnerRef {
        /// The u64 stored after the type byte (here: the owning root id).
        ref_offset: u64,
        /// Id of the owning tree root.
        root: u64,
    },
}
1217
1218impl InlineRef {
1219 #[must_use]
1221 #[allow(clippy::cast_possible_truncation)]
1222 pub fn raw_type(&self) -> u8 {
1223 match self {
1224 Self::TreeBlockBackref { .. } => {
1225 raw::BTRFS_TREE_BLOCK_REF_KEY as u8
1226 }
1227 Self::SharedBlockBackref { .. } => {
1228 raw::BTRFS_SHARED_BLOCK_REF_KEY as u8
1229 }
1230 Self::ExtentDataBackref { .. } => {
1231 raw::BTRFS_EXTENT_DATA_REF_KEY as u8
1232 }
1233 Self::SharedDataBackref { .. } => {
1234 raw::BTRFS_SHARED_DATA_REF_KEY as u8
1235 }
1236 Self::ExtentOwnerRef { .. } => {
1237 raw::BTRFS_EXTENT_OWNER_REF_KEY as u8
1238 }
1239 }
1240 }
1241
1242 #[must_use]
1244 pub fn raw_offset(&self) -> u64 {
1245 match self {
1246 Self::TreeBlockBackref { ref_offset, .. }
1247 | Self::SharedBlockBackref { ref_offset, .. }
1248 | Self::ExtentDataBackref { ref_offset, .. }
1249 | Self::SharedDataBackref { ref_offset, .. }
1250 | Self::ExtentOwnerRef { ref_offset, .. } => *ref_offset,
1251 }
1252 }
1253}
1254
1255#[must_use]
1265#[allow(clippy::cast_possible_truncation)]
1266pub fn inline_ref_size(type_byte: u8) -> Option<usize> {
1267 match u32::from(type_byte) {
1268 raw::BTRFS_TREE_BLOCK_REF_KEY | raw::BTRFS_EXTENT_OWNER_REF_KEY => {
1269 Some(9)
1270 }
1271 raw::BTRFS_SHARED_BLOCK_REF_KEY => Some(9),
1272 raw::BTRFS_EXTENT_DATA_REF_KEY => Some(29),
1273 raw::BTRFS_SHARED_DATA_REF_KEY => Some(13),
1274 _ => None,
1275 }
1276}
1277
bitflags::bitflags! {
    /// Flags stored in `btrfs_extent_item.flags`.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct ExtentFlags: u64 {
        /// Extent holds file data.
        const DATA = raw::BTRFS_EXTENT_FLAG_DATA as u64;
        /// Extent is a tree (metadata) block.
        const TREE_BLOCK = raw::BTRFS_EXTENT_FLAG_TREE_BLOCK as u64;
        /// Tree block uses full (parent-based) backrefs.
        const FULL_BACKREF = raw::BTRFS_BLOCK_FLAG_FULL_BACKREF as u64;
        // Keep unknown bits instead of dropping them on round-trip.
        const _ = !0;
    }
}
1289
1290impl fmt::Display for ExtentFlags {
1291 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1292 let mut parts = Vec::new();
1293 if self.contains(Self::DATA) {
1294 parts.push("DATA");
1295 }
1296 if self.contains(Self::TREE_BLOCK) {
1297 parts.push("TREE_BLOCK");
1298 }
1299 if self.contains(Self::FULL_BACKREF) {
1300 parts.push("FULL_BACKREF");
1301 }
1302 write!(f, "{}", parts.join("|"))
1303 }
1304}
1305
/// Parsed `EXTENT_ITEM` / `METADATA_ITEM` including inline backrefs.
#[derive(Debug, Clone)]
pub struct ExtentItem {
    /// Total reference count.
    pub refs: u64,
    /// Transaction id when the extent was allocated.
    pub generation: u64,
    /// Extent flags (see [`ExtentFlags`]).
    pub flags: ExtentFlags,
    /// First key of the tree block; only on non-skinny metadata items.
    pub tree_block_key: Option<DiskKey>,
    /// Level of the tree block; only on non-skinny metadata items.
    pub tree_block_level: Option<u8>,
    /// Level taken from the item key's offset; only on skinny
    /// (`METADATA_ITEM`) items.
    pub skinny_level: Option<u64>,
    /// Inline backrefs that followed the header.
    pub inline_refs: Vec<InlineRef>,
}
1328
impl ExtentItem {
    /// True when this extent holds file data.
    #[must_use]
    pub fn is_data(&self) -> bool {
        self.flags.contains(ExtentFlags::DATA)
    }

    /// True when this extent is a tree (metadata) block.
    #[must_use]
    pub fn is_tree_block(&self) -> bool {
        self.flags.contains(ExtentFlags::TREE_BLOCK)
    }

    /// Parses an `EXTENT_ITEM` / `METADATA_ITEM` body and its trailing
    /// inline backrefs.
    ///
    /// `key` disambiguates the layouts: a non-skinny `ExtentItem` tree
    /// block embeds a (first key, level) pair after the header, while a
    /// skinny `MetadataItem` encodes the level in `key.offset`.
    /// Returns `None` when even the fixed header does not fit;
    /// inline-ref parsing stops (without failing) at the first unknown
    /// or truncated ref.
    #[must_use]
    pub fn parse(data: &[u8], key: &DiskKey) -> Option<Self> {
        use crate::tree::KeyType;

        if data.len() < mem::size_of::<raw::btrfs_extent_item>() {
            return None;
        }
        let mut buf = data;
        let refs = buf.get_u64_le();
        let generation = buf.get_u64_le();
        let flags = ExtentFlags::from_bits_truncate(buf.get_u64_le());

        let is_tree_block = flags.contains(ExtentFlags::TREE_BLOCK);

        let mut tree_block_key = None;
        let mut tree_block_level = None;
        // Non-skinny tree-block items embed a 17-byte first key plus a
        // 1-byte level; `> 17` means at least the 18 bytes needed.
        if is_tree_block
            && key.key_type == KeyType::ExtentItem
            && buf.remaining() > 17
        {
            tree_block_key = Some(DiskKey::parse(buf, 0));
            buf.advance(17);
            tree_block_level = Some(buf.get_u8());
        }

        // Skinny metadata items store the level in the key offset.
        let skinny_level =
            if key.key_type == KeyType::MetadataItem && is_tree_block {
                Some(key.offset)
            } else {
                None
            };

        let mut inline_refs = Vec::new();
        while buf.remaining() > 0 {
            let ref_type = buf.get_u8();
            // Every inline ref has a u64 right after the type byte;
            // substitute zero if the tail is truncated.
            let ref_offset = if buf.remaining() >= 8 {
                buf.get_u64_le()
            } else {
                0
            };

            match u32::from(ref_type) {
                raw::BTRFS_TREE_BLOCK_REF_KEY => {
                    inline_refs.push(InlineRef::TreeBlockBackref {
                        ref_offset,
                        root: ref_offset,
                    });
                }
                raw::BTRFS_SHARED_BLOCK_REF_KEY => {
                    inline_refs.push(InlineRef::SharedBlockBackref {
                        ref_offset,
                        parent: ref_offset,
                    });
                }
                raw::BTRFS_EXTENT_DATA_REF_KEY => {
                    // For data refs the u64 after the type byte is the
                    // root id; the key-style offset is the CRC hash,
                    // recomputed below from (root, objectid, offset).
                    let root = ref_offset;
                    if buf.remaining() >= 20 {
                        let oid = buf.get_u64_le();
                        let off = buf.get_u64_le();
                        let count = buf.get_u32_le();
                        let hash = extent_data_ref_hash(root, oid, off);
                        inline_refs.push(InlineRef::ExtentDataBackref {
                            ref_offset: hash,
                            root,
                            objectid: oid,
                            offset: off,
                            count,
                        });
                    } else {
                        break;
                    }
                }
                raw::BTRFS_SHARED_DATA_REF_KEY => {
                    if buf.remaining() >= 4 {
                        let count = buf.get_u32_le();
                        inline_refs.push(InlineRef::SharedDataBackref {
                            ref_offset,
                            parent: ref_offset,
                            count,
                        });
                    } else {
                        break;
                    }
                }
                raw::BTRFS_EXTENT_OWNER_REF_KEY => {
                    inline_refs.push(InlineRef::ExtentOwnerRef {
                        ref_offset,
                        root: ref_offset,
                    });
                }
                // Unknown ref type: its size is unknown, so stop here.
                _ => break,
            }
        }

        Some(Self {
            refs,
            generation,
            flags,
            tree_block_key,
            tree_block_level,
            skinny_level,
            inline_refs,
        })
    }

    /// Size of a skinny metadata item: 24-byte header + type byte +
    /// root id.
    pub const SKINNY_SIZE: usize = 33;

    /// Size of a non-skinny tree-block item: 24-byte header + 17-byte
    /// first key + level byte + type byte + root id.
    pub const NON_SKINNY_SIZE: usize = 51;

    /// Serializes a skinny (`METADATA_ITEM`) tree-block extent with a
    /// single inline tree-block backref.
    #[must_use]
    pub fn to_bytes_skinny(
        refs: u64,
        generation: u64,
        root_id: u64,
    ) -> Vec<u8> {
        let mut buf = Vec::with_capacity(Self::SKINNY_SIZE);
        buf.put_u64_le(refs);
        buf.put_u64_le(generation);
        buf.put_u64_le(ExtentFlags::TREE_BLOCK.bits());
        buf.put_u8(KeyType::TreeBlockRef.to_raw());
        buf.put_u64_le(root_id);
        debug_assert_eq!(buf.len(), Self::SKINNY_SIZE);
        buf
    }

    /// Serializes a non-skinny tree-block extent (embedded first key and
    /// level) with a single inline tree-block backref.
    #[must_use]
    pub fn to_bytes_non_skinny(
        refs: u64,
        generation: u64,
        root_id: u64,
        first_key: &DiskKey,
        level: u8,
    ) -> Vec<u8> {
        let mut buf = Vec::with_capacity(Self::NON_SKINNY_SIZE);
        buf.put_u64_le(refs);
        buf.put_u64_le(generation);
        buf.put_u64_le(ExtentFlags::TREE_BLOCK.bits());
        // Reserve 17 zero bytes, then write the key over them.
        let key_off = buf.len();
        buf.extend_from_slice(&[0u8; 17]);
        write_disk_key(&mut buf[key_off..], 0, first_key);
        buf.put_u8(level);
        buf.put_u8(KeyType::TreeBlockRef.to_raw());
        buf.put_u64_le(root_id);
        debug_assert_eq!(buf.len(), Self::NON_SKINNY_SIZE);
        buf
    }

    /// Size of a data extent item with one inline `EXTENT_DATA_REF`.
    pub const DATA_INLINE_SIZE: usize = 53;

    /// Serializes a data extent with a single inline extent-data backref.
    #[must_use]
    pub fn to_bytes_data(
        refs: u64,
        generation: u64,
        root: u64,
        objectid: u64,
        offset: u64,
        count: u32,
    ) -> Vec<u8> {
        let mut buf = Vec::with_capacity(Self::DATA_INLINE_SIZE);
        buf.put_u64_le(refs);
        buf.put_u64_le(generation);
        buf.put_u64_le(ExtentFlags::DATA.bits());
        buf.put_u8(KeyType::ExtentDataRef.to_raw());
        buf.put_u64_le(root);
        buf.put_u64_le(objectid);
        buf.put_u64_le(offset);
        buf.put_u32_le(count);
        debug_assert_eq!(buf.len(), Self::DATA_INLINE_SIZE);
        buf
    }
}
1538
/// Standalone `btrfs_extent_data_ref` item (non-inline data backref).
#[derive(Debug, Clone)]
pub struct ExtentDataRef {
    /// Id of the referencing tree root.
    pub root: u64,
    /// Inode object id owning the data.
    pub objectid: u64,
    /// File offset of the reference.
    pub offset: u64,
    /// Number of references from this (root, inode, offset).
    pub count: u32,
}
1554
1555impl ExtentDataRef {
1556 #[must_use]
1558 pub fn parse(data: &[u8]) -> Option<Self> {
1559 if data.len() < mem::size_of::<raw::btrfs_extent_data_ref>() {
1560 return None;
1561 }
1562 let mut buf = data;
1563 Some(Self {
1564 root: buf.get_u64_le(),
1565 objectid: buf.get_u64_le(),
1566 offset: buf.get_u64_le(),
1567 count: buf.get_u32_le(),
1568 })
1569 }
1570}
1571
/// Standalone `btrfs_shared_data_ref` item (just a reference count).
#[derive(Debug, Clone)]
pub struct SharedDataRef {
    /// Number of references from the parent block in the item key.
    pub count: u32,
}
1580
1581impl SharedDataRef {
1582 #[must_use]
1584 pub fn parse(data: &[u8]) -> Option<Self> {
1585 if data.len() < 4 {
1586 return None;
1587 }
1588 let mut buf = data;
1589 Some(Self {
1590 count: buf.get_u32_le(),
1591 })
1592 }
1593}
1594
/// Parsed `btrfs_block_group_item`.
#[derive(Debug, Clone)]
pub struct BlockGroupItem {
    /// Bytes currently used in the block group.
    pub used: u64,
    /// Object id of the owning chunk tree entry.
    pub chunk_objectid: u64,
    /// Type/profile flags (see [`BlockGroupFlags`]).
    pub flags: BlockGroupFlags,
}
1607
1608impl BlockGroupItem {
1609 #[must_use]
1611 pub fn parse(data: &[u8]) -> Option<Self> {
1612 if data.len() < mem::size_of::<raw::btrfs_block_group_item>() {
1613 return None;
1614 }
1615 let mut buf = data;
1616 Some(Self {
1617 used: buf.get_u64_le(),
1618 chunk_objectid: buf.get_u64_le(),
1619 flags: BlockGroupFlags::from_bits_truncate(buf.get_u64_le()),
1620 })
1621 }
1622
1623 #[must_use]
1625 pub fn to_bytes(&self) -> Vec<u8> {
1626 let mut buf = Vec::with_capacity(24);
1627 buf.put_u64_le(self.used);
1628 buf.put_u64_le(self.chunk_objectid);
1629 buf.put_u64_le(self.flags.bits());
1630 buf
1631 }
1632}
1633
/// Decoded `btrfs_chunk` item: one logical chunk and its stripe layout.
#[derive(Debug, Clone)]
pub struct ChunkItem {
    /// Size of the chunk in bytes of logical address space.
    pub length: u64,
    /// Object id of the owning root.
    pub owner: u64,
    /// Stripe length in bytes.
    pub stripe_len: u64,
    /// Chunk type and RAID profile flags.
    pub chunk_type: BlockGroupFlags,
    /// I/O alignment.
    pub io_align: u32,
    /// I/O width.
    pub io_width: u32,
    /// Minimal I/O (sector) size.
    pub sector_size: u32,
    /// Stripe count declared in the item header.
    pub num_stripes: u16,
    /// Sub-stripe count.
    pub sub_stripes: u16,
    /// Decoded stripe entries; may be fewer than `num_stripes` if the
    /// item payload was truncated (see `parse`).
    pub stripes: Vec<ChunkStripe>,
}
1661
/// One stripe of a chunk: which device holds it and where.
#[derive(Debug, Clone)]
pub struct ChunkStripe {
    /// Device id holding this stripe.
    pub devid: u64,
    /// Physical byte offset of the stripe on that device.
    pub offset: u64,
    /// UUID of the device.
    pub dev_uuid: Uuid,
}
1672
1673impl ChunkItem {
1674 #[must_use]
1676 pub fn parse(data: &[u8]) -> Option<Self> {
1677 let chunk_base_size = mem::offset_of!(raw::btrfs_chunk, stripe);
1678 if data.len() < chunk_base_size {
1679 return None;
1680 }
1681 let mut buf = data;
1682 let length = buf.get_u64_le();
1683 let owner = buf.get_u64_le();
1684 let stripe_len = buf.get_u64_le();
1685 let chunk_type = BlockGroupFlags::from_bits_truncate(buf.get_u64_le());
1686 let io_align = buf.get_u32_le();
1687 let io_width = buf.get_u32_le();
1688 let sector_size = buf.get_u32_le();
1689 let num_stripes = buf.get_u16_le();
1690 let sub_stripes = buf.get_u16_le();
1691 let stripe_size = mem::size_of::<raw::btrfs_stripe>();
1692 let mut stripes = Vec::with_capacity(num_stripes as usize);
1693 let mut sbuf = &data[chunk_base_size..];
1694 for i in 0..num_stripes as usize {
1695 let s_off = chunk_base_size + i * stripe_size;
1696 if s_off + stripe_size > data.len() {
1697 break;
1698 }
1699 let devid = sbuf.get_u64_le();
1700 let offset = sbuf.get_u64_le();
1701 let dev_uuid = get_uuid(&mut sbuf);
1702 stripes.push(ChunkStripe {
1703 devid,
1704 offset,
1705 dev_uuid,
1706 });
1707 }
1708 Some(Self {
1709 length,
1710 owner,
1711 stripe_len,
1712 chunk_type,
1713 io_align,
1714 io_width,
1715 sector_size,
1716 num_stripes,
1717 sub_stripes,
1718 stripes,
1719 })
1720 }
1721}
1722
1723impl ChunkItem {
1724 #[must_use]
1727 pub fn to_mapping(&self, logical: u64) -> crate::chunk::ChunkMapping {
1728 crate::chunk::ChunkMapping {
1729 logical,
1730 length: self.length,
1731 stripe_len: self.stripe_len,
1732 chunk_type: self.chunk_type.bits(),
1733 num_stripes: self.num_stripes,
1734 sub_stripes: self.sub_stripes,
1735 stripes: self
1736 .stripes
1737 .iter()
1738 .map(|s| crate::chunk::Stripe {
1739 devid: s.devid,
1740 offset: s.offset,
1741 dev_uuid: s.dev_uuid,
1742 })
1743 .collect(),
1744 }
1745 }
1746}
1747
/// Decoded `btrfs_dev_item` describing one device of the filesystem.
#[derive(Debug, Clone)]
pub struct DeviceItem {
    /// Device id within the filesystem.
    pub devid: u64,
    /// Total size of the device in bytes.
    pub total_bytes: u64,
    /// Bytes allocated on the device.
    pub bytes_used: u64,
    /// I/O alignment.
    pub io_align: u32,
    /// I/O width.
    pub io_width: u32,
    /// Minimal I/O (sector) size.
    pub sector_size: u32,
    /// Device type flags.
    pub dev_type: u64,
    /// Generation recorded for this device.
    pub generation: u64,
    /// Start offset recorded for the device.
    pub start_offset: u64,
    /// Device group value.
    pub dev_group: u32,
    /// Seek speed hint.
    pub seek_speed: u8,
    /// Bandwidth hint.
    pub bandwidth: u8,
    /// UUID of this device.
    pub uuid: Uuid,
    /// UUID of the filesystem the device belongs to.
    pub fsid: Uuid,
}
1783
impl DeviceItem {
    /// Serializes this item in the on-disk `btrfs_dev_item` layout.
    ///
    /// The field order here must mirror [`Self::parse`] exactly; the two
    /// functions are byte-for-byte inverses.
    pub fn write_bytes(&self, buf: &mut impl BufMut) {
        buf.put_u64_le(self.devid);
        buf.put_u64_le(self.total_bytes);
        buf.put_u64_le(self.bytes_used);
        buf.put_u32_le(self.io_align);
        buf.put_u32_le(self.io_width);
        buf.put_u32_le(self.sector_size);
        buf.put_u64_le(self.dev_type);
        buf.put_u64_le(self.generation);
        buf.put_u64_le(self.start_offset);
        buf.put_u32_le(self.dev_group);
        buf.put_u8(self.seek_speed);
        buf.put_u8(self.bandwidth);
        buf.put_slice(self.uuid.as_bytes());
        buf.put_slice(self.fsid.as_bytes());
    }

    /// Parses a `btrfs_dev_item`; returns `None` when `data` is shorter
    /// than the on-disk structure. Inverse of [`Self::write_bytes`].
    #[must_use]
    pub fn parse(data: &[u8]) -> Option<Self> {
        if data.len() < mem::size_of::<raw::btrfs_dev_item>() {
            return None;
        }
        let mut buf = data;
        // Reads occur in on-disk field order.
        let devid = buf.get_u64_le();
        let total_bytes = buf.get_u64_le();
        let bytes_used = buf.get_u64_le();
        let io_align = buf.get_u32_le();
        let io_width = buf.get_u32_le();
        let sector_size = buf.get_u32_le();
        let dev_type = buf.get_u64_le();
        let generation = buf.get_u64_le();
        let start_offset = buf.get_u64_le();
        let dev_group = buf.get_u32_le();
        let seek_speed = buf.get_u8();
        let bandwidth = buf.get_u8();
        let uuid = get_uuid(&mut buf);
        let fsid = get_uuid(&mut buf);
        Some(Self {
            devid,
            total_bytes,
            bytes_used,
            io_align,
            io_width,
            sector_size,
            dev_type,
            generation,
            start_offset,
            dev_group,
            seek_speed,
            bandwidth,
            uuid,
            fsid,
        })
    }
}
1842
/// Decoded `btrfs_dev_extent`: a physical byte range on a device that
/// backs part of a chunk.
#[derive(Debug, Clone)]
pub struct DeviceExtent {
    /// Id of the tree containing the referencing chunk.
    pub chunk_tree: u64,
    /// Object id of the referencing chunk item.
    pub chunk_objectid: u64,
    /// Logical offset of the chunk this extent backs.
    pub chunk_offset: u64,
    /// Length of the extent in bytes.
    pub length: u64,
    /// UUID of the chunk tree.
    pub chunk_tree_uuid: Uuid,
}
1860
1861impl DeviceExtent {
1862 #[must_use]
1864 pub fn parse(data: &[u8]) -> Option<Self> {
1865 if data.len() < mem::size_of::<raw::btrfs_dev_extent>() {
1866 return None;
1867 }
1868 let mut buf = data;
1869 let chunk_tree = buf.get_u64_le();
1870 let chunk_objectid = buf.get_u64_le();
1871 let chunk_offset = buf.get_u64_le();
1872 let length = buf.get_u64_le();
1873 let chunk_tree_uuid = get_uuid(&mut buf);
1874 Some(Self {
1875 chunk_tree,
1876 chunk_objectid,
1877 chunk_offset,
1878 length,
1879 chunk_tree_uuid,
1880 })
1881 }
1882}
1883
bitflags::bitflags! {
    /// Flags stored in a FREE_SPACE_INFO item.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct FreeSpaceInfoFlags: u32 {
        /// Free space for this block group is stored as bitmaps rather
        /// than extent records.
        const USING_BITMAPS = raw::BTRFS_FREE_SPACE_USING_BITMAPS;
        // Keep any unknown bits instead of dropping them on decode.
        const _ = !0;
    }
}
1893
1894impl fmt::Display for FreeSpaceInfoFlags {
1895 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1897 write!(f, "{}", self.bits())
1898 }
1899}
1900
/// Decoded FREE_SPACE_INFO payload from the free-space tree.
#[derive(Debug, Clone)]
pub struct FreeSpaceInfo {
    /// Number of free-space extents in the block group.
    pub extent_count: u32,
    /// Representation flags (see [`FreeSpaceInfoFlags`]).
    pub flags: FreeSpaceInfoFlags,
}
1911
1912impl FreeSpaceInfo {
1913 #[must_use]
1915 pub fn parse(data: &[u8]) -> Option<Self> {
1916 if data.len() < 8 {
1917 return None;
1918 }
1919 let mut buf = data;
1920 Some(Self {
1921 extent_count: buf.get_u32_le(),
1922 flags: FreeSpaceInfoFlags::from_bits_truncate(buf.get_u32_le()),
1923 })
1924 }
1925}
1926
/// Decoded qgroup status item.
#[derive(Debug, Clone)]
pub struct QgroupStatus {
    /// Item format version.
    pub version: u64,
    /// Generation of the last qgroup update.
    pub generation: u64,
    /// Status flags.
    pub flags: u64,
    /// Scan position value.
    pub scan: u64,
    /// Optional trailing field; absent when the item is only 32 bytes.
    pub enable_gen: Option<u64>,
}
1943
1944impl QgroupStatus {
1945 #[must_use]
1947 pub fn parse(data: &[u8]) -> Option<Self> {
1948 if data.len() < 32 {
1949 return None;
1950 }
1951 let mut buf = data;
1952 let version = buf.get_u64_le();
1953 let generation = buf.get_u64_le();
1954 let flags = buf.get_u64_le();
1955 let scan = buf.get_u64_le();
1956 let enable_gen = if buf.remaining() >= 8 {
1957 Some(buf.get_u64_le())
1958 } else {
1959 None
1960 };
1961 Some(Self {
1962 version,
1963 generation,
1964 flags,
1965 scan,
1966 enable_gen,
1967 })
1968 }
1969}
1970
/// Decoded `btrfs_qgroup_info_item`: space accounting for one qgroup.
#[derive(Debug, Clone)]
pub struct QgroupInfo {
    /// Generation of the last update.
    pub generation: u64,
    /// Referenced bytes.
    pub referenced: u64,
    /// Referenced bytes, compressed.
    pub referenced_compressed: u64,
    /// Exclusively owned bytes.
    pub exclusive: u64,
    /// Exclusively owned bytes, compressed.
    pub exclusive_compressed: u64,
}
1988
1989impl QgroupInfo {
1990 #[must_use]
1992 pub fn parse(data: &[u8]) -> Option<Self> {
1993 if data.len() < mem::size_of::<raw::btrfs_qgroup_info_item>() {
1994 return None;
1995 }
1996 let mut buf = data;
1997 Some(Self {
1998 generation: buf.get_u64_le(),
1999 referenced: buf.get_u64_le(),
2000 referenced_compressed: buf.get_u64_le(),
2001 exclusive: buf.get_u64_le(),
2002 exclusive_compressed: buf.get_u64_le(),
2003 })
2004 }
2005}
2006
/// Decoded `btrfs_qgroup_limit_item`: configured limits for one qgroup.
#[derive(Debug, Clone)]
pub struct QgroupLimit {
    /// Limit flags.
    pub flags: u64,
    /// Maximum referenced bytes.
    pub max_referenced: u64,
    /// Maximum exclusive bytes.
    pub max_exclusive: u64,
    /// Reserved referenced bytes.
    pub rsv_referenced: u64,
    /// Reserved exclusive bytes.
    pub rsv_exclusive: u64,
}
2024
2025impl QgroupLimit {
2026 #[must_use]
2028 pub fn parse(data: &[u8]) -> Option<Self> {
2029 if data.len() < mem::size_of::<raw::btrfs_qgroup_limit_item>() {
2030 return None;
2031 }
2032 let mut buf = data;
2033 Some(Self {
2034 flags: buf.get_u64_le(),
2035 max_referenced: buf.get_u64_le(),
2036 max_exclusive: buf.get_u64_le(),
2037 rsv_referenced: buf.get_u64_le(),
2038 rsv_exclusive: buf.get_u64_le(),
2039 })
2040 }
2041}
2042
/// Device error counters stored in a persistent item.
#[derive(Debug, Clone)]
pub struct DeviceStats {
    /// (counter name, value) pairs in on-disk order.
    pub values: Vec<(String, u64)>,
}
2053
2054impl DeviceStats {
2055 #[must_use]
2058 pub fn parse(data: &[u8]) -> Self {
2059 let stat_names = [
2060 "write_errs",
2061 "read_errs",
2062 "flush_errs",
2063 "corruption_errs",
2064 "generation",
2065 ];
2066 let mut buf = data;
2067 let mut values = Vec::new();
2068 for name in &stat_names {
2069 if buf.remaining() >= 8 {
2070 values.push((name.to_string(), buf.get_u64_le()));
2071 }
2072 }
2073 DeviceStats { values }
2074 }
2075}
2076
/// Payload of a UUID-tree item: subvolume ids associated with a UUID key.
#[derive(Debug, Clone)]
pub struct UuidItem {
    /// Subvolume ids, stored on disk as packed LE u64 values.
    pub subvol_ids: Vec<u64>,
}
2085
2086impl UuidItem {
2087 #[must_use]
2089 pub fn parse(data: &[u8]) -> Self {
2090 let mut buf = data;
2091 let mut subvol_ids = Vec::new();
2092 while buf.remaining() >= 8 {
2093 subvol_ids.push(buf.get_u64_le());
2094 }
2095 Self { subvol_ids }
2096 }
2097}
2098
/// Decoded payload of a single tree item, dispatched on the item's key type
/// by [`parse_item_payload`].
pub enum ItemPayload {
    InodeItem(InodeItem),
    InodeRef(Vec<InodeRef>),
    InodeExtref(Vec<InodeExtref>),
    /// DIR_ITEM / DIR_INDEX / XATTR_ITEM entries.
    DirItem(Vec<DirItem>),
    /// Directory log entry; `end` is the single u64 stored in the payload.
    DirLogItem {
        end: u64,
    },
    OrphanItem,
    RootItem(RootItem),
    RootRef(RootRef),
    FileExtentItem(FileExtentItem),
    /// Raw checksum bytes, kept opaque.
    ExtentCsum {
        data: Vec<u8>,
    },
    ExtentItem(ExtentItem),
    /// Back-reference markers with no payload of their own.
    TreeBlockRef,
    SharedBlockRef,
    ExtentDataRef(ExtentDataRef),
    SharedDataRef(SharedDataRef),
    /// Owner back-reference carrying only the owning root id.
    ExtentOwnerRef {
        root: u64,
    },
    BlockGroupItem(BlockGroupItem),
    FreeSpaceInfo(FreeSpaceInfo),
    FreeSpaceExtent,
    FreeSpaceBitmap,
    ChunkItem(ChunkItem),
    DeviceItem(DeviceItem),
    DeviceExtent(DeviceExtent),
    QgroupStatus(QgroupStatus),
    QgroupInfo(QgroupInfo),
    QgroupLimit(QgroupLimit),
    QgroupRelation,
    DeviceStats(DeviceStats),
    /// Balance status item; only the flags word is decoded.
    BalanceItem {
        flags: u64,
    },
    DeviceReplace(DeviceReplaceItem),
    UuidItem(UuidItem),
    /// Free-form byte string payload.
    StringItem(Vec<u8>),
    RaidStripe(RaidStripeItem),
    /// Unrecognized key type, or a payload that failed to parse.
    Unknown(Vec<u8>),
}
2186
/// Decoded DEV_REPLACE item describing an in-progress or finished
/// device-replace operation.
#[derive(Debug, Clone)]
pub struct DeviceReplaceItem {
    /// Device id being replaced.
    pub src_devid: u64,
    /// Left progress cursor.
    pub cursor_left: u64,
    /// Right progress cursor.
    pub cursor_right: u64,
    /// Replace mode value.
    pub replace_mode: u64,
    /// Replace state value.
    pub replace_state: u64,
    /// Start timestamp value.
    pub time_started: u64,
    /// Stop timestamp value.
    pub time_stopped: u64,
    /// Write errors observed during the replace.
    pub num_write_errors: u64,
    /// Uncorrectable read errors observed during the replace.
    pub num_uncorrectable_read_errors: u64,
}
2211
2212impl DeviceReplaceItem {
2213 #[must_use]
2215 pub fn parse(data: &[u8]) -> Option<Self> {
2216 if data.len() < 80 {
2217 return None;
2218 }
2219 let mut buf = data;
2220 Some(Self {
2221 src_devid: buf.get_u64_le(),
2222 cursor_left: buf.get_u64_le(),
2223 cursor_right: buf.get_u64_le(),
2224 replace_mode: buf.get_u64_le(),
2225 replace_state: buf.get_u64_le(),
2226 time_started: buf.get_u64_le(),
2227 time_stopped: buf.get_u64_le(),
2228 num_write_errors: buf.get_u64_le(),
2229 num_uncorrectable_read_errors: buf.get_u64_le(),
2230 })
2231 }
2232}
2233
/// Decoded RAID_STRIPE item from the raid-stripe tree.
#[derive(Debug, Clone)]
pub struct RaidStripeItem {
    /// Encoding value from the item header.
    pub encoding: u64,
    /// Per-device stripe placements.
    pub stripes: Vec<RaidStripeEntry>,
}
2244
/// One stripe placement inside a RAID_STRIPE item.
#[derive(Debug, Clone)]
pub struct RaidStripeEntry {
    /// Device id holding the stripe.
    pub devid: u64,
    /// Physical byte offset on that device.
    pub physical: u64,
}
2253
2254impl RaidStripeItem {
2255 #[must_use]
2257 pub fn parse(data: &[u8]) -> Option<Self> {
2258 if data.len() < 8 {
2259 return None;
2260 }
2261 let mut buf = data;
2262 let encoding = buf.get_u64_le();
2263 let mut stripes = Vec::new();
2264 while buf.remaining() >= 16 {
2265 stripes.push(RaidStripeEntry {
2266 devid: buf.get_u64_le(),
2267 physical: buf.get_u64_le(),
2268 });
2269 }
2270 Some(Self { encoding, stripes })
2271 }
2272}
2273
/// Decodes an item's raw payload into an [`ItemPayload`] according to the
/// key's type (and, for persistent/temporary items, its objectid).
///
/// Parsing is best-effort: any payload that is too short or malformed is
/// returned verbatim as [`ItemPayload::Unknown`] rather than failing.
#[must_use]
#[allow(clippy::too_many_lines)]
pub fn parse_item_payload(key: &DiskKey, data: &[u8]) -> ItemPayload {
    // NOTE(review): this import looks redundant — `KeyType` is already
    // imported at module level; confirm and remove.
    use crate::tree::KeyType;

    match key.key_type {
        KeyType::InodeItem => match InodeItem::parse(data) {
            Some(v) => ItemPayload::InodeItem(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::InodeRef => ItemPayload::InodeRef(InodeRef::parse_all(data)),
        KeyType::InodeExtref => {
            ItemPayload::InodeExtref(InodeExtref::parse_all(data))
        }
        KeyType::DirItem | KeyType::DirIndex | KeyType::XattrItem => {
            ItemPayload::DirItem(DirItem::parse_all(data))
        }
        KeyType::DirLogItem | KeyType::DirLogIndex => {
            // Payload is a single LE u64 "end" value; defaults to 0 when
            // the payload is too short.
            let end = if data.len() >= 8 {
                let mut buf = data;
                buf.get_u64_le()
            } else {
                0
            };
            ItemPayload::DirLogItem { end }
        }
        KeyType::OrphanItem => ItemPayload::OrphanItem,
        KeyType::RootItem => match RootItem::parse(data) {
            Some(v) => ItemPayload::RootItem(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::RootRef | KeyType::RootBackref => match RootRef::parse(data) {
            Some(v) => ItemPayload::RootRef(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::ExtentData => match FileExtentItem::parse(data) {
            Some(v) => ItemPayload::FileExtentItem(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        // Checksum bytes are kept opaque.
        KeyType::ExtentCsum => ItemPayload::ExtentCsum {
            data: data.to_vec(),
        },
        // ExtentItem::parse needs the key to distinguish skinny metadata.
        KeyType::ExtentItem | KeyType::MetadataItem => {
            match ExtentItem::parse(data, key) {
                Some(v) => ItemPayload::ExtentItem(v),
                None => ItemPayload::Unknown(data.to_vec()),
            }
        }
        KeyType::TreeBlockRef => ItemPayload::TreeBlockRef,
        KeyType::SharedBlockRef => ItemPayload::SharedBlockRef,
        KeyType::ExtentDataRef => match ExtentDataRef::parse(data) {
            Some(v) => ItemPayload::ExtentDataRef(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::SharedDataRef => match SharedDataRef::parse(data) {
            Some(v) => ItemPayload::SharedDataRef(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::ExtentOwnerRef => {
            // Payload is a single LE u64 root id.
            if data.len() >= 8 {
                let mut buf = data;
                ItemPayload::ExtentOwnerRef {
                    root: buf.get_u64_le(),
                }
            } else {
                ItemPayload::Unknown(data.to_vec())
            }
        }
        KeyType::BlockGroupItem => match BlockGroupItem::parse(data) {
            Some(v) => ItemPayload::BlockGroupItem(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::FreeSpaceInfo => match FreeSpaceInfo::parse(data) {
            Some(v) => ItemPayload::FreeSpaceInfo(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::FreeSpaceExtent => ItemPayload::FreeSpaceExtent,
        KeyType::FreeSpaceBitmap => ItemPayload::FreeSpaceBitmap,
        KeyType::ChunkItem => match ChunkItem::parse(data) {
            Some(v) => ItemPayload::ChunkItem(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::DeviceItem => match DeviceItem::parse(data) {
            Some(v) => ItemPayload::DeviceItem(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::DeviceExtent => match DeviceExtent::parse(data) {
            Some(v) => ItemPayload::DeviceExtent(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::QgroupStatus => match QgroupStatus::parse(data) {
            Some(v) => ItemPayload::QgroupStatus(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::QgroupInfo => match QgroupInfo::parse(data) {
            Some(v) => ItemPayload::QgroupInfo(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::QgroupLimit => match QgroupLimit::parse(data) {
            Some(v) => ItemPayload::QgroupLimit(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::QgroupRelation => ItemPayload::QgroupRelation,
        // Persistent items are multiplexed by objectid; only device stats
        // are decoded here.
        KeyType::PersistentItem => {
            if key.objectid == u64::from(raw::BTRFS_DEV_STATS_OBJECTID) {
                ItemPayload::DeviceStats(DeviceStats::parse(data))
            } else {
                ItemPayload::Unknown(data.to_vec())
            }
        }
        // Temporary items are likewise multiplexed; only the balance item
        // (a single LE u64 of flags) is decoded.
        KeyType::TemporaryItem => {
            if ObjectId::from_raw(key.objectid) == ObjectId::Balance
                && data.len() >= 8
            {
                ItemPayload::BalanceItem {
                    flags: {
                        let mut buf = data;
                        buf.get_u64_le()
                    },
                }
            } else {
                ItemPayload::Unknown(data.to_vec())
            }
        }
        KeyType::DeviceReplace => match DeviceReplaceItem::parse(data) {
            Some(v) => ItemPayload::DeviceReplace(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        KeyType::UuidKeySubvol | KeyType::UuidKeyReceivedSubvol => {
            ItemPayload::UuidItem(UuidItem::parse(data))
        }
        KeyType::StringItem => ItemPayload::StringItem(data.to_vec()),
        KeyType::RaidStripe => match RaidStripeItem::parse(data) {
            Some(v) => ItemPayload::RaidStripe(v),
            None => ItemPayload::Unknown(data.to_vec()),
        },
        _ => ItemPayload::Unknown(data.to_vec()),
    }
}
2414
2415#[cfg(test)]
2416mod tests {
2417 use super::*;
2418
    #[test]
    fn compression_type_round_trip() {
        // from_raw/to_raw must be inverse for known codes and preserve
        // unknown codes via the Unknown variant.
        for v in 0..=3 {
            let ct = CompressionType::from_raw(v);
            assert_eq!(ct.to_raw(), v);
        }
        assert_eq!(CompressionType::from_raw(0), CompressionType::None);
        assert_eq!(CompressionType::from_raw(1), CompressionType::Zlib);
        assert_eq!(CompressionType::from_raw(2), CompressionType::Lzo);
        assert_eq!(CompressionType::from_raw(3), CompressionType::Zstd);
        assert_eq!(CompressionType::from_raw(99), CompressionType::Unknown(99));
        assert_eq!(CompressionType::Unknown(99).to_raw(), 99);
    }
2434
    #[test]
    fn compression_type_names() {
        // Human-readable names for every compression variant.
        assert_eq!(CompressionType::None.name(), "none");
        assert_eq!(CompressionType::Zlib.name(), "zlib");
        assert_eq!(CompressionType::Lzo.name(), "lzo");
        assert_eq!(CompressionType::Zstd.name(), "zstd");
        assert_eq!(CompressionType::Unknown(42).name(), "unknown");
    }
2443
    #[test]
    fn file_extent_type_round_trip() {
        // from_raw/to_raw must round-trip for known and unknown codes.
        assert_eq!(FileExtentType::from_raw(0), FileExtentType::Inline);
        assert_eq!(FileExtentType::from_raw(1), FileExtentType::Regular);
        assert_eq!(FileExtentType::from_raw(2), FileExtentType::Prealloc);
        assert_eq!(FileExtentType::from_raw(77), FileExtentType::Unknown(77));
        for v in 0..=2 {
            let ft = FileExtentType::from_raw(v);
            assert_eq!(ft.to_raw(), v);
        }
        assert_eq!(FileExtentType::Unknown(77).to_raw(), 77);
    }
2456
    #[test]
    fn file_extent_type_names() {
        // Human-readable names for every extent-type variant.
        assert_eq!(FileExtentType::Inline.name(), "inline");
        assert_eq!(FileExtentType::Regular.name(), "regular");
        assert_eq!(FileExtentType::Prealloc.name(), "prealloc");
        assert_eq!(FileExtentType::Unknown(5).name(), "unknown");
    }
2464
    #[test]
    fn file_type_from_raw_all_variants() {
        // Every dir-entry type code maps to its enum variant; unrecognized
        // codes become Other.
        assert_eq!(FileType::from_raw(0), FileType::Unknown);
        assert_eq!(FileType::from_raw(1), FileType::RegFile);
        assert_eq!(FileType::from_raw(2), FileType::Dir);
        assert_eq!(FileType::from_raw(3), FileType::Chrdev);
        assert_eq!(FileType::from_raw(4), FileType::Blkdev);
        assert_eq!(FileType::from_raw(5), FileType::Fifo);
        assert_eq!(FileType::from_raw(6), FileType::Sock);
        assert_eq!(FileType::from_raw(7), FileType::Symlink);
        assert_eq!(FileType::from_raw(8), FileType::Xattr);
        assert_eq!(FileType::from_raw(99), FileType::Other(99));
    }
2478
    #[test]
    fn file_type_names() {
        // Display names; unrecognized codes fall back to "UNKNOWN".
        assert_eq!(FileType::Unknown.name(), "UNKNOWN");
        assert_eq!(FileType::RegFile.name(), "FILE");
        assert_eq!(FileType::Dir.name(), "DIR");
        assert_eq!(FileType::Chrdev.name(), "CHRDEV");
        assert_eq!(FileType::Blkdev.name(), "BLKDEV");
        assert_eq!(FileType::Fifo.name(), "FIFO");
        assert_eq!(FileType::Sock.name(), "SOCK");
        assert_eq!(FileType::Symlink.name(), "SYMLINK");
        assert_eq!(FileType::Xattr.name(), "XATTR");
        assert_eq!(FileType::Other(200).name(), "UNKNOWN");
    }
2492
2493 #[test]
2496 fn block_group_item_parse() {
2497 let mut buf = Vec::new();
2498 buf.extend_from_slice(&1000u64.to_le_bytes()); buf.extend_from_slice(&256u64.to_le_bytes()); buf.extend_from_slice(
2501 &(raw::BTRFS_BLOCK_GROUP_DATA as u64).to_le_bytes(),
2502 );
2503 let item = BlockGroupItem::parse(&buf).unwrap();
2504 assert_eq!(item.used, 1000);
2505 assert_eq!(item.chunk_objectid, 256);
2506 assert_eq!(item.flags, BlockGroupFlags::DATA);
2507 }
2508
    #[test]
    fn block_group_item_too_short() {
        // One byte short of the three-u64 item must be rejected.
        assert!(BlockGroupItem::parse(&[0; 23]).is_none());
    }
2513
    #[test]
    fn free_space_info_parse() {
        // extent_count then flags, both LE u32.
        let mut buf = Vec::new();
        buf.extend_from_slice(&42u32.to_le_bytes());
        buf.extend_from_slice(&7u32.to_le_bytes());
        let info = FreeSpaceInfo::parse(&buf).unwrap();
        assert_eq!(info.extent_count, 42);
        assert_eq!(info.flags, FreeSpaceInfoFlags::from_bits_truncate(7));
    }
2523
    #[test]
    fn free_space_info_too_short() {
        // 7 bytes cannot hold the two u32 fields.
        assert!(FreeSpaceInfo::parse(&[0; 7]).is_none());
    }
2528
2529 #[test]
2530 fn dev_extent_parse() {
2531 let size = mem::size_of::<raw::btrfs_dev_extent>();
2532 let mut buf = vec![0u8; size];
2533 buf[0..8].copy_from_slice(&3u64.to_le_bytes()); buf[8..16].copy_from_slice(&256u64.to_le_bytes()); buf[16..24].copy_from_slice(&0x10000u64.to_le_bytes()); buf[24..32].copy_from_slice(&0x40000u64.to_le_bytes()); buf[32..48].copy_from_slice(&[0xAB; 16]);
2539 let de = DeviceExtent::parse(&buf).unwrap();
2540 assert_eq!(de.chunk_tree, 3);
2541 assert_eq!(de.chunk_objectid, 256);
2542 assert_eq!(de.chunk_offset, 0x10000);
2543 assert_eq!(de.length, 0x40000);
2544 assert_eq!(de.chunk_tree_uuid.as_bytes(), &[0xAB; 16]);
2545 }
2546
    #[test]
    fn dev_extent_too_short() {
        // One byte short of the on-disk struct must be rejected.
        let size = mem::size_of::<raw::btrfs_dev_extent>();
        assert!(DeviceExtent::parse(&vec![0u8; size - 1]).is_none());
    }
2552
2553 #[test]
2554 fn extent_data_ref_parse() {
2555 let size = mem::size_of::<raw::btrfs_extent_data_ref>();
2556 let mut buf = vec![0u8; size];
2557 buf[0..8].copy_from_slice(&5u64.to_le_bytes()); buf[8..16].copy_from_slice(&256u64.to_le_bytes()); buf[16..24].copy_from_slice(&0u64.to_le_bytes()); buf[24..28].copy_from_slice(&1u32.to_le_bytes()); let edr = ExtentDataRef::parse(&buf).unwrap();
2562 assert_eq!(edr.root, 5);
2563 assert_eq!(edr.objectid, 256);
2564 assert_eq!(edr.offset, 0);
2565 assert_eq!(edr.count, 1);
2566 }
2567
    #[test]
    fn extent_data_ref_too_short() {
        // One byte short of the on-disk struct must be rejected.
        assert!(ExtentDataRef::parse(&[0; 27]).is_none());
    }
2572
    #[test]
    fn shared_data_ref_parse() {
        // Payload is a single LE u32 refcount.
        let buf = 17u32.to_le_bytes();
        let sdr = SharedDataRef::parse(&buf).unwrap();
        assert_eq!(sdr.count, 17);
    }
2579
    #[test]
    fn shared_data_ref_too_short() {
        // Fewer than 4 bytes cannot hold the refcount.
        assert!(SharedDataRef::parse(&[0; 3]).is_none());
    }
2584
2585 #[test]
2586 fn qgroup_info_parse() {
2587 let mut buf = Vec::new();
2588 buf.extend_from_slice(&100u64.to_le_bytes()); buf.extend_from_slice(&4096u64.to_le_bytes()); buf.extend_from_slice(&4096u64.to_le_bytes()); buf.extend_from_slice(&2048u64.to_le_bytes()); buf.extend_from_slice(&2048u64.to_le_bytes()); let qi = QgroupInfo::parse(&buf).unwrap();
2594 assert_eq!(qi.generation, 100);
2595 assert_eq!(qi.referenced, 4096);
2596 assert_eq!(qi.referenced_compressed, 4096);
2597 assert_eq!(qi.exclusive, 2048);
2598 assert_eq!(qi.exclusive_compressed, 2048);
2599 }
2600
    #[test]
    fn qgroup_info_too_short() {
        // One byte short of the five-u64 item must be rejected.
        assert!(QgroupInfo::parse(&[0; 39]).is_none());
    }
2605
2606 #[test]
2607 fn qgroup_limit_parse() {
2608 let mut buf = Vec::new();
2609 buf.extend_from_slice(&3u64.to_le_bytes()); buf.extend_from_slice(&1_000_000u64.to_le_bytes()); buf.extend_from_slice(&500_000u64.to_le_bytes()); buf.extend_from_slice(&0u64.to_le_bytes()); buf.extend_from_slice(&0u64.to_le_bytes()); let ql = QgroupLimit::parse(&buf).unwrap();
2615 assert_eq!(ql.flags, 3);
2616 assert_eq!(ql.max_referenced, 1_000_000);
2617 assert_eq!(ql.max_exclusive, 500_000);
2618 assert_eq!(ql.rsv_referenced, 0);
2619 assert_eq!(ql.rsv_exclusive, 0);
2620 }
2621
    #[test]
    fn qgroup_limit_too_short() {
        // One byte short of the five-u64 item must be rejected.
        assert!(QgroupLimit::parse(&[0; 39]).is_none());
    }
2626
2627 #[test]
2628 fn qgroup_status_parse_minimal() {
2629 let mut buf = Vec::new();
2630 buf.extend_from_slice(&1u64.to_le_bytes()); buf.extend_from_slice(&50u64.to_le_bytes()); buf.extend_from_slice(&2u64.to_le_bytes()); buf.extend_from_slice(&0u64.to_le_bytes()); let qs = QgroupStatus::parse(&buf).unwrap();
2635 assert_eq!(qs.version, 1);
2636 assert_eq!(qs.generation, 50);
2637 assert_eq!(qs.flags, 2);
2638 assert_eq!(qs.scan, 0);
2639 assert!(qs.enable_gen.is_none());
2640 }
2641
2642 #[test]
2643 fn qgroup_status_parse_with_enable_gen() {
2644 let mut buf = Vec::new();
2645 buf.extend_from_slice(&1u64.to_le_bytes());
2646 buf.extend_from_slice(&50u64.to_le_bytes());
2647 buf.extend_from_slice(&2u64.to_le_bytes());
2648 buf.extend_from_slice(&0u64.to_le_bytes());
2649 buf.extend_from_slice(&99u64.to_le_bytes()); let qs = QgroupStatus::parse(&buf).unwrap();
2651 assert_eq!(qs.enable_gen, Some(99));
2652 }
2653
    #[test]
    fn qgroup_status_too_short() {
        // The four mandatory u64 fields need at least 32 bytes.
        assert!(QgroupStatus::parse(&[0; 31]).is_none());
    }
2658
2659 #[test]
2660 fn dev_replace_item_parse() {
2661 let mut buf = vec![0u8; 80];
2662 buf[0..8].copy_from_slice(&1u64.to_le_bytes()); buf[8..16].copy_from_slice(&0x1000u64.to_le_bytes()); buf[16..24].copy_from_slice(&0x2000u64.to_le_bytes()); buf[24..32].copy_from_slice(&0u64.to_le_bytes()); buf[32..40].copy_from_slice(&2u64.to_le_bytes()); buf[40..48].copy_from_slice(&1700000000u64.to_le_bytes()); buf[48..56].copy_from_slice(&1700000100u64.to_le_bytes()); buf[56..64].copy_from_slice(&3u64.to_le_bytes()); buf[64..72].copy_from_slice(&5u64.to_le_bytes()); let dri = DeviceReplaceItem::parse(&buf).unwrap();
2672 assert_eq!(dri.src_devid, 1);
2673 assert_eq!(dri.cursor_left, 0x1000);
2674 assert_eq!(dri.cursor_right, 0x2000);
2675 assert_eq!(dri.replace_state, 2);
2676 assert_eq!(dri.time_started, 1700000000);
2677 assert_eq!(dri.time_stopped, 1700000100);
2678 assert_eq!(dri.num_write_errors, 3);
2679 assert_eq!(dri.num_uncorrectable_read_errors, 5);
2680 }
2681
    #[test]
    fn dev_replace_item_too_short() {
        // The item requires at least 80 bytes.
        assert!(DeviceReplaceItem::parse(&[0; 79]).is_none());
    }
2686
2687 #[test]
2688 fn raid_stripe_item_parse() {
2689 let mut buf = Vec::new();
2690 buf.extend_from_slice(&1u64.to_le_bytes()); buf.extend_from_slice(&1u64.to_le_bytes()); buf.extend_from_slice(&0x10000u64.to_le_bytes()); buf.extend_from_slice(&2u64.to_le_bytes());
2696 buf.extend_from_slice(&0x20000u64.to_le_bytes());
2697 let rsi = RaidStripeItem::parse(&buf).unwrap();
2698 assert_eq!(rsi.encoding, 1);
2699 assert_eq!(rsi.stripes.len(), 2);
2700 assert_eq!(rsi.stripes[0].devid, 1);
2701 assert_eq!(rsi.stripes[0].physical, 0x10000);
2702 assert_eq!(rsi.stripes[1].devid, 2);
2703 assert_eq!(rsi.stripes[1].physical, 0x20000);
2704 }
2705
    #[test]
    fn raid_stripe_item_no_stripes() {
        // Encoding only — no (devid, physical) pairs follow.
        let buf = 42u64.to_le_bytes();
        let rsi = RaidStripeItem::parse(&buf).unwrap();
        assert_eq!(rsi.encoding, 42);
        assert!(rsi.stripes.is_empty());
    }
2713
    #[test]
    fn raid_stripe_item_too_short() {
        // Fewer than 8 bytes cannot hold the encoding field.
        assert!(RaidStripeItem::parse(&[0; 7]).is_none());
    }
2718
2719 #[test]
2722 fn inode_ref_parse_single() {
2723 let mut buf = Vec::new();
2724 buf.extend_from_slice(&42u64.to_le_bytes()); buf.extend_from_slice(&4u16.to_le_bytes()); buf.extend_from_slice(b"test");
2727 let refs = InodeRef::parse_all(&buf);
2728 assert_eq!(refs.len(), 1);
2729 assert_eq!(refs[0].index, 42);
2730 assert_eq!(refs[0].name, b"test");
2731 }
2732
    #[test]
    fn inode_ref_parse_multiple() {
        // Two back-to-back refs must both be decoded.
        let mut buf = Vec::new();
        buf.extend_from_slice(&1u64.to_le_bytes());
        buf.extend_from_slice(&3u16.to_le_bytes());
        buf.extend_from_slice(b"abc");
        buf.extend_from_slice(&2u64.to_le_bytes());
        buf.extend_from_slice(&2u16.to_le_bytes());
        buf.extend_from_slice(b"xy");
        let refs = InodeRef::parse_all(&buf);
        assert_eq!(refs.len(), 2);
        assert_eq!(refs[0].index, 1);
        assert_eq!(refs[0].name, b"abc");
        assert_eq!(refs[1].index, 2);
        assert_eq!(refs[1].name, b"xy");
    }
2751
2752 #[test]
2753 fn inode_ref_parse_truncated() {
2754 let mut buf = Vec::new();
2756 buf.extend_from_slice(&1u64.to_le_bytes());
2757 buf.extend_from_slice(&10u16.to_le_bytes()); buf.extend_from_slice(b"abc"); let refs = InodeRef::parse_all(&buf);
2760 assert!(refs.is_empty());
2761 }
2762
2763 #[test]
2764 fn inode_extref_parse_single() {
2765 let mut buf = Vec::new();
2766 buf.extend_from_slice(&256u64.to_le_bytes()); buf.extend_from_slice(&3u64.to_le_bytes()); buf.extend_from_slice(&5u16.to_le_bytes()); buf.extend_from_slice(b"hello");
2770 let refs = InodeExtref::parse_all(&buf);
2771 assert_eq!(refs.len(), 1);
2772 assert_eq!(refs[0].parent, 256);
2773 assert_eq!(refs[0].index, 3);
2774 assert_eq!(refs[0].name, b"hello");
2775 }
2776
2777 #[test]
2778 fn dir_item_parse_single() {
2779 let dir_item_size = mem::size_of::<raw::btrfs_dir_item>();
2780 let mut buf = vec![0u8; dir_item_size];
2781 buf[0..8].copy_from_slice(&256u64.to_le_bytes()); buf[8] = 1; buf[9..17].copy_from_slice(&0u64.to_le_bytes()); buf[17..25].copy_from_slice(&100u64.to_le_bytes());
2787 buf[25..27].copy_from_slice(&0u16.to_le_bytes());
2789 buf[27..29].copy_from_slice(&4u16.to_le_bytes());
2791 buf[29] = 1; buf.extend_from_slice(b"file");
2795 let items = DirItem::parse_all(&buf);
2796 assert_eq!(items.len(), 1);
2797 assert_eq!(items[0].transid, 100);
2798 assert_eq!(items[0].file_type, FileType::RegFile);
2799 assert_eq!(items[0].name, b"file");
2800 assert!(items[0].data.is_empty());
2801 }
2802
2803 #[test]
2804 fn root_ref_parse() {
2805 let hdr_size = mem::size_of::<raw::btrfs_root_ref>();
2806 let mut buf = vec![0u8; hdr_size];
2807 buf[0..8].copy_from_slice(&256u64.to_le_bytes()); buf[8..16].copy_from_slice(&7u64.to_le_bytes()); buf[16..18].copy_from_slice(&6u16.to_le_bytes()); buf.extend_from_slice(b"subvol");
2811 let rr = RootRef::parse(&buf).unwrap();
2812 assert_eq!(rr.dirid, 256);
2813 assert_eq!(rr.sequence, 7);
2814 assert_eq!(rr.name, b"subvol");
2815 }
2816
    #[test]
    fn root_ref_too_short() {
        // One byte short of the fixed header must be rejected.
        let hdr_size = mem::size_of::<raw::btrfs_root_ref>();
        assert!(RootRef::parse(&vec![0u8; hdr_size - 1]).is_none());
    }
2822
    #[test]
    fn root_ref_serialize_round_trip() {
        // serialize → parse must preserve all fields; output size is the
        // fixed header plus the name bytes.
        let bytes = RootRef::serialize(256, 42, b"snapshot-1");
        let parsed = RootRef::parse(&bytes).unwrap();
        assert_eq!(parsed.dirid, 256);
        assert_eq!(parsed.sequence, 42);
        assert_eq!(parsed.name, b"snapshot-1");
        assert_eq!(bytes.len(), mem::size_of::<raw::btrfs_root_ref>() + 10);
    }
2833
    #[test]
    fn root_ref_serialize_empty_name() {
        // An empty name is legal and round-trips.
        let bytes = RootRef::serialize(7, 0, b"");
        let parsed = RootRef::parse(&bytes).unwrap();
        assert_eq!(parsed.dirid, 7);
        assert_eq!(parsed.sequence, 0);
        assert!(parsed.name.is_empty());
    }
2842
    #[test]
    fn uuid_item_parse() {
        // Three packed LE u64 subvolume ids.
        let mut buf = Vec::new();
        buf.extend_from_slice(&256u64.to_le_bytes());
        buf.extend_from_slice(&257u64.to_le_bytes());
        buf.extend_from_slice(&258u64.to_le_bytes());
        let ui = UuidItem::parse(&buf);
        assert_eq!(ui.subvol_ids, vec![256, 257, 258]);
    }
2852
    #[test]
    fn uuid_item_empty() {
        // An empty payload yields no subvolume ids.
        let ui = UuidItem::parse(&[]);
        assert!(ui.subvol_ids.is_empty());
    }
2858
#[test]
fn dev_stats_parse() {
    // Five counters in the canonical order: write, read, flush,
    // corruption, generation.
    let mut payload = Vec::new();
    for v in 1u64..=5 {
        payload.extend_from_slice(&v.to_le_bytes());
    }
    let stats = DeviceStats::parse(&payload);
    assert_eq!(stats.values.len(), 5);
    assert_eq!(stats.values[0], ("write_errs".to_string(), 1));
    assert_eq!(stats.values[1], ("read_errs".to_string(), 2));
    assert_eq!(stats.values[2], ("flush_errs".to_string(), 3));
    assert_eq!(stats.values[3], ("corruption_errs".to_string(), 4));
    assert_eq!(stats.values[4], ("generation".to_string(), 5));
}
2875
#[test]
fn dev_stats_partial() {
    // A short buffer yields only as many counters as fully fit in it.
    let mut payload = Vec::new();
    payload.extend_from_slice(&10u64.to_le_bytes());
    payload.extend_from_slice(&20u64.to_le_bytes());
    let stats = DeviceStats::parse(&payload);
    assert_eq!(stats.values.len(), 2);
    assert_eq!(stats.values[0].1, 10);
    assert_eq!(stats.values[1].1, 20);
}
2887
#[test]
fn file_extent_item_inline() {
    // Inline extent: 21-byte header followed directly by the inline data.
    let data = [0xAA; 10];
    let mut buf = vec![0u8; 21 + data.len()];
    buf[0..8].copy_from_slice(&7u64.to_le_bytes()); // generation
    buf[8..16].copy_from_slice(&10u64.to_le_bytes()); // ram_bytes
    buf[16] = 0; // compression: none
    buf[20] = 0; // type: inline
    buf[21..31].copy_from_slice(&data);
    let fei = FileExtentItem::parse(&buf).unwrap();
    assert_eq!(fei.generation, 7);
    assert_eq!(fei.ram_bytes, 10);
    assert_eq!(fei.compression, CompressionType::None);
    assert_eq!(fei.extent_type, FileExtentType::Inline);
    match fei.body {
        FileExtentBody::Inline { inline_size } => assert_eq!(inline_size, 10),
        _ => panic!("expected inline body"),
    }
}
2911
#[test]
fn file_extent_item_regular() {
    // Regular extent: 21-byte header plus four u64 disk-location fields.
    let mut buf = vec![0u8; 53];
    buf[0..8].copy_from_slice(&100u64.to_le_bytes()); // generation
    buf[8..16].copy_from_slice(&4096u64.to_le_bytes()); // ram_bytes
    buf[16] = 1; // compression: zlib
    buf[20] = 1; // type: regular
    buf[21..29].copy_from_slice(&0x100000u64.to_le_bytes()); // disk_bytenr
    buf[29..37].copy_from_slice(&4096u64.to_le_bytes()); // disk_num_bytes
    buf[37..45].copy_from_slice(&0u64.to_le_bytes()); // offset
    buf[45..53].copy_from_slice(&4096u64.to_le_bytes()); // num_bytes
    let fei = FileExtentItem::parse(&buf).unwrap();
    assert_eq!(fei.generation, 100);
    assert_eq!(fei.compression, CompressionType::Zlib);
    assert_eq!(fei.extent_type, FileExtentType::Regular);
    match fei.body {
        FileExtentBody::Regular {
            disk_bytenr,
            disk_num_bytes,
            offset,
            num_bytes,
        } => {
            assert_eq!(disk_bytenr, 0x100000);
            assert_eq!(disk_num_bytes, 4096);
            assert_eq!(offset, 0);
            assert_eq!(num_bytes, 4096);
        }
        _ => panic!("expected regular body"),
    }
}
2942
#[test]
fn file_extent_item_too_short() {
    // Anything shorter than the 21-byte header is rejected.
    let too_short = [0u8; 20];
    assert!(FileExtentItem::parse(&too_short).is_none());
}
2947
#[test]
fn file_extent_item_regular_too_short() {
    // A regular extent needs a body after the header, so a header-only
    // buffer marked as regular must be rejected.
    let mut buf = vec![0u8; 21];
    buf[20] = 1; // type: regular
    assert!(FileExtentItem::parse(&buf).is_none());
}
2955
#[test]
fn file_extent_item_to_bytes_regular_round_trip() {
    // Encode a regular extent and re-parse it; every field must survive.
    let encoded = FileExtentItem::to_bytes_regular(
        42,
        65536,
        CompressionType::Zstd,
        false,
        0x200000,
        4096,
        0,
        65536,
    );
    assert_eq!(encoded.len(), FileExtentItem::REGULAR_SIZE);
    let decoded = FileExtentItem::parse(&encoded).unwrap();
    assert_eq!(decoded.generation, 42);
    assert_eq!(decoded.ram_bytes, 65536);
    assert_eq!(decoded.compression, CompressionType::Zstd);
    assert_eq!(decoded.extent_type, FileExtentType::Regular);
    let FileExtentBody::Regular {
        disk_bytenr,
        disk_num_bytes,
        offset,
        num_bytes,
    } = decoded.body
    else {
        panic!("expected regular body");
    };
    assert_eq!(disk_bytenr, 0x200000);
    assert_eq!(disk_num_bytes, 4096);
    assert_eq!(offset, 0);
    assert_eq!(num_bytes, 65536);
}
2989
#[test]
fn file_extent_item_to_bytes_regular_prealloc_flag() {
    // Passing prealloc = true must mark the extent as Prealloc.
    let encoded = FileExtentItem::to_bytes_regular(
        1,
        4096,
        CompressionType::None,
        true,
        0x10000,
        4096,
        0,
        4096,
    );
    let decoded = FileExtentItem::parse(&encoded).unwrap();
    assert_eq!(decoded.extent_type, FileExtentType::Prealloc);
}
3005
#[test]
fn file_extent_item_to_bytes_inline_round_trip() {
    // Inline serialization appends the payload right after the header.
    let payload = b"hello inline";
    let len = payload.len();
    let encoded = FileExtentItem::to_bytes_inline(7, len as u64, CompressionType::None, payload);
    assert_eq!(encoded.len(), FileExtentItem::HEADER_SIZE + len);
    let decoded = FileExtentItem::parse(&encoded).unwrap();
    assert_eq!(decoded.generation, 7);
    assert_eq!(decoded.ram_bytes, len as u64);
    assert_eq!(decoded.compression, CompressionType::None);
    assert_eq!(decoded.extent_type, FileExtentType::Inline);
    let FileExtentBody::Inline { inline_size } = decoded.body else {
        panic!("expected inline body");
    };
    assert_eq!(inline_size, len);
    assert_eq!(&encoded[FileExtentItem::HEADER_SIZE..], payload);
}
3030
#[test]
fn raw_crc32c_known_value() {
    // Empty input with a zero seed stays zero.
    assert_eq!(raw_crc32c(0, &[]), 0);
    // The crate's raw crc32c differs from the standard (finalized) crc32c
    // of the same input...
    let ours = raw_crc32c(0, b"123456789");
    let reference = crc32c::crc32c(b"123456789");
    assert_eq!(reference, 0xE3069283); // well-known crc32c check value
    assert_ne!(ours, reference);
    // ...but it is still deterministic,
    assert_eq!(ours, raw_crc32c(0, b"123456789"));
    // and chaining more data through the seed changes the result.
    assert_ne!(raw_crc32c(ours, b"more"), ours);
}
3049
#[test]
fn extent_data_ref_hash_deterministic() {
    // Same inputs hash identically; changing the offset changes the hash.
    let first = extent_data_ref_hash(5, 256, 0);
    let second = extent_data_ref_hash(5, 256, 0);
    let shifted = extent_data_ref_hash(5, 256, 4096);
    assert_eq!(first, second);
    assert_ne!(first, shifted);
}
3059
#[test]
fn block_group_flags_type_name() {
    // Each type bit maps to its display name; the global-reserve bit wins.
    let cases = [
        (BlockGroupFlags::DATA, "Data"),
        (BlockGroupFlags::METADATA, "Metadata"),
        (BlockGroupFlags::SYSTEM, "System"),
        (
            BlockGroupFlags::DATA | BlockGroupFlags::METADATA,
            "Data+Metadata",
        ),
        (BlockGroupFlags::GLOBAL_RSV, "GlobalReserve"),
    ];
    for (flags, expected) in cases {
        assert_eq!(flags.type_name(), expected);
    }
}
3071
#[test]
fn block_group_flags_profile_name() {
    // No profile bit means "single"; otherwise the profile bit names itself
    // regardless of the type bits it is combined with.
    assert_eq!(BlockGroupFlags::DATA.profile_name(), "single");
    let cases = [
        (BlockGroupFlags::DUP, "DUP"),
        (BlockGroupFlags::RAID0, "RAID0"),
        (BlockGroupFlags::RAID1, "RAID1"),
        (BlockGroupFlags::RAID10, "RAID10"),
        (BlockGroupFlags::RAID5, "RAID5"),
        (BlockGroupFlags::RAID6, "RAID6"),
        (BlockGroupFlags::RAID1C3, "RAID1C3"),
        (BlockGroupFlags::RAID1C4, "RAID1C4"),
    ];
    for (profile, expected) in cases {
        assert_eq!((BlockGroupFlags::DATA | profile).profile_name(), expected);
    }
}
3108
#[test]
fn extent_item_skinny_size() {
    // A skinny extent item is a fixed 33 bytes.
    let encoded = ExtentItem::to_bytes_skinny(1, 42, 5);
    assert_eq!(encoded.len(), ExtentItem::SKINNY_SIZE);
    assert_eq!(encoded.len(), 33);
}
3115
#[test]
fn extent_item_non_skinny_size() {
    // A non-skinny extent item is a fixed 51 bytes (skinny header plus the
    // embedded key and offset).
    let key = DiskKey {
        objectid: 256,
        key_type: KeyType::InodeItem,
        offset: 0,
    };
    let encoded = ExtentItem::to_bytes_non_skinny(1, 42, 5, &key, 0);
    assert_eq!(encoded.len(), ExtentItem::NON_SKINNY_SIZE);
    assert_eq!(encoded.len(), 51);
}
3127
#[test]
fn extent_item_skinny_non_skinny_header_match() {
    // The first 24 bytes (refs, generation, flags) are identical in both
    // encodings.
    let zero_key = DiskKey {
        objectid: 0,
        key_type: KeyType::from_raw(0),
        offset: 0,
    };
    let skinny = ExtentItem::to_bytes_skinny(1, 42, 5);
    let non_skinny = ExtentItem::to_bytes_non_skinny(1, 42, 5, &zero_key, 0);
    assert_eq!(&skinny[..24], &non_skinny[..24]);
}
3140
#[test]
fn extent_item_flags_are_tree_block() {
    // Bytes 16..24 of the skinny encoding hold the extent flags, which must
    // be TREE_BLOCK for a tree-block extent.
    let encoded = ExtentItem::to_bytes_skinny(1, 42, 5);
    let raw_flags = u64::from_le_bytes(encoded[16..24].try_into().unwrap());
    assert_eq!(raw_flags, ExtentFlags::TREE_BLOCK.bits());
}
3147
#[test]
fn root_item_to_bytes_round_trip() {
    // A freshly built internal root item serializes to the fixed 439-byte
    // layout and parses back with refs initialized to 1.
    let item = RootItem::new_internal(42, 65536, 0);
    let encoded = item.to_bytes();
    assert_eq!(encoded.len(), 439);
    let decoded = RootItem::parse(&encoded).expect("parse failed");
    assert_eq!(decoded.generation, 42);
    assert_eq!(decoded.bytenr, 65536);
    assert_eq!(decoded.level, 0);
    assert_eq!(decoded.refs, 1);
}
3159
#[test]
fn block_group_item_to_bytes_round_trip() {
    // used / chunk_objectid / flags pack into 24 bytes and round-trip intact.
    let original = BlockGroupItem {
        used: 1024 * 1024,
        chunk_objectid: 256,
        flags: BlockGroupFlags::METADATA | BlockGroupFlags::DUP,
    };
    let encoded = original.to_bytes();
    assert_eq!(encoded.len(), 24);
    let decoded = BlockGroupItem::parse(&encoded).unwrap();
    assert_eq!(decoded.used, original.used);
    assert_eq!(decoded.chunk_objectid, original.chunk_objectid);
    assert_eq!(decoded.flags, original.flags);
}
3174
#[test]
fn inode_item_args_to_bytes_size() {
    // InodeItemArgs serializes to the full 160-byte inode item, and the
    // scalar fields survive a re-parse.
    let args = InodeItemArgs {
        generation: 7,
        size: 42,
        nbytes: 4096,
        nlink: 1,
        uid: 1000,
        gid: 1000,
        mode: 0o100644,
        time: Timespec { sec: 100, nsec: 0 },
    };
    let encoded = args.to_bytes();
    assert_eq!(encoded.len(), 160);
    let decoded = InodeItem::parse(&encoded).unwrap();
    assert_eq!(decoded.generation, 7);
    assert_eq!(decoded.size, 42);
    assert_eq!(decoded.nlink, 1);
}
3194
#[test]
fn dir_item_serialize_round_trip() {
    // One serialized dir item parses back as a single entry with the same
    // location, transid, and name.
    let location = DiskKey {
        objectid: 257,
        key_type: KeyType::InodeItem,
        offset: 0,
    };
    let encoded = DirItem::serialize(&location, 7, raw::BTRFS_FT_REG_FILE as u8, b"hello.txt");
    let entries = DirItem::parse_all(&encoded);
    assert_eq!(entries.len(), 1);
    let entry = &entries[0];
    assert_eq!(entry.location.objectid, 257);
    assert_eq!(entry.transid, 7);
    assert_eq!(entry.name, b"hello.txt");
}
3214
#[test]
fn inode_ref_serialize_round_trip() {
    // A single serialized inode ref parses back with index and name intact.
    let encoded = InodeRef::serialize(2, b"hello.txt");
    let decoded = InodeRef::parse_all(&encoded);
    assert_eq!(decoded.len(), 1);
    assert_eq!(decoded[0].index, 2);
    assert_eq!(decoded[0].name, b"hello.txt");
}
3223}