use crate::{
raw::{
BTRFS_BLOCK_GROUP_ITEM_KEY, BTRFS_CHUNK_ITEM_KEY,
BTRFS_CHUNK_TREE_OBJECTID, BTRFS_EXTENT_TREE_OBJECTID,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
},
space::BlockGroupFlags,
tree_search::{Key, SearchFilter, tree_search},
};
use btrfs_disk::items::ChunkItem;
use std::os::unix::io::BorrowedFd;
/// Total bytes allocated on one device for one block-group profile.
///
/// Produced by [`device_chunk_allocations`], which merges all chunk
/// stripes sharing the same `(devid, flags)` pair into a single entry.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DeviceAllocation {
// Device id the stripes live on.
pub devid: u64,
// Block-group profile/type flags (DATA, METADATA, RAID level, ...).
pub flags: BlockGroupFlags,
// Sum of stripe lengths accumulated for this (devid, flags) pair.
pub bytes: u64,
}
/// One stripe of one chunk, as reported by [`chunk_list`].
///
/// A chunk with N stripes yields N `ChunkEntry` values that share the
/// same `logical_start`, `length`, `flags`, and `used`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ChunkEntry {
// Device the stripe resides on.
pub devid: u64,
// Byte offset of the stripe on that device.
pub physical_start: u64,
// Logical (filesystem) address of the chunk (the chunk item's key offset).
pub logical_start: u64,
// Chunk length in bytes (shared by all stripes of the chunk).
pub length: u64,
// Block-group profile/type flags of the chunk.
pub flags: BlockGroupFlags,
// Bytes in use per the matching block-group item; 0 if it could not be read.
pub used: u64,
}
/// Walks the chunk tree and sums allocated bytes per `(device, flags)` pair.
///
/// Every parseable chunk item contributes its stripe length once per
/// stripe it carries; unparseable items are silently skipped.
///
/// # Errors
///
/// Propagates any error returned by the underlying `tree_search` ioctl.
pub fn device_chunk_allocations(
    fd: BorrowedFd,
) -> nix::Result<Vec<DeviceAllocation>> {
    let filter = SearchFilter::for_type(
        u64::from(BTRFS_CHUNK_TREE_OBJECTID),
        BTRFS_CHUNK_ITEM_KEY,
    );
    let mut totals: Vec<DeviceAllocation> = Vec::new();
    tree_search(fd, filter, |_hdr, data| {
        if let Some((stripe_len, flags, devids)) = parse_chunk(data) {
            devids
                .into_iter()
                .for_each(|id| accumulate(&mut totals, id, flags, stripe_len));
        }
        Ok(())
    })?;
    Ok(totals)
}
/// Enumerates every chunk in the chunk tree, emitting one [`ChunkEntry`]
/// per stripe.
///
/// The chunk's logical address is taken from the item key's offset, and
/// the `used` figure is looked up best-effort from the extent tree (a
/// failed or empty lookup reads as 0).
///
/// # Errors
///
/// Propagates any error returned by the outer `tree_search` call.
pub fn chunk_list(fd: BorrowedFd) -> nix::Result<Vec<ChunkEntry>> {
    let filter = SearchFilter::for_objectid_range(
        u64::from(BTRFS_CHUNK_TREE_OBJECTID),
        BTRFS_CHUNK_ITEM_KEY,
        u64::from(BTRFS_FIRST_CHUNK_TREE_OBJECTID),
        u64::from(BTRFS_FIRST_CHUNK_TREE_OBJECTID),
    );
    let mut entries: Vec<ChunkEntry> = Vec::new();
    tree_search(fd, filter, |hdr, data| {
        // Skip items the disk-format parser cannot decode.
        let chunk = match ChunkItem::parse(data) {
            Some(chunk) => chunk,
            None => return Ok(()),
        };
        let logical_start = hdr.offset;
        let flags =
            BlockGroupFlags::from_bits_truncate(chunk.chunk_type.bits());
        // Best-effort: a missing/unreadable block-group item counts as 0 used.
        let used = block_group_used(fd, logical_start).unwrap_or(0);
        entries.extend(chunk.stripes.iter().map(|stripe| ChunkEntry {
            devid: stripe.devid,
            physical_start: stripe.offset,
            logical_start,
            length: chunk.length,
            flags,
            used,
        }));
        Ok(())
    })?;
    Ok(entries)
}
/// Fetches the `used` byte count of the block group anchored at
/// `logical_start`, by searching the extent tree for its
/// BLOCK_GROUP_ITEM.
///
/// Returns `None` when the search ioctl fails or no block-group item
/// parses; if several items match, the last one wins.
fn block_group_used(fd: BorrowedFd, logical_start: u64) -> Option<u64> {
    // Both search bounds share everything except the key offset.
    let key_at = |offset| Key {
        objectid: logical_start,
        item_type: BTRFS_BLOCK_GROUP_ITEM_KEY,
        offset,
    };
    let filter = SearchFilter {
        tree_id: u64::from(BTRFS_EXTENT_TREE_OBJECTID),
        start: key_at(0),
        end: key_at(u64::MAX),
        min_transid: 0,
        max_transid: u64::MAX,
    };
    let mut used: Option<u64> = None;
    tree_search(fd, filter, |_hdr, data| {
        if let Some(bg) = btrfs_disk::items::BlockGroupItem::parse(data) {
            used = Some(bg.used);
        }
        Ok(())
    })
    .ok()?;
    used
}
/// Decodes a raw chunk item into `(stripe_len, flags, stripe devids)`.
///
/// Returns `None` whenever `ChunkItem::parse` rejects the buffer
/// (e.g. it is too short for the fixed header).
fn parse_chunk(data: &[u8]) -> Option<(u64, BlockGroupFlags, Vec<u64>)> {
    ChunkItem::parse(data).map(|chunk| {
        (
            chunk.stripe_len,
            BlockGroupFlags::from_bits_truncate(chunk.chunk_type.bits()),
            chunk.stripes.iter().map(|stripe| stripe.devid).collect(),
        )
    })
}
/// Folds `bytes` into the allocation bucket keyed by `(devid, flags)`,
/// creating the bucket on first sight.
///
/// A linear scan is used because the number of distinct
/// device/profile pairs on a filesystem is small.
fn accumulate(
    allocs: &mut Vec<DeviceAllocation>,
    devid: u64,
    flags: BlockGroupFlags,
    bytes: u64,
) {
    let existing = allocs
        .iter_mut()
        .find(|a| a.devid == devid && a.flags == flags);
    match existing {
        Some(entry) => entry.bytes += bytes,
        None => allocs.push(DeviceAllocation { devid, flags, bytes }),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Serializes a minimal on-disk chunk item: the fixed header fields
    /// in order, then one record per stripe (devid, offset, 16-byte uuid).
    /// `num_stripes` is written as given so it may disagree with
    /// `stripes.len()` on purpose.
    fn build_chunk_buf(
        length: u64,
        stripe_len: u64,
        type_bits: u64,
        num_stripes: u16,
        stripes: &[(u64, u64)],
    ) -> Vec<u8> {
        let mut buf = Vec::new();
        buf.extend_from_slice(&length.to_le_bytes());
        buf.extend_from_slice(&0u64.to_le_bytes()); // owner
        buf.extend_from_slice(&stripe_len.to_le_bytes());
        buf.extend_from_slice(&type_bits.to_le_bytes());
        buf.extend_from_slice(&4096u32.to_le_bytes()); // io_align
        buf.extend_from_slice(&4096u32.to_le_bytes()); // io_width
        buf.extend_from_slice(&4096u32.to_le_bytes()); // sector_size
        buf.extend_from_slice(&num_stripes.to_le_bytes());
        buf.extend_from_slice(&0u16.to_le_bytes()); // sub_stripes
        for &(devid, offset) in stripes {
            buf.extend_from_slice(&devid.to_le_bytes());
            buf.extend_from_slice(&offset.to_le_bytes());
            buf.extend_from_slice(&[0u8; 16]); // dev uuid (zeroed)
        }
        buf
    }

    #[test]
    fn parse_chunk_single_stripe() {
        let buf = build_chunk_buf(
            1024 * 1024,
            65536,
            BlockGroupFlags::DATA.bits(),
            1,
            &[(1, 0)],
        );
        let (stripe_len, flags, devids) = parse_chunk(&buf).unwrap();
        assert_eq!(stripe_len, 65536);
        assert_eq!(flags, BlockGroupFlags::DATA);
        assert_eq!(devids, vec![1]);
    }

    #[test]
    fn parse_chunk_two_stripes() {
        let combined = BlockGroupFlags::DATA | BlockGroupFlags::RAID1;
        let buf = build_chunk_buf(
            1 << 30,
            1 << 30,
            combined.bits(),
            2,
            &[(1, 0), (2, 4096)],
        );
        let (_, flags, devids) = parse_chunk(&buf).unwrap();
        assert_eq!(flags, combined);
        assert_eq!(devids, vec![1, 2]);
    }

    #[test]
    fn parse_chunk_too_short() {
        // Far smaller than the fixed header — must be rejected.
        assert!(parse_chunk(&[0u8; 10]).is_none());
    }

    #[test]
    fn parse_chunk_claims_more_stripes_than_fit() {
        // Header advertises 5 stripes but only 1 is present; the parser
        // should yield just the stripes that physically fit.
        let buf = build_chunk_buf(1024, 1024, 0, 5, &[(1, 0)]);
        let (_, _, devids) = parse_chunk(&buf).expect("short stripe array");
        assert_eq!(devids.len(), 1);
    }

    #[test]
    fn accumulate_new_entry() {
        let mut allocs = Vec::new();
        accumulate(&mut allocs, 1, BlockGroupFlags::DATA, 1000);
        assert_eq!(allocs.len(), 1);
        assert_eq!(allocs[0].devid, 1);
        assert_eq!(allocs[0].bytes, 1000);
    }

    #[test]
    fn accumulate_merge_same_devid_flags() {
        let mut allocs = Vec::new();
        accumulate(&mut allocs, 1, BlockGroupFlags::DATA, 1000);
        accumulate(&mut allocs, 1, BlockGroupFlags::DATA, 2000);
        assert_eq!(allocs.len(), 1);
        assert_eq!(allocs[0].bytes, 3000);
    }

    #[test]
    fn accumulate_separate_different_flags() {
        let mut allocs = Vec::new();
        accumulate(&mut allocs, 1, BlockGroupFlags::DATA, 1000);
        accumulate(&mut allocs, 1, BlockGroupFlags::METADATA, 2000);
        assert_eq!(allocs.len(), 2);
    }

    #[test]
    fn accumulate_separate_different_devids() {
        let mut allocs = Vec::new();
        accumulate(&mut allocs, 1, BlockGroupFlags::DATA, 1000);
        accumulate(&mut allocs, 2, BlockGroupFlags::DATA, 2000);
        assert_eq!(allocs.len(), 2);
    }
}