use crate::args::Profile;
use btrfs_disk::raw;
/// Start of the initial SYSTEM block group, 1 MiB into the device.
pub const SYSTEM_GROUP_OFFSET: u64 = 1024 * 1024;
/// Size of the initial SYSTEM block group (4 MiB).
pub const SYSTEM_GROUP_SIZE: u64 = 4 * 1024 * 1024;
/// The b-trees this tool lays out; [`TreeId::objectid`] maps each
/// variant to its on-disk objectid constant.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TreeId {
    Root,
    Extent,
    Chunk,
    Dev,
    Fs,
    Csum,
    FreeSpace,
    DataReloc,
    BlockGroup,
}
impl TreeId {
    /// Returns the on-disk objectid for this tree, taken from the raw
    /// btrfs constants.
    #[must_use]
    pub fn objectid(self) -> u64 {
        match self {
            TreeId::Root => u64::from(raw::BTRFS_ROOT_TREE_OBJECTID),
            TreeId::Extent => u64::from(raw::BTRFS_EXTENT_TREE_OBJECTID),
            TreeId::Chunk => u64::from(raw::BTRFS_CHUNK_TREE_OBJECTID),
            TreeId::Dev => u64::from(raw::BTRFS_DEV_TREE_OBJECTID),
            TreeId::Fs => u64::from(raw::BTRFS_FS_TREE_OBJECTID),
            TreeId::Csum => u64::from(raw::BTRFS_CSUM_TREE_OBJECTID),
            TreeId::FreeSpace => u64::from(raw::BTRFS_FREE_SPACE_TREE_OBJECTID),
            // The raw constant is signed; Rust's `as` cast sign-extends,
            // matching the unsigned on-disk representation of a negative
            // objectid.
            #[allow(clippy::cast_sign_loss)]
            TreeId::DataReloc => raw::BTRFS_DATA_RELOC_TREE_OBJECTID as u64,
            TreeId::BlockGroup => {
                u64::from(raw::BTRFS_BLOCK_GROUP_TREE_OBJECTID)
            }
        }
    }
    /// Every tree created unconditionally. Excludes [`TreeId::BlockGroup`],
    /// which is presumably optional (see `BlockLayout::metadata_used`'s
    /// `has_block_group_tree` flag).
    pub const ALL: [TreeId; 8] = [
        TreeId::Root,
        TreeId::Extent,
        TreeId::Chunk,
        TreeId::Dev,
        TreeId::Fs,
        TreeId::Csum,
        TreeId::FreeSpace,
        TreeId::DataReloc,
    ];
    /// Trees that — going by the name — receive ROOT_ITEM entries in the
    /// root tree; excludes Root, Chunk and BlockGroup. TODO confirm
    /// against the caller that writes the root items.
    pub const ROOT_ITEM_TREES: [TreeId; 6] = [
        TreeId::Extent,
        TreeId::Dev,
        TreeId::Fs,
        TreeId::Csum,
        TreeId::FreeSpace,
        TreeId::DataReloc,
    ];
}
/// Trees whose single blocks are laid out back to back in the metadata
/// chunk, in exactly this order (see `BlockLayout::block_addr`). The
/// chunk tree lives in the system chunk instead, and the block-group
/// tree, when present, takes the slot after these.
pub const NON_CHUNK_TREES: [TreeId; 7] = [
    TreeId::Root,
    TreeId::Extent,
    TreeId::Dev,
    TreeId::Fs,
    TreeId::Csum,
    TreeId::FreeSpace,
    TreeId::DataReloc,
];
/// Computes the logical address of each tree's initial block from the
/// node size and the start of the metadata chunk.
pub struct BlockLayout {
    // Size of one tree block in bytes.
    nodesize: u32,
    // Logical address where the metadata chunk begins.
    meta_logical: u64,
}
impl BlockLayout {
    /// Builds a layout for the given tree-block size and the logical
    /// address where the metadata chunk starts.
    #[must_use]
    pub fn new(nodesize: u32, meta_logical: u64) -> Self {
        Self { nodesize, meta_logical }
    }

    /// Logical address of the single block backing `tree`.
    ///
    /// The chunk tree sits at the start of the system chunk; every
    /// other tree occupies one node-sized slot in the metadata chunk,
    /// ordered per `NON_CHUNK_TREES`, with the block-group tree in the
    /// slot immediately after them.
    #[must_use]
    pub fn block_addr(&self, tree: TreeId) -> u64 {
        let node = u64::from(self.nodesize);
        match tree {
            TreeId::Chunk => SYSTEM_GROUP_OFFSET,
            TreeId::BlockGroup => {
                self.meta_logical + NON_CHUNK_TREES.len() as u64 * node
            }
            other => {
                // Every remaining variant appears in NON_CHUNK_TREES,
                // so the lookup cannot fail.
                let slot = NON_CHUNK_TREES
                    .iter()
                    .position(|&t| t == other)
                    .unwrap();
                self.meta_logical + slot as u64 * node
            }
        }
    }

    /// Bytes of the system chunk occupied by tree blocks (one node).
    #[must_use]
    pub fn system_used(&self) -> u64 {
        self.nodesize.into()
    }

    /// Bytes of the metadata chunk occupied by tree blocks, optionally
    /// counting the block-group tree's slot.
    #[must_use]
    pub fn metadata_used(&self, has_block_group_tree: bool) -> u64 {
        let blocks =
            NON_CHUNK_TREES.len() as u64 + u64::from(has_block_group_tree);
        blocks * u64::from(self.nodesize)
    }
}
/// Stripe alignment unit: chunk sizes are rounded down to this (64 KiB).
pub const STRIPE_LEN: u64 = 64 * 1024;
/// One physical copy of a chunk on a particular device.
pub struct StripeInfo {
    // Device holding this stripe.
    pub devid: u64,
    // Physical byte offset of the stripe on that device.
    pub offset: u64,
    pub dev_uuid: uuid::Uuid,
}
/// Logical (and device-1 physical) address where chunk allocation
/// begins, directly after the system block group.
pub const CHUNK_START: u64 = SYSTEM_GROUP_OFFSET + SYSTEM_GROUP_SIZE;
/// Placement of the initial metadata and data chunks: their logical
/// start addresses, sizes, and per-device stripes.
pub struct ChunkLayout {
    pub meta_logical: u64,
    pub meta_size: u64,
    pub meta_stripes: Vec<StripeInfo>,
    pub data_logical: u64,
    pub data_size: u64,
    pub data_stripes: Vec<StripeInfo>,
}
/// Per-device input to chunk planning.
pub struct ChunkDevice {
    pub devid: u64,
    pub total_bytes: u64,
    pub dev_uuid: uuid::Uuid,
}
impl ChunkLayout {
    /// Plans the initial metadata and data chunks for `devices`.
    ///
    /// Each chunk is sized at 10% of the combined device capacity,
    /// clamped to 32 MiB..=256 MiB (metadata) and 64 MiB..=1 GiB
    /// (data), then rounded down to a [`STRIPE_LEN`] multiple.
    ///
    /// Returns `None` when the profile is not handled here, when fewer
    /// devices exist than the profile requires, or when any device is
    /// too small to hold its assigned stripes.
    ///
    /// # Panics
    /// Panics if `devices` is empty.
    #[must_use]
    #[allow(clippy::too_many_lines)]
    #[allow(clippy::similar_names)]
    pub fn new(
        devices: &[ChunkDevice],
        metadata_profile: Profile,
        data_profile: Profile,
    ) -> Option<Self> {
        assert!(!devices.is_empty());
        let total_bytes: u64 = devices.iter().map(|d| d.total_bytes).sum();
        let meta_size =
            (total_bytes / 10).clamp(32 * 1024 * 1024, 256 * 1024 * 1024);
        // Round down so stripe offsets stay STRIPE_LEN-aligned.
        let meta_size = meta_size / STRIPE_LEN * STRIPE_LEN;
        let data_size =
            (total_bytes / 10).clamp(64 * 1024 * 1024, 1024 * 1024 * 1024);
        let data_size = data_size / STRIPE_LEN * STRIPE_LEN;
        let meta_stripes = match metadata_profile {
            // DUP: two copies back to back on the first device.
            Profile::Dup => {
                vec![
                    StripeInfo {
                        devid: devices[0].devid,
                        offset: CHUNK_START,
                        dev_uuid: devices[0].dev_uuid,
                    },
                    StripeInfo {
                        devid: devices[0].devid,
                        offset: CHUNK_START + meta_size,
                        dev_uuid: devices[0].dev_uuid,
                    },
                ]
            }
            // RAID1*: one copy per device, all at the same offset.
            Profile::Raid1 | Profile::Raid1c3 | Profile::Raid1c4 => {
                let n = metadata_profile.num_stripes(devices.len()) as usize;
                if devices.len() < n {
                    return None;
                }
                (0..n)
                    .map(|i| StripeInfo {
                        devid: devices[i].devid,
                        offset: CHUNK_START,
                        dev_uuid: devices[i].dev_uuid,
                    })
                    .collect()
            }
            Profile::Single => {
                vec![StripeInfo {
                    devid: devices[0].devid,
                    offset: CHUNK_START,
                    dev_uuid: devices[0].dev_uuid,
                }]
            }
            // Remaining profiles are unsupported for metadata here.
            _ => {
                return None;
            }
        };
        // First free physical byte on the first device after its
        // metadata stripes (CHUNK_START when it carries none).
        let dev1_meta_end = meta_stripes
            .iter()
            .filter(|s| s.devid == devices[0].devid)
            .map(|s| s.offset + meta_size)
            .max()
            .unwrap_or(CHUNK_START);
        let data_stripes = match data_profile {
            Profile::Single => {
                vec![StripeInfo {
                    devid: devices[0].devid,
                    offset: dev1_meta_end,
                    dev_uuid: devices[0].dev_uuid,
                }]
            }
            // DUP: two copies back to back on the first device.
            Profile::Dup => {
                vec![
                    StripeInfo {
                        devid: devices[0].devid,
                        offset: dev1_meta_end,
                        dev_uuid: devices[0].dev_uuid,
                    },
                    StripeInfo {
                        devid: devices[0].devid,
                        offset: dev1_meta_end + data_size,
                        dev_uuid: devices[0].dev_uuid,
                    },
                ]
            }
            Profile::Raid1
            | Profile::Raid1c3
            | Profile::Raid1c4
            | Profile::Raid0 => {
                let n = data_profile.num_stripes(devices.len()) as usize;
                if devices.len() < n {
                    return None;
                }
                (0..n)
                    .map(|i| {
                        // Place each stripe after that device's own
                        // metadata stripes (CHUNK_START if none).
                        let dev_meta_end = meta_stripes
                            .iter()
                            .filter(|s| s.devid == devices[i].devid)
                            .map(|s| s.offset + meta_size)
                            .max()
                            .unwrap_or(CHUNK_START);
                        StripeInfo {
                            devid: devices[i].devid,
                            offset: dev_meta_end,
                            dev_uuid: devices[i].dev_uuid,
                        }
                    })
                    .collect()
            }
            _ => {
                return None;
            }
        };
        // Reject the layout if any device cannot hold everything
        // assigned to it.
        for dev in devices {
            let used = Self::compute_dev_physical_end(
                dev.devid,
                &meta_stripes,
                meta_size,
                &data_stripes,
                data_size,
            );
            if used > dev.total_bytes {
                return None;
            }
        }
        // Logical address space: metadata chunk first, data directly
        // after it.
        let meta_logical = CHUNK_START;
        let data_logical = CHUNK_START + meta_size;
        Some(ChunkLayout {
            meta_logical,
            meta_size,
            meta_stripes,
            data_logical,
            data_size,
            data_stripes,
        })
    }
    /// Highest physical byte offset used on `devid` by the system
    /// group (device 1 only) plus any metadata/data stripes.
    fn compute_dev_physical_end(
        devid: u64,
        meta_stripes: &[StripeInfo],
        meta_size: u64,
        data_stripes: &[StripeInfo],
        data_size: u64,
    ) -> u64 {
        // Device 1 always carries the fixed system group.
        let mut end = if devid == 1 {
            SYSTEM_GROUP_OFFSET + SYSTEM_GROUP_SIZE
        } else {
            0
        };
        for s in meta_stripes {
            if s.devid == devid {
                end = end.max(s.offset + meta_size);
            }
        }
        for s in data_stripes {
            if s.devid == devid {
                end = end.max(s.offset + data_size);
            }
        }
        end
    }
    /// Total bytes of chunk allocations on `devid`: every stripe copy
    /// counted at full chunk size, plus the system group on device 1.
    #[must_use]
    pub fn dev_bytes_used_for(&self, devid: u64) -> u64 {
        let mut used = if devid == 1 { SYSTEM_GROUP_SIZE } else { 0 };
        for s in &self.meta_stripes {
            if s.devid == devid {
                used += self.meta_size;
            }
        }
        for s in &self.data_stripes {
            if s.devid == devid {
                used += self.data_size;
            }
        }
        used
    }
    /// Raw on-disk bytes consumed across all devices (each mirror
    /// copy counted once per stripe).
    #[must_use]
    pub fn total_bytes_used(&self) -> u64 {
        SYSTEM_GROUP_SIZE
            + (self.meta_stripes.len() as u64 * self.meta_size)
            + (self.data_stripes.len() as u64 * self.data_size)
    }
    /// Maps a logical address to every `(devid, physical)` location
    /// that stores it.
    ///
    /// # Panics
    /// Panics when `logical` lies outside the system, metadata and
    /// data chunks.
    #[must_use]
    pub fn logical_to_physical(&self, logical: u64) -> Vec<(u64, u64)> {
        let sys_range =
            SYSTEM_GROUP_OFFSET..SYSTEM_GROUP_OFFSET + SYSTEM_GROUP_SIZE;
        let meta_range = self.meta_logical..self.meta_logical + self.meta_size;
        let data_range = self.data_logical..self.data_logical + self.data_size;
        if sys_range.contains(&logical) {
            // System chunk: logical equals physical, on device 1.
            vec![(1, logical)]
        } else if meta_range.contains(&logical) {
            let off = logical - self.meta_logical;
            self.meta_stripes
                .iter()
                .map(|s| (s.devid, s.offset + off))
                .collect()
        } else if data_range.contains(&logical) {
            // NOTE(review): every data stripe is mapped at the same
            // intra-chunk offset, which models mirrored copies
            // (DUP/RAID1*). For Raid0 data this would describe
            // duplicates rather than striping — confirm intended.
            let off = logical - self.data_logical;
            self.data_stripes
                .iter()
                .map(|s| (s.devid, s.offset + off))
                .collect()
        } else {
            panic!("logical address {logical:#x} not in any chunk")
        }
    }
}
/// Bump allocator handing out node-sized tree blocks from the fixed
/// system chunk and from a metadata chunk.
pub struct BlockAllocator {
    // Size of one tree block in bytes.
    nodesize: u32,
    // System chunk bounds and allocation cursor.
    system_start: u64,
    next_system: u64,
    system_end: u64,
    // Metadata chunk bounds and allocation cursor.
    meta_start: u64,
    next_meta: u64,
    meta_end: u64,
}
impl BlockAllocator {
    /// Creates an allocator over the fixed system chunk and a metadata
    /// chunk of `meta_size` bytes starting at `meta_logical`.
    #[must_use]
    pub fn new(nodesize: u32, meta_logical: u64, meta_size: u64) -> Self {
        Self {
            nodesize,
            system_start: SYSTEM_GROUP_OFFSET,
            next_system: SYSTEM_GROUP_OFFSET,
            system_end: SYSTEM_GROUP_OFFSET + SYSTEM_GROUP_SIZE,
            meta_start: meta_logical,
            next_meta: meta_logical,
            meta_end: meta_logical + meta_size,
        }
    }

    /// Shared bump step: hands out one node-sized block at `*cursor`,
    /// failing with `msg` once the block would cross `end`.
    fn bump(
        cursor: &mut u64,
        end: u64,
        nodesize: u32,
        msg: &str,
    ) -> anyhow::Result<u64> {
        let step = u64::from(nodesize);
        let addr = *cursor;
        if addr + step > end {
            anyhow::bail!("{}", msg);
        }
        *cursor = addr + step;
        Ok(addr)
    }

    /// Allocates the next tree block inside the system chunk.
    pub fn alloc_system(&mut self) -> anyhow::Result<u64> {
        Self::bump(
            &mut self.next_system,
            self.system_end,
            self.nodesize,
            "system chunk full: cannot allocate more tree blocks",
        )
    }

    /// Allocates the next tree block inside the metadata chunk.
    pub fn alloc_metadata(&mut self) -> anyhow::Result<u64> {
        Self::bump(
            &mut self.next_meta,
            self.meta_end,
            self.nodesize,
            "metadata chunk full: cannot allocate more tree blocks",
        )
    }

    /// Bytes handed out from the system chunk so far.
    #[must_use]
    pub fn system_used(&self) -> u64 {
        self.next_system - self.system_start
    }

    /// Bytes handed out from the metadata chunk so far.
    #[must_use]
    pub fn metadata_used(&self) -> u64 {
        self.next_meta - self.meta_start
    }

    /// Rewinds both cursors so allocation starts over from each
    /// chunk's first block.
    pub fn reset(&mut self) {
        self.next_meta = self.meta_start;
        self.next_system = self.system_start;
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Allocations advance node-by-node from each chunk's start.
    #[test]
    fn block_allocator_basic() {
        let mut alloc =
            BlockAllocator::new(16384, CHUNK_START, 32 * 1024 * 1024);
        let a1 = alloc.alloc_system().unwrap();
        assert_eq!(a1, SYSTEM_GROUP_OFFSET);
        let a2 = alloc.alloc_metadata().unwrap();
        assert_eq!(a2, CHUNK_START);
        let a3 = alloc.alloc_metadata().unwrap();
        assert_eq!(a3, CHUNK_START + 16384);
        assert_eq!(alloc.system_used(), 16384);
        assert_eq!(alloc.metadata_used(), 32768);
    }

    // Reset rewinds both cursors to their chunk starts.
    #[test]
    fn block_allocator_reset() {
        let mut alloc =
            BlockAllocator::new(16384, CHUNK_START, 32 * 1024 * 1024);
        alloc.alloc_system().unwrap();
        alloc.alloc_metadata().unwrap();
        alloc.reset();
        assert_eq!(alloc.system_used(), 0);
        assert_eq!(alloc.metadata_used(), 0);
        let a1 = alloc.alloc_system().unwrap();
        assert_eq!(a1, SYSTEM_GROUP_OFFSET);
    }

    // Tree blocks are laid out in NON_CHUNK_TREES order, one node each.
    #[test]
    fn block_addresses() {
        let meta_logical = CHUNK_START;
        let layout = BlockLayout::new(16384, meta_logical);
        assert_eq!(layout.block_addr(TreeId::Chunk), SYSTEM_GROUP_OFFSET);
        assert_eq!(layout.block_addr(TreeId::Root), meta_logical);
        assert_eq!(layout.block_addr(TreeId::Extent), meta_logical + 16384);
        assert_eq!(layout.block_addr(TreeId::Dev), meta_logical + 2 * 16384);
        assert_eq!(layout.block_addr(TreeId::Fs), meta_logical + 3 * 16384);
        assert_eq!(layout.block_addr(TreeId::Csum), meta_logical + 4 * 16384);
        assert_eq!(
            layout.block_addr(TreeId::FreeSpace),
            meta_logical + 5 * 16384
        );
        assert_eq!(
            layout.block_addr(TreeId::DataReloc),
            meta_logical + 6 * 16384
        );
    }

    #[test]
    fn system_and_metadata_used() {
        let layout = BlockLayout::new(16384, CHUNK_START);
        assert_eq!(layout.system_used(), 16384);
        assert_eq!(layout.metadata_used(false), 7 * 16384);
        assert_eq!(layout.metadata_used(true), 8 * 16384);
    }

    // Fixed UUIDs keep the fixtures deterministic.
    fn test_uuid() -> uuid::Uuid {
        uuid::Uuid::parse_str("deadbeef-dead-beef-dead-beefdeadbeef").unwrap()
    }

    fn single_device(size: u64) -> Vec<ChunkDevice> {
        vec![ChunkDevice {
            devid: 1,
            total_bytes: size,
            dev_uuid: test_uuid(),
        }]
    }

    // 256 MiB device: sizes hit the clamp minimums (32 MiB / 64 MiB).
    #[test]
    fn chunk_layout_256m() {
        let devs = single_device(256 * 1024 * 1024);
        let cl =
            ChunkLayout::new(&devs, Profile::Dup, Profile::Single).unwrap();
        assert_eq!(cl.meta_size, 32 * 1024 * 1024);
        assert_eq!(cl.data_size, 64 * 1024 * 1024);
        assert_eq!(cl.meta_stripes.len(), 2);
        assert_eq!(cl.meta_stripes[0].offset, CHUNK_START);
        assert_eq!(cl.meta_stripes[1].offset, CHUNK_START + 32 * 1024 * 1024);
        assert_eq!(cl.data_stripes.len(), 1);
        assert_eq!(cl.data_stripes[0].offset, CHUNK_START + 64 * 1024 * 1024);
        assert_eq!(cl.meta_logical, CHUNK_START);
        assert_eq!(cl.data_logical, CHUNK_START + 32 * 1024 * 1024);
    }

    // 1 GiB device: both sizes land inside the clamp range (10%).
    #[test]
    fn chunk_layout_1g() {
        let devs = single_device(1024 * 1024 * 1024);
        let cl =
            ChunkLayout::new(&devs, Profile::Dup, Profile::Single).unwrap();
        let expected_stripe =
            (1024 * 1024 * 1024 / 10) / STRIPE_LEN * STRIPE_LEN;
        assert_eq!(cl.meta_size, expected_stripe);
        assert_eq!(cl.data_size, expected_stripe);
    }

    // 10 GiB device: sizes hit the clamp maximums (256 MiB / 1 GiB).
    #[test]
    fn chunk_layout_10g() {
        let devs = single_device(10 * 1024 * 1024 * 1024);
        let cl =
            ChunkLayout::new(&devs, Profile::Dup, Profile::Single).unwrap();
        assert_eq!(cl.meta_size, 256 * 1024 * 1024);
        assert_eq!(cl.data_size, 1024 * 1024 * 1024);
    }

    // 100 MiB cannot hold 2x32 MiB metadata + 64 MiB data.
    #[test]
    fn chunk_layout_too_small() {
        let devs = single_device(100 * 1024 * 1024);
        assert!(
            ChunkLayout::new(&devs, Profile::Dup, Profile::Single).is_none()
        );
    }

    #[test]
    fn chunk_layout_total_bytes_used() {
        let devs = single_device(256 * 1024 * 1024);
        let cl =
            ChunkLayout::new(&devs, Profile::Dup, Profile::Single).unwrap();
        assert_eq!(
            cl.total_bytes_used(),
            SYSTEM_GROUP_SIZE + 2 * 32 * 1024 * 1024 + 64 * 1024 * 1024
        );
    }

    #[test]
    fn chunk_layout_dev_bytes_used_single_device() {
        let devs = single_device(256 * 1024 * 1024);
        let cl =
            ChunkLayout::new(&devs, Profile::Dup, Profile::Single).unwrap();
        assert_eq!(
            cl.dev_bytes_used_for(1),
            SYSTEM_GROUP_SIZE + 2 * 32 * 1024 * 1024 + 64 * 1024 * 1024
        );
    }

    fn two_devices(size: u64) -> Vec<ChunkDevice> {
        let uuid2 =
            uuid::Uuid::parse_str("cafebabe-cafe-babe-cafe-babecafebabe")
                .unwrap();
        vec![
            ChunkDevice {
                devid: 1,
                total_bytes: size,
                dev_uuid: test_uuid(),
            },
            ChunkDevice {
                devid: 2,
                total_bytes: size,
                dev_uuid: uuid2,
            },
        ]
    }

    // RAID1 metadata mirrors across both devices at the same offset.
    #[test]
    fn chunk_layout_raid1_stripes() {
        let devs = two_devices(256 * 1024 * 1024);
        let cl =
            ChunkLayout::new(&devs, Profile::Raid1, Profile::Single).unwrap();
        assert_eq!(cl.meta_stripes.len(), 2);
        assert_eq!(cl.meta_stripes[0].devid, 1);
        assert_eq!(cl.meta_stripes[0].offset, CHUNK_START);
        assert_eq!(cl.meta_stripes[1].devid, 2);
        assert_eq!(cl.meta_stripes[1].offset, CHUNK_START);
        assert_eq!(cl.data_stripes.len(), 1);
        assert_eq!(cl.data_stripes[0].devid, 1);
        assert_eq!(cl.data_stripes[0].offset, CHUNK_START + cl.meta_size);
    }

    // Device 2 carries only its metadata mirror, no system group.
    #[test]
    fn chunk_layout_raid1_dev_bytes() {
        let devs = two_devices(256 * 1024 * 1024);
        let cl =
            ChunkLayout::new(&devs, Profile::Raid1, Profile::Single).unwrap();
        assert_eq!(
            cl.dev_bytes_used_for(1),
            SYSTEM_GROUP_SIZE + cl.meta_size + cl.data_size
        );
        assert_eq!(cl.dev_bytes_used_for(2), cl.meta_size);
    }

    // Mirrored metadata resolves to one location per device.
    #[test]
    fn logical_to_physical_returns_devid() {
        let devs = two_devices(256 * 1024 * 1024);
        let cl =
            ChunkLayout::new(&devs, Profile::Raid1, Profile::Single).unwrap();
        let sys = cl.logical_to_physical(SYSTEM_GROUP_OFFSET);
        assert_eq!(sys, vec![(1, SYSTEM_GROUP_OFFSET)]);
        let meta = cl.logical_to_physical(cl.meta_logical);
        assert_eq!(meta.len(), 2);
        assert_eq!(meta[0].0, 1);
        assert_eq!(meta[1].0, 2);
    }
}