use crate::cache::spacemap::RangeTree;
use crate::fscore::structs::Dva;
use crate::tier::trim::TRIM_REACTOR;
use crate::{FsError, FsResult};
use alloc::sync::Arc;
use alloc::vec::Vec;
use lazy_static::lazy_static;
use spin::Mutex;
/// log2 of the zone size: 1 << 30 bytes, i.e. 1 GiB per zone.
const ZONE_SHIFT: u8 = 30;
/// Allocation state of a single zone (append-only: Empty -> Active -> Full).
#[derive(Debug, PartialEq, Eq)]
pub enum ZoneState {
    Empty,
    Active,
    Full,
    ReadOnly,
}
/// A fixed-size region of the device that only accepts sequential writes.
pub struct Zone {
    pub id: usize,
    /// Absolute byte offset of the zone on the vdev.
    pub start_offset: u64,
    /// Usable bytes in this zone (the tail zone may be smaller than 1 GiB).
    pub capacity: u64,
    /// Next write position, relative to `start_offset`.
    pub write_pointer: u64,
    pub state: ZoneState,
    /// Range bookkeeping for the zone: `free` records dead extents here.
    pub liveness_map: Mutex<RangeTree>,
}
/// Pool-wide allocator state: the zone vector plus the cursor of the zone
/// currently receiving appends.
pub struct SpaceController {
    pub zones: Vec<Arc<Mutex<Zone>>>,
    /// Index of the zone currently open for appends.
    pub active_zone_idx: usize,
    /// Total bytes across all zones.
    pub total_capacity: u64,
    /// Bytes not yet consumed by appends.
    pub total_free: u64,
    /// Guard so `init` runs only once.
    pub initialized: bool,
}
lazy_static! {
    pub static ref ALLOCATOR: Mutex<SpaceController> = Mutex::new(SpaceController {
        zones: Vec::new(),
        active_zone_idx: 0,
        total_capacity: 0,
        total_free: 0,
        initialized: false,
    });
}

// Alias kept for call sites that speak in ZFS metaslab terms.
pub use self::ALLOCATOR as METASLAB;
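
/// Append-only, zone-based block allocator.
///
/// A minimal usage sketch (hypothetical sizes; `Dva` fields as declared in
/// `crate::fscore::structs`):
///
/// ```ignore
/// let mut alloc = ALLOCATOR.lock();
/// alloc.init(4u64 << 30);          // bring a 4 GiB pool online
/// let dva = alloc.allocate(4096)?; // append 4 KiB to the active zone
/// alloc.free(dva, 4096);           // mark dead + queue a discard
/// ```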
impl SpaceController {
    /// Brings the allocator online by carving the disk into zones.
    /// Idempotent: calls after the first are no-ops.
    pub fn init(&mut self, disk_size_bytes: u64) {
        if self.initialized {
            return;
        }
        self.grow_pool(disk_size_bytes);
        crate::lcpfs_println!(
            "[ ALLOC] ZNS Allocator Online. Strategy: Sequential Write / Zone Append."
        );
        self.initialized = true;
    }
    #[cfg(test)]
    pub fn reset_for_testing(&mut self, new_capacity: u64) {
        self.zones.clear();
        self.active_zone_idx = 0;
        self.total_capacity = 0;
        self.total_free = 0;
        self.initialized = false;
        self.init(new_capacity);
    }
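    /// Extends the pool to `new_capacity` total bytes, carving the added
    /// space into 1 GiB zones; the final zone may be a smaller tail.
    ///
    /// A worked sketch of the zone arithmetic (sizes are hypothetical):
    ///
    /// ```ignore
    /// // Growing an empty pool to 2.5 GiB: div_ceil yields three zones of
    /// // 1 GiB, 1 GiB and 0.5 GiB respectively.
    /// let mut alloc = ALLOCATOR.lock();
    /// alloc.grow_pool(5u64 << 29);
    /// assert_eq!(alloc.zones.len(), 3);
    /// ```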
    pub fn grow_pool(&mut self, new_capacity: u64) {
        let start_offset = self.total_capacity;
        let added_size = new_capacity.saturating_sub(start_offset);
        if added_size == 0 {
            return;
        }
        let zone_size = 1u64 << ZONE_SHIFT;
        let new_zones_count = added_size.div_ceil(zone_size);
        crate::lcpfs_println!(
            "[ ALLOC] Initializing {} ZNS Zones (1 GiB each)...",
            new_zones_count
        );
        for i in 0..new_zones_count {
            let id = self.zones.len();
            // Place each new zone directly after the space carved so far.
            // Using the loop index rather than `id` keeps offsets correct
            // when growing a pool that already contains zones.
            let zone_start = start_offset + i * zone_size;
            let this_zone_size = core::cmp::min(
                zone_size,
                (start_offset + added_size).saturating_sub(zone_start),
            );
            if this_zone_size == 0 {
                break;
            }
            let zone = Zone {
                id,
                start_offset: zone_start,
                capacity: this_zone_size,
                write_pointer: 0,
                state: ZoneState::Empty,
                liveness_map: Mutex::new(RangeTree::new(this_zone_size, 0)),
            };
            self.zones.push(Arc::new(Mutex::new(zone)));
        }
        self.total_capacity += added_size;
        self.total_free += added_size;
    }
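    /// Appends `size` bytes at the active zone's write pointer, rolling over
    /// to the next empty zone when the request does not fit.
    ///
    /// A hedged sketch of the rollover behaviour (assumes a freshly reset
    /// two-zone pool):
    ///
    /// ```ignore
    /// let a = alloc.allocate(1u64 << 30)?; // fills zone 0 exactly
    /// let b = alloc.allocate(4096)?;       // forced onto zone 1
    /// assert_eq!(b.offset, 1u64 << 30);
    /// ```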
    pub fn allocate(&mut self, size: u64) -> FsResult<Dva> {
        if self.zones.is_empty() {
            return Err(FsError::PoolNotImported);
        }
        // Fast path: append at the active zone's write pointer.
        if let Some(zone_arc) = self.zones.get(self.active_zone_idx) {
            let mut zone = zone_arc.lock();
            if zone.write_pointer + size <= zone.capacity {
                let offset = zone.start_offset + zone.write_pointer;
                zone.write_pointer += size;
                if zone.state == ZoneState::Empty {
                    zone.state = ZoneState::Active;
                }
                if zone.write_pointer == zone.capacity {
                    zone.state = ZoneState::Full;
                }
                self.total_free = self.total_free.saturating_sub(size);
                // Single-vdev layout: everything lives on vdev 0.
                return Ok(Dva { vdev: 0, offset });
            } else {
                // Sequential-write discipline: a zone we cannot append to is
                // finished, even if tail space remains.
                zone.state = ZoneState::Full;
            }
        }
        // Slow path: scan the other zones for an empty one and open it.
        let start_idx = self.active_zone_idx;
        let count = self.zones.len();
        for i in 1..count {
            let idx = (start_idx + i) % count;
            let zone_arc = &self.zones[idx];
            let mut zone = zone_arc.lock();
            if zone.state == ZoneState::Empty && size <= zone.capacity {
                self.active_zone_idx = idx;
                zone.state = if size == zone.capacity {
                    ZoneState::Full
                } else {
                    ZoneState::Active
                };
                let offset = zone.start_offset;
                zone.write_pointer += size;
                self.total_free = self.total_free.saturating_sub(size);
                crate::lcpfs_println!("[ ALLOC] Switched to Zone {} (Append Mode).", idx);
                return Ok(Dva { vdev: 0, offset });
            }
        }
        crate::lcpfs_println!("[ ALLOC] CRITICAL: No Empty Zones. Garbage Collection Required.");
        Err(FsError::DiskFull { needed_bytes: size })
    }
    pub fn free(&mut self, dva: Dva, size: u64) {
        let zone_size = 1u64 << ZONE_SHIFT;
        // Zones are laid out back to back, so the owning zone is found by
        // simple division (this assumes every non-tail zone is exactly 1 GiB).
        let zone_idx = (dva.offset / zone_size) as usize;
        if let Some(zone_arc) = self.zones.get(zone_idx) {
            let zone = zone_arc.lock();
            let local_offset = dva.offset - zone.start_offset;
            let mut tree = zone.liveness_map.lock();
            tree.free(local_offset, size);
        }
        // Freed space only becomes allocatable again once the zone is reset,
        // so `total_free` is not credited here; the range is handed to the
        // asynchronous TRIM reactor instead.
        let mut reactor = TRIM_REACTOR.lock();
        reactor.queue_discard(dva.offset, size);
    }
}
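
// A smoke test for the append-only discipline. It drives the global
// ALLOCATOR singleton through `reset_for_testing`, so it assumes tests
// touching the allocator do not run concurrently.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn appends_are_sequential_within_a_zone() {
        let mut alloc = ALLOCATOR.lock();
        alloc.reset_for_testing(2u64 << ZONE_SHIFT); // two 1 GiB zones
        let a = alloc.allocate(4096).unwrap();
        let b = alloc.allocate(4096).unwrap();
        // Consecutive allocations land back to back at the write pointer.
        assert_eq!(b.offset, a.offset + 4096);
        assert_eq!(alloc.total_free, alloc.total_capacity - 8192);
    }
}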