use alloc::collections::BTreeMap;
use alloc::string::String;
use alloc::vec::Vec;
use super::alloc::BlockAllocator;
use super::types::{
BlockMapping, BlockState, MappingFlags, PhysicalBlock, ThinError, ThinResult, VirtualBlock,
VolumeConfig, VolumeStats,
};
/// A thin-provisioned volume.
///
/// Virtual blocks are mapped to physical blocks lazily, so physical space
/// is consumed only for blocks that have actually been written. Snapshots
/// share mappings with their parent via copy-on-write.
#[derive(Debug)]
pub struct ThinVolume {
// Unique volume identifier.
id: u64,
// Human-readable volume name (copied from the config).
name: String,
// Total addressable size in bytes.
virtual_size: u64,
// Size of one block in bytes.
block_size: u64,
// Number of virtual blocks (derived from the config at construction).
virtual_blocks: u64,
// Virtual block index -> mapping entry. Sparse: unmapped blocks absent.
mappings: BTreeMap<u64, BlockMapping>,
// Running counters (allocations, I/O bytes, COW ops, ...).
stats: VolumeStats,
// Configuration this volume was created from.
config: VolumeConfig,
// Whether the volume is currently activated for I/O.
active: bool,
// Parent volume id when this volume is a snapshot.
parent_id: Option<u64>,
// Ids of snapshots taken from this volume.
snapshots: Vec<u64>,
}
impl ThinVolume {
/// Builds an inactive volume from `config` with an empty mapping table.
pub fn new(id: u64, config: VolumeConfig) -> Self {
    let total_blocks = config.virtual_blocks();
    let name = config.name.clone();
    let stats = VolumeStats::new(config.virtual_size);
    Self {
        id,
        name,
        virtual_size: config.virtual_size,
        block_size: config.block_size,
        virtual_blocks: total_blocks,
        mappings: BTreeMap::new(),
        stats,
        config,
        active: false,
        parent_id: None,
        snapshots: Vec::new(),
    }
}
/// Creates a copy-on-write snapshot of this volume.
///
/// Every existing mapping is flagged shared and its reference count is
/// bumped, so a later write to either volume must go through COW. The
/// snapshot starts deactivated, records this volume as its parent, and
/// receives a deep clone of the (now shared) mapping table.
///
/// Fix: `snapshot` was declared `mut` but never mutated (`unused_mut`).
pub fn create_snapshot(&mut self, snapshot_id: u64, name: String) -> ThinVolume {
    // Mark every current mapping as shared between parent and snapshot.
    for mapping in self.mappings.values_mut() {
        mapping.flags.0 |= MappingFlags::SHARED;
        mapping.add_ref();
    }
    let snapshot = ThinVolume {
        id: snapshot_id,
        name,
        virtual_size: self.virtual_size,
        block_size: self.block_size,
        virtual_blocks: self.virtual_blocks,
        mappings: self.mappings.clone(),
        // NOTE(review): the snapshot inherits the parent's counters
        // (including I/O byte counts) verbatim — confirm this is intended.
        stats: self.stats.clone(),
        config: self.config.clone(),
        active: false,
        parent_id: Some(self.id),
        snapshots: Vec::new(),
    };
    self.snapshots.push(snapshot_id);
    snapshot
}
/// Volume identifier.
pub fn id(&self) -> u64 {
self.id
}
/// Volume name.
pub fn name(&self) -> &str {
&self.name
}
/// Total addressable size in bytes.
pub fn virtual_size(&self) -> u64 {
self.virtual_size
}
/// Block size in bytes.
pub fn block_size(&self) -> u64 {
self.block_size
}
/// Number of virtual blocks.
pub fn virtual_blocks(&self) -> u64 {
self.virtual_blocks
}
/// Whether the volume is currently activated for I/O.
pub fn is_active(&self) -> bool {
self.active
}
/// Marks the volume active.
pub fn activate(&mut self) {
self.active = true;
}
/// Marks the volume inactive.
pub fn deactivate(&mut self) {
self.active = false;
}
/// Read-only view of the volume statistics.
pub fn stats(&self) -> &VolumeStats {
&self.stats
}
/// Mutable access to the volume statistics.
pub fn stats_mut(&mut self) -> &mut VolumeStats {
&mut self.stats
}
/// Parent volume id, set only for snapshots.
pub fn parent_id(&self) -> Option<u64> {
self.parent_id
}
/// Ids of snapshots taken from this volume.
pub fn snapshots(&self) -> &[u64] {
&self.snapshots
}
/// True when this volume was created as a snapshot (has a parent).
pub fn is_snapshot(&self) -> bool {
self.parent_id.is_some()
}
/// Borrows the mapping entry for `vblock`, if one exists.
pub fn get_mapping(&self, vblock: VirtualBlock) -> Option<&BlockMapping> {
self.mappings.get(&vblock.0)
}
/// Mutably borrows the mapping entry for `vblock`, if one exists.
pub fn get_mapping_mut(&mut self, vblock: VirtualBlock) -> Option<&mut BlockMapping> {
self.mappings.get_mut(&vblock.0)
}
/// True when `vblock` has a mapping entry in the allocated state.
pub fn is_allocated(&self, vblock: VirtualBlock) -> bool {
self.mappings
.get(&vblock.0)
.map(|m| m.is_allocated())
.unwrap_or(false)
}
/// True when `vblock` is mapped and its mapping is flagged shared
/// (i.e. referenced by a snapshot as well).
pub fn is_shared(&self, vblock: VirtualBlock) -> bool {
self.mappings
.get(&vblock.0)
.map(|m| m.flags.is_shared())
.unwrap_or(false)
}
/// Physical block backing `vblock`, if one is currently assigned.
pub fn get_physical(&self, vblock: VirtualBlock) -> Option<PhysicalBlock> {
self.mappings.get(&vblock.0).and_then(|m| m.physical_block)
}
/// Maps `vblock` to `pblock`, replacing any previous mapping entry.
///
/// Bug fix: re-mapping a virtual block that was already allocated used to
/// increment `allocated_blocks` and `physical_used` a second time. The
/// counters are now only bumped when the block was not previously
/// allocated. Note: a replaced mapping's old physical block is dropped
/// here without being freed — the caller owns that responsibility.
pub fn map_block(&mut self, vblock: VirtualBlock, pblock: PhysicalBlock) {
    let mapping = BlockMapping::allocated(vblock, pblock);
    let previous = self.mappings.insert(vblock.0, mapping);
    let was_allocated = previous.map_or(false, |m| m.is_allocated());
    if !was_allocated {
        self.stats.allocated_blocks += 1;
        self.stats.physical_used += self.block_size;
    }
    self.stats.alloc_ops += 1;
}
/// Unmaps `vblock`, returning the physical block it referenced, if any.
///
/// The mapping entry is kept (in a deallocated state) so sharing flags
/// remain inspectable; only the statistics and state change.
///
/// Bug fix: statistics are now adjusted only when the mapping was
/// actually allocated. Previously, unmapping an already-deallocated
/// entry decremented `allocated_blocks`/`physical_used` and bumped
/// `deallocated_blocks` a second time.
pub fn unmap_block(&mut self, vblock: VirtualBlock) -> Option<PhysicalBlock> {
    let mapping = self.mappings.get_mut(&vblock.0)?;
    if !mapping.is_allocated() {
        // Nothing to release; avoid double-counting stats.
        return None;
    }
    let pblock = mapping.physical_block;
    mapping.deallocate();
    self.stats.allocated_blocks = self.stats.allocated_blocks.saturating_sub(1);
    self.stats.deallocated_blocks += 1;
    self.stats.physical_used = self.stats.physical_used.saturating_sub(self.block_size);
    self.stats.dealloc_ops += 1;
    pblock
}
/// Returns the physical block that must be copied before writing
/// `vblock`, or `None` when the block is unmapped or not shared.
///
/// A copy is required only when the mapping is flagged shared AND more
/// than one reference to it exists.
///
/// Improvement: this is a pure query, so it now takes `&self` instead of
/// `&mut self` — a backward-compatible relaxation for all callers.
pub fn prepare_cow(&self, vblock: VirtualBlock) -> Option<PhysicalBlock> {
    let mapping = self.mappings.get(&vblock.0)?;
    if mapping.flags.is_shared() && mapping.refcount > 1 {
        mapping.physical_block
    } else {
        None
    }
}
/// Finishes a copy-on-write for `vblock`: drops this volume's reference
/// to the old shared block and re-points the mapping at the freshly
/// allocated private block. No-op when `vblock` has no mapping.
pub fn complete_cow(&mut self, vblock: VirtualBlock, new_pblock: PhysicalBlock) {
    let mapping = match self.mappings.get_mut(&vblock.0) {
        Some(m) => m,
        None => return,
    };
    // Release our reference to the previously shared physical block.
    mapping.release();
    // Point at the private copy and clear the sharing state.
    mapping.physical_block = Some(new_pblock);
    mapping.flags.0 &= !MappingFlags::SHARED;
    mapping.refcount = 1;
    self.stats.cow_ops += 1;
}
/// Virtual block containing the given byte `offset`.
pub fn offset_to_block(&self, offset: u64) -> VirtualBlock {
VirtualBlock::from_offset(offset, self.block_size)
}
/// Byte range `[start, end)` covered by `vblock`, with `end` clamped to
/// the volume's virtual size (the last block may be partial).
pub fn block_to_range(&self, vblock: VirtualBlock) -> (u64, u64) {
let start = vblock.offset(self.block_size);
let end = start + self.block_size;
(start, end.min(self.virtual_size))
}
/// Iterator over every virtual block overlapping `[offset, offset+len)`.
/// NOTE(review): the result is not clamped to `virtual_blocks` — callers
/// appear expected to validate the range; confirm.
pub fn range_to_blocks(&self, offset: u64, len: u64) -> impl Iterator<Item = VirtualBlock> {
let start_block = offset / self.block_size;
let end_block = (offset + len).div_ceil(self.block_size);
(start_block..end_block).map(VirtualBlock::new)
}
/// Accumulates `bytes` into the read-byte counter.
pub fn record_read(&mut self, bytes: u64) {
self.stats.bytes_read += bytes;
}
/// Accumulates `bytes` into the written-byte counter.
pub fn record_write(&mut self, bytes: u64) {
self.stats.bytes_written += bytes;
}
/// Physical bytes currently backing this volume (per the stats counter).
pub fn physical_used(&self) -> u64 {
self.stats.physical_used
}
/// Allocation ratio as computed by `VolumeStats::allocation_ratio`.
pub fn allocation_ratio(&self) -> f64 {
self.stats.allocation_ratio()
}
/// Number of mapping entries currently flagged as shared.
pub fn count_shared_blocks(&self) -> u64 {
    let mut shared: u64 = 0;
    for mapping in self.mappings.values() {
        if mapping.flags.is_shared() {
            shared += 1;
        }
    }
    shared
}
/// Rebuilds the per-state block counters and `physical_used` from the
/// mapping table.
///
/// Bug fix: the final never-mapped count previously *assigned* to
/// `unallocated_blocks`, clobbering the `BlockState::Unallocated`
/// entries counted in the loop. It now adds the never-mapped blocks on
/// top, so explicitly unallocated mapping entries are preserved.
pub fn recalculate_stats(&mut self) {
    self.stats.allocated_blocks = 0;
    self.stats.unallocated_blocks = 0;
    self.stats.deallocated_blocks = 0;
    self.stats.shared_blocks = 0;
    self.stats.physical_used = 0;
    for mapping in self.mappings.values() {
        match mapping.state {
            BlockState::Allocated | BlockState::Pending => {
                self.stats.allocated_blocks += 1;
                self.stats.physical_used += self.block_size;
            }
            BlockState::Unallocated => {
                self.stats.unallocated_blocks += 1;
            }
            BlockState::Deallocated => {
                self.stats.deallocated_blocks += 1;
            }
            BlockState::Reserved => {
                // Reserved counts as allocated but adds no physical usage
                // — TODO confirm that asymmetry is intentional.
                self.stats.allocated_blocks += 1;
            }
        }
        if mapping.flags.is_shared() {
            self.stats.shared_blocks += 1;
        }
    }
    // Blocks with no mapping entry at all are also unallocated; add them
    // to the explicitly Unallocated entries tallied above.
    let mapped = self.mappings.len() as u64;
    self.stats.unallocated_blocks += self.virtual_blocks.saturating_sub(mapped);
}
}
/// Pairs a volume with a block allocator for the duration of an I/O
/// operation, so reads/writes can allocate, map and free physical blocks.
#[derive(Debug)]
pub struct IoContext<'a> {
pub volume: &'a mut ThinVolume,
pub allocator: &'a mut BlockAllocator,
}
impl<'a> IoContext<'a> {
pub fn new(volume: &'a mut ThinVolume, allocator: &'a mut BlockAllocator) -> Self {
Self { volume, allocator }
}
/// Physical block backing `vblock`, or `None` for an unmapped (hole)
/// block.
///
/// Improvement: a read lookup mutates nothing, so this now takes `&self`
/// — backward compatible with existing `&mut` callers.
pub fn read_block(&self, vblock: VirtualBlock) -> Option<PhysicalBlock> {
    self.volume.get_physical(vblock)
}
/// Resolves the physical block to write for `vblock`, allocating (and
/// COW-copying a shared block) as needed. Errors propagate from the
/// allocator when no physical block is available.
pub fn write_block(&mut self, vblock: VirtualBlock) -> ThinResult<PhysicalBlock> {
    // Shared mapping: break the sharing with a copy-on-write.
    if self.volume.prepare_cow(vblock).is_some() {
        let fresh = self.allocator.allocate()?;
        self.volume.complete_cow(vblock, fresh);
        return Ok(fresh);
    }
    match self.volume.get_physical(vblock) {
        // Already privately mapped: write in place.
        Some(existing) => Ok(existing),
        // First write to this block: allocate and map it.
        None => {
            let fresh = self.allocator.allocate()?;
            self.volume.map_block(vblock, fresh);
            Ok(fresh)
        }
    }
}
/// Unmaps `vblock` and returns the physical block it occupied, freeing
/// that block back to the allocator unless the (retained) mapping entry
/// is still flagged shared with a snapshot. Returns `None` when nothing
/// was mapped.
pub fn punch_hole(&mut self, vblock: VirtualBlock) -> Option<PhysicalBlock> {
    let pblock = self.volume.unmap_block(vblock)?;
    // The mapping entry survives unmap; consult its sharing flag. A
    // missing entry means no snapshot can reference the block.
    let still_shared = self
        .volume
        .get_mapping(vblock)
        .map(|m| m.flags.is_shared())
        .unwrap_or(false);
    if !still_shared {
        self.allocator.free(pblock);
    }
    Some(pblock)
}
pub fn zero_range(&mut self, offset: u64, len: u64) -> u64 {
let mut zeroed = 0;
for vblock in self.volume.range_to_blocks(offset, len).collect::<Vec<_>>() {
if self.punch_hole(vblock).is_some() {
zeroed += self.volume.block_size;
}
}
zeroed
}
}
#[cfg(test)]
mod tests {
use super::*;
// Helper: builds a 1 GiB volume with id 1 using the config defaults.
fn create_test_volume() -> ThinVolume {
let config = VolumeConfig::new("test-vol", 1024 * 1024 * 1024); ThinVolume::new(1, config)
}
// A fresh volume reports its config and starts inactive, non-snapshot.
#[test]
fn test_volume_creation() {
let vol = create_test_volume();
assert_eq!(vol.id(), 1);
assert_eq!(vol.name(), "test-vol");
assert_eq!(vol.virtual_size(), 1024 * 1024 * 1024);
assert!(!vol.is_active());
assert!(!vol.is_snapshot());
}
// activate/deactivate toggle the active flag.
#[test]
fn test_volume_activation() {
let mut vol = create_test_volume();
assert!(!vol.is_active());
vol.activate();
assert!(vol.is_active());
vol.deactivate();
assert!(!vol.is_active());
}
// Mapping a block makes it allocated, resolvable, and counted in stats.
#[test]
fn test_block_mapping() {
let mut vol = create_test_volume();
let vblock = VirtualBlock::new(100);
let pblock = PhysicalBlock::new(5000);
assert!(!vol.is_allocated(vblock));
assert!(vol.get_physical(vblock).is_none());
vol.map_block(vblock, pblock);
assert!(vol.is_allocated(vblock));
assert_eq!(vol.get_physical(vblock), Some(pblock));
assert_eq!(vol.stats().allocated_blocks, 1);
}
// Unmapping returns the physical block and bumps the deallocated count.
#[test]
fn test_block_unmap() {
let mut vol = create_test_volume();
let vblock = VirtualBlock::new(100);
let pblock = PhysicalBlock::new(5000);
vol.map_block(vblock, pblock);
assert!(vol.is_allocated(vblock));
let freed = vol.unmap_block(vblock);
assert_eq!(freed, Some(pblock));
assert!(!vol.is_allocated(vblock));
assert_eq!(vol.stats().deallocated_blocks, 1);
}
// Byte offsets map to the containing block (floor division semantics).
#[test]
fn test_offset_to_block() {
let vol = create_test_volume();
let block_size = vol.block_size();
assert_eq!(vol.offset_to_block(0).block(), 0);
assert_eq!(vol.offset_to_block(block_size - 1).block(), 0);
assert_eq!(vol.offset_to_block(block_size).block(), 1);
assert_eq!(vol.offset_to_block(block_size * 10 + 100).block(), 10);
}
// A range straddling block boundaries yields every overlapped block.
#[test]
fn test_range_to_blocks() {
let vol = create_test_volume();
let block_size = vol.block_size();
let blocks: Vec<_> = vol
.range_to_blocks(block_size / 2, block_size * 2)
.collect();
assert_eq!(blocks.len(), 3);
assert_eq!(blocks[0].block(), 0);
assert_eq!(blocks[1].block(), 1);
assert_eq!(blocks[2].block(), 2);
}
// A snapshot records its parent and shares the parent's mapped blocks.
#[test]
fn test_snapshot_creation() {
let mut vol = create_test_volume();
let vblock = VirtualBlock::new(100);
let pblock = PhysicalBlock::new(5000);
vol.map_block(vblock, pblock);
let snapshot = vol.create_snapshot(2, "snap1".into());
assert_eq!(snapshot.id(), 2);
assert!(snapshot.is_snapshot());
assert_eq!(snapshot.parent_id(), Some(1));
assert_eq!(vol.get_physical(vblock), snapshot.get_physical(vblock));
assert!(vol.is_shared(vblock));
assert!(snapshot.is_shared(vblock));
}
// COW is only required once a snapshot shares the block.
#[test]
fn test_cow_preparation() {
let mut vol = create_test_volume();
let vblock = VirtualBlock::new(100);
let pblock = PhysicalBlock::new(5000);
vol.map_block(vblock, pblock);
assert!(vol.prepare_cow(vblock).is_none());
let _snap = vol.create_snapshot(2, "snap1".into());
assert_eq!(vol.prepare_cow(vblock), Some(pblock));
}
// record_read/record_write accumulate the byte counters.
#[test]
fn test_statistics_recording() {
let mut vol = create_test_volume();
vol.record_read(4096);
vol.record_write(8192);
assert_eq!(vol.stats().bytes_read, 4096);
assert_eq!(vol.stats().bytes_written, 8192);
}
// A first write through IoContext allocates and maps a physical block.
#[test]
fn test_io_context_write() {
let config = VolumeConfig::new("test", 1024 * 1024);
let mut vol = ThinVolume::new(1, config);
let mut alloc = BlockAllocator::new(10000, 128 * 1024, 0);
let vblock = VirtualBlock::new(5);
{
let mut ctx = IoContext::new(&mut vol, &mut alloc);
let pblock = ctx.write_block(vblock).unwrap();
assert!(pblock.0 < 10000);
}
assert!(vol.is_allocated(vblock));
}
// Punching a hole in an unshared block returns it to the allocator.
#[test]
fn test_io_context_punch_hole() {
let config = VolumeConfig::new("test", 1024 * 1024);
let mut vol = ThinVolume::new(1, config);
let mut alloc = BlockAllocator::new(10000, 128 * 1024, 0);
let vblock = VirtualBlock::new(5);
{
let mut ctx = IoContext::new(&mut vol, &mut alloc);
ctx.write_block(vblock).unwrap();
}
let initial_free = alloc.free_blocks();
{
let mut ctx = IoContext::new(&mut vol, &mut alloc);
ctx.punch_hole(vblock);
}
assert_eq!(alloc.free_blocks(), initial_free + 1);
assert!(!vol.is_allocated(vblock));
}
}