// lcpfs 2026.1.102
//
// LCP File System - A ZFS-inspired copy-on-write filesystem for Rust
// Copyright 2025 LunaOS Contributors
// SPDX-License-Identifier: Apache-2.0

//! # Space Map (Range Tree)
//!
//! This module implements free space management using a range tree data
//! structure with automatic coalescing of adjacent free regions.
//!
//! ## Overview
//!
//! The space map tracks free space in the pool as a sorted collection of
//! (offset, size) ranges. When blocks are freed, adjacent ranges are
//! automatically merged to minimize fragmentation.
//!
//! ## Data Structure
//!
//! ```text
//! Range Tree (BTreeMap):
//!   Offset 0x1000 --> Size 4096    (4 KB free)
//!   Offset 0x5000 --> Size 8192    (8 KB free)
//!   Offset 0xA000 --> Size 16384   (16 KB free)
//! ```
//!
//! ## Allocation Strategy
//!
//! Uses first-fit allocation with 4KB alignment:
//! 1. Scan for first segment >= requested size
//! 2. Allocate from start of segment
//! 3. Shrink or remove the segment
//!
//! ## Coalescing
//!
//! When freeing space, the allocator checks for adjacent free segments:
//! - Merge with left neighbor if `left.end == freed.start`
//! - Merge with right neighbor if `freed.end == right.start`
//! - Results in larger contiguous free regions

use alloc::collections::BTreeMap;

/// Range tree for tracking free space.
///
/// Free space is modeled as a sorted set of `(offset, size)` byte ranges.
/// `allocate` carves from the front of the first fitting segment (first-fit)
/// and `free` re-inserts ranges, merging with adjacent neighbors.
pub struct RangeTree {
    /// Map of start offset -> size (bytes) for free segments.
    /// Sorted by offset via `BTreeMap`, which the first-fit scan and the
    /// neighbor-coalescing logic in `free` rely on.
    /// NOTE(review): segments are expected to be disjoint and non-adjacent
    /// after coalescing — verify callers never free overlapping ranges.
    pub segments: BTreeMap<u64, u64>,
    /// Total free space tracked, in bytes; updated by `allocate`/`free` and
    /// intended to equal the sum of all segment sizes.
    pub total_free: u64,
}

impl RangeTree {
    /// Create a new range tree with initial free space.
    ///
    /// `disk_size` is the total device size in bytes; `reserved_start` is the
    /// byte offset where usable space begins (the reserved boot/label area at
    /// the front is excluded). If the reservation covers the whole disk, the
    /// tree starts empty.
    pub fn new(disk_size: u64, reserved_start: u64) -> Self {
        let mut segments = BTreeMap::new();
        // Initial state: one giant free segment covering the whole disk
        // (minus the reserved boot/label area at the start).
        let usable_size = disk_size.saturating_sub(reserved_start);
        if usable_size > 0 {
            segments.insert(reserved_start, usable_size);
        }

        Self {
            segments,
            total_free: usable_size,
        }
    }

    /// Round `size` up to the next 4 KB boundary.
    ///
    /// Returns `None` if the addition would overflow `u64`.
    fn align_up(size: u64) -> Option<u64> {
        size.checked_add(4095).map(|s| s & !4095)
    }

    /// Allocates space (first-fit strategy), 4 KB-aligned.
    ///
    /// Returns the offset of the allocated region, or `None` when no free
    /// segment is large enough (disk full) or the request overflows when
    /// aligned.
    ///
    /// A zero-byte request is rounded up to one full 4 KB block; previously
    /// `size == 0` aligned to 0 and "allocated" the same offset repeatedly
    /// without consuming any space.
    pub fn allocate(&mut self, size: u64) -> Option<u64> {
        // FIX: clamp to at least 1 byte so size == 0 still consumes a block,
        // and use checked_add so sizes near u64::MAX can't wrap to 0.
        let aligned_size = Self::align_up(size.max(1))?;

        // First-fit: lowest-offset segment big enough for the request.
        // (Two-step find-then-remove keeps the borrow checker happy across
        // editions.)
        let target_key = self
            .segments
            .iter()
            .find(|&(_, seg_size)| *seg_size >= aligned_size)
            .map(|(offset, _)| *offset);

        if let Some(offset) = target_key {
            // SAFETY INVARIANT: offset was just found via find() above; remove() on
            // an existing key always returns Some.
            debug_assert!(
                self.segments.contains_key(&offset),
                "offset found via find() above must exist"
            );
            let available_size = self.segments.remove(&offset)?;

            // Carve the allocation off the front of the segment; keep any
            // remainder as a new (smaller) free segment.
            if available_size > aligned_size {
                let new_start = offset + aligned_size;
                let new_size = available_size - aligned_size;
                self.segments.insert(new_start, new_size);
            }

            self.total_free -= aligned_size;
            return Some(offset);
        }

        None // Disk Full
    }

    /// Check if an offset is currently allocated (not in free tree).
    ///
    /// Returns true if the offset is allocated, false if free.
    pub fn is_allocated(&self, offset: u64) -> bool {
        // The only free segment that could contain `offset` is the one with
        // the greatest start <= offset, so a single O(log n) ordered lookup
        // replaces the previous O(n) scan. `saturating_add` guards against
        // a segment whose end would wrap past u64::MAX.
        if let Some((&seg_start, &seg_size)) = self.segments.range(..=offset).next_back() {
            if offset < seg_start.saturating_add(seg_size) {
                return false; // Offset is in a free segment
            }
        }
        true // Not in any free segment, so it's allocated
    }

    /// Frees space and coalesces neighbors.
    ///
    /// Freeing zero bytes is a harmless no-op.
    ///
    /// # Errors
    ///
    /// Returns an error if any part of the range is already free (double-free
    /// detection) or if the range overflows the address space when aligned.
    /// This prevents corruption from freeing the same block twice.
    pub fn free(&mut self, offset: u64, size: u64) -> Result<(), &'static str> {
        // FIX: size == 0 used to insert a zero-length segment into the tree.
        if size == 0 {
            return Ok(());
        }
        let aligned_size =
            Self::align_up(size).ok_or("Free size overflows when aligned to 4 KB")?;
        let range_end = offset
            .checked_add(aligned_size)
            .ok_or("Freed range overflows u64 address space")?;

        // Double-free protection, part 1: is the start of the range free?
        if !self.is_allocated(offset) {
            crate::lcpfs_println!("[ SPACE] WARNING: Double-free attempted at 0x{:x}!", offset);
            return Err("Double-free detected: block already free");
        }
        // Part 2 (FIX): reject ranges whose tail overlaps a free segment that
        // starts inside them — the start-only check above misses this case
        // and the insert below would silently corrupt the tree.
        if self.segments.range(offset..range_end).next().is_some() {
            crate::lcpfs_println!("[ SPACE] WARNING: Double-free attempted at 0x{:x}!", offset);
            return Err("Double-free detected: block already free");
        }

        let mut start = offset;
        let mut len = aligned_size;

        // 1. Check LEFT Neighbor: merge if it ends exactly where we begin.
        // Copy the candidate out first so the map can be mutated afterwards.
        let left_neighbor = self
            .segments
            .range(..offset)
            .next_back()
            .map(|(&o, &s)| (o, s));

        if let Some((l_off, l_size)) = left_neighbor {
            if l_off + l_size == offset {
                start = l_off;
                len += l_size;
                self.segments.remove(&l_off);
            }
        }

        // 2. Check RIGHT Neighbor: merge if it starts exactly where we end.
        if let Some(&r_size) = self.segments.get(&range_end) {
            len += r_size;
            self.segments.remove(&range_end);
        }

        // 3. Insert the new super-segment.
        self.segments.insert(start, len);
        self.total_free += aligned_size;

        crate::lcpfs_println!(
            "[ SPACE] Freed 0x{:x} -> Merged: 0x{:x} len {}",
            offset,
            start,
            len
        );
        Ok(())
    }

    /// Legacy free without error handling (for backwards compatibility).
    /// Prefer `free()` which provides double-free detection.
    pub fn free_unchecked(&mut self, offset: u64, size: u64) {
        let _ = self.free(offset, size);
    }
}