use alloc::collections::BTreeMap;
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use lazy_static::lazy_static;
use spin::Mutex;
/// On-device header written ahead of each cached block's payload.
/// `#[repr(C)]` pins the field layout so the struct can be stored raw.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct L2ArcHeader {
    pub magic: u64,
    pub block_id: u64,
    pub data_size: u32,
    pub checksum: u32,
    pub timestamp: u64,
    pub access_count: u32,
    pub compression: u8,
    pub pad: [u8; 3],
}

impl L2ArcHeader {
    /// Magic marker; the bytes 0x4C 0x32 0x41 0x52 0x43 spell "L2ARC".
    const MAGIC: u64 = 0x4C32415243;

    /// Builds a fresh header for `block_id`; checksum, timestamp,
    /// access count, and compression flag all start at zero.
    pub fn new(block_id: u64, data_size: u32) -> Self {
        L2ArcHeader {
            magic: Self::MAGIC,
            block_id,
            data_size,
            checksum: 0,
            timestamp: 0,
            access_count: 0,
            compression: 0,
            pad: [0; 3],
        }
    }

    /// True when the magic number matches, i.e. the bytes look like a
    /// header this module wrote.
    pub fn is_valid(&self) -> bool {
        self.magic == Self::MAGIC
    }

    /// Simple additive checksum: the wrapping byte-sum of the payload.
    pub fn calculate_checksum(data: &[u8]) -> u32 {
        let mut sum: u32 = 0;
        for &byte in data {
            sum = sum.wrapping_add(u32::from(byte));
        }
        sum
    }
}
/// In-memory index record describing one block cached on the SSD.
#[derive(Debug, Clone)]
pub struct L2ArcEntry {
/// Id of the cached block (also the map key in `L2Arc::index`).
pub block_id: u64,
/// Byte offset of the block's data on the backing device.
pub ssd_offset: u64,
/// Payload size in bytes.
pub data_size: u32,
/// Logical-clock value of the most recent access; drives LRU eviction.
pub timestamp: u64,
/// Times the entry has been touched (1 at insert, +1 per lookup).
pub access_count: u32,
/// True when the cached copy must be written back before eviction.
pub dirty: bool,
}
// Process-wide L2ARC singleton, guarded by a spin::Mutex (consistent
// with the `alloc`-only imports above). External callers go through the
// `L2ArcEngine` facade rather than touching this directly.
lazy_static! {
static ref L2ARC: Mutex<L2Arc> = Mutex::new(L2Arc::new());
}
/// Hook invoked before a dirty entry is evicted: receives the block id,
/// its SSD offset, and payload size. Returning `Err` aborts the
/// eviction (see `L2Arc::evict_lru`).
pub type WritebackCallback =
fn(block_id: u64, ssd_offset: u64, data_size: u32) -> Result<(), &'static str>;
/// Second-level ARC cache: an in-memory index over blocks staged on a
/// fast secondary device (e.g. an SSD).
///
/// Space on the device is handed out by a simple bump allocator
/// (`next_offset`). Evicting an entry only removes it from the index;
/// its bytes become reusable only once the cache drains completely and
/// the cursor is reset to zero (see `insert`).
pub struct L2Arc {
    /// block_id -> location/metadata of the cached copy.
    index: BTreeMap<u64, L2ArcEntry>,
    /// Backing device id; `None` until `init` has been called.
    ssd_device: Option<u64>,
    /// Total capacity of the backing device, in bytes.
    ssd_capacity: u64,
    /// Bump-allocation cursor: offset of the next free byte.
    next_offset: u64,
    /// Number of successful lookups.
    hits: u64,
    /// Number of failed lookups.
    misses: u64,
    /// Logical clock bumped on each insert/lookup; drives LRU ordering.
    timestamp: u64,
    /// Optional hook used to write dirty entries back before eviction.
    writeback_cb: Option<WritebackCallback>,
}

impl Default for L2Arc {
    fn default() -> Self {
        Self::new()
    }
}

impl L2Arc {
    /// Creates an empty cache with no backing device attached.
    pub fn new() -> Self {
        Self {
            index: BTreeMap::new(),
            ssd_device: None,
            ssd_capacity: 0,
            next_offset: 0,
            hits: 0,
            misses: 0,
            timestamp: 0,
            writeback_cb: None,
        }
    }

    /// Registers the callback used to flush dirty entries on eviction.
    pub fn set_writeback_callback(&mut self, callback: WritebackCallback) {
        self.writeback_cb = Some(callback);
    }

    /// Attaches a backing device and resets allocation state.
    /// Any existing index entries are discarded.
    pub fn init(&mut self, device_id: u64, capacity: u64) {
        self.ssd_device = Some(device_id);
        self.ssd_capacity = capacity;
        self.next_offset = 0;
        self.index.clear();
    }

    /// Looks up a block, updating LRU/access bookkeeping and the
    /// hit/miss counters. Returns a clone of the entry on a hit.
    pub fn lookup(&mut self, block_id: u64) -> Option<L2ArcEntry> {
        if let Some(entry) = self.index.get_mut(&block_id) {
            self.timestamp += 1;
            entry.timestamp = self.timestamp;
            entry.access_count += 1;
            self.hits += 1;
            Some(entry.clone())
        } else {
            self.misses += 1;
            None
        }
    }

    /// Reserves space for `block_id` and records it in the index,
    /// returning the SSD offset assigned to the block.
    ///
    /// Returns `None` when the block can never fit (`data_size` larger
    /// than the whole device) or when eviction fails (empty cache, or a
    /// dirty entry whose writeback callback errored).
    pub fn insert(&mut self, block_id: u64, data_size: u32) -> Option<u64> {
        let needed = data_size as u64;
        // A block bigger than the entire device can never be cached.
        if needed > self.ssd_capacity {
            return None;
        }
        // BUGFIX: previously a single eviction was attempted and the
        // insert then proceeded even if `next_offset` still exceeded
        // capacity, handing out offsets past the end of the device.
        // Since `evict_lru` never rewinds the bump cursor, room only
        // appears once the cache is fully drained — evict until that
        // happens, then reset the cursor.
        while self.next_offset + needed > self.ssd_capacity {
            self.evict_lru()?;
            if self.index.is_empty() {
                self.next_offset = 0;
            }
        }
        let ssd_offset = self.next_offset;
        self.next_offset += needed;
        self.timestamp += 1;
        let entry = L2ArcEntry {
            block_id,
            ssd_offset,
            data_size,
            timestamp: self.timestamp,
            access_count: 1,
            dirty: false,
        };
        self.index.insert(block_id, entry);
        Some(ssd_offset)
    }

    /// Evicts the least-recently-used entry (smallest timestamp),
    /// writing it back first when dirty. Returns the number of bytes the
    /// entry occupied, or `None` if the cache is empty or the writeback
    /// callback failed.
    fn evict_lru(&mut self) -> Option<u64> {
        let (block_id, entry) = self
            .index
            .iter()
            .min_by_key(|(_, entry)| entry.timestamp)
            .map(|(id, entry)| (*id, entry.clone()))?;
        if entry.dirty {
            if let Some(callback) = self.writeback_cb {
                // Refuse to drop a dirty entry whose writeback failed.
                if callback(block_id, entry.ssd_offset, entry.data_size).is_err() {
                    return None;
                }
            }
        }
        self.index.remove(&block_id);
        Some(entry.data_size as u64)
    }

    /// Drops a block from the index (no writeback, no space reclaim).
    pub fn remove(&mut self, block_id: u64) {
        self.index.remove(&block_id);
    }

    /// Flags a cached block as dirty so eviction will write it back.
    /// No-op when the block is not cached.
    pub fn mark_dirty(&mut self, block_id: u64) {
        if let Some(entry) = self.index.get_mut(&block_id) {
            entry.dirty = true;
        }
    }

    /// Returns `(hits, misses, entry_count, capacity, used_bytes)`.
    pub fn get_stats(&self) -> (u64, u64, usize, u64, u64) {
        (
            self.hits,
            self.misses,
            self.index.len(),
            self.ssd_capacity,
            self.next_offset,
        )
    }

    /// Clears the dirty flag on every entry and returns how many were
    /// dirty. NOTE: does not invoke the writeback callback — callers are
    /// expected to have persisted the data themselves.
    pub fn flush(&mut self) -> usize {
        let mut flushed = 0;
        for entry in self.index.values_mut() {
            if entry.dirty {
                entry.dirty = false;
                flushed += 1;
            }
        }
        flushed
    }

    /// Placeholder for persisting the index to the device: no I/O is
    /// performed yet; only the would-be serialized size (count word plus
    /// one in-memory entry per record) is computed.
    pub fn persist_index(&self) -> Result<u64, &'static str> {
        if self.ssd_device.is_none() {
            return Err("L2ARC not initialized");
        }
        let bytes_written =
            core::mem::size_of::<u64>() + self.index.len() * core::mem::size_of::<L2ArcEntry>();
        Ok(bytes_written as u64)
    }

    /// Placeholder for reloading the index from the device: currently a
    /// no-op that reports the in-memory entry count.
    pub fn load_index(&mut self) -> Result<usize, &'static str> {
        if self.ssd_device.is_none() {
            return Err("L2ARC not initialized");
        }
        Ok(self.index.len())
    }
}
/// Stateless facade over the global [`L2ARC`] singleton; each method
/// holds the spin lock only for the duration of a single call.
pub struct L2ArcEngine;
impl L2ArcEngine {
    /// Attaches the global cache to a device, discarding prior state.
    pub fn init(device_id: u64, capacity: u64) {
        L2ARC.lock().init(device_id, capacity);
    }
    /// Lookup with LRU / hit-miss accounting; clones the entry on a hit.
    pub fn lookup(block_id: u64) -> Option<L2ArcEntry> {
        L2ARC.lock().lookup(block_id)
    }
    /// Caches a block, returning its assigned SSD offset.
    pub fn insert(block_id: u64, data_size: u32) -> Option<u64> {
        L2ARC.lock().insert(block_id, data_size)
    }
    /// Drops a block from the index without writeback.
    pub fn remove(block_id: u64) {
        L2ARC.lock().remove(block_id);
    }
    /// Returns `(hits, misses, entries, capacity, used)`.
    pub fn get_stats() -> (u64, u64, usize, u64, u64) {
        L2ARC.lock().get_stats()
    }
    /// Reports the would-be size of a persisted index (placeholder).
    pub fn persist() -> Result<u64, &'static str> {
        L2ARC.lock().persist_index()
    }
    /// Reports the entry count of the (placeholder) reloaded index.
    pub fn load() -> Result<usize, &'static str> {
        L2ARC.lock().load_index()
    }
    /// Clears every dirty flag; returns how many entries were dirty.
    pub fn flush() -> usize {
        L2ARC.lock().flush()
    }
    /// Marks a cached block dirty so eviction will write it back.
    pub fn mark_dirty(block_id: u64) {
        L2ARC.lock().mark_dirty(block_id);
    }
    /// Installs the dirty-eviction writeback hook.
    pub fn set_writeback_callback(callback: WritebackCallback) {
        L2ARC.lock().set_writeback_callback(callback);
    }
    /// True once `init` has attached a backing device.
    pub fn is_initialized() -> bool {
        L2ARC.lock().ssd_device.is_some()
    }
    /// Hit percentage; 0.0 when no lookups have happened yet.
    pub fn hit_rate() -> f64 {
        let cache = L2ARC.lock();
        let total = cache.hits + cache.misses;
        if total == 0 {
            0.0
        } else {
            (cache.hits as f64 / total as f64) * 100.0
        }
    }
    /// Percentage of device capacity consumed by the bump allocator.
    pub fn utilization() -> f64 {
        let cache = L2ARC.lock();
        if cache.ssd_capacity == 0 {
            0.0
        } else {
            (cache.next_offset as f64 / cache.ssd_capacity as f64) * 100.0
        }
    }
    /// Evicts LRU entries toward a free-space target; returns bytes freed.
    pub fn evict_to_target(target_free_bytes: u64) -> u64 {
        L2ARC.lock().evict_to_target(target_free_bytes)
    }
    /// Bulk-inserts absent blocks; returns how many were added.
    pub fn prefetch(block_ids: &[u64], data_size: u32) -> usize {
        L2ARC.lock().prefetch(block_ids, data_size)
    }
}
/// Tuning knobs for L2ARC behaviour.
/// NOTE(review): not consumed by the cache logic in this file; field
/// meanings are inferred from their names and defaults — confirm
/// against callers.
#[derive(Debug, Clone)]
pub struct L2ArcConfig {
    pub max_size: u64,
    pub compress: bool,
    pub compress_algo: u8,
    pub min_access_count: u32,
    pub max_age: u64,
    pub persist_index: bool,
    pub write_buffer_size: u64,
    pub enable_trim: bool,
}

impl Default for L2ArcConfig {
    /// Defaults: `max_size` 0, compression enabled with algorithm 1,
    /// `min_access_count` 2, one-week `max_age` (in seconds), index
    /// persistence on, 16 MiB write buffer, TRIM enabled.
    fn default() -> Self {
        L2ArcConfig {
            max_size: 0,
            compress: true,
            compress_algo: 1,
            min_access_count: 2,
            max_age: 7 * 24 * 3600,
            persist_index: true,
            write_buffer_size: 16 * 1024 * 1024,
            enable_trim: true,
        }
    }
}
/// Extended statistics snapshot for the L2ARC (see
/// `L2Arc::get_detailed_stats` for which fields are populated).
#[derive(Debug, Clone, Default)]
pub struct L2ArcStats {
    pub hits: u64,
    pub misses: u64,
    pub entries: usize,
    pub capacity: u64,
    pub used: u64,
    pub bytes_read: u64,
    pub bytes_written: u64,
    pub evictions: u64,
    pub writebacks: u64,
    pub compression_ratio: f32,
    pub avg_latency_us: u64,
}

impl L2ArcStats {
    /// Hit percentage over all lookups; 0.0 when there were none.
    pub fn hit_rate(&self) -> f64 {
        let total = self.hits + self.misses;
        if total == 0 {
            return 0.0;
        }
        (self.hits as f64 / total as f64) * 100.0
    }

    /// Percentage of capacity in use; 0.0 for a zero-capacity device.
    pub fn utilization(&self) -> f64 {
        match self.capacity {
            0 => 0.0,
            cap => (self.used as f64 / cap as f64) * 100.0,
        }
    }
}
/// Descriptor for a physical device backing the L2ARC.
/// NOTE(review): not referenced by the cache logic in this file; field
/// semantics are inferred from names — confirm against callers.
#[derive(Debug, Clone)]
pub struct L2ArcDevice {
/// Device identifier (presumably the id passed to `L2Arc::init`).
pub device_id: u64,
/// Device path string (informational).
pub path: String,
/// Total device capacity in bytes.
pub capacity: u64,
/// Device block size in bytes.
pub block_size: u32,
/// Current lifecycle state of the device.
pub state: L2ArcDeviceState,
/// Cumulative count of write failures.
pub write_errors: u64,
/// Cumulative count of read failures.
pub read_errors: u64,
}
/// Lifecycle states for an L2ARC cache device. Variant semantics are
/// not exercised in this file; names follow the usual online /
/// initializing / degraded / offline / removing progression.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum L2ArcDeviceState {
Online,
Initializing,
Degraded,
Offline,
Removing,
}
impl L2Arc {
    /// Evicts LRU entries until accounted free space reaches
    /// `target_free_bytes`; returns the number of bytes "freed".
    ///
    /// NOTE(review): the bump allocator never rewinds `next_offset`, so
    /// the freed bytes are bookkeeping only — they do not become
    /// allocatable until the cache fully drains (see `insert`).
    pub fn evict_to_target(&mut self, target_free_bytes: u64) -> u64 {
        let mut freed = 0u64;
        let current_free = self.ssd_capacity.saturating_sub(self.next_offset);
        while current_free + freed < target_free_bytes {
            match self.evict_lru() {
                Some(bytes) => freed += bytes,
                // Cache empty or a dirty writeback failed: stop early.
                None => break,
            }
        }
        freed
    }

    /// Inserts any not-yet-cached blocks from `block_ids`, each with the
    /// same `data_size`. Stops at the first failed insert and returns
    /// the number of blocks actually added.
    pub fn prefetch(&mut self, block_ids: &[u64], data_size: u32) -> usize {
        let mut count = 0;
        for &block_id in block_ids {
            if self.index.contains_key(&block_id) {
                continue;
            }
            if self.insert(block_id, data_size).is_some() {
                count += 1;
            } else {
                break;
            }
        }
        count
    }

    /// Builds an extended statistics snapshot. I/O, eviction, and
    /// latency counters are not tracked yet and are reported as
    /// zero / identity values.
    pub fn get_detailed_stats(&self) -> L2ArcStats {
        L2ArcStats {
            hits: self.hits,
            misses: self.misses,
            entries: self.index.len(),
            capacity: self.ssd_capacity,
            used: self.next_offset,
            bytes_read: 0,
            bytes_written: 0,
            evictions: 0,
            writebacks: 0,
            compression_ratio: 1.0,
            avg_latency_us: 0,
        }
    }

    /// Presence check that does not disturb LRU state or counters.
    pub fn contains(&self, block_id: u64) -> bool {
        self.index.contains_key(&block_id)
    }

    /// Borrows an entry without updating timestamps or hit/miss counts.
    pub fn peek(&self, block_id: u64) -> Option<&L2ArcEntry> {
        self.index.get(&block_id)
    }

    /// Drops every entry whose block id lies in
    /// `start_block..=end_block` (inclusive). No writeback is performed.
    pub fn invalidate_range(&mut self, start_block: u64, end_block: u64) {
        // Single in-place pass; avoids the collect-then-remove Vec.
        self.index
            .retain(|&id, _| id < start_block || id > end_block);
    }

    /// Empties the cache and resets the allocator and hit/miss counters.
    /// The logical clock is deliberately left running so pre-clear
    /// timestamps remain unique.
    pub fn clear(&mut self) {
        self.index.clear();
        self.next_offset = 0;
        self.hits = 0;
        self.misses = 0;
    }

    /// Serializes the index as: a little-endian u64 entry count, then
    /// per entry block_id(8) | ssd_offset(8) | data_size(4) |
    /// timestamp(8) | access_count(4) | dirty(1).
    pub fn serialize_index(&self) -> Vec<u8> {
        let mut data = Vec::new();
        let count = self.index.len() as u64;
        data.extend_from_slice(&count.to_le_bytes());
        for (block_id, entry) in &self.index {
            data.extend_from_slice(&block_id.to_le_bytes());
            data.extend_from_slice(&entry.ssd_offset.to_le_bytes());
            data.extend_from_slice(&entry.data_size.to_le_bytes());
            data.extend_from_slice(&entry.timestamp.to_le_bytes());
            data.extend_from_slice(&entry.access_count.to_le_bytes());
            data.push(if entry.dirty { 1 } else { 0 });
        }
        data
    }

    /// Rebuilds the index from bytes produced by `serialize_index`,
    /// replacing the current contents. Returns the entry count.
    pub fn deserialize_index(&mut self, data: &[u8]) -> Result<usize, &'static str> {
        if data.len() < 8 {
            return Err("Invalid index data: too short");
        }
        let count = u64::from_le_bytes(data[0..8].try_into().unwrap()) as usize;
        // Wire size per entry: 8 + 8 + 4 + 8 + 4 + 1 bytes.
        let entry_size = 8 + 8 + 4 + 8 + 4 + 1;
        // BUGFIX: `8 + count * entry_size` could overflow `usize` for a
        // hostile/corrupt count, wrapping past the bounds check (release)
        // or panicking (debug). Use checked arithmetic and reject.
        let needed = count
            .checked_mul(entry_size)
            .and_then(|bytes| bytes.checked_add(8))
            .ok_or("Invalid index data: count overflow")?;
        if data.len() < needed {
            return Err("Invalid index data: truncated");
        }
        self.index.clear();
        let mut offset = 8;
        for _ in 0..count {
            let block_id = u64::from_le_bytes(data[offset..offset + 8].try_into().unwrap());
            offset += 8;
            let ssd_offset = u64::from_le_bytes(data[offset..offset + 8].try_into().unwrap());
            offset += 8;
            let data_size = u32::from_le_bytes(data[offset..offset + 4].try_into().unwrap());
            offset += 4;
            let timestamp = u64::from_le_bytes(data[offset..offset + 8].try_into().unwrap());
            offset += 8;
            let access_count = u32::from_le_bytes(data[offset..offset + 4].try_into().unwrap());
            offset += 4;
            let dirty = data[offset] != 0;
            offset += 1;
            self.index.insert(
                block_id,
                L2ArcEntry {
                    block_id,
                    ssd_offset,
                    data_size,
                    timestamp,
                    access_count,
                    dirty,
                },
            );
        }
        Ok(count)
    }
}
impl L2ArcEngine {
    /// Extended statistics snapshot of the global cache.
    pub fn get_detailed_stats() -> L2ArcStats {
        L2ARC.lock().get_detailed_stats()
    }
    /// Presence check that does not disturb LRU state.
    pub fn contains(block_id: u64) -> bool {
        L2ARC.lock().contains(block_id)
    }
    /// Drops all entries with ids in `start_block..=end_block`.
    pub fn invalidate_range(start_block: u64, end_block: u64) {
        L2ARC.lock().invalidate_range(start_block, end_block);
    }
    /// Empties the global cache and resets its counters.
    pub fn clear() {
        L2ARC.lock().clear();
    }
    /// Snapshots the index in its little-endian wire format.
    pub fn serialize_index() -> Vec<u8> {
        L2ARC.lock().serialize_index()
    }
    /// Replaces the index from serialized bytes; returns the entry count.
    pub fn deserialize_index(data: &[u8]) -> Result<usize, &'static str> {
        L2ARC.lock().deserialize_index(data)
    }
}
#[cfg(test)]
mod tests {
use super::*;
// init records the device id and capacity on a fresh cache.
#[test]
fn test_l2arc_init() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 1_000_000);
assert_eq!(l2arc.ssd_device, Some(1));
assert_eq!(l2arc.ssd_capacity, 1_000_000);
}
// First insert lands at offset 0; lookup bumps access_count (1 -> 2)
// and counts as a hit.
#[test]
fn test_l2arc_insert_lookup() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 1_000_000);
let offset = l2arc
.insert(100, 4096)
.expect("test: operation should succeed");
assert_eq!(offset, 0);
let entry = l2arc.lookup(100).expect("test: operation should succeed");
assert_eq!(entry.block_id, 100);
assert_eq!(entry.ssd_offset, 0);
assert_eq!(entry.data_size, 4096);
assert_eq!(entry.access_count, 2); assert_eq!(l2arc.hits, 1);
assert_eq!(l2arc.misses, 0);
}
// Looking up an absent block increments misses, not hits.
#[test]
fn test_l2arc_miss() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 1_000_000);
assert!(l2arc.lookup(999).is_none());
assert_eq!(l2arc.hits, 0);
assert_eq!(l2arc.misses, 1);
}
// remove drops the index entry so a later lookup misses.
#[test]
fn test_l2arc_remove() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 1_000_000);
l2arc.insert(100, 4096);
l2arc.remove(100);
assert!(l2arc.lookup(100).is_none());
}
// get_stats reports (hits, misses, entries, capacity, used bytes).
#[test]
fn test_l2arc_stats() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 1_000_000);
l2arc.insert(1, 4096);
l2arc.insert(2, 8192);
l2arc.lookup(1);
l2arc.lookup(999);
let (hits, misses, entries, capacity, used) = l2arc.get_stats();
assert_eq!(hits, 1);
assert_eq!(misses, 1);
assert_eq!(entries, 2);
assert_eq!(capacity, 1_000_000);
assert_eq!(used, 4096 + 8192);
}
// A freshly built header carries the magic number and the given fields.
#[test]
fn test_l2arc_header() {
let header = L2ArcHeader::new(123, 4096);
assert!(header.is_valid());
assert_eq!(header.block_id, 123);
assert_eq!(header.data_size, 4096);
}
// The checksum is deterministic and input-dependent.
#[test]
fn test_checksum_calculation() {
let data = b"test data";
let checksum1 = L2ArcHeader::calculate_checksum(data);
let checksum2 = L2ArcHeader::calculate_checksum(data);
assert_eq!(checksum1, checksum2);
let different = b"different";
let checksum3 = L2ArcHeader::calculate_checksum(different);
assert_ne!(checksum1, checksum3); }
// Filling the device and then evicting toward a target reports at
// least the requested byte count as freed.
#[test]
fn test_l2arc_evict_to_target() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 100_000);
for i in 0..10 {
l2arc.insert(i, 10_000);
}
assert_eq!(l2arc.index.len(), 10);
assert_eq!(l2arc.next_offset, 100_000);
let freed = l2arc.evict_to_target(30_000);
assert!(freed >= 30_000);
}
// prefetch inserts only absent blocks; the second pass adds nothing.
#[test]
fn test_l2arc_prefetch() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 1_000_000);
let block_ids: Vec<u64> = (100..110).collect();
let count = l2arc.prefetch(&block_ids, 4096);
assert_eq!(count, 10);
assert_eq!(l2arc.index.len(), 10);
let count2 = l2arc.prefetch(&block_ids, 4096);
assert_eq!(count2, 0);
}
// contains reflects insertion without touching LRU state.
#[test]
fn test_l2arc_contains() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 1_000_000);
assert!(!l2arc.contains(100));
l2arc.insert(100, 4096);
assert!(l2arc.contains(100));
}
// peek returns the entry without incrementing the hit counter.
#[test]
fn test_l2arc_peek() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 1_000_000);
l2arc.insert(100, 4096);
let before_hits = l2arc.hits;
let entry = l2arc.peek(100);
assert!(entry.is_some());
assert_eq!(l2arc.hits, before_hits);
}
// invalidate_range is inclusive on both endpoints (25..=74 here).
#[test]
fn test_l2arc_invalidate_range() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 1_000_000);
for i in 0..100 {
l2arc.insert(i, 1000);
}
assert_eq!(l2arc.index.len(), 100);
l2arc.invalidate_range(25, 74);
assert_eq!(l2arc.index.len(), 50);
assert!(l2arc.contains(0));
assert!(l2arc.contains(24));
assert!(!l2arc.contains(25));
assert!(!l2arc.contains(74));
assert!(l2arc.contains(75));
assert!(l2arc.contains(99));
}
// clear resets the index, the bump cursor, and hit/miss counters.
#[test]
fn test_l2arc_clear() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 1_000_000);
for i in 0..10 {
l2arc.insert(i, 4096);
}
l2arc.lookup(0);
l2arc.lookup(999);
assert_eq!(l2arc.index.len(), 10);
assert_eq!(l2arc.hits, 1);
assert_eq!(l2arc.misses, 1);
l2arc.clear();
assert_eq!(l2arc.index.len(), 0);
assert_eq!(l2arc.next_offset, 0);
assert_eq!(l2arc.hits, 0);
assert_eq!(l2arc.misses, 0);
}
// A serialize/deserialize round-trip preserves entries and dirty flags,
// even when loaded into a differently configured cache.
#[test]
fn test_l2arc_serialize_deserialize() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 1_000_000);
for i in 0..5 {
l2arc.insert(i * 100, 4096);
}
l2arc.mark_dirty(200);
let data = l2arc.serialize_index();
assert!(!data.is_empty());
let mut l2arc2 = L2Arc::new();
l2arc2.init(2, 2_000_000);
let count = l2arc2.deserialize_index(&data).unwrap();
assert_eq!(count, 5);
assert_eq!(l2arc2.index.len(), 5);
let entry = l2arc2.peek(200).unwrap();
assert!(entry.dirty);
assert_eq!(entry.data_size, 4096);
}
// get_detailed_stats mirrors the basic counters plus capacity/usage.
#[test]
fn test_l2arc_detailed_stats() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 1_000_000);
l2arc.insert(1, 4096);
l2arc.insert(2, 8192);
l2arc.lookup(1);
l2arc.lookup(999);
let stats = l2arc.get_detailed_stats();
assert_eq!(stats.hits, 1);
assert_eq!(stats.misses, 1);
assert_eq!(stats.entries, 2);
assert_eq!(stats.capacity, 1_000_000);
assert_eq!(stats.used, 4096 + 8192);
assert!((stats.hit_rate() - 50.0).abs() < 0.01);
}
// Default config values match the documented defaults.
#[test]
fn test_l2arc_config_default() {
let config = L2ArcConfig::default();
assert!(config.compress);
assert_eq!(config.compress_algo, 1); assert_eq!(config.min_access_count, 2);
assert!(config.persist_index);
assert!(config.enable_trim);
}
// hit_rate and utilization compute percentages from raw counters.
#[test]
fn test_l2arc_stats_methods() {
let stats = L2ArcStats {
hits: 90,
misses: 10,
entries: 100,
capacity: 1_000_000,
used: 500_000,
..Default::default()
};
assert!((stats.hit_rate() - 90.0).abs() < 0.01);
assert!((stats.utilization() - 50.0).abs() < 0.01);
}
// mark_dirty sets the flag; flush clears it and reports the count.
#[test]
fn test_l2arc_dirty_tracking() {
let mut l2arc = L2Arc::new();
l2arc.init(1, 1_000_000);
l2arc.insert(100, 4096);
assert!(!l2arc.peek(100).unwrap().dirty);
l2arc.mark_dirty(100);
assert!(l2arc.peek(100).unwrap().dirty);
let flushed = l2arc.flush();
assert_eq!(flushed, 1);
assert!(!l2arc.peek(100).unwrap().dirty);
}
}