use alloc::collections::BTreeMap;
use alloc::vec::Vec;
use lazy_static::lazy_static;
use spin::Mutex;
/// Operations a computational-storage device may execute on behalf of the host.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum CompStorOp {
    Checksum,
    Compress,
    Decompress,
    DedupHash,
    PatternScan,
    Encrypt,
    Decrypt,
}

impl CompStorOp {
    /// Stable snake_case identifier for this operation.
    pub fn name(&self) -> &'static str {
        match *self {
            Self::Checksum => "checksum",
            Self::Compress => "compress",
            Self::Decompress => "decompress",
            Self::DedupHash => "dedup_hash",
            Self::PatternScan => "pattern_scan",
            Self::Encrypt => "encrypt",
            Self::Decrypt => "decrypt",
        }
    }

    /// Relative CPU cost estimate per input unit, used for the
    /// cycles-saved accounting (it is multiplied by a command's input size).
    pub fn cpu_cost(&self) -> u64 {
        match *self {
            Self::Checksum => 10,
            Self::Compress => 100,
            Self::Decompress => 50,
            Self::DedupHash => 20,
            Self::PatternScan => 30,
            Self::Encrypt => 80,
            Self::Decrypt => 80,
        }
    }
}
/// Advertised feature set and performance envelope of one
/// computational-storage device.
#[derive(Debug, Clone)]
pub struct CompStorCapabilities {
    pub device_id: u64,
    pub device_name: &'static str,
    pub supported_ops: Vec<CompStorOp>,
    pub max_queue_depth: usize,
    pub throughput_ops_sec: u64,
}

impl CompStorCapabilities {
    /// Creates a capability record with no supported operations and
    /// conservative defaults (queue depth 32, 100k ops/sec).
    pub fn new(device_id: u64, device_name: &'static str) -> Self {
        CompStorCapabilities {
            device_id,
            device_name,
            supported_ops: Vec::new(),
            max_queue_depth: 32,
            throughput_ops_sec: 100_000,
        }
    }

    /// Marks `op` as supported; duplicate registrations are ignored.
    pub fn add_support(&mut self, op: CompStorOp) {
        if !self.supports(op) {
            self.supported_ops.push(op);
        }
    }

    /// Returns `true` when this device advertises support for `op`.
    pub fn supports(&self, op: CompStorOp) -> bool {
        self.supported_ops.iter().any(|&supported| supported == op)
    }
}
/// One tracked offload request: the operation plus its LBA-addressed
/// input/output locations and lifecycle status.
#[derive(Debug, Clone)]
pub struct CompStorCommand {
// Manager-assigned id, unique and monotonically increasing from 1.
pub cmd_id: u64,
// Operation the device is asked to perform.
pub operation: CompStorOp,
// Starting logical block address of the input data.
pub input_lba: u64,
// Input size; units (bytes vs. blocks) are not fixed by this file — the
// value is multiplied by `CompStorOp::cpu_cost` for cycles-saved accounting.
pub input_size: u64,
// Starting logical block address where the result is written.
pub output_lba: u64,
// Current lifecycle state (see `CompStorStatus`).
pub status: CompStorStatus,
}
/// Lifecycle states of a `CompStorCommand`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CompStorStatus {
// Accepted by the manager, not yet executed.
Queued,
// Set at the start of `execute_command`.
Executing,
// Set when `execute_command` finishes (execution is simulated here).
Completed,
// Never constructed in this file — presumably set by real device drivers.
Failed,
// Never constructed in this file — fallback is tracked via stats instead.
FallbackCpu,
}
/// Monotonic counters describing offload activity, kept both globally
/// and per operation type.
#[derive(Debug, Clone, Default)]
pub struct CompStorStats {
// Commands accepted for device offload.
pub total_offloaded: u64,
// Commands that completed on a device.
pub device_completed: u64,
// Operations that fell back to host-CPU execution.
pub cpu_fallback: u64,
// Failed commands (never updated anywhere in this file).
pub failed: u64,
// Estimated CPU cycles avoided: cpu_cost * input_size per completed command.
pub cpu_cycles_saved: u64,
}
lazy_static! {
// Process-wide manager singleton. A spin::Mutex is used instead of a std
// lock because this module only depends on `alloc` (no_std-friendly).
static ref COMPSTOR_MANAGER: Mutex<CompStorManager> = Mutex::new(CompStorManager::new());
}
/// Central registry of computational-storage devices, submitted commands,
/// and offload statistics. Normally accessed through the spin-locked
/// `COMPSTOR_MANAGER` singleton.
pub struct CompStorManager {
// Registered devices, keyed by `device_id`.
devices: BTreeMap<u64, CompStorCapabilities>,
// All submitted commands, keyed by `cmd_id` (never pruned in this file).
commands: BTreeMap<u64, CompStorCommand>,
// Next command id to hand out; starts at 1.
next_cmd_id: u64,
// Statistics broken down by operation type.
stats_per_op: BTreeMap<CompStorOp, CompStorStats>,
// Aggregate statistics across all operations.
global_stats: CompStorStats,
}
impl Default for CompStorManager {
/// Equivalent to `CompStorManager::new()`: an empty manager.
fn default() -> Self {
Self::new()
}
}
impl CompStorManager {
    /// Creates an empty manager: no devices, no commands, ids start at 1.
    pub fn new() -> Self {
        Self {
            devices: BTreeMap::new(),
            commands: BTreeMap::new(),
            next_cmd_id: 1,
            stats_per_op: BTreeMap::new(),
            global_stats: CompStorStats::default(),
        }
    }

    /// Registers a device; an existing entry with the same `device_id`
    /// is replaced.
    pub fn register_device(&mut self, capabilities: CompStorCapabilities) {
        self.devices.insert(capabilities.device_id, capabilities);
    }

    /// Returns the lowest-numbered device id that supports `op`, if any
    /// (BTreeMap iteration order is ascending by key).
    fn find_capable_device(&self, op: CompStorOp) -> Option<u64> {
        self.devices
            .values()
            .find(|caps| caps.supports(op))
            .map(|caps| caps.device_id)
    }

    /// Queues a new command for `operation`.
    ///
    /// Returns `None` — recording nothing — when no registered device
    /// supports the operation; otherwise returns the new command id and
    /// bumps the global and per-op `total_offloaded` counters.
    pub fn submit_command(
        &mut self,
        operation: CompStorOp,
        input_lba: u64,
        input_size: u64,
        output_lba: u64,
    ) -> Option<u64> {
        // Device lookup acts only as a capability gate; the command is not
        // bound to a specific device in this file.
        self.find_capable_device(operation)?;
        let cmd_id = self.next_cmd_id;
        self.next_cmd_id += 1;
        let command = CompStorCommand {
            cmd_id,
            operation,
            input_lba,
            input_size,
            output_lba,
            status: CompStorStatus::Queued,
        };
        self.commands.insert(cmd_id, command);
        self.global_stats.total_offloaded += 1;
        self.stats_per_op
            .entry(operation)
            .or_default()
            .total_offloaded += 1;
        Some(cmd_id)
    }

    /// Simulates execution of a queued command.
    ///
    /// The command transitions Queued -> Executing -> Completed
    /// synchronously (no real device I/O is modeled) and the
    /// cycles-saved accounting is updated.
    ///
    /// # Errors
    /// Returns `Err("Command not found")` when `cmd_id` is unknown.
    pub fn execute_command(&mut self, cmd_id: u64) -> Result<(), &'static str> {
        let cmd = self.commands.get_mut(&cmd_id).ok_or("Command not found")?;
        cmd.status = CompStorStatus::Executing;
        // Execution is simulated, so the command completes immediately.
        cmd.status = CompStorStatus::Completed;
        let operation = cmd.operation;
        // Saturate instead of overflowing: cost * input_size can exceed u64
        // for pathological sizes, and stats updates must never panic (debug)
        // or wrap (release).
        let cycles_saved = operation.cpu_cost().saturating_mul(cmd.input_size);
        self.global_stats.device_completed += 1;
        self.global_stats.cpu_cycles_saved =
            self.global_stats.cpu_cycles_saved.saturating_add(cycles_saved);
        // One entry() lookup for both per-op counters (the original did two).
        let op_stats = self.stats_per_op.entry(operation).or_default();
        op_stats.device_completed += 1;
        op_stats.cpu_cycles_saved = op_stats.cpu_cycles_saved.saturating_add(cycles_saved);
        Ok(())
    }

    /// Records that `operation` was handled on the host CPU instead of
    /// being offloaded.
    pub fn cpu_fallback(&mut self, operation: CompStorOp) {
        self.global_stats.cpu_fallback += 1;
        self.stats_per_op.entry(operation).or_default().cpu_fallback += 1;
    }

    /// Current status of a tracked command, or `None` for an unknown id.
    pub fn get_command_status(&self, cmd_id: u64) -> Option<CompStorStatus> {
        self.commands.get(&cmd_id).map(|cmd| cmd.status)
    }

    /// Statistics for one operation type (zeroed defaults when nothing
    /// has been recorded for it).
    pub fn get_op_stats(&self, op: CompStorOp) -> CompStorStats {
        self.stats_per_op.get(&op).cloned().unwrap_or_default()
    }

    /// Snapshot of the aggregate statistics.
    pub fn get_global_stats(&self) -> CompStorStats {
        self.global_stats.clone()
    }

    /// Percentage of all attempted operations (offloaded + CPU fallback)
    /// that completed on a device; 0.0 when nothing was attempted.
    pub fn offload_efficiency(&self) -> f64 {
        let total = self.global_stats.total_offloaded + self.global_stats.cpu_fallback;
        if total == 0 {
            return 0.0;
        }
        (self.global_stats.device_completed as f64 / total as f64) * 100.0
    }

    /// Number of registered devices.
    pub fn device_count(&self) -> usize {
        self.devices.len()
    }
}
/// Zero-sized facade over the global `COMPSTOR_MANAGER` singleton;
/// every method acquires the spin lock for the duration of one call.
pub struct CompStorEngine;

impl CompStorEngine {
    /// Registers `capabilities` with the global manager.
    pub fn register_device(capabilities: CompStorCapabilities) {
        COMPSTOR_MANAGER.lock().register_device(capabilities);
    }

    /// Tries to offload `operation` to a capable device.
    ///
    /// On success the command is executed right away and its id returned;
    /// when no capable device exists the per-op CPU-fallback counter is
    /// bumped and `None` is returned.
    pub fn offload(
        operation: CompStorOp,
        input_lba: u64,
        input_size: u64,
        output_lba: u64,
    ) -> Option<u64> {
        let mut mgr = COMPSTOR_MANAGER.lock();
        match mgr.submit_command(operation, input_lba, input_size, output_lba) {
            Some(cmd_id) => {
                // The id was just created, so execution failure is not expected;
                // the result is deliberately ignored.
                let _ = mgr.execute_command(cmd_id);
                Some(cmd_id)
            }
            None => {
                mgr.cpu_fallback(operation);
                None
            }
        }
    }

    /// Status of a previously submitted command, if known.
    pub fn command_status(cmd_id: u64) -> Option<CompStorStatus> {
        COMPSTOR_MANAGER.lock().get_command_status(cmd_id)
    }

    /// Statistics for one operation type.
    pub fn op_stats(op: CompStorOp) -> CompStorStats {
        COMPSTOR_MANAGER.lock().get_op_stats(op)
    }

    /// Aggregate statistics across all operations.
    pub fn global_stats() -> CompStorStats {
        COMPSTOR_MANAGER.lock().get_global_stats()
    }

    /// Device-completion percentage over all attempted operations.
    pub fn efficiency() -> f64 {
        COMPSTOR_MANAGER.lock().offload_efficiency()
    }

    /// Number of registered devices.
    pub fn device_count() -> usize {
        COMPSTOR_MANAGER.lock().device_count()
    }
}
/// Builds a capability profile modeling a typical computational-storage
/// SSD: checksum/compression/dedup/scan support (no crypto), queue depth
/// 64, 500k ops/sec.
pub fn create_typical_compstor_device(device_id: u64) -> CompStorCapabilities {
    let mut caps = CompStorCapabilities::new(device_id, "Samsung SmartSSD");
    caps.max_queue_depth = 64;
    caps.throughput_ops_sec = 500_000;
    for op in [
        CompStorOp::Checksum,
        CompStorOp::Compress,
        CompStorOp::Decompress,
        CompStorOp::DedupHash,
        CompStorOp::PatternScan,
    ] {
        caps.add_support(op);
    }
    caps
}
#[cfg(test)]
mod tests {
    // NOTE: the previous `use alloc::vec;` import was removed — no test in
    // this module uses the `vec!` macro, so it only produced a warning.
    use super::*;

    #[test]
    fn test_op_properties() {
        assert_eq!(CompStorOp::Checksum.name(), "checksum");
        assert!(CompStorOp::Compress.cpu_cost() > CompStorOp::Checksum.cpu_cost());
    }

    #[test]
    fn test_capabilities() {
        let mut caps = CompStorCapabilities::new(1, "TestDevice");
        assert!(!caps.supports(CompStorOp::Checksum));
        caps.add_support(CompStorOp::Checksum);
        assert!(caps.supports(CompStorOp::Checksum));
    }

    #[test]
    fn test_register_device() {
        let mut mgr = CompStorManager::new();
        let caps = create_typical_compstor_device(1);
        mgr.register_device(caps);
        assert_eq!(mgr.device_count(), 1);
    }

    #[test]
    fn test_submit_command() {
        let mut mgr = CompStorManager::new();
        let caps = create_typical_compstor_device(1);
        mgr.register_device(caps);
        let cmd_id = mgr.submit_command(CompStorOp::Checksum, 0, 4096, 1000);
        assert!(cmd_id.is_some());
        let status = mgr.get_command_status(cmd_id.expect("test: operation should succeed"));
        assert_eq!(status, Some(CompStorStatus::Queued));
    }

    #[test]
    fn test_execute_command() {
        let mut mgr = CompStorManager::new();
        let caps = create_typical_compstor_device(1);
        mgr.register_device(caps);
        let cmd_id = mgr
            .submit_command(CompStorOp::Checksum, 0, 4096, 1000)
            .expect("test: operation should succeed");
        mgr.execute_command(cmd_id)
            .expect("test: operation should succeed");
        let status = mgr.get_command_status(cmd_id);
        assert_eq!(status, Some(CompStorStatus::Completed));
    }

    #[test]
    fn test_cpu_fallback() {
        // No devices registered, so submission must fail and fall back.
        let mut mgr = CompStorManager::new();
        let cmd_id = mgr.submit_command(CompStorOp::Checksum, 0, 4096, 1000);
        assert!(cmd_id.is_none());
        mgr.cpu_fallback(CompStorOp::Checksum);
        assert_eq!(mgr.global_stats.cpu_fallback, 1);
    }

    #[test]
    fn test_statistics() {
        let mut mgr = CompStorManager::new();
        let caps = create_typical_compstor_device(1);
        mgr.register_device(caps);
        for i in 0..10 {
            if let Some(cmd_id) =
                mgr.submit_command(CompStorOp::Checksum, i * 4096, 4096, 10000 + i * 4096)
            {
                mgr.execute_command(cmd_id)
                    .expect("test: operation should succeed");
            }
        }
        let stats = mgr.get_global_stats();
        assert_eq!(stats.total_offloaded, 10);
        assert_eq!(stats.device_completed, 10);
        assert!(stats.cpu_cycles_saved > 0);
    }

    #[test]
    fn test_offload_efficiency() {
        let mut mgr = CompStorManager::new();
        let caps = create_typical_compstor_device(1);
        mgr.register_device(caps);
        for _ in 0..8 {
            if let Some(cmd_id) = mgr.submit_command(CompStorOp::Checksum, 0, 4096, 1000) {
                mgr.execute_command(cmd_id)
                    .expect("test: operation should succeed");
            }
        }
        for _ in 0..2 {
            mgr.cpu_fallback(CompStorOp::Encrypt);
        }
        // 8 device completions out of 10 attempts -> 80%.
        let efficiency = mgr.offload_efficiency();
        assert!(efficiency > 75.0 && efficiency < 85.0);
    }

    #[test]
    fn test_per_op_stats() {
        let mut mgr = CompStorManager::new();
        let caps = create_typical_compstor_device(1);
        mgr.register_device(caps);
        for _ in 0..5 {
            if let Some(cmd_id) = mgr.submit_command(CompStorOp::Checksum, 0, 4096, 1000) {
                mgr.execute_command(cmd_id)
                    .expect("test: operation should succeed");
            }
        }
        for _ in 0..3 {
            if let Some(cmd_id) = mgr.submit_command(CompStorOp::Compress, 0, 4096, 1000) {
                mgr.execute_command(cmd_id)
                    .expect("test: operation should succeed");
            }
        }
        let checksum_stats = mgr.get_op_stats(CompStorOp::Checksum);
        let compress_stats = mgr.get_op_stats(CompStorOp::Compress);
        assert_eq!(checksum_stats.device_completed, 5);
        assert_eq!(compress_stats.device_completed, 3);
    }

    #[test]
    fn test_typical_device() {
        let caps = create_typical_compstor_device(1);
        assert!(caps.supports(CompStorOp::Checksum));
        assert!(caps.supports(CompStorOp::Compress));
        assert!(caps.supports(CompStorOp::DedupHash));
        assert!(!caps.supports(CompStorOp::Encrypt));
    }
}