use crate::storage::schema::Storage;
use rusqlite::{params, OptionalExtension, Result as SqliteResult};
use serde::{Deserialize, Serialize};
/// Content-addressed identity of an analysis node: a blake3 digest stored as
/// a 64-character hex string. Hash/Eq derive from the hex text, so two
/// `NodeHash`es compare equal iff their digests match.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct NodeHash(String);
impl NodeHash {
    /// Hashes `data` with blake3 and renders the 32-byte digest as
    /// 64 lowercase hex characters.
    pub fn new(data: &[u8]) -> Self {
        let hash = blake3::hash(data);
        Self(hash.to_hex().to_string())
    }

    /// Borrows the hex digest string.
    pub fn as_str(&self) -> &str {
        &self.0
    }

    /// Reconstructs a `NodeHash` from a pre-rendered digest string.
    ///
    /// Returns `None` unless `s` is exactly 64 ASCII hex digits — the shape
    /// of a blake3 digest in hex. The original check validated only the
    /// length, so arbitrary 64-character strings (spaces, punctuation) could
    /// masquerade as hashes; the per-byte hex check closes that hole.
    pub fn from_str_name(s: &str) -> Option<Self> {
        let is_digest = s.len() == 64 && s.bytes().all(|b| b.is_ascii_hexdigit());
        is_digest.then(|| Self(s.to_string()))
    }
}
/// Persistent cache of per-node analysis results, keyed by `NodeHash` and
/// backed by the `analysis_cache` table in `Storage`.
pub struct IncrementalCache {
    // Owns the SQLite handle; all methods go through storage.conn().
    storage: Storage,
}
impl IncrementalCache {
    /// Creates a cache backed by the given storage handle.
    pub fn new(storage: Storage) -> Self {
        Self { storage }
    }

    /// Returns `true` if an entry for `hash` exists in `analysis_cache`.
    ///
    /// # Errors
    /// Propagates any SQLite error from preparing or running the query.
    pub fn is_cached(&self, hash: &NodeHash) -> SqliteResult<bool> {
        let mut stmt = self
            .storage
            .conn()
            .prepare("SELECT 1 FROM analysis_cache WHERE node_hash = ?1 LIMIT 1")?;
        // `exists` stops at the first row; counting every match was wasted work
        // when only presence is needed.
        stmt.exists(params![hash.as_str()])
    }

    /// Fetches the cached computation for `hash`, recording hit/miss telemetry.
    ///
    /// Returns `Ok(None)` (and bumps the miss counter) when no row matches.
    pub fn get(&self, hash: &NodeHash) -> SqliteResult<Option<CachedComputation>> {
        let mut stmt = self.storage.conn().prepare(
            "SELECT cfg_data, complexity_metrics, timestamp FROM analysis_cache WHERE node_hash = ?1"
        )?;
        let found = stmt
            .query_row(params![hash.as_str()], |row| {
                Ok(CachedComputation {
                    cfg_data: row.get(0)?,
                    complexity_metrics: row.get(1)?,
                    timestamp: row.get(2)?,
                })
            })
            .optional()?;
        // Telemetry is best-effort by design: a failed counter update must not
        // turn a successful read into an error.
        if found.is_some() {
            let _ = self.bump_cache_hits();
        } else {
            let _ = self.bump_cache_misses();
        }
        Ok(found)
    }

    /// Inserts or updates the cache entry for `hash` (upsert), then bumps the
    /// write counter.
    pub fn put(&mut self, hash: &NodeHash, computation: &CachedComputation) -> SqliteResult<()> {
        self.storage.conn().execute(
            // Explicit conflict target: the bare `ON CONFLICT DO UPDATE` form is
            // only accepted by SQLite >= 3.35 and silently binds to whichever
            // uniqueness constraint happens to fire. Naming node_hash pins the
            // upsert to the intended key. (Assumes analysis_cache declares
            // node_hash UNIQUE/PRIMARY KEY — implied by the keyed lookups here;
            // confirm against the schema.)
            "INSERT INTO analysis_cache (node_hash, cfg_data, complexity_metrics, timestamp)
             VALUES (?1, ?2, ?3, ?4)
             ON CONFLICT(node_hash) DO UPDATE SET
                 cfg_data = excluded.cfg_data,
                 complexity_metrics = excluded.complexity_metrics,
                 timestamp = excluded.timestamp",
            params![
                hash.as_str(),
                computation.cfg_data,
                computation.complexity_metrics,
                computation.timestamp,
            ],
        )?;
        let _ = self.bump_cache_writes();
        Ok(())
    }

    /// Best-effort increment of one telemetry counter in the singleton
    /// `cache_telemetry` row (id = 1). `column` is always a compile-time
    /// constant supplied by the wrappers below — never user input — so the
    /// string interpolation cannot inject SQL.
    fn bump_counter(&self, column: &str) -> SqliteResult<usize> {
        let sql = format!(
            "UPDATE cache_telemetry
             SET {0} = {0} + 1,
                 updated_at = strftime('%s', 'now')
             WHERE id = 1",
            column
        );
        self.storage.conn().execute(&sql, [])
    }

    fn bump_cache_hits(&self) -> SqliteResult<usize> {
        self.bump_counter("cache_hits")
    }

    fn bump_cache_misses(&self) -> SqliteResult<usize> {
        self.bump_counter("cache_misses")
    }

    fn bump_cache_writes(&self) -> SqliteResult<usize> {
        self.bump_counter("cache_writes")
    }

    /// Deletes every cache entry older than `timestamp` (seconds, same scale
    /// as `CachedComputation::timestamp`); returns the number of rows removed.
    pub fn invalidate_before(&mut self, timestamp: i64) -> SqliteResult<usize> {
        self.storage.conn().execute(
            "DELETE FROM analysis_cache WHERE timestamp < ?1",
            params![timestamp],
        )
    }

    /// Drops the entire cache; returns the number of rows removed.
    /// Telemetry counters are intentionally left untouched.
    pub fn clear(&mut self) -> SqliteResult<usize> {
        self.storage.conn().execute("DELETE FROM analysis_cache", [])
    }
}
/// One cached analysis result as stored in `analysis_cache`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CachedComputation {
    // Serialized CFG blob; None when no CFG was produced for the node.
    pub cfg_data: Option<Vec<u8>>,
    // Serialized complexity metrics blob; None when not computed.
    pub complexity_metrics: Option<Vec<u8>>,
    // Unix timestamp (seconds) of the computation; compared against the
    // cutoff in IncrementalCache::invalidate_before.
    pub timestamp: i64,
}
/// Targeted invalidation of cached analysis results, plus lookup of which
/// node hashes a given source file contributes (via `intel_nodes`).
pub struct QueryInvalidation {
    storage: Storage,
}
impl QueryInvalidation {
    /// Wraps the storage handle used for invalidation queries.
    pub fn new(storage: Storage) -> Self {
        Self { storage }
    }

    /// Removes the cache entry keyed by `node_hash`, if one exists.
    /// Deleting a missing key is not an error.
    pub fn invalidate_node(&mut self, node_hash: &NodeHash) -> SqliteResult<()> {
        self.storage
            .conn()
            .execute(
                "DELETE FROM analysis_cache WHERE node_hash = ?1",
                params![node_hash.as_str()],
            )
            .map(|_rows_deleted| ())
    }

    /// Lists the content hash of every intel node recorded for `file_path`.
    /// Returns an empty vector when the file has no recorded nodes.
    pub fn get_affected_nodes(&self, file_path: &str) -> SqliteResult<Vec<String>> {
        let conn = self.storage.conn();
        let mut stmt = conn.prepare("SELECT content_hash FROM intel_nodes WHERE file_path = ?1")?;
        let rows = stmt.query_map(params![file_path], |row| row.get::<_, String>(0))?;
        rows.collect()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::storage::schema::Storage;
    use tempfile::NamedTempFile;

    /// A blake3 digest is 32 bytes, so its hex rendering is always 64 chars.
    #[test]
    fn test_node_hash_creation() {
        let data = b"hello world";
        let hash = NodeHash::new(data);
        assert_eq!(hash.as_str().len(), 64);
    }

    /// End-to-end put/get round trip over a real on-disk database, covering
    /// the miss, write, and hit telemetry paths.
    #[test]
    fn test_incremental_cache() {
        let temp_file = NamedTempFile::new().unwrap();
        let storage = Storage::open(temp_file.path()).unwrap();
        let mut cache = IncrementalCache::new(storage);
        let hash = NodeHash::new(b"test data");

        // Look up before inserting: exercises the miss path, which the
        // original test never touched (telemetry.1 was queried but unasserted).
        assert!(cache.get(&hash).unwrap().is_none());

        let computation = CachedComputation {
            cfg_data: Some(vec![1, 2, 3]),
            complexity_metrics: Some(vec![4, 5, 6]),
            timestamp: chrono::Utc::now().timestamp(),
        };
        cache.put(&hash, &computation).unwrap();
        assert!(cache.is_cached(&hash).unwrap());

        // Round trip every field, not just cfg_data.
        let retrieved = cache.get(&hash).unwrap().unwrap();
        assert_eq!(retrieved.cfg_data, Some(vec![1, 2, 3]));
        assert_eq!(retrieved.complexity_metrics, Some(vec![4, 5, 6]));
        assert_eq!(retrieved.timestamp, computation.timestamp);

        let telemetry: (i64, i64, i64) = cache
            .storage
            .conn()
            .query_row(
                "SELECT cache_hits, cache_misses, cache_writes FROM cache_telemetry WHERE id = 1",
                [],
                |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?)),
            )
            .unwrap();
        assert!(telemetry.0 >= 1, "expected cache hit telemetry");
        assert!(telemetry.1 >= 1, "expected cache miss telemetry");
        assert!(telemetry.2 >= 1, "expected cache write telemetry");
    }
}