use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::RwLock;
use serde::{Serialize, Deserialize};
use async_graphql::Result;
use uuid::Uuid;
/// A single cached value plus the bookkeeping required for TTL-based
/// expiry and LRU eviction.
#[derive(Clone, Debug)]
struct CacheEntry<T> {
    // The cached payload itself.
    value: T,
    // Insertion time; expiry is measured from this instant.
    created_at: Instant,
    // Per-entry time-to-live; entry is stale once `created_at.elapsed() > ttl`.
    ttl: Duration,
    // Number of reads recorded via `access` (starts at 1 on insert).
    access_count: u64,
    // Most recent read time; used to choose the LRU eviction victim.
    last_accessed: Instant,
}
impl<T> CacheEntry<T> {
fn new(value: T, ttl: Duration) -> Self {
let now = Instant::now();
Self {
value,
created_at: now,
ttl,
access_count: 1,
last_accessed: now,
}
}
fn is_expired(&self) -> bool {
self.created_at.elapsed() > self.ttl
}
fn access(&mut self) -> &T {
self.access_count += 1;
self.last_accessed = Instant::now();
&self.value
}
}
/// A size-bounded, TTL-aware async cache keyed by `String`.
///
/// Cloning the cache is cheap: the entry map is behind `Arc<RwLock<…>>`,
/// so clones share the same underlying storage.
#[derive(Clone)]
pub struct GraphQLCache<T: Clone> {
    // Shared entry map; write lock taken for inserts and for reads
    // (reads mutate per-entry access statistics).
    entries: Arc<RwLock<HashMap<String, CacheEntry<T>>>>,
    // Soft capacity; exceeding it triggers LRU eviction on insert.
    max_size: usize,
    // TTL applied by `put` when no explicit TTL is given.
    default_ttl: Duration,
}
impl<T: Clone> GraphQLCache<T> {
    /// Creates a cache holding at most `max_size` entries, each living
    /// `default_ttl` unless a per-entry TTL is supplied via `put_with_ttl`.
    pub fn new(max_size: usize, default_ttl: Duration) -> Self {
        Self {
            entries: Arc::new(RwLock::new(HashMap::new())),
            max_size,
            default_ttl,
        }
    }

    /// Looks up `key`, returning a clone of the value if present and fresh.
    ///
    /// Takes the write lock even though this is a read: a hit mutates the
    /// entry's access statistics, and an expired hit is removed eagerly.
    pub async fn get(&self, key: &str) -> Option<T> {
        let mut entries = self.entries.write().await;
        match entries.get_mut(key) {
            Some(entry) if entry.is_expired() => {
                entries.remove(key);
                None
            }
            Some(entry) => Some(entry.access().clone()),
            None => None,
        }
    }

    /// Inserts `value` under `key` with an explicit `ttl`.
    ///
    /// Eviction runs only when a *new* key would push the map past
    /// `max_size` — replacing an existing key does not grow the map and
    /// must not evict (the original evicted unconditionally at capacity,
    /// discarding a live entry on every overwrite). Expired entries are
    /// purged before falling back to LRU eviction, so live data is not
    /// thrown out while stale data remains.
    pub async fn put_with_ttl(&self, key: String, value: T, ttl: Duration) {
        let mut entries = self.entries.write().await;
        if !entries.contains_key(&key) && entries.len() >= self.max_size {
            // Prefer dropping stale entries over sacrificing a live one.
            entries.retain(|_, entry| !entry.is_expired());
            if entries.len() >= self.max_size {
                self.evict_lru(&mut entries);
            }
        }
        entries.insert(key, CacheEntry::new(value, ttl));
    }

    /// Inserts `value` under `key` with the cache's default TTL.
    pub async fn put(&self, key: String, value: T) {
        self.put_with_ttl(key, value, self.default_ttl).await;
    }

    /// Drops every entry whose TTL has elapsed.
    pub async fn cleanup_expired(&self) {
        let mut entries = self.entries.write().await;
        entries.retain(|_, entry| !entry.is_expired());
    }

    /// Removes all entries unconditionally.
    pub async fn clear(&self) {
        let mut entries = self.entries.write().await;
        entries.clear();
    }

    /// Snapshot of the cache's current size and staleness.
    ///
    /// `hit_rate` is a placeholder (always 0.0): hit/miss counters are
    /// not tracked by this cache yet.
    pub async fn stats(&self) -> CacheStats {
        let entries = self.entries.read().await;
        let total_entries = entries.len();
        let expired_count = entries.values().filter(|e| e.is_expired()).count();
        CacheStats {
            total_entries,
            expired_count,
            hit_rate: 0.0,
        }
    }

    /// Evicts the least-recently-accessed entry, if any.
    ///
    /// Caller must already hold the write lock and pass the guarded map in;
    /// `if let` replaces the original's check-then-`unwrap`.
    fn evict_lru(&self, entries: &mut HashMap<String, CacheEntry<T>>) {
        let lru_key = entries
            .iter()
            .min_by_key(|(_, entry)| entry.last_accessed)
            .map(|(key, _)| key.clone());
        if let Some(key) = lru_key {
            entries.remove(&key);
        }
    }
}
/// Point-in-time statistics for a single cache, exposed over GraphQL.
#[derive(Debug, Clone, Serialize, async_graphql::SimpleObject)]
pub struct CacheStats {
    // Entries currently stored, including not-yet-swept expired ones.
    pub total_entries: usize,
    // Subset of `total_entries` whose TTL has elapsed.
    pub expired_count: usize,
    // NOTE(review): always 0.0 in the current implementation —
    // hit/miss counters are not tracked yet.
    pub hit_rate: f64,
}
/// Tunables shared by the caches owned by `GraphQLCacheManager`.
#[derive(Debug, Clone)]
pub struct CacheConfig {
    // Per-cache capacity before LRU eviction kicks in.
    pub max_size: usize,
    // TTL used when no per-entry TTL is supplied.
    pub default_ttl: Duration,
    // How often the background task sweeps expired entries.
    pub cleanup_interval: Duration,
}
impl Default for CacheConfig {
fn default() -> Self {
Self {
max_size: 10_000,
default_ttl: Duration::from_secs(300), cleanup_interval: Duration::from_secs(60), }
}
}
/// Owns the three domain-specific caches (database metadata, table
/// metadata, query results) plus their shared configuration.
///
/// Clones share underlying storage, since each `GraphQLCache` clone
/// shares its `Arc`-wrapped entry map.
#[derive(Clone)]
pub struct GraphQLCacheManager {
    pub database_cache: GraphQLCache<DatabaseCacheEntry>,
    pub table_cache: GraphQLCache<TableCacheEntry>,
    pub query_cache: GraphQLCache<QueryCacheEntry>,
    // Retained so the cleanup task can read `cleanup_interval`.
    config: CacheConfig,
}
/// Cached database-level metadata.
///
/// NOTE(review): timestamps are pre-formatted strings; presumably
/// rendered upstream — confirm the expected format with the producer.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseCacheEntry {
    pub id: String,
    pub name: String,
    pub status: String,
    pub encryption_algorithm: String,
    pub created_at: String,
    pub updated_at: String,
    pub table_count: i32,
    pub storage_size_bytes: i64,
}
/// Cached table-level metadata, namespaced by its owning database.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TableCacheEntry {
    pub id: String,
    pub name: String,
    // Name of the database this table belongs to.
    pub database: String,
    pub record_count: i32,
    pub encryption_enabled: bool,
    // Pre-formatted timestamp strings — see note on `DatabaseCacheEntry`-style
    // entries: format is produced upstream (TODO confirm).
    pub created_at: String,
    pub updated_at: String,
}
/// Cached result of a previously executed query.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueryCacheEntry {
    // Hash identifying the query (see `QueryHasher::hash_query`).
    pub query_hash: String,
    // Full result payload as JSON.
    pub result: serde_json::Value,
    pub record_count: usize,
    // Original execution time, kept so cached responses can report it.
    pub execution_time_ms: u64,
}
impl GraphQLCacheManager {
    /// Builds the three sub-caches from a shared configuration.
    ///
    /// The query cache is sized at half the configured capacity —
    /// clamped to at least 1 so `max_size <= 1` cannot produce a
    /// zero-capacity cache — and uses a fixed short 60-second TTL,
    /// since query results go stale faster than schema metadata.
    pub fn new(config: CacheConfig) -> Self {
        Self {
            database_cache: GraphQLCache::new(config.max_size, config.default_ttl),
            table_cache: GraphQLCache::new(config.max_size, config.default_ttl),
            query_cache: GraphQLCache::new(
                (config.max_size / 2).max(1),
                Duration::from_secs(60),
            ),
            config,
        }
    }

    /// Spawns a background task that sweeps expired entries from all
    /// three caches every `cleanup_interval`.
    ///
    /// The task loops forever; stop it by aborting the returned
    /// `JoinHandle` or shutting down the runtime.
    pub fn start_cleanup_task(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(self.config.cleanup_interval);
            loop {
                interval.tick().await;
                // Sweep concurrently; each cache takes its own write lock.
                tokio::join!(
                    self.database_cache.cleanup_expired(),
                    self.table_cache.cleanup_expired(),
                    self.query_cache.cleanup_expired(),
                );
            }
        })
    }

    /// Cache key for database-level metadata.
    pub fn database_key(&self, name: &str) -> String {
        format!("db:{}", name)
    }

    /// Cache key for table-level metadata, namespaced by database.
    pub fn table_key(&self, database: &str, table: &str) -> String {
        format!("table:{}:{}", database, table)
    }

    /// Cache key for a query result, namespaced by database and table.
    pub fn query_key(&self, database: &str, table: &str, query_hash: &str) -> String {
        format!("query:{}:{}:{}", database, table, query_hash)
    }

    /// Gathers statistics from all three caches concurrently.
    pub async fn get_stats(&self) -> CacheManagerStats {
        let (db_stats, table_stats, query_stats) = tokio::join!(
            self.database_cache.stats(),
            self.table_cache.stats(),
            self.query_cache.stats(),
        );
        CacheManagerStats {
            database: db_stats,
            table: table_stats,
            query: query_stats,
        }
    }
}
/// Aggregated statistics across all three managed caches,
/// exposed over GraphQL.
#[derive(Debug, Clone, Serialize, async_graphql::SimpleObject)]
pub struct CacheManagerStats {
    pub database: CacheStats,
    pub table: CacheStats,
    pub query: CacheStats,
}
/// Derives cache keys that identify a query's full shape.
pub struct QueryHasher;

impl QueryHasher {
    /// Hashes the query identity — target database/table, serialized
    /// filters, and pagination (when present) — into a hex string.
    ///
    /// NOTE(review): `DefaultHasher`'s algorithm is unspecified and may
    /// change across Rust releases; keys are only stable within one
    /// build, which is fine for an in-memory cache.
    pub fn hash_query(
        database: &str,
        table: &str,
        filters: &serde_json::Value,
        pagination: &Option<crate::graphql::types::PaginationInput>,
    ) -> String {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};

        let mut state = DefaultHasher::new();
        database.hash(&mut state);
        table.hash(&mut state);
        filters.to_string().hash(&mut state);
        if let Some(p) = pagination {
            p.page.hash(&mut state);
            p.page_size.hash(&mut state);
        }
        format!("{:x}", state.finish())
    }
}