use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::hash::Hash;
use tokio::sync::RwLock;
use serde::{Serialize, Deserialize};
/// A single cached value plus the bookkeeping metadata used for
/// TTL-based expiry and LRU/access-statistics tracking.
#[derive(Clone, Debug)]
struct CacheEntry<T> {
    // The cached payload.
    value: T,
    // Insertion timestamp; expiry is measured from this point.
    created_at: Instant,
    // Per-entry time-to-live; expired once `created_at.elapsed() > ttl`.
    ttl: Duration,
    // Number of reads recorded for this entry (starts at 1 on insert).
    access_count: u64,
    // Timestamp of the most recent recorded access (insert counts as one).
    last_accessed: Instant,
}
impl<T> CacheEntry<T> {
fn new(value: T, ttl: Duration) -> Self {
let now = Instant::now();
Self {
value,
created_at: now,
ttl,
access_count: 1,
last_accessed: now,
}
}
fn is_expired(&self) -> bool {
self.created_at.elapsed() > self.ttl
}
fn access(&mut self) -> &T {
self.access_count += 1;
self.last_accessed = Instant::now();
&self.value
}
}
/// An async, TTL + LRU bounded in-memory cache.
///
/// Cloning is cheap and intentional: the entry map and LRU queue are shared
/// behind `Arc`s, so every clone observes (and mutates) the same cache.
#[derive(Clone)]
pub struct GraphQLCache<T: Clone + Send + Sync + 'static> {
    // Key -> entry storage; an async RwLock so reads can proceed concurrently.
    entries: Arc<RwLock<HashMap<String, CacheEntry<T>>>>,
    // Keys ordered by recency: front = most recently used, back = least.
    lru_order: Arc<RwLock<VecDeque<String>>>,
    // Capacity bound; inserts evict from the back of `lru_order` beyond this.
    max_size: usize,
    // TTL applied by `put` when the caller gives no explicit TTL.
    default_ttl: Duration,
}
impl<T: Clone + Send + Sync + 'static> GraphQLCache<T> {
pub fn new(max_size: usize, default_ttl: Duration) -> Self {
Self {
entries: Arc::new(RwLock::new(HashMap::new())),
lru_order: Arc::new(RwLock::new(VecDeque::new())),
max_size,
default_ttl,
}
}
pub async fn get(&self, key: &str) -> Option<T> {
let entries = self.entries.read().await;
if let Some(entry) = entries.get(key) {
if entry.is_expired() {
let key_owned = key.to_string();
drop(entries);
self.schedule_cleanup(key_owned);
return None;
}
let value = entry.value.clone();
let access_count = entry.access_count;
drop(entries);
self.schedule_access_update(key, access_count);
return Some(value);
}
None
}
fn schedule_cleanup(&self, key: String) {
let cache = self.clone();
tokio::spawn(async move {
let mut entries = cache.entries.write().await;
let mut lru_order = cache.lru_order.write().await;
entries.remove(&key);
lru_order.retain(|k| k != &key);
});
}
fn schedule_access_update(&self, key: &str, access_count: u64) {
let cache = self.clone();
let key_owned = key.to_string();
tokio::spawn(async move {
{
let mut lru_order = cache.lru_order.write().await;
lru_order.retain(|k| k != &key_owned);
lru_order.push_front(key_owned.clone());
}
{
let mut entries = cache.entries.write().await;
if let Some(entry) = entries.get_mut(&key_owned) {
entry.access_count = access_count + 1;
entry.last_accessed = std::time::Instant::now();
}
}
});
}
pub async fn put_with_ttl(&self, key: String, value: T, ttl: Duration) {
let mut entries = self.entries.write().await;
let mut lru_order = self.lru_order.write().await;
if entries.contains_key(&key) {
lru_order.retain(|k| k != &key);
} else {
while entries.len() >= self.max_size {
if let Some(lru_key) = lru_order.pop_back() {
entries.remove(&lru_key);
} else {
break; }
}
}
entries.insert(key.clone(), CacheEntry::new(value, ttl));
lru_order.push_front(key);
}
pub async fn put(&self, key: String, value: T) {
self.put_with_ttl(key, value, self.default_ttl).await;
}
pub async fn cleanup_expired(&self) {
let expired_keys = {
let entries = self.entries.read().await;
entries.iter()
.filter(|(_, entry)| entry.is_expired())
.map(|(key, _)| key.clone())
.collect::<Vec<_>>()
};
if !expired_keys.is_empty() {
let mut entries = self.entries.write().await;
let mut lru_order = self.lru_order.write().await;
for key in &expired_keys {
entries.remove(key);
}
lru_order.retain(|k| !expired_keys.contains(k));
}
}
pub async fn clear(&self) {
let mut entries = self.entries.write().await;
let mut lru_order = self.lru_order.write().await;
entries.clear();
lru_order.clear();
}
pub async fn stats(&self) -> CacheStats {
let entries = self.entries.read().await;
let total_entries = entries.len();
let expired_count = entries.values()
.filter(|entry| entry.is_expired())
.count();
CacheStats {
total_entries,
expired_count,
hit_rate: 0.0, }
}
fn evict_lru(&self, entries: &mut HashMap<String, CacheEntry<T>>, lru_order: &mut VecDeque<String>) {
if let Some(lru_key) = lru_order.pop_back() {
entries.remove(&lru_key);
}
}
}
/// Point-in-time statistics for a single cache, exposed to GraphQL clients
/// via `async_graphql::SimpleObject`.
#[derive(Debug, Clone, Serialize, async_graphql::SimpleObject)]
pub struct CacheStats {
    // Total entries currently stored (including not-yet-purged expired ones).
    pub total_entries: usize,
    // How many stored entries have outlived their TTL.
    pub expired_count: usize,
    // NOTE(review): always 0.0 — hit/miss tracking is not implemented yet.
    pub hit_rate: f64,
}
/// Tunable parameters for building the GraphQL caches.
#[derive(Debug, Clone)]
pub struct CacheConfig {
    // Per-cache capacity bound (the query cache uses half of this).
    pub max_size: usize,
    // TTL applied to entries inserted without an explicit TTL.
    pub default_ttl: Duration,
    // How often the background task purges expired entries.
    pub cleanup_interval: Duration,
}
impl Default for CacheConfig {
fn default() -> Self {
Self {
max_size: 10_000,
default_ttl: Duration::from_secs(300), cleanup_interval: Duration::from_secs(60), }
}
}
/// Owns the per-entity caches used by the GraphQL layer, all built from a
/// single `CacheConfig`. Cloning shares the underlying caches.
#[derive(Clone)]
pub struct GraphQLCacheManager {
    // Cached database metadata, keyed by `db:<name>`.
    pub database_cache: GraphQLCache<DatabaseCacheEntry>,
    // Cached table metadata, keyed by `table:<db>:<table>`.
    pub table_cache: GraphQLCache<TableCacheEntry>,
    // Cached query results, keyed by `query:<db>:<table>:<hash>`.
    pub query_cache: GraphQLCache<QueryCacheEntry>,
    // Retained so the cleanup task knows its sweep interval.
    config: CacheConfig,
}
/// Snapshot of a database's metadata as stored in the database cache.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseCacheEntry {
    pub id: String,
    pub name: String,
    pub status: String,
    pub encryption_algorithm: String,
    // NOTE(review): timestamps are kept as pre-formatted strings here —
    // confirm the expected format with whatever populates this entry.
    pub created_at: String,
    pub updated_at: String,
    pub table_count: i32,
    pub storage_size_bytes: i64,
}
/// Snapshot of a table's metadata as stored in the table cache.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TableCacheEntry {
    pub id: String,
    pub name: String,
    // Name of the database this table belongs to.
    pub database: String,
    pub record_count: i32,
    pub encryption_enabled: bool,
    // NOTE(review): timestamps kept as pre-formatted strings — confirm
    // the format with the producer.
    pub created_at: String,
    pub updated_at: String,
}
/// A cached query result together with execution metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueryCacheEntry {
    // Hash identifying the query (see `QueryHasher::hash_query`).
    pub query_hash: String,
    // The raw JSON result payload.
    pub result: serde_json::Value,
    pub record_count: usize,
    pub execution_time_ms: u64,
}
impl GraphQLCacheManager {
    /// Builds the manager's three caches from `config`.
    ///
    /// The query cache is deliberately smaller (half the capacity) and
    /// shorter-lived (fixed 60 s TTL) than the metadata caches, since query
    /// results are larger and go stale faster.
    pub fn new(config: CacheConfig) -> Self {
        Self {
            database_cache: GraphQLCache::new(config.max_size, config.default_ttl),
            table_cache: GraphQLCache::new(config.max_size, config.default_ttl),
            query_cache: GraphQLCache::new(config.max_size / 2, Duration::from_secs(60)),
            config,
        }
    }

    /// Spawns the background task that purges expired entries from all
    /// three caches every `config.cleanup_interval`. The task runs until
    /// the returned handle is aborted or the runtime shuts down.
    pub fn start_cleanup_task(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(self.config.cleanup_interval);
            loop {
                interval.tick().await;
                tokio::join!(
                    self.database_cache.cleanup_expired(),
                    self.table_cache.cleanup_expired(),
                    self.query_cache.cleanup_expired(),
                );
            }
        })
    }

    /// Cache key for a database: `db:<name>`.
    ///
    /// Uses `format!` rather than the former hand-rolled builders, whose
    /// `with_capacity` counts under-allocated by one byte per `:` separator
    /// in the table/query variants and forced a reallocation.
    pub fn database_key(&self, name: &str) -> String {
        format!("db:{}", name)
    }

    /// Cache key for a table: `table:<database>:<table>`.
    pub fn table_key(&self, database: &str, table: &str) -> String {
        format!("table:{}:{}", database, table)
    }

    /// Cache key for a query result: `query:<database>:<table>:<query_hash>`.
    pub fn query_key(&self, database: &str, table: &str, query_hash: &str) -> String {
        format!("query:{}:{}:{}", database, table, query_hash)
    }

    /// Gathers statistics from all three caches concurrently.
    pub async fn get_stats(&self) -> CacheManagerStats {
        let (db_stats, table_stats, query_stats) = tokio::join!(
            self.database_cache.stats(),
            self.table_cache.stats(),
            self.query_cache.stats(),
        );
        CacheManagerStats {
            database: db_stats,
            table: table_stats,
            query: query_stats,
        }
    }
}
/// Combined statistics for all three caches, exposed to GraphQL clients
/// via `async_graphql::SimpleObject`.
#[derive(Debug, Clone, Serialize, async_graphql::SimpleObject)]
pub struct CacheManagerStats {
    pub database: CacheStats,
    pub table: CacheStats,
    pub query: CacheStats,
}
/// Deterministic content hashing used to build query cache keys.
pub struct QueryHasher;

impl QueryHasher {
    /// Folds `value` into `hasher` structurally and deterministically.
    ///
    /// A per-variant tag byte is hashed before each payload so values of
    /// different JSON types can never feed the hasher identical bytes.
    /// Previously `Number(0)` (i64 zero), `[]` and `{}` (usize length
    /// zero) all hashed the same 8 zero bytes and therefore collided,
    /// which could serve one query's cached result for another. Object
    /// keys are sorted so key order does not affect the hash.
    fn hash_json_value(value: &serde_json::Value, hasher: &mut std::collections::hash_map::DefaultHasher) {
        match value {
            serde_json::Value::Null => 0u8.hash(hasher),
            serde_json::Value::Bool(b) => {
                1u8.hash(hasher);
                b.hash(hasher);
            }
            serde_json::Value::Number(n) => {
                2u8.hash(hasher);
                if let Some(i) = n.as_i64() {
                    i.hash(hasher);
                } else if let Some(u) = n.as_u64() {
                    u.hash(hasher);
                } else if let Some(f) = n.as_f64() {
                    // f64 has no Hash impl; its bit pattern is stable.
                    f.to_bits().hash(hasher);
                }
            }
            serde_json::Value::String(s) => {
                3u8.hash(hasher);
                s.hash(hasher);
            }
            serde_json::Value::Array(arr) => {
                4u8.hash(hasher);
                arr.len().hash(hasher);
                for item in arr {
                    Self::hash_json_value(item, hasher);
                }
            }
            serde_json::Value::Object(obj) => {
                5u8.hash(hasher);
                let mut keys: Vec<_> = obj.keys().collect();
                keys.sort_unstable();
                keys.len().hash(hasher);
                for key in keys {
                    key.hash(hasher);
                    Self::hash_json_value(&obj[key], hasher);
                }
            }
        }
    }

    /// Produces a 16-hex-digit cache key covering the query's identity:
    /// target database/table, the filter document, and pagination (if any).
    ///
    /// Note: `DefaultHasher` is not guaranteed stable across Rust versions
    /// or processes, so these keys must only be used in-memory.
    pub fn hash_query(
        database: &str,
        table: &str,
        filters: &serde_json::Value,
        pagination: &Option<crate::graphql::types::PaginationInput>,
    ) -> String {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};
        let mut hasher = DefaultHasher::new();
        database.hash(&mut hasher);
        table.hash(&mut hasher);
        Self::hash_json_value(filters, &mut hasher);
        if let Some(pagination) = pagination {
            pagination.page.hash(&mut hasher);
            pagination.page_size.hash(&mut hasher);
        }
        format!("{:016x}", hasher.finish())
    }
}