use std::borrow::Cow;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::{Arc, RwLock};
use tracing::debug;
/// Thread-safe, bounded cache of SQL strings keyed by [`QueryKey`].
///
/// When full, insertions trigger a batched eviction of the least-accessed
/// entries (see `evict_lru`). Hit/miss counters live behind a separate lock.
#[derive(Debug)]
pub struct QueryCache {
    // Maximum number of entries before an insertion triggers eviction.
    max_size: usize,
    // Keyed SQL entries; std RwLock, lock poisoning is unwrapped.
    cache: RwLock<HashMap<QueryKey, CachedQuery>>,
    // Hit/miss/eviction/insertion counters, under their own lock.
    stats: RwLock<CacheStats>,
}
/// Cache key for a SQL statement.
///
/// Backed by a `Cow` so keys built from `'static` strings are allocation-free
/// while dynamically built keys can own their storage. Equality and hashing
/// compare the string contents, so a borrowed and an owned key with the same
/// text are interchangeable.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct QueryKey {
    key: Cow<'static, str>,
}

impl QueryKey {
    /// Zero-allocation key from a `'static` string; usable in `const` contexts.
    #[inline]
    pub const fn new(key: &'static str) -> Self {
        Self { key: Cow::Borrowed(key) }
    }

    /// Key that owns its backing string.
    #[inline]
    pub fn owned(key: String) -> Self {
        Self { key: Cow::Owned(key) }
    }
}

impl From<&'static str> for QueryKey {
    fn from(s: &'static str) -> Self {
        QueryKey::new(s)
    }
}

impl From<String> for QueryKey {
    fn from(s: String) -> Self {
        QueryKey::owned(s)
    }
}
/// A cached SQL string together with its bind-parameter count.
#[derive(Debug, Clone)]
pub struct CachedQuery {
    /// The SQL text.
    pub sql: String,
    /// Number of bind parameters in the SQL.
    pub param_count: usize,
    // Read by the cache's eviction logic to order entries (least-accessed
    // entries are evicted first).
    access_count: u64,
}

impl CachedQuery {
    /// Builds an entry with a zeroed access counter.
    pub fn new(sql: impl Into<String>, param_count: usize) -> Self {
        CachedQuery {
            sql: sql.into(),
            param_count,
            access_count: 0,
        }
    }

    /// Borrowed view of the SQL text.
    #[inline]
    pub fn sql(&self) -> &str {
        self.sql.as_str()
    }

    /// Number of bind parameters.
    #[inline]
    pub fn param_count(&self) -> usize {
        self.param_count
    }
}
/// Counters describing cache effectiveness.
#[derive(Debug, Default, Clone)]
pub struct CacheStats {
    /// Lookups served from the cache.
    pub hits: u64,
    /// Lookups that found nothing.
    pub misses: u64,
    /// Entries removed to make room.
    pub evictions: u64,
    /// Entries stored.
    pub insertions: u64,
}

impl CacheStats {
    /// Fraction of lookups that hit, in `[0.0, 1.0]`; `0.0` before any lookup.
    #[inline]
    pub fn hit_rate(&self) -> f64 {
        match self.hits + self.misses {
            0 => 0.0,
            total => self.hits as f64 / total as f64,
        }
    }
}
impl QueryCache {
    /// Creates a cache that holds at most `max_size` entries.
    pub fn new(max_size: usize) -> Self {
        tracing::info!(max_size, "QueryCache initialized");
        Self {
            max_size,
            cache: RwLock::new(HashMap::with_capacity(max_size)),
            stats: RwLock::new(CacheStats::default()),
        }
    }

    /// Inserts `sql` under `key`, inferring the parameter count from the
    /// `$n` / `?` placeholders found in the SQL text.
    pub fn insert(&self, key: impl Into<QueryKey>, sql: impl Into<String>) {
        let key = key.into();
        let sql = sql.into();
        let param_count = count_placeholders(&sql);
        debug!(key = ?key.key, sql_len = sql.len(), param_count, "QueryCache::insert()");
        self.insert_entry(key, CachedQuery::new(sql, param_count));
    }

    /// Inserts `sql` under `key` with an explicitly supplied parameter
    /// count, skipping the placeholder scan.
    pub fn insert_with_params(
        &self,
        key: impl Into<QueryKey>,
        sql: impl Into<String>,
        param_count: usize,
    ) {
        self.insert_entry(key.into(), CachedQuery::new(sql.into(), param_count));
    }

    /// Shared insertion path (previously duplicated in `insert` and
    /// `insert_with_params`): evicts cold entries when the cache is full
    /// and the key is new, then stores the entry.
    fn insert_entry(&self, key: QueryKey, entry: CachedQuery) {
        // Lock order is cache -> stats throughout this impl.
        let mut cache = self.cache.write().unwrap();
        let mut stats = self.stats.write().unwrap();
        if cache.len() >= self.max_size && !cache.contains_key(&key) {
            let evicted = Self::evict_lru(&mut cache);
            // BUGFIX: count every evicted entry; the old code recorded a
            // whole eviction batch as a single eviction.
            stats.evictions += evicted as u64;
            if evicted > 0 {
                debug!(evicted, "QueryCache evicted entries");
            }
        }
        cache.insert(key, entry);
        stats.insertions += 1;
    }

    /// Returns a clone of the cached SQL for `key`, updating hit/miss stats.
    pub fn get(&self, key: impl Into<QueryKey>) -> Option<String> {
        let key = key.into();
        {
            // BUGFIX: take the write lock so a hit can bump `access_count`.
            // Previously the counter never moved, so `evict_lru`'s
            // sort-by-access-count evicted effectively arbitrary entries.
            let mut cache = self.cache.write().unwrap();
            if let Some(entry) = cache.get_mut(&key) {
                entry.access_count += 1;
                let sql = entry.sql.clone();
                let mut stats = self.stats.write().unwrap();
                stats.hits += 1;
                debug!(key = ?key.key, "QueryCache hit");
                return Some(sql);
            }
        }
        let mut stats = self.stats.write().unwrap();
        stats.misses += 1;
        debug!(key = ?key.key, "QueryCache miss");
        None
    }

    /// Like [`Self::get`] but returns the full entry (SQL + param count).
    pub fn get_entry(&self, key: impl Into<QueryKey>) -> Option<CachedQuery> {
        let key = key.into();
        let mut cache = self.cache.write().unwrap();
        if let Some(entry) = cache.get_mut(&key) {
            // BUGFIX: record the access so eviction ordering is meaningful.
            entry.access_count += 1;
            let cloned = entry.clone();
            let mut stats = self.stats.write().unwrap();
            stats.hits += 1;
            return Some(cloned);
        }
        drop(cache);
        let mut stats = self.stats.write().unwrap();
        stats.misses += 1;
        None
    }

    /// Returns the cached SQL for `key`, computing and inserting it via `f`
    /// on a miss. Racing callers may each invoke `f`; last insert wins.
    pub fn get_or_insert<F>(&self, key: impl Into<QueryKey>, f: F) -> String
    where
        F: FnOnce() -> String,
    {
        let key = key.into();
        if let Some(sql) = self.get(key.clone()) {
            return sql;
        }
        let sql = f();
        self.insert(key, sql.clone());
        sql
    }

    /// Whether `key` is present. Does not touch stats or access counts.
    pub fn contains(&self, key: impl Into<QueryKey>) -> bool {
        self.cache.read().unwrap().contains_key(&key.into())
    }

    /// Removes `key`, returning its SQL if it was present.
    pub fn remove(&self, key: impl Into<QueryKey>) -> Option<String> {
        self.cache.write().unwrap().remove(&key.into()).map(|e| e.sql)
    }

    /// Drops all entries. Statistics are kept; see [`Self::reset_stats`].
    pub fn clear(&self) {
        self.cache.write().unwrap().clear();
    }

    /// Current number of cached entries.
    pub fn len(&self) -> usize {
        self.cache.read().unwrap().len()
    }

    /// Whether the cache holds no entries.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Configured capacity bound.
    pub fn max_size(&self) -> usize {
        self.max_size
    }

    /// Snapshot of the hit/miss/eviction/insertion counters.
    pub fn stats(&self) -> CacheStats {
        self.stats.read().unwrap().clone()
    }

    /// Zeroes all counters.
    pub fn reset_stats(&self) {
        *self.stats.write().unwrap() = CacheStats::default();
    }

    /// Evicts roughly a quarter of the entries, least-accessed first.
    /// Returns how many entries were removed.
    fn evict_lru(cache: &mut HashMap<QueryKey, CachedQuery>) -> usize {
        let to_evict = cache.len() / 4;
        if to_evict == 0 {
            return 0;
        }
        let mut entries: Vec<_> = cache
            .iter()
            .map(|(k, v)| (k.clone(), v.access_count))
            .collect();
        entries.sort_by_key(|(_, count)| *count);
        for (key, _) in entries.into_iter().take(to_evict) {
            cache.remove(&key);
        }
        to_evict
    }
}
impl Default for QueryCache {
fn default() -> Self {
Self::new(1000)
}
}
/// Counts bind parameters in a SQL string.
///
/// Postgres-style `$n` placeholders contribute the highest parameter number
/// seen; MySQL-style `?` placeholders each add one. A `$` not followed by a
/// parseable number is ignored. Quoted literals are not skipped, so a `?` or
/// `$n` inside a string constant is still counted.
fn count_placeholders(sql: &str) -> usize {
    let mut total = 0usize;
    let mut it = sql.chars().peekable();
    while let Some(ch) = it.next() {
        match ch {
            '?' => total += 1,
            '$' => {
                // Gather the digit run after '$'; an empty or overflowing
                // run fails to parse and leaves the count untouched.
                let mut digits = String::new();
                while matches!(it.peek(), Some(c) if c.is_ascii_digit()) {
                    digits.push(it.next().unwrap());
                }
                if let Ok(n) = digits.parse::<usize>() {
                    total = total.max(n);
                }
            }
            _ => {}
        }
    }
    total
}
/// Precomputed 64-bit hash of a SQL string.
///
/// Uses the std `DefaultHasher`, so values are only stable within a single
/// process run — do not persist them.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct QueryHash(u64);

impl QueryHash {
    /// Hashes `sql` into a compact lookup token.
    pub fn new(sql: &str) -> Self {
        use std::collections::hash_map::DefaultHasher;
        let mut state = DefaultHasher::new();
        sql.hash(&mut state);
        QueryHash(state.finish())
    }

    /// The raw hash value.
    #[inline]
    pub fn value(&self) -> u64 {
        self.0
    }
}
/// Canonical cache-key builders for common query shapes, so every call site
/// derives an identical key for the same logical statement.
pub mod patterns {
    use super::QueryKey;

    /// Key for a select-by-primary-key on `table`.
    #[inline]
    pub fn select_by_id(table: &str) -> QueryKey {
        QueryKey::owned(format!("select_by_id:{table}"))
    }

    /// Key for an unfiltered select on `table`.
    #[inline]
    pub fn select_all(table: &str) -> QueryKey {
        QueryKey::owned(format!("select_all:{table}"))
    }

    /// Key for an insert into `table` with `columns` columns.
    #[inline]
    pub fn insert(table: &str, columns: usize) -> QueryKey {
        QueryKey::owned(format!("insert:{table}:{columns}"))
    }

    /// Key for an update-by-id on `table` touching `columns` columns.
    #[inline]
    pub fn update_by_id(table: &str, columns: usize) -> QueryKey {
        QueryKey::owned(format!("update_by_id:{table}:{columns}"))
    }

    /// Key for a delete-by-id on `table`.
    #[inline]
    pub fn delete_by_id(table: &str) -> QueryKey {
        QueryKey::owned(format!("delete_by_id:{table}"))
    }

    /// Key for an unfiltered row count of `table`.
    #[inline]
    pub fn count(table: &str) -> QueryKey {
        QueryKey::owned(format!("count:{table}"))
    }

    /// Key for a filtered row count; `filter_hash` disambiguates the predicate.
    #[inline]
    pub fn count_filtered(table: &str, filter_hash: u64) -> QueryKey {
        QueryKey::owned(format!("count:{table}:{filter_hash}"))
    }
}
/// Hash-indexed SQL template cache built on `parking_lot` locks.
///
/// Templates are stored by SQL hash; a secondary index maps string keys to
/// hashes, so key lookups go key -> hash -> template.
#[derive(Debug)]
pub struct SqlTemplateCache {
    // Capacity bound checked at registration time.
    max_size: usize,
    // SQL hash -> shared template.
    templates: parking_lot::RwLock<HashMap<u64, Arc<SqlTemplate>>>,
    // String key -> SQL hash (secondary index into `templates`).
    key_index: parking_lot::RwLock<HashMap<Cow<'static, str>, u64>>,
    // Hit/miss/eviction/insertion counters.
    stats: parking_lot::RwLock<CacheStats>,
}
/// Immutable SQL text plus its precomputed hash and placeholder count.
#[derive(Debug)]
pub struct SqlTemplate {
    /// The SQL text; `Arc<str>` makes clones a refcount bump.
    pub sql: Arc<str>,
    /// `DefaultHasher` hash of the SQL (stable within one process run only).
    pub hash: u64,
    /// Number of bind parameters detected in the SQL.
    pub param_count: usize,
    // Unix seconds of the most recent lookup; read by the cache's LRU
    // eviction. Relaxed atomics — coarse ordering only.
    last_access: std::sync::atomic::AtomicU64,
}
impl Clone for SqlTemplate {
fn clone(&self) -> Self {
use std::sync::atomic::Ordering;
Self {
sql: Arc::clone(&self.sql),
hash: self.hash,
param_count: self.param_count,
last_access: std::sync::atomic::AtomicU64::new(
self.last_access.load(Ordering::Relaxed),
),
}
}
}
impl SqlTemplate {
    /// Builds a template, hashing the SQL and counting its placeholders.
    pub fn new(sql: impl AsRef<str>) -> Self {
        let text = sql.as_ref();
        let mut hasher = std::collections::hash_map::DefaultHasher::new();
        text.hash(&mut hasher);
        Self {
            sql: Arc::from(text),
            hash: hasher.finish(),
            param_count: count_placeholders(text),
            last_access: std::sync::atomic::AtomicU64::new(0),
        }
    }

    /// Borrowed view of the SQL text.
    #[inline(always)]
    pub fn sql(&self) -> &str {
        self.sql.as_ref()
    }

    /// Cheap shared handle to the SQL text.
    #[inline(always)]
    pub fn sql_arc(&self) -> Arc<str> {
        self.sql.clone()
    }

    /// Stamps the entry with the current wall-clock time in Unix seconds.
    /// Seconds precision is enough for coarse LRU ordering; a clock before
    /// the epoch degrades to 0 rather than panicking.
    #[inline]
    fn touch(&self) {
        use std::sync::atomic::Ordering;
        use std::time::{SystemTime, UNIX_EPOCH};
        let secs = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(d) => d.as_secs(),
            Err(_) => 0,
        };
        self.last_access.store(secs, Ordering::Relaxed);
    }
}
impl SqlTemplateCache {
    /// Creates a template cache bounded at `max_size` entries.
    pub fn new(max_size: usize) -> Self {
        tracing::info!(max_size, "SqlTemplateCache initialized");
        Self {
            max_size,
            templates: parking_lot::RwLock::new(HashMap::with_capacity(max_size)),
            key_index: parking_lot::RwLock::new(HashMap::with_capacity(max_size)),
            stats: parking_lot::RwLock::new(CacheStats::default()),
        }
    }

    /// Registers `sql` under `key`, returning the shared template.
    /// Re-registering a key overwrites its mapping.
    #[inline]
    pub fn register(
        &self,
        key: impl Into<Cow<'static, str>>,
        sql: impl AsRef<str>,
    ) -> Arc<SqlTemplate> {
        let key = key.into();
        let template = Arc::new(SqlTemplate::new(sql));
        let hash = template.hash;
        // Lock order here: templates -> key_index -> stats.
        let mut templates = self.templates.write();
        let mut key_index = self.key_index.write();
        let mut stats = self.stats.write();
        // BUGFIX: only evict when the insertion would actually grow the map
        // (mirrors QueryCache::insert); re-registering an existing hash at
        // capacity previously triggered a needless eviction batch.
        if templates.len() >= self.max_size && !templates.contains_key(&hash) {
            let evicted = Self::evict_lru_internal(&mut templates, &mut key_index);
            // BUGFIX: count every evicted template, not one per batch.
            stats.evictions += evicted as u64;
        }
        key_index.insert(key, hash);
        templates.insert(hash, Arc::clone(&template));
        stats.insertions += 1;
        debug!(hash, "SqlTemplateCache::register()");
        template
    }

    /// Registers a template under a caller-chosen `hash`, bypassing the key
    /// index. NOTE(review): the supplied hash may differ from the template's
    /// own computed `hash`, and no key is indexed, so key-based `get` will
    /// not find entries registered this way — confirm this is intended.
    #[inline]
    pub fn register_by_hash(&self, hash: u64, sql: impl AsRef<str>) -> Arc<SqlTemplate> {
        let template = Arc::new(SqlTemplate::new(sql));
        let mut templates = self.templates.write();
        let mut stats = self.stats.write();
        if templates.len() >= self.max_size && !templates.contains_key(&hash) {
            // The `templates` write lock is held first on every write path,
            // so taking `key_index` after `stats` here cannot deadlock.
            let mut key_index = self.key_index.write();
            let evicted = Self::evict_lru_internal(&mut templates, &mut key_index);
            stats.evictions += evicted as u64;
        }
        templates.insert(hash, Arc::clone(&template));
        stats.insertions += 1;
        template
    }

    /// Looks up a template by key, updating hit/miss stats and the entry's
    /// last-access time.
    #[inline]
    pub fn get(&self, key: &str) -> Option<Arc<SqlTemplate>> {
        let hash = {
            let key_index = self.key_index.read();
            match key_index.get(key) {
                Some(&h) => h,
                None => {
                    drop(key_index);
                    self.stats.write().misses += 1;
                    return None;
                }
            }
        };
        let templates = self.templates.read();
        if let Some(template) = templates.get(&hash) {
            template.touch();
            self.stats.write().hits += 1;
            return Some(Arc::clone(template));
        }
        self.stats.write().misses += 1;
        None
    }

    /// Fast-path lookup by precomputed hash; intentionally skips stats but
    /// still refreshes the last-access time.
    #[inline(always)]
    pub fn get_by_hash(&self, hash: u64) -> Option<Arc<SqlTemplate>> {
        let templates = self.templates.read();
        templates.get(&hash).map(|template| {
            template.touch();
            Arc::clone(template)
        })
    }

    /// Convenience: the SQL text for `key`, if cached.
    #[inline]
    pub fn get_sql(&self, key: &str) -> Option<Arc<str>> {
        self.get(key).map(|t| t.sql_arc())
    }

    /// Returns the template for `key`, building and registering it via `f`
    /// on a miss. Racing callers may each invoke `f`; last registration wins.
    #[inline]
    pub fn get_or_register<F>(&self, key: impl Into<Cow<'static, str>>, f: F) -> Arc<SqlTemplate>
    where
        F: FnOnce() -> String,
    {
        let key = key.into();
        if let Some(template) = self.get(&key) {
            return template;
        }
        let sql = f();
        self.register(key, sql)
    }

    /// Whether `key` is registered. Does not update stats.
    #[inline]
    pub fn contains(&self, key: &str) -> bool {
        self.key_index.read().contains_key(key)
    }

    /// Snapshot of the hit/miss/eviction/insertion counters.
    pub fn stats(&self) -> CacheStats {
        self.stats.read().clone()
    }

    /// Number of cached templates.
    pub fn len(&self) -> usize {
        self.templates.read().len()
    }

    /// Whether the cache holds no templates.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Drops all templates and key mappings; stats are kept.
    pub fn clear(&self) {
        self.templates.write().clear();
        self.key_index.write().clear();
    }

    /// Evicts roughly a quarter of the templates, oldest access first, and
    /// removes any keys pointing at them. Returns the number evicted.
    fn evict_lru_internal(
        templates: &mut HashMap<u64, Arc<SqlTemplate>>,
        key_index: &mut HashMap<Cow<'static, str>, u64>,
    ) -> usize {
        use std::sync::atomic::Ordering;
        let to_evict = templates.len() / 4;
        if to_evict == 0 {
            return 0;
        }
        let mut entries: Vec<_> = templates
            .iter()
            .map(|(&hash, t)| (hash, t.last_access.load(Ordering::Relaxed)))
            .collect();
        entries.sort_by_key(|(_, time)| *time);
        for (hash, _) in entries.into_iter().take(to_evict) {
            templates.remove(&hash);
            key_index.retain(|_, h| *h != hash);
        }
        to_evict
    }
}
impl Default for SqlTemplateCache {
fn default() -> Self {
Self::new(1000)
}
}
/// Process-wide template cache, created lazily on first access.
static GLOBAL_TEMPLATE_CACHE: std::sync::OnceLock<SqlTemplateCache> = std::sync::OnceLock::new();

/// Shared handle to the process-wide [`SqlTemplateCache`] (capacity 10 000).
#[inline(always)]
pub fn global_template_cache() -> &'static SqlTemplateCache {
    GLOBAL_TEMPLATE_CACHE.get_or_init(|| SqlTemplateCache::new(10_000))
}
/// Registers `sql` under `key` in the process-wide template cache.
#[inline]
pub fn register_global_template(
    key: impl Into<Cow<'static, str>>,
    sql: impl AsRef<str>,
) -> Arc<SqlTemplate> {
    let cache = global_template_cache();
    cache.register(key, sql)
}
/// Looks up `key` in the process-wide template cache.
#[inline(always)]
pub fn get_global_template(key: &str) -> Option<Arc<SqlTemplate>> {
    let cache = global_template_cache();
    cache.get(key)
}
/// Hashes a key string with the std `DefaultHasher` so the result can be
/// used with the hash-indexed caches. Stable within one process run only.
#[inline]
pub fn precompute_query_hash(key: &str) -> u64 {
    use std::collections::hash_map::DefaultHasher;
    let mut state = DefaultHasher::new();
    key.hash(&mut state);
    state.finish()
}
#[cfg(test)]
mod tests {
    use super::*;

    // Insert, membership check, and lookup round-trip on QueryCache.
    #[test]
    fn test_query_cache_basic() {
        let cache = QueryCache::new(10);
        cache.insert("users_by_id", "SELECT * FROM users WHERE id = $1");
        assert!(cache.contains("users_by_id"));
        let sql = cache.get("users_by_id");
        assert_eq!(sql, Some("SELECT * FROM users WHERE id = $1".to_string()));
    }

    // The second get_or_insert must return the cached value; the closure's
    // result ("SELECT 2") is computed but the first value wins.
    #[test]
    fn test_query_cache_get_or_insert() {
        let cache = QueryCache::new(10);
        let sql1 = cache.get_or_insert("test", || "SELECT 1".to_string());
        assert_eq!(sql1, "SELECT 1");
        let sql2 = cache.get_or_insert("test", || "SELECT 2".to_string());
        assert_eq!(sql2, "SELECT 1");
    }

    // Hit/miss/insertion counters reflect the exact call sequence:
    // two hits, one miss, one insertion.
    #[test]
    fn test_query_cache_stats() {
        let cache = QueryCache::new(10);
        cache.insert("test", "SELECT 1");
        cache.get("test");
        cache.get("test");
        cache.get("missing");
        let stats = cache.stats();
        assert_eq!(stats.hits, 2);
        assert_eq!(stats.misses, 1);
        assert_eq!(stats.insertions, 1);
    }

    // Postgres-style $n placeholders: the count is the highest parameter
    // number seen, so "$10" alone yields 10.
    #[test]
    fn test_count_placeholders_postgres() {
        assert_eq!(count_placeholders("SELECT * FROM users WHERE id = $1"), 1);
        assert_eq!(
            count_placeholders("SELECT * FROM users WHERE id = $1 AND name = $2"),
            2
        );
        assert_eq!(count_placeholders("SELECT * FROM users WHERE id = $10"), 10);
    }

    // MySQL-style ? placeholders are counted one by one.
    #[test]
    fn test_count_placeholders_mysql() {
        assert_eq!(count_placeholders("SELECT * FROM users WHERE id = ?"), 1);
        assert_eq!(
            count_placeholders("SELECT * FROM users WHERE id = ? AND name = ?"),
            2
        );
    }

    // Identical SQL hashes equal; different SQL hashes differ (DefaultHasher
    // is deterministic within one process).
    #[test]
    fn test_query_hash() {
        let hash1 = QueryHash::new("SELECT * FROM users");
        let hash2 = QueryHash::new("SELECT * FROM users");
        let hash3 = QueryHash::new("SELECT * FROM posts");
        assert_eq!(hash1, hash2);
        assert_ne!(hash1, hash3);
    }

    // The patterns helpers build prefixed keys; `key` is accessible here
    // because tests live in the same module tree as QueryKey.
    #[test]
    fn test_patterns() {
        let key = patterns::select_by_id("users");
        assert!(key.key.starts_with("select_by_id:"));
    }

    // Registration exposes the SQL text and the detected parameter count.
    #[test]
    fn test_sql_template_cache_basic() {
        let cache = SqlTemplateCache::new(100);
        let template = cache.register("users_by_id", "SELECT * FROM users WHERE id = $1");
        assert_eq!(template.sql(), "SELECT * FROM users WHERE id = $1");
        assert_eq!(template.param_count, 1);
    }

    // Key lookup returns the registered template; unknown keys return None.
    #[test]
    fn test_sql_template_cache_get() {
        let cache = SqlTemplateCache::new(100);
        cache.register("test_query", "SELECT * FROM test WHERE x = $1");
        let result = cache.get("test_query");
        assert!(result.is_some());
        assert_eq!(result.unwrap().sql(), "SELECT * FROM test WHERE x = $1");
        let missing = cache.get("nonexistent");
        assert!(missing.is_none());
    }

    // Hash-based lookup bypasses the key index.
    #[test]
    fn test_sql_template_cache_get_by_hash() {
        let cache = SqlTemplateCache::new(100);
        let template = cache.register("fast_query", "SELECT 1");
        let hash = template.hash;
        let result = cache.get_by_hash(hash);
        assert!(result.is_some());
        assert_eq!(result.unwrap().sql(), "SELECT 1");
    }

    // On a hit the closure must not run at all (panics if invoked), and
    // both handles refer to the same underlying template.
    #[test]
    fn test_sql_template_cache_get_or_register() {
        let cache = SqlTemplateCache::new(100);
        let t1 = cache.get_or_register("computed", || "SELECT * FROM computed".to_string());
        assert_eq!(t1.sql(), "SELECT * FROM computed");
        let t2 = cache.get_or_register("computed", || panic!("Should not be called"));
        assert_eq!(t2.sql(), "SELECT * FROM computed");
        assert_eq!(t1.hash, t2.hash);
    }

    // Same hit/miss bookkeeping as QueryCache: two hits, one miss.
    #[test]
    fn test_sql_template_cache_stats() {
        let cache = SqlTemplateCache::new(100);
        cache.register("q1", "SELECT 1");
        cache.get("q1");
        cache.get("q1");
        cache.get("missing");
        let stats = cache.stats();
        assert_eq!(stats.hits, 2);
        assert_eq!(stats.misses, 1);
        assert_eq!(stats.insertions, 1);
    }

    // NOTE(review): exercises the shared process-wide cache, so this test's
    // state is visible to any other test using the global cache.
    #[test]
    fn test_global_template_cache() {
        let template = register_global_template("global_test", "SELECT * FROM global");
        assert_eq!(template.sql(), "SELECT * FROM global");
        let result = get_global_template("global_test");
        assert!(result.is_some());
        assert_eq!(result.unwrap().sql(), "SELECT * FROM global");
    }

    // Free-function hashing is deterministic and input-sensitive.
    #[test]
    fn test_precompute_query_hash() {
        let hash1 = precompute_query_hash("test_key");
        let hash2 = precompute_query_hash("test_key");
        let hash3 = precompute_query_hash("other_key");
        assert_eq!(hash1, hash2);
        assert_ne!(hash1, hash3);
    }

    // Registration with a hint round-trips through get, preserving the hint.
    #[test]
    fn test_execution_plan_cache() {
        let cache = ExecutionPlanCache::new(100);
        let plan = cache.register(
            "users_by_email",
            "SELECT * FROM users WHERE email = $1",
            PlanHint::IndexScan("users_email_idx".into()),
        );
        assert_eq!(plan.sql.as_ref(), "SELECT * FROM users WHERE email = $1");
        let result = cache.get("users_by_email");
        assert!(result.is_some());
        assert!(matches!(result.unwrap().hint, PlanHint::IndexScan(_)));
    }
}
/// Planner hint attached to a cached execution plan.
#[derive(Debug, Clone, Default)]
pub enum PlanHint {
    /// No hint.
    #[default]
    None,
    /// Prefer the named index.
    IndexScan(String),
    /// Force a sequential scan.
    SeqScan,
    /// Requested degree of parallelism.
    Parallel(u32),
    /// Ask the backend to cache the plan.
    CachePlan,
    /// Per-statement timeout.
    Timeout(std::time::Duration),
    /// Free-form, backend-specific hint text.
    Custom(String),
}

/// A cached SQL statement plus planner hint and lightweight runtime metrics.
#[derive(Debug)]
pub struct ExecutionPlan {
    /// The SQL text.
    pub sql: Arc<str>,
    /// `DefaultHasher` hash of the SQL text.
    pub hash: u64,
    /// Planner hint supplied at construction.
    pub hint: PlanHint,
    /// Optional planner-estimated cost.
    pub estimated_cost: Option<f64>,
    // Metrics are best-effort: updated with separate relaxed atomic ops, so
    // concurrent recorders may produce a slightly lossy running average.
    use_count: std::sync::atomic::AtomicU64,
    avg_execution_us: std::sync::atomic::AtomicU64,
}

/// Hashes `s` with the std `DefaultHasher` (stable within one process only).
fn compute_hash(s: &str) -> u64 {
    let mut hasher = std::collections::hash_map::DefaultHasher::new();
    s.hash(&mut hasher);
    hasher.finish()
}

impl ExecutionPlan {
    /// Shared constructor backing `new` and `with_cost` (previously two
    /// near-identical bodies).
    fn build(sql_str: &str, hint: PlanHint, estimated_cost: Option<f64>) -> Self {
        Self {
            sql: Arc::from(sql_str),
            hash: compute_hash(sql_str),
            hint,
            estimated_cost,
            use_count: std::sync::atomic::AtomicU64::new(0),
            avg_execution_us: std::sync::atomic::AtomicU64::new(0),
        }
    }

    /// Plan without a cost estimate.
    pub fn new(sql: impl AsRef<str>, hint: PlanHint) -> Self {
        Self::build(sql.as_ref(), hint, None)
    }

    /// Plan with a planner-estimated cost.
    pub fn with_cost(sql: impl AsRef<str>, hint: PlanHint, cost: f64) -> Self {
        Self::build(sql.as_ref(), hint, Some(cost))
    }

    /// Records one execution and folds `duration_us` into the running mean.
    pub fn record_execution(&self, duration_us: u64) {
        use std::sync::atomic::Ordering;
        let old_count = self.use_count.fetch_add(1, Ordering::Relaxed);
        let old_avg = self.avg_execution_us.load(Ordering::Relaxed);
        let new_avg = if old_count == 0 {
            duration_us
        } else {
            // BUGFIX: widen to u128 — `old_avg * old_count` can overflow u64
            // for long-lived plans with large microsecond averages.
            let total = old_avg as u128 * old_count as u128 + duration_us as u128;
            (total / (old_count as u128 + 1)) as u64
        };
        self.avg_execution_us.store(new_avg, Ordering::Relaxed);
    }

    /// Number of recorded executions.
    pub fn use_count(&self) -> u64 {
        self.use_count.load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Running mean execution time, in microseconds.
    pub fn avg_execution_us(&self) -> u64 {
        self.avg_execution_us.load(std::sync::atomic::Ordering::Relaxed)
    }
}
/// Cache of [`ExecutionPlan`]s indexed by SQL hash, with a string-key
/// secondary index. When full, the least-used plan is evicted.
#[derive(Debug)]
pub struct ExecutionPlanCache {
    // Capacity bound checked at registration time.
    max_size: usize,
    // SQL hash -> shared plan.
    plans: parking_lot::RwLock<HashMap<u64, Arc<ExecutionPlan>>>,
    // String key -> SQL hash (secondary index into `plans`).
    key_index: parking_lot::RwLock<HashMap<Cow<'static, str>, u64>>,
}
impl ExecutionPlanCache {
    /// Creates a plan cache bounded at `max_size` entries.
    pub fn new(max_size: usize) -> Self {
        Self {
            max_size,
            plans: parking_lot::RwLock::new(HashMap::with_capacity(max_size / 2)),
            key_index: parking_lot::RwLock::new(HashMap::with_capacity(max_size / 2)),
        }
    }

    /// Registers a plan for `sql` under `key`.
    pub fn register(
        &self,
        key: impl Into<Cow<'static, str>>,
        sql: impl AsRef<str>,
        hint: PlanHint,
    ) -> Arc<ExecutionPlan> {
        self.store(key.into(), Arc::new(ExecutionPlan::new(sql, hint)))
    }

    /// Registers a plan with an explicit planner cost estimate.
    pub fn register_with_cost(
        &self,
        key: impl Into<Cow<'static, str>>,
        sql: impl AsRef<str>,
        hint: PlanHint,
        cost: f64,
    ) -> Arc<ExecutionPlan> {
        self.store(key.into(), Arc::new(ExecutionPlan::with_cost(sql, hint, cost)))
    }

    /// Shared insertion path (previously duplicated, in two divergent
    /// styles, across `register` and `register_with_cost`): when full and
    /// the hash is new, evicts the least-used plan and its key mappings,
    /// then indexes the new plan under `key`.
    fn store(&self, key: Cow<'static, str>, plan: Arc<ExecutionPlan>) -> Arc<ExecutionPlan> {
        let hash = plan.hash;
        let mut plans = self.plans.write();
        let mut key_index = self.key_index.write();
        if plans.len() >= self.max_size
            && !plans.contains_key(&hash)
            && let Some((&evict_hash, _)) = plans.iter().min_by_key(|(_, p)| p.use_count())
        {
            plans.remove(&evict_hash);
            key_index.retain(|_, &mut v| v != evict_hash);
        }
        plans.insert(hash, Arc::clone(&plan));
        key_index.insert(key, hash);
        plan
    }

    /// Plan registered under `key`, if any.
    pub fn get(&self, key: &str) -> Option<Arc<ExecutionPlan>> {
        let hash = {
            let key_index = self.key_index.read();
            *key_index.get(key)?
        };
        self.plans.read().get(&hash).cloned()
    }

    /// Plan for a precomputed SQL hash, if any.
    pub fn get_by_hash(&self, hash: u64) -> Option<Arc<ExecutionPlan>> {
        self.plans.read().get(&hash).cloned()
    }

    /// Returns the plan for `key`, building and registering one via `sql_fn`
    /// on a miss. Racing callers may each invoke `sql_fn`.
    pub fn get_or_register<F>(
        &self,
        key: impl Into<Cow<'static, str>>,
        sql_fn: F,
        hint: PlanHint,
    ) -> Arc<ExecutionPlan>
    where
        F: FnOnce() -> String,
    {
        let key = key.into();
        match self.get(key.as_ref()) {
            Some(plan) => plan,
            None => self.register(key, sql_fn(), hint),
        }
    }

    /// Folds `duration_us` into the metrics of `key`'s plan; silently a
    /// no-op when the key is unknown.
    pub fn record_execution(&self, key: &str, duration_us: u64) {
        if let Some(plan) = self.get(key) {
            plan.record_execution(duration_us);
        }
    }

    /// Up to `limit` plans with the highest average execution time.
    pub fn slowest_queries(&self, limit: usize) -> Vec<Arc<ExecutionPlan>> {
        self.top_by(limit, |p| p.avg_execution_us())
    }

    /// Up to `limit` plans with the highest use count.
    pub fn most_used(&self, limit: usize) -> Vec<Arc<ExecutionPlan>> {
        self.top_by(limit, |p| p.use_count())
    }

    /// Plans sorted descending by `metric`, truncated to `limit`.
    /// (Shared body for `slowest_queries` / `most_used`.)
    fn top_by(
        &self,
        limit: usize,
        metric: impl Fn(&ExecutionPlan) -> u64,
    ) -> Vec<Arc<ExecutionPlan>> {
        let plans = self.plans.read();
        let mut sorted: Vec<_> = plans.values().cloned().collect();
        sorted.sort_by_key(|p| std::cmp::Reverse(metric(p)));
        sorted.truncate(limit);
        sorted
    }

    /// Drops all plans and key mappings.
    pub fn clear(&self) {
        self.plans.write().clear();
        self.key_index.write().clear();
    }

    /// Number of cached plans.
    pub fn len(&self) -> usize {
        self.plans.read().len()
    }

    /// Whether the cache holds no plans.
    pub fn is_empty(&self) -> bool {
        self.plans.read().is_empty()
    }
}