use parking_lot::RwLock;
use std::sync::Arc;
use std::time::{Duration, Instant};
/// Aggregated metrics for all Phase 1 Turso performance optimizations.
///
/// One field per optimization strategy; each sub-struct carries its own
/// counters and derived-percentage helpers.
#[derive(Debug, Clone, Default)]
pub struct OptimizationMetrics {
    /// Cache-first read strategy counters (hits/misses/latency).
    pub cache_first: CacheFirstMetrics,
    /// Request batching counters (batch sizes, round trips avoided).
    pub batching: BatchingMetrics,
    /// Prepared-statement cache counters.
    pub prepared_statements: PreparedStatementMetrics,
    /// Metadata query (json_extract vs LIKE) counters.
    pub query_optimization: QueryOptimizationMetrics,
}
/// Counters for the cache-first read strategy.
///
/// Latency fields are running means in microseconds, maintained
/// incrementally by `PerformanceMetrics::record_cache_read`.
#[derive(Debug, Clone, Default)]
pub struct CacheFirstMetrics {
    /// Total reads attempted (hits + misses).
    pub total_reads: u64,
    /// Reads served from the cache.
    pub cache_hits: u64,
    /// Reads that fell through to the backing store.
    pub cache_misses: u64,
    /// Running mean latency of cache hits, in microseconds.
    pub avg_cache_hit_latency_us: u64,
    /// Running mean latency of cache misses, in microseconds.
    pub avg_cache_miss_latency_us: u64,
    /// Backend queries avoided thanks to cache hits.
    pub queries_avoided: u64,
}
impl CacheFirstMetrics {
    /// Fraction of reads served from the cache, in `[0.0, 1.0]`.
    /// Returns 0.0 when no reads have been recorded.
    pub fn hit_rate(&self) -> f64 {
        if self.total_reads == 0 {
            0.0
        } else {
            self.cache_hits as f64 / self.total_reads as f64
        }
    }

    /// Percentage of backend queries avoided (hit rate expressed as a percent).
    pub fn query_reduction_pct(&self) -> f64 {
        self.hit_rate() * 100.0
    }

    /// Percentage latency improvement of cache hits relative to misses.
    ///
    /// Returns 0.0 when no misses have been recorded. The difference is
    /// clamped at zero with `saturating_sub`: the previous unchecked `u64`
    /// subtraction would panic in debug builds (and wrap in release)
    /// whenever hit latency exceeded miss latency.
    pub fn latency_improvement_pct(&self) -> f64 {
        if self.avg_cache_miss_latency_us == 0 {
            0.0
        } else {
            let improvement = self
                .avg_cache_miss_latency_us
                .saturating_sub(self.avg_cache_hit_latency_us);
            (improvement as f64 / self.avg_cache_miss_latency_us as f64) * 100.0
        }
    }
}
/// Counters for request batching.
///
/// Invariant maintained by the recorder: each batch of `k` operations adds
/// `k` to `batched_operations` and `k - 1` to `round_trips_avoided`.
#[derive(Debug, Clone, Default)]
pub struct BatchingMetrics {
    /// All operations recorded (batched + individual).
    pub total_operations: u64,
    /// Operations that went through a batch.
    pub batched_operations: u64,
    /// Operations executed one at a time.
    pub individual_operations: u64,
    /// Running mean number of operations per batch.
    pub avg_batch_size: f64,
    /// Network round trips saved by batching.
    pub round_trips_avoided: u64,
    /// Running mean per-operation latency within batches, in microseconds.
    pub avg_batch_latency_us: u64,
    /// Running mean latency of individual operations, in microseconds.
    pub avg_individual_latency_us: u64,
}
impl BatchingMetrics {
    /// Percentage of all operations that were executed as part of a batch.
    /// Returns 0.0 when nothing has been recorded.
    pub fn batching_efficiency(&self) -> f64 {
        if self.total_operations == 0 {
            0.0
        } else {
            self.batched_operations as f64 / self.total_operations as f64 * 100.0
        }
    }

    /// Round trips avoided as a percentage of total operations.
    pub fn round_trip_reduction_pct(&self) -> f64 {
        if self.total_operations == 0 {
            0.0
        } else {
            self.round_trips_avoided as f64 / self.total_operations as f64 * 100.0
        }
    }

    /// Percentage per-operation latency improvement of batched vs individual
    /// execution.
    ///
    /// Returns 0.0 when no individual operations have been recorded. The
    /// difference is clamped at zero with `saturating_sub`: the previous
    /// unchecked `u64` subtraction would panic in debug builds if batched
    /// operations were slower than individual ones.
    pub fn latency_improvement_pct(&self) -> f64 {
        if self.avg_individual_latency_us == 0 {
            0.0
        } else {
            let improvement = self
                .avg_individual_latency_us
                .saturating_sub(self.avg_batch_latency_us);
            (improvement as f64 / self.avg_individual_latency_us as f64) * 100.0
        }
    }
}
/// Counters for prepared-statement caching.
#[derive(Debug, Clone, Default)]
pub struct PreparedStatementMetrics {
    /// All queries recorded (cached + uncached).
    pub total_queries: u64,
    /// Queries that reused a cached prepared statement.
    pub cached_statements: u64,
    /// Queries that had to prepare a fresh statement.
    pub uncached_statements: u64,
    /// Running mean statement preparation time, in microseconds.
    pub avg_preparation_time_us: u64,
    /// Running mean execution time with a cached statement, in microseconds.
    pub avg_cached_execution_us: u64,
    /// Running mean execution time without a cached statement, in microseconds.
    pub avg_uncached_execution_us: u64,
}
impl PreparedStatementMetrics {
    /// Fraction of queries served by a cached statement, in `[0.0, 1.0]`.
    /// Returns 0.0 when no queries have been recorded.
    pub fn cache_hit_rate(&self) -> f64 {
        if self.total_queries == 0 {
            0.0
        } else {
            self.cached_statements as f64 / self.total_queries as f64
        }
    }

    /// Percentage speedup of cached execution relative to uncached.
    ///
    /// Returns 0.0 when no uncached executions have been recorded. The
    /// difference is clamped at zero with `saturating_sub`: the previous
    /// unchecked `u64` subtraction would panic in debug builds if cached
    /// execution were slower than uncached.
    pub fn query_speedup_pct(&self) -> f64 {
        if self.avg_uncached_execution_us == 0 {
            0.0
        } else {
            let improvement = self
                .avg_uncached_execution_us
                .saturating_sub(self.avg_cached_execution_us);
            (improvement as f64 / self.avg_uncached_execution_us as f64) * 100.0
        }
    }
}
/// Counters for metadata query optimization (`json_extract` vs `LIKE`).
#[derive(Debug, Clone, Default)]
pub struct QueryOptimizationMetrics {
    /// All metadata queries recorded.
    pub total_metadata_queries: u64,
    /// Queries using the optimized `json_extract` form.
    pub json_extract_queries: u64,
    /// Queries using the legacy `LIKE`-pattern form.
    pub like_pattern_queries: u64,
    /// Running mean latency of `json_extract` queries, in microseconds.
    pub avg_json_extract_latency_us: u64,
    /// Running mean latency of `LIKE`-pattern queries, in microseconds.
    pub avg_like_pattern_latency_us: u64,
}
impl QueryOptimizationMetrics {
    /// Percentage of metadata queries using the optimized `json_extract` form.
    /// Returns 0.0 when no metadata queries have been recorded.
    pub fn optimization_rate(&self) -> f64 {
        if self.total_metadata_queries == 0 {
            0.0
        } else {
            self.json_extract_queries as f64 / self.total_metadata_queries as f64 * 100.0
        }
    }

    /// Percentage speedup of `json_extract` queries relative to `LIKE`.
    ///
    /// Returns 0.0 when no `LIKE` queries have been recorded. The difference
    /// is clamped at zero with `saturating_sub`: the previous unchecked `u64`
    /// subtraction would panic in debug builds if `json_extract` were the
    /// slower of the two.
    pub fn query_speedup_pct(&self) -> f64 {
        if self.avg_like_pattern_latency_us == 0 {
            0.0
        } else {
            let improvement = self
                .avg_like_pattern_latency_us
                .saturating_sub(self.avg_json_extract_latency_us);
            (improvement as f64 / self.avg_like_pattern_latency_us as f64) * 100.0
        }
    }
}
/// Thread-safe collector for the optimization metrics.
pub struct PerformanceMetrics {
    // All counters live behind one RwLock; each record_* call takes the
    // write lock briefly, snapshot() clones under the read lock.
    metrics: Arc<RwLock<OptimizationMetrics>>,
    // Construction time; used by uptime() and the report header.
    start_time: Instant,
}
impl PerformanceMetrics {
    /// Creates a collector with zeroed metrics and the uptime clock started.
    pub fn new() -> Self {
        Self {
            metrics: Arc::new(RwLock::new(OptimizationMetrics::default())),
            start_time: Instant::now(),
        }
    }

    /// Record one cache-first read. `hit` says whether the cache served it;
    /// `latency` is the observed latency of the read.
    pub fn record_cache_read(&self, hit: bool, latency: Duration) {
        let mut metrics = self.metrics.write();
        metrics.cache_first.total_reads += 1;
        let latency_us = latency.as_micros() as u64;
        if hit {
            metrics.cache_first.cache_hits += 1;
            metrics.cache_first.queries_avoided += 1;
            // Incremental mean: new = (old * (n-1) + sample) / n.
            let n = metrics.cache_first.cache_hits;
            metrics.cache_first.avg_cache_hit_latency_us =
                ((metrics.cache_first.avg_cache_hit_latency_us * (n - 1)) + latency_us) / n;
        } else {
            metrics.cache_first.cache_misses += 1;
            let n = metrics.cache_first.cache_misses;
            metrics.cache_first.avg_cache_miss_latency_us =
                ((metrics.cache_first.avg_cache_miss_latency_us * (n - 1)) + latency_us) / n;
        }
    }

    /// Record a batch of `batch_size` operations that completed in `latency`
    /// (total for the whole batch).
    ///
    /// A `batch_size` of zero is ignored: it would otherwise divide by zero
    /// and underflow the round-trip accounting.
    pub fn record_batch_operation(&self, batch_size: usize, latency: Duration) {
        if batch_size == 0 {
            return;
        }
        let size = batch_size as u64;
        let mut metrics = self.metrics.write();
        metrics.batching.total_operations += size;
        metrics.batching.batched_operations += size;
        metrics.batching.round_trips_avoided += size - 1;
        // Number of batches recorded so far, derived from the invariant that
        // each batch of k ops avoids k-1 round trips, so
        // batches = batched_operations - round_trips_avoided.
        // (The previous `batched_operations / batch_size` was wrong as soon
        // as batch sizes varied between calls.)
        let total_batches =
            metrics.batching.batched_operations - metrics.batching.round_trips_avoided;
        metrics.batching.avg_batch_size =
            ((metrics.batching.avg_batch_size * (total_batches - 1) as f64) + batch_size as f64)
                / total_batches as f64;
        // Per-operation running mean, weighted by operation count. Using the
        // batch's total latency directly avoids the rounding loss of the old
        // per-op integer division followed by re-multiplication.
        let latency_us = latency.as_micros() as u64;
        let n = metrics.batching.batched_operations;
        metrics.batching.avg_batch_latency_us =
            ((metrics.batching.avg_batch_latency_us * (n - size)) + latency_us) / n;
    }

    /// Record a single non-batched operation and its latency.
    pub fn record_individual_operation(&self, latency: Duration) {
        let mut metrics = self.metrics.write();
        metrics.batching.total_operations += 1;
        metrics.batching.individual_operations += 1;
        let latency_us = latency.as_micros() as u64;
        let n = metrics.batching.individual_operations;
        metrics.batching.avg_individual_latency_us =
            ((metrics.batching.avg_individual_latency_us * (n - 1)) + latency_us) / n;
    }

    /// Record one query execution. `cached` says whether a cached prepared
    /// statement was reused; `latency` is the execution time.
    pub fn record_prepared_statement(&self, cached: bool, latency: Duration) {
        let mut metrics = self.metrics.write();
        metrics.prepared_statements.total_queries += 1;
        let latency_us = latency.as_micros() as u64;
        if cached {
            metrics.prepared_statements.cached_statements += 1;
            let n = metrics.prepared_statements.cached_statements;
            metrics.prepared_statements.avg_cached_execution_us =
                ((metrics.prepared_statements.avg_cached_execution_us * (n - 1)) + latency_us) / n;
        } else {
            metrics.prepared_statements.uncached_statements += 1;
            let n = metrics.prepared_statements.uncached_statements;
            metrics.prepared_statements.avg_uncached_execution_us =
                ((metrics.prepared_statements.avg_uncached_execution_us * (n - 1)) + latency_us)
                    / n;
        }
    }

    /// Record one metadata query. `uses_json_extract` says whether it used
    /// the optimized `json_extract` form rather than a `LIKE` pattern.
    pub fn record_metadata_query(&self, uses_json_extract: bool, latency: Duration) {
        let mut metrics = self.metrics.write();
        metrics.query_optimization.total_metadata_queries += 1;
        let latency_us = latency.as_micros() as u64;
        if uses_json_extract {
            metrics.query_optimization.json_extract_queries += 1;
            let n = metrics.query_optimization.json_extract_queries;
            metrics.query_optimization.avg_json_extract_latency_us =
                ((metrics.query_optimization.avg_json_extract_latency_us * (n - 1)) + latency_us)
                    / n;
        } else {
            metrics.query_optimization.like_pattern_queries += 1;
            let n = metrics.query_optimization.like_pattern_queries;
            metrics.query_optimization.avg_like_pattern_latency_us =
                ((metrics.query_optimization.avg_like_pattern_latency_us * (n - 1)) + latency_us)
                    / n;
        }
    }

    /// Returns a point-in-time clone of all metrics.
    pub fn snapshot(&self) -> OptimizationMetrics {
        self.metrics.read().clone()
    }

    /// Time elapsed since this collector was created.
    pub fn uptime(&self) -> Duration {
        self.start_time.elapsed()
    }

    /// Renders a human-readable report of all optimization metrics.
    pub fn report(&self) -> String {
        let metrics = self.snapshot();
        let uptime = self.uptime();
        // NOTE: cache hit/miss percentages use hit_rate() * 100.0 — the
        // previous code passed the raw 0..1 fraction into a percent-labelled
        // slot (and computed `100.0 - hit_rate()`), printing wrong values.
        format!(
            r#"
╔══════════════════════════════════════════════════════════════════╗
║ Turso Performance Optimization Report (Phase 1) ║
╠══════════════════════════════════════════════════════════════════╣
║ Uptime: {:.2} hours
╠══════════════════════════════════════════════════════════════════╣
║ 1. Cache-First Read Strategy ║
╠══════════════════════════════════════════════════════════════════╣
║ Total Reads: {:>10} ║
║ Cache Hits: {:>10} ({:>5.1}%) ║
║ Cache Misses: {:>10} ({:>5.1}%) ║
║ Turso Queries Avoided: {:>10} ║
║ Avg Hit Latency: {:>8} µs ║
║ Avg Miss Latency: {:>8} µs ║
║ Latency Improvement: {:>5.1}% ║
╠══════════════════════════════════════════════════════════════════╣
║ 2. Request Batching ║
╠══════════════════════════════════════════════════════════════════╣
║ Total Operations: {:>10} ║
║ Batched Operations: {:>10} ({:>5.1}%) ║
║ Individual Operations: {:>10} ({:>5.1}%) ║
║ Avg Batch Size: {:>10.1} ║
║ Round Trips Avoided: {:>10} ({:>5.1}% reduction) ║
║ Avg Batch Latency: {:>8} µs/op ║
║ Avg Individual Latency:{:>8} µs/op ║
║ Latency Improvement: {:>5.1}% ║
╠══════════════════════════════════════════════════════════════════╣
║ 3. Prepared Statement Caching ║
╠══════════════════════════════════════════════════════════════════╣
║ Total Queries: {:>10} ║
║ Cached Statements: {:>10} ({:>5.1}%) ║
║ Uncached Statements: {:>10} ({:>5.1}%) ║
║ Avg Cached Latency: {:>8} µs ║
║ Avg Uncached Latency: {:>8} µs ║
║ Query Speedup: {:>5.1}% ║
╠══════════════════════════════════════════════════════════════════╣
║ 4. Metadata Query Optimization (json_extract) ║
╠══════════════════════════════════════════════════════════════════╣
║ Total Metadata Queries:{:>10} ║
║ json_extract Queries: {:>10} ({:>5.1}%) ║
║ LIKE Pattern Queries: {:>10} ({:>5.1}%) ║
║ Avg json_extract: {:>8} µs ║
║ Avg LIKE Pattern: {:>8} µs ║
║ Query Speedup: {:>5.1}% ║
╚══════════════════════════════════════════════════════════════════╝
"#,
            uptime.as_secs_f64() / 3600.0,
            metrics.cache_first.total_reads,
            metrics.cache_first.cache_hits,
            metrics.cache_first.hit_rate() * 100.0,
            metrics.cache_first.cache_misses,
            100.0 - metrics.cache_first.hit_rate() * 100.0,
            metrics.cache_first.queries_avoided,
            metrics.cache_first.avg_cache_hit_latency_us,
            metrics.cache_first.avg_cache_miss_latency_us,
            metrics.cache_first.latency_improvement_pct(),
            metrics.batching.total_operations,
            metrics.batching.batched_operations,
            metrics.batching.batching_efficiency(),
            metrics.batching.individual_operations,
            100.0 - metrics.batching.batching_efficiency(),
            metrics.batching.avg_batch_size,
            metrics.batching.round_trips_avoided,
            metrics.batching.round_trip_reduction_pct(),
            metrics.batching.avg_batch_latency_us,
            metrics.batching.avg_individual_latency_us,
            metrics.batching.latency_improvement_pct(),
            metrics.prepared_statements.total_queries,
            metrics.prepared_statements.cached_statements,
            metrics.prepared_statements.cache_hit_rate() * 100.0,
            metrics.prepared_statements.uncached_statements,
            100.0 - metrics.prepared_statements.cache_hit_rate() * 100.0,
            metrics.prepared_statements.avg_cached_execution_us,
            metrics.prepared_statements.avg_uncached_execution_us,
            metrics.prepared_statements.query_speedup_pct(),
            metrics.query_optimization.total_metadata_queries,
            metrics.query_optimization.json_extract_queries,
            metrics.query_optimization.optimization_rate(),
            metrics.query_optimization.like_pattern_queries,
            100.0 - metrics.query_optimization.optimization_rate(),
            metrics.query_optimization.avg_json_extract_latency_us,
            metrics.query_optimization.avg_like_pattern_latency_us,
            metrics.query_optimization.query_speedup_pct(),
        )
    }

    /// Resets every counter to zero (does not restart the uptime clock).
    pub fn reset(&self) {
        let mut metrics = self.metrics.write();
        *metrics = OptimizationMetrics::default();
    }
}
impl Default for PerformanceMetrics {
fn default() -> Self {
Self::new()
}
}