1use chrono::{DateTime, Utc};
4use parking_lot::RwLock;
5use serde::{Deserialize, Serialize};
6use std::collections::HashMap;
7use std::sync::Arc;
8use std::time::Instant;
9use tracing::debug;
10use uuid::Uuid;
11
/// In-flight context for a single query, created by
/// `QueryPerformanceTracker::start_query` and consumed by
/// `QueryPerformanceTracker::complete_query`.
#[derive(Debug, Clone)]
pub struct QueryContext {
    /// Unique identifier assigned when tracking started.
    pub query_id: Uuid,
    /// Caller-supplied category used to group statistics.
    pub query_type: String,
    /// The query text as supplied by the caller.
    pub query_text: String,
    /// Stringified query parameters, kept for diagnostics.
    pub parameters: Vec<String>,
    /// Monotonic start time; elapsed time is measured from here.
    pub start_time: Instant,
    /// Initialized to `false`; the final value is passed to `complete_query`.
    pub cache_hit: bool,
    /// Initialized to `None`; the final value is passed to `complete_query`.
    pub result_size: Option<usize>,
}
23
/// Immutable record of one completed query execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueryPerformanceMetrics {
    /// Identifier carried over from the originating `QueryContext`.
    pub query_id: Uuid,
    /// Category used to group statistics.
    pub query_type: String,
    /// The executed query text.
    pub query_text: String,
    /// Wall-clock execution time, truncated to whole milliseconds.
    pub execution_time_ms: u64,
    /// Whether the result was served from a cache.
    pub cache_hit: bool,
    /// Number of items in the result, when reported by the caller.
    pub result_size: Option<usize>,
    /// Memory used by the query, when reported by the caller.
    pub memory_usage_bytes: Option<u64>,
    /// CPU usage during the query, when reported by the caller.
    pub cpu_usage_percent: Option<f64>,
    /// UTC completion time.
    pub timestamp: DateTime<Utc>,
    /// Stringified query parameters.
    pub parameters: Vec<String>,
    /// Names of optimizations the caller reports having applied.
    pub optimization_applied: Vec<String>,
}
39
/// Aggregated statistics for a single `query_type`, maintained incrementally
/// as each query completes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueryPerformanceStats {
    /// The query category these statistics describe.
    pub query_type: String,
    /// Total number of completed executions.
    pub total_executions: u64,
    /// Executions served from a cache.
    pub cache_hits: u64,
    /// Executions not served from a cache.
    pub cache_misses: u64,
    /// Running mean of execution times across all executions.
    pub average_execution_time_ms: f64,
    /// Fastest observed execution (initialized to `u64::MAX` before any sample).
    pub min_execution_time_ms: u64,
    /// Slowest observed execution.
    pub max_execution_time_ms: u64,
    /// 95th-percentile execution time over currently retained samples.
    pub p95_execution_time_ms: u64,
    /// 99th-percentile execution time over currently retained samples.
    pub p99_execution_time_ms: u64,
    /// Running mean of reported result sizes.
    pub average_result_size: f64,
    /// Sum of reported memory usage across executions.
    pub total_memory_usage_bytes: u64,
    /// Running mean of reported CPU usage.
    pub average_cpu_usage_percent: f64,
    /// `cache_hits / total_executions`.
    pub cache_hit_rate: f64,
    /// Executions at or above the tracker's slow-query threshold.
    pub slow_queries_count: u64,
    /// Executions at or below the tracker's fast-query threshold.
    pub fast_queries_count: u64,
    /// Timestamp of the most recent execution, `None` before any sample.
    pub last_executed: Option<DateTime<Utc>>,
}
60
/// A heuristic recommendation for improving a query type's performance,
/// produced automatically from observed metrics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueryOptimizationSuggestion {
    /// The query category the suggestion applies to.
    pub query_type: String,
    /// The kind of optimization being recommended.
    pub suggestion_type: OptimizationType,
    /// Human-readable explanation of the recommendation.
    pub description: String,
    /// Heuristic estimate of the potential speedup, in percent.
    pub potential_improvement_percent: f64,
    /// How urgent the suggestion is.
    pub priority: OptimizationPriority,
    /// Rough cost of implementing the suggestion.
    pub implementation_effort: ImplementationEffort,
}
71
/// Categories of optimization that can be suggested.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum OptimizationType {
    /// Add a database index to speed up lookups.
    AddIndex,
    /// Use prepared statements instead of ad-hoc queries.
    UsePreparedStatement,
    /// Restructure the query itself.
    OptimizeQuery,
    /// Introduce result caching.
    AddCaching,
    /// Paginate or filter to shrink the result set.
    ReduceResultSet,
    /// Use connection pooling.
    UseConnectionPooling,
}
88
/// Urgency of a suggestion. Variants are ordered least to most urgent so the
/// derived `Ord` compares `Low < Medium < High < Critical`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub enum OptimizationPriority {
    Low,
    Medium,
    High,
    Critical,
}
97
/// Rough implementation cost of a suggestion. Variants are ordered least to
/// most expensive so the derived `Ord` compares `Low < Medium < High`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub enum ImplementationEffort {
    Low,
    Medium,
    High,
}
105
/// Thread-safe collector of query performance data.
///
/// Stores a bounded window of raw samples, per-type aggregate statistics and
/// auto-generated optimization suggestions, each behind its own `RwLock` so
/// readers do not block each other.
pub struct QueryPerformanceTracker {
    /// Bounded ring of raw samples; oldest entries are dropped past `max_metrics`.
    metrics: Arc<RwLock<Vec<QueryPerformanceMetrics>>>,
    /// Aggregated statistics keyed by `query_type`.
    stats: Arc<RwLock<HashMap<String, QueryPerformanceStats>>>,
    /// Current optimization suggestions across all query types.
    suggestions: Arc<RwLock<Vec<QueryOptimizationSuggestion>>>,
    /// Maximum number of raw samples retained.
    max_metrics: usize,
    /// Executions at or above this duration (ms) count as slow.
    slow_query_threshold_ms: u64,
    /// Executions at or below this duration (ms) count as fast.
    fast_query_threshold_ms: u64,
}
121
122impl QueryPerformanceTracker {
123 #[must_use]
125 pub fn new(
126 max_metrics: usize,
127 slow_query_threshold_ms: u64,
128 fast_query_threshold_ms: u64,
129 ) -> Self {
130 Self {
131 metrics: Arc::new(RwLock::new(Vec::new())),
132 stats: Arc::new(RwLock::new(HashMap::new())),
133 suggestions: Arc::new(RwLock::new(Vec::new())),
134 max_metrics,
135 slow_query_threshold_ms,
136 fast_query_threshold_ms,
137 }
138 }
139
140 #[must_use]
142 pub fn new_default() -> Self {
143 Self::new(10000, 1000, 100) }
145
146 #[must_use]
148 pub fn start_query(
149 &self,
150 query_type: &str,
151 query_text: &str,
152 parameters: Vec<String>,
153 ) -> QueryContext {
154 QueryContext {
155 query_id: Uuid::new_v4(),
156 query_type: query_type.to_string(),
157 query_text: query_text.to_string(),
158 parameters,
159 start_time: Instant::now(),
160 cache_hit: false,
161 result_size: None,
162 }
163 }
164
165 #[allow(clippy::cast_possible_truncation)]
167 pub fn complete_query(
168 &self,
169 context: QueryContext,
170 cache_hit: bool,
171 result_size: Option<usize>,
172 memory_usage_bytes: Option<u64>,
173 cpu_usage_percent: Option<f64>,
174 optimization_applied: Vec<String>,
175 ) {
176 let execution_time = context.start_time.elapsed();
177 let execution_time_ms = execution_time.as_millis() as u64;
178
179 let metric = QueryPerformanceMetrics {
180 query_id: context.query_id,
181 query_type: context.query_type.clone(),
182 query_text: context.query_text,
183 execution_time_ms,
184 cache_hit,
185 result_size,
186 memory_usage_bytes,
187 cpu_usage_percent,
188 timestamp: Utc::now(),
189 parameters: context.parameters,
190 optimization_applied,
191 };
192
193 {
195 let mut metrics = self.metrics.write();
196 metrics.push(metric.clone());
197
198 if metrics.len() > self.max_metrics {
200 let excess = metrics.len() - self.max_metrics;
201 metrics.drain(0..excess);
202 }
203 }
204
205 self.update_stats(&metric);
207
208 self.generate_optimization_suggestions(&metric);
210
211 debug!(
212 "Query completed: {} ({}ms, cache_hit: {}, size: {:?})",
213 context.query_type, execution_time_ms, cache_hit, result_size
214 );
215 }
216
217 #[must_use]
219 pub fn get_stats(&self, query_type: &str) -> Option<QueryPerformanceStats> {
220 let stats = self.stats.read();
221 stats.get(query_type).cloned()
222 }
223
224 #[must_use]
226 pub fn get_all_stats(&self) -> HashMap<String, QueryPerformanceStats> {
227 let stats = self.stats.read();
228 stats.clone()
229 }
230
231 #[must_use]
233 pub fn get_optimization_suggestions(&self) -> Vec<QueryOptimizationSuggestion> {
234 let suggestions = self.suggestions.read();
235 suggestions.clone()
236 }
237
238 #[must_use]
240 pub fn get_slow_queries(&self) -> Vec<QueryPerformanceMetrics> {
241 let metrics = self.metrics.read();
242 metrics
243 .iter()
244 .filter(|m| m.execution_time_ms >= self.slow_query_threshold_ms)
245 .cloned()
246 .collect()
247 }
248
249 #[must_use]
251 pub fn get_fast_queries(&self) -> Vec<QueryPerformanceMetrics> {
252 let metrics = self.metrics.read();
253 metrics
254 .iter()
255 .filter(|m| m.execution_time_ms <= self.fast_query_threshold_ms)
256 .cloned()
257 .collect()
258 }
259
260 #[must_use]
262 #[allow(clippy::cast_precision_loss)]
263 pub fn get_performance_summary(&self) -> QueryPerformanceSummary {
264 let stats = self.get_all_stats();
265 let suggestions = self.get_optimization_suggestions();
266 let slow_queries = self.get_slow_queries();
267 let fast_queries = self.get_fast_queries();
268
269 let total_queries: u64 = stats.values().map(|s| s.total_executions).sum();
270 let total_cache_hits: u64 = stats.values().map(|s| s.cache_hits).sum();
271 let overall_cache_hit_rate = if total_queries > 0 {
272 total_cache_hits as f64 / total_queries as f64
273 } else {
274 0.0
275 };
276
277 let average_execution_time = if stats.is_empty() {
278 0.0
279 } else {
280 stats
281 .values()
282 .map(|s| s.average_execution_time_ms)
283 .sum::<f64>()
284 / stats.len() as f64
285 };
286
287 QueryPerformanceSummary {
288 timestamp: Utc::now(),
289 total_queries,
290 overall_cache_hit_rate,
291 average_execution_time_ms: average_execution_time,
292 slow_queries_count: slow_queries.len() as u64,
293 fast_queries_count: fast_queries.len() as u64,
294 optimization_suggestions_count: suggestions.len() as u64,
295 stats,
296 suggestions,
297 }
298 }
299
300 #[allow(clippy::cast_precision_loss)]
302 fn update_stats(&self, metric: &QueryPerformanceMetrics) {
303 let mut stats = self.stats.write();
304 let entry =
305 stats
306 .entry(metric.query_type.clone())
307 .or_insert_with(|| QueryPerformanceStats {
308 query_type: metric.query_type.clone(),
309 total_executions: 0,
310 cache_hits: 0,
311 cache_misses: 0,
312 average_execution_time_ms: 0.0,
313 min_execution_time_ms: u64::MAX,
314 max_execution_time_ms: 0,
315 p95_execution_time_ms: 0,
316 p99_execution_time_ms: 0,
317 average_result_size: 0.0,
318 total_memory_usage_bytes: 0,
319 average_cpu_usage_percent: 0.0,
320 cache_hit_rate: 0.0,
321 slow_queries_count: 0,
322 fast_queries_count: 0,
323 last_executed: None,
324 });
325
326 entry.total_executions += 1;
327 entry.last_executed = Some(metric.timestamp);
328
329 if metric.cache_hit {
330 entry.cache_hits += 1;
331 } else {
332 entry.cache_misses += 1;
333 }
334
335 if metric.execution_time_ms < entry.min_execution_time_ms {
337 entry.min_execution_time_ms = metric.execution_time_ms;
338 }
339 if metric.execution_time_ms > entry.max_execution_time_ms {
340 entry.max_execution_time_ms = metric.execution_time_ms;
341 }
342
343 entry.average_execution_time_ms = (entry.average_execution_time_ms
345 * (entry.total_executions - 1) as f64
346 + metric.execution_time_ms as f64)
347 / entry.total_executions as f64;
348
349 if let Some(size) = metric.result_size {
351 entry.average_result_size =
352 (entry.average_result_size * (entry.total_executions - 1) as f64 + size as f64)
353 / entry.total_executions as f64;
354 }
355
356 if let Some(memory) = metric.memory_usage_bytes {
358 entry.total_memory_usage_bytes += memory;
359 }
360
361 if let Some(cpu) = metric.cpu_usage_percent {
363 entry.average_cpu_usage_percent =
364 (entry.average_cpu_usage_percent * (entry.total_executions - 1) as f64 + cpu)
365 / entry.total_executions as f64;
366 }
367
368 entry.cache_hit_rate = if entry.total_executions > 0 {
370 entry.cache_hits as f64 / entry.total_executions as f64
371 } else {
372 0.0
373 };
374
375 if metric.execution_time_ms >= self.slow_query_threshold_ms {
377 entry.slow_queries_count += 1;
378 }
379 if metric.execution_time_ms <= self.fast_query_threshold_ms {
380 entry.fast_queries_count += 1;
381 }
382
383 self.calculate_percentiles(entry);
385 }
386
387 #[allow(
389 clippy::cast_precision_loss,
390 clippy::cast_possible_truncation,
391 clippy::cast_sign_loss
392 )]
393 fn calculate_percentiles(&self, stats: &mut QueryPerformanceStats) {
394 let metrics = self.metrics.read();
396 let mut execution_times: Vec<u64> = metrics
397 .iter()
398 .filter(|m| m.query_type == stats.query_type)
399 .map(|m| m.execution_time_ms)
400 .collect();
401
402 execution_times.sort_unstable();
403
404 if !execution_times.is_empty() {
405 let len = execution_times.len();
406
407 let p95_index = (len as f64 * 0.95) as usize;
409 stats.p95_execution_time_ms = execution_times[p95_index.min(len - 1)];
410
411 let p99_index = (len as f64 * 0.99) as usize;
413 stats.p99_execution_time_ms = execution_times[p99_index.min(len - 1)];
414 }
415 }
416
417 fn generate_optimization_suggestions(&self, metric: &QueryPerformanceMetrics) {
419 let mut suggestions = self.suggestions.write();
420
421 suggestions.retain(|s| s.query_type != metric.query_type);
423
424 let mut new_suggestions = Vec::new();
425
426 if metric.execution_time_ms >= self.slow_query_threshold_ms {
428 new_suggestions.push(QueryOptimizationSuggestion {
429 query_type: metric.query_type.clone(),
430 suggestion_type: OptimizationType::AddIndex,
431 description: format!(
432 "Query is slow ({}ms). Consider adding database indexes.",
433 metric.execution_time_ms
434 ),
435 potential_improvement_percent: 50.0,
436 priority: OptimizationPriority::High,
437 implementation_effort: ImplementationEffort::Medium,
438 });
439
440 new_suggestions.push(QueryOptimizationSuggestion {
441 query_type: metric.query_type.clone(),
442 suggestion_type: OptimizationType::OptimizeQuery,
443 description: "Query structure could be optimized for better performance."
444 .to_string(),
445 potential_improvement_percent: 30.0,
446 priority: OptimizationPriority::Medium,
447 implementation_effort: ImplementationEffort::High,
448 });
449 }
450
451 if !metric.cache_hit {
453 new_suggestions.push(QueryOptimizationSuggestion {
454 query_type: metric.query_type.clone(),
455 suggestion_type: OptimizationType::AddCaching,
456 description:
457 "Query is not cached. Consider implementing caching for better performance."
458 .to_string(),
459 potential_improvement_percent: 80.0,
460 priority: OptimizationPriority::High,
461 implementation_effort: ImplementationEffort::Medium,
462 });
463 }
464
465 if let Some(size) = metric.result_size {
467 if size > 1000 {
468 new_suggestions.push(QueryOptimizationSuggestion {
469 query_type: metric.query_type.clone(),
470 suggestion_type: OptimizationType::ReduceResultSet,
471 description: format!(
472 "Large result set ({size} items). Consider pagination or filtering."
473 ),
474 potential_improvement_percent: 40.0,
475 priority: OptimizationPriority::Medium,
476 implementation_effort: ImplementationEffort::Low,
477 });
478 }
479 }
480
481 if let Some(memory) = metric.memory_usage_bytes {
483 if memory > 10 * 1024 * 1024 {
484 new_suggestions.push(QueryOptimizationSuggestion {
486 query_type: metric.query_type.clone(),
487 suggestion_type: OptimizationType::UsePreparedStatement,
488 description: "High memory usage. Consider using prepared statements."
489 .to_string(),
490 potential_improvement_percent: 20.0,
491 priority: OptimizationPriority::Low,
492 implementation_effort: ImplementationEffort::Low,
493 });
494 }
495 }
496
497 suggestions.extend(new_suggestions);
498 }
499}
500
/// Point-in-time roll-up across all tracked query types, produced by
/// `QueryPerformanceTracker::get_performance_summary`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueryPerformanceSummary {
    /// UTC time at which the summary was built.
    pub timestamp: DateTime<Utc>,
    /// Total executions across all query types.
    pub total_queries: u64,
    /// Overall cache-hit fraction across all query types.
    pub overall_cache_hit_rate: f64,
    /// Average execution time across query types.
    pub average_execution_time_ms: f64,
    /// Number of retained samples at or above the slow threshold.
    pub slow_queries_count: u64,
    /// Number of retained samples at or below the fast threshold.
    pub fast_queries_count: u64,
    /// Number of active optimization suggestions.
    pub optimization_suggestions_count: u64,
    /// Per-type aggregate statistics, keyed by query type.
    pub stats: HashMap<String, QueryPerformanceStats>,
    /// The active optimization suggestions themselves.
    pub suggestions: Vec<QueryOptimizationSuggestion>,
}
514
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;
    use std::time::Duration;

    /// A single completed query must show up in the per-type statistics.
    #[test]
    fn test_query_performance_tracking() {
        let tracker = QueryPerformanceTracker::new_default();

        let ctx = tracker.start_query(
            "test_query",
            "SELECT * FROM tasks",
            vec!["param1".to_string()],
        );

        // Guarantee a measurable (>= 100 ms) execution time.
        thread::sleep(Duration::from_millis(100));

        tracker.complete_query(
            ctx,
            false,
            Some(100),
            Some(1024),
            Some(5.0),
            vec!["index_optimization".to_string()],
        );

        let stats = tracker
            .get_stats("test_query")
            .expect("stats should exist after a completed query");
        assert_eq!(stats.total_executions, 1);
        assert_eq!(stats.cache_misses, 1);
        assert_eq!(stats.cache_hits, 0);
        assert!(stats.average_execution_time_ms >= 100.0);
    }

    /// A query slower than the configured threshold must produce suggestions.
    #[test]
    fn test_optimization_suggestions() {
        // 50 ms slow threshold, 10 ms fast threshold.
        let tracker = QueryPerformanceTracker::new(1000, 50, 10);

        let ctx = tracker.start_query("slow_query", "SELECT * FROM tasks", vec![]);
        // Sleep past the slow threshold so the suggestion path triggers.
        thread::sleep(Duration::from_millis(60));
        tracker.complete_query(ctx, false, Some(2000), None, None, vec![]);

        let suggestions = tracker.get_optimization_suggestions();
        assert!(!suggestions.is_empty());
        assert!(suggestions.iter().any(|s| s.query_type == "slow_query"));
    }

    /// The summary must aggregate counts, cache hits and timings.
    #[test]
    fn test_performance_summary() {
        let tracker = QueryPerformanceTracker::new_default();

        for i in 0..5 {
            let ctx = tracker.start_query("test_query", "SELECT * FROM tasks", vec![]);
            thread::sleep(Duration::from_millis(10));
            // Alternate cache hits and misses (hit on even iterations).
            tracker.complete_query(ctx, i % 2 == 0, Some(100), None, None, vec![]);
        }

        let summary = tracker.get_performance_summary();
        assert_eq!(summary.total_queries, 5);
        assert!(summary.overall_cache_hit_rate > 0.0);
        assert!(summary.average_execution_time_ms > 0.0);
    }
}
593}