1use anyhow::{anyhow, Result};
7use parking_lot::RwLock;
8use serde::{Deserialize, Serialize};
9use std::collections::{HashMap, VecDeque};
10use std::sync::Arc;
11use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
12
/// Configuration for [`EnhancedPerformanceMonitor`]: feature toggles plus
/// retention, sampling, alerting, and export settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MonitoringConfig {
    /// Gate for `record_query`; when false, queries are not recorded at all.
    pub enable_real_time: bool,
    /// Toggle for query logging.
    /// NOTE(review): not consulted anywhere in this module — confirm its consumer.
    pub enable_query_logging: bool,
    /// Gate for `record_system_metrics`.
    pub enable_system_monitoring: bool,
    /// Gate for `record_quality_metrics`.
    pub enable_quality_metrics: bool,
    /// Intended retention window for collected metrics.
    /// NOTE(review): collectors currently cap by sample *count*, not by age — confirm.
    pub retention_period: Duration,
    /// Fraction of queries to record; compared against a random draw in [0, 1).
    pub sampling_rate: f32,
    /// Threshold values that trigger alerts.
    pub alert_thresholds: AlertThresholds,
    /// Format/destination settings used by `export_metrics`.
    pub export_config: ExportConfig,
}
33
34impl Default for MonitoringConfig {
35 fn default() -> Self {
36 Self {
37 enable_real_time: true,
38 enable_query_logging: true,
39 enable_system_monitoring: true,
40 enable_quality_metrics: true,
41 retention_period: Duration::from_secs(24 * 60 * 60), sampling_rate: 1.0,
43 alert_thresholds: AlertThresholds::default(),
44 export_config: ExportConfig::default(),
45 }
46 }
47}
48
/// Threshold values evaluated by [`AlertManager`]; crossing one raises an alert.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertThresholds {
    /// Maximum acceptable query latency, in milliseconds.
    pub max_query_latency: f64,
    /// Maximum acceptable error rate as a fraction.
    /// NOTE(review): not checked anywhere in this module yet — confirm.
    pub max_error_rate: f32,
    /// Minimum acceptable recall@10 (0.0–1.0).
    pub min_recall_at_10: f32,
    /// Maximum acceptable memory usage in MB (compared against bytes / 1024²).
    pub max_memory_usage: u64,
    /// Maximum acceptable CPU usage as a fraction (0.0–1.0).
    pub max_cpu_usage: f32,
}
63
64impl Default for AlertThresholds {
65 fn default() -> Self {
66 Self {
67 max_query_latency: 1000.0, max_error_rate: 0.05, min_recall_at_10: 0.90, max_memory_usage: 8192, max_cpu_usage: 0.80, }
73 }
74}
75
/// Settings controlling how and where metrics are exported.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExportConfig {
    /// Serialization format; see [`ExportFormat`].
    pub format: ExportFormat,
    /// How often exports should run.
    /// NOTE(review): no scheduler in this module reads this yet — confirm.
    pub export_interval: Duration,
    /// Where the export should be delivered.
    pub destination: ExportDestination,
    /// Whether to include detailed (per-item) data in exports.
    pub include_detailed: bool,
}
88
/// Serialization format for exported metrics.
///
/// Only `JSON`, `CSV`, and `Prometheus` are handled by
/// `EnhancedPerformanceMonitor::export_metrics`; the others currently
/// return a "not yet implemented" error.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ExportFormat {
    JSON,
    CSV,
    Prometheus,
    InfluxDB,
    ElasticSearch,
}
97
/// Where exported metrics should be delivered.
///
/// NOTE(review): the destination is configurable but nothing in this module
/// consumes it yet — confirm the delivery pipeline.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ExportDestination {
    /// A file path to write to.
    File(String),
    /// An HTTP endpoint (presumably a URL to POST to — confirm).
    Http(String),
    /// A database target (presumably a connection string — confirm).
    Database(String),
    /// Standard output.
    Console,
}
105
106impl Default for ExportConfig {
107 fn default() -> Self {
108 Self {
109 format: ExportFormat::JSON,
110 export_interval: Duration::from_secs(60),
111 destination: ExportDestination::Console,
112 include_detailed: false,
113 }
114 }
115}
116
/// Central performance monitor: fans each recorded observation out to the
/// per-domain collectors, the alert manager, the dashboard snapshot, and the
/// analytics engine.
pub struct EnhancedPerformanceMonitor {
    /// Feature toggles, sampling rate, thresholds, and export settings.
    config: MonitoringConfig,
    /// Per-query latency/success statistics.
    query_metrics: Arc<RwLock<QueryMetricsCollector>>,
    /// Host resource usage samples.
    system_metrics: Arc<RwLock<SystemMetricsCollector>>,
    /// Search-quality samples (precision/recall/F1).
    quality_metrics: Arc<RwLock<QualityMetricsCollector>>,
    /// Evaluates thresholds and keeps the active-alert list.
    alert_manager: AlertManager,
    /// Trend/recommendation analysis (largely placeholder in this module).
    analytics_engine: AnalyticsEngine,
    /// Shared snapshot served to dashboards via `get_dashboard_data`.
    dashboard_data: Arc<RwLock<DashboardData>>,
}
127
128impl EnhancedPerformanceMonitor {
129 pub fn new(config: MonitoringConfig) -> Self {
131 Self {
132 config: config.clone(),
133 query_metrics: Arc::new(RwLock::new(QueryMetricsCollector::new())),
134 system_metrics: Arc::new(RwLock::new(SystemMetricsCollector::new())),
135 quality_metrics: Arc::new(RwLock::new(QualityMetricsCollector::new())),
136 alert_manager: AlertManager::new(config.alert_thresholds.clone()),
137 analytics_engine: AnalyticsEngine::new(),
138 dashboard_data: Arc::new(RwLock::new(DashboardData::new())),
139 }
140 }
141
142 pub fn record_query(&self, query_info: QueryInfo) {
144 if !self.config.enable_real_time {
145 return;
146 }
147
148 if {
150 use scirs2_core::random::{Random, Rng};
151 let mut rng = Random::seed(42);
152 rng.gen_range(0.0..1.0)
153 } > self.config.sampling_rate
154 {
155 return;
156 }
157
158 {
160 let mut metrics = self.query_metrics.write();
161 metrics.record_query(query_info.clone());
162 }
163
164 self.alert_manager.check_query_alerts(&query_info);
166
167 {
169 let mut dashboard = self.dashboard_data.write();
170 dashboard.update_query_stats(&query_info);
171 }
172
173 self.analytics_engine.analyze_query(&query_info);
175 }
176
177 pub fn record_system_metrics(&self, metrics: SystemMetrics) {
179 if !self.config.enable_system_monitoring {
180 return;
181 }
182
183 {
184 let mut collector = self.system_metrics.write();
185 collector.record_metrics(metrics.clone());
186 }
187
188 self.alert_manager.check_system_alerts(&metrics);
190
191 {
193 let mut dashboard = self.dashboard_data.write();
194 dashboard.update_system_stats(&metrics);
195 }
196 }
197
198 pub fn record_quality_metrics(&self, metrics: QualityMetrics) {
200 if !self.config.enable_quality_metrics {
201 return;
202 }
203
204 {
205 let mut collector = self.quality_metrics.write();
206 collector.record_metrics(metrics.clone());
207 }
208
209 self.alert_manager.check_quality_alerts(&metrics);
211
212 {
214 let mut dashboard = self.dashboard_data.write();
215 dashboard.update_quality_stats(&metrics);
216 }
217 }
218
219 pub fn get_dashboard_data(&self) -> DashboardData {
221 self.dashboard_data.read().clone()
222 }
223
224 pub fn generate_analytics_report(&self) -> AnalyticsReport {
226 let query_stats = self.query_metrics.read().get_statistics();
227 let system_stats = self.system_metrics.read().get_statistics();
228 let quality_stats = self.quality_metrics.read().get_statistics();
229 let alerts = self.alert_manager.get_active_alerts();
230
231 AnalyticsReport {
232 timestamp: SystemTime::now(),
233 query_statistics: query_stats,
234 system_statistics: system_stats,
235 quality_statistics: quality_stats,
236 active_alerts: alerts,
237 trends: self.analytics_engine.get_trends(),
238 recommendations: self.analytics_engine.get_recommendations(),
239 }
240 }
241
242 pub fn export_metrics(&self) -> Result<String> {
244 let report = self.generate_analytics_report();
245
246 match self.config.export_config.format {
247 ExportFormat::JSON => serde_json::to_string_pretty(&report)
248 .map_err(|e| anyhow!("JSON serialization error: {}", e)),
249 ExportFormat::CSV => self.generate_csv_export(&report),
250 ExportFormat::Prometheus => self.generate_prometheus_export(&report),
251 _ => Err(anyhow!("Export format not yet implemented")),
252 }
253 }
254
255 fn generate_csv_export(&self, report: &AnalyticsReport) -> Result<String> {
257 let mut csv = String::new();
258 csv.push_str("metric,value,timestamp\n");
259
260 csv.push_str(&format!(
261 "total_queries,{},{}\n",
262 report.query_statistics.total_queries,
263 report
264 .timestamp
265 .duration_since(UNIX_EPOCH)
266 .unwrap()
267 .as_secs()
268 ));
269
270 csv.push_str(&format!(
271 "avg_latency,{:.2},{}\n",
272 report.query_statistics.average_latency.as_millis(),
273 report
274 .timestamp
275 .duration_since(UNIX_EPOCH)
276 .unwrap()
277 .as_secs()
278 ));
279
280 Ok(csv)
281 }
282
283 fn generate_prometheus_export(&self, report: &AnalyticsReport) -> Result<String> {
285 let mut prometheus = String::new();
286
287 prometheus.push_str("# HELP vector_search_queries_total Total number of queries\n");
288 prometheus.push_str("# TYPE vector_search_queries_total counter\n");
289 prometheus.push_str(&format!(
290 "vector_search_queries_total {}\n",
291 report.query_statistics.total_queries
292 ));
293
294 prometheus.push_str("# HELP vector_search_latency_seconds Query latency in seconds\n");
295 prometheus.push_str("# TYPE vector_search_latency_seconds histogram\n");
296 prometheus.push_str(&format!(
297 "vector_search_latency_seconds {:.6}\n",
298 report.query_statistics.average_latency.as_secs_f64()
299 ));
300
301 Ok(prometheus)
302 }
303
304 pub fn start_background_monitoring(&self) {
306 }
309
310 pub fn stop_monitoring(&self) {
312 }
314}
315
/// Everything known about one finished query, as passed to
/// [`EnhancedPerformanceMonitor::record_query`].
#[derive(Debug, Clone)]
pub struct QueryInfo {
    /// Caller-supplied identifier for this query.
    pub query_id: String,
    /// Kind of operation performed.
    pub query_type: QueryType,
    /// Original query text, when applicable.
    pub query_text: Option<String>,
    /// Dimensionality of the query vector, when applicable.
    pub vector_dimensions: Option<usize>,
    /// `k` for k-NN searches, when applicable.
    pub k_value: Option<usize>,
    /// Similarity threshold, when applicable.
    pub threshold: Option<f32>,
    /// When execution began.
    pub start_time: Instant,
    /// When execution finished; latency is `end_time - start_time`.
    pub end_time: Instant,
    /// Whether the query completed successfully.
    pub success: bool,
    /// Error description when `success` is false.
    pub error_message: Option<String>,
    /// Number of results returned.
    pub results_count: usize,
    /// Name of the index that served the query, if known.
    pub index_used: Option<String>,
    /// Whether the result was served from cache.
    pub cache_hit: bool,
}
333
/// Kinds of operations the monitor distinguishes.
#[derive(Debug, Clone)]
pub enum QueryType {
    /// k-nearest-neighbor search.
    KNNSearch,
    /// Search returning everything above a similarity threshold.
    ThresholdSearch,
    /// Pairwise similarity computation.
    SimilarityCalculation,
    /// Text-to-vector embedding.
    TextEmbedding,
    /// Index mutation.
    IndexUpdate,
    /// Caller-defined operation, labeled by the contained string.
    Custom(String),
}
343
/// Accumulates per-query statistics plus a bounded history of recent queries.
#[derive(Debug)]
pub struct QueryMetricsCollector {
    /// Recent queries, oldest first; trimmed to `max_retention`.
    queries: VecDeque<QueryInfo>,
    /// Running aggregates updated on every `record_query`.
    statistics: QueryStatistics,
    /// Maximum number of queries kept in `queries`.
    max_retention: usize,
}
351
352impl Default for QueryMetricsCollector {
353 fn default() -> Self {
354 Self::new()
355 }
356}
357
358impl QueryMetricsCollector {
359 pub fn new() -> Self {
360 Self {
361 queries: VecDeque::new(),
362 statistics: QueryStatistics::default(),
363 max_retention: 10000, }
365 }
366
367 pub fn record_query(&mut self, query: QueryInfo) {
368 let latency = query.end_time.duration_since(query.start_time);
369
370 self.statistics.total_queries += 1;
372 if query.success {
373 self.statistics.successful_queries += 1;
374 } else {
375 self.statistics.failed_queries += 1;
376 }
377
378 if self.statistics.total_queries == 1 {
380 self.statistics.average_latency = latency;
381 self.statistics.min_latency = latency;
382 self.statistics.max_latency = latency;
383 } else {
384 let total_time = self
386 .statistics
387 .average_latency
388 .mul_f64(self.statistics.total_queries as f64 - 1.0)
389 + latency;
390 self.statistics.average_latency =
391 total_time.div_f64(self.statistics.total_queries as f64);
392
393 if latency < self.statistics.min_latency {
394 self.statistics.min_latency = latency;
395 }
396 if latency > self.statistics.max_latency {
397 self.statistics.max_latency = latency;
398 }
399 }
400
401 let latency_ms = latency.as_millis() as f64;
403 if latency_ms < 10.0 {
404 self.statistics.latency_distribution.p10 += 1;
405 } else if latency_ms < 50.0 {
406 self.statistics.latency_distribution.p50 += 1;
407 } else if latency_ms < 100.0 {
408 self.statistics.latency_distribution.p90 += 1;
409 } else if latency_ms < 500.0 {
410 self.statistics.latency_distribution.p95 += 1;
411 } else {
412 self.statistics.latency_distribution.p99 += 1;
413 }
414
415 if query.cache_hit {
417 self.statistics.cache_hit_rate =
418 (self.statistics.cache_hit_rate * (self.statistics.total_queries - 1) as f32 + 1.0)
419 / self.statistics.total_queries as f32;
420 } else {
421 self.statistics.cache_hit_rate = (self.statistics.cache_hit_rate
422 * (self.statistics.total_queries - 1) as f32)
423 / self.statistics.total_queries as f32;
424 }
425
426 self.queries.push_back(query);
428 if self.queries.len() > self.max_retention {
429 self.queries.pop_front();
430 }
431 }
432
433 pub fn get_statistics(&self) -> QueryStatistics {
434 self.statistics.clone()
435 }
436
437 pub fn get_recent_queries(&self, count: usize) -> Vec<&QueryInfo> {
438 self.queries.iter().rev().take(count).collect()
439 }
440}
441
/// Aggregate query statistics maintained by [`QueryMetricsCollector`].
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct QueryStatistics {
    /// Total queries recorded.
    pub total_queries: u64,
    /// Queries that completed successfully.
    pub successful_queries: u64,
    /// Queries that failed.
    pub failed_queries: u64,
    /// Running mean latency over all recorded queries.
    pub average_latency: Duration,
    /// Smallest latency observed.
    pub min_latency: Duration,
    /// Largest latency observed.
    pub max_latency: Duration,
    /// Latency bucket counts; see [`LatencyDistribution`].
    pub latency_distribution: LatencyDistribution,
    /// Running mean of the cache-hit indicator (0.0–1.0).
    pub cache_hit_rate: f32,
    /// Queries per second.
    /// NOTE(review): never updated by the collector (always 0) — confirm intent.
    pub throughput_qps: f32,
}
455
456#[derive(Debug, Clone, Default, Serialize, Deserialize)]
457pub struct LatencyDistribution {
458 pub p10: u64, pub p50: u64, pub p90: u64, pub p95: u64, pub p99: u64, }
464
465#[derive(Debug, Clone, Serialize, Deserialize)]
467pub struct SystemMetrics {
468 pub timestamp: SystemTime,
469 pub cpu_usage: f32,
470 pub memory_usage: u64, pub memory_total: u64, pub disk_usage: u64, pub disk_total: u64, pub network_in: u64, pub network_out: u64, pub open_file_descriptors: u32,
477 pub thread_count: u32,
478}
479
/// Accumulates host resource samples plus running system statistics.
#[derive(Debug)]
pub struct SystemMetricsCollector {
    /// Recent samples, oldest first; trimmed to `max_retention`.
    metrics_history: VecDeque<SystemMetrics>,
    /// Current/peak aggregates updated on every `record_metrics`.
    statistics: SystemStatistics,
    /// Maximum number of samples kept in `metrics_history`.
    max_retention: usize,
}
487
488impl Default for SystemMetricsCollector {
489 fn default() -> Self {
490 Self::new()
491 }
492}
493
494impl SystemMetricsCollector {
495 pub fn new() -> Self {
496 Self {
497 metrics_history: VecDeque::new(),
498 statistics: SystemStatistics::default(),
499 max_retention: 1440, }
501 }
502
503 pub fn record_metrics(&mut self, metrics: SystemMetrics) {
504 self.statistics.current_cpu_usage = metrics.cpu_usage;
506 self.statistics.current_memory_usage = metrics.memory_usage;
507
508 if metrics.cpu_usage > self.statistics.peak_cpu_usage {
509 self.statistics.peak_cpu_usage = metrics.cpu_usage;
510 }
511
512 if metrics.memory_usage > self.statistics.peak_memory_usage {
513 self.statistics.peak_memory_usage = metrics.memory_usage;
514 }
515
516 self.metrics_history.push_back(metrics);
518 if self.metrics_history.len() > self.max_retention {
519 self.metrics_history.pop_front();
520 }
521 }
522
523 pub fn get_statistics(&self) -> SystemStatistics {
524 self.statistics.clone()
525 }
526
527 pub fn get_recent_metrics(&self, count: usize) -> Vec<&SystemMetrics> {
528 self.metrics_history.iter().rev().take(count).collect()
529 }
530}
531
/// Aggregate system statistics maintained by [`SystemMetricsCollector`].
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct SystemStatistics {
    /// CPU usage from the most recent sample.
    pub current_cpu_usage: f32,
    /// Highest CPU usage observed.
    pub peak_cpu_usage: f32,
    /// Memory usage from the most recent sample.
    pub current_memory_usage: u64,
    /// Highest memory usage observed.
    pub peak_memory_usage: u64,
    /// Mean CPU usage.
    /// NOTE(review): never computed by the collector as written — confirm.
    pub average_cpu_usage: f32,
    /// Mean memory usage.
    /// NOTE(review): never computed by the collector as written — confirm.
    pub average_memory_usage: u64,
}
542
543#[derive(Debug, Clone, Serialize, Deserialize)]
545pub struct QualityMetrics {
546 pub timestamp: SystemTime,
547 pub precision_at_1: f32,
548 pub precision_at_5: f32,
549 pub precision_at_10: f32,
550 pub recall_at_1: f32,
551 pub recall_at_5: f32,
552 pub recall_at_10: f32,
553 pub f1_score: f32,
554 pub mrr: f32, pub ndcg: f32, pub query_coverage: f32,
557 pub result_diversity: f32,
558}
559
/// Accumulates quality samples plus running quality averages.
#[derive(Debug)]
pub struct QualityMetricsCollector {
    /// Recent samples, oldest first; trimmed to `max_retention`.
    metrics_history: VecDeque<QualityMetrics>,
    /// Average precision/recall/F1 updated on every `record_metrics`.
    statistics: QualityStatistics,
    /// Maximum number of samples kept in `metrics_history`.
    max_retention: usize,
}
567
568impl Default for QualityMetricsCollector {
569 fn default() -> Self {
570 Self::new()
571 }
572}
573
574impl QualityMetricsCollector {
575 pub fn new() -> Self {
576 Self {
577 metrics_history: VecDeque::new(),
578 statistics: QualityStatistics::default(),
579 max_retention: 1000,
580 }
581 }
582
583 pub fn record_metrics(&mut self, metrics: QualityMetrics) {
584 let count = self.metrics_history.len() as f32;
586 if count > 0.0 {
587 self.statistics.average_precision_at_10 =
588 (self.statistics.average_precision_at_10 * count + metrics.precision_at_10)
589 / (count + 1.0);
590 self.statistics.average_recall_at_10 = (self.statistics.average_recall_at_10 * count
591 + metrics.recall_at_10)
592 / (count + 1.0);
593 self.statistics.average_f1_score =
594 (self.statistics.average_f1_score * count + metrics.f1_score) / (count + 1.0);
595 } else {
596 self.statistics.average_precision_at_10 = metrics.precision_at_10;
597 self.statistics.average_recall_at_10 = metrics.recall_at_10;
598 self.statistics.average_f1_score = metrics.f1_score;
599 }
600
601 self.metrics_history.push_back(metrics);
603 if self.metrics_history.len() > self.max_retention {
604 self.metrics_history.pop_front();
605 }
606 }
607
608 pub fn get_statistics(&self) -> QualityStatistics {
609 self.statistics.clone()
610 }
611}
612
/// Aggregate quality statistics maintained by [`QualityMetricsCollector`].
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct QualityStatistics {
    /// Mean precision@10 over recorded samples.
    pub average_precision_at_10: f32,
    /// Mean recall@10 over recorded samples.
    pub average_recall_at_10: f32,
    /// Mean F1 over recorded samples.
    pub average_f1_score: f32,
    /// Precision trend indicator.
    /// NOTE(review): never computed in this module — confirm.
    pub trend_precision: f32,
    /// Recall trend indicator.
    /// NOTE(review): never computed in this module — confirm.
    pub trend_recall: f32,
}
622
/// Compares observations against [`AlertThresholds`] and keeps a rolling
/// list of alerts raised in the last hour.
pub struct AlertManager {
    /// Thresholds this manager evaluates against.
    thresholds: AlertThresholds,
    /// Alerts raised recently; pruned to the last hour on each insert.
    active_alerts: Arc<RwLock<Vec<Alert>>>,
}
628
629impl AlertManager {
630 pub fn new(thresholds: AlertThresholds) -> Self {
631 Self {
632 thresholds,
633 active_alerts: Arc::new(RwLock::new(Vec::new())),
634 }
635 }
636
637 pub fn check_query_alerts(&self, query: &QueryInfo) {
638 let latency_ms = query.end_time.duration_since(query.start_time).as_millis() as f64;
639
640 if latency_ms > self.thresholds.max_query_latency {
641 self.add_alert(Alert {
642 alert_type: AlertType::HighLatency,
643 severity: AlertSeverity::Warning,
644 message: format!(
645 "Query latency {}ms exceeds threshold {}ms",
646 latency_ms, self.thresholds.max_query_latency
647 ),
648 timestamp: SystemTime::now(),
649 source: "query_monitor".to_string(),
650 });
651 }
652 }
653
654 pub fn check_system_alerts(&self, metrics: &SystemMetrics) {
655 if metrics.cpu_usage > self.thresholds.max_cpu_usage {
656 self.add_alert(Alert {
657 alert_type: AlertType::HighCpuUsage,
658 severity: AlertSeverity::Warning,
659 message: format!(
660 "CPU usage {:.1}% exceeds threshold {:.1}%",
661 metrics.cpu_usage * 100.0,
662 self.thresholds.max_cpu_usage * 100.0
663 ),
664 timestamp: SystemTime::now(),
665 source: "system_monitor".to_string(),
666 });
667 }
668
669 let memory_mb = metrics.memory_usage / (1024 * 1024);
670 if memory_mb > self.thresholds.max_memory_usage {
671 self.add_alert(Alert {
672 alert_type: AlertType::HighMemoryUsage,
673 severity: AlertSeverity::Critical,
674 message: format!(
675 "Memory usage {}MB exceeds threshold {}MB",
676 memory_mb, self.thresholds.max_memory_usage
677 ),
678 timestamp: SystemTime::now(),
679 source: "system_monitor".to_string(),
680 });
681 }
682 }
683
684 pub fn check_quality_alerts(&self, metrics: &QualityMetrics) {
685 if metrics.recall_at_10 < self.thresholds.min_recall_at_10 {
686 self.add_alert(Alert {
687 alert_type: AlertType::LowRecall,
688 severity: AlertSeverity::Warning,
689 message: format!(
690 "Recall@10 {:.3} below threshold {:.3}",
691 metrics.recall_at_10, self.thresholds.min_recall_at_10
692 ),
693 timestamp: SystemTime::now(),
694 source: "quality_monitor".to_string(),
695 });
696 }
697 }
698
699 fn add_alert(&self, alert: Alert) {
700 let mut alerts = self.active_alerts.write();
701 alerts.push(alert);
702
703 let cutoff = SystemTime::now() - Duration::from_secs(3600);
705 alerts.retain(|a| a.timestamp > cutoff);
706 }
707
708 pub fn get_active_alerts(&self) -> Vec<Alert> {
709 self.active_alerts.read().clone()
710 }
711}
712
/// A single threshold violation recorded by [`AlertManager`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Alert {
    /// Which condition was violated.
    pub alert_type: AlertType,
    /// How serious the violation is.
    pub severity: AlertSeverity,
    /// Human-readable description including observed and threshold values.
    pub message: String,
    /// When the alert was raised.
    pub timestamp: SystemTime,
    /// Which monitor raised it (e.g. "query_monitor").
    pub source: String,
}
722
/// Conditions that can raise an alert.
///
/// NOTE(review): only the first four variants are produced by the checks in
/// this module; `HighErrorRate`, `IndexCorruption`, and `ServiceDown` have no
/// producer here — confirm their source.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AlertType {
    HighLatency,
    HighCpuUsage,
    HighMemoryUsage,
    LowRecall,
    HighErrorRate,
    IndexCorruption,
    ServiceDown,
}
733
/// Severity levels for alerts, in increasing order of urgency.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AlertSeverity {
    Info,
    Warning,
    Critical,
    Emergency,
}
741
/// Holds derived trends and recommendations.
/// NOTE(review): `analyze_query` is currently a no-op, so nothing in this
/// module populates either collection yet — confirm the intended producer.
pub struct AnalyticsEngine {
    /// Trend data keyed by metric name.
    trends: Arc<RwLock<HashMap<String, TrendData>>>,
    /// Actionable recommendations derived from observed metrics.
    recommendations: Arc<RwLock<Vec<Recommendation>>>,
}
747
748impl Default for AnalyticsEngine {
749 fn default() -> Self {
750 Self::new()
751 }
752}
753
754impl AnalyticsEngine {
755 pub fn new() -> Self {
756 Self {
757 trends: Arc::new(RwLock::new(HashMap::new())),
758 recommendations: Arc::new(RwLock::new(Vec::new())),
759 }
760 }
761
762 pub fn analyze_query(&self, _query: &QueryInfo) {
763 }
766
767 pub fn get_trends(&self) -> HashMap<String, TrendData> {
768 self.trends.read().clone()
769 }
770
771 pub fn get_recommendations(&self) -> Vec<Recommendation> {
772 self.recommendations.read().clone()
773 }
774}
775
/// A time series for one metric with a classified direction.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrendData {
    /// Name of the tracked metric.
    pub metric_name: String,
    /// Observed values, parallel to `timestamps`.
    pub values: Vec<f64>,
    /// Observation times, parallel to `values`.
    pub timestamps: Vec<SystemTime>,
    /// Classified direction of the series.
    pub trend_direction: TrendDirection,
    /// Confidence in the classification (presumably 0.0–1.0 — confirm).
    pub confidence: f32,
}
785
/// Classified direction of a metric's trend.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum TrendDirection {
    Increasing,
    Decreasing,
    Stable,
    Volatile,
}
793
/// An actionable suggestion produced by the analytics engine.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Recommendation {
    /// What area the suggestion concerns.
    pub category: RecommendationCategory,
    /// How urgent it is.
    pub priority: RecommendationPriority,
    /// Short headline.
    pub title: String,
    /// Full explanation.
    pub description: String,
    /// Free-text estimate of the expected benefit.
    pub estimated_impact: String,
    /// Free-text estimate of the work required.
    pub implementation_effort: String,
}
804
/// Area of the system a recommendation targets.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RecommendationCategory {
    Performance,
    Quality,
    ResourceOptimization,
    Configuration,
    Maintenance,
}
813
/// Urgency of a recommendation, in increasing order.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RecommendationPriority {
    Low,
    Medium,
    High,
    Critical,
}
821
/// Snapshot served to dashboards via
/// [`EnhancedPerformanceMonitor::get_dashboard_data`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DashboardData {
    /// When any part of the snapshot was last touched.
    pub last_updated: SystemTime,
    /// Query aggregates.
    /// NOTE(review): the `update_*` methods only bump `last_updated`; these
    /// stats are never populated in this module — confirm.
    pub query_stats: QueryStatistics,
    /// System aggregates (see NOTE on `query_stats`).
    pub system_stats: SystemStatistics,
    /// Quality aggregates (see NOTE on `query_stats`).
    pub quality_stats: QualityStatistics,
    /// Number of active alerts (never updated in this module — confirm).
    pub alerts_count: usize,
    /// Per-metric trend series for charting.
    pub trends: HashMap<String, Vec<f64>>,
}
832
833impl DashboardData {
834 pub fn new() -> Self {
835 Self {
836 last_updated: SystemTime::now(),
837 query_stats: QueryStatistics::default(),
838 system_stats: SystemStatistics::default(),
839 quality_stats: QualityStatistics::default(),
840 alerts_count: 0,
841 trends: HashMap::new(),
842 }
843 }
844
845 pub fn update_query_stats(&mut self, _query: &QueryInfo) {
846 self.last_updated = SystemTime::now();
848 }
850
851 pub fn update_system_stats(&mut self, _metrics: &SystemMetrics) {
852 self.last_updated = SystemTime::now();
854 }
856
857 pub fn update_quality_stats(&mut self, _metrics: &QualityMetrics) {
858 self.last_updated = SystemTime::now();
860 }
862}
863
864impl Default for DashboardData {
865 fn default() -> Self {
866 Self::new()
867 }
868}
869
/// Point-in-time report assembled by
/// [`EnhancedPerformanceMonitor::generate_analytics_report`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnalyticsReport {
    /// When the report was assembled.
    pub timestamp: SystemTime,
    /// Query aggregates at report time.
    pub query_statistics: QueryStatistics,
    /// System aggregates at report time.
    pub system_statistics: SystemStatistics,
    /// Quality aggregates at report time.
    pub quality_statistics: QualityStatistics,
    /// Alerts active (raised within the last hour) at report time.
    pub active_alerts: Vec<Alert>,
    /// Trend series keyed by metric name.
    pub trends: HashMap<String, TrendData>,
    /// Current recommendations.
    pub recommendations: Vec<Recommendation>,
}
881
#[cfg(test)]
mod tests {
    use super::*;

    /// One successful query should show up in the aggregate counters.
    #[test]
    fn test_query_metrics_collection() {
        let mut collector = QueryMetricsCollector::new();

        let info = QueryInfo {
            query_id: "test_query".to_string(),
            query_type: QueryType::KNNSearch,
            query_text: Some("test query".to_string()),
            vector_dimensions: Some(384),
            k_value: Some(10),
            threshold: None,
            // Simulate a query that took roughly 50 ms.
            start_time: Instant::now() - Duration::from_millis(50),
            end_time: Instant::now(),
            success: true,
            error_message: None,
            results_count: 5,
            index_used: Some("hnsw".to_string()),
            cache_hit: false,
        };
        collector.record_query(info);

        let stats = collector.get_statistics();
        assert_eq!(stats.total_queries, 1);
        assert_eq!(stats.successful_queries, 1);
        assert_eq!(stats.failed_queries, 0);
    }

    /// A query slower than `max_query_latency` must raise a HighLatency alert.
    #[test]
    fn test_alert_generation() {
        let thresholds = AlertThresholds {
            max_query_latency: 100.0,
            max_error_rate: 0.05,
            min_recall_at_10: 0.90,
            max_memory_usage: 1024,
            max_cpu_usage: 0.80,
        };
        let alert_manager = AlertManager::new(thresholds);

        let slow_query = QueryInfo {
            query_id: "slow_query".to_string(),
            query_type: QueryType::KNNSearch,
            query_text: None,
            vector_dimensions: None,
            k_value: None,
            threshold: None,
            // ~200 ms: twice the configured budget.
            start_time: Instant::now() - Duration::from_millis(200),
            end_time: Instant::now(),
            success: true,
            error_message: None,
            results_count: 5,
            index_used: None,
            cache_hit: false,
        };
        alert_manager.check_query_alerts(&slow_query);

        let alerts = alert_manager.get_active_alerts();
        assert_eq!(alerts.len(), 1);
        assert!(matches!(alerts[0].alert_type, AlertType::HighLatency));
    }

    /// End-to-end smoke test: recording a query refreshes the dashboard.
    #[test]
    fn test_performance_monitor() {
        let monitor = EnhancedPerformanceMonitor::new(MonitoringConfig::default());

        let info = QueryInfo {
            query_id: "test".to_string(),
            query_type: QueryType::KNNSearch,
            query_text: None,
            vector_dimensions: Some(384),
            k_value: Some(10),
            threshold: None,
            start_time: Instant::now() - Duration::from_millis(25),
            end_time: Instant::now(),
            success: true,
            error_message: None,
            results_count: 8,
            index_used: Some("hnsw".to_string()),
            cache_hit: true,
        };
        monitor.record_query(info);

        let dashboard = monitor.get_dashboard_data();
        assert!(dashboard.last_updated <= SystemTime::now());
    }
}