// mockforge_analytics/queries.rs
1//! High-level query API for analytics data
2
3use crate::database::AnalyticsDatabase;
4use crate::error::Result;
5use crate::models::{
6    AnalyticsFilter, EndpointStat, ErrorSummary, Granularity, LatencyTrend, MetricsAggregate,
7    OverviewMetrics, ProtocolStat, TimeSeries, TimeSeriesPoint,
8};
9use chrono::{DateTime, Utc};
10use sqlx::Row;
11
12impl AnalyticsDatabase {
13    /// Get overview metrics for the dashboard
14    ///
15    /// # Errors
16    ///
17    /// Returns an error if any database query fails.
18    #[allow(clippy::too_many_lines)]
19    pub async fn get_overview_metrics(&self, duration_seconds: i64) -> Result<OverviewMetrics> {
20        let end_time = Utc::now().timestamp();
21        let start_time = end_time - duration_seconds;
22
23        let filter = AnalyticsFilter {
24            start_time: Some(start_time),
25            end_time: Some(end_time),
26            ..Default::default()
27        };
28
29        let aggregates = self.get_minute_aggregates(&filter).await?;
30
31        let total_requests: i64 = aggregates.iter().map(|a| a.request_count).sum();
32        let total_errors: i64 = aggregates.iter().map(|a| a.error_count).sum();
33        #[allow(clippy::cast_precision_loss)]
34        let error_rate = if total_requests > 0 {
35            (total_errors as f64 / total_requests as f64) * 100.0
36        } else {
37            0.0
38        };
39
40        let total_latency: f64 = aggregates.iter().map(|a| a.latency_sum).sum();
41        #[allow(clippy::cast_precision_loss, clippy::cast_possible_wrap)]
42        let latency_count = aggregates.iter().filter(|a| a.latency_sum > 0.0).count() as i64;
43        #[allow(clippy::cast_precision_loss)]
44        let avg_latency_ms = if latency_count > 0 {
45            total_latency / latency_count as f64
46        } else {
47            0.0
48        };
49
50        let p95_latencies: Vec<f64> = aggregates.iter().filter_map(|a| a.latency_p95).collect();
51        #[allow(clippy::cast_precision_loss)]
52        let p95_latency_ms = if p95_latencies.is_empty() {
53            0.0
54        } else {
55            p95_latencies.iter().sum::<f64>() / p95_latencies.len() as f64
56        };
57
58        let p99_latencies: Vec<f64> = aggregates.iter().filter_map(|a| a.latency_p99).collect();
59        #[allow(clippy::cast_precision_loss)]
60        let p99_latency_ms = if p99_latencies.is_empty() {
61            0.0
62        } else {
63            p99_latencies.iter().sum::<f64>() / p99_latencies.len() as f64
64        };
65
66        let total_bytes_sent: i64 = aggregates.iter().map(|a| a.bytes_sent).sum();
67        let total_bytes_received: i64 = aggregates.iter().map(|a| a.bytes_received).sum();
68
69        let active_connections =
70            aggregates.iter().filter_map(|a| a.active_connections).max().unwrap_or(0);
71
72        #[allow(clippy::cast_precision_loss)]
73        let requests_per_second = total_requests as f64 / duration_seconds as f64;
74
75        // Get top protocols
76        let top_protocols = self.get_top_protocols(5, None).await?;
77
78        // Get top endpoints
79        let top_endpoints_data = self.get_top_endpoints(10, None).await?;
80        let top_endpoints: Vec<EndpointStat> = top_endpoints_data
81            .iter()
82            .map(|e| {
83                #[allow(clippy::cast_precision_loss)]
84                let error_rate = if e.total_requests > 0 {
85                    (e.total_errors as f64 / e.total_requests as f64) * 100.0
86                } else {
87                    0.0
88                };
89                EndpointStat {
90                    endpoint: e.endpoint.clone(),
91                    protocol: e.protocol.clone(),
92                    method: e.method.clone(),
93                    request_count: e.total_requests,
94                    error_count: e.total_errors,
95                    error_rate,
96                    avg_latency_ms: e.avg_latency_ms.unwrap_or(0.0),
97                    p95_latency_ms: e.p95_latency_ms.unwrap_or(0.0),
98                }
99            })
100            .collect();
101
102        Ok(OverviewMetrics {
103            total_requests,
104            total_errors,
105            error_rate,
106            avg_latency_ms,
107            p95_latency_ms,
108            p99_latency_ms,
109            active_connections,
110            total_bytes_sent,
111            total_bytes_received,
112            requests_per_second,
113            top_protocols,
114            top_endpoints,
115        })
116    }
117
118    /// Get top protocols by request count
119    ///
120    /// # Errors
121    ///
122    /// Returns an error if the database query fails.
123    pub async fn get_top_protocols(
124        &self,
125        limit: i64,
126        workspace_id: Option<&str>,
127    ) -> Result<Vec<ProtocolStat>> {
128        let mut query = String::from(
129            r"
130            SELECT
131                protocol,
132                SUM(request_count) as total_requests,
133                SUM(error_count) as total_errors,
134                AVG(latency_sum / NULLIF(request_count, 0)) as avg_latency_ms
135            FROM metrics_aggregates_minute
136            WHERE 1=1
137            ",
138        );
139
140        if workspace_id.is_some() {
141            query.push_str(" AND workspace_id = ?");
142        }
143
144        query.push_str(
145            "
146            GROUP BY protocol
147            ORDER BY total_requests DESC
148            LIMIT ?
149            ",
150        );
151
152        let mut sql_query = sqlx::query(&query);
153
154        if let Some(workspace) = workspace_id {
155            sql_query = sql_query.bind(workspace);
156        }
157
158        sql_query = sql_query.bind(limit);
159
160        let rows = sql_query.fetch_all(self.pool()).await?;
161
162        let mut protocols = Vec::new();
163        for row in rows {
164            protocols.push(ProtocolStat {
165                protocol: row.get("protocol"),
166                request_count: row.get("total_requests"),
167                error_count: row.get("total_errors"),
168                avg_latency_ms: row.try_get("avg_latency_ms").unwrap_or(0.0),
169            });
170        }
171
172        Ok(protocols)
173    }
174
175    /// Get request count time series
176    ///
177    /// # Errors
178    ///
179    /// Returns an error if the database query fails.
180    #[allow(clippy::cast_precision_loss)]
181    pub async fn get_request_time_series(
182        &self,
183        filter: &AnalyticsFilter,
184        granularity: Granularity,
185    ) -> Result<Vec<TimeSeries>> {
186        let aggregates = self.get_minute_aggregates(filter).await?;
187
188        let bucket_size = match granularity {
189            Granularity::Minute => 60,
190            Granularity::Hour => 3600,
191            Granularity::Day => 86400,
192        };
193
194        // Group by protocol and time bucket
195        let mut series_map: std::collections::HashMap<String, Vec<TimeSeriesPoint>> =
196            std::collections::HashMap::new();
197
198        for agg in aggregates {
199            let bucket = (agg.timestamp / bucket_size) * bucket_size;
200            let point = TimeSeriesPoint {
201                timestamp: bucket,
202                value: agg.request_count as f64,
203            };
204
205            series_map.entry(agg.protocol.clone()).or_default().push(point);
206        }
207
208        // Convert to TimeSeries objects
209        let mut result: Vec<TimeSeries> = series_map
210            .into_iter()
211            .map(|(protocol, mut points)| {
212                points.sort_by_key(|p| p.timestamp);
213
214                // Aggregate points in the same bucket
215                let mut bucketed = Vec::new();
216                let mut current_bucket = None;
217                let mut current_sum = 0.0;
218
219                for point in points {
220                    match current_bucket {
221                        Some(bucket) if bucket == point.timestamp => {
222                            current_sum += point.value;
223                        }
224                        _ => {
225                            if let Some(bucket) = current_bucket {
226                                bucketed.push(TimeSeriesPoint {
227                                    timestamp: bucket,
228                                    value: current_sum,
229                                });
230                            }
231                            current_bucket = Some(point.timestamp);
232                            current_sum = point.value;
233                        }
234                    }
235                }
236
237                if let Some(bucket) = current_bucket {
238                    bucketed.push(TimeSeriesPoint {
239                        timestamp: bucket,
240                        value: current_sum,
241                    });
242                }
243
244                TimeSeries {
245                    label: protocol,
246                    data: bucketed,
247                }
248            })
249            .collect();
250
251        result.sort_by(|a, b| b.data.len().cmp(&a.data.len()));
252        Ok(result)
253    }
254
255    /// Get latency trends
256    ///
257    /// # Errors
258    ///
259    /// Returns an error if the database query fails.
260    #[allow(clippy::cast_precision_loss)]
261    pub async fn get_latency_trends(&self, filter: &AnalyticsFilter) -> Result<Vec<LatencyTrend>> {
262        let aggregates = self.get_minute_aggregates(filter).await?;
263
264        let mut trends = Vec::new();
265
266        // Group by timestamp and aggregate
267        let mut bucket_map: std::collections::HashMap<i64, Vec<&MetricsAggregate>> =
268            std::collections::HashMap::new();
269
270        for agg in &aggregates {
271            bucket_map.entry(agg.timestamp).or_default().push(agg);
272        }
273
274        for (timestamp, group) in bucket_map {
275            let avg = group
276                .iter()
277                .filter_map(|a| {
278                    if a.request_count > 0 {
279                        Some(a.latency_sum / a.request_count as f64)
280                    } else {
281                        None
282                    }
283                })
284                .sum::<f64>()
285                / group.len() as f64;
286
287            let min = group.iter().filter_map(|a| a.latency_min).fold(f64::INFINITY, f64::min);
288            let max = group.iter().filter_map(|a| a.latency_max).fold(f64::NEG_INFINITY, f64::max);
289            let p50 = group.iter().filter_map(|a| a.latency_p50).sum::<f64>() / group.len() as f64;
290            let p95 = group.iter().filter_map(|a| a.latency_p95).sum::<f64>() / group.len() as f64;
291            let p99 = group.iter().filter_map(|a| a.latency_p99).sum::<f64>() / group.len() as f64;
292
293            trends.push(LatencyTrend {
294                timestamp,
295                p50,
296                p95,
297                p99,
298                avg,
299                min: if min.is_finite() { min } else { 0.0 },
300                max: if max.is_finite() { max } else { 0.0 },
301            });
302        }
303
304        trends.sort_by_key(|t| t.timestamp);
305        Ok(trends)
306    }
307
308    /// Get error summary
309    ///
310    /// # Errors
311    ///
312    /// Returns an error if the database query fails.
313    pub async fn get_error_summary(
314        &self,
315        filter: &AnalyticsFilter,
316        limit: i64,
317    ) -> Result<Vec<ErrorSummary>> {
318        let errors = self.get_recent_errors(1000, filter).await?;
319
320        // Group by error type
321        let mut error_map: std::collections::HashMap<
322            String,
323            (i64, std::collections::HashSet<String>, i64),
324        > = std::collections::HashMap::new();
325
326        for error in errors {
327            let error_type = error.error_type.clone().unwrap_or_else(|| "unknown".to_string());
328            let error_category =
329                error.error_category.clone().unwrap_or_else(|| "other".to_string());
330            let endpoint = error.endpoint.clone().unwrap_or_default();
331
332            let entry = error_map
333                .entry(format!("{error_category}:{error_type}"))
334                .or_insert_with(|| (0, std::collections::HashSet::new(), 0));
335
336            entry.0 += 1;
337            entry.1.insert(endpoint);
338            entry.2 = entry.2.max(error.timestamp);
339        }
340
341        let mut summaries: Vec<ErrorSummary> = error_map
342            .into_iter()
343            .map(|(key, (count, endpoints, last_ts))| {
344                let parts: Vec<&str> = key.split(':').collect();
345                ErrorSummary {
346                    error_type: (*parts.get(1).unwrap_or(&"unknown")).to_string(),
347                    error_category: (*parts.first().unwrap_or(&"other")).to_string(),
348                    count,
349                    endpoints: endpoints.into_iter().collect(),
350                    last_occurrence: DateTime::from_timestamp(last_ts, 0).unwrap_or_else(Utc::now),
351                }
352            })
353            .collect();
354
355        summaries.sort_by(|a, b| b.count.cmp(&a.count));
356        #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
357        summaries.truncate(limit as usize);
358
359        Ok(summaries)
360    }
361}
362
#[cfg(test)]
#[allow(clippy::float_cmp)]
mod tests {
    use super::*;
    use crate::database::AnalyticsDatabase;
    use crate::models::{ErrorEvent, MetricsAggregate};
    use std::path::Path;

    /// Creates a fresh in-memory SQLite database with migrations applied,
    /// so every test starts from an empty schema.
    async fn setup_test_db() -> AnalyticsDatabase {
        let db = AnalyticsDatabase::new(Path::new(":memory:")).await.unwrap();
        db.run_migrations().await.unwrap();
        db
    }

    /// Builds a per-minute aggregate with fixed latency percentiles
    /// (min=10, max=100, p50=50, p95=95, p99=99) and byte counters,
    /// varying only the fields the individual tests care about.
    fn create_test_aggregate(
        timestamp: i64,
        protocol: &str,
        request_count: i64,
        error_count: i64,
        latency_sum: f64,
    ) -> MetricsAggregate {
        MetricsAggregate {
            id: None,
            timestamp,
            protocol: protocol.to_string(),
            method: Some("GET".to_string()),
            endpoint: Some("/api/test".to_string()),
            status_code: Some(200),
            workspace_id: None,
            environment: None,
            request_count,
            error_count,
            latency_sum,
            latency_min: Some(10.0),
            latency_max: Some(100.0),
            latency_p50: Some(50.0),
            latency_p95: Some(95.0),
            latency_p99: Some(99.0),
            bytes_sent: 1000,
            bytes_received: 500,
            active_connections: Some(5),
            created_at: None,
        }
    }

    /// Builds a 500-status HTTP error event with the given type, category,
    /// and endpoint; all remaining fields are fixed placeholder values.
    fn create_test_error(
        timestamp: i64,
        error_type: &str,
        error_category: &str,
        endpoint: &str,
    ) -> ErrorEvent {
        ErrorEvent {
            id: None,
            timestamp,
            protocol: "http".to_string(),
            method: Some("GET".to_string()),
            endpoint: Some(endpoint.to_string()),
            status_code: Some(500),
            error_type: Some(error_type.to_string()),
            error_message: Some("Test error".to_string()),
            error_category: Some(error_category.to_string()),
            request_id: Some("req-123".to_string()),
            trace_id: None,
            span_id: None,
            client_ip: Some("127.0.0.1".to_string()),
            user_agent: None,
            workspace_id: None,
            environment: None,
            metadata: None,
            created_at: None,
        }
    }

    // ==================== get_overview_metrics Tests ====================

    // An empty database must produce all-zero metrics rather than NaN/inf.
    #[tokio::test]
    async fn test_get_overview_metrics_empty_db() {
        let db = setup_test_db().await;
        let metrics = db.get_overview_metrics(3600).await.unwrap();

        assert_eq!(metrics.total_requests, 0);
        assert_eq!(metrics.total_errors, 0);
        assert_eq!(metrics.error_rate, 0.0);
        assert_eq!(metrics.avg_latency_ms, 0.0);
        assert_eq!(metrics.requests_per_second, 0.0);
    }

    #[tokio::test]
    async fn test_get_overview_metrics_with_data() {
        let db = setup_test_db().await;

        // Insert test data
        let now = Utc::now().timestamp();
        let agg1 = create_test_aggregate(now - 60, "http", 100, 5, 5000.0);
        let agg2 = create_test_aggregate(now - 120, "http", 200, 10, 10000.0);

        db.insert_minute_aggregate(&agg1).await.unwrap();
        db.insert_minute_aggregate(&agg2).await.unwrap();

        let metrics = db.get_overview_metrics(3600).await.unwrap();

        assert_eq!(metrics.total_requests, 300);
        assert_eq!(metrics.total_errors, 15);
        assert!((metrics.error_rate - 5.0).abs() < 0.01); // 15/300 * 100 = 5%
    }

    #[tokio::test]
    async fn test_get_overview_metrics_calculates_rps() {
        let db = setup_test_db().await;

        let now = Utc::now().timestamp();
        let agg = create_test_aggregate(now - 30, "http", 100, 0, 1000.0);
        db.insert_minute_aggregate(&agg).await.unwrap();

        let metrics = db.get_overview_metrics(100).await.unwrap();

        // 100 requests over 100 seconds = 1.0 rps
        assert!((metrics.requests_per_second - 1.0).abs() < 0.01);
    }

    // ==================== get_top_protocols Tests ====================

    #[tokio::test]
    async fn test_get_top_protocols_empty() {
        let db = setup_test_db().await;
        let protocols = db.get_top_protocols(5, None).await.unwrap();
        assert!(protocols.is_empty());
    }

    #[tokio::test]
    async fn test_get_top_protocols_multiple_protocols() {
        let db = setup_test_db().await;

        let now = Utc::now().timestamp();
        // HTTP has more requests
        let http_agg = create_test_aggregate(now - 60, "http", 1000, 10, 50000.0);
        // gRPC has fewer requests
        let grpc_agg = create_test_aggregate(now - 60, "grpc", 500, 5, 25000.0);
        // WebSocket has the fewest
        let ws_agg = create_test_aggregate(now - 60, "websocket", 100, 1, 5000.0);

        db.insert_minute_aggregate(&http_agg).await.unwrap();
        db.insert_minute_aggregate(&grpc_agg).await.unwrap();
        db.insert_minute_aggregate(&ws_agg).await.unwrap();

        let protocols = db.get_top_protocols(10, None).await.unwrap();

        assert_eq!(protocols.len(), 3);
        // Should be ordered by request count descending
        assert_eq!(protocols[0].protocol, "http");
        assert_eq!(protocols[0].request_count, 1000);
        assert_eq!(protocols[1].protocol, "grpc");
        assert_eq!(protocols[1].request_count, 500);
        assert_eq!(protocols[2].protocol, "websocket");
        assert_eq!(protocols[2].request_count, 100);
    }

    #[tokio::test]
    async fn test_get_top_protocols_respects_limit() {
        let db = setup_test_db().await;

        let now = Utc::now().timestamp();
        db.insert_minute_aggregate(&create_test_aggregate(now, "http", 100, 0, 1000.0))
            .await
            .unwrap();
        db.insert_minute_aggregate(&create_test_aggregate(now, "grpc", 80, 0, 800.0))
            .await
            .unwrap();
        db.insert_minute_aggregate(&create_test_aggregate(now, "websocket", 60, 0, 600.0))
            .await
            .unwrap();

        let protocols = db.get_top_protocols(2, None).await.unwrap();
        assert_eq!(protocols.len(), 2);
    }

    // ==================== get_request_time_series Tests ====================

    #[tokio::test]
    async fn test_get_request_time_series_empty() {
        let db = setup_test_db().await;

        let filter = AnalyticsFilter::default();
        let series = db.get_request_time_series(&filter, Granularity::Minute).await.unwrap();

        assert!(series.is_empty());
    }

    #[tokio::test]
    async fn test_get_request_time_series_minute_granularity() {
        let db = setup_test_db().await;

        // Insert data at different minute timestamps
        let base_time = 1_700_000_000i64; // Fixed timestamp for reproducibility
        db.insert_minute_aggregate(&create_test_aggregate(base_time, "http", 100, 0, 1000.0))
            .await
            .unwrap();
        db.insert_minute_aggregate(&create_test_aggregate(base_time + 60, "http", 150, 0, 1500.0))
            .await
            .unwrap();

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 120),
            ..Default::default()
        };

        let series = db.get_request_time_series(&filter, Granularity::Minute).await.unwrap();

        assert!(!series.is_empty());
        // Should have HTTP series
        let http_series = series.iter().find(|s| s.label == "http").unwrap();
        assert!(!http_series.data.is_empty());
    }

    #[tokio::test]
    async fn test_get_request_time_series_hour_granularity() {
        let db = setup_test_db().await;

        // Insert data in the same hour
        let base_time = 1_700_000_000i64;
        db.insert_minute_aggregate(&create_test_aggregate(base_time, "http", 100, 0, 1000.0))
            .await
            .unwrap();
        db.insert_minute_aggregate(&create_test_aggregate(base_time + 60, "http", 100, 0, 1000.0))
            .await
            .unwrap();

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 3700),
            ..Default::default()
        };

        let series = db.get_request_time_series(&filter, Granularity::Hour).await.unwrap();

        assert!(!series.is_empty());
        let http_series = series.iter().find(|s| s.label == "http").unwrap();
        // Data points in the same hour bucket should be aggregated
        // Both 100 request counts should aggregate to 200
        let total: f64 = http_series.data.iter().map(|p| p.value).sum();
        assert_eq!(total, 200.0);
    }

    // ==================== get_latency_trends Tests ====================

    #[tokio::test]
    async fn test_get_latency_trends_empty() {
        let db = setup_test_db().await;

        let filter = AnalyticsFilter::default();
        let trends = db.get_latency_trends(&filter).await.unwrap();

        assert!(trends.is_empty());
    }

    // With a single aggregate, every trend statistic should pass through
    // the stored values unchanged.
    #[tokio::test]
    async fn test_get_latency_trends_with_data() {
        let db = setup_test_db().await;

        let base_time = 1_700_000_000i64;
        let mut agg = create_test_aggregate(base_time, "http", 100, 0, 5000.0);
        agg.latency_p50 = Some(50.0);
        agg.latency_p95 = Some(95.0);
        agg.latency_p99 = Some(99.0);
        agg.latency_min = Some(10.0);
        agg.latency_max = Some(150.0);

        db.insert_minute_aggregate(&agg).await.unwrap();

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 60),
            ..Default::default()
        };

        let trends = db.get_latency_trends(&filter).await.unwrap();

        assert_eq!(trends.len(), 1);
        let trend = &trends[0];
        assert_eq!(trend.timestamp, base_time);
        assert_eq!(trend.p50, 50.0);
        assert_eq!(trend.p95, 95.0);
        assert_eq!(trend.p99, 99.0);
        assert_eq!(trend.min, 10.0);
        assert_eq!(trend.max, 150.0);
    }

    // Inserts out of chronological order to verify the final sort.
    #[tokio::test]
    async fn test_get_latency_trends_sorted_by_timestamp() {
        let db = setup_test_db().await;

        let base_time = 1_700_000_000i64;
        db.insert_minute_aggregate(&create_test_aggregate(base_time + 120, "http", 100, 0, 1000.0))
            .await
            .unwrap();
        db.insert_minute_aggregate(&create_test_aggregate(base_time, "http", 100, 0, 1000.0))
            .await
            .unwrap();
        db.insert_minute_aggregate(&create_test_aggregate(base_time + 60, "http", 100, 0, 1000.0))
            .await
            .unwrap();

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 180),
            ..Default::default()
        };

        let trends = db.get_latency_trends(&filter).await.unwrap();

        // Should be sorted by timestamp ascending
        assert!(trends.windows(2).all(|w| w[0].timestamp <= w[1].timestamp));
    }

    // ==================== get_error_summary Tests ====================

    #[tokio::test]
    async fn test_get_error_summary_empty() {
        let db = setup_test_db().await;

        let filter = AnalyticsFilter::default();
        let summary = db.get_error_summary(&filter, 10).await.unwrap();

        assert!(summary.is_empty());
    }

    #[tokio::test]
    async fn test_get_error_summary_groups_by_type() {
        let db = setup_test_db().await;

        let base_time = Utc::now().timestamp();
        // Insert multiple errors of the same type
        for i in 0..5 {
            db.insert_error_event(&create_test_error(
                base_time + i,
                "ConnectionError",
                "network_error",
                "/api/users",
            ))
            .await
            .unwrap();
        }

        // Insert errors of a different type
        for i in 0..3 {
            db.insert_error_event(&create_test_error(
                base_time + i,
                "ValidationError",
                "client_error",
                "/api/orders",
            ))
            .await
            .unwrap();
        }

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 60),
            ..Default::default()
        };

        let summary = db.get_error_summary(&filter, 10).await.unwrap();

        assert_eq!(summary.len(), 2);
        // Should be sorted by count descending
        assert_eq!(summary[0].count, 5);
        assert_eq!(summary[0].error_type, "ConnectionError");
        assert_eq!(summary[1].count, 3);
        assert_eq!(summary[1].error_type, "ValidationError");
    }

    #[tokio::test]
    async fn test_get_error_summary_collects_endpoints() {
        let db = setup_test_db().await;

        let base_time = Utc::now().timestamp();
        // Same error type from different endpoints
        db.insert_error_event(&create_test_error(
            base_time,
            "Timeout",
            "timeout_error",
            "/api/users",
        ))
        .await
        .unwrap();
        db.insert_error_event(&create_test_error(
            base_time + 1,
            "Timeout",
            "timeout_error",
            "/api/orders",
        ))
        .await
        .unwrap();
        db.insert_error_event(&create_test_error(
            base_time + 2,
            "Timeout",
            "timeout_error",
            "/api/products",
        ))
        .await
        .unwrap();

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 60),
            ..Default::default()
        };

        let summary = db.get_error_summary(&filter, 10).await.unwrap();

        assert_eq!(summary.len(), 1);
        assert_eq!(summary[0].count, 3);
        assert_eq!(summary[0].endpoints.len(), 3);
        assert!(summary[0].endpoints.contains(&"/api/users".to_string()));
        assert!(summary[0].endpoints.contains(&"/api/orders".to_string()));
        assert!(summary[0].endpoints.contains(&"/api/products".to_string()));
    }

    #[tokio::test]
    async fn test_get_error_summary_respects_limit() {
        let db = setup_test_db().await;

        let base_time = Utc::now().timestamp();
        // Create 5 different error types
        for i in 0..5 {
            db.insert_error_event(&create_test_error(
                base_time + i,
                &format!("Error{i}"),
                "server_error",
                "/api/test",
            ))
            .await
            .unwrap();
        }

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 60),
            ..Default::default()
        };

        let summary = db.get_error_summary(&filter, 3).await.unwrap();

        assert_eq!(summary.len(), 3);
    }

    // The middle insert (base_time + 50) is deliberately out of order to
    // verify last_occurrence tracks the max timestamp, not the last insert.
    #[tokio::test]
    async fn test_get_error_summary_tracks_last_occurrence() {
        let db = setup_test_db().await;

        let base_time = 1_700_000_000i64;
        db.insert_error_event(&create_test_error(
            base_time,
            "TestError",
            "server_error",
            "/api/test",
        ))
        .await
        .unwrap();
        db.insert_error_event(&create_test_error(
            base_time + 100,
            "TestError",
            "server_error",
            "/api/test",
        ))
        .await
        .unwrap();
        db.insert_error_event(&create_test_error(
            base_time + 50,
            "TestError",
            "server_error",
            "/api/test",
        ))
        .await
        .unwrap();

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 200),
            ..Default::default()
        };

        let summary = db.get_error_summary(&filter, 10).await.unwrap();

        assert_eq!(summary.len(), 1);
        // Last occurrence should be the max timestamp
        assert_eq!(summary[0].last_occurrence.timestamp(), base_time + 100);
    }
}