use crate::database::AnalyticsDatabase;
use crate::error::Result;
use crate::models::{
    AnalyticsFilter, EndpointStat, ErrorSummary, Granularity, LatencyTrend, MetricsAggregate,
    OverviewMetrics, ProtocolStat, TimeSeries, TimeSeriesPoint,
};
use chrono::{DateTime, Utc};
use sqlx::Row;

impl AnalyticsDatabase {
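    /// Summarizes the trailing `duration_seconds` window into a single
    /// `OverviewMetrics` snapshot by folding the per-minute aggregate rows.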
    pub async fn get_overview_metrics(&self, duration_seconds: i64) -> Result<OverviewMetrics> {
        let end_time = Utc::now().timestamp();
        let start_time = end_time - duration_seconds;

        let filter = AnalyticsFilter {
            start_time: Some(start_time),
            end_time: Some(end_time),
            ..Default::default()
        };

        let aggregates = self.get_minute_aggregates(&filter).await?;

        let total_requests: i64 = aggregates.iter().map(|a| a.request_count).sum();
        let total_errors: i64 = aggregates.iter().map(|a| a.error_count).sum();
        let error_rate = if total_requests > 0 {
            (total_errors as f64 / total_requests as f64) * 100.0
        } else {
            0.0
        };

        // `latency_sum` accumulates per-request latency, so the request-weighted
        // average divides total latency by total requests, not by bucket count.
        let total_latency: f64 = aggregates.iter().map(|a| a.latency_sum).sum();
        let avg_latency_ms = if total_requests > 0 {
            total_latency / total_requests as f64
        } else {
            0.0
        };

        // Averaging per-minute percentiles only approximates the true
        // window-wide percentile, but avoids retaining raw samples.
        let p95_latencies: Vec<f64> = aggregates.iter().filter_map(|a| a.latency_p95).collect();
        let p95_latency_ms = if p95_latencies.is_empty() {
            0.0
        } else {
            p95_latencies.iter().sum::<f64>() / p95_latencies.len() as f64
        };

        let p99_latencies: Vec<f64> = aggregates.iter().filter_map(|a| a.latency_p99).collect();
        let p99_latency_ms = if p99_latencies.is_empty() {
            0.0
        } else {
            p99_latencies.iter().sum::<f64>() / p99_latencies.len() as f64
        };

        let total_bytes_sent: i64 = aggregates.iter().map(|a| a.bytes_sent).sum();
        let total_bytes_received: i64 = aggregates.iter().map(|a| a.bytes_received).sum();

        let active_connections =
            aggregates.iter().filter_map(|a| a.active_connections).max().unwrap_or(0);

        let requests_per_second = if duration_seconds > 0 {
            total_requests as f64 / duration_seconds as f64
        } else {
            0.0
        };

        // Note: the top-N helpers below rank over the full aggregate tables,
        // not just the requested window.
        let top_protocols = self.get_top_protocols(5, None).await?;

        let top_endpoints_data = self.get_top_endpoints(10, None).await?;
        let top_endpoints: Vec<EndpointStat> = top_endpoints_data
            .iter()
            .map(|e| {
                let error_rate = if e.total_requests > 0 {
                    (e.total_errors as f64 / e.total_requests as f64) * 100.0
                } else {
                    0.0
                };
                EndpointStat {
                    endpoint: e.endpoint.clone(),
                    protocol: e.protocol.clone(),
                    method: e.method.clone(),
                    request_count: e.total_requests,
                    error_count: e.total_errors,
                    error_rate,
                    avg_latency_ms: e.avg_latency_ms.unwrap_or(0.0),
                    p95_latency_ms: e.p95_latency_ms.unwrap_or(0.0),
                }
            })
            .collect();

        Ok(OverviewMetrics {
            total_requests,
            total_errors,
            error_rate,
            avg_latency_ms,
            p95_latency_ms,
            p99_latency_ms,
            active_connections,
            total_bytes_sent,
            total_bytes_received,
            requests_per_second,
            top_protocols,
            top_endpoints,
        })
    }

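    /// Returns the `limit` busiest protocols by total request count,
    /// optionally scoped to a single workspace.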
    pub async fn get_top_protocols(
        &self,
        limit: i64,
        workspace_id: Option<&str>,
    ) -> Result<Vec<ProtocolStat>> {
        let mut query = String::from(
            r"
            SELECT
                protocol,
                SUM(request_count) as total_requests,
                SUM(error_count) as total_errors,
                AVG(latency_sum / NULLIF(request_count, 0)) as avg_latency_ms
            FROM metrics_aggregates_minute
            WHERE 1=1
            ",
        );

        if workspace_id.is_some() {
            query.push_str(" AND workspace_id = ?");
        }

        query.push_str(
            "
            GROUP BY protocol
            ORDER BY total_requests DESC
            LIMIT ?
            ",
        );

        // Bind parameters in the same order their placeholders were appended.
        let mut sql_query = sqlx::query(&query);

        if let Some(workspace) = workspace_id {
            sql_query = sql_query.bind(workspace);
        }

        sql_query = sql_query.bind(limit);

        let rows = sql_query.fetch_all(self.pool()).await?;

        let mut protocols = Vec::new();
        for row in rows {
            protocols.push(ProtocolStat {
                protocol: row.get("protocol"),
                request_count: row.get("total_requests"),
                error_count: row.get("total_errors"),
                // avg_latency_ms is NULL when a group has no latency data.
                avg_latency_ms: row.try_get("avg_latency_ms").unwrap_or(0.0),
            });
        }

        Ok(protocols)
    }

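    /// Builds one time series per protocol, summing request counts into
    /// fixed-size buckets of the requested granularity.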
    pub async fn get_request_time_series(
        &self,
        filter: &AnalyticsFilter,
        granularity: Granularity,
    ) -> Result<Vec<TimeSeries>> {
        let aggregates = self.get_minute_aggregates(filter).await?;

        let bucket_size = match granularity {
            Granularity::Minute => 60,
            Granularity::Hour => 3600,
            Granularity::Day => 86400,
        };

        let mut series_map: std::collections::HashMap<String, Vec<TimeSeriesPoint>> =
            std::collections::HashMap::new();

        for agg in aggregates {
            // Truncate each timestamp down to the start of its bucket.
            let bucket = (agg.timestamp / bucket_size) * bucket_size;
            let point = TimeSeriesPoint {
                timestamp: bucket,
                value: agg.request_count as f64,
            };

            series_map.entry(agg.protocol.clone()).or_default().push(point);
        }

        let mut result: Vec<TimeSeries> = series_map
            .into_iter()
            .map(|(protocol, mut points)| {
                points.sort_by_key(|p| p.timestamp);

                // Merge consecutive points that landed in the same bucket.
                let mut aggregated = Vec::new();
                let mut current_bucket = None;
                let mut current_sum = 0.0;

                for point in points {
                    match current_bucket {
                        Some(bucket) if bucket == point.timestamp => {
                            current_sum += point.value;
                        }
                        _ => {
                            if let Some(bucket) = current_bucket {
                                aggregated.push(TimeSeriesPoint {
                                    timestamp: bucket,
                                    value: current_sum,
                                });
                            }
                            current_bucket = Some(point.timestamp);
                            current_sum = point.value;
                        }
                    }
                }

                // Flush the final bucket.
                if let Some(bucket) = current_bucket {
                    aggregated.push(TimeSeriesPoint {
                        timestamp: bucket,
                        value: current_sum,
                    });
                }

                TimeSeries {
                    label: protocol,
                    data: aggregated,
                }
            })
            .collect();

        // Order the densest series first.
        result.sort_by(|a, b| b.data.len().cmp(&a.data.len()));
        Ok(result)
    }

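    /// Produces per-timestamp latency statistics (avg/min/max and averaged
    /// percentiles) across all aggregate rows sharing a minute bucket.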
    pub async fn get_latency_trends(&self, filter: &AnalyticsFilter) -> Result<Vec<LatencyTrend>> {
        let aggregates = self.get_minute_aggregates(filter).await?;

        let mut trends = Vec::new();

        let mut bucket_map: std::collections::HashMap<i64, Vec<&MetricsAggregate>> =
            std::collections::HashMap::new();

        for agg in &aggregates {
            bucket_map.entry(agg.timestamp).or_default().push(agg);
        }

        // Average only over rows that actually report a value, so rows with
        // missing metrics do not drag the mean toward zero.
        let mean_of = |values: Vec<f64>| {
            if values.is_empty() {
                0.0
            } else {
                values.iter().sum::<f64>() / values.len() as f64
            }
        };

        for (timestamp, group) in bucket_map {
            let avg = mean_of(
                group
                    .iter()
                    .filter_map(|a| {
                        if a.request_count > 0 {
                            Some(a.latency_sum / a.request_count as f64)
                        } else {
                            None
                        }
                    })
                    .collect(),
            );

            let min = group.iter().filter_map(|a| a.latency_min).fold(f64::INFINITY, f64::min);
            let max = group.iter().filter_map(|a| a.latency_max).fold(f64::NEG_INFINITY, f64::max);
            let p50 = mean_of(group.iter().filter_map(|a| a.latency_p50).collect());
            let p95 = mean_of(group.iter().filter_map(|a| a.latency_p95).collect());
            let p99 = mean_of(group.iter().filter_map(|a| a.latency_p99).collect());

            trends.push(LatencyTrend {
                timestamp,
                p50,
                p95,
                p99,
                avg,
                min: if min.is_finite() { min } else { 0.0 },
                max: if max.is_finite() { max } else { 0.0 },
            });
        }

        trends.sort_by_key(|t| t.timestamp);
        Ok(trends)
    }

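    /// Groups recent errors by (category, type), counting occurrences,
    /// collecting affected endpoints, and tracking the latest occurrence.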
    pub async fn get_error_summary(
        &self,
        filter: &AnalyticsFilter,
        limit: i64,
    ) -> Result<Vec<ErrorSummary>> {
        // Summarize at most the 1000 most recent matching errors.
        let errors = self.get_recent_errors(1000, filter).await?;

        let mut error_map: std::collections::HashMap<
            (String, String),
            (i64, std::collections::HashSet<String>, i64),
        > = std::collections::HashMap::new();

        for error in errors {
            let error_type = error.error_type.clone().unwrap_or_else(|| "unknown".to_string());
            let error_category =
                error.error_category.clone().unwrap_or_else(|| "other".to_string());
            let endpoint = error.endpoint.clone().unwrap_or_default();

            // Key on the (category, type) pair directly; a "category:type"
            // string key would split apart incorrectly if either part
            // contained a ':'.
            let entry = error_map.entry((error_category, error_type)).or_insert((
                0,
                std::collections::HashSet::new(),
                0,
            ));

            entry.0 += 1;
            entry.1.insert(endpoint);
            entry.2 = entry.2.max(error.timestamp);
        }

        let mut summaries: Vec<ErrorSummary> = error_map
            .into_iter()
            .map(|((error_category, error_type), (count, endpoints, last_ts))| ErrorSummary {
                error_type,
                error_category,
                count,
                endpoints: endpoints.into_iter().collect(),
                last_occurrence: DateTime::from_timestamp(last_ts, 0).unwrap_or_else(Utc::now),
            })
            .collect();

        summaries.sort_by(|a, b| b.count.cmp(&a.count));
        summaries.truncate(limit as usize);

        Ok(summaries)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::database::AnalyticsDatabase;
    use crate::models::{ErrorEvent, MetricsAggregate};
    use std::path::Path;

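    /// Creates a fresh in-memory (`:memory:`) database with migrations applied.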
    async fn setup_test_db() -> AnalyticsDatabase {
        let db = AnalyticsDatabase::new(Path::new(":memory:")).await.unwrap();
        db.run_migrations().await.unwrap();
        db
    }

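    /// Builds a minute aggregate with fixed latency percentiles and byte
    /// counts so tests only need to vary the fields they assert on.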
    fn create_test_aggregate(
        timestamp: i64,
        protocol: &str,
        request_count: i64,
        error_count: i64,
        latency_sum: f64,
    ) -> MetricsAggregate {
        MetricsAggregate {
            id: None,
            timestamp,
            protocol: protocol.to_string(),
            method: Some("GET".to_string()),
            endpoint: Some("/api/test".to_string()),
            status_code: Some(200),
            workspace_id: None,
            environment: None,
            request_count,
            error_count,
            latency_sum,
            latency_min: Some(10.0),
            latency_max: Some(100.0),
            latency_p50: Some(50.0),
            latency_p95: Some(95.0),
            latency_p99: Some(99.0),
            bytes_sent: 1000,
            bytes_received: 500,
            active_connections: Some(5),
            created_at: None,
        }
    }

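    /// Builds an HTTP 500 error event; type, category, and endpoint vary per test.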
    fn create_test_error(
        timestamp: i64,
        error_type: &str,
        error_category: &str,
        endpoint: &str,
    ) -> ErrorEvent {
        ErrorEvent {
            id: None,
            timestamp,
            protocol: "http".to_string(),
            method: Some("GET".to_string()),
            endpoint: Some(endpoint.to_string()),
            status_code: Some(500),
            error_type: Some(error_type.to_string()),
            error_message: Some("Test error".to_string()),
            error_category: Some(error_category.to_string()),
            request_id: Some("req-123".to_string()),
            trace_id: None,
            span_id: None,
            client_ip: Some("127.0.0.1".to_string()),
            user_agent: None,
            workspace_id: None,
            environment: None,
            metadata: None,
            created_at: None,
        }
    }

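    // --- get_overview_metrics ---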
    #[tokio::test]
    async fn test_get_overview_metrics_empty_db() {
        let db = setup_test_db().await;
        let metrics = db.get_overview_metrics(3600).await.unwrap();

        assert_eq!(metrics.total_requests, 0);
        assert_eq!(metrics.total_errors, 0);
        assert_eq!(metrics.error_rate, 0.0);
        assert_eq!(metrics.avg_latency_ms, 0.0);
        assert_eq!(metrics.requests_per_second, 0.0);
    }

    #[tokio::test]
    async fn test_get_overview_metrics_with_data() {
        let db = setup_test_db().await;

        let now = Utc::now().timestamp();
        let agg1 = create_test_aggregate(now - 60, "http", 100, 5, 5000.0);
        let agg2 = create_test_aggregate(now - 120, "http", 200, 10, 10000.0);

        db.insert_minute_aggregate(&agg1).await.unwrap();
        db.insert_minute_aggregate(&agg2).await.unwrap();

        let metrics = db.get_overview_metrics(3600).await.unwrap();

        assert_eq!(metrics.total_requests, 300);
        assert_eq!(metrics.total_errors, 15);
        assert!((metrics.error_rate - 5.0).abs() < 0.01);
        // Request-weighted average latency: 15000ms total / 300 requests.
        assert!((metrics.avg_latency_ms - 50.0).abs() < 0.01);
    }

    #[tokio::test]
    async fn test_get_overview_metrics_calculates_rps() {
        let db = setup_test_db().await;

        let now = Utc::now().timestamp();
        let agg = create_test_aggregate(now - 30, "http", 100, 0, 1000.0);
        db.insert_minute_aggregate(&agg).await.unwrap();

        let metrics = db.get_overview_metrics(100).await.unwrap();

        // 100 requests over a 100-second window.
        assert!((metrics.requests_per_second - 1.0).abs() < 0.01);
    }

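    // --- get_top_protocols ---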
    #[tokio::test]
    async fn test_get_top_protocols_empty() {
        let db = setup_test_db().await;
        let protocols = db.get_top_protocols(5, None).await.unwrap();
        assert!(protocols.is_empty());
    }

    #[tokio::test]
    async fn test_get_top_protocols_multiple_protocols() {
        let db = setup_test_db().await;

        let now = Utc::now().timestamp();
        let http_agg = create_test_aggregate(now - 60, "http", 1000, 10, 50000.0);
        let grpc_agg = create_test_aggregate(now - 60, "grpc", 500, 5, 25000.0);
        let ws_agg = create_test_aggregate(now - 60, "websocket", 100, 1, 5000.0);

        db.insert_minute_aggregate(&http_agg).await.unwrap();
        db.insert_minute_aggregate(&grpc_agg).await.unwrap();
        db.insert_minute_aggregate(&ws_agg).await.unwrap();

        let protocols = db.get_top_protocols(10, None).await.unwrap();

        // Ordered by total request count, descending.
        assert_eq!(protocols.len(), 3);
        assert_eq!(protocols[0].protocol, "http");
        assert_eq!(protocols[0].request_count, 1000);
        assert_eq!(protocols[1].protocol, "grpc");
        assert_eq!(protocols[1].request_count, 500);
        assert_eq!(protocols[2].protocol, "websocket");
        assert_eq!(protocols[2].request_count, 100);
    }

    #[tokio::test]
    async fn test_get_top_protocols_respects_limit() {
        let db = setup_test_db().await;

        let now = Utc::now().timestamp();
        db.insert_minute_aggregate(&create_test_aggregate(now, "http", 100, 0, 1000.0))
            .await
            .unwrap();
        db.insert_minute_aggregate(&create_test_aggregate(now, "grpc", 80, 0, 800.0))
            .await
            .unwrap();
        db.insert_minute_aggregate(&create_test_aggregate(now, "websocket", 60, 0, 600.0))
            .await
            .unwrap();

        let protocols = db.get_top_protocols(2, None).await.unwrap();
        assert_eq!(protocols.len(), 2);
    }

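    // --- get_request_time_series ---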
    #[tokio::test]
    async fn test_get_request_time_series_empty() {
        let db = setup_test_db().await;

        let filter = AnalyticsFilter::default();
        let series = db.get_request_time_series(&filter, Granularity::Minute).await.unwrap();

        assert!(series.is_empty());
    }

    #[tokio::test]
    async fn test_get_request_time_series_minute_granularity() {
        let db = setup_test_db().await;

        // Two aggregates one minute apart.
        let base_time = 1700000000i64;
        db.insert_minute_aggregate(&create_test_aggregate(base_time, "http", 100, 0, 1000.0))
            .await
            .unwrap();
        db.insert_minute_aggregate(&create_test_aggregate(base_time + 60, "http", 150, 0, 1500.0))
            .await
            .unwrap();

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 120),
            ..Default::default()
        };

        let series = db.get_request_time_series(&filter, Granularity::Minute).await.unwrap();

        assert!(!series.is_empty());
        let http_series = series.iter().find(|s| s.label == "http").unwrap();
        assert!(!http_series.data.is_empty());
    }

    #[tokio::test]
    async fn test_get_request_time_series_hour_granularity() {
        let db = setup_test_db().await;

        let base_time = 1700000000i64;
        db.insert_minute_aggregate(&create_test_aggregate(base_time, "http", 100, 0, 1000.0))
            .await
            .unwrap();
        db.insert_minute_aggregate(&create_test_aggregate(base_time + 60, "http", 100, 0, 1000.0))
            .await
            .unwrap();

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 3700),
            ..Default::default()
        };

        let series = db.get_request_time_series(&filter, Granularity::Hour).await.unwrap();

        assert!(!series.is_empty());
        let http_series = series.iter().find(|s| s.label == "http").unwrap();
        // Both minute aggregates fall into the same hour bucket.
        let total: f64 = http_series.data.iter().map(|p| p.value).sum();
        assert_eq!(total, 200.0);
    }

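    // --- get_latency_trends ---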
    #[tokio::test]
    async fn test_get_latency_trends_empty() {
        let db = setup_test_db().await;

        let filter = AnalyticsFilter::default();
        let trends = db.get_latency_trends(&filter).await.unwrap();

        assert!(trends.is_empty());
    }

    #[tokio::test]
    async fn test_get_latency_trends_with_data() {
        let db = setup_test_db().await;

        let base_time = 1700000000i64;
        let mut agg = create_test_aggregate(base_time, "http", 100, 0, 5000.0);
        agg.latency_p50 = Some(50.0);
        agg.latency_p95 = Some(95.0);
        agg.latency_p99 = Some(99.0);
        agg.latency_min = Some(10.0);
        agg.latency_max = Some(150.0);

        db.insert_minute_aggregate(&agg).await.unwrap();

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 60),
            ..Default::default()
        };

        let trends = db.get_latency_trends(&filter).await.unwrap();

        assert_eq!(trends.len(), 1);
        let trend = &trends[0];
        assert_eq!(trend.timestamp, base_time);
        assert_eq!(trend.p50, 50.0);
        assert_eq!(trend.p95, 95.0);
        assert_eq!(trend.p99, 99.0);
        assert_eq!(trend.min, 10.0);
        assert_eq!(trend.max, 150.0);
    }

    #[tokio::test]
    async fn test_get_latency_trends_sorted_by_timestamp() {
        let db = setup_test_db().await;

        // Insert out of order; results should come back sorted.
        let base_time = 1700000000i64;
        db.insert_minute_aggregate(&create_test_aggregate(base_time + 120, "http", 100, 0, 1000.0))
            .await
            .unwrap();
        db.insert_minute_aggregate(&create_test_aggregate(base_time, "http", 100, 0, 1000.0))
            .await
            .unwrap();
        db.insert_minute_aggregate(&create_test_aggregate(base_time + 60, "http", 100, 0, 1000.0))
            .await
            .unwrap();

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 180),
            ..Default::default()
        };

        let trends = db.get_latency_trends(&filter).await.unwrap();

        assert!(trends.windows(2).all(|w| w[0].timestamp <= w[1].timestamp));
    }

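    // --- get_error_summary ---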
    #[tokio::test]
    async fn test_get_error_summary_empty() {
        let db = setup_test_db().await;

        let filter = AnalyticsFilter::default();
        let summary = db.get_error_summary(&filter, 10).await.unwrap();

        assert!(summary.is_empty());
    }

    #[tokio::test]
    async fn test_get_error_summary_groups_by_type() {
        let db = setup_test_db().await;

        let base_time = Utc::now().timestamp();
        for i in 0..5 {
            db.insert_error_event(&create_test_error(
                base_time + i,
                "ConnectionError",
                "network_error",
                "/api/users",
            ))
            .await
            .unwrap();
        }

        for i in 0..3 {
            db.insert_error_event(&create_test_error(
                base_time + i,
                "ValidationError",
                "client_error",
                "/api/orders",
            ))
            .await
            .unwrap();
        }

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 60),
            ..Default::default()
        };

        let summary = db.get_error_summary(&filter, 10).await.unwrap();

        // Sorted by count, descending.
        assert_eq!(summary.len(), 2);
        assert_eq!(summary[0].count, 5);
        assert_eq!(summary[0].error_type, "ConnectionError");
        assert_eq!(summary[1].count, 3);
        assert_eq!(summary[1].error_type, "ValidationError");
    }

    #[tokio::test]
    async fn test_get_error_summary_collects_endpoints() {
        let db = setup_test_db().await;

        let base_time = Utc::now().timestamp();
        db.insert_error_event(&create_test_error(
            base_time,
            "Timeout",
            "timeout_error",
            "/api/users",
        ))
        .await
        .unwrap();
        db.insert_error_event(&create_test_error(
            base_time + 1,
            "Timeout",
            "timeout_error",
            "/api/orders",
        ))
        .await
        .unwrap();
        db.insert_error_event(&create_test_error(
            base_time + 2,
            "Timeout",
            "timeout_error",
            "/api/products",
        ))
        .await
        .unwrap();

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 60),
            ..Default::default()
        };

        let summary = db.get_error_summary(&filter, 10).await.unwrap();

        assert_eq!(summary.len(), 1);
        assert_eq!(summary[0].count, 3);
        assert_eq!(summary[0].endpoints.len(), 3);
        assert!(summary[0].endpoints.contains(&"/api/users".to_string()));
        assert!(summary[0].endpoints.contains(&"/api/orders".to_string()));
        assert!(summary[0].endpoints.contains(&"/api/products".to_string()));
    }

    #[tokio::test]
    async fn test_get_error_summary_respects_limit() {
        let db = setup_test_db().await;

        let base_time = Utc::now().timestamp();
        for i in 0..5 {
            db.insert_error_event(&create_test_error(
                base_time + i,
                &format!("Error{i}"),
                "server_error",
                "/api/test",
            ))
            .await
            .unwrap();
        }

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 60),
            ..Default::default()
        };

        let summary = db.get_error_summary(&filter, 3).await.unwrap();

        assert_eq!(summary.len(), 3);
    }

    #[tokio::test]
    async fn test_get_error_summary_tracks_last_occurrence() {
        let db = setup_test_db().await;

        // Insert out of order; the summary should keep the latest timestamp.
        let base_time = 1700000000i64;
        db.insert_error_event(&create_test_error(
            base_time,
            "TestError",
            "server_error",
            "/api/test",
        ))
        .await
        .unwrap();
        db.insert_error_event(&create_test_error(
            base_time + 100,
            "TestError",
            "server_error",
            "/api/test",
        ))
        .await
        .unwrap();
        db.insert_error_event(&create_test_error(
            base_time + 50,
            "TestError",
            "server_error",
            "/api/test",
        ))
        .await
        .unwrap();

        let filter = AnalyticsFilter {
            start_time: Some(base_time - 60),
            end_time: Some(base_time + 200),
            ..Default::default()
        };

        let summary = db.get_error_summary(&filter, 10).await.unwrap();

        assert_eq!(summary.len(), 1);
        assert_eq!(summary[0].last_occurrence.timestamp(), base_time + 100);
    }
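
    // Hedged sketch (new test, not in the original suite): exercises the
    // optional `workspace_id` filter of `get_top_protocols`. This assumes
    // `insert_minute_aggregate` persists the `workspace_id` column; adjust
    // the setup if it does not.
    #[tokio::test]
    async fn test_get_top_protocols_filters_by_workspace() {
        let db = setup_test_db().await;

        let now = Utc::now().timestamp();
        let mut agg = create_test_aggregate(now, "http", 100, 0, 1000.0);
        agg.workspace_id = Some("ws-1".to_string());
        db.insert_minute_aggregate(&agg).await.unwrap();

        // The matching workspace returns the row; a different one returns none.
        let matching = db.get_top_protocols(5, Some("ws-1")).await.unwrap();
        assert_eq!(matching.len(), 1);
        let other = db.get_top_protocols(5, Some("ws-2")).await.unwrap();
        assert!(other.is_empty());
    }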
}