use serde::{Deserialize, Serialize};
use std::cmp::Reverse;
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime};
use tokio::sync::RwLock;
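// Performance insights for GraphQL execution: per-request traces built via
// TracingSession, rolling per-query statistics and percentiles, slow-query
// insights, tuning recommendations, and a Prometheus text-format export.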
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum MetricType {
ExecutionTime,
ParseTime,
ValidationTime,
ResolutionTime,
SerializationTime,
DatabaseTime,
CacheMetric,
MemoryUsage,
CpuUsage,
Throughput,
ErrorRate,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum AggregationPeriod {
Minute,
Hour,
Day,
Week,
}
impl AggregationPeriod {
#[allow(dead_code)]
pub fn duration(&self) -> Duration {
match self {
AggregationPeriod::Minute => Duration::from_secs(60),
AggregationPeriod::Hour => Duration::from_secs(3600),
AggregationPeriod::Day => Duration::from_secs(86400),
AggregationPeriod::Week => Duration::from_secs(604800),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueryTrace {
pub trace_id: String,
pub query_fingerprint: String,
pub operation_name: Option<String>,
pub operation_type: String,
pub started_at: SystemTime,
pub total_duration_ms: u64,
pub phases: Vec<PhaseTrace>,
pub field_traces: Vec<FieldTrace>,
pub variables: HashMap<String, String>,
pub result_size_bytes: usize,
pub error_count: u32,
pub cache_hit: bool,
pub client_info: Option<ClientInfo>,
}
impl QueryTrace {
pub fn new(query_fingerprint: &str, operation_type: &str) -> Self {
Self {
trace_id: uuid::Uuid::new_v4().to_string(),
query_fingerprint: query_fingerprint.to_string(),
operation_name: None,
operation_type: operation_type.to_string(),
started_at: SystemTime::now(),
total_duration_ms: 0,
phases: Vec::new(),
field_traces: Vec::new(),
variables: HashMap::new(),
result_size_bytes: 0,
error_count: 0,
cache_hit: false,
client_info: None,
}
}
pub fn with_operation_name(mut self, name: &str) -> Self {
self.operation_name = Some(name.to_string());
self
}
pub fn add_phase(&mut self, phase: PhaseTrace) {
self.phases.push(phase);
}
pub fn add_field_trace(&mut self, trace: FieldTrace) {
self.field_traces.push(trace);
}
pub fn finalize(&mut self, duration_ms: u64) {
self.total_duration_ms = duration_ms;
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PhaseTrace {
pub name: String,
pub duration_ms: u64,
pub start_offset_ms: u64,
pub details: HashMap<String, String>,
}
impl PhaseTrace {
pub fn new(name: &str, duration_ms: u64, start_offset_ms: u64) -> Self {
Self {
name: name.to_string(),
duration_ms,
start_offset_ms,
details: HashMap::new(),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FieldTrace {
pub path: String,
pub parent_type: String,
pub field_name: String,
pub return_type: String,
pub duration_ms: u64,
pub start_offset_ms: u64,
pub is_resolver: bool,
pub error: Option<String>,
}
impl FieldTrace {
pub fn new(path: &str, parent_type: &str, field_name: &str) -> Self {
Self {
path: path.to_string(),
parent_type: parent_type.to_string(),
field_name: field_name.to_string(),
return_type: String::new(),
duration_ms: 0,
start_offset_ms: 0,
is_resolver: false,
error: None,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClientInfo {
pub name: Option<String>,
pub version: Option<String>,
pub ip_hash: Option<String>,
pub user_agent: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AggregatedMetrics {
pub bucket_start: SystemTime,
pub bucket_end: SystemTime,
pub request_count: u64,
pub error_count: u64,
pub avg_duration_ms: f64,
pub p50_duration_ms: u64,
pub p95_duration_ms: u64,
pub p99_duration_ms: u64,
pub max_duration_ms: u64,
pub min_duration_ms: u64,
pub cache_hit_rate: f64,
pub throughput: f64,
pub total_bytes: u64,
}
impl Default for AggregatedMetrics {
fn default() -> Self {
let now = SystemTime::now();
Self {
bucket_start: now,
bucket_end: now,
request_count: 0,
error_count: 0,
avg_duration_ms: 0.0,
p50_duration_ms: 0,
p95_duration_ms: 0,
p99_duration_ms: 0,
max_duration_ms: 0,
min_duration_ms: u64::MAX,
cache_hit_rate: 0.0,
throughput: 0.0,
total_bytes: 0,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueryStats {
pub fingerprint: String,
pub operation_name: Option<String>,
pub total_executions: u64,
pub error_count: u64,
pub avg_duration_ms: f64,
pub p95_duration_ms: u64,
pub p99_duration_ms: u64,
pub max_duration_ms: u64,
pub cache_hit_rate: f64,
pub avg_result_size: f64,
pub first_seen: SystemTime,
pub last_seen: SystemTime,
}
impl QueryStats {
fn new(fingerprint: &str) -> Self {
let now = SystemTime::now();
Self {
fingerprint: fingerprint.to_string(),
operation_name: None,
total_executions: 0,
error_count: 0,
avg_duration_ms: 0.0,
p95_duration_ms: 0,
p99_duration_ms: 0,
max_duration_ms: 0,
cache_hit_rate: 0.0,
avg_result_size: 0.0,
first_seen: now,
last_seen: now,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceInsight {
pub id: String,
pub insight_type: InsightType,
pub severity: Severity,
pub title: String,
pub description: String,
pub affected_queries: Vec<String>,
pub recommendation: Option<String>,
pub detected_at: SystemTime,
pub supporting_data: HashMap<String, String>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum InsightType {
SlowQuery,
HighErrorRate,
Regression,
Optimization,
NPlusOne,
LowCacheHit,
HighMemory,
TrafficAnomaly,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum Severity {
Info,
Warning,
Critical,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertConfig {
pub name: String,
pub metric_type: MetricType,
pub threshold: f64,
pub operator: ComparisonOperator,
pub duration: Duration,
pub enabled: bool,
pub channels: Vec<String>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ComparisonOperator {
GreaterThan,
LessThan,
GreaterThanOrEqual,
LessThanOrEqual,
Equal,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DashboardConfig {
pub trace_retention: Duration,
pub max_traces: usize,
pub aggregation_periods: Vec<AggregationPeriod>,
pub slow_query_threshold_ms: u64,
pub enable_field_tracing: bool,
pub trace_sample_rate: f64,
pub alerts: Vec<AlertConfig>,
}
impl Default for DashboardConfig {
fn default() -> Self {
Self {
            trace_retention: Duration::from_secs(86400),
            max_traces: 10000,
aggregation_periods: vec![
AggregationPeriod::Minute,
AggregationPeriod::Hour,
AggregationPeriod::Day,
],
slow_query_threshold_ms: 1000,
enable_field_tracing: true,
            trace_sample_rate: 1.0,
            alerts: Vec::new(),
}
}
}
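// In-memory dashboard state. All mutation goes through the async RwLock held by
// PerformanceInsightsDashboard; traces and insights are bounded buffers, and the
// per-query duration samples back the percentile calculations.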
struct DashboardState {
traces: VecDeque<QueryTrace>,
query_stats: HashMap<String, QueryStats>,
duration_samples: HashMap<String, Vec<u64>>,
aggregations: HashMap<AggregationPeriod, VecDeque<AggregatedMetrics>>,
current_period_metrics: HashMap<AggregationPeriod, Vec<u64>>,
insights: Vec<PerformanceInsight>,
triggered_alerts: Vec<(SystemTime, String, String)>,
started_at: Instant,
}
impl DashboardState {
fn new() -> Self {
Self {
traces: VecDeque::new(),
query_stats: HashMap::new(),
duration_samples: HashMap::new(),
aggregations: HashMap::new(),
current_period_metrics: HashMap::new(),
insights: Vec::new(),
triggered_alerts: Vec::new(),
started_at: Instant::now(),
}
}
}
pub struct PerformanceInsightsDashboard {
config: DashboardConfig,
state: Arc<RwLock<DashboardState>>,
}
impl PerformanceInsightsDashboard {
pub fn new(config: DashboardConfig) -> Self {
Self {
config,
state: Arc::new(RwLock::new(DashboardState::new())),
}
}
pub async fn record_trace(&self, trace: QueryTrace) {
let mut state = self.state.write().await;
let samples = state
.duration_samples
.entry(trace.query_fingerprint.clone())
.or_default();
samples.push(trace.total_duration_ms);
if samples.len() > 1000 {
samples.drain(0..100);
}
        // Recompute rolling statistics from the retained samples. Sorting a
        // clone keeps the insertion-ordered sample buffer intact.
        let sum: u64 = samples.iter().sum();
        let avg_duration = sum as f64 / samples.len() as f64;
        let mut sorted = samples.clone();
        sorted.sort_unstable();
        let percentile = |q: f64| {
            let idx = ((sorted.len() as f64 * q) as usize).min(sorted.len() - 1);
            sorted[idx]
        };
        let p95_duration = percentile(0.95);
        let p99_duration = percentile(0.99);
        let stats = state
            .query_stats
            .entry(trace.query_fingerprint.clone())
            .or_insert_with(|| QueryStats::new(&trace.query_fingerprint));
        stats.total_executions += 1;
        stats.error_count += trace.error_count as u64;
        stats.last_seen = SystemTime::now();
        if stats.operation_name.is_none() {
            stats.operation_name = trace.operation_name.clone();
        }
        stats.avg_duration_ms = avg_duration;
        stats.p95_duration_ms = p95_duration;
        stats.p99_duration_ms = p99_duration;
        stats.max_duration_ms = stats.max_duration_ms.max(trace.total_duration_ms);
        // Maintain running averages for cache hits and result size so the
        // recommendation logic has real data to evaluate.
        let n = stats.total_executions as f64;
        stats.cache_hit_rate =
            (stats.cache_hit_rate * (n - 1.0) + if trace.cache_hit { 1.0 } else { 0.0 }) / n;
        stats.avg_result_size =
            (stats.avg_result_size * (n - 1.0) + trace.result_size_bytes as f64) / n;
for period in &self.config.aggregation_periods {
let metrics = state.current_period_metrics.entry(*period).or_default();
metrics.push(trace.total_duration_ms);
}
state.traces.push_back(trace.clone());
while state.traces.len() > self.config.max_traces {
state.traces.pop_front();
}
if trace.total_duration_ms > self.config.slow_query_threshold_ms {
self.detect_slow_query(&mut state, &trace);
}
}
fn detect_slow_query(&self, state: &mut DashboardState, trace: &QueryTrace) {
let insight = PerformanceInsight {
id: uuid::Uuid::new_v4().to_string(),
insight_type: InsightType::SlowQuery,
severity: if trace.total_duration_ms > self.config.slow_query_threshold_ms * 5 {
Severity::Critical
} else {
Severity::Warning
},
title: "Slow Query Detected".to_string(),
description: format!(
"Query took {}ms (threshold: {}ms)",
trace.total_duration_ms, self.config.slow_query_threshold_ms
),
affected_queries: vec![trace.query_fingerprint.clone()],
recommendation: Some(
"Consider adding indexes, caching, or optimizing resolvers".to_string(),
),
detected_at: SystemTime::now(),
supporting_data: {
let mut data = HashMap::new();
data.insert(
"duration_ms".to_string(),
trace.total_duration_ms.to_string(),
);
data.insert("operation".to_string(), trace.operation_type.clone());
data
},
};
state.insights.push(insight);
if state.insights.len() > 1000 {
state.insights.drain(0..100);
}
}
pub async fn get_aggregated_metrics(
&self,
period: AggregationPeriod,
) -> Vec<AggregatedMetrics> {
let state = self.state.read().await;
state
.aggregations
.get(&period)
.map(|v| v.iter().cloned().collect())
.unwrap_or_default()
}
pub async fn get_query_stats(&self) -> Vec<QueryStats> {
let state = self.state.read().await;
let mut stats: Vec<_> = state.query_stats.values().cloned().collect();
stats.sort_by_key(|s| Reverse(s.total_executions));
stats
}
pub async fn get_slowest_queries(&self, limit: usize) -> Vec<QueryStats> {
let state = self.state.read().await;
let mut stats: Vec<_> = state.query_stats.values().cloned().collect();
stats.sort_by_key(|s| Reverse(s.p99_duration_ms));
stats.truncate(limit);
stats
}
pub async fn get_most_frequent_queries(&self, limit: usize) -> Vec<QueryStats> {
let state = self.state.read().await;
let mut stats: Vec<_> = state.query_stats.values().cloned().collect();
stats.sort_by_key(|s| Reverse(s.total_executions));
stats.truncate(limit);
stats
}
pub async fn get_recent_traces(&self, limit: usize) -> Vec<QueryTrace> {
let state = self.state.read().await;
state.traces.iter().rev().take(limit).cloned().collect()
}
pub async fn get_trace(&self, trace_id: &str) -> Option<QueryTrace> {
let state = self.state.read().await;
state
.traces
.iter()
.find(|t| t.trace_id == trace_id)
.cloned()
}
pub async fn get_insights(&self) -> Vec<PerformanceInsight> {
let state = self.state.read().await;
state.insights.clone()
}
pub async fn get_insights_by_severity(&self, severity: Severity) -> Vec<PerformanceInsight> {
let state = self.state.read().await;
state
.insights
.iter()
.filter(|i| i.severity == severity)
.cloned()
.collect()
}
pub async fn generate_recommendations(&self) -> Vec<Recommendation> {
let state = self.state.read().await;
let mut recommendations = Vec::new();
for stats in state.query_stats.values() {
if stats.p95_duration_ms > self.config.slow_query_threshold_ms {
recommendations.push(Recommendation {
id: uuid::Uuid::new_v4().to_string(),
category: RecommendationCategory::Performance,
priority: Priority::High,
title: format!("Optimize slow query: {:?}", stats.operation_name),
description: format!(
"P95 latency is {}ms. Consider adding caching or optimizing resolvers.",
stats.p95_duration_ms
),
affected_queries: vec![stats.fingerprint.clone()],
estimated_impact: Some("Could improve response time by 50%+".to_string()),
});
}
if stats.cache_hit_rate < 0.5 && stats.total_executions > 100 {
recommendations.push(Recommendation {
id: uuid::Uuid::new_v4().to_string(),
category: RecommendationCategory::Caching,
priority: Priority::Medium,
title: format!("Improve caching for: {:?}", stats.operation_name),
description: format!(
"Cache hit rate is {:.1}%. Consider adjusting cache TTL or query patterns.",
stats.cache_hit_rate * 100.0
),
affected_queries: vec![stats.fingerprint.clone()],
estimated_impact: Some("Could reduce database load significantly".to_string()),
});
}
}
let total_executions: u64 = state.query_stats.values().map(|s| s.total_executions).sum();
let total_errors: u64 = state.query_stats.values().map(|s| s.error_count).sum();
if total_executions > 0 {
let error_rate = total_errors as f64 / total_executions as f64;
if error_rate > 0.01 {
recommendations.push(Recommendation {
id: uuid::Uuid::new_v4().to_string(),
category: RecommendationCategory::Reliability,
priority: Priority::High,
title: "High overall error rate".to_string(),
description: format!(
"Error rate is {:.2}%. Investigate error sources.",
error_rate * 100.0
),
affected_queries: Vec::new(),
estimated_impact: Some(
"Improving reliability will enhance user experience".to_string(),
),
});
}
}
recommendations
}
pub async fn get_summary(&self) -> DashboardSummary {
let state = self.state.read().await;
let total_executions: u64 = state.query_stats.values().map(|s| s.total_executions).sum();
let total_errors: u64 = state.query_stats.values().map(|s| s.error_count).sum();
let all_durations: Vec<u64> = state
.duration_samples
.values()
.flat_map(|v| v.iter())
.copied()
.collect();
let avg_duration = if !all_durations.is_empty() {
all_durations.iter().sum::<u64>() as f64 / all_durations.len() as f64
} else {
0.0
};
let uptime = state.started_at.elapsed();
DashboardSummary {
total_queries: total_executions,
total_errors,
error_rate: if total_executions > 0 {
total_errors as f64 / total_executions as f64
} else {
0.0
},
avg_duration_ms: avg_duration,
unique_queries: state.query_stats.len(),
active_insights: state.insights.len(),
critical_insights: state
.insights
.iter()
.filter(|i| i.severity == Severity::Critical)
.count(),
uptime_seconds: uptime.as_secs(),
}
}
pub async fn export_prometheus_metrics(&self) -> String {
let summary = self.get_summary().await;
let mut output = String::new();
output.push_str("# HELP graphql_queries_total Total number of GraphQL queries\n");
output.push_str("# TYPE graphql_queries_total counter\n");
output.push_str(&format!(
"graphql_queries_total {}\n",
summary.total_queries
));
output.push_str("# HELP graphql_errors_total Total number of GraphQL errors\n");
output.push_str("# TYPE graphql_errors_total counter\n");
output.push_str(&format!("graphql_errors_total {}\n", summary.total_errors));
output.push_str("# HELP graphql_duration_ms_avg Average query duration in milliseconds\n");
output.push_str("# TYPE graphql_duration_ms_avg gauge\n");
output.push_str(&format!(
"graphql_duration_ms_avg {:.2}\n",
summary.avg_duration_ms
));
output.push_str("# HELP graphql_unique_queries Number of unique query patterns\n");
output.push_str("# TYPE graphql_unique_queries gauge\n");
output.push_str(&format!(
"graphql_unique_queries {}\n",
summary.unique_queries
));
output
}
pub async fn cleanup(&self) {
let mut state = self.state.write().await;
let retention = self.config.trace_retention;
let cutoff = SystemTime::now()
.checked_sub(retention)
.unwrap_or(SystemTime::UNIX_EPOCH);
state.traces.retain(|t| t.started_at > cutoff);
state.insights.retain(|i| i.detected_at > cutoff);
state.triggered_alerts.retain(|(t, _, _)| *t > cutoff);
}
}
impl Default for PerformanceInsightsDashboard {
fn default() -> Self {
Self::new(DashboardConfig::default())
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Recommendation {
pub id: String,
pub category: RecommendationCategory,
pub priority: Priority,
pub title: String,
pub description: String,
pub affected_queries: Vec<String>,
pub estimated_impact: Option<String>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum RecommendationCategory {
Performance,
Caching,
Reliability,
Security,
Schema,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum Priority {
Low,
Medium,
High,
Critical,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DashboardSummary {
pub total_queries: u64,
pub total_errors: u64,
pub error_rate: f64,
pub avg_duration_ms: f64,
pub unique_queries: usize,
pub active_insights: usize,
pub critical_insights: usize,
pub uptime_seconds: u64,
}
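// Helper for building a QueryTrace incrementally during execution: phases are
// timed between start_phase/end_phase calls, field resolutions and errors are
// recorded as they happen, and finish() stamps the total duration.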
pub struct TracingSession {
trace: QueryTrace,
start_time: Instant,
phase_start: Option<(String, Instant)>,
}
impl TracingSession {
pub fn start(query_fingerprint: &str, operation_type: &str) -> Self {
Self {
trace: QueryTrace::new(query_fingerprint, operation_type),
start_time: Instant::now(),
phase_start: None,
}
}
pub fn set_operation_name(&mut self, name: &str) {
self.trace.operation_name = Some(name.to_string());
}
pub fn start_phase(&mut self, name: &str) {
self.phase_start = Some((name.to_string(), Instant::now()));
}
pub fn end_phase(&mut self) {
if let Some((name, start)) = self.phase_start.take() {
let duration = start.elapsed().as_millis() as u64;
            // Measure the offset directly between the two instants to avoid
            // underflow from subtracting two separately sampled elapsed times.
            let start_offset = start.duration_since(self.start_time).as_millis() as u64;
self.trace
.add_phase(PhaseTrace::new(&name, duration, start_offset));
}
}
pub fn record_field(
&mut self,
path: &str,
parent_type: &str,
field_name: &str,
duration_ms: u64,
) {
let mut field_trace = FieldTrace::new(path, parent_type, field_name);
field_trace.duration_ms = duration_ms;
        field_trace.start_offset_ms =
            (self.start_time.elapsed().as_millis() as u64).saturating_sub(duration_ms);
self.trace.add_field_trace(field_trace);
}
pub fn set_cache_hit(&mut self, hit: bool) {
self.trace.cache_hit = hit;
}
pub fn set_result_size(&mut self, size: usize) {
self.trace.result_size_bytes = size;
}
pub fn record_error(&mut self) {
self.trace.error_count += 1;
}
pub fn finish(mut self) -> QueryTrace {
let duration = self.start_time.elapsed().as_millis() as u64;
self.trace.finalize(duration);
self.trace
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_dashboard_creation() {
let dashboard = PerformanceInsightsDashboard::default();
let summary = dashboard.get_summary().await;
assert_eq!(summary.total_queries, 0);
}
#[tokio::test]
async fn test_record_trace() {
let dashboard = PerformanceInsightsDashboard::default();
let mut trace = QueryTrace::new("{ users { id } }", "query");
trace.total_duration_ms = 50;
trace.result_size_bytes = 1024;
dashboard.record_trace(trace).await;
let summary = dashboard.get_summary().await;
assert_eq!(summary.total_queries, 1);
assert_eq!(summary.unique_queries, 1);
}
#[tokio::test]
async fn test_slow_query_detection() {
let config = DashboardConfig {
slow_query_threshold_ms: 100,
..Default::default()
};
let dashboard = PerformanceInsightsDashboard::new(config);
let mut trace = QueryTrace::new("{ slowQuery }", "query");
trace.total_duration_ms = 500;
dashboard.record_trace(trace).await;
let insights = dashboard.get_insights().await;
assert!(!insights.is_empty());
assert_eq!(insights[0].insight_type, InsightType::SlowQuery);
}
#[tokio::test]
async fn test_query_stats() {
let dashboard = PerformanceInsightsDashboard::default();
for i in 0..10 {
let mut trace = QueryTrace::new("{ users { id } }", "query");
trace.total_duration_ms = 50 + i * 10;
dashboard.record_trace(trace).await;
}
let stats = dashboard.get_query_stats().await;
assert_eq!(stats.len(), 1);
assert_eq!(stats[0].total_executions, 10);
}
#[tokio::test]
async fn test_slowest_queries() {
let dashboard = PerformanceInsightsDashboard::default();
for (i, duration) in [100, 500, 200].iter().enumerate() {
let mut trace = QueryTrace::new(&format!("query{}", i), "query");
trace.total_duration_ms = *duration;
dashboard.record_trace(trace).await;
}
let slowest = dashboard.get_slowest_queries(2).await;
assert_eq!(slowest.len(), 2);
assert!(slowest[0].p99_duration_ms >= slowest[1].p99_duration_ms);
}
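    // A minimal sketch exercising the frequency- and recency-oriented
    // accessors; the query strings are illustrative fingerprints only.
    #[tokio::test]
    async fn test_most_frequent_and_recent_queries() {
        let dashboard = PerformanceInsightsDashboard::default();
        for _ in 0..3 {
            dashboard
                .record_trace(QueryTrace::new("{ popular }", "query"))
                .await;
        }
        dashboard
            .record_trace(QueryTrace::new("{ rare }", "query"))
            .await;
        let frequent = dashboard.get_most_frequent_queries(1).await;
        assert_eq!(frequent.len(), 1);
        assert_eq!(frequent[0].fingerprint, "{ popular }");
        let recent = dashboard.get_recent_traces(2).await;
        assert_eq!(recent.len(), 2);
    }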
#[tokio::test]
async fn test_tracing_session() {
let mut session = TracingSession::start("{ users }", "query");
session.set_operation_name("GetUsers");
session.start_phase("parse");
tokio::time::sleep(Duration::from_millis(1)).await;
session.end_phase();
session.start_phase("execute");
tokio::time::sleep(Duration::from_millis(1)).await;
session.end_phase();
session.set_cache_hit(true);
session.set_result_size(1024);
let trace = session.finish();
assert_eq!(trace.operation_name, Some("GetUsers".to_string()));
assert_eq!(trace.phases.len(), 2);
assert!(trace.cache_hit);
}
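    // A minimal sketch of per-field tracing and error counting on a session;
    // the field path and type names are illustrative.
    #[tokio::test]
    async fn test_tracing_session_fields_and_errors() {
        let mut session = TracingSession::start("{ users { id } }", "query");
        session.record_field("users.0.id", "User", "id", 0);
        session.record_error();
        let trace = session.finish();
        assert_eq!(trace.field_traces.len(), 1);
        assert_eq!(trace.field_traces[0].field_name, "id");
        assert_eq!(trace.error_count, 1);
    }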
#[tokio::test]
async fn test_prometheus_export() {
let dashboard = PerformanceInsightsDashboard::default();
let mut trace = QueryTrace::new("{ users }", "query");
trace.total_duration_ms = 50;
dashboard.record_trace(trace).await;
let metrics = dashboard.export_prometheus_metrics().await;
assert!(metrics.contains("graphql_queries_total 1"));
}
#[tokio::test]
async fn test_recommendations() {
let config = DashboardConfig {
slow_query_threshold_ms: 50,
..Default::default()
};
let dashboard = PerformanceInsightsDashboard::new(config);
for _ in 0..10 {
let mut trace = QueryTrace::new("{ slowQuery }", "query");
trace.total_duration_ms = 200;
dashboard.record_trace(trace).await;
}
let recommendations = dashboard.generate_recommendations().await;
assert!(!recommendations.is_empty());
}
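    // A minimal sketch of the reliability recommendation path, assuming a
    // trace that carries an error pushes the overall error rate above the
    // 1% threshold.
    #[tokio::test]
    async fn test_error_rate_recommendation() {
        let dashboard = PerformanceInsightsDashboard::default();
        let mut trace = QueryTrace::new("{ failing }", "query");
        trace.error_count = 1;
        dashboard.record_trace(trace).await;
        let recommendations = dashboard.generate_recommendations().await;
        assert!(recommendations
            .iter()
            .any(|r| r.category == RecommendationCategory::Reliability));
    }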
#[tokio::test]
async fn test_get_trace_by_id() {
let dashboard = PerformanceInsightsDashboard::default();
let trace = QueryTrace::new("{ users }", "query");
let trace_id = trace.trace_id.clone();
dashboard.record_trace(trace).await;
let retrieved = dashboard.get_trace(&trace_id).await;
assert!(retrieved.is_some());
assert_eq!(retrieved.expect("should succeed").trace_id, trace_id);
}
#[tokio::test]
async fn test_insights_by_severity() {
let config = DashboardConfig {
slow_query_threshold_ms: 100,
..Default::default()
};
let dashboard = PerformanceInsightsDashboard::new(config);
let mut trace = QueryTrace::new("{ verySlowQuery }", "query");
trace.total_duration_ms = 600;
dashboard.record_trace(trace).await;
let mut trace = QueryTrace::new("{ slowQuery }", "query");
trace.total_duration_ms = 200;
dashboard.record_trace(trace).await;
let critical = dashboard.get_insights_by_severity(Severity::Critical).await;
assert!(!critical.is_empty());
let warnings = dashboard.get_insights_by_severity(Severity::Warning).await;
assert!(!warnings.is_empty());
}
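    // A minimal sketch of retention-based cleanup, assuming a zero-length
    // retention window so freshly recorded traces fall outside it.
    #[tokio::test]
    async fn test_cleanup_drops_expired_traces() {
        let config = DashboardConfig {
            trace_retention: Duration::from_secs(0),
            ..Default::default()
        };
        let dashboard = PerformanceInsightsDashboard::new(config);
        dashboard
            .record_trace(QueryTrace::new("{ users }", "query"))
            .await;
        tokio::time::sleep(Duration::from_millis(5)).await;
        dashboard.cleanup().await;
        assert!(dashboard.get_recent_traces(10).await.is_empty());
    }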
}