// fraiseql_server/metrics_server.rs
//! Canonical metrics implementation for `fraiseql-server`.
//!
//! Use [`MetricsCollector`] to instrument request handling.
//! The previous `observability/metrics.rs` ghost layer has been removed.
//!
//! Tracks:
//! - GraphQL query execution time
//! - Query success/error rates
//! - Database query performance
//! - Connection pool statistics
//! - HTTP request/response metrics

use std::{
    fmt::Write as _,
    sync::{
        Arc,
        atomic::{AtomicU64, Ordering},
    },
    time::Instant,
};

use dashmap::DashMap;
23
/// Metrics collector for the server.
///
/// Every counter is an `Arc<AtomicU64>`, so the collector is cheap to
/// `Clone` and all clones share the same underlying values. Counters are
/// updated with relaxed atomics (see the `record_*` methods below), so
/// reads are eventually consistent across threads. All duration fields
/// accumulate **microseconds**.
#[derive(Debug, Clone)]
pub struct MetricsCollector {
    /// Total GraphQL queries executed
    pub queries_total: Arc<AtomicU64>,

    /// Total successful queries
    pub queries_success: Arc<AtomicU64>,

    /// Total failed queries
    pub queries_error: Arc<AtomicU64>,

    /// Total query execution time (microseconds)
    pub queries_duration_us: Arc<AtomicU64>,

    /// Total database queries executed
    pub db_queries_total: Arc<AtomicU64>,

    /// Total database query time (microseconds)
    pub db_queries_duration_us: Arc<AtomicU64>,

    /// Total validation errors
    pub validation_errors_total: Arc<AtomicU64>,

    /// Total parse errors
    pub parse_errors_total: Arc<AtomicU64>,

    /// Total execution errors
    pub execution_errors_total: Arc<AtomicU64>,

    /// Total HTTP requests
    pub http_requests_total: Arc<AtomicU64>,

    /// Total HTTP 2xx responses
    pub http_responses_2xx: Arc<AtomicU64>,

    /// Total HTTP 4xx responses
    pub http_responses_4xx: Arc<AtomicU64>,

    /// Total HTTP 5xx responses
    pub http_responses_5xx: Arc<AtomicU64>,

    /// Cache hits
    pub cache_hits: Arc<AtomicU64>,

    /// Cache misses
    pub cache_misses: Arc<AtomicU64>,

    // Federation Metrics
    /// Federation entity resolutions (total)
    pub federation_entity_resolutions_total: Arc<AtomicU64>,

    /// Federation entity resolutions (errors)
    pub federation_entity_resolutions_errors: Arc<AtomicU64>,

    /// Federation entity resolution duration (microseconds)
    pub federation_entity_resolution_duration_us: Arc<AtomicU64>,

    /// Federation subgraph requests (total)
    pub federation_subgraph_requests_total: Arc<AtomicU64>,

    /// Federation subgraph requests (errors)
    pub federation_subgraph_requests_errors: Arc<AtomicU64>,

    /// Federation subgraph request duration (microseconds)
    pub federation_subgraph_request_duration_us: Arc<AtomicU64>,

    /// Federation mutations (total)
    pub federation_mutations_total: Arc<AtomicU64>,

    /// Federation mutations (errors)
    pub federation_mutations_errors: Arc<AtomicU64>,

    /// Federation mutation duration (microseconds)
    pub federation_mutation_duration_us: Arc<AtomicU64>,

    /// Federation entity cache hits
    pub federation_entity_cache_hits: Arc<AtomicU64>,

    /// Federation entity cache misses
    pub federation_entity_cache_misses: Arc<AtomicU64>,

    /// Federation errors (aggregate across resolutions, subgraph requests,
    /// and mutations — incremented alongside each specific error counter)
    pub federation_errors_total: Arc<AtomicU64>,

    /// Per-operation metrics (histogram + error counter)
    pub operation_metrics: Arc<OperationMetricsRegistry>,

    /// HTTP request duration histogram
    pub http_request_duration: Arc<Histogram>,

    /// Database query duration histogram
    pub db_query_duration: Arc<Histogram>,

    /// Total successful schema reloads
    pub schema_reloads_total: Arc<AtomicU64>,

    /// Total failed schema reload attempts
    pub schema_reload_errors_total: Arc<AtomicU64>,
}
124
125impl MetricsCollector {
126    /// Create new metrics collector.
127    #[must_use]
128    pub fn new() -> Self {
129        Self {
130            queries_total: Arc::new(AtomicU64::new(0)),
131            queries_success: Arc::new(AtomicU64::new(0)),
132            queries_error: Arc::new(AtomicU64::new(0)),
133            queries_duration_us: Arc::new(AtomicU64::new(0)),
134            db_queries_total: Arc::new(AtomicU64::new(0)),
135            db_queries_duration_us: Arc::new(AtomicU64::new(0)),
136            validation_errors_total: Arc::new(AtomicU64::new(0)),
137            parse_errors_total: Arc::new(AtomicU64::new(0)),
138            execution_errors_total: Arc::new(AtomicU64::new(0)),
139            http_requests_total: Arc::new(AtomicU64::new(0)),
140            http_responses_2xx: Arc::new(AtomicU64::new(0)),
141            http_responses_4xx: Arc::new(AtomicU64::new(0)),
142            http_responses_5xx: Arc::new(AtomicU64::new(0)),
143            cache_hits: Arc::new(AtomicU64::new(0)),
144            cache_misses: Arc::new(AtomicU64::new(0)),
145            federation_entity_resolutions_total: Arc::new(AtomicU64::new(0)),
146            federation_entity_resolutions_errors: Arc::new(AtomicU64::new(0)),
147            federation_entity_resolution_duration_us: Arc::new(AtomicU64::new(0)),
148            federation_subgraph_requests_total: Arc::new(AtomicU64::new(0)),
149            federation_subgraph_requests_errors: Arc::new(AtomicU64::new(0)),
150            federation_subgraph_request_duration_us: Arc::new(AtomicU64::new(0)),
151            federation_mutations_total: Arc::new(AtomicU64::new(0)),
152            federation_mutations_errors: Arc::new(AtomicU64::new(0)),
153            federation_mutation_duration_us: Arc::new(AtomicU64::new(0)),
154            federation_entity_cache_hits: Arc::new(AtomicU64::new(0)),
155            federation_entity_cache_misses: Arc::new(AtomicU64::new(0)),
156            federation_errors_total: Arc::new(AtomicU64::new(0)),
157            operation_metrics: Arc::new(OperationMetricsRegistry::default()),
158            http_request_duration: Arc::new(Histogram::new()),
159            db_query_duration: Arc::new(Histogram::new()),
160            schema_reloads_total: Arc::new(AtomicU64::new(0)),
161            schema_reload_errors_total: Arc::new(AtomicU64::new(0)),
162        }
163    }
164}
165
166impl MetricsCollector {
167    /// Record entity resolution completion (all strategies).
168    ///
169    /// # Arguments
170    ///
171    /// * `duration_us` - Resolution duration in microseconds
172    /// * `success` - Whether resolution succeeded
173    pub fn record_entity_resolution(&self, duration_us: u64, success: bool) {
174        self.federation_entity_resolutions_total.fetch_add(1, Ordering::Relaxed);
175        self.federation_entity_resolution_duration_us
176            .fetch_add(duration_us, Ordering::Relaxed);
177        if !success {
178            self.federation_entity_resolutions_errors.fetch_add(1, Ordering::Relaxed);
179            self.federation_errors_total.fetch_add(1, Ordering::Relaxed);
180        }
181    }
182
183    /// Record subgraph request completion.
184    ///
185    /// # Arguments
186    ///
187    /// * `duration_us` - Request duration in microseconds
188    /// * `success` - Whether request succeeded (HTTP 2xx)
189    pub fn record_subgraph_request(&self, duration_us: u64, success: bool) {
190        self.federation_subgraph_requests_total.fetch_add(1, Ordering::Relaxed);
191        self.federation_subgraph_request_duration_us
192            .fetch_add(duration_us, Ordering::Relaxed);
193        if !success {
194            self.federation_subgraph_requests_errors.fetch_add(1, Ordering::Relaxed);
195            self.federation_errors_total.fetch_add(1, Ordering::Relaxed);
196        }
197    }
198
199    /// Record federation mutation execution.
200    ///
201    /// # Arguments
202    ///
203    /// * `duration_us` - Mutation duration in microseconds
204    /// * `success` - Whether mutation succeeded
205    pub fn record_mutation(&self, duration_us: u64, success: bool) {
206        self.federation_mutations_total.fetch_add(1, Ordering::Relaxed);
207        self.federation_mutation_duration_us.fetch_add(duration_us, Ordering::Relaxed);
208        if !success {
209            self.federation_mutations_errors.fetch_add(1, Ordering::Relaxed);
210            self.federation_errors_total.fetch_add(1, Ordering::Relaxed);
211        }
212    }
213
214    /// Record entity cache hit.
215    pub fn record_entity_cache_hit(&self) {
216        self.federation_entity_cache_hits.fetch_add(1, Ordering::Relaxed);
217    }
218
219    /// Record entity cache miss.
220    pub fn record_entity_cache_miss(&self) {
221        self.federation_entity_cache_misses.fetch_add(1, Ordering::Relaxed);
222    }
223}
224
225impl Default for MetricsCollector {
226    fn default() -> Self {
227        Self::new()
228    }
229}
230
/// Histogram bucket upper bounds in microseconds.
/// Corresponds to: 1ms, 5ms, 10ms, 25ms, 50ms, 100ms, 250ms, 500ms, 1s, 2.5s, 5s.
const HISTOGRAM_BUCKET_BOUNDS_US: [u64; 11] = [
    1_000, 5_000, 10_000, 25_000, 50_000, 100_000, 250_000, 500_000, 1_000_000, 2_500_000,
    5_000_000,
];

/// Prometheus `le` labels matching [`HISTOGRAM_BUCKET_BOUNDS_US`], in seconds.
/// Must stay index-aligned with the bounds array above — render code pairs
/// them positionally.
const HISTOGRAM_LE_LABELS: [&str; 11] = [
    "0.001", "0.005", "0.01", "0.025", "0.05", "0.1", "0.25", "0.5", "1", "2.5", "5",
];
242
/// Per-operation metrics: count, total duration, error count, and histogram buckets.
#[derive(Debug)]
pub struct OperationMetrics {
    // Total recorded executions (success + error).
    count:         AtomicU64,
    // Sum of execution durations, in microseconds.
    duration_us:   AtomicU64,
    // Executions recorded with is_error == true.
    error_count:   AtomicU64,
    // Non-cumulative bucket counts, index-aligned with
    // HISTOGRAM_BUCKET_BOUNDS_US; cumulative sums are built at render time.
    bucket_counts: [AtomicU64; 11],
}
251
252impl OperationMetrics {
253    fn new() -> Self {
254        Self {
255            count:         AtomicU64::new(0),
256            duration_us:   AtomicU64::new(0),
257            error_count:   AtomicU64::new(0),
258            bucket_counts: std::array::from_fn(|_| AtomicU64::new(0)),
259        }
260    }
261
262    fn record(&self, duration_us: u64, is_error: bool) {
263        self.count.fetch_add(1, Ordering::Relaxed);
264        self.duration_us.fetch_add(duration_us, Ordering::Relaxed);
265        if is_error {
266            self.error_count.fetch_add(1, Ordering::Relaxed);
267        }
268        // Increment only the first (smallest) bucket whose bound >= duration.
269        // The cumulative sum is computed at render time.
270        for (i, &bound) in HISTOGRAM_BUCKET_BOUNDS_US.iter().enumerate() {
271            if duration_us <= bound {
272                self.bucket_counts[i].fetch_add(1, Ordering::Relaxed);
273                return;
274            }
275        }
276        // Duration exceeds all finite buckets — it only appears in +Inf
277    }
278}
279
/// Registry of per-operation metrics with cardinality guard.
///
/// Operations beyond `max_operations` are folded into an `__overflow__` bucket
/// to prevent unbounded label cardinality.
#[derive(Debug)]
pub struct OperationMetricsRegistry {
    // Concurrent map from canonical operation name to its metrics.
    operations:     DashMap<String, OperationMetrics>,
    // Cardinality cap: once `operations` holds this many names, new names
    // are folded into `overflow` instead of being inserted.
    max_operations: usize,
    // Aggregate metrics for all operations past the cap.
    overflow:       OperationMetrics,
}
290
291impl OperationMetricsRegistry {
292    /// Create a new registry with the given cardinality limit.
293    #[must_use]
294    pub fn new(max_operations: usize) -> Self {
295        Self {
296            operations: DashMap::new(),
297            max_operations,
298            overflow: OperationMetrics::new(),
299        }
300    }
301
302    /// Record a query execution for the given operation name.
303    pub fn record(&self, name: &str, duration_us: u64, is_error: bool) {
304        let canonical = if name.is_empty() {
305            "__anonymous__"
306        } else {
307            name
308        };
309
310        // Check if already tracked
311        if let Some(entry) = self.operations.get(canonical) {
312            entry.record(duration_us, is_error);
313            return;
314        }
315
316        // Check cardinality limit before inserting
317        if self.operations.len() >= self.max_operations {
318            self.overflow.record(duration_us, is_error);
319            return;
320        }
321
322        // Insert and record (race-safe: entry() handles concurrent inserts)
323        self.operations
324            .entry(canonical.to_owned())
325            .or_insert_with(OperationMetrics::new)
326            .record(duration_us, is_error);
327    }
328
329    /// Render all per-operation metrics in Prometheus text exposition format.
330    #[must_use]
331    pub fn to_prometheus_format(&self) -> String {
332        let mut out = String::new();
333
334        // Collect entries for deterministic output (sorted by name)
335        let mut entries: Vec<(String, u64, u64, u64, [u64; 11])> = self
336            .operations
337            .iter()
338            .map(|e| {
339                let buckets: [u64; 11] =
340                    std::array::from_fn(|i| e.value().bucket_counts[i].load(Ordering::Relaxed));
341                (
342                    e.key().clone(),
343                    e.value().count.load(Ordering::Relaxed),
344                    e.value().duration_us.load(Ordering::Relaxed),
345                    e.value().error_count.load(Ordering::Relaxed),
346                    buckets,
347                )
348            })
349            .collect();
350
351        // Add overflow if it has data
352        let overflow_count = self.overflow.count.load(Ordering::Relaxed);
353        if overflow_count > 0 {
354            let buckets: [u64; 11] =
355                std::array::from_fn(|i| self.overflow.bucket_counts[i].load(Ordering::Relaxed));
356            entries.push((
357                "__overflow__".to_owned(),
358                overflow_count,
359                self.overflow.duration_us.load(Ordering::Relaxed),
360                self.overflow.error_count.load(Ordering::Relaxed),
361                buckets,
362            ));
363        }
364
365        if entries.is_empty() {
366            return out;
367        }
368
369        entries.sort_by(|a, b| a.0.cmp(&b.0));
370
371        // Duration histogram
372        out.push_str(
373            "\n# HELP fraiseql_query_duration_seconds Per-operation query duration histogram\n\
374             # TYPE fraiseql_query_duration_seconds histogram\n",
375        );
376        for (name, count, duration_us, _, buckets) in &entries {
377            let mut cumulative: u64 = 0;
378            for (i, &bucket_count) in buckets.iter().enumerate() {
379                cumulative += bucket_count;
380                let _ = writeln!(
381                    out,
382                    "fraiseql_query_duration_seconds_bucket{{operation=\"{name}\",le=\"{}\"}} \
383                     {cumulative}",
384                    HISTOGRAM_LE_LABELS[i],
385                );
386            }
387            let _ = writeln!(
388                out,
389                "fraiseql_query_duration_seconds_bucket{{operation=\"{name}\",le=\"+Inf\"}} \
390                 {count}",
391            );
392            #[allow(clippy::cast_precision_loss)]
393            // Reason: precision loss is acceptable for metrics reporting
394            let sum_secs = *duration_us as f64 / 1_000_000.0;
395            let _ = writeln!(
396                out,
397                "fraiseql_query_duration_seconds_sum{{operation=\"{name}\"}} {sum_secs:.6}",
398            );
399            let _ = writeln!(
400                out,
401                "fraiseql_query_duration_seconds_count{{operation=\"{name}\"}} {count}",
402            );
403        }
404
405        // Error counter
406        out.push_str(
407            "\n# HELP fraiseql_query_errors_total Per-operation query error count\n\
408             # TYPE fraiseql_query_errors_total counter\n",
409        );
410        for (name, _, _, error_count, _) in &entries {
411            let _ =
412                writeln!(out, "fraiseql_query_errors_total{{operation=\"{name}\"}} {error_count}",);
413        }
414
415        out
416    }
417}
418
419impl Default for OperationMetricsRegistry {
420    fn default() -> Self {
421        Self::new(500)
422    }
423}
424
/// General-purpose histogram using the standard 11-bucket scheme.
///
/// Buckets are index-aligned with [`HISTOGRAM_BUCKET_BOUNDS_US`] and stored
/// non-cumulatively; cumulative values are computed when rendering.
#[derive(Debug)]
pub struct Histogram {
    // Total number of observations.
    count:         AtomicU64,
    // Sum of observed durations, in microseconds.
    sum_us:        AtomicU64,
    // Non-cumulative per-bucket observation counts.
    bucket_counts: [AtomicU64; 11],
}
432
433impl Histogram {
434    /// Create a new empty histogram.
435    #[must_use]
436    pub fn new() -> Self {
437        Self {
438            count:         AtomicU64::new(0),
439            sum_us:        AtomicU64::new(0),
440            bucket_counts: std::array::from_fn(|_| AtomicU64::new(0)),
441        }
442    }
443
444    /// Observe a duration in microseconds.
445    pub fn observe_us(&self, duration_us: u64) {
446        self.count.fetch_add(1, Ordering::Relaxed);
447        self.sum_us.fetch_add(duration_us, Ordering::Relaxed);
448        for (i, &bound) in HISTOGRAM_BUCKET_BOUNDS_US.iter().enumerate() {
449            if duration_us <= bound {
450                self.bucket_counts[i].fetch_add(1, Ordering::Relaxed);
451                return;
452            }
453        }
454    }
455
456    /// Render as Prometheus text format with the given metric name.
457    #[allow(clippy::cast_precision_loss)] // Reason: microsecond precision loss at >2^53 us (~285 years) is acceptable
458    #[must_use]
459    pub fn to_prometheus_lines(&self, name: &str, help: &str) -> String {
460        let mut out = String::new();
461        let _ = writeln!(out, "\n# HELP {name} {help}");
462        let _ = writeln!(out, "# TYPE {name} histogram");
463        let count = self.count.load(Ordering::Relaxed);
464        let sum_us = self.sum_us.load(Ordering::Relaxed);
465        let mut cumulative = 0u64;
466        for (i, le) in HISTOGRAM_LE_LABELS.iter().enumerate() {
467            cumulative += self.bucket_counts[i].load(Ordering::Relaxed);
468            let _ = writeln!(out, "{name}_bucket{{le=\"{le}\"}} {cumulative}");
469        }
470        let _ = writeln!(out, "{name}_bucket{{le=\"+Inf\"}} {count}");
471        let sum_secs = sum_us as f64 / 1_000_000.0;
472        let _ = writeln!(out, "{name}_sum {sum_secs:.6}");
473        let _ = writeln!(out, "{name}_count {count}");
474        out
475    }
476}
477
478impl Default for Histogram {
479    fn default() -> Self {
480        Self::new()
481    }
482}
483
/// Guard for timing metrics.
///
/// Captures the creation instant; call [`TimingGuard::record`] to add the
/// elapsed time to the wrapped counter. Dropping the guard without calling
/// `record` silently discards the measurement, hence `#[must_use]`.
#[derive(Debug)]
#[must_use = "call `record()` to store the elapsed time; dropping the guard discards it"]
pub struct TimingGuard {
    start:           Instant,
    duration_atomic: Arc<AtomicU64>,
}
489
490impl TimingGuard {
491    /// Create new timing guard.
492    pub fn new(duration_atomic: Arc<AtomicU64>) -> Self {
493        Self {
494            start: Instant::now(),
495            duration_atomic,
496        }
497    }
498
499    /// Record duration in microseconds and consume guard.
500    pub fn record(self) {
501        #[allow(clippy::cast_possible_truncation)]
502        // Reason: microsecond counter cannot exceed u64 in any practical uptime
503        let duration_us = self.start.elapsed().as_micros() as u64;
504        self.duration_atomic.fetch_add(duration_us, Ordering::Relaxed);
505    }
506}
507
/// Prometheus metrics output format.
///
/// A point-in-time snapshot of [`MetricsCollector`] counters (built via
/// `From<&MetricsCollector>`) with derived averages and ratios, ready to be
/// rendered as Prometheus text.
#[derive(Debug)]
pub struct PrometheusMetrics {
    /// Total GraphQL queries executed
    pub queries_total:              u64,
    /// Successful GraphQL queries
    pub queries_success:            u64,
    /// Failed GraphQL queries
    pub queries_error:              u64,
    /// Average query duration in milliseconds (0.0 when no queries recorded)
    pub queries_avg_duration_ms:    f64,
    /// Total database queries executed
    pub db_queries_total:           u64,
    /// Average database query duration in milliseconds (0.0 when none recorded)
    pub db_queries_avg_duration_ms: f64,
    /// Total validation errors
    pub validation_errors_total:    u64,
    /// Total parse errors
    pub parse_errors_total:         u64,
    /// Total execution errors
    pub execution_errors_total:     u64,
    /// Total HTTP requests processed
    pub http_requests_total:        u64,
    /// HTTP 2xx responses
    pub http_responses_2xx:         u64,
    /// HTTP 4xx responses
    pub http_responses_4xx:         u64,
    /// HTTP 5xx responses
    pub http_responses_5xx:         u64,
    /// Cache hit count
    pub cache_hits:                 u64,
    /// Cache miss count
    pub cache_misses:               u64,
    /// Cache hit ratio (0.0 to 1.0; 0.0 when no cache activity)
    pub cache_hit_ratio:            f64,
}
544
impl PrometheusMetrics {
    /// Generate Prometheus text format output.
    ///
    /// Emits one `# HELP` / `# TYPE` / sample triplet per metric. The
    /// positional arguments after the template must stay in exactly the same
    /// order as the `{}` placeholders above them — reorder both together or
    /// the values land under the wrong metric names.
    #[must_use]
    pub fn to_prometheus_format(&self) -> String {
        format!(
            r"# HELP fraiseql_graphql_queries_total Total GraphQL queries executed
# TYPE fraiseql_graphql_queries_total counter
fraiseql_graphql_queries_total {}

# HELP fraiseql_graphql_queries_success Total successful GraphQL queries
# TYPE fraiseql_graphql_queries_success counter
fraiseql_graphql_queries_success {}

# HELP fraiseql_graphql_queries_error Total failed GraphQL queries
# TYPE fraiseql_graphql_queries_error counter
fraiseql_graphql_queries_error {}

# HELP fraiseql_graphql_query_duration_ms Average query execution time in milliseconds
# TYPE fraiseql_graphql_query_duration_ms gauge
fraiseql_graphql_query_duration_ms {}

# HELP fraiseql_database_queries_total Total database queries executed
# TYPE fraiseql_database_queries_total counter
fraiseql_database_queries_total {}

# HELP fraiseql_database_query_duration_ms Average database query time in milliseconds
# TYPE fraiseql_database_query_duration_ms gauge
fraiseql_database_query_duration_ms {}

# HELP fraiseql_validation_errors_total Total validation errors
# TYPE fraiseql_validation_errors_total counter
fraiseql_validation_errors_total {}

# HELP fraiseql_parse_errors_total Total parse errors
# TYPE fraiseql_parse_errors_total counter
fraiseql_parse_errors_total {}

# HELP fraiseql_execution_errors_total Total execution errors
# TYPE fraiseql_execution_errors_total counter
fraiseql_execution_errors_total {}

# HELP fraiseql_http_requests_total Total HTTP requests
# TYPE fraiseql_http_requests_total counter
fraiseql_http_requests_total {}

# HELP fraiseql_http_responses_2xx Total 2xx HTTP responses
# TYPE fraiseql_http_responses_2xx counter
fraiseql_http_responses_2xx {}

# HELP fraiseql_http_responses_4xx Total 4xx HTTP responses
# TYPE fraiseql_http_responses_4xx counter
fraiseql_http_responses_4xx {}

# HELP fraiseql_http_responses_5xx Total 5xx HTTP responses
# TYPE fraiseql_http_responses_5xx counter
fraiseql_http_responses_5xx {}

# HELP fraiseql_cache_hits Total cache hits
# TYPE fraiseql_cache_hits counter
fraiseql_cache_hits {}

# HELP fraiseql_cache_misses Total cache misses
# TYPE fraiseql_cache_misses counter
fraiseql_cache_misses {}

# HELP fraiseql_cache_hit_ratio Cache hit ratio (0-1)
# TYPE fraiseql_cache_hit_ratio gauge
fraiseql_cache_hit_ratio {:.3}
",
            self.queries_total,
            self.queries_success,
            self.queries_error,
            self.queries_avg_duration_ms,
            self.db_queries_total,
            self.db_queries_avg_duration_ms,
            self.validation_errors_total,
            self.parse_errors_total,
            self.execution_errors_total,
            self.http_requests_total,
            self.http_responses_2xx,
            self.http_responses_4xx,
            self.http_responses_5xx,
            self.cache_hits,
            self.cache_misses,
            self.cache_hit_ratio,
        )
    }
}
633
634impl From<&MetricsCollector> for PrometheusMetrics {
635    fn from(collector: &MetricsCollector) -> Self {
636        let queries_total = collector.queries_total.load(Ordering::Relaxed);
637        let queries_success = collector.queries_success.load(Ordering::Relaxed);
638        let queries_error = collector.queries_error.load(Ordering::Relaxed);
639        let queries_duration_us = collector.queries_duration_us.load(Ordering::Relaxed);
640
641        let db_queries_total = collector.db_queries_total.load(Ordering::Relaxed);
642        let db_queries_duration_us = collector.db_queries_duration_us.load(Ordering::Relaxed);
643
644        let cache_hits = collector.cache_hits.load(Ordering::Relaxed);
645        let cache_misses = collector.cache_misses.load(Ordering::Relaxed);
646        let cache_total = cache_hits + cache_misses;
647
648        Self {
649            queries_total,
650            queries_success,
651            queries_error,
652            #[allow(clippy::cast_precision_loss)] // Reason: precision loss is acceptable for metrics/statistics
653            queries_avg_duration_ms: if queries_total > 0 {
654                (queries_duration_us as f64 / queries_total as f64) / 1000.0
655            } else {
656                0.0
657            },
658            db_queries_total,
659            #[allow(clippy::cast_precision_loss)] // Reason: precision loss is acceptable for metrics/statistics
660            db_queries_avg_duration_ms: if db_queries_total > 0 {
661                (db_queries_duration_us as f64 / db_queries_total as f64) / 1000.0
662            } else {
663                0.0
664            },
665            validation_errors_total: collector.validation_errors_total.load(Ordering::Relaxed),
666            parse_errors_total: collector.parse_errors_total.load(Ordering::Relaxed),
667            execution_errors_total: collector.execution_errors_total.load(Ordering::Relaxed),
668            http_requests_total: collector.http_requests_total.load(Ordering::Relaxed),
669            http_responses_2xx: collector.http_responses_2xx.load(Ordering::Relaxed),
670            http_responses_4xx: collector.http_responses_4xx.load(Ordering::Relaxed),
671            http_responses_5xx: collector.http_responses_5xx.load(Ordering::Relaxed),
672            cache_hits,
673            cache_misses,
674            #[allow(clippy::cast_precision_loss)] // Reason: precision loss is acceptable for metrics/statistics
675            cache_hit_ratio: if cache_total > 0 {
676                cache_hits as f64 / cache_total as f64
677            } else {
678                0.0
679            },
680        }
681    }
682}
683
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_metrics_collector_creation() {
        // A fresh collector starts with every counter at zero.
        let m = MetricsCollector::new();
        assert_eq!(m.queries_total.load(Ordering::Relaxed), 0);
        assert_eq!(m.queries_success.load(Ordering::Relaxed), 0);
    }

    #[test]
    fn test_metrics_increment() {
        let m = MetricsCollector::new();
        m.queries_total.fetch_add(5, Ordering::Relaxed);
        m.queries_success.fetch_add(4, Ordering::Relaxed);
        m.queries_error.fetch_add(1, Ordering::Relaxed);

        assert_eq!(m.queries_total.load(Ordering::Relaxed), 5);
        assert_eq!(m.queries_success.load(Ordering::Relaxed), 4);
        assert_eq!(m.queries_error.load(Ordering::Relaxed), 1);
    }

    #[test]
    fn test_prometheus_output_format() {
        let m = MetricsCollector::new();
        m.queries_total.store(100, Ordering::Relaxed);
        m.queries_success.store(95, Ordering::Relaxed);
        m.queries_error.store(5, Ordering::Relaxed);

        let text = PrometheusMetrics::from(&m).to_prometheus_format();

        for needle in [
            "fraiseql_graphql_queries_total 100",
            "fraiseql_graphql_queries_success 95",
            "fraiseql_graphql_queries_error 5",
            "# HELP",
            "# TYPE",
        ] {
            assert!(text.contains(needle), "missing: {needle}");
        }
    }

    #[test]
    fn test_timing_guard() {
        let total_us = Arc::new(AtomicU64::new(0));
        TimingGuard::new(total_us.clone()).record();

        // Creating and recording immediately must take well under a second.
        assert!(total_us.load(Ordering::Relaxed) < 1_000_000);
    }

    #[test]
    fn test_cache_hit_ratio_calculation() {
        let m = MetricsCollector::new();
        m.cache_hits.store(75, Ordering::Relaxed);
        m.cache_misses.store(25, Ordering::Relaxed);

        let snapshot = PrometheusMetrics::from(&m);
        assert!((snapshot.cache_hit_ratio - 0.75).abs() < 0.001);
    }

    #[test]
    fn test_average_duration_calculation() {
        let m = MetricsCollector::new();
        m.queries_total.store(10, Ordering::Relaxed);
        m.queries_duration_us.store(50_000, Ordering::Relaxed); // 50ms total

        let snapshot = PrometheusMetrics::from(&m);
        assert!((snapshot.queries_avg_duration_ms - 5.0).abs() < 0.01); // 5ms average
    }

    #[test]
    fn test_operation_metrics_record_and_render() {
        let reg = OperationMetricsRegistry::new(500);
        reg.record("GetUsers", 10_000, false); // 10ms
        reg.record("GetUsers", 20_000, false); // 20ms
        reg.record("GetPosts", 5_000, true); // 5ms error

        let text = reg.to_prometheus_format();
        assert!(text.contains("fraiseql_query_duration_seconds_bucket{operation=\"GetUsers\""));
        assert!(text.contains("fraiseql_query_duration_seconds_count{operation=\"GetUsers\"} 2"));
        assert!(text.contains("fraiseql_query_duration_seconds_count{operation=\"GetPosts\"} 1"));
        assert!(text.contains("fraiseql_query_errors_total{operation=\"GetPosts\"} 1"));
        assert!(text.contains("fraiseql_query_errors_total{operation=\"GetUsers\"} 0"));
    }

    #[test]
    fn test_anonymous_operation_label() {
        let reg = OperationMetricsRegistry::new(500);
        reg.record("", 1_000, false);

        assert!(reg.to_prometheus_format().contains("operation=\"__anonymous__\""));
    }

    #[test]
    fn test_overflow_bucketing() {
        let reg = OperationMetricsRegistry::new(3);
        for op in ["Op1", "Op2", "Op3"] {
            reg.record(op, 1_000, false);
        }
        // A fourth distinct name exceeds the cap and lands in overflow.
        reg.record("Op4", 1_000, false);

        let text = reg.to_prometheus_format();
        assert!(text.contains("operation=\"__overflow__\""));
        assert!(
            text.contains("fraiseql_query_duration_seconds_count{operation=\"__overflow__\"} 1")
        );
    }

    #[test]
    fn test_histogram_bucket_correctness() {
        let reg = OperationMetricsRegistry::new(500);
        // 50ms = 50_000us → lands in the le=0.05 bucket.
        reg.record("TestOp", 50_000, false);

        let text = reg.to_prometheus_format();
        // le=0.025 (25ms) stays at 0 (50ms > 25ms).
        assert!(text.contains(
            "fraiseql_query_duration_seconds_bucket{operation=\"TestOp\",le=\"0.025\"} 0"
        ));
        // le=0.05 (50ms) is 1 (50ms <= 50ms).
        assert!(text.contains(
            "fraiseql_query_duration_seconds_bucket{operation=\"TestOp\",le=\"0.05\"} 1"
        ));
        // le=0.1 (100ms) is 1 via the cumulative sum.
        assert!(
            text.contains(
                "fraiseql_query_duration_seconds_bucket{operation=\"TestOp\",le=\"0.1\"} 1"
            )
        );
    }
}