// kaccy_ai/examples/reports_dashboard.rs
1//! Report generation and dashboard integration examples.
2
3use crate::error::Result;
4
/// Example: Professional report generation
///
/// Groups async demo routines showing how to:
/// - Generate cost analysis reports
/// - Create performance benchmark reports
/// - Produce fraud detection summaries
/// - Export reports in multiple formats (Markdown, JSON, CSV)
///
/// This is a stateless unit struct; it exists only as a namespace for the
/// associated example functions in the `impl` block below.
pub struct ReportGenerationExample;
13
14impl ReportGenerationExample {
15    /// Generate a comprehensive cost analysis report
16    pub async fn cost_analysis_report() -> Result<()> {
17        use crate::reports::{CostAnalysisReport, ReportFormat, ReportGenerator, ReportType};
18
19        println!("=== Cost Analysis Report Generation ===");
20        println!();
21
22        // Create a cost analysis report
23        let mut report = CostAnalysisReport::new(
24            "AI Operations Cost Analysis - Q1 2026".to_string(),
25            "January - March 2026".to_string(),
26        );
27
28        // Add provider costs
29        report.add_provider_cost("OpenAI".to_string(), 1_850.50);
30        report.add_provider_cost("Anthropic".to_string(), 1_420.75);
31        report.add_provider_cost("Gemini".to_string(), 345.00);
32        report.add_provider_cost("DeepSeek".to_string(), 125.25);
33        report.add_provider_cost("Ollama".to_string(), 0.0);
34
35        // Add operation costs
36        report.add_operation_cost("Code Evaluation".to_string(), 1_680.00);
37        report.add_operation_cost("Commitment Verification".to_string(), 895.75);
38        report.add_operation_cost("Fraud Detection".to_string(), 540.50);
39        report.add_operation_cost("Plagiarism Detection".to_string(), 425.25);
40        report.add_operation_cost("Document Analysis".to_string(), 200.00);
41
42        // Set request metrics
43        report.set_total_requests(15_450);
44        report.cost_trend = Some(-12.5); // 12.5% cost reduction
45
46        // Add recommendations
47        report.add_recommendation(
48            "Switch 40% of simple tasks to Gemini Flash to reduce costs by ~$600/month".to_string(),
49        );
50        report.add_recommendation(
51            "Use Ollama for development and testing to eliminate dev environment costs".to_string(),
52        );
53        report.add_recommendation(
54            "Implement request batching to reduce API calls by 25%".to_string(),
55        );
56        report.add_recommendation(
57            "Enable caching for repetitive operations (estimated 15% cost savings)".to_string(),
58        );
59
60        println!("Report Summary:");
61        println!("  * Total Cost: ${:.2}", report.total_cost);
62        println!("  * Total Requests: {}", report.total_requests);
63        println!("  * Avg Cost/Request: ${:.4}", report.avg_cost_per_request);
64        println!("  * Cost Trend: down 12.5%");
65        println!();
66
67        // Generate Markdown report
68        println!("=== Markdown Report ===");
69        let markdown = ReportGenerator::generate(
70            ReportType::CostAnalysis(report.clone()),
71            ReportFormat::Markdown,
72        )?;
73        println!("{markdown}");
74
75        // Generate JSON report
76        println!("=== JSON Report (Preview) ===");
77        let json = ReportGenerator::generate(
78            ReportType::CostAnalysis(report.clone()),
79            ReportFormat::Json,
80        )?;
81        println!("{}...{}", &json[..200], &json[json.len() - 50..]);
82        println!();
83
84        // Generate CSV report
85        println!("=== CSV Report ===");
86        let csv = ReportGenerator::generate(ReportType::CostAnalysis(report), ReportFormat::Csv)?;
87        let lines: Vec<&str> = csv.lines().take(8).collect();
88        println!("{}", lines.join("\n"));
89        println!("...");
90        println!();
91
92        println!("Cost analysis report generated in 3 formats");
93
94        Ok(())
95    }
96
97    /// Generate a performance benchmark report
98    pub async fn performance_benchmark_report() -> Result<()> {
99        use crate::reports::{
100            OperationBenchmark, PerformanceBenchmarkReport, ReportFormat, ReportGenerator,
101            ReportType,
102        };
103
104        println!("=== Performance Benchmark Report Generation ===");
105        println!();
106
107        let mut report = PerformanceBenchmarkReport::new(
108            "AI Services Performance Benchmarks".to_string(),
109            "2026-01-09".to_string(),
110        );
111
112        // Add operation benchmarks
113        report.add_operation(
114            "code_evaluation".to_string(),
115            OperationBenchmark {
116                name: "Code Evaluation".to_string(),
117                avg_latency_ms: 285.5,
118                median_latency_ms: 265.0,
119                p95_latency_ms: 420.0,
120                p99_latency_ms: 580.0,
121                total_ops: 5_420,
122                success_rate: 99.2,
123            },
124        );
125
126        report.add_operation(
127            "commitment_verification".to_string(),
128            OperationBenchmark {
129                name: "Commitment Verification".to_string(),
130                avg_latency_ms: 320.2,
131                median_latency_ms: 295.0,
132                p95_latency_ms: 495.0,
133                p99_latency_ms: 650.0,
134                total_ops: 3_850,
135                success_rate: 98.7,
136            },
137        );
138
139        report.add_operation(
140            "fraud_detection".to_string(),
141            OperationBenchmark {
142                name: "Fraud Detection".to_string(),
143                avg_latency_ms: 195.8,
144                median_latency_ms: 180.0,
145                p95_latency_ms: 280.0,
146                p99_latency_ms: 385.0,
147                total_ops: 2_240,
148                success_rate: 99.5,
149            },
150        );
151
152        report.add_operation(
153            "plagiarism_detection".to_string(),
154            OperationBenchmark {
155                name: "Plagiarism Detection".to_string(),
156                avg_latency_ms: 425.3,
157                median_latency_ms: 390.0,
158                p95_latency_ms: 620.0,
159                p99_latency_ms: 820.0,
160                total_ops: 1_650,
161                success_rate: 97.8,
162            },
163        );
164
165        report.add_operation(
166            "document_analysis".to_string(),
167            OperationBenchmark {
168                name: "Document Analysis".to_string(),
169                avg_latency_ms: 520.7,
170                median_latency_ms: 475.0,
171                p95_latency_ms: 780.0,
172                p99_latency_ms: 1050.0,
173                total_ops: 890,
174                success_rate: 98.3,
175            },
176        );
177
178        // Calculate summary statistics
179        report.calculate_summary();
180
181        println!("Performance Summary:");
182        println!("  * Total Operations: {}", report.summary.total_operations);
183        println!(
184            "  * Overall Avg Latency: {:.2}ms",
185            report.summary.overall_avg_latency_ms
186        );
187        println!(
188            "  * Overall Success Rate: {:.1}%",
189            report.summary.overall_success_rate
190        );
191        if let Some(ref fastest) = report.summary.fastest_operation {
192            println!("  * Fastest Operation: {fastest}");
193        }
194        if let Some(ref slowest) = report.summary.slowest_operation {
195            println!("  * Slowest Operation: {slowest}");
196        }
197        println!();
198
199        // Generate Markdown report
200        println!("=== Markdown Report ===");
201        let markdown = ReportGenerator::generate(
202            ReportType::PerformanceBenchmark(report.clone()),
203            ReportFormat::Markdown,
204        )?;
205        println!("{markdown}");
206
207        // Generate JSON report
208        println!("=== JSON Report (Preview) ===");
209        let json = ReportGenerator::generate(
210            ReportType::PerformanceBenchmark(report),
211            ReportFormat::Json,
212        )?;
213        let lines: Vec<&str> = json.lines().take(10).collect();
214        println!("{}", lines.join("\n"));
215        println!("...");
216        println!();
217
218        println!("Performance benchmark report generated");
219
220        Ok(())
221    }
222
223    /// Generate a fraud detection summary report
224    pub async fn fraud_summary_report() -> Result<()> {
225        use crate::fraud::RiskLevel;
226        use crate::reports::{FraudSummaryReport, ReportFormat, ReportGenerator, ReportType};
227
228        println!("=== Fraud Detection Summary Report Generation ===");
229        println!();
230
231        let mut report = FraudSummaryReport::new(
232            "Fraud Detection Analysis - Q1 2026".to_string(),
233            "January - March 2026".to_string(),
234        );
235
236        // Add fraud cases
237        for _ in 0..125 {
238            report.add_case(RiskLevel::Low);
239        }
240        for _ in 0..48 {
241            report.add_case(RiskLevel::Medium);
242        }
243        for _ in 0..23 {
244            report.add_case(RiskLevel::High);
245        }
246        for _ in 0..7 {
247            report.add_case(RiskLevel::Critical);
248        }
249
250        // Set common fraud types
251        report.set_common_fraud_types(vec![
252            ("Sybil Attack".to_string(), 45),
253            ("Wash Trading".to_string(), 28),
254            ("Reputation Gaming".to_string(), 22),
255            ("Image Manipulation".to_string(), 18),
256            ("Content Plagiarism".to_string(), 15),
257        ]);
258
259        // Set detection accuracy
260        report.accuracy = Some(96.8);
261
262        // Add insights
263        report.add_insight(
264            "Sybil attacks increased by 32% compared to Q4 2026, primarily from newly registered accounts".to_string(),
265        );
266        report.add_insight(
267            "Average detection time improved by 45% due to enhanced ML models".to_string(),
268        );
269        report.add_insight(
270            "Image manipulation attempts decreased by 18% after implementing perceptual hashing"
271                .to_string(),
272        );
273        report.add_insight(
274            "Critical risk cases require average 15 minutes for manual review and final decision"
275                .to_string(),
276        );
277
278        println!("Fraud Detection Summary:");
279        println!("  * Total Cases Analyzed: {}", report.total_cases);
280        println!("  * Detection Accuracy: {:.1}%", report.accuracy.unwrap());
281        println!("  * Most Common: Sybil Attack (45 cases)");
282        println!("  * Critical Cases: 7");
283        println!();
284
285        // Generate Markdown report
286        println!("=== Markdown Report ===");
287        let markdown = ReportGenerator::generate(
288            ReportType::FraudSummary(report.clone()),
289            ReportFormat::Markdown,
290        )?;
291        println!("{markdown}");
292
293        // Generate JSON report
294        println!("=== JSON Report (Preview) ===");
295        let json = ReportGenerator::generate(ReportType::FraudSummary(report), ReportFormat::Json)?;
296        let lines: Vec<&str> = json.lines().take(15).collect();
297        println!("{}", lines.join("\n"));
298        println!("...");
299        println!();
300
301        println!("Fraud detection summary report generated");
302
303        Ok(())
304    }
305
306    /// Demonstrate multi-format report export
307    pub async fn multi_format_export() -> Result<()> {
308        use crate::reports::{CostAnalysisReport, ReportFormat, ReportGenerator, ReportType};
309
310        println!("=== Multi-Format Report Export ===");
311        println!();
312
313        let mut report = CostAnalysisReport::new(
314            "Weekly Cost Report".to_string(),
315            "Week of Jan 9, 2026".to_string(),
316        );
317
318        report.add_provider_cost("OpenAI".to_string(), 425.00);
319        report.add_provider_cost("Anthropic".to_string(), 325.00);
320        report.add_provider_cost("Gemini".to_string(), 85.00);
321        report.set_total_requests(3_500);
322
323        println!("Exporting report in 3 formats...");
324        println!();
325
326        // Export as Markdown
327        let markdown = ReportGenerator::generate(
328            ReportType::CostAnalysis(report.clone()),
329            ReportFormat::Markdown,
330        )?;
331        println!("Markdown export: {} bytes", markdown.len());
332        println!("  Use case: Documentation, GitHub issues, Slack messages");
333        println!();
334
335        // Export as JSON
336        let json = ReportGenerator::generate(
337            ReportType::CostAnalysis(report.clone()),
338            ReportFormat::Json,
339        )?;
340        println!("JSON export: {} bytes", json.len());
341        println!("  Use case: API responses, data storage, dashboard integration");
342        println!();
343
344        // Export as CSV
345        let csv = ReportGenerator::generate(ReportType::CostAnalysis(report), ReportFormat::Csv)?;
346        println!("CSV export: {} bytes", csv.len());
347        println!("  Use case: Excel analysis, data science, financial reporting");
348        println!();
349
350        println!("All reports generated successfully!");
351        println!();
352        println!("Integration tips:");
353        println!("  * Save to files: write reports to disk for archival");
354        println!("  * Send via email: attach reports to automated email notifications");
355        println!("  * Post to Slack: use Markdown format for rich formatting");
356        println!("  * Store in DB: save JSON format for queryable storage");
357        println!("  * Generate dashboards: parse JSON data for real-time visualizations");
358
359        Ok(())
360    }
361
362    /// Demonstrate automated report scheduling
363    pub async fn automated_reporting() -> Result<()> {
364        use crate::reports::{CostAnalysisReport, ReportFormat, ReportGenerator, ReportType};
365
366        println!("=== Automated Report Scheduling ===");
367        println!();
368
369        println!("Example: Daily cost monitoring workflow");
370        println!();
371
372        // Simulate daily report generation
373        let mut report = CostAnalysisReport::new(
374            "Daily Cost Report".to_string(),
375            format!("Date: {}", chrono::Local::now().format("%Y-%m-%d")),
376        );
377
378        report.add_provider_cost("OpenAI".to_string(), 125.50);
379        report.add_provider_cost("Anthropic".to_string(), 95.25);
380        report.set_total_requests(1_250);
381        report.cost_trend = Some(5.2); // 5.2% increase
382
383        // Generate alert if cost exceeds threshold
384        if report.total_cost > 200.0 {
385            println!("ALERT: Daily cost exceeds $200 threshold!");
386            println!("  Current: ${:.2}", report.total_cost);
387            println!("  Trend: +{:.1}%", report.cost_trend.unwrap());
388            println!();
389        }
390
391        // Add automated recommendations
392        if let Some(trend) = report.cost_trend {
393            if trend > 10.0 {
394                report.add_recommendation(
395                    "Cost increased significantly. Review recent usage patterns".to_string(),
396                );
397            }
398        }
399
400        let _markdown =
401            ReportGenerator::generate(ReportType::CostAnalysis(report), ReportFormat::Markdown)?;
402
403        println!("Report generated and ready for distribution:");
404        println!("  * Post to Slack #finance channel");
405        println!("  * Email to finance@company.com");
406        println!("  * Archive in S3 bucket");
407        println!("  * Update monitoring dashboard");
408        println!();
409
410        println!("Scheduling options:");
411        println!("  * Use cron: Schedule daily/weekly/monthly reports");
412        println!("  * Use tokio interval: In-app scheduled reporting");
413        println!("  * Use GitHub Actions: Automated CI/CD reports");
414        println!("  * Use AWS Lambda: Serverless scheduled reports");
415
416        Ok(())
417    }
418}
419
/// Example: Dashboard integration and monitoring
///
/// Groups async demo routines showing how to:
/// - Export metrics to Prometheus
/// - Integrate with Grafana
/// - Generate health check endpoints
/// - Monitor AI operations in real-time
///
/// This is a stateless unit struct; it exists only as a namespace for the
/// associated example functions in the `impl` block below.
pub struct DashboardIntegrationExample;
428
429impl DashboardIntegrationExample {
430    /// Export metrics to Prometheus format
431    pub async fn prometheus_export() -> Result<()> {
432        use crate::dashboard::DashboardMetrics;
433
434        println!("=== Prometheus Metrics Export ===");
435        println!();
436
437        // Create dashboard metrics
438        let mut metrics = DashboardMetrics::new();
439        metrics.request_count = 15_450;
440        metrics.total_cost = 3_741.50;
441        metrics.avg_latency_ms = 285.5;
442        metrics.error_count = 127;
443        metrics.success_rate = 99.18;
444        metrics.cache_hit_rate = 42.5;
445        metrics.circuit_breaker_open = 3;
446        metrics.budget_utilization = 67.8;
447        metrics.active_providers = vec![
448            "openai".to_string(),
449            "anthropic".to_string(),
450            "gemini".to_string(),
451        ];
452
453        // Add custom metrics
454        metrics.add_custom_metric("fraud_detection_accuracy".to_string(), 96.8);
455        metrics.add_custom_metric("plagiarism_checks_total".to_string(), 1_250.0);
456
457        println!("Current Metrics:");
458        println!("  * Total Requests: {}", metrics.request_count);
459        println!("  * Total Cost: ${:.2}", metrics.total_cost);
460        println!("  * Avg Latency: {:.2}ms", metrics.avg_latency_ms);
461        println!("  * Success Rate: {:.2}%", metrics.success_rate);
462        println!("  * Cache Hit Rate: {:.2}%", metrics.cache_hit_rate);
463        println!();
464
465        // Export to Prometheus format
466        let prometheus_metrics = metrics.to_prometheus();
467
468        println!("=== Prometheus Format Output ===");
469        println!();
470        for metric in prometheus_metrics.iter().take(3) {
471            println!("{}", metric.to_prometheus_format());
472        }
473        println!("... {} more metrics", prometheus_metrics.len() - 3);
474        println!();
475
476        println!("Integration:");
477        println!("  * Expose this endpoint at /metrics");
478        println!("  * Configure Prometheus to scrape this endpoint");
479        println!("  * Default scrape interval: 15s");
480        println!();
481
482        println!("Example Prometheus Configuration:");
483        println!("```yaml");
484        println!("scrape_configs:");
485        println!("  - job_name: 'kaccy-ai'");
486        println!("    static_configs:");
487        println!("      - targets: ['localhost:8080']");
488        println!("    metrics_path: '/metrics'");
489        println!("    scrape_interval: 15s");
490        println!("```");
491
492        Ok(())
493    }
494
495    /// Integrate with Grafana dashboard
496    pub async fn grafana_integration() -> Result<()> {
497        use crate::dashboard::{DashboardMetrics, to_grafana_format};
498
499        println!("=== Grafana Dashboard Integration ===");
500        println!();
501
502        let mut metrics = DashboardMetrics::new();
503        metrics.request_count = 8_500;
504        metrics.total_cost = 1_847.25;
505        metrics.avg_latency_ms = 320.5;
506        metrics.success_rate = 98.9;
507        metrics.cache_hit_rate = 38.2;
508        metrics.budget_utilization = 52.1;
509
510        // Convert to Grafana format
511        let datapoints = to_grafana_format(&metrics);
512
513        println!("Grafana Data Points (JSON Simple Format):");
514        println!();
515        for dp in datapoints.iter().take(3) {
516            let json = dp.to_json()?;
517            let lines: Vec<&str> = json.lines().take(5).collect();
518            println!("{}", lines.join("\n"));
519            println!("  ...");
520            println!();
521        }
522
523        println!("Dashboard Setup:");
524        println!("  1. Add JSON data source in Grafana");
525        println!("  2. Configure endpoint: http://your-api/grafana-metrics");
526        println!("  3. Set refresh interval: 10s");
527        println!("  4. Create panels for each metric");
528        println!();
529
530        println!("Recommended Grafana Panels:");
531        println!("  * Request Rate (Time Series)");
532        println!("  * Cost Tracking (Stat + Time Series)");
533        println!("  * Latency Distribution (Histogram)");
534        println!("  * Success Rate (Gauge)");
535        println!("  * Cache Hit Rate (Bar Gauge)");
536        println!("  * Budget Utilization (Gauge with thresholds)");
537
538        Ok(())
539    }
540
541    /// Health check endpoint for monitoring
542    pub async fn health_check_endpoint() -> Result<()> {
543        use crate::dashboard::{ComponentHealth, HealthCheckStatus};
544        use std::time::{SystemTime, UNIX_EPOCH};
545
546        println!("=== Health Check Endpoint ===");
547        println!();
548
549        let mut status = HealthCheckStatus::new();
550
551        // Check LLM client health
552        let llm_health = ComponentHealth {
553            healthy: true,
554            last_check: SystemTime::now()
555                .duration_since(UNIX_EPOCH)
556                .unwrap()
557                .as_secs(),
558            response_time_ms: Some(45.2),
559            error: None,
560        };
561        status.add_component("llm_client".to_string(), llm_health);
562
563        // Check cache health
564        let cache_health = ComponentHealth {
565            healthy: true,
566            last_check: SystemTime::now()
567                .duration_since(UNIX_EPOCH)
568                .unwrap()
569                .as_secs(),
570            response_time_ms: Some(2.1),
571            error: None,
572        };
573        status.add_component("cache".to_string(), cache_health);
574
575        // Check database health (simulating degraded state)
576        let db_health = ComponentHealth {
577            healthy: true,
578            last_check: SystemTime::now()
579                .duration_since(UNIX_EPOCH)
580                .unwrap()
581                .as_secs(),
582            response_time_ms: Some(125.8),
583            error: None,
584        };
585        status.add_component("database".to_string(), db_health);
586
587        println!("Health Check Status:");
588        println!(
589            "  * Overall: {}",
590            if status.healthy {
591                "HEALTHY"
592            } else {
593                "UNHEALTHY"
594            }
595        );
596        println!("  * Components: {}", status.components.len());
597        println!();
598
599        let json = status.to_json()?;
600        println!("=== JSON Response ===");
601        let lines: Vec<&str> = json.lines().take(15).collect();
602        println!("{}", lines.join("\n"));
603        println!("  ...");
604        println!();
605
606        println!("Integration:");
607        println!("  * Expose at /health or /healthz");
608        println!("  * Used by Kubernetes liveness/readiness probes");
609        println!("  * Monitored by uptime services (StatusPage, Pingdom)");
610        println!("  * Return HTTP 200 if healthy, 503 if unhealthy");
611
612        Ok(())
613    }
614
615    /// Real-time metrics monitoring
616    pub async fn realtime_monitoring() -> Result<()> {
617        use crate::dashboard::DashboardMetrics;
618
619        println!("=== Real-Time Metrics Monitoring ===");
620        println!();
621
622        println!("Simulating real-time metric updates...");
623        println!();
624
625        // Simulate collecting metrics over time
626        let mut metrics_history: Vec<DashboardMetrics> = Vec::new();
627
628        for i in 0..5 {
629            let mut metrics = DashboardMetrics::new();
630            metrics.request_count = 1000 + i * 250;
631            metrics.total_cost = 250.0 + (i as f64 * 62.5);
632            metrics.avg_latency_ms = 280.0 + (i as f64 * 10.0);
633            metrics.success_rate = 99.5 - (i as f64 * 0.1);
634            metrics.cache_hit_rate = 40.0 + (i as f64 * 2.5);
635
636            metrics_history.push(metrics);
637        }
638
639        println!("+---------+-----------+------------+------------+-------------+");
640        println!("| Time    | Requests  | Cost       | Latency    | Success %   |");
641        println!("+---------+-----------+------------+------------+-------------+");
642
643        for (i, m) in metrics_history.iter().enumerate() {
644            println!(
645                "| T+{}min  | {:>9} | ${:>9.2} | {:>8.1}ms | {:>10.2}% |",
646                i * 5,
647                m.request_count,
648                m.total_cost,
649                m.avg_latency_ms,
650                m.success_rate
651            );
652        }
653        println!("+---------+-----------+------------+------------+-------------+");
654        println!();
655
656        println!("Monitoring Strategy:");
657        println!("  * Update metrics every 10-30 seconds");
658        println!("  * Store historical data for trend analysis");
659        println!("  * Alert on threshold violations:");
660        println!("    - Success rate < 95%");
661        println!("    - Latency > 500ms (P95)");
662        println!("    - Cost growth > 20% per hour");
663        println!("    - Error rate > 5%");
664        println!();
665
666        println!("Alert Channels:");
667        println!("  * PagerDuty for critical alerts");
668        println!("  * Slack for warnings");
669        println!("  * Email for daily summaries");
670        println!("  * Webhook for custom integrations");
671
672        Ok(())
673    }
674
675    /// Custom metrics and instrumentation
676    pub async fn custom_instrumentation() -> Result<()> {
677        use crate::dashboard::{DashboardMetrics, MetricType, PrometheusMetric, TimeSeriesPoint};
678
679        println!("=== Custom Metrics and Instrumentation ===");
680        println!();
681
682        let mut metrics = DashboardMetrics::new();
683
684        // Add domain-specific custom metrics
685        metrics.add_custom_metric("code_evaluations_completed".to_string(), 5_420.0);
686        metrics.add_custom_metric("fraud_cases_detected".to_string(), 127.0);
687        metrics.add_custom_metric("plagiarism_similarity_avg".to_string(), 15.3);
688        metrics.add_custom_metric("github_verifications".to_string(), 3_850.0);
689        metrics.add_custom_metric("document_pages_analyzed".to_string(), 12_450.0);
690
691        println!("Custom Metrics Registered:");
692        for (name, value) in &metrics.custom_metrics {
693            println!("  * {name}: {value}");
694        }
695        println!();
696
697        // Create labeled metrics for detailed tracking
698        let mut provider_latency = PrometheusMetric::new(
699            "kaccy_ai_provider_latency_ms".to_string(),
700            MetricType::Histogram,
701            "LLM provider latency in milliseconds".to_string(),
702        );
703
704        provider_latency.add_point(
705            TimeSeriesPoint::now(285.5)
706                .with_label("provider".to_string(), "openai".to_string())
707                .with_label("model".to_string(), "gpt-4-turbo".to_string()),
708        );
709
710        provider_latency.add_point(
711            TimeSeriesPoint::now(320.2)
712                .with_label("provider".to_string(), "anthropic".to_string())
713                .with_label("model".to_string(), "claude-3-opus".to_string()),
714        );
715
716        provider_latency.add_point(
717            TimeSeriesPoint::now(195.8)
718                .with_label("provider".to_string(), "gemini".to_string())
719                .with_label("model".to_string(), "gemini-1.5-flash".to_string()),
720        );
721
722        println!("=== Labeled Metrics (Prometheus Format) ===");
723        println!("{}", provider_latency.to_prometheus_format());
724
725        println!("Benefits of Custom Metrics:");
726        println!("  * Track domain-specific KPIs");
727        println!("  * Monitor business logic performance");
728        println!("  * Identify bottlenecks and optimization opportunities");
729        println!("  * Support data-driven decision making");
730        println!("  * Enable advanced alerting rules");
731        println!();
732
733        println!("Best Practices:");
734        println!("  * Use meaningful metric names (snake_case)");
735        println!("  * Add appropriate labels for filtering");
736        println!("  * Choose correct metric type (Counter/Gauge/Histogram)");
737        println!("  * Document metrics in code comments");
738        println!("  * Set up retention policies for historical data");
739
740        Ok(())
741    }
742}