// ricecoder_teams/analytics.rs

1/// Analytics and metrics tracking
2use crate::error::Result;
3use crate::models::{AdoptionMetrics, EffectivenessMetrics, TeamAnalyticsReport};
4use chrono::Utc;
5
/// Tracks rule adoption and effectiveness metrics
///
/// Integrates with ricecoder-learning AnalyticsEngine to track:
/// - Rule adoption metrics (percentage of team members applying rules)
/// - Rule effectiveness metrics (positive/negative outcomes from rule application)
/// - Team analytics reports (comprehensive metrics across all rules)
///
/// Currently a stateless placeholder: all query methods return zeroed
/// metrics until the AnalyticsEngine integration lands.
pub struct AnalyticsDashboard {
    // Placeholder for ricecoder-learning AnalyticsEngine integration.
    // In production, this would hold a reference to the AnalyticsEngine.
    // Zero-sized marker so the struct stays constructible with no state.
    _phantom: std::marker::PhantomData<()>,
}
17
18impl AnalyticsDashboard {
19    /// Create a new AnalyticsDashboard
20    ///
21    /// # Arguments
22    /// * No arguments required for basic initialization
23    ///
24    /// # Returns
25    /// A new AnalyticsDashboard instance
26    pub fn new() -> Self {
27        AnalyticsDashboard {
28            _phantom: std::marker::PhantomData,
29        }
30    }
31
32    /// Get adoption metrics for a rule
33    ///
34    /// Calculates the adoption percentage for a rule by tracking how many team members
35    /// have applied the rule. Also tracks adoption trends over time.
36    ///
37    /// # Arguments
38    /// * `rule_id` - The ID of the rule to get adoption metrics for
39    ///
40    /// # Returns
41    /// * `Result<AdoptionMetrics>` - Adoption metrics including percentage and trend
42    ///
43    /// # Requirements
44    /// * Requirement 2.5: Track adoption metrics showing percentage of team members applying the rule
45    pub async fn get_adoption_metrics(&self, rule_id: &str) -> Result<AdoptionMetrics> {
46        tracing::info!(rule_id = %rule_id, "Retrieving adoption metrics");
47
48        // In production, this would query ricecoder-learning AnalyticsEngine
49        // For now, return a placeholder with zero adoption
50        Ok(AdoptionMetrics {
51            rule_id: rule_id.to_string(),
52            total_members: 0,
53            adopting_members: 0,
54            adoption_percentage: 0.0,
55            adoption_trend: Vec::new(),
56        })
57    }
58
59    /// Get effectiveness metrics for a rule
60    ///
61    /// Calculates the effectiveness score for a rule by measuring positive and negative
62    /// outcomes from rule application. Also tracks impact trends over time.
63    ///
64    /// # Arguments
65    /// * `rule_id` - The ID of the rule to get effectiveness metrics for
66    ///
67    /// # Returns
68    /// * `Result<EffectivenessMetrics>` - Effectiveness metrics including score and trend
69    ///
70    /// # Requirements
71    /// * Requirement 2.6: Track effectiveness metrics measuring positive outcomes from rule application
72    pub async fn get_effectiveness_metrics(&self, rule_id: &str) -> Result<EffectivenessMetrics> {
73        tracing::info!(rule_id = %rule_id, "Retrieving effectiveness metrics");
74
75        // In production, this would query ricecoder-learning AnalyticsEngine
76        // For now, return a placeholder with zero effectiveness
77        Ok(EffectivenessMetrics {
78            rule_id: rule_id.to_string(),
79            positive_outcomes: 0,
80            negative_outcomes: 0,
81            effectiveness_score: 0.0,
82            impact_trend: Vec::new(),
83        })
84    }
85
86    /// Generate comprehensive team analytics report
87    ///
88    /// Generates a comprehensive report of team analytics including adoption and effectiveness
89    /// metrics for all rules in the team.
90    ///
91    /// # Arguments
92    /// * `team_id` - The ID of the team to generate report for
93    ///
94    /// # Returns
95    /// * `Result<TeamAnalyticsReport>` - Comprehensive team analytics report
96    ///
97    /// # Requirements
98    /// * Requirement 2.5: Track adoption metrics showing percentage of team members applying the rule
99    /// * Requirement 2.6: Track effectiveness metrics measuring positive outcomes from rule application
100    pub async fn generate_report(&self, team_id: &str) -> Result<TeamAnalyticsReport> {
101        tracing::info!(team_id = %team_id, "Generating analytics report");
102
103        // In production, this would query ricecoder-learning AnalyticsEngine
104        // to aggregate adoption and effectiveness metrics for all rules in the team
105        Ok(TeamAnalyticsReport {
106            team_id: team_id.to_string(),
107            total_members: 0,
108            adoption_metrics: Vec::new(),
109            effectiveness_metrics: Vec::new(),
110            generated_at: Utc::now(),
111        })
112    }
113}
114
115impl Default for AnalyticsDashboard {
116    fn default() -> Self {
117        Self::new()
118    }
119}
120
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_analytics_dashboard_creation() {
        // Construction must not panic.
        let _dashboard = AnalyticsDashboard::new();
    }

    #[tokio::test]
    async fn test_analytics_dashboard_default() {
        // Default must behave like `new()` and not panic.
        let _dashboard = AnalyticsDashboard::default();
    }

    #[tokio::test]
    async fn test_get_adoption_metrics() {
        let dash = AnalyticsDashboard::new();
        let m = dash.get_adoption_metrics("rule-1").await.unwrap();

        // Placeholder implementation reports zero adoption for any rule.
        assert_eq!(m.rule_id, "rule-1");
        assert_eq!(m.total_members, 0);
        assert_eq!(m.adopting_members, 0);
        assert_eq!(m.adoption_percentage, 0.0);
        assert!(m.adoption_trend.is_empty());
    }

    #[tokio::test]
    async fn test_get_effectiveness_metrics() {
        let dash = AnalyticsDashboard::new();
        let m = dash.get_effectiveness_metrics("rule-1").await.unwrap();

        // Placeholder implementation reports zero effectiveness for any rule.
        assert_eq!(m.rule_id, "rule-1");
        assert_eq!(m.positive_outcomes, 0);
        assert_eq!(m.negative_outcomes, 0);
        assert_eq!(m.effectiveness_score, 0.0);
        assert!(m.impact_trend.is_empty());
    }

    #[tokio::test]
    async fn test_generate_report() {
        let dash = AnalyticsDashboard::new();
        let report = dash.generate_report("team-1").await.unwrap();

        // Placeholder implementation yields an empty report for the team.
        assert_eq!(report.team_id, "team-1");
        assert_eq!(report.total_members, 0);
        assert!(report.adoption_metrics.is_empty());
        assert!(report.effectiveness_metrics.is_empty());
    }

    #[tokio::test]
    async fn test_adoption_metrics_serialization() {
        let dash = AnalyticsDashboard::new();
        let m = dash.get_adoption_metrics("rule-1").await.unwrap();

        // Round-trip through JSON must preserve the rule id.
        let json = serde_json::to_string(&m).expect("Failed to serialize");
        assert!(json.contains("\"rule_id\":\"rule-1\""));

        let back: AdoptionMetrics = serde_json::from_str(&json).expect("Failed to deserialize");
        assert_eq!(back.rule_id, m.rule_id);
    }

    #[tokio::test]
    async fn test_effectiveness_metrics_serialization() {
        let dash = AnalyticsDashboard::new();
        let m = dash.get_effectiveness_metrics("rule-1").await.unwrap();

        // Round-trip through JSON must preserve the rule id.
        let json = serde_json::to_string(&m).expect("Failed to serialize");
        assert!(json.contains("\"rule_id\":\"rule-1\""));

        let back: EffectivenessMetrics =
            serde_json::from_str(&json).expect("Failed to deserialize");
        assert_eq!(back.rule_id, m.rule_id);
    }

    #[tokio::test]
    async fn test_report_serialization() {
        let dash = AnalyticsDashboard::new();
        let report = dash.generate_report("team-1").await.unwrap();

        // Round-trip through JSON must preserve the team id.
        let json = serde_json::to_string(&report).expect("Failed to serialize");
        assert!(json.contains("\"team_id\":\"team-1\""));

        let back: TeamAnalyticsReport =
            serde_json::from_str(&json).expect("Failed to deserialize");
        assert_eq!(back.team_id, report.team_id);
    }

    #[tokio::test]
    async fn test_multiple_rules_metrics() {
        let dash = AnalyticsDashboard::new();

        // Each query must echo back its own rule id.
        let first = dash.get_adoption_metrics("rule-1").await.unwrap();
        let second = dash.get_adoption_metrics("rule-2").await.unwrap();

        assert_eq!(first.rule_id, "rule-1");
        assert_eq!(second.rule_id, "rule-2");
        assert_ne!(first.rule_id, second.rule_id);
    }

    #[tokio::test]
    async fn test_adoption_metrics_fields() {
        let dash = AnalyticsDashboard::new();
        let m = dash.get_adoption_metrics("rule-1").await.unwrap();

        // Sanity-check field invariants: non-empty id, percentage in [0, 100].
        assert!(!m.rule_id.is_empty());
        assert!((0.0..=100.0).contains(&m.adoption_percentage));
    }

    #[tokio::test]
    async fn test_effectiveness_metrics_fields() {
        let dash = AnalyticsDashboard::new();
        let m = dash.get_effectiveness_metrics("rule-1").await.unwrap();

        // Sanity-check field invariants: non-empty id, score in [0, 1].
        assert!(!m.rule_id.is_empty());
        assert!((0.0..=1.0).contains(&m.effectiveness_score));
    }

    #[tokio::test]
    async fn test_report_fields() {
        let dash = AnalyticsDashboard::new();
        let report = dash.generate_report("team-1").await.unwrap();

        // Sanity-check field invariants on the placeholder report.
        assert!(!report.team_id.is_empty());
        assert!(report.adoption_metrics.is_empty());
        assert!(report.effectiveness_metrics.is_empty());
    }
}
257}