use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

use crate::thinking::{ThinkingProgress, ThinkingStats, ThoughtData};

/// Configuration for analytics collection and reporting.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnalyticsConfig {
    pub enabled: bool,
    pub endpoint: String,
    pub api_key: Option<String>,
    pub collection_interval: u64,
    pub detailed_metrics: bool,
    pub retention_days: u32,
    pub anonymize_data: bool,
    pub export_analytics: bool,
}

impl Default for AnalyticsConfig {
    fn default() -> Self {
        Self {
            enabled: false,
            endpoint: "http://localhost:9090".to_string(),
            api_key: None,
            collection_interval: 60,
            detailed_metrics: true,
            retention_days: 30,
            anonymize_data: false,
            export_analytics: false,
        }
    }
}

/// Analytics report generated for a single thinking session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionAnalytics {
    pub session_id: String,
    pub session_title: String,
    pub analyzed_at: DateTime<Utc>,
    pub basic_metrics: BasicMetrics,
    pub thinking_patterns: ThinkingPatterns,
    pub performance_metrics: PerformanceMetrics,
    pub quality_metrics: QualityMetrics,
    pub insights: Vec<Insight>,
    pub recommendations: Vec<Recommendation>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BasicMetrics {
    pub total_thoughts: u32,
    pub total_revisions: u32,
    pub total_branches: u32,
    pub session_duration: u64,
    pub avg_thought_length: f64,
    pub completion_rate: f64,
    pub efficiency_score: f64,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ThinkingPatterns {
    pub revision_frequency: f64,
    pub branching_frequency: f64,
    pub complexity_trend: ComplexityTrend,
    pub thinking_style: ThinkingStyle,
    pub common_patterns: Vec<Pattern>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ComplexityTrend {
    Increasing,
    Decreasing,
    Stable,
    Variable,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ThinkingStyle {
    Linear,
    Iterative,
    Exploratory,
    Analytical,
    Creative,
    Mixed,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Pattern {
    pub pattern_type: String,
    pub description: String,
    pub frequency: u32,
    pub confidence: f64,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    pub avg_processing_time_ms: f64,
    pub total_processing_time_ms: u64,
    pub throughput: f64,
    pub response_time_distribution: HashMap<String, u32>,
    pub bottlenecks: Vec<Bottleneck>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Bottleneck {
    pub bottleneck_type: String,
    pub description: String,
    pub impact_level: ImpactLevel,
    pub suggested_solution: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ImpactLevel {
    Low,
    Medium,
    High,
    Critical,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityMetrics {
    pub coherence_score: f64,
    pub logical_flow_score: f64,
    pub completeness_score: f64,
    pub clarity_score: f64,
    pub overall_quality_score: f64,
    pub quality_issues: Vec<QualityIssue>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityIssue {
    pub issue_type: String,
    pub description: String,
    pub severity: Severity,
    pub affected_thoughts: Vec<u32>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Severity {
    Minor,
    Moderate,
    Major,
    Critical,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Insight {
    pub insight_type: String,
    pub description: String,
    pub confidence: f64,
    pub supporting_data: HashMap<String, serde_json::Value>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Recommendation {
    pub recommendation_type: String,
    pub description: String,
    pub priority: Priority,
    pub expected_impact: String,
    pub implementation_difficulty: Difficulty,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Priority {
    Low,
    Medium,
    High,
    Critical,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Difficulty {
    Easy,
    Medium,
    Hard,
    VeryHard,
}

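/// Analyzes thinking sessions and maintains aggregate metrics across them.
///
/// A minimal usage sketch (marked `ignore`; it assumes the `ThoughtData`,
/// `ThinkingStats`, and `ThinkingProgress` constructors exercised in the
/// tests at the bottom of this file):
///
/// ```ignore
/// let mut engine = AnalyticsEngine::new();
/// let thoughts = vec![ThoughtData::new("First thought".to_string(), 1, 1)];
/// let report = engine.analyze_session(
///     "session-1",
///     "Example session",
///     &thoughts,
///     &ThinkingStats::default(),
///     &ThinkingProgress::new(1, 1),
/// );
/// println!("efficiency: {:.2}", report.basic_metrics.efficiency_score);
/// ```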
pub struct AnalyticsEngine {
    #[allow(dead_code)]
    config: AnalyticsConfig,
    analytics_data: HashMap<String, SessionAnalytics>,
    metrics_aggregator: MetricsAggregator,
}

/// Aggregate metrics accumulated across all analyzed sessions.
#[derive(Debug, Clone, serde::Serialize)]
pub struct MetricsAggregator {
    pub total_sessions: u64,
    pub avg_session_duration: f64,
    pub avg_thoughts_per_session: f64,
    pub avg_revisions_per_session: f64,
    pub avg_branches_per_session: f64,
    pub performance_trends: HashMap<String, Vec<f64>>,
}

impl Default for MetricsAggregator {
    fn default() -> Self {
        Self {
            total_sessions: 0,
            avg_session_duration: 0.0,
            avg_thoughts_per_session: 0.0,
            avg_revisions_per_session: 0.0,
            avg_branches_per_session: 0.0,
            performance_trends: HashMap::new(),
        }
    }
}

impl AnalyticsEngine {
    pub fn new() -> Self {
        Self {
            config: AnalyticsConfig::default(),
            analytics_data: HashMap::new(),
            metrics_aggregator: MetricsAggregator::default(),
        }
    }

    pub fn with_config(config: AnalyticsConfig) -> Self {
        Self {
            config,
            analytics_data: HashMap::new(),
            metrics_aggregator: MetricsAggregator::default(),
        }
    }

    pub fn analyze_session(
        &mut self,
        session_id: &str,
        session_title: &str,
        thoughts: &[ThoughtData],
        stats: &ThinkingStats,
        progress: &ThinkingProgress,
    ) -> SessionAnalytics {
        let analyzed_at = Utc::now();

        let basic_metrics = self.calculate_basic_metrics(stats, progress);

        let thinking_patterns = self.analyze_thinking_patterns(thoughts);

        let performance_metrics = self.calculate_performance_metrics(stats);

        let quality_metrics = self.calculate_quality_metrics(thoughts);

        let insights = self.generate_insights(thoughts, &basic_metrics, &thinking_patterns);

        let recommendations = self.generate_recommendations(&basic_metrics, &quality_metrics);

        let analytics = SessionAnalytics {
            session_id: session_id.to_string(),
            session_title: session_title.to_string(),
            analyzed_at,
            basic_metrics,
            thinking_patterns,
            performance_metrics,
            quality_metrics,
            insights,
            recommendations,
        };

        self.analytics_data
            .insert(session_id.to_string(), analytics.clone());

        self.update_aggregator(&analytics);

        analytics
    }

    fn calculate_basic_metrics(
        &self,
        stats: &ThinkingStats,
        progress: &ThinkingProgress,
    ) -> BasicMetrics {
        let total_thoughts = stats.total_thoughts as u32;
        let total_revisions = stats.total_revisions as u32;
        let total_branches = stats.total_branches as u32;

        // Session duration is not derived from the stats snapshot here.
        let session_duration = 0u64;
        let avg_thought_length = if total_thoughts > 0 {
            stats.total_thought_length as f64 / total_thoughts as f64
        } else {
            0.0
        };
        let completion_rate = if progress.total_thoughts > 0 {
            progress.completed_thoughts as f64 / progress.total_thoughts as f64
        } else {
            0.0
        };

        let efficiency_score = self.calculate_efficiency_score(stats);

        BasicMetrics {
            total_thoughts,
            total_revisions,
            total_branches,
            session_duration,
            avg_thought_length,
            completion_rate,
            efficiency_score,
        }
    }

    fn calculate_efficiency_score(&self, stats: &ThinkingStats) -> f64 {
        if stats.total_thoughts == 0 {
            return 0.0;
        }

        let revision_ratio = stats.total_revisions as f64 / stats.total_thoughts as f64;
        let branch_ratio = stats.total_branches as f64 / stats.total_thoughts as f64;

        // Start from a perfect score and penalize heavy revising and branching.
        let base_score = 1.0;
        let revision_penalty = revision_ratio * 0.3;
        let branch_penalty = branch_ratio * 0.2;

        (base_score - revision_penalty - branch_penalty).max(0.0)
    }

    fn analyze_thinking_patterns(&self, thoughts: &[ThoughtData]) -> ThinkingPatterns {
        let revision_frequency = if thoughts.len() > 1 {
            thoughts.iter().filter(|t| t.is_revision()).count() as f64 / (thoughts.len() - 1) as f64
        } else {
            0.0
        };

        let branching_frequency = if thoughts.len() > 1 {
            thoughts.iter().filter(|t| t.is_branch()).count() as f64 / (thoughts.len() - 1) as f64
        } else {
            0.0
        };

        let complexity_trend = self.analyze_complexity_trend(thoughts);
        let thinking_style = self.classify_thinking_style(thoughts);
        let common_patterns = self.identify_patterns(thoughts);

        ThinkingPatterns {
            revision_frequency,
            branching_frequency,
            complexity_trend,
            thinking_style,
            common_patterns,
        }
    }

    fn analyze_complexity_trend(&self, thoughts: &[ThoughtData]) -> ComplexityTrend {
        if thoughts.len() < 3 {
            return ComplexityTrend::Stable;
        }

        // Use thought length as a rough proxy for complexity and compare the
        // first third of the session against the last third.
        let complexities: Vec<usize> = thoughts.iter().map(|t| t.thought.len()).collect();
        let first_third = complexities.len() / 3;
        let last_third_start = complexities.len() - first_third;

        let avg_first =
            complexities[..first_third].iter().sum::<usize>() as f64 / first_third as f64;
        let avg_last = complexities[last_third_start..].iter().sum::<usize>() as f64
            / (complexities.len() - last_third_start) as f64;

        let change_ratio = (avg_last - avg_first) / avg_first.max(1.0);

        if change_ratio > 0.2 {
            ComplexityTrend::Increasing
        } else if change_ratio < -0.2 {
            ComplexityTrend::Decreasing
        } else if change_ratio.abs() < 0.1 {
            ComplexityTrend::Stable
        } else {
            ComplexityTrend::Variable
        }
    }

    fn classify_thinking_style(&self, thoughts: &[ThoughtData]) -> ThinkingStyle {
        let revisions = thoughts.iter().filter(|t| t.is_revision()).count();
        let branches = thoughts.iter().filter(|t| t.is_branch()).count();
        let total = thoughts.len();

        if revisions > total / 3 {
            ThinkingStyle::Iterative
        } else if branches > total / 4 {
            ThinkingStyle::Exploratory
        } else if revisions == 0 && branches == 0 {
            ThinkingStyle::Linear
        } else if total > 10 {
            ThinkingStyle::Analytical
        } else {
            ThinkingStyle::Mixed
        }
    }

    fn identify_patterns(&self, thoughts: &[ThoughtData]) -> Vec<Pattern> {
        let mut patterns = Vec::new();

        let revision_count = thoughts.iter().filter(|t| t.is_revision()).count();
        if revision_count > thoughts.len() / 4 {
            patterns.push(Pattern {
                pattern_type: "frequent_revisions".to_string(),
                description: "High frequency of thought revisions".to_string(),
                frequency: revision_count as u32,
                confidence: 0.8,
            });
        }

        let branch_count = thoughts.iter().filter(|t| t.is_branch()).count();
        if branch_count > thoughts.len() / 5 {
            patterns.push(Pattern {
                pattern_type: "branching_exploration".to_string(),
                description: "Exploratory thinking with multiple branches".to_string(),
                frequency: branch_count as u32,
                confidence: 0.7,
            });
        }

        if revision_count == 0 && branch_count == 0 && thoughts.len() > 3 {
            patterns.push(Pattern {
                pattern_type: "linear_progression".to_string(),
                description: "Straightforward linear thinking process".to_string(),
                frequency: thoughts.len() as u32,
                confidence: 0.9,
            });
        }

        patterns
    }

    fn calculate_performance_metrics(&self, stats: &ThinkingStats) -> PerformanceMetrics {
        // Throughput is expressed as thoughts per minute.
        let throughput = if stats.total_processing_time_ms > 0 {
            (stats.total_thoughts as f64 * 60000.0) / stats.total_processing_time_ms as f64
        } else {
            0.0
        };

        let mut response_time_distribution = HashMap::new();
        response_time_distribution.insert("fast".to_string(), 0);
        response_time_distribution.insert("medium".to_string(), 0);
        response_time_distribution.insert("slow".to_string(), 0);

        let bottlenecks = Vec::new();

        PerformanceMetrics {
            avg_processing_time_ms: stats.avg_processing_time_ms,
            total_processing_time_ms: stats.total_processing_time_ms,
            throughput,
            response_time_distribution,
            bottlenecks,
        }
    }

    fn calculate_quality_metrics(&self, thoughts: &[ThoughtData]) -> QualityMetrics {
        let coherence_score = self.calculate_coherence_score(thoughts);
        let logical_flow_score = self.calculate_logical_flow_score(thoughts);
        let completeness_score = self.calculate_completeness_score(thoughts);
        let clarity_score = self.calculate_clarity_score(thoughts);

        let overall_quality_score =
            (coherence_score + logical_flow_score + completeness_score + clarity_score) / 4.0;

        let quality_issues = self.identify_quality_issues(thoughts);

        QualityMetrics {
            coherence_score,
            logical_flow_score,
            completeness_score,
            clarity_score,
            overall_quality_score,
            quality_issues,
        }
    }

    fn calculate_coherence_score(&self, thoughts: &[ThoughtData]) -> f64 {
        if thoughts.len() < 2 {
            return 1.0;
        }

        let mut coherence_score: f64 = 1.0;

        for i in 1..thoughts.len() {
            let prev_thought = &thoughts[i - 1];
            let curr_thought = &thoughts[i];

            // Compare against the first few characters of the previous thought.
            // Taking chars (not bytes) avoids panicking on multi-byte UTF-8.
            let prev_prefix: String =
                prev_thought.thought.to_lowercase().chars().take(10).collect();
            let has_connection = curr_thought.thought.to_lowercase().contains(&prev_prefix);

            if !has_connection && !curr_thought.is_revision() && !curr_thought.is_branch() {
                coherence_score -= 0.1;
            }
        }

        coherence_score.max(0.0)
    }

    fn calculate_logical_flow_score(&self, thoughts: &[ThoughtData]) -> f64 {
        if thoughts.is_empty() {
            return 0.0;
        }

        let mut flow_score: f64 = 1.0;
        let mut consecutive_revisions = 0;

        for thought in thoughts {
            if thought.is_revision() {
                consecutive_revisions += 1;
                if consecutive_revisions > 2 {
                    flow_score -= 0.1;
                }
            } else {
                consecutive_revisions = 0;
            }
        }

        flow_score.max(0.0)
    }

    fn calculate_completeness_score(&self, thoughts: &[ThoughtData]) -> f64 {
        if thoughts.is_empty() {
            return 0.0;
        }

        let avg_length =
            thoughts.iter().map(|t| t.thought.len()).sum::<usize>() as f64 / thoughts.len() as f64;
        let min_acceptable_length = 20.0;

        if avg_length < min_acceptable_length {
            0.5
        } else if avg_length > 100.0 {
            1.0
        } else {
            0.5 + (avg_length - min_acceptable_length) / (100.0 - min_acceptable_length) * 0.5
        }
    }

    fn calculate_clarity_score(&self, thoughts: &[ThoughtData]) -> f64 {
        if thoughts.is_empty() {
            return 0.0;
        }

        let mut clarity_score: f64 = 1.0;

        for thought in thoughts {
            let words = thought.thought.split_whitespace().count();
            if words < 5 {
                clarity_score -= 0.1;
            }
            if thought.thought.len() > 500 {
                clarity_score -= 0.05;
            }
        }

        clarity_score.max(0.0)
    }

    fn identify_quality_issues(&self, thoughts: &[ThoughtData]) -> Vec<QualityIssue> {
        let mut issues = Vec::new();

        for (i, thought) in thoughts.iter().enumerate() {
            if thought.thought.len() < 10 {
                issues.push(QualityIssue {
                    issue_type: "short_thought".to_string(),
                    description: "Thought is too short".to_string(),
                    severity: Severity::Minor,
                    affected_thoughts: vec![i as u32 + 1],
                });
            }

            if thought.thought.len() > 1000 {
                issues.push(QualityIssue {
                    issue_type: "long_thought".to_string(),
                    description: "Thought is too long".to_string(),
                    severity: Severity::Moderate,
                    affected_thoughts: vec![i as u32 + 1],
                });
            }
        }

        issues
    }

    fn generate_insights(
        &self,
        _thoughts: &[ThoughtData],
        basic_metrics: &BasicMetrics,
        thinking_patterns: &ThinkingPatterns,
    ) -> Vec<Insight> {
        let mut insights = Vec::new();

        if thinking_patterns.revision_frequency > 0.3 {
            insights.push(Insight {
                insight_type: "high_revision_rate".to_string(),
                description:
                    "High frequency of thought revisions suggests iterative thinking process"
                        .to_string(),
                confidence: 0.8,
                supporting_data: HashMap::new(),
            });
        }

        if basic_metrics.efficiency_score > 0.8 {
            insights.push(Insight {
                insight_type: "efficient_thinking".to_string(),
                description: "High efficiency score indicates effective problem-solving approach"
                    .to_string(),
                confidence: 0.9,
                supporting_data: HashMap::new(),
            });
        }

        if thinking_patterns.branching_frequency > 0.2 {
            insights.push(Insight {
                insight_type: "exploratory_thinking".to_string(),
                description: "Multiple branches indicate exploratory thinking approach".to_string(),
                confidence: 0.7,
                supporting_data: HashMap::new(),
            });
        }

        insights
    }

    fn generate_recommendations(
        &self,
        basic_metrics: &BasicMetrics,
        quality_metrics: &QualityMetrics,
    ) -> Vec<Recommendation> {
        let mut recommendations = Vec::new();

        if basic_metrics.efficiency_score < 0.6 {
            recommendations.push(Recommendation {
                recommendation_type: "improve_efficiency".to_string(),
                description: "Consider reducing revisions and branches to improve efficiency"
                    .to_string(),
                priority: Priority::High,
                expected_impact: "20% improvement in efficiency".to_string(),
                implementation_difficulty: Difficulty::Medium,
            });
        }

        if quality_metrics.overall_quality_score < 0.7 {
            recommendations.push(Recommendation {
                recommendation_type: "improve_quality".to_string(),
                description: "Focus on thought clarity and logical flow".to_string(),
                priority: Priority::Medium,
                expected_impact: "15% improvement in quality".to_string(),
                implementation_difficulty: Difficulty::Easy,
            });
        }

        recommendations
    }

    fn update_aggregator(&mut self, analytics: &SessionAnalytics) {
        self.metrics_aggregator.total_sessions += 1;

        let total_sessions = self.metrics_aggregator.total_sessions as f64;

        // Incremental running average: new_avg = (old_avg * (n - 1) + x) / n.
        self.metrics_aggregator.avg_session_duration =
            (self.metrics_aggregator.avg_session_duration * (total_sessions - 1.0)
                + analytics.basic_metrics.session_duration as f64)
                / total_sessions;

        self.metrics_aggregator.avg_thoughts_per_session =
            (self.metrics_aggregator.avg_thoughts_per_session * (total_sessions - 1.0)
                + analytics.basic_metrics.total_thoughts as f64)
                / total_sessions;

        self.metrics_aggregator.avg_revisions_per_session =
            (self.metrics_aggregator.avg_revisions_per_session * (total_sessions - 1.0)
                + analytics.basic_metrics.total_revisions as f64)
                / total_sessions;

        self.metrics_aggregator.avg_branches_per_session =
            (self.metrics_aggregator.avg_branches_per_session * (total_sessions - 1.0)
                + analytics.basic_metrics.total_branches as f64)
                / total_sessions;
    }

    pub fn get_session_analytics(&self, session_id: &str) -> Option<&SessionAnalytics> {
        self.analytics_data.get(session_id)
    }

    pub fn get_aggregated_metrics(&self) -> &MetricsAggregator {
        &self.metrics_aggregator
    }

    pub fn export_analytics(&self) -> serde_json::Value {
        serde_json::json!({
            "analytics_data": self.analytics_data,
            "aggregated_metrics": self.metrics_aggregator,
            "exported_at": Utc::now()
        })
    }
}

impl Default for AnalyticsEngine {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::thinking::ThoughtData;

    #[test]
    fn test_analytics_engine_creation() {
        let engine = AnalyticsEngine::new();
        assert!(!engine.config.enabled);
        assert_eq!(engine.config.endpoint, "http://localhost:9090");
    }

    #[test]
    fn test_basic_metrics_calculation() {
        let engine = AnalyticsEngine::new();
        let thoughts = vec![
            ThoughtData::new("First thought".to_string(), 1, 3),
            ThoughtData::new("Second thought".to_string(), 2, 3),
            ThoughtData::new("Third thought".to_string(), 3, 3),
        ];

        let stats = ThinkingStats {
            total_thoughts: thoughts.len() as u64,
            total_thought_length: thoughts.iter().map(|t| t.thought.len() as u64).sum(),
            ..ThinkingStats::default()
        };
        let progress = ThinkingProgress::new(3, 3);
        let metrics = engine.calculate_basic_metrics(&stats, &progress);

        assert_eq!(metrics.total_thoughts, 3);
        assert_eq!(metrics.total_revisions, 0);
        assert_eq!(metrics.total_branches, 0);
        assert!(metrics.avg_thought_length > 0.0);
    }
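
    // A small additional test (a sketch): a session with no revisions or
    // branches should classify as linear, using the same constructors as the
    // surrounding tests.
    #[test]
    fn test_thinking_style_classification() {
        let engine = AnalyticsEngine::new();
        let thoughts = vec![
            ThoughtData::new("First thought".to_string(), 1, 2),
            ThoughtData::new("Second thought".to_string(), 2, 2),
        ];

        assert!(matches!(
            engine.classify_thinking_style(&thoughts),
            ThinkingStyle::Linear
        ));
    }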

    #[test]
    fn test_thinking_patterns_analysis() {
        let engine = AnalyticsEngine::new();
        let thoughts = vec![
            ThoughtData::new("First thought".to_string(), 1, 3),
            ThoughtData::revision("Revised thought".to_string(), 2, 1),
            ThoughtData::new("Third thought".to_string(), 3, 3),
        ];

        let patterns = engine.analyze_thinking_patterns(&thoughts);

        assert!(patterns.revision_frequency > 0.0);
        assert_eq!(patterns.revision_frequency, 0.5);
    }
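
    // Additional illustrative tests (a sketch): they exercise the efficiency
    // and complexity-trend heuristics above, reusing only the `ThoughtData`
    // and `ThinkingStats` constructors already used in this test module.
    #[test]
    fn test_efficiency_score_penalizes_revisions_and_branches() {
        let engine = AnalyticsEngine::new();
        let stats = ThinkingStats {
            total_thoughts: 10,
            total_revisions: 5,
            total_branches: 0,
            ..ThinkingStats::default()
        };

        // 1.0 - (5/10) * 0.3 - (0/10) * 0.2 = 0.85
        let score = engine.calculate_efficiency_score(&stats);
        assert!((score - 0.85).abs() < 1e-9);
    }

    #[test]
    fn test_complexity_trend_detection() {
        let engine = AnalyticsEngine::new();

        // Fewer than three thoughts: the trend defaults to stable.
        let short = vec![ThoughtData::new("Only thought".to_string(), 1, 1)];
        assert!(matches!(
            engine.analyze_complexity_trend(&short),
            ComplexityTrend::Stable
        ));

        // Thought length grows sharply, so the trend should read as increasing.
        let growing = vec![
            ThoughtData::new("a".repeat(10), 1, 3),
            ThoughtData::new("a".repeat(50), 2, 3),
            ThoughtData::new("a".repeat(100), 3, 3),
        ];
        assert!(matches!(
            engine.analyze_complexity_trend(&growing),
            ComplexityTrend::Increasing
        ));
    }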

    #[test]
    fn test_quality_metrics_calculation() {
        let engine = AnalyticsEngine::new();
        let thoughts = vec![
            ThoughtData::new(
                "This is a well-formed thought with sufficient detail".to_string(),
                1,
                3,
            ),
            ThoughtData::new("Another comprehensive thought".to_string(), 2, 3),
        ];

        let metrics = engine.calculate_quality_metrics(&thoughts);

        assert!(metrics.coherence_score > 0.0);
        assert!(metrics.logical_flow_score > 0.0);
        assert!(metrics.completeness_score > 0.0);
        assert!(metrics.clarity_score > 0.0);
        assert!(metrics.overall_quality_score > 0.0);
    }
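
    // Illustrative end-to-end sketch: analyze a small session and check that
    // the report is stored and the aggregator is updated. The constructors
    // used here mirror the ones in the tests above.
    #[test]
    fn test_analyze_session_stores_report_and_updates_aggregator() {
        let mut engine = AnalyticsEngine::new();
        let thoughts = vec![
            ThoughtData::new("Outline the problem in detail".to_string(), 1, 2),
            ThoughtData::new("Propose and evaluate a solution".to_string(), 2, 2),
        ];
        let stats = ThinkingStats {
            total_thoughts: thoughts.len() as u64,
            total_thought_length: thoughts.iter().map(|t| t.thought.len() as u64).sum(),
            ..ThinkingStats::default()
        };
        let progress = ThinkingProgress::new(2, 2);

        let report =
            engine.analyze_session("session-1", "Example session", &thoughts, &stats, &progress);

        assert_eq!(report.session_id, "session-1");
        assert!(engine.get_session_analytics("session-1").is_some());
        assert_eq!(engine.get_aggregated_metrics().total_sessions, 1);

        // The exported JSON bundles per-session data with the aggregates.
        let exported = engine.export_analytics();
        assert!(exported.get("analytics_data").is_some());
        assert!(exported.get("aggregated_metrics").is_some());
    }

    #[test]
    fn test_short_thoughts_flagged_as_quality_issues() {
        let engine = AnalyticsEngine::new();
        let thoughts = vec![ThoughtData::new("Too short".to_string(), 1, 1)];

        let issues = engine.identify_quality_issues(&thoughts);

        assert_eq!(issues.len(), 1);
        assert_eq!(issues[0].issue_type, "short_thought");
        assert!(matches!(issues[0].severity, Severity::Minor));
    }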
}