1use crate::{
14 core::{GraphRAGError, KnowledgeGraph, Result},
15 ollama::OllamaClient,
16};
17use serde::{Deserialize, Serialize};
18use std::collections::HashMap;
19
/// A single recorded iteration of the weight-optimization loop: the three
/// per-objective quality scores, their weighted combination, and a snapshot
/// of the graph's relationship confidences at that point.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationStep {
    /// Zero-based index of the optimization iteration this step records.
    pub iteration: usize,
    /// How well retrieved information matches the query (0.0-1.0).
    pub relevance_score: f32,
    /// How accurate the information is versus the expected answer (0.0-1.0).
    pub faithfulness_score: f32,
    /// How focused / non-redundant the information is (0.0-1.0).
    pub conciseness_score: f32,
    /// Weighted sum of the three scores; recomputed by `calculate_combined`.
    pub combined_score: f32,
    /// Per-relationship confidence at this step, keyed by
    /// "`source`_`target`" entity ids (see `snapshot_weights`).
    pub weights_snapshot: HashMap<String, f32>,
}
41
42impl OptimizationStep {
43 pub fn new(iteration: usize) -> Self {
45 Self {
46 iteration,
47 relevance_score: 0.0,
48 faithfulness_score: 0.0,
49 conciseness_score: 0.0,
50 combined_score: 0.0,
51 weights_snapshot: HashMap::new(),
52 }
53 }
54
55 pub fn calculate_combined(&mut self, weights: &ObjectiveWeights) {
57 self.combined_score = self.relevance_score * weights.relevance
58 + self.faithfulness_score * weights.faithfulness
59 + self.conciseness_score * weights.conciseness;
60 }
61}
62
/// Relative importance of the three optimization objectives. Intended to be
/// kept normalized so the three weights sum to 1.0 (see `normalize`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ObjectiveWeights {
    /// Weight applied to the relevance score.
    pub relevance: f32,
    /// Weight applied to the faithfulness score.
    pub faithfulness: f32,
    /// Weight applied to the conciseness score.
    pub conciseness: f32,
}
75
76impl Default for ObjectiveWeights {
77 fn default() -> Self {
78 Self {
79 relevance: 0.4,
80 faithfulness: 0.4,
81 conciseness: 0.2,
82 }
83 }
84}
85
86impl ObjectiveWeights {
87 pub fn normalize(&mut self) {
89 let sum = self.relevance + self.faithfulness + self.conciseness;
90 if sum > 0.0 {
91 self.relevance /= sum;
92 self.faithfulness /= sum;
93 self.conciseness /= sum;
94 }
95 }
96
97 pub fn boost_objective(&mut self, objective: &str, boost: f32) {
99 match objective {
100 "relevance" => self.relevance += boost,
101 "faithfulness" => self.faithfulness += boost,
102 "conciseness" => self.conciseness += boost,
103 _ => {},
104 }
105 self.normalize();
106 }
107}
108
/// A query / expected-answer pair used to score graph quality during
/// optimization.
#[derive(Debug, Clone)]
pub struct TestQuery {
    /// The natural-language query to evaluate against the graph.
    pub query: String,
    /// The answer the retrieval is expected to support.
    pub expected_answer: String,
    /// Relative importance of this query when averaging metrics
    /// (defaults to 1.0).
    pub weight: f32,
}
121
122impl TestQuery {
123 pub fn new(query: String, expected_answer: String) -> Self {
125 Self {
126 query,
127 expected_answer,
128 weight: 1.0,
129 }
130 }
131
132 pub fn with_weight(mut self, weight: f32) -> Self {
134 self.weight = weight;
135 self
136 }
137}
138
/// Tunable parameters for [`GraphWeightOptimizer`].
#[derive(Debug, Clone)]
pub struct OptimizerConfig {
    /// Step size used when scaling relationship confidences.
    pub learning_rate: f32,
    /// Maximum number of optimization iterations.
    pub max_iterations: usize,
    /// Number of recent steps used when estimating metric slopes.
    pub slope_window: usize,
    /// Slope magnitude below which a metric is considered stagnant.
    pub stagnation_threshold: f32,
    /// Initial relative weights of the three objectives.
    pub objective_weights: ObjectiveWeights,
    /// Prefer LLM-based evaluation when an Ollama client is attached;
    /// heuristics are used otherwise.
    pub use_llm_eval: bool,
}
160
161impl Default for OptimizerConfig {
162 fn default() -> Self {
163 Self {
164 learning_rate: 0.1,
165 max_iterations: 20,
166 slope_window: 3,
167 stagnation_threshold: 0.01,
168 objective_weights: ObjectiveWeights::default(),
169 use_llm_eval: true,
170 }
171 }
172}
173
/// Iteratively tunes knowledge-graph relationship confidences against a set
/// of test queries, recording per-iteration metrics and adapting objective
/// weights when progress stagnates.
pub struct GraphWeightOptimizer {
    /// Optimization parameters.
    config: OptimizerConfig,
    /// One entry per completed iteration, oldest first.
    history: Vec<OptimizationStep>,
    /// Optional LLM client used for quality evaluation.
    ollama_client: Option<OllamaClient>,
    /// Objective weights currently in effect; may drift from the configured
    /// ones via stagnation boosts.
    current_weights: ObjectiveWeights,
}
188
impl GraphWeightOptimizer {
    /// Creates an optimizer with default configuration and no LLM client
    /// attached.
    pub fn new() -> Self {
        Self {
            config: OptimizerConfig::default(),
            history: Vec::new(),
            ollama_client: None,
            current_weights: ObjectiveWeights::default(),
        }
    }

    /// Creates an optimizer from an explicit configuration. The working
    /// objective weights start as a copy of `config.objective_weights`.
    pub fn with_config(config: OptimizerConfig) -> Self {
        let current_weights = config.objective_weights.clone();
        Self {
            config,
            history: Vec::new(),
            ollama_client: None,
            current_weights,
        }
    }

    /// Attaches an Ollama client so LLM-based evaluation can be used when
    /// `config.use_llm_eval` is true.
    pub fn with_ollama_client(mut self, client: OllamaClient) -> Self {
        self.ollama_client = Some(client);
        self
    }

    /// Runs the optimization loop against `graph`.
    ///
    /// Each iteration evaluates graph quality on `test_queries`, records an
    /// [`OptimizationStep`] in the history, boosts objective weights for
    /// metrics whose slope has stagnated, then recomputes relationship
    /// confidence adjustments. Stops early once all three metrics exceed
    /// 0.95.
    ///
    /// # Errors
    ///
    /// Returns `GraphRAGError::Config` when `test_queries` is empty, and
    /// propagates any evaluation failure.
    #[cfg(feature = "async")]
    pub async fn optimize_weights(
        &mut self,
        graph: &mut KnowledgeGraph,
        test_queries: &[TestQuery],
    ) -> Result<()> {
        if test_queries.is_empty() {
            return Err(GraphRAGError::Config {
                message: "No test queries provided for optimization".to_string(),
            });
        }

        #[cfg(feature = "tracing")]
        tracing::info!(
            max_iterations = self.config.max_iterations,
            num_queries = test_queries.len(),
            "Starting graph weight optimization"
        );

        for iteration in 0..self.config.max_iterations {
            let mut step = OptimizationStep::new(iteration);

            // Score the current graph state on all three objectives.
            let metrics = self.evaluate_graph_quality(graph, test_queries).await?;
            step.relevance_score = metrics.0;
            step.faithfulness_score = metrics.1;
            step.conciseness_score = metrics.2;

            step.calculate_combined(&self.current_weights);

            step.weights_snapshot = self.snapshot_weights(graph);

            self.history.push(step.clone());

            #[cfg(feature = "tracing")]
            tracing::info!(
                iteration = iteration,
                relevance = step.relevance_score,
                faithfulness = step.faithfulness_score,
                conciseness = step.conciseness_score,
                combined = step.combined_score,
                "Optimization iteration complete"
            );

            // Stagnation can only be assessed once a full slope window of
            // history exists.
            if iteration >= self.config.slope_window {
                self.detect_and_adjust_stagnation();
            }

            // Early stop when every metric is effectively saturated.
            if step.relevance_score > 0.95
                && step.faithfulness_score > 0.95
                && step.conciseness_score > 0.95
            {
                #[cfg(feature = "tracing")]
                tracing::info!("Early stopping: all metrics excellent");
                break;
            }

            // No point adjusting after the final evaluation.
            if iteration < self.config.max_iterations - 1 {
                self.adjust_graph_weights(graph, test_queries, &step)
                    .await?;
            }
        }

        #[cfg(feature = "tracing")]
        tracing::info!(
            iterations = self.history.len(),
            final_score = self.history.last().map(|s| s.combined_score).unwrap_or(0.0),
            "Optimization complete"
        );

        Ok(())
    }

    /// Computes the weight-averaged (relevance, faithfulness, conciseness)
    /// scores over all test queries. Uses the LLM evaluator when enabled and
    /// a client is attached; otherwise falls back to heuristics. Returns
    /// (0.0, 0.0, 0.0) when the total query weight is not positive.
    #[cfg(feature = "async")]
    async fn evaluate_graph_quality(
        &self,
        graph: &KnowledgeGraph,
        test_queries: &[TestQuery],
    ) -> Result<(f32, f32, f32)> {
        let mut total_relevance = 0.0;
        let mut total_faithfulness = 0.0;
        let mut total_conciseness = 0.0;
        let mut total_weight = 0.0;

        for test_query in test_queries {
            let (relevance, faithfulness, conciseness) =
                if self.config.use_llm_eval && self.ollama_client.is_some() {
                    self.evaluate_with_llm(graph, test_query).await?
                } else {
                    self.evaluate_with_heuristics(graph, test_query)?
                };

            // Per-query weights let more important queries dominate the mean.
            total_relevance += relevance * test_query.weight;
            total_faithfulness += faithfulness * test_query.weight;
            total_conciseness += conciseness * test_query.weight;
            total_weight += test_query.weight;
        }

        if total_weight > 0.0 {
            Ok((
                total_relevance / total_weight,
                total_faithfulness / total_weight,
                total_conciseness / total_weight,
            ))
        } else {
            Ok((0.0, 0.0, 0.0))
        }
    }

    /// Token-overlap heuristics used when no LLM evaluator is available:
    ///
    /// - Relevance: fraction of entities whose name contains a query token
    ///   (denominator capped at 10, result clamped to 1.0).
    /// - Faithfulness: fraction of expected-answer tokens found in entity
    ///   names/types or relationship types.
    /// - Conciseness: blend of mean relationship confidence (70%) and a
    ///   size penalty that saturates at 100 relationships (30%).
    fn evaluate_with_heuristics(
        &self,
        graph: &KnowledgeGraph,
        test_query: &TestQuery,
    ) -> Result<(f32, f32, f32)> {
        // Tokens of <= 2 chars are mostly stop words; drop them.
        let query_tokens: Vec<String> = test_query
            .query
            .to_lowercase()
            .split_whitespace()
            .filter(|t| t.len() > 2)
            .map(|s| s.to_string())
            .collect();

        let answer_tokens: Vec<String> = test_query
            .expected_answer
            .to_lowercase()
            .split_whitespace()
            .map(|s| s.to_string())
            .collect();

        let mut matching_entities = 0;
        let mut total_entities = 0;

        // Count entities whose lowercase name contains any query token.
        for entity in graph.entities() {
            total_entities += 1;
            let entity_name_lower = entity.name.to_lowercase();

            if query_tokens
                .iter()
                .any(|token| entity_name_lower.contains(token))
            {
                matching_entities += 1;
            }
        }

        // Cap the denominator at 10 so large graphs are not unfairly
        // penalized; clamp to keep the ratio within [0, 1].
        let relevance = if total_entities > 0 {
            (matching_entities as f32 / total_entities.min(10) as f32).min(1.0)
        } else {
            0.0
        };

        let mut answer_token_found = 0;

        // A token "supports" the answer if it appears in any entity name,
        // entity type, or relationship type.
        for token in &answer_tokens {
            let found_in_graph = graph.entities().any(|e| {
                e.name.to_lowercase().contains(token)
                    || e.entity_type.to_lowercase().contains(token)
            }) || graph
                .get_all_relationships()
                .iter()
                .any(|r| r.relation_type.to_lowercase().contains(token));

            if found_in_graph {
                answer_token_found += 1;
            }
        }

        let faithfulness = if !answer_tokens.is_empty() {
            answer_token_found as f32 / answer_tokens.len() as f32
        } else {
            // No expected-answer tokens to compare against: neutral score.
            0.5
        };

        // Mean confidence across relationships; max(1) guards division by
        // zero on an empty graph.
        let avg_confidence: f32 = graph
            .get_all_relationships()
            .iter()
            .map(|r| r.confidence)
            .sum::<f32>()
            / graph.get_all_relationships().len().max(1) as f32;

        // 100+ relationships counts as maximally complex.
        let complexity_penalty = (graph.get_all_relationships().len() as f32 / 100.0).min(1.0);
        let conciseness = (avg_confidence * 0.7) + ((1.0 - complexity_penalty) * 0.3);

        Ok((relevance, faithfulness, conciseness))
    }

    /// Asks the attached Ollama model to score retrieval quality for one
    /// test query, feeding it a compact context of the top-matching entities
    /// and relationships and expecting a JSON reply with three 0.0-1.0
    /// scores.
    ///
    /// # Errors
    ///
    /// Returns a configuration error when no client is attached, or a
    /// language-model error when generation fails.
    #[cfg(feature = "async")]
    async fn evaluate_with_llm(
        &self,
        graph: &KnowledgeGraph,
        test_query: &TestQuery,
    ) -> Result<(f32, f32, f32)> {
        let ollama_client = self
            .ollama_client
            .as_ref()
            .ok_or_else(|| GraphRAGError::Config {
                message: "LLM evaluation requested but no Ollama client available".to_string(),
            })?;

        let context = self.build_graph_context(graph, &test_query.query, 5);

        let prompt = format!(
            "Evaluate the quality of information retrieval for this query.\n\n\
             Query: {}\n\
             Expected Answer: {}\n\n\
             Retrieved Information:\n{}\n\n\
             Please evaluate on three dimensions (0.0-1.0 scale):\n\
             1. Relevance: How well does the retrieved information match the query?\n\
             2. Faithfulness: How accurate is the information compared to the expected answer?\n\
             3. Conciseness: How focused and non-redundant is the information?\n\n\
             Respond with JSON format:\n\
             {{\"relevance\": 0.8, \"faithfulness\": 0.7, \"conciseness\": 0.9}}",
            test_query.query, test_query.expected_answer, context
        );

        let response =
            ollama_client
                .generate(&prompt)
                .await
                .map_err(|e| GraphRAGError::LanguageModel {
                    message: format!("LLM evaluation failed: {}", e),
                })?;

        self.parse_llm_evaluation(&response)
    }

    /// Builds a short textual context: up to `top_k` entities whose names
    /// contain a query token (ranked by number of token hits), plus the
    /// first `top_k` relationships in the graph.
    fn build_graph_context(&self, graph: &KnowledgeGraph, query: &str, top_k: usize) -> String {
        let query_lower = query.to_lowercase();
        let query_tokens: Vec<_> = query_lower.split_whitespace().collect();

        // Score each entity by how many query tokens its name contains,
        // keeping only entities with at least one hit.
        let mut entity_scores: Vec<_> = graph
            .entities()
            .map(|e| {
                let name_lower = e.name.to_lowercase();
                let score = query_tokens
                    .iter()
                    .filter(|&&token| name_lower.contains(token))
                    .count();
                (e, score)
            })
            .filter(|(_, score)| *score > 0)
            .collect();

        // Highest-scoring entities first.
        entity_scores.sort_by(|a, b| b.1.cmp(&a.1));

        let mut context = String::new();
        context.push_str("Entities:\n");
        for (entity, _) in entity_scores.iter().take(top_k) {
            context.push_str(&format!("- {} ({})\n", entity.name, entity.entity_type));
        }

        // NOTE(review): relationships are NOT ranked against the query —
        // the first `top_k` in storage order are used. Confirm this is
        // intentional.
        context.push_str("\nRelationships:\n");
        for rel in graph.get_all_relationships().iter().take(top_k) {
            context.push_str(&format!(
                "- {} --[{}]--> {}\n",
                rel.source.0, rel.relation_type, rel.target.0
            ));
        }

        context
    }

    /// Extracts the outermost `{...}` span from an LLM response and parses
    /// the three scores, clamping each to [0.0, 1.0]. Missing fields, or an
    /// unparseable response, fall back to neutral 0.5 scores.
    fn parse_llm_evaluation(&self, response: &str) -> Result<(f32, f32, f32)> {
        // '{' and '}' are ASCII, so these byte indices are valid char
        // boundaries for slicing below.
        let json_start = response.find('{');
        let json_end = response.rfind('}');

        if let (Some(start), Some(end)) = (json_start, json_end) {
            if end > start {
                let json_str = &response[start..=end];
                if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(json_str) {
                    // Absent or non-numeric fields default to neutral 0.5.
                    let relevance = parsed["relevance"].as_f64().unwrap_or(0.5) as f32;
                    let faithfulness = parsed["faithfulness"].as_f64().unwrap_or(0.5) as f32;
                    let conciseness = parsed["conciseness"].as_f64().unwrap_or(0.5) as f32;

                    return Ok((
                        relevance.clamp(0.0, 1.0),
                        faithfulness.clamp(0.0, 1.0),
                        conciseness.clamp(0.0, 1.0),
                    ));
                }
            }
        }

        #[cfg(feature = "tracing")]
        tracing::warn!("Failed to parse LLM evaluation, using default scores");

        Ok((0.5, 0.5, 0.5))
    }

    /// Captures current relationship confidences keyed by
    /// "`source`_`target`" entity ids.
    fn snapshot_weights(&self, graph: &KnowledgeGraph) -> HashMap<String, f32> {
        let mut weights = HashMap::new();

        for rel in graph.get_all_relationships() {
            let key = format!("{}_{}", rel.source.0, rel.target.0);
            weights.insert(key, rel.confidence);
        }

        weights
    }

    /// Checks each metric's recent slope; any metric whose slope magnitude
    /// falls below the stagnation threshold gets its objective weight
    /// boosted by 0.05 (with the weights renormalized by `boost_objective`).
    fn detect_and_adjust_stagnation(&mut self) {
        let window_size = self.config.slope_window;
        let history_len = self.history.len();

        // A slope over `window_size` steps needs window_size + 1 samples.
        if history_len < window_size + 1 {
            return;
        }

        let relevance_slope = self.calculate_slope(window_size, |s| s.relevance_score);
        let faithfulness_slope = self.calculate_slope(window_size, |s| s.faithfulness_score);
        let conciseness_slope = self.calculate_slope(window_size, |s| s.conciseness_score);

        #[cfg(feature = "tracing")]
        tracing::debug!(
            relevance_slope = relevance_slope,
            faithfulness_slope = faithfulness_slope,
            conciseness_slope = conciseness_slope,
            threshold = self.config.stagnation_threshold,
            "Stagnation detection"
        );

        if relevance_slope.abs() < self.config.stagnation_threshold {
            self.current_weights.boost_objective("relevance", 0.05);
            #[cfg(feature = "tracing")]
            tracing::info!("Boosting relevance weight due to stagnation");
        }

        if faithfulness_slope.abs() < self.config.stagnation_threshold {
            self.current_weights.boost_objective("faithfulness", 0.05);
            #[cfg(feature = "tracing")]
            tracing::info!("Boosting faithfulness weight due to stagnation");
        }

        if conciseness_slope.abs() < self.config.stagnation_threshold {
            self.current_weights.boost_objective("conciseness", 0.05);
            #[cfg(feature = "tracing")]
            tracing::info!("Boosting conciseness weight due to stagnation");
        }
    }

    /// Average per-iteration change of a metric over the most recent
    /// `window_size` steps: (newest - oldest) / window_size. Returns 0.0
    /// when there is not enough history.
    fn calculate_slope<F>(&self, window_size: usize, metric_fn: F) -> f32
    where
        F: Fn(&OptimizationStep) -> f32,
    {
        let history_len = self.history.len();
        if history_len < window_size + 1 {
            return 0.0;
        }

        // The window_size + 1 most recent samples, oldest first.
        let recent_steps = &self.history[history_len - window_size - 1..];
        let first_value = metric_fn(&recent_steps[0]);
        let last_value = metric_fn(&recent_steps[window_size]);

        (last_value - first_value) / window_size as f32
    }

    /// Computes adjusted relationship confidences for objectives that scored
    /// below 0.8 this iteration: relationships with embeddings get boosted
    /// for relevance, temporal/causal ones for faithfulness, and everything
    /// is slightly decayed for conciseness; results are clamped to
    /// [0.1, 1.0]. See the NOTE below about the adjustment not being
    /// applied.
    #[cfg(feature = "async")]
    async fn adjust_graph_weights(
        &self,
        graph: &mut KnowledgeGraph,
        _test_queries: &[TestQuery],
        current_step: &OptimizationStep,
    ) -> Result<()> {
        let needs_relevance = current_step.relevance_score < 0.8;
        let needs_faithfulness = current_step.faithfulness_score < 0.8;
        let needs_conciseness = current_step.conciseness_score < 0.8;

        let relationships = graph.get_all_relationships().to_vec();
        for rel in relationships {
            let mut new_confidence = rel.confidence;

            if needs_relevance {
                // Relationships carrying an embedding presumably hold
                // semantic signal useful for relevance — TODO confirm.
                if rel.embedding.is_some() {
                    new_confidence *= 1.0 + self.config.learning_rate * 0.5;
                }
            }

            if needs_faithfulness {
                if rel.temporal_type.is_some() || rel.causal_strength.is_some() {
                    new_confidence *= 1.0 + self.config.learning_rate * 0.3;
                }
            }

            if needs_conciseness {
                new_confidence *= 1.0 - self.config.learning_rate * 0.1;
            }

            new_confidence = new_confidence.clamp(0.1, 1.0);

            // NOTE(review): `new_confidence` is computed but never written
            // back to `graph`, so this entire pass is currently a no-op.
            // Confirm whether KnowledgeGraph exposes an API to update a
            // relationship's confidence and wire it in here.
            let _ = new_confidence;
        }

        Ok(())
    }

    /// All recorded optimization steps, oldest first.
    pub fn history(&self) -> &[OptimizationStep] {
        &self.history
    }

    /// The (relevance, faithfulness, conciseness, combined) scores of the
    /// last recorded step, or `None` when no iterations have run.
    pub fn final_metrics(&self) -> Option<(f32, f32, f32, f32)> {
        self.history.last().map(|step| {
            (
                step.relevance_score,
                step.faithfulness_score,
                step.conciseness_score,
                step.combined_score,
            )
        })
    }

    /// Change in combined score from the first to the last recorded step;
    /// 0.0 when fewer than two steps exist.
    pub fn total_improvement(&self) -> f32 {
        if self.history.len() < 2 {
            return 0.0;
        }

        let first = self.history.first().unwrap().combined_score;
        let last = self.history.last().unwrap().combined_score;
        last - first
    }
}
704
705impl Default for GraphWeightOptimizer {
706 fn default() -> Self {
707 Self::new()
708 }
709}
710
#[cfg(test)]
mod tests {
    use super::*;

    // A fresh step starts at its iteration index with zeroed scores.
    #[test]
    fn test_optimization_step_creation() {
        let step = OptimizationStep::new(0);
        assert_eq!(step.iteration, 0);
        assert_eq!(step.relevance_score, 0.0);
    }

    // normalize() must rescale arbitrary positive weights to sum to 1.0.
    #[test]
    fn test_objective_weights_normalization() {
        let mut weights = ObjectiveWeights {
            relevance: 2.0,
            faithfulness: 2.0,
            conciseness: 2.0,
        };

        weights.normalize();

        let sum = weights.relevance + weights.faithfulness + weights.conciseness;
        assert!((sum - 1.0).abs() < 0.001);
    }

    // Boosting raises the relative share of one objective while keeping the
    // weights normalized.
    #[test]
    fn test_objective_weights_boost() {
        let mut weights = ObjectiveWeights::default();
        let original_relevance = weights.relevance;

        weights.boost_objective("relevance", 0.1);

        assert!(weights.relevance > original_relevance);

        let sum = weights.relevance + weights.faithfulness + weights.conciseness;
        assert!((sum - 1.0).abs() < 0.001);
    }

    // new() defaults the weight to 1.0; with_weight() overrides it.
    #[test]
    fn test_test_query_creation() {
        let query = TestQuery::new("test query".to_string(), "expected".to_string());
        assert_eq!(query.weight, 1.0);

        let weighted = TestQuery::new("test".to_string(), "expected".to_string()).with_weight(2.0);
        assert_eq!(weighted.weight, 2.0);
    }

    // A new optimizer has empty history and the default configuration.
    #[test]
    fn test_optimizer_initialization() {
        let optimizer = GraphWeightOptimizer::new();
        assert_eq!(optimizer.history.len(), 0);
        assert_eq!(optimizer.config.max_iterations, 20);
    }

    // A monotonically increasing metric must yield a positive slope.
    #[test]
    fn test_slope_calculation() {
        let mut optimizer = GraphWeightOptimizer::new();

        // Relevance rises by 0.1 per step.
        for i in 0..5 {
            let mut step = OptimizationStep::new(i);
            step.relevance_score = 0.5 + (i as f32 * 0.1);
            optimizer.history.push(step);
        }

        let slope = optimizer.calculate_slope(3, |s| s.relevance_score);

        assert!(slope > 0.0);
    }

    // combined_score must equal the weighted sum of the three scores.
    #[test]
    fn test_combined_score_calculation() {
        let weights = ObjectiveWeights {
            relevance: 0.5,
            faithfulness: 0.3,
            conciseness: 0.2,
        };

        let mut step = OptimizationStep::new(0);
        step.relevance_score = 0.8;
        step.faithfulness_score = 0.6;
        step.conciseness_score = 0.9;

        step.calculate_combined(&weights);

        let expected = 0.8 * 0.5 + 0.6 * 0.3 + 0.9 * 0.2;
        assert!((step.combined_score - expected).abs() < 0.001);
    }

    // Heuristic evaluation on a tiny two-entity graph: all metrics must be
    // in range, and a query/answer that overlaps the graph must yield
    // non-zero relevance and faithfulness.
    #[test]
    fn test_heuristic_evaluation() {
        use crate::core::{Entity, EntityId, Relationship};

        let mut graph = KnowledgeGraph::new();

        let socrates = Entity {
            id: EntityId("socrates".to_string()),
            name: "Socrates".to_string(),
            entity_type: "PERSON".to_string(),
            confidence: 0.95,
            mentions: vec![],
            embedding: None,
            first_mentioned: None,
            last_mentioned: None,
            temporal_validity: None,
        };

        let philosophy = Entity {
            id: EntityId("philosophy".to_string()),
            name: "Philosophy".to_string(),
            entity_type: "CONCEPT".to_string(),
            confidence: 0.9,
            mentions: vec![],
            embedding: None,
            first_mentioned: None,
            last_mentioned: None,
            temporal_validity: None,
        };

        graph.add_entity(socrates).unwrap();
        graph.add_entity(philosophy).unwrap();

        let rel = Relationship::new(
            EntityId("socrates".to_string()),
            EntityId("philosophy".to_string()),
            "FOUNDED".to_string(),
            0.9,
        );
        graph.add_relationship(rel).unwrap();

        let optimizer = GraphWeightOptimizer::new();

        let query = TestQuery::new(
            "Tell me about Socrates and philosophy".to_string(),
            "Socrates founded philosophy".to_string(),
        );

        let (relevance, faithfulness, conciseness) =
            optimizer.evaluate_with_heuristics(&graph, &query).unwrap();

        // All three metrics must stay within [0, 1].
        assert!(
            relevance >= 0.0 && relevance <= 1.0,
            "Relevance out of range: {}",
            relevance
        );
        assert!(
            faithfulness >= 0.0 && faithfulness <= 1.0,
            "Faithfulness out of range: {}",
            faithfulness
        );
        assert!(
            conciseness >= 0.0 && conciseness <= 1.0,
            "Conciseness out of range: {}",
            conciseness
        );

        // Both entity names appear in the query...
        assert!(
            relevance > 0.0,
            "Should find some relevant entities (relevance={})",
            relevance
        );

        // ...and answer tokens appear in the graph.
        assert!(
            faithfulness > 0.0,
            "Should match expected answer (faithfulness={})",
            faithfulness
        );
    }

    // An empty graph yields zero relevance; the other metrics still fall
    // back to non-negative defaults.
    #[test]
    fn test_heuristic_evaluation_empty_graph() {
        let graph = KnowledgeGraph::new();
        let optimizer = GraphWeightOptimizer::new();

        let query = TestQuery::new("test query".to_string(), "test answer".to_string());

        let (relevance, faithfulness, conciseness) =
            optimizer.evaluate_with_heuristics(&graph, &query).unwrap();

        assert_eq!(relevance, 0.0, "Empty graph should have zero relevance");
        assert!(faithfulness >= 0.0, "Faithfulness should be non-negative");
        assert!(conciseness >= 0.0, "Conciseness should be non-negative");
    }

    // build_graph_context must emit both section headers for a populated
    // graph.
    #[test]
    fn test_graph_context_building() {
        use crate::core::{Entity, EntityId, Relationship};

        let mut graph = KnowledgeGraph::new();

        // Five entities named "Entity 0" .. "Entity 4".
        for i in 0..5 {
            let entity = Entity {
                id: EntityId(format!("entity_{}", i)),
                name: format!("Entity {}", i),
                entity_type: "TEST".to_string(),
                confidence: 0.9,
                mentions: vec![],
                embedding: None,
                first_mentioned: None,
                last_mentioned: None,
                temporal_validity: None,
            };
            graph.add_entity(entity).unwrap();
        }

        // Chain the entities: 0 -> 1 -> 2 -> 3 -> 4.
        for i in 0..4 {
            let rel = Relationship::new(
                EntityId(format!("entity_{}", i)),
                EntityId(format!("entity_{}", i + 1)),
                "RELATES_TO".to_string(),
                0.8,
            );
            graph.add_relationship(rel).unwrap();
        }

        let optimizer = GraphWeightOptimizer::new();
        let context = optimizer.build_graph_context(&graph, "entity 0", 3);

        assert!(
            context.contains("Entities:"),
            "Context should include entities"
        );
        assert!(
            context.contains("Relationships:"),
            "Context should include relationships"
        );
        assert!(context.len() > 0, "Context should not be empty");
    }

    // JSON embedded in surrounding prose must still be extracted and parsed.
    #[test]
    fn test_llm_evaluation_parse_json() {
        let optimizer = GraphWeightOptimizer::new();

        let response = r#"Here is my evaluation:
        {"relevance": 0.8, "faithfulness": 0.7, "conciseness": 0.9}
        That's my assessment."#;

        let (relevance, faithfulness, conciseness) =
            optimizer.parse_llm_evaluation(response).unwrap();

        assert!((relevance - 0.8).abs() < 0.001);
        assert!((faithfulness - 0.7).abs() < 0.001);
        assert!((conciseness - 0.9).abs() < 0.001);
    }

    // A response with no JSON must fall back to neutral 0.5 scores.
    #[test]
    fn test_llm_evaluation_parse_fallback() {
        let optimizer = GraphWeightOptimizer::new();

        let response = "This is not JSON at all";

        let (relevance, faithfulness, conciseness) =
            optimizer.parse_llm_evaluation(response).unwrap();

        assert_eq!(relevance, 0.5);
        assert_eq!(faithfulness, 0.5);
        assert_eq!(conciseness, 0.5);
    }
}