use crate::core::traits::{AsyncLanguageModel, GenerationParams, ModelInfo, ModelUsageStats};
use crate::core::{GraphRAGError, Result};
use crate::generation::LLMInterface;
use crate::text::TextProcessor;
use async_trait::async_trait;
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::RwLock;

/// Mock implementation of an asynchronous language model for testing.
///
/// Produces deterministic, template- and keyword-driven responses without
/// contacting a real LLM backend, and can simulate per-request latency.
#[derive(Debug)]
pub struct AsyncMockLLM {
    // Named response templates keyed by id (e.g. "default", "not_found").
    response_templates: Arc<RwLock<HashMap<String, String>>>,
    // Sentence/keyword utilities used to build extractive answers.
    text_processor: Arc<TextProcessor>,
    // Usage counters; behind an Arc so all clones share one set of stats.
    stats: Arc<AsyncLLMStats>,
    // When Some, each request sleeps this long to mimic network latency.
    simulate_delay: Option<Duration>,
}
25
/// Aggregated usage counters for [`AsyncMockLLM`].
///
/// Counters are lock-free atomics; only the accumulated response time needs
/// an async `RwLock` because `Duration` cannot be updated atomically.
#[derive(Debug, Default)]
struct AsyncLLMStats {
    // Total number of requests, successful or not.
    total_requests: AtomicU64,
    // Tokens counted for successful requests only (see update_stats).
    total_tokens_processed: AtomicU64,
    // Sum of all request durations, including failed requests.
    total_response_time: Arc<RwLock<Duration>>,
    // Number of requests whose generation returned an error.
    error_count: AtomicU64,
}
34
35impl AsyncMockLLM {
36 pub async fn new() -> Result<Self> {
38 let mut templates = HashMap::new();
39
40 templates.insert(
42 "default".to_string(),
43 "Based on the provided context, here is what I found: {context}".to_string(),
44 );
45 templates.insert(
46 "not_found".to_string(),
47 "I could not find specific information about this in the provided context.".to_string(),
48 );
49 templates.insert(
50 "insufficient_context".to_string(),
51 "The available context is insufficient to provide a complete answer.".to_string(),
52 );
53
54 let text_processor = TextProcessor::new(1000, 100)?;
55
56 Ok(Self {
57 response_templates: Arc::new(RwLock::new(templates)),
58 text_processor: Arc::new(text_processor),
59 stats: Arc::new(AsyncLLMStats::default()),
60 simulate_delay: Some(Duration::from_millis(100)), })
62 }
63
64 pub async fn with_templates(templates: HashMap<String, String>) -> Result<Self> {
66 let text_processor = TextProcessor::new(1000, 100)?;
67
68 Ok(Self {
69 response_templates: Arc::new(RwLock::new(templates)),
70 text_processor: Arc::new(text_processor),
71 stats: Arc::new(AsyncLLMStats::default()),
72 simulate_delay: Some(Duration::from_millis(100)),
73 })
74 }
75
76 pub fn set_simulate_delay(&mut self, delay: Option<Duration>) {
78 self.simulate_delay = delay;
79 }
80
81 async fn generate_extractive_answer(&self, context: &str, query: &str) -> Result<String> {
83 if let Some(delay) = self.simulate_delay {
85 tokio::time::sleep(delay).await;
86 }
87
88 let sentences = self.text_processor.extract_sentences(context);
89 if sentences.is_empty() {
90 return Ok("No relevant context found.".to_string());
91 }
92
93 let query_lower = query.to_lowercase();
95 let query_words: Vec<&str> = query_lower
96 .split_whitespace()
97 .filter(|w| w.len() > 2) .collect();
99
100 if query_words.is_empty() {
101 return Ok("Query too short or contains no meaningful words.".to_string());
102 }
103
104 let mut sentence_scores: Vec<(usize, f32)> = sentences
105 .iter()
106 .enumerate()
107 .map(|(i, sentence)| {
108 let sentence_lower = sentence.to_lowercase();
109 let mut total_score = 0.0;
110 let mut matches = 0;
111
112 for word in &query_words {
113 if sentence_lower.contains(word) {
115 total_score += 2.0;
116 matches += 1;
117 }
118 else if word.len() > 4 {
120 for sentence_word in sentence_lower.split_whitespace() {
121 if sentence_word.contains(word) || word.contains(sentence_word) {
122 total_score += 1.0;
123 matches += 1;
124 break;
125 }
126 }
127 }
128 }
129
130 let coverage_bonus = (matches as f32 / query_words.len() as f32) * 0.5;
132 let final_score = total_score + coverage_bonus;
133
134 (i, final_score)
135 })
136 .collect();
137
138 sentence_scores.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
140
141 let mut answer_sentences = Vec::new();
143 for (idx, score) in sentence_scores.iter().take(5) {
144 if *score > 0.5 {
145 answer_sentences.push(format!(
147 "{} (relevance: {:.1})",
148 sentences[*idx].trim(),
149 score
150 ));
151 }
152 }
153
154 if answer_sentences.is_empty() {
155 for (idx, score) in sentence_scores.iter().take(2) {
157 if *score > 0.0 {
158 answer_sentences.push(format!(
159 "{} (low confidence: {:.1})",
160 sentences[*idx].trim(),
161 score
162 ));
163 }
164 }
165 }
166
167 if answer_sentences.is_empty() {
168 Ok("No directly relevant information found in the context.".to_string())
169 } else {
170 Ok(answer_sentences.join("\n\n"))
171 }
172 }
173
174 async fn generate_smart_answer(&self, context: &str, question: &str) -> Result<String> {
176 let extractive_result = self.generate_extractive_answer(context, question).await?;
178
179 if extractive_result.contains("No relevant") || extractive_result.contains("No directly") {
181 return self.generate_contextual_response(context, question).await;
182 }
183
184 Ok(extractive_result)
185 }
186
187 async fn generate_contextual_response(&self, context: &str, question: &str) -> Result<String> {
189 let question_lower = question.to_lowercase();
190 let context_lower = context.to_lowercase();
191
192 if question_lower.contains("who") && question_lower.contains("friend") {
194 let names = self.extract_character_names(&context_lower).await;
196 if !names.is_empty() {
197 return Ok(format!("Based on the context, the main characters mentioned include: {}. These appear to be friends and companions in the story.", names.join(", ")));
198 }
199 }
200
201 if question_lower.contains("what")
202 && (question_lower.contains("adventure") || question_lower.contains("happen"))
203 {
204 let events = self.extract_key_events(&context_lower).await;
205 if !events.is_empty() {
206 return Ok(format!(
207 "The context describes several events: {}",
208 events.join(", ")
209 ));
210 }
211 }
212
213 if question_lower.contains("where") {
214 let locations = self.extract_locations(&context_lower).await;
215 if !locations.is_empty() {
216 return Ok(format!(
217 "The story takes place in locations such as: {}",
218 locations.join(", ")
219 ));
220 }
221 }
222
223 let summary = self.generate_summary_async(context, 150).await?;
225 Ok(format!("Based on the available context: {summary}"))
226 }
227
228 async fn generate_question_response(&self, question: &str) -> Result<String> {
230 let question_lower = question.to_lowercase();
231
232 if question_lower.contains("friend") || question_lower.contains("relationship") {
234 return Ok("The text describes various character relationships and friendships throughout the narrative.".to_string());
235 }
236
237 if question_lower.contains("main character") || question_lower.contains("protagonist") {
238 return Ok("The text features several important characters who drive the narrative forward.".to_string());
239 }
240
241 if question_lower.contains("event") || question_lower.contains("scene") {
242 return Ok("The text contains various significant events and scenes that advance the story.".to_string());
243 }
244
245 Ok(
246 "I need more specific context to provide a detailed answer to this question."
247 .to_string(),
248 )
249 }
250
251 async fn extract_character_names(&self, text: &str) -> Vec<String> {
253 let mut found_names = Vec::new();
254
255 for word in text.split_whitespace() {
257 let clean_word = word.trim_matches(|c: char| !c.is_alphabetic());
258 if clean_word.len() > 2
259 && clean_word.chars().next().unwrap().is_uppercase()
260 && clean_word.chars().all(|c| c.is_alphabetic())
261 {
262 found_names.push(clean_word.to_lowercase());
263 }
264 }
265
266 found_names
267 }
268
269 async fn extract_key_events(&self, text: &str) -> Vec<String> {
271 let event_keywords = [
272 "adventure",
273 "treasure",
274 "cave",
275 "island",
276 "painting",
277 "school",
278 "church",
279 "graveyard",
280 "river",
281 ];
282 let mut found_events = Vec::new();
283
284 for event in &event_keywords {
285 if text.contains(event) {
286 found_events.push(format!("events involving {event}"));
287 }
288 }
289
290 found_events
291 }
292
293 async fn extract_locations(&self, text: &str) -> Vec<String> {
295 let locations = [
296 "village",
297 "mississippi",
298 "river",
299 "cave",
300 "island",
301 "town",
302 "church",
303 "school",
304 "house",
305 ];
306 let mut found_locations = Vec::new();
307
308 for location in &locations {
309 if text.contains(location) {
310 found_locations.push(location.to_string());
311 }
312 }
313
314 found_locations
315 }
316
317 async fn generate_summary_async(&self, content: &str, max_length: usize) -> Result<String> {
319 let sentences = self.text_processor.extract_sentences(content);
320 if sentences.is_empty() {
321 return Ok(String::new());
322 }
323
324 let mut summary = String::new();
325 for sentence in sentences.iter().take(3) {
326 if summary.len() + sentence.len() > max_length {
327 break;
328 }
329 if !summary.is_empty() {
330 summary.push(' ');
331 }
332 summary.push_str(sentence);
333 }
334
335 Ok(summary)
336 }
337
338 async fn update_stats(&self, tokens: usize, response_time: Duration, is_error: bool) {
340 self.stats.total_requests.fetch_add(1, Ordering::Relaxed);
341
342 if is_error {
343 self.stats.error_count.fetch_add(1, Ordering::Relaxed);
344 } else {
345 self.stats
346 .total_tokens_processed
347 .fetch_add(tokens as u64, Ordering::Relaxed);
348 }
349
350 let mut total_time = self.stats.total_response_time.write().await;
351 *total_time += response_time;
352 }
353}
354
355#[async_trait]
356impl AsyncLanguageModel for AsyncMockLLM {
357 type Error = GraphRAGError;
358
359 async fn complete(&self, prompt: &str) -> Result<String> {
360 let start_time = Instant::now();
361
362 if let Some(delay) = self.simulate_delay {
364 tokio::time::sleep(delay).await;
365 }
366
367 let result = self.generate_response_internal(prompt).await;
368 let response_time = start_time.elapsed();
369
370 let tokens = prompt.len() / 4;
372 self.update_stats(tokens, response_time, result.is_err())
373 .await;
374
375 result
376 }
377
378 async fn complete_with_params(
379 &self,
380 prompt: &str,
381 _params: GenerationParams,
382 ) -> Result<String> {
383 self.complete(prompt).await
385 }
386
387 async fn complete_batch(&self, prompts: &[&str]) -> Result<Vec<String>> {
388 let mut handles = Vec::new();
390
391 for prompt in prompts {
392 let prompt_owned = prompt.to_string();
393 let self_clone = self.clone();
394 handles.push(tokio::spawn(async move {
395 self_clone.complete(&prompt_owned).await
396 }));
397 }
398
399 let mut results = Vec::with_capacity(prompts.len());
400 for handle in handles {
401 match handle.await {
402 Ok(result) => results.push(result?),
403 Err(e) => {
404 return Err(GraphRAGError::Generation {
405 message: format!("Task join error: {e}"),
406 })
407 }
408 }
409 }
410
411 Ok(results)
412 }
413
414 async fn is_available(&self) -> bool {
415 true
416 }
417
418 async fn model_info(&self) -> ModelInfo {
419 ModelInfo {
420 name: "AsyncMockLLM".to_string(),
421 version: Some("1.0.0".to_string()),
422 max_context_length: Some(4096),
423 supports_streaming: true,
424 }
425 }
426
427 async fn get_usage_stats(&self) -> Result<ModelUsageStats> {
428 let total_requests = self.stats.total_requests.load(Ordering::Relaxed);
429 let total_tokens = self.stats.total_tokens_processed.load(Ordering::Relaxed);
430 let error_count = self.stats.error_count.load(Ordering::Relaxed);
431 let total_time = *self.stats.total_response_time.read().await;
432
433 let average_response_time_ms = if total_requests > 0 {
434 total_time.as_millis() as f64 / total_requests as f64
435 } else {
436 0.0
437 };
438
439 let error_rate = if total_requests > 0 {
440 error_count as f64 / total_requests as f64
441 } else {
442 0.0
443 };
444
445 Ok(ModelUsageStats {
446 total_requests,
447 total_tokens_processed: total_tokens,
448 average_response_time_ms,
449 error_rate,
450 })
451 }
452
453 async fn estimate_tokens(&self, prompt: &str) -> Result<usize> {
454 Ok(prompt.len() / 4)
456 }
457}
458
459impl AsyncMockLLM {
460 async fn generate_response_internal(&self, prompt: &str) -> Result<String> {
462 let prompt_lower = prompt.to_lowercase();
463
464 if prompt_lower.contains("context:") && prompt_lower.contains("question:") {
466 if let Some(context_start) = prompt.find("Context:") {
467 let context_section = &prompt[context_start + 8..];
468 if let Some(question_start) = context_section.find("Question:") {
469 let context = context_section[..question_start].trim();
470 let question_section = context_section[question_start + 9..].trim();
471
472 return self.generate_smart_answer(context, question_section).await;
473 }
474 }
475 }
476
477 if prompt_lower.contains("who")
479 || prompt_lower.contains("what")
480 || prompt_lower.contains("where")
481 || prompt_lower.contains("when")
482 || prompt_lower.contains("how")
483 || prompt_lower.contains("why")
484 {
485 return self.generate_question_response(prompt).await;
486 }
487
488 let templates = self.response_templates.read().await;
490 Ok(templates
491 .get("default")
492 .unwrap_or(&"I cannot provide a response based on the given prompt.".to_string())
493 .replace("{context}", &prompt[..prompt.len().min(200)]))
494 }
495}
496
497impl Clone for AsyncMockLLM {
499 fn clone(&self) -> Self {
500 Self {
501 response_templates: Arc::clone(&self.response_templates),
502 text_processor: Arc::clone(&self.text_processor),
503 stats: Arc::clone(&self.stats),
504 simulate_delay: self.simulate_delay,
505 }
506 }
507}
508
#[async_trait]
impl LLMInterface for AsyncMockLLM {
    /// Synchronous bridge to the async `complete`.
    ///
    /// Inside an existing tokio runtime it uses `block_in_place` +
    /// `block_on`; otherwise it spins up a throwaway runtime for this call.
    /// NOTE(review): `block_in_place` panics on a current-thread tokio
    /// runtime — this path assumes a multi-threaded runtime; verify callers.
    fn generate_response(&self, prompt: &str) -> Result<String> {
        if tokio::runtime::Handle::try_current().is_ok() {
            tokio::task::block_in_place(|| {
                tokio::runtime::Handle::current().block_on(self.complete(prompt))
            })
        } else {
            // No ambient runtime: creating one per call is costly but keeps
            // the sync interface self-contained.
            let rt = tokio::runtime::Runtime::new().map_err(|e| GraphRAGError::Generation {
                message: format!("Failed to create async runtime: {e}"),
            })?;
            rt.block_on(self.complete(prompt))
        }
    }

    /// Synchronous bridge to `generate_summary_async`; same runtime
    /// strategy (and current-thread-runtime caveat) as `generate_response`.
    fn generate_summary(&self, content: &str, max_length: usize) -> Result<String> {
        if tokio::runtime::Handle::try_current().is_ok() {
            tokio::task::block_in_place(|| {
                tokio::runtime::Handle::current()
                    .block_on(self.generate_summary_async(content, max_length))
            })
        } else {
            let rt = tokio::runtime::Runtime::new().map_err(|e| GraphRAGError::Generation {
                message: format!("Failed to create async runtime: {e}"),
            })?;
            rt.block_on(self.generate_summary_async(content, max_length))
        }
    }

    /// Returns up to `num_points` key points: for each extracted keyword,
    /// the first sentence containing it, or a "Key concept" placeholder if
    /// no sentence matches.
    fn extract_key_points(&self, content: &str, num_points: usize) -> Result<Vec<String>> {
        // Twice as many keywords are requested as are used; only the first
        // `num_points` are consumed below.
        let keywords = self
            .text_processor
            .extract_keywords(content, num_points * 2);
        let sentences = self.text_processor.extract_sentences(content);

        let mut key_points = Vec::new();
        for keyword in keywords.iter().take(num_points) {
            if let Some(sentence) = sentences
                .iter()
                .find(|s| s.to_lowercase().contains(&keyword.to_lowercase()))
            {
                key_points.push(sentence.clone());
            } else {
                key_points.push(format!("Key concept: {keyword}"));
            }
        }

        Ok(key_points)
    }
}
563
#[cfg(test)]
mod tests {
    use super::*;

    /// Constructing the mock with its default templates succeeds.
    #[tokio::test]
    async fn test_async_mock_llm_creation() {
        assert!(AsyncMockLLM::new().await.is_ok());
    }

    /// A simple prompt yields a response without error.
    #[tokio::test]
    async fn test_async_completion() {
        let model = AsyncMockLLM::new().await.unwrap();
        assert!(model.complete("Hello, world!").await.is_ok());
    }

    /// Batch completion returns one response per prompt.
    #[tokio::test]
    async fn test_async_batch_completion() {
        let model = AsyncMockLLM::new().await.unwrap();
        let prompts = vec!["Hello", "World", "Test"];

        let batch = model.complete_batch(&prompts).await;

        assert!(batch.is_ok());
        assert_eq!(batch.unwrap().len(), 3);
    }

    /// Usage stats reflect the number of completed requests, and the
    /// simulated delay guarantees a non-zero average response time.
    #[tokio::test]
    async fn test_async_usage_stats() {
        let model = AsyncMockLLM::new().await.unwrap();

        let _ = model.complete("Test prompt 1").await;
        let _ = model.complete("Test prompt 2").await;

        let stats = model.get_usage_stats().await.unwrap();
        assert_eq!(stats.total_requests, 2);
        assert!(stats.average_response_time_ms > 0.0);
    }

    /// The mock always reports itself as available.
    #[tokio::test]
    async fn test_async_model_availability() {
        let model = AsyncMockLLM::new().await.unwrap();
        assert!(model.is_available().await);
    }

    /// Model metadata matches the hard-coded values.
    #[tokio::test]
    async fn test_async_model_info() {
        let model = AsyncMockLLM::new().await.unwrap();

        let info = model.model_info().await;

        assert_eq!(info.name, "AsyncMockLLM");
        assert_eq!(info.version, Some("1.0.0".to_string()));
        assert!(info.supports_streaming);
    }

    /// A non-empty prompt estimates to at least one token.
    #[tokio::test]
    async fn test_token_estimation() {
        let model = AsyncMockLLM::new().await.unwrap();
        let tokens = model.estimate_tokens("This is a test prompt").await.unwrap();
        assert!(tokens > 0);
    }
}