// mockforge_data/intelligent_mock.rs

//! Intelligent mock generation using LLMs
//!
//! This module provides AI-driven mock data generation that goes beyond static templates,
//! allowing users to define intent instead of explicit examples.

use crate::rag::{RagConfig, RagEngine};
use mockforge_core::{Error, Result};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
11
12/// Response generation mode
13#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
14#[serde(rename_all = "lowercase")]
15pub enum ResponseMode {
16    /// Static response with templates
17    Static,
18    /// Intelligent response using LLM
19    Intelligent,
20    /// Hybrid mode - use templates with LLM enhancement
21    Hybrid,
22}
23
24impl Default for ResponseMode {
25    fn default() -> Self {
26        Self::Static
27    }
28}
29
/// Intelligent mock configuration
///
/// Controls how [`IntelligentMockGenerator`] produces responses. Build with
/// [`IntelligentMockConfig::new`] plus the `with_*` builder methods, then
/// check it with [`IntelligentMockConfig::validate`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IntelligentMockConfig {
    /// Response generation mode
    pub mode: ResponseMode,
    /// Intent/prompt for LLM-based generation; `validate` requires this for
    /// `Intelligent` and `Hybrid` modes
    pub prompt: Option<String>,
    /// Context for generation (e.g., schema, domain knowledge)
    pub context: Option<String>,
    /// Number of examples to generate
    // NOTE(review): not read by IntelligentMockGenerator itself — callers pass
    // an explicit count to `generate_batch`; confirm intended use.
    pub count: usize,
    /// Schema to conform to (JSON Schema format)
    pub schema: Option<Value>,
    /// Additional constraints
    pub constraints: HashMap<String, Value>,
    /// Temperature for LLM (0.0 to 2.0); the range is enforced by `validate`
    pub temperature: Option<f32>,
    /// Enable caching for repeated requests
    pub cache_enabled: bool,
    /// RAG configuration
    pub rag_config: Option<RagConfig>,
}
52
53impl Default for IntelligentMockConfig {
54    fn default() -> Self {
55        Self {
56            mode: ResponseMode::Static,
57            prompt: None,
58            context: None,
59            count: 1,
60            schema: None,
61            constraints: HashMap::new(),
62            temperature: Some(0.7),
63            cache_enabled: true,
64            rag_config: None,
65        }
66    }
67}
68
69impl IntelligentMockConfig {
70    /// Create a new intelligent mock configuration
71    pub fn new(mode: ResponseMode) -> Self {
72        Self {
73            mode,
74            ..Default::default()
75        }
76    }
77
78    /// Set the intent prompt
79    pub fn with_prompt(mut self, prompt: String) -> Self {
80        self.prompt = Some(prompt);
81        self
82    }
83
84    /// Set the context
85    pub fn with_context(mut self, context: String) -> Self {
86        self.context = Some(context);
87        self
88    }
89
90    /// Set the schema
91    pub fn with_schema(mut self, schema: Value) -> Self {
92        self.schema = Some(schema);
93        self
94    }
95
96    /// Set the count
97    pub fn with_count(mut self, count: usize) -> Self {
98        self.count = count;
99        self
100    }
101
102    /// Add a constraint
103    pub fn with_constraint(mut self, key: String, value: Value) -> Self {
104        self.constraints.insert(key, value);
105        self
106    }
107
108    /// Set temperature
109    pub fn with_temperature(mut self, temperature: f32) -> Self {
110        self.temperature = Some(temperature);
111        self
112    }
113
114    /// Set RAG configuration
115    pub fn with_rag_config(mut self, config: RagConfig) -> Self {
116        self.rag_config = Some(config);
117        self
118    }
119
120    /// Validate the configuration
121    pub fn validate(&self) -> Result<()> {
122        if (self.mode == ResponseMode::Intelligent || self.mode == ResponseMode::Hybrid)
123            && self.prompt.is_none()
124        {
125            return Err(Error::generic("Prompt is required for intelligent/hybrid response mode"));
126        }
127
128        if let Some(temp) = self.temperature {
129            if !(0.0..=2.0).contains(&temp) {
130                return Err(Error::generic("Temperature must be between 0.0 and 2.0"));
131            }
132        }
133
134        Ok(())
135    }
136}
137
/// Intelligent mock generator
///
/// Wraps an [`IntelligentMockConfig`] and, for non-static modes, a
/// [`RagEngine`] used to produce LLM-backed responses. Successful LLM
/// results are memoized in `cache` when `config.cache_enabled` is set.
pub struct IntelligentMockGenerator {
    /// Configuration
    config: IntelligentMockConfig,
    /// RAG engine for LLM-based generation; `None` when constructed in
    /// `Static` mode
    rag_engine: Option<RagEngine>,
    /// Response cache, keyed by mode + prompt
    cache: HashMap<String, Value>,
}
147
148impl IntelligentMockGenerator {
149    /// Create a new intelligent mock generator
150    pub fn new(config: IntelligentMockConfig) -> Result<Self> {
151        config.validate()?;
152
153        let rag_engine = if config.mode != ResponseMode::Static {
154            let rag_config = config.rag_config.clone().unwrap_or_default();
155            Some(RagEngine::new(rag_config))
156        } else {
157            None
158        };
159
160        Ok(Self {
161            config,
162            rag_engine,
163            cache: HashMap::new(),
164        })
165    }
166
167    /// Generate a mock response based on the configuration
168    pub async fn generate(&mut self) -> Result<Value> {
169        match self.config.mode {
170            ResponseMode::Static => self.generate_static(),
171            ResponseMode::Intelligent => self.generate_intelligent().await,
172            ResponseMode::Hybrid => self.generate_hybrid().await,
173        }
174    }
175
176    /// Generate a batch of mock responses
177    pub async fn generate_batch(&mut self, count: usize) -> Result<Vec<Value>> {
178        let mut results = Vec::with_capacity(count);
179        for _ in 0..count {
180            let response = self.generate().await?;
181            results.push(response);
182        }
183        Ok(results)
184    }
185
186    /// Generate static response (fallback)
187    fn generate_static(&self) -> Result<Value> {
188        if let Some(schema) = &self.config.schema {
189            Ok(schema.clone())
190        } else {
191            Ok(serde_json::json!({}))
192        }
193    }
194
195    /// Generate intelligent response using LLM
196    async fn generate_intelligent(&mut self) -> Result<Value> {
197        let prompt = self.config.prompt.as_ref().ok_or_else(|| {
198            Error::generic("Prompt is required for intelligent response generation")
199        })?;
200
201        // Check cache first
202        if self.config.cache_enabled {
203            let cache_key = format!("{:?}:{}", self.config.mode, prompt);
204            if let Some(cached) = self.cache.get(&cache_key) {
205                return Ok(cached.clone());
206            }
207        }
208
209        let rag_engine = self
210            .rag_engine
211            .as_mut()
212            .ok_or_else(|| Error::generic("RAG engine not initialized for intelligent mode"))?;
213
214        // Build the generation prompt
215        let mut full_prompt =
216            format!("Generate realistic mock data based on the following intent:\n\n{}\n", prompt);
217
218        if let Some(context) = &self.config.context {
219            full_prompt.push_str(&format!("\nContext: {}\n", context));
220        }
221
222        if let Some(schema) = &self.config.schema {
223            full_prompt.push_str(&format!(
224                "\nConform to this schema:\n{}\n",
225                serde_json::to_string_pretty(schema).unwrap_or_default()
226            ));
227        }
228
229        if !self.config.constraints.is_empty() {
230            full_prompt.push_str("\nAdditional constraints:\n");
231            for (key, value) in &self.config.constraints {
232                full_prompt.push_str(&format!("- {}: {}\n", key, value));
233            }
234        }
235
236        full_prompt.push_str("\nReturn valid JSON only, no additional text.");
237
238        // Generate using RAG engine
239        let response = rag_engine.generate_text(&full_prompt).await?;
240
241        // Parse the response as JSON
242        let json_response = self.extract_json(&response)?;
243
244        // Cache the result
245        if self.config.cache_enabled {
246            let cache_key = format!("{:?}:{}", self.config.mode, prompt);
247            self.cache.insert(cache_key, json_response.clone());
248        }
249
250        Ok(json_response)
251    }
252
253    /// Generate hybrid response (template + LLM enhancement)
254    async fn generate_hybrid(&mut self) -> Result<Value> {
255        // First generate static response
256        let mut base_response = self.generate_static()?;
257
258        // Then enhance with LLM
259        let prompt =
260            self.config.prompt.as_ref().ok_or_else(|| {
261                Error::generic("Prompt is required for hybrid response generation")
262            })?;
263
264        let rag_engine = self
265            .rag_engine
266            .as_mut()
267            .ok_or_else(|| Error::generic("RAG engine not initialized for hybrid mode"))?;
268
269        let enhancement_prompt = format!(
270            "Enhance this mock data based on the intent: {}\n\nCurrent data:\n{}\n\nReturn the enhanced JSON only.",
271            prompt,
272            serde_json::to_string_pretty(&base_response).unwrap_or_default()
273        );
274
275        let response = rag_engine.generate_text(&enhancement_prompt).await?;
276        let enhanced_response = self.extract_json(&response)?;
277
278        // Merge the enhanced response with the base
279        if let (Some(base_obj), Some(enhanced_obj)) =
280            (base_response.as_object_mut(), enhanced_response.as_object())
281        {
282            for (key, value) in enhanced_obj {
283                base_obj.insert(key.clone(), value.clone());
284            }
285        } else {
286            base_response = enhanced_response;
287        }
288
289        Ok(base_response)
290    }
291
292    /// Extract JSON from LLM response (handles markdown code blocks)
293    fn extract_json(&self, response: &str) -> Result<Value> {
294        let trimmed = response.trim();
295
296        // Try to extract from markdown code blocks
297        let json_str = if trimmed.starts_with("```json") {
298            trimmed
299                .strip_prefix("```json")
300                .and_then(|s| s.strip_suffix("```"))
301                .unwrap_or(trimmed)
302                .trim()
303        } else if trimmed.starts_with("```") {
304            trimmed
305                .strip_prefix("```")
306                .and_then(|s| s.strip_suffix("```"))
307                .unwrap_or(trimmed)
308                .trim()
309        } else {
310            trimmed
311        };
312
313        // Parse JSON
314        serde_json::from_str(json_str)
315            .map_err(|e| Error::generic(format!("Failed to parse LLM response as JSON: {}", e)))
316    }
317
318    /// Update configuration
319    pub fn update_config(&mut self, config: IntelligentMockConfig) -> Result<()> {
320        config.validate()?;
321        self.config = config;
322        Ok(())
323    }
324
325    /// Clear the cache
326    pub fn clear_cache(&mut self) {
327        self.cache.clear();
328    }
329
330    /// Get cache size
331    pub fn cache_size(&self) -> usize {
332        self.cache.len()
333    }
334
335    /// Get current configuration
336    pub fn config(&self) -> &IntelligentMockConfig {
337        &self.config
338    }
339}
340
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_response_mode_default() {
        let mode = ResponseMode::default();
        assert_eq!(mode, ResponseMode::Static);
    }

    #[test]
    fn test_intelligent_mock_config_default() {
        let cfg = IntelligentMockConfig::default();
        assert_eq!(cfg.mode, ResponseMode::Static);
        assert_eq!(cfg.count, 1);
        assert!(cfg.cache_enabled);
    }

    #[test]
    fn test_intelligent_mock_config_builder() {
        let cfg = IntelligentMockConfig::new(ResponseMode::Intelligent)
            .with_prompt("Generate customer data".to_string())
            .with_count(10)
            .with_temperature(0.8);

        assert_eq!(cfg.mode, ResponseMode::Intelligent);
        assert_eq!(cfg.prompt, Some("Generate customer data".to_string()));
        assert_eq!(cfg.count, 10);
        assert_eq!(cfg.temperature, Some(0.8));
    }

    #[test]
    fn test_intelligent_mock_config_validate_missing_prompt() {
        // Intelligent mode without a prompt must be rejected.
        let cfg = IntelligentMockConfig::new(ResponseMode::Intelligent);
        assert!(cfg.validate().is_err());
    }

    #[test]
    fn test_intelligent_mock_config_validate_invalid_temperature() {
        // 3.0 is outside the allowed 0.0..=2.0 range.
        let cfg = IntelligentMockConfig::new(ResponseMode::Static).with_temperature(3.0);
        assert!(cfg.validate().is_err());
    }

    #[test]
    fn test_intelligent_mock_config_validate_valid() {
        let cfg = IntelligentMockConfig::new(ResponseMode::Intelligent)
            .with_prompt("Test prompt".to_string());
        assert!(cfg.validate().is_ok());
    }

    #[test]
    fn test_extract_json_plain() {
        let cfg = IntelligentMockConfig::new(ResponseMode::Static);
        let generator = IntelligentMockGenerator::new(cfg).unwrap();

        // Bare JSON with no code fences should parse as-is.
        assert!(generator.extract_json(r#"{"key": "value"}"#).is_ok());
    }

    #[test]
    fn test_extract_json_markdown() {
        let cfg = IntelligentMockConfig::new(ResponseMode::Static);
        let generator = IntelligentMockGenerator::new(cfg).unwrap();

        // JSON wrapped in a ```json fence should be unwrapped and parsed.
        let fenced = "```json\n{\"key\": \"value\"}\n```";
        assert!(generator.extract_json(fenced).is_ok());
    }
}
411}