// mockforge_data/intelligent_mock.rs
1//! Intelligent mock generation using LLMs
2//!
3//! This module provides AI-driven mock data generation that goes beyond static templates,
4//! allowing users to define intent instead of explicit examples.
5
6use crate::rag::{RagConfig, RagEngine};
7use crate::{Error, Result};
8use serde::{Deserialize, Serialize};
9use serde_json::Value;
10use std::collections::HashMap;
11
/// Response generation mode
///
/// Serialized in lowercase on the wire (`"static"`, `"intelligent"`,
/// `"hybrid"`) via the `rename_all` attribute below.
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum ResponseMode {
    /// Static response with templates (no LLM involved; the default)
    #[default]
    Static,
    /// Intelligent response fully generated by an LLM via the RAG engine
    Intelligent,
    /// Hybrid mode - static template output enhanced by the LLM
    Hybrid,
}
24
/// Intelligent mock configuration
///
/// Built fluently via [`IntelligentMockConfig::new`] and the `with_*`
/// builder methods; validated by [`IntelligentMockConfig::validate`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IntelligentMockConfig {
    /// Response generation mode
    pub mode: ResponseMode,
    /// Intent/prompt for LLM-based generation (required for
    /// intelligent/hybrid modes — enforced by `validate`)
    pub prompt: Option<String>,
    /// Context for generation (e.g., schema, domain knowledge);
    /// appended to the LLM prompt when present
    pub context: Option<String>,
    /// Number of examples to generate
    /// NOTE(review): not read by `IntelligentMockGenerator` in this module
    /// (`generate_batch` takes an explicit count) — confirm callers use it
    pub count: usize,
    /// Schema to conform to (JSON Schema format); also used verbatim as
    /// the static-mode response
    pub schema: Option<Value>,
    /// Additional constraints, rendered as bullet points in the LLM prompt
    pub constraints: HashMap<String, Value>,
    /// Temperature for LLM (0.0 to 2.0, checked by `validate`)
    pub temperature: Option<f32>,
    /// Enable caching for repeated requests (keyed by mode + prompt)
    pub cache_enabled: bool,
    /// RAG configuration; defaults are used when `None` and an engine is needed
    pub rag_config: Option<RagConfig>,
}
47
48impl Default for IntelligentMockConfig {
49    fn default() -> Self {
50        Self {
51            mode: ResponseMode::Static,
52            prompt: None,
53            context: None,
54            count: 1,
55            schema: None,
56            constraints: HashMap::new(),
57            temperature: Some(0.7),
58            cache_enabled: true,
59            rag_config: None,
60        }
61    }
62}
63
64impl IntelligentMockConfig {
65    /// Create a new intelligent mock configuration
66    pub fn new(mode: ResponseMode) -> Self {
67        Self {
68            mode,
69            ..Default::default()
70        }
71    }
72
73    /// Set the intent prompt
74    pub fn with_prompt(mut self, prompt: String) -> Self {
75        self.prompt = Some(prompt);
76        self
77    }
78
79    /// Set the context
80    pub fn with_context(mut self, context: String) -> Self {
81        self.context = Some(context);
82        self
83    }
84
85    /// Set the schema
86    pub fn with_schema(mut self, schema: Value) -> Self {
87        self.schema = Some(schema);
88        self
89    }
90
91    /// Set the count
92    pub fn with_count(mut self, count: usize) -> Self {
93        self.count = count;
94        self
95    }
96
97    /// Add a constraint
98    pub fn with_constraint(mut self, key: String, value: Value) -> Self {
99        self.constraints.insert(key, value);
100        self
101    }
102
103    /// Set temperature
104    pub fn with_temperature(mut self, temperature: f32) -> Self {
105        self.temperature = Some(temperature);
106        self
107    }
108
109    /// Set RAG configuration
110    pub fn with_rag_config(mut self, config: RagConfig) -> Self {
111        self.rag_config = Some(config);
112        self
113    }
114
115    /// Validate the configuration
116    pub fn validate(&self) -> Result<()> {
117        if (self.mode == ResponseMode::Intelligent || self.mode == ResponseMode::Hybrid)
118            && self.prompt.is_none()
119        {
120            return Err(Error::generic("Prompt is required for intelligent/hybrid response mode"));
121        }
122
123        if let Some(temp) = self.temperature {
124            if !(0.0..=2.0).contains(&temp) {
125                return Err(Error::generic("Temperature must be between 0.0 and 2.0"));
126            }
127        }
128
129        Ok(())
130    }
131}
132
/// Intelligent mock generator
///
/// Owns the configuration, an optional RAG engine (present only for
/// non-static modes), and an in-memory response cache keyed by
/// `"{mode:?}:{prompt}"`.
pub struct IntelligentMockGenerator {
    /// Configuration
    config: IntelligentMockConfig,
    /// RAG engine for LLM-based generation; `None` in static mode
    rag_engine: Option<RagEngine>,
    /// Response cache, keyed by mode + prompt string
    cache: HashMap<String, Value>,
}
142
143impl IntelligentMockGenerator {
144    /// Create a new intelligent mock generator
145    pub fn new(config: IntelligentMockConfig) -> Result<Self> {
146        config.validate()?;
147
148        let rag_engine = if config.mode != ResponseMode::Static {
149            let rag_config = config.rag_config.clone().unwrap_or_default();
150            Some(RagEngine::new(rag_config))
151        } else {
152            None
153        };
154
155        Ok(Self {
156            config,
157            rag_engine,
158            cache: HashMap::new(),
159        })
160    }
161
162    /// Generate a mock response based on the configuration
163    pub async fn generate(&mut self) -> Result<Value> {
164        match self.config.mode {
165            ResponseMode::Static => self.generate_static(),
166            ResponseMode::Intelligent => self.generate_intelligent().await,
167            ResponseMode::Hybrid => self.generate_hybrid().await,
168        }
169    }
170
171    /// Generate a batch of mock responses
172    pub async fn generate_batch(&mut self, count: usize) -> Result<Vec<Value>> {
173        let mut results = Vec::with_capacity(count);
174        for _ in 0..count {
175            let response = self.generate().await?;
176            results.push(response);
177        }
178        Ok(results)
179    }
180
181    /// Generate static response (fallback)
182    fn generate_static(&self) -> Result<Value> {
183        if let Some(schema) = &self.config.schema {
184            Ok(schema.clone())
185        } else {
186            Ok(serde_json::json!({}))
187        }
188    }
189
190    /// Generate intelligent response using LLM
191    async fn generate_intelligent(&mut self) -> Result<Value> {
192        let prompt = self.config.prompt.as_ref().ok_or_else(|| {
193            Error::generic("Prompt is required for intelligent response generation")
194        })?;
195
196        // Check cache first
197        if self.config.cache_enabled {
198            let cache_key = format!("{:?}:{}", self.config.mode, prompt);
199            if let Some(cached) = self.cache.get(&cache_key) {
200                return Ok(cached.clone());
201            }
202        }
203
204        let rag_engine = self
205            .rag_engine
206            .as_mut()
207            .ok_or_else(|| Error::generic("RAG engine not initialized for intelligent mode"))?;
208
209        // Build the generation prompt
210        let mut full_prompt =
211            format!("Generate realistic mock data based on the following intent:\n\n{}\n", prompt);
212
213        if let Some(context) = &self.config.context {
214            full_prompt.push_str(&format!("\nContext: {}\n", context));
215        }
216
217        if let Some(schema) = &self.config.schema {
218            full_prompt.push_str(&format!(
219                "\nConform to this schema:\n{}\n",
220                serde_json::to_string_pretty(schema).unwrap_or_default()
221            ));
222        }
223
224        if !self.config.constraints.is_empty() {
225            full_prompt.push_str("\nAdditional constraints:\n");
226            for (key, value) in &self.config.constraints {
227                full_prompt.push_str(&format!("- {}: {}\n", key, value));
228            }
229        }
230
231        full_prompt.push_str("\nReturn valid JSON only, no additional text.");
232
233        // Generate using RAG engine
234        let response = rag_engine.generate_text(&full_prompt).await?;
235
236        // Parse the response as JSON
237        let json_response = self.extract_json(&response)?;
238
239        // Cache the result
240        if self.config.cache_enabled {
241            let cache_key = format!("{:?}:{}", self.config.mode, prompt);
242            self.cache.insert(cache_key, json_response.clone());
243        }
244
245        Ok(json_response)
246    }
247
248    /// Generate hybrid response (template + LLM enhancement)
249    async fn generate_hybrid(&mut self) -> Result<Value> {
250        // First generate static response
251        let mut base_response = self.generate_static()?;
252
253        // Then enhance with LLM
254        let prompt =
255            self.config.prompt.as_ref().ok_or_else(|| {
256                Error::generic("Prompt is required for hybrid response generation")
257            })?;
258
259        let rag_engine = self
260            .rag_engine
261            .as_mut()
262            .ok_or_else(|| Error::generic("RAG engine not initialized for hybrid mode"))?;
263
264        let enhancement_prompt = format!(
265            "Enhance this mock data based on the intent: {}\n\nCurrent data:\n{}\n\nReturn the enhanced JSON only.",
266            prompt,
267            serde_json::to_string_pretty(&base_response).unwrap_or_default()
268        );
269
270        let response = rag_engine.generate_text(&enhancement_prompt).await?;
271        let enhanced_response = self.extract_json(&response)?;
272
273        // Merge the enhanced response with the base
274        if let (Some(base_obj), Some(enhanced_obj)) =
275            (base_response.as_object_mut(), enhanced_response.as_object())
276        {
277            for (key, value) in enhanced_obj {
278                base_obj.insert(key.clone(), value.clone());
279            }
280        } else {
281            base_response = enhanced_response;
282        }
283
284        Ok(base_response)
285    }
286
287    /// Extract JSON from LLM response (handles markdown code blocks)
288    fn extract_json(&self, response: &str) -> Result<Value> {
289        let trimmed = response.trim();
290
291        // Try to extract from markdown code blocks
292        let json_str = if trimmed.starts_with("```json") {
293            trimmed
294                .strip_prefix("```json")
295                .and_then(|s| s.strip_suffix("```"))
296                .unwrap_or(trimmed)
297                .trim()
298        } else if trimmed.starts_with("```") {
299            trimmed
300                .strip_prefix("```")
301                .and_then(|s| s.strip_suffix("```"))
302                .unwrap_or(trimmed)
303                .trim()
304        } else {
305            trimmed
306        };
307
308        // Parse JSON
309        serde_json::from_str(json_str)
310            .map_err(|e| Error::generic(format!("Failed to parse LLM response as JSON: {}", e)))
311    }
312
313    /// Update configuration
314    pub fn update_config(&mut self, config: IntelligentMockConfig) -> Result<()> {
315        config.validate()?;
316        self.config = config;
317        Ok(())
318    }
319
320    /// Clear the cache
321    pub fn clear_cache(&mut self) {
322        self.cache.clear();
323    }
324
325    /// Get cache size
326    pub fn cache_size(&self) -> usize {
327        self.cache.len()
328    }
329
330    /// Get current configuration
331    pub fn config(&self) -> &IntelligentMockConfig {
332        &self.config
333    }
334}
335
#[cfg(test)]
mod tests {
    use super::*;

    // Default mode must be Static (the #[default] variant).
    #[test]
    fn test_response_mode_default() {
        assert_eq!(ResponseMode::default(), ResponseMode::Static);
    }

    // Default config: static mode, single item, caching enabled.
    #[test]
    fn test_intelligent_mock_config_default() {
        let config = IntelligentMockConfig::default();
        assert_eq!(config.mode, ResponseMode::Static);
        assert_eq!(config.count, 1);
        assert!(config.cache_enabled);
    }

    // Builder methods should set each field independently.
    #[test]
    fn test_intelligent_mock_config_builder() {
        let config = IntelligentMockConfig::new(ResponseMode::Intelligent)
            .with_prompt("Generate customer data".to_string())
            .with_count(10)
            .with_temperature(0.8);

        assert_eq!(config.mode, ResponseMode::Intelligent);
        assert_eq!(config.prompt, Some("Generate customer data".to_string()));
        assert_eq!(config.count, 10);
        assert_eq!(config.temperature, Some(0.8));
    }

    // Intelligent mode without a prompt must fail validation.
    #[test]
    fn test_intelligent_mock_config_validate_missing_prompt() {
        let config = IntelligentMockConfig::new(ResponseMode::Intelligent);
        assert!(config.validate().is_err());
    }

    // Temperature outside 0.0..=2.0 must fail validation even in static mode.
    #[test]
    fn test_intelligent_mock_config_validate_invalid_temperature() {
        let config = IntelligentMockConfig::new(ResponseMode::Static).with_temperature(3.0);
        assert!(config.validate().is_err());
    }

    // Intelligent mode with a prompt passes validation.
    #[test]
    fn test_intelligent_mock_config_validate_valid() {
        let config = IntelligentMockConfig::new(ResponseMode::Intelligent)
            .with_prompt("Test prompt".to_string());
        assert!(config.validate().is_ok());
    }

    // Bare JSON (no code fence) parses as-is.
    #[test]
    fn test_extract_json_plain() {
        let generator =
            IntelligentMockGenerator::new(IntelligentMockConfig::new(ResponseMode::Static))
                .unwrap();

        let json_str = r#"{"key": "value"}"#;
        let result = generator.extract_json(json_str);
        assert!(result.is_ok());
    }

    // JSON wrapped in a ```json markdown fence is unwrapped before parsing.
    #[test]
    fn test_extract_json_markdown() {
        let generator =
            IntelligentMockGenerator::new(IntelligentMockConfig::new(ResponseMode::Static))
                .unwrap();

        let json_str = "```json\n{\"key\": \"value\"}\n```";
        let result = generator.extract_json(json_str);
        assert!(result.is_ok());
    }
}