// mockforge_core/voice/command_parser.rs

//! LLM-based command parser for voice commands
//!
//! This module parses natural language voice commands and extracts API requirements
//! using MockForge's LLM infrastructure.

use crate::intelligent_behavior::{
    config::IntelligentBehaviorConfig, llm_client::LlmClient, types::LlmGenerationRequest,
};
use crate::Result;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

/// Voice command parser that uses an LLM to interpret natural language commands
pub struct VoiceCommandParser {
    /// LLM client for parsing commands
    llm_client: LlmClient,
    /// Intelligent behavior configuration
    config: IntelligentBehaviorConfig,
}

impl VoiceCommandParser {
    /// Create a new voice command parser
    pub fn new(config: IntelligentBehaviorConfig) -> Self {
        let behavior_model = config.behavior_model.clone();
        let llm_client = LlmClient::new(behavior_model);

        Self { llm_client, config }
    }

    /// Parse a natural language command into structured API requirements
    ///
    /// This method uses the LLM to extract:
    /// - API type (e-commerce, social media, etc.)
    /// - Endpoints and HTTP methods
    /// - Data models and relationships
    /// - Sample data counts
    /// - Business flows (checkout, auth, etc.)
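    ///
    /// # Example
    ///
    /// A minimal usage sketch, marked `ignore` since it needs a live LLM backend;
    /// `IntelligentBehaviorConfig::default()` is an assumption about the config API:
    ///
    /// ```ignore
    /// let parser = VoiceCommandParser::new(IntelligentBehaviorConfig::default());
    /// let parsed = parser
    ///     .parse_command("Create an e-commerce API with 20 products and a checkout flow")
    ///     .await?;
    /// println!("{} endpoints, {} models", parsed.endpoints.len(), parsed.models.len());
    /// ```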
    pub async fn parse_command(&self, command: &str) -> Result<ParsedCommand> {
        // Build system prompt for command parsing
        let system_prompt = r#"You are an expert API designer. Your task is to parse natural language commands
that describe API requirements and extract structured information.

Extract the following information from the command:
1. API type/category (e.g., e-commerce, social media, blog, todo app)
2. Endpoints with HTTP methods (GET, POST, PUT, DELETE, PATCH)
3. Data models with fields and types
4. Relationships between models
5. Sample data counts (e.g., "20 products")
6. Business flows (e.g., checkout, authentication, user registration)

Return your response as a JSON object with this structure:
{
  "api_type": "string (e.g., e-commerce, social-media, blog)",
  "title": "string (API title)",
  "description": "string (API description)",
  "endpoints": [
    {
      "path": "string (e.g., /api/products)",
      "method": "string (GET, POST, PUT, DELETE, PATCH)",
      "description": "string",
      "request_body": {
        "schema": "object schema if applicable",
        "required": ["array of required fields"]
      },
      "response": {
        "status": 200,
        "schema": "object schema",
        "is_array": false,
        "count": null or number if specified
      }
    }
  ],
  "models": [
    {
      "name": "string (e.g., Product)",
      "fields": [
        {
          "name": "string",
          "type": "string (string, number, integer, boolean, array, object)",
          "description": "string",
          "required": true
        }
      ]
    }
  ],
  "relationships": [
    {
      "from": "string (model name)",
      "to": "string (model name)",
      "type": "string (one-to-many, many-to-many, one-to-one)"
    }
  ],
  "sample_counts": {
    "model_name": number
  },
  "flows": [
    {
      "name": "string (e.g., checkout)",
      "description": "string",
      "steps": ["array of step descriptions"]
    }
  ]
}

Be specific and extract all details mentioned in the command. If something is not mentioned,
don't include it in the response."#;

        // Build user prompt with the command
        let user_prompt =
            format!("Parse this API creation command and extract all requirements:\n\n{}", command);

        // Create LLM request
        let llm_request = LlmGenerationRequest {
            system_prompt: system_prompt.to_string(),
            user_prompt,
            temperature: 0.3, // Lower temperature for more consistent parsing
            max_tokens: 2000,
            schema: None,
        };

        // Generate response from LLM
        let response = self.llm_client.generate(&llm_request).await?;

        // Parse the response into ParsedCommand
        let response_str = serde_json::to_string(&response).unwrap_or_default();
        let parsed: ParsedCommand = serde_json::from_value(response).map_err(|e| {
            crate::Error::generic(format!(
                "Failed to parse LLM response as ParsedCommand: {}. Response: {}",
                e, response_str
            ))
        })?;

        Ok(parsed)
    }

    /// Parse a conversational command (for multi-turn interactions)
    ///
    /// This method parses commands that modify or extend an existing API specification.
    /// It takes the current conversation context into account.
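    ///
    /// # Example
    ///
    /// An illustrative sketch, also marked `ignore`; how `parser` and `context` are
    /// obtained is assumed rather than shown by this module:
    ///
    /// ```ignore
    /// // `context` carries the spec accumulated over earlier turns.
    /// let parsed = parser
    ///     .parse_conversational_command("Add a DELETE endpoint for products", &context)
    ///     .await?;
    /// for endpoint in &parsed.endpoints {
    ///     println!("{} {}", endpoint.method, endpoint.path);
    /// }
    /// ```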
    pub async fn parse_conversational_command(
        &self,
        command: &str,
        context: &super::conversation::ConversationContext,
    ) -> Result<ParsedCommand> {
        // Build system prompt for conversational parsing
        let system_prompt = r#"You are an expert API designer helping to build an API through conversation.
The user is providing incremental commands to modify or extend an existing API specification.

Extract the following information from the command:
1. What is being added/modified (endpoints, models, flows)
2. Details about the addition/modification
3. Any relationships or dependencies

Return your response as a JSON object with the same structure as parse_command, but focus only
on what is NEW or MODIFIED. If the command is asking to add something, include it. If it's asking
to modify something, include the modified version.

If the command is asking a question or requesting confirmation, return an empty endpoints array
and include a "question" or "confirmation" field in the response."#;

        // Build context summary
        let context_summary = format!(
            "Current API: {}\nExisting endpoints: {}\nExisting models: {}",
            context.current_spec.as_ref().map(|s| s.title()).unwrap_or("None"),
            context
                .current_spec
                .as_ref()
                .map(|s| {
                    s.all_paths_and_operations()
                        .iter()
                        .map(|(path, ops)| {
                            format!(
                                "{} ({})",
                                path,
                                ops.keys().map(|s| s.as_str()).collect::<Vec<_>>().join(", ")
                            )
                        })
                        .collect::<Vec<_>>()
                        .join(", ")
                })
                .unwrap_or_else(|| "None".to_string()),
            context
                .current_spec
                .as_ref()
                .and_then(|s| s.spec.components.as_ref())
                .map(|c| c.schemas.keys().cloned().collect::<Vec<_>>().join(", "))
                .unwrap_or_else(|| "None".to_string())
        );

        // Build user prompt
        let user_prompt = format!("Context:\n{}\n\nNew command:\n{}", context_summary, command);

        // Create LLM request
        let llm_request = LlmGenerationRequest {
            system_prompt: system_prompt.to_string(),
            user_prompt,
            temperature: 0.3,
            max_tokens: 2000,
            schema: None,
        };

        // Generate response
        let response = self.llm_client.generate(&llm_request).await?;

        // Parse response
        let response_str = serde_json::to_string(&response).unwrap_or_default();
        let parsed: ParsedCommand = serde_json::from_value(response).map_err(|e| {
            crate::Error::generic(format!(
                "Failed to parse conversational LLM response: {}. Response: {}",
                e, response_str
            ))
        })?;

        Ok(parsed)
    }
}

/// Parsed command structure containing extracted API requirements
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedCommand {
    /// API type/category
    pub api_type: String,
    /// API title
    pub title: String,
    /// API description
    pub description: String,
    /// List of endpoints
    pub endpoints: Vec<EndpointRequirement>,
    /// List of data models
    pub models: Vec<ModelRequirement>,
    /// Relationships between models
    #[serde(default)]
    pub relationships: Vec<RelationshipRequirement>,
    /// Sample data counts per model
    #[serde(default)]
    pub sample_counts: HashMap<String, usize>,
    /// Business flows
    #[serde(default)]
    pub flows: Vec<FlowRequirement>,
}

/// Endpoint requirement extracted from command
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EndpointRequirement {
    /// Path (e.g., /api/products)
    pub path: String,
    /// HTTP method
    pub method: String,
    /// Description
    pub description: String,
    /// Request body schema (if applicable)
    #[serde(default)]
    pub request_body: Option<RequestBodyRequirement>,
    /// Response schema
    #[serde(default)]
    pub response: Option<ResponseRequirement>,
}

/// Request body requirement
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RequestBodyRequirement {
    /// Schema definition
    #[serde(default)]
    pub schema: Option<serde_json::Value>,
    /// Required fields
    #[serde(default)]
    pub required: Vec<String>,
}

/// Response requirement
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResponseRequirement {
    /// HTTP status code
    #[serde(default = "default_status")]
    pub status: u16,
    /// Response schema
    #[serde(default)]
    pub schema: Option<serde_json::Value>,
    /// Whether response is an array
    #[serde(default)]
    pub is_array: bool,
    /// Count of items (if specified)
    #[serde(default)]
    pub count: Option<usize>,
}

fn default_status() -> u16 {
    200
}

/// Model requirement extracted from command
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelRequirement {
    /// Model name
    pub name: String,
    /// List of fields
    pub fields: Vec<FieldRequirement>,
}

/// Field requirement
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FieldRequirement {
    /// Field name
    pub name: String,
    /// Field type
    pub r#type: String,
    /// Field description
    #[serde(default)]
    pub description: String,
    /// Whether field is required
    #[serde(default = "default_true")]
    pub required: bool,
}

fn default_true() -> bool {
    true
}

/// Relationship requirement
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RelationshipRequirement {
    /// Source model
    pub from: String,
    /// Target model
    pub to: String,
    /// Relationship type
    pub r#type: String,
}

/// Flow requirement
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlowRequirement {
    /// Flow name
    pub name: String,
    /// Flow description
    pub description: String,
    /// Steps in the flow
    #[serde(default)]
    pub steps: Vec<String>,
}

/// Alias for API requirement (for backwards compatibility)
pub type ApiRequirement = ParsedCommand;
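
// A minimal sketch of `ParsedCommand` deserialization, checking that serde defaults
// fill the optional sections when the LLM omits them. The payload below is
// illustrative, not a captured LLM response.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parses_minimal_command_json() {
        // Omits `relationships`, `sample_counts`, `flows`, and the response `status`,
        // all of which should fall back to their serde defaults.
        let json = serde_json::json!({
            "api_type": "e-commerce",
            "title": "Shop API",
            "description": "A small shop API",
            "endpoints": [{
                "path": "/api/products",
                "method": "GET",
                "description": "List products",
                "response": { "is_array": true, "count": 20 }
            }],
            "models": [{
                "name": "Product",
                "fields": [{ "name": "id", "type": "string" }]
            }]
        });

        let parsed: ParsedCommand = serde_json::from_value(json).expect("should deserialize");
        assert_eq!(parsed.endpoints[0].response.as_ref().unwrap().status, 200);
        assert!(parsed.models[0].fields[0].required); // via default_true
        assert!(parsed.relationships.is_empty());
        assert!(parsed.flows.is_empty());
    }
}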