1use crate::intelligent_behavior::{
7 config::IntelligentBehaviorConfig, llm_client::LlmClient, types::LlmGenerationRequest,
8};
9use crate::Result;
10use serde::{Deserialize, Serialize};
11use std::collections::HashMap;
12
/// Parses natural-language (voice) commands into structured requirements by
/// sending them to an LLM and deserializing its JSON responses.
pub struct VoiceCommandParser {
    // Client used to send generation requests to the behavior model.
    llm_client: LlmClient,
    // Behavior configuration; the model name is read from it in `new`.
    config: IntelligentBehaviorConfig,
}
20
21impl VoiceCommandParser {
22 pub fn new(config: IntelligentBehaviorConfig) -> Self {
24 let behavior_model = config.behavior_model.clone();
25 let llm_client = LlmClient::new(behavior_model);
26
27 Self { llm_client, config }
28 }
29
30 pub async fn parse_command(&self, command: &str) -> Result<ParsedCommand> {
39 let system_prompt = r#"You are an expert API designer. Your task is to parse natural language commands
41that describe API requirements and extract structured information.
42
43Extract the following information from the command:
441. API type/category (e.g., e-commerce, social media, blog, todo app)
452. Endpoints with HTTP methods (GET, POST, PUT, DELETE, PATCH)
463. Data models with fields and types
474. Relationships between models
485. Sample data counts (e.g., "20 products")
496. Business flows (e.g., checkout, authentication, user registration)
50
51Return your response as a JSON object with this structure:
52{
53 "api_type": "string (e.g., e-commerce, social-media, blog)",
54 "title": "string (API title)",
55 "description": "string (API description)",
56 "endpoints": [
57 {
58 "path": "string (e.g., /api/products)",
59 "method": "string (GET, POST, PUT, DELETE, PATCH)",
60 "description": "string",
61 "request_body": {
62 "schema": "object schema if applicable",
63 "required": ["array of required fields"]
64 },
65 "response": {
66 "status": 200,
67 "schema": "object schema",
68 "is_array": false,
69 "count": null or number if specified
70 }
71 }
72 ],
73 "models": [
74 {
75 "name": "string (e.g., Product)",
76 "fields": [
77 {
78 "name": "string",
79 "type": "string (string, number, integer, boolean, array, object)",
80 "description": "string",
81 "required": true
82 }
83 ]
84 }
85 ],
86 "relationships": [
87 {
88 "from": "string (model name)",
89 "to": "string (model name)",
90 "type": "string (one-to-many, many-to-many, one-to-one)"
91 }
92 ],
93 "sample_counts": {
94 "model_name": number
95 },
96 "flows": [
97 {
98 "name": "string (e.g., checkout)",
99 "description": "string",
100 "steps": ["array of step descriptions"]
101 }
102 ]
103}
104
105Be specific and extract all details mentioned in the command. If something is not mentioned,
106don't include it in the response."#;
107
108 let user_prompt =
110 format!("Parse this API creation command and extract all requirements:\n\n{}", command);
111
112 let llm_request = LlmGenerationRequest {
114 system_prompt: system_prompt.to_string(),
115 user_prompt,
116 temperature: 0.3, max_tokens: 2000,
118 schema: None,
119 };
120
121 let response = self.llm_client.generate(&llm_request).await?;
123
124 let response_str = serde_json::to_string(&response).unwrap_or_default();
126 let parsed: ParsedCommand = serde_json::from_value(response).map_err(|e| {
127 crate::Error::generic(format!(
128 "Failed to parse LLM response as ParsedCommand: {}. Response: {}",
129 e, response_str
130 ))
131 })?;
132
133 Ok(parsed)
134 }
135
136 pub async fn parse_conversational_command(
141 &self,
142 command: &str,
143 context: &super::conversation::ConversationContext,
144 ) -> Result<ParsedCommand> {
145 let system_prompt = r#"You are an expert API designer helping to build an API through conversation.
147The user is providing incremental commands to modify or extend an existing API specification.
148
149Extract the following information from the command:
1501. What is being added/modified (endpoints, models, flows)
1512. Details about the addition/modification
1523. Any relationships or dependencies
153
154Return your response as a JSON object with the same structure as parse_command, but focus only
155on what is NEW or MODIFIED. If the command is asking to add something, include it. If it's asking
156to modify something, include the modified version.
157
158If the command is asking a question or requesting confirmation, return an empty endpoints array
159and include a "question" or "confirmation" field in the response."#;
160
161 let context_summary = format!(
163 "Current API: {}\nExisting endpoints: {}\nExisting models: {}",
164 context.current_spec.as_ref().map(|s| s.title()).unwrap_or("None"),
165 context
166 .current_spec
167 .as_ref()
168 .map(|s| {
169 s.all_paths_and_operations()
170 .iter()
171 .map(|(path, ops)| {
172 format!(
173 "{} ({})",
174 path,
175 ops.keys().map(|s| s.as_str()).collect::<Vec<_>>().join(", ")
176 )
177 })
178 .collect::<Vec<_>>()
179 .join(", ")
180 })
181 .unwrap_or_else(|| "None".to_string()),
182 context
183 .current_spec
184 .as_ref()
185 .and_then(|s| s.spec.components.as_ref())
186 .map(|c| c.schemas.keys().cloned().collect::<Vec<_>>().join(", "))
187 .unwrap_or_else(|| "None".to_string())
188 );
189
190 let user_prompt = format!("Context:\n{}\n\nNew command:\n{}", context_summary, command);
192
193 let llm_request = LlmGenerationRequest {
195 system_prompt: system_prompt.to_string(),
196 user_prompt,
197 temperature: 0.3,
198 max_tokens: 2000,
199 schema: None,
200 };
201
202 let response = self.llm_client.generate(&llm_request).await?;
204
205 let response_str = serde_json::to_string(&response).unwrap_or_default();
207 let parsed: ParsedCommand = serde_json::from_value(response).map_err(|e| {
208 crate::Error::generic(format!(
209 "Failed to parse conversational LLM response: {}. Response: {}",
210 e, response_str
211 ))
212 })?;
213
214 Ok(parsed)
215 }
216
217 pub async fn parse_workspace_scenario_command(
222 &self,
223 command: &str,
224 ) -> Result<ParsedWorkspaceScenario> {
225 let system_prompt = r#"You are an expert at parsing natural language descriptions of workspace scenarios
227and extracting structured information for creating complete mock environments.
228
229Extract the following information from the command:
2301. Domain/industry (e.g., bank, e-commerce, healthcare, etc.)
2312. Chaos/failure characteristics (flaky rates, slow KYC, high latency, etc.)
2323. Initial data requirements (number of users, disputes, orders, etc.)
2334. API endpoints needed for the domain
2345. Behavioral rules (failure rates, latency patterns, etc.)
2356. Data models and relationships
236
237Return your response as a JSON object with this structure:
238{
239 "domain": "string (e.g., bank, e-commerce, healthcare)",
240 "title": "string (workspace title)",
241 "description": "string (workspace description)",
242 "chaos_characteristics": [
243 {
244 "type": "string (latency|failure|rate_limit|etc.)",
245 "description": "string (e.g., flaky foreign exchange rates)",
246 "config": {
247 "probability": 0.0-1.0,
248 "delay_ms": number,
249 "error_rate": 0.0-1.0,
250 "error_codes": [500, 502, 503],
251 "details": "additional configuration details"
252 }
253 }
254 ],
255 "initial_data": {
256 "users": number,
257 "disputes": number,
258 "orders": number,
259 "custom": {
260 "entity_name": number
261 }
262 },
263 "api_requirements": {
264 "endpoints": [
265 {
266 "path": "string",
267 "method": "string",
268 "description": "string"
269 }
270 ],
271 "models": [
272 {
273 "name": "string",
274 "fields": [
275 {
276 "name": "string",
277 "type": "string"
278 }
279 ]
280 }
281 ]
282 },
283 "behavioral_rules": [
284 {
285 "description": "string",
286 "type": "string",
287 "config": {}
288 }
289 ]
290}
291
292Be specific and extract all details mentioned in the command."#;
293
294 let user_prompt = format!(
296 "Parse this workspace scenario description and extract all requirements:\n\n{}",
297 command
298 );
299
300 let llm_request = LlmGenerationRequest {
302 system_prompt: system_prompt.to_string(),
303 user_prompt,
304 temperature: 0.3,
305 max_tokens: 3000,
306 schema: None,
307 };
308
309 let response = self.llm_client.generate(&llm_request).await?;
311
312 let response_str = serde_json::to_string(&response).unwrap_or_default();
314 let parsed: ParsedWorkspaceScenario = serde_json::from_value(response).map_err(|e| {
315 crate::Error::generic(format!(
316 "Failed to parse LLM response as ParsedWorkspaceScenario: {}. Response: {}",
317 e, response_str
318 ))
319 })?;
320
321 Ok(parsed)
322 }
323
324 pub async fn parse_workspace_creation_command(
334 &self,
335 command: &str,
336 ) -> Result<ParsedWorkspaceCreation> {
337 let system_prompt = r#"You are an expert at parsing natural language descriptions of workspace creation
339and extracting structured information for creating complete mock backends with personas, scenarios, and configuration.
340
341Extract the following information from the command:
3421. Workspace name and description
3432. Entities (customers, orders, payments, products, etc.)
3443. Personas with their traits and relationships (e.g., customer owns orders)
3454. Behavioral scenarios:
346 - Happy path scenarios (successful flows)
347 - Failure path scenarios (error cases)
348 - Slow path scenarios (latency/performance issues)
3495. Reality continuum preferences (e.g., "80% mock, 20% real prod for catalog only")
3506. Drift budget preferences (e.g., "strict drift budget", "moderate tolerance")
351
352Return your response as a JSON object with this structure:
353{
354 "workspace_name": "string (e.g., e-commerce-workspace)",
355 "workspace_description": "string",
356 "entities": [
357 {
358 "name": "string (e.g., Customer, Order, Payment)",
359 "description": "string",
360 "endpoints": [
361 {
362 "path": "string",
363 "method": "string",
364 "description": "string"
365 }
366 ],
367 "fields": [
368 {
369 "name": "string",
370 "type": "string",
371 "description": "string"
372 }
373 ]
374 }
375 ],
376 "personas": [
377 {
378 "name": "string (e.g., premium-customer, regular-customer)",
379 "description": "string",
380 "traits": {
381 "trait_name": "trait_value"
382 },
383 "relationships": [
384 {
385 "type": "string (e.g., owns, belongs_to, has)",
386 "target_entity": "string (e.g., Order, Payment)"
387 }
388 ]
389 }
390 ],
391 "scenarios": [
392 {
393 "name": "string (e.g., happy-path-checkout, failed-payment, slow-shipping)",
394 "type": "string (happy_path|failure|slow_path)",
395 "description": "string",
396 "steps": [
397 {
398 "description": "string (e.g., Create order, Process payment)",
399 "endpoint": "string (e.g., POST /api/orders)",
400 "expected_outcome": "string"
401 }
402 ]
403 }
404 ],
405 "reality_continuum": {
406 "default_ratio": 0.0-1.0 (0.0 = 100% mock, 1.0 = 100% real),
407 "route_rules": [
408 {
409 "pattern": "string (e.g., /api/catalog/*)",
410 "ratio": 0.0-1.0,
411 "description": "string"
412 }
413 ],
414 "transition_mode": "string (manual|time_based|scheduled)"
415 },
416 "drift_budget": {
417 "strictness": "string (strict|moderate|lenient)",
418 "max_breaking_changes": number,
419 "max_non_breaking_changes": number,
420 "description": "string"
421 }
422}
423
424Be specific and extract all details mentioned in the command. Ensure at least 2-3 endpoints per entity,
4252-3 personas with relationships, and 2-3 behavioral scenarios."#;
426
427 let user_prompt = format!(
429 "Parse this workspace creation command and extract all requirements:\n\n{}",
430 command
431 );
432
433 let llm_request = LlmGenerationRequest {
435 system_prompt: system_prompt.to_string(),
436 user_prompt,
437 temperature: 0.3,
438 max_tokens: 4000,
439 schema: None,
440 };
441
442 let response = self.llm_client.generate(&llm_request).await?;
444
445 let response_str = serde_json::to_string(&response).unwrap_or_default();
447 let parsed: ParsedWorkspaceCreation = serde_json::from_value(response).map_err(|e| {
448 crate::Error::generic(format!(
449 "Failed to parse LLM response as ParsedWorkspaceCreation: {}. Response: {}",
450 e, response_str
451 ))
452 })?;
453
454 Ok(parsed)
455 }
456
457 pub async fn parse_reality_continuum_command(
462 &self,
463 command: &str,
464 ) -> Result<ParsedRealityContinuum> {
465 let system_prompt = r#"You are an expert at parsing natural language descriptions of reality continuum
467configuration and extracting structured blend ratio settings.
468
469Extract the following information from the command:
4701. Default blend ratio (e.g., "80% mock, 20% real" means ratio 0.2)
4712. Route-specific rules (e.g., "catalog only", "for /api/products/*")
4723. Transition mode preferences (manual, time-based, scheduled)
473
474Return your response as a JSON object with this structure:
475{
476 "default_ratio": 0.0-1.0 (0.0 = 100% mock, 1.0 = 100% real),
477 "enabled": true/false,
478 "route_rules": [
479 {
480 "pattern": "string (e.g., /api/catalog/*, /api/products/*)",
481 "ratio": 0.0-1.0,
482 "description": "string"
483 }
484 ],
485 "transition_mode": "string (manual|time_based|scheduled)",
486 "merge_strategy": "string (field_level|weighted|body_blend)"
487}
488
489Examples:
490- "80% mock, 20% real" → default_ratio: 0.2
491- "Make catalog 50% real" → route_rules: [{pattern: "/api/catalog/*", ratio: 0.5}]
492- "100% mock for now" → default_ratio: 0.0, enabled: true"#;
493
494 let user_prompt =
496 format!("Parse this reality continuum configuration command:\n\n{}", command);
497
498 let llm_request = LlmGenerationRequest {
500 system_prompt: system_prompt.to_string(),
501 user_prompt,
502 temperature: 0.3,
503 max_tokens: 2000,
504 schema: None,
505 };
506
507 let response = self.llm_client.generate(&llm_request).await?;
509
510 let response_str = serde_json::to_string(&response).unwrap_or_default();
512 let parsed: ParsedRealityContinuum = serde_json::from_value(response).map_err(|e| {
513 crate::Error::generic(format!(
514 "Failed to parse LLM response as ParsedRealityContinuum: {}. Response: {}",
515 e, response_str
516 ))
517 })?;
518
519 Ok(parsed)
520 }
521
522 pub async fn parse_drift_budget_command(&self, command: &str) -> Result<ParsedDriftBudget> {
527 let system_prompt = r#"You are an expert at parsing natural language descriptions of drift budget
529configuration and extracting structured budget settings.
530
531Extract the following information from the command:
5321. Strictness level (strict, moderate, lenient)
5332. Breaking change tolerance
5343. Non-breaking change tolerance
5354. Per-service/endpoint preferences
536
537Return your response as a JSON object with this structure:
538{
539 "strictness": "string (strict|moderate|lenient)",
540 "enabled": true/false,
541 "max_breaking_changes": number (0 for strict, higher for lenient),
542 "max_non_breaking_changes": number,
543 "max_field_churn_percent": number (0.0-100.0, optional),
544 "time_window_days": number (optional, for percentage-based budgets),
545 "per_service_budgets": {
546 "service_name": {
547 "max_breaking_changes": number,
548 "max_non_breaking_changes": number
549 }
550 },
551 "description": "string"
552}
553
554Examples:
555- "strict drift budget" → strictness: "strict", max_breaking_changes: 0, max_non_breaking_changes: 5
556- "moderate tolerance" → strictness: "moderate", max_breaking_changes: 1, max_non_breaking_changes: 10
557- "lenient, allow up to 5 breaking changes" → strictness: "lenient", max_breaking_changes: 5"#;
558
559 let user_prompt = format!("Parse this drift budget configuration command:\n\n{}", command);
561
562 let llm_request = LlmGenerationRequest {
564 system_prompt: system_prompt.to_string(),
565 user_prompt,
566 temperature: 0.3,
567 max_tokens: 2000,
568 schema: None,
569 };
570
571 let response = self.llm_client.generate(&llm_request).await?;
573
574 let response_str = serde_json::to_string(&response).unwrap_or_default();
576 let parsed: ParsedDriftBudget = serde_json::from_value(response).map_err(|e| {
577 crate::Error::generic(format!(
578 "Failed to parse LLM response as ParsedDriftBudget: {}. Response: {}",
579 e, response_str
580 ))
581 })?;
582
583 Ok(parsed)
584 }
585}
586
/// Structured API requirements extracted from a natural-language command by
/// `VoiceCommandParser::parse_command`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedCommand {
    /// API category, e.g. "e-commerce" or "blog" (per the extraction prompt).
    pub api_type: String,
    /// API title.
    pub title: String,
    /// API description.
    pub description: String,
    /// Endpoints the API should expose.
    pub endpoints: Vec<EndpointRequirement>,
    /// Data models referenced by the endpoints.
    pub models: Vec<ModelRequirement>,
    /// Relationships between models; empty when the LLM omits the field.
    #[serde(default)]
    pub relationships: Vec<RelationshipRequirement>,
    /// Requested sample-data counts keyed by model name; empty if omitted.
    #[serde(default)]
    pub sample_counts: HashMap<String, usize>,
    /// Business flows (e.g. checkout); empty if omitted.
    #[serde(default)]
    pub flows: Vec<FlowRequirement>,
}
610
/// A single required endpoint: path, HTTP method, and optional request /
/// response shape hints.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EndpointRequirement {
    /// Route path, e.g. "/api/products".
    pub path: String,
    /// HTTP method (GET, POST, PUT, DELETE, PATCH per the prompt).
    pub method: String,
    /// Human-readable description of the endpoint.
    pub description: String,
    /// Request body shape, when the endpoint accepts one.
    #[serde(default)]
    pub request_body: Option<RequestBodyRequirement>,
    /// Expected response shape, when specified.
    #[serde(default)]
    pub response: Option<ResponseRequirement>,
}
627
/// Request-body requirement for an endpoint.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RequestBodyRequirement {
    /// Free-form object schema, if the LLM provided one.
    #[serde(default)]
    pub schema: Option<serde_json::Value>,
    /// Names of required body fields; empty if omitted.
    #[serde(default)]
    pub required: Vec<String>,
}
638
/// Response requirement for an endpoint.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResponseRequirement {
    /// HTTP status code; defaults to 200 when omitted.
    #[serde(default = "default_status")]
    pub status: u16,
    /// Free-form object schema, if provided.
    #[serde(default)]
    pub schema: Option<serde_json::Value>,
    /// Whether the response body is an array; defaults to false.
    #[serde(default)]
    pub is_array: bool,
    /// Item count for array responses, when the command specified one.
    #[serde(default)]
    pub count: Option<usize>,
}
655
/// Serde default for `ResponseRequirement::status`: HTTP 200.
fn default_status() -> u16 {
    200
}
659
/// A required data model: its name and typed fields.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelRequirement {
    /// Model name, e.g. "Product".
    pub name: String,
    /// Fields of the model.
    pub fields: Vec<FieldRequirement>,
}
668
/// A single field of a data model.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FieldRequirement {
    /// Field name.
    pub name: String,
    /// Field type (string, number, integer, boolean, array, object per the
    /// extraction prompt); raw identifier because `type` is a keyword.
    pub r#type: String,
    /// Field description; empty string when omitted.
    #[serde(default)]
    pub description: String,
    /// Whether the field is required; defaults to true when omitted.
    #[serde(default = "default_true")]
    pub required: bool,
}
683
/// Serde default for booleans that should be treated as `true` when omitted
/// (e.g. `FieldRequirement::required`, `ParsedDriftBudget::enabled`).
fn default_true() -> bool {
    true
}
687
/// A relationship between two models.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RelationshipRequirement {
    /// Source model name.
    pub from: String,
    /// Target model name.
    pub to: String,
    /// Relationship kind (one-to-many, many-to-many, one-to-one per the
    /// prompt); raw identifier because `type` is a keyword.
    pub r#type: String,
}
698
/// A business flow mentioned in the command, e.g. checkout or registration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlowRequirement {
    /// Flow name, e.g. "checkout".
    pub name: String,
    /// Flow description.
    pub description: String,
    /// Ordered step descriptions; empty when omitted.
    #[serde(default)]
    pub steps: Vec<String>,
}
710
/// Alias for [`ParsedCommand`], for call sites that treat the parsed command
/// as an API requirement.
pub type ApiRequirement = ParsedCommand;
713
/// Structured workspace scenario extracted by
/// `VoiceCommandParser::parse_workspace_scenario_command`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedWorkspaceScenario {
    /// Domain/industry, e.g. "bank" or "e-commerce".
    pub domain: String,
    /// Workspace title.
    pub title: String,
    /// Workspace description.
    pub description: String,
    /// Chaos/failure characteristics (latency, failure rates, ...).
    #[serde(default)]
    pub chaos_characteristics: Vec<ChaosCharacteristic>,
    /// Initial seed-data counts; all-empty defaults when omitted.
    #[serde(default)]
    pub initial_data: InitialDataRequirements,
    /// Endpoints and models required by the scenario.
    #[serde(default)]
    pub api_requirements: ApiRequirements,
    /// Behavioral rules (failure rates, latency patterns, ...).
    #[serde(default)]
    pub behavioral_rules: Vec<BehavioralRule>,
}
736
/// One chaos/failure characteristic of a workspace scenario.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChaosCharacteristic {
    /// Characteristic kind, e.g. "latency", "failure", "rate_limit".
    pub r#type: String,
    /// Human-readable description, e.g. "flaky foreign exchange rates".
    pub description: String,
    /// Free-form configuration (probability, delay_ms, error_rate, ... per
    /// the prompt); `Value::Null` when omitted.
    #[serde(default)]
    pub config: serde_json::Value,
}
748
/// Initial seed-data counts for a workspace scenario. Every field defaults
/// so a missing or partial "initial_data" object still deserializes.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct InitialDataRequirements {
    /// Number of users to seed, if specified.
    #[serde(default)]
    pub users: Option<usize>,
    /// Number of disputes to seed, if specified.
    #[serde(default)]
    pub disputes: Option<usize>,
    /// Number of orders to seed, if specified.
    #[serde(default)]
    pub orders: Option<usize>,
    /// Counts for any other entity, keyed by entity name.
    #[serde(default)]
    pub custom: HashMap<String, usize>,
}
765
/// API surface required by a workspace scenario: endpoints plus models.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ApiRequirements {
    /// Required endpoints; empty when omitted.
    #[serde(default)]
    pub endpoints: Vec<EndpointRequirement>,
    /// Required data models; empty when omitted.
    #[serde(default)]
    pub models: Vec<ModelRequirement>,
}
776
/// A behavioral rule for a workspace scenario (e.g. a failure-rate or
/// latency pattern).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BehavioralRule {
    /// Human-readable rule description.
    pub description: String,
    /// Rule kind; raw identifier because `type` is a keyword.
    pub r#type: String,
    /// Free-form rule configuration; `Value::Null` when omitted.
    #[serde(default)]
    pub config: serde_json::Value,
}
788
/// Structured workspace-creation request extracted by
/// `VoiceCommandParser::parse_workspace_creation_command`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedWorkspaceCreation {
    /// Workspace name, e.g. "e-commerce-workspace".
    pub workspace_name: String,
    /// Workspace description.
    pub workspace_description: String,
    /// Entities (e.g. Customer, Order) with their endpoints and fields.
    #[serde(default)]
    pub entities: Vec<EntityRequirement>,
    /// Personas with traits and entity relationships.
    #[serde(default)]
    pub personas: Vec<PersonaRequirement>,
    /// Behavioral scenarios (happy path / failure / slow path).
    #[serde(default)]
    pub scenarios: Vec<ScenarioRequirement>,
    /// Optional mock-vs-real blend configuration.
    #[serde(default)]
    pub reality_continuum: Option<ParsedRealityContinuum>,
    /// Optional drift-budget configuration.
    #[serde(default)]
    pub drift_budget: Option<ParsedDriftBudget>,
}
812
/// An entity in a workspace-creation request, with its endpoints and fields.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntityRequirement {
    /// Entity name, e.g. "Customer".
    pub name: String,
    /// Entity description.
    pub description: String,
    /// Endpoints exposing this entity; empty when omitted.
    #[serde(default)]
    pub endpoints: Vec<EntityEndpointRequirement>,
    /// Fields of the entity; empty when omitted.
    #[serde(default)]
    pub fields: Vec<FieldRequirement>,
}
827
/// An endpoint belonging to an entity. Unlike [`EndpointRequirement`] it
/// carries no request/response shape — only path, method, and description.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntityEndpointRequirement {
    /// Route path.
    pub path: String,
    /// HTTP method.
    pub method: String,
    /// Endpoint description.
    pub description: String,
}
838
/// A persona in a workspace-creation request, e.g. "premium-customer".
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PersonaRequirement {
    /// Persona name.
    pub name: String,
    /// Persona description.
    pub description: String,
    /// Trait name → trait value pairs; empty when omitted.
    #[serde(default)]
    pub traits: HashMap<String, String>,
    /// Relationships to entities (e.g. owns Orders); empty when omitted.
    #[serde(default)]
    pub relationships: Vec<PersonaRelationship>,
}
853
/// A persona-to-entity relationship.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PersonaRelationship {
    /// Relationship kind, e.g. "owns", "belongs_to", "has"; raw identifier
    /// because `type` is a keyword.
    pub r#type: String,
    /// Name of the related entity, e.g. "Order".
    pub target_entity: String,
}
862
/// A behavioral scenario in a workspace-creation request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScenarioRequirement {
    /// Scenario name, e.g. "happy-path-checkout".
    pub name: String,
    /// Scenario kind: "happy_path", "failure", or "slow_path" per the
    /// prompt; raw identifier because `type` is a keyword.
    pub r#type: String,
    /// Scenario description.
    pub description: String,
    /// Ordered scenario steps; empty when omitted.
    #[serde(default)]
    pub steps: Vec<ScenarioStepRequirement>,
}
876
/// One step of a behavioral scenario.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScenarioStepRequirement {
    /// Step description, e.g. "Create order".
    pub description: String,
    /// Endpoint exercised by the step, e.g. "POST /api/orders".
    pub endpoint: String,
    /// Expected outcome of the step.
    pub expected_outcome: String,
}
887
/// Mock-vs-real blend configuration extracted by
/// `VoiceCommandParser::parse_reality_continuum_command`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedRealityContinuum {
    /// Default blend ratio: 0.0 = 100% mock, 1.0 = 100% real (per the
    /// prompt); defaults to 0.0 when omitted.
    #[serde(default = "default_blend_ratio")]
    pub default_ratio: f64,
    /// Whether blending is enabled; defaults to true when omitted.
    #[serde(default = "default_true")]
    pub enabled: bool,
    /// Per-route overrides of the default ratio; empty when omitted.
    #[serde(default)]
    pub route_rules: Vec<ParsedContinuumRule>,
    /// Transition mode ("manual", "time_based", or "scheduled" per the
    /// prompt); empty string when omitted.
    #[serde(default)]
    pub transition_mode: String,
    /// Merge strategy ("field_level", "weighted", or "body_blend" per the
    /// prompt); empty string when omitted.
    #[serde(default)]
    pub merge_strategy: String,
}
907
/// Serde default for `ParsedRealityContinuum::default_ratio`: 0.0, i.e.
/// fully mock per the prompt's ratio convention.
fn default_blend_ratio() -> f64 {
    0.0
}
911
/// A per-route blend-ratio rule.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedContinuumRule {
    /// Route pattern, e.g. "/api/catalog/*".
    pub pattern: String,
    /// Blend ratio for matching routes (0.0 = mock, 1.0 = real).
    pub ratio: f64,
    /// Rule description; empty string when omitted.
    #[serde(default)]
    pub description: String,
}
923
/// Drift-budget configuration extracted by
/// `VoiceCommandParser::parse_drift_budget_command`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedDriftBudget {
    /// Strictness level: "strict", "moderate", or "lenient" per the prompt.
    pub strictness: String,
    /// Whether the budget is enforced; defaults to true when omitted.
    #[serde(default = "default_true")]
    pub enabled: bool,
    /// Maximum allowed breaking changes; defaults to 0 when omitted.
    #[serde(default)]
    pub max_breaking_changes: u32,
    /// Maximum allowed non-breaking changes; defaults to 0 when omitted.
    #[serde(default)]
    pub max_non_breaking_changes: u32,
    /// Optional field-churn cap in percent (0.0-100.0 per the prompt);
    /// omitted from serialized output when `None`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub max_field_churn_percent: Option<f64>,
    /// Optional time window in days for percentage-based budgets; omitted
    /// from serialized output when `None`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub time_window_days: Option<u32>,
    /// Per-service budget overrides keyed by service name; empty when
    /// omitted.
    #[serde(default)]
    pub per_service_budgets: HashMap<String, ParsedServiceBudget>,
    /// Budget description; empty string when omitted.
    #[serde(default)]
    pub description: String,
}
951
/// Per-service drift-budget limits.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedServiceBudget {
    /// Maximum breaking changes for this service; defaults to 0.
    #[serde(default)]
    pub max_breaking_changes: u32,
    /// Maximum non-breaking changes for this service; defaults to 0.
    #[serde(default)]
    pub max_non_breaking_changes: u32,
}