mockforge_core/voice/command_parser.rs

1//! LLM-based command parser for voice commands
2//!
3//! This module parses natural language voice commands and extracts API requirements
4//! using MockForge's LLM infrastructure.
5
6use crate::intelligent_behavior::{
7    config::IntelligentBehaviorConfig, llm_client::LlmClient, types::LlmGenerationRequest,
8};
9use crate::Result;
10use serde::{Deserialize, Serialize};
11use std::collections::HashMap;
12
/// Voice command parser that uses LLM to interpret natural language commands
///
/// Wraps an [`LlmClient`] constructed from the intelligent-behavior
/// configuration. Each `parse_*` method sends a system/user prompt pair to the
/// LLM and deserializes the JSON reply into one of the `Parsed*` types
/// defined in this module.
pub struct VoiceCommandParser {
    /// LLM client used to run all parsing prompts
    llm_client: LlmClient,
    /// Configuration this parser was created from (source of the behavior model)
    config: IntelligentBehaviorConfig,
}
20
21impl VoiceCommandParser {
22    /// Create a new voice command parser
23    pub fn new(config: IntelligentBehaviorConfig) -> Self {
24        let behavior_model = config.behavior_model.clone();
25        let llm_client = LlmClient::new(behavior_model);
26
27        Self { llm_client, config }
28    }
29
30    /// Parse a natural language command into structured API requirements
31    ///
32    /// This method uses the LLM to extract:
33    /// - API type (e-commerce, social media, etc.)
34    /// - Endpoints and HTTP methods
35    /// - Data models and relationships
36    /// - Sample data counts
37    /// - Business flows (checkout, auth, etc.)
38    pub async fn parse_command(&self, command: &str) -> Result<ParsedCommand> {
39        // Build system prompt for command parsing
40        let system_prompt = r#"You are an expert API designer. Your task is to parse natural language commands
41that describe API requirements and extract structured information.
42
43Extract the following information from the command:
441. API type/category (e.g., e-commerce, social media, blog, todo app)
452. Endpoints with HTTP methods (GET, POST, PUT, DELETE, PATCH)
463. Data models with fields and types
474. Relationships between models
485. Sample data counts (e.g., "20 products")
496. Business flows (e.g., checkout, authentication, user registration)
50
51Return your response as a JSON object with this structure:
52{
53  "api_type": "string (e.g., e-commerce, social-media, blog)",
54  "title": "string (API title)",
55  "description": "string (API description)",
56  "endpoints": [
57    {
58      "path": "string (e.g., /api/products)",
59      "method": "string (GET, POST, PUT, DELETE, PATCH)",
60      "description": "string",
61      "request_body": {
62        "schema": "object schema if applicable",
63        "required": ["array of required fields"]
64      },
65      "response": {
66        "status": 200,
67        "schema": "object schema",
68        "is_array": false,
69        "count": null or number if specified
70      }
71    }
72  ],
73  "models": [
74    {
75      "name": "string (e.g., Product)",
76      "fields": [
77        {
78          "name": "string",
79          "type": "string (string, number, integer, boolean, array, object)",
80          "description": "string",
81          "required": true
82        }
83      ]
84    }
85  ],
86  "relationships": [
87    {
88      "from": "string (model name)",
89      "to": "string (model name)",
90      "type": "string (one-to-many, many-to-many, one-to-one)"
91    }
92  ],
93  "sample_counts": {
94    "model_name": number
95  },
96  "flows": [
97    {
98      "name": "string (e.g., checkout)",
99      "description": "string",
100      "steps": ["array of step descriptions"]
101    }
102  ]
103}
104
105Be specific and extract all details mentioned in the command. If something is not mentioned,
106don't include it in the response."#;
107
108        // Build user prompt with the command
109        let user_prompt =
110            format!("Parse this API creation command and extract all requirements:\n\n{}", command);
111
112        // Create LLM request
113        let llm_request = LlmGenerationRequest {
114            system_prompt: system_prompt.to_string(),
115            user_prompt,
116            temperature: 0.3, // Lower temperature for more consistent parsing
117            max_tokens: 2000,
118            schema: None,
119        };
120
121        // Generate response from LLM
122        let response = self.llm_client.generate(&llm_request).await?;
123
124        // Parse the response into ParsedCommand
125        let response_str = serde_json::to_string(&response).unwrap_or_default();
126        let parsed: ParsedCommand = serde_json::from_value(response).map_err(|e| {
127            crate::Error::generic(format!(
128                "Failed to parse LLM response as ParsedCommand: {}. Response: {}",
129                e, response_str
130            ))
131        })?;
132
133        Ok(parsed)
134    }
135
136    /// Parse a conversational command (for multi-turn interactions)
137    ///
138    /// This method parses commands that modify or extend an existing API specification.
139    /// It takes the current conversation context into account.
140    pub async fn parse_conversational_command(
141        &self,
142        command: &str,
143        context: &super::conversation::ConversationContext,
144    ) -> Result<ParsedCommand> {
145        // Build system prompt for conversational parsing
146        let system_prompt = r#"You are an expert API designer helping to build an API through conversation.
147The user is providing incremental commands to modify or extend an existing API specification.
148
149Extract the following information from the command:
1501. What is being added/modified (endpoints, models, flows)
1512. Details about the addition/modification
1523. Any relationships or dependencies
153
154Return your response as a JSON object with the same structure as parse_command, but focus only
155on what is NEW or MODIFIED. If the command is asking to add something, include it. If it's asking
156to modify something, include the modified version.
157
158If the command is asking a question or requesting confirmation, return an empty endpoints array
159and include a "question" or "confirmation" field in the response."#;
160
161        // Build context summary
162        let context_summary = format!(
163            "Current API: {}\nExisting endpoints: {}\nExisting models: {}",
164            context.current_spec.as_ref().map(|s| s.title()).unwrap_or("None"),
165            context
166                .current_spec
167                .as_ref()
168                .map(|s| {
169                    s.all_paths_and_operations()
170                        .iter()
171                        .map(|(path, ops)| {
172                            format!(
173                                "{} ({})",
174                                path,
175                                ops.keys().map(|s| s.as_str()).collect::<Vec<_>>().join(", ")
176                            )
177                        })
178                        .collect::<Vec<_>>()
179                        .join(", ")
180                })
181                .unwrap_or_else(|| "None".to_string()),
182            context
183                .current_spec
184                .as_ref()
185                .and_then(|s| s.spec.components.as_ref())
186                .map(|c| c.schemas.keys().cloned().collect::<Vec<_>>().join(", "))
187                .unwrap_or_else(|| "None".to_string())
188        );
189
190        // Build user prompt
191        let user_prompt = format!("Context:\n{}\n\nNew command:\n{}", context_summary, command);
192
193        // Create LLM request
194        let llm_request = LlmGenerationRequest {
195            system_prompt: system_prompt.to_string(),
196            user_prompt,
197            temperature: 0.3,
198            max_tokens: 2000,
199            schema: None,
200        };
201
202        // Generate response
203        let response = self.llm_client.generate(&llm_request).await?;
204
205        // Parse response
206        let response_str = serde_json::to_string(&response).unwrap_or_default();
207        let parsed: ParsedCommand = serde_json::from_value(response).map_err(|e| {
208            crate::Error::generic(format!(
209                "Failed to parse conversational LLM response: {}. Response: {}",
210                e, response_str
211            ))
212        })?;
213
214        Ok(parsed)
215    }
216
217    /// Parse a workspace scenario description
218    ///
219    /// This method extracts information about creating a complete workspace scenario,
220    /// including domain, chaos characteristics, initial data, and API requirements.
221    pub async fn parse_workspace_scenario_command(
222        &self,
223        command: &str,
224    ) -> Result<ParsedWorkspaceScenario> {
225        // Build system prompt for workspace scenario parsing
226        let system_prompt = r#"You are an expert at parsing natural language descriptions of workspace scenarios
227and extracting structured information for creating complete mock environments.
228
229Extract the following information from the command:
2301. Domain/industry (e.g., bank, e-commerce, healthcare, etc.)
2312. Chaos/failure characteristics (flaky rates, slow KYC, high latency, etc.)
2323. Initial data requirements (number of users, disputes, orders, etc.)
2334. API endpoints needed for the domain
2345. Behavioral rules (failure rates, latency patterns, etc.)
2356. Data models and relationships
236
237Return your response as a JSON object with this structure:
238{
239  "domain": "string (e.g., bank, e-commerce, healthcare)",
240  "title": "string (workspace title)",
241  "description": "string (workspace description)",
242  "chaos_characteristics": [
243    {
244      "type": "string (latency|failure|rate_limit|etc.)",
245      "description": "string (e.g., flaky foreign exchange rates)",
246      "config": {
247        "probability": 0.0-1.0,
248        "delay_ms": number,
249        "error_rate": 0.0-1.0,
250        "error_codes": [500, 502, 503],
251        "details": "additional configuration details"
252      }
253    }
254  ],
255  "initial_data": {
256    "users": number,
257    "disputes": number,
258    "orders": number,
259    "custom": {
260      "entity_name": number
261    }
262  },
263  "api_requirements": {
264    "endpoints": [
265      {
266        "path": "string",
267        "method": "string",
268        "description": "string"
269      }
270    ],
271    "models": [
272      {
273        "name": "string",
274        "fields": [
275          {
276            "name": "string",
277            "type": "string"
278          }
279        ]
280      }
281    ]
282  },
283  "behavioral_rules": [
284    {
285      "description": "string",
286      "type": "string",
287      "config": {}
288    }
289  ]
290}
291
292Be specific and extract all details mentioned in the command."#;
293
294        // Build user prompt with the command
295        let user_prompt = format!(
296            "Parse this workspace scenario description and extract all requirements:\n\n{}",
297            command
298        );
299
300        // Create LLM request
301        let llm_request = LlmGenerationRequest {
302            system_prompt: system_prompt.to_string(),
303            user_prompt,
304            temperature: 0.3,
305            max_tokens: 3000,
306            schema: None,
307        };
308
309        // Generate response from LLM
310        let response = self.llm_client.generate(&llm_request).await?;
311
312        // Parse the response into ParsedWorkspaceScenario
313        let response_str = serde_json::to_string(&response).unwrap_or_default();
314        let parsed: ParsedWorkspaceScenario = serde_json::from_value(response).map_err(|e| {
315            crate::Error::generic(format!(
316                "Failed to parse LLM response as ParsedWorkspaceScenario: {}. Response: {}",
317                e, response_str
318            ))
319        })?;
320
321        Ok(parsed)
322    }
323
324    /// Parse a workspace creation command
325    ///
326    /// This method extracts information about creating a complete workspace including:
327    /// - Workspace name and description
328    /// - Entities (customers, orders, payments, etc.)
329    /// - Personas with relationships
330    /// - Behavioral scenarios (happy path, failure, slow path)
331    /// - Reality continuum preferences
332    /// - Drift budget preferences
333    pub async fn parse_workspace_creation_command(
334        &self,
335        command: &str,
336    ) -> Result<ParsedWorkspaceCreation> {
337        // Build system prompt for workspace creation parsing
338        let system_prompt = r#"You are an expert at parsing natural language descriptions of workspace creation
339and extracting structured information for creating complete mock backends with personas, scenarios, and configuration.
340
341Extract the following information from the command:
3421. Workspace name and description
3432. Entities (customers, orders, payments, products, etc.)
3443. Personas with their traits and relationships (e.g., customer owns orders)
3454. Behavioral scenarios:
346   - Happy path scenarios (successful flows)
347   - Failure path scenarios (error cases)
348   - Slow path scenarios (latency/performance issues)
3495. Reality continuum preferences (e.g., "80% mock, 20% real prod for catalog only")
3506. Drift budget preferences (e.g., "strict drift budget", "moderate tolerance")
351
352Return your response as a JSON object with this structure:
353{
354  "workspace_name": "string (e.g., e-commerce-workspace)",
355  "workspace_description": "string",
356  "entities": [
357    {
358      "name": "string (e.g., Customer, Order, Payment)",
359      "description": "string",
360      "endpoints": [
361        {
362          "path": "string",
363          "method": "string",
364          "description": "string"
365        }
366      ],
367      "fields": [
368        {
369          "name": "string",
370          "type": "string",
371          "description": "string"
372        }
373      ]
374    }
375  ],
376  "personas": [
377    {
378      "name": "string (e.g., premium-customer, regular-customer)",
379      "description": "string",
380      "traits": {
381        "trait_name": "trait_value"
382      },
383      "relationships": [
384        {
385          "type": "string (e.g., owns, belongs_to, has)",
386          "target_entity": "string (e.g., Order, Payment)"
387        }
388      ]
389    }
390  ],
391  "scenarios": [
392    {
393      "name": "string (e.g., happy-path-checkout, failed-payment, slow-shipping)",
394      "type": "string (happy_path|failure|slow_path)",
395      "description": "string",
396      "steps": [
397        {
398          "description": "string (e.g., Create order, Process payment)",
399          "endpoint": "string (e.g., POST /api/orders)",
400          "expected_outcome": "string"
401        }
402      ]
403    }
404  ],
405  "reality_continuum": {
406    "default_ratio": 0.0-1.0 (0.0 = 100% mock, 1.0 = 100% real),
407    "route_rules": [
408      {
409        "pattern": "string (e.g., /api/catalog/*)",
410        "ratio": 0.0-1.0,
411        "description": "string"
412      }
413    ],
414    "transition_mode": "string (manual|time_based|scheduled)"
415  },
416  "drift_budget": {
417    "strictness": "string (strict|moderate|lenient)",
418    "max_breaking_changes": number,
419    "max_non_breaking_changes": number,
420    "description": "string"
421  }
422}
423
424Be specific and extract all details mentioned in the command. Ensure at least 2-3 endpoints per entity,
4252-3 personas with relationships, and 2-3 behavioral scenarios."#;
426
427        // Build user prompt with the command
428        let user_prompt = format!(
429            "Parse this workspace creation command and extract all requirements:\n\n{}",
430            command
431        );
432
433        // Create LLM request
434        let llm_request = LlmGenerationRequest {
435            system_prompt: system_prompt.to_string(),
436            user_prompt,
437            temperature: 0.3,
438            max_tokens: 4000,
439            schema: None,
440        };
441
442        // Generate response from LLM
443        let response = self.llm_client.generate(&llm_request).await?;
444
445        // Parse the response into ParsedWorkspaceCreation
446        let response_str = serde_json::to_string(&response).unwrap_or_default();
447        let parsed: ParsedWorkspaceCreation = serde_json::from_value(response).map_err(|e| {
448            crate::Error::generic(format!(
449                "Failed to parse LLM response as ParsedWorkspaceCreation: {}. Response: {}",
450                e, response_str
451            ))
452        })?;
453
454        Ok(parsed)
455    }
456
457    /// Parse a reality continuum configuration command
458    ///
459    /// This method extracts reality continuum preferences from natural language,
460    /// such as "80% mock, 20% real prod for catalog only".
461    pub async fn parse_reality_continuum_command(
462        &self,
463        command: &str,
464    ) -> Result<ParsedRealityContinuum> {
465        // Build system prompt for reality continuum parsing
466        let system_prompt = r#"You are an expert at parsing natural language descriptions of reality continuum
467configuration and extracting structured blend ratio settings.
468
469Extract the following information from the command:
4701. Default blend ratio (e.g., "80% mock, 20% real" means ratio 0.2)
4712. Route-specific rules (e.g., "catalog only", "for /api/products/*")
4723. Transition mode preferences (manual, time-based, scheduled)
473
474Return your response as a JSON object with this structure:
475{
476  "default_ratio": 0.0-1.0 (0.0 = 100% mock, 1.0 = 100% real),
477  "enabled": true/false,
478  "route_rules": [
479    {
480      "pattern": "string (e.g., /api/catalog/*, /api/products/*)",
481      "ratio": 0.0-1.0,
482      "description": "string"
483    }
484  ],
485  "transition_mode": "string (manual|time_based|scheduled)",
486  "merge_strategy": "string (field_level|weighted|body_blend)"
487}
488
489Examples:
490- "80% mock, 20% real" → default_ratio: 0.2
491- "Make catalog 50% real" → route_rules: [{pattern: "/api/catalog/*", ratio: 0.5}]
492- "100% mock for now" → default_ratio: 0.0, enabled: true"#;
493
494        // Build user prompt with the command
495        let user_prompt =
496            format!("Parse this reality continuum configuration command:\n\n{}", command);
497
498        // Create LLM request
499        let llm_request = LlmGenerationRequest {
500            system_prompt: system_prompt.to_string(),
501            user_prompt,
502            temperature: 0.3,
503            max_tokens: 2000,
504            schema: None,
505        };
506
507        // Generate response from LLM
508        let response = self.llm_client.generate(&llm_request).await?;
509
510        // Parse the response into ParsedRealityContinuum
511        let response_str = serde_json::to_string(&response).unwrap_or_default();
512        let parsed: ParsedRealityContinuum = serde_json::from_value(response).map_err(|e| {
513            crate::Error::generic(format!(
514                "Failed to parse LLM response as ParsedRealityContinuum: {}. Response: {}",
515                e, response_str
516            ))
517        })?;
518
519        Ok(parsed)
520    }
521
522    /// Parse a drift budget configuration command
523    ///
524    /// This method extracts drift budget preferences from natural language,
525    /// such as "strict drift budget" or "moderate tolerance for changes".
526    pub async fn parse_drift_budget_command(&self, command: &str) -> Result<ParsedDriftBudget> {
527        // Build system prompt for drift budget parsing
528        let system_prompt = r#"You are an expert at parsing natural language descriptions of drift budget
529configuration and extracting structured budget settings.
530
531Extract the following information from the command:
5321. Strictness level (strict, moderate, lenient)
5332. Breaking change tolerance
5343. Non-breaking change tolerance
5354. Per-service/endpoint preferences
536
537Return your response as a JSON object with this structure:
538{
539  "strictness": "string (strict|moderate|lenient)",
540  "enabled": true/false,
541  "max_breaking_changes": number (0 for strict, higher for lenient),
542  "max_non_breaking_changes": number,
543  "max_field_churn_percent": number (0.0-100.0, optional),
544  "time_window_days": number (optional, for percentage-based budgets),
545  "per_service_budgets": {
546    "service_name": {
547      "max_breaking_changes": number,
548      "max_non_breaking_changes": number
549    }
550  },
551  "description": "string"
552}
553
554Examples:
555- "strict drift budget" → strictness: "strict", max_breaking_changes: 0, max_non_breaking_changes: 5
556- "moderate tolerance" → strictness: "moderate", max_breaking_changes: 1, max_non_breaking_changes: 10
557- "lenient, allow up to 5 breaking changes" → strictness: "lenient", max_breaking_changes: 5"#;
558
559        // Build user prompt with the command
560        let user_prompt = format!("Parse this drift budget configuration command:\n\n{}", command);
561
562        // Create LLM request
563        let llm_request = LlmGenerationRequest {
564            system_prompt: system_prompt.to_string(),
565            user_prompt,
566            temperature: 0.3,
567            max_tokens: 2000,
568            schema: None,
569        };
570
571        // Generate response from LLM
572        let response = self.llm_client.generate(&llm_request).await?;
573
574        // Parse the response into ParsedDriftBudget
575        let response_str = serde_json::to_string(&response).unwrap_or_default();
576        let parsed: ParsedDriftBudget = serde_json::from_value(response).map_err(|e| {
577            crate::Error::generic(format!(
578                "Failed to parse LLM response as ParsedDriftBudget: {}. Response: {}",
579                e, response_str
580            ))
581        })?;
582
583        Ok(parsed)
584    }
585}
586
587/// Parsed command structure containing extracted API requirements
588#[derive(Debug, Clone, Serialize, Deserialize)]
589pub struct ParsedCommand {
590    /// API type/category
591    pub api_type: String,
592    /// API title
593    pub title: String,
594    /// API description
595    pub description: String,
596    /// List of endpoints
597    pub endpoints: Vec<EndpointRequirement>,
598    /// List of data models
599    pub models: Vec<ModelRequirement>,
600    /// Relationships between models
601    #[serde(default)]
602    pub relationships: Vec<RelationshipRequirement>,
603    /// Sample data counts per model
604    #[serde(default)]
605    pub sample_counts: HashMap<String, usize>,
606    /// Business flows
607    #[serde(default)]
608    pub flows: Vec<FlowRequirement>,
609}
610
/// Endpoint requirement extracted from command
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EndpointRequirement {
    /// Path (e.g., /api/products)
    pub path: String,
    /// HTTP method (GET, POST, PUT, DELETE, PATCH)
    pub method: String,
    /// Description
    pub description: String,
    /// Request body schema (if applicable); `None` when the LLM omits it
    #[serde(default)]
    pub request_body: Option<RequestBodyRequirement>,
    /// Response schema; `None` when the LLM omits it
    #[serde(default)]
    pub response: Option<ResponseRequirement>,
}

/// Request body requirement
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RequestBodyRequirement {
    /// Schema definition (free-form JSON, as produced by the LLM)
    #[serde(default)]
    pub schema: Option<serde_json::Value>,
    /// Required fields
    #[serde(default)]
    pub required: Vec<String>,
}

/// Response requirement
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResponseRequirement {
    /// HTTP status code (defaults to 200 when absent)
    #[serde(default = "default_status")]
    pub status: u16,
    /// Response schema (free-form JSON, as produced by the LLM)
    #[serde(default)]
    pub schema: Option<serde_json::Value>,
    /// Whether response is an array
    #[serde(default)]
    pub is_array: bool,
    /// Count of items (if specified in the command, e.g., "20 products")
    #[serde(default)]
    pub count: Option<usize>,
}

/// Serde default for `ResponseRequirement::status`: HTTP 200 OK.
fn default_status() -> u16 {
    200
}
659
/// Model requirement extracted from command
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelRequirement {
    /// Model name (e.g., "Product")
    pub name: String,
    /// List of fields
    pub fields: Vec<FieldRequirement>,
}

/// Field requirement
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FieldRequirement {
    /// Field name
    pub name: String,
    /// Field type (string, number, integer, boolean, array, object);
    /// raw identifier because `type` is a Rust keyword
    pub r#type: String,
    /// Field description (empty when the LLM omits it)
    #[serde(default)]
    pub description: String,
    /// Whether field is required (defaults to `true` when absent)
    #[serde(default = "default_true")]
    pub required: bool,
}

/// Serde default used by several types in this module: `true`.
fn default_true() -> bool {
    true
}

/// Relationship requirement
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RelationshipRequirement {
    /// Source model
    pub from: String,
    /// Target model
    pub to: String,
    /// Relationship type (one-to-many, many-to-many, one-to-one)
    pub r#type: String,
}

/// Flow requirement
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlowRequirement {
    /// Flow name (e.g., "checkout")
    pub name: String,
    /// Flow description
    pub description: String,
    /// Steps in the flow, as free-text descriptions
    #[serde(default)]
    pub steps: Vec<String>,
}

/// Alias for API requirement (for backwards compatibility)
pub type ApiRequirement = ParsedCommand;
713
/// Parsed workspace scenario structure
///
/// Produced by [`VoiceCommandParser::parse_workspace_scenario_command`]; all
/// sections other than domain/title/description are optional in the LLM
/// response and fall back to empty defaults.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedWorkspaceScenario {
    /// Domain/industry (e.g., bank, e-commerce, healthcare)
    pub domain: String,
    /// Workspace title
    pub title: String,
    /// Workspace description
    pub description: String,
    /// Chaos characteristics
    #[serde(default)]
    pub chaos_characteristics: Vec<ChaosCharacteristic>,
    /// Initial data requirements
    #[serde(default)]
    pub initial_data: InitialDataRequirements,
    /// API requirements
    #[serde(default)]
    pub api_requirements: ApiRequirements,
    /// Behavioral rules
    #[serde(default)]
    pub behavioral_rules: Vec<BehavioralRule>,
}

/// Chaos characteristic
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChaosCharacteristic {
    /// Type of chaos (latency, failure, rate_limit, etc.)
    pub r#type: String,
    /// Description (e.g., "flaky foreign exchange rates")
    pub description: String,
    /// Configuration details; free-form JSON (probability, delay_ms,
    /// error_rate, error_codes, ... per the parsing prompt)
    #[serde(default)]
    pub config: serde_json::Value,
}

/// Initial data requirements
///
/// `Default` (all `None`/empty) supports `#[serde(default)]` on the parent.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct InitialDataRequirements {
    /// Number of users
    #[serde(default)]
    pub users: Option<usize>,
    /// Number of disputes
    #[serde(default)]
    pub disputes: Option<usize>,
    /// Number of orders
    #[serde(default)]
    pub orders: Option<usize>,
    /// Custom entity counts, keyed by entity name
    #[serde(default)]
    pub custom: HashMap<String, usize>,
}

/// API requirements for the scenario
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ApiRequirements {
    /// List of endpoints
    #[serde(default)]
    pub endpoints: Vec<EndpointRequirement>,
    /// List of models
    #[serde(default)]
    pub models: Vec<ModelRequirement>,
}

/// Behavioral rule
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BehavioralRule {
    /// Rule description
    pub description: String,
    /// Rule type
    pub r#type: String,
    /// Rule configuration; free-form JSON
    #[serde(default)]
    pub config: serde_json::Value,
}
788
/// Parsed workspace creation structure
///
/// Produced by [`VoiceCommandParser::parse_workspace_creation_command`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedWorkspaceCreation {
    /// Workspace name (e.g., "e-commerce-workspace")
    pub workspace_name: String,
    /// Workspace description
    pub workspace_description: String,
    /// List of entities
    #[serde(default)]
    pub entities: Vec<EntityRequirement>,
    /// List of personas
    #[serde(default)]
    pub personas: Vec<PersonaRequirement>,
    /// List of behavioral scenarios
    #[serde(default)]
    pub scenarios: Vec<ScenarioRequirement>,
    /// Reality continuum preferences; `None` when the command didn't mention them
    #[serde(default)]
    pub reality_continuum: Option<ParsedRealityContinuum>,
    /// Drift budget preferences; `None` when the command didn't mention them
    #[serde(default)]
    pub drift_budget: Option<ParsedDriftBudget>,
}

/// Entity requirement for workspace creation
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntityRequirement {
    /// Entity name (e.g., Customer, Order, Payment)
    pub name: String,
    /// Entity description
    pub description: String,
    /// Endpoints for this entity
    #[serde(default)]
    pub endpoints: Vec<EntityEndpointRequirement>,
    /// Fields for this entity
    #[serde(default)]
    pub fields: Vec<FieldRequirement>,
}

/// Endpoint requirement for an entity
///
/// Slimmer than [`EndpointRequirement`]: no request/response schemas.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntityEndpointRequirement {
    /// Path (e.g., /api/customers)
    pub path: String,
    /// HTTP method
    pub method: String,
    /// Description
    pub description: String,
}

/// Persona requirement for workspace creation
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PersonaRequirement {
    /// Persona name (e.g., premium-customer, regular-customer)
    pub name: String,
    /// Persona description
    pub description: String,
    /// Persona traits, as free-form name → value pairs
    #[serde(default)]
    pub traits: HashMap<String, String>,
    /// Relationships to other entities
    #[serde(default)]
    pub relationships: Vec<PersonaRelationship>,
}

/// Persona relationship
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PersonaRelationship {
    /// Relationship type (e.g., owns, belongs_to, has)
    pub r#type: String,
    /// Target entity name (e.g., Order, Payment)
    pub target_entity: String,
}

/// Scenario requirement for workspace creation
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScenarioRequirement {
    /// Scenario name (e.g., happy-path-checkout, failed-payment)
    pub name: String,
    /// Scenario type (happy_path, failure, slow_path)
    pub r#type: String,
    /// Scenario description
    pub description: String,
    /// Steps in the scenario
    #[serde(default)]
    pub steps: Vec<ScenarioStepRequirement>,
}

/// Scenario step requirement
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScenarioStepRequirement {
    /// Step description (e.g., "Create order", "Process payment")
    pub description: String,
    /// Endpoint for this step (e.g., POST /api/orders)
    pub endpoint: String,
    /// Expected outcome
    pub expected_outcome: String,
}
887
/// Parsed reality continuum configuration
///
/// Produced by [`VoiceCommandParser::parse_reality_continuum_command`]
/// (e.g., "80% mock, 20% real" → `default_ratio: 0.2`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedRealityContinuum {
    /// Default blend ratio (0.0 = 100% mock, 1.0 = 100% real); defaults to 0.0
    #[serde(default = "default_blend_ratio")]
    pub default_ratio: f64,
    /// Whether reality continuum is enabled; defaults to true
    #[serde(default = "default_true")]
    pub enabled: bool,
    /// Route-specific rules overriding the default ratio
    #[serde(default)]
    pub route_rules: Vec<ParsedContinuumRule>,
    /// Transition mode (manual|time_based|scheduled per the parsing prompt);
    /// empty string when absent
    #[serde(default)]
    pub transition_mode: String,
    /// Merge strategy (field_level|weighted|body_blend per the parsing prompt);
    /// empty string when absent
    #[serde(default)]
    pub merge_strategy: String,
}

/// Serde default for `ParsedRealityContinuum::default_ratio`: 0.0 (100% mock).
fn default_blend_ratio() -> f64 {
    0.0
}

/// Parsed continuum rule
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedContinuumRule {
    /// Path pattern (e.g., /api/catalog/*)
    pub pattern: String,
    /// Blend ratio for this route (0.0 = 100% mock, 1.0 = 100% real)
    pub ratio: f64,
    /// Description
    #[serde(default)]
    pub description: String,
}

/// Parsed drift budget configuration
///
/// Produced by [`VoiceCommandParser::parse_drift_budget_command`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedDriftBudget {
    /// Strictness level (strict, moderate, lenient)
    pub strictness: String,
    /// Whether drift budget is enabled; defaults to true
    #[serde(default = "default_true")]
    pub enabled: bool,
    /// Maximum breaking changes allowed; defaults to 0
    #[serde(default)]
    pub max_breaking_changes: u32,
    /// Maximum non-breaking changes allowed; defaults to 0
    #[serde(default)]
    pub max_non_breaking_changes: u32,
    /// Maximum field churn percentage (0.0-100.0, optional; omitted from JSON when None)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub max_field_churn_percent: Option<f64>,
    /// Time window in days (optional, for percentage-based budgets; omitted when None)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub time_window_days: Option<u32>,
    /// Per-service budgets, keyed by service name
    #[serde(default)]
    pub per_service_budgets: HashMap<String, ParsedServiceBudget>,
    /// Description
    #[serde(default)]
    pub description: String,
}

/// Parsed service budget
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedServiceBudget {
    /// Maximum breaking changes for this service; defaults to 0
    #[serde(default)]
    pub max_breaking_changes: u32,
    /// Maximum non-breaking changes for this service; defaults to 0
    #[serde(default)]
    pub max_non_breaking_changes: u32,
}