mockforge_ui/handlers/
ai_studio.rs

//! AI Studio API handlers for Admin UI
//!
//! Provides endpoints for the unified AI Studio interface, including natural language
//! chat, mock generation, debugging, persona generation, and artifact freezing.

6use axum::{
7    extract::{Json, Query, State},
8    http::StatusCode,
9    response::Json as ResponseJson,
10};
11use json_patch::{patch, Patch};
12use jsonptr::PointerBuf;
13use mockforge_core::ai_studio::{
14    get_conversation_store, initialize_conversation_store, ArtifactFreezer, BudgetConfig,
15    BudgetManager, ChatContext, ChatMessage, ChatOrchestrator, ChatRequest, ChatResponse,
16    ContractDiffHandler, ContractDiffQueryResult, DebugAnalyzer, DebugContextIntegrator,
17    DebugRequest, DebugResponse, FreezeRequest, FrozenArtifact, MockGenerator, OrgAiControlsConfig,
18    OrgControls, PersonaGenerationRequest, PersonaGenerationResponse, PersonaGenerator, UsageStats,
19};
20use mockforge_core::intelligent_behavior::IntelligentBehaviorConfig;
21use serde::{Deserialize, Serialize};
22use serde_json::Value;
23use std::collections::HashMap;
24
25use crate::handlers::AdminState;
26use crate::models::ApiResponse;
27use mockforge_core::ai_studio::config::DeterministicModeConfig;
28
29/// Request for chat interaction
/// Request payload for a chat interaction (POST /api/v1/ai-studio/chat)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatRequestPayload {
    /// User's message; must be non-empty after trimming (the handler rejects
    /// blank messages with 400 Bad Request)
    pub message: String,

    /// Optional conversation ID; when present, prior history is loaded and
    /// the exchange is appended to that conversation. When absent, a new
    /// conversation is created.
    pub conversation_id: Option<String>,

    /// Optional workspace ID used to scope the conversation
    pub workspace_id: Option<String>,
}
41
42/// Process a chat message
43///
44/// POST /api/v1/ai-studio/chat
45pub async fn chat(
46    Json(request): Json<ChatRequestPayload>,
47) -> Result<ResponseJson<ApiResponse<ChatResponse>>, StatusCode> {
48    if request.message.trim().is_empty() {
49        return Err(StatusCode::BAD_REQUEST);
50    }
51
52    // Create orchestrator with default config
53    let config = IntelligentBehaviorConfig::default();
54    let orchestrator = ChatOrchestrator::new(config);
55
56    // Initialize conversation store if not already done
57    // Note: In production, this should be done at application startup
58    let _ = initialize_conversation_store().await;
59
60    // Load conversation context if conversation_id provided
61    let context = if let Some(conv_id) = &request.conversation_id {
62        let store = get_conversation_store();
63        match store.get_context(conv_id).await {
64            Ok(Some(ctx)) => Some(ctx),
65            Ok(None) => {
66                // Conversation not found, create new context
67                Some(ChatContext {
68                    history: vec![],
69                    workspace_id: request.workspace_id.clone(),
70                })
71            }
72            Err(_) => {
73                // Error loading, use empty context
74                Some(ChatContext {
75                    history: vec![],
76                    workspace_id: request.workspace_id.clone(),
77                })
78            }
79        }
80    } else {
81        None
82    };
83
84    // Build chat request
85    let chat_request = ChatRequest {
86        message: request.message.clone(),
87        context,
88        workspace_id: request.workspace_id.clone(),
89        org_id: None,
90        user_id: None,
91    };
92
93    // Process request
94    let response_result = orchestrator.process(&chat_request).await;
95
96    // Save conversation history if conversation_id provided
97    if let Some(conv_id) = &request.conversation_id {
98        let store = get_conversation_store();
99
100        // Add user message to conversation
101        let user_message = ChatMessage {
102            role: "user".to_string(),
103            content: request.message.clone(),
104        };
105        let _ = store.add_message(conv_id, user_message).await;
106
107        // Add assistant response to conversation
108        if let Ok(ref response) = response_result {
109            let assistant_message = ChatMessage {
110                role: "assistant".to_string(),
111                content: response.message.clone(),
112            };
113            let _ = store.add_message(conv_id, assistant_message).await;
114        }
115    } else {
116        // Create new conversation if none specified
117        let store = get_conversation_store();
118        if let Ok(conv_id) = store.create_conversation(request.workspace_id.clone()).await {
119            // Add messages to new conversation
120            let user_message = ChatMessage {
121                role: "user".to_string(),
122                content: request.message.clone(),
123            };
124            let _ = store.add_message(&conv_id, user_message).await;
125
126            if let Ok(ref response) = response_result {
127                let assistant_message = ChatMessage {
128                    role: "assistant".to_string(),
129                    content: response.message.clone(),
130                };
131                let _ = store.add_message(&conv_id, assistant_message).await;
132            }
133        }
134    }
135
136    // Return response
137    match response_result {
138        Ok(response) => Ok(ResponseJson(ApiResponse::success(response))),
139        Err(e) => Ok(ResponseJson(ApiResponse::error(format!("Failed to process chat: {}", e)))),
140    }
141}
142
143/// Request for mock generation
144#[derive(Debug, Clone, Serialize, Deserialize)]
145pub struct GenerateMockRequest {
146    /// Natural language description
147    pub description: String,
148
149    /// Optional workspace ID
150    pub workspace_id: Option<String>,
151}
152
153/// Response from mock generation
154#[derive(Debug, Clone, Serialize, Deserialize)]
155pub struct GenerateMockResponse {
156    /// Generated OpenAPI spec (if any)
157    pub spec: Option<Value>,
158
159    /// Status message
160    pub message: String,
161}
162
163/// Generate a mock from natural language
164///
165/// POST /api/v1/ai-studio/generate-mock
166pub async fn generate_mock(
167    State(state): State<AdminState>,
168    Json(request): Json<GenerateMockRequest>,
169) -> Result<ResponseJson<ApiResponse<GenerateMockResponse>>, StatusCode> {
170    if request.description.trim().is_empty() {
171        return Err(StatusCode::BAD_REQUEST);
172    }
173
174    let generator = MockGenerator::new();
175
176    // Get workspace config to check ai_mode and deterministic config
177    let ai_mode = if let Some(workspace_id) = &request.workspace_id {
178        // Try to load workspace to get ai_mode
179        if let Ok(workspace) = state.workspace_persistence.load_workspace(workspace_id).await {
180            workspace.config.ai_mode
181        } else {
182            None // Default to live mode if workspace not found
183        }
184    } else {
185        None // Default to live mode if no workspace_id provided
186    };
187
188    // Get deterministic config from workspace if available
189    let deterministic_config = if let Some(workspace_id) = &request.workspace_id {
190        if let Ok(workspace) = state.workspace_persistence.load_workspace(workspace_id).await {
191            // Check if workspace has deterministic mode config
192            // For now, use default config - in production this would come from workspace config
193            Some(DeterministicModeConfig::default())
194        } else {
195            None
196        }
197    } else {
198        None
199    };
200
201    match generator
202        .generate(
203            &request.description,
204            request.workspace_id.as_deref(),
205            ai_mode,
206            deterministic_config.as_ref(),
207        )
208        .await
209    {
210        Ok(result) => {
211            let response = GenerateMockResponse {
212                spec: result.spec,
213                message: result.message,
214            };
215            Ok(ResponseJson(ApiResponse::success(response)))
216        }
217        Err(e) => Ok(ResponseJson(ApiResponse::error(format!("Failed to generate mock: {}", e)))),
218    }
219}
220
221/// Request for debug analysis
/// Request payload for debug analysis (POST /api/v1/ai-studio/debug-test)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DebugTestRequest {
    /// Test failure logs; must be non-empty after trimming (the handler
    /// rejects blanks with 400 Bad Request)
    pub test_logs: String,

    /// Optional test name/identifier
    pub test_name: Option<String>,

    /// Optional workspace ID
    pub workspace_id: Option<String>,
}
233
234/// Analyze a test failure
235///
236/// POST /api/v1/ai-studio/debug-test
237pub async fn debug_test(
238    Json(request): Json<DebugTestRequest>,
239) -> Result<ResponseJson<ApiResponse<DebugResponse>>, StatusCode> {
240    if request.test_logs.trim().is_empty() {
241        return Err(StatusCode::BAD_REQUEST);
242    }
243
244    let analyzer = DebugAnalyzer::new();
245
246    let debug_request = DebugRequest {
247        test_logs: request.test_logs,
248        test_name: request.test_name,
249        workspace_id: request.workspace_id,
250    };
251
252    match analyzer.analyze(&debug_request).await {
253        Ok(response) => Ok(ResponseJson(ApiResponse::success(response))),
254        Err(e) => Ok(ResponseJson(ApiResponse::error(format!(
255            "Failed to analyze test failure: {}",
256            e
257        )))),
258    }
259}
260
261/// Request for debug analysis with context
/// Request payload for debug analysis with subsystem context
/// (POST /api/v1/ai-studio/debug/analyze-with-context)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DebugWithContextRequest {
    /// Test failure logs; must be non-empty after trimming
    pub test_logs: String,

    /// Optional test name/identifier
    pub test_name: Option<String>,

    /// Optional workspace ID
    pub workspace_id: Option<String>,

    /// Organization ID (for context); currently accepted but not consumed
    /// by the handler in this file
    pub org_id: Option<String>,
}
276
277/// Analyze a test failure with comprehensive context from subsystems
278///
279/// POST /api/v1/ai-studio/debug/analyze-with-context
280pub async fn debug_analyze_with_context(
281    Json(request): Json<DebugWithContextRequest>,
282) -> Result<ResponseJson<ApiResponse<DebugResponse>>, StatusCode> {
283    if request.test_logs.trim().is_empty() {
284        return Err(StatusCode::BAD_REQUEST);
285    }
286
287    let analyzer = DebugAnalyzer::new();
288
289    // Create debug context integrator (optional - would need subsystem references in production)
290    // For now, pass None as we don't have direct access to RealityEngine, etc. here
291    // In production, these would be injected via State
292    let integrator: Option<&DebugContextIntegrator> = None;
293
294    let debug_request = DebugRequest {
295        test_logs: request.test_logs,
296        test_name: request.test_name,
297        workspace_id: request.workspace_id,
298    };
299
300    match analyzer.analyze(&debug_request).await {
301        Ok(response) => Ok(ResponseJson(ApiResponse::success(response))),
302        Err(e) => Ok(ResponseJson(ApiResponse::error(format!(
303            "Failed to analyze test failure with context: {}",
304            e
305        )))),
306    }
307}
308
309/// Request for persona generation
/// Request payload for persona generation
/// (POST /api/v1/ai-studio/generate-persona)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GeneratePersonaRequest {
    /// Natural language description of the persona; must be non-empty after
    /// trimming (the handler rejects blanks with 400 Bad Request)
    pub description: String,

    /// Optional base persona ID to tweak instead of generating from scratch
    pub base_persona_id: Option<String>,

    /// Optional workspace ID; when present, the workspace's AI mode and
    /// deterministic configuration are applied
    pub workspace_id: Option<String>,
}
321
322/// Generate or tweak a persona
323///
324/// POST /api/v1/ai-studio/generate-persona
325pub async fn generate_persona(
326    State(state): State<AdminState>,
327    Json(request): Json<GeneratePersonaRequest>,
328) -> Result<ResponseJson<ApiResponse<PersonaGenerationResponse>>, StatusCode> {
329    if request.description.trim().is_empty() {
330        return Err(StatusCode::BAD_REQUEST);
331    }
332
333    let generator = PersonaGenerator::new();
334
335    let persona_request = PersonaGenerationRequest {
336        description: request.description.clone(),
337        base_persona_id: request.base_persona_id,
338        workspace_id: request.workspace_id.clone(),
339    };
340
341    // Get workspace config to check ai_mode and deterministic config
342    let ai_mode = if let Some(workspace_id) = &request.workspace_id {
343        if let Ok(workspace) = state.workspace_persistence.load_workspace(workspace_id).await {
344            workspace.config.ai_mode
345        } else {
346            None
347        }
348    } else {
349        None
350    };
351
352    let deterministic_config = if let Some(workspace_id) = &request.workspace_id {
353        if let Ok(_workspace) = state.workspace_persistence.load_workspace(workspace_id).await {
354            Some(DeterministicModeConfig::default())
355        } else {
356            None
357        }
358    } else {
359        None
360    };
361
362    match generator
363        .generate(&persona_request, ai_mode, deterministic_config.as_ref())
364        .await
365    {
366        Ok(response) => Ok(ResponseJson(ApiResponse::success(response))),
367        Err(e) => {
368            Ok(ResponseJson(ApiResponse::error(format!("Failed to generate persona: {}", e))))
369        }
370    }
371}
372
373/// Request for artifact freezing
/// Request payload for artifact freezing (without metadata).
///
/// NOTE(review): the freeze endpoint in this file uses
/// `FreezeArtifactRequestWithMetadata`; this type appears unused here —
/// confirm it is still referenced elsewhere before removing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FreezeArtifactRequest {
    /// Type of artifact (e.g. the category encoded into frozen filenames)
    pub artifact_type: String,

    /// Artifact content as arbitrary JSON
    pub content: Value,

    /// Output format ("yaml" or "json")
    pub format: String,

    /// Optional output path
    pub path: Option<String>,
}
388
389/// Request for artifact freezing with metadata
/// Request payload for artifact freezing with optional provenance metadata
/// (POST /api/v1/ai-studio/freeze)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FreezeArtifactRequestWithMetadata {
    /// Type of artifact
    pub artifact_type: String,

    /// Artifact content as arbitrary JSON
    pub content: Value,

    /// Output format ("yaml" or "json")
    pub format: String,

    /// Optional output path
    pub path: Option<String>,

    /// Optional provenance metadata recorded alongside the frozen artifact
    pub metadata: Option<FreezeMetadataPayload>,
}
407
408/// Metadata payload for freezing
/// Provenance metadata supplied when freezing an AI-generated artifact.
/// Mirrors `mockforge_core::ai_studio::FreezeMetadata`, except that
/// `output_hash` is computed by the freezer rather than supplied here.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FreezeMetadataPayload {
    /// LLM provider used to generate the artifact
    pub llm_provider: Option<String>,

    /// LLM model used
    pub llm_model: Option<String>,

    /// LLM version
    pub llm_version: Option<String>,

    /// Hash of the input prompt
    pub prompt_hash: Option<String>,

    /// Original prompt/description the artifact was generated from
    pub original_prompt: Option<String>,
}
426
427/// Freeze an AI-generated artifact to deterministic format
428///
429/// POST /api/v1/ai-studio/freeze
430pub async fn freeze_artifact(
431    Json(request): Json<FreezeArtifactRequestWithMetadata>,
432) -> Result<ResponseJson<ApiResponse<FrozenArtifact>>, StatusCode> {
433    let freezer = ArtifactFreezer::new();
434
435    let metadata = request.metadata.map(|m| {
436        use mockforge_core::ai_studio::FreezeMetadata;
437        FreezeMetadata {
438            llm_provider: m.llm_provider,
439            llm_model: m.llm_model,
440            llm_version: m.llm_version,
441            prompt_hash: m.prompt_hash,
442            output_hash: None, // Will be calculated by freezer
443            original_prompt: m.original_prompt,
444        }
445    });
446
447    let freeze_request = FreezeRequest {
448        artifact_type: request.artifact_type,
449        content: request.content,
450        format: request.format,
451        path: request.path,
452        metadata,
453    };
454
455    match freezer.freeze(&freeze_request).await {
456        Ok(artifact) => Ok(ResponseJson(ApiResponse::success(artifact))),
457        Err(e) => Ok(ResponseJson(ApiResponse::error(format!("Failed to freeze artifact: {}", e)))),
458    }
459}
460
461/// Query parameters for listing frozen artifacts
/// Query parameters for listing frozen artifacts
/// (GET /api/v1/ai-studio/frozen)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ListFrozenQuery {
    /// Filter by artifact type; matched against the "<type>_" filename prefix
    pub artifact_type: Option<String>,

    /// Workspace ID; accepted but not currently used by the listing handler
    /// in this file
    pub workspace_id: Option<String>,
}
470
471/// List frozen artifacts
472///
473/// GET /api/v1/ai-studio/frozen
474pub async fn list_frozen(
475    Query(params): Query<ListFrozenQuery>,
476) -> Result<ResponseJson<ApiResponse<Vec<FrozenArtifact>>>, StatusCode> {
477    let freezer = ArtifactFreezer::new();
478    let base_dir = freezer.base_dir().to_path_buf();
479
480    // Read all files from the freeze directory
481    let mut artifacts = Vec::new();
482
483    if let Ok(mut entries) = tokio::fs::read_dir(&base_dir).await {
484        while let Ok(Some(entry)) = entries.next_entry().await {
485            let path = entry.path();
486            if path.is_file() {
487                // Check if file matches artifact_type filter
488                let file_name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");
489                if let Some(ref artifact_type) = params.artifact_type {
490                    if !file_name.starts_with(&format!("{}_", artifact_type)) {
491                        continue;
492                    }
493                }
494
495                // Try to load the frozen artifact
496                let content = match tokio::fs::read_to_string(&path).await {
497                    Ok(c) => c,
498                    Err(_) => continue,
499                };
500
501                let content_value: Value = if path.extension().and_then(|e| e.to_str())
502                    == Some("yaml")
503                    || path.extension().and_then(|e| e.to_str()) == Some("yml")
504                {
505                    match serde_yaml::from_str(&content) {
506                        Ok(v) => v,
507                        Err(_) => continue,
508                    }
509                } else {
510                    match serde_json::from_str(&content) {
511                        Ok(v) => v,
512                        Err(_) => continue,
513                    }
514                };
515
516                // Extract metadata from content
517                let metadata = content_value
518                    .get("_frozen_metadata")
519                    .and_then(|m| serde_json::from_value(m.clone()).ok());
520
521                // Extract output_hash from metadata
522                let output_hash = content_value
523                    .get("_frozen_metadata")
524                    .and_then(|m| m.get("output_hash"))
525                    .and_then(|h| h.as_str())
526                    .map(|s| s.to_string());
527
528                // Determine artifact type from filename or metadata
529                let artifact_type = content_value
530                    .get("_frozen_metadata")
531                    .and_then(|m| m.get("artifact_type"))
532                    .and_then(|t| t.as_str())
533                    .map(|s| s.to_string())
534                    .unwrap_or_else(|| {
535                        // Fallback: extract from filename
536                        file_name.split('_').next().unwrap_or("unknown").to_string()
537                    });
538
539                artifacts.push(FrozenArtifact {
540                    artifact_type,
541                    content: content_value,
542                    format: if path.extension().and_then(|e| e.to_str()) == Some("yaml")
543                        || path.extension().and_then(|e| e.to_str()) == Some("yml")
544                    {
545                        "yaml".to_string()
546                    } else {
547                        "json".to_string()
548                    },
549                    path: path.to_string_lossy().to_string(),
550                    metadata,
551                    output_hash,
552                });
553            }
554        }
555    }
556
557    // Sort by path (most recent first if timestamps are in filename)
558    artifacts.sort_by(|a, b| b.path.cmp(&a.path));
559
560    Ok(ResponseJson(ApiResponse::success(artifacts)))
561}
562
563/// Get usage statistics
564///
565/// GET /api/v1/ai-studio/usage
566pub async fn get_usage(
567    Query(params): Query<HashMap<String, String>>,
568) -> Result<ResponseJson<ApiResponse<UsageStats>>, StatusCode> {
569    let workspace_id = params.get("workspace_id").cloned().unwrap_or_default();
570
571    let budget_config = BudgetConfig::default();
572    let budget_manager = BudgetManager::new(budget_config);
573
574    match budget_manager.get_usage(&workspace_id).await {
575        Ok(stats) => Ok(ResponseJson(ApiResponse::success(stats))),
576        Err(e) => Ok(ResponseJson(ApiResponse::error(format!("Failed to get usage stats: {}", e)))),
577    }
578}
579
580/// Request for applying a debug patch
/// Request payload for applying a debug patch
/// (POST /api/v1/ai-studio/apply-patch)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ApplyPatchRequest {
    /// JSON Patch operation, either a single operation object or an object
    /// with an "operations" array of them
    pub patch: Value,

    /// Configuration file path to apply patch to; defaults to
    /// "mockforge.yaml" when omitted
    pub config_path: Option<String>,
}
589
590/// Apply a debug patch to configuration
591///
592/// POST /api/v1/ai-studio/apply-patch
593pub async fn apply_patch(
594    State(state): State<AdminState>,
595    Json(request): Json<ApplyPatchRequest>,
596) -> Result<ResponseJson<ApiResponse<Value>>, StatusCode> {
597    // Determine config file path
598    let config_path = request.config_path.unwrap_or_else(|| "mockforge.yaml".to_string());
599
600    // Load the config file
601    let config_content = match tokio::fs::read_to_string(&config_path).await {
602        Ok(content) => content,
603        Err(e) => {
604            return Ok(ResponseJson(ApiResponse::error(format!(
605                "Failed to read config file {}: {}",
606                config_path, e
607            ))));
608        }
609    };
610
611    // Parse config as JSON (works for YAML too via serde_yaml)
612    let mut config_value: Value = if config_path.ends_with(".yaml") || config_path.ends_with(".yml")
613    {
614        serde_yaml::from_str(&config_content).map_err(|e| StatusCode::BAD_REQUEST)?
615    } else {
616        serde_json::from_str(&config_content).map_err(|e| StatusCode::BAD_REQUEST)?
617    };
618
619    // Parse patch operations
620    let patch_ops: Patch =
621        if let Some(ops_array) = request.patch.get("operations").and_then(|v| v.as_array()) {
622            // Multiple operations
623            Patch(ops_array.iter().filter_map(|op| parse_patch_operation(op).ok()).collect())
624        } else {
625            // Single operation
626            Patch(vec![parse_patch_operation(&request.patch)?])
627        };
628
629    // Apply patch
630    patch(&mut config_value, &patch_ops).map_err(|e| StatusCode::BAD_REQUEST)?;
631
632    // Save updated config
633    let updated_content = if config_path.ends_with(".yaml") || config_path.ends_with(".yml") {
634        serde_yaml::to_string(&config_value).map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?
635    } else {
636        serde_json::to_string_pretty(&config_value)
637            .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?
638    };
639
640    tokio::fs::write(&config_path, updated_content)
641        .await
642        .map_err(|e| StatusCode::INTERNAL_SERVER_ERROR)?;
643
644    Ok(ResponseJson(ApiResponse::success(serde_json::json!({
645        "message": "Patch applied successfully",
646        "config_path": config_path,
647        "updated_config": config_value
648    }))))
649}
650
651/// Parse a single patch operation from JSON
652fn parse_patch_operation(op: &Value) -> Result<json_patch::PatchOperation, StatusCode> {
653    use json_patch::{AddOperation, PatchOperation, RemoveOperation, ReplaceOperation};
654
655    let op_type = op.get("op").and_then(|v| v.as_str()).ok_or(StatusCode::BAD_REQUEST)?;
656
657    let path_str = op.get("path").and_then(|v| v.as_str()).ok_or(StatusCode::BAD_REQUEST)?;
658
659    let path: PointerBuf = path_str.parse().map_err(|_| StatusCode::BAD_REQUEST)?;
660
661    match op_type {
662        "add" => {
663            let value = op.get("value").ok_or(StatusCode::BAD_REQUEST)?;
664            Ok(PatchOperation::Add(AddOperation {
665                path,
666                value: value.clone(),
667            }))
668        }
669        "remove" => Ok(PatchOperation::Remove(RemoveOperation { path })),
670        "replace" => {
671            let value = op.get("value").ok_or(StatusCode::BAD_REQUEST)?;
672            Ok(PatchOperation::Replace(ReplaceOperation {
673                path,
674                value: value.clone(),
675            }))
676        }
677        "copy" => {
678            let from = op.get("from").and_then(|v| v.as_str()).ok_or(StatusCode::BAD_REQUEST)?;
679            let from_path: PointerBuf = from.parse().map_err(|_| StatusCode::BAD_REQUEST)?;
680            Ok(PatchOperation::Copy(json_patch::CopyOperation {
681                path,
682                from: from_path,
683            }))
684        }
685        "move" => {
686            let from = op.get("from").and_then(|v| v.as_str()).ok_or(StatusCode::BAD_REQUEST)?;
687            let from_path: PointerBuf = from.parse().map_err(|_| StatusCode::BAD_REQUEST)?;
688            Ok(PatchOperation::Move(json_patch::MoveOperation {
689                path,
690                from: from_path,
691            }))
692        }
693        "test" => {
694            let value = op.get("value").ok_or(StatusCode::BAD_REQUEST)?;
695            Ok(PatchOperation::Test(json_patch::TestOperation {
696                path,
697                value: value.clone(),
698            }))
699        }
700        _ => Err(StatusCode::BAD_REQUEST),
701    }
702}
703
704/// Get organization-level AI controls
705///
706/// GET /api/v1/ai-studio/org-controls
707pub async fn get_org_controls(
708    Query(params): Query<HashMap<String, String>>,
709) -> Result<ResponseJson<ApiResponse<OrgAiControlsConfig>>, StatusCode> {
710    let org_id = params.get("org_id").cloned();
711    let workspace_id = params.get("workspace_id").cloned();
712
713    // Create org controls service with default YAML config
714    // In production, this would be injected via State
715    let org_controls = OrgControls::new(OrgAiControlsConfig::default());
716
717    match org_controls
718        .load_org_config(org_id.as_deref().unwrap_or("default"), workspace_id.as_deref())
719        .await
720    {
721        Ok(controls) => Ok(ResponseJson(ApiResponse::success(controls))),
722        Err(e) => {
723            Ok(ResponseJson(ApiResponse::error(format!("Failed to get org controls: {}", e))))
724        }
725    }
726}
727
728/// Update organization-level AI controls
729///
730/// PUT /api/v1/ai-studio/org-controls
731pub async fn update_org_controls(
732    Query(params): Query<HashMap<String, String>>,
733    Json(controls): Json<OrgAiControlsConfig>,
734) -> Result<ResponseJson<ApiResponse<OrgAiControlsConfig>>, StatusCode> {
735    let org_id = params.get("org_id").cloned();
736    let workspace_id = params.get("workspace_id").cloned();
737
738    // Note: Database persistence requires OrgControlsAccessor to be available in State
739    // To enable database persistence:
740    // 1. Add OrgControlsAccessor (e.g., DbOrgControls) to AdminState
741    // 2. Call accessor.save_controls(org_id, workspace_id, &controls).await
742    // 3. Return updated controls from database
743    // For now, controls are returned as-is (in-memory only)
744
745    Ok(ResponseJson(ApiResponse::success(controls)))
746}
747
748/// Get organization-level AI usage statistics
749///
750/// GET /api/v1/ai-studio/org-controls/usage
751pub async fn get_org_usage(
752    Query(params): Query<HashMap<String, String>>,
753) -> Result<ResponseJson<ApiResponse<Value>>, StatusCode> {
754    let org_id = params.get("org_id").cloned();
755    let workspace_id = params.get("workspace_id").cloned();
756
757    // Get period filter (default to current month)
758    let period_start = params
759        .get("period_start")
760        .and_then(|s| chrono::DateTime::parse_from_rfc3339(s).ok())
761        .map(|dt| dt.with_timezone(&chrono::Utc))
762        .unwrap_or_else(|| {
763            // Default to start of current month
764            let now = chrono::Utc::now();
765            {
766                use chrono::{Datelike, TimeZone};
767                chrono::NaiveDate::from_ymd_opt(now.year(), now.month(), 1)
768                    .and_then(|d| d.and_hms_opt(0, 0, 0))
769                    .map(|dt| chrono::Utc.from_utc_datetime(&dt))
770                    .unwrap()
771            }
772        });
773
774    let period_end = params
775        .get("period_end")
776        .and_then(|s| chrono::DateTime::parse_from_rfc3339(s).ok())
777        .map(|dt| dt.with_timezone(&chrono::Utc))
778        .unwrap_or_else(chrono::Utc::now);
779
780    // Note: Database querying requires a database connection pool
781    // In production with database access, you would:
782    // 1. Get database pool from State
783    // 2. Query org_ai_usage_logs table:
784    //    SELECT
785    //      SUM(tokens_used) as total_tokens,
786    //      SUM(cost_usd) as total_cost,
787    //      COUNT(*) as total_calls,
788    //      feature_name,
789    //      COUNT(DISTINCT user_id) as unique_users
790    //    FROM org_ai_usage_logs
791    //    WHERE org_id = $1
792    //      AND (workspace_id = $2 OR $2 IS NULL)
793    //      AND created_at >= $3
794    //      AND created_at <= $4
795    //    GROUP BY feature_name
796
797    // For now, return structure that matches what the query would return
798    Ok(ResponseJson(ApiResponse::success(serde_json::json!({
799        "org_id": org_id,
800        "workspace_id": workspace_id,
801        "period_start": period_start.to_rfc3339(),
802        "period_end": period_end.to_rfc3339(),
803        "total_tokens": 0,
804        "total_cost": 0.0,
805        "total_calls": 0,
806        "feature_breakdown": {},
807        "message": "Usage stats require database connection. Connect to registry server database to enable."
808    }))))
809}
810
811/// Request for contract diff query
/// Request payload for a contract diff query
/// (POST /api/v1/ai-studio/contract-diff/query)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContractDiffQueryRequest {
    /// Natural language query; must be non-empty after trimming (the handler
    /// rejects blanks with 400 Bad Request)
    pub query: String,

    /// Optional workspace ID; currently accepted but not consumed by the
    /// handler in this file
    pub workspace_id: Option<String>,

    /// Optional organization ID; currently accepted but not consumed by the
    /// handler in this file
    pub org_id: Option<String>,
}
823
824/// Process a natural language query about contract diffs
825///
826/// POST /api/v1/ai-studio/contract-diff/query
827pub async fn contract_diff_query(
828    Json(request): Json<ContractDiffQueryRequest>,
829) -> Result<ResponseJson<ApiResponse<ContractDiffQueryResult>>, StatusCode> {
830    if request.query.trim().is_empty() {
831        return Err(StatusCode::BAD_REQUEST);
832    }
833
834    let handler = ContractDiffHandler::new().map_err(|e| {
835        tracing::error!("Failed to create ContractDiffHandler: {}", e);
836        StatusCode::INTERNAL_SERVER_ERROR
837    })?;
838
839    // For now, we don't have direct access to specs/requests in the handler
840    // In production, these would be loaded from workspace/request storage
841    match handler.analyze_from_query(&request.query, None, None).await {
842        Ok(result) => Ok(ResponseJson(ApiResponse::success(result))),
843        Err(e) => Ok(ResponseJson(ApiResponse::error(format!(
844            "Failed to process contract diff query: {}",
845            e
846        )))),
847    }
848}