1use axum::{
2 extract::{Path, Query, State},
3 http::StatusCode,
4 response::{IntoResponse, Json},
5};
6use serde::Deserialize;
7use tracing::*;
8
9use super::ManagementState;
10
/// Request payload for AI-driven API specification generation.
#[derive(Debug, Deserialize)]
pub struct GenerateSpecRequest {
    /// Natural-language description of the API to generate.
    pub query: String,
    /// Target specification kind: "openapi", "graphql", or "asyncapi"
    /// (unknown values are treated as OpenAPI downstream).
    pub spec_type: String,
    /// Optional OpenAPI version; defaults to "3.0.0" when omitted.
    pub api_version: Option<String>,
}
23
/// Request payload for generating an OpenAPI spec from recorded HTTP traffic.
#[derive(Debug, Deserialize)]
pub struct GenerateOpenApiFromTrafficRequest {
    /// Path to the recorder database; defaults to `./recordings.db` when unset.
    #[serde(default)]
    pub database_path: Option<String>,
    /// Only include exchanges at or after this RFC 3339 timestamp.
    #[serde(default)]
    pub since: Option<String>,
    /// Only include exchanges at or before this RFC 3339 timestamp.
    #[serde(default)]
    pub until: Option<String>,
    /// Optional path pattern used to filter recorded exchanges.
    #[serde(default)]
    pub path_pattern: Option<String>,
    /// Minimum confidence threshold for inferred paths (default 0.7).
    #[serde(default = "default_min_confidence")]
    pub min_confidence: f64,
}
43
/// Serde default for `GenerateOpenApiFromTrafficRequest::min_confidence`.
fn default_min_confidence() -> f64 {
    const DEFAULT: f64 = 0.7;
    DEFAULT
}
47
/// POST handler: generate an API specification (OpenAPI / GraphQL / AsyncAPI)
/// from a natural-language prompt using the configured RAG/LLM backend.
///
/// Provider, endpoint, model and generation parameters are taken from
/// `MOCKFORGE_RAG_*` environment variables with per-provider defaults.
/// Returns 503 when no API key is configured and 500 when engine
/// construction or generation fails.
#[cfg(feature = "data-faker")]
pub(crate) async fn generate_ai_spec(
    State(_state): State<ManagementState>,
    Json(request): Json<GenerateSpecRequest>,
) -> impl IntoResponse {
    use mockforge_data::rag::{
        config::{LlmProvider, RagConfig},
        engine::RagEngine,
        storage::DocumentStorage,
    };
    use std::sync::Arc;

    /// Read a numeric env var, falling back to `default` when unset or unparsable.
    fn env_parse<T: std::str::FromStr + Copy>(name: &str, default: T) -> T {
        std::env::var(name).ok().and_then(|v| v.parse().ok()).unwrap_or(default)
    }

    // An API key is mandatory; accept either the MockForge-specific variable
    // or the generic OpenAI one.
    let api_key = std::env::var("MOCKFORGE_RAG_API_KEY")
        .ok()
        .or_else(|| std::env::var("OPENAI_API_KEY").ok());

    if api_key.is_none() {
        return (
            StatusCode::SERVICE_UNAVAILABLE,
            Json(serde_json::json!({
                "error": "AI service not configured",
                "message": "Please provide an API key via MOCKFORGE_RAG_API_KEY or OPENAI_API_KEY"
            })),
        )
            .into_response();
    }

    // Select the LLM provider; unknown values fall back to OpenAI.
    let provider_str = std::env::var("MOCKFORGE_RAG_PROVIDER")
        .unwrap_or_else(|_| "openai".to_string())
        .to_lowercase();

    let provider = match provider_str.as_str() {
        "openai" => LlmProvider::OpenAI,
        "anthropic" => LlmProvider::Anthropic,
        "ollama" => LlmProvider::Ollama,
        "openai-compatible" | "openai_compatible" => LlmProvider::OpenAICompatible,
        _ => LlmProvider::OpenAI,
    };

    // Endpoint and model have per-provider defaults, each overridable via env.
    let api_endpoint =
        std::env::var("MOCKFORGE_RAG_API_ENDPOINT").unwrap_or_else(|_| match provider {
            LlmProvider::OpenAI => "https://api.openai.com/v1".to_string(),
            LlmProvider::Anthropic => "https://api.anthropic.com/v1".to_string(),
            LlmProvider::Ollama => "http://localhost:11434/api".to_string(),
            LlmProvider::OpenAICompatible => "http://localhost:8000/v1".to_string(),
        });

    let model = std::env::var("MOCKFORGE_RAG_MODEL").unwrap_or_else(|_| match provider {
        LlmProvider::OpenAI => "gpt-3.5-turbo".to_string(),
        LlmProvider::Anthropic => "claude-3-sonnet-20240229".to_string(),
        LlmProvider::Ollama => "llama2".to_string(),
        LlmProvider::OpenAICompatible => "gpt-3.5-turbo".to_string(),
    });

    let rag_config = RagConfig {
        provider,
        api_endpoint,
        api_key,
        model,
        max_tokens: env_parse("MOCKFORGE_RAG_MAX_TOKENS", 4096),
        temperature: env_parse("MOCKFORGE_RAG_TEMPERATURE", 0.3),
        timeout_secs: env_parse("MOCKFORGE_RAG_TIMEOUT", 60),
        max_context_length: env_parse("MOCKFORGE_RAG_CONTEXT_WINDOW", 4000),
        ..Default::default()
    };

    let spec_type_label = match request.spec_type.as_str() {
        "openapi" => "OpenAPI 3.0",
        "graphql" => "GraphQL",
        "asyncapi" => "AsyncAPI",
        _ => "OpenAPI 3.0",
    };

    let api_version = request.api_version.as_deref().unwrap_or("3.0.0");

    let prompt = format!(
        r#"You are an expert API architect. Generate a complete {} specification based on the following user requirements.

User Requirements:
{}

Instructions:
1. Generate a complete, valid {} specification
2. Include all paths, operations, request/response schemas, and components
3. Use realistic field names and data types
4. Include proper descriptions and examples
5. Follow {} best practices
6. Return ONLY the specification, no additional explanation
7. For OpenAPI, use version {}

Return the specification in {} format."#,
        spec_type_label,
        request.query,
        spec_type_label,
        spec_type_label,
        api_version,
        if request.spec_type == "graphql" {
            "GraphQL SDL"
        } else {
            "YAML"
        }
    );

    // One-shot generation needs no persistent document store.
    use mockforge_data::rag::storage::InMemoryStorage;
    let storage: Arc<dyn DocumentStorage> = Arc::new(InMemoryStorage::new());

    let mut rag_engine = match RagEngine::new(rag_config, storage) {
        Ok(engine) => engine,
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(serde_json::json!({
                    "error": "Failed to initialize RAG engine",
                    "message": e.to_string()
                })),
            )
                .into_response();
        }
    };

    match rag_engine.generate(&prompt, None).await {
        Ok(generated_text) => {
            // Strip markdown code fences (and similar wrapping) from the model output.
            let spec = if request.spec_type == "graphql" {
                extract_graphql_schema(&generated_text)
            } else {
                extract_yaml_spec(&generated_text)
            };

            Json(serde_json::json!({
                "success": true,
                "spec": spec,
                "spec_type": request.spec_type,
            }))
            .into_response()
        }
        Err(e) => (
            StatusCode::INTERNAL_SERVER_ERROR,
            Json(serde_json::json!({
                "error": "AI generation failed",
                "message": e.to_string()
            })),
        )
            .into_response(),
    }
}
220
221#[cfg(not(feature = "data-faker"))]
222pub(crate) async fn generate_ai_spec(
223 State(_state): State<ManagementState>,
224 Json(_request): Json<GenerateSpecRequest>,
225) -> impl IntoResponse {
226 (
227 StatusCode::NOT_IMPLEMENTED,
228 Json(serde_json::json!({
229 "error": "AI features not enabled",
230 "message": "Please enable the 'data-faker' feature to use AI-powered specification generation"
231 })),
232 )
233 .into_response()
234}
235
/// POST handler: infer an OpenAPI specification from recorded HTTP traffic.
///
/// Reads exchanges from the recorder database (default `./recordings.db`),
/// optionally filtered by time window and path pattern (capped at 1000
/// exchanges), and feeds them to the OpenAPI spec generator.
#[cfg(feature = "behavioral-cloning")]
pub(crate) async fn generate_openapi_from_traffic(
    State(_state): State<ManagementState>,
    Json(request): Json<GenerateOpenApiFromTrafficRequest>,
) -> impl IntoResponse {
    use chrono::{DateTime, Utc};
    use mockforge_core::intelligent_behavior::{
        openapi_generator::{OpenApiGenerationConfig, OpenApiSpecGenerator},
        IntelligentBehaviorConfig,
    };
    use mockforge_recorder::{
        database::RecorderDatabase,
        openapi_export::{QueryFilters, RecordingsToOpenApi},
    };
    use std::path::PathBuf;

    /// Parse an optional RFC 3339 timestamp; on failure, yield the ready-made
    /// 400 response. `label` names the offending filter ("since"/"until") in
    /// the error message.
    fn parse_rfc3339(
        value: Option<&str>,
        label: &str,
    ) -> Result<Option<DateTime<Utc>>, axum::response::Response> {
        let Some(s) = value else {
            return Ok(None);
        };
        DateTime::parse_from_rfc3339(s).map(|dt| Some(dt.with_timezone(&Utc))).map_err(|e| {
            (
                StatusCode::BAD_REQUEST,
                Json(serde_json::json!({
                    "error": "Invalid date format",
                    "message": format!("Invalid --{} format: {}. Use ISO 8601 format (e.g., 2025-01-01T00:00:00Z)", label, e)
                })),
            )
                .into_response()
        })
    }

    // Default to ./recordings.db under the current working directory.
    let db_path = if let Some(ref path) = request.database_path {
        PathBuf::from(path)
    } else {
        std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")).join("recordings.db")
    };

    let db = match RecorderDatabase::new(&db_path).await {
        Ok(db) => db,
        Err(e) => {
            return (
                StatusCode::BAD_REQUEST,
                Json(serde_json::json!({
                    "error": "Database error",
                    "message": format!("Failed to open recorder database: {}", e)
                })),
            )
                .into_response();
        }
    };

    // Both time filters share identical parsing/error handling.
    let since_dt = match parse_rfc3339(request.since.as_deref(), "since") {
        Ok(dt) => dt,
        Err(resp) => return resp,
    };
    let until_dt = match parse_rfc3339(request.until.as_deref(), "until") {
        Ok(dt) => dt,
        Err(resp) => return resp,
    };

    let query_filters = QueryFilters {
        since: since_dt,
        until: until_dt,
        path_pattern: request.path_pattern.clone(),
        min_status_code: None,
        // Safety cap so a huge recording database cannot stall the request.
        max_requests: Some(1000),
    };

    let exchanges_from_recorder =
        match RecordingsToOpenApi::query_http_exchanges(&db, Some(query_filters)).await {
            Ok(exchanges) => exchanges,
            Err(e) => {
                return (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Json(serde_json::json!({
                        "error": "Query error",
                        "message": format!("Failed to query HTTP exchanges: {}", e)
                    })),
                )
                    .into_response();
            }
        };

    if exchanges_from_recorder.is_empty() {
        return (
            StatusCode::NOT_FOUND,
            Json(serde_json::json!({
                "error": "No exchanges found",
                "message": "No HTTP exchanges found matching the specified filters"
            })),
        )
            .into_response();
    }

    // Bridge the recorder's exchange type to the generator's local type.
    use mockforge_core::intelligent_behavior::openapi_generator::HttpExchange as LocalHttpExchange;
    let exchanges: Vec<LocalHttpExchange> = exchanges_from_recorder
        .into_iter()
        .map(|e| LocalHttpExchange {
            method: e.method,
            path: e.path,
            query_params: e.query_params,
            headers: e.headers,
            body: e.body,
            body_encoding: e.body_encoding,
            status_code: e.status_code,
            response_headers: e.response_headers,
            response_body: e.response_body,
            response_body_encoding: e.response_body_encoding,
            timestamp: e.timestamp,
        })
        .collect();

    let behavior_config = IntelligentBehaviorConfig::default();
    let gen_config = OpenApiGenerationConfig {
        min_confidence: request.min_confidence,
        behavior_model: Some(behavior_config.behavior_model),
    };

    let generator = OpenApiSpecGenerator::new(gen_config);
    let result = match generator.generate_from_exchanges(exchanges).await {
        Ok(result) => result,
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(serde_json::json!({
                    "error": "Generation error",
                    "message": format!("Failed to generate OpenAPI spec: {}", e)
                })),
            )
                .into_response();
        }
    };

    // Prefer the raw generated document when present; otherwise serialize the
    // typed spec.
    let spec_json = if let Some(ref raw) = result.spec.raw_document {
        raw.clone()
    } else {
        match serde_json::to_value(&result.spec.spec) {
            Ok(json) => json,
            Err(e) => {
                return (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Json(serde_json::json!({
                        "error": "Serialization error",
                        "message": format!("Failed to serialize OpenAPI spec: {}", e)
                    })),
                )
                    .into_response();
            }
        }
    };

    let response = serde_json::json!({
        "spec": spec_json,
        "metadata": {
            "requests_analyzed": result.metadata.requests_analyzed,
            "paths_inferred": result.metadata.paths_inferred,
            "path_confidence": result.metadata.path_confidence,
            "generated_at": result.metadata.generated_at.to_rfc3339(),
            "duration_ms": result.metadata.duration_ms,
        }
    });

    Json(response).into_response()
}
428
429pub(crate) async fn list_rule_explanations(
431 State(state): State<ManagementState>,
432 Query(params): Query<std::collections::HashMap<String, String>>,
433) -> impl IntoResponse {
434 use mockforge_core::intelligent_behavior::RuleType;
435
436 let explanations = state.rule_explanations.read().await;
437 let mut explanations_vec: Vec<_> = explanations.values().cloned().collect();
438
439 if let Some(rule_type_str) = params.get("rule_type") {
441 if let Ok(rule_type) = serde_json::from_str::<RuleType>(&format!("\"{}\"", rule_type_str)) {
442 explanations_vec.retain(|e| e.rule_type == rule_type);
443 }
444 }
445
446 if let Some(min_confidence_str) = params.get("min_confidence") {
448 if let Ok(min_confidence) = min_confidence_str.parse::<f64>() {
449 explanations_vec.retain(|e| e.confidence >= min_confidence);
450 }
451 }
452
453 explanations_vec.sort_by(|a, b| {
455 b.confidence
456 .partial_cmp(&a.confidence)
457 .unwrap_or(std::cmp::Ordering::Equal)
458 .then_with(|| b.generated_at.cmp(&a.generated_at))
459 });
460
461 Json(serde_json::json!({
462 "explanations": explanations_vec,
463 "total": explanations_vec.len(),
464 }))
465 .into_response()
466}
467
468pub(crate) async fn get_rule_explanation(
470 State(state): State<ManagementState>,
471 Path(rule_id): Path<String>,
472) -> impl IntoResponse {
473 let explanations = state.rule_explanations.read().await;
474
475 match explanations.get(&rule_id) {
476 Some(explanation) => Json(serde_json::json!({
477 "explanation": explanation,
478 }))
479 .into_response(),
480 None => (
481 StatusCode::NOT_FOUND,
482 Json(serde_json::json!({
483 "error": "Rule explanation not found",
484 "message": format!("No explanation found for rule ID: {}", rule_id)
485 })),
486 )
487 .into_response(),
488 }
489}
490
/// Request payload for learning behavioral rules from example pairs.
#[derive(Debug, Deserialize)]
pub struct LearnFromExamplesRequest {
    /// Request/response example pairs to learn from; must be non-empty.
    pub examples: Vec<ExamplePairRequest>,
    /// Optional JSON-encoded `IntelligentBehaviorConfig` override;
    /// invalid JSON falls back to defaults.
    #[serde(default)]
    pub config: Option<serde_json::Value>,
}
500
/// One request/response example used for rule learning.
#[derive(Debug, Deserialize)]
pub struct ExamplePairRequest {
    /// JSON object describing the request; recognized keys include
    /// "method", "path", "body", "query_params", and "headers".
    pub request: serde_json::Value,
    /// JSON object describing the response; recognized keys include
    /// "status_code" (or "status") and "body".
    pub response: serde_json::Value,
}
509
510pub(crate) async fn learn_from_examples(
515 State(state): State<ManagementState>,
516 Json(request): Json<LearnFromExamplesRequest>,
517) -> impl IntoResponse {
518 use mockforge_core::intelligent_behavior::{
519 config::{BehaviorModelConfig, IntelligentBehaviorConfig},
520 rule_generator::{ExamplePair, RuleGenerator},
521 };
522
523 if request.examples.is_empty() {
524 return (
525 StatusCode::BAD_REQUEST,
526 Json(serde_json::json!({
527 "error": "No examples provided",
528 "message": "At least one example pair is required"
529 })),
530 )
531 .into_response();
532 }
533
534 let example_pairs: Result<Vec<ExamplePair>, String> = request
536 .examples
537 .into_iter()
538 .enumerate()
539 .map(|(idx, ex)| {
540 let method = ex
542 .request
543 .get("method")
544 .and_then(|v| v.as_str())
545 .map(|s| s.to_string())
546 .unwrap_or_else(|| "GET".to_string());
547 let path = ex
548 .request
549 .get("path")
550 .and_then(|v| v.as_str())
551 .map(|s| s.to_string())
552 .unwrap_or_else(|| "/".to_string());
553 let request_body = ex.request.get("body").cloned();
554 let query_params = ex
555 .request
556 .get("query_params")
557 .and_then(|v| v.as_object())
558 .map(|obj| {
559 obj.iter()
560 .filter_map(|(k, v)| v.as_str().map(|s| (k.clone(), s.to_string())))
561 .collect()
562 })
563 .unwrap_or_default();
564 let headers = ex
565 .request
566 .get("headers")
567 .and_then(|v| v.as_object())
568 .map(|obj| {
569 obj.iter()
570 .filter_map(|(k, v)| v.as_str().map(|s| (k.clone(), s.to_string())))
571 .collect()
572 })
573 .unwrap_or_default();
574
575 let status = ex
577 .response
578 .get("status_code")
579 .or_else(|| ex.response.get("status"))
580 .and_then(|v| v.as_u64())
581 .map(|n| n as u16)
582 .unwrap_or(200);
583 let response_body = ex.response.get("body").cloned();
584
585 Ok(ExamplePair {
586 method,
587 path,
588 request: request_body,
589 status,
590 response: response_body,
591 query_params,
592 headers,
593 metadata: {
594 let mut meta = std::collections::HashMap::new();
595 meta.insert("source".to_string(), "api".to_string());
596 meta.insert("example_index".to_string(), idx.to_string());
597 meta
598 },
599 })
600 })
601 .collect();
602
603 let example_pairs = match example_pairs {
604 Ok(pairs) => pairs,
605 Err(e) => {
606 return (
607 StatusCode::BAD_REQUEST,
608 Json(serde_json::json!({
609 "error": "Invalid examples",
610 "message": e
611 })),
612 )
613 .into_response();
614 }
615 };
616
617 let behavior_config = if let Some(config_json) = request.config {
619 serde_json::from_value(config_json)
621 .unwrap_or_else(|_| IntelligentBehaviorConfig::default())
622 .behavior_model
623 } else {
624 BehaviorModelConfig::default()
625 };
626
627 let generator = RuleGenerator::new(behavior_config);
629
630 let (rules, explanations) =
632 match generator.generate_rules_with_explanations(example_pairs).await {
633 Ok(result) => result,
634 Err(e) => {
635 return (
636 StatusCode::INTERNAL_SERVER_ERROR,
637 Json(serde_json::json!({
638 "error": "Rule generation failed",
639 "message": format!("Failed to generate rules: {}", e)
640 })),
641 )
642 .into_response();
643 }
644 };
645
646 {
648 let mut stored_explanations = state.rule_explanations.write().await;
649 for explanation in &explanations {
650 stored_explanations.insert(explanation.rule_id.clone(), explanation.clone());
651 }
652 }
653
654 let response = serde_json::json!({
656 "success": true,
657 "rules_generated": {
658 "consistency_rules": rules.consistency_rules.len(),
659 "schemas": rules.schemas.len(),
660 "state_machines": rules.state_transitions.len(),
661 "system_prompt": !rules.system_prompt.is_empty(),
662 },
663 "explanations": explanations.iter().map(|e| serde_json::json!({
664 "rule_id": e.rule_id,
665 "rule_type": e.rule_type,
666 "confidence": e.confidence,
667 "reasoning": e.reasoning,
668 })).collect::<Vec<_>>(),
669 "total_explanations": explanations.len(),
670 });
671
672 Json(response).into_response()
673}
674
675#[cfg(feature = "data-faker")]
/// Extract a YAML specification from LLM output.
///
/// Tries, in order: a ```yaml fenced block, any ``` fenced block, and finally
/// the trimmed raw text. (The former explicit `openapi:`/`asyncapi:` check was
/// redundant — both of its paths returned the trimmed text — so it was removed
/// with no behavior change.)
fn extract_yaml_spec(text: &str) -> String {
    for fence in ["```yaml", "```"] {
        if let Some(start) = text.find(fence) {
            let body = text[start + fence.len()..].trim_start();
            if let Some(end) = body.find("```") {
                return body[..end].trim().to_string();
            }
        }
    }
    text.trim().to_string()
}
699
700#[cfg(feature = "data-faker")]
/// Extract a GraphQL SDL schema from LLM output.
///
/// Tries, in order: a ```graphql fenced block, any ``` fenced block, and
/// finally the trimmed raw text. (The former explicit `type `/`schema ` check
/// was redundant — both of its paths returned the trimmed text — so it was
/// removed with no behavior change.)
fn extract_graphql_schema(text: &str) -> String {
    for fence in ["```graphql", "```"] {
        if let Some(start) = text.find(fence) {
            let body = text[start + fence.len()..].trim_start();
            if let Some(end) = body.find("```") {
                return body[..end].trim().to_string();
            }
        }
    }
    text.trim().to_string()
}
724
/// GET handler: report the current chaos-engineering configuration.
///
/// With the `chaos` feature and a live chaos API state, returns the actual
/// config; otherwise (state missing, or feature compiled out) returns an
/// all-disabled placeholder with the same shape.
pub(crate) async fn get_chaos_config(State(_state): State<ManagementState>) -> impl IntoResponse {
    #[cfg(feature = "chaos")]
    {
        if let Some(chaos_state) = &_state.chaos_api_state {
            let config = chaos_state.config.read().await;
            // Each optional sub-config serializes to JSON; serialization
            // failures degrade to null rather than erroring the request.
            Json(serde_json::json!({
                "enabled": config.enabled,
                "latency": config.latency.as_ref().map(|l| serde_json::to_value(l).unwrap_or(serde_json::Value::Null)),
                "fault_injection": config.fault_injection.as_ref().map(|f| serde_json::to_value(f).unwrap_or(serde_json::Value::Null)),
                "rate_limit": config.rate_limit.as_ref().map(|r| serde_json::to_value(r).unwrap_or(serde_json::Value::Null)),
                "traffic_shaping": config.traffic_shaping.as_ref().map(|t| serde_json::to_value(t).unwrap_or(serde_json::Value::Null)),
            }))
            .into_response()
        } else {
            // Chaos feature compiled in but no API state configured.
            Json(serde_json::json!({
                "enabled": false,
                "latency": null,
                "fault_injection": null,
                "rate_limit": null,
                "traffic_shaping": null,
            }))
            .into_response()
        }
    }
    #[cfg(not(feature = "chaos"))]
    {
        // Chaos feature compiled out: same response shape, everything off.
        Json(serde_json::json!({
            "enabled": false,
            "latency": null,
            "fault_injection": null,
            "rate_limit": null,
            "traffic_shaping": null,
        }))
        .into_response()
    }
}
767
/// Partial update payload for the chaos-engineering configuration.
/// Every field is optional; only supplied fields are applied.
#[derive(Debug, Deserialize)]
pub struct ChaosConfigUpdate {
    /// Master on/off switch for chaos behavior.
    pub enabled: Option<bool>,
    /// JSON-encoded `LatencyConfig`.
    pub latency: Option<serde_json::Value>,
    /// JSON-encoded `FaultInjectionConfig`.
    pub fault_injection: Option<serde_json::Value>,
    /// JSON-encoded `RateLimitConfig`.
    pub rate_limit: Option<serde_json::Value>,
    /// JSON-encoded `TrafficShapingConfig`.
    pub traffic_shaping: Option<serde_json::Value>,
}
782
/// POST/PUT handler: apply a partial update to the chaos configuration.
///
/// Only the fields present in the request body are applied. Returns 503 when
/// the chaos API state is unavailable and 501 when the `chaos` feature is
/// compiled out.
pub(crate) async fn update_chaos_config(
    State(_state): State<ManagementState>,
    Json(_config_update): Json<ChaosConfigUpdate>,
) -> impl IntoResponse {
    #[cfg(feature = "chaos")]
    {
        if let Some(chaos_state) = &_state.chaos_api_state {
            use mockforge_chaos::config::{
                FaultInjectionConfig, LatencyConfig, RateLimitConfig, TrafficShapingConfig,
            };

            let mut config = chaos_state.config.write().await;

            if let Some(enabled) = _config_update.enabled {
                config.enabled = enabled;
            }

            // NOTE(review): each sub-config below is applied only if its JSON
            // deserializes cleanly; invalid JSON is silently ignored rather
            // than rejected with a 400 — consider surfacing those errors.
            if let Some(latency_json) = _config_update.latency {
                if let Ok(latency) = serde_json::from_value::<LatencyConfig>(latency_json) {
                    config.latency = Some(latency);
                }
            }

            if let Some(fault_json) = _config_update.fault_injection {
                if let Ok(fault) = serde_json::from_value::<FaultInjectionConfig>(fault_json) {
                    config.fault_injection = Some(fault);
                }
            }

            if let Some(rate_json) = _config_update.rate_limit {
                if let Ok(rate) = serde_json::from_value::<RateLimitConfig>(rate_json) {
                    config.rate_limit = Some(rate);
                }
            }

            if let Some(traffic_json) = _config_update.traffic_shaping {
                if let Ok(traffic) = serde_json::from_value::<TrafficShapingConfig>(traffic_json) {
                    config.traffic_shaping = Some(traffic);
                }
            }

            // Release the write lock before responding.
            drop(config);

            info!("Chaos configuration updated successfully");
            Json(serde_json::json!({
                "success": true,
                "message": "Chaos configuration updated and applied"
            }))
            .into_response()
        } else {
            (
                StatusCode::SERVICE_UNAVAILABLE,
                Json(serde_json::json!({
                    "success": false,
                    "error": "Chaos API not available",
                    "message": "Chaos engineering is not enabled or configured"
                })),
            )
                .into_response()
        }
    }
    #[cfg(not(feature = "chaos"))]
    {
        (
            StatusCode::NOT_IMPLEMENTED,
            Json(serde_json::json!({
                "success": false,
                "error": "Chaos feature not enabled",
                "message": "Chaos engineering feature is not compiled into this build"
            })),
        )
            .into_response()
    }
}
865
866pub(crate) async fn list_network_profiles() -> impl IntoResponse {
870 use mockforge_chaos::core_network_profiles::NetworkProfileCatalog;
871
872 let catalog = NetworkProfileCatalog::default();
873 let profiles: Vec<serde_json::Value> = catalog
874 .list_profiles_with_description()
875 .iter()
876 .map(|(name, description)| {
877 serde_json::json!({
878 "name": name,
879 "description": description,
880 })
881 })
882 .collect();
883
884 Json(serde_json::json!({
885 "profiles": profiles
886 }))
887 .into_response()
888}
889
/// Request payload for applying a named network emulation profile.
#[derive(Debug, Deserialize)]
pub struct ApplyNetworkProfileRequest {
    /// Name of a profile from the `NetworkProfileCatalog`.
    pub profile_name: String,
}
896
897pub(crate) async fn apply_network_profile(
899 State(state): State<ManagementState>,
900 Json(request): Json<ApplyNetworkProfileRequest>,
901) -> impl IntoResponse {
902 use mockforge_chaos::core_network_profiles::NetworkProfileCatalog;
903
904 let catalog = NetworkProfileCatalog::default();
905 if let Some(profile) = catalog.get(&request.profile_name) {
906 if let Some(server_config) = &state.server_config {
909 let mut config = server_config.write().await;
910
911 use mockforge_core::config::NetworkShapingConfig;
913
914 let network_shaping = NetworkShapingConfig {
918 enabled: profile.traffic_shaping.bandwidth.enabled
919 || profile.traffic_shaping.burst_loss.enabled,
920 bandwidth_limit_bps: profile.traffic_shaping.bandwidth.max_bytes_per_sec * 8, packet_loss_percent: profile.traffic_shaping.burst_loss.loss_rate_during_burst,
922 max_connections: 1000, };
924
925 if let Some(ref mut chaos) = config.observability.chaos {
928 chaos.traffic_shaping = Some(network_shaping);
929 } else {
930 use mockforge_core::config::ChaosEngConfig;
932 config.observability.chaos = Some(ChaosEngConfig {
933 enabled: true,
934 latency: None,
935 fault_injection: None,
936 rate_limit: None,
937 traffic_shaping: Some(network_shaping),
938 scenario: None,
939 });
940 }
941
942 info!("Network profile '{}' applied to server configuration", request.profile_name);
943 } else {
944 warn!("Server configuration not available in ManagementState - profile applied but not persisted");
945 }
946
947 #[cfg(feature = "chaos")]
949 {
950 if let Some(chaos_state) = &state.chaos_api_state {
951 use mockforge_chaos::config::TrafficShapingConfig;
952
953 let mut chaos_config = chaos_state.config.write().await;
954 let chaos_traffic_shaping = TrafficShapingConfig {
956 enabled: profile.traffic_shaping.bandwidth.enabled
957 || profile.traffic_shaping.burst_loss.enabled,
958 bandwidth_limit_bps: profile.traffic_shaping.bandwidth.max_bytes_per_sec * 8, packet_loss_percent: profile.traffic_shaping.burst_loss.loss_rate_during_burst,
960 max_connections: 0,
961 connection_timeout_ms: 30000,
962 };
963 chaos_config.traffic_shaping = Some(chaos_traffic_shaping);
964 chaos_config.enabled = true; drop(chaos_config);
966 info!("Network profile '{}' applied to chaos API state", request.profile_name);
967 }
968 }
969
970 Json(serde_json::json!({
971 "success": true,
972 "message": format!("Network profile '{}' applied", request.profile_name),
973 "profile": {
974 "name": profile.name,
975 "description": profile.description,
976 }
977 }))
978 .into_response()
979 } else {
980 (
981 StatusCode::NOT_FOUND,
982 Json(serde_json::json!({
983 "error": "Profile not found",
984 "message": format!("Network profile '{}' not found", request.profile_name)
985 })),
986 )
987 .into_response()
988 }
989}