1#![allow(deprecated)]
5
6use axum::{
7 extract::{Path, Query, State},
8 http::StatusCode,
9 response::{IntoResponse, Json},
10};
11use serde::Deserialize;
12use tracing::*;
13
14use super::ManagementState;
15
/// Request payload for AI-assisted specification generation
/// (see `generate_ai_spec`).
#[derive(Debug, Deserialize)]
pub struct GenerateSpecRequest {
    /// Natural-language description of the desired API.
    pub query: String,
    /// Target spec type: "openapi", "graphql", or "asyncapi".
    /// Any other value falls back to OpenAPI 3.0.
    pub spec_type: String,
    /// OpenAPI version to target; defaults to "3.0.0" when omitted.
    pub api_version: Option<String>,
}
28
/// Request payload for generating an OpenAPI spec from recorded HTTP traffic
/// (see `generate_openapi_from_traffic`).
#[derive(Debug, Deserialize)]
pub struct GenerateOpenApiFromTrafficRequest {
    /// Path to the recorder database; defaults to `./recordings.db` in the
    /// current working directory when omitted.
    #[serde(default)]
    pub database_path: Option<String>,
    /// Only include exchanges at/after this RFC 3339 timestamp.
    #[serde(default)]
    pub since: Option<String>,
    /// Only include exchanges at/before this RFC 3339 timestamp.
    #[serde(default)]
    pub until: Option<String>,
    /// Optional path pattern used to filter recorded exchanges.
    #[serde(default)]
    pub path_pattern: Option<String>,
    /// Minimum confidence for inferred paths; defaults to 0.7.
    #[serde(default = "default_min_confidence")]
    pub min_confidence: f64,
}
48
/// Serde default for `GenerateOpenApiFromTrafficRequest::min_confidence`.
fn default_min_confidence() -> f64 {
    0.7
}
52
/// Generate an API specification (OpenAPI 3.0, GraphQL, or AsyncAPI) from a
/// natural-language description using a RAG/LLM backend.
///
/// All LLM settings are read from environment variables. Responds 503 when
/// no API key is configured, 500 when engine construction or generation
/// fails, and `{ success, spec, spec_type }` on success.
#[cfg(feature = "data-faker")]
pub(crate) async fn generate_ai_spec(
    State(_state): State<ManagementState>,
    Json(request): Json<GenerateSpecRequest>,
) -> impl IntoResponse {
    use mockforge_data::rag::{
        config::{LlmProvider, RagConfig},
        engine::RagEngine,
        storage::DocumentStorage,
    };
    use std::sync::Arc;

    // API key: MOCKFORGE_RAG_API_KEY takes precedence over OPENAI_API_KEY.
    let api_key = std::env::var("MOCKFORGE_RAG_API_KEY")
        .ok()
        .or_else(|| std::env::var("OPENAI_API_KEY").ok());

    if api_key.is_none() {
        return (
            StatusCode::SERVICE_UNAVAILABLE,
            Json(serde_json::json!({
                "error": "AI service not configured",
                "message": "Please provide an API key via MOCKFORGE_RAG_API_KEY or OPENAI_API_KEY"
            })),
        )
            .into_response();
    }

    // Provider selection; unknown values silently fall back to OpenAI.
    let provider_str = std::env::var("MOCKFORGE_RAG_PROVIDER")
        .unwrap_or_else(|_| "openai".to_string())
        .to_lowercase();

    let provider = match provider_str.as_str() {
        "openai" => LlmProvider::OpenAI,
        "anthropic" => LlmProvider::Anthropic,
        "ollama" => LlmProvider::Ollama,
        "openai-compatible" | "openai_compatible" => LlmProvider::OpenAICompatible,
        _ => LlmProvider::OpenAI,
    };

    // Endpoint and model default per provider unless explicitly overridden.
    let api_endpoint =
        std::env::var("MOCKFORGE_RAG_API_ENDPOINT").unwrap_or_else(|_| match provider {
            LlmProvider::OpenAI => "https://api.openai.com/v1".to_string(),
            LlmProvider::Anthropic => "https://api.anthropic.com/v1".to_string(),
            LlmProvider::Ollama => "http://localhost:11434/api".to_string(),
            LlmProvider::OpenAICompatible => "http://localhost:8000/v1".to_string(),
        });

    let model = std::env::var("MOCKFORGE_RAG_MODEL").unwrap_or_else(|_| match provider {
        LlmProvider::OpenAI => "gpt-3.5-turbo".to_string(),
        LlmProvider::Anthropic => "claude-3-sonnet-20240229".to_string(),
        LlmProvider::Ollama => "llama2".to_string(),
        LlmProvider::OpenAICompatible => "gpt-3.5-turbo".to_string(),
    });

    // Tuning knobs: unparsable env values fall back to the same defaults as
    // missing ones (4096 tokens, 0.3 temperature, 60s timeout, 4000 context).
    let rag_config = RagConfig {
        provider,
        api_endpoint,
        api_key,
        model,
        max_tokens: std::env::var("MOCKFORGE_RAG_MAX_TOKENS")
            .unwrap_or_else(|_| "4096".to_string())
            .parse()
            .unwrap_or(4096),
        temperature: std::env::var("MOCKFORGE_RAG_TEMPERATURE")
            .unwrap_or_else(|_| "0.3".to_string())
            .parse()
            .unwrap_or(0.3),
        timeout_secs: std::env::var("MOCKFORGE_RAG_TIMEOUT")
            .unwrap_or_else(|_| "60".to_string())
            .parse()
            .unwrap_or(60),
        max_context_length: std::env::var("MOCKFORGE_RAG_CONTEXT_WINDOW")
            .unwrap_or_else(|_| "4000".to_string())
            .parse()
            .unwrap_or(4000),
        ..Default::default()
    };

    // Human-readable label inserted into the prompt; unknown spec types are
    // treated as OpenAPI 3.0 (mirrors the extraction logic below).
    let spec_type_label = match request.spec_type.as_str() {
        "openapi" => "OpenAPI 3.0",
        "graphql" => "GraphQL",
        "asyncapi" => "AsyncAPI",
        _ => "OpenAPI 3.0",
    };

    let api_version = request.api_version.as_deref().unwrap_or("3.0.0");

    let prompt = format!(
        r#"You are an expert API architect. Generate a complete {} specification based on the following user requirements.

User Requirements:
{}

Instructions:
1. Generate a complete, valid {} specification
2. Include all paths, operations, request/response schemas, and components
3. Use realistic field names and data types
4. Include proper descriptions and examples
5. Follow {} best practices
6. Return ONLY the specification, no additional explanation
7. For OpenAPI, use version {}

Return the specification in {} format."#,
        spec_type_label,
        request.query,
        spec_type_label,
        spec_type_label,
        api_version,
        if request.spec_type == "graphql" {
            "GraphQL SDL"
        } else {
            "YAML"
        }
    );

    // The engine requires a document store, but pure generation needs no
    // retrieval corpus, so an empty in-memory store suffices.
    use mockforge_data::rag::storage::InMemoryStorage;
    let storage: Arc<dyn DocumentStorage> = Arc::new(InMemoryStorage::new());

    let mut rag_engine = match RagEngine::new(rag_config.clone(), storage) {
        Ok(engine) => engine,
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(serde_json::json!({
                    "error": "Failed to initialize RAG engine",
                    "message": e.to_string()
                })),
            )
                .into_response();
        }
    };

    match rag_engine.generate(&prompt, None).await {
        Ok(generated_text) => {
            // Strip markdown code fences the model may have wrapped the
            // specification in.
            let spec = if request.spec_type == "graphql" {
                extract_graphql_schema(&generated_text)
            } else {
                extract_yaml_spec(&generated_text)
            };

            Json(serde_json::json!({
                "success": true,
                "spec": spec,
                "spec_type": request.spec_type,
            }))
            .into_response()
        }
        Err(e) => (
            StatusCode::INTERNAL_SERVER_ERROR,
            Json(serde_json::json!({
                "error": "AI generation failed",
                "message": e.to_string()
            })),
        )
            .into_response(),
    }
}
225
226#[cfg(not(feature = "data-faker"))]
227pub(crate) async fn generate_ai_spec(
228 State(_state): State<ManagementState>,
229 Json(_request): Json<GenerateSpecRequest>,
230) -> impl IntoResponse {
231 (
232 StatusCode::NOT_IMPLEMENTED,
233 Json(serde_json::json!({
234 "error": "AI features not enabled",
235 "message": "Please enable the 'data-faker' feature to use AI-powered specification generation"
236 })),
237 )
238 .into_response()
239}
240
/// Generate an OpenAPI specification from previously recorded HTTP traffic.
///
/// Pipeline: open the recorder database → parse optional `since`/`until`
/// RFC 3339 bounds → query up to 1000 matching exchanges → convert them to
/// the generator's exchange type → run the spec generator → return the spec
/// (raw document if available, otherwise serialized) plus generation
/// metadata. Error mapping: 400 for database-open/date-parse failures,
/// 404 when no exchanges match, 500 for query/generation/serialization
/// failures.
#[cfg(feature = "behavioral-cloning")]
pub(crate) async fn generate_openapi_from_traffic(
    State(_state): State<ManagementState>,
    Json(request): Json<GenerateOpenApiFromTrafficRequest>,
) -> impl IntoResponse {
    use chrono::{DateTime, Utc};
    use mockforge_core::intelligent_behavior::{
        openapi_generator::{OpenApiGenerationConfig, OpenApiSpecGenerator},
        IntelligentBehaviorConfig,
    };
    use mockforge_recorder::{
        database::RecorderDatabase,
        openapi_export::{QueryFilters, RecordingsToOpenApi},
    };
    use std::path::PathBuf;

    // Default database location: ./recordings.db under the current dir.
    let db_path = if let Some(ref path) = request.database_path {
        PathBuf::from(path)
    } else {
        std::env::current_dir()
            .unwrap_or_else(|_| PathBuf::from("."))
            .join("recordings.db")
    };

    let db = match RecorderDatabase::new(&db_path).await {
        Ok(db) => db,
        Err(e) => {
            return (
                StatusCode::BAD_REQUEST,
                Json(serde_json::json!({
                    "error": "Database error",
                    "message": format!("Failed to open recorder database: {}", e)
                })),
            )
                .into_response();
        }
    };

    // Parse optional time bounds; both are normalized to UTC.
    let since_dt = if let Some(ref since_str) = request.since {
        match DateTime::parse_from_rfc3339(since_str) {
            Ok(dt) => Some(dt.with_timezone(&Utc)),
            Err(e) => {
                return (
                    StatusCode::BAD_REQUEST,
                    Json(serde_json::json!({
                        "error": "Invalid date format",
                        "message": format!("Invalid --since format: {}. Use ISO 8601 format (e.g., 2025-01-01T00:00:00Z)", e)
                    })),
                )
                    .into_response();
            }
        }
    } else {
        None
    };

    let until_dt = if let Some(ref until_str) = request.until {
        match DateTime::parse_from_rfc3339(until_str) {
            Ok(dt) => Some(dt.with_timezone(&Utc)),
            Err(e) => {
                return (
                    StatusCode::BAD_REQUEST,
                    Json(serde_json::json!({
                        "error": "Invalid date format",
                        "message": format!("Invalid --until format: {}. Use ISO 8601 format (e.g., 2025-01-01T00:00:00Z)", e)
                    })),
                )
                    .into_response();
            }
        }
    } else {
        None
    };

    // Hard cap of 1000 requests per generation run.
    let query_filters = QueryFilters {
        since: since_dt,
        until: until_dt,
        path_pattern: request.path_pattern.clone(),
        min_status_code: None,
        max_requests: Some(1000),
    };

    let exchanges_from_recorder =
        match RecordingsToOpenApi::query_http_exchanges(&db, Some(query_filters)).await {
            Ok(exchanges) => exchanges,
            Err(e) => {
                return (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Json(serde_json::json!({
                        "error": "Query error",
                        "message": format!("Failed to query HTTP exchanges: {}", e)
                    })),
                )
                    .into_response();
            }
        };

    if exchanges_from_recorder.is_empty() {
        return (
            StatusCode::NOT_FOUND,
            Json(serde_json::json!({
                "error": "No exchanges found",
                "message": "No HTTP exchanges found matching the specified filters"
            })),
        )
            .into_response();
    }

    // Bridge the recorder's exchange type to the generator's local type;
    // the two structs mirror each other field-for-field.
    use mockforge_core::intelligent_behavior::openapi_generator::HttpExchange as LocalHttpExchange;
    let exchanges: Vec<LocalHttpExchange> = exchanges_from_recorder
        .into_iter()
        .map(|e| LocalHttpExchange {
            method: e.method,
            path: e.path,
            query_params: e.query_params,
            headers: e.headers,
            body: e.body,
            body_encoding: e.body_encoding,
            status_code: e.status_code,
            response_headers: e.response_headers,
            response_body: e.response_body,
            response_body_encoding: e.response_body_encoding,
            timestamp: e.timestamp,
        })
        .collect();

    let behavior_config = IntelligentBehaviorConfig::default();
    let gen_config = OpenApiGenerationConfig {
        min_confidence: request.min_confidence,
        behavior_model: Some(behavior_config.behavior_model),
    };

    let generator = OpenApiSpecGenerator::new(gen_config);
    let result = match generator.generate_from_exchanges(exchanges).await {
        Ok(result) => result,
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(serde_json::json!({
                    "error": "Generation error",
                    "message": format!("Failed to generate OpenAPI spec: {}", e)
                })),
            )
                .into_response();
        }
    };

    // Prefer the generator's raw document when present; otherwise serialize
    // the structured spec ourselves.
    let spec_json = if let Some(ref raw) = result.spec.raw_document {
        raw.clone()
    } else {
        match serde_json::to_value(&result.spec.spec) {
            Ok(json) => json,
            Err(e) => {
                return (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Json(serde_json::json!({
                        "error": "Serialization error",
                        "message": format!("Failed to serialize OpenAPI spec: {}", e)
                    })),
                )
                    .into_response();
            }
        }
    };

    let response = serde_json::json!({
        "spec": spec_json,
        "metadata": {
            "requests_analyzed": result.metadata.requests_analyzed,
            "paths_inferred": result.metadata.paths_inferred,
            "path_confidence": result.metadata.path_confidence,
            "generated_at": result.metadata.generated_at.to_rfc3339(),
            "duration_ms": result.metadata.duration_ms,
        }
    });

    Json(response).into_response()
}
433
434pub(crate) async fn list_rule_explanations(
436 State(state): State<ManagementState>,
437 Query(params): Query<std::collections::HashMap<String, String>>,
438) -> impl IntoResponse {
439 use mockforge_foundation::intelligent_behavior::rule_types::RuleType;
440
441 let explanations = state.rule_explanations.read().await;
442 let mut explanations_vec: Vec<_> = explanations.values().cloned().collect();
443
444 if let Some(rule_type_str) = params.get("rule_type") {
446 if let Ok(rule_type) = serde_json::from_str::<RuleType>(&format!("\"{}\"", rule_type_str)) {
447 explanations_vec.retain(|e| e.rule_type == rule_type);
448 }
449 }
450
451 if let Some(min_confidence_str) = params.get("min_confidence") {
453 if let Ok(min_confidence) = min_confidence_str.parse::<f64>() {
454 explanations_vec.retain(|e| e.confidence >= min_confidence);
455 }
456 }
457
458 explanations_vec.sort_by(|a, b| {
460 b.confidence
461 .partial_cmp(&a.confidence)
462 .unwrap_or(std::cmp::Ordering::Equal)
463 .then_with(|| b.generated_at.cmp(&a.generated_at))
464 });
465
466 Json(serde_json::json!({
467 "explanations": explanations_vec,
468 "total": explanations_vec.len(),
469 }))
470 .into_response()
471}
472
473pub(crate) async fn get_rule_explanation(
475 State(state): State<ManagementState>,
476 Path(rule_id): Path<String>,
477) -> impl IntoResponse {
478 let explanations = state.rule_explanations.read().await;
479
480 match explanations.get(&rule_id) {
481 Some(explanation) => Json(serde_json::json!({
482 "explanation": explanation,
483 }))
484 .into_response(),
485 None => (
486 StatusCode::NOT_FOUND,
487 Json(serde_json::json!({
488 "error": "Rule explanation not found",
489 "message": format!("No explanation found for rule ID: {}", rule_id)
490 })),
491 )
492 .into_response(),
493 }
494}
495
/// Request payload for learning behavior rules from request/response
/// example pairs (see `learn_from_examples`).
#[derive(Debug, Deserialize)]
pub struct LearnFromExamplesRequest {
    /// Example pairs to learn from; must be non-empty.
    pub examples: Vec<ExamplePairRequest>,
    /// Optional `IntelligentBehaviorConfig` as JSON; defaults are used when
    /// omitted or when deserialization fails.
    #[serde(default)]
    pub config: Option<serde_json::Value>,
}
505
/// One request/response example, expressed as loosely-typed JSON objects.
#[derive(Debug, Deserialize)]
pub struct ExamplePairRequest {
    /// Request half; recognized keys (all optional): "method", "path",
    /// "body", "query_params", "headers".
    pub request: serde_json::Value,
    /// Response half; recognized keys: "status_code" (or "status") and
    /// "body".
    pub response: serde_json::Value,
}
514
/// Learn intelligent-behavior rules from user-supplied request/response
/// example pairs and cache the resulting explanations in management state.
///
/// Returns 400 when no examples are supplied, 500 when rule generation
/// fails; on success responds with counts of generated artifacts and a
/// summary of each rule explanation.
pub(crate) async fn learn_from_examples(
    State(state): State<ManagementState>,
    Json(request): Json<LearnFromExamplesRequest>,
) -> impl IntoResponse {
    use mockforge_core::intelligent_behavior::{
        config::{BehaviorModelConfig, IntelligentBehaviorConfig},
        rule_generator::{ExamplePair, RuleGenerator},
    };

    // At least one example pair is required.
    if request.examples.is_empty() {
        return (
            StatusCode::BAD_REQUEST,
            Json(serde_json::json!({
                "error": "No examples provided",
                "message": "At least one example pair is required"
            })),
        )
            .into_response();
    }

    // Convert the loosely-typed JSON examples into `ExamplePair`s. Missing
    // fields get permissive defaults (GET, "/", status 200, empty maps)
    // rather than rejecting the request.
    let example_pairs: Result<Vec<ExamplePair>, String> = request
        .examples
        .into_iter()
        .enumerate()
        .map(|(idx, ex)| {
            let method = ex
                .request
                .get("method")
                .and_then(|v| v.as_str())
                .map(|s| s.to_string())
                .unwrap_or_else(|| "GET".to_string());
            let path = ex
                .request
                .get("path")
                .and_then(|v| v.as_str())
                .map(|s| s.to_string())
                .unwrap_or_else(|| "/".to_string());
            let request_body = ex.request.get("body").cloned();
            // Only string-valued entries are kept for query params/headers;
            // non-string values are silently dropped.
            let query_params = ex
                .request
                .get("query_params")
                .and_then(|v| v.as_object())
                .map(|obj| {
                    obj.iter()
                        .filter_map(|(k, v)| v.as_str().map(|s| (k.clone(), s.to_string())))
                        .collect()
                })
                .unwrap_or_default();
            let headers = ex
                .request
                .get("headers")
                .and_then(|v| v.as_object())
                .map(|obj| {
                    obj.iter()
                        .filter_map(|(k, v)| v.as_str().map(|s| (k.clone(), s.to_string())))
                        .collect()
                })
                .unwrap_or_default();

            // Accept either "status_code" or the shorter "status" key.
            let status = ex
                .response
                .get("status_code")
                .or_else(|| ex.response.get("status"))
                .and_then(|v| v.as_u64())
                .map(|n| n as u16)
                .unwrap_or(200);
            let response_body = ex.response.get("body").cloned();

            Ok(ExamplePair {
                method,
                path,
                request: request_body,
                status,
                response: response_body,
                query_params,
                headers,
                metadata: {
                    // Tag each pair with its source and original index so
                    // generated rules can be traced back to the input.
                    let mut meta = std::collections::HashMap::new();
                    meta.insert("source".to_string(), "api".to_string());
                    meta.insert("example_index".to_string(), idx.to_string());
                    meta
                },
            })
        })
        .collect();

    // NOTE(review): the closure above never returns Err, so this arm is
    // currently unreachable; kept for future per-example validation.
    let example_pairs = match example_pairs {
        Ok(pairs) => pairs,
        Err(e) => {
            return (
                StatusCode::BAD_REQUEST,
                Json(serde_json::json!({
                    "error": "Invalid examples",
                    "message": e
                })),
            )
                .into_response();
        }
    };

    // A bad `config` payload silently falls back to defaults.
    let behavior_config = if let Some(config_json) = request.config {
        serde_json::from_value(config_json)
            .unwrap_or_else(|_| IntelligentBehaviorConfig::default())
            .behavior_model
    } else {
        BehaviorModelConfig::default()
    };

    let generator = RuleGenerator::new(behavior_config);

    let (rules, explanations) =
        match generator.generate_rules_with_explanations(example_pairs).await {
            Ok(result) => result,
            Err(e) => {
                return (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Json(serde_json::json!({
                        "error": "Rule generation failed",
                        "message": format!("Failed to generate rules: {}", e)
                    })),
                )
                    .into_response();
            }
        };

    // Store explanations for later retrieval via the explanation endpoints;
    // the block scopes the write lock to the insertion loop.
    {
        let mut stored_explanations = state.rule_explanations.write().await;
        for explanation in &explanations {
            stored_explanations.insert(explanation.rule_id.clone(), explanation.clone());
        }
    }

    let response = serde_json::json!({
        "success": true,
        "rules_generated": {
            "consistency_rules": rules.consistency_rules.len(),
            "schemas": rules.schemas.len(),
            "state_machines": rules.state_transitions.len(),
            "system_prompt": !rules.system_prompt.is_empty(),
        },
        "explanations": explanations.iter().map(|e| serde_json::json!({
            "rule_id": e.rule_id,
            "rule_type": e.rule_type,
            "confidence": e.confidence,
            "reasoning": e.reasoning,
        })).collect::<Vec<_>>(),
        "total_explanations": explanations.len(),
    });

    Json(response).into_response()
}
679
680#[cfg(feature = "data-faker")]
681fn extract_yaml_spec(text: &str) -> String {
682 if let Some(start) = text.find("```yaml") {
684 let yaml_start = text[start + 7..].trim_start();
685 if let Some(end) = yaml_start.find("```") {
686 return yaml_start[..end].trim().to_string();
687 }
688 }
689 if let Some(start) = text.find("```") {
690 let content_start = text[start + 3..].trim_start();
691 if let Some(end) = content_start.find("```") {
692 return content_start[..end].trim().to_string();
693 }
694 }
695
696 if text.trim_start().starts_with("openapi:") || text.trim_start().starts_with("asyncapi:") {
698 return text.trim().to_string();
699 }
700
701 text.trim().to_string()
703}
704
705#[cfg(feature = "data-faker")]
707fn extract_graphql_schema(text: &str) -> String {
708 if let Some(start) = text.find("```graphql") {
710 let schema_start = text[start + 10..].trim_start();
711 if let Some(end) = schema_start.find("```") {
712 return schema_start[..end].trim().to_string();
713 }
714 }
715 if let Some(start) = text.find("```") {
716 let content_start = text[start + 3..].trim_start();
717 if let Some(end) = content_start.find("```") {
718 return content_start[..end].trim().to_string();
719 }
720 }
721
722 if text.trim_start().starts_with("type ") || text.trim_start().starts_with("schema ") {
724 return text.trim().to_string();
725 }
726
727 text.trim().to_string()
728}
729
/// Return the current chaos engineering configuration.
///
/// With the `chaos` feature compiled in and a chaos API state available,
/// the live config is read under a read lock. Otherwise (feature off, or no
/// chaos state configured) a disabled configuration with null sub-sections
/// is reported.
pub(crate) async fn get_chaos_config(State(_state): State<ManagementState>) -> impl IntoResponse {
    #[cfg(feature = "chaos")]
    {
        if let Some(chaos_state) = &_state.chaos_api_state {
            let config = chaos_state.config.read().await;
            // Each optional section is serialized independently; a section
            // that fails to serialize becomes JSON null rather than an error.
            Json(serde_json::json!({
                "enabled": config.enabled,
                "latency": config.latency.as_ref().map(|l| serde_json::to_value(l).unwrap_or(serde_json::Value::Null)),
                "fault_injection": config.fault_injection.as_ref().map(|f| serde_json::to_value(f).unwrap_or(serde_json::Value::Null)),
                "rate_limit": config.rate_limit.as_ref().map(|r| serde_json::to_value(r).unwrap_or(serde_json::Value::Null)),
                "traffic_shaping": config.traffic_shaping.as_ref().map(|t| serde_json::to_value(t).unwrap_or(serde_json::Value::Null)),
            }))
            .into_response()
        } else {
            // Chaos feature is built in but no chaos API state was wired up.
            Json(serde_json::json!({
                "enabled": false,
                "latency": null,
                "fault_injection": null,
                "rate_limit": null,
                "traffic_shaping": null,
            }))
            .into_response()
        }
    }
    #[cfg(not(feature = "chaos"))]
    {
        // Feature compiled out: report a permanently disabled configuration.
        Json(serde_json::json!({
            "enabled": false,
            "latency": null,
            "fault_injection": null,
            "rate_limit": null,
            "traffic_shaping": null,
        }))
        .into_response()
    }
}
772
/// Partial update for the chaos engineering configuration. Omitted (`None`)
/// fields leave the corresponding setting untouched.
#[derive(Debug, Deserialize)]
pub struct ChaosConfigUpdate {
    /// Master on/off switch.
    pub enabled: Option<bool>,
    /// Deserialized into `LatencyConfig` when present; invalid JSON is
    /// silently ignored (see `update_chaos_config`).
    pub latency: Option<serde_json::Value>,
    /// Deserialized into `FaultInjectionConfig` when present.
    pub fault_injection: Option<serde_json::Value>,
    /// Deserialized into `RateLimitConfig` when present.
    pub rate_limit: Option<serde_json::Value>,
    /// Deserialized into `TrafficShapingConfig` when present.
    pub traffic_shaping: Option<serde_json::Value>,
}
787
/// Apply a partial chaos configuration update.
///
/// Each section provided in the body is deserialized into its typed config
/// under the write lock; a section that fails to deserialize is silently
/// skipped while the remaining sections still apply. Returns 503 when the
/// chaos API state is unavailable and 501 when the `chaos` feature is not
/// compiled into this build.
pub(crate) async fn update_chaos_config(
    State(_state): State<ManagementState>,
    Json(_config_update): Json<ChaosConfigUpdate>,
) -> impl IntoResponse {
    #[cfg(feature = "chaos")]
    {
        if let Some(chaos_state) = &_state.chaos_api_state {
            use mockforge_chaos::config::{
                FaultInjectionConfig, LatencyConfig, RateLimitConfig, TrafficShapingConfig,
            };

            let mut config = chaos_state.config.write().await;

            if let Some(enabled) = _config_update.enabled {
                config.enabled = enabled;
            }

            // NOTE(review): failed deserialization of any section below is
            // swallowed — the client gets success even if a section was
            // malformed and therefore not applied.
            if let Some(latency_json) = _config_update.latency {
                if let Ok(latency) = serde_json::from_value::<LatencyConfig>(latency_json) {
                    config.latency = Some(latency);
                }
            }

            if let Some(fault_json) = _config_update.fault_injection {
                if let Ok(fault) = serde_json::from_value::<FaultInjectionConfig>(fault_json) {
                    config.fault_injection = Some(fault);
                }
            }

            if let Some(rate_json) = _config_update.rate_limit {
                if let Ok(rate) = serde_json::from_value::<RateLimitConfig>(rate_json) {
                    config.rate_limit = Some(rate);
                }
            }

            if let Some(traffic_json) = _config_update.traffic_shaping {
                if let Ok(traffic) = serde_json::from_value::<TrafficShapingConfig>(traffic_json) {
                    config.traffic_shaping = Some(traffic);
                }
            }

            // Release the write lock before responding.
            drop(config);

            info!("Chaos configuration updated successfully");
            Json(serde_json::json!({
                "success": true,
                "message": "Chaos configuration updated and applied"
            }))
            .into_response()
        } else {
            (
                StatusCode::SERVICE_UNAVAILABLE,
                Json(serde_json::json!({
                    "success": false,
                    "error": "Chaos API not available",
                    "message": "Chaos engineering is not enabled or configured"
                })),
            )
                .into_response()
        }
    }
    #[cfg(not(feature = "chaos"))]
    {
        (
            StatusCode::NOT_IMPLEMENTED,
            Json(serde_json::json!({
                "success": false,
                "error": "Chaos feature not enabled",
                "message": "Chaos engineering feature is not compiled into this build"
            })),
        )
            .into_response()
    }
}
870
871pub(crate) async fn list_network_profiles() -> impl IntoResponse {
875 use mockforge_chaos::core_network_profiles::NetworkProfileCatalog;
876
877 let catalog = NetworkProfileCatalog::default();
878 let profiles: Vec<serde_json::Value> = catalog
879 .list_profiles_with_description()
880 .iter()
881 .map(|(name, description)| {
882 serde_json::json!({
883 "name": name,
884 "description": description,
885 })
886 })
887 .collect();
888
889 Json(serde_json::json!({
890 "profiles": profiles
891 }))
892 .into_response()
893}
894
/// Request payload selecting a named network profile from the catalog.
#[derive(Debug, Deserialize)]
pub struct ApplyNetworkProfileRequest {
    /// Profile name as returned by `list_network_profiles`.
    pub profile_name: String,
}
901
/// Apply a named network profile to both the server configuration (when
/// available) and the live chaos API state (when the `chaos` feature is on).
///
/// Returns 404 when the profile name is not in the catalog; otherwise always
/// responds success, even when the server config is missing (the profile is
/// then applied only to the chaos state, with a warning logged).
pub(crate) async fn apply_network_profile(
    State(state): State<ManagementState>,
    Json(request): Json<ApplyNetworkProfileRequest>,
) -> impl IntoResponse {
    use mockforge_chaos::core_network_profiles::NetworkProfileCatalog;

    let catalog = NetworkProfileCatalog::default();
    if let Some(profile) = catalog.get(&request.profile_name) {
        // Persist the profile into the server configuration when present.
        if let Some(server_config) = &state.server_config {
            let mut config = server_config.write().await;

            use mockforge_core::config::NetworkShapingConfig;

            let network_shaping = NetworkShapingConfig {
                // Shaping is active when either bandwidth limiting or burst
                // loss is enabled in the profile.
                enabled: profile.traffic_shaping.bandwidth.enabled
                    || profile.traffic_shaping.burst_loss.enabled,
                // Catalog stores bytes/sec; ×8 converts to bits/sec for the
                // `_bps` field — assumes the field means bits/sec, TODO confirm.
                bandwidth_limit_bps: profile.traffic_shaping.bandwidth.max_bytes_per_sec * 8,
                packet_loss_percent: profile.traffic_shaping.burst_loss.loss_rate_during_burst,
                // Fixed cap; the profile does not carry a connection limit.
                max_connections: 1000,
            };

            // Merge into an existing chaos config, or create a fresh one.
            if let Some(ref mut chaos) = config.observability.chaos {
                chaos.traffic_shaping = Some(network_shaping);
            } else {
                use mockforge_core::config::ChaosEngConfig;
                config.observability.chaos = Some(ChaosEngConfig {
                    enabled: true,
                    latency: None,
                    fault_injection: None,
                    rate_limit: None,
                    traffic_shaping: Some(network_shaping),
                    scenario: None,
                });
            }

            info!("Network profile '{}' applied to server configuration", request.profile_name);
        } else {
            warn!("Server configuration not available in ManagementState - profile applied but not persisted");
        }

        // Also push the profile into the live chaos API state so it takes
        // effect immediately (chaos feature builds only).
        #[cfg(feature = "chaos")]
        {
            if let Some(chaos_state) = &state.chaos_api_state {
                use mockforge_chaos::config::TrafficShapingConfig;

                let mut chaos_config = chaos_state.config.write().await;
                let chaos_traffic_shaping = TrafficShapingConfig {
                    enabled: profile.traffic_shaping.bandwidth.enabled
                        || profile.traffic_shaping.burst_loss.enabled,
                    // Same bytes/sec → bits/sec conversion as above.
                    bandwidth_limit_bps: profile.traffic_shaping.bandwidth.max_bytes_per_sec * 8,
                    packet_loss_percent: profile.traffic_shaping.burst_loss.loss_rate_during_burst,
                    // NOTE(review): 0 here vs 1000 in the server config —
                    // presumably 0 means "unlimited"; verify against
                    // TrafficShapingConfig semantics.
                    max_connections: 0,
                    connection_timeout_ms: 30000,
                };
                chaos_config.traffic_shaping = Some(chaos_traffic_shaping);
                // Applying a profile force-enables chaos as a whole.
                chaos_config.enabled = true;
                drop(chaos_config);
                info!("Network profile '{}' applied to chaos API state", request.profile_name);
            }
        }

        Json(serde_json::json!({
            "success": true,
            "message": format!("Network profile '{}' applied", request.profile_name),
            "profile": {
                "name": profile.name,
                "description": profile.description,
            }
        }))
        .into_response()
    } else {
        (
            StatusCode::NOT_FOUND,
            Json(serde_json::json!({
                "error": "Profile not found",
                "message": format!("Network profile '{}' not found", request.profile_name)
            })),
        )
            .into_response()
    }
}