mockforge-http 0.3.116

HTTP/REST protocol support for MockForge
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
use axum::{
    extract::{Path, Query, State},
    http::StatusCode,
    response::{IntoResponse, Json},
};
use serde::Deserialize;
use tracing::*;

use super::ManagementState;

// ========== AI-Powered Features ==========

/// Request for AI-powered API specification generation
///
/// JSON body accepted by the `generate_ai_spec` handler. Any unrecognized
/// `spec_type` value is treated as OpenAPI when the generation prompt is
/// built, so deserialization never rejects it.
#[derive(Debug, Deserialize)]
pub struct GenerateSpecRequest {
    /// Natural language description of the API to generate
    pub query: String,
    /// Type of specification to generate: "openapi", "graphql", or "asyncapi"
    /// (anything else falls back to OpenAPI)
    pub spec_type: String,
    /// Optional API version (e.g., "3.0.0" for OpenAPI); the handler
    /// defaults this to "3.0.0" when omitted
    pub api_version: Option<String>,
}

/// Request for OpenAPI generation from recorded traffic
///
/// All fields are optional in the JSON body; `#[serde(default)]` lets an
/// empty object `{}` deserialize, in which case the handler scans the
/// default `./recordings.db` with no time or path filters and a 0.7
/// confidence floor.
#[derive(Debug, Deserialize)]
pub struct GenerateOpenApiFromTrafficRequest {
    /// Path to recorder database (optional, defaults to ./recordings.db)
    #[serde(default)]
    pub database_path: Option<String>,
    /// Start time for filtering (ISO 8601 format, e.g., 2025-01-01T00:00:00Z)
    #[serde(default)]
    pub since: Option<String>,
    /// End time for filtering (ISO 8601 format)
    #[serde(default)]
    pub until: Option<String>,
    /// Path pattern filter (supports wildcards, e.g., /api/*)
    #[serde(default)]
    pub path_pattern: Option<String>,
    /// Minimum confidence score for including paths (0.0 to 1.0);
    /// defaults to 0.7 via `default_min_confidence`
    #[serde(default = "default_min_confidence")]
    pub min_confidence: f64,
}

/// Serde default for `GenerateOpenApiFromTrafficRequest::min_confidence`:
/// inferred paths must score at least 70% confidence unless the caller
/// overrides the threshold.
fn default_min_confidence() -> f64 {
    const DEFAULT_MIN_CONFIDENCE: f64 = 0.7;
    DEFAULT_MIN_CONFIDENCE
}

/// Generate API specification from natural language using AI
///
/// Flow: read LLM settings from `MOCKFORGE_RAG_*` environment variables
/// (API key also falls back to `OPENAI_API_KEY`), build a one-shot prompt
/// from the request, run it through a `RagEngine` backed by in-memory
/// document storage, then strip any surrounding code fences from the reply.
///
/// Responses: 503 when no API key is configured, 500 when engine creation
/// or generation fails, otherwise 200 with `{ success, spec, spec_type }`.
#[cfg(feature = "data-faker")]
pub(crate) async fn generate_ai_spec(
    State(_state): State<ManagementState>,
    Json(request): Json<GenerateSpecRequest>,
) -> impl IntoResponse {
    use mockforge_data::rag::{
        config::{LlmProvider, RagConfig},
        engine::RagEngine,
        storage::DocumentStorage,
    };
    use std::sync::Arc;

    // Build RAG config from environment variables
    let api_key = std::env::var("MOCKFORGE_RAG_API_KEY")
        .ok()
        .or_else(|| std::env::var("OPENAI_API_KEY").ok());

    // Check if RAG is configured - require API key
    if api_key.is_none() {
        return (
            StatusCode::SERVICE_UNAVAILABLE,
            Json(serde_json::json!({
                "error": "AI service not configured",
                "message": "Please provide an API key via MOCKFORGE_RAG_API_KEY or OPENAI_API_KEY"
            })),
        )
            .into_response();
    }

    // Build RAG configuration
    // Unknown provider names silently fall back to OpenAI.
    let provider_str = std::env::var("MOCKFORGE_RAG_PROVIDER")
        .unwrap_or_else(|_| "openai".to_string())
        .to_lowercase();

    let provider = match provider_str.as_str() {
        "openai" => LlmProvider::OpenAI,
        "anthropic" => LlmProvider::Anthropic,
        "ollama" => LlmProvider::Ollama,
        "openai-compatible" | "openai_compatible" => LlmProvider::OpenAICompatible,
        _ => LlmProvider::OpenAI,
    };

    // Endpoint and model each default per-provider when the env var is unset.
    let api_endpoint =
        std::env::var("MOCKFORGE_RAG_API_ENDPOINT").unwrap_or_else(|_| match provider {
            LlmProvider::OpenAI => "https://api.openai.com/v1".to_string(),
            LlmProvider::Anthropic => "https://api.anthropic.com/v1".to_string(),
            LlmProvider::Ollama => "http://localhost:11434/api".to_string(),
            LlmProvider::OpenAICompatible => "http://localhost:8000/v1".to_string(),
        });

    let model = std::env::var("MOCKFORGE_RAG_MODEL").unwrap_or_else(|_| match provider {
        LlmProvider::OpenAI => "gpt-3.5-turbo".to_string(),
        LlmProvider::Anthropic => "claude-3-sonnet-20240229".to_string(),
        LlmProvider::Ollama => "llama2".to_string(),
        LlmProvider::OpenAICompatible => "gpt-3.5-turbo".to_string(),
    });

    // Build RagConfig using struct literal with defaults.
    // Malformed numeric env values fall back to the same defaults via
    // unwrap_or rather than erroring out.
    let rag_config = RagConfig {
        provider,
        api_endpoint,
        api_key,
        model,
        max_tokens: std::env::var("MOCKFORGE_RAG_MAX_TOKENS")
            .unwrap_or_else(|_| "4096".to_string())
            .parse()
            .unwrap_or(4096),
        temperature: std::env::var("MOCKFORGE_RAG_TEMPERATURE")
            .unwrap_or_else(|_| "0.3".to_string())
            .parse()
            .unwrap_or(0.3), // Lower temperature for more structured output
        timeout_secs: std::env::var("MOCKFORGE_RAG_TIMEOUT")
            .unwrap_or_else(|_| "60".to_string())
            .parse()
            .unwrap_or(60),
        max_context_length: std::env::var("MOCKFORGE_RAG_CONTEXT_WINDOW")
            .unwrap_or_else(|_| "4000".to_string())
            .parse()
            .unwrap_or(4000),
        ..Default::default()
    };

    // Build the prompt for spec generation
    let spec_type_label = match request.spec_type.as_str() {
        "openapi" => "OpenAPI 3.0",
        "graphql" => "GraphQL",
        "asyncapi" => "AsyncAPI",
        _ => "OpenAPI 3.0",
    };

    let api_version = request.api_version.as_deref().unwrap_or("3.0.0");

    let prompt = format!(
        r#"You are an expert API architect. Generate a complete {} specification based on the following user requirements.

User Requirements:
{}

Instructions:
1. Generate a complete, valid {} specification
2. Include all paths, operations, request/response schemas, and components
3. Use realistic field names and data types
4. Include proper descriptions and examples
5. Follow {} best practices
6. Return ONLY the specification, no additional explanation
7. For OpenAPI, use version {}

Return the specification in {} format."#,
        spec_type_label,
        request.query,
        spec_type_label,
        spec_type_label,
        api_version,
        if request.spec_type == "graphql" {
            "GraphQL SDL"
        } else {
            "YAML"
        }
    );

    // Create in-memory storage for RAG engine
    // Note: StorageFactory::create_memory() returns Box<dyn DocumentStorage>
    // We need to use unsafe transmute or create a wrapper, but for now we'll use
    // a simpler approach: create InMemoryStorage directly
    use mockforge_data::rag::storage::InMemoryStorage;
    let storage: Arc<dyn DocumentStorage> = Arc::new(InMemoryStorage::new());

    // Create RAG engine
    let mut rag_engine = match RagEngine::new(rag_config.clone(), storage) {
        Ok(engine) => engine,
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(serde_json::json!({
                    "error": "Failed to initialize RAG engine",
                    "message": e.to_string()
                })),
            )
                .into_response();
        }
    };

    // Generate using RAG engine
    match rag_engine.generate(&prompt, None).await {
        Ok(generated_text) => {
            // Try to extract just the YAML/JSON/SDL content if LLM added explanation
            let spec = if request.spec_type == "graphql" {
                // For GraphQL, extract SDL
                extract_graphql_schema(&generated_text)
            } else {
                // For OpenAPI/AsyncAPI, extract YAML
                extract_yaml_spec(&generated_text)
            };

            Json(serde_json::json!({
                "success": true,
                "spec": spec,
                "spec_type": request.spec_type,
            }))
            .into_response()
        }
        Err(e) => (
            StatusCode::INTERNAL_SERVER_ERROR,
            Json(serde_json::json!({
                "error": "AI generation failed",
                "message": e.to_string()
            })),
        )
            .into_response(),
    }
}

/// Fallback handler compiled when the `data-faker` feature is disabled:
/// unconditionally answers 501 so clients can detect the missing capability.
#[cfg(not(feature = "data-faker"))]
pub(crate) async fn generate_ai_spec(
    State(_state): State<ManagementState>,
    Json(_request): Json<GenerateSpecRequest>,
) -> impl IntoResponse {
    let body = serde_json::json!({
        "error": "AI features not enabled",
        "message": "Please enable the 'data-faker' feature to use AI-powered specification generation"
    });
    (StatusCode::NOT_IMPLEMENTED, Json(body)).into_response()
}

/// Generate OpenAPI specification from recorded traffic
///
/// Pipeline: open the recorder SQLite database (default `./recordings.db`),
/// parse optional RFC 3339 time filters, query up to 1000 HTTP exchanges,
/// convert them to the local `HttpExchange` type, then infer an OpenAPI
/// spec with `OpenApiSpecGenerator`.
///
/// Responses: 400 for an unopenable database or malformed dates, 404 when
/// no exchanges match the filters, 500 for query/generation/serialization
/// failures, otherwise 200 with `{ spec, metadata }`.
#[cfg(feature = "behavioral-cloning")]
pub(crate) async fn generate_openapi_from_traffic(
    State(_state): State<ManagementState>,
    Json(request): Json<GenerateOpenApiFromTrafficRequest>,
) -> impl IntoResponse {
    use chrono::{DateTime, Utc};
    use mockforge_core::intelligent_behavior::{
        openapi_generator::{OpenApiGenerationConfig, OpenApiSpecGenerator},
        IntelligentBehaviorConfig,
    };
    use mockforge_recorder::{
        database::RecorderDatabase,
        openapi_export::{QueryFilters, RecordingsToOpenApi},
    };
    use std::path::PathBuf;

    // Determine database path: explicit request path wins, otherwise
    // `recordings.db` in the current working directory.
    let db_path = if let Some(ref path) = request.database_path {
        PathBuf::from(path)
    } else {
        std::env::current_dir()
            .unwrap_or_else(|_| PathBuf::from("."))
            .join("recordings.db")
    };

    // Open database
    let db = match RecorderDatabase::new(&db_path).await {
        Ok(db) => db,
        Err(e) => {
            return (
                StatusCode::BAD_REQUEST,
                Json(serde_json::json!({
                    "error": "Database error",
                    "message": format!("Failed to open recorder database: {}", e)
                })),
            )
                .into_response();
        }
    };

    // Parse time filters; both bounds are optional and independently validated.
    let since_dt = if let Some(ref since_str) = request.since {
        match DateTime::parse_from_rfc3339(since_str) {
            Ok(dt) => Some(dt.with_timezone(&Utc)),
            Err(e) => {
                return (
                    StatusCode::BAD_REQUEST,
                    Json(serde_json::json!({
                        "error": "Invalid date format",
                        "message": format!("Invalid --since format: {}. Use ISO 8601 format (e.g., 2025-01-01T00:00:00Z)", e)
                    })),
                )
                    .into_response();
            }
        }
    } else {
        None
    };

    let until_dt = if let Some(ref until_str) = request.until {
        match DateTime::parse_from_rfc3339(until_str) {
            Ok(dt) => Some(dt.with_timezone(&Utc)),
            Err(e) => {
                return (
                    StatusCode::BAD_REQUEST,
                    Json(serde_json::json!({
                        "error": "Invalid date format",
                        "message": format!("Invalid --until format: {}. Use ISO 8601 format (e.g., 2025-01-01T00:00:00Z)", e)
                    })),
                )
                    .into_response();
            }
        }
    } else {
        None
    };

    // Build query filters; max_requests caps the scan at 1000 exchanges.
    let query_filters = QueryFilters {
        since: since_dt,
        until: until_dt,
        path_pattern: request.path_pattern.clone(),
        min_status_code: None,
        max_requests: Some(1000),
    };

    // Query HTTP exchanges
    // Note: We need to convert from mockforge-recorder's HttpExchange to mockforge-core's HttpExchange
    // to avoid version mismatch issues. The converter returns the version from mockforge-recorder's
    // dependency, so we need to manually convert to the local version.
    let exchanges_from_recorder =
        match RecordingsToOpenApi::query_http_exchanges(&db, Some(query_filters)).await {
            Ok(exchanges) => exchanges,
            Err(e) => {
                return (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Json(serde_json::json!({
                        "error": "Query error",
                        "message": format!("Failed to query HTTP exchanges: {}", e)
                    })),
                )
                    .into_response();
            }
        };

    if exchanges_from_recorder.is_empty() {
        return (
            StatusCode::NOT_FOUND,
            Json(serde_json::json!({
                "error": "No exchanges found",
                "message": "No HTTP exchanges found matching the specified filters"
            })),
        )
            .into_response();
    }

    // Convert to local HttpExchange type to avoid version mismatch
    // (field-by-field move; both types share the same shape).
    use mockforge_core::intelligent_behavior::openapi_generator::HttpExchange as LocalHttpExchange;
    let exchanges: Vec<LocalHttpExchange> = exchanges_from_recorder
        .into_iter()
        .map(|e| LocalHttpExchange {
            method: e.method,
            path: e.path,
            query_params: e.query_params,
            headers: e.headers,
            body: e.body,
            body_encoding: e.body_encoding,
            status_code: e.status_code,
            response_headers: e.response_headers,
            response_body: e.response_body,
            response_body_encoding: e.response_body_encoding,
            timestamp: e.timestamp,
        })
        .collect();

    // Create OpenAPI generator config, inheriting the default behavior model.
    let behavior_config = IntelligentBehaviorConfig::default();
    let gen_config = OpenApiGenerationConfig {
        min_confidence: request.min_confidence,
        behavior_model: Some(behavior_config.behavior_model),
    };

    // Generate OpenAPI spec
    let generator = OpenApiSpecGenerator::new(gen_config);
    let result = match generator.generate_from_exchanges(exchanges).await {
        Ok(result) => result,
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(serde_json::json!({
                    "error": "Generation error",
                    "message": format!("Failed to generate OpenAPI spec: {}", e)
                })),
            )
                .into_response();
        }
    };

    // Prepare response: prefer the generator's raw document when present,
    // otherwise serialize the typed spec.
    let spec_json = if let Some(ref raw) = result.spec.raw_document {
        raw.clone()
    } else {
        match serde_json::to_value(&result.spec.spec) {
            Ok(json) => json,
            Err(e) => {
                return (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Json(serde_json::json!({
                        "error": "Serialization error",
                        "message": format!("Failed to serialize OpenAPI spec: {}", e)
                    })),
                )
                    .into_response();
            }
        }
    };

    // Build response with metadata
    let response = serde_json::json!({
        "spec": spec_json,
        "metadata": {
            "requests_analyzed": result.metadata.requests_analyzed,
            "paths_inferred": result.metadata.paths_inferred,
            "path_confidence": result.metadata.path_confidence,
            "generated_at": result.metadata.generated_at.to_rfc3339(),
            "duration_ms": result.metadata.duration_ms,
        }
    });

    Json(response).into_response()
}

/// List all rule explanations
///
/// Supports two optional query parameters: `rule_type` (a bare enum name,
/// matched via serde) and `min_confidence` (a float floor). Unparseable
/// filter values are silently ignored. Results are ordered by descending
/// confidence, ties broken by newest `generated_at`.
pub(crate) async fn list_rule_explanations(
    State(state): State<ManagementState>,
    Query(params): Query<std::collections::HashMap<String, String>>,
) -> impl IntoResponse {
    use mockforge_core::intelligent_behavior::RuleType;

    let guard = state.rule_explanations.read().await;
    let mut items: Vec<_> = guard.values().cloned().collect();

    // The query value is a bare variant name; wrap it in quotes so serde's
    // string-based enum deserialization can parse it.
    if let Some(raw) = params.get("rule_type") {
        let quoted = format!("\"{}\"", raw);
        if let Ok(wanted) = serde_json::from_str::<RuleType>(&quoted) {
            items.retain(|e| e.rule_type == wanted);
        }
    }

    // Drop anything below the requested confidence floor.
    if let Some(raw) = params.get("min_confidence") {
        if let Ok(floor) = raw.parse::<f64>() {
            items.retain(|e| e.confidence >= floor);
        }
    }

    // Highest confidence first; NaN comparisons are treated as equal.
    items.sort_by(|a, b| {
        b.confidence
            .partial_cmp(&a.confidence)
            .unwrap_or(std::cmp::Ordering::Equal)
            .then_with(|| b.generated_at.cmp(&a.generated_at))
    });

    Json(serde_json::json!({
        "explanations": items,
        "total": items.len(),
    }))
    .into_response()
}

/// Get a specific rule explanation by ID
///
/// Returns `{ "explanation": ... }` on a hit, or 404 with an error payload
/// when no explanation is stored under `rule_id`.
pub(crate) async fn get_rule_explanation(
    State(state): State<ManagementState>,
    Path(rule_id): Path<String>,
) -> impl IntoResponse {
    let store = state.rule_explanations.read().await;

    if let Some(found) = store.get(&rule_id) {
        return Json(serde_json::json!({
            "explanation": found,
        }))
        .into_response();
    }

    (
        StatusCode::NOT_FOUND,
        Json(serde_json::json!({
            "error": "Rule explanation not found",
            "message": format!("No explanation found for rule ID: {}", rule_id)
        })),
    )
        .into_response()
}

/// Request for learning from examples
///
/// Body accepted by `learn_from_examples`. At least one example pair is
/// required; an empty list is rejected with 400 by the handler.
#[derive(Debug, Deserialize)]
pub struct LearnFromExamplesRequest {
    /// Example request/response pairs to learn from
    pub examples: Vec<ExamplePairRequest>,
    /// Optional configuration override; deserialized as
    /// `IntelligentBehaviorConfig`, falling back to defaults if malformed
    #[serde(default)]
    pub config: Option<serde_json::Value>,
}

/// Example pair request format
///
/// Both sides are free-form JSON; the handler pulls well-known keys out of
/// each (`method`, `path`, `body`, `query_params`, `headers` from the
/// request; `status_code`/`status` and `body` from the response) and
/// substitutes defaults for anything missing.
#[derive(Debug, Deserialize)]
pub struct ExamplePairRequest {
    /// Request data (method, path, body, etc.)
    pub request: serde_json::Value,
    /// Response data (status_code, body, etc.)
    pub response: serde_json::Value,
}

/// Learn behavioral rules from example pairs
///
/// This endpoint accepts example request/response pairs, generates behavioral rules
/// with explanations, and stores the explanations for later retrieval.
///
/// Flow: validate the list is non-empty (400 otherwise), lift each loose
/// JSON pair into an `ExamplePair` with defaults for missing keys, run the
/// `RuleGenerator`, cache the produced explanations in
/// `state.rule_explanations`, and respond with summary counts plus a
/// trimmed view of each explanation.
pub(crate) async fn learn_from_examples(
    State(state): State<ManagementState>,
    Json(request): Json<LearnFromExamplesRequest>,
) -> impl IntoResponse {
    use mockforge_core::intelligent_behavior::{
        config::{BehaviorModelConfig, IntelligentBehaviorConfig},
        rule_generator::{ExamplePair, RuleGenerator},
    };

    if request.examples.is_empty() {
        return (
            StatusCode::BAD_REQUEST,
            Json(serde_json::json!({
                "error": "No examples provided",
                "message": "At least one example pair is required"
            })),
        )
            .into_response();
    }

    // Convert request examples to ExamplePair format.
    // NOTE(review): the closure below always returns Ok, so the Err branch
    // handled after this map is currently unreachable; it is kept for
    // future fallible parsing of the example payloads.
    let example_pairs: Result<Vec<ExamplePair>, String> = request
        .examples
        .into_iter()
        .enumerate()
        .map(|(idx, ex)| {
            // Parse request JSON to extract method, path, body, etc.
            // Missing or non-string values fall back to "GET" and "/".
            let method = ex
                .request
                .get("method")
                .and_then(|v| v.as_str())
                .map(|s| s.to_string())
                .unwrap_or_else(|| "GET".to_string());
            let path = ex
                .request
                .get("path")
                .and_then(|v| v.as_str())
                .map(|s| s.to_string())
                .unwrap_or_else(|| "/".to_string());
            let request_body = ex.request.get("body").cloned();
            // Only string-valued entries survive into the param/header maps;
            // non-string values are dropped by filter_map.
            let query_params = ex
                .request
                .get("query_params")
                .and_then(|v| v.as_object())
                .map(|obj| {
                    obj.iter()
                        .filter_map(|(k, v)| v.as_str().map(|s| (k.clone(), s.to_string())))
                        .collect()
                })
                .unwrap_or_default();
            let headers = ex
                .request
                .get("headers")
                .and_then(|v| v.as_object())
                .map(|obj| {
                    obj.iter()
                        .filter_map(|(k, v)| v.as_str().map(|s| (k.clone(), s.to_string())))
                        .collect()
                })
                .unwrap_or_default();

            // Parse response JSON to extract status, body, etc.
            // Accepts either "status_code" or "status"; defaults to 200.
            let status = ex
                .response
                .get("status_code")
                .or_else(|| ex.response.get("status"))
                .and_then(|v| v.as_u64())
                .map(|n| n as u16)
                .unwrap_or(200);
            let response_body = ex.response.get("body").cloned();

            Ok(ExamplePair {
                method,
                path,
                request: request_body,
                status,
                response: response_body,
                query_params,
                headers,
                metadata: {
                    // Tag each pair with its origin and position for traceability.
                    let mut meta = std::collections::HashMap::new();
                    meta.insert("source".to_string(), "api".to_string());
                    meta.insert("example_index".to_string(), idx.to_string());
                    meta
                },
            })
        })
        .collect();

    let example_pairs = match example_pairs {
        Ok(pairs) => pairs,
        Err(e) => {
            return (
                StatusCode::BAD_REQUEST,
                Json(serde_json::json!({
                    "error": "Invalid examples",
                    "message": e
                })),
            )
                .into_response();
        }
    };

    // Create behavior config (use provided config or default)
    let behavior_config = if let Some(config_json) = request.config {
        // Try to deserialize custom config, fall back to default
        serde_json::from_value(config_json)
            .unwrap_or_else(|_| IntelligentBehaviorConfig::default())
            .behavior_model
    } else {
        BehaviorModelConfig::default()
    };

    // Create rule generator
    let generator = RuleGenerator::new(behavior_config);

    // Generate rules with explanations
    let (rules, explanations) =
        match generator.generate_rules_with_explanations(example_pairs).await {
            Ok(result) => result,
            Err(e) => {
                return (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Json(serde_json::json!({
                        "error": "Rule generation failed",
                        "message": format!("Failed to generate rules: {}", e)
                    })),
                )
                    .into_response();
            }
        };

    // Store explanations in ManagementState
    // (scoped block so the write lock is released before building the response)
    {
        let mut stored_explanations = state.rule_explanations.write().await;
        for explanation in &explanations {
            stored_explanations.insert(explanation.rule_id.clone(), explanation.clone());
        }
    }

    // Prepare response
    let response = serde_json::json!({
        "success": true,
        "rules_generated": {
            "consistency_rules": rules.consistency_rules.len(),
            "schemas": rules.schemas.len(),
            "state_machines": rules.state_transitions.len(),
            "system_prompt": !rules.system_prompt.is_empty(),
        },
        "explanations": explanations.iter().map(|e| serde_json::json!({
            "rule_id": e.rule_id,
            "rule_type": e.rule_type,
            "confidence": e.confidence,
            "reasoning": e.reasoning,
        })).collect::<Vec<_>>(),
        "total_explanations": explanations.len(),
    });

    Json(response).into_response()
}

#[cfg(feature = "data-faker")]
/// Extract a YAML specification from LLM output.
///
/// Prefers an explicitly tagged ```` ```yaml ```` fence, then any generic
/// ```` ``` ```` fence. The remainder of the opening-fence line is discarded,
/// so unexpected info strings (```` ```yml ````, ```` ```json ````) no longer
/// leak into the returned spec — a bug in the previous implementation, which
/// only skipped the literal `"```yaml"` prefix. When no closed fence exists,
/// the whole text is returned trimmed (many models reply with the bare
/// document; the old explicit `openapi:`/`asyncapi:` prefix check returned
/// exactly the same value as this fallback, so it is folded in).
fn extract_yaml_spec(text: &str) -> String {
    for fence in ["```yaml", "```"] {
        if let Some(start) = text.find(fence) {
            let after = &text[start + fence.len()..];
            // Drop the rest of the opening-fence line (info string included).
            let content = match after.find('\n') {
                Some(nl) => &after[nl + 1..],
                None => after.trim_start(),
            };
            if let Some(end) = content.find("```") {
                return content[..end].trim().to_string();
            }
            // Unclosed fence: fall through and treat the reply as bare text.
        }
    }
    text.trim().to_string()
}

/// Extract GraphQL schema from text content
///
/// Prefers an explicitly tagged ```` ```graphql ```` fence, then any generic
/// ```` ``` ```` fence. The remainder of the opening-fence line is discarded,
/// so other info strings (```` ```gql ````, ```` ```sdl ````) no longer leak
/// into the returned schema — previously only the literal `"```graphql"`
/// prefix was skipped. When no closed fence exists, the whole text is
/// returned trimmed (the old `type `/`schema ` prefix check returned exactly
/// the same value as this fallback, so it is folded in).
#[cfg(feature = "data-faker")]
fn extract_graphql_schema(text: &str) -> String {
    for fence in ["```graphql", "```"] {
        if let Some(start) = text.find(fence) {
            let after = &text[start + fence.len()..];
            // Drop the rest of the opening-fence line (info string included).
            let content = match after.find('\n') {
                Some(nl) => &after[nl + 1..],
                None => after.trim_start(),
            };
            if let Some(end) = content.find("```") {
                return content[..end].trim().to_string();
            }
            // Unclosed fence: fall through and treat the reply as bare SDL.
        }
    }
    text.trim().to_string()
}

// ========== Chaos Engineering Management ==========

/// Get current chaos engineering configuration
///
/// With the `chaos` feature enabled and a chaos API state attached, returns
/// the live config (each section serialized, or `null` if unset / failing
/// to serialize). In every other case — no chaos state, or feature off —
/// responds with the same all-disabled default shape, so clients always see
/// a consistent payload. Parameter is named `_state` because it is unused
/// when the feature is compiled out.
pub(crate) async fn get_chaos_config(State(_state): State<ManagementState>) -> impl IntoResponse {
    #[cfg(feature = "chaos")]
    {
        if let Some(chaos_state) = &_state.chaos_api_state {
            let config = chaos_state.config.read().await;
            // Convert ChaosConfig to JSON response format
            Json(serde_json::json!({
                "enabled": config.enabled,
                "latency": config.latency.as_ref().map(|l| serde_json::to_value(l).unwrap_or(serde_json::Value::Null)),
                "fault_injection": config.fault_injection.as_ref().map(|f| serde_json::to_value(f).unwrap_or(serde_json::Value::Null)),
                "rate_limit": config.rate_limit.as_ref().map(|r| serde_json::to_value(r).unwrap_or(serde_json::Value::Null)),
                "traffic_shaping": config.traffic_shaping.as_ref().map(|t| serde_json::to_value(t).unwrap_or(serde_json::Value::Null)),
            }))
            .into_response()
        } else {
            // Chaos API not available, return default
            Json(serde_json::json!({
                "enabled": false,
                "latency": null,
                "fault_injection": null,
                "rate_limit": null,
                "traffic_shaping": null,
            }))
            .into_response()
        }
    }
    #[cfg(not(feature = "chaos"))]
    {
        // Chaos feature not enabled
        Json(serde_json::json!({
            "enabled": false,
            "latency": null,
            "fault_injection": null,
            "rate_limit": null,
            "traffic_shaping": null,
        }))
        .into_response()
    }
}

/// Request to update chaos configuration
///
/// Partial-update body: every field is optional, and a `None` (omitted)
/// field leaves the corresponding current setting untouched. Section
/// payloads are free-form JSON, deserialized into the concrete chaos config
/// types by the handler.
#[derive(Debug, Deserialize)]
pub struct ChaosConfigUpdate {
    /// Whether to enable chaos engineering
    pub enabled: Option<bool>,
    /// Latency configuration (deserialized as `LatencyConfig`)
    pub latency: Option<serde_json::Value>,
    /// Fault injection configuration (deserialized as `FaultInjectionConfig`)
    pub fault_injection: Option<serde_json::Value>,
    /// Rate limiting configuration (deserialized as `RateLimitConfig`)
    pub rate_limit: Option<serde_json::Value>,
    /// Traffic shaping configuration (deserialized as `TrafficShapingConfig`)
    pub traffic_shaping: Option<serde_json::Value>,
}

/// Update chaos engineering configuration
///
/// Applies a partial update: only fields present in the request body are
/// changed. All provided sub-configs are validated *before* any state is
/// mutated, so a malformed payload is rejected with 400 Bad Request
/// (including per-field error details) instead of being silently dropped
/// while the handler still reports success. This also makes the update
/// all-or-nothing: a payload with one bad section no longer leaves the
/// shared config partially updated.
pub(crate) async fn update_chaos_config(
    State(_state): State<ManagementState>,
    Json(_config_update): Json<ChaosConfigUpdate>,
) -> impl IntoResponse {
    #[cfg(feature = "chaos")]
    {
        if let Some(chaos_state) = &_state.chaos_api_state {
            use mockforge_chaos::config::{
                FaultInjectionConfig, LatencyConfig, RateLimitConfig, TrafficShapingConfig,
            };

            // Parse every provided sub-config up front, collecting errors
            // instead of silently ignoring them.
            let mut errors: Vec<String> = Vec::new();

            let latency = match _config_update.latency {
                Some(json) => serde_json::from_value::<LatencyConfig>(json)
                    .map_err(|e| errors.push(format!("latency: {}", e)))
                    .ok(),
                None => None,
            };
            let fault_injection = match _config_update.fault_injection {
                Some(json) => serde_json::from_value::<FaultInjectionConfig>(json)
                    .map_err(|e| errors.push(format!("fault_injection: {}", e)))
                    .ok(),
                None => None,
            };
            let rate_limit = match _config_update.rate_limit {
                Some(json) => serde_json::from_value::<RateLimitConfig>(json)
                    .map_err(|e| errors.push(format!("rate_limit: {}", e)))
                    .ok(),
                None => None,
            };
            let traffic_shaping = match _config_update.traffic_shaping {
                Some(json) => serde_json::from_value::<TrafficShapingConfig>(json)
                    .map_err(|e| errors.push(format!("traffic_shaping: {}", e)))
                    .ok(),
                None => None,
            };

            // Reject the whole request before touching shared state if any
            // sub-config failed to deserialize.
            if !errors.is_empty() {
                return (
                    StatusCode::BAD_REQUEST,
                    Json(serde_json::json!({
                        "success": false,
                        "error": "Invalid chaos configuration",
                        "details": errors,
                    })),
                )
                    .into_response();
            }

            let mut config = chaos_state.config.write().await;

            // Apply only the fields that were present in the request.
            if let Some(enabled) = _config_update.enabled {
                config.enabled = enabled;
            }
            if let Some(latency) = latency {
                config.latency = Some(latency);
            }
            if let Some(fault) = fault_injection {
                config.fault_injection = Some(fault);
            }
            if let Some(rate) = rate_limit {
                config.rate_limit = Some(rate);
            }
            if let Some(traffic) = traffic_shaping {
                config.traffic_shaping = Some(traffic);
            }

            // Release the write lock; the middleware reads the shared config
            // on the next request, so no explicit reinitialization is needed.
            drop(config);

            info!("Chaos configuration updated successfully");
            Json(serde_json::json!({
                "success": true,
                "message": "Chaos configuration updated and applied"
            }))
            .into_response()
        } else {
            (
                StatusCode::SERVICE_UNAVAILABLE,
                Json(serde_json::json!({
                    "success": false,
                    "error": "Chaos API not available",
                    "message": "Chaos engineering is not enabled or configured"
                })),
            )
                .into_response()
        }
    }
    #[cfg(not(feature = "chaos"))]
    {
        (
            StatusCode::NOT_IMPLEMENTED,
            Json(serde_json::json!({
                "success": false,
                "error": "Chaos feature not enabled",
                "message": "Chaos engineering feature is not compiled into this build"
            })),
        )
            .into_response()
    }
}

// ========== Network Profile Management ==========

/// List available network profiles
///
/// Serves the built-in profile catalog as
/// `{ "profiles": [{ "name": ..., "description": ... }, ...] }`.
pub(crate) async fn list_network_profiles() -> impl IntoResponse {
    use mockforge_chaos::core_network_profiles::NetworkProfileCatalog;

    let catalog = NetworkProfileCatalog::default();
    let listing = catalog.list_profiles_with_description();

    // Turn each (name, description) pair into a JSON object.
    let mut entries: Vec<serde_json::Value> = Vec::new();
    for (profile_name, profile_description) in listing.iter() {
        let entry = serde_json::json!({
            "name": profile_name,
            "description": profile_description,
        });
        entries.push(entry);
    }

    Json(serde_json::json!({ "profiles": entries })).into_response()
}

/// Request body for applying a network profile.
///
/// Deserialized from JSON: `{ "profile_name": "<catalog entry name>" }`.
#[derive(Debug, Deserialize)]
pub struct ApplyNetworkProfileRequest {
    /// Name of the network profile to apply; the handler returns 404 when
    /// no catalog entry with this name exists.
    pub profile_name: String,
}

/// Apply a network profile
///
/// Looks up the named profile in the built-in catalog and applies its
/// traffic-shaping settings to the server configuration (when present in
/// state) and to the live chaos API state (when the `chaos` feature is
/// compiled in). Returns 404 when the profile name is not in the catalog.
///
/// Fix: the bytes/sec → bits/sec conversion now uses `saturating_mul(8)`
/// instead of a plain `* 8`, which silently wraps in release builds for
/// very large bandwidth limits.
pub(crate) async fn apply_network_profile(
    State(state): State<ManagementState>,
    Json(request): Json<ApplyNetworkProfileRequest>,
) -> impl IntoResponse {
    use mockforge_chaos::core_network_profiles::NetworkProfileCatalog;

    let catalog = NetworkProfileCatalog::default();
    if let Some(profile) = catalog.get(&request.profile_name) {
        // Apply profile to server configuration if available
        // NetworkProfile contains latency and traffic_shaping configs
        if let Some(server_config) = &state.server_config {
            let mut config = server_config.write().await;

            // Apply network profile's traffic shaping to core config
            use mockforge_core::config::NetworkShapingConfig;

            // Convert NetworkProfile's TrafficShapingConfig to NetworkShapingConfig
            // NetworkProfile uses mockforge_core::traffic_shaping::TrafficShapingConfig
            // which has bandwidth and burst_loss fields
            let network_shaping = NetworkShapingConfig {
                enabled: profile.traffic_shaping.bandwidth.enabled
                    || profile.traffic_shaping.burst_loss.enabled,
                // Convert bytes/sec to bits/sec; saturate rather than wrap on overflow
                bandwidth_limit_bps: profile
                    .traffic_shaping
                    .bandwidth
                    .max_bytes_per_sec
                    .saturating_mul(8),
                packet_loss_percent: profile.traffic_shaping.burst_loss.loss_rate_during_burst,
                max_connections: 1000, // Default value
            };

            // Update chaos config if it exists, or create it
            // Chaos config is in observability.chaos, not core.chaos
            if let Some(ref mut chaos) = config.observability.chaos {
                chaos.traffic_shaping = Some(network_shaping);
            } else {
                // Create minimal chaos config with traffic shaping
                use mockforge_core::config::ChaosEngConfig;
                config.observability.chaos = Some(ChaosEngConfig {
                    enabled: true,
                    latency: None,
                    fault_injection: None,
                    rate_limit: None,
                    traffic_shaping: Some(network_shaping),
                    scenario: None,
                });
            }

            info!("Network profile '{}' applied to server configuration", request.profile_name);
        } else {
            warn!("Server configuration not available in ManagementState - profile applied but not persisted");
        }

        // Also update chaos API state if available
        #[cfg(feature = "chaos")]
        {
            if let Some(chaos_state) = &state.chaos_api_state {
                use mockforge_chaos::config::TrafficShapingConfig;

                let mut chaos_config = chaos_state.config.write().await;
                // Apply profile's traffic shaping to chaos API state
                let chaos_traffic_shaping = TrafficShapingConfig {
                    enabled: profile.traffic_shaping.bandwidth.enabled
                        || profile.traffic_shaping.burst_loss.enabled,
                    // Same bytes/sec -> bits/sec conversion, overflow-safe
                    bandwidth_limit_bps: profile
                        .traffic_shaping
                        .bandwidth
                        .max_bytes_per_sec
                        .saturating_mul(8),
                    packet_loss_percent: profile.traffic_shaping.burst_loss.loss_rate_during_burst,
                    max_connections: 0,
                    connection_timeout_ms: 30000,
                };
                chaos_config.traffic_shaping = Some(chaos_traffic_shaping);
                chaos_config.enabled = true; // Enable chaos when applying a profile
                drop(chaos_config);
                info!("Network profile '{}' applied to chaos API state", request.profile_name);
            }
        }

        Json(serde_json::json!({
            "success": true,
            "message": format!("Network profile '{}' applied", request.profile_name),
            "profile": {
                "name": profile.name,
                "description": profile.description,
            }
        }))
        .into_response()
    } else {
        (
            StatusCode::NOT_FOUND,
            Json(serde_json::json!({
                "error": "Profile not found",
                "message": format!("Network profile '{}' not found", request.profile_name)
            })),
        )
            .into_response()
    }
}