mockforge-registry-server 0.3.131

Plugin registry server for MockForge
//! Cloud AI Studio handlers.
//!
//! Endpoints under `/api/v1/ai-studio/*`. Every handler runs the same
//! pipeline:
//!
//!   resolve_org_context ─▶ load_byok_config ─▶ pick_provider
//!     ─▶ check_ai_quota ─▶ build LlmCall ─▶ call_llm
//!     ─▶ record_ai_usage ─▶ return content + UsageMeta
//!
//! That pipeline lives in `run_completion`. Public handlers just
//! build a `PromptInputs` and shape the response.
//!
//! See `docs/cloud/CLOUD_AI_STUDIO_DESIGN.md` for the full design.
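//!
//! As a quick sketch of the wire shape (values illustrative), a successful
//! `/chat` call returns the model text plus the flattened [`UsageMeta`]
//! fields:
//!
//! ```json
//! {
//!   "content": "Hi! How can I help?",
//!   "provider": "platform",
//!   "tokens_used": 42,
//!   "tokens_used_this_period": 1042,
//!   "tokens_limit": 100000
//! }
//! ```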

use axum::{extract::State, http::HeaderMap, Json};
use mockforge_registry_core::models::{BYOKConfig, Plan};
use serde::{Deserialize, Serialize};

use crate::{
    ai::{
        call_llm, check_ai_quota, pick_provider, record_ai_usage, LlmCall, Provider,
        ProviderSelection,
    },
    error::{ApiError, ApiResult},
    handlers::settings::decrypt_api_key,
    middleware::{resolve_org_context, AuthUser},
    AppState,
};

const DEFAULT_TEMPERATURE: f64 = 0.7;
const DEFAULT_MAX_TOKENS: u32 = 1024;
const DEFAULT_SYSTEM_PROMPT: &str = "You are a helpful AI assistant integrated into MockForge.";

/// Common usage metadata embedded in every AI Studio response so the UI
/// can render the BYOK/platform badge and the quota meter without extra
/// round-trips.
#[derive(Debug, Serialize)]
pub struct UsageMeta {
    /// Which key paid for this call.
    pub provider: &'static str,
    /// Tokens used by this single call (prompt + completion).
    pub tokens_used: u64,
    /// Updated monthly counter, for the UI's quota meter.
    pub tokens_used_this_period: i64,
    /// Monthly platform-token limit. `-1` means unlimited.
    pub tokens_limit: i64,
}

/// Internal: what each handler hands to `run_completion`. `pub(crate)`
/// so the sibling mockai handler module can reuse the same pipeline.
pub(crate) struct PromptInputs {
    pub(crate) system: String,
    pub(crate) user: String,
    /// Optional model override; falls back to BYOK / platform default.
    pub(crate) model: Option<String>,
    pub(crate) temperature: f64,
    pub(crate) max_tokens: u32,
}

// --- /chat ------------------------------------------------------------------

#[derive(Debug, Deserialize)]
pub struct ChatRequest {
    /// User prompt; required.
    pub prompt: String,
    #[serde(default)]
    pub system: Option<String>,
    #[serde(default)]
    pub model: Option<String>,
    #[serde(default)]
    pub temperature: Option<f64>,
    #[serde(default)]
    pub max_tokens: Option<u32>,
}

#[derive(Debug, Serialize)]
pub struct ChatResponse {
    pub content: String,
    #[serde(flatten)]
    pub meta: UsageMeta,
}

/// `POST /api/v1/ai-studio/chat`
pub async fn chat(
    State(state): State<AppState>,
    AuthUser(user_id): AuthUser,
    headers: HeaderMap,
    Json(request): Json<ChatRequest>,
) -> ApiResult<Json<ChatResponse>> {
    if request.prompt.trim().is_empty() {
        return Err(ApiError::InvalidRequest("prompt must not be empty".into()));
    }

    let inputs = PromptInputs {
        system: request.system.unwrap_or_else(|| DEFAULT_SYSTEM_PROMPT.into()),
        user: request.prompt,
        model: request.model,
        temperature: request.temperature.unwrap_or(DEFAULT_TEMPERATURE),
        max_tokens: request.max_tokens.unwrap_or(DEFAULT_MAX_TOKENS),
    };

    let (content, meta) = run_completion(&state, user_id, &headers, inputs).await?;
    Ok(Json(ChatResponse { content, meta }))
}

// --- /generate-openapi ------------------------------------------------------

#[derive(Debug, Deserialize)]
pub struct GenerateOpenApiRequest {
    /// Natural-language description of the API to mock.
    pub description: String,
    /// Optional title for the generated spec.
    #[serde(default)]
    pub title: Option<String>,
    #[serde(default)]
    pub model: Option<String>,
}

#[derive(Debug, Serialize)]
pub struct GenerateOpenApiResponse {
    /// Raw text returned by the LLM (useful for debugging).
    pub content: String,
    /// Best-effort parsed OpenAPI 3 document. `None` if the model
    /// response wasn't valid JSON; the UI can fall back to `content`.
    pub spec: Option<serde_json::Value>,
    #[serde(flatten)]
    pub meta: UsageMeta,
}

/// `POST /api/v1/ai-studio/generate-openapi`
pub async fn generate_openapi(
    State(state): State<AppState>,
    AuthUser(user_id): AuthUser,
    headers: HeaderMap,
    Json(request): Json<GenerateOpenApiRequest>,
) -> ApiResult<Json<GenerateOpenApiResponse>> {
    if request.description.trim().is_empty() {
        return Err(ApiError::InvalidRequest("description must not be empty".into()));
    }

    let title = request.title.as_deref().unwrap_or("Generated API");
    let inputs = PromptInputs {
        system: format!(
            "You are an expert API designer. Generate a complete, valid OpenAPI 3.0 \
             specification in JSON for the API described by the user. Include realistic \
             paths, request/response schemas, examples, and at least one error response \
             per endpoint. Output ONLY the JSON document, no prose, no markdown fences. \
             Use `{title}` as the spec's `info.title` unless a different title is in the \
             user's description."
        ),
        user: request.description,
        model: request.model,
        // Lower temperature for structured output: we want valid JSON, not creativity.
        temperature: 0.2,
        // OpenAPI specs can be large; raise the cap.
        max_tokens: 4096,
    };

    let (content, meta) = run_completion(&state, user_id, &headers, inputs).await?;
    let spec = extract_json_payload(&content);
    Ok(Json(GenerateOpenApiResponse {
        content,
        spec,
        meta,
    }))
}

// --- /explain-rule ----------------------------------------------------------

#[derive(Debug, Deserialize)]
pub struct ExplainRuleRequest {
    /// Identifier (e.g. rule name or path) — used in the prompt for context.
    pub rule_id: String,
    /// Rule definition (JSON). Anything serializable; the prompt embeds it as-is.
    pub definition: serde_json::Value,
    /// Optional extra context (e.g. surrounding workspace name).
    #[serde(default)]
    pub context: Option<String>,
    #[serde(default)]
    pub model: Option<String>,
}

#[derive(Debug, Serialize)]
pub struct ExplainRuleResponse {
    /// Plain-language explanation of what the rule does and when it fires.
    pub explanation: String,
    #[serde(flatten)]
    pub meta: UsageMeta,
}

/// `POST /api/v1/ai-studio/explain-rule`
pub async fn explain_rule(
    State(state): State<AppState>,
    AuthUser(user_id): AuthUser,
    headers: HeaderMap,
    Json(request): Json<ExplainRuleRequest>,
) -> ApiResult<Json<ExplainRuleResponse>> {
    if request.rule_id.trim().is_empty() {
        return Err(ApiError::InvalidRequest("rule_id must not be empty".into()));
    }
    let definition_str = serde_json::to_string_pretty(&request.definition).map_err(|e| {
        ApiError::InvalidRequest(format!("definition must be serializable JSON: {e}"))
    })?;

    let context_blurb = request
        .context
        .as_ref()
        .map(|c| format!("\n\nContext: {c}"))
        .unwrap_or_default();

    let inputs = PromptInputs {
        system: "You are a senior engineer explaining MockForge mock rules to a junior \
                 teammate. Be specific: when does this rule fire, what does it return, \
                 and what edge cases does it cover? Keep it under 200 words and avoid \
                 marketing language."
            .into(),
        user: format!(
            "Rule id: {id}\n\nDefinition:\n```json\n{def}\n```{ctx}",
            id = request.rule_id,
            def = definition_str,
            ctx = context_blurb,
        ),
        model: request.model,
        temperature: 0.4,
        max_tokens: 800,
    };

    let (explanation, meta) = run_completion(&state, user_id, &headers, inputs).await?;
    Ok(Json(ExplainRuleResponse { explanation, meta }))
}

// --- /quota -----------------------------------------------------------------

/// What the UI needs to render the AI provider/quota banner before the
/// user has issued a prompt. Mirrors the `UsageMeta` shape embedded in
/// post-call responses, plus a convenience flag for "can I call right now?".
#[derive(Debug, Serialize)]
pub struct QuotaResponse {
    /// 'byok' | 'platform' | 'disabled'
    pub provider: &'static str,
    /// Tokens used this billing period.
    pub tokens_used_this_period: i64,
    /// Monthly platform-token limit. `-1` means unlimited.
    pub tokens_limit: i64,
    /// Convenience flag: true if a chat call right now would clear the
    /// quota check. False means quota exhausted (Platform) or AI fully
    /// disabled (Free without BYOK).
    pub call_allowed: bool,
}

/// `GET /api/v1/ai-studio/quota`
///
/// Read-only quota snapshot. Doesn't increment any meter, doesn't call
/// the LLM. UIs poll this to render "X of Y tokens remaining" without
/// having to issue a doomed prompt to discover the cap.
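///
/// A sketch of the response body (values illustrative):
///
/// ```json
/// {
///   "provider": "byok",
///   "tokens_used_this_period": 0,
///   "tokens_limit": -1,
///   "call_allowed": true
/// }
/// ```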
pub async fn quota(
    State(state): State<AppState>,
    AuthUser(user_id): AuthUser,
    headers: HeaderMap,
) -> ApiResult<Json<QuotaResponse>> {
    let org_ctx = resolve_org_context(&state, user_id, &headers, None)
        .await
        .map_err(|_| ApiError::InvalidRequest("Organization not found".into()))?;
    let byok = load_byok_config(&state, org_ctx.org_id).await?;
    let is_paid_plan = matches!(org_ctx.org.plan(), Plan::Pro | Plan::Team);
    let provider = pick_provider(is_paid_plan, byok);
    let selection = provider.selection();
    let q = check_ai_quota(&state, &org_ctx.org, selection).await?;

    Ok(Json(QuotaResponse {
        provider: match selection {
            ProviderSelection::Byok => "byok",
            ProviderSelection::Platform => "platform",
            ProviderSelection::Disabled => "disabled",
        },
        tokens_used_this_period: q.used,
        tokens_limit: q.limit,
        call_allowed: q.allowed,
    }))
}

// --- /voice/* ---------------------------------------------------------------
//
// Cloud equivalents of the local-only `/api/v2/voice/*` endpoints. Each
// is a thin wrapper over `run_completion` with a fixed system prompt;
// they exist so the local Voice page (and its child components) can
// dispatch through `aiStudioApi` when `isCloudMode()` is true.
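//
// As an illustration of the contract (field names per the system prompt
// below; values made up), `/voice/process` aims to hand back an `intent`
// payload shaped like:
//
//   {
//     "intent": "create_endpoint",
//     "confidence": 0.92,
//     "parsed": { "method": "GET", "path": "/users" }
//   }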

#[derive(Debug, Deserialize)]
pub struct VoiceProcessRequest {
    /// User-spoken / typed natural-language instruction.
    pub command: String,
    #[serde(default)]
    pub model: Option<String>,
}

#[derive(Debug, Serialize)]
pub struct VoiceProcessResponse {
    /// LLM-derived JSON summarizing the parsed intent. Best-effort;
    /// `None` if the model didn't return parseable JSON.
    pub intent: Option<serde_json::Value>,
    /// Raw model output, always returned for fallback rendering.
    pub content: String,
    #[serde(flatten)]
    pub meta: UsageMeta,
}

/// `POST /api/v1/ai-studio/voice/process`
pub async fn voice_process(
    State(state): State<AppState>,
    AuthUser(user_id): AuthUser,
    headers: HeaderMap,
    Json(request): Json<VoiceProcessRequest>,
) -> ApiResult<Json<VoiceProcessResponse>> {
    if request.command.trim().is_empty() {
        return Err(ApiError::InvalidRequest("command must not be empty".into()));
    }
    let inputs = PromptInputs {
        system: "You are a voice-command parser for MockForge. The user describes a mock \
                 they want to build. Output a single JSON object with fields: intent (one of \
                 \"create_endpoint\" | \"modify_response\" | \"add_scenario\" | \"unknown\"), \
                 confidence (0..1), and parsed (any object summarizing the parsed details). \
                 Return ONLY the JSON, no prose."
            .into(),
        user: request.command,
        model: request.model,
        temperature: 0.2,
        max_tokens: 800,
    };
    let (content, meta) = run_completion(&state, user_id, &headers, inputs).await?;
    let intent = extract_json_payload(&content);
    Ok(Json(VoiceProcessResponse {
        intent,
        content,
        meta,
    }))
}

#[derive(Debug, Deserialize)]
pub struct VoiceTranspileHookRequest {
    /// Natural-language description of the hook behavior.
    pub description: String,
    #[serde(default)]
    pub model: Option<String>,
}

#[derive(Debug, Serialize)]
pub struct VoiceTranspileHookResponse {
    /// Generated JavaScript hook source.
    pub hook_source: String,
    /// Raw model output (same as hook_source unless the model added prose).
    pub content: String,
    #[serde(flatten)]
    pub meta: UsageMeta,
}

/// `POST /api/v1/ai-studio/voice/transpile-hook`
pub async fn voice_transpile_hook(
    State(state): State<AppState>,
    AuthUser(user_id): AuthUser,
    headers: HeaderMap,
    Json(request): Json<VoiceTranspileHookRequest>,
) -> ApiResult<Json<VoiceTranspileHookResponse>> {
    if request.description.trim().is_empty() {
        return Err(ApiError::InvalidRequest("description must not be empty".into()));
    }
    let inputs = PromptInputs {
        system: "You write MockForge JavaScript hooks. Given the user's description, output \
                 a single JS function body (no markdown fences, no surrounding prose) that \
                 implements the described behavior. The function receives `(req, res, ctx)` \
                 and may modify `res.status`, `res.headers`, `res.body`."
            .into(),
        user: request.description,
        model: request.model,
        temperature: 0.3,
        max_tokens: 1024,
    };
    let (content, meta) = run_completion(&state, user_id, &headers, inputs).await?;
    let hook_source = strip_code_fences(&content);
    Ok(Json(VoiceTranspileHookResponse {
        hook_source,
        content,
        meta,
    }))
}

#[derive(Debug, Deserialize)]
pub struct VoiceCreateScenarioRequest {
    /// Natural-language description of the desired workspace scenario.
    pub description: String,
    /// Workspace context (name / id) that the model can reference.
    #[serde(default)]
    pub workspace_context: Option<String>,
    #[serde(default)]
    pub model: Option<String>,
}

#[derive(Debug, Serialize)]
pub struct VoiceCreateScenarioResponse {
    /// Best-effort parsed scenario JSON. `None` if model output wasn't JSON.
    pub scenario: Option<serde_json::Value>,
    /// Raw model output, always returned for fallback rendering.
    pub content: String,
    #[serde(flatten)]
    pub meta: UsageMeta,
}

/// `POST /api/v1/ai-studio/voice/create-workspace-scenario`
pub async fn voice_create_workspace_scenario(
    State(state): State<AppState>,
    AuthUser(user_id): AuthUser,
    headers: HeaderMap,
    Json(request): Json<VoiceCreateScenarioRequest>,
) -> ApiResult<Json<VoiceCreateScenarioResponse>> {
    if request.description.trim().is_empty() {
        return Err(ApiError::InvalidRequest("description must not be empty".into()));
    }
    let context_blurb = request
        .workspace_context
        .as_ref()
        .map(|c| format!("\n\nWorkspace context: {c}"))
        .unwrap_or_default();
    let inputs = PromptInputs {
        system: "You build MockForge workspace scenarios. Output a single JSON object with \
                 fields: name, description, steps (array of {action, target, value} objects). \
                 Return ONLY the JSON, no prose, no markdown fences."
            .into(),
        user: format!("{}{}", request.description, context_blurb),
        model: request.model,
        temperature: 0.3,
        max_tokens: 1500,
    };
    let (content, meta) = run_completion(&state, user_id, &headers, inputs).await?;
    let scenario = extract_json_payload(&content);
    Ok(Json(VoiceCreateScenarioResponse {
        scenario,
        content,
        meta,
    }))
}

// --- shared pipeline --------------------------------------------------------

/// Runs the full provider-routing + quota + LLM-call + metering pipeline
/// for one prompt. Returns the model's raw text plus the usage metadata
/// every AI Studio response embeds. `pub(crate)` so the mockai handler
/// module can reuse the same pipeline.
pub(crate) async fn run_completion(
    state: &AppState,
    user_id: uuid::Uuid,
    headers: &HeaderMap,
    prompt: PromptInputs,
) -> ApiResult<(String, UsageMeta)> {
    let org_ctx = resolve_org_context(state, user_id, headers, None)
        .await
        .map_err(|_| ApiError::InvalidRequest("Organization not found".into()))?;
    run_completion_for_org(state, &org_ctx.org, prompt).await
}

/// Same pipeline as [`run_completion`] but skips the user→org resolution
/// and operates on a known [`Organization`]. Internal callers (e.g. the
/// runner-facing `/api/v1/internal/contract-diff/score` endpoint, where
/// the auth model is a shared internal token rather than a user
/// session) use this entry point.
pub(crate) async fn run_completion_for_org(
    state: &AppState,
    org: &mockforge_registry_core::models::Organization,
    prompt: PromptInputs,
) -> ApiResult<(String, UsageMeta)> {
    // 1. BYOK lookup.
    let byok = load_byok_config(state, org.id).await?;

    // 2. Provider routing (pure).
    let is_paid_plan = matches!(org.plan(), Plan::Pro | Plan::Team);
    let provider = pick_provider(is_paid_plan, byok);

    // 3. Pre-call quota check.
    let quota = check_ai_quota(state, org, provider.selection()).await?;
    if !quota.allowed {
        return Err(quota.into_error());
    }

    // 4. Build LLM call.
    let selection = provider.selection();
    let llm_call = build_llm_call(&provider, prompt)?;

    // 5. Call.
    let result = call_llm(llm_call).await?;

    // 6. Meter (Platform only; BYOK skips the platform quota).
    let total_tokens = result.total_tokens();
    record_ai_usage(state, org.id, selection, total_tokens as i64).await?;

    // 7. Build response metadata.
    let billed_now = if matches!(selection, ProviderSelection::Platform) {
        total_tokens as i64
    } else {
        0
    };

    let meta = UsageMeta {
        provider: match selection {
            ProviderSelection::Byok => "byok",
            ProviderSelection::Platform => "platform",
            ProviderSelection::Disabled => "disabled", // unreachable: quota check above
        },
        tokens_used: total_tokens,
        tokens_used_this_period: quota.used + billed_now,
        tokens_limit: quota.limit,
    };

    Ok((result.content, meta))
}

/// Read the org's BYOK config, returning `None` if missing or disabled.
async fn load_byok_config(state: &AppState, org_id: uuid::Uuid) -> ApiResult<Option<BYOKConfig>> {
    let setting = state.store.get_org_setting(org_id, "byok").await?;
    let Some(setting) = setting else {
        return Ok(None);
    };

    let cfg: BYOKConfig = match serde_json::from_value(setting.setting_value) {
        Ok(c) => c,
        Err(_) => return Ok(None), // tolerate legacy/malformed rows by treating as no-BYOK
    };

    if !cfg.enabled || cfg.api_key.is_empty() {
        return Ok(None);
    }
    Ok(Some(cfg))
}

/// Translate the provider decision + prompt into an `LlmCall`.
fn build_llm_call(provider: &Provider, prompt: PromptInputs) -> ApiResult<LlmCall> {
    match provider {
        Provider::Disabled => Err(ApiError::ResourceLimitExceeded(
            "AI is not available — add a BYOK key or upgrade your plan".into(),
        )),
        Provider::Byok(cfg) => {
            let api_key = decrypt_api_key(&cfg.api_key)?;
            Ok(LlmCall {
                provider: cfg.provider.clone(),
                model: prompt
                    .model
                    .or_else(|| cfg.model.clone())
                    .unwrap_or_else(|| "gpt-4o-mini".into()),
                api_key,
                base_url: cfg.base_url.clone(),
                system: prompt.system,
                user: prompt.user,
                temperature: prompt.temperature,
                max_tokens: prompt.max_tokens,
            })
        }
        Provider::Platform => {
            let api_key = std::env::var("MOCKFORGE_PLATFORM_LLM_API_KEY").map_err(|_| {
                ApiError::Internal(anyhow::anyhow!(
                    "Platform LLM not configured: MOCKFORGE_PLATFORM_LLM_API_KEY missing"
                ))
            })?;
            let provider_name = std::env::var("MOCKFORGE_PLATFORM_LLM_PROVIDER")
                .unwrap_or_else(|_| "openai".into());
            let default_model = std::env::var("MOCKFORGE_PLATFORM_LLM_MODEL")
                .unwrap_or_else(|_| "gpt-4o-mini".into());
            let endpoint = std::env::var("MOCKFORGE_PLATFORM_LLM_ENDPOINT").ok();

            Ok(LlmCall {
                provider: provider_name,
                model: prompt.model.unwrap_or(default_model),
                api_key,
                base_url: endpoint,
                system: prompt.system,
                user: prompt.user,
                temperature: prompt.temperature,
                max_tokens: prompt.max_tokens,
            })
        }
    }
}

/// Best-effort JSON extraction. Returns `None` if `text` doesn't look like
/// JSON. Tolerates a single ```json fence wrapper because models love
/// adding them despite system-prompt instructions.
pub(crate) fn extract_json_payload(text: &str) -> Option<serde_json::Value> {
    let trimmed = text.trim();
    let stripped = trimmed
        .strip_prefix("```json")
        .or_else(|| trimmed.strip_prefix("```"))
        .map(|s| s.trim_start())
        .unwrap_or(trimmed);
    let stripped = stripped.strip_suffix("```").map(str::trim_end).unwrap_or(stripped);

    serde_json::from_str(stripped).ok()
}

/// Strip surrounding ```...``` markdown code fences (with or without a
/// language tag). Used for hook source extraction where the model
/// sometimes wraps the JS body despite system-prompt instructions.
fn strip_code_fences(text: &str) -> String {
    let trimmed = text.trim();
    let after_open = trimmed
        .strip_prefix("```javascript")
        .or_else(|| trimmed.strip_prefix("```js"))
        .or_else(|| trimmed.strip_prefix("```"))
        .map(|s| s.trim_start_matches('\n'))
        .unwrap_or(trimmed);
    let stripped = after_open.strip_suffix("```").map(str::trim_end).unwrap_or(after_open);
    stripped.to_string()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn extract_json_handles_plain() {
        let v = extract_json_payload(r#"{"openapi": "3.0.0"}"#).unwrap();
        assert_eq!(v["openapi"], "3.0.0");
    }

    #[test]
    fn extract_json_handles_fenced_block() {
        let v = extract_json_payload("```json\n{\"openapi\": \"3.0.0\"}\n```").unwrap();
        assert_eq!(v["openapi"], "3.0.0");
    }

    #[test]
    fn extract_json_handles_unfenced_with_whitespace() {
        let v = extract_json_payload("\n  {\"x\": 1}  \n").unwrap();
        assert_eq!(v["x"], 1);
    }

    #[test]
    fn extract_json_returns_none_for_prose() {
        assert!(extract_json_payload("Sure, here's the spec…").is_none());
    }
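
    // Sketch tests for `strip_code_fences`, mirroring the
    // `extract_json_payload` suite above; they exercise exactly the
    // fence-unwrapping behavior the function's doc comment describes.

    #[test]
    fn strip_fences_unwraps_language_tagged_block() {
        let s = strip_code_fences("```javascript\nres.status = 200;\n```");
        assert_eq!(s, "res.status = 200;");
    }

    #[test]
    fn strip_fences_passes_plain_text_through() {
        let s = strip_code_fences("res.status = 200;");
        assert_eq!(s, "res.status = 200;");
    }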
}