// llm-toolkit 0.63.1 — a low-level, unopinionated Rust toolkit for the LLM
// "last mile" problem.
// (The long run of bare digits that followed this header was docs.rs
// page-scrape residue — a line-number gutter — and has been removed.)
//! Prompt definitions for orchestrator LLM interactions.
//!
//! This module uses llm-toolkit's own prompt generation capabilities (the
//! `ToPrompt` derive with minijinja templates) to build the prompts the
//! orchestrator sends to the LLM.

use serde::Serialize;

use crate::ToPrompt;

/// Request for generating an execution strategy from a blueprint and available agents.
///
/// Rendered to a prompt by the `ToPrompt` derive using the minijinja template
/// below. Optional sections are guarded with `{% if %}`: empty strings are
/// falsy in minijinja, so `blueprint_graph` / `user_context` / a `None`
/// `validation_constraint` simply omit their sections from the output.
/// Placeholder examples inside the template are wrapped in `{% raw %}` so
/// minijinja emits them literally instead of expanding them.
#[derive(Serialize, ToPrompt)]
#[prompt(template = r##"
# Strategy Generation Task

You are an expert orchestrator tasked with creating a detailed execution strategy.

## User's Task
{{ task }}

{% if validation_constraint %}
## Validation Constraint
{{ validation_constraint }}
{% endif %}

{% if user_context %}
## User Request Context
The following context data is available in intent templates via `user_request.*` placeholders:
```json
{{ user_context }}
```
{% endif %}

## Available Agents
{{ agent_list }}

## Reference Workflow (Blueprint)
{{ blueprint_description }}

{% if blueprint_graph %}
### Visual Flow
```mermaid
{{ blueprint_graph }}
```
{% endif %}

---

## Your Task

Generate a detailed execution strategy as a JSON object with the following structure:

```json
{
  "goal": "A clear statement of what this strategy aims to achieve",
  "steps": [
    {
      "step_id": "step_1",
      "output_key": "world_concept",
      "description": "What this step accomplishes",
      "assigned_agent": "AgentName",
      "intent_template": "The prompt to give the agent (can include placeholders like {% raw %}{{ previous_output }}{% endraw %})",
      "expected_output": "Description of what output is expected",
      "requires_validation": true
    }
  ]
}
```

**Guidelines:**
1. Analyze the user's task and the available agents' expertise
2. Use the blueprint as a reference for the general flow
3. Assign the most appropriate agent to each step
4. Create clear, actionable intent templates
5. Ensure steps build upon each other logically
6. Use Mustache/Jinja2-style placeholders with **double curly braces** like {% raw %}`{{ previous_output }}`{% endraw %}, {% raw %}`{{ user_request }}`{% endraw %} in intent templates (NOT single braces like `{previous_output}`)
7. **IMPORTANT:** Use ONLY double curly braces {% raw %}`{{ }}`{% endraw %}, NOT triple braces {% raw %}`{{{ }}}`{% endraw %}. Intent templates are plain text and do not require HTML escaping.
7a. **CRITICAL - Template Engine (minijinja):**
   - Intent templates use minijinja, which automatically serializes JSON values
   - Arrays become JSON arrays: {% raw %}`{{ user_request.keywords }}`{% endraw %} → `["fantasy", "mystery"]`
   - Objects become JSON objects: {% raw %}`{{ user_request.config }}`{% endraw %} → `{"theme": "dark", "level": 5}`
   - DO NOT use filters like {% raw %}`| tojson`{% endraw %}, {% raw %}`| json`{% endraw %}, or any other filters - they are unnecessary and will cause errors
   - Simply reference placeholders directly: {% raw %}`{{ user_request.field }}`{% endraw %}
8. **output_key Best Practices** (CRITICAL):
   - **ALWAYS specify** `output_key` for every step with a unique, meaningful name
   - Use descriptive names like `world_concept`, `emblem`, `profile` instead of generic `step_1_output`
   - **IMMUTABLE design**: Each step should have ONE clear responsibility and produce ONE output type
   - **Append-only**: Never overwrite previous outputs. Create new steps for modifications (e.g., `concept_v1`, `concept_refined`)
   - **Consistent naming**: Use snake_case for output keys (e.g., `world_concept`, `character_profile`)
   - Subsequent steps can reference this output as {% raw %}`{{ world_concept.field }}`{% endraw %} instead of {% raw %}`{{ step_1_output.field }}`{% endraw %}
9. **Placeholder Reference Guide**: Intent templates can access context data using these patterns:
   - **Named outputs (via output_key)**: {% raw %}`{{ world_concept }}`{% endraw %} or {% raw %}`{{ world_concept.theme }}`{% endraw %} (preferred)
   - **Step outputs (auto-generated)**: {% raw %}`{{ step_N_output }}`{% endraw %} or {% raw %}`{{ step_N_output.field }}`{% endraw %} (e.g., {% raw %}`{{ step_1_output.concept }}`{% endraw %})
   - **Previous step (convenience)**: {% raw %}`{{ previous_output }}`{% endraw %} refers to the immediately previous step's output
   - **User request data**: {% raw %}`{{ user_request.field }}`{% endraw %} if the Blueprint defines INPUT context (e.g., {% raw %}`{{ user_request.world_seed.aesthetics }}`{% endraw %})
   - **Other external context**: {% raw %}`{{ context_key.field }}`{% endraw %} for any context added before execution
   - **Nested field access**: Use dot notation to access nested JSON fields (e.g., {% raw %}`{{ step_2_output.data.items[0].name }}`{% endraw %})
10. **Add Validation Steps**: For any step that produces a critical artifact (e.g., a final document, a piece of code, a detailed plan), you SHOULD add a dedicated validation step immediately after it. Select the most appropriate validator agent from the 'Available Agents' list (e.g., InnerValidatorAgent for general validation, or domain-specific validators if available)

**Important:** Return ONLY the JSON object, no additional explanation.
"##)]
pub struct StrategyGenerationRequest {
    /// The user's task, verbatim.
    pub task: String,
    /// Pre-formatted listing of available agents and their expertise.
    pub agent_list: String,
    /// Textual description of the reference workflow (blueprint).
    pub blueprint_description: String,
    /// Optional mermaid diagram of the blueprint; empty string when absent.
    pub blueprint_graph: String,
    /// Optional JSON context for `user_request.*` placeholders; empty when absent.
    pub user_context: String,
    /// Extra instruction injected when validation steps must be suppressed.
    pub validation_constraint: Option<String>,
}

impl StrategyGenerationRequest {
    /// Builds a strategy generation request.
    ///
    /// `blueprint_graph` and `user_context` collapse to empty strings when
    /// `None`; the prompt template's `{% if %}` guards then skip their
    /// sections. When `enable_validation` is false, a constraint telling the
    /// LLM to suppress validation steps is injected into the prompt.
    pub fn new(
        task: String,
        agent_list: String,
        blueprint_description: String,
        blueprint_graph: Option<String>,
        user_context: Option<String>,
        enable_validation: bool,
    ) -> Self {
        // `bool::then` yields Some(..) only when validation is disabled.
        let validation_constraint = (!enable_validation).then(|| {
            "IMPORTANT: For all steps, you MUST set 'requires_validation' to false. Do not generate any validation steps.".to_string()
        });

        Self {
            validation_constraint,
            user_context: user_context.unwrap_or_default(),
            blueprint_graph: blueprint_graph.unwrap_or_default(),
            blueprint_description,
            agent_list,
            task,
        }
    }
}

/// Request for generating an optimized intent prompt for an agent.
///
/// The generated intent must be self-contained: the downstream agent receives
/// only the intent text, so the template instructs the LLM to inline all
/// referenced context values. Rendered via the `ToPrompt` derive's minijinja
/// template; literal placeholder examples are protected with `{% raw %}`.
#[derive(Serialize, ToPrompt)]
#[prompt(template = r##"
# Intent Generation Task

You are generating an intent prompt that will be given to an agent.

## Critical Constraint

**The agent will receive ONLY the intent text you generate - no separate context.**
Therefore, you MUST embed all necessary context data directly into the intent prompt itself.

## Step Information
**Description**: {{ step_description }}
**Expected Output**: {{ expected_output }}
**Agent Expertise**: {{ agent_expertise }}

## Intent Template (Base structure)
{{ intent_template }}

## Available Context Data
{{ context_info }}

---

## Your Task

Generate the complete intent prompt by:
1. Taking the intent template as a base structure
2. **Replacing all placeholders (like {% raw %}{{ previous_output }}{% endraw %}, {% raw %}{{ step_3_output }}{% endraw %}) with actual context values**
3. Ensuring the resulting intent is self-contained and actionable
4. Making it specific and concrete - avoid abstract requests like "review" or "refine" without concrete instructions
5. Matching the agent's expertise and capabilities

## Example

This example shows how to transform a template with placeholders into a complete, self-contained intent.

### INPUT

**Step Description:** "Review and refine the article for clarity and technical accuracy"

**Intent Template:**
"Review the article in {% raw %}{{ step_3_output }}{% endraw %} and suggest improvements"

**Available Context:**
- step_3_output: "# Rust Ownership\n\nRust uses ownership to manage memory safely without garbage collection. The three rules are:\n1. Each value has an owner\n2. Only one owner at a time\n3. When owner goes out of scope, value is dropped"

### OUTPUT

You must generate a complete intent that embeds the actual article content:

```
Review the following article and suggest 3 specific improvements for clarity and technical accuracy:

# Rust Ownership

Rust uses ownership to manage memory safely without garbage collection. The three rules are:
1. Each value has an owner
2. Only one owner at a time
3. When owner goes out of scope, value is dropped

For each improvement, provide:
1. Section/Line: [specific location]
2. Issue: [what needs improvement]
3. Suggestion: [concrete fix]
```

**Key point:** The agent receives ONLY the OUTPUT text above. It cannot access {% raw %}{{ step_3_output }}{% endraw %} separately, so you must copy the actual content into the intent.

**IMPORTANT NOTE ON PLACEHOLDER SYNTAX:**
- Always use **double curly braces** {% raw %}`{{ placeholder_name }}`{% endraw %} (Mustache/Jinja2 style)
- DO NOT use single braces `{placeholder_name}` - this will NOT be recognized
- DO NOT use triple braces {% raw %}`{{{ placeholder_name }}}`{% endraw %} - intent templates are plain text and do not require HTML escaping
- Include spaces inside braces: {% raw %}`{{ name }}`{% endraw %} not {% raw %}`{{name}}`{% endraw %}

**Important:** Return ONLY the final intent prompt text, no additional explanation or metadata.
"##)]
pub struct IntentGenerationRequest {
    /// What the step is supposed to accomplish.
    pub step_description: String,
    /// Description of the output the step should produce.
    pub expected_output: String,
    /// The assigned agent's area of expertise.
    pub agent_expertise: String,
    /// Base intent template (may contain `{{ ... }}` placeholders).
    pub intent_template: String,
    /// Pre-formatted listing of the context values available for inlining.
    pub context_info: String,
}

impl IntentGenerationRequest {
    /// Assembles an intent generation request from its constituent parts.
    /// Pure constructor: fields are stored as given, no normalization.
    pub fn new(
        step_description: String,
        expected_output: String,
        agent_expertise: String,
        intent_template: String,
        context_info: String,
    ) -> Self {
        Self {
            context_info,
            intent_template,
            agent_expertise,
            expected_output,
            step_description,
        }
    }
}

/// Request for deciding whether and how to redesign the strategy after an error.
///
/// The template asks the LLM to classify the failure and answer with exactly
/// one word: `RETRY`, `TACTICAL`, or `FULL`. The caller is expected to parse
/// that single-word response.
#[derive(Serialize, ToPrompt)]
#[prompt(template = r##"
# Redesign Decision Task

An error occurred during workflow execution. Analyze the situation and decide on the appropriate recovery strategy.

## Goal
{{ goal }}

## Progress
- Completed Steps: {{ completed_steps }} / {{ total_steps }}
- Failed Step: {{ failed_step_description }}
- Error: {{ error_message }}

## Completed Work So Far
{{ completed_context }}

---

## Your Task

Analyze the error and determine the appropriate recovery strategy:

1. **RETRY** - The error is transient (network timeout, temporary service unavailability). Simply retry the same step.
2. **TACTICAL** - The error is localized. The failed step and subsequent steps need redesign, but previous work is still valid.
3. **FULL** - The error is fundamental. The entire strategy needs to be reconsidered from scratch.

**Important:** Respond with ONLY one word: `RETRY`, `TACTICAL`, or `FULL`.
"##)]
pub struct RedesignDecisionRequest {
    /// The overall goal of the running strategy.
    pub goal: String,
    /// Number of steps completed before the failure.
    pub completed_steps: usize,
    /// Total number of steps in the strategy.
    pub total_steps: usize,
    /// Description of the step that failed.
    pub failed_step_description: String,
    /// The error produced by the failed step.
    pub error_message: String,
    /// Summary of the work completed so far.
    pub completed_context: String,
}

impl RedesignDecisionRequest {
    /// Builds a redesign decision request; all inputs are stored verbatim.
    pub fn new(
        goal: String,
        completed_steps: usize,
        total_steps: usize,
        failed_step_description: String,
        error_message: String,
        completed_context: String,
    ) -> Self {
        Self {
            completed_context,
            error_message,
            failed_step_description,
            total_steps,
            completed_steps,
            goal,
        }
    }
}

/// Request for tactical redesign of remaining steps after a failure.
///
/// Used for the `TACTICAL` recovery path: completed work is preserved and the
/// LLM regenerates only the failed step and its successors. The template asks
/// for a bare JSON array of `StrategyStep` objects; placeholder examples are
/// wrapped in `{% raw %}` so they render literally.
#[derive(Serialize, ToPrompt)]
#[prompt(template = r##"
# Tactical Redesign Task

A step in the workflow has failed. Redesign the remaining steps to work around the error while preserving completed work.

## Overall Goal
{{ goal }}

## Current Strategy
{{ current_strategy }}

## Failed Step
**Index**: {{ failed_step_index }}
**Description**: {{ failed_step_description }}
**Error**: {{ error_message }}

## Completed Work (Preserve This)
{{ completed_context }}

## Available Agents
{{ agent_list }}

---

## Your Task

Redesign the steps starting from the failed step onwards to achieve the goal. You may:
- Modify the failed step to avoid the error
- Insert new steps to work around the problem
- Remove unnecessary steps
- Change agent assignments
- Adjust intent templates

Generate a JSON array of `StrategyStep` objects with the following structure:

```json
[
  {
    "step_id": "step_X",
    "output_key": "meaningful_name",
    "description": "What this step accomplishes",
    "assigned_agent": "AgentName",
    "intent_template": "The prompt template (can include {% raw %}{{ previous_output }}{% endraw %}, etc. - use double braces)",
    "expected_output": "Description of expected output"
  }
]
```

**IMPORTANT - output_key Best Practices:**
- **ALWAYS specify** `output_key` for every step with a unique, meaningful name (e.g., `world_concept`, `emblem`, `profile`)
- **IMMUTABLE design**: Each step = ONE responsibility = ONE output type
- **Append-only**: Never overwrite previous outputs

**CRITICAL - Template Engine (minijinja):**
- Intent templates use minijinja, which automatically serializes JSON values
- Arrays → JSON arrays: {% raw %}`{{ keywords }}`{% endraw %} → `["fantasy", "mystery"]`
- Objects → JSON objects: {% raw %}`{{ config }}`{% endraw %} → `{"theme": "dark"}`
- DO NOT use filters like {% raw %}`| tojson`{% endraw %}, {% raw %}`| json`{% endraw %} - they cause errors
- Simply reference placeholders directly: {% raw %}`{{ user_request.field }}`{% endraw %}

**Placeholder Reference Guide**: Intent templates can access context data using:
- **Named outputs (via output_key)**: {% raw %}`{{ world_concept }}`{% endraw %} or {% raw %}`{{ world_concept.theme }}`{% endraw %} (preferred)
- **Previous step output**: {% raw %}`{{ step_N_output }}`{% endraw %} or {% raw %}`{{ step_N_output.field }}`{% endraw %} (e.g., {% raw %}`{{ step_1_output.concept }}`{% endraw %})
- **Previous step (convenience)**: {% raw %}`{{ previous_output }}`{% endraw %}
- **User request data**: {% raw %}`{{ user_request.field }}`{% endraw %} for Blueprint INPUT context
- **Nested fields**: Use dot notation (e.g., {% raw %}`{{ step_2_output.data.count }}`{% endraw %})

**Important:** Return ONLY the JSON array, no additional explanation.
"##)]
pub struct TacticalRedesignRequest {
    /// The overall goal of the running strategy.
    pub goal: String,
    /// Serialized form of the current (failing) strategy.
    pub current_strategy: String,
    /// Zero-or-one-based index of the failed step (as used by the caller).
    pub failed_step_index: usize,
    /// Description of the step that failed.
    pub failed_step_description: String,
    /// The error produced by the failed step.
    pub error_message: String,
    /// Completed work that must be preserved by the redesign.
    pub completed_context: String,
    /// Pre-formatted listing of available agents.
    pub agent_list: String,
}

impl TacticalRedesignRequest {
    /// Builds a tactical redesign request; all inputs are stored verbatim.
    // Seven parameters mirror the prompt's seven template slots; a builder
    // would be overkill for this internal type.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        goal: String,
        current_strategy: String,
        failed_step_index: usize,
        failed_step_description: String,
        error_message: String,
        completed_context: String,
        agent_list: String,
    ) -> Self {
        Self {
            agent_list,
            completed_context,
            error_message,
            failed_step_description,
            failed_step_index,
            current_strategy,
            goal,
        }
    }
}

/// Request for full strategy regeneration after a fundamental failure.
///
/// Used for the `FULL` recovery path: the previous strategy is discarded and
/// the LLM produces a brand-new plan informed by the failure. The template
/// asks for a bare JSON object; `blueprint_graph` is optional (empty string
/// is falsy under the template's `{% if %}` guard).
#[derive(Serialize, ToPrompt)]
#[prompt(template = r##"
# Full Strategy Regeneration Task

The previous execution strategy failed fundamentally. Generate a completely new strategy that learns from the failure.

## Original Task
{{ task }}

## Available Agents
{{ agent_list }}

## Reference Workflow (Blueprint)
{{ blueprint_description }}

{% if blueprint_graph %}
### Visual Flow
```mermaid
{{ blueprint_graph }}
```
{% endif %}

---

## Previous Attempt

### Failed Strategy
{{ failed_strategy }}

### What Went Wrong
{{ error_summary }}

### Completed Work (Can Be Referenced)
{{ completed_work }}

---

## Your Task

Analyze the failure and create a **completely new strategy** that:
1. Avoids the mistakes of the previous approach
2. Takes a different angle or uses different agents if needed
3. Leverages any completed work that's still valid
4. Has a higher chance of success

Generate a JSON object with the following structure:

```json
{
  "goal": "A clear statement of what this strategy aims to achieve",
  "steps": [
    {
      "step_id": "step_1",
      "output_key": "world_concept",
      "description": "What this step accomplishes",
      "assigned_agent": "AgentName",
      "intent_template": "The prompt to give the agent (can include placeholders like {% raw %}{{ previous_output }}{% endraw %})",
      "expected_output": "Description of what output is expected"
    }
  ]
}
```

**IMPORTANT - output_key Best Practices:**
- **ALWAYS specify** `output_key` for every step with a unique, meaningful name (e.g., `world_concept`, `emblem`, `profile`)
- **IMMUTABLE design**: Each step = ONE responsibility = ONE output type
- **Append-only**: Never overwrite previous outputs

**CRITICAL - Template Engine (minijinja):**
- Intent templates use minijinja, which automatically serializes JSON values
- Arrays → JSON arrays: {% raw %}`{{ keywords }}`{% endraw %} → `["fantasy", "mystery"]`
- Objects → JSON objects: {% raw %}`{{ config }}`{% endraw %} → `{"theme": "dark"}`
- DO NOT use filters like {% raw %}`| tojson`{% endraw %}, {% raw %}`| json`{% endraw %} - they cause errors
- Simply reference placeholders directly: {% raw %}`{{ user_request.field }}`{% endraw %}

**Placeholder Reference Guide**: Intent templates can access context data using:
- **Named outputs (via output_key)**: {% raw %}`{{ world_concept }}`{% endraw %} or {% raw %}`{{ world_concept.theme }}`{% endraw %} (preferred)
- **Previous step output**: {% raw %}`{{ step_N_output }}`{% endraw %} or {% raw %}`{{ step_N_output.field }}`{% endraw %} (e.g., {% raw %}`{{ step_1_output.concept }}`{% endraw %})
- **Previous step (convenience)**: {% raw %}`{{ previous_output }}`{% endraw %}
- **User request data**: {% raw %}`{{ user_request.field }}`{% endraw %} for Blueprint INPUT context
- **Nested fields**: Use dot notation (e.g., {% raw %}`{{ step_2_output.data.count }}`{% endraw %})

**Important:** Return ONLY the JSON object, no additional explanation.
"##)]
pub struct FullRegenerateRequest {
    /// The user's original task, verbatim.
    pub task: String,
    /// Pre-formatted listing of available agents.
    pub agent_list: String,
    /// Textual description of the reference workflow (blueprint).
    pub blueprint_description: String,
    /// Optional mermaid diagram of the blueprint; empty string when absent.
    pub blueprint_graph: String,
    /// Serialized form of the strategy that failed.
    pub failed_strategy: String,
    /// Summary of what went wrong during the previous attempt.
    pub error_summary: String,
    /// Completed work the new strategy may reference.
    pub completed_work: String,
}

impl FullRegenerateRequest {
    /// Builds a full-regeneration request.
    ///
    /// A missing `blueprint_graph` becomes an empty string, which the prompt
    /// template's `{% if %}` guard treats as "no diagram".
    // Seven parameters mirror the prompt's seven template slots.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        task: String,
        agent_list: String,
        blueprint_description: String,
        blueprint_graph: Option<String>,
        failed_strategy: String,
        error_summary: String,
        completed_work: String,
    ) -> Self {
        let blueprint_graph = blueprint_graph.unwrap_or_default();
        Self {
            completed_work,
            error_summary,
            failed_strategy,
            blueprint_graph,
            blueprint_description,
            agent_list,
            task,
        }
    }
}

/// Request for deciding whether to regenerate the strategy for a ParallelOrchestrator.
///
/// The template asks the LLM to answer with exactly one word: `REGENERATE`
/// (a new plan may salvage the workflow) or `FAIL` (the goal is unachievable).
/// The caller is expected to parse that single-word response.
#[derive(Serialize, ToPrompt)]
#[prompt(template = r##"
# Workflow Recovery Task

A step in a parallel workflow has failed with a non-transient error. Analyze the situation and decide if the entire workflow is salvageable by regenerating a new plan.

## Overall Task
{{ task }}

## Failure Details
- **Failed Step ID**: {{ failed_step_id }}
- **Error Message**: {{ error_message }}

## Context (Outputs from successful steps)
```json
{{ successful_outputs }}
```

---

## Your Task

Based on the error and the work completed so far, can this workflow be salvaged by creating a new plan?

- If the error is fundamental or the goal is now unachievable, respond with **FAIL**.
- If you believe a different approach or plan could succeed, respond with **REGENERATE**.

**Important:** Respond with ONLY one word: `REGENERATE` or `FAIL`.
"##)]
pub struct ParallelRedesignDecisionRequest {
    /// The overall task of the parallel workflow.
    pub task: String,
    /// Identifier of the step that failed.
    pub failed_step_id: String,
    /// The error produced by the failed step.
    pub error_message: String,
    /// JSON snapshot of outputs from the steps that succeeded.
    pub successful_outputs: String,
}

impl ParallelRedesignDecisionRequest {
    /// Builds a parallel redesign decision request; inputs are stored verbatim.
    pub fn new(
        task: String,
        failed_step_id: String,
        error_message: String,
        successful_outputs: String,
    ) -> Self {
        Self {
            successful_outputs,
            error_message,
            failed_step_id,
            task,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // Constructor wiring: Some(graph) must be stored as-is, not defaulted.
    #[test]
    fn test_strategy_request_creation() {
        let req = StrategyGenerationRequest::new(
            "Test task".to_string(),
            "Agent1, Agent2".to_string(),
            "Blueprint description".to_string(),
            Some("graph TD\nA --> B".to_string()),
            None,
            true,
        );

        assert_eq!(req.task, "Test task");
        assert_eq!(req.blueprint_graph, "graph TD\nA --> B");
    }

    // Template rendering: inputs appear in the prompt, and {% raw %}-guarded
    // placeholder examples survive rendering literally.
    #[test]
    fn test_strategy_request_to_prompt() {
        use crate::prompt::ToPrompt;

        let req = StrategyGenerationRequest::new(
            "Write an article".to_string(),
            "- WriterAgent: Expert writer".to_string(),
            "1. Research\n2. Write\n3. Review".to_string(),
            None,
            None,
            true,
        );

        let prompt = req.to_prompt();
        assert!(prompt.contains("Write an article"));
        assert!(prompt.contains("WriterAgent"));
        assert!(prompt.contains("Research"));

        // CRITICAL: Verify that placeholder examples are preserved (not expanded by minijinja)
        assert!(
            prompt.contains("{{ previous_output }}"),
            "Placeholder {{ previous_output }} should be preserved in the prompt, not expanded to empty string"
        );
    }

    // With user context supplied, the conditional section renders and the
    // JSON payload is embedded verbatim.
    #[test]
    fn test_strategy_request_with_user_context() {
        use crate::prompt::ToPrompt;

        let user_context_json = r#"{
  "theme": "Gothic",
  "world_seed": {
    "aesthetics": "dark",
    "complexity": 5
  }
}"#;

        let req = StrategyGenerationRequest::new(
            "Create a fantasy world".to_string(),
            "- WorldConceptAgent: Expert in world building".to_string(),
            "1. Generate concept\n2. Create design".to_string(),
            None,
            Some(user_context_json.to_string()),
            true,
        );

        let prompt = req.to_prompt();

        // Debug: print actual prompt
        println!("\n=== Strategy Generation Prompt with User Context ===");
        println!("{}", prompt);
        println!("====================================================\n");

        // Should contain user context section
        assert!(prompt.contains("User Request Context"));
        assert!(prompt.contains("Gothic"));
        assert!(prompt.contains("world_seed"));
        assert!(prompt.contains("aesthetics"));
        assert!(prompt.contains("complexity"));

        // Should explain how to use user_request placeholders
        assert!(prompt.contains("user_request.*"));
    }

    // Without user context, the conditional section must be omitted entirely.
    #[test]
    fn test_strategy_request_without_user_context() {
        use crate::prompt::ToPrompt;

        let req = StrategyGenerationRequest::new(
            "Write an article".to_string(),
            "- WriterAgent: Expert writer".to_string(),
            "1. Research\n2. Write".to_string(),
            None,
            None,
            true,
        );

        let prompt = req.to_prompt();

        println!("\n=== Prompt without user_context ===");
        println!("{}", prompt);
        println!("====================================\n");

        // Should NOT contain user context section when empty
        assert!(!prompt.contains("User Request Context"));
    }

    // Pins the minijinja semantics that `new()` relies on: an empty string
    // (from unwrap_or_default) is falsy inside {% if %}.
    #[test]
    fn test_minijinja_empty_string_condition() {
        use crate::prompt::ToPrompt;

        // Test with empty string (unwrap_or_default behavior)
        let req = StrategyGenerationRequest {
            task: "Test".to_string(),
            agent_list: "AgentA".to_string(),
            blueprint_description: "Blueprint".to_string(),
            blueprint_graph: String::new(),
            user_context: String::new(), // Empty string
            validation_constraint: None,
        };

        let prompt = req.to_prompt();

        println!("\n=== Test: Empty string for user_context ===");
        println!("user_context is empty string: '{}'", req.user_context);
        println!(
            "Prompt contains 'User Request Context': {}",
            prompt.contains("User Request Context")
        );
        println!("==========================================\n");

        // Empty string should be falsy in minijinja {% if %} conditions
        assert!(
            !prompt.contains("User Request Context"),
            "Empty string should not trigger the user_context conditional block"
        );
    }

    // Smoke test: intent-generation prompt includes the step description
    // and agent expertise.
    #[test]
    fn test_intent_generation_request() {
        let req = IntentGenerationRequest::new(
            "Analyze user requirements".to_string(),
            "List of requirements".to_string(),
            "Expert in requirement analysis".to_string(),
            "Analyze: {user_input}".to_string(),
            "user_input: Build a web app".to_string(),
        );

        let prompt = req.to_prompt();
        assert!(prompt.contains("Analyze user requirements"));
        assert!(prompt.contains("Expert in requirement analysis"));
    }

    // Smoke test: redesign-decision prompt embeds goal and error message.
    #[test]
    fn test_redesign_decision_request() {
        let req = RedesignDecisionRequest::new(
            "Complete the task".to_string(),
            2,
            5,
            "Step 3 failed".to_string(),
            "Network timeout".to_string(),
            "Steps 1-2 completed".to_string(),
        );

        let prompt = req.to_prompt();
        assert!(prompt.contains("Complete the task"));
        assert!(prompt.contains("Network timeout"));
    }

    // Smoke test: parallel recovery prompt embeds step id, error, and the
    // successful-outputs JSON.
    #[test]
    fn test_parallel_redesign_decision_request() {
        let req = ParallelRedesignDecisionRequest::new(
            "Analyze data".to_string(),
            "step_2".to_string(),
            "Invalid input format".to_string(),
            r#"{"step_1_output": {"data": "..."}}"#.to_string(),
        );

        let prompt = req.to_prompt();
        assert!(prompt.contains("Workflow Recovery Task"));
        assert!(prompt.contains("step_2"));
        assert!(prompt.contains("Invalid input format"));
        assert!(prompt.contains("step_1_output"));
    }
}