car_agents/planner.rs

//! Planner agent — given a goal, produce an ordered list of action steps.
//!
//! Bridges natural language goals to CAR's ActionProposal format.
//! Uses the inference engine to generate plans, then optionally scores
//! them via car-planner for validation.
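//!
//! # Example
//!
//! A minimal usage sketch. It assumes the crate is named `car_agents`, that this
//! module is exposed as `car_agents::planner`, and that an `AgentContext` has
//! already been constructed elsewhere; none of that is shown in this file.
//!
//! ```ignore
//! use car_agents::planner::PlannerAgent;
//!
//! let planner = PlannerAgent::new(ctx);
//! let result = planner.plan("Ship the v2 API behind a feature flag", None).await;
//! println!("{} (confidence {:.2})", result.output, result.confidence);
//! ```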

use crate::{AgentContext, AgentResult};
use car_inference::{GenerateParams, GenerateRequest};

/// Planner agent configuration.
#[derive(Debug, Clone)]
pub struct PlanConfig {
    /// Maximum number of tokens to request for the generated plan.
    pub max_tokens: usize,
    /// Sampling temperature; kept low by default so plans stay focused.
    pub temperature: f64,
    /// Optional model override passed through on the generate request.
    pub model: Option<String>,
    /// If true, include available tools in the prompt.
    pub include_tools: bool,
}

impl Default for PlanConfig {
    fn default() -> Self {
        Self {
            max_tokens: 2048,
            temperature: 0.2,
            model: None,
            include_tools: true,
        }
    }
}

/// Planner: goal → ordered action steps.
pub struct PlannerAgent {
    ctx: AgentContext,
    config: PlanConfig,
}

impl PlannerAgent {
    /// Construct a planner with the default configuration.
    pub fn new(ctx: AgentContext) -> Self {
        Self {
            ctx,
            config: PlanConfig::default(),
        }
    }

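    /// Construct a planner with a custom [`PlanConfig`].
    ///
    /// A minimal sketch; `ctx` is assumed to be an `AgentContext` built
    /// elsewhere and is not shown here.
    ///
    /// ```ignore
    /// let config = PlanConfig { temperature: 0.0, ..Default::default() };
    /// let planner = PlannerAgent::with_config(ctx, config);
    /// ```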
    pub fn with_config(ctx: AgentContext, config: PlanConfig) -> Self {
        Self { ctx, config }
    }

    /// Generate a plan for achieving a goal.
    ///
    /// On inference failure, the returned [`AgentResult`] carries the error
    /// message as its output and a confidence of zero.
    pub async fn plan(&self, goal: &str, context: Option<&str>) -> AgentResult {
        let prompt = format!(
            "You are a planning agent. Break down the following goal into concrete, ordered steps.\n\n\
            Goal: {goal}\n\n\
            For each step, specify:\n\
            1. What to do (action)\n\
            2. What it depends on (which previous steps must complete)\n\
            3. What it produces (output/state change)\n\
            4. How to verify it worked\n\n\
            Format as a numbered list. Be specific and actionable — no vague steps like 'analyze the situation.'"
        );

        let start = std::time::Instant::now();
        let req = GenerateRequest {
            prompt,
            model: self.config.model.clone(),
            params: GenerateParams {
                temperature: self.config.temperature,
                max_tokens: self.config.max_tokens,
                ..Default::default()
            },
            context: context.map(String::from),
            // `include_tools` is not yet wired into the request; tools are omitted for now.
            tools: None,
            images: None,
            messages: None,
            cache_control: false,
            response_format: None,
            intent: None,
        };

        match self.ctx.inference.generate_tracked(req).await {
            Ok(result) => {
                // Count steps as a rough quality signal
                let step_count = result
                    .text
                    .lines()
                    .filter(|l| l.trim_start().starts_with(|c: char| c.is_ascii_digit()))
                    .count();
                let confidence = if step_count >= 3 { 0.8 } else { 0.5 };
                AgentResult {
                    agent: "planner".into(),
                    output: result.text,
                    confidence,
                    model_used: result.model_used,
                    latency_ms: start.elapsed().as_millis() as u64,
                }
            }
            Err(e) => AgentResult {
                agent: "planner".into(),
                output: format!("Planning failed: {}", e),
                confidence: 0.0,
                model_used: String::new(),
                latency_ms: start.elapsed().as_millis() as u64,
            },
        }
    }
}