// car-agents 0.14.0
//
// Built-in commodity agents for Common Agent Runtime.
//! Coordinator agent — decides which agents to invoke and in what pattern.
//!
//! Given a goal, the Coordinator classifies it, selects the right agents,
//! picks a coordination pattern (pipeline, swarm, supervisor), and produces
//! an execution plan. This is the meta-agent that orchestrates other agents.

use crate::{AgentContext, AgentResult};
use car_inference::{GenerateParams, GenerateRequest};

/// Coordination patterns available.
/// Coordination patterns available.
///
/// Serialized in `snake_case` (e.g. `"pipeline"`), matching the JSON format
/// the coordinator prompt asks the model to emit.
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum Pattern {
    /// Single agent, one shot.
    Solo,
    /// Sequential chain: each agent's output feeds the next.
    Pipeline,
    /// Parallel: multiple agents on the same problem, pick best.
    Swarm,
    /// Iterative: agent does work, supervisor reviews, repeat.
    Supervisor,
}

/// Coordinator's decision: which agents in what pattern.
/// Coordinator's decision: which agents in what pattern.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct CoordinationPlan {
    /// Coordination pattern to run the agents under.
    pub pattern: Pattern,
    /// Agent names in execution order (for pipelines, the last agent's
    /// output is what the user sees).
    pub agents: Vec<String>,
    /// The model's (or fallback's) explanation of why this plan was chosen.
    pub reasoning: String,
}

/// Coordinator configuration.
/// Coordinator configuration.
#[derive(Debug, Clone)]
pub struct CoordinatorConfig {
    /// Token budget for the coordination call.
    pub max_tokens: usize,
    /// Sampling temperature; kept low by default for deterministic planning.
    pub temperature: f64,
    /// Specific model to use, or `None` to let the runtime choose.
    pub model: Option<String>,
}

impl Default for CoordinatorConfig {
    fn default() -> Self {
        Self {
            max_tokens: 1024,
            temperature: 0.2,
            model: None,
        }
    }
}

/// Coordinator: goal → which agents + which pattern.
/// Coordinator: goal → which agents + which pattern.
pub struct Coordinator {
    // Runtime context; provides the inference backend used for planning.
    ctx: AgentContext,
    // Tunables for the coordination inference call.
    config: CoordinatorConfig,
}

impl Coordinator {
    /// Create a coordinator with the default [`CoordinatorConfig`].
    pub fn new(ctx: AgentContext) -> Self {
        Self {
            ctx,
            config: CoordinatorConfig::default(),
        }
    }

    /// Create a coordinator with an explicit configuration.
    pub fn with_config(ctx: AgentContext, config: CoordinatorConfig) -> Self {
        Self { ctx, config }
    }

    /// Decide how to handle a goal: which agents and what coordination pattern.
    ///
    /// Returns the chosen [`CoordinationPlan`] together with an [`AgentResult`]
    /// describing the coordinator's own inference call. This never fails: if
    /// inference errors out or the model's response cannot be parsed, a
    /// sensible default pipeline is returned with reduced confidence.
    pub async fn coordinate(&self, goal: &str) -> (CoordinationPlan, AgentResult) {
        let prompt = format!(
            "You are a coordination agent. Given a goal, decide which agents to use and how to coordinate them.\n\n\
            Available agents:\n\
            - researcher: Searches the codebase and gathers concrete information, files, and evidence.\n\
            - summarizer: Synthesizes the researcher's findings into a direct, polished ANSWER to the user's question. Use this for analytical goals (review, describe, explain, audit, identify, summarize, what/how/why questions).\n\
            - planner: Breaks a goal into ordered action STEPS the user or another agent will execute. Only use this when the goal is to MAKE A CHANGE (implement, fix, add, refactor, build, migrate) and the user genuinely wants a step-by-step workflow back.\n\
            - verifier: Checks that the previous agent's output actually addresses the goal.\n\n\
            CRITICAL: If the user is asking you to describe/review/explain/identify/audit something, the pipeline must END with summarizer, NOT planner. The summarizer produces the final answer text. The planner turns answers into checklists — that is the wrong output shape for analytical goals.\n\n\
            Available patterns:\n\
            - solo: One agent handles it alone.\n\
            - pipeline: Sequential chain (A → B → C). The LAST agent's output is what the user sees.\n\
            - swarm: Multiple agents in parallel, pick best result.\n\
            - supervisor: Agent works, supervisor reviews, iterate.\n\n\
            Default choice for analytical goals: pipeline [researcher, summarizer, verifier].\n\
            Default choice for change-making goals: pipeline [researcher, planner, verifier].\n\n\
            Goal: {goal}\n\n\
            Respond with EXACTLY this JSON format:\n\
            {{\"pattern\": \"pipeline\", \"agents\": [\"researcher\", \"summarizer\", \"verifier\"], \"reasoning\": \"why this pattern\"}}"
        );

        let start = std::time::Instant::now();
        let req = GenerateRequest {
            prompt,
            model: self.config.model.clone(),
            params: GenerateParams {
                temperature: self.config.temperature,
                max_tokens: self.config.max_tokens,
                ..Default::default()
            },
            context: None,
            tools: None,
            images: None,
            messages: None,
            cache_control: false,
            response_format: None,
            intent: None,
        };

        match self.ctx.inference.generate_tracked(req).await {
            Ok(result) => {
                // Track parse success explicitly instead of sniffing the
                // reasoning text for the word "default": a legitimate model
                // plan whose reasoning happens to mention "default" must not
                // be penalized with fallback-level confidence.
                let parsed = parse_plan(&result.text);
                let parse_ok = parsed.is_some();
                let plan = parsed.unwrap_or_else(|| {
                    // Default: pipeline with researcher → summarizer → verifier.
                    // Summarizer (not planner) because most bare calls are
                    // analytical — users want an answer, not a checklist.
                    CoordinationPlan {
                        pattern: Pattern::Pipeline,
                        agents: vec!["researcher".into(), "summarizer".into(), "verifier".into()],
                        reasoning: "default pipeline (failed to parse model response)".into(),
                    }
                });

                let agent_result = AgentResult {
                    agent: "coordinator".into(),
                    output: result.text,
                    // A plan the model actually produced is trusted more than
                    // the parse-failure fallback.
                    confidence: if parse_ok { 0.8 } else { 0.5 },
                    model_used: result.model_used,
                    latency_ms: start.elapsed().as_millis() as u64,
                };

                (plan, agent_result)
            }
            Err(e) => {
                // Inference itself failed: fall back to a short pipeline.
                // NOTE(review): this fallback omits the verifier, unlike the
                // parse-failure default above — presumably deliberate since
                // inference is already failing; confirm before unifying.
                let plan = CoordinationPlan {
                    pattern: Pattern::Pipeline,
                    agents: vec!["researcher".into(), "summarizer".into()],
                    reasoning: format!("fallback (coordination failed: {})", e),
                };
                let agent_result = AgentResult {
                    agent: "coordinator".into(),
                    output: format!("Coordination failed: {}", e),
                    confidence: 0.3,
                    model_used: String::new(),
                    latency_ms: start.elapsed().as_millis() as u64,
                };
                (plan, agent_result)
            }
        }
    }
}

/// Parse a [`CoordinationPlan`] from LLM text output.
///
/// Extracts the outermost `{ … }` span from the text (models often wrap the
/// JSON in prose or code fences) and deserializes it. Returns `None` when no
/// well-formed JSON object can be found.
fn parse_plan(text: &str) -> Option<CoordinationPlan> {
    let start = text.find('{')?;
    // Search for the closing brace only AFTER `start`. Scanning the whole
    // string (`text.rfind('}')`) could return an index before `start` — e.g.
    // for input like "} then {" — producing an inverted range that panics on
    // the slice below. Anchoring the search makes such input return None.
    let end = start + text[start..].rfind('}')? + 1;
    serde_json::from_str(&text[start..end]).ok()
}

#[cfg(test)]
mod tests {
    use super::*;

    // JSON embedded in surrounding prose is still extracted and parsed.
    #[test]
    fn parse_plan_from_json() {
        let response = r#"Here's the plan: {"pattern": "pipeline", "agents": ["researcher", "verifier"], "reasoning": "research then verify"}"#;
        let plan = parse_plan(response).expect("embedded JSON should parse");
        assert_eq!(plan.pattern, Pattern::Pipeline);
        assert_eq!(plan.agents, vec!["researcher", "verifier"]);
    }

    // A bare JSON object with no surrounding text parses as well.
    #[test]
    fn parse_plan_from_clean_json() {
        let response = r#"{"pattern": "swarm", "agents": ["researcher", "researcher"], "reasoning": "parallel research"}"#;
        let plan = parse_plan(response).expect("clean JSON should parse");
        assert_eq!(plan.pattern, Pattern::Swarm);
    }

    // Non-JSON input yields None rather than panicking or erroring.
    #[test]
    fn parse_plan_fails_gracefully() {
        assert!(parse_plan("not json").is_none());
    }
}