//! Verifier agent: check whether an output meets a specification.
//!
//! Takes a work product plus acceptance criteria and returns a pass/fail
//! verdict with reasons. This is the quality gate for any pipeline: nothing
//! ships without verification.
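//!
//! # Example
//!
//! A minimal usage sketch (marked `ignore` since constructing an
//! `AgentContext` depends on the host runtime; the import path is
//! illustrative):
//!
//! ```ignore
//! use car_agents::verify::Verifier;
//!
//! // `ctx: AgentContext` is assumed to come from the runtime's setup code.
//! let verifier = Verifier::new(ctx);
//! let result = verifier
//!     .verify(
//!         "fn add(a: i32, b: i32) -> i32 { a + b }",
//!         "Must compile and add two i32 values",
//!     )
//!     .await;
//! // The verdict is embedded in the structured text output.
//! let passed = result.output.contains("VERDICT: PASS");
//! ```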

use crate::{AgentContext, AgentResult};
use car_inference::{GenerateParams, GenerateRequest};

/// Verifier configuration.
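///
/// Defaults favor deterministic judging; individual fields can be
/// overridden (the model name below is purely illustrative):
///
/// ```ignore
/// let config = VerifyConfig {
///     temperature: 0.0,
///     model: Some("strict-judge-model".into()),
///     ..Default::default()
/// };
/// ```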
#[derive(Debug, Clone)]
pub struct VerifyConfig {
    /// Maximum number of tokens the model may generate for its verdict.
    pub max_tokens: usize,
    /// Sampling temperature; kept low by default for consistent judgments.
    pub temperature: f64,
    /// Optional model override; `None` defers to the runtime's default.
    pub model: Option<String>,
}

impl Default for VerifyConfig {
    fn default() -> Self {
        Self {
            max_tokens: 2048,
            temperature: 0.1, // low temp for consistent judgment
            model: None,
        }
    }
}

/// Verifier: output + spec → pass/fail with reasons.
pub struct Verifier {
    ctx: AgentContext,
    config: VerifyConfig,
}

impl Verifier {
    /// Create a verifier with the default configuration.
    pub fn new(ctx: AgentContext) -> Self {
        Self {
            ctx,
            config: VerifyConfig::default(),
        }
    }

    /// Create a verifier with a custom configuration.
    pub fn with_config(ctx: AgentContext, config: VerifyConfig) -> Self {
        Self { ctx, config }
    }

    /// Verify work output against acceptance criteria.
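    ///
    /// The model is prompted for a fixed `VERDICT:` / `REASONS:` / `ISSUES:`
    /// layout, and the verdict is recovered with a substring check on the
    /// returned text. A sketch of inspecting the result:
    ///
    /// ```ignore
    /// let result = verifier.verify(output, criteria).await;
    /// if result.output.contains("VERDICT: PASS") {
    ///     println!("passed in {} ms via {}", result.latency_ms, result.model_used);
    /// }
    /// ```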
    pub async fn verify(&self, output: &str, criteria: &str) -> AgentResult {
        let prompt = format!(
            "You are a verification agent. Your job is to determine if the output meets the criteria.\n\n\
            ## Acceptance Criteria\n{criteria}\n\n\
            ## Output to Verify\n{output}\n\n\
            Respond with:\n\
            VERDICT: PASS or FAIL\n\
            REASONS:\n\
            - (specific reasons for your verdict)\n\
            ISSUES:\n\
            - (specific issues found, or 'None' if passing)"
        );

        let start = std::time::Instant::now();
        let req = GenerateRequest {
            prompt,
            model: self.config.model.clone(),
            params: GenerateParams {
                temperature: self.config.temperature,
                max_tokens: self.config.max_tokens,
                ..Default::default()
            },
            context: None,
            tools: None,
            images: None,
            messages: None,
            cache_control: false,
            response_format: None,
            intent: None,
        };

        match self.ctx.inference.generate_tracked(req).await {
            Ok(result) => {
                // Simple substring check against the structured verdict line
                // requested in the prompt.
                let passed = result.text.contains("VERDICT: PASS");
                AgentResult {
                    agent: "verifier".into(),
                    output: result.text,
                    // Heuristic confidence: a clean PASS is weighted slightly
                    // higher than a FAIL.
                    confidence: if passed { 0.9 } else { 0.8 },
                    model_used: result.model_used,
                    latency_ms: start.elapsed().as_millis() as u64,
                }
            }
            Err(e) => AgentResult {
                agent: "verifier".into(),
                output: format!("Verification failed: {e}"),
                confidence: 0.0,
                model_used: String::new(),
                latency_ms: start.elapsed().as_millis() as u64,
            },
        }
    }
}