Skip to main content

scud/backend/
mod.rs

1//! Unified execution backend for AI agent tasks.
2//!
3//! Provides the [`AgentBackend`] trait that abstracts over different execution modes:
4//! - Direct API calls to LLM providers
5//! - CLI subprocess spawning (Claude Code, OpenCode, Cursor)
6//! - Simulated execution for testing and dry-runs
7//!
8//! All callers interact through the same interface regardless of how the
9//! underlying LLM execution happens.
10
11pub mod cli;
12pub mod direct;
13pub mod simulated;
14
15use anyhow::Result;
16use async_trait::async_trait;
17use serde::{Deserialize, Serialize};
18use std::path::PathBuf;
19use std::time::Duration;
20use tokio::sync::mpsc;
21use tokio_util::sync::CancellationToken;
22
23use crate::commands::spawn::terminal::Harness;
24
/// A backend that can execute an AI agent task.
///
/// Callers don't know or care whether this is a raw API call,
/// a headless CLI process, or something else — all implementations
/// (see [`create_backend`] / [`create_simulated_backend`]) share this
/// interface. `Send + Sync` so a boxed backend can be shared across tasks.
#[async_trait]
pub trait AgentBackend: Send + Sync {
    /// Execute a prompt with optional tool access.
    ///
    /// Returns a handle for streaming events and awaiting the final result;
    /// cancellation is done via the token on the returned [`AgentHandle`].
    async fn execute(&self, request: AgentRequest) -> Result<AgentHandle>;
}
36
/// Request to execute an agent task.
///
/// Construct with struct-update syntax over [`AgentRequest::default`],
/// which fills `working_dir` with the process's current directory.
#[derive(Debug, Clone)]
pub struct AgentRequest {
    /// The prompt to send to the agent.
    pub prompt: String,
    /// Optional system prompt override (backend default used when `None`).
    pub system_prompt: Option<String>,
    /// Working directory for tool execution.
    pub working_dir: PathBuf,
    /// Model to use (provider-specific format).
    pub model: Option<String>,
    /// LLM provider name.
    pub provider: Option<String>,
    /// Maximum agentic turns before stopping.
    pub max_turns: Option<usize>,
    /// Overall timeout for the execution.
    pub timeout: Option<Duration>,
    /// Reasoning effort level (e.g., "high", "medium", "low").
    pub reasoning_effort: Option<String>,
}
57
58impl Default for AgentRequest {
59    fn default() -> Self {
60        Self {
61            prompt: String::new(),
62            system_prompt: None,
63            working_dir: std::env::current_dir().unwrap_or_default(),
64            model: None,
65            provider: None,
66            max_turns: None,
67            timeout: None,
68            reasoning_effort: None,
69        }
70    }
71}
72
/// Handle to a running agent execution.
///
/// Provides access to streaming events and cancellation. Consume the
/// whole stream with [`AgentHandle::result`], or read `events` manually
/// for incremental output.
pub struct AgentHandle {
    /// Receiver for streaming events during execution.
    pub events: mpsc::Receiver<AgentEvent>,
    /// Token to cancel the execution.
    // NOTE(review): presumably the backend task watches this token and
    // stops when it is cancelled — confirm against the backend impls.
    pub cancel: CancellationToken,
}
82
83impl AgentHandle {
84    /// Consume all events and return the final result.
85    ///
86    /// This drains the event stream, collecting text deltas and tool calls,
87    /// until a [`AgentEvent::Complete`] event is received.
88    pub async fn result(mut self) -> Result<AgentResult> {
89        let mut text_parts = Vec::new();
90        let mut tool_calls = Vec::new();
91        let mut status = AgentStatus::Completed;
92        let usage = None;
93
94        while let Some(event) = self.events.recv().await {
95            match event {
96                AgentEvent::TextDelta(delta) => text_parts.push(delta),
97                AgentEvent::TextComplete(text) => {
98                    text_parts.clear();
99                    text_parts.push(text);
100                }
101                AgentEvent::ToolCallStart { id, name } => {
102                    tool_calls.push(ToolCallRecord {
103                        id,
104                        name,
105                        output: String::new(),
106                    });
107                }
108                AgentEvent::ToolCallEnd { id, output } => {
109                    if let Some(record) = tool_calls.iter_mut().find(|r| r.id == id) {
110                        record.output = output;
111                    }
112                }
113                AgentEvent::Complete(result) => return Ok(result),
114                AgentEvent::Error(msg) => {
115                    status = AgentStatus::Failed(msg);
116                    break;
117                }
118                AgentEvent::ThinkingDelta(_) => {}
119            }
120        }
121
122        Ok(AgentResult {
123            text: text_parts.join(""),
124            status,
125            tool_calls,
126            usage,
127        })
128    }
129}
130
/// Result of an agent execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AgentResult {
    /// Final response text (concatenated from all text deltas).
    pub text: String,
    /// Execution status.
    pub status: AgentStatus,
    /// Audit trail of tool calls made during execution, in start order.
    pub tool_calls: Vec<ToolCallRecord>,
    /// Token usage statistics, if the backend reported them.
    pub usage: Option<TokenUsage>,
}
143
/// Status of an agent execution.
///
/// Only [`AgentStatus::Completed`] counts as success
/// (see [`AgentStatus::is_success`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AgentStatus {
    /// Completed successfully.
    Completed,
    /// Failed with an error message.
    Failed(String),
    /// Cancelled by the caller.
    Cancelled,
    /// Timed out.
    Timeout,
}
156
157impl AgentStatus {
158    /// Returns true if the execution completed successfully.
159    pub fn is_success(&self) -> bool {
160        matches!(self, AgentStatus::Completed)
161    }
162}
163
/// Record of a single tool call during execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolCallRecord {
    /// Unique ID of the tool call (used to pair start/end events).
    pub id: String,
    /// Tool name (e.g., "Read", "Bash", "Edit").
    pub name: String,
    /// Tool output/result; empty until the call finishes.
    pub output: String,
}
174
/// Token usage statistics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TokenUsage {
    /// Tokens consumed by the prompt/input side of the exchange.
    pub input_tokens: u64,
    /// Tokens produced by the model in its response.
    pub output_tokens: u64,
}
181
/// Events emitted during agent execution.
///
/// This is a superset of the existing [`StreamEventKind`] types,
/// providing a unified event model for all backends. See
/// [`AgentHandle::result`] for how the events are folded into an
/// [`AgentResult`].
#[derive(Debug, Clone)]
pub enum AgentEvent {
    /// Incremental text output from the agent.
    TextDelta(String),
    /// Complete text response (replaces accumulated deltas).
    TextComplete(String),
    /// A tool call has started.
    ToolCallStart { id: String, name: String },
    /// A tool call has completed; `id` matches a prior `ToolCallStart`.
    ToolCallEnd { id: String, output: String },
    /// Thinking/reasoning text delta (excluded from the final text).
    ThinkingDelta(String),
    /// An error occurred; terminates accumulation with a `Failed` status.
    Error(String),
    /// Execution has completed with a final result.
    Complete(AgentResult),
}
203
/// Create a backend from a harness specification.
///
/// This is the main factory function for creating backends: with the
/// `direct-api` feature enabled, [`Harness::DirectApi`] maps to the
/// direct-API backend; everything else spawns a CLI subprocess backend.
pub fn create_backend(harness: &Harness) -> Result<Box<dyn AgentBackend>> {
    match harness {
        #[cfg(feature = "direct-api")]
        Harness::DirectApi => Ok(Box::new(direct::DirectApiBackend::new())),
        // NOTE(review): when `direct-api` is disabled, DirectApi silently
        // falls through to the CLI backend — confirm that is intended
        // rather than an error.
        _ => Ok(Box::new(cli::CliBackend::new(harness.clone())?)),
    }
}
214
/// Create a simulated backend for testing and dry-runs.
///
/// Infallible, unlike [`create_backend`]: the simulated backend is a
/// unit struct and needs no external process or credentials.
pub fn create_simulated_backend() -> Box<dyn AgentBackend> {
    Box::new(simulated::SimulatedBackend)
}
219
#[cfg(test)]
mod tests {
    use super::*;

    /// Default request is empty: no prompt, no model, no timeout.
    /// Plain `#[test]`: nothing here awaits, so no runtime is needed.
    #[test]
    fn test_agent_request_default() {
        let req = AgentRequest::default();
        assert!(req.prompt.is_empty());
        assert!(req.model.is_none());
        assert!(req.timeout.is_none());
    }

    /// Only `Completed` counts as success; every other status does not.
    #[test]
    fn test_agent_status_is_success() {
        assert!(AgentStatus::Completed.is_success());
        assert!(!AgentStatus::Failed("err".into()).is_success());
        assert!(!AgentStatus::Cancelled.is_success());
        assert!(!AgentStatus::Timeout.is_success());
    }

    /// End-to-end through the simulated backend: execution succeeds and
    /// the response text is marked as simulated.
    #[tokio::test]
    async fn test_simulated_backend() {
        let backend = create_simulated_backend();
        let req = AgentRequest {
            prompt: "Hello world".into(),
            ..Default::default()
        };
        let handle = backend.execute(req).await.unwrap();
        let result = handle.result().await.unwrap();
        assert!(result.status.is_success());
        assert!(result.text.contains("Simulated"));
    }
}