// a3s_code_core/agent_api.rs

//! Agent Facade API
//!
//! High-level, ergonomic API for using A3S Code as an embedded library.
//!
//! ## Example
//!
//! ```rust,no_run
//! use a3s_code_core::Agent;
//!
//! # async fn run() -> anyhow::Result<()> {
//! let agent = Agent::new("agent.hcl").await?;
//! let session = agent.session("/my-project", None)?;
//! let result = session.send("Explain the auth module", None).await?;
//! println!("{}", result.text);
//! # Ok(())
//! # }
//! ```

use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};

use anyhow::Context;
use tokio::sync::{broadcast, mpsc};
use tokio::task::JoinHandle;

use a3s_lane::{DeadLetter, MetricsSnapshot};

use crate::agent::{AgentConfig, AgentEvent, AgentLoop, AgentResult};
use crate::config::CodeConfig;
use crate::error::Result;
use crate::llm::{LlmClient, Message};
use crate::queue::{
    ExternalTask, ExternalTaskResult, LaneHandlerConfig, SessionLane, SessionQueueConfig,
    SessionQueueStats,
};
use crate::session_lane_queue::SessionLaneQueue;
use crate::tools::skill::Skill;
use crate::tools::{load_skills, ToolContext, ToolExecutor};

// ============================================================================
// ToolCallResult
// ============================================================================

/// Result of a direct tool execution (no LLM).
///
/// Returned by [`AgentSession::tool()`] when a tool is invoked by name,
/// bypassing the agent loop.
#[derive(Debug, Clone)]
pub struct ToolCallResult {
    /// Name of the tool that was executed.
    pub name: String,
    /// Raw textual output produced by the tool.
    pub output: String,
    /// Exit code as reported by the tool executor.
    pub exit_code: i32,
}

// ============================================================================
// SessionOptions
// ============================================================================

/// Optional per-session overrides.
///
/// Build with [`SessionOptions::new()`] plus the `with_*` builder methods,
/// then pass to [`Agent::session()`].
#[derive(Debug, Clone, Default)]
pub struct SessionOptions {
    /// Override the default model. Format: `"provider/model"` (e.g., `"openai/gpt-4o"`).
    pub model: Option<String>,
    /// Extra directories to scan for skill files (`.md` with YAML frontmatter).
    /// Merged with any global `skill_dirs` from [`CodeConfig`].
    pub skill_dirs: Vec<PathBuf>,
    /// Extra directories to scan for agent files.
    /// Merged with any global `agent_dirs` from [`CodeConfig`].
    pub agent_dirs: Vec<PathBuf>,
    /// Optional queue configuration for lane-based tool execution.
    ///
    /// When set, enables priority-based tool scheduling with parallel execution
    /// of read-only (Query-lane) tools, DLQ, metrics, and external task handling.
    pub queue_config: Option<SessionQueueConfig>,
}

71impl SessionOptions {
72    pub fn new() -> Self {
73        Self::default()
74    }
75
76    pub fn with_model(mut self, model: impl Into<String>) -> Self {
77        self.model = Some(model.into());
78        self
79    }
80
81    pub fn with_skill_dir(mut self, dir: impl Into<PathBuf>) -> Self {
82        self.skill_dirs.push(dir.into());
83        self
84    }
85
86    pub fn with_agent_dir(mut self, dir: impl Into<PathBuf>) -> Self {
87        self.agent_dirs.push(dir.into());
88        self
89    }
90
91    pub fn with_queue_config(mut self, config: SessionQueueConfig) -> Self {
92        self.queue_config = Some(config);
93        self
94    }
95}
96
// ============================================================================
// Agent
// ============================================================================

/// High-level agent facade.
///
/// Holds the LLM client and agent config. Workspace-independent.
/// Use [`Agent::session()`] to bind to a workspace.
pub struct Agent {
    // Shared client for the default model; sessions may substitute another
    // one via `SessionOptions::with_model`.
    llm_client: Arc<dyn LlmClient>,
    // Full parsed configuration (providers, models, global skill dirs).
    code_config: CodeConfig,
    // Agent-loop settings used as the base config for every session.
    config: AgentConfig,
}

111impl std::fmt::Debug for Agent {
112    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
113        f.debug_struct("Agent").finish()
114    }
115}
116
117impl Agent {
118    /// Create from a config file path or inline config string.
119    ///
120    /// Auto-detects: file path (.hcl/.json) vs inline JSON vs inline HCL.
121    pub async fn new(config_source: impl Into<String>) -> Result<Self> {
122        let source = config_source.into();
123        let path = Path::new(&source);
124
125        let config = if path.extension().is_some() && path.exists() {
126            CodeConfig::from_file(path)
127                .with_context(|| format!("Failed to load config: {}", path.display()))?
128        } else {
129            CodeConfig::from_json(&source)
130                .or_else(|_| CodeConfig::from_hcl(&source))
131                .context("Failed to parse config as JSON or HCL")?
132        };
133
134        Self::from_config(config).await
135    }
136
137    /// Create from a config file path or inline config string.
138    ///
139    /// Alias for [`Agent::new()`] — provides a consistent API with
140    /// the Python and Node.js SDKs.
141    pub async fn create(config_source: impl Into<String>) -> Result<Self> {
142        Self::new(config_source).await
143    }
144
145    /// Create from a [`CodeConfig`] struct.
146    pub async fn from_config(config: CodeConfig) -> Result<Self> {
147        let llm_config = config
148            .default_llm_config()
149            .context("default_model must be set in 'provider/model' format with a valid API key")?;
150        let llm_client = crate::llm::create_client_with_config(llm_config);
151
152        let agent_config = AgentConfig {
153            max_tool_rounds: config
154                .max_tool_rounds
155                .unwrap_or(AgentConfig::default().max_tool_rounds),
156            ..AgentConfig::default()
157        };
158
159        Ok(Agent {
160            llm_client,
161            code_config: config,
162            config: agent_config,
163        })
164    }
165
166    /// Bind to a workspace directory, returning an [`AgentSession`].
167    ///
168    /// Pass `None` for defaults, or `Some(SessionOptions)` to override
169    /// the model, skill directories, or agent directories for this session.
170    pub fn session(
171        &self,
172        workspace: impl Into<String>,
173        options: Option<SessionOptions>,
174    ) -> Result<AgentSession> {
175        let opts = options.unwrap_or_default();
176
177        let llm_client = if let Some(ref model) = opts.model {
178            let (provider_name, model_id) = model
179                .split_once('/')
180                .context("model format must be 'provider/model' (e.g., 'openai/gpt-4o')")?;
181
182            let llm_config = self
183                .code_config
184                .llm_config(provider_name, model_id)
185                .with_context(|| {
186                    format!("provider '{provider_name}' or model '{model_id}' not found in config")
187                })?;
188
189            crate::llm::create_client_with_config(llm_config)
190        } else {
191            self.llm_client.clone()
192        };
193
194        self.build_session(workspace.into(), llm_client, &opts)
195    }
196
197    fn build_session(
198        &self,
199        workspace: String,
200        llm_client: Arc<dyn LlmClient>,
201        opts: &SessionOptions,
202    ) -> Result<AgentSession> {
203        let canonical =
204            std::fs::canonicalize(&workspace).unwrap_or_else(|_| PathBuf::from(&workspace));
205
206        let tool_executor = Arc::new(ToolExecutor::new(canonical.display().to_string()));
207        let tool_defs = tool_executor.definitions();
208
209        // Load skills from per-session dirs first, then global dirs
210        let mut skill_filters: Vec<Skill> = Vec::new();
211        for dir in &opts.skill_dirs {
212            skill_filters.extend(load_skills(dir));
213        }
214        for dir in &self.code_config.skill_dirs {
215            skill_filters.extend(load_skills(dir));
216        }
217
218        let config = AgentConfig {
219            tools: tool_defs,
220            skill_tool_filters: skill_filters,
221            ..self.config.clone()
222        };
223
224        // Create lane queue if configured
225        let command_queue = if let Some(ref queue_config) = opts.queue_config {
226            let (event_tx, _) = broadcast::channel(256);
227            let session_id = uuid::Uuid::new_v4().to_string();
228            let rt = tokio::runtime::Handle::try_current();
229            match rt {
230                Ok(handle) => {
231                    // We're inside an async runtime — use block_in_place
232                    let queue = tokio::task::block_in_place(|| {
233                        handle.block_on(SessionLaneQueue::new(
234                            &session_id,
235                            queue_config.clone(),
236                            event_tx,
237                        ))
238                    });
239                    match queue {
240                        Ok(q) => {
241                            // Start the queue
242                            let q = Arc::new(q);
243                            let q2 = Arc::clone(&q);
244                            tokio::task::block_in_place(|| {
245                                handle.block_on(async { q2.start().await.ok() })
246                            });
247                            Some(q)
248                        }
249                        Err(e) => {
250                            tracing::warn!("Failed to create session lane queue: {}", e);
251                            None
252                        }
253                    }
254                }
255                Err(_) => {
256                    tracing::warn!(
257                        "No async runtime available for queue creation — queue disabled"
258                    );
259                    None
260                }
261            }
262        } else {
263            None
264        };
265
266        Ok(AgentSession {
267            llm_client,
268            tool_executor,
269            tool_context: ToolContext::new(canonical.clone()),
270            config,
271            workspace: canonical,
272            history: RwLock::new(Vec::new()),
273            command_queue,
274        })
275    }
276}
277
// ============================================================================
// AgentSession
// ============================================================================

/// Workspace-bound session. All LLM and tool operations happen here.
///
/// History is automatically accumulated after each `send()` call.
/// Use `history()` to retrieve the current conversation log.
pub struct AgentSession {
    // Client chosen at session creation (default or per-session override).
    llm_client: Arc<dyn LlmClient>,
    // Executor used both by the agent loop and the direct tool helpers.
    tool_executor: Arc<ToolExecutor>,
    tool_context: ToolContext,
    config: AgentConfig,
    // Canonicalized workspace root (falls back to the raw path when
    // canonicalization fails).
    workspace: PathBuf,
    /// Internal conversation history, auto-updated after each `send()`.
    history: RwLock<Vec<Message>>,
    /// Optional lane queue for priority-based tool execution with parallelism.
    command_queue: Option<Arc<SessionLaneQueue>>,
}

298impl std::fmt::Debug for AgentSession {
299    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
300        f.debug_struct("AgentSession")
301            .field("workspace", &self.workspace.display().to_string())
302            .finish()
303    }
304}
305
306impl AgentSession {
307    /// Build an `AgentLoop` with the session's configuration.
308    ///
309    /// Propagates the lane queue (if configured) to enable parallel
310    /// Query-lane tool execution.
311    fn build_agent_loop(&self) -> AgentLoop {
312        let mut agent_loop = AgentLoop::new(
313            self.llm_client.clone(),
314            self.tool_executor.clone(),
315            self.tool_context.clone(),
316            self.config.clone(),
317        );
318        if let Some(ref queue) = self.command_queue {
319            agent_loop = agent_loop.with_queue(Arc::clone(queue));
320        }
321        agent_loop
322    }
323
324    /// Send a prompt and wait for the complete response.
325    ///
326    /// When `history` is `None`, uses (and auto-updates) the session's
327    /// internal conversation history. When `Some`, uses the provided
328    /// history instead (the internal history is **not** modified).
329    pub async fn send(&self, prompt: &str, history: Option<&[Message]>) -> Result<AgentResult> {
330        let agent_loop = self.build_agent_loop();
331
332        let use_internal = history.is_none();
333        let effective_history = match history {
334            Some(h) => h.to_vec(),
335            None => self.history.read().unwrap().clone(),
336        };
337
338        let result = agent_loop.execute(&effective_history, prompt, None).await?;
339
340        // Auto-accumulate: only update internal history when no custom
341        // history was provided.
342        if use_internal {
343            *self.history.write().unwrap() = result.messages.clone();
344        }
345
346        Ok(result)
347    }
348
349    /// Send a prompt and stream events back.
350    ///
351    /// When `history` is `None`, uses the session's internal history
352    /// (note: streaming does **not** auto-update internal history since
353    /// the result is consumed asynchronously via the channel).
354    /// When `Some`, uses the provided history instead.
355    pub async fn stream(
356        &self,
357        prompt: &str,
358        history: Option<&[Message]>,
359    ) -> Result<(mpsc::Receiver<AgentEvent>, JoinHandle<()>)> {
360        let (tx, rx) = mpsc::channel(256);
361        let agent_loop = self.build_agent_loop();
362        let effective_history = match history {
363            Some(h) => h.to_vec(),
364            None => self.history.read().unwrap().clone(),
365        };
366        let prompt = prompt.to_string();
367
368        let handle = tokio::spawn(async move {
369            let _ = agent_loop
370                .execute(&effective_history, &prompt, Some(tx))
371                .await;
372        });
373
374        Ok((rx, handle))
375    }
376
377    /// Return a snapshot of the session's conversation history.
378    pub fn history(&self) -> Vec<Message> {
379        self.history.read().unwrap().clone()
380    }
381
382    /// Read a file from the workspace.
383    pub async fn read_file(&self, path: &str) -> Result<String> {
384        let args = serde_json::json!({ "file_path": path });
385        let result = self.tool_executor.execute("read", &args).await?;
386        Ok(result.output)
387    }
388
389    /// Execute a bash command in the workspace.
390    pub async fn bash(&self, command: &str) -> Result<String> {
391        let args = serde_json::json!({ "command": command });
392        let result = self.tool_executor.execute("bash", &args).await?;
393        Ok(result.output)
394    }
395
396    /// Search for files matching a glob pattern.
397    pub async fn glob(&self, pattern: &str) -> Result<Vec<String>> {
398        let args = serde_json::json!({ "pattern": pattern });
399        let result = self.tool_executor.execute("glob", &args).await?;
400        let files: Vec<String> = result
401            .output
402            .lines()
403            .filter(|l| !l.is_empty())
404            .map(|l| l.to_string())
405            .collect();
406        Ok(files)
407    }
408
409    /// Search file contents with a regex pattern.
410    pub async fn grep(&self, pattern: &str) -> Result<String> {
411        let args = serde_json::json!({ "pattern": pattern });
412        let result = self.tool_executor.execute("grep", &args).await?;
413        Ok(result.output)
414    }
415
416    /// Execute a tool by name, bypassing the LLM.
417    pub async fn tool(&self, name: &str, args: serde_json::Value) -> Result<ToolCallResult> {
418        let result = self.tool_executor.execute(name, &args).await?;
419        Ok(ToolCallResult {
420            name: name.to_string(),
421            output: result.output,
422            exit_code: result.exit_code,
423        })
424    }
425
426    // ========================================================================
427    // Queue API
428    // ========================================================================
429
430    /// Returns whether this session has a lane queue configured.
431    pub fn has_queue(&self) -> bool {
432        self.command_queue.is_some()
433    }
434
435    /// Configure a lane's handler mode (Internal/External/Hybrid).
436    ///
437    /// Only effective when a queue is configured via `SessionOptions::with_queue_config`.
438    pub async fn set_lane_handler(&self, lane: SessionLane, config: LaneHandlerConfig) {
439        if let Some(ref queue) = self.command_queue {
440            queue.set_lane_handler(lane, config).await;
441        }
442    }
443
444    /// Complete an external task by ID.
445    ///
446    /// Returns `true` if the task was found and completed, `false` if not found.
447    pub async fn complete_external_task(&self, task_id: &str, result: ExternalTaskResult) -> bool {
448        if let Some(ref queue) = self.command_queue {
449            queue.complete_external_task(task_id, result).await
450        } else {
451            false
452        }
453    }
454
455    /// Get pending external tasks awaiting completion by an external handler.
456    pub async fn pending_external_tasks(&self) -> Vec<ExternalTask> {
457        if let Some(ref queue) = self.command_queue {
458            queue.pending_external_tasks().await
459        } else {
460            Vec::new()
461        }
462    }
463
464    /// Get queue statistics (pending, active, external counts per lane).
465    pub async fn queue_stats(&self) -> SessionQueueStats {
466        if let Some(ref queue) = self.command_queue {
467            queue.stats().await
468        } else {
469            SessionQueueStats::default()
470        }
471    }
472
473    /// Get a metrics snapshot from the queue (if metrics are enabled).
474    pub async fn queue_metrics(&self) -> Option<MetricsSnapshot> {
475        if let Some(ref queue) = self.command_queue {
476            queue.metrics_snapshot().await
477        } else {
478            None
479        }
480    }
481
482    /// Get dead letters from the queue's DLQ (if DLQ is enabled).
483    pub async fn dead_letters(&self) -> Vec<DeadLetter> {
484        if let Some(ref queue) = self.command_queue {
485            queue.dead_letters().await
486        } else {
487            Vec::new()
488        }
489    }
490}
491
// ============================================================================
// Tests
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{ModelConfig, ModelModalities, ProviderConfig};

    // Minimal config with two providers (anthropic + openai), each exposing
    // one tool-calling model. API keys are dummies; no network calls are
    // made by these tests.
    fn test_config() -> CodeConfig {
        CodeConfig {
            default_model: Some("anthropic/claude-sonnet-4-20250514".to_string()),
            providers: vec![
                ProviderConfig {
                    name: "anthropic".to_string(),
                    api_key: Some("test-key".to_string()),
                    base_url: None,
                    models: vec![ModelConfig {
                        id: "claude-sonnet-4-20250514".to_string(),
                        name: "Claude Sonnet 4".to_string(),
                        family: "claude-sonnet".to_string(),
                        api_key: None,
                        base_url: None,
                        attachment: false,
                        reasoning: false,
                        tool_call: true,
                        temperature: true,
                        release_date: None,
                        modalities: ModelModalities::default(),
                        cost: Default::default(),
                        limit: Default::default(),
                    }],
                },
                ProviderConfig {
                    name: "openai".to_string(),
                    api_key: Some("test-openai-key".to_string()),
                    base_url: None,
                    models: vec![ModelConfig {
                        id: "gpt-4o".to_string(),
                        name: "GPT-4o".to_string(),
                        family: "gpt-4".to_string(),
                        api_key: None,
                        base_url: None,
                        attachment: false,
                        reasoning: false,
                        tool_call: true,
                        temperature: true,
                        release_date: None,
                        modalities: ModelModalities::default(),
                        cost: Default::default(),
                        limit: Default::default(),
                    }],
                },
            ],
            ..Default::default()
        }
    }

    #[tokio::test]
    async fn test_from_config() {
        let agent = Agent::from_config(test_config()).await;
        assert!(agent.is_ok());
    }

    #[tokio::test]
    async fn test_session_default() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let session = agent.session("/tmp/test-workspace", None);
        assert!(session.is_ok());
        let debug = format!("{:?}", session.unwrap());
        assert!(debug.contains("AgentSession"));
    }

    #[tokio::test]
    async fn test_session_with_model_override() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let opts = SessionOptions::new().with_model("openai/gpt-4o");
        let session = agent.session("/tmp/test-workspace", Some(opts));
        assert!(session.is_ok());
    }

    // Model overrides must be "provider/model" — a bare model id is rejected.
    #[tokio::test]
    async fn test_session_with_invalid_model_format() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let opts = SessionOptions::new().with_model("gpt-4o");
        let session = agent.session("/tmp/test-workspace", Some(opts));
        assert!(session.is_err());
    }

    #[tokio::test]
    async fn test_session_with_model_not_found() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let opts = SessionOptions::new().with_model("openai/nonexistent");
        let session = agent.session("/tmp/test-workspace", Some(opts));
        assert!(session.is_err());
    }

    #[tokio::test]
    async fn test_new_with_json_string() {
        let json = r#"{
            "defaultModel": "anthropic/claude-sonnet-4-20250514",
            "providers": [{
                "name": "anthropic",
                "apiKey": "test-key",
                "models": [{"id": "claude-sonnet-4-20250514", "name": "Claude Sonnet 4"}]
            }]
        }"#;
        let agent = Agent::new(json).await;
        assert!(agent.is_ok());
    }

    #[tokio::test]
    async fn test_new_with_hcl_string() {
        let hcl = r#"
            default_model = "anthropic/claude-sonnet-4-20250514"
            providers {
                name    = "anthropic"
                api_key = "test-key"
                models {
                    id   = "claude-sonnet-4-20250514"
                    name = "Claude Sonnet 4"
                }
            }
        "#;
        let agent = Agent::new(hcl).await;
        assert!(agent.is_ok());
    }

    #[tokio::test]
    async fn test_create_alias_json() {
        let json = r#"{
            "defaultModel": "anthropic/claude-sonnet-4-20250514",
            "providers": [{
                "name": "anthropic",
                "apiKey": "test-key",
                "models": [{"id": "claude-sonnet-4-20250514", "name": "Claude Sonnet 4"}]
            }]
        }"#;
        let agent = Agent::create(json).await;
        assert!(agent.is_ok());
    }

    #[tokio::test]
    async fn test_create_alias_hcl() {
        let hcl = r#"
            default_model = "anthropic/claude-sonnet-4-20250514"
            providers {
                name    = "anthropic"
                api_key = "test-key"
                models {
                    id   = "claude-sonnet-4-20250514"
                    name = "Claude Sonnet 4"
                }
            }
        "#;
        let agent = Agent::create(hcl).await;
        assert!(agent.is_ok());
    }

    #[tokio::test]
    async fn test_create_and_new_produce_same_result() {
        let json = r#"{
            "defaultModel": "anthropic/claude-sonnet-4-20250514",
            "providers": [{
                "name": "anthropic",
                "apiKey": "test-key",
                "models": [{"id": "claude-sonnet-4-20250514", "name": "Claude Sonnet 4"}]
            }]
        }"#;
        let agent_new = Agent::new(json).await;
        let agent_create = Agent::create(json).await;
        assert!(agent_new.is_ok());
        assert!(agent_create.is_ok());

        // Both should produce working sessions
        let session_new = agent_new.unwrap().session("/tmp/test-ws-new", None);
        let session_create = agent_create.unwrap().session("/tmp/test-ws-create", None);
        assert!(session_new.is_ok());
        assert!(session_create.is_ok());
    }

    #[test]
    fn test_from_config_requires_default_model() {
        let rt = tokio::runtime::Runtime::new().unwrap();
        let config = CodeConfig {
            providers: vec![ProviderConfig {
                name: "anthropic".to_string(),
                api_key: Some("test-key".to_string()),
                base_url: None,
                models: vec![],
            }],
            ..Default::default()
        };
        let result = rt.block_on(Agent::from_config(config));
        assert!(result.is_err());
    }

    #[tokio::test]
    async fn test_history_empty_on_new_session() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let session = agent.session("/tmp/test-workspace", None).unwrap();
        assert!(session.history().is_empty());
    }

    #[tokio::test]
    async fn test_session_options_with_skill_dir() {
        let opts = SessionOptions::new()
            .with_skill_dir("/tmp/skills")
            .with_skill_dir("/tmp/more-skills");
        assert_eq!(opts.skill_dirs.len(), 2);
        assert_eq!(opts.skill_dirs[0], PathBuf::from("/tmp/skills"));
        assert_eq!(opts.skill_dirs[1], PathBuf::from("/tmp/more-skills"));
    }

    #[tokio::test]
    async fn test_session_options_with_agent_dir() {
        let opts = SessionOptions::new()
            .with_agent_dir("/tmp/agents")
            .with_agent_dir("/tmp/more-agents");
        assert_eq!(opts.agent_dirs.len(), 2);
        assert_eq!(opts.agent_dirs[0], PathBuf::from("/tmp/agents"));
        assert_eq!(opts.agent_dirs[1], PathBuf::from("/tmp/more-agents"));
    }

    #[tokio::test]
    async fn test_session_options_combined() {
        let opts = SessionOptions::new()
            .with_model("openai/gpt-4o")
            .with_skill_dir("/tmp/skills")
            .with_agent_dir("/tmp/agents");
        assert_eq!(opts.model.as_deref(), Some("openai/gpt-4o"));
        assert_eq!(opts.skill_dirs.len(), 1);
        assert_eq!(opts.agent_dirs.len(), 1);
    }

    #[tokio::test]
    async fn test_session_with_skill_dirs() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        // Non-existent dir is fine — load_skills handles it gracefully
        let opts = SessionOptions::new().with_skill_dir("/tmp/nonexistent-skills");
        let session = agent.session("/tmp/test-workspace", Some(opts));
        assert!(session.is_ok());
    }

    #[tokio::test]
    async fn test_session_with_real_skill_dir() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let tmp = tempfile::tempdir().unwrap();
        let skill_dir = tmp.path().join("skills");
        std::fs::create_dir_all(&skill_dir).unwrap();

        // Write a valid skill file
        std::fs::write(
            skill_dir.join("test-skill.md"),
            "---\nname: test-skill\ndescription: A test skill\nallowed-tools: Bash(*)\n---\nTest instructions.\n",
        ).unwrap();

        let opts = SessionOptions::new().with_skill_dir(&skill_dir);
        let session = agent.session("/tmp/test-workspace", Some(opts)).unwrap();
        // Verify the skill was loaded into the config
        assert_eq!(session.config.skill_tool_filters.len(), 1);
        assert_eq!(session.config.skill_tool_filters[0].name, "test-skill");
    }

    // ========================================================================
    // Queue Integration Tests
    //
    // Queue creation uses `tokio::task::block_in_place`, which requires the
    // multi-thread runtime flavor — hence `flavor = "multi_thread"` on every
    // test that actually constructs a queue.
    // ========================================================================

    #[test]
    fn test_session_options_with_queue_config() {
        let qc = SessionQueueConfig::default().with_lane_features();
        let opts = SessionOptions::new().with_queue_config(qc.clone());
        assert!(opts.queue_config.is_some());

        let config = opts.queue_config.unwrap();
        assert!(config.enable_dlq);
        assert!(config.enable_metrics);
        assert!(config.enable_alerts);
        assert_eq!(config.default_timeout_ms, Some(60_000));
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_session_with_queue_config() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let qc = SessionQueueConfig::default();
        let opts = SessionOptions::new().with_queue_config(qc);
        let session = agent.session("/tmp/test-workspace-queue", Some(opts));
        assert!(session.is_ok());
        let session = session.unwrap();
        assert!(session.has_queue());
    }

    #[tokio::test]
    async fn test_session_without_queue_config() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let session = agent.session("/tmp/test-workspace-noqueue", None).unwrap();
        assert!(!session.has_queue());
    }

    #[tokio::test]
    async fn test_session_queue_stats_without_queue() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let session = agent.session("/tmp/test-workspace-stats", None).unwrap();
        let stats = session.queue_stats().await;
        // Without a queue, stats should have zero values
        assert_eq!(stats.total_pending, 0);
        assert_eq!(stats.total_active, 0);
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_session_queue_stats_with_queue() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let qc = SessionQueueConfig::default();
        let opts = SessionOptions::new().with_queue_config(qc);
        let session = agent
            .session("/tmp/test-workspace-qstats", Some(opts))
            .unwrap();
        let stats = session.queue_stats().await;
        // Fresh queue with no commands should have zero stats
        assert_eq!(stats.total_pending, 0);
        assert_eq!(stats.total_active, 0);
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_session_pending_external_tasks_empty() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let qc = SessionQueueConfig::default();
        let opts = SessionOptions::new().with_queue_config(qc);
        let session = agent
            .session("/tmp/test-workspace-ext", Some(opts))
            .unwrap();
        let tasks = session.pending_external_tasks().await;
        assert!(tasks.is_empty());
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_session_dead_letters_empty() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let qc = SessionQueueConfig::default().with_dlq(Some(100));
        let opts = SessionOptions::new().with_queue_config(qc);
        let session = agent
            .session("/tmp/test-workspace-dlq", Some(opts))
            .unwrap();
        let dead = session.dead_letters().await;
        assert!(dead.is_empty());
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_session_queue_metrics_disabled() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        // Metrics not enabled
        let qc = SessionQueueConfig::default();
        let opts = SessionOptions::new().with_queue_config(qc);
        let session = agent
            .session("/tmp/test-workspace-nomet", Some(opts))
            .unwrap();
        let metrics = session.queue_metrics().await;
        assert!(metrics.is_none());
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_session_queue_metrics_enabled() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let qc = SessionQueueConfig::default().with_metrics();
        let opts = SessionOptions::new().with_queue_config(qc);
        let session = agent
            .session("/tmp/test-workspace-met", Some(opts))
            .unwrap();
        let metrics = session.queue_metrics().await;
        assert!(metrics.is_some());
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_session_set_lane_handler() {
        let agent = Agent::from_config(test_config()).await.unwrap();
        let qc = SessionQueueConfig::default();
        let opts = SessionOptions::new().with_queue_config(qc);
        let session = agent
            .session("/tmp/test-workspace-handler", Some(opts))
            .unwrap();

        // Set Execute lane to External mode
        session
            .set_lane_handler(
                SessionLane::Execute,
                LaneHandlerConfig {
                    mode: crate::queue::TaskHandlerMode::External,
                    timeout_ms: 30_000,
                },
            )
            .await;

        // No panic = success. The handler config is stored internally.
        // We can't directly read it back but we verify no errors.
    }
}