use crate::command::chat::permission::JcliConfig;
use crate::command::chat::storage::{ChatMessage, ModelProvider};
use crate::command::chat::tools::agent_shared::{
AgentToolShared, call_llm_non_stream, create_runtime_and_client, execute_tool_with_permission,
extract_tool_items,
};
use crate::command::chat::tools::{
PlanDecision, Tool, ToolRegistry, ToolResult, parse_tool_args, schema_to_tool_params,
};
use crate::util::log::write_info_log;
use crate::util::safe_lock;
use async_openai::types::chat::ChatCompletionTools;
use schemars::JsonSchema;
use serde::Deserialize;
use serde_json::{Value, json};
use std::sync::{
Arc, Mutex,
atomic::{AtomicBool, Ordering},
};
/// Shared snapshot handles for an in-flight sub-agent:
/// `(system prompt text, message transcript)` — mirrored by
/// `run_headless_agent_loop` so outside observers can inspect progress.
type SubAgentSnapshotRefs = (Arc<Mutex<String>>, Arc<Mutex<Vec<ChatMessage>>>);
/// Arguments of the `Agent` tool, deserialized from the model's JSON call.
#[derive(Deserialize, JsonSchema)]
struct AgentParams {
    /// Task prompt forwarded to the sub-agent as its first user message.
    prompt: String,
    /// Short human-readable task summary; defaults to "sub-agent task".
    #[serde(default)]
    description: Option<String>,
    /// When true, run on a background thread and return a task id immediately.
    #[serde(default)]
    run_in_background: bool,
    /// When true, run the sub-agent inside a freshly created git worktree.
    #[serde(default)]
    worktree: bool,
    /// When true, the sub-agent runs with a cloned config whose
    /// `permissions.allow_all` flag is set.
    #[serde(default)]
    inherit_permissions: bool,
}
/// Tool that launches an autonomous sub-agent with a fresh context.
#[allow(dead_code)]
pub struct AgentTool {
    /// State shared with the parent chat session: provider, system prompt,
    /// tool registry builder, permission config, background-task manager and
    /// sub-agent tracker.
    pub shared: AgentToolShared,
}
impl AgentTool {
    /// Registered tool name; also pushed onto the disabled list for
    /// sub-registries to prevent recursive agent spawning.
    pub const NAME: &'static str = "Agent";
}
impl Tool for AgentTool {
    /// Tool name as advertised to the model.
    fn name(&self) -> &str {
        Self::NAME
    }

    /// Usage guidance for the model, returned verbatim.
    fn description(&self) -> &str {
        r#"
Launch a sub-agent to handle complex, multi-step tasks autonomously.
The sub-agent runs with a fresh context (system prompt + your prompt as user message).
It can use all tools except Agent (to prevent recursion).
When NOT to use the Agent tool:
- If you want to read a specific file path, use Read or Glob instead
- If you are searching for a specific class/function definition, use Grep or Glob instead
- If you are searching code within a specific file or 2-3 files, use Read instead
Usage notes:
- Always include a short description (3-5 words) summarizing what the agent will do
- The result returned by the agent is not visible to the user. To show the user the result, send a text message with a concise summary
- Use foreground (default) when you need the agent's results before proceeding
- Use background when you have genuinely independent work to do in parallel
- Clearly tell the agent whether you expect it to write code or just do research (search, file reads, web fetches, etc.)
- Provide clear, detailed prompts so the agent can work autonomously — explain what you're trying to accomplish, what you've already learned, and give enough context for the agent to make judgment calls
"#
    }

    /// JSON schema generated from `AgentParams`.
    fn parameters_schema(&self) -> Value {
        schema_to_tool_params::<AgentParams>()
    }

    /// Launch the sub-agent: either on a background thread (returns a task id
    /// immediately) or inline (blocks until the headless loop finishes).
    ///
    /// `cancelled` is the parent's cancellation flag; the sub-agent loop polls
    /// it each round.
    fn execute(&self, arguments: &str, cancelled: &Arc<AtomicBool>) -> ToolResult {
        let params: AgentParams = match parse_tool_args(arguments) {
            Ok(p) => p,
            Err(e) => return e,
        };
        let prompt = params.prompt;
        let description = params
            .description
            .unwrap_or_else(|| "sub-agent task".to_string());
        let run_in_background = params.run_in_background;
        let use_worktree = params.worktree;
        let provider = safe_lock(&self.shared.provider, "AgentTool::provider").clone();
        let system_prompt =
            safe_lock(&self.shared.system_prompt, "AgentTool::system_prompt").clone();
        // Sub-agents get their own registry with "Agent" disabled so they
        // cannot recursively spawn further agents.
        let (sub_registry, _) = self.shared.build_sub_registry();
        let sub_registry = Arc::new(sub_registry);
        let mut disabled = self.shared.disabled_tools.as_ref().clone();
        disabled.push("Agent".to_string());
        let tools = sub_registry.to_openai_tools_filtered(&disabled);
        let jcli_config = if params.inherit_permissions {
            // NOTE(review): "inherit_permissions" is implemented as a cloned
            // config with allow_all forced on — confirm this matches the
            // intended permission model rather than copying parent grants.
            let mut cfg = self.shared.jcli_config.as_ref().clone();
            cfg.permissions.allow_all = true;
            Arc::new(cfg)
        } else {
            Arc::clone(&self.shared.jcli_config)
        };
        // Optionally isolate the sub-agent in a dedicated git worktree;
        // creation failure aborts the tool call with an error result.
        let worktree_info: Option<(std::path::PathBuf, String)> = if use_worktree {
            match crate::command::chat::tools::worktree::create_agent_worktree(&description) {
                Ok(info) => Some(info),
                Err(e) => {
                    return ToolResult {
                        output: format!("创建 worktree 失败: {}", e),
                        is_error: true,
                        images: vec![],
                        plan_decision: PlanDecision::None,
                    };
                }
            }
        } else {
            None
        };
        if run_in_background {
            // Register a background task, then run the loop on its own thread.
            let (task_id, output_buffer) = self.shared.background_manager.spawn_command(
                &format!("Agent: {}", description),
                None,
                0,
            );
            self.shared.sub_agent_tracker.gc_finished();
            let (_snap_id, snap_running, snap_system_prompt, snap_messages) = self
                .shared
                .sub_agent_tracker
                .register(&description, "background");
            let bg_manager = Arc::clone(&self.shared.background_manager);
            let task_id_clone = task_id.clone();
            // The clone is required here: the Arc moves into the thread.
            let cancelled_clone = Arc::clone(cancelled);
            std::thread::spawn(move || {
                if let Some((ref wt_path, _)) = worktree_info {
                    crate::command::chat::teammate::set_thread_cwd(wt_path);
                }
                let result = run_headless_agent_loop(
                    provider,
                    system_prompt,
                    prompt,
                    tools,
                    sub_registry,
                    jcli_config,
                    &cancelled_clone,
                    Some((Arc::clone(&snap_system_prompt), Arc::clone(&snap_messages))),
                );
                snap_running.store(false, Ordering::Relaxed);
                if let Some((ref wt_path, ref branch)) = worktree_info {
                    crate::command::chat::tools::worktree::remove_agent_worktree(wt_path, branch);
                }
                // Publish the final text to the task's output buffer before
                // marking the task complete.
                {
                    let mut buf = safe_lock(&output_buffer, "AgentTool::bg_output");
                    buf.push_str(&result);
                }
                bg_manager.complete_task(&task_id_clone, "completed", result);
            });
            ToolResult {
                output: json!({
                    "task_id": task_id,
                    "description": description,
                    "status": "running in background"
                })
                .to_string(),
                is_error: false,
                images: vec![],
                plan_decision: PlanDecision::None,
            }
        } else {
            // Foreground: temporarily switch the thread cwd when a worktree is
            // in use, run to completion, then restore the previous cwd.
            let old_cwd = crate::command::chat::teammate::thread_cwd();
            if let Some((ref wt_path, _)) = worktree_info {
                crate::command::chat::teammate::set_thread_cwd(wt_path);
            }
            self.shared.sub_agent_tracker.gc_finished();
            let (_snap_id, snap_running, snap_system_prompt, snap_messages) = self
                .shared
                .sub_agent_tracker
                .register(&description, "foreground");
            // Fix: pass `cancelled` directly — the previous `Arc::clone` into a
            // local was redundant (clippy::redundant_clone); the loop only
            // borrows the flag.
            let result = run_headless_agent_loop(
                provider,
                system_prompt,
                prompt,
                tools,
                sub_registry,
                jcli_config,
                cancelled,
                Some((Arc::clone(&snap_system_prompt), Arc::clone(&snap_messages))),
            );
            snap_running.store(false, Ordering::Relaxed);
            if let Some((ref wt_path, ref branch)) = worktree_info {
                crate::command::chat::tools::worktree::remove_agent_worktree(wt_path, branch);
            }
            match old_cwd {
                Some(p) => crate::command::chat::teammate::set_thread_cwd(&p),
                None => crate::command::chat::teammate::clear_thread_cwd(),
            }
            ToolResult {
                output: result,
                is_error: false,
                images: vec![],
                plan_decision: PlanDecision::None,
            }
        }
    }

    /// Launching a sub-agent never prompts the user for confirmation.
    fn requires_confirmation(&self) -> bool {
        false
    }
}
/// Run a headless (no-UI) agent loop: repeatedly call the LLM, execute any
/// requested tool calls with permission checks, and feed the results back
/// until the model stops asking for tools, the round budget is exhausted, or
/// `cancelled` is set.
///
/// Returns the last non-empty assistant reply, or a placeholder string when
/// the model produced no text. When `snapshot` is provided, the system prompt
/// and the evolving message transcript are mirrored into it so callers can
/// inspect sub-agent progress.
#[allow(clippy::too_many_arguments)]
fn run_headless_agent_loop(
    provider: ModelProvider,
    system_prompt: Option<String>,
    prompt: String,
    tools: Vec<ChatCompletionTools>,
    registry: Arc<ToolRegistry>,
    jcli_config: Arc<JcliConfig>,
    cancelled: &Arc<AtomicBool>,
    snapshot: Option<SubAgentSnapshotRefs>,
) -> String {
    // Hard cap on LLM round-trips to prevent runaway sub-agents.
    let max_rounds = 30;
    let (rt, client) = match create_runtime_and_client(&provider) {
        Ok(pair) => pair,
        Err(e) => return e,
    };
    // Mirror the system prompt into the snapshot (best effort: a poisoned
    // lock is silently skipped).
    if let Some((ref sp_snap, _)) = snapshot
        && let Ok(mut sp) = sp_snap.lock()
    {
        *sp = system_prompt.clone().unwrap_or_default();
    }
    let mut messages: Vec<ChatMessage> = vec![ChatMessage {
        role: "user".to_string(),
        content: prompt,
        tool_calls: None,
        tool_call_id: None,
        images: None,
    }];
    // Copy the transcript into the shared snapshot after each mutation.
    // Fix: take a slice instead of `&Vec<_>` (clippy::ptr_arg shape).
    let sync_messages = |msgs: &[ChatMessage]| {
        if let Some((_, ref msgs_snap)) = snapshot
            && let Ok(mut snap) = msgs_snap.lock()
        {
            *snap = msgs.to_vec();
        }
    };
    sync_messages(&messages);
    let mut final_text = String::new();
    for round in 0..max_rounds {
        if cancelled.load(Ordering::Relaxed) {
            return format!("{}\n[Sub-agent cancelled]", final_text);
        }
        write_info_log("SubAgent", &format!("Round {}/{}", round + 1, max_rounds));
        let choice = match call_llm_non_stream(
            &rt,
            &client,
            &provider,
            &messages,
            &tools,
            system_prompt.as_deref(),
        ) {
            Ok(c) => c,
            Err(e) => return format!("{}\n{}", final_text, e),
        };
        let assistant_text = choice.message.content.clone().unwrap_or_default();
        if !assistant_text.is_empty() {
            // Only the latest non-empty reply is kept; earlier text is superseded.
            final_text = assistant_text.clone();
            write_info_log("SubAgent", &format!("Reply: {}", &final_text));
        }
        let is_tool_calls = matches!(
            choice.finish_reason,
            Some(async_openai::types::chat::FinishReason::ToolCalls)
        );
        // Fix: replace the check-then-`unwrap()` pattern with `Option::filter`
        // + `let…else`, removing the panic path entirely. Semantics unchanged:
        // break unless the model finished with ToolCalls AND supplied them.
        let Some(tool_calls) = choice.message.tool_calls.as_ref().filter(|_| is_tool_calls)
        else {
            break;
        };
        let tool_items = extract_tool_items(tool_calls);
        if tool_items.is_empty() {
            break;
        }
        // Record the assistant turn (with its tool calls), then one tool
        // result message per call, preserving order.
        messages.push(ChatMessage {
            role: "assistant".to_string(),
            content: assistant_text,
            tool_calls: Some(tool_items.clone()),
            tool_call_id: None,
            images: None,
        });
        for item in &tool_items {
            let result_msg = execute_tool_with_permission(
                item,
                &registry,
                &jcli_config,
                cancelled,
                "SubAgent",
                true,
            );
            messages.push(result_msg);
        }
        sync_messages(&messages);
    }
    if final_text.is_empty() {
        "[Sub-agent completed with no text output]".to_string()
    } else {
        final_text
    }
}