use super::{
    BranchObservation, BranchRuntimeState, CacheConfig, CacheStats, CollapseController,
    CollapsePolicy, DecompositionStrategy, ExecutionMode, StageStats, SwarmCache, SwarmConfig,
    SwarmResult,
    kubernetes_executor::{
        RemoteSubtaskPayload, SWARM_SUBTASK_PAYLOAD_ENV, encode_payload, latest_probe_from_logs,
        probe_changed_files_set, result_from_logs,
    },
    orchestrator::Orchestrator,
    result_store::ResultStore,
    subtask::{SubTask, SubTaskResult, SubTaskStatus},
};
use crate::bus::{AgentBus, BusMessage};
use crate::k8s::{K8sManager, SubagentPodSpec, SubagentPodState};
use crate::tui::swarm_view::{AgentMessageEntry, AgentToolCallDetail, SubTaskInfo, SwarmEvent};

pub use super::SwarmMessage;
use crate::{
    agent::Agent,
    provenance::{ExecutionOrigin, ExecutionProvenance},
    provider::{CompletionRequest, ContentPart, FinishReason, Message, Provider, Role},
    rlm::RlmExecutor,
    session::helper::runtime::enrich_tool_input_with_runtime_context,
    swarm::{SwarmArtifact, SwarmStats},
    telemetry::SwarmTelemetryCollector,
    tool::ToolRegistry,
    worktree::{WorktreeInfo, WorktreeManager},
};
use anyhow::Result;
use futures::stream::{FuturesUnordered, StreamExt};
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::{RwLock, mpsc};
use tokio::task::AbortHandle;
use tokio::time::{Duration, MissedTickBehavior, timeout};

/// Default assumed context window size, in tokens.
const DEFAULT_CONTEXT_LIMIT: usize = 256_000;

/// Tokens reserved for the model's response when computing the truncation budget.
const RESPONSE_RESERVE_TOKENS: usize = 8_192;

/// Fraction of the context window that history may fill before truncation kicks in.
const TRUNCATION_THRESHOLD: f64 = 0.85;

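/// Rough token estimate from character count, assuming ~3.5 characters per
/// token (a heuristic for mixed code and prose; not a real tokenizer).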
fn estimate_tokens(text: &str) -> usize {
    (text.len() as f64 / 3.5).ceil() as usize
}

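/// Estimates the token footprint of a single message, including small fixed
/// overheads per message and per tool call/result (heuristic constants).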
fn estimate_message_tokens(message: &Message) -> usize {
    // Per-message overhead for role/formatting tokens.
    let mut tokens = 4;
    for part in &message.content {
        tokens += match part {
            ContentPart::Text { text } => estimate_tokens(text),
            ContentPart::ToolCall {
                id,
                name,
                arguments,
                ..
            } => estimate_tokens(id) + estimate_tokens(name) + estimate_tokens(arguments) + 10,
            ContentPart::ToolResult {
                tool_call_id,
                content,
            } => estimate_tokens(tool_call_id) + estimate_tokens(content) + 6,
            // Binary attachments get a flat estimate; their encoded size is unknown here.
            ContentPart::Image { .. } | ContentPart::File { .. } => 2000,
            ContentPart::Thinking { text } => estimate_tokens(text),
        };
    }

    tokens
}

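/// Sums the estimated token counts of all messages.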
fn estimate_total_tokens(messages: &[Message]) -> usize {
    messages.iter().map(estimate_message_tokens).sum()
}

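/// Truncates conversation history in place so it fits the context window.
/// Strategy: first shrink oversized tool results; if still over budget, drop
/// middle messages (keeping the opening exchange and the most recent turns)
/// and replace them with a one-message summary.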
fn truncate_messages_to_fit(messages: &mut Vec<Message>, context_limit: usize) {
    // saturating_sub guards against underflow when the context limit is tiny.
    let target_tokens = (((context_limit as f64) * TRUNCATION_THRESHOLD) as usize)
        .saturating_sub(RESPONSE_RESERVE_TOKENS);

    let current_tokens = estimate_total_tokens(messages);
    if current_tokens <= target_tokens {
        return;
    }

    tracing::warn!(
        current_tokens = current_tokens,
        target_tokens = target_tokens,
        context_limit = context_limit,
        "Context approaching limit, truncating conversation history"
    );

    // First pass: shrink oversized tool results before dropping whole messages.
    truncate_large_tool_results(messages, 2000);
    let after_tool_truncation = estimate_total_tokens(messages);
    if after_tool_truncation <= target_tokens {
        tracing::info!(
            old_tokens = current_tokens,
            new_tokens = after_tool_truncation,
            "Truncated large tool results, context now within limits"
        );
        return;
    }

    // Too few messages to drop any without losing essential context.
    if messages.len() <= 6 {
        tracing::warn!(
            tokens = after_tool_truncation,
            target = target_tokens,
            "Cannot truncate further - conversation too short"
        );
        return;
    }

    // Keep the opening exchange and the most recent messages.
    let keep_start = 2;
    let keep_end = 4;
    let removable_count = messages.len() - keep_start - keep_end;

    if removable_count == 0 {
        return;
    }

    let removed_messages: Vec<_> = messages
        .drain(keep_start..keep_start + removable_count)
        .collect();
    let summary = summarize_removed_messages(&removed_messages);

    messages.insert(
        keep_start,
        Message {
            role: Role::User,
            content: vec![ContentPart::Text {
                text: format!(
                    "[Context truncated: {} earlier messages removed to fit context window]\n{}",
                    removed_messages.len(),
                    summary
                ),
            }],
        },
    );

    let new_tokens = estimate_total_tokens(messages);
    tracing::info!(
        removed_messages = removed_messages.len(),
        old_tokens = current_tokens,
        new_tokens = new_tokens,
        "Truncated conversation history"
    );
}

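/// Builds a short summary of removed messages (currently the set of tool names used).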
fn summarize_removed_messages(messages: &[Message]) -> String {
    let mut summary = String::new();
    let mut tool_calls: Vec<String> = Vec::new();

    for msg in messages {
        for part in &msg.content {
            if let ContentPart::ToolCall { name, .. } = part
                && !tool_calls.contains(name)
            {
                tool_calls.push(name.clone());
            }
        }
    }

    if !tool_calls.is_empty() {
        summary.push_str(&format!(
            "Tools used in truncated history: {}",
            tool_calls.join(", ")
        ));
    }

    summary
}

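/// Shrinks any tool result whose estimated size exceeds `max_tokens_per_result`.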
fn truncate_large_tool_results(messages: &mut [Message], max_tokens_per_result: usize) {
    // ~3 chars per token, slightly tighter than the 3.5 used for estimation.
    let char_limit = max_tokens_per_result * 3;
    let mut truncated_count = 0;
    let mut saved_tokens = 0usize;

    for message in messages.iter_mut() {
        for part in message.content.iter_mut() {
            if let ContentPart::ToolResult { content, .. } = part {
                let tokens = estimate_tokens(content);
                if tokens > max_tokens_per_result {
                    let old_len = content.len();
                    *content = truncate_single_result(content, char_limit);
                    saved_tokens += tokens.saturating_sub(estimate_tokens(content));
                    if content.len() < old_len {
                        truncated_count += 1;
                    }
                }
            }
        }
    }

    if truncated_count > 0 {
        tracing::info!(
            truncated_count = truncated_count,
            saved_tokens = saved_tokens,
            max_tokens_per_result = max_tokens_per_result,
            "Truncated large tool results"
        );
    }
}

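/// Truncates a string to at most `max_chars`, cutting on a UTF-8 character
/// boundary (preferring the last newline) and appending a truncation notice.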
fn truncate_single_result(content: &str, max_chars: usize) -> String {
    if content.len() <= max_chars {
        return content.to_string();
    }

    // Back off until the cut lands on a UTF-8 character boundary.
    let safe_limit = {
        let mut limit = max_chars.min(content.len());
        while limit > 0 && !content.is_char_boundary(limit) {
            limit -= 1;
        }
        limit
    };

    // Prefer breaking at the last newline before the limit.
    let break_point = content[..safe_limit].rfind('\n').unwrap_or(safe_limit);

    let truncated = format!(
        "{}...\n\n[OUTPUT TRUNCATED: {} → {} chars to fit context limit]",
        &content[..break_point],
        content.len(),
        break_point
    );

    tracing::debug!(
        original_len = content.len(),
        truncated_len = truncated.len(),
        "Truncated large result"
    );

    truncated
}

/// Results larger than this are summarized via RLM instead of simple truncation.
const RLM_THRESHOLD_CHARS: usize = 50_000;

/// Character budget for plain truncation of mid-sized results.
const SIMPLE_TRUNCATE_CHARS: usize = 6000;

/// How often the collapse controller samples branch state, in seconds.
const COLLAPSE_SAMPLE_SECS: u64 = 5;
const SWARM_FALLBACK_PROMPT_ENV: &str = "CODETETHER_SWARM_FALLBACK_PROMPT";
const SWARM_FALLBACK_MODEL_ENV: &str = "CODETETHER_SWARM_FALLBACK_MODEL";
/// Host environment variables forwarded into Kubernetes sub-agent pods.
const K8S_PASSTHROUGH_ENV_VARS: &[&str] = &[
    "VAULT_ADDR",
    "VAULT_TOKEN",
    "VAULT_MOUNT",
    "VAULT_SECRETS_PATH",
    "VAULT_NAMESPACE",
    "CODETETHER_AUTH_TOKEN",
];

/// Tracks a sub-agent pod that is still running in Kubernetes.
#[derive(Debug, Clone)]
struct ActiveK8sBranch {
    branch: String,
    started_at: Instant,
}

/// Why the inner agent loop stopped.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AgentLoopExit {
    Completed,
    MaxStepsReached,
    TimedOut,
}

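/// Computes an exponential backoff delay: `initial_delay_ms * multiplier^attempt`,
/// capped at `max_delay_ms`.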
fn calculate_backoff_delay(
    attempt: u32,
    initial_delay_ms: u64,
    max_delay_ms: u64,
    multiplier: f64,
) -> Duration {
    let delay_ms =
        (initial_delay_ms as f64 * multiplier.powi(attempt as i32)).min(max_delay_ms as f64);
    Duration::from_millis(delay_ms as u64)
}

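/// Scores a pod's health as `(score in 0.0..=1.0, unhealthy-signal count)`.
/// Fatal conditions (OOMKilled, image pull failures, crash loops, a Failed
/// phase) short-circuit to a near-zero score; softer signals subtract from 1.0.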
fn compute_resource_health(pod_state: Option<&SubagentPodState>) -> (f32, u32) {
    // No pod state at all is itself a weak negative signal.
    let Some(pod_state) = pod_state else {
        return (0.2, 1);
    };

    let reason = pod_state
        .reason
        .as_deref()
        .unwrap_or_default()
        .to_ascii_lowercase();
    let phase = pod_state.phase.to_ascii_lowercase();

    if reason.contains("oomkilled") {
        return (0.0, 3);
    }
    if reason.contains("imagepullbackoff") || reason.contains("errimagepull") {
        return (0.0, 3);
    }
    if reason.contains("crashloopbackoff") {
        return (0.1, 2);
    }
    if phase == "failed" {
        return (0.1, 2);
    }

    // Start healthy and subtract for each degradation signal.
    let mut score = 1.0f32;
    let mut unhealthy_signals = 0u32;

    if !pod_state.ready {
        score -= 0.2;
    }
    if !reason.is_empty() {
        score -= 0.3;
        unhealthy_signals += 1;
    }
    if pod_state.restart_count > 0 {
        score -= (pod_state.restart_count.min(3) as f32) * 0.2;
        unhealthy_signals += 1;
    }

    (score.clamp(0.0, 1.0), unhealthy_signals)
}

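/// Compacts a large tool result: small results pass through unchanged,
/// mid-sized results are truncated, and very large results are summarized with
/// the RLM executor (falling back to truncation if analysis fails).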
async fn process_large_result_with_rlm(
    content: &str,
    tool_name: &str,
    provider: Arc<dyn Provider>,
    model: &str,
) -> String {
    if content.len() <= SIMPLE_TRUNCATE_CHARS {
        return content.to_string();
    }

    // Mid-sized results get plain truncation; RLM is reserved for the largest.
    if content.len() <= RLM_THRESHOLD_CHARS {
        return truncate_single_result(content, SIMPLE_TRUNCATE_CHARS);
    }

    tracing::info!(
        tool = %tool_name,
        content_len = content.len(),
        "Using RLM to process large tool result"
    );

    let query = format!(
        "Summarize the key information from this {} output. \
         Focus on: errors, warnings, important findings, and actionable items. \
         Be concise but thorough.",
        tool_name
    );

    let mut executor =
        RlmExecutor::new(content.to_string(), provider, model.to_string()).with_max_iterations(3);

    match executor.analyze(&query).await {
        Ok(result) => {
            // Bound the summary itself so a verbose answer cannot blow the budget.
            let bounded_answer = truncate_single_result(&result.answer, SIMPLE_TRUNCATE_CHARS * 2);
            tracing::info!(
                tool = %tool_name,
                original_len = content.len(),
                summary_len = bounded_answer.len(),
                iterations = result.iterations,
                "RLM summarized large result"
            );

            format!(
                "[RLM Summary of {} output ({} chars → {} chars)]\n\n{}",
                tool_name,
                content.len(),
                bounded_answer.len(),
                bounded_answer
            )
        }
        Err(e) => {
            tracing::warn!(
                tool = %tool_name,
                error = %e,
                "RLM analysis failed, falling back to truncation"
            );
            truncate_single_result(content, SIMPLE_TRUNCATE_CHARS)
        }
    }
}

/// Executes swarm tasks by decomposing them into subtasks and running
/// sub-agents in parallel, stage by stage.
pub struct SwarmExecutor {
    config: SwarmConfig,
    /// Optional coordinator agent shared with the host session.
    coordinator_agent: Option<Arc<tokio::sync::Mutex<Agent>>>,
    /// Channel for streaming swarm events to the UI.
    event_tx: Option<mpsc::Sender<SwarmEvent>>,
    /// Telemetry collector shared across the swarm run.
    telemetry: Arc<tokio::sync::Mutex<SwarmTelemetryCollector>>,
    /// Optional result cache keyed by subtask content.
    cache: Option<Arc<tokio::sync::Mutex<SwarmCache>>>,
    /// Shared store sub-agents use to publish and query intermediate results.
    result_store: Arc<ResultStore>,
    /// Optional inter-agent message bus.
    bus: Option<Arc<AgentBus>>,
}

impl SwarmExecutor {
    /// Creates an executor with the given configuration and no cache.
    pub fn new(config: SwarmConfig) -> Self {
        Self {
            config,
            coordinator_agent: None,
            event_tx: None,
            telemetry: Arc::new(tokio::sync::Mutex::new(SwarmTelemetryCollector::default())),
            cache: None,
            result_store: ResultStore::new_arc(),
            bus: None,
        }
    }

    /// Creates an executor backed by a freshly initialized result cache.
    pub async fn with_cache(config: SwarmConfig, cache_config: CacheConfig) -> Result<Self> {
        let cache = SwarmCache::new(cache_config).await?;
        Ok(Self {
            config,
            coordinator_agent: None,
            event_tx: None,
            telemetry: Arc::new(tokio::sync::Mutex::new(SwarmTelemetryCollector::default())),
            cache: Some(Arc::new(tokio::sync::Mutex::new(cache))),
            result_store: ResultStore::new_arc(),
            bus: None,
        })
    }

    /// Uses an existing shared cache instance.
    pub fn with_cache_instance(mut self, cache: Arc<tokio::sync::Mutex<SwarmCache>>) -> Self {
        self.cache = Some(cache);
        self
    }

    /// Attaches an inter-agent message bus.
    pub fn with_bus(mut self, bus: Arc<AgentBus>) -> Self {
        self.bus = Some(bus);
        self
    }

    pub fn bus(&self) -> Option<&Arc<AgentBus>> {
        self.bus.as_ref()
    }

    /// Attaches a channel for streaming swarm events to the UI.
    pub fn with_event_tx(mut self, tx: mpsc::Sender<SwarmEvent>) -> Self {
        self.event_tx = Some(tx);
        self
    }

    /// Shares the host session's agent with the swarm as coordinator.
    pub fn with_coordinator_agent(mut self, agent: Arc<tokio::sync::Mutex<Agent>>) -> Self {
        tracing::debug!("Setting coordinator agent for swarm execution");
        self.coordinator_agent = Some(agent);
        self
    }

    /// Uses an externally owned telemetry collector.
    pub fn with_telemetry(
        mut self,
        telemetry: Arc<tokio::sync::Mutex<SwarmTelemetryCollector>>,
    ) -> Self {
        self.telemetry = telemetry;
        self
    }

    pub fn telemetry_arc(&self) -> Arc<tokio::sync::Mutex<SwarmTelemetryCollector>> {
        Arc::clone(&self.telemetry)
    }

    pub fn coordinator_agent(&self) -> Option<&Arc<tokio::sync::Mutex<Agent>>> {
        tracing::debug!(
            has_coordinator = self.coordinator_agent.is_some(),
            "Getting coordinator agent"
        );
        self.coordinator_agent.as_ref()
    }

    pub fn result_store(&self) -> &Arc<ResultStore> {
        &self.result_store
    }

    /// Returns a snapshot of cache statistics, if a cache is configured.
    pub async fn cache_stats(&self) -> Option<CacheStats> {
        if let Some(ref cache) = self.cache {
            let cache_guard = cache.lock().await;
            Some(cache_guard.stats().clone())
        } else {
            None
        }
    }

    pub async fn clear_cache(&self) -> Result<()> {
        if let Some(ref cache) = self.cache {
            let mut cache_guard = cache.lock().await;
            cache_guard.clear().await?;
        }
        Ok(())
    }

    /// Returns `(max_retries, base_delay_ms, max_delay_ms, backoff_multiplier)`.
    pub fn retry_config(&self) -> (u32, u64, u64, f64) {
        (
            self.config.max_retries,
            self.config.base_delay_ms,
            self.config.max_delay_ms,
            2.0, // fixed exponential backoff multiplier
        )
    }

    pub fn retries_enabled(&self) -> bool {
        self.config.max_retries > 0
    }

    /// Mirrors selected events onto the bus, then forwards to the UI channel.
    fn try_send_event(&self, event: SwarmEvent) {
        if let Some(ref bus) = self.bus {
            let handle = bus.handle("swarm-executor");
            match &event {
                SwarmEvent::Started { task, .. } => {
                    handle.send(
                        "broadcast",
                        BusMessage::AgentReady {
                            agent_id: "swarm-executor".to_string(),
                            capabilities: vec![format!("executing:{task}")],
                        },
                    );
                }
                SwarmEvent::Complete { success, .. } => {
                    let state = if *success {
                        crate::a2a::types::TaskState::Completed
                    } else {
                        crate::a2a::types::TaskState::Failed
                    };
                    handle.send_task_update("swarm", state, None);
                }
                _ => {} // other events are UI-only
            }
        }

        if let Some(ref tx) = self.event_tx {
            let _ = tx.try_send(event);
        }
    }

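    /// Full swarm pipeline: decompose the task, execute each stage in order
    /// (subtasks within a stage run in parallel), publish results for
    /// dependent subtasks, and aggregate the final answer.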
    pub async fn execute(
        &self,
        task: &str,
        strategy: DecompositionStrategy,
    ) -> Result<SwarmResult> {
        let start_time = Instant::now();

        let mut orchestrator = Orchestrator::new(self.config.clone()).await?;

        tracing::info!(provider_name = %orchestrator.provider(), "Starting swarm execution for task");

        let subtasks = orchestrator.decompose(task, strategy).await?;

        if subtasks.is_empty() {
            self.try_send_event(SwarmEvent::Error("No subtasks generated".to_string()));
            return Ok(SwarmResult {
                success: false,
                result: String::new(),
                subtask_results: Vec::new(),
                stats: SwarmStats::default(),
                artifacts: Vec::new(),
                error: Some("No subtasks generated".to_string()),
            });
        }

        tracing::info!(provider_name = %orchestrator.provider(), "Task decomposed into {} subtasks", subtasks.len());

        self.try_send_event(SwarmEvent::Started {
            task: task.to_string(),
            total_subtasks: subtasks.len(),
        });

        self.try_send_event(SwarmEvent::Decomposed {
            subtasks: subtasks
                .iter()
                .map(|s| SubTaskInfo {
                    id: s.id.clone(),
                    name: s.name.clone(),
                    status: SubTaskStatus::Pending,
                    stage: s.stage,
                    dependencies: s.dependencies.clone(),
                    agent_name: s.specialty.clone(),
                    current_tool: None,
                    steps: 0,
                    max_steps: self.config.max_steps_per_subagent,
                    tool_call_history: Vec::new(),
                    messages: Vec::new(),
                    output: None,
                    error: None,
                })
                .collect(),
        });

        let max_stage = subtasks.iter().map(|s| s.stage).max().unwrap_or(0);
        let mut all_results: Vec<SubTaskResult> = Vec::new();
        let artifacts: Vec<SwarmArtifact> = Vec::new();

        let swarm_id = uuid::Uuid::new_v4().to_string();
        let strategy_str = format!("{:?}", strategy);
        self.telemetry
            .lock()
            .await
            .start_swarm(&swarm_id, subtasks.len(), &strategy_str)
            .await;

        // Results from finished subtasks, keyed by subtask id, shared with later stages.
        let completed_results: Arc<RwLock<HashMap<String, String>>> =
            Arc::new(RwLock::new(HashMap::new()));

        for stage in 0..=max_stage {
            let stage_start = Instant::now();

            let stage_subtasks: Vec<SubTask> = orchestrator
                .subtasks_for_stage(stage)
                .into_iter()
                .cloned()
                .collect();

            tracing::debug!(
                "Stage {} has {} subtasks (max_stage={})",
                stage,
                stage_subtasks.len(),
                max_stage
            );

            if stage_subtasks.is_empty() {
                continue;
            }

            tracing::info!(
                provider_name = %orchestrator.provider(),
                "Executing stage {} with {} subtasks",
                stage,
                stage_subtasks.len()
            );

            let stage_results = self
                .execute_stage(
                    &orchestrator,
                    stage_subtasks,
                    completed_results.clone(),
                    &swarm_id,
                )
                .await?;

            // Record stage results so dependent subtasks can consume them.
            {
                let mut completed = completed_results.write().await;
                for result in &stage_results {
                    completed.insert(result.subtask_id.clone(), result.result.clone());
                    let tags = vec![
                        format!("stage:{stage}"),
                        format!("subtask:{}", result.subtask_id),
                    ];
                    let _ = self
                        .result_store
                        .publish(
                            &result.subtask_id,
                            &result.subagent_id,
                            &result.result,
                            tags,
                            None,
                        )
                        .await;
                }
            }

            let stage_time = stage_start.elapsed().as_millis() as u64;
            let max_steps = stage_results.iter().map(|r| r.steps).max().unwrap_or(0);
            let total_steps: usize = stage_results.iter().map(|r| r.steps).sum();

            orchestrator.stats_mut().stages.push(StageStats {
                stage,
                subagent_count: stage_results.len(),
                max_steps,
                total_steps,
                execution_time_ms: stage_time,
            });

            for result in &stage_results {
                orchestrator.complete_subtask(&result.subtask_id, result.clone());
            }

            let stage_completed = stage_results.iter().filter(|r| r.success).count();
            let stage_failed = stage_results.iter().filter(|r| !r.success).count();
            self.try_send_event(SwarmEvent::StageComplete {
                stage,
                completed: stage_completed,
                failed: stage_failed,
            });

            all_results.extend(stage_results);
        }

        let provider_name = orchestrator.provider().to_string();

        self.telemetry
            .lock()
            .await
            .record_swarm_latency("total_execution", start_time.elapsed())
            .await;

        let stats = orchestrator.stats_mut();
        stats.execution_time_ms = start_time.elapsed().as_millis() as u64;
        stats.sequential_time_estimate_ms = all_results.iter().map(|r| r.execution_time_ms).sum();
        stats.calculate_critical_path();
        stats.calculate_speedup();

        let success = all_results.iter().all(|r| r.success);

        let _telemetry_metrics = self.telemetry.lock().await.complete_swarm(success).await;
        let result = self.aggregate_results(&all_results).await?;

        tracing::info!(
            provider_name = %provider_name,
            "Swarm execution complete: {} subtasks, {:.1}x speedup",
            all_results.len(),
            stats.speedup_factor
        );

        let final_stats = orchestrator.stats().clone();
        self.try_send_event(SwarmEvent::Complete {
            success,
            stats: final_stats.clone(),
        });

        Ok(SwarmResult {
            success,
            result,
            subtask_results: all_results,
            stats: final_stats,
            artifacts,
            error: None,
        })
    }

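    /// Runs one stage's subtasks concurrently on local tokio tasks (or
    /// delegates to Kubernetes pods when configured), with optional caching,
    /// per-subtask git worktrees, retry with exponential backoff, and
    /// collapse-controller supervision of parallel branches.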
    async fn execute_stage(
        &self,
        orchestrator: &Orchestrator,
        subtasks: Vec<SubTask>,
        completed_results: Arc<RwLock<HashMap<String, String>>>,
        swarm_id: &str,
    ) -> Result<Vec<SubTaskResult>> {
        if self.config.execution_mode == ExecutionMode::KubernetesPod {
            return self
                .execute_stage_kubernetes(orchestrator, subtasks, completed_results, swarm_id)
                .await;
        }

        let mut handles: FuturesUnordered<
            tokio::task::JoinHandle<(String, Result<SubTaskResult, anyhow::Error>)>,
        > = FuturesUnordered::new();
        let mut abort_handles: HashMap<String, AbortHandle> = HashMap::new();
        let mut task_ids: HashMap<tokio::task::Id, String> = HashMap::new();
        let mut active_worktrees: HashMap<String, WorktreeInfo> = HashMap::new();
        let mut all_worktrees: HashMap<String, WorktreeInfo> = HashMap::new();
        let mut cached_results: Vec<SubTaskResult> = Vec::new();
        let mut completed_entries: Vec<(SubTaskResult, Option<WorktreeInfo>)> = Vec::new();
        let mut kill_reasons: HashMap<String, String> = HashMap::new();
        let mut promoted_subtask_id: Option<String> = None;

        // Bound concurrent provider requests across sub-agents.
        let semaphore = Arc::new(tokio::sync::Semaphore::new(
            self.config.max_concurrent_requests,
        ));
        let delay_ms = self.config.request_delay_ms;

        let model = orchestrator.model().to_string();
        let provider_name = orchestrator.provider().to_string();
        let providers = orchestrator.providers();
        let provider = providers
            .get(&provider_name)
            .ok_or_else(|| anyhow::anyhow!("Provider {} not found", provider_name))?;

        tracing::info!(provider_name = %provider_name, "Selected provider for subtask execution");

        let base_tool_registry =
            ToolRegistry::with_provider_arc(Arc::clone(&provider), model.clone());
        // Sub-agents cannot prompt the user, so the interactive `question` tool is removed.
        let mut tool_definitions: Vec<_> = base_tool_registry
            .definitions()
            .into_iter()
            .filter(|t| t.name != "question")
            .collect();

        let swarm_share_def = crate::provider::ToolDefinition {
            name: "swarm_share".to_string(),
            description: "Share results with other sub-agents in the swarm. Actions: publish \
                          (share a result), get (retrieve a result by key), query_tags (find \
                          results by tags), query_prefix (find results by key prefix), list \
                          (show all shared results)."
                .to_string(),
            parameters: serde_json::json!({
                "type": "object",
                "properties": {
                    "action": {
                        "type": "string",
                        "enum": ["publish", "get", "query_tags", "query_prefix", "list"],
                        "description": "Action to perform"
                    },
                    "key": {
                        "type": "string",
                        "description": "Result key (for publish/get)"
                    },
                    "value": {
                        "description": "Result value to publish (any JSON value)"
                    },
                    "tags": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Tags for publish or query_tags"
                    },
                    "prefix": {
                        "type": "string",
                        "description": "Key prefix for query_prefix"
                    }
                },
                "required": ["action"]
            }),
        };
        tool_definitions.push(swarm_share_def);

        let result_store = Arc::clone(&self.result_store);

        let worktree_manager = if self.config.worktree_enabled {
            let working_dir = self.config.working_dir.clone().unwrap_or_else(|| {
                std::env::current_dir()
                    .map(|p| p.to_string_lossy().to_string())
                    .unwrap_or_else(|_| ".".to_string())
            });

            let mgr = WorktreeManager::new(&working_dir);
            tracing::info!(
                working_dir = %working_dir,
                "Worktree isolation enabled for parallel sub-agents"
            );
            Some(Arc::new(mgr) as Arc<WorktreeManager>)
        } else {
            None
        };

        for (idx, subtask) in subtasks.into_iter().enumerate() {
            let model = model.clone();
            let _provider_name = provider_name.clone();
            let provider = Arc::clone(&provider);

            // Serve from cache when an identical subtask has already run.
            if let Some(ref cache) = self.cache {
                let mut cache_guard = cache.lock().await;
                if let Some(cached_result) = cache_guard.get(&subtask).await {
                    tracing::info!(
                        subtask_id = %subtask.id,
                        task_name = %subtask.name,
                        "Cache hit for subtask, skipping execution"
                    );
                    self.try_send_event(SwarmEvent::SubTaskUpdate {
                        id: subtask.id.clone(),
                        name: subtask.name.clone(),
                        status: SubTaskStatus::Completed,
                        agent_name: Some("cached".to_string()),
                    });
                    cached_results.push(cached_result);
                    continue;
                }
            }

            // Inline results from completed dependencies as context.
            let context = {
                let completed = completed_results.read().await;
                let mut dep_context = String::new();
                for dep_id in &subtask.dependencies {
                    if let Some(result) = completed.get(dep_id) {
                        dep_context.push_str(&format!(
                            "\n--- Result from dependency {} ---\n{}\n",
                            dep_id, result
                        ));
                    }
                }
                dep_context
            };

            let instruction = subtask.instruction.clone();
            let subtask_name = subtask.name.clone();
            let specialty = subtask.specialty.clone().unwrap_or_default();
            let subtask_id = subtask.id.clone();
            let subtask_id_for_handle = subtask_id.clone();
            let max_steps = self.config.max_steps_per_subagent;
            let timeout_secs = self.config.subagent_timeout_secs;

            let max_retries = self.config.max_retries;
            let base_delay_ms = self.config.base_delay_ms;
            let max_delay_ms = self.config.max_delay_ms;

            // Give each sub-agent an isolated git worktree when enabled.
            let worktree_info = if let Some(ref mgr) = worktree_manager {
                let task_slug = subtask_id.replace("-", "_");
                match mgr.create(&task_slug).await {
                    Ok(wt) => {
                        if let Err(e) = mgr.inject_workspace_stub(&wt.path) {
                            tracing::warn!(
                                subtask_id = %subtask_id,
                                error = %e,
                                "Failed to inject workspace stub into worktree"
                            );
                        }
                        tracing::info!(
                            subtask_id = %subtask_id,
                            worktree_path = %wt.path.display(),
                            worktree_branch = %wt.branch,
                            "Created worktree for sub-agent"
                        );
                        active_worktrees.insert(subtask_id.clone(), wt.clone());
                        all_worktrees.insert(subtask_id.clone(), wt.clone());
                        Some(wt)
                    }
                    Err(e) => {
                        tracing::warn!(
                            subtask_id = %subtask_id,
                            error = %e,
                            "Failed to create worktree, using shared directory"
                        );
                        None
                    }
                }
            } else {
                None
            };

            let working_dir = worktree_info
                .as_ref()
                .map(|wt| wt.path.display().to_string())
                .unwrap_or_else(|| ".".to_string());
            let working_dir_path = worktree_info.as_ref().map(|wt| wt.path.clone());

            let tools = tool_definitions.clone();
            let _base_registry = Arc::clone(&base_tool_registry);
            let agent_result_store = Arc::clone(&result_store);
            let sem = Arc::clone(&semaphore);
            // Stagger start times so sub-agents don't hit the provider at once.
            let stagger_delay = delay_ms * idx as u64;
            let event_tx = self.event_tx.clone();

            let subagent_id = format!("agent-{}", uuid::Uuid::new_v4());

            tracing::debug!(subagent_id = %subagent_id, swarm_id = %swarm_id, subtask = %subtask_id, specialty = %specialty, "Starting sub-agent");

            let handle = tokio::spawn(async move {
                if stagger_delay > 0 {
                    tokio::time::sleep(Duration::from_millis(stagger_delay)).await;
                }
                let _permit = match sem.acquire().await {
                    Ok(permit) => permit,
                    Err(_) => {
                        return (
                            subtask_id.clone(),
                            Err(anyhow::anyhow!("Swarm execution cancelled")),
                        );
                    }
                };

                let _agent_start = Instant::now();

                let start = Instant::now();

                let working_path = std::path::Path::new(&working_dir);
                let agents_md_content = crate::agent::builtin::load_agents_md(working_path)
                    .map(|(content, _)| {
                        format!("\n\nPROJECT INSTRUCTIONS (from AGENTS.md):\n{content}")
                    })
                    .unwrap_or_default();

                // Per-subtask PRD filename so parallel agents don't clobber each other.
                let prd_filename = format!("prd_{}.json", subtask_id.replace("-", "_"));
                let system_prompt = format!(
                    "You are a {} specialist sub-agent (ID: {}). You have access to tools to complete your task.

WORKING DIRECTORY: {}
All file operations should be relative to this directory.

IMPORTANT: You MUST use tools to make changes. Do not just describe what to do - actually do it using the tools available.

Available tools:
- read: Read file contents
- write: Write/create files
- edit: Edit existing files (search and replace)
- multiedit: Make multiple edits at once
- glob: Find files by pattern
- grep: Search file contents
- bash: Run shell commands (use cwd: \"{}\" parameter)
- webfetch: Fetch web pages
- prd: Generate structured PRD for complex tasks
- ralph: Run autonomous agent loop on a PRD
- swarm_share: Share results with other sub-agents running in parallel
- agent: Spawn specialized helper agents when needed (smart delegation)

SMART SPAWN POLICY (mandatory):
- Any spawned agent MUST use a different model than your current model ('{}')
- Spawned model MUST be free/subscription-eligible (e.g. '*:free', openai-codex/*, github-copilot/*, gemini-web/*, local_cuda/*)
- Include `model` when calling agent.spawn

SHARING RESULTS:
Use swarm_share to collaborate with other sub-agents:
- swarm_share({{action: 'publish', key: 'my-finding', value: '...', tags: ['research']}}) to share a result
- swarm_share({{action: 'get', key: 'some-key'}}) to retrieve a result from another agent
- swarm_share({{action: 'list'}}) to see all shared results
- swarm_share({{action: 'query_tags', tags: ['research']}}) to find results by tag

COMPLEX TASKS:
If your task is complex and involves multiple implementation steps, use the prd + ralph workflow:
1. Call prd({{action: 'analyze', task_description: '...'}}) to understand what's needed
2. Break down into user stories with acceptance criteria
3. Call prd({{action: 'save', prd_path: '{}', project: '...', feature: '...', stories: [...]}})
4. Call ralph({{action: 'run', prd_path: '{}'}}) to execute

NOTE: Use your unique PRD file '{}' so parallel agents don't conflict.

When done, provide a brief summary of what you accomplished.{agents_md_content}",
                    specialty,
                    subtask_id,
                    working_dir,
                    working_dir,
                    model,
                    prd_filename,
                    prd_filename,
                    prd_filename
                );

                let user_prompt = if context.is_empty() {
                    format!("Complete this task:\n\n{}", instruction)
                } else {
                    format!(
                        "Complete this task:\n\n{}\n\nContext from prior work:\n{}",
                        instruction, context
                    )
                };

                if let Some(ref tx) = event_tx {
                    let _ = tx.try_send(SwarmEvent::SubTaskUpdate {
                        id: subtask_id.clone(),
                        name: subtask_name.clone(),
                        status: SubTaskStatus::Running,
                        agent_name: Some(format!("agent-{}", subtask_id)),
                    });
                    let _ = tx.try_send(SwarmEvent::AgentStarted {
                        subtask_id: subtask_id.clone(),
                        agent_name: format!("agent-{}", subtask_id),
                        specialty: specialty.clone(),
                    });
                }

                // Each sub-agent gets its own registry with the swarm_share tool
                // bound to the shared result store.
                let mut agent_registry =
                    ToolRegistry::with_provider(Arc::clone(&provider), model.clone());
                agent_registry.register(Arc::new(crate::tool::swarm_share::SwarmShareTool::new(
                    Arc::clone(&agent_result_store),
                    subtask_id.clone(),
                )));
                let registry = Arc::new(agent_registry);

                // Retry the agent loop with exponential backoff until it completes.
                let mut attempt = 0u32;
                let mut result: Result<(String, usize, usize, AgentLoopExit), anyhow::Error> =
                    Err(anyhow::anyhow!("Not executed"));

                while attempt <= max_retries {
                    let _attempt_start = Instant::now();

                    result = run_agent_loop(
                        Arc::clone(&provider),
                        &model,
                        &system_prompt,
                        &user_prompt,
                        tools.clone(),
                        Arc::clone(&registry),
                        max_steps,
                        timeout_secs,
                        event_tx.clone(),
                        subtask_id.clone(),
                        None,
                        working_dir_path.clone(),
                    )
                    .await;

                    match &result {
                        Ok((_, _, _, exit_reason)) => {
                            if *exit_reason == AgentLoopExit::Completed {
                                tracing::info!(
                                    subtask_id = %subtask_id,
                                    attempt = attempt + 1,
                                    "Sub-agent completed successfully"
                                );
                                break;
                            }
                            let should_retry = attempt < max_retries;
                            if should_retry {
                                let delay = calculate_backoff_delay(
                                    attempt,
                                    base_delay_ms,
                                    max_delay_ms,
                                    2.0,
                                );
                                tracing::warn!(
                                    subtask_id = %subtask_id,
                                    attempt = attempt + 1,
                                    max_retries = max_retries,
                                    exit_reason = ?exit_reason,
                                    delay_ms = delay.as_millis(),
                                    "Sub-agent did not complete, retrying with backoff"
                                );
                                tokio::time::sleep(delay).await;
                            } else {
                                tracing::warn!(
                                    subtask_id = %subtask_id,
                                    attempt = attempt + 1,
                                    max_retries = max_retries,
                                    exit_reason = ?exit_reason,
                                    "Sub-agent did not complete, retries exhausted"
                                );
                            }
                        }
                        Err(e) => {
                            let should_retry = attempt < max_retries;
                            if should_retry {
                                let delay = calculate_backoff_delay(
                                    attempt,
                                    base_delay_ms,
                                    max_delay_ms,
                                    2.0,
                                );
                                tracing::warn!(
                                    subtask_id = %subtask_id,
                                    attempt = attempt + 1,
                                    max_retries = max_retries,
                                    error = %e,
                                    delay_ms = delay.as_millis(),
                                    "Sub-agent error, retrying with backoff"
                                );
                                tokio::time::sleep(delay).await;
                            } else {
                                tracing::error!(
                                    subtask_id = %subtask_id,
                                    attempt = attempt + 1,
                                    max_retries = max_retries,
                                    error = %e,
                                    "Sub-agent error, retries exhausted"
                                );
                            }
                        }
                    }

                    attempt += 1;
                }

                let result = result;

                let task_result = match result {
                    Ok((output, steps, tool_calls, exit_reason)) => {
                        let (success, status, error) = match exit_reason {
                            AgentLoopExit::Completed => (true, SubTaskStatus::Completed, None),
                            AgentLoopExit::MaxStepsReached => (
                                false,
                                SubTaskStatus::Failed,
                                Some(format!("Sub-agent hit max steps ({max_steps})")),
                            ),
                            AgentLoopExit::TimedOut => (
                                false,
                                SubTaskStatus::TimedOut,
                                Some(format!("Sub-agent timed out after {timeout_secs}s")),
                            ),
                        };

                        // Retries are the attempts beyond the first.
                        let total_attempts = attempt + 1;
                        let actual_retry_attempts = if total_attempts > 1 {
                            total_attempts - 1
                        } else {
                            0
                        };

                        if let Some(ref tx) = event_tx {
                            let _ = tx.try_send(SwarmEvent::SubTaskUpdate {
                                id: subtask_id.clone(),
                                name: subtask_name.clone(),
                                status,
                                agent_name: Some(format!("agent-{}", subtask_id)),
                            });
                            if let Some(ref message) = error {
                                let _ = tx.try_send(SwarmEvent::AgentError {
                                    subtask_id: subtask_id.clone(),
                                    error: message.clone(),
                                });
                            }
                            let _ = tx.try_send(SwarmEvent::AgentOutput {
                                subtask_id: subtask_id.clone(),
                                output: output.clone(),
                            });
                            let _ = tx.try_send(SwarmEvent::AgentComplete {
                                subtask_id: subtask_id.clone(),
                                success,
                                steps,
                            });
                        }
                        Ok(SubTaskResult {
                            subtask_id: subtask_id.clone(),
                            subagent_id: format!("agent-{}", subtask_id),
                            success,
                            result: output,
                            steps,
                            tool_calls,
                            execution_time_ms: start.elapsed().as_millis() as u64,
                            error,
                            artifacts: Vec::new(),
                            retry_count: actual_retry_attempts,
                        })
                    }
                    Err(e) => {
                        let total_attempts = attempt + 1;
                        let actual_retry_attempts = if total_attempts > 1 {
                            total_attempts - 1
                        } else {
                            0
                        };

                        if let Some(ref tx) = event_tx {
                            let _ = tx.try_send(SwarmEvent::SubTaskUpdate {
                                id: subtask_id.clone(),
                                name: subtask_name.clone(),
                                status: SubTaskStatus::Failed,
                                agent_name: Some(format!("agent-{}", subtask_id)),
                            });
                            let _ = tx.try_send(SwarmEvent::AgentError {
                                subtask_id: subtask_id.clone(),
                                error: e.to_string(),
                            });
                            let _ = tx.try_send(SwarmEvent::AgentComplete {
                                subtask_id: subtask_id.clone(),
                                success: false,
                                steps: 0,
                            });
                        }
                        Ok(SubTaskResult {
                            subtask_id: subtask_id.clone(),
                            subagent_id: format!("agent-{}", subtask_id),
                            success: false,
                            result: String::new(),
                            steps: 0,
                            tool_calls: 0,
                            execution_time_ms: start.elapsed().as_millis() as u64,
                            error: Some(e.to_string()),
                            artifacts: Vec::new(),
                            retry_count: actual_retry_attempts,
                        })
                    }
                };

                (subtask_id.clone(), task_result)
            });

            let abort_handle = handle.abort_handle();
            abort_handles.insert(subtask_id_for_handle.clone(), abort_handle);
            task_ids.insert(handle.id(), subtask_id_for_handle.clone());
            handles.push(handle);
        }

        // The collapse controller only makes sense with multiple isolated branches.
        let mut collapse_controller = if worktree_manager.is_some() && active_worktrees.len() > 1 {
            Some(CollapseController::new(CollapsePolicy::default()))
        } else {
            None
        };
        let mut collapse_tick = tokio::time::interval(Duration::from_secs(COLLAPSE_SAMPLE_SECS));
        collapse_tick.set_missed_tick_behavior(MissedTickBehavior::Skip);
        // Consume the immediate first tick so sampling starts after one interval.
        let _ = collapse_tick.tick().await;

        while !handles.is_empty() {
            tokio::select! {
                maybe_join = handles.next() => {
                    let Some(joined) = maybe_join else {
                        continue;
                    };
                    match joined {
                        Ok((subtask_id, Ok(result))) => {
                            abort_handles.remove(&subtask_id);
                            let wt = active_worktrees.remove(&subtask_id).or_else(|| all_worktrees.get(&subtask_id).cloned());
                            completed_entries.push((result, wt));
                        }
                        Ok((subtask_id, Err(e))) => {
                            abort_handles.remove(&subtask_id);
                            active_worktrees.remove(&subtask_id);
                            let wt = all_worktrees.get(&subtask_id).cloned();
                            completed_entries.push((
                                SubTaskResult {
                                    subtask_id: subtask_id.clone(),
                                    subagent_id: format!("agent-{subtask_id}"),
                                    success: false,
                                    result: String::new(),
                                    steps: 0,
                                    tool_calls: 0,
                                    execution_time_ms: 0,
                                    error: Some(e.to_string()),
                                    artifacts: Vec::new(),
                                    retry_count: 0,
                                },
                                wt,
                            ));
                        }
                        Err(e) => {
                            let subtask_id = task_ids
                                .remove(&e.id())
                                .unwrap_or_else(|| "unknown".to_string());
                            abort_handles.remove(&subtask_id);
                            active_worktrees.remove(&subtask_id);
                            let wt = all_worktrees.get(&subtask_id).cloned();
                            completed_entries.push((
                                SubTaskResult {
                                    subtask_id: subtask_id.clone(),
                                    subagent_id: format!("agent-{subtask_id}"),
                                    success: false,
                                    result: String::new(),
                                    steps: 0,
                                    tool_calls: 0,
                                    execution_time_ms: 0,
                                    error: Some(format!("Task join error: {e}")),
                                    artifacts: Vec::new(),
                                    retry_count: 0,
                                },
                                wt,
                            ));
                        }
                    }
                }
                _ = collapse_tick.tick(), if collapse_controller.is_some() && !active_worktrees.is_empty() => {
                    let branches: Vec<BranchRuntimeState> = active_worktrees
                        .iter()
                        .map(|(subtask_id, wt)| BranchRuntimeState {
                            subtask_id: subtask_id.clone(),
                            branch: wt.branch.clone(),
                            worktree_path: wt.path.clone(),
                        })
                        .collect();

                    if let Some(controller) = collapse_controller.as_mut() {
                        match controller.sample(&branches) {
                            Ok(tick) => {
                                if promoted_subtask_id != tick.promoted_subtask_id {
                                    promoted_subtask_id = tick.promoted_subtask_id.clone();
                                    if let Some(ref promoted) = promoted_subtask_id {
                                        tracing::info!(
                                            subtask_id = %promoted,
                                            "Collapse controller promoted branch"
                                        );
                                        if let Some(audit) = crate::audit::try_audit_log() {
                                            audit.log_with_correlation(
                                                crate::audit::AuditCategory::Swarm,
                                                "collapse_promote_branch",
                                                crate::audit::AuditOutcome::Success,
                                                Some("collapse-controller".to_string()),
                                                Some(serde_json::json!({
                                                    "swarm_id": swarm_id,
                                                    "subtask_id": promoted,
                                                })),
                                                None,
                                                None,
                                                Some(swarm_id.to_string()),
                                                None,
                                            ).await;
                                        }
                                    }
                                }

                                for kill in tick.kills {
                                    if kill_reasons.contains_key(&kill.subtask_id) {
                                        continue;
                                    }
                                    let Some(abort_handle) = abort_handles.get(&kill.subtask_id) else {
                                        continue;
                                    };

                                    abort_handle.abort();
                                    abort_handles.remove(&kill.subtask_id);
                                    active_worktrees.remove(&kill.subtask_id);
                                    kill_reasons.insert(kill.subtask_id.clone(), kill.reason.clone());

                                    tracing::warn!(
                                        subtask_id = %kill.subtask_id,
                                        branch = %kill.branch,
                                        reason = %kill.reason,
                                        "Collapse controller killed branch"
                                    );

                                    if let Some(ref tx) = self.event_tx {
                                        let _ = tx.try_send(SwarmEvent::SubTaskUpdate {
                                            id: kill.subtask_id.clone(),
                                            name: kill.subtask_id.clone(),
                                            status: SubTaskStatus::Cancelled,
                                            agent_name: Some(format!("agent-{}", kill.subtask_id)),
                                        });
                                        let _ = tx.try_send(SwarmEvent::AgentError {
                                            subtask_id: kill.subtask_id.clone(),
                                            error: format!("Cancelled by collapse controller: {}", kill.reason),
                                        });
                                    }

                                    if let Some(audit) = crate::audit::try_audit_log() {
                                        audit.log_with_correlation(
                                            crate::audit::AuditCategory::Swarm,
                                            "collapse_kill_branch",
                                            crate::audit::AuditOutcome::Success,
                                            Some("collapse-controller".to_string()),
                                            Some(serde_json::json!({
                                                "swarm_id": swarm_id,
                                                "subtask_id": kill.subtask_id,
                                                "branch": kill.branch,
                                                "reason": kill.reason,
                                            })),
                                            None,
                                            None,
                                            Some(swarm_id.to_string()),
                                            None,
                                        ).await;
                                    }
                                }
                            }
                            Err(e) => {
                                tracing::warn!(error = %e, "Collapse controller sampling failed");
                            }
                        }
                    }
                }
            }
        }

        // Surface the promoted branch's result first so aggregation favors it.
        if let Some(ref promoted) = promoted_subtask_id {
            completed_entries.sort_by_key(|(result, _)| {
                if &result.subtask_id == promoted {
                    0usize
                } else {
                    1usize
                }
            });
        }

        let mut results = cached_results;
        let auto_merge = self.config.worktree_auto_merge;

        for (mut result, worktree_info) in completed_entries {
            if let Some(wt) = worktree_info {
                if let Some(reason) = kill_reasons.get(&result.subtask_id) {
                    result.error = Some(format!("Cancelled by collapse controller: {reason}"));
                    result.result.push_str(&format!(
                        "\n\n--- Collapse Controller ---\nBranch terminated: {reason}"
                    ));
                    if let Some(ref mgr) = worktree_manager
                        && let Err(e) = mgr.cleanup(&wt.name).await
                    {
                        tracing::warn!(error = %e, "Failed to cleanup killed worktree");
                    }
                } else if result.success && auto_merge {
                    if let Some(ref mgr) = worktree_manager {
                        match mgr.merge(&wt.name).await {
                            Ok(merge_result) => {
                                if merge_result.success {
                                    tracing::info!(
                                        subtask_id = %result.subtask_id,
                                        files_changed = merge_result.files_changed,
                                        "Merged worktree changes successfully"
                                    );
                                    result.result.push_str(&format!(
                                        "\n\n--- Merge Result ---\n{}",
                                        merge_result.summary
                                    ));
                                } else if merge_result.aborted {
                                    tracing::warn!(
                                        subtask_id = %result.subtask_id,
                                        summary = %merge_result.summary,
                                        "Merge was aborted"
                                    );
                                    result.result.push_str(&format!(
                                        "\n\n--- Merge Aborted ---\n{}",
                                        merge_result.summary
                                    ));
                                } else {
                                    tracing::warn!(
                                        subtask_id = %result.subtask_id,
                                        conflicts = ?merge_result.conflicts,
                                        "Merge had conflicts"
                                    );
                                    result.result.push_str(&format!(
                                        "\n\n--- Merge Conflicts ---\n{}",
                                        merge_result.summary
                                    ));
                                }
                                if let Err(e) = mgr.cleanup(&wt.name).await {
                                    tracing::warn!(error = %e, "Failed to cleanup worktree");
                                }
                            }
                            Err(e) => {
                                tracing::error!(
                                    subtask_id = %result.subtask_id,
                                    error = %e,
                                    "Failed to merge worktree"
                                );
                            }
                        }
                    }
                } else if !result.success {
                    tracing::info!(
                        subtask_id = %result.subtask_id,
                        worktree_path = %wt.path.display(),
                        "Keeping worktree for debugging (task failed)"
                    );
                }
            }

            // Cache successful results for future runs.
            if result.success
                && let Some(ref cache_arc) = self.cache
            {
                let mut cache_guard: tokio::sync::MutexGuard<'_, SwarmCache> =
                    cache_arc.lock().await;
                let cache_subtask = SubTask::new(&result.subtask_id, &result.result);
                if let Err(e) = cache_guard.put(&cache_subtask, &result).await {
                    tracing::warn!(
                        subtask_id = %result.subtask_id,
                        error = %e,
                        "Failed to cache subtask result"
                    );
                }
            }

            results.push(result);
        }

        Ok(results)
    }

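    /// Kubernetes variant of `execute_stage`: each subtask runs in its own pod
    /// (bounded by the pod budget), results are recovered from pod logs, and
    /// the collapse controller can kill or promote branches based on probe
    /// observations sampled every `COLLAPSE_SAMPLE_SECS`.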
1627 async fn execute_stage_kubernetes(
1628 &self,
1629 orchestrator: &Orchestrator,
1630 subtasks: Vec<SubTask>,
1631 completed_results: Arc<RwLock<HashMap<String, String>>>,
1632 swarm_id: &str,
1633 ) -> Result<Vec<SubTaskResult>> {
1634 let k8s = K8sManager::new().await;
1635 if !k8s.is_available() {
1636 anyhow::bail!(
1637 "Kubernetes execution mode requested but K8s client is unavailable in this environment"
1638 );
1639 }
1640
1641 let provider_name = orchestrator.provider().to_string();
1642 let model = orchestrator.model().to_string();
1643 let pod_budget = self.config.k8s_pod_budget.max(1);
1644 let mut pending: VecDeque<SubTask> = subtasks.into_iter().collect();
1645 let mut active: HashMap<String, ActiveK8sBranch> = HashMap::new();
1646 let mut subtask_names: HashMap<String, String> = HashMap::new();
1647 let mut results: Vec<SubTaskResult> = Vec::new();
1648 let mut kill_reasons: HashMap<String, String> = HashMap::new();
1649 let mut promoted_subtask_id: Option<String> = None;
1650 let mut collapse_controller = CollapseController::new(CollapsePolicy::default());
1651
1652 let mut tick = tokio::time::interval(Duration::from_secs(COLLAPSE_SAMPLE_SECS));
1653 tick.set_missed_tick_behavior(MissedTickBehavior::Skip);
1654 let _ = tick.tick().await;
1655
1656 loop {
1657 while active.len() < pod_budget {
1658 let Some(subtask) = pending.pop_front() else {
1659 break;
1660 };
1661
1662 if let Some(ref cache) = self.cache {
1663 let mut cache_guard = cache.lock().await;
1664 if let Some(cached_result) = cache_guard.get(&subtask).await {
1665 tracing::info!(
1666 subtask_id = %subtask.id,
1667 task_name = %subtask.name,
1668 "Cache hit for subtask, skipping Kubernetes execution"
1669 );
1670 self.try_send_event(SwarmEvent::SubTaskUpdate {
1671 id: subtask.id.clone(),
1672 name: subtask.name.clone(),
1673 status: SubTaskStatus::Completed,
1674 agent_name: Some("cached".to_string()),
1675 });
1676 results.push(cached_result);
1677 continue;
1678 }
1679 }
1680
1681 let context = {
1682 let completed = completed_results.read().await;
1683 let mut dep_context = String::new();
1684 for dep_id in &subtask.dependencies {
1685 if let Some(result) = completed.get(dep_id) {
1686 dep_context.push_str(&format!(
1687 "\n--- Result from dependency {} ---\n{}\n",
1688 dep_id, result
1689 ));
1690 }
1691 }
1692 dep_context
1693 };
1694
1695 let payload = RemoteSubtaskPayload {
1696 swarm_id: swarm_id.to_string(),
1697 subtask_id: subtask.id.clone(),
1698 subtask_name: subtask.name.clone(),
1699 specialty: subtask.specialty.clone().unwrap_or_default(),
1700 instruction: subtask.instruction.clone(),
1701 context: context.clone(),
1702 provider: provider_name.clone(),
1703 model: model.clone(),
1704 max_steps: self.config.max_steps_per_subagent,
1705 timeout_secs: self.config.subagent_timeout_secs,
1706 working_dir: self.config.working_dir.clone(),
1707 probe_interval_secs: COLLAPSE_SAMPLE_SECS,
1708 };
1709 let payload_b64 = match encode_payload(&payload) {
1710 Ok(payload) => payload,
1711 Err(error) => {
1712 let error_text = format!("Failed to encode remote payload: {error}");
1713 tracing::error!(subtask_id = %subtask.id, error = %error, "K8s payload encoding failed");
1714 self.try_send_event(SwarmEvent::SubTaskUpdate {
1715 id: subtask.id.clone(),
1716 name: subtask.name.clone(),
1717 status: SubTaskStatus::Failed,
1718 agent_name: Some("k8s-encoder".to_string()),
1719 });
1720 self.try_send_event(SwarmEvent::AgentError {
1721 subtask_id: subtask.id.clone(),
1722 error: error_text.clone(),
1723 });
1724 results.push(SubTaskResult {
1725 subtask_id: subtask.id.clone(),
1726 subagent_id: format!("agent-{}", subtask.id),
1727 success: false,
1728 result: String::new(),
1729 steps: 0,
1730 tool_calls: 0,
1731 execution_time_ms: 0,
1732 error: Some(error_text),
1733 artifacts: Vec::new(),
1734 retry_count: 0,
1735 });
1736 continue;
1737 }
1738 };
1739
1740 let mut env_vars = HashMap::new();
1741 env_vars.insert(SWARM_SUBTASK_PAYLOAD_ENV.to_string(), payload_b64);
1742 for key in K8S_PASSTHROUGH_ENV_VARS {
1743 if let Ok(value) = std::env::var(key)
1744 && !value.trim().is_empty()
1745 {
1746 env_vars.insert((*key).to_string(), value);
1747 }
1748 }
1749 let fallback_prompt = if context.trim().is_empty() {
1750 format!(
1751 "You are executing swarm subtask '{}'.\n\nTask:\n{}\n\n\
1752Return only the final subtask answer.",
1753 subtask.id, subtask.instruction
1754 )
1755 } else {
1756 format!(
1757 "You are executing swarm subtask '{}'.\n\nTask:\n{}\n\n\
1758Dependency context:\n{}\n\nReturn only the final subtask answer.",
1759 subtask.id, subtask.instruction, context
1760 )
1761 };
1762 env_vars.insert(SWARM_FALLBACK_PROMPT_ENV.to_string(), fallback_prompt);
1763 env_vars.insert(SWARM_FALLBACK_MODEL_ENV.to_string(), model.clone());
1764
1765 let mut labels = HashMap::new();
1766 labels.insert("codetether.run/swarm-id".to_string(), swarm_id.to_string());
1767 labels.insert(
1768 "codetether.run/stage".to_string(),
1769 subtask.stage.to_string(),
1770 );
1771
1772 let spec = SubagentPodSpec {
1773 image: self.config.k8s_subagent_image.clone(),
1774 env_vars,
1775 labels,
1776 command: Some(vec!["sh".to_string(), "-lc".to_string()]),
1777 args: Some(vec![
1778 format!(
1779 "if codetether help swarm-subagent >/dev/null 2>&1; then \
1780exec codetether swarm-subagent --payload-env {payload_env}; \
1781else \
1782exec codetether run \"$${fallback_prompt_env}\" --model \"$${fallback_model_env}\"; \
1783fi",
1784 payload_env = SWARM_SUBTASK_PAYLOAD_ENV,
1785 fallback_prompt_env = SWARM_FALLBACK_PROMPT_ENV,
1786 fallback_model_env = SWARM_FALLBACK_MODEL_ENV,
1787 )
1788 .replace("$$", "$"),
1789 ]),
1790 };
1791
1792 if let Err(error) = k8s.spawn_subagent_pod_with_spec(&subtask.id, spec).await {
1793 let error_text = format!("Failed to spawn Kubernetes pod: {error}");
1794 tracing::error!(subtask_id = %subtask.id, error = %error, "K8s sub-agent pod spawn failed");
1795 self.try_send_event(SwarmEvent::SubTaskUpdate {
1796 id: subtask.id.clone(),
1797 name: subtask.name.clone(),
1798 status: SubTaskStatus::Failed,
1799 agent_name: Some("k8s-spawn".to_string()),
1800 });
1801 self.try_send_event(SwarmEvent::AgentError {
1802 subtask_id: subtask.id.clone(),
1803 error: error_text.clone(),
1804 });
1805 results.push(SubTaskResult {
1806 subtask_id: subtask.id.clone(),
1807 subagent_id: format!("agent-{}", subtask.id),
1808 success: false,
1809 result: String::new(),
1810 steps: 0,
1811 tool_calls: 0,
1812 execution_time_ms: 0,
1813 error: Some(error_text),
1814 artifacts: Vec::new(),
1815 retry_count: 0,
1816 });
1817 continue;
1818 }
1819
1820 let branch = K8sManager::subagent_pod_name(&subtask.id);
1821 subtask_names.insert(subtask.id.clone(), subtask.name.clone());
1822 active.insert(
1823 subtask.id.clone(),
1824 ActiveK8sBranch {
1825 branch: branch.clone(),
1826 started_at: Instant::now(),
1827 },
1828 );
1829
1830 self.try_send_event(SwarmEvent::SubTaskUpdate {
1831 id: subtask.id.clone(),
1832 name: subtask.name.clone(),
1833 status: SubTaskStatus::Running,
1834 agent_name: Some(format!("k8s-{branch}")),
1835 });
1836 self.try_send_event(SwarmEvent::AgentStarted {
1837 subtask_id: subtask.id.clone(),
1838 agent_name: format!("k8s-{branch}"),
1839 specialty: subtask
1840 .specialty
1841 .clone()
1842 .unwrap_or_else(|| "generalist".to_string()),
1843 });
1844
1845 tracing::info!(
1846 subtask_id = %subtask.id,
1847 pod = %branch,
1848 "Spawned Kubernetes sub-agent pod"
1849 );
1850 }
1851
1852 if pending.is_empty() && active.is_empty() {
1853 break;
1854 }
1855
1856 tick.tick().await;
1857
1858 let active_ids: Vec<String> = active.keys().cloned().collect();
1859 let mut finished_results: Vec<SubTaskResult> = Vec::new();
1860 for subtask_id in active_ids {
1861 let Some(active_state) = active.get(&subtask_id).cloned() else {
1862 continue;
1863 };
1864
1865 if active_state.started_at.elapsed()
1866 > Duration::from_secs(self.config.subagent_timeout_secs)
1867 {
1868 let reason = format!(
1869 "Timed out after {}s in Kubernetes pod",
1870 self.config.subagent_timeout_secs
1871 );
1872 kill_reasons.insert(subtask_id.clone(), reason.clone());
1873 if let Err(error) = k8s.delete_subagent_pod(&subtask_id).await {
1874 tracing::warn!(
1875 subtask_id = %subtask_id,
1876 error = %error,
1877 "Failed deleting timed-out Kubernetes pod"
1878 );
1879 }
1880 active.remove(&subtask_id);
1881 finished_results.push(SubTaskResult {
1882 subtask_id: subtask_id.clone(),
1883 subagent_id: format!("agent-{subtask_id}"),
1884 success: false,
1885 result: String::new(),
1886 steps: 0,
1887 tool_calls: 0,
1888 execution_time_ms: active_state.started_at.elapsed().as_millis() as u64,
1889 error: Some(reason),
1890 artifacts: Vec::new(),
1891 retry_count: 0,
1892 });
1893 continue;
1894 }
1895
1896 let pod_state = match k8s.get_subagent_pod_state(&subtask_id).await {
1897 Ok(state) => state,
1898 Err(error) => {
1899 tracing::warn!(
1900 subtask_id = %subtask_id,
1901 error = %error,
1902 "Failed to query Kubernetes pod state for sub-agent"
1903 );
1904 continue;
1905 }
1906 };
1907 let Some(pod_state) = pod_state else {
1908 active.remove(&subtask_id);
1909 finished_results.push(SubTaskResult {
1910 subtask_id: subtask_id.clone(),
1911 subagent_id: format!("agent-{subtask_id}"),
1912 success: false,
1913 result: String::new(),
1914 steps: 0,
1915 tool_calls: 0,
1916 execution_time_ms: active_state.started_at.elapsed().as_millis() as u64,
1917 error: Some("Sub-agent pod disappeared".to_string()),
1918 artifacts: Vec::new(),
1919 retry_count: 0,
1920 });
1921 continue;
1922 };
1923
1924 let phase = pod_state.phase.to_ascii_lowercase();
1925 let finished = pod_state.terminated || phase == "succeeded" || phase == "failed";
1926 if !finished {
1927 continue;
1928 }
1929
1930 let logs = k8s
1931 .subagent_logs(&subtask_id, 10_000)
1932 .await
1933 .unwrap_or_default();
1934 let mut result = result_from_logs(&logs).unwrap_or_else(|| SubTaskResult {
1935 subtask_id: subtask_id.clone(),
1936 subagent_id: format!("agent-{subtask_id}"),
1937 success: pod_state.exit_code.unwrap_or(1) == 0,
1938 result: logs,
1939 steps: 0,
1940 tool_calls: 0,
1941 execution_time_ms: active_state.started_at.elapsed().as_millis() as u64,
1942 error: if pod_state.exit_code.unwrap_or(1) == 0 {
1943 None
1944 } else {
1945 Some(
1946 pod_state
1947 .reason
1948 .clone()
1949 .unwrap_or_else(|| "Remote sub-agent failed".to_string()),
1950 )
1951 },
1952 artifacts: Vec::new(),
1953 retry_count: 0,
1954 });
1955
1956 if let Some(reason) = kill_reasons.get(&subtask_id) {
1957 result.success = false;
1958 result.error = Some(format!("Cancelled by collapse controller: {reason}"));
1959 result.result.push_str(&format!(
1960 "\n\n--- Collapse Controller ---\nBranch terminated: {reason}"
1961 ));
1962 }
1963
1964 active.remove(&subtask_id);
1965 if let Err(error) = k8s.delete_subagent_pod(&subtask_id).await {
1966 tracing::warn!(
1967 subtask_id = %subtask_id,
1968 error = %error,
1969 "Failed deleting completed Kubernetes pod"
1970 );
1971 }
1972 finished_results.push(result);
1973 }
1974
1975 for result in finished_results {
1976 if result.success {
1977 completed_results
1978 .write()
1979 .await
1980 .insert(result.subtask_id.clone(), result.result.clone());
1981 }
1982 if result.success
1983 && let Some(ref cache_arc) = self.cache
1984 {
1985 let mut cache_guard = cache_arc.lock().await;
1986 let cache_subtask = SubTask::new(&result.subtask_id, &result.result);
1987 let _ = cache_guard.put(&cache_subtask, &result).await;
1988 }
1989
1990 self.try_send_event(SwarmEvent::SubTaskUpdate {
1991 id: result.subtask_id.clone(),
1992 name: subtask_names
1993 .get(&result.subtask_id)
1994 .cloned()
1995 .unwrap_or_else(|| result.subtask_id.clone()),
1996 status: if result.success {
1997 SubTaskStatus::Completed
1998 } else {
1999 SubTaskStatus::Failed
2000 },
2001 agent_name: Some(format!("k8s-{}", result.subtask_id)),
2002 });
2003 if let Some(ref error) = result.error {
2004 self.try_send_event(SwarmEvent::AgentError {
2005 subtask_id: result.subtask_id.clone(),
2006 error: error.clone(),
2007 });
2008 }
2009 self.try_send_event(SwarmEvent::AgentOutput {
2010 subtask_id: result.subtask_id.clone(),
2011 output: result.result.clone(),
2012 });
2013 self.try_send_event(SwarmEvent::AgentComplete {
2014 subtask_id: result.subtask_id.clone(),
2015 success: result.success,
2016 steps: result.steps,
2017 });
2018 results.push(result);
2019 }
2020
            if active.len() > 1 {
                let mut observations = Vec::with_capacity(active.len());
                for (subtask_id, state) in &active {
                    let pod_state = match k8s.get_subagent_pod_state(subtask_id).await {
                        Ok(state) => state,
                        Err(error) => {
                            tracing::warn!(
                                subtask_id = %subtask_id,
                                error = %error,
                                "Failed to query pod state while sampling branch observation"
                            );
                            None
                        }
                    };
                    let (resource_health_score, infra_unhealthy_signals) =
                        compute_resource_health(pod_state.as_ref());

                    let logs = k8s.subagent_logs(subtask_id, 500).await.unwrap_or_default();
                    if let Some(probe) = latest_probe_from_logs(&logs) {
                        let compile_ok = pod_state
                            .as_ref()
                            .map(|p| probe.compile_ok && !p.phase.eq_ignore_ascii_case("failed"))
                            .unwrap_or(probe.compile_ok);
                        observations.push(BranchObservation {
                            subtask_id: subtask_id.clone(),
                            branch: state.branch.clone(),
                            compile_ok,
                            changed_files: probe_changed_files_set(&probe),
                            changed_lines: probe.changed_lines,
                            resource_health_score,
                            infra_unhealthy_signals,
                        });
                        continue;
                    }
                    let compile_ok = pod_state
                        .as_ref()
                        .map(|p| !p.phase.eq_ignore_ascii_case("failed"))
                        .unwrap_or(false);
                    observations.push(BranchObservation {
                        subtask_id: subtask_id.clone(),
                        branch: state.branch.clone(),
                        compile_ok,
                        changed_files: std::collections::HashSet::new(),
                        changed_lines: 0,
                        resource_health_score,
                        infra_unhealthy_signals,
                    });
                }

                let tick = collapse_controller.sample_observations(&observations);
                if promoted_subtask_id != tick.promoted_subtask_id {
                    promoted_subtask_id = tick.promoted_subtask_id.clone();
                    if let Some(ref promoted) = promoted_subtask_id {
                        tracing::info!(subtask_id = %promoted, "Collapse controller promoted branch");
                        if let Some(audit) = crate::audit::try_audit_log() {
                            audit
                                .log_with_correlation(
                                    crate::audit::AuditCategory::Swarm,
                                    "collapse_promote_branch",
                                    crate::audit::AuditOutcome::Success,
                                    Some("collapse-controller".to_string()),
                                    Some(serde_json::json!({
                                        "swarm_id": swarm_id,
                                        "subtask_id": promoted,
                                        "execution_mode": "kubernetes_pod",
                                    })),
                                    None,
                                    None,
                                    Some(swarm_id.to_string()),
                                    None,
                                )
                                .await;
                        }
                    }
                }

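                // Apply the controller's kill decisions: delete the pod, record the
                // reason, and report the branch as cancelled.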
                for kill in tick.kills {
                    if kill_reasons.contains_key(&kill.subtask_id) {
                        continue;
                    }
                    if !active.contains_key(&kill.subtask_id) {
                        continue;
                    }

                    if let Err(error) = k8s.delete_subagent_pod(&kill.subtask_id).await {
                        tracing::warn!(
                            subtask_id = %kill.subtask_id,
                            error = %error,
                            "Failed deleting Kubernetes pod after collapse kill"
                        );
                    }
                    kill_reasons.insert(kill.subtask_id.clone(), kill.reason.clone());
                    let elapsed_ms = active
                        .remove(&kill.subtask_id)
                        .map(|s| s.started_at.elapsed().as_millis() as u64)
                        .unwrap_or(0);

                    tracing::warn!(
                        subtask_id = %kill.subtask_id,
                        branch = %kill.branch,
                        reason = %kill.reason,
                        "Collapse controller killed Kubernetes branch"
                    );

                    if let Some(audit) = crate::audit::try_audit_log() {
                        audit
                            .log_with_correlation(
                                crate::audit::AuditCategory::Swarm,
                                "collapse_kill_branch",
                                crate::audit::AuditOutcome::Success,
                                Some("collapse-controller".to_string()),
                                Some(serde_json::json!({
                                    "swarm_id": swarm_id,
                                    "subtask_id": kill.subtask_id.clone(),
                                    "branch": kill.branch.clone(),
                                    "reason": kill.reason.clone(),
                                    "execution_mode": "kubernetes_pod",
                                })),
                                None,
                                None,
                                Some(swarm_id.to_string()),
                                None,
                            )
                            .await;
                    }

                    self.try_send_event(SwarmEvent::SubTaskUpdate {
                        id: kill.subtask_id.clone(),
                        name: subtask_names
                            .get(&kill.subtask_id)
                            .cloned()
                            .unwrap_or_else(|| kill.subtask_id.clone()),
                        status: SubTaskStatus::Cancelled,
                        agent_name: Some(format!("agent-{}", kill.subtask_id)),
                    });
                    self.try_send_event(SwarmEvent::AgentError {
                        subtask_id: kill.subtask_id.clone(),
                        error: format!("Cancelled by collapse controller: {}", kill.reason),
                    });

                    results.push(SubTaskResult {
                        subtask_id: kill.subtask_id.clone(),
                        subagent_id: format!("agent-{}", kill.subtask_id),
                        success: false,
                        result: format!(
                            "\n\n--- Collapse Controller ---\nBranch terminated: {}",
                            kill.reason
                        ),
                        steps: 0,
                        tool_calls: 0,
                        execution_time_ms: elapsed_ms,
                        error: Some(format!("Cancelled by collapse controller: {}", kill.reason)),
                        artifacts: Vec::new(),
                        retry_count: 0,
                    });
                }
            }
        }

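        // Order the promoted branch (if any) first in the returned results.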
        if let Some(ref promoted) = promoted_subtask_id {
            results.sort_by_key(|result| {
                if &result.subtask_id == promoted {
                    0usize
                } else {
                    1usize
                }
            });
        }

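        // Best-effort cleanup of any pods still alive at the end of the stage.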
        if !active.is_empty() {
            let residual_ids: Vec<String> = active.keys().cloned().collect();
            for subtask_id in residual_ids {
                if let Err(error) = k8s.delete_subagent_pod(&subtask_id).await {
                    tracing::warn!(
                        subtask_id = %subtask_id,
                        error = %error,
                        "Failed deleting residual Kubernetes pod at stage end"
                    );
                }
            }
        }

        Ok(results)
    }

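    /// Concatenates sub-task outputs into one report, rendering failed
    /// sub-tasks with their error message instead of their output. For a
    /// success followed by a failure, the report looks like:
    ///
    /// ```text
    /// === Subtask 1 ===
    /// <first sub-task output>
    ///
    /// === Subtask 2 (FAILED) ===
    /// Error: <error message>
    /// ```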
    async fn aggregate_results(&self, results: &[SubTaskResult]) -> Result<String> {
        let mut aggregated = String::new();

        for (i, result) in results.iter().enumerate() {
            if result.success {
                aggregated.push_str(&format!("=== Subtask {} ===\n{}\n\n", i + 1, result.result));
            } else {
                aggregated.push_str(&format!(
                    "=== Subtask {} (FAILED) ===\nError: {}\n\n",
                    i + 1,
                    result.error.as_deref().unwrap_or("Unknown error")
                ));
            }
        }

        Ok(aggregated)
    }

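    /// Runs `task` as a single sub-agent by executing the swarm with
    /// `DecompositionStrategy::None`, skipping decomposition entirely.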
    pub async fn execute_single(&self, task: &str) -> Result<SwarmResult> {
        self.execute(task, DecompositionStrategy::None).await
    }
}

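/// Fluent builder over [`SwarmConfig`] for constructing a [`SwarmExecutor`].
///
/// A minimal usage sketch; the limits shown are illustrative, not defaults:
///
/// ```ignore
/// let executor = SwarmExecutorBuilder::new()
///     .max_subagents(4)
///     .max_steps_per_subagent(20)
///     .timeout_secs(300)
///     .parallel_enabled(true)
///     .build();
/// ```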
pub struct SwarmExecutorBuilder {
    config: SwarmConfig,
}

impl SwarmExecutorBuilder {
    pub fn new() -> Self {
        Self {
            config: SwarmConfig::default(),
        }
    }

    pub fn max_subagents(mut self, max: usize) -> Self {
        self.config.max_subagents = max;
        self
    }

    pub fn max_steps_per_subagent(mut self, max: usize) -> Self {
        self.config.max_steps_per_subagent = max;
        self
    }

    pub fn max_total_steps(mut self, max: usize) -> Self {
        self.config.max_total_steps = max;
        self
    }

    pub fn timeout_secs(mut self, secs: u64) -> Self {
        self.config.subagent_timeout_secs = secs;
        self
    }

    pub fn parallel_enabled(mut self, enabled: bool) -> Self {
        self.config.parallel_enabled = enabled;
        self
    }

    pub fn build(self) -> SwarmExecutor {
        SwarmExecutor::new(self.config)
    }
}

impl Default for SwarmExecutorBuilder {
    fn default() -> Self {
        Self::new()
    }
}

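/// Rewrites relative path arguments in a tool call to absolute paths under
/// `working_dir`, so sub-agents operate on the right checkout regardless of
/// the process working directory. Absolute paths and bare glob patterns are
/// left untouched; `bash` only gains a default `cwd` when none was given.
///
/// Illustrative effect, assuming a working dir of `/work/repo`: a `read`
/// call with `{"path": "src/main.rs"}` becomes
/// `{"path": "/work/repo/src/main.rs"}`.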
fn resolve_tool_paths(
    tool_name: &str,
    args: &mut serde_json::Value,
    working_dir: &std::path::Path,
) {
    match tool_name {
        "read" | "write" | "list" | "grep" | "codesearch" => {
            if let Some(path) = args.get("path").and_then(|v| v.as_str()).map(String::from)
                && !std::path::Path::new(&path).is_absolute()
            {
                args["path"] = serde_json::json!(working_dir.join(&path).display().to_string());
            }
        }
        "edit" => {
            if let Some(path) = args
                .get("filePath")
                .and_then(|v| v.as_str())
                .map(String::from)
                && !std::path::Path::new(&path).is_absolute()
            {
                args["filePath"] = serde_json::json!(working_dir.join(&path).display().to_string());
            }
        }
        "glob" => {
            if let Some(pattern) = args
                .get("pattern")
                .and_then(|v| v.as_str())
                .map(String::from)
                && !std::path::Path::new(&pattern).is_absolute()
                && !pattern.starts_with('*')
            {
                args["pattern"] =
                    serde_json::json!(working_dir.join(&pattern).display().to_string());
            }
        }
        "multiedit" => {
            if let Some(edits) = args.get_mut("edits").and_then(|v| v.as_array_mut()) {
                for edit in edits.iter_mut() {
                    if let Some(file) = edit.get("file").and_then(|v| v.as_str()).map(String::from)
                        && !std::path::Path::new(&file).is_absolute()
                    {
                        edit["file"] =
                            serde_json::json!(working_dir.join(&file).display().to_string());
                    }
                }
            }
        }
        "patch" => {
            if let Some(path) = args.get("file").and_then(|v| v.as_str()).map(String::from)
                && !std::path::Path::new(&path).is_absolute()
            {
                args["file"] = serde_json::json!(working_dir.join(&path).display().to_string());
            }
        }
        "bash" => {
            if args.get("cwd").and_then(|v| v.as_str()).is_none() {
                args["cwd"] = serde_json::json!(working_dir.display().to_string());
            }
        }
        _ => {}
    }
}

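/// Drives one sub-agent's agentic loop: call the model, execute any requested
/// tools, feed the results back, and repeat until the model stops requesting
/// tools, the step budget is exhausted, or the rolling deadline expires.
/// Returns the accumulated text output, steps taken, tool calls made, and the
/// exit reason.
///
/// A minimal invocation sketch; the provider, tool list, and registry are
/// assumed to be configured elsewhere, and the literal values are
/// illustrative:
///
/// ```ignore
/// let (output, steps, tool_calls, exit) = run_agent_loop(
///     provider,
///     "some-model",
///     "You are a focused sub-agent.",
///     "Summarize the repository layout.",
///     tools,
///     registry,
///     20,   // max_steps
///     300,  // timeout_secs
///     None, // event_tx
///     "subtask-1".to_string(),
///     None, // bus
///     None, // working_dir
/// )
/// .await?;
/// ```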
#[allow(clippy::too_many_arguments)]
pub async fn run_agent_loop(
    provider: Arc<dyn Provider>,
    model: &str,
    system_prompt: &str,
    user_prompt: &str,
    tools: Vec<crate::provider::ToolDefinition>,
    registry: Arc<ToolRegistry>,
    max_steps: usize,
    timeout_secs: u64,
    event_tx: Option<mpsc::Sender<SwarmEvent>>,
    subtask_id: String,
    bus: Option<Arc<AgentBus>>,
    working_dir: Option<std::path::PathBuf>,
) -> Result<(String, usize, usize, AgentLoopExit)> {
    // Fixed sampling temperature for sub-agent completions.
    let temperature = 0.7;

    tracing::info!(
        model = %model,
        max_steps = max_steps,
        timeout_secs = timeout_secs,
        "Sub-agent starting agentic loop"
    );
    tracing::debug!(system_prompt = %system_prompt, "Sub-agent system prompt");
    tracing::debug!(user_prompt = %user_prompt, "Sub-agent user prompt");

    let mut messages = vec![
        Message {
            role: Role::System,
            content: vec![ContentPart::Text {
                text: system_prompt.to_string(),
            }],
        },
        Message {
            role: Role::User,
            content: vec![ContentPart::Text {
                text: user_prompt.to_string(),
            }],
        },
    ];

    let mut steps = 0;
    let mut total_tool_calls = 0;
    let mut final_output = String::new();

    let mut deadline = Instant::now() + Duration::from_secs(timeout_secs);

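    // Each iteration is one step: a model call plus any tool executions. The
    // loop exits when the model finishes without tool calls, the step budget
    // runs out, or the deadline passes.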
    let exit_reason = loop {
        if steps >= max_steps {
            tracing::warn!(max_steps = max_steps, "Sub-agent reached max steps limit");
            break AgentLoopExit::MaxStepsReached;
        }

        if Instant::now() > deadline {
            tracing::warn!(timeout_secs = timeout_secs, "Sub-agent timed out");
            break AgentLoopExit::TimedOut;
        }

        steps += 1;
        tracing::info!(step = steps, "Sub-agent step starting");

        truncate_messages_to_fit(&mut messages, DEFAULT_CONTEXT_LIMIT);

        let request = CompletionRequest {
            messages: messages.clone(),
            tools: tools.clone(),
            model: model.to_string(),
            temperature: Some(temperature),
            top_p: None,
            max_tokens: Some(8192),
            stop: Vec::new(),
        };

        let step_start = Instant::now();
        let response = timeout(Duration::from_secs(120), provider.complete(request)).await??;
        let step_duration = step_start.elapsed();

        tracing::info!(
            step = steps,
            duration_ms = step_duration.as_millis() as u64,
            finish_reason = ?response.finish_reason,
            prompt_tokens = response.usage.prompt_tokens,
            completion_tokens = response.usage.completion_tokens,
            "Sub-agent step completed LLM call"
        );

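        // Partition the response into plain text, thinking, and tool-call parts.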
        let mut text_parts = Vec::new();
        let mut thinking_parts = Vec::new();
        let mut tool_calls = Vec::new();

        for part in &response.message.content {
            match part {
                ContentPart::Text { text } => {
                    text_parts.push(text.clone());
                }
                ContentPart::Thinking { text } if !text.is_empty() => {
                    thinking_parts.push(text.clone());
                }
                ContentPart::ToolCall {
                    id,
                    name,
                    arguments,
                    ..
                } => {
                    tool_calls.push((id.clone(), name.clone(), arguments.clone()));
                }
                _ => {}
            }
        }

        if !thinking_parts.is_empty()
            && let Some(ref bus) = bus
        {
            let thinking_text = thinking_parts.join("\n");
            let handle = bus.handle(&subtask_id);
            handle.send(
                format!("agent.{subtask_id}.thinking"),
                BusMessage::AgentThinking {
                    agent_id: subtask_id.clone(),
                    thinking: thinking_text,
                    step: steps,
                },
            );
        }

        if !text_parts.is_empty() {
            let step_output = text_parts.join("\n");
            if !final_output.is_empty() {
                final_output.push('\n');
            }
            final_output.push_str(&step_output);
            tracing::info!(
                step = steps,
                output_len = final_output.len(),
                "Sub-agent text output"
            );
            tracing::debug!(step = steps, output = %final_output, "Sub-agent full output");

            if let Some(ref tx) = event_tx {
                let preview = if step_output.len() > 500 {
                    let mut end = 500;
                    while end > 0 && !step_output.is_char_boundary(end) {
                        end -= 1;
                    }
                    format!("{}...", &step_output[..end])
                } else {
                    step_output.clone()
                };
                let _ = tx.try_send(SwarmEvent::AgentMessage {
                    subtask_id: subtask_id.clone(),
                    entry: AgentMessageEntry {
                        role: "assistant".to_string(),
                        content: preview,
                        is_tool_call: false,
                    },
                });
            }
        }

        if !tool_calls.is_empty() {
            tracing::info!(
                step = steps,
                num_tool_calls = tool_calls.len(),
                tools = ?tool_calls.iter().map(|(_, name, _)| name.as_str()).collect::<Vec<_>>(),
                "Sub-agent requesting tool calls"
            );
        }

        messages.push(response.message.clone());

        if response.finish_reason != FinishReason::ToolCalls || tool_calls.is_empty() {
            tracing::info!(
                steps = steps,
                total_tool_calls = total_tool_calls,
                "Sub-agent finished"
            );
            break AgentLoopExit::Completed;
        }

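        // Execute each requested tool call and collect the (possibly truncated)
        // results.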
        let mut tool_results = Vec::new();

        for (call_id, tool_name, arguments) in tool_calls {
            total_tool_calls += 1;

            if let Some(ref tx) = event_tx {
                let _ = tx.try_send(SwarmEvent::AgentToolCall {
                    subtask_id: subtask_id.clone(),
                    tool_name: tool_name.clone(),
                });
            }

            tracing::info!(
                step = steps,
                tool_call_id = %call_id,
                tool = %tool_name,
                "Executing tool"
            );
            tracing::debug!(
                tool = %tool_name,
                arguments = %arguments,
                "Tool call arguments"
            );

            let tool_start = Instant::now();
            let mut tool_success = true;
            let result = if let Some(tool) = registry.get(&tool_name) {
                let mut args: serde_json::Value =
                    serde_json::from_str(&arguments).unwrap_or_else(|e| {
                        tracing::warn!(tool = %tool_name, error = %e, raw = %arguments, "Failed to parse tool arguments");
                        serde_json::json!({})
                    });

                if let Some(ref wd) = working_dir {
                    resolve_tool_paths(&tool_name, &mut args, wd);
                }
                let agent_name = format!("agent-{subtask_id}");
                let provenance =
                    ExecutionProvenance::for_operation(&agent_name, ExecutionOrigin::Swarm);
                args = enrich_tool_input_with_runtime_context(
                    &args,
                    working_dir
                        .as_deref()
                        .unwrap_or_else(|| std::path::Path::new(".")),
                    Some(model),
                    &subtask_id,
                    &agent_name,
                    Some(&provenance),
                );

                match tool.execute(args).await {
                    Ok(r) => {
                        if r.success {
                            tracing::info!(
                                tool = %tool_name,
                                duration_ms = tool_start.elapsed().as_millis() as u64,
                                success = true,
                                "Tool execution completed"
                            );
                            r.output
                        } else {
                            tool_success = false;
                            tracing::warn!(
                                tool = %tool_name,
                                error = %r.output,
                                "Tool returned error"
                            );
                            format!("Tool error: {}", r.output)
                        }
                    }
                    Err(e) => {
                        tool_success = false;
                        tracing::error!(
                            tool = %tool_name,
                            error = %e,
                            "Tool execution failed"
                        );
                        format!("Tool execution failed: {}", e)
                    }
                }
            } else {
                tool_success = false;
                tracing::error!(tool = %tool_name, "Unknown tool requested");
                format!("Unknown tool: {}", tool_name)
            };

            if let Some(ref tx) = event_tx {
                let input_preview = if arguments.len() > 200 {
                    let mut end = 200;
                    while end > 0 && !arguments.is_char_boundary(end) {
                        end -= 1;
                    }
                    format!("{}...", &arguments[..end])
                } else {
                    arguments.clone()
                };
                let output_preview = if result.len() > 500 {
                    let mut end = 500;
                    while end > 0 && !result.is_char_boundary(end) {
                        end -= 1;
                    }
                    format!("{}...", &result[..end])
                } else {
                    result.clone()
                };
                let _ = tx.try_send(SwarmEvent::AgentToolCallDetail {
                    subtask_id: subtask_id.clone(),
                    detail: AgentToolCallDetail {
                        tool_name: tool_name.clone(),
                        input_preview,
                        output_preview,
                        success: tool_success,
                    },
                });
            }

            tracing::debug!(
                tool = %tool_name,
                result_len = result.len(),
                "Tool result"
            );

            if let Some(ref bus) = bus {
                let handle = bus.handle(&subtask_id);
                handle.send(
                    format!("tools.{tool_name}"),
                    BusMessage::ToolOutputFull {
                        agent_id: subtask_id.clone(),
                        tool_name: tool_name.clone(),
                        output: result.clone(),
                        success: tool_success,
                    },
                );
            }

            let result = if result.len() > RLM_THRESHOLD_CHARS {
                process_large_result_with_rlm(&result, &tool_name, Arc::clone(&provider), model)
                    .await
            } else {
                truncate_single_result(&result, SIMPLE_TRUNCATE_CHARS)
            };

            tool_results.push((call_id, tool_name, result));
        }

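        // Feed tool outputs back to the model as Role::Tool messages keyed by
        // their originating call id.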
        for (call_id, _tool_name, result) in tool_results {
            messages.push(Message {
                role: Role::Tool,
                content: vec![ContentPart::ToolResult {
                    tool_call_id: call_id,
                    content: result,
                }],
            });
        }

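        // Extend the deadline after each completed step so `timeout_secs`
        // bounds the time per step rather than the whole loop.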
        deadline = Instant::now() + Duration::from_secs(timeout_secs);
    };

    Ok((final_output, steps, total_tool_calls, exit_reason))
}