use zeph_llm::provider::{LlmProvider, Message, MessageMetadata, MessagePart, Role};
use zeph_tools::executor::{ToolError, ToolOutput};
use super::super::{Agent, DOOM_LOOP_WINDOW, format_tool_output};
use super::{AnomalyOutcome, doom_loop_hash, first_tool_name};
use crate::channel::{Channel, ToolOutputEvent, ToolStartEvent};
use tokio_stream::StreamExt;
use tracing::Instrument;
use zeph_sanitizer::{ContentSource, ContentSourceKind};
use zeph_skills::evolution::FailureKind;
impl<C: Channel> Agent<C> {
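/// Processes the next LLM turn, dispatching to the native tool-use path
/// when the provider supports it and otherwise running the legacy
/// text-extraction loop, bounded by `max_iterations` and the cancel token.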
pub(crate) async fn process_response(&mut self) -> Result<(), super::super::error::AgentError> {
self.security.flagged_urls.clear();
if self.provider.supports_tool_use() {
tracing::debug!(
provider = self.provider.name(),
"using native tool_use path"
);
return self.process_response_native_tools().await;
}
tracing::debug!(
provider = self.provider.name(),
"using legacy text extraction path"
);
self.tool_orchestrator.clear_doom_history();
self.tool_orchestrator.clear_recent_tool_calls();
for iteration in 0..self.tool_orchestrator.max_iterations {
if self.lifecycle.cancel_token.is_cancelled() {
tracing::info!("tool loop cancelled by user");
break;
}
if self.process_legacy_turn(iteration).await?.is_some() {
return Ok(());
}
}
Ok(())
}
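/// Runs one iteration of the legacy tool loop: verifies the context budget,
/// calls the LLM (with retry), executes the first tool call extracted from
/// the response, then summarizes, prunes, and checks for doom loops.
///
/// Returns `Ok(Some(()))` when the caller should stop iterating, `Ok(None)`
/// to continue.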
async fn process_legacy_turn(
&mut self,
iteration: usize,
) -> Result<Option<()>, super::super::error::AgentError> {
self.channel.send_typing().await?;
if let Some(ref budget) = self.context_manager.budget {
let used = usize::try_from(self.providers.cached_prompt_tokens).unwrap_or(usize::MAX);
let threshold = budget.max_tokens() * 4 / 5;
if used >= threshold {
tracing::warn!(
iteration,
used,
threshold,
"stopping tool loop: context budget nearing limit"
);
self.channel
.send("Stopping: context window is nearly full.")
.await?;
return Ok(Some(()));
}
}
let _ = self.channel.send_status("thinking...").await;
let Some(response) = self.call_llm_with_retry(2).await? else {
let _ = self.channel.send_status("").await;
return Ok(Some(()));
};
let _ = self.channel.send_status("").await;
if response.trim().is_empty() {
tracing::warn!("received empty response from LLM, skipping");
self.record_skill_outcomes("empty_response", None, None)
.await;
if !self.learning_engine.was_reflection_used()
&& self
.attempt_self_reflection("LLM returned empty response", "")
.await?
{
return Ok(Some(()));
}
self.channel
.send("Received an empty response. Please try again.")
.await?;
return Ok(Some(()));
}
self.persist_message(Role::Assistant, &response, &[], false)
.await;
self.push_message(Message {
role: Role::Assistant,
content: response.clone(),
parts: vec![],
metadata: MessageMetadata::default(),
});
#[cfg(feature = "compression-guidelines")]
self.maybe_log_compression_failure(&response).await;
self.inject_active_skill_env();
let tool_name = first_tool_name(&response);
if let Some(result) = self.check_repeat_detection(&response, tool_name).await? {
return Ok(result);
}
let status_msg = format!("running {tool_name}...");
let _ = self.channel.send_status(&status_msg).await;
let result = self.execute_tool_with_trace(tool_name, &response).await;
let _ = self.channel.send_status("").await;
self.tool_executor.set_skill_env(None);
if !self.handle_tool_result(&response, result).await? {
return Ok(Some(()));
}
self.maybe_summarize_tool_pair().await;
let keep_recent = 2 * self.memory_state.tool_call_cutoff + 2;
self.prune_stale_tool_outputs(keep_recent);
self.maybe_apply_deferred_summaries();
self.flush_deferred_summaries().await;
self.maybe_soft_compact_mid_iteration();
self.flush_deferred_summaries().await;
if self.check_doom_loop(iteration).await? {
return Ok(Some(()));
}
Ok(None)
}
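/// Executes a tool call parsed from the raw response text, wrapping the
/// execution in a `tool_exec` span and recording latency and error
/// attributes when a trace collector is active.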
async fn execute_tool_with_trace(
&mut self,
tool_name: &str,
response: &str,
) -> Result<Option<ToolOutput>, ToolError> {
let trace_guard = self.debug_state.trace_collector.as_ref().and_then(|tc| {
self.debug_state
.current_iteration_span_id
.map(|id| tc.begin_tool_call(tool_name, id))
});
let start = std::time::Instant::now();
let result = self
.tool_executor
.execute_erased(response)
.instrument(tracing::info_span!("tool_exec"))
.await;
if let Some(guard) = trace_guard
&& let Some(ref mut tc) = self.debug_state.trace_collector
{
let latency = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX);
let is_error = result.is_err();
let error_kind = result.as_ref().err().map(std::string::ToString::to_string);
tc.end_tool_call(
guard,
tool_name,
crate::debug_dump::trace::ToolAttributes {
latency_ms: latency,
is_error,
error_kind,
},
);
}
result
}
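/// Detects a repeated identical tool call by hashing the raw response. On a
/// repeat, a synthetic `[error]` tool output is fed back to the model instead
/// of re-executing; `Ok(Some(_))` tells the caller the turn was already
/// handled (the inner value says whether to stop), `Ok(None)` means the call
/// is new and was recorded for future comparison.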
async fn check_repeat_detection(
&mut self,
response: &str,
tool_name: &str,
) -> Result<Option<Option<()>>, super::super::error::AgentError> {
use std::hash::{DefaultHasher, Hash, Hasher};
let mut h = DefaultHasher::new();
response.hash(&mut h);
let args_hash = h.finish();
if self.tool_orchestrator.is_repeat(tool_name, args_hash) {
tracing::warn!(
tool = tool_name,
"[repeat-detect] identical tool call detected in legacy path"
);
self.tool_executor.set_skill_env(None);
let msg = format!(
"[error] Repeated identical call to {tool_name} detected. \
Use different arguments or a different approach."
);
if !self
.handle_tool_result(
response,
Ok(Some(ToolOutput {
tool_name: tool_name.to_owned(),
summary: msg,
blocks_executed: 0,
filter_stats: None,
diff: None,
streamed: false,
terminal_id: None,
locations: None,
raw_response: None,
claim_source: None,
})),
)
.await?
{
return Ok(Some(Some(())));
}
return Ok(Some(None));
}
self.tool_orchestrator.push_tool_call(tool_name, args_hash);
Ok(None)
}
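/// Single LLM call entry point: bails out on cancellation or an exhausted
/// cost budget, returns a cached response on a semantic cache hit, and
/// otherwise dispatches to the streaming or non-streaming path under the
/// configured timeout, wrapping the request in debug dumps and trace spans.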
pub(super) async fn call_llm_with_timeout(
&mut self,
) -> Result<Option<String>, super::super::error::AgentError> {
if self.lifecycle.cancel_token.is_cancelled() {
return Ok(None);
}
if let Some(ref tracker) = self.metrics.cost_tracker
&& let Err(e) = tracker.check_budget()
{
self.channel
.send(&format!("Budget limit reached: {e}"))
.await?;
return Ok(None);
}
let query_embedding = match self.check_response_cache().await? {
super::CacheCheckResult::Hit(resp) => return Ok(Some(resp)),
super::CacheCheckResult::Miss { query_embedding } => query_embedding,
};
let llm_timeout = std::time::Duration::from_secs(self.runtime.timeouts.llm_seconds);
let start = std::time::Instant::now();
let prompt_estimate = self.providers.cached_prompt_tokens;
let dump_id =
self.debug_state
.debug_dumper
.as_ref()
.map(|d: &crate::debug_dump::DebugDumper| {
d.dump_request(&crate::debug_dump::RequestDebugDump {
model_name: &self.runtime.model_name,
messages: &self.msg.messages,
tools: &[],
provider_request: self.provider.debug_request_json(
&self.msg.messages,
&[],
self.provider.supports_streaming(),
),
})
});
let trace_guard = self.debug_state.trace_collector.as_ref().and_then(|tc| {
self.debug_state
.current_iteration_span_id
.map(|id| tc.begin_llm_request(id))
});
let llm_span = tracing::info_span!("llm_call", model = %self.runtime.model_name);
let result = if self.provider.supports_streaming() {
self.call_llm_streaming(
llm_timeout,
start,
prompt_estimate,
dump_id,
llm_span,
query_embedding,
)
.await
} else {
self.call_llm_non_streaming(
llm_timeout,
start,
prompt_estimate,
dump_id,
llm_span,
query_embedding,
)
.await
};
if let Some(guard) = trace_guard
&& let Some(ref mut tc) = self.debug_state.trace_collector
{
let latency = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX);
let (prompt_tokens, completion_tokens) =
self.provider.last_usage().unwrap_or((prompt_estimate, 0));
tc.end_llm_request(
guard,
&crate::debug_dump::trace::LlmAttributes {
model: self.runtime.model_name.clone(),
prompt_tokens,
completion_tokens,
latency_ms: latency,
streaming: self.provider.supports_streaming(),
cache_hit: false,
},
);
}
result
}
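/// Streaming LLM call: races the chat stream against the timeout and the
/// cancel token, reconciles token and cost metrics with provider-reported
/// usage, then redacts, verifies, and caches the response text.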
async fn call_llm_streaming(
&mut self,
llm_timeout: std::time::Duration,
start: std::time::Instant,
prompt_estimate: u64,
dump_id: Option<u32>,
llm_span: tracing::Span,
query_embedding: Option<Vec<f32>>,
) -> Result<Option<String>, super::super::error::AgentError> {
let cancel = self.lifecycle.cancel_token.clone();
let streaming_fut = self.process_response_streaming().instrument(llm_span);
let result = tokio::select! {
r = tokio::time::timeout(llm_timeout, streaming_fut) => r,
() = cancel.cancelled() => {
tracing::info!("LLM call cancelled by user");
self.update_metrics(|m| m.cancellations += 1);
self.channel.send("[Cancelled]").await?;
return Ok(None);
}
};
let Ok(r) = result else {
self.channel
.send("LLM request timed out. Please try again.")
.await?;
return Ok(None);
};
let latency = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX);
let (final_prompt, final_completion_opt) = self
.provider
.last_usage()
.map_or((prompt_estimate, None), |(p, c)| (p, Some(c)));
self.update_metrics(|m| {
m.api_calls += 1;
m.last_llm_latency_ms = latency;
m.context_tokens = final_prompt;
m.prompt_tokens += final_prompt;
if let Some(final_completion) = final_completion_opt {
m.completion_tokens = m
.completion_tokens
.saturating_sub(
r.as_ref()
.map_or(0, |s| u64::try_from(s.len()).unwrap_or(0) / 4),
)
.saturating_add(final_completion);
}
m.total_tokens = m.prompt_tokens + m.completion_tokens;
});
self.record_cache_usage();
let cost_completion = final_completion_opt.unwrap_or_else(|| {
r.as_ref()
.map_or(0, |s| u64::try_from(s.len()).unwrap_or(0) / 4)
});
self.record_cost(final_prompt, cost_completion);
let raw = r?;
if let (Some(d), Some(id)) = (self.debug_state.debug_dumper.as_ref(), dump_id) {
d.dump_response(id, &raw);
}
let redacted = self.maybe_redact(&raw).into_owned();
if self.run_response_verification(&redacted) {
let _ = self
.channel
.send("[security] Response blocked by injection detection.")
.await;
return Ok(None);
}
let cleaned = self.scan_output_and_warn(&redacted);
self.store_response_in_cache(&cleaned, query_embedding)
.await;
Ok(Some(cleaned))
}
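/// Non-streaming LLM call: awaits a single `chat` response under the same
/// timeout and cancellation handling, updates metrics and cost, runs
/// injection verification, and sends the redacted text to the channel
/// before caching and returning it.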
async fn call_llm_non_streaming(
&mut self,
llm_timeout: std::time::Duration,
start: std::time::Instant,
prompt_estimate: u64,
dump_id: Option<u32>,
llm_span: tracing::Span,
query_embedding: Option<Vec<f32>>,
) -> Result<Option<String>, super::super::error::AgentError> {
let cancel = self.lifecycle.cancel_token.clone();
let chat_fut = self.provider.chat(&self.msg.messages).instrument(llm_span);
let result = tokio::select! {
r = tokio::time::timeout(llm_timeout, chat_fut) => r,
() = cancel.cancelled() => {
tracing::info!("LLM call cancelled by user");
self.update_metrics(|m| m.cancellations += 1);
self.channel.send("[Cancelled]").await?;
return Ok(None);
}
};
match result {
Ok(Ok(resp)) => {
let latency = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX);
let completion_heuristic = u64::try_from(resp.len()).unwrap_or(0) / 4;
let (final_prompt, final_completion) = self
.provider
.last_usage()
.unwrap_or((prompt_estimate, completion_heuristic));
self.update_metrics(|m| {
m.api_calls += 1;
m.last_llm_latency_ms = latency;
m.context_tokens = final_prompt;
m.prompt_tokens += final_prompt;
m.completion_tokens += final_completion;
m.total_tokens = m.prompt_tokens + m.completion_tokens;
});
self.record_cache_usage();
self.record_cost(final_prompt, final_completion);
if self.run_response_verification(&resp) {
let _ = self
.channel
.send("[security] Response blocked by injection detection.")
.await;
return Ok(None);
}
let cleaned = self.scan_output_and_warn(&resp);
if let (Some(d), Some(id)) = (self.debug_state.debug_dumper.as_ref(), dump_id) {
d.dump_response(id, &cleaned);
}
let display = self.maybe_redact(&cleaned);
self.channel.send(&display).await?;
self.store_response_in_cache(&cleaned, query_embedding)
.await;
Ok(Some(cleaned))
}
Ok(Err(e)) => Err(e.into()),
Err(_) => {
self.channel
.send("LLM request timed out. Please try again.")
.await?;
Ok(None)
}
}
}
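/// Calls the LLM, compacting the context and retrying when the provider
/// reports a context-length error; any other error is returned as-is.
/// Panics (via `unreachable!`) if `max_attempts` is zero.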
pub(in crate::agent) async fn call_llm_with_retry(
&mut self,
max_attempts: usize,
) -> Result<Option<String>, super::super::error::AgentError> {
for attempt in 0..max_attempts {
match self.call_llm_with_timeout().await {
Ok(result) => return Ok(result),
Err(e) if e.is_context_length_error() && attempt + 1 < max_attempts => {
tracing::warn!(
attempt,
"LLM context length exceeded, compacting and retrying"
);
let _ = self
.channel
.send_status("context too long, compacting...")
.await;
self.compact_context().await?;
let _ = self.channel.send_status("").await;
}
Err(e) => return Err(e),
}
}
unreachable!("loop covers all attempts")
}
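/// Routes a tool execution result: successful output is pushed back into
/// the conversation, blocked, sandboxed, or cancelled commands are reported
/// to the user, confirmation requests are escalated, and other failures
/// record skill outcomes and may trigger self-reflection.
///
/// Returns `Ok(true)` when the loop should continue with new tool output,
/// `Ok(false)` when this turn is done.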
pub(super) async fn handle_tool_result(
&mut self,
response: &str,
result: Result<Option<ToolOutput>, ToolError>,
) -> Result<bool, super::super::error::AgentError> {
match result {
Ok(Some(output)) => self.process_successful_tool_output(output).await,
Ok(None) => {
self.record_skill_outcomes("success", None, None).await;
self.record_anomaly_outcome(AnomalyOutcome::Success).await?;
Ok(false)
}
Err(ToolError::Blocked { command }) => {
tracing::warn!("blocked command: {command}");
self.channel
.send("This command is blocked by security policy.")
.await?;
self.record_anomaly_outcome(AnomalyOutcome::Blocked).await?;
Ok(false)
}
Err(ToolError::ConfirmationRequired { command }) => {
self.handle_confirmation_required(response, &command).await
}
Err(ToolError::Cancelled) => {
tracing::info!("tool execution cancelled");
self.update_metrics(|m| m.cancellations += 1);
self.channel.send("[Cancelled]").await?;
Ok(false)
}
Err(ToolError::SandboxViolation { path }) => {
tracing::warn!("sandbox violation: {path}");
self.channel
.send("Command targets a path outside the sandbox.")
.await?;
self.record_anomaly_outcome(AnomalyOutcome::Error).await?;
Ok(false)
}
Err(e) => {
let category = e.category();
let err_str = format!("{e:#}");
tracing::error!("tool execution error: {err_str}");
if let Some(ref d) = self.debug_state.debug_dumper {
d.dump_tool_error("legacy", &e);
}
let kind = FailureKind::from(category);
let sanitized_err = self
.security
.sanitizer
.sanitize(&err_str, ContentSource::new(ContentSourceKind::McpResponse))
.body;
self.record_skill_outcomes("tool_failure", Some(&err_str), Some(kind.as_str()))
.await;
self.record_anomaly_outcome(AnomalyOutcome::Error).await?;
if !self.learning_engine.was_reflection_used()
&& self.attempt_self_reflection(&sanitized_err, "").await?
{
return Ok(false);
}
self.channel
.send("Tool execution failed. Please try a different approach.")
.await?;
Ok(false)
}
}
}
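/// Handles a successful tool output. Empty summaries end the turn early;
/// otherwise filter and skill metrics are recorded, `[error]` summaries may
/// trigger self-reflection, the output is emitted to the channel, sanitized
/// for the LLM, and appended to the conversation as a user-role tool-output
/// message before the anomaly outcome is recorded.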
async fn process_successful_tool_output(
&mut self,
output: ToolOutput,
) -> Result<bool, super::super::error::AgentError> {
if let Some(ref fs) = output.filter_stats {
self.record_filter_metrics(fs);
}
if output.summary.trim().is_empty() {
tracing::warn!("tool execution returned empty output");
self.record_skill_outcomes("success", None, None).await;
return Ok(false);
}
if output.summary.contains("[error]") || output.summary.contains("[exit code") {
let kind = FailureKind::from_error(&output.summary);
self.record_skill_outcomes("tool_failure", Some(&output.summary), Some(kind.as_str()))
.await;
if !self.learning_engine.was_reflection_used()
&& self
.attempt_self_reflection(&output.summary, &output.summary)
.await?
{
return Ok(false);
}
} else {
self.record_skill_outcomes("success", None, None).await;
}
let tool_call_id = uuid::Uuid::new_v4().to_string();
let tool_started_at = std::time::Instant::now();
self.channel
.send_tool_start(ToolStartEvent {
tool_name: &output.tool_name,
tool_call_id: &tool_call_id,
params: None,
parent_tool_use_id: self.session.parent_tool_use_id.clone(),
})
.await?;
if let Some(ref d) = self.debug_state.debug_dumper {
let dump_content = if self.security.pii_filter.is_enabled() {
self.security.pii_filter.scrub(&output.summary).into_owned()
} else {
output.summary.clone()
};
d.dump_tool_output(&output.tool_name, &dump_content);
}
let processed = self.maybe_summarize_tool_output(&output.summary).await;
let body = if let Some(ref fs) = output.filter_stats
&& fs.filtered_chars < fs.raw_chars
{
format!("{}\n{processed}", fs.format_inline(&output.tool_name))
} else {
processed.clone()
};
let filter_stats_inline = output.filter_stats.as_ref().and_then(|fs| {
(fs.filtered_chars < fs.raw_chars).then(|| fs.format_inline(&output.tool_name))
});
let formatted_output = format_tool_output(&output.tool_name, &body);
self.channel
.send_tool_output(ToolOutputEvent {
tool_name: &output.tool_name,
body: &self.maybe_redact(&body),
diff: None,
filter_stats: filter_stats_inline,
kept_lines: None,
locations: output.locations,
tool_call_id: &tool_call_id,
is_error: false,
parent_tool_use_id: self.session.parent_tool_use_id.clone(),
raw_response: output.raw_response.map(|r| self.redact_json(r)),
started_at: Some(tool_started_at),
})
.await?;
let (llm_body, has_injection_flags) = self
.sanitize_tool_output(&processed, &output.tool_name)
.await;
let user_msg = Message::from_parts(
Role::User,
vec![MessagePart::ToolOutput {
tool_name: output.tool_name.clone(),
body: llm_body,
compacted_at: None,
}],
);
self.persist_message(
Role::User,
&formatted_output,
&user_msg.parts,
has_injection_flags || !self.security.flagged_urls.is_empty(),
)
.await;
self.push_message(user_msg);
let outcome = if output.summary.contains("[error]") || output.summary.contains("[stderr]") {
AnomalyOutcome::Error
} else {
AnomalyOutcome::Success
};
self.record_anomaly_outcome(outcome).await?;
Ok(true)
}
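/// Prompts the user to approve a command that requires confirmation. If
/// approved, the tool call is executed through the confirmed path and its
/// output is emitted to the channel and appended to the conversation; if
/// declined, the command is reported as cancelled. Always returns
/// `Ok(false)`, ending the turn.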
async fn handle_confirmation_required(
&mut self,
response: &str,
command: &str,
) -> Result<bool, super::super::error::AgentError> {
let prompt = format!("Allow command: {command}?");
if self.channel.confirm(&prompt).await? {
if let Ok(Some(out)) = self.tool_executor.execute_confirmed_erased(response).await {
let confirmed_tool_call_id = uuid::Uuid::new_v4().to_string();
let confirmed_started_at = std::time::Instant::now();
self.channel
.send_tool_start(ToolStartEvent {
tool_name: &out.tool_name,
tool_call_id: &confirmed_tool_call_id,
params: None,
parent_tool_use_id: self.session.parent_tool_use_id.clone(),
})
.await?;
if let Some(ref d) = self.debug_state.debug_dumper {
let dump_content = if self.security.pii_filter.is_enabled() {
self.security.pii_filter.scrub(&out.summary).into_owned()
} else {
out.summary.clone()
};
d.dump_tool_output(&out.tool_name, &dump_content);
}
let processed = self.maybe_summarize_tool_output(&out.summary).await;
let formatted = format_tool_output(&out.tool_name, &processed);
self.channel
.send_tool_output(ToolOutputEvent {
tool_name: &out.tool_name,
body: &self.maybe_redact(&processed),
diff: None,
filter_stats: None,
kept_lines: None,
locations: out.locations,
tool_call_id: &confirmed_tool_call_id,
is_error: false,
parent_tool_use_id: self.session.parent_tool_use_id.clone(),
raw_response: out.raw_response.map(|r| self.redact_json(r)),
started_at: Some(confirmed_started_at),
})
.await?;
let (llm_body, has_injection_flags) =
self.sanitize_tool_output(&processed, &out.tool_name).await;
let confirmed_msg = Message::from_parts(
Role::User,
vec![MessagePart::ToolOutput {
tool_name: out.tool_name.clone(),
body: llm_body,
compacted_at: None,
}],
);
self.persist_message(
Role::User,
&formatted,
&confirmed_msg.parts,
has_injection_flags || !self.security.flagged_urls.is_empty(),
)
.await;
self.push_message(confirmed_msg);
}
} else {
self.channel.send("Command cancelled.").await?;
}
Ok(false)
}
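/// Drives the provider's chat stream to completion, forwarding content and
/// thinking chunks to the channel, logging tool-use chunks (unhandled on
/// this path), and applying server-side compaction summaries to the
/// in-memory history. Completion-token metrics are updated from provider
/// usage when available, or a length/4 heuristic otherwise.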
pub(super) async fn process_response_streaming(
&mut self,
) -> Result<String, super::super::error::AgentError> {
let mut stream = self.provider.chat_stream(&self.msg.messages).await?;
let mut response = String::with_capacity(2048);
loop {
let chunk_result = tokio::select! {
item = stream.next() => match item {
Some(r) => r,
None => break,
},
() = super::super::shutdown_signal(&mut self.lifecycle.shutdown) => {
tracing::info!("streaming interrupted by shutdown");
break;
}
() = self.lifecycle.cancel_token.cancelled() => {
tracing::info!("streaming interrupted by cancellation");
break;
}
};
match chunk_result? {
zeph_llm::StreamChunk::Content(chunk) => {
response.push_str(&chunk);
let display_chunk = self.maybe_redact(&chunk);
self.channel.send_chunk(&display_chunk).await?;
}
zeph_llm::StreamChunk::Thinking(thinking) => {
self.channel.send_thinking_chunk(&thinking).await?;
}
zeph_llm::StreamChunk::ToolUse(calls) => {
tracing::warn!(
count = calls.len(),
names = ?calls.iter().map(|c| c.name.as_str()).collect::<Vec<_>>(),
"tool calls received in streaming path (not handled; use chat_with_tools for tool execution)"
);
}
zeph_llm::StreamChunk::Compaction(raw_summary) => {
let _ = self
.channel
.send_status("Compacting context (server-side)...")
.await;
let source = ContentSource::new(ContentSourceKind::McpResponse);
let sanitized = self.security.sanitizer.sanitize(&raw_summary, source);
let summary = sanitized.body;
tracing::info!(
summary_len = summary.len(),
messages_before = self.msg.messages.len(),
"server-side compaction received via stream; pruning old messages"
);
let last_user = self
.msg
.messages
.iter()
.rposition(|m| m.role == Role::User)
.unwrap_or(0);
let tail: Vec<Message> = self.msg.messages.drain(last_user..).collect();
self.msg.messages.clear();
self.msg.messages.push(Message {
role: Role::Assistant,
content: summary.clone(),
parts: vec![MessagePart::Compaction {
summary: summary.clone(),
}],
metadata: MessageMetadata::default(),
});
self.msg.messages.extend(tail);
self.update_metrics(|m| m.server_compaction_events += 1);
let _ = self.channel.send_status("").await;
}
}
}
self.channel.flush_chunks().await?;
let completion_heuristic = u64::try_from(response.len()).unwrap_or(0) / 4;
let completion_tokens = self
.provider
.last_usage()
.map_or(completion_heuristic, |(_, out)| out);
self.update_metrics(|m| {
m.completion_tokens += completion_tokens;
m.total_tokens = m.prompt_tokens + m.completion_tokens;
});
Ok(response)
}
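/// Hashes the most recent message and records it; when the last
/// `DOOM_LOOP_WINDOW` hashes are identical, notifies the user and returns
/// `Ok(true)` so the caller can stop the tool loop.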
pub(super) async fn check_doom_loop(
&mut self,
iteration: usize,
) -> Result<bool, super::super::error::AgentError> {
if let Some(last_msg) = self.msg.messages.last() {
let hash = doom_loop_hash(&last_msg.content);
// Truncate previews by characters, not bytes, so multi-byte UTF-8 cannot panic the slice.
tracing::debug!(
iteration,
hash,
content_len = last_msg.content.len(),
content_preview = %last_msg.content.chars().take(120).collect::<String>(),
"doom-loop hash recorded"
);
self.tool_orchestrator.push_doom_hash(hash);
if self.tool_orchestrator.is_doom_loop() {
tracing::warn!(
iteration,
hash,
content_len = last_msg.content.len(),
content_preview = %last_msg.content.chars().take(200).collect::<String>(),
"doom-loop detected: {DOOM_LOOP_WINDOW} consecutive identical outputs"
);
self.channel
.send("Stopping: detected repeated identical tool outputs.")
.await?;
return Ok(true);
}
}
Ok(false)
}
}