// zeph_core/agent/utils.rs
1// SPDX-FileCopyrightText: 2026 Andrei G <bug-ops>
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4use zeph_llm::provider::{LlmProvider, Message, MessagePart, Role};
5
6use super::{Agent, CODE_CONTEXT_PREFIX};
7use crate::channel::Channel;
8use crate::metrics::{MetricsSnapshot, SECURITY_EVENT_CAP, SecurityEvent};
9use zeph_common::SecurityEventCategory;
10use zeph_tools::FilterStats;
11
12impl<C: Channel> Agent<C> {
13    /// Read the community-detection failure counter from `SemanticMemory` and update metrics.
14    pub fn sync_community_detection_failures(&self) {
15        if let Some(memory) = self.services.memory.persistence.memory.as_ref() {
16            let failures = memory.community_detection_failures();
17            self.update_metrics(|m| {
18                m.graph_community_detection_failures = failures;
19            });
20        }
21    }
22
23    /// Sync all graph counters (extraction count/failures) from `SemanticMemory` to metrics.
24    pub fn sync_graph_extraction_metrics(&self) {
25        if let Some(memory) = self.services.memory.persistence.memory.as_ref() {
26            let count = memory.graph_extraction_count();
27            let failures = memory.graph_extraction_failures();
28            self.update_metrics(|m| {
29                m.graph_extraction_count = count;
30                m.graph_extraction_failures = failures;
31            });
32        }
33    }
34
35    /// Fetch entity/edge/community counts from the graph store and write to metrics.
36    pub async fn sync_graph_counts(&self) {
37        let Some(memory) = self.services.memory.persistence.memory.as_ref() else {
38            return;
39        };
40        let Some(store) = memory.graph_store.as_ref() else {
41            return;
42        };
43        let (entities, edges, communities) = tokio::join!(
44            store.entity_count(),
45            store.active_edge_count(),
46            store.community_count()
47        );
48        self.update_metrics(|m| {
49            m.graph_entities_total = entities.unwrap_or(0).cast_unsigned();
50            m.graph_edges_total = edges.unwrap_or(0).cast_unsigned();
51            m.graph_communities_total = communities.unwrap_or(0).cast_unsigned();
52        });
53    }
54
55    /// Perform a real health check on the vector store and update metrics.
56    pub async fn check_vector_store_health(&self, backend_name: &str) {
57        let connected = match self.services.memory.persistence.memory.as_ref() {
58            Some(m) => m.is_vector_store_connected().await,
59            None => false,
60        };
61        let name = backend_name.to_owned();
62        self.update_metrics(|m| {
63            m.qdrant_available = connected;
64            m.vector_backend = name;
65        });
66    }
67
68    /// Fetch compression-guidelines metadata from `SQLite` and write to metrics.
69    ///
70    /// Only fetches version and `created_at`; does not load the full guidelines text.
71    /// Feature-gated: compiled only when `compression-guidelines` is enabled.
72    pub async fn sync_guidelines_status(&self) {
73        let Some(memory) = self.services.memory.persistence.memory.as_ref() else {
74            return;
75        };
76        let cid = self.services.memory.persistence.conversation_id;
77        match memory.sqlite().load_compression_guidelines_meta(cid).await {
78            Ok((version, created_at)) => {
79                #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
80                let version_u32 = u32::try_from(version).unwrap_or(0);
81                self.update_metrics(|m| {
82                    m.guidelines_version = version_u32;
83                    m.guidelines_updated_at = created_at;
84                });
85            }
86            Err(e) => {
87                tracing::warn!("failed to sync guidelines status: {e:#}");
88            }
89        }
90    }
91
92    pub(super) fn record_filter_metrics(&mut self, fs: &FilterStats) {
93        let saved = fs.estimated_tokens_saved() as u64;
94        let raw = (fs.raw_chars / 4) as u64;
95        let confidence = fs.confidence;
96        let was_filtered = fs.filtered_chars < fs.raw_chars;
97        self.update_metrics(|m| {
98            m.filter_raw_tokens += raw;
99            m.filter_saved_tokens += saved;
100            m.filter_applications += 1;
101            m.filter_total_commands += 1;
102            if was_filtered {
103                m.filter_filtered_commands += 1;
104            }
105            if let Some(c) = confidence {
106                match c {
107                    zeph_tools::FilterConfidence::Full => {
108                        m.filter_confidence_full += 1;
109                    }
110                    zeph_tools::FilterConfidence::Partial => {
111                        m.filter_confidence_partial += 1;
112                    }
113                    zeph_tools::FilterConfidence::Fallback => {
114                        m.filter_confidence_fallback += 1;
115                    }
116                }
117            }
118        });
119    }
120
121    pub(super) fn update_metrics(&self, f: impl FnOnce(&mut MetricsSnapshot)) {
122        if let Some(ref tx) = self.runtime.metrics.metrics_tx {
123            let elapsed = self.runtime.lifecycle.start_time.elapsed().as_secs();
124            tx.send_modify(|m| {
125                m.uptime_seconds = elapsed;
126                f(m);
127            });
128        }
129    }
130
131    /// Publish the effective context window limit from the active provider's budget into
132    /// [`MetricsSnapshot::context_max_tokens`].
133    ///
134    /// Call after the provider pool is constructed (builder) and on every successful `/provider`
135    /// switch so the TUI context gauge always reflects the active provider's window.
136    /// When no budget is configured the field is set to `0`, which the gauge renders as `"—"`.
137    pub(crate) fn publish_context_budget(&self) {
138        let max_tokens = self
139            .context_manager
140            .budget
141            .as_ref()
142            .map_or(0, |b| b.max_tokens() as u64);
143        self.update_metrics(|m| m.context_max_tokens = max_tokens);
144    }
145
146    /// Flush `metrics.pending_timings` into the rolling window and publish to the metrics snapshot.
147    ///
148    /// Call once per turn after all four phases have written to `pending_timings`.
149    /// Resets `pending_timings` to default after flushing.
150    pub(super) fn flush_turn_timings(&mut self) {
151        let timings = std::mem::take(&mut self.runtime.metrics.pending_timings);
152        tracing::debug!(
153            prepare_context_ms = timings.prepare_context_ms,
154            llm_chat_ms = timings.llm_chat_ms,
155            tool_exec_ms = timings.tool_exec_ms,
156            persist_message_ms = timings.persist_message_ms,
157            "turn timings"
158        );
159
160        if self.runtime.metrics.timing_window.len() >= 10 {
161            self.runtime.metrics.timing_window.pop_front();
162        }
163        self.runtime
164            .metrics
165            .timing_window
166            .push_back(timings.clone());
167
168        let count = self.runtime.metrics.timing_window.len();
169        let mut avg = crate::metrics::TurnTimings::default();
170        let mut max = crate::metrics::TurnTimings::default();
171        for t in &self.runtime.metrics.timing_window {
172            avg.prepare_context_ms = avg.prepare_context_ms.saturating_add(t.prepare_context_ms);
173            avg.llm_chat_ms = avg.llm_chat_ms.saturating_add(t.llm_chat_ms);
174            avg.tool_exec_ms = avg.tool_exec_ms.saturating_add(t.tool_exec_ms);
175            avg.persist_message_ms = avg.persist_message_ms.saturating_add(t.persist_message_ms);
176
177            max.prepare_context_ms = max.prepare_context_ms.max(t.prepare_context_ms);
178            max.llm_chat_ms = max.llm_chat_ms.max(t.llm_chat_ms);
179            max.tool_exec_ms = max.tool_exec_ms.max(t.tool_exec_ms);
180            max.persist_message_ms = max.persist_message_ms.max(t.persist_message_ms);
181        }
182        let n = count as u64;
183        avg.prepare_context_ms /= n;
184        avg.llm_chat_ms /= n;
185        avg.tool_exec_ms /= n;
186        avg.persist_message_ms /= n;
187
188        let total_ms = timings
189            .prepare_context_ms
190            .saturating_add(timings.llm_chat_ms)
191            .saturating_add(timings.tool_exec_ms)
192            .saturating_add(timings.persist_message_ms);
193
194        self.update_metrics(|m| {
195            m.last_turn_timings = timings;
196            m.avg_turn_timings = avg;
197            m.max_turn_timings = max;
198            m.timing_sample_count = n;
199        });
200
201        if let Some(ref recorder) = self.runtime.metrics.histogram_recorder {
202            recorder.observe_turn_duration(std::time::Duration::from_millis(total_ms));
203        }
204    }
205
206    /// Push the current classifier metrics snapshot into `MetricsSnapshot`.
207    ///
208    /// Call this after any classifier invocation (injection, PII, feedback) so the TUI panel
209    /// reflects the latest p50/p95 values. No-op when classifier metrics are not configured.
210    pub(super) fn push_classifier_metrics(&self) {
211        if let Some(ref m) = self.runtime.metrics.classifier_metrics {
212            let snapshot = m.snapshot();
213            self.update_metrics(|ms| ms.classifier = snapshot);
214        }
215    }
216
217    pub(super) fn push_security_event(
218        &self,
219        category: SecurityEventCategory,
220        source: &str,
221        detail: impl Into<String>,
222    ) {
223        if let Some(ref tx) = self.runtime.metrics.metrics_tx {
224            let event = SecurityEvent::new(category, source, detail);
225            let elapsed = self.runtime.lifecycle.start_time.elapsed().as_secs();
226            tx.send_modify(|m| {
227                m.uptime_seconds = elapsed;
228                if m.security_events.len() >= SECURITY_EVENT_CAP {
229                    m.security_events.pop_front();
230                }
231                m.security_events.push_back(event);
232            });
233        }
234    }
235
236    pub(super) fn recompute_prompt_tokens(&mut self) {
237        self.runtime.providers.cached_prompt_tokens = self
238            .msg
239            .messages
240            .iter()
241            .map(|m| self.runtime.metrics.token_counter.count_message_tokens(m) as u64)
242            .sum();
243    }
244
245    pub(super) fn push_message(&mut self, msg: Message) {
246        self.runtime.providers.cached_prompt_tokens +=
247            self.runtime
248                .metrics
249                .token_counter
250                .count_message_tokens(&msg) as u64;
251        if msg.role == zeph_llm::provider::Role::Assistant {
252            self.services.session.last_assistant_at = Some(std::time::Instant::now());
253        }
254        self.msg.messages.push(msg);
255        // Detect MagicDoc headers in tool output after pushing the message.
256        self.detect_magic_docs_in_messages();
257    }
258
259    pub(crate) fn record_cost_and_cache(&self, input_tokens: u64, output_tokens: u64) {
260        let (cache_write, cache_read) = self.provider.last_cache_usage().unwrap_or((0, 0));
261
262        if let Some(ref tracker) = self.runtime.metrics.cost_tracker {
263            let provider_name = if self.runtime.config.active_provider_name.is_empty() {
264                self.provider.name()
265            } else {
266                self.runtime.config.active_provider_name.as_str()
267            };
268            tracker.record_usage(
269                provider_name,
270                self.provider.provider_kind_str(),
271                &self.runtime.config.model_name,
272                input_tokens,
273                cache_read,
274                cache_write,
275                output_tokens,
276            );
277            let breakdown = tracker.provider_breakdown();
278            self.update_metrics(|m| {
279                m.cost_spent_cents = tracker.current_spend();
280                m.cache_creation_tokens += cache_write;
281                m.cache_read_tokens += cache_read;
282                m.provider_cost_breakdown = breakdown;
283            });
284        } else if cache_write > 0 || cache_read > 0 {
285            self.update_metrics(|m| {
286                m.cache_creation_tokens += cache_write;
287                m.cache_read_tokens += cache_read;
288            });
289        }
290    }
291
292    pub(crate) fn record_successful_task(&self) {
293        if let Some(ref tracker) = self.runtime.metrics.cost_tracker {
294            tracker.record_successful_task();
295            self.update_metrics(|m| {
296                m.cost_cps_cents = tracker.cps();
297                m.cost_successful_tasks = tracker.successful_tasks();
298            });
299        }
300    }
301
302    /// Extract a redacted preview of the last assistant message.
303    ///
304    /// Walks `self.msg.messages` in reverse to find the most recent `Role::Assistant`
305    /// message, takes up to `max_chars` Unicode scalar values from `message.content`,
306    /// and applies [`crate::redact::scrub_content`] to redact any secrets.
307    ///
308    /// Returns an empty string when no assistant message exists in the current turn.
309    pub(super) fn last_assistant_preview(&self, max_chars: usize) -> String {
310        let raw = self
311            .msg
312            .messages
313            .iter()
314            .rev()
315            .find(|m| m.role == Role::Assistant)
316            .map_or("", |m| m.content.as_str());
317
318        if raw.is_empty() {
319            return String::new();
320        }
321
322        // Truncate to max_chars before redaction to bound redaction work.
323        let truncated: &str = if raw.chars().count() > max_chars {
324            let end = raw
325                .char_indices()
326                .nth(max_chars)
327                .map_or(raw.len(), |(i, _)| i);
328            &raw[..end]
329        } else {
330            raw
331        };
332
333        crate::redact::scrub_content(truncated).into_owned()
334    }
335
336    /// Inject pre-formatted code context into the message list.
337    /// The caller is responsible for retrieving and formatting the text.
338    pub fn inject_code_context(&mut self, text: &str) {
339        self.remove_code_context_messages();
340        if text.is_empty() || self.msg.messages.len() <= 1 {
341            return;
342        }
343        let content = format!("{CODE_CONTEXT_PREFIX}{text}");
344        self.msg.messages.insert(
345            1,
346            Message::from_parts(
347                Role::System,
348                vec![MessagePart::CodeContext { text: content }],
349            ),
350        );
351    }
352
353    #[must_use]
354    pub fn context_messages(&self) -> &[Message] {
355        &self.msg.messages
356    }
357
358    /// Truncate stale tool result content in old messages to bound in-memory growth.
359    ///
360    /// After the LLM has seen and responded to tool output, the full content is no longer
361    /// needed in the hot message list (it is already persisted to `SQLite`). Truncating keeps
362    /// the in-process message vec small across long sessions.
363    ///
364    /// Skips the last 2 messages so the LLM retains full context for the next turn.
365    ///
366    /// Truncated variants: `MessagePart::ToolResult` (content) and `MessagePart::ToolOutput` (body).
367    pub(super) fn truncate_old_tool_results(&mut self) {
368        const LIMIT: usize = 2048;
369        const SUFFIX: &str = "…[truncated]";
370
371        let len = self.msg.messages.len();
372        if len <= 2 {
373            return;
374        }
375        for msg in &mut self.msg.messages[..len - 2] {
376            for part in &mut msg.parts {
377                match part {
378                    MessagePart::ToolResult { content, .. } if content.len() > LIMIT => {
379                        content.truncate(content.floor_char_boundary(LIMIT));
380                        content.push_str(SUFFIX);
381                    }
382                    MessagePart::ToolOutput { body, .. } if body.len() > LIMIT => {
383                        body.truncate(body.floor_char_boundary(LIMIT));
384                        body.push_str(SUFFIX);
385                    }
386                    _ => {}
387                }
388            }
389        }
390    }
391}
392
#[cfg(test)]
mod tests {
    //! Unit tests for the metrics/message utility methods on `Agent`.
    //! Mocks come from `agent_tests`; `Agent::new` inserts a system prompt at index 0,
    //! which several tests account for when computing message indices.
    use super::super::agent_tests::{
        MockChannel, MockToolExecutor, create_test_registry, mock_provider,
    };
    use super::*;
    use zeph_llm::provider::{MessageMetadata, MessagePart};

    // push_message must bump the cached prompt-token total by exactly the new
    // message's token count.
    #[test]
    fn push_message_increments_cached_tokens() {
        let provider = mock_provider(vec![]);
        let channel = MockChannel::new(vec![]);
        let registry = create_test_registry();
        let executor = MockToolExecutor::no_tools();
        let mut agent = Agent::new(provider, channel, registry, None, 5, executor);

        let before = agent.runtime.providers.cached_prompt_tokens;
        let msg = Message {
            role: Role::User,
            content: "hello world!!".to_string(),
            parts: vec![],
            metadata: MessageMetadata::default(),
        };
        // Compute the expected delta with the same counter the agent uses.
        let expected_delta = agent
            .runtime
            .metrics
            .token_counter
            .count_message_tokens(&msg) as u64;
        agent.push_message(msg);
        assert_eq!(
            agent.runtime.providers.cached_prompt_tokens,
            before + expected_delta
        );
    }

    // recompute_prompt_tokens must equal the sum of per-message token counts.
    #[test]
    fn recompute_prompt_tokens_matches_sum() {
        let provider = mock_provider(vec![]);
        let channel = MockChannel::new(vec![]);
        let registry = create_test_registry();
        let executor = MockToolExecutor::no_tools();
        let mut agent = Agent::new(provider, channel, registry, None, 5, executor);

        agent.msg.messages.push(Message {
            role: Role::User,
            content: "1234".to_string(),
            parts: vec![],
            metadata: MessageMetadata::default(),
        });
        agent.msg.messages.push(Message {
            role: Role::Assistant,
            content: "5678".to_string(),
            parts: vec![],
            metadata: MessageMetadata::default(),
        });

        agent.recompute_prompt_tokens();

        let expected: u64 = agent
            .msg
            .messages
            .iter()
            .map(|m| agent.runtime.metrics.token_counter.count_message_tokens(m) as u64)
            .sum();
        assert_eq!(agent.runtime.providers.cached_prompt_tokens, expected);
    }

    #[test]
    fn inject_code_context_into_messages_with_existing_content() {
        let provider = mock_provider(vec![]);
        let channel = MockChannel::new(vec![]);
        let registry = create_test_registry();
        let executor = MockToolExecutor::no_tools();
        let mut agent = Agent::new(provider, channel, registry, None, 5, executor);

        // Add a user message so we have more than 1 message
        agent.push_message(Message {
            role: Role::User,
            content: "question".to_string(),
            parts: vec![],
            metadata: MessageMetadata::default(),
        });

        agent.inject_code_context("some code here");

        let found = agent.msg.messages.iter().any(|m| {
            m.parts.iter().any(|p| {
                matches!(p, MessagePart::CodeContext { text } if text.contains("some code here"))
            })
        });
        assert!(found, "code context should be injected into messages");
    }

    #[test]
    fn inject_code_context_empty_text_is_noop() {
        let provider = mock_provider(vec![]);
        let channel = MockChannel::new(vec![]);
        let registry = create_test_registry();
        let executor = MockToolExecutor::no_tools();
        let mut agent = Agent::new(provider, channel, registry, None, 5, executor);

        agent.push_message(Message {
            role: Role::User,
            content: "question".to_string(),
            parts: vec![],
            metadata: MessageMetadata::default(),
        });
        let count_before = agent.msg.messages.len();

        agent.inject_code_context("");

        // No code context message inserted for empty text
        assert_eq!(agent.msg.messages.len(), count_before);
    }

    #[test]
    fn inject_code_context_with_single_message_is_noop() {
        let provider = mock_provider(vec![]);
        let channel = MockChannel::new(vec![]);
        let registry = create_test_registry();
        let executor = MockToolExecutor::no_tools();
        let mut agent = Agent::new(provider, channel, registry, None, 5, executor);
        // Only system prompt → len == 1 → inject should be noop
        let count_before = agent.msg.messages.len();

        agent.inject_code_context("some code");

        assert_eq!(agent.msg.messages.len(), count_before);
    }

    #[test]
    fn context_messages_returns_all_messages() {
        let provider = mock_provider(vec![]);
        let channel = MockChannel::new(vec![]);
        let registry = create_test_registry();
        let executor = MockToolExecutor::no_tools();
        let mut agent = Agent::new(provider, channel, registry, None, 5, executor);

        agent.push_message(Message {
            role: Role::User,
            content: "test".to_string(),
            parts: vec![],
            metadata: MessageMetadata::default(),
        });

        assert_eq!(agent.context_messages().len(), agent.msg.messages.len());
    }

    // Old messages (outside the last-2 window) get their tool parts truncated;
    // recent messages keep full content.
    #[test]
    fn truncate_old_tool_results_truncates_stale_content() {
        let provider = mock_provider(vec![]);
        let channel = MockChannel::new(vec![]);
        let registry = create_test_registry();
        let executor = MockToolExecutor::no_tools();
        let mut agent = Agent::new(provider, channel, registry, None, 5, executor);

        // 4096 chars — double the 2048-byte truncation limit.
        let big_content = "x".repeat(4096);

        // Message 0 (old) — should be truncated.
        agent.msg.messages.push(Message {
            role: Role::User,
            content: String::new(),
            parts: vec![MessagePart::ToolResult {
                tool_use_id: "id1".to_string(),
                content: big_content.clone(),
                is_error: false,
            }],
            metadata: MessageMetadata::default(),
        });
        // Message 1 (old) — ToolOutput should also be truncated.
        agent.msg.messages.push(Message {
            role: Role::User,
            content: String::new(),
            parts: vec![MessagePart::ToolOutput {
                tool_name: "shell".into(),
                body: big_content.clone(),
                compacted_at: None,
            }],
            metadata: MessageMetadata::default(),
        });
        // Message 2 (recent) — must NOT be truncated.
        agent.msg.messages.push(Message {
            role: Role::Assistant,
            content: "reply".to_string(),
            parts: vec![MessagePart::ToolResult {
                tool_use_id: "id3".to_string(),
                content: big_content.clone(),
                is_error: false,
            }],
            metadata: MessageMetadata::default(),
        });
        // Message 3 (most recent) — must NOT be truncated.
        agent.msg.messages.push(Message {
            role: Role::User,
            content: "last".to_string(),
            parts: vec![MessagePart::ToolResult {
                tool_use_id: "id4".to_string(),
                content: big_content.clone(),
                is_error: false,
            }],
            metadata: MessageMetadata::default(),
        });

        // Agent::new inserts a system prompt at index 0, so our messages are at 1..=4.
        let base = agent.msg.messages.len() - 4;

        agent.truncate_old_tool_results();

        // Old ToolResult truncated.
        if let MessagePart::ToolResult { content, .. } = &agent.msg.messages[base].parts[0] {
            assert!(
                content.ends_with("…[truncated]"),
                "msg[base] should be truncated"
            );
            // 2048-byte limit plus the truncation-marker suffix bytes.
            assert!(content.len() <= 2048 + 16);
        } else {
            panic!("expected ToolResult at msg[base]");
        }

        // Old ToolOutput truncated.
        if let MessagePart::ToolOutput { body, .. } = &agent.msg.messages[base + 1].parts[0] {
            assert!(
                body.ends_with("…[truncated]"),
                "msg[base+1] should be truncated"
            );
        } else {
            panic!("expected ToolOutput at msg[base+1]");
        }

        // Recent messages untouched.
        if let MessagePart::ToolResult { content, .. } = &agent.msg.messages[base + 2].parts[0] {
            assert_eq!(content.len(), 4096, "msg[base+2] should NOT be truncated");
        } else {
            panic!("expected ToolResult at msg[base+2]");
        }
        if let MessagePart::ToolResult { content, .. } = &agent.msg.messages[base + 3].parts[0] {
            assert_eq!(content.len(), 4096, "msg[base+3] should NOT be truncated");
        } else {
            panic!("expected ToolResult at msg[base+3]");
        }
    }

    #[test]
    fn truncate_old_tool_results_noop_when_few_messages() {
        let provider = mock_provider(vec![]);
        let channel = MockChannel::new(vec![]);
        let registry = create_test_registry();
        let executor = MockToolExecutor::no_tools();
        let mut agent = Agent::new(provider, channel, registry, None, 5, executor);

        let big = "y".repeat(4096);
        agent.msg.messages.push(Message {
            role: Role::User,
            content: String::new(),
            parts: vec![MessagePart::ToolResult {
                tool_use_id: "id".to_string(),
                content: big.clone(),
                is_error: false,
            }],
            metadata: MessageMetadata::default(),
        });
        agent.msg.messages.push(Message {
            role: Role::Assistant,
            content: "ok".to_string(),
            parts: vec![MessagePart::ToolResult {
                tool_use_id: "id2".to_string(),
                content: big.clone(),
                is_error: false,
            }],
            metadata: MessageMetadata::default(),
        });

        // Agent::new inserts a system prompt at index 0; our messages are at 1 and 2.
        let len_before = agent.msg.messages.len();
        agent.truncate_old_tool_results();

        // Neither message truncated — both fall in the last-2 window (len=3, skip last 2).
        assert_eq!(agent.msg.messages.len(), len_before);
        if let MessagePart::ToolResult { content, .. } =
            &agent.msg.messages[len_before - 2].parts[0]
        {
            assert_eq!(
                content.len(),
                4096,
                "second-to-last should not be truncated"
            );
        } else {
            panic!("expected ToolResult");
        }
        if let MessagePart::ToolResult { content, .. } =
            &agent.msg.messages[len_before - 1].parts[0]
        {
            assert_eq!(content.len(), 4096, "last should not be truncated");
        } else {
            panic!("expected ToolResult");
        }
    }

    // Helper: build a TurnTimings with the four phase durations.
    fn make_timings(ctx: u64, llm: u64, tool: u64, persist: u64) -> crate::metrics::TurnTimings {
        crate::metrics::TurnTimings {
            prepare_context_ms: ctx,
            llm_chat_ms: llm,
            tool_exec_ms: tool,
            persist_message_ms: persist,
        }
    }

    // Helper: agent wired to a watch channel so tests can observe published snapshots.
    fn agent_with_metrics_watch() -> (
        Agent<MockChannel>,
        tokio::sync::watch::Receiver<crate::metrics::MetricsSnapshot>,
    ) {
        let provider = mock_provider(vec![]);
        let channel = MockChannel::new(vec![]);
        let registry = create_test_registry();
        let executor = MockToolExecutor::no_tools();
        let mut agent = Agent::new(provider, channel, registry, None, 5, executor);

        let (tx, rx) = tokio::sync::watch::channel(crate::metrics::MetricsSnapshot::default());
        agent.runtime.metrics.metrics_tx = Some(tx);
        (agent, rx)
    }

    // T1-a: single flush — last_turn_timings equals the flushed value, count == 1.
    #[test]
    fn flush_turn_timings_single_flush() {
        let (mut agent, rx) = agent_with_metrics_watch();

        agent.runtime.metrics.pending_timings = make_timings(10, 200, 50, 5);
        agent.flush_turn_timings();

        let snap = rx.borrow();
        assert_eq!(snap.last_turn_timings.prepare_context_ms, 10);
        assert_eq!(snap.last_turn_timings.llm_chat_ms, 200);
        assert_eq!(snap.last_turn_timings.tool_exec_ms, 50);
        assert_eq!(snap.last_turn_timings.persist_message_ms, 5);
        assert_eq!(snap.timing_sample_count, 1);
        // avg == last when sample_count == 1
        assert_eq!(snap.avg_turn_timings.llm_chat_ms, 200);
    }

    // T1-b: pending_timings reset to default after flush.
    #[test]
    fn flush_turn_timings_resets_pending() {
        let provider = mock_provider(vec![]);
        let channel = MockChannel::new(vec![]);
        let registry = create_test_registry();
        let executor = MockToolExecutor::no_tools();
        let mut agent = Agent::new(provider, channel, registry, None, 5, executor);

        agent.runtime.metrics.pending_timings = make_timings(10, 200, 50, 5);
        agent.flush_turn_timings();

        let p = &agent.runtime.metrics.pending_timings;
        assert_eq!(p.prepare_context_ms, 0);
        assert_eq!(p.llm_chat_ms, 0);
        assert_eq!(p.tool_exec_ms, 0);
        assert_eq!(p.persist_message_ms, 0);
    }

    // T1-c: window capped at 10; avg and max computed correctly.
    #[test]
    fn flush_turn_timings_window_capped_at_10() {
        let (mut agent, rx) = agent_with_metrics_watch();

        // Push 12 turns: llm_chat_ms = i * 10 for i in 1..=12.
        for i in 1_u64..=12 {
            agent.runtime.metrics.pending_timings = make_timings(0, i * 10, 0, 0);
            agent.flush_turn_timings();
        }

        let snap = rx.borrow();
        // Window holds last 10: turns 3..=12, llm values 30..=120.
        assert_eq!(snap.timing_sample_count, 10);
        // max = 120
        assert_eq!(snap.max_turn_timings.llm_chat_ms, 120);
        // avg of 30,40,...,120 = (30+120)*10/2/10 = 75
        assert_eq!(snap.avg_turn_timings.llm_chat_ms, 75);
    }
}
771}