//! Session-event handling for the TUI app.
//!
//! Path: codetether_agent/tui/app/session_events.rs
use crate::session::{Session, SessionEvent};
use crate::tui::app::smart_switch::{maybe_schedule_smart_switch_retry, smart_switch_max_retries};
use crate::tui::app::state::App;
use crate::tui::app::text::truncate_preview;
use crate::tui::app::worker_bridge::{handle_processing_started, handle_processing_stopped};
use crate::tui::chat::message::{ChatMessage, MessageType};
use crate::tui::worker_bridge::TuiWorkerBridge;

/// Handle one [`SessionEvent`] from the legacy mpsc channel, mutating TUI
/// [`App`] state — and, for `SessionSync`, replacing the [`Session`] itself.
///
/// Statement order inside each arm is load-bearing: processing-start
/// notification fires before the request-timing guard, and any in-flight
/// `streaming_text` is flushed into a real message before a tool call is
/// rendered (it would otherwise be lost when `Done`/`TextComplete` clears it).
///
/// # Parameters
/// - `app`: TUI application state updated by every arm.
/// - `session`: live session; overwritten wholesale on `SessionSync`.
/// - `worker_bridge`: optional bridge notified on processing start/stop.
/// - `evt`: the event being handled.
pub async fn handle_session_event(
    app: &mut App,
    session: &mut Session,
    worker_bridge: &Option<TuiWorkerBridge>,
    evt: SessionEvent,
) {
    // Update watchdog timestamp on every session event
    app.state.main_last_event_at = Some(std::time::Instant::now());
    // Auto-follow latest output on every event (matches legacy TUI behavior).
    // Without this, scroll stays pinned to wherever the user last left it and
    // new streaming text / tool activity is rendered off-screen.
    app.state.scroll_to_bottom();

    match evt {
        SessionEvent::Thinking => {
            handle_processing_started(app, worker_bridge).await;
            // Start timing only once per request; the guard keeps a repeat
            // Thinking event from resetting an in-progress measurement.
            if app.state.processing_started_at.is_none() {
                app.state.begin_request_timing();
            }
            app.state.status = "Thinking…".to_string();
        }
        SessionEvent::ToolCallStart { name, arguments } => {
            handle_processing_started(app, worker_bridge).await;
            // Same once-per-request timing guard as the Thinking arm.
            if app.state.processing_started_at.is_none() {
                app.state.begin_request_timing();
            }
            // Flush any in-flight streaming text into a real assistant message
            // before showing the tool call. Otherwise the streamed reply is
            // discarded when `streaming_text` is cleared on Done/TextComplete.
            if !app.state.streaming_text.is_empty() {
                let text = std::mem::take(&mut app.state.streaming_text);
                app.state
                    .messages
                    .push(ChatMessage::new(MessageType::Assistant, text));
            }
            app.state.reset_tool_preview_scroll();
            app.state.status = format!("Running tool: {name}");
            // Keep the full arguments in the message type; the preview line is
            // truncated to 240 bytes for display.
            app.state.messages.push(ChatMessage::new(
                MessageType::ToolCall {
                    name: name.clone(),
                    arguments: arguments.clone(),
                },
                format!("{name}: {}", truncate_preview(&arguments, 240)),
            ));
            app.state.scroll_to_bottom();
        }
        SessionEvent::ToolCallComplete {
            name,
            output,
            success,
            duration_ms,
        } => {
            app.state.reset_tool_preview_scroll();
            // Full output kept in the message type; preview capped at 600 bytes.
            app.state.messages.push(ChatMessage::new(
                MessageType::ToolResult {
                    name: name.clone(),
                    output: output.clone(),
                    success,
                    duration_ms: Some(duration_ms),
                },
                format!("{name}: {}", truncate_preview(&output, 600)),
            ));
            // Snapshot the latest tool outcome for the status/latency view.
            app.state.last_tool_name = Some(name.clone());
            app.state.last_tool_latency_ms = Some(duration_ms);
            app.state.last_tool_success = Some(success);
            app.state.status = format!("Tool finished: {name}");
            app.state.scroll_to_bottom();
        }
        SessionEvent::TextChunk(chunk) => {
            app.state.scroll_to_bottom();
            app.state.note_text_token();
            // Chunks carry the cumulative text so far, so the preview is
            // REPLACED, not appended. Oversized chunks are truncated at a
            // UTF-8-safe boundary and marked so the UI stays bounded.
            app.state.streaming_text =
                if chunk.len() > crate::tui::constants::MAX_STREAMING_TEXT_BYTES {
                    let mut t = crate::util::truncate_bytes_safe(
                        &chunk,
                        crate::tui::constants::MAX_STREAMING_TEXT_BYTES,
                    )
                    .to_string();
                    t.push_str(" …[truncated]");
                    t
                } else {
                    chunk
                };
        }
        SessionEvent::TextComplete(text) => {
            app.state.note_text_token();
            // The final text supersedes the streaming preview entirely.
            app.state.streaming_text.clear();
            app.state
                .messages
                .push(ChatMessage::new(MessageType::Assistant, text));
            app.state.status = "Assistant replied".to_string();
            app.state.scroll_to_bottom();
        }
        SessionEvent::ThinkingComplete(text) => {
            // Empty thinking traces produce no message at all.
            if !text.is_empty() {
                app.state.reset_tool_preview_scroll();
                app.state.messages.push(ChatMessage::new(
                    MessageType::Thinking(text.clone()),
                    truncate_preview(&text, 600),
                ));
                app.state.scroll_to_bottom();
            }
        }
        SessionEvent::UsageReport {
            model,
            prompt_tokens,
            completion_tokens,
            duration_ms,
        } => {
            // Update the rolling "last completion" snapshot shown in the UI.
            app.state.last_completion_model = Some(model.clone());
            app.state.last_completion_latency_ms = Some(duration_ms);
            app.state.last_completion_prompt_tokens = Some(prompt_tokens);
            app.state.last_completion_output_tokens = Some(completion_tokens);
            // Attach usage to the most recent message attributable to this
            // completion: prefer the last Assistant text, else fall back to
            // the last ToolCall. This gives a visible per-message token tag.
            attach_usage_to_last_completion_message(
                &mut app.state.messages,
                crate::tui::chat::message::MessageUsage {
                    model: model.clone(),
                    prompt_tokens,
                    completion_tokens,
                    duration_ms,
                },
            );
            app.state.status = format!(
                "Completed with model {model} • {} in / {} out • {} ms",
                prompt_tokens, completion_tokens, duration_ms
            );
        }
        SessionEvent::SessionSync(updated) => {
            // Deref-move the synced session over the live one (`updated` is
            // presumably boxed — confirm at the SessionEvent definition),
            // then re-attach the global bus the copy may have lost.
            *session = *updated;
            session.attach_global_bus_if_missing();
            app.state.session_id = Some(session.id.clone());
        }
        SessionEvent::Done => {
            handle_processing_stopped(app, worker_bridge).await;
            app.state.streaming_text.clear();
            // Promote current-request timing into the "last request" snapshot.
            app.state.complete_request_timing();
            app.state.status = "Ready".to_string();
        }
        SessionEvent::Error(err) => {
            handle_processing_stopped(app, worker_bridge).await;
            app.state.streaming_text.clear();
            app.state.complete_request_timing();

            // Attempt smart switch retry on retryable provider errors
            let current_model = session.metadata.model.as_deref();
            // Provider is the prefix of a "provider/model" id, when present.
            let current_provider = current_model.and_then(|m| m.split('/').next());
            let prompt = app.state.main_inflight_prompt.clone().unwrap_or_default();

            if let Some(pending) = maybe_schedule_smart_switch_retry(
                &err,
                current_model,
                current_provider,
                &app.state.available_models,
                &prompt,
                app.state.smart_switch_retry_count,
                &app.state.smart_switch_attempted_models,
            ) {
                app.state.smart_switch_retry_count += 1;
                // Record both the failing model and the chosen target so
                // neither is proposed again on a subsequent error.
                app.state
                    .smart_switch_attempted_models
                    .push(current_model.unwrap_or("unknown").to_string());
                app.state
                    .smart_switch_attempted_models
                    .push(pending.target_model.clone());
                app.state.status = format!(
                    "Smart switch retry {}/{} → {}",
                    app.state.smart_switch_retry_count,
                    smart_switch_max_retries(),
                    pending.target_model,
                );
                app.state.pending_smart_switch_retry = Some(pending);
            } else {
                // No retry possible — reset smart switch state
                app.state.smart_switch_retry_count = 0;
                app.state.smart_switch_attempted_models.clear();
                app.state.pending_smart_switch_retry = None;
            }

            // The error is always surfaced in the chat log, retried or not.
            app.state
                .messages
                .push(ChatMessage::new(MessageType::Error, err.clone()));
            app.state.status = "Error".to_string();
            app.state.scroll_to_bottom();
        }
        // New non-exhaustive variants (TokenEstimate, TokenUsage,
        // RlmProgress, RlmComplete, Compaction*, ContextTruncated) are
        // consumed by dedicated SessionBus subscribers, not this legacy
        // mpsc handler. Intentionally ignored here.
        _ => {}
    }
}
204
205/// Attach a [`MessageUsage`] to the chat message most likely produced
206/// by the completion that generated it.
207///
208/// Walks backward and attaches to the first `Assistant` or `ToolCall`
209/// message without a usage already set. If every recent candidate is
210/// already tagged (e.g. a repeat `UsageReport` for the same turn), the
211/// usage is silently dropped rather than clobbering an earlier tag.
212///
213/// # Examples
214///
215/// ```rust
216/// use codetether_agent::tui::chat::message::{ChatMessage, MessageType, MessageUsage};
217/// use codetether_agent::tui::app::session_events::attach_usage_to_last_completion_message;
218///
219/// let mut msgs = vec![
220///     ChatMessage::new(MessageType::User, "hi"),
221///     ChatMessage::new(MessageType::Assistant, "hello!"),
222/// ];
223/// attach_usage_to_last_completion_message(
224///     &mut msgs,
225///     MessageUsage {
226///         model: "test/model".into(),
227///         prompt_tokens: 10,
228///         completion_tokens: 3,
229///         duration_ms: 120,
230///     },
231/// );
232/// assert!(msgs[1].usage.is_some());
233/// ```
234pub fn attach_usage_to_last_completion_message(
235    messages: &mut [ChatMessage],
236    usage: crate::tui::chat::message::MessageUsage,
237) {
238    for msg in messages.iter_mut().rev() {
239        if msg.usage.is_some() {
240            continue;
241        }
242        match &msg.message_type {
243            MessageType::Assistant | MessageType::ToolCall { .. } => {
244                msg.usage = Some(usage);
245                return;
246            }
247            // Stop walking past a user turn — usage can't belong there.
248            MessageType::User => return,
249            _ => {}
250        }
251    }
252}
253
#[cfg(test)]
mod tests {
    use super::*;
    use crate::session::Session;
    use crate::tui::chat::message::MessageType;

    // TextChunk events carry cumulative text, so the handler must REPLACE the
    // streaming preview rather than append to it.
    #[tokio::test]
    async fn text_chunk_replaces_streaming_preview_with_latest_cumulative_text() {
        let mut app = App::default();
        let mut session = Session::new().await.expect("session should create");

        handle_session_event(
            &mut app,
            &mut session,
            &None,
            SessionEvent::TextChunk("hel".to_string()),
        )
        .await;
        assert_eq!(app.state.streaming_text, "hel");

        handle_session_event(
            &mut app,
            &mut session,
            &None,
            SessionEvent::TextChunk("hello".to_string()),
        )
        .await;
        assert_eq!(app.state.streaming_text, "hello");
    }

    // ToolCallComplete must record duration both on the chat message and in
    // the `last_tool_*` latency-view snapshot.
    #[tokio::test]
    async fn tool_completion_records_duration_for_chat_and_latency_view() {
        let mut app = App::default();
        let mut session = Session::new().await.expect("session should create");

        handle_session_event(
            &mut app,
            &mut session,
            &None,
            SessionEvent::ToolCallComplete {
                name: "read".to_string(),
                output: "src/main.rs".to_string(),
                success: true,
                duration_ms: 42,
            },
        )
        .await;

        let Some(message) = app.state.messages.last() else {
            panic!("expected a tool result message");
        };
        match &message.message_type {
            MessageType::ToolResult {
                name,
                success,
                duration_ms,
                ..
            } => {
                assert_eq!(name, "read");
                assert!(*success);
                assert_eq!(*duration_ms, Some(42));
            }
            other => panic!("expected tool result message, got {other:?}"),
        }
        assert_eq!(app.state.last_tool_name.as_deref(), Some("read"));
        assert_eq!(app.state.last_tool_latency_ms, Some(42));
        assert_eq!(app.state.last_tool_success, Some(true));
    }

    // UsageReport must refresh every field of the `last_completion_*` snapshot.
    #[tokio::test]
    async fn usage_report_updates_latency_snapshot() {
        let mut app = App::default();
        let mut session = Session::new().await.expect("session should create");

        handle_session_event(
            &mut app,
            &mut session,
            &None,
            SessionEvent::UsageReport {
                model: "openai/gpt-5.4".to_string(),
                prompt_tokens: 120,
                completion_tokens: 64,
                duration_ms: 1_250,
            },
        )
        .await;

        assert_eq!(
            app.state.last_completion_model.as_deref(),
            Some("openai/gpt-5.4")
        );
        assert_eq!(app.state.last_completion_latency_ms, Some(1_250));
        assert_eq!(app.state.last_completion_prompt_tokens, Some(120));
        assert_eq!(app.state.last_completion_output_tokens, Some(64));
    }

    // First chunk sets TTFT and last-token together; a later TextComplete
    // keeps TTFT fixed but advances the last-token timestamp.
    #[tokio::test]
    async fn text_events_record_request_ttft_and_last_token() {
        let mut app = App::default();
        let mut session = Session::new().await.expect("session should create");
        // Backdate processing start so the measured TTFT is non-zero.
        app.state.processing_started_at =
            Some(std::time::Instant::now() - std::time::Duration::from_millis(15));

        handle_session_event(
            &mut app,
            &mut session,
            &None,
            SessionEvent::TextChunk("hello".to_string()),
        )
        .await;

        let first = app
            .state
            .current_request_first_token_ms
            .expect("expected ttft after first chunk");
        assert_eq!(app.state.current_request_last_token_ms, Some(first));

        app.state.processing_started_at =
            Some(std::time::Instant::now() - std::time::Duration::from_millis(30));
        handle_session_event(
            &mut app,
            &mut session,
            &None,
            SessionEvent::TextComplete("hello".to_string()),
        )
        .await;

        assert_eq!(app.state.current_request_first_token_ms, Some(first));
        assert!(
            app.state
                .current_request_last_token_ms
                .expect("expected last token timing")
                >= first
        );
    }

    // Done must move current-request timing into the `last_request_*` snapshot
    // and clear all in-flight timing state.
    #[tokio::test]
    async fn done_promotes_request_timing_snapshot() {
        let mut app = App::default();
        let mut session = Session::new().await.expect("session should create");
        app.state.processing_started_at = Some(std::time::Instant::now());
        app.state.current_request_first_token_ms = Some(120);
        app.state.current_request_last_token_ms = Some(980);

        handle_session_event(&mut app, &mut session, &None, SessionEvent::Done).await;

        assert_eq!(app.state.last_request_first_token_ms, Some(120));
        assert_eq!(app.state.last_request_last_token_ms, Some(980));
        assert!(app.state.processing_started_at.is_none());
        assert!(app.state.current_request_first_token_ms.is_none());
        assert!(app.state.current_request_last_token_ms.is_none());
    }
}
406}