// git_iris/agents/status_messages.rs

1//! Dynamic status message generation using the fast model
2//!
3//! Generates witty, contextual waiting messages while users wait for
4//! agent operations to complete. Uses fire-and-forget async with hard
5//! timeout to ensure we never block on status messages.
6
7use anyhow::Result;
8use serde::{Deserialize, Serialize};
9use tokio::sync::mpsc;
10use tokio::time::{Duration, timeout};
11
12use crate::agents::provider::{self, DynAgent};
13
/// Context describing the in-flight operation, used to flavor status messages
#[derive(Debug, Clone)]
pub struct StatusContext {
    /// Type of task being performed
    pub task_type: String,
    /// Current branch name
    pub branch: Option<String>,
    /// Number of files being analyzed
    pub file_count: Option<usize>,
    /// Brief summary of what's happening (e.g., "analyzing commit changes")
    pub activity: String,
    /// Actual file names being changed (for richer context)
    pub files: Vec<String>,
    /// Whether this is a regeneration (we have more context available)
    pub is_regeneration: bool,
    /// Brief description of what's changing (e.g., "auth system, test fixes")
    pub change_summary: Option<String>,
    /// On regeneration: hint about current content (e.g., "commit about auth refactor")
    pub current_content_hint: Option<String>,
}

impl StatusContext {
    /// Create a context with just the required task type and activity;
    /// every optional field starts out unset.
    pub fn new(task_type: &str, activity: &str) -> Self {
        Self {
            task_type: task_type.to_owned(),
            activity: activity.to_owned(),
            branch: None,
            file_count: None,
            files: Vec::new(),
            is_regeneration: false,
            change_summary: None,
            current_content_hint: None,
        }
    }

    /// Attach the current branch name.
    pub fn with_branch(self, branch: impl Into<String>) -> Self {
        Self {
            branch: Some(branch.into()),
            ..self
        }
    }

    /// Record how many files the operation touches.
    pub fn with_file_count(self, count: usize) -> Self {
        Self {
            file_count: Some(count),
            ..self
        }
    }

    /// Attach the concrete file names being changed.
    pub fn with_files(self, files: Vec<String>) -> Self {
        Self { files, ..self }
    }

    /// Mark whether this run is a regeneration of earlier output.
    pub fn with_regeneration(self, is_regen: bool) -> Self {
        Self {
            is_regeneration: is_regen,
            ..self
        }
    }

    /// Attach a short summary of the changes being made.
    pub fn with_change_summary(self, summary: impl Into<String>) -> Self {
        Self {
            change_summary: Some(summary.into()),
            ..self
        }
    }

    /// Attach a hint about the currently-generated content (regeneration only).
    pub fn with_content_hint(self, hint: impl Into<String>) -> Self {
        Self {
            current_content_hint: Some(hint.into()),
            ..self
        }
    }
}
79
80/// A generated status message
81#[derive(Debug, Clone, Serialize, Deserialize)]
82pub struct StatusMessage {
83    /// The witty message to display
84    pub message: String,
85    /// Estimated time context (e.g., "a few seconds", "about 30 seconds")
86    pub time_hint: Option<String>,
87}
88
89impl Default for StatusMessage {
90    fn default() -> Self {
91        Self {
92            message: "Working on it...".to_string(),
93            time_hint: None,
94        }
95    }
96}
97
/// Capitalize first letter of a string (sentence case)
///
/// Returns an empty string for empty input. The first character is
/// uppercased with `char::to_uppercase`, which may expand to multiple
/// characters; the rest of the string is passed through untouched.
fn capitalize_first(s: &str) -> String {
    let mut chars = s.chars();
    chars.next().map_or_else(String::new, |first| {
        first.to_uppercase().chain(chars).collect()
    })
}
106
/// Generator for dynamic status messages
///
/// Wraps a provider/model pair plus a hard timeout. Generation failures
/// and timeouts always fall back to canned defaults, so callers never
/// block indefinitely waiting on a status message.
pub struct StatusMessageGenerator {
    // LLM provider name (e.g., "anthropic", "openai", "gemini")
    provider: String,
    // Fast/cheap model identifier used for these quick generations
    fast_model: String,
    /// Hard timeout for status message generation (ms)
    timeout_ms: u64,
}
114
115impl StatusMessageGenerator {
116    /// Create a new status message generator
117    ///
118    /// # Arguments
119    /// * `provider` - LLM provider name (e.g., "anthropic", "openai")
120    /// * `fast_model` - Model to use for quick generations
121    /// * `timeout_ms` - Hard timeout in milliseconds (default: 500)
122    pub fn new(provider: impl Into<String>, fast_model: impl Into<String>) -> Self {
123        Self {
124            provider: provider.into(),
125            fast_model: fast_model.into(),
126            timeout_ms: 1500, // 1.5 seconds - fast model should respond quickly
127        }
128    }
129
130    /// Set custom timeout in milliseconds
131    pub fn with_timeout_ms(mut self, ms: u64) -> Self {
132        self.timeout_ms = ms;
133        self
134    }
135
136    /// Generate a status message synchronously with timeout
137    ///
138    /// Returns default message if generation fails or times out.
139    pub async fn generate(&self, context: &StatusContext) -> StatusMessage {
140        match timeout(
141            Duration::from_millis(self.timeout_ms),
142            self.generate_internal(context),
143        )
144        .await
145        {
146            Ok(Ok(msg)) => msg,
147            Ok(Err(_)) | Err(_) => Self::default_message(context),
148        }
149    }
150
151    /// Spawn fire-and-forget generation that sends result to channel
152    ///
153    /// This spawns an async task that will send the generated message
154    /// to the provided channel. If generation times out or fails, nothing
155    /// is sent (caller should already have a fallback displayed).
156    pub fn spawn_generation(
157        &self,
158        context: StatusContext,
159        tx: mpsc::UnboundedSender<StatusMessage>,
160    ) {
161        let provider = self.provider.clone();
162        let fast_model = self.fast_model.clone();
163        let timeout_ms = self.timeout_ms;
164
165        tokio::spawn(async move {
166            let generator = StatusMessageGenerator {
167                provider,
168                fast_model,
169                timeout_ms,
170            };
171
172            if let Ok(Ok(msg)) = timeout(
173                Duration::from_millis(timeout_ms),
174                generator.generate_internal(&context),
175            )
176            .await
177            {
178                let _ = tx.send(msg);
179            }
180        });
181    }
182
183    /// Create a channel for receiving status messages
184    pub fn create_channel() -> (
185        mpsc::UnboundedSender<StatusMessage>,
186        mpsc::UnboundedReceiver<StatusMessage>,
187    ) {
188        mpsc::unbounded_channel()
189    }
190
191    /// Build the agent for status message generation
192    fn build_status_agent(provider: &str, fast_model: &str) -> Result<DynAgent> {
193        let preamble = "You write fun waiting messages for a Git AI named Iris. \
194                        Concise, yet fun and encouraging, add vibes, be clever, not cheesy. \
195                        Capitalize first letter, end with ellipsis. Under 35 chars. No emojis. \
196                        Just the message text, nothing else.";
197
198        match provider {
199            "openai" => {
200                let agent = provider::openai_builder(fast_model)
201                    .preamble(preamble)
202                    .max_tokens(50)
203                    .build();
204                Ok(DynAgent::OpenAI(agent))
205            }
206            "anthropic" => {
207                let agent = provider::anthropic_builder(fast_model)
208                    .preamble(preamble)
209                    .max_tokens(50)
210                    .build();
211                Ok(DynAgent::Anthropic(agent))
212            }
213            "google" | "gemini" => {
214                let agent = provider::gemini_builder(fast_model)
215                    .preamble(preamble)
216                    .max_tokens(50)
217                    .build();
218                Ok(DynAgent::Gemini(agent))
219            }
220            _ => Err(anyhow::anyhow!("Unsupported provider: {}", provider)),
221        }
222    }
223
224    /// Internal generation logic
225    async fn generate_internal(&self, context: &StatusContext) -> Result<StatusMessage> {
226        let prompt = Self::build_prompt(context);
227        tracing::info!(
228            "Building status agent with provider={}, model={}",
229            self.provider,
230            self.fast_model
231        );
232
233        // Build agent synchronously (DynClientBuilder is not Send)
234        // The returned agent IS Send, so we can await after this
235        let agent = match Self::build_status_agent(&self.provider, &self.fast_model) {
236            Ok(a) => a,
237            Err(e) => {
238                tracing::warn!("Failed to build status agent: {}", e);
239                return Err(e);
240            }
241        };
242
243        tracing::info!("Prompting status agent...");
244        let response = match agent.prompt(&prompt).await {
245            Ok(r) => r,
246            Err(e) => {
247                tracing::warn!("Status agent prompt failed: {}", e);
248                return Err(anyhow::anyhow!("Prompt failed: {}", e));
249            }
250        };
251
252        let message = capitalize_first(response.trim());
253        tracing::info!(
254            "Status agent response ({} chars): {:?}",
255            message.len(),
256            message
257        );
258
259        // Sanity check - if response is too long or empty, use fallback
260        if message.is_empty() || message.len() > 80 {
261            tracing::info!("Response invalid (empty or too long), using fallback");
262            return Ok(Self::default_message(context));
263        }
264
265        Ok(StatusMessage {
266            message,
267            time_hint: None,
268        })
269    }
270
271    /// Build the prompt for status message generation
272    fn build_prompt(context: &StatusContext) -> String {
273        let mut prompt = String::from("Context:\n");
274
275        prompt.push_str(&format!("Task: {}\n", context.task_type));
276        prompt.push_str(&format!("Activity: {}\n", context.activity));
277
278        if let Some(branch) = &context.branch {
279            prompt.push_str(&format!("Branch: {}\n", branch));
280        }
281
282        if !context.files.is_empty() {
283            let file_list: Vec<&str> = context.files.iter().take(3).map(String::as_str).collect();
284            prompt.push_str(&format!("Files: {}\n", file_list.join(", ")));
285        } else if let Some(count) = context.file_count {
286            prompt.push_str(&format!("File count: {}\n", count));
287        }
288
289        prompt.push_str(
290            "\nYour task is to use the limited context above to generate a fun waiting message \
291             shown to the user while the main task executes. Concise, yet fun and encouraging. \
292             Add fun vibes depending on the context. Be clever. \
293             Capitalize the first letter and end with ellipsis. Under 35 chars. No emojis.\n\n\
294             Just the message:",
295        );
296        prompt
297    }
298
299    /// Get a default message based on context (used as fallback)
300    fn default_message(context: &StatusContext) -> StatusMessage {
301        let message = match context.task_type.as_str() {
302            "commit" => "Crafting your commit message...",
303            "review" => "Analyzing code changes...",
304            "pr" => "Writing PR description...",
305            "changelog" => "Generating changelog...",
306            "release_notes" => "Composing release notes...",
307            "chat" => "Thinking...",
308            "semantic_blame" => "Tracing code origins...",
309            _ => "Working on it...",
310        };
311
312        StatusMessage {
313            message: message.to_string(),
314            time_hint: None,
315        }
316    }
317
318    /// Generate a completion message when a task finishes
319    pub async fn generate_completion(&self, context: &StatusContext) -> StatusMessage {
320        match timeout(
321            Duration::from_millis(self.timeout_ms),
322            self.generate_completion_internal(context),
323        )
324        .await
325        {
326            Ok(Ok(msg)) => msg,
327            Ok(Err(_)) | Err(_) => Self::default_completion(context),
328        }
329    }
330
331    async fn generate_completion_internal(&self, context: &StatusContext) -> Result<StatusMessage> {
332        let prompt = Self::build_completion_prompt(context);
333
334        let agent = Self::build_status_agent(&self.provider, &self.fast_model)?;
335        let response = agent.prompt(&prompt).await?;
336        let message = capitalize_first(response.trim());
337
338        if message.is_empty() || message.len() > 80 {
339            return Ok(Self::default_completion(context));
340        }
341
342        Ok(StatusMessage {
343            message,
344            time_hint: None,
345        })
346    }
347
348    fn build_completion_prompt(context: &StatusContext) -> String {
349        let mut prompt = String::from("Task just completed:\n\n");
350        prompt.push_str(&format!("Task: {}\n", context.task_type));
351
352        if let Some(branch) = &context.branch {
353            prompt.push_str(&format!("Branch: {}\n", branch));
354        }
355
356        if let Some(hint) = &context.current_content_hint {
357            prompt.push_str(&format!("Content: {}\n", hint));
358        }
359
360        prompt.push_str(
361            "\nGenerate a brief completion message based on the content above.\n\n\
362             RULES:\n\
363             - Reference the SPECIFIC topic from content above (not generic \"changes\")\n\
364             - Sentence case, under 35 chars, no emojis\n\
365             - Just the message, nothing else:",
366        );
367        prompt
368    }
369
370    fn default_completion(context: &StatusContext) -> StatusMessage {
371        let message = match context.task_type.as_str() {
372            "commit" => "Ready to commit.",
373            "review" => "Review complete.",
374            "pr" => "PR description ready.",
375            "changelog" => "Changelog generated.",
376            "release_notes" => "Release notes ready.",
377            "chat" => "Here you go.",
378            "semantic_blame" => "Origins traced.",
379            _ => "Done.",
380        };
381
382        StatusMessage {
383            message: message.to_string(),
384            time_hint: None,
385        }
386    }
387}
388
389/// Batch of status messages for cycling display
390#[derive(Debug, Clone, Default)]
391pub struct StatusMessageBatch {
392    messages: Vec<StatusMessage>,
393    current_index: usize,
394}
395
396impl StatusMessageBatch {
397    pub fn new() -> Self {
398        Self::default()
399    }
400
401    /// Add a message to the batch
402    pub fn add(&mut self, message: StatusMessage) {
403        self.messages.push(message);
404    }
405
406    /// Get the current message (if any)
407    pub fn current(&self) -> Option<&StatusMessage> {
408        self.messages.get(self.current_index)
409    }
410
411    /// Advance to the next message (cycles back to start)
412    pub fn next(&mut self) {
413        if !self.messages.is_empty() {
414            self.current_index = (self.current_index + 1) % self.messages.len();
415        }
416    }
417
418    /// Check if we have any messages
419    pub fn is_empty(&self) -> bool {
420        self.messages.is_empty()
421    }
422
423    /// Number of messages in batch
424    pub fn len(&self) -> usize {
425        self.messages.len()
426    }
427
428    /// Clear all messages
429    pub fn clear(&mut self) {
430        self.messages.clear();
431        self.current_index = 0;
432    }
433}
434
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_status_context_builder() {
        let ctx = StatusContext::new("commit", "analyzing staged changes")
            .with_branch("main")
            .with_file_count(5);

        assert_eq!(ctx.task_type, "commit");
        assert_eq!(ctx.branch, Some("main".to_string()));
        assert_eq!(ctx.file_count, Some(5));
    }

    #[test]
    fn test_default_messages() {
        let ctx = StatusContext::new("commit", "test");
        let msg = StatusMessageGenerator::default_message(&ctx);
        assert_eq!(msg.message, "Crafting your commit message...");

        let ctx = StatusContext::new("review", "test");
        let msg = StatusMessageGenerator::default_message(&ctx);
        assert_eq!(msg.message, "Analyzing code changes...");

        let ctx = StatusContext::new("unknown", "test");
        let msg = StatusMessageGenerator::default_message(&ctx);
        assert_eq!(msg.message, "Working on it...");
    }

    #[test]
    fn test_message_batch_cycling() {
        let mut batch = StatusMessageBatch::new();
        assert!(batch.is_empty());
        assert!(batch.current().is_none());

        batch.add(StatusMessage {
            message: "First".to_string(),
            time_hint: None,
        });
        batch.add(StatusMessage {
            message: "Second".to_string(),
            time_hint: None,
        });

        assert_eq!(batch.len(), 2);
        assert_eq!(batch.current().unwrap().message, "First");

        batch.next();
        assert_eq!(batch.current().unwrap().message, "Second");

        batch.next();
        assert_eq!(batch.current().unwrap().message, "First"); // Cycles back
    }

    #[test]
    fn test_prompt_building() {
        let ctx = StatusContext::new("commit", "analyzing staged changes")
            .with_branch("feature/awesome")
            .with_file_count(3);

        let prompt = StatusMessageGenerator::build_prompt(&ctx);
        assert!(prompt.contains("commit"));
        assert!(prompt.contains("analyzing staged changes"));
        assert!(prompt.contains("feature/awesome"));
        assert!(prompt.contains("3"));
    }

    /// Debug test to evaluate status message quality
    /// Run with: cargo test debug_status_messages -- --ignored --nocapture
    #[test]
    #[ignore]
    fn debug_status_messages() {
        use tokio::runtime::Runtime;

        let rt = Runtime::new().unwrap();
        rt.block_on(async {
            // Get provider/model from env or use defaults
            let provider =
                std::env::var("IRIS_PROVIDER").unwrap_or_else(|_| "anthropic".to_string());
            let model = std::env::var("IRIS_MODEL")
                .unwrap_or_else(|_| "claude-haiku-4-5-20251001".to_string());

            println!("\n{}", "=".repeat(60));
            println!(
                "Status Message Debug - Provider: {}, Model: {}",
                provider, model
            );
            println!("{}\n", "=".repeat(60));

            let generator = StatusMessageGenerator::new(&provider, &model);

            // Test scenarios
            let scenarios = vec![
                StatusContext::new("commit", "crafting commit message")
                    .with_branch("main")
                    .with_files(vec![
                        "mod.rs".to_string(),
                        "status_messages.rs".to_string(),
                        "agent_tasks.rs".to_string(),
                    ])
                    .with_file_count(3),
                StatusContext::new("commit", "crafting commit message")
                    .with_branch("feature/auth")
                    .with_files(vec!["auth.rs".to_string(), "login.rs".to_string()])
                    .with_file_count(2),
                StatusContext::new("commit", "crafting commit message")
                    .with_branch("main")
                    .with_files(vec![
                        "config.ts".to_string(),
                        "App.tsx".to_string(),
                        "hooks.ts".to_string(),
                    ])
                    .with_file_count(16)
                    .with_regeneration(true)
                    .with_content_hint("refactor: simplify auth flow"),
                StatusContext::new("review", "analyzing code changes")
                    .with_branch("pr/123")
                    .with_files(vec!["reducer.rs".to_string()])
                    .with_file_count(1),
                StatusContext::new("pr", "drafting PR description")
                    .with_branch("feature/dark-mode")
                    .with_files(vec!["theme.rs".to_string(), "colors.rs".to_string()])
                    .with_file_count(5),
            ];

            for (i, ctx) in scenarios.iter().enumerate() {
                println!("--- Scenario {} ---", i + 1);
                println!(
                    "Task: {}, Branch: {:?}, Files: {:?}",
                    ctx.task_type, ctx.branch, ctx.files
                );
                if ctx.is_regeneration {
                    println!("(Regeneration, hint: {:?})", ctx.current_content_hint);
                }
                println!();

                // Generate 5 messages for each scenario
                for j in 1..=5 {
                    // `ctx` is already a `&StatusContext`; passing `&ctx` here
                    // was a needless double-borrow (clippy::needless_borrow).
                    let msg = generator.generate(ctx).await;
                    println!("  {}: {}", j, msg.message);
                }
                println!();
            }
        });
    }
}