// git_iris/agents/status_messages.rs
//! Dynamic status message generation using the fast model
//!
//! Generates witty, contextual waiting messages while users wait for
//! agent operations to complete. Uses fire-and-forget async with hard
//! timeout to ensure we never block on status messages.

use std::collections::HashMap;
use std::fmt::Write as _; // bring write!/writeln! for String buffers into scope

use anyhow::Result;
use serde::{Deserialize, Serialize};
use tokio::sync::mpsc;
use tokio::time::{Duration, timeout};

use crate::agents::provider::{self, CompletionProfile, DynAgent};

/// Context for generating status messages
#[derive(Debug, Clone)]
pub struct StatusContext {
    /// Type of task being performed
    pub task_type: String,
    /// Current branch name
    pub branch: Option<String>,
    /// Number of files being analyzed
    pub file_count: Option<usize>,
    /// Brief summary of what's happening (e.g., "analyzing commit changes")
    pub activity: String,
    /// Actual file names being changed (for richer context)
    pub files: Vec<String>,
    /// Whether this is a regeneration (we have more context available)
    pub is_regeneration: bool,
    /// Brief description of what's changing (e.g., "auth system, test fixes")
    pub change_summary: Option<String>,
    /// On regeneration: hint about current content (e.g., "commit about auth refactor")
    pub current_content_hint: Option<String>,
}

impl StatusContext {
    /// Build a minimal context; optional fields start empty and are filled
    /// in via the `with_*` builder methods below.
    #[must_use]
    pub fn new(task_type: &str, activity: &str) -> Self {
        Self {
            task_type: task_type.to_string(),
            activity: activity.to_string(),
            branch: None,
            file_count: None,
            files: Vec::new(),
            is_regeneration: false,
            change_summary: None,
            current_content_hint: None,
        }
    }

    /// Attach the current branch name.
    #[must_use]
    pub fn with_branch(mut self, branch: impl Into<String>) -> Self {
        self.branch = Some(branch.into());
        self
    }

    /// Attach the number of files under analysis.
    #[must_use]
    pub fn with_file_count(mut self, count: usize) -> Self {
        self.file_count = Some(count);
        self
    }

    /// Attach the concrete file names being changed.
    #[must_use]
    pub fn with_files(mut self, files: Vec<String>) -> Self {
        self.files = files;
        self
    }

    /// Mark whether this run is a regeneration of earlier output.
    #[must_use]
    pub fn with_regeneration(mut self, is_regen: bool) -> Self {
        self.is_regeneration = is_regen;
        self
    }

    /// Attach a short description of what is changing.
    #[must_use]
    pub fn with_change_summary(mut self, summary: impl Into<String>) -> Self {
        self.change_summary = Some(summary.into());
        self
    }

    /// Attach a hint describing the currently generated content.
    #[must_use]
    pub fn with_content_hint(mut self, hint: impl Into<String>) -> Self {
        self.current_content_hint = Some(hint.into());
        self
    }
}
87
88/// A generated status message
89#[derive(Debug, Clone, Serialize, Deserialize)]
90pub struct StatusMessage {
91    /// The witty message to display
92    pub message: String,
93    /// Estimated time context (e.g., "a few seconds", "about 30 seconds")
94    pub time_hint: Option<String>,
95}
96
97impl Default for StatusMessage {
98    fn default() -> Self {
99        Self {
100            message: "Working on it...".to_string(),
101            time_hint: None,
102        }
103    }
104}

/// Capitalize first letter of a string (sentence case)
///
/// Uses `char::to_uppercase`, so a single character may expand to several
/// (e.g. "ß" becomes "SS"); the remainder of the string is left untouched.
fn capitalize_first(s: &str) -> String {
    let mut chars = s.chars();
    chars
        .next()
        .map_or_else(String::new, |first| first.to_uppercase().chain(chars).collect())
}

/// Generator for dynamic status messages
pub struct StatusMessageGenerator {
    /// LLM provider name (e.g., "anthropic", "openai")
    provider: String,
    /// Model used for quick, low-latency generations
    fast_model: String,
    /// API key for the provider (from config)
    api_key: Option<String>,
    /// Provider-specific params inherited from config
    additional_params: HashMap<String, String>,
    /// Hard timeout for status message generation (ms)
    timeout_ms: u64,
}
126
127impl StatusMessageGenerator {
128    /// Create a new status message generator
129    ///
130    /// # Arguments
131    /// * `provider` - LLM provider name (e.g., "anthropic", "openai")
132    /// * `fast_model` - Model to use for quick generations
133    /// * `api_key` - Optional API key (falls back to env var if not provided)
134    #[must_use]
135    pub fn new(
136        provider: impl Into<String>,
137        fast_model: impl Into<String>,
138        api_key: Option<String>,
139        additional_params: Option<HashMap<String, String>>,
140    ) -> Self {
141        Self {
142            provider: provider.into(),
143            fast_model: fast_model.into(),
144            api_key,
145            additional_params: additional_params.unwrap_or_default(),
146            timeout_ms: 1500, // 1.5 seconds - fast model should respond quickly
147        }
148    }
149
150    /// Set custom timeout in milliseconds
151    #[must_use]
152    pub fn with_timeout_ms(mut self, ms: u64) -> Self {
153        self.timeout_ms = ms;
154        self
155    }
156
157    /// Generate a status message synchronously with timeout
158    ///
159    /// Returns default message if generation fails or times out.
160    pub async fn generate(&self, context: &StatusContext) -> StatusMessage {
161        match timeout(
162            Duration::from_millis(self.timeout_ms),
163            self.generate_internal(context),
164        )
165        .await
166        {
167            Ok(Ok(msg)) => msg,
168            Ok(Err(_)) | Err(_) => Self::default_message(context),
169        }
170    }
171
172    /// Spawn fire-and-forget generation that sends result to channel
173    ///
174    /// This spawns an async task that will send the generated message
175    /// to the provided channel. If generation times out or fails, nothing
176    /// is sent (caller should already have a fallback displayed).
177    pub fn spawn_generation(
178        &self,
179        context: StatusContext,
180        tx: mpsc::UnboundedSender<StatusMessage>,
181    ) {
182        let provider = self.provider.clone();
183        let fast_model = self.fast_model.clone();
184        let api_key = self.api_key.clone();
185        let additional_params = self.additional_params.clone();
186        let timeout_ms = self.timeout_ms;
187
188        tokio::spawn(async move {
189            let generator = StatusMessageGenerator {
190                provider,
191                fast_model,
192                api_key,
193                additional_params,
194                timeout_ms,
195            };
196
197            if let Ok(Ok(msg)) = timeout(
198                Duration::from_millis(timeout_ms),
199                generator.generate_internal(&context),
200            )
201            .await
202            {
203                let _ = tx.send(msg);
204            }
205        });
206    }
207
208    /// Create a channel for receiving status messages
209    #[must_use]
210    pub fn create_channel() -> (
211        mpsc::UnboundedSender<StatusMessage>,
212        mpsc::UnboundedReceiver<StatusMessage>,
213    ) {
214        mpsc::unbounded_channel()
215    }
216
217    /// Build the agent for status message generation
218    fn build_status_agent(
219        provider: &str,
220        fast_model: &str,
221        api_key: Option<&str>,
222        additional_params: Option<&HashMap<String, String>>,
223    ) -> Result<DynAgent> {
224        let preamble = "You write fun waiting messages for a Git AI named Iris. \
225                        Concise, yet fun and encouraging, add vibes, be clever, not cheesy. \
226                        Capitalize first letter, end with ellipsis. Under 35 chars. No emojis. \
227                        Just the message text, nothing else.";
228        let provider_name = provider::provider_from_name(provider)?;
229
230        match provider {
231            "openai" => {
232                let builder = provider::openai_builder(fast_model, api_key)?.preamble(preamble);
233                let agent = provider::apply_completion_params(
234                    builder,
235                    provider_name,
236                    fast_model,
237                    50,
238                    additional_params,
239                    CompletionProfile::StatusMessage,
240                )
241                .build();
242                Ok(DynAgent::OpenAI(agent))
243            }
244            "anthropic" => {
245                let builder = provider::anthropic_builder(fast_model, api_key)?.preamble(preamble);
246                let agent = provider::apply_completion_params(
247                    builder,
248                    provider_name,
249                    fast_model,
250                    50,
251                    additional_params,
252                    CompletionProfile::StatusMessage,
253                )
254                .build();
255                Ok(DynAgent::Anthropic(agent))
256            }
257            "google" | "gemini" => {
258                let builder = provider::gemini_builder(fast_model, api_key)?.preamble(preamble);
259                let agent = provider::apply_completion_params(
260                    builder,
261                    provider_name,
262                    fast_model,
263                    50,
264                    additional_params,
265                    CompletionProfile::StatusMessage,
266                )
267                .build();
268                Ok(DynAgent::Gemini(agent))
269            }
270            _ => Err(anyhow::anyhow!("Unsupported provider: {}", provider)),
271        }
272    }
273
274    /// Internal generation logic
275    async fn generate_internal(&self, context: &StatusContext) -> Result<StatusMessage> {
276        let prompt = Self::build_prompt(context);
277        let agent = self.status_agent()?;
278        let response = Self::prompt_status_agent(&agent, &prompt).await?;
279
280        let message = capitalize_first(response.trim());
281        tracing::info!(
282            "Status agent response ({} chars): {:?}",
283            message.len(),
284            message
285        );
286
287        // Sanity check - if response is too long or empty, use fallback
288        if message.is_empty() || message.len() > 80 {
289            tracing::info!("Response invalid (empty or too long), using fallback");
290            return Ok(Self::default_message(context));
291        }
292
293        Ok(StatusMessage {
294            message,
295            time_hint: None,
296        })
297    }
298
299    fn status_agent(&self) -> Result<DynAgent> {
300        tracing::info!(
301            "Building status agent with provider={}, model={}",
302            self.provider,
303            self.fast_model
304        );
305
306        Self::build_status_agent(
307            &self.provider,
308            &self.fast_model,
309            self.api_key.as_deref(),
310            Some(&self.additional_params),
311        )
312        .inspect_err(|e| tracing::warn!("Failed to build status agent: {}", e))
313    }
314
315    async fn prompt_status_agent(agent: &DynAgent, prompt: &str) -> Result<String> {
316        tracing::info!("Prompting status agent...");
317        agent.prompt(prompt).await.map_err(|e| {
318            tracing::warn!("Status agent prompt failed: {}", e);
319            anyhow::anyhow!("Prompt failed: {}", e)
320        })
321    }
322
323    /// Build the prompt for status message generation
324    fn build_prompt(context: &StatusContext) -> String {
325        let mut prompt = String::from("Context:\n");
326
327        prompt.push_str(&format!("Task: {}\n", context.task_type));
328        prompt.push_str(&format!("Activity: {}\n", context.activity));
329
330        if let Some(branch) = &context.branch {
331            prompt.push_str(&format!("Branch: {}\n", branch));
332        }
333
334        if !context.files.is_empty() {
335            let file_list: Vec<&str> = context.files.iter().take(3).map(String::as_str).collect();
336            prompt.push_str(&format!("Files: {}\n", file_list.join(", ")));
337        } else if let Some(count) = context.file_count {
338            prompt.push_str(&format!("File count: {}\n", count));
339        }
340
341        prompt.push_str(
342            "\nYour task is to use the limited context above to generate a fun waiting message \
343             shown to the user while the main task executes. Concise, yet fun and encouraging. \
344             Add fun vibes depending on the context. Be clever. \
345             Capitalize the first letter and end with ellipsis. Under 35 chars. No emojis.\n\n\
346             Just the message:",
347        );
348        prompt
349    }
350
351    /// Get a default message based on context (used as fallback)
352    fn default_message(context: &StatusContext) -> StatusMessage {
353        let message = match context.task_type.as_str() {
354            "commit" => "Crafting your commit message...",
355            "review" => "Analyzing code changes...",
356            "pr" => "Writing PR description...",
357            "changelog" => "Generating changelog...",
358            "release_notes" => "Composing release notes...",
359            "chat" => "Thinking...",
360            "semantic_blame" => "Tracing code origins...",
361            _ => "Working on it...",
362        };
363
364        StatusMessage {
365            message: message.to_string(),
366            time_hint: None,
367        }
368    }
369
370    /// Generate a completion message when a task finishes
371    pub async fn generate_completion(&self, context: &StatusContext) -> StatusMessage {
372        match timeout(
373            Duration::from_millis(self.timeout_ms),
374            self.generate_completion_internal(context),
375        )
376        .await
377        {
378            Ok(Ok(msg)) => msg,
379            Ok(Err(_)) | Err(_) => Self::default_completion(context),
380        }
381    }
382
383    async fn generate_completion_internal(&self, context: &StatusContext) -> Result<StatusMessage> {
384        let prompt = Self::build_completion_prompt(context);
385
386        let agent = Self::build_status_agent(
387            &self.provider,
388            &self.fast_model,
389            self.api_key.as_deref(),
390            Some(&self.additional_params),
391        )?;
392        let response = agent.prompt(&prompt).await?;
393        let message = capitalize_first(response.trim());
394
395        if message.is_empty() || message.len() > 80 {
396            return Ok(Self::default_completion(context));
397        }
398
399        Ok(StatusMessage {
400            message,
401            time_hint: None,
402        })
403    }
404
405    fn build_completion_prompt(context: &StatusContext) -> String {
406        let mut prompt = String::from("Task just completed:\n\n");
407        prompt.push_str(&format!("Task: {}\n", context.task_type));
408
409        if let Some(branch) = &context.branch {
410            prompt.push_str(&format!("Branch: {}\n", branch));
411        }
412
413        if let Some(hint) = &context.current_content_hint {
414            prompt.push_str(&format!("Content: {}\n", hint));
415        }
416
417        prompt.push_str(
418            "\nGenerate a brief completion message based on the content above.\n\n\
419             RULES:\n\
420             - Reference the SPECIFIC topic from content above (not generic \"changes\")\n\
421             - Sentence case, under 35 chars, no emojis\n\
422             - Just the message, nothing else:",
423        );
424        prompt
425    }
426
427    fn default_completion(context: &StatusContext) -> StatusMessage {
428        let message = match context.task_type.as_str() {
429            "commit" => "Ready to commit.",
430            "review" => "Review complete.",
431            "pr" => "PR description ready.",
432            "changelog" => "Changelog generated.",
433            "release_notes" => "Release notes ready.",
434            "chat" => "Here you go.",
435            "semantic_blame" => "Origins traced.",
436            _ => "Done.",
437        };
438
439        StatusMessage {
440            message: message.to_string(),
441            time_hint: None,
442        }
443    }
444}
445
446/// Batch of status messages for cycling display
447#[derive(Debug, Clone, Default)]
448pub struct StatusMessageBatch {
449    messages: Vec<StatusMessage>,
450    current_index: usize,
451}
452
453impl StatusMessageBatch {
454    #[must_use]
455    pub fn new() -> Self {
456        Self::default()
457    }
458
459    /// Add a message to the batch
460    pub fn add(&mut self, message: StatusMessage) {
461        self.messages.push(message);
462    }
463
464    /// Get the current message (if any)
465    #[must_use]
466    pub fn current(&self) -> Option<&StatusMessage> {
467        self.messages.get(self.current_index)
468    }
469
470    /// Advance to the next message (cycles back to start)
471    pub fn next(&mut self) {
472        if !self.messages.is_empty() {
473            self.current_index = (self.current_index + 1) % self.messages.len();
474        }
475    }
476
477    /// Check if we have any messages
478    #[must_use]
479    pub fn is_empty(&self) -> bool {
480        self.messages.is_empty()
481    }
482
483    /// Number of messages in batch
484    #[must_use]
485    pub fn len(&self) -> usize {
486        self.messages.len()
487    }
488
489    /// Clear all messages
490    pub fn clear(&mut self) {
491        self.messages.clear();
492        self.current_index = 0;
493    }
494}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_status_context_builder() {
        let ctx = StatusContext::new("commit", "analyzing staged changes")
            .with_branch("main")
            .with_file_count(5);

        assert_eq!(ctx.task_type, "commit");
        assert_eq!(ctx.branch, Some("main".to_string()));
        assert_eq!(ctx.file_count, Some(5));
    }

    #[test]
    fn test_default_messages() {
        // Each known task type maps to its own canned fallback message.
        let ctx = StatusContext::new("commit", "test");
        let msg = StatusMessageGenerator::default_message(&ctx);
        assert_eq!(msg.message, "Crafting your commit message...");

        let ctx = StatusContext::new("review", "test");
        let msg = StatusMessageGenerator::default_message(&ctx);
        assert_eq!(msg.message, "Analyzing code changes...");

        // Unknown task types fall through to the generic message.
        let ctx = StatusContext::new("unknown", "test");
        let msg = StatusMessageGenerator::default_message(&ctx);
        assert_eq!(msg.message, "Working on it...");
    }

    #[test]
    fn test_message_batch_cycling() {
        let mut batch = StatusMessageBatch::new();
        assert!(batch.is_empty());
        assert!(batch.current().is_none());

        batch.add(StatusMessage {
            message: "First".to_string(),
            time_hint: None,
        });
        batch.add(StatusMessage {
            message: "Second".to_string(),
            time_hint: None,
        });

        assert_eq!(batch.len(), 2);
        assert_eq!(
            batch.current().expect("should have current").message,
            "First"
        );

        batch.next();
        assert_eq!(
            batch.current().expect("should have current").message,
            "Second"
        );

        // Advancing past the end wraps around to the first message.
        batch.next();
        assert_eq!(
            batch.current().expect("should have current").message,
            "First"
        );
    }

    #[test]
    fn test_prompt_building() {
        let ctx = StatusContext::new("commit", "analyzing staged changes")
            .with_branch("feature/awesome")
            .with_file_count(3);

        let prompt = StatusMessageGenerator::build_prompt(&ctx);
        assert!(prompt.contains("commit"));
        assert!(prompt.contains("analyzing staged changes"));
        assert!(prompt.contains("feature/awesome"));
        assert!(prompt.contains('3'));
    }

    /// Debug test to evaluate status message quality
    /// Run with: cargo test `debug_status_messages` -- --ignored --nocapture
    #[test]
    #[ignore = "manual debug test for evaluating status message quality"]
    fn debug_status_messages() {
        use tokio::runtime::Runtime;

        let rt = Runtime::new().expect("failed to create tokio runtime");
        rt.block_on(async {
            // Get provider/model from env or use defaults
            let provider = std::env::var("IRIS_PROVIDER").unwrap_or_else(|_| "openai".to_string());
            let model = std::env::var("IRIS_MODEL").unwrap_or_else(|_| "gpt-5.4-mini".to_string());

            println!("\n{}", "=".repeat(60));
            println!(
                "Status Message Debug - Provider: {}, Model: {}",
                provider, model
            );
            println!("{}\n", "=".repeat(60));

            let generator = StatusMessageGenerator::new(&provider, &model, None, None);

            // Test scenarios
            let scenarios = [
                StatusContext::new("commit", "crafting commit message")
                    .with_branch("main")
                    .with_files(vec![
                        "mod.rs".to_string(),
                        "status_messages.rs".to_string(),
                        "agent_tasks.rs".to_string(),
                    ])
                    .with_file_count(3),
                StatusContext::new("commit", "crafting commit message")
                    .with_branch("feature/auth")
                    .with_files(vec!["auth.rs".to_string(), "login.rs".to_string()])
                    .with_file_count(2),
                StatusContext::new("commit", "crafting commit message")
                    .with_branch("main")
                    .with_files(vec![
                        "config.ts".to_string(),
                        "App.tsx".to_string(),
                        "hooks.ts".to_string(),
                    ])
                    .with_file_count(16)
                    .with_regeneration(true)
                    .with_content_hint("refactor: simplify auth flow"),
                StatusContext::new("review", "analyzing code changes")
                    .with_branch("pr/123")
                    .with_files(vec!["reducer.rs".to_string()])
                    .with_file_count(1),
                StatusContext::new("pr", "drafting PR description")
                    .with_branch("feature/dark-mode")
                    .with_files(vec!["theme.rs".to_string(), "colors.rs".to_string()])
                    .with_file_count(5),
            ];

            for (i, ctx) in scenarios.iter().enumerate() {
                println!("--- Scenario {} ---", i + 1);
                println!(
                    "Task: {}, Branch: {:?}, Files: {:?}",
                    ctx.task_type, ctx.branch, ctx.files
                );
                if ctx.is_regeneration {
                    println!("(Regeneration, hint: {:?})", ctx.current_content_hint);
                }
                println!();

                // Generate 5 messages for each scenario
                for j in 1..=5 {
                    let msg = generator.generate(ctx).await;
                    println!("  {}: {}", j, msg.message);
                }
                println!();
            }
        });
    }
}