Skip to main content

git_iris/agents/
status_messages.rs

1//! Dynamic status message generation using the fast model
2//!
3//! Generates witty, contextual waiting messages while users wait for
4//! agent operations to complete. Uses fire-and-forget async with hard
5//! timeout to ensure we never block on status messages.
6
use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::Write as _;
use tokio::sync::mpsc;
use tokio::time::{Duration, timeout};

use crate::agents::provider::{self, CompletionProfile, DynAgent};
14
/// Context for generating status messages
///
/// Built incrementally via the `with_*` builder methods; everything
/// beyond `task_type` and `activity` is optional enrichment.
#[derive(Debug, Clone)]
pub struct StatusContext {
    /// Type of task being performed
    pub task_type: String,
    /// Current branch name
    pub branch: Option<String>,
    /// Number of files being analyzed
    pub file_count: Option<usize>,
    /// Brief summary of what's happening (e.g., "analyzing commit changes")
    pub activity: String,
    /// Actual file names being changed (for richer context)
    pub files: Vec<String>,
    /// Whether this is a regeneration (we have more context available)
    pub is_regeneration: bool,
    /// Brief description of what's changing (e.g., "auth system, test fixes")
    pub change_summary: Option<String>,
    /// On regeneration: hint about current content (e.g., "commit about auth refactor")
    pub current_content_hint: Option<String>,
}

impl StatusContext {
    /// Create a minimal context with just a task type and activity.
    ///
    /// Accepts anything convertible into `String` for consistency with
    /// the other builder methods (callers passing `&str` still work).
    #[must_use]
    pub fn new(task_type: impl Into<String>, activity: impl Into<String>) -> Self {
        Self {
            task_type: task_type.into(),
            branch: None,
            file_count: None,
            activity: activity.into(),
            files: Vec::new(),
            is_regeneration: false,
            change_summary: None,
            current_content_hint: None,
        }
    }

    /// Set the current branch name
    #[must_use]
    pub fn with_branch(mut self, branch: impl Into<String>) -> Self {
        self.branch = Some(branch.into());
        self
    }

    /// Set the number of files being analyzed
    #[must_use]
    pub fn with_file_count(mut self, count: usize) -> Self {
        self.file_count = Some(count);
        self
    }

    /// Set the list of file names being changed
    #[must_use]
    pub fn with_files(mut self, files: Vec<String>) -> Self {
        self.files = files;
        self
    }

    /// Mark whether this is a regeneration of prior content
    #[must_use]
    pub fn with_regeneration(mut self, is_regen: bool) -> Self {
        self.is_regeneration = is_regen;
        self
    }

    /// Set a brief description of what's changing
    #[must_use]
    pub fn with_change_summary(mut self, summary: impl Into<String>) -> Self {
        self.change_summary = Some(summary.into());
        self
    }

    /// Set a hint about the current content (used on regeneration)
    #[must_use]
    pub fn with_content_hint(mut self, hint: impl Into<String>) -> Self {
        self.current_content_hint = Some(hint.into());
        self
    }
}
87
88/// A generated status message
89#[derive(Debug, Clone, Serialize, Deserialize)]
90pub struct StatusMessage {
91    /// The witty message to display
92    pub message: String,
93    /// Estimated time context (e.g., "a few seconds", "about 30 seconds")
94    pub time_hint: Option<String>,
95}
96
97impl Default for StatusMessage {
98    fn default() -> Self {
99        Self {
100            message: "Working on it...".to_string(),
101            time_hint: None,
102        }
103    }
104}
105
/// Capitalize the first letter of a string (sentence case).
///
/// Uses `char::to_uppercase`, so multi-character expansions
/// (e.g. 'ß' -> "SS") are handled; an empty input yields "".
fn capitalize_first(s: &str) -> String {
    match s.chars().next() {
        None => String::new(),
        Some(first) => {
            // Reserve room for the original plus a possible expansion char.
            let mut result = String::with_capacity(s.len() + 1);
            result.extend(first.to_uppercase());
            result.push_str(&s[first.len_utf8()..]);
            result
        }
    }
}
114
/// Generator for dynamic status messages
///
/// Holds only provider/model configuration; cheap to construct and no
/// network work happens until one of the `generate*` methods is called.
pub struct StatusMessageGenerator {
    // LLM provider name (e.g., "anthropic", "openai")
    provider: String,
    // Model used for quick, low-cost generations
    fast_model: String,
    /// API key for the provider (from config)
    api_key: Option<String>,
    /// Provider-specific params inherited from config
    additional_params: HashMap<String, String>,
    /// Hard timeout for status message generation (ms)
    timeout_ms: u64,
}
126
127impl StatusMessageGenerator {
128    /// Create a new status message generator
129    ///
130    /// # Arguments
131    /// * `provider` - LLM provider name (e.g., "anthropic", "openai")
132    /// * `fast_model` - Model to use for quick generations
133    /// * `api_key` - Optional API key (falls back to env var if not provided)
134    #[must_use]
135    pub fn new(
136        provider: impl Into<String>,
137        fast_model: impl Into<String>,
138        api_key: Option<String>,
139        additional_params: Option<HashMap<String, String>>,
140    ) -> Self {
141        Self {
142            provider: provider.into(),
143            fast_model: fast_model.into(),
144            api_key,
145            additional_params: additional_params.unwrap_or_default(),
146            timeout_ms: 1500, // 1.5 seconds - fast model should respond quickly
147        }
148    }
149
150    /// Set custom timeout in milliseconds
151    #[must_use]
152    pub fn with_timeout_ms(mut self, ms: u64) -> Self {
153        self.timeout_ms = ms;
154        self
155    }
156
157    /// Generate a status message synchronously with timeout
158    ///
159    /// Returns default message if generation fails or times out.
160    pub async fn generate(&self, context: &StatusContext) -> StatusMessage {
161        match timeout(
162            Duration::from_millis(self.timeout_ms),
163            self.generate_internal(context),
164        )
165        .await
166        {
167            Ok(Ok(msg)) => msg,
168            Ok(Err(_)) | Err(_) => Self::default_message(context),
169        }
170    }
171
172    /// Spawn fire-and-forget generation that sends result to channel
173    ///
174    /// This spawns an async task that will send the generated message
175    /// to the provided channel. If generation times out or fails, nothing
176    /// is sent (caller should already have a fallback displayed).
177    pub fn spawn_generation(
178        &self,
179        context: StatusContext,
180        tx: mpsc::UnboundedSender<StatusMessage>,
181    ) {
182        let provider = self.provider.clone();
183        let fast_model = self.fast_model.clone();
184        let api_key = self.api_key.clone();
185        let additional_params = self.additional_params.clone();
186        let timeout_ms = self.timeout_ms;
187
188        tokio::spawn(async move {
189            let generator = StatusMessageGenerator {
190                provider,
191                fast_model,
192                api_key,
193                additional_params,
194                timeout_ms,
195            };
196
197            if let Ok(Ok(msg)) = timeout(
198                Duration::from_millis(timeout_ms),
199                generator.generate_internal(&context),
200            )
201            .await
202            {
203                let _ = tx.send(msg);
204            }
205        });
206    }
207
208    /// Create a channel for receiving status messages
209    #[must_use]
210    pub fn create_channel() -> (
211        mpsc::UnboundedSender<StatusMessage>,
212        mpsc::UnboundedReceiver<StatusMessage>,
213    ) {
214        mpsc::unbounded_channel()
215    }
216
217    /// Build the agent for status message generation
218    fn build_status_agent(
219        provider: &str,
220        fast_model: &str,
221        api_key: Option<&str>,
222        additional_params: Option<&HashMap<String, String>>,
223    ) -> Result<DynAgent> {
224        let preamble = "You write fun waiting messages for a Git AI named Iris. \
225                        Concise, yet fun and encouraging, add vibes, be clever, not cheesy. \
226                        Capitalize first letter, end with ellipsis. Under 35 chars. No emojis. \
227                        Just the message text, nothing else.";
228        let provider_name = provider::provider_from_name(provider)?;
229
230        match provider {
231            "openai" => {
232                let builder = provider::openai_builder(fast_model, api_key)?.preamble(preamble);
233                let agent = provider::apply_completion_params(
234                    builder,
235                    provider_name,
236                    fast_model,
237                    50,
238                    additional_params,
239                    CompletionProfile::StatusMessage,
240                )
241                .build();
242                Ok(DynAgent::OpenAI(agent))
243            }
244            "anthropic" => {
245                let builder = provider::anthropic_builder(fast_model, api_key)?.preamble(preamble);
246                let agent = provider::apply_completion_params(
247                    builder,
248                    provider_name,
249                    fast_model,
250                    50,
251                    additional_params,
252                    CompletionProfile::StatusMessage,
253                )
254                .build();
255                Ok(DynAgent::Anthropic(agent))
256            }
257            "google" | "gemini" => {
258                let builder = provider::gemini_builder(fast_model, api_key)?.preamble(preamble);
259                let agent = provider::apply_completion_params(
260                    builder,
261                    provider_name,
262                    fast_model,
263                    50,
264                    additional_params,
265                    CompletionProfile::StatusMessage,
266                )
267                .build();
268                Ok(DynAgent::Gemini(agent))
269            }
270            _ => Err(anyhow::anyhow!("Unsupported provider: {}", provider)),
271        }
272    }
273
274    /// Internal generation logic
275    async fn generate_internal(&self, context: &StatusContext) -> Result<StatusMessage> {
276        let prompt = Self::build_prompt(context);
277        tracing::info!(
278            "Building status agent with provider={}, model={}",
279            self.provider,
280            self.fast_model
281        );
282
283        // Build agent synchronously (DynClientBuilder is not Send)
284        // The returned agent IS Send, so we can await after this
285        let agent = match Self::build_status_agent(
286            &self.provider,
287            &self.fast_model,
288            self.api_key.as_deref(),
289            Some(&self.additional_params),
290        ) {
291            Ok(a) => a,
292            Err(e) => {
293                tracing::warn!("Failed to build status agent: {}", e);
294                return Err(e);
295            }
296        };
297
298        tracing::info!("Prompting status agent...");
299        let response = match agent.prompt(&prompt).await {
300            Ok(r) => r,
301            Err(e) => {
302                tracing::warn!("Status agent prompt failed: {}", e);
303                return Err(anyhow::anyhow!("Prompt failed: {}", e));
304            }
305        };
306
307        let message = capitalize_first(response.trim());
308        tracing::info!(
309            "Status agent response ({} chars): {:?}",
310            message.len(),
311            message
312        );
313
314        // Sanity check - if response is too long or empty, use fallback
315        if message.is_empty() || message.len() > 80 {
316            tracing::info!("Response invalid (empty or too long), using fallback");
317            return Ok(Self::default_message(context));
318        }
319
320        Ok(StatusMessage {
321            message,
322            time_hint: None,
323        })
324    }
325
326    /// Build the prompt for status message generation
327    fn build_prompt(context: &StatusContext) -> String {
328        let mut prompt = String::from("Context:\n");
329
330        prompt.push_str(&format!("Task: {}\n", context.task_type));
331        prompt.push_str(&format!("Activity: {}\n", context.activity));
332
333        if let Some(branch) = &context.branch {
334            prompt.push_str(&format!("Branch: {}\n", branch));
335        }
336
337        if !context.files.is_empty() {
338            let file_list: Vec<&str> = context.files.iter().take(3).map(String::as_str).collect();
339            prompt.push_str(&format!("Files: {}\n", file_list.join(", ")));
340        } else if let Some(count) = context.file_count {
341            prompt.push_str(&format!("File count: {}\n", count));
342        }
343
344        prompt.push_str(
345            "\nYour task is to use the limited context above to generate a fun waiting message \
346             shown to the user while the main task executes. Concise, yet fun and encouraging. \
347             Add fun vibes depending on the context. Be clever. \
348             Capitalize the first letter and end with ellipsis. Under 35 chars. No emojis.\n\n\
349             Just the message:",
350        );
351        prompt
352    }
353
354    /// Get a default message based on context (used as fallback)
355    fn default_message(context: &StatusContext) -> StatusMessage {
356        let message = match context.task_type.as_str() {
357            "commit" => "Crafting your commit message...",
358            "review" => "Analyzing code changes...",
359            "pr" => "Writing PR description...",
360            "changelog" => "Generating changelog...",
361            "release_notes" => "Composing release notes...",
362            "chat" => "Thinking...",
363            "semantic_blame" => "Tracing code origins...",
364            _ => "Working on it...",
365        };
366
367        StatusMessage {
368            message: message.to_string(),
369            time_hint: None,
370        }
371    }
372
373    /// Generate a completion message when a task finishes
374    pub async fn generate_completion(&self, context: &StatusContext) -> StatusMessage {
375        match timeout(
376            Duration::from_millis(self.timeout_ms),
377            self.generate_completion_internal(context),
378        )
379        .await
380        {
381            Ok(Ok(msg)) => msg,
382            Ok(Err(_)) | Err(_) => Self::default_completion(context),
383        }
384    }
385
386    async fn generate_completion_internal(&self, context: &StatusContext) -> Result<StatusMessage> {
387        let prompt = Self::build_completion_prompt(context);
388
389        let agent = Self::build_status_agent(
390            &self.provider,
391            &self.fast_model,
392            self.api_key.as_deref(),
393            Some(&self.additional_params),
394        )?;
395        let response = agent.prompt(&prompt).await?;
396        let message = capitalize_first(response.trim());
397
398        if message.is_empty() || message.len() > 80 {
399            return Ok(Self::default_completion(context));
400        }
401
402        Ok(StatusMessage {
403            message,
404            time_hint: None,
405        })
406    }
407
408    fn build_completion_prompt(context: &StatusContext) -> String {
409        let mut prompt = String::from("Task just completed:\n\n");
410        prompt.push_str(&format!("Task: {}\n", context.task_type));
411
412        if let Some(branch) = &context.branch {
413            prompt.push_str(&format!("Branch: {}\n", branch));
414        }
415
416        if let Some(hint) = &context.current_content_hint {
417            prompt.push_str(&format!("Content: {}\n", hint));
418        }
419
420        prompt.push_str(
421            "\nGenerate a brief completion message based on the content above.\n\n\
422             RULES:\n\
423             - Reference the SPECIFIC topic from content above (not generic \"changes\")\n\
424             - Sentence case, under 35 chars, no emojis\n\
425             - Just the message, nothing else:",
426        );
427        prompt
428    }
429
430    fn default_completion(context: &StatusContext) -> StatusMessage {
431        let message = match context.task_type.as_str() {
432            "commit" => "Ready to commit.",
433            "review" => "Review complete.",
434            "pr" => "PR description ready.",
435            "changelog" => "Changelog generated.",
436            "release_notes" => "Release notes ready.",
437            "chat" => "Here you go.",
438            "semantic_blame" => "Origins traced.",
439            _ => "Done.",
440        };
441
442        StatusMessage {
443            message: message.to_string(),
444            time_hint: None,
445        }
446    }
447}
448
449/// Batch of status messages for cycling display
450#[derive(Debug, Clone, Default)]
451pub struct StatusMessageBatch {
452    messages: Vec<StatusMessage>,
453    current_index: usize,
454}
455
456impl StatusMessageBatch {
457    #[must_use]
458    pub fn new() -> Self {
459        Self::default()
460    }
461
462    /// Add a message to the batch
463    pub fn add(&mut self, message: StatusMessage) {
464        self.messages.push(message);
465    }
466
467    /// Get the current message (if any)
468    #[must_use]
469    pub fn current(&self) -> Option<&StatusMessage> {
470        self.messages.get(self.current_index)
471    }
472
473    /// Advance to the next message (cycles back to start)
474    pub fn next(&mut self) {
475        if !self.messages.is_empty() {
476            self.current_index = (self.current_index + 1) % self.messages.len();
477        }
478    }
479
480    /// Check if we have any messages
481    #[must_use]
482    pub fn is_empty(&self) -> bool {
483        self.messages.is_empty()
484    }
485
486    /// Number of messages in batch
487    #[must_use]
488    pub fn len(&self) -> usize {
489        self.messages.len()
490    }
491
492    /// Clear all messages
493    pub fn clear(&mut self) {
494        self.messages.clear();
495        self.current_index = 0;
496    }
497}
498
#[cfg(test)]
mod tests {
    use super::*;

    /// Builder methods should store each piece of context verbatim.
    #[test]
    fn test_status_context_builder() {
        let ctx = StatusContext::new("commit", "analyzing staged changes")
            .with_branch("main")
            .with_file_count(5);

        assert_eq!(ctx.task_type, "commit");
        assert_eq!(ctx.branch, Some("main".to_string()));
        assert_eq!(ctx.file_count, Some(5));
    }

    /// Fallback messages are keyed off `task_type`, with a generic default.
    #[test]
    fn test_default_messages() {
        let ctx = StatusContext::new("commit", "test");
        let msg = StatusMessageGenerator::default_message(&ctx);
        assert_eq!(msg.message, "Crafting your commit message...");

        let ctx = StatusContext::new("review", "test");
        let msg = StatusMessageGenerator::default_message(&ctx);
        assert_eq!(msg.message, "Analyzing code changes...");

        // Unknown task types fall through to the catch-all message.
        let ctx = StatusContext::new("unknown", "test");
        let msg = StatusMessageGenerator::default_message(&ctx);
        assert_eq!(msg.message, "Working on it...");
    }

    /// `next()` must advance through the batch and wrap back to the start.
    #[test]
    fn test_message_batch_cycling() {
        let mut batch = StatusMessageBatch::new();
        assert!(batch.is_empty());
        assert!(batch.current().is_none());

        batch.add(StatusMessage {
            message: "First".to_string(),
            time_hint: None,
        });
        batch.add(StatusMessage {
            message: "Second".to_string(),
            time_hint: None,
        });

        assert_eq!(batch.len(), 2);
        assert_eq!(
            batch.current().expect("should have current").message,
            "First"
        );

        batch.next();
        assert_eq!(
            batch.current().expect("should have current").message,
            "Second"
        );

        batch.next();
        assert_eq!(
            batch.current().expect("should have current").message,
            "First"
        ); // Cycles back
    }

    /// The generated prompt should surface task, activity, branch, and count.
    #[test]
    fn test_prompt_building() {
        let ctx = StatusContext::new("commit", "analyzing staged changes")
            .with_branch("feature/awesome")
            .with_file_count(3);

        let prompt = StatusMessageGenerator::build_prompt(&ctx);
        assert!(prompt.contains("commit"));
        assert!(prompt.contains("analyzing staged changes"));
        assert!(prompt.contains("feature/awesome"));
        assert!(prompt.contains('3'));
    }

    /// Debug test to evaluate status message quality
    /// Run with: cargo test `debug_status_messages` -- --ignored --nocapture
    ///
    /// Hits the real provider API (reads IRIS_PROVIDER/IRIS_MODEL from the
    /// environment), so it is `#[ignore]`d and only run manually.
    #[test]
    #[ignore = "manual debug test for evaluating status message quality"]
    fn debug_status_messages() {
        use tokio::runtime::Runtime;

        // Build a runtime by hand since #[tokio::test] isn't used here.
        let rt = Runtime::new().expect("failed to create tokio runtime");
        rt.block_on(async {
            // Get provider/model from env or use defaults
            let provider = std::env::var("IRIS_PROVIDER").unwrap_or_else(|_| "openai".to_string());
            let model = std::env::var("IRIS_MODEL").unwrap_or_else(|_| "gpt-5.4-mini".to_string());

            println!("\n{}", "=".repeat(60));
            println!(
                "Status Message Debug - Provider: {}, Model: {}",
                provider, model
            );
            println!("{}\n", "=".repeat(60));

            let generator = StatusMessageGenerator::new(&provider, &model, None, None);

            // Test scenarios
            let scenarios = [
                StatusContext::new("commit", "crafting commit message")
                    .with_branch("main")
                    .with_files(vec![
                        "mod.rs".to_string(),
                        "status_messages.rs".to_string(),
                        "agent_tasks.rs".to_string(),
                    ])
                    .with_file_count(3),
                StatusContext::new("commit", "crafting commit message")
                    .with_branch("feature/auth")
                    .with_files(vec!["auth.rs".to_string(), "login.rs".to_string()])
                    .with_file_count(2),
                StatusContext::new("commit", "crafting commit message")
                    .with_branch("main")
                    .with_files(vec![
                        "config.ts".to_string(),
                        "App.tsx".to_string(),
                        "hooks.ts".to_string(),
                    ])
                    .with_file_count(16)
                    .with_regeneration(true)
                    .with_content_hint("refactor: simplify auth flow"),
                StatusContext::new("review", "analyzing code changes")
                    .with_branch("pr/123")
                    .with_files(vec!["reducer.rs".to_string()])
                    .with_file_count(1),
                StatusContext::new("pr", "drafting PR description")
                    .with_branch("feature/dark-mode")
                    .with_files(vec!["theme.rs".to_string(), "colors.rs".to_string()])
                    .with_file_count(5),
            ];

            for (i, ctx) in scenarios.iter().enumerate() {
                println!("--- Scenario {} ---", i + 1);
                println!(
                    "Task: {}, Branch: {:?}, Files: {:?}",
                    ctx.task_type, ctx.branch, ctx.files
                );
                if ctx.is_regeneration {
                    println!("(Regeneration, hint: {:?})", ctx.current_content_hint);
                }
                println!();

                // Generate 5 messages for each scenario
                for j in 1..=5 {
                    let msg = generator.generate(ctx).await;
                    println!("  {}: {}", j, msg.message);
                }
                println!();
            }
        });
    }
}