//! intent_engine/llm.rs — OpenAI-compatible LLM client, configuration
//! resolution (env vars + workspace_state), and background task-structure
//! analysis with suggestion storage.

1use crate::cli_handlers::config_commands::{config_get, config_set};
2use crate::error::Result;
3use serde::{Deserialize, Serialize};
4use sqlx::SqlitePool;
5use std::sync::atomic::{AtomicI64, Ordering};
6use std::time::{SystemTime, UNIX_EPOCH};
7
/// Global cooldown to prevent unlimited task spawning
/// Stores the timestamp of the last analysis (seconds since epoch).
/// The initial value 0 means "never run", so the first check always passes.
static LAST_ANALYSIS_TIME: AtomicI64 = AtomicI64::new(0);

/// Default cooldown period: 5 minutes
const DEFAULT_ANALYSIS_COOLDOWN_SECS: i64 = 300;

/// Maximum number of active (non-dismissed) suggestions
/// Prevents unbounded growth if user never dismisses suggestions
/// (enforced in `store_suggestion` by auto-dismissing the oldest).
const MAX_ACTIVE_SUGGESTIONS: i64 = 20;
18
/// Current wall-clock time as whole seconds since `UNIX_EPOCH`.
///
/// Returns `None` instead of panicking when the system clock reports a time
/// before the epoch (i.e. when `duration_since` fails on a misconfigured
/// or skewed clock).
fn get_current_timestamp() -> Option<i64> {
    let elapsed = SystemTime::now().duration_since(UNIX_EPOCH).ok()?;
    Some(elapsed.as_secs() as i64)
}
29
30/// Check if enough time has passed since last analysis
31///
32/// Handles edge cases:
33/// - System clock errors: allows analysis (fail-safe)
34/// - Clock skew (time went backwards): resets timer and allows analysis
35/// - Thread safety: uses Acquire ordering for visibility
36fn should_trigger_analysis() -> bool {
37    let now = match get_current_timestamp() {
38        Some(ts) => ts,
39        None => {
40            tracing::warn!("System clock error, allowing analysis as fail-safe");
41            return true;
42        },
43    };
44
45    let last = LAST_ANALYSIS_TIME.load(Ordering::Acquire);
46
47    // Handle clock skew (time went backwards)
48    if now < last {
49        tracing::warn!(
50            "Clock skew detected: current={}, last={}, resetting analysis timer",
51            now,
52            last
53        );
54        LAST_ANALYSIS_TIME.store(now, Ordering::Release);
55        return true;
56    }
57
58    // Use default cooldown (5 minutes)
59    let cooldown = DEFAULT_ANALYSIS_COOLDOWN_SECS;
60
61    now - last >= cooldown
62}
63
64/// Mark that analysis is starting now
65///
66/// Uses Release ordering to ensure visibility across threads.
67/// If system clock fails, logs warning but doesn't update (safe default).
68fn mark_analysis_started() {
69    if let Some(now) = get_current_timestamp() {
70        LAST_ANALYSIS_TIME.store(now, Ordering::Release);
71    } else {
72        tracing::warn!("System clock error, cannot update analysis timestamp");
73    }
74}
75
/// LLM configuration resolved from env vars and workspace_state
#[derive(Debug, Clone)]
pub struct LlmConfig {
    /// Full chat-completions URL; requests are POSTed here verbatim.
    pub endpoint: String,
    /// Secret sent as `Authorization: Bearer <api_key>` on every request.
    pub api_key: String,
    /// Model identifier placed in the request body's `model` field.
    pub model: String,
}
83
/// A chat message for the OpenAI-compatible API
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatMessage {
    // Sent verbatim to the API; this module only ever constructs "user".
    pub role: String,
    // Message text (prompt or response content).
    pub content: String,
}
90
/// OpenAI-compatible chat completion request
/// Serialized as the JSON body of the POST to the configured endpoint.
#[derive(Debug, Serialize)]
struct ChatRequest {
    // Model identifier, copied from `LlmConfig::model`.
    model: String,
    // Conversation history, oldest first.
    messages: Vec<ChatMessage>,
}
97
/// OpenAI-compatible chat completion response
/// Only the fields this module reads are modeled; extra JSON fields are
/// ignored by serde's default behavior.
#[derive(Debug, Deserialize)]
struct ChatResponse {
    // Candidate completions; `chat_with_messages` uses the first one.
    choices: Vec<ChatChoice>,
}

/// One completion candidate in a `ChatResponse`.
#[derive(Debug, Deserialize)]
struct ChatChoice {
    message: ChatResponseMessage,
}

/// The assistant message inside a `ChatChoice`.
#[derive(Debug, Deserialize)]
struct ChatResponseMessage {
    // The generated text returned to callers.
    content: String,
}
113
114impl LlmConfig {
115    /// Resolve LLM config from env vars (priority) and workspace_state.
116    /// Returns Some only when all three fields (endpoint, api_key, model) are configured.
117    pub async fn resolve(pool: &SqlitePool) -> Result<Option<Self>> {
118        let endpoint = Self::resolve_field(pool, "IE_LLM_ENDPOINT", "llm.endpoint").await?;
119        let api_key = Self::resolve_field(pool, "IE_LLM_API_KEY", "llm.api_key").await?;
120        let model = Self::resolve_field(pool, "IE_LLM_MODEL", "llm.model").await?;
121
122        match (endpoint, api_key, model) {
123            (Some(endpoint), Some(api_key), Some(model)) => Ok(Some(Self {
124                endpoint,
125                api_key,
126                model,
127            })),
128            _ => Ok(None),
129        }
130    }
131
132    /// Resolve a single field: env var takes priority over workspace_state
133    async fn resolve_field(
134        pool: &SqlitePool,
135        env_var: &str,
136        config_key: &str,
137    ) -> Result<Option<String>> {
138        // Check env var first
139        if let Ok(val) = std::env::var(env_var) {
140            if !val.is_empty() {
141                return Ok(Some(val));
142            }
143        }
144
145        // Fall back to workspace_state
146        config_get(pool, config_key).await
147    }
148
149    /// Save LLM config to workspace_state
150    pub async fn save(&self, pool: &SqlitePool) -> Result<()> {
151        config_set(pool, "llm.endpoint", &self.endpoint).await?;
152        config_set(pool, "llm.api_key", &self.api_key).await?;
153        config_set(pool, "llm.model", &self.model).await?;
154        Ok(())
155    }
156}
157
/// OpenAI-compatible LLM client
pub struct LlmClient {
    // Endpoint/key/model, fully resolved at construction time.
    config: LlmConfig,
    // Reused HTTP client, built with a 30s request timeout in `from_pool`.
    client: reqwest::Client,
}
163
164impl LlmClient {
165    /// Create a new LlmClient from database config.
166    /// Returns an error if LLM is not fully configured.
167    pub async fn from_pool(pool: &SqlitePool) -> Result<Self> {
168        let config = LlmConfig::resolve(pool).await?.ok_or_else(|| {
169            crate::error::IntentError::InvalidInput(
170                "LLM not configured. Set llm.endpoint, llm.api_key, and llm.model via 'ie config set' or environment variables (IE_LLM_ENDPOINT, IE_LLM_API_KEY, IE_LLM_MODEL).".to_string(),
171            )
172        })?;
173
174        let client = reqwest::Client::builder()
175            .timeout(std::time::Duration::from_secs(30))
176            .build()
177            .map_err(|e| crate::error::IntentError::OtherError(e.into()))?;
178
179        Ok(Self { config, client })
180    }
181
182    /// Check whether LLM is fully configured (all three fields present)
183    pub async fn is_configured(pool: &SqlitePool) -> bool {
184        matches!(LlmConfig::resolve(pool).await, Ok(Some(_)))
185    }
186
187    /// Simple chat: send a single user prompt and get a response
188    pub async fn chat(&self, prompt: &str) -> Result<String> {
189        let messages = vec![ChatMessage {
190            role: "user".to_string(),
191            content: prompt.to_string(),
192        }];
193        self.chat_with_messages(messages).await
194    }
195
196    /// Chat with full message history
197    pub async fn chat_with_messages(&self, messages: Vec<ChatMessage>) -> Result<String> {
198        let request = ChatRequest {
199            model: self.config.model.clone(),
200            messages,
201        };
202
203        let response = self
204            .client
205            .post(&self.config.endpoint)
206            .header("Authorization", format!("Bearer {}", self.config.api_key))
207            .header("Content-Type", "application/json")
208            .json(&request)
209            .send()
210            .await
211            .map_err(|e| crate::error::IntentError::OtherError(e.into()))?;
212
213        if !response.status().is_success() {
214            let status = response.status();
215            let body = response
216                .text()
217                .await
218                .unwrap_or_else(|_| "(no body)".to_string());
219            return Err(crate::error::IntentError::OtherError(anyhow::anyhow!(
220                "LLM API error (HTTP {}): {}",
221                status,
222                body
223            )));
224        }
225
226        let chat_response: ChatResponse = response
227            .json()
228            .await
229            .map_err(|e| crate::error::IntentError::OtherError(e.into()))?;
230
231        chat_response
232            .choices
233            .into_iter()
234            .next()
235            .map(|c| c.message.content)
236            .ok_or_else(|| {
237                crate::error::IntentError::OtherError(anyhow::anyhow!("LLM returned empty choices"))
238            })
239    }
240
241    /// Synthesize task description from accumulated events
242    ///
243    /// This function takes a task and its event history, and uses the LLM to generate
244    /// a structured summary in markdown format.
245    pub async fn synthesize_task_description(
246        &self,
247        task_name: &str,
248        original_spec: Option<&str>,
249        events: &[crate::db::models::Event],
250    ) -> Result<String> {
251        // Build the event summary
252        let events_text = if events.is_empty() {
253            "No events recorded.".to_string()
254        } else {
255            events
256                .iter()
257                .map(|e| {
258                    format!(
259                        "[{}] {} - {}",
260                        e.log_type,
261                        e.timestamp.format("%Y-%m-%d %H:%M"),
262                        e.discussion_data
263                    )
264                })
265                .collect::<Vec<_>>()
266                .join("\n")
267        };
268
269        let original_spec_text = original_spec.unwrap_or("(No original description)");
270
271        // Detect language from task name and events to respond in same language
272        let is_cjk = task_name.chars().any(|c| {
273            matches!(c,
274                '\u{4E00}'..='\u{9FFF}' |  // CJK Unified Ideographs
275                '\u{3400}'..='\u{4DBF}' |  // CJK Extension A
276                '\u{3040}'..='\u{309F}' |  // Hiragana
277                '\u{30A0}'..='\u{30FF}' |  // Katakana
278                '\u{AC00}'..='\u{D7AF}'    // Hangul
279            )
280        });
281
282        let language_instruction = if is_cjk {
283            "Respond in Chinese (中文)."
284        } else {
285            "Respond in English."
286        };
287
288        // Construct the prompt
289        let prompt = format!(
290            r#"You are summarizing a completed task based on its execution history.
291
292Task: {}
293Original description: {}
294
295Events (chronological):
296{}
297
298Synthesize a clear, structured description capturing:
2991. Goal (what was the objective?)
3002. Approach (how was it accomplished?)
3013. Key Decisions (what choices were made and why?)
3024. Outcome (what was delivered?)
303
304Use markdown format with ## headers. Be concise but preserve critical context.
305Output ONLY the markdown summary, no preamble or explanation.
306
307IMPORTANT: {}"#,
308            task_name, original_spec_text, events_text, language_instruction
309        );
310
311        self.chat(&prompt).await
312    }
313}
314
315/// Synthesize task description using LLM (convenience function)
316///
317/// Returns None if LLM is not configured (graceful degradation)
318pub async fn synthesize_task_description(
319    pool: &SqlitePool,
320    task_name: &str,
321    original_spec: Option<&str>,
322    events: &[crate::db::models::Event],
323) -> Result<Option<String>> {
324    // Check if LLM is configured
325    if !LlmClient::is_configured(pool).await {
326        return Ok(None);
327    }
328
329    // Create client and synthesize
330    let client = LlmClient::from_pool(pool).await?;
331    let synthesis = client
332        .synthesize_task_description(task_name, original_spec, events)
333        .await?;
334
335    Ok(Some(synthesis))
336}
337
338/// Analyze task structure in background and store suggestions
339///
340/// This function runs asynchronously without blocking the caller.
341/// Suggestions are stored in the database and shown at next interaction.
342///
343/// **Rate Limiting**: Uses a cooldown period (default 5 minutes) to prevent
344/// unlimited task spawning. If called within the cooldown period, it's a no-op.
345pub fn analyze_task_structure_background(pool: SqlitePool) {
346    // Check cooldown BEFORE spawning to avoid unnecessary tasks
347    if !should_trigger_analysis() {
348        tracing::debug!("Analysis cooldown active, skipping background analysis");
349        return;
350    }
351
352    // Mark as started immediately to prevent race conditions
353    mark_analysis_started();
354
355    tokio::spawn(async move {
356        if let Err(e) = analyze_and_store_suggestions(&pool).await {
357            // Store error as a suggestion so user knows it failed
358            let error_msg = format!(
359                "## Analysis Error\n\n\
360                Background task structure analysis failed: {}\n\n\
361                This may indicate:\n\
362                - LLM API endpoint is unreachable\n\
363                - API quota exceeded\n\
364                - Network connectivity issues\n\n\
365                Check logs for details: `ie log`",
366                e
367            );
368
369            // Try to store the error
370            match store_suggestion(&pool, "error", &error_msg).await {
371                Ok(_) => {
372                    tracing::warn!("Background task analysis failed: {}", e);
373                },
374                Err(store_err) => {
375                    // Critical: both analysis AND storage failed
376                    // Log to stderr so user has some visibility
377                    tracing::error!("Failed to store error suggestion: {}", store_err);
378                    eprintln!(
379                        "\n⚠️  Background analysis failed AND couldn't store error.\n\
380                         Analysis error: {}\n\
381                         Storage error: {}\n\
382                         This may indicate database issues.",
383                        e, store_err
384                    );
385                },
386            }
387        }
388    });
389}
390
391/// Internal: Perform analysis and store suggestions
392async fn analyze_and_store_suggestions(pool: &SqlitePool) -> Result<()> {
393    // Check if LLM is configured
394    if !LlmClient::is_configured(pool).await {
395        return Ok(()); // Silent skip
396    }
397
398    // Get all tasks
399    let tasks: Vec<crate::db::models::Task> = sqlx::query_as(
400        "SELECT id, parent_id, name, spec, status, complexity, priority, \
401         first_todo_at, first_doing_at, first_done_at, active_form, owner, metadata \
402         FROM tasks ORDER BY id",
403    )
404    .fetch_all(pool)
405    .await?;
406
407    // Need at least 5 tasks to make meaningful suggestions
408    if tasks.len() < 5 {
409        return Ok(());
410    }
411
412    let analysis = perform_structure_analysis(pool, &tasks).await?;
413
414    // Only store if there are actual suggestions
415    if !analysis.contains("no reorganization needed") && !analysis.contains("looks good") {
416        store_suggestion(pool, "task_structure", &analysis).await?;
417    }
418
419    Ok(())
420}
421
422/// Internal: Perform the actual LLM analysis
423async fn perform_structure_analysis(
424    pool: &SqlitePool,
425    tasks: &[crate::db::models::Task],
426) -> Result<String> {
427    // Build task tree representation
428    let task_summary = tasks
429        .iter()
430        .map(|t| {
431            format!(
432                "#{} {} [{}] (parent: {})",
433                t.id,
434                t.name,
435                t.status,
436                t.parent_id.map_or("none".to_string(), |p| p.to_string())
437            )
438        })
439        .collect::<Vec<_>>()
440        .join("\n");
441
442    // Detect language from task names
443    let is_cjk = tasks.iter().any(|t| {
444        t.name.chars().any(|c| {
445            matches!(c,
446                '\u{4E00}'..='\u{9FFF}' |
447                '\u{3400}'..='\u{4DBF}' |
448                '\u{3040}'..='\u{309F}' |
449                '\u{30A0}'..='\u{30FF}' |
450                '\u{AC00}'..='\u{D7AF}'
451            )
452        })
453    });
454
455    let language_instruction = if is_cjk {
456        "Respond in Chinese (中文)."
457    } else {
458        "Respond in English."
459    };
460
461    let prompt = format!(
462        r#"You are analyzing a task hierarchy for structural issues.
463
464Current task tree:
465{}
466
467Identify tasks that should be reorganized:
4681. Semantically related tasks that should be grouped under a common parent
4692. Root tasks that could be subtasks of existing tasks
4703. Tasks with similar names or themes that should share a parent
471
472For each suggestion:
473- Explain WHY the reorganization makes sense
474- Provide the EXACT command to execute
475- Only suggest if there's clear semantic relationship
476
477Output format:
478## Suggestion 1: [Brief description]
479**Reason**: [Why this makes sense]
480**Command**: `ie task update <id> --parent <parent_id>`
481
482If no reorganization needed, respond with: "Task structure looks good, no reorganization needed."
483
484IMPORTANT: {}"#,
485        task_summary, language_instruction
486    );
487
488    let client = LlmClient::from_pool(pool).await?;
489    let analysis = client.chat(&prompt).await?;
490
491    Ok(analysis)
492}
493
494/// Internal: Store a suggestion in the database
495///
496/// Implements automatic cleanup when suggestion count exceeds MAX_ACTIVE_SUGGESTIONS.
497/// Old suggestions are auto-dismissed to prevent unbounded growth.
498async fn store_suggestion(pool: &SqlitePool, suggestion_type: &str, content: &str) -> Result<()> {
499    // Check current count of active suggestions
500    let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM suggestions WHERE dismissed = 0")
501        .fetch_one(pool)
502        .await?;
503
504    // If at limit, auto-dismiss oldest suggestion
505    if count >= MAX_ACTIVE_SUGGESTIONS {
506        let dismissed = sqlx::query(
507            "UPDATE suggestions SET dismissed = 1
508             WHERE id IN (
509                 SELECT id FROM suggestions
510                 WHERE dismissed = 0
511                 ORDER BY created_at ASC
512                 LIMIT 1
513             )",
514        )
515        .execute(pool)
516        .await?;
517
518        if dismissed.rows_affected() > 0 {
519            tracing::info!(
520                "Auto-dismissed oldest suggestion (limit: {})",
521                MAX_ACTIVE_SUGGESTIONS
522            );
523        }
524    }
525
526    // Store the new suggestion
527    sqlx::query("INSERT INTO suggestions (type, content) VALUES (?, ?)")
528        .bind(suggestion_type)
529        .bind(content)
530        .execute(pool)
531        .await?;
532
533    tracing::info!("Stored {} suggestion in database", suggestion_type);
534    Ok(())
535}
536
537/// Retrieve active (non-dismissed) suggestions from database
538pub async fn get_active_suggestions(
539    pool: &SqlitePool,
540) -> Result<Vec<crate::db::models::Suggestion>> {
541    let suggestions = sqlx::query_as::<_, crate::db::models::Suggestion>(
542        "SELECT id, type, content, created_at, dismissed \
543         FROM suggestions \
544         WHERE dismissed = 0 \
545         ORDER BY created_at DESC",
546    )
547    .fetch_all(pool)
548    .await?;
549
550    Ok(suggestions)
551}
552
553/// Dismiss a suggestion (mark as read/acted upon)
554pub async fn dismiss_suggestion(pool: &SqlitePool, id: i64) -> Result<()> {
555    sqlx::query("UPDATE suggestions SET dismissed = 1 WHERE id = ?")
556        .bind(id)
557        .execute(pool)
558        .await?;
559
560    Ok(())
561}
562
563/// Dismiss all active suggestions
564pub async fn dismiss_all_suggestions(pool: &SqlitePool) -> Result<usize> {
565    let result = sqlx::query("UPDATE suggestions SET dismissed = 1 WHERE dismissed = 0")
566        .execute(pool)
567        .await?;
568
569    Ok(result.rows_affected() as usize)
570}
571
572/// Clear all dismissed suggestions from database
573pub async fn clear_dismissed_suggestions(pool: &SqlitePool) -> Result<usize> {
574    let result = sqlx::query("DELETE FROM suggestions WHERE dismissed = 1")
575        .execute(pool)
576        .await?;
577
578    Ok(result.rows_affected() as usize)
579}
580
581/// Display suggestions to the user (called from CLI commands)
582pub async fn display_suggestions(pool: &SqlitePool) -> Result<()> {
583    let suggestions = get_active_suggestions(pool).await?;
584
585    if suggestions.is_empty() {
586        return Ok(());
587    }
588
589    // Separate errors from other suggestions
590    let (errors, others): (Vec<_>, Vec<_>) = suggestions
591        .iter()
592        .partition(|s| s.suggestion_type == "error");
593
594    // Display errors first (more urgent)
595    if !errors.is_empty() {
596        eprintln!("\n⚠️  Background Analysis Errors:");
597        for error in &errors {
598            eprintln!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
599            eprintln!("{}", error.content);
600            eprintln!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
601        }
602        eprintln!("\nTo dismiss: ie suggestions dismiss {}", errors[0].id);
603        eprintln!("To dismiss all: ie suggestions dismiss --all");
604    }
605
606    // Display regular suggestions
607    if !others.is_empty() {
608        eprintln!("\n💡 Suggestions:");
609        for suggestion in &others {
610            eprintln!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
611            eprintln!("{}", suggestion.content);
612            eprintln!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
613        }
614        eprintln!("\nTo dismiss: ie suggestions dismiss {}", others[0].id);
615        eprintln!("To list all: ie suggestions list");
616    }
617
618    Ok(())
619}
620
621#[cfg(test)]
622mod tests {
623    use super::*;
624    use crate::test_utils::test_helpers::TestContext;
625
626    #[tokio::test]
627    async fn test_llm_config_resolve_none_when_unconfigured() {
628        let ctx = TestContext::new().await;
629        let config = LlmConfig::resolve(ctx.pool()).await.unwrap();
630        assert!(config.is_none());
631    }
632
633    #[tokio::test]
634    async fn test_llm_config_resolve_partial_returns_none() {
635        let ctx = TestContext::new().await;
636        config_set(ctx.pool(), "llm.endpoint", "http://localhost:8080")
637            .await
638            .unwrap();
639        config_set(ctx.pool(), "llm.model", "gpt-4").await.unwrap();
640        // Missing api_key
641        let config = LlmConfig::resolve(ctx.pool()).await.unwrap();
642        assert!(config.is_none());
643    }
644
645    #[tokio::test]
646    async fn test_llm_config_resolve_full() {
647        let ctx = TestContext::new().await;
648        config_set(
649            ctx.pool(),
650            "llm.endpoint",
651            "http://localhost:8080/v1/chat/completions",
652        )
653        .await
654        .unwrap();
655        config_set(ctx.pool(), "llm.api_key", "sk-test123")
656            .await
657            .unwrap();
658        config_set(ctx.pool(), "llm.model", "gpt-4").await.unwrap();
659
660        let config = LlmConfig::resolve(ctx.pool()).await.unwrap();
661        assert!(config.is_some());
662        let config = config.unwrap();
663        assert_eq!(config.endpoint, "http://localhost:8080/v1/chat/completions");
664        assert_eq!(config.api_key, "sk-test123");
665        assert_eq!(config.model, "gpt-4");
666    }
667
668    #[tokio::test]
669    async fn test_llm_config_save_and_resolve() {
670        let ctx = TestContext::new().await;
671        let config = LlmConfig {
672            endpoint: "http://example.com/v1/chat/completions".to_string(),
673            api_key: "sk-saved".to_string(),
674            model: "claude-3".to_string(),
675        };
676        config.save(ctx.pool()).await.unwrap();
677
678        let resolved = LlmConfig::resolve(ctx.pool()).await.unwrap().unwrap();
679        assert_eq!(resolved.endpoint, "http://example.com/v1/chat/completions");
680        assert_eq!(resolved.api_key, "sk-saved");
681        assert_eq!(resolved.model, "claude-3");
682    }
683
684    #[tokio::test]
685    async fn test_is_configured_false() {
686        let ctx = TestContext::new().await;
687        assert!(!LlmClient::is_configured(ctx.pool()).await);
688    }
689
690    #[tokio::test]
691    async fn test_is_configured_true() {
692        let ctx = TestContext::new().await;
693        config_set(ctx.pool(), "llm.endpoint", "http://localhost:8080")
694            .await
695            .unwrap();
696        config_set(ctx.pool(), "llm.api_key", "sk-test")
697            .await
698            .unwrap();
699        config_set(ctx.pool(), "llm.model", "gpt-4").await.unwrap();
700        assert!(LlmClient::is_configured(ctx.pool()).await);
701    }
702
703    #[tokio::test]
704    async fn test_from_pool_errors_when_unconfigured() {
705        let ctx = TestContext::new().await;
706        let result = LlmClient::from_pool(ctx.pool()).await;
707        assert!(result.is_err());
708    }
709
710    #[tokio::test]
711    async fn test_chat_message_serialization() {
712        let msg = ChatMessage {
713            role: "user".to_string(),
714            content: "Hello".to_string(),
715        };
716        let json = serde_json::to_string(&msg).unwrap();
717        assert!(json.contains("\"role\":\"user\""));
718        assert!(json.contains("\"content\":\"Hello\""));
719    }
720
721    #[tokio::test]
722    async fn test_synthesize_task_description_when_unconfigured() {
723        let ctx = TestContext::new().await;
724
725        // Create a simple event for testing
726        use chrono::Utc;
727        let event = crate::db::models::Event {
728            id: 1,
729            task_id: 1,
730            log_type: "decision".to_string(),
731            discussion_data: "Test decision".to_string(),
732            timestamp: Utc::now(),
733        };
734
735        // Should return None when LLM not configured
736        let result =
737            synthesize_task_description(ctx.pool(), "Test Task", Some("Original spec"), &[event])
738                .await
739                .unwrap();
740
741        assert!(
742            result.is_none(),
743            "Should return None when LLM not configured"
744        );
745    }
746
747    #[tokio::test]
748    async fn test_synthesize_prompt_includes_task_info() {
749        // This test verifies the prompt structure without calling actual LLM
750        use chrono::Utc;
751
752        let events = [
753            crate::db::models::Event {
754                id: 1,
755                task_id: 1,
756                log_type: "decision".to_string(),
757                discussion_data: "Chose approach A".to_string(),
758                timestamp: Utc::now(),
759            },
760            crate::db::models::Event {
761                id: 2,
762                task_id: 1,
763                log_type: "milestone".to_string(),
764                discussion_data: "Completed phase 1".to_string(),
765                timestamp: Utc::now(),
766            },
767        ];
768
769        // Create a mock client (we can't test actual synthesis without LLM endpoint)
770        // But we can verify the prompt construction logic
771        let events_text: String = events
772            .iter()
773            .map(|e| {
774                format!(
775                    "[{}] {} - {}",
776                    e.log_type,
777                    e.timestamp.format("%Y-%m-%d %H:%M"),
778                    e.discussion_data
779                )
780            })
781            .collect::<Vec<_>>()
782            .join("\n");
783
784        // Verify event formatting
785        assert!(events_text.contains("decision"));
786        assert!(events_text.contains("Chose approach A"));
787        assert!(events_text.contains("milestone"));
788        assert!(events_text.contains("Completed phase 1"));
789    }
790
791    #[tokio::test]
792    async fn test_synthesize_with_empty_events() {
793        // Verify handling of tasks with no events
794        let events: Vec<crate::db::models::Event> = vec![];
795
796        // Should handle empty events gracefully
797        // (actual synthesis would still work, just with "No events recorded")
798        assert_eq!(events.len(), 0);
799    }
800
801    #[tokio::test]
802    async fn test_synthesize_with_no_original_spec() {
803        use chrono::Utc;
804
805        let original_spec: Option<&str> = None;
806        let events = [crate::db::models::Event {
807            id: 1,
808            task_id: 1,
809            log_type: "note".to_string(),
810            discussion_data: "Some work done".to_string(),
811            timestamp: Utc::now(),
812        }];
813
814        // Should handle missing original spec
815        // (prompt would use "(No original description)")
816        assert!(original_spec.is_none());
817        assert_eq!(events.len(), 1);
818    }
819
820    #[test]
821    fn test_language_detection() {
822        // Test CJK detection logic
823        let chinese_task = "实现用户认证";
824        let english_task = "Implement authentication";
825        let japanese_task = "認証を実装する";
826        let korean_task = "인증 구현";
827
828        // Chinese
829        let is_cjk = chinese_task.chars().any(|c| {
830            matches!(c,
831                '\u{4E00}'..='\u{9FFF}' |
832                '\u{3400}'..='\u{4DBF}' |
833                '\u{3040}'..='\u{309F}' |
834                '\u{30A0}'..='\u{30FF}' |
835                '\u{AC00}'..='\u{D7AF}'
836            )
837        });
838        assert!(is_cjk, "Should detect Chinese characters");
839
840        // English
841        let is_cjk = english_task.chars().any(|c| {
842            matches!(c,
843                '\u{4E00}'..='\u{9FFF}' |
844                '\u{3400}'..='\u{4DBF}' |
845                '\u{3040}'..='\u{309F}' |
846                '\u{30A0}'..='\u{30FF}' |
847                '\u{AC00}'..='\u{D7AF}'
848            )
849        });
850        assert!(!is_cjk, "Should not detect CJK in English text");
851
852        // Japanese
853        let is_cjk = japanese_task.chars().any(|c| {
854            matches!(c,
855                '\u{4E00}'..='\u{9FFF}' |
856                '\u{3400}'..='\u{4DBF}' |
857                '\u{3040}'..='\u{309F}' |
858                '\u{30A0}'..='\u{30FF}' |
859                '\u{AC00}'..='\u{D7AF}'
860            )
861        });
862        assert!(is_cjk, "Should detect Japanese characters");
863
864        // Korean
865        let is_cjk = korean_task.chars().any(|c| {
866            matches!(c,
867                '\u{4E00}'..='\u{9FFF}' |
868                '\u{3400}'..='\u{4DBF}' |
869                '\u{3040}'..='\u{309F}' |
870                '\u{30A0}'..='\u{30FF}' |
871                '\u{AC00}'..='\u{D7AF}'
872            )
873        });
874        assert!(is_cjk, "Should detect Korean characters");
875    }
876
877    #[tokio::test]
878    async fn test_store_and_retrieve_suggestions() {
879        let ctx = TestContext::new().await;
880
881        // Store a suggestion
882        store_suggestion(
883            ctx.pool(),
884            "task_structure",
885            "## Suggestion\nReorganize task #5 under task #3",
886        )
887        .await
888        .unwrap();
889
890        // Retrieve suggestions
891        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
892        assert_eq!(suggestions.len(), 1);
893        assert_eq!(suggestions[0].suggestion_type, "task_structure");
894        assert!(suggestions[0].content.contains("Reorganize"));
895        assert!(!suggestions[0].dismissed);
896    }
897
898    #[tokio::test]
899    async fn test_dismiss_suggestion() {
900        let ctx = TestContext::new().await;
901
902        // Store a suggestion
903        store_suggestion(ctx.pool(), "task_structure", "Test suggestion")
904            .await
905            .unwrap();
906
907        // Get the suggestion
908        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
909        assert_eq!(suggestions.len(), 1);
910        let suggestion_id = suggestions[0].id;
911
912        // Dismiss it
913        dismiss_suggestion(ctx.pool(), suggestion_id).await.unwrap();
914
915        // Should not appear in active suggestions anymore
916        let active = get_active_suggestions(ctx.pool()).await.unwrap();
917        assert_eq!(active.len(), 0);
918    }
919
920    #[tokio::test]
921    async fn test_multiple_suggestions() {
922        let ctx = TestContext::new().await;
923
924        // Store multiple suggestions
925        store_suggestion(ctx.pool(), "task_structure", "Suggestion 1")
926            .await
927            .unwrap();
928        store_suggestion(ctx.pool(), "task_structure", "Suggestion 2")
929            .await
930            .unwrap();
931        store_suggestion(ctx.pool(), "event_synthesis", "Suggestion 3")
932            .await
933            .unwrap();
934
935        // All should be active
936        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
937        assert_eq!(suggestions.len(), 3);
938
939        // Newest first (ORDER BY created_at DESC)
940        assert!(suggestions[0].content.contains("Suggestion 3"));
941    }
942
943    #[tokio::test]
944    async fn test_dismiss_all_suggestions() {
945        let ctx = TestContext::new().await;
946
947        // Store 3 suggestions
948        store_suggestion(ctx.pool(), "task_structure", "Suggestion 1")
949            .await
950            .unwrap();
951        store_suggestion(ctx.pool(), "task_structure", "Suggestion 2")
952            .await
953            .unwrap();
954        store_suggestion(ctx.pool(), "error", "Error message")
955            .await
956            .unwrap();
957
958        // Verify all active
959        let active = get_active_suggestions(ctx.pool()).await.unwrap();
960        assert_eq!(active.len(), 3);
961
962        // Dismiss all
963        let count = dismiss_all_suggestions(ctx.pool()).await.unwrap();
964        assert_eq!(count, 3);
965
966        // No active suggestions left
967        let remaining = get_active_suggestions(ctx.pool()).await.unwrap();
968        assert_eq!(remaining.len(), 0);
969    }
970
971    #[tokio::test]
972    async fn test_clear_dismissed_suggestions() {
973        let ctx = TestContext::new().await;
974
975        // Store and dismiss some suggestions
976        store_suggestion(ctx.pool(), "task_structure", "Suggestion 1")
977            .await
978            .unwrap();
979        store_suggestion(ctx.pool(), "task_structure", "Suggestion 2")
980            .await
981            .unwrap();
982
983        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
984        dismiss_suggestion(ctx.pool(), suggestions[0].id)
985            .await
986            .unwrap();
987        dismiss_suggestion(ctx.pool(), suggestions[1].id)
988            .await
989            .unwrap();
990
991        // Clear dismissed
992        let count = clear_dismissed_suggestions(ctx.pool()).await.unwrap();
993        assert_eq!(count, 2);
994
995        // Verify they're gone from database
996        let all: Vec<crate::db::models::Suggestion> =
997            sqlx::query_as("SELECT id, type, content, created_at, dismissed FROM suggestions")
998                .fetch_all(ctx.pool())
999                .await
1000                .unwrap();
1001        assert_eq!(all.len(), 0);
1002    }
1003
1004    #[tokio::test]
1005    async fn test_error_suggestion_storage() {
1006        let ctx = TestContext::new().await;
1007
1008        // Store an error suggestion
1009        let error_msg = "LLM API failed: connection timeout";
1010        store_suggestion(ctx.pool(), "error", error_msg)
1011            .await
1012            .unwrap();
1013
1014        // Verify it's stored
1015        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
1016        assert_eq!(suggestions.len(), 1);
1017        assert_eq!(suggestions[0].suggestion_type, "error");
1018        assert!(suggestions[0].content.contains("timeout"));
1019    }
1020
1021    #[test]
1022    fn test_rate_limiting_cooldown() {
1023        // Save original state
1024        let original = LAST_ANALYSIS_TIME.load(Ordering::SeqCst);
1025
1026        // Reset global state to known value
1027        LAST_ANALYSIS_TIME.store(0, Ordering::SeqCst);
1028
1029        // First call should pass
1030        assert!(should_trigger_analysis());
1031        mark_analysis_started();
1032
1033        // Immediate second call should be blocked
1034        assert!(!should_trigger_analysis());
1035
1036        // Simulate time passing (set to past time)
1037        if let Some(now) = get_current_timestamp() {
1038            let past = now - DEFAULT_ANALYSIS_COOLDOWN_SECS - 1;
1039            LAST_ANALYSIS_TIME.store(past, Ordering::SeqCst);
1040
1041            // Now should pass again
1042            assert!(should_trigger_analysis());
1043        }
1044
1045        // Restore original state
1046        LAST_ANALYSIS_TIME.store(original, Ordering::SeqCst);
1047    }
1048
1049    #[test]
1050    fn test_rate_limiting_clock_skew() {
1051        // Save original state
1052        let original = LAST_ANALYSIS_TIME.load(Ordering::SeqCst);
1053
1054        if let Some(now) = get_current_timestamp() {
1055            // Set last analysis to future
1056            let future = now + 1000;
1057            LAST_ANALYSIS_TIME.store(future, Ordering::SeqCst);
1058
1059            // Should detect clock skew and allow analysis
1060            assert!(should_trigger_analysis());
1061
1062            // Timer should be reset to current time
1063            let reset_time = LAST_ANALYSIS_TIME.load(Ordering::SeqCst);
1064            assert!(
1065                reset_time <= now,
1066                "Timer should be reset to current or earlier"
1067            );
1068        }
1069
1070        // Restore original state
1071        LAST_ANALYSIS_TIME.store(original, Ordering::SeqCst);
1072    }
1073
1074    #[test]
1075    fn test_get_current_timestamp_returns_valid() {
1076        // Should always return Some in normal conditions
1077        let ts = get_current_timestamp();
1078        assert!(ts.is_some());
1079
1080        // Should be reasonable (after 2020)
1081        if let Some(timestamp) = ts {
1082            assert!(timestamp > 1577836800); // Jan 1, 2020
1083        }
1084    }
1085
1086    #[tokio::test]
1087    async fn test_max_active_suggestions_limit() {
1088        let ctx = TestContext::new().await;
1089
1090        // Store MAX_ACTIVE_SUGGESTIONS suggestions
1091        for i in 0..MAX_ACTIVE_SUGGESTIONS {
1092            store_suggestion(ctx.pool(), "task_structure", &format!("Suggestion {}", i))
1093                .await
1094                .unwrap();
1095        }
1096
1097        // Verify we have exactly MAX_ACTIVE_SUGGESTIONS
1098        let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM suggestions WHERE dismissed = 0")
1099            .fetch_one(ctx.pool())
1100            .await
1101            .unwrap();
1102        assert_eq!(count, MAX_ACTIVE_SUGGESTIONS);
1103
1104        // Store one more - should auto-dismiss oldest
1105        store_suggestion(ctx.pool(), "task_structure", "New suggestion")
1106            .await
1107            .unwrap();
1108
1109        // Should still have MAX_ACTIVE_SUGGESTIONS (not MAX + 1)
1110        let count_after: i64 =
1111            sqlx::query_scalar("SELECT COUNT(*) FROM suggestions WHERE dismissed = 0")
1112                .fetch_one(ctx.pool())
1113                .await
1114                .unwrap();
1115        assert_eq!(count_after, MAX_ACTIVE_SUGGESTIONS);
1116
1117        // Verify the newest suggestion is there
1118        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
1119        assert!(suggestions[0].content.contains("New suggestion"));
1120
1121        // Verify one was auto-dismissed
1122        let dismissed_count: i64 =
1123            sqlx::query_scalar("SELECT COUNT(*) FROM suggestions WHERE dismissed = 1")
1124                .fetch_one(ctx.pool())
1125                .await
1126                .unwrap();
1127        assert_eq!(dismissed_count, 1);
1128    }
1129
1130    #[tokio::test]
1131    async fn test_error_type_suggestions() {
1132        let ctx = TestContext::new().await;
1133
1134        // Store error suggestion
1135        store_suggestion(ctx.pool(), "error", "## Analysis Error\n\nLLM API failed")
1136            .await
1137            .unwrap();
1138
1139        // Store normal suggestion
1140        store_suggestion(ctx.pool(), "task_structure", "Reorganize task #5")
1141            .await
1142            .unwrap();
1143
1144        // Get all suggestions
1145        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
1146        assert_eq!(suggestions.len(), 2);
1147
1148        // Errors should be distinguishable by type
1149        let errors: Vec<_> = suggestions
1150            .iter()
1151            .filter(|s| s.suggestion_type == "error")
1152            .collect();
1153        assert_eq!(errors.len(), 1);
1154        assert!(errors[0].content.contains("Analysis Error"));
1155    }
1156
1157    #[tokio::test]
1158    async fn test_concurrent_cooldown_check() {
1159        use std::sync::Arc;
1160        use tokio::sync::Barrier;
1161
1162        // Save original state
1163        let original = LAST_ANALYSIS_TIME.load(Ordering::SeqCst);
1164
1165        // Reset to allow analysis
1166        LAST_ANALYSIS_TIME.store(0, Ordering::SeqCst);
1167
1168        // Create barrier for synchronization
1169        let barrier = Arc::new(Barrier::new(5));
1170
1171        // Spawn 5 concurrent checks
1172        let mut handles = vec![];
1173        for _ in 0..5 {
1174            let b = Arc::clone(&barrier);
1175            let handle = tokio::spawn(async move {
1176                // Wait for all tasks to be ready
1177                b.wait().await;
1178
1179                // All check at the same time
1180                should_trigger_analysis()
1181            });
1182            handles.push(handle);
1183        }
1184
1185        // Collect results
1186        let mut results = vec![];
1187        for handle in handles {
1188            results.push(handle.await.unwrap());
1189        }
1190
1191        // At least one should succeed (due to race, might be more in Relaxed)
1192        // But with proper ordering, should be predictable
1193        let success_count = results.iter().filter(|&&r| r).count();
1194        assert!(
1195            success_count > 0,
1196            "At least one concurrent check should succeed"
1197        );
1198
1199        // Restore original state
1200        LAST_ANALYSIS_TIME.store(original, Ordering::SeqCst);
1201    }
1202}