1use crate::cli_handlers::config_commands::{config_get, config_set};
2use crate::error::Result;
3use serde::{Deserialize, Serialize};
4use sqlx::SqlitePool;
5use std::sync::atomic::{AtomicI64, Ordering};
6use std::time::{SystemTime, UNIX_EPOCH};
7
/// Unix timestamp (seconds) of the most recent background-analysis start.
/// Process-global; 0 means "never ran". Read/written with acquire/release
/// atomics by `should_trigger_analysis` / `mark_analysis_started`.
static LAST_ANALYSIS_TIME: AtomicI64 = AtomicI64::new(0);

/// Minimum number of seconds between background structure analyses.
const DEFAULT_ANALYSIS_COOLDOWN_SECS: i64 = 300;

/// Maximum number of non-dismissed suggestions kept at once; `store_suggestion`
/// auto-dismisses the oldest active rows once this cap is reached.
const MAX_ACTIVE_SUGGESTIONS: i64 = 20;
18
/// Current wall-clock time as whole seconds since the Unix epoch.
///
/// Returns `None` when the system clock reads earlier than the epoch
/// (`duration_since` fails); callers treat that as a clock error.
fn get_current_timestamp() -> Option<i64> {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => Some(elapsed.as_secs() as i64),
        Err(_) => None,
    }
}
29
30fn should_trigger_analysis() -> bool {
37 let now = match get_current_timestamp() {
38 Some(ts) => ts,
39 None => {
40 tracing::warn!("System clock error, allowing analysis as fail-safe");
41 return true;
42 },
43 };
44
45 let last = LAST_ANALYSIS_TIME.load(Ordering::Acquire);
46
47 if now < last {
49 tracing::warn!(
50 "Clock skew detected: current={}, last={}, resetting analysis timer",
51 now,
52 last
53 );
54 LAST_ANALYSIS_TIME.store(now, Ordering::Release);
55 return true;
56 }
57
58 let cooldown = DEFAULT_ANALYSIS_COOLDOWN_SECS;
60
61 now - last >= cooldown
62}
63
64fn mark_analysis_started() {
69 if let Some(now) = get_current_timestamp() {
70 LAST_ANALYSIS_TIME.store(now, Ordering::Release);
71 } else {
72 tracing::warn!("System clock error, cannot update analysis timestamp");
73 }
74}
75
/// Connection settings for an OpenAI-compatible chat-completions endpoint.
///
/// Resolved by [`LlmConfig::resolve`]: environment variables first, then the
/// persisted config table.
#[derive(Debug, Clone)]
pub struct LlmConfig {
    /// Full URL of the chat-completions endpoint (POSTed to directly).
    pub endpoint: String,
    /// Token sent as `Authorization: Bearer <api_key>`.
    pub api_key: String,
    /// Model identifier forwarded in the request body.
    pub model: String,
}
83
/// One chat message in the OpenAI-style request schema.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatMessage {
    /// Speaker role; this module only sends "user" (see `LlmClient::chat`).
    pub role: String,
    /// Message text.
    pub content: String,
}
90
/// JSON request body sent to the chat-completions endpoint.
#[derive(Debug, Serialize)]
struct ChatRequest {
    model: String,
    messages: Vec<ChatMessage>,
}
97
/// Minimal view of the chat-completions response; only the fields this
/// module reads are deserialized.
#[derive(Debug, Deserialize)]
struct ChatResponse {
    choices: Vec<ChatChoice>,
}
103
/// One completion choice; only the message payload is used.
#[derive(Debug, Deserialize)]
struct ChatChoice {
    message: ChatResponseMessage,
}
108
/// The assistant message inside a completion choice.
#[derive(Debug, Deserialize)]
struct ChatResponseMessage {
    content: String,
}
113
114impl LlmConfig {
115 pub async fn resolve(pool: &SqlitePool) -> Result<Option<Self>> {
118 let endpoint = Self::resolve_field(pool, "IE_LLM_ENDPOINT", "llm.endpoint").await?;
119 let api_key = Self::resolve_field(pool, "IE_LLM_API_KEY", "llm.api_key").await?;
120 let model = Self::resolve_field(pool, "IE_LLM_MODEL", "llm.model").await?;
121
122 match (endpoint, api_key, model) {
123 (Some(endpoint), Some(api_key), Some(model)) => Ok(Some(Self {
124 endpoint,
125 api_key,
126 model,
127 })),
128 _ => Ok(None),
129 }
130 }
131
132 async fn resolve_field(
134 pool: &SqlitePool,
135 env_var: &str,
136 config_key: &str,
137 ) -> Result<Option<String>> {
138 if let Ok(val) = std::env::var(env_var) {
140 if !val.is_empty() {
141 return Ok(Some(val));
142 }
143 }
144
145 config_get(pool, config_key).await
147 }
148
149 pub async fn save(&self, pool: &SqlitePool) -> Result<()> {
151 config_set(pool, "llm.endpoint", &self.endpoint).await?;
152 config_set(pool, "llm.api_key", &self.api_key).await?;
153 config_set(pool, "llm.model", &self.model).await?;
154 Ok(())
155 }
156}
157
/// Thin HTTP client bound to a resolved [`LlmConfig`].
pub struct LlmClient {
    config: LlmConfig,
    // reqwest::Client built once in `from_pool` with a 30s request timeout.
    client: reqwest::Client,
}
163
impl LlmClient {
    /// Builds a client from the resolved configuration.
    ///
    /// # Errors
    /// `InvalidInput` when the LLM is not configured; `OtherError` if the
    /// HTTP client cannot be constructed.
    pub async fn from_pool(pool: &SqlitePool) -> Result<Self> {
        let config = LlmConfig::resolve(pool).await?.ok_or_else(|| {
            crate::error::IntentError::InvalidInput(
                "LLM not configured. Set llm.endpoint, llm.api_key, and llm.model via 'ie config set' or environment variables (IE_LLM_ENDPOINT, IE_LLM_API_KEY, IE_LLM_MODEL).".to_string(),
            )
        })?;

        // 30s request timeout guards against a hung LLM endpoint.
        let client = reqwest::Client::builder()
            .timeout(std::time::Duration::from_secs(30))
            .build()
            .map_err(|e| crate::error::IntentError::OtherError(e.into()))?;

        Ok(Self { config, client })
    }

    /// True when a full configuration resolves. Note: resolution errors
    /// (e.g. DB failures) also yield `false`.
    pub async fn is_configured(pool: &SqlitePool) -> bool {
        matches!(LlmConfig::resolve(pool).await, Ok(Some(_)))
    }

    /// Sends a single-turn "user" prompt and returns the assistant's reply.
    pub async fn chat(&self, prompt: &str) -> Result<String> {
        let messages = vec![ChatMessage {
            role: "user".to_string(),
            content: prompt.to_string(),
        }];
        self.chat_with_messages(messages).await
    }

    /// POSTs the messages to the configured endpoint and returns the content
    /// of the first completion choice.
    ///
    /// # Errors
    /// `OtherError` on network failure, non-2xx HTTP status (the response
    /// body is included in the message), a malformed response body, or an
    /// empty `choices` array.
    pub async fn chat_with_messages(&self, messages: Vec<ChatMessage>) -> Result<String> {
        let request = ChatRequest {
            model: self.config.model.clone(),
            messages,
        };

        let response = self
            .client
            .post(&self.config.endpoint)
            .header("Authorization", format!("Bearer {}", self.config.api_key))
            .header("Content-Type", "application/json")
            .json(&request)
            .send()
            .await
            .map_err(|e| crate::error::IntentError::OtherError(e.into()))?;

        if !response.status().is_success() {
            let status = response.status();
            // Best-effort body capture so API error details reach the caller.
            let body = response
                .text()
                .await
                .unwrap_or_else(|_| "(no body)".to_string());
            return Err(crate::error::IntentError::OtherError(anyhow::anyhow!(
                "LLM API error (HTTP {}): {}",
                status,
                body
            )));
        }

        let chat_response: ChatResponse = response
            .json()
            .await
            .map_err(|e| crate::error::IntentError::OtherError(e.into()))?;

        chat_response
            .choices
            .into_iter()
            .next()
            .map(|c| c.message.content)
            .ok_or_else(|| {
                crate::error::IntentError::OtherError(anyhow::anyhow!("LLM returned empty choices"))
            })
    }

    /// Asks the LLM to summarize a completed task from its event history,
    /// returning a markdown document (Goal / Approach / Key Decisions /
    /// Outcome sections per the prompt).
    pub async fn synthesize_task_description(
        &self,
        task_name: &str,
        original_spec: Option<&str>,
        events: &[crate::db::models::Event],
    ) -> Result<String> {
        // One "[type] timestamp - text" line per event, chronological order
        // as provided by the caller.
        let events_text = if events.is_empty() {
            "No events recorded.".to_string()
        } else {
            events
                .iter()
                .map(|e| {
                    format!(
                        "[{}] {} - {}",
                        e.log_type,
                        e.timestamp.format("%Y-%m-%d %H:%M"),
                        e.discussion_data
                    )
                })
                .collect::<Vec<_>>()
                .join("\n")
        };

        let original_spec_text = original_spec.unwrap_or("(No original description)");

        // Any CJK character in the task name switches the reply language.
        // Ranges: CJK Unified (+Ext A), Hiragana, Katakana, Hangul syllables.
        let is_cjk = task_name.chars().any(|c| {
            matches!(c,
                '\u{4E00}'..='\u{9FFF}' | '\u{3400}'..='\u{4DBF}' | '\u{3040}'..='\u{309F}' | '\u{30A0}'..='\u{30FF}' | '\u{AC00}'..='\u{D7AF}' )
        });

        let language_instruction = if is_cjk {
            "Respond in Chinese (中文)."
        } else {
            "Respond in English."
        };

        let prompt = format!(
            r#"You are summarizing a completed task based on its execution history.

Task: {}
Original description: {}

Events (chronological):
{}

Synthesize a clear, structured description capturing:
1. Goal (what was the objective?)
2. Approach (how was it accomplished?)
3. Key Decisions (what choices were made and why?)
4. Outcome (what was delivered?)

Use markdown format with ## headers. Be concise but preserve critical context.
Output ONLY the markdown summary, no preamble or explanation.

IMPORTANT: {}"#,
            task_name, original_spec_text, events_text, language_instruction
        );

        self.chat(&prompt).await
    }
}
314
315pub async fn synthesize_task_description(
319 pool: &SqlitePool,
320 task_name: &str,
321 original_spec: Option<&str>,
322 events: &[crate::db::models::Event],
323) -> Result<Option<String>> {
324 if !LlmClient::is_configured(pool).await {
326 return Ok(None);
327 }
328
329 let client = LlmClient::from_pool(pool).await?;
331 let synthesis = client
332 .synthesize_task_description(task_name, original_spec, events)
333 .await?;
334
335 Ok(Some(synthesis))
336}
337
338pub fn analyze_task_structure_background(pool: SqlitePool) {
346 if !should_trigger_analysis() {
348 tracing::debug!("Analysis cooldown active, skipping background analysis");
349 return;
350 }
351
352 mark_analysis_started();
354
355 tokio::spawn(async move {
356 if let Err(e) = analyze_and_store_suggestions(&pool).await {
357 let error_msg = format!(
359 "## Analysis Error\n\n\
360 Background task structure analysis failed: {}\n\n\
361 This may indicate:\n\
362 - LLM API endpoint is unreachable\n\
363 - API quota exceeded\n\
364 - Network connectivity issues\n\n\
365 Check logs for details: `ie log`",
366 e
367 );
368
369 match store_suggestion(&pool, "error", &error_msg).await {
371 Ok(_) => {
372 tracing::warn!("Background task analysis failed: {}", e);
373 },
374 Err(store_err) => {
375 tracing::error!("Failed to store error suggestion: {}", store_err);
378 eprintln!(
379 "\n⚠️ Background analysis failed AND couldn't store error.\n\
380 Analysis error: {}\n\
381 Storage error: {}\n\
382 This may indicate database issues.",
383 e, store_err
384 );
385 },
386 }
387 }
388 });
389}
390
391async fn analyze_and_store_suggestions(pool: &SqlitePool) -> Result<()> {
393 if !LlmClient::is_configured(pool).await {
395 return Ok(()); }
397
398 let tasks: Vec<crate::db::models::Task> = sqlx::query_as(
400 "SELECT id, parent_id, name, spec, status, complexity, priority, \
401 first_todo_at, first_doing_at, first_done_at, active_form, owner, metadata \
402 FROM tasks ORDER BY id",
403 )
404 .fetch_all(pool)
405 .await?;
406
407 if tasks.len() < 5 {
409 return Ok(());
410 }
411
412 let analysis = perform_structure_analysis(pool, &tasks).await?;
413
414 if !analysis.contains("no reorganization needed") && !analysis.contains("looks good") {
416 store_suggestion(pool, "task_structure", &analysis).await?;
417 }
418
419 Ok(())
420}
421
/// Builds a prompt describing the task tree and asks the LLM for
/// reorganization suggestions, returned as markdown.
///
/// # Errors
/// Propagates LLM client construction and chat failures.
async fn perform_structure_analysis(
    pool: &SqlitePool,
    tasks: &[crate::db::models::Task],
) -> Result<String> {
    // One "#id name [status] (parent: …)" line per task for the prompt.
    let task_summary = tasks
        .iter()
        .map(|t| {
            format!(
                "#{} {} [{}] (parent: {})",
                t.id,
                t.name,
                t.status,
                t.parent_id.map_or("none".to_string(), |p| p.to_string())
            )
        })
        .collect::<Vec<_>>()
        .join("\n");

    // Any CJK character in any task name switches the reply language.
    // Ranges: CJK Unified (+Ext A), Hiragana, Katakana, Hangul syllables.
    let is_cjk = tasks.iter().any(|t| {
        t.name.chars().any(|c| {
            matches!(c,
                '\u{4E00}'..='\u{9FFF}' |
                '\u{3400}'..='\u{4DBF}' |
                '\u{3040}'..='\u{309F}' |
                '\u{30A0}'..='\u{30FF}' |
                '\u{AC00}'..='\u{D7AF}'
            )
        })
    });

    let language_instruction = if is_cjk {
        "Respond in Chinese (中文)."
    } else {
        "Respond in English."
    };

    // NOTE: the "no reorganization needed" sentinel phrase below is what
    // analyze_and_store_suggestions checks for when filtering replies.
    let prompt = format!(
        r#"You are analyzing a task hierarchy for structural issues.

Current task tree:
{}

Identify tasks that should be reorganized:
1. Semantically related tasks that should be grouped under a common parent
2. Root tasks that could be subtasks of existing tasks
3. Tasks with similar names or themes that should share a parent

For each suggestion:
- Explain WHY the reorganization makes sense
- Provide the EXACT command to execute
- Only suggest if there's clear semantic relationship

Output format:
## Suggestion 1: [Brief description]
**Reason**: [Why this makes sense]
**Command**: `ie task update <id> --parent <parent_id>`

If no reorganization needed, respond with: "Task structure looks good, no reorganization needed."

IMPORTANT: {}"#,
        task_summary, language_instruction
    );

    let client = LlmClient::from_pool(pool).await?;
    let analysis = client.chat(&prompt).await?;

    Ok(analysis)
}
493
494async fn store_suggestion(pool: &SqlitePool, suggestion_type: &str, content: &str) -> Result<()> {
499 let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM suggestions WHERE dismissed = 0")
501 .fetch_one(pool)
502 .await?;
503
504 if count >= MAX_ACTIVE_SUGGESTIONS {
506 let dismissed = sqlx::query(
507 "UPDATE suggestions SET dismissed = 1
508 WHERE id IN (
509 SELECT id FROM suggestions
510 WHERE dismissed = 0
511 ORDER BY created_at ASC
512 LIMIT 1
513 )",
514 )
515 .execute(pool)
516 .await?;
517
518 if dismissed.rows_affected() > 0 {
519 tracing::info!(
520 "Auto-dismissed oldest suggestion (limit: {})",
521 MAX_ACTIVE_SUGGESTIONS
522 );
523 }
524 }
525
526 sqlx::query("INSERT INTO suggestions (type, content) VALUES (?, ?)")
528 .bind(suggestion_type)
529 .bind(content)
530 .execute(pool)
531 .await?;
532
533 tracing::info!("Stored {} suggestion in database", suggestion_type);
534 Ok(())
535}
536
537pub async fn get_active_suggestions(
539 pool: &SqlitePool,
540) -> Result<Vec<crate::db::models::Suggestion>> {
541 let suggestions = sqlx::query_as::<_, crate::db::models::Suggestion>(
542 "SELECT id, type, content, created_at, dismissed \
543 FROM suggestions \
544 WHERE dismissed = 0 \
545 ORDER BY created_at DESC",
546 )
547 .fetch_all(pool)
548 .await?;
549
550 Ok(suggestions)
551}
552
553pub async fn dismiss_suggestion(pool: &SqlitePool, id: i64) -> Result<()> {
555 sqlx::query("UPDATE suggestions SET dismissed = 1 WHERE id = ?")
556 .bind(id)
557 .execute(pool)
558 .await?;
559
560 Ok(())
561}
562
563pub async fn dismiss_all_suggestions(pool: &SqlitePool) -> Result<usize> {
565 let result = sqlx::query("UPDATE suggestions SET dismissed = 1 WHERE dismissed = 0")
566 .execute(pool)
567 .await?;
568
569 Ok(result.rows_affected() as usize)
570}
571
572pub async fn clear_dismissed_suggestions(pool: &SqlitePool) -> Result<usize> {
574 let result = sqlx::query("DELETE FROM suggestions WHERE dismissed = 1")
575 .execute(pool)
576 .await?;
577
578 Ok(result.rows_affected() as usize)
579}
580
581pub async fn display_suggestions(pool: &SqlitePool) -> Result<()> {
583 let suggestions = get_active_suggestions(pool).await?;
584
585 if suggestions.is_empty() {
586 return Ok(());
587 }
588
589 let (errors, others): (Vec<_>, Vec<_>) = suggestions
591 .iter()
592 .partition(|s| s.suggestion_type == "error");
593
594 if !errors.is_empty() {
596 eprintln!("\n⚠️ Background Analysis Errors:");
597 for error in &errors {
598 eprintln!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
599 eprintln!("{}", error.content);
600 eprintln!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
601 }
602 eprintln!("\nTo dismiss: ie suggestions dismiss {}", errors[0].id);
603 eprintln!("To dismiss all: ie suggestions dismiss --all");
604 }
605
606 if !others.is_empty() {
608 eprintln!("\n💡 Suggestions:");
609 for suggestion in &others {
610 eprintln!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
611 eprintln!("{}", suggestion.content);
612 eprintln!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
613 }
614 eprintln!("\nTo dismiss: ie suggestions dismiss {}", others[0].id);
615 eprintln!("To list all: ie suggestions list");
616 }
617
618 Ok(())
619}
620
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_utils::test_helpers::TestContext;

    /// Serializes the tests that mutate the process-global `LAST_ANALYSIS_TIME`.
    /// Cargo runs tests on parallel threads, so without this lock the
    /// rate-limiting tests race on the shared atomic and fail intermittently.
    static ANALYSIS_TIME_LOCK: std::sync::Mutex<()> = std::sync::Mutex::new(());

    /// Acquires the analysis-time lock, recovering from poisoning so one
    /// panicked test doesn't cascade failures into the others.
    fn lock_analysis_time() -> std::sync::MutexGuard<'static, ()> {
        ANALYSIS_TIME_LOCK
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
    }

    #[tokio::test]
    async fn test_llm_config_resolve_none_when_unconfigured() {
        let ctx = TestContext::new().await;
        let config = LlmConfig::resolve(ctx.pool()).await.unwrap();
        assert!(config.is_none());
    }

    #[tokio::test]
    async fn test_llm_config_resolve_partial_returns_none() {
        // api_key is intentionally left unset: partial config must not resolve.
        let ctx = TestContext::new().await;
        config_set(ctx.pool(), "llm.endpoint", "http://localhost:8080")
            .await
            .unwrap();
        config_set(ctx.pool(), "llm.model", "gpt-4").await.unwrap();
        let config = LlmConfig::resolve(ctx.pool()).await.unwrap();
        assert!(config.is_none());
    }

    #[tokio::test]
    async fn test_llm_config_resolve_full() {
        let ctx = TestContext::new().await;
        config_set(
            ctx.pool(),
            "llm.endpoint",
            "http://localhost:8080/v1/chat/completions",
        )
        .await
        .unwrap();
        config_set(ctx.pool(), "llm.api_key", "sk-test123")
            .await
            .unwrap();
        config_set(ctx.pool(), "llm.model", "gpt-4").await.unwrap();

        let config = LlmConfig::resolve(ctx.pool()).await.unwrap();
        assert!(config.is_some());
        let config = config.unwrap();
        assert_eq!(config.endpoint, "http://localhost:8080/v1/chat/completions");
        assert_eq!(config.api_key, "sk-test123");
        assert_eq!(config.model, "gpt-4");
    }

    #[tokio::test]
    async fn test_llm_config_save_and_resolve() {
        let ctx = TestContext::new().await;
        let config = LlmConfig {
            endpoint: "http://example.com/v1/chat/completions".to_string(),
            api_key: "sk-saved".to_string(),
            model: "claude-3".to_string(),
        };
        config.save(ctx.pool()).await.unwrap();

        let resolved = LlmConfig::resolve(ctx.pool()).await.unwrap().unwrap();
        assert_eq!(resolved.endpoint, "http://example.com/v1/chat/completions");
        assert_eq!(resolved.api_key, "sk-saved");
        assert_eq!(resolved.model, "claude-3");
    }

    #[tokio::test]
    async fn test_is_configured_false() {
        let ctx = TestContext::new().await;
        assert!(!LlmClient::is_configured(ctx.pool()).await);
    }

    #[tokio::test]
    async fn test_is_configured_true() {
        let ctx = TestContext::new().await;
        config_set(ctx.pool(), "llm.endpoint", "http://localhost:8080")
            .await
            .unwrap();
        config_set(ctx.pool(), "llm.api_key", "sk-test")
            .await
            .unwrap();
        config_set(ctx.pool(), "llm.model", "gpt-4").await.unwrap();
        assert!(LlmClient::is_configured(ctx.pool()).await);
    }

    #[tokio::test]
    async fn test_from_pool_errors_when_unconfigured() {
        let ctx = TestContext::new().await;
        let result = LlmClient::from_pool(ctx.pool()).await;
        assert!(result.is_err());
    }

    #[tokio::test]
    async fn test_chat_message_serialization() {
        let msg = ChatMessage {
            role: "user".to_string(),
            content: "Hello".to_string(),
        };
        let json = serde_json::to_string(&msg).unwrap();
        assert!(json.contains("\"role\":\"user\""));
        assert!(json.contains("\"content\":\"Hello\""));
    }

    #[tokio::test]
    async fn test_synthesize_task_description_when_unconfigured() {
        let ctx = TestContext::new().await;

        use chrono::Utc;
        let event = crate::db::models::Event {
            id: 1,
            task_id: 1,
            log_type: "decision".to_string(),
            discussion_data: "Test decision".to_string(),
            timestamp: Utc::now(),
        };

        let result =
            synthesize_task_description(ctx.pool(), "Test Task", Some("Original spec"), &[event])
                .await
                .unwrap();

        assert!(
            result.is_none(),
            "Should return None when LLM not configured"
        );
    }

    #[tokio::test]
    async fn test_synthesize_prompt_includes_task_info() {
        use chrono::Utc;

        let events = [
            crate::db::models::Event {
                id: 1,
                task_id: 1,
                log_type: "decision".to_string(),
                discussion_data: "Chose approach A".to_string(),
                timestamp: Utc::now(),
            },
            crate::db::models::Event {
                id: 2,
                task_id: 1,
                log_type: "milestone".to_string(),
                discussion_data: "Completed phase 1".to_string(),
                timestamp: Utc::now(),
            },
        ];

        // Mirrors the event-line formatting used by synthesize_task_description.
        let events_text: String = events
            .iter()
            .map(|e| {
                format!(
                    "[{}] {} - {}",
                    e.log_type,
                    e.timestamp.format("%Y-%m-%d %H:%M"),
                    e.discussion_data
                )
            })
            .collect::<Vec<_>>()
            .join("\n");

        assert!(events_text.contains("decision"));
        assert!(events_text.contains("Chose approach A"));
        assert!(events_text.contains("milestone"));
        assert!(events_text.contains("Completed phase 1"));
    }

    #[tokio::test]
    async fn test_synthesize_with_empty_events() {
        let events: Vec<crate::db::models::Event> = vec![];

        assert_eq!(events.len(), 0);
    }

    #[tokio::test]
    async fn test_synthesize_with_no_original_spec() {
        use chrono::Utc;

        let original_spec: Option<&str> = None;
        let events = [crate::db::models::Event {
            id: 1,
            task_id: 1,
            log_type: "note".to_string(),
            discussion_data: "Some work done".to_string(),
            timestamp: Utc::now(),
        }];

        assert!(original_spec.is_none());
        assert_eq!(events.len(), 1);
    }

    #[test]
    fn test_language_detection() {
        let chinese_task = "实现用户认证";
        let english_task = "Implement authentication";
        let japanese_task = "認証を実装する";
        let korean_task = "인증 구현";

        // Same CJK ranges used by the production code: CJK Unified (+Ext A),
        // Hiragana, Katakana, Hangul syllables.
        fn contains_cjk(text: &str) -> bool {
            text.chars().any(|c| {
                matches!(c,
                    '\u{4E00}'..='\u{9FFF}' |
                    '\u{3400}'..='\u{4DBF}' |
                    '\u{3040}'..='\u{309F}' |
                    '\u{30A0}'..='\u{30FF}' |
                    '\u{AC00}'..='\u{D7AF}'
                )
            })
        }

        assert!(contains_cjk(chinese_task), "Should detect Chinese characters");
        assert!(
            !contains_cjk(english_task),
            "Should not detect CJK in English text"
        );
        assert!(
            contains_cjk(japanese_task),
            "Should detect Japanese characters"
        );
        assert!(contains_cjk(korean_task), "Should detect Korean characters");
    }

    #[tokio::test]
    async fn test_store_and_retrieve_suggestions() {
        let ctx = TestContext::new().await;

        store_suggestion(
            ctx.pool(),
            "task_structure",
            "## Suggestion\nReorganize task #5 under task #3",
        )
        .await
        .unwrap();

        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
        assert_eq!(suggestions.len(), 1);
        assert_eq!(suggestions[0].suggestion_type, "task_structure");
        assert!(suggestions[0].content.contains("Reorganize"));
        assert!(!suggestions[0].dismissed);
    }

    #[tokio::test]
    async fn test_dismiss_suggestion() {
        let ctx = TestContext::new().await;

        store_suggestion(ctx.pool(), "task_structure", "Test suggestion")
            .await
            .unwrap();

        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
        assert_eq!(suggestions.len(), 1);
        let suggestion_id = suggestions[0].id;

        dismiss_suggestion(ctx.pool(), suggestion_id).await.unwrap();

        let active = get_active_suggestions(ctx.pool()).await.unwrap();
        assert_eq!(active.len(), 0);
    }

    #[tokio::test]
    async fn test_multiple_suggestions() {
        let ctx = TestContext::new().await;

        store_suggestion(ctx.pool(), "task_structure", "Suggestion 1")
            .await
            .unwrap();
        store_suggestion(ctx.pool(), "task_structure", "Suggestion 2")
            .await
            .unwrap();
        store_suggestion(ctx.pool(), "event_synthesis", "Suggestion 3")
            .await
            .unwrap();

        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
        assert_eq!(suggestions.len(), 3);

        // Newest first per get_active_suggestions' ORDER BY created_at DESC.
        assert!(suggestions[0].content.contains("Suggestion 3"));
    }

    #[tokio::test]
    async fn test_dismiss_all_suggestions() {
        let ctx = TestContext::new().await;

        store_suggestion(ctx.pool(), "task_structure", "Suggestion 1")
            .await
            .unwrap();
        store_suggestion(ctx.pool(), "task_structure", "Suggestion 2")
            .await
            .unwrap();
        store_suggestion(ctx.pool(), "error", "Error message")
            .await
            .unwrap();

        let active = get_active_suggestions(ctx.pool()).await.unwrap();
        assert_eq!(active.len(), 3);

        let count = dismiss_all_suggestions(ctx.pool()).await.unwrap();
        assert_eq!(count, 3);

        let remaining = get_active_suggestions(ctx.pool()).await.unwrap();
        assert_eq!(remaining.len(), 0);
    }

    #[tokio::test]
    async fn test_clear_dismissed_suggestions() {
        let ctx = TestContext::new().await;

        store_suggestion(ctx.pool(), "task_structure", "Suggestion 1")
            .await
            .unwrap();
        store_suggestion(ctx.pool(), "task_structure", "Suggestion 2")
            .await
            .unwrap();

        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
        dismiss_suggestion(ctx.pool(), suggestions[0].id)
            .await
            .unwrap();
        dismiss_suggestion(ctx.pool(), suggestions[1].id)
            .await
            .unwrap();

        let count = clear_dismissed_suggestions(ctx.pool()).await.unwrap();
        assert_eq!(count, 2);

        let all: Vec<crate::db::models::Suggestion> =
            sqlx::query_as("SELECT id, type, content, created_at, dismissed FROM suggestions")
                .fetch_all(ctx.pool())
                .await
                .unwrap();
        assert_eq!(all.len(), 0);
    }

    #[tokio::test]
    async fn test_error_suggestion_storage() {
        let ctx = TestContext::new().await;

        let error_msg = "LLM API failed: connection timeout";
        store_suggestion(ctx.pool(), "error", error_msg)
            .await
            .unwrap();

        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
        assert_eq!(suggestions.len(), 1);
        assert_eq!(suggestions[0].suggestion_type, "error");
        assert!(suggestions[0].content.contains("timeout"));
    }

    #[test]
    fn test_rate_limiting_cooldown() {
        // Mutates the global timer — must hold the lock for the whole test.
        let _guard = lock_analysis_time();
        let original = LAST_ANALYSIS_TIME.load(Ordering::SeqCst);

        LAST_ANALYSIS_TIME.store(0, Ordering::SeqCst);

        // last == 0 means the cooldown expired long ago.
        assert!(should_trigger_analysis());
        mark_analysis_started();

        // Immediately after marking, the cooldown blocks a second run.
        assert!(!should_trigger_analysis());

        if let Some(now) = get_current_timestamp() {
            // Pretend the last run finished just past the cooldown window.
            let past = now - DEFAULT_ANALYSIS_COOLDOWN_SECS - 1;
            LAST_ANALYSIS_TIME.store(past, Ordering::SeqCst);

            assert!(should_trigger_analysis());
        }

        LAST_ANALYSIS_TIME.store(original, Ordering::SeqCst);
    }

    #[test]
    fn test_rate_limiting_clock_skew() {
        // Mutates the global timer — must hold the lock for the whole test.
        let _guard = lock_analysis_time();
        let original = LAST_ANALYSIS_TIME.load(Ordering::SeqCst);

        if let Some(now) = get_current_timestamp() {
            // A "last run" in the future simulates the clock jumping backwards.
            let future = now + 1000;
            LAST_ANALYSIS_TIME.store(future, Ordering::SeqCst);

            assert!(should_trigger_analysis());

            let reset_time = LAST_ANALYSIS_TIME.load(Ordering::SeqCst);
            assert!(
                reset_time <= now + 1,
                "Timer should be reset to roughly the current time"
            );
        }

        LAST_ANALYSIS_TIME.store(original, Ordering::SeqCst);
    }

    #[test]
    fn test_get_current_timestamp_returns_valid() {
        let ts = get_current_timestamp();
        assert!(ts.is_some());

        if let Some(timestamp) = ts {
            // 1577836800 == 2020-01-01T00:00:00Z; any sane clock is later.
            assert!(timestamp > 1577836800);
        }
    }

    #[tokio::test]
    async fn test_max_active_suggestions_limit() {
        let ctx = TestContext::new().await;

        for i in 0..MAX_ACTIVE_SUGGESTIONS {
            store_suggestion(ctx.pool(), "task_structure", &format!("Suggestion {}", i))
                .await
                .unwrap();
        }

        let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM suggestions WHERE dismissed = 0")
            .fetch_one(ctx.pool())
            .await
            .unwrap();
        assert_eq!(count, MAX_ACTIVE_SUGGESTIONS);

        // One more insert must auto-dismiss the oldest active suggestion.
        store_suggestion(ctx.pool(), "task_structure", "New suggestion")
            .await
            .unwrap();

        let count_after: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM suggestions WHERE dismissed = 0")
                .fetch_one(ctx.pool())
                .await
                .unwrap();
        assert_eq!(count_after, MAX_ACTIVE_SUGGESTIONS);

        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
        assert!(suggestions[0].content.contains("New suggestion"));

        let dismissed_count: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM suggestions WHERE dismissed = 1")
                .fetch_one(ctx.pool())
                .await
                .unwrap();
        assert_eq!(dismissed_count, 1);
    }

    #[tokio::test]
    async fn test_error_type_suggestions() {
        let ctx = TestContext::new().await;

        store_suggestion(ctx.pool(), "error", "## Analysis Error\n\nLLM API failed")
            .await
            .unwrap();

        store_suggestion(ctx.pool(), "task_structure", "Reorganize task #5")
            .await
            .unwrap();

        let suggestions = get_active_suggestions(ctx.pool()).await.unwrap();
        assert_eq!(suggestions.len(), 2);

        let errors: Vec<_> = suggestions
            .iter()
            .filter(|s| s.suggestion_type == "error")
            .collect();
        assert_eq!(errors.len(), 1);
        assert!(errors[0].content.contains("Analysis Error"));
    }

    #[tokio::test]
    async fn test_concurrent_cooldown_check() {
        use std::sync::Arc;
        use tokio::sync::Barrier;

        // Mutates the global timer — serialize against the other rate-limit
        // tests (held across awaits; no task in this runtime re-acquires it).
        let _guard = lock_analysis_time();
        let original = LAST_ANALYSIS_TIME.load(Ordering::SeqCst);

        LAST_ANALYSIS_TIME.store(0, Ordering::SeqCst);

        let barrier = Arc::new(Barrier::new(5));

        let mut handles = vec![];
        for _ in 0..5 {
            let b = Arc::clone(&barrier);
            let handle = tokio::spawn(async move {
                b.wait().await;

                should_trigger_analysis()
            });
            handles.push(handle);
        }

        let mut results = vec![];
        for handle in handles {
            results.push(handle.await.unwrap());
        }

        // should_trigger_analysis is check-only (no claim), so several
        // concurrent callers may succeed; at minimum one must.
        let success_count = results.iter().filter(|&&r| r).count();
        assert!(
            success_count > 0,
            "At least one concurrent check should succeed"
        );

        LAST_ANALYSIS_TIME.store(original, Ordering::SeqCst);
    }
}