scud/storage/mod.rs

use anyhow::{Context, Result};
use fs2::FileExt;
use std::collections::HashMap;
use std::fs::{self, File, OpenOptions};
use std::path::{Path, PathBuf};
use std::sync::RwLock;
use std::thread;
use std::time::Duration;

use crate::config::Config;
use crate::formats::{parse_scg, serialize_scg};
use crate::models::Phase;

pub struct Storage {
    project_root: PathBuf,
    /// Cache for active group to avoid repeated workflow state loads
    /// Option<Option<String>> represents: None = not cached, Some(None) = no active group, Some(Some(tag)) = cached tag
    /// Uses RwLock for thread safety (useful for tests and potential daemon mode)
    active_group_cache: RwLock<Option<Option<String>>>,
}

impl Storage {
    pub fn new(project_root: Option<PathBuf>) -> Self {
        let root = project_root.unwrap_or_else(|| std::env::current_dir().unwrap());
        Storage {
            project_root: root,
            active_group_cache: RwLock::new(None),
        }
    }

    /// Get the project root directory
    pub fn project_root(&self) -> &Path {
        &self.project_root
    }

    /// Acquire an exclusive file lock with retry logic
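    ///
    /// With `max_retries = 10` (the value callers in this module pass), the delays are
    /// 10, 20, 40, 80, 160, 320, 640, 1000, 1000, 1000 ms, so the worst case waits
    /// roughly 4.3 seconds before giving up.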
    fn acquire_lock_with_retry(&self, file: &File, max_retries: u32) -> Result<()> {
        let mut retries = 0;
        let mut delay_ms = 10;

        loop {
            match file.try_lock_exclusive() {
                Ok(_) => return Ok(()),
                Err(_) if retries < max_retries => {
                    retries += 1;
                    thread::sleep(Duration::from_millis(delay_ms));
                    delay_ms = (delay_ms * 2).min(1000); // Exponential backoff, max 1s
                }
                Err(e) => {
                    anyhow::bail!(
                        "Failed to acquire file lock after {} retries: {}",
                        max_retries,
                        e
                    )
                }
            }
        }
    }

    /// Perform a locked write operation on a file
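    ///
    /// # Example
    ///
    /// Illustrative sketch (marked `ignore`, not compiled as a doctest); mirrors how the
    /// tests below call it:
    ///
    /// ```ignore
    /// storage.write_with_lock(&path, || Ok("content".to_string()))?;
    /// ```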
    fn write_with_lock<F>(&self, path: &Path, writer: F) -> Result<()>
    where
        F: FnOnce() -> Result<String>,
    {
        use std::io::Write;

        let dir = path.parent().unwrap();
        if !dir.exists() {
            fs::create_dir_all(dir)?;
        }

        // Open file for writing
        let mut file = OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .open(path)
            .with_context(|| format!("Failed to open file for writing: {}", path.display()))?;

        // Acquire lock with retry
        self.acquire_lock_with_retry(&file, 10)?;

        // Generate content and write through the locked handle
        let content = writer()?;
        file.write_all(content.as_bytes())
            .with_context(|| format!("Failed to write to {}", path.display()))?;
        file.flush()
            .with_context(|| format!("Failed to flush {}", path.display()))?;

        // Lock is automatically released when file is dropped
        Ok(())
    }

    /// Perform a locked read operation on a file
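    ///
    /// # Example
    ///
    /// Illustrative sketch (marked `ignore`, not compiled as a doctest):
    ///
    /// ```ignore
    /// let content = storage.read_with_lock(&storage.tasks_file())?;
    /// ```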
    fn read_with_lock(&self, path: &Path) -> Result<String> {
        use std::io::Read;

        if !path.exists() {
            anyhow::bail!("File not found: {}", path.display());
        }

        // Open file for reading
        let mut file = OpenOptions::new()
            .read(true)
            .open(path)
            .with_context(|| format!("Failed to open file for reading: {}", path.display()))?;

        // Acquire shared lock (allows multiple readers)
        file.lock_shared()
            .with_context(|| format!("Failed to acquire read lock on {}", path.display()))?;

        // Read content through the locked handle
        let mut content = String::new();
        file.read_to_string(&mut content)
            .with_context(|| format!("Failed to read from {}", path.display()))?;

        // Lock is automatically released when file is dropped
        Ok(content)
    }

    pub fn scud_dir(&self) -> PathBuf {
        self.project_root.join(".scud")
    }

    pub fn tasks_file(&self) -> PathBuf {
        self.scud_dir().join("tasks").join("tasks.scg")
    }

    fn active_tag_file(&self) -> PathBuf {
        self.scud_dir().join("active-tag")
    }

    pub fn config_file(&self) -> PathBuf {
        self.scud_dir().join("config.toml")
    }

    pub fn docs_dir(&self) -> PathBuf {
        self.scud_dir().join("docs")
    }

    pub fn is_initialized(&self) -> bool {
        self.scud_dir().exists() && self.tasks_file().exists()
    }

    pub fn initialize(&self) -> Result<()> {
        let config = Config::default();
        self.initialize_with_config(&config)
    }

    pub fn initialize_with_config(&self, config: &Config) -> Result<()> {
        // Create .scud directory structure
        let scud_dir = self.scud_dir();
        fs::create_dir_all(scud_dir.join("tasks"))
            .context("Failed to create .scud/tasks directory")?;

        // Initialize config.toml
        let config_file = self.config_file();
        if !config_file.exists() {
            config.save(&config_file)?;
        }

        // Initialize tasks.scg with empty content
        let tasks_file = self.tasks_file();
        if !tasks_file.exists() {
            let empty_tasks: HashMap<String, Phase> = HashMap::new();
            self.save_tasks(&empty_tasks)?;
        }

        // Create docs directories
        let docs = self.docs_dir();
        fs::create_dir_all(docs.join("prd"))?;
        fs::create_dir_all(docs.join("phases"))?;
        fs::create_dir_all(docs.join("architecture"))?;
        fs::create_dir_all(docs.join("retrospectives"))?;

        // Create CLAUDE.md with agent instructions
        self.create_agent_instructions()?;

        Ok(())
    }

    pub fn load_config(&self) -> Result<Config> {
        let config_file = self.config_file();
        if !config_file.exists() {
            return Ok(Config::default());
        }
        Config::load(&config_file)
    }

    pub fn load_tasks(&self) -> Result<HashMap<String, Phase>> {
        let path = self.tasks_file();
        if !path.exists() {
            anyhow::bail!("Tasks file not found: {}\nRun: scud init", path.display());
        }

        let content = self.read_with_lock(&path)?;
        self.parse_multi_phase_scg(&content)
    }

    /// Parse multi-phase SCG format (multiple phases separated by ---)
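    ///
    /// Sketch of the expected layout (the section bodies are placeholders; their syntax
    /// is whatever `parse_scg` accepts):
    ///
    /// ```text
    /// <phase section for tag A>
    /// ---
    /// <phase section for tag B>
    /// ```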
    fn parse_multi_phase_scg(&self, content: &str) -> Result<HashMap<String, Phase>> {
        let mut phases = HashMap::new();

        // Empty file returns empty map
        if content.trim().is_empty() {
            return Ok(phases);
        }

        // Split by phase separator (---)
        let sections: Vec<&str> = content.split("\n---\n").collect();

        for section in sections {
            let section = section.trim();
            if section.is_empty() {
                continue;
            }

            // Parse the phase section
            let phase = parse_scg(section).with_context(|| "Failed to parse SCG section")?;

            phases.insert(phase.name.clone(), phase);
        }

        Ok(phases)
    }

    pub fn save_tasks(&self, tasks: &HashMap<String, Phase>) -> Result<()> {
        let path = self.tasks_file();
        self.write_with_lock(&path, || {
            // Sort phases by tag for consistent output
            let mut sorted_tags: Vec<_> = tasks.keys().collect();
            sorted_tags.sort();

            let mut output = String::new();
            for (i, tag) in sorted_tags.iter().enumerate() {
                if i > 0 {
                    output.push_str("\n---\n\n");
                }
                let phase = tasks.get(*tag).unwrap();
                output.push_str(&serialize_scg(phase));
            }

            Ok(output)
        })
    }

    pub fn get_active_group(&self) -> Result<Option<String>> {
        // Check cache first (read lock)
        {
            let cache = self.active_group_cache.read().unwrap();
            if let Some(cached) = cache.as_ref() {
                return Ok(cached.clone());
            }
        }

        // Load from active-tag file
        let active_tag_path = self.active_tag_file();
        let active = if active_tag_path.exists() {
            let content = fs::read_to_string(&active_tag_path)
                .with_context(|| format!("Failed to read {}", active_tag_path.display()))?;
            let tag = content.trim();
            if tag.is_empty() {
                None
            } else {
                Some(tag.to_string())
            }
        } else {
            None
        };

        // Store in cache
        *self.active_group_cache.write().unwrap() = Some(active.clone());

        Ok(active)
    }

    pub fn set_active_group(&self, group_tag: &str) -> Result<()> {
        let tasks = self.load_tasks()?;
        if !tasks.contains_key(group_tag) {
            anyhow::bail!("Task group '{}' not found", group_tag);
        }

        // Write to active-tag file
        let active_tag_path = self.active_tag_file();
        fs::write(&active_tag_path, group_tag)
            .with_context(|| format!("Failed to write {}", active_tag_path.display()))?;

        // Update cache
        *self.active_group_cache.write().unwrap() = Some(Some(group_tag.to_string()));

        Ok(())
    }

    /// Clear the active group cache
    /// Useful when workflow state is modified externally or for testing
    pub fn clear_cache(&self) {
        *self.active_group_cache.write().unwrap() = None;
    }

    /// Clear the active group setting (remove the active-tag file)
    pub fn clear_active_group(&self) -> Result<()> {
        let active_tag_path = self.active_tag_file();
        if active_tag_path.exists() {
            fs::remove_file(&active_tag_path)
                .with_context(|| format!("Failed to remove {}", active_tag_path.display()))?;
        }
        *self.active_group_cache.write().unwrap() = Some(None);
        Ok(())
    }

    /// Load a single task group by tag
    /// Parses the SCG file and extracts the requested group
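    ///
    /// # Example
    ///
    /// Illustrative sketch (marked `ignore`, not compiled as a doctest); "EPIC-1" is a
    /// placeholder tag:
    ///
    /// ```ignore
    /// let group = storage.load_group("EPIC-1")?;
    /// ```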
    pub fn load_group(&self, group_tag: &str) -> Result<Phase> {
        let path = self.tasks_file();
        let content = self.read_with_lock(&path)?;

        let groups = self.parse_multi_phase_scg(&content)?;

        groups
            .get(group_tag)
            .cloned()
            .ok_or_else(|| anyhow::anyhow!("Task group '{}' not found", group_tag))
    }

    /// Load the active task group directly (optimized)
    /// Combines get_active_group() and load_group() in one call
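    ///
    /// # Example
    ///
    /// Illustrative sketch (marked `ignore`, not compiled as a doctest); "EPIC-1" is a
    /// placeholder tag:
    ///
    /// ```ignore
    /// storage.set_active_group("EPIC-1")?;
    /// let group = storage.load_active_group()?;
    /// ```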
    pub fn load_active_group(&self) -> Result<Phase> {
        let active_tag = self
            .get_active_group()?
            .ok_or_else(|| anyhow::anyhow!("No active task group. Run: scud use-tag <tag>"))?;

        self.load_group(&active_tag)
    }

    /// Update a single task group atomically
    /// Holds exclusive lock across read-modify-write cycle to prevent races
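    ///
    /// # Example
    ///
    /// Illustrative sketch (marked `ignore`, not compiled as a doctest), following the
    /// pattern used in the tests below; `new_task` stands in for a `Task` built elsewhere:
    ///
    /// ```ignore
    /// let mut group = storage.load_group("EPIC-1")?;
    /// group.add_task(new_task);
    /// storage.update_group("EPIC-1", &group)?;
    /// ```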
    pub fn update_group(&self, group_tag: &str, group: &Phase) -> Result<()> {
        use std::io::{Read, Seek, SeekFrom, Write};

        let path = self.tasks_file();

        let dir = path.parent().unwrap();
        if !dir.exists() {
            fs::create_dir_all(dir)?;
        }

        // Open file for read+write with exclusive lock held throughout
        // Note: truncate(false) is explicit - we read first, then truncate manually after
        let mut file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .truncate(false)
            .open(&path)
            .with_context(|| format!("Failed to open file: {}", path.display()))?;

        // Acquire exclusive lock with retry (held for entire operation)
        self.acquire_lock_with_retry(&file, 10)?;

        // Read current content while holding lock
        let mut content = String::new();
        file.read_to_string(&mut content)
            .with_context(|| format!("Failed to read from {}", path.display()))?;

        // Parse, modify, and serialize
        let mut groups = self.parse_multi_phase_scg(&content)?;
        groups.insert(group_tag.to_string(), group.clone());

        let mut sorted_tags: Vec<_> = groups.keys().collect();
        sorted_tags.sort();

        let mut output = String::new();
        for (i, tag) in sorted_tags.iter().enumerate() {
            if i > 0 {
                output.push_str("\n---\n\n");
            }
            let grp = groups.get(*tag).unwrap();
            output.push_str(&serialize_scg(grp));
        }

        // Truncate and write back while still holding lock
        file.seek(SeekFrom::Start(0))
            .with_context(|| "Failed to seek to beginning of file")?;
        file.set_len(0).with_context(|| "Failed to truncate file")?;
        file.write_all(output.as_bytes())
            .with_context(|| format!("Failed to write to {}", path.display()))?;
        file.flush()
            .with_context(|| format!("Failed to flush {}", path.display()))?;

        // Lock released when file is dropped
        Ok(())
    }

    pub fn read_file(&self, path: &Path) -> Result<String> {
        fs::read_to_string(path).with_context(|| format!("Failed to read file: {}", path.display()))
    }

    /// Create or update CLAUDE.md with SCUD agent instructions
    fn create_agent_instructions(&self) -> Result<()> {
        let claude_md_path = self.project_root.join("CLAUDE.md");

        let scud_instructions = r#"
## SCUD Task Management

This project uses SCUD Task Manager for task management.

### Session Workflow

1. **Start of session**: Run `scud warmup` to orient yourself
   - Shows current working directory and recent git history
   - Displays active tag, task counts, and any stale locks
   - Identifies the next available task

2. **Claim a task**: Use `/scud:task-next` or `scud next --claim --name "Claude"`
   - Always claim before starting work to prevent conflicts
   - Task context is stored in `.scud/current-task`

3. **Work on the task**: Implement the requirements
   - Reference task details with `/scud:task-show <id>`
   - Dependencies are automatically tracked by the DAG

4. **Commit with context**: Use `scud commit -m "message"` or `scud commit -a -m "message"`
   - Automatically prefixes commits with `[TASK-ID]`
   - Uses task title as default commit message if none provided

5. **Complete the task**: Mark done with `/scud:task-status <id> done`
   - The stop hook will prompt for task completion

### Progress Journaling

Keep a brief progress log during complex tasks:

```
## Progress Log

### Session: 2025-01-15
- Investigated auth module, found issue in token refresh
- Updated refresh logic to handle edge case
- Tests passing, ready for review
```

This helps maintain continuity across sessions and provides context for future work.

### Key Commands

- `scud warmup` - Session orientation
- `scud next` - Find next available task
- `scud show <id>` - View task details
- `scud set-status <id> <status>` - Update task status
- `scud commit` - Task-aware git commit
- `scud stats` - View completion statistics
"#;

        if claude_md_path.exists() {
            // Append to existing CLAUDE.md if SCUD section doesn't exist
            let content = fs::read_to_string(&claude_md_path)
                .with_context(|| "Failed to read existing CLAUDE.md")?;

            if !content.contains("## SCUD Task Management") {
                let mut new_content = content;
                new_content.push_str(scud_instructions);
                fs::write(&claude_md_path, new_content)
                    .with_context(|| "Failed to update CLAUDE.md")?;
            }
        } else {
            // Create new CLAUDE.md
            let content = format!("# Project Instructions\n{}", scud_instructions);
            fs::write(&claude_md_path, content).with_context(|| "Failed to create CLAUDE.md")?;
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;
    use tempfile::TempDir;

    fn create_test_storage() -> (Storage, TempDir) {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new(Some(temp_dir.path().to_path_buf()));
        storage.initialize().unwrap();
        (storage, temp_dir)
    }

    #[test]
    fn test_write_with_lock_creates_file() {
        let (storage, _temp_dir) = create_test_storage();
        let test_file = storage.scud_dir().join("test.json");

        storage
            .write_with_lock(&test_file, || Ok(r#"{"test": "data"}"#.to_string()))
            .unwrap();

        assert!(test_file.exists());
        let content = fs::read_to_string(&test_file).unwrap();
        assert_eq!(content, r#"{"test": "data"}"#);
    }

    #[test]
    fn test_read_with_lock_reads_existing_file() {
        let (storage, _temp_dir) = create_test_storage();
        let test_file = storage.scud_dir().join("test.json");

        // Create a file
        fs::write(&test_file, r#"{"test": "data"}"#).unwrap();

        // Read with lock
        let content = storage.read_with_lock(&test_file).unwrap();
        assert_eq!(content, r#"{"test": "data"}"#);
    }

    #[test]
    fn test_read_with_lock_fails_on_missing_file() {
        let (storage, _temp_dir) = create_test_storage();
        let test_file = storage.scud_dir().join("nonexistent.json");

        let result = storage.read_with_lock(&test_file);
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("File not found"));
    }

    #[test]
    fn test_save_and_load_tasks_with_locking() {
        let (storage, _temp_dir) = create_test_storage();
        let mut tasks = HashMap::new();

        let epic = crate::models::Phase::new("TEST-1".to_string());
        tasks.insert("TEST-1".to_string(), epic);

        // Save tasks
        storage.save_tasks(&tasks).unwrap();

        // Load tasks
        let loaded_tasks = storage.load_tasks().unwrap();

        assert_eq!(tasks.len(), loaded_tasks.len());
        assert!(loaded_tasks.contains_key("TEST-1"));
        assert_eq!(loaded_tasks.get("TEST-1").unwrap().name, "TEST-1");
    }

    #[test]
    fn test_concurrent_writes_dont_corrupt_data() {
        use std::sync::Arc;
        use std::thread;

        let (storage, _temp_dir) = create_test_storage();
        let storage = Arc::new(storage);
        let mut handles = vec![];

        // Spawn 10 threads that each write tasks
        for i in 0..10 {
            let storage_clone = Arc::clone(&storage);
            let handle = thread::spawn(move || {
                let mut tasks = HashMap::new();
                let epic = crate::models::Phase::new(format!("EPIC-{}", i));
                tasks.insert(format!("EPIC-{}", i), epic);

                // Each thread writes multiple times
                for _ in 0..5 {
                    storage_clone.save_tasks(&tasks).unwrap();
                    thread::sleep(Duration::from_millis(1));
                }
            });
            handles.push(handle);
        }

        // Wait for all threads to complete
        for handle in handles {
            handle.join().unwrap();
        }

        // Verify that the file still parses as valid SCG
        let tasks = storage.load_tasks().unwrap();
        // Should have the last written data (from one of the threads)
        assert_eq!(tasks.len(), 1);
    }

    #[test]
    fn test_lock_retry_on_contention() {
        use std::sync::Arc;

        let (storage, _temp_dir) = create_test_storage();
        let storage = Arc::new(storage);
        let test_file = storage.scud_dir().join("lock-test.json");

        // Create file
        storage
            .write_with_lock(&test_file, || Ok(r#"{"initial": "data"}"#.to_string()))
            .unwrap();

        // Open and lock the file
        let file = OpenOptions::new().write(true).open(&test_file).unwrap();
        file.lock_exclusive().unwrap();

        // Try to acquire lock with retry in another thread
        let storage_clone = Arc::clone(&storage);
        let test_file_clone = test_file.clone();
        let handle = thread::spawn(move || {
            // This should retry and succeed after lock release
            storage_clone.write_with_lock(&test_file_clone, || {
                Ok(r#"{"updated": "data"}"#.to_string())
            })
        });

        // Keep lock for a bit
        thread::sleep(Duration::from_millis(200));

        // Release lock
        file.unlock().unwrap();
        drop(file);

        // The write should have succeeded after retrying
        let result = handle.join().unwrap();
        assert!(result.is_ok());
    }

    // ==================== Error Handling Tests ====================

    #[test]
    fn test_load_tasks_with_malformed_json() {
        let (storage, _temp_dir) = create_test_storage();
        let tasks_file = storage.tasks_file();

        // Write malformed JSON
        fs::write(&tasks_file, r#"{"invalid": json here}"#).unwrap();

        // Should return error
        let result = storage.load_tasks();
        assert!(result.is_err());
    }

    #[test]
    fn test_load_tasks_with_empty_file() {
        let (storage, _temp_dir) = create_test_storage();
        let tasks_file = storage.tasks_file();

        // Write empty file
        fs::write(&tasks_file, "").unwrap();

        // Empty SCG file is valid and returns empty HashMap
        let result = storage.load_tasks();
        assert!(result.is_ok());
        assert!(result.unwrap().is_empty());
    }

    #[test]
    fn test_load_tasks_on_freshly_initialized_storage() {
        let (storage, _temp_dir) = create_test_storage();
        // create_test_storage() calls initialize(), which creates an empty tasks file

        // An empty tasks file loads as an empty HashMap
        let tasks = storage.load_tasks().unwrap();
        assert_eq!(tasks.len(), 0);
    }

    #[test]
    fn test_save_tasks_creates_directory_if_missing() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new(Some(temp_dir.path().to_path_buf()));
        // Don't call initialize()

        let mut tasks = HashMap::new();
        let epic = crate::models::Phase::new("TEST-1".to_string());
        tasks.insert("TEST-1".to_string(), epic);

        // Should create directory and file
        let result = storage.save_tasks(&tasks);
        assert!(result.is_ok());

        assert!(storage.scud_dir().exists());
        assert!(storage.tasks_file().exists());
    }

    #[test]
    fn test_write_with_lock_handles_directory_creation() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new(Some(temp_dir.path().to_path_buf()));

        let nested_file = temp_dir
            .path()
            .join("deeply")
            .join("nested")
            .join("test.json");

        // Should create all parent directories
        let result = storage.write_with_lock(&nested_file, || Ok("{}".to_string()));
        assert!(result.is_ok());
        assert!(nested_file.exists());
    }

    #[test]
    fn test_load_tasks_with_invalid_structure() {
        let (storage, _temp_dir) = create_test_storage();
        let tasks_file = storage.tasks_file();

        // Write valid JSON but invalid structure (array instead of object)
        fs::write(&tasks_file, r#"["not", "an", "object"]"#).unwrap();

        // Should return error
        let result = storage.load_tasks();
        assert!(result.is_err());
    }

    #[test]
    fn test_save_and_load_with_unicode_content() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        let mut epic = crate::models::Phase::new("TEST-UNICODE".to_string());

        // Add task with unicode content
        let task = crate::models::Task::new(
            "task-1".to_string(),
            "测试 Unicode 🚀".to_string(),
            "Descripción en español 日本語".to_string(),
        );
        epic.add_task(task);

        tasks.insert("TEST-UNICODE".to_string(), epic);

        // Save and load
        storage.save_tasks(&tasks).unwrap();
        let loaded_tasks = storage.load_tasks().unwrap();

        let loaded_epic = loaded_tasks.get("TEST-UNICODE").unwrap();
        let loaded_task = loaded_epic.get_task("task-1").unwrap();
        assert_eq!(loaded_task.title, "测试 Unicode 🚀");
        assert_eq!(loaded_task.description, "Descripción en español 日本語");
    }

    #[test]
    fn test_save_and_load_with_large_dataset() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();

        // Create 100 epics with 50 tasks each
        for i in 0..100 {
            let mut epic = crate::models::Phase::new(format!("EPIC-{}", i));

            for j in 0..50 {
                let task = crate::models::Task::new(
                    format!("task-{}-{}", i, j),
                    format!("Task {} of Epic {}", j, i),
                    format!("Description for task {}-{}", i, j),
                );
                epic.add_task(task);
            }

            tasks.insert(format!("EPIC-{}", i), epic);
        }

        // Save and load
        storage.save_tasks(&tasks).unwrap();
        let loaded_tasks = storage.load_tasks().unwrap();

        assert_eq!(loaded_tasks.len(), 100);
        for i in 0..100 {
            let epic = loaded_tasks.get(&format!("EPIC-{}", i)).unwrap();
            assert_eq!(epic.tasks.len(), 50);
        }
    }

    #[test]
    fn test_concurrent_read_and_write() {
        use std::sync::Arc;
        use std::thread;

        let (storage, _temp_dir) = create_test_storage();
        let storage = Arc::new(storage);

        // Initialize with some data
        let mut tasks = HashMap::new();
        let epic = crate::models::Phase::new("INITIAL".to_string());
        tasks.insert("INITIAL".to_string(), epic);
        storage.save_tasks(&tasks).unwrap();

        let mut handles = vec![];

        // Spawn 5 readers
        for _ in 0..5 {
            let storage_clone = Arc::clone(&storage);
            let handle = thread::spawn(move || {
                for _ in 0..10 {
                    let _ = storage_clone.load_tasks();
                    thread::sleep(Duration::from_millis(1));
                }
            });
            handles.push(handle);
        }

        // Spawn 2 writers
        for i in 0..2 {
            let storage_clone = Arc::clone(&storage);
            let handle = thread::spawn(move || {
                for j in 0..5 {
                    let mut tasks = HashMap::new();
                    let epic = crate::models::Phase::new(format!("WRITER-{}-{}", i, j));
                    tasks.insert(format!("WRITER-{}-{}", i, j), epic);
                    storage_clone.save_tasks(&tasks).unwrap();
                    thread::sleep(Duration::from_millis(2));
                }
            });
            handles.push(handle);
        }

        // Wait for all threads
        for handle in handles {
            handle.join().unwrap();
        }

        // File should still be valid
        let tasks = storage.load_tasks().unwrap();
        assert_eq!(tasks.len(), 1); // Last write wins
    }

    // ==================== Active Epic Cache Tests ====================

    #[test]
    fn test_active_epic_cached_on_second_call() {
        let (storage, _temp_dir) = create_test_storage();

        // Set active epic
        let mut tasks = HashMap::new();
        tasks.insert("TEST-1".to_string(), Phase::new("TEST-1".to_string()));
        storage.save_tasks(&tasks).unwrap();
        storage.set_active_group("TEST-1").unwrap();

        // First call - loads from file
        let active1 = storage.get_active_group().unwrap();
        assert_eq!(active1, Some("TEST-1".to_string()));

        // Modify file directly (bypass storage methods)
        let active_tag_file = storage.active_tag_file();
        fs::write(&active_tag_file, "DIFFERENT").unwrap();

        // Second call - should return cached value (not file value)
        let active2 = storage.get_active_group().unwrap();
        assert_eq!(active2, Some("TEST-1".to_string())); // Still cached

        // After cache clear - should reload from file
        storage.clear_cache();
        let active3 = storage.get_active_group().unwrap();
        assert_eq!(active3, Some("DIFFERENT".to_string())); // From file
    }

    #[test]
    fn test_cache_invalidated_on_set_active_epic() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        tasks.insert("EPIC-1".to_string(), Phase::new("EPIC-1".to_string()));
        tasks.insert("EPIC-2".to_string(), Phase::new("EPIC-2".to_string()));
        storage.save_tasks(&tasks).unwrap();

        storage.set_active_group("EPIC-1").unwrap();
        assert_eq!(
            storage.get_active_group().unwrap(),
            Some("EPIC-1".to_string())
        );

        // Change active epic - should update cache
        storage.set_active_group("EPIC-2").unwrap();
        assert_eq!(
            storage.get_active_group().unwrap(),
            Some("EPIC-2".to_string())
        );
    }

    #[test]
    fn test_cache_with_no_active_epic() {
        let (storage, _temp_dir) = create_test_storage();

        // Load when no active epic is set
        let active = storage.get_active_group().unwrap();
        assert_eq!(active, None);

        // Should cache the None value
        let active2 = storage.get_active_group().unwrap();
        assert_eq!(active2, None);
    }

    // ==================== Lazy Epic Loading Tests ====================

    #[test]
    fn test_load_single_epic_from_many() {
        let (storage, _temp_dir) = create_test_storage();

        // Create 50 epics
        let mut tasks = HashMap::new();
        for i in 0..50 {
            tasks.insert(format!("EPIC-{}", i), Phase::new(format!("EPIC-{}", i)));
        }
        storage.save_tasks(&tasks).unwrap();

        // Load a single epic; the whole file is parsed but only the requested group is returned
        let epic = storage.load_group("EPIC-25").unwrap();
        assert_eq!(epic.name, "EPIC-25");
    }

    #[test]
    fn test_load_epic_not_found() {
        let (storage, _temp_dir) = create_test_storage();

        let tasks = HashMap::new();
        storage.save_tasks(&tasks).unwrap();

        let result = storage.load_group("NONEXISTENT");
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("not found"));
    }

    #[test]
    fn test_load_epic_matches_full_load() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        let mut epic = Phase::new("TEST-1".to_string());
        epic.add_task(crate::models::Task::new(
            "task-1".to_string(),
            "Test".to_string(),
            "Desc".to_string(),
        ));
        tasks.insert("TEST-1".to_string(), epic.clone());
        storage.save_tasks(&tasks).unwrap();

        // Load via both methods
        let epic_lazy = storage.load_group("TEST-1").unwrap();
        let tasks_full = storage.load_tasks().unwrap();
        let epic_full = tasks_full.get("TEST-1").unwrap();

        // Should be identical
        assert_eq!(epic_lazy.name, epic_full.name);
        assert_eq!(epic_lazy.tasks.len(), epic_full.tasks.len());
    }

    #[test]
    fn test_load_active_epic() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        let mut epic = Phase::new("ACTIVE-1".to_string());
        epic.add_task(crate::models::Task::new(
            "task-1".to_string(),
            "Test".to_string(),
            "Desc".to_string(),
        ));
        tasks.insert("ACTIVE-1".to_string(), epic);
        storage.save_tasks(&tasks).unwrap();
        storage.set_active_group("ACTIVE-1").unwrap();

        // Load active epic directly
        let epic = storage.load_active_group().unwrap();
        assert_eq!(epic.name, "ACTIVE-1");
        assert_eq!(epic.tasks.len(), 1);
    }

    #[test]
    fn test_load_active_epic_when_none_set() {
        let (storage, _temp_dir) = create_test_storage();

        // Should error when no active epic
        let result = storage.load_active_group();
        assert!(result.is_err());
        assert!(result
            .unwrap_err()
            .to_string()
            .contains("No active task group"));
    }

    #[test]
    fn test_update_epic_without_loading_all() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        tasks.insert("EPIC-1".to_string(), Phase::new("EPIC-1".to_string()));
        tasks.insert("EPIC-2".to_string(), Phase::new("EPIC-2".to_string()));
        storage.save_tasks(&tasks).unwrap();

        // Update only EPIC-1
        let mut epic1 = storage.load_group("EPIC-1").unwrap();
        epic1.add_task(crate::models::Task::new(
            "new-task".to_string(),
            "New".to_string(),
            "Desc".to_string(),
        ));
        storage.update_group("EPIC-1", &epic1).unwrap();

        // Verify update
        let loaded = storage.load_group("EPIC-1").unwrap();
        assert_eq!(loaded.tasks.len(), 1);

        // Verify EPIC-2 unchanged
        let epic2 = storage.load_group("EPIC-2").unwrap();
        assert_eq!(epic2.tasks.len(), 0);
    }
}