scud/storage/mod.rs

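//! File-backed storage for SCUD task data kept under `.scud/` in the project root.
//!
//! Illustrative usage (a sketch only; assumes the crate is named `scud` and that the
//! current directory is the project root):
//!
//! ```ignore
//! use scud::storage::Storage;
//!
//! let storage = Storage::new(None); // defaults to the current directory
//! if !storage.is_initialized() {
//!     storage.initialize()?; // creates .scud/, config.toml, tasks.scg, and docs/
//! }
//! let tasks = storage.load_tasks()?; // HashMap<String, Phase> keyed by group tag
//! ```
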
use anyhow::{Context, Result};
use fs2::FileExt;
use std::collections::HashMap;
use std::fs::{self, File, OpenOptions};
use std::path::{Path, PathBuf};
use std::sync::RwLock;
use std::thread;
use std::time::Duration;

use crate::config::Config;
use crate::formats::{parse_scg, serialize_scg};
use crate::models::Phase;

pub struct Storage {
    project_root: PathBuf,
    /// Cache for active group to avoid repeated workflow state loads
    /// Option<Option<String>> represents: None = not cached, Some(None) = no active group, Some(Some(tag)) = cached tag
    /// Uses RwLock for thread safety (useful for tests and potential daemon mode)
    active_group_cache: RwLock<Option<Option<String>>>,
}

impl Storage {
    pub fn new(project_root: Option<PathBuf>) -> Self {
        let root = project_root.unwrap_or_else(|| std::env::current_dir().unwrap());
        Storage {
            project_root: root,
            active_group_cache: RwLock::new(None),
        }
    }

    /// Acquire an exclusive file lock with retry logic
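    ///
    /// The delay starts at 10 ms, doubles on each attempt, and is capped at 1 s; with
    /// the 10 retries used by callers in this module, the total wait is bounded at
    /// roughly four seconds before the error is returned.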
    fn acquire_lock_with_retry(&self, file: &File, max_retries: u32) -> Result<()> {
        let mut retries = 0;
        let mut delay_ms = 10;

        loop {
            match file.try_lock_exclusive() {
                Ok(_) => return Ok(()),
                Err(_) if retries < max_retries => {
                    retries += 1;
                    thread::sleep(Duration::from_millis(delay_ms));
                    delay_ms = (delay_ms * 2).min(1000); // Exponential backoff, max 1s
                }
                Err(e) => {
                    anyhow::bail!(
                        "Failed to acquire file lock after {} retries: {}",
                        max_retries,
                        e
                    )
                }
            }
        }
    }

    /// Perform a locked write operation on a file
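    ///
    /// The closure produces the complete new file contents. Illustrative call
    /// (a sketch; any writable path works, `notes.txt` is hypothetical):
    ///
    /// ```ignore
    /// storage.write_with_lock(&storage.scud_dir().join("notes.txt"), || {
    ///     Ok("contents".to_string())
    /// })?;
    /// ```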
    fn write_with_lock<F>(&self, path: &Path, writer: F) -> Result<()>
    where
        F: FnOnce() -> Result<String>,
    {
        use std::io::Write;

        let dir = path.parent().unwrap();
        if !dir.exists() {
            fs::create_dir_all(dir)?;
        }

        // Open file for writing
        let mut file = OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .open(path)
            .with_context(|| format!("Failed to open file for writing: {}", path.display()))?;

        // Acquire lock with retry
        self.acquire_lock_with_retry(&file, 10)?;

        // Generate content and write through the locked handle
        let content = writer()?;
        file.write_all(content.as_bytes())
            .with_context(|| format!("Failed to write to {}", path.display()))?;
        file.flush()
            .with_context(|| format!("Failed to flush {}", path.display()))?;

        // Lock is automatically released when file is dropped
        Ok(())
    }

    /// Perform a locked read operation on a file
    fn read_with_lock(&self, path: &Path) -> Result<String> {
        use std::io::Read;

        if !path.exists() {
            anyhow::bail!("File not found: {}", path.display());
        }

        // Open file for reading
        let mut file = OpenOptions::new()
            .read(true)
            .open(path)
            .with_context(|| format!("Failed to open file for reading: {}", path.display()))?;

        // Acquire shared lock (allows multiple readers)
        file.lock_shared()
            .with_context(|| format!("Failed to acquire read lock on {}", path.display()))?;

        // Read content through the locked handle
        let mut content = String::new();
        file.read_to_string(&mut content)
            .with_context(|| format!("Failed to read from {}", path.display()))?;

        // Lock is automatically released when file is dropped
        Ok(content)
    }

    pub fn scud_dir(&self) -> PathBuf {
        self.project_root.join(".scud")
    }

    pub fn tasks_file(&self) -> PathBuf {
        self.scud_dir().join("tasks").join("tasks.scg")
    }

    fn active_tag_file(&self) -> PathBuf {
        self.scud_dir().join("active-tag")
    }

    pub fn config_file(&self) -> PathBuf {
        self.scud_dir().join("config.toml")
    }

    pub fn docs_dir(&self) -> PathBuf {
        self.scud_dir().join("docs")
    }

    pub fn is_initialized(&self) -> bool {
        self.scud_dir().exists() && self.tasks_file().exists()
    }

    pub fn initialize(&self) -> Result<()> {
        let config = Config::default();
        self.initialize_with_config(&config)
    }

    pub fn initialize_with_config(&self, config: &Config) -> Result<()> {
        // Create .scud directory structure
        let scud_dir = self.scud_dir();
        fs::create_dir_all(scud_dir.join("tasks"))
            .context("Failed to create .scud/tasks directory")?;

        // Initialize config.toml
        let config_file = self.config_file();
        if !config_file.exists() {
            config.save(&config_file)?;
        }

        // Initialize tasks.scg with empty content
        let tasks_file = self.tasks_file();
        if !tasks_file.exists() {
            let empty_tasks: HashMap<String, Phase> = HashMap::new();
            self.save_tasks(&empty_tasks)?;
        }

        // Create docs directories
        let docs = self.docs_dir();
        fs::create_dir_all(docs.join("prd"))?;
        fs::create_dir_all(docs.join("phases"))?;
        fs::create_dir_all(docs.join("architecture"))?;
        fs::create_dir_all(docs.join("retrospectives"))?;

        // Create CLAUDE.md with agent instructions
        self.create_agent_instructions()?;

        Ok(())
    }

    pub fn load_config(&self) -> Result<Config> {
        let config_file = self.config_file();
        if !config_file.exists() {
            return Ok(Config::default());
        }
        Config::load(&config_file)
    }

    pub fn load_tasks(&self) -> Result<HashMap<String, Phase>> {
        let path = self.tasks_file();
        if !path.exists() {
            anyhow::bail!("Tasks file not found: {}\nRun: scud init", path.display());
        }

        let content = self.read_with_lock(&path)?;
        self.parse_multi_phase_scg(&content)
    }

    /// Parse multi-phase SCG format (multiple phases separated by ---)
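    ///
    /// Schematic of the expected input (each phase body is whatever `serialize_scg`
    /// emits; bodies elided here):
    ///
    /// ```text
    /// <phase A in SCG form>
    /// ---
    /// <phase B in SCG form>
    /// ```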
    fn parse_multi_phase_scg(&self, content: &str) -> Result<HashMap<String, Phase>> {
        let mut phases = HashMap::new();

        // Empty file returns empty map
        if content.trim().is_empty() {
            return Ok(phases);
        }

        // Split by phase separator (---)
        let sections: Vec<&str> = content.split("\n---\n").collect();

        for section in sections {
            let section = section.trim();
            if section.is_empty() {
                continue;
            }

            // Parse the phase section
            let phase = parse_scg(section).with_context(|| "Failed to parse SCG section")?;

            phases.insert(phase.name.clone(), phase);
        }

        Ok(phases)
    }

    pub fn save_tasks(&self, tasks: &HashMap<String, Phase>) -> Result<()> {
        let path = self.tasks_file();
        self.write_with_lock(&path, || {
            // Sort phases by tag for consistent output
            let mut sorted_tags: Vec<_> = tasks.keys().collect();
            sorted_tags.sort();

            let mut output = String::new();
            for (i, tag) in sorted_tags.iter().enumerate() {
                if i > 0 {
                    output.push_str("\n---\n\n");
                }
                let phase = tasks.get(*tag).unwrap();
                output.push_str(&serialize_scg(phase));
            }

            Ok(output)
        })
    }

    pub fn get_active_group(&self) -> Result<Option<String>> {
        // Check cache first (read lock)
        {
            let cache = self.active_group_cache.read().unwrap();
            if let Some(cached) = cache.as_ref() {
                return Ok(cached.clone());
            }
        }

        // Load from active-tag file
        let active_tag_path = self.active_tag_file();
        let active = if active_tag_path.exists() {
            let content = fs::read_to_string(&active_tag_path)
                .with_context(|| format!("Failed to read {}", active_tag_path.display()))?;
            let tag = content.trim();
            if tag.is_empty() {
                None
            } else {
                Some(tag.to_string())
            }
        } else {
            None
        };

        // Store in cache
        *self.active_group_cache.write().unwrap() = Some(active.clone());

        Ok(active)
    }

    pub fn set_active_group(&self, group_tag: &str) -> Result<()> {
        let tasks = self.load_tasks()?;
        if !tasks.contains_key(group_tag) {
            anyhow::bail!("Task group '{}' not found", group_tag);
        }

        // Write to active-tag file
        let active_tag_path = self.active_tag_file();
        fs::write(&active_tag_path, group_tag)
            .with_context(|| format!("Failed to write {}", active_tag_path.display()))?;

        // Update cache
        *self.active_group_cache.write().unwrap() = Some(Some(group_tag.to_string()));

        Ok(())
    }

    /// Clear the active group cache
    /// Useful when workflow state is modified externally or for testing
    pub fn clear_cache(&self) {
        *self.active_group_cache.write().unwrap() = None;
    }

    /// Clear the active group setting (remove the active-tag file)
    pub fn clear_active_group(&self) -> Result<()> {
        let active_tag_path = self.active_tag_file();
        if active_tag_path.exists() {
            fs::remove_file(&active_tag_path)
                .with_context(|| format!("Failed to remove {}", active_tag_path.display()))?;
        }
        *self.active_group_cache.write().unwrap() = Some(None);
        Ok(())
    }

    /// Load a single task group by tag
    /// Parses the SCG file and extracts the requested group
    pub fn load_group(&self, group_tag: &str) -> Result<Phase> {
        let path = self.tasks_file();
        let content = self.read_with_lock(&path)?;

        let groups = self.parse_multi_phase_scg(&content)?;

        groups
            .get(group_tag)
            .cloned()
            .ok_or_else(|| anyhow::anyhow!("Task group '{}' not found", group_tag))
    }

    /// Load the active task group directly (optimized)
    /// Combines get_active_group() and load_group() in one call
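    ///
    /// Illustrative call (a sketch; assumes an active tag has already been set):
    ///
    /// ```ignore
    /// let phase = storage.load_active_group()?;
    /// println!("{}: {} tasks", phase.name, phase.tasks.len());
    /// ```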
    pub fn load_active_group(&self) -> Result<Phase> {
        let active_tag = self
            .get_active_group()?
            .ok_or_else(|| anyhow::anyhow!("No active task group. Run: scud use-tag <tag>"))?;

        self.load_group(&active_tag)
    }

    /// Update a single task group atomically
    /// Holds exclusive lock across read-modify-write cycle to prevent races
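    ///
    /// Typical read-modify-write call (a sketch mirroring the tests below; `task`
    /// construction elided):
    ///
    /// ```ignore
    /// let mut phase = storage.load_group("EPIC-1")?;
    /// phase.add_task(task);
    /// storage.update_group("EPIC-1", &phase)?;
    /// ```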
    pub fn update_group(&self, group_tag: &str, group: &Phase) -> Result<()> {
        use std::io::{Read, Seek, SeekFrom, Write};

        let path = self.tasks_file();

        let dir = path.parent().unwrap();
        if !dir.exists() {
            fs::create_dir_all(dir)?;
        }

        // Open file for read+write with exclusive lock held throughout
        // Note: truncate(false) is explicit - we read first, then truncate manually after
        let mut file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .truncate(false)
            .open(&path)
            .with_context(|| format!("Failed to open file: {}", path.display()))?;

        // Acquire exclusive lock with retry (held for entire operation)
        self.acquire_lock_with_retry(&file, 10)?;

        // Read current content while holding lock
        let mut content = String::new();
        file.read_to_string(&mut content)
            .with_context(|| format!("Failed to read from {}", path.display()))?;

        // Parse, modify, and serialize
        let mut groups = self.parse_multi_phase_scg(&content)?;
        groups.insert(group_tag.to_string(), group.clone());

        let mut sorted_tags: Vec<_> = groups.keys().collect();
        sorted_tags.sort();

        let mut output = String::new();
        for (i, tag) in sorted_tags.iter().enumerate() {
            if i > 0 {
                output.push_str("\n---\n\n");
            }
            let grp = groups.get(*tag).unwrap();
            output.push_str(&serialize_scg(grp));
        }

        // Truncate and write back while still holding lock
        file.seek(SeekFrom::Start(0))
            .with_context(|| "Failed to seek to beginning of file")?;
        file.set_len(0).with_context(|| "Failed to truncate file")?;
        file.write_all(output.as_bytes())
            .with_context(|| format!("Failed to write to {}", path.display()))?;
        file.flush()
            .with_context(|| format!("Failed to flush {}", path.display()))?;

        // Lock released when file is dropped
        Ok(())
    }

    pub fn read_file(&self, path: &Path) -> Result<String> {
        fs::read_to_string(path).with_context(|| format!("Failed to read file: {}", path.display()))
    }

    /// Create or update CLAUDE.md with SCUD agent instructions
    fn create_agent_instructions(&self) -> Result<()> {
        let claude_md_path = self.project_root.join("CLAUDE.md");

        let scud_instructions = r#"
## SCUD Task Management

This project uses SCUD Task Manager for task management.

### Session Workflow

1. **Start of session**: Run `scud warmup` to orient yourself
   - Shows current working directory and recent git history
   - Displays active tag, task counts, and any stale locks
   - Identifies the next available task

2. **Claim a task**: Use `/scud:task-next` or `scud next --claim --name "Claude"`
   - Always claim before starting work to prevent conflicts
   - Task context is stored in `.scud/current-task`

3. **Work on the task**: Implement the requirements
   - Reference task details with `/scud:task-show <id>`
   - Dependencies are automatically tracked by the DAG

4. **Commit with context**: Use `scud commit -m "message"` or `scud commit -a -m "message"`
   - Automatically prefixes commits with `[TASK-ID]`
   - Uses task title as default commit message if none provided

5. **Complete the task**: Mark done with `/scud:task-status <id> done`
   - The stop hook will prompt for task completion

### Progress Journaling

Keep a brief progress log during complex tasks:

```
## Progress Log

### Session: 2025-01-15
- Investigated auth module, found issue in token refresh
- Updated refresh logic to handle edge case
- Tests passing, ready for review
```

This helps maintain continuity across sessions and provides context for future work.

### Key Commands

- `scud warmup` - Session orientation
- `scud next` - Find next available task
- `scud show <id>` - View task details
- `scud set-status <id> <status>` - Update task status
- `scud commit` - Task-aware git commit
- `scud stats` - View completion statistics
"#;

        if claude_md_path.exists() {
            // Append to existing CLAUDE.md if SCUD section doesn't exist
            let content = fs::read_to_string(&claude_md_path)
                .with_context(|| "Failed to read existing CLAUDE.md")?;

            if !content.contains("## SCUD Task Management") {
                let mut new_content = content;
                new_content.push_str(scud_instructions);
                fs::write(&claude_md_path, new_content)
                    .with_context(|| "Failed to update CLAUDE.md")?;
            }
        } else {
            // Create new CLAUDE.md
            let content = format!("# Project Instructions\n{}", scud_instructions);
            fs::write(&claude_md_path, content).with_context(|| "Failed to create CLAUDE.md")?;
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;
    use tempfile::TempDir;

    fn create_test_storage() -> (Storage, TempDir) {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new(Some(temp_dir.path().to_path_buf()));
        storage.initialize().unwrap();
        (storage, temp_dir)
    }

    #[test]
    fn test_write_with_lock_creates_file() {
        let (storage, _temp_dir) = create_test_storage();
        let test_file = storage.scud_dir().join("test.json");

        storage
            .write_with_lock(&test_file, || Ok(r#"{"test": "data"}"#.to_string()))
            .unwrap();

        assert!(test_file.exists());
        let content = fs::read_to_string(&test_file).unwrap();
        assert_eq!(content, r#"{"test": "data"}"#);
    }

    #[test]
    fn test_read_with_lock_reads_existing_file() {
        let (storage, _temp_dir) = create_test_storage();
        let test_file = storage.scud_dir().join("test.json");

        // Create a file
        fs::write(&test_file, r#"{"test": "data"}"#).unwrap();

        // Read with lock
        let content = storage.read_with_lock(&test_file).unwrap();
        assert_eq!(content, r#"{"test": "data"}"#);
    }

    #[test]
    fn test_read_with_lock_fails_on_missing_file() {
        let (storage, _temp_dir) = create_test_storage();
        let test_file = storage.scud_dir().join("nonexistent.json");

        let result = storage.read_with_lock(&test_file);
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("File not found"));
    }

    #[test]
    fn test_save_and_load_tasks_with_locking() {
        let (storage, _temp_dir) = create_test_storage();
        let mut tasks = HashMap::new();

        let epic = crate::models::Phase::new("TEST-1".to_string());
        tasks.insert("TEST-1".to_string(), epic);

        // Save tasks
        storage.save_tasks(&tasks).unwrap();

        // Load tasks
        let loaded_tasks = storage.load_tasks().unwrap();

        assert_eq!(tasks.len(), loaded_tasks.len());
        assert!(loaded_tasks.contains_key("TEST-1"));
        assert_eq!(loaded_tasks.get("TEST-1").unwrap().name, "TEST-1");
    }

    #[test]
    fn test_concurrent_writes_dont_corrupt_data() {
        use std::sync::Arc;
        use std::thread;

        let (storage, _temp_dir) = create_test_storage();
        let storage = Arc::new(storage);
        let mut handles = vec![];

        // Spawn 10 threads that each write tasks
        for i in 0..10 {
            let storage_clone = Arc::clone(&storage);
            let handle = thread::spawn(move || {
                let mut tasks = HashMap::new();
                let epic = crate::models::Phase::new(format!("EPIC-{}", i));
                tasks.insert(format!("EPIC-{}", i), epic);

                // Each thread writes multiple times
                for _ in 0..5 {
                    storage_clone.save_tasks(&tasks).unwrap();
                    thread::sleep(Duration::from_millis(1));
                }
            });
            handles.push(handle);
        }

        // Wait for all threads to complete
        for handle in handles {
            handle.join().unwrap();
        }

        // Verify that the file still parses as valid SCG
        let tasks = storage.load_tasks().unwrap();
        // Should have the last written data (from one of the threads)
        assert_eq!(tasks.len(), 1);
    }

    #[test]
    fn test_lock_retry_on_contention() {
        use std::sync::Arc;

        let (storage, _temp_dir) = create_test_storage();
        let storage = Arc::new(storage);
        let test_file = storage.scud_dir().join("lock-test.json");

        // Create file
        storage
            .write_with_lock(&test_file, || Ok(r#"{"initial": "data"}"#.to_string()))
            .unwrap();

        // Open and lock the file
        let file = OpenOptions::new().write(true).open(&test_file).unwrap();
        file.lock_exclusive().unwrap();

        // Try to acquire lock with retry in another thread
        let storage_clone = Arc::clone(&storage);
        let test_file_clone = test_file.clone();
        let handle = thread::spawn(move || {
            // This should retry and succeed after lock release
            storage_clone.write_with_lock(&test_file_clone, || {
                Ok(r#"{"updated": "data"}"#.to_string())
            })
        });

        // Keep lock for a bit
        thread::sleep(Duration::from_millis(200));

        // Release lock
        file.unlock().unwrap();
        drop(file);

        // The write should have succeeded after retrying
        let result = handle.join().unwrap();
        assert!(result.is_ok());
    }

    // ==================== Error Handling Tests ====================

    #[test]
    fn test_load_tasks_with_malformed_content() {
        let (storage, _temp_dir) = create_test_storage();
        let tasks_file = storage.tasks_file();

        // Write content that is not valid SCG
        fs::write(&tasks_file, r#"{"invalid": json here}"#).unwrap();

        // Should return error
        let result = storage.load_tasks();
        assert!(result.is_err());
    }

    #[test]
    fn test_load_tasks_with_empty_file() {
        let (storage, _temp_dir) = create_test_storage();
        let tasks_file = storage.tasks_file();

        // Write empty file
        fs::write(&tasks_file, "").unwrap();

        // Empty SCG file is valid and returns empty HashMap
        let result = storage.load_tasks();
        assert!(result.is_ok());
        assert!(result.unwrap().is_empty());
    }

    #[test]
    fn test_load_tasks_after_fresh_initialize_is_empty() {
        let (storage, _temp_dir) = create_test_storage();
        // initialize() already created an empty tasks file

        // An empty tasks file loads as an empty HashMap
        let tasks = storage.load_tasks().unwrap();
        assert_eq!(tasks.len(), 0);
    }

    #[test]
    fn test_save_tasks_creates_directory_if_missing() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new(Some(temp_dir.path().to_path_buf()));
        // Don't call initialize()

        let mut tasks = HashMap::new();
        let epic = crate::models::Phase::new("TEST-1".to_string());
        tasks.insert("TEST-1".to_string(), epic);

        // Should create directory and file
        let result = storage.save_tasks(&tasks);
        assert!(result.is_ok());

        assert!(storage.scud_dir().exists());
        assert!(storage.tasks_file().exists());
    }

    #[test]
    fn test_write_with_lock_handles_directory_creation() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new(Some(temp_dir.path().to_path_buf()));

        let nested_file = temp_dir
            .path()
            .join("deeply")
            .join("nested")
            .join("test.json");

        // Should create all parent directories
        let result = storage.write_with_lock(&nested_file, || Ok("{}".to_string()));
        assert!(result.is_ok());
        assert!(nested_file.exists());
    }

    #[test]
    fn test_load_tasks_with_invalid_structure() {
        let (storage, _temp_dir) = create_test_storage();
        let tasks_file = storage.tasks_file();

        // Write content whose structure cannot be parsed as an SCG phase
        fs::write(&tasks_file, r#"["not", "an", "object"]"#).unwrap();

        // Should return error
        let result = storage.load_tasks();
        assert!(result.is_err());
    }

    #[test]
    fn test_save_and_load_with_unicode_content() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        let mut epic = crate::models::Phase::new("TEST-UNICODE".to_string());

        // Add task with unicode content
        let task = crate::models::Task::new(
            "task-1".to_string(),
            "测试 Unicode 🚀".to_string(),
            "Descripción en español 日本語".to_string(),
        );
        epic.add_task(task);

        tasks.insert("TEST-UNICODE".to_string(), epic);

        // Save and load
        storage.save_tasks(&tasks).unwrap();
        let loaded_tasks = storage.load_tasks().unwrap();

        let loaded_epic = loaded_tasks.get("TEST-UNICODE").unwrap();
        let loaded_task = loaded_epic.get_task("task-1").unwrap();
        assert_eq!(loaded_task.title, "测试 Unicode 🚀");
        assert_eq!(loaded_task.description, "Descripción en español 日本語");
    }

    #[test]
    fn test_save_and_load_with_large_dataset() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();

        // Create 100 epics with 50 tasks each
        for i in 0..100 {
            let mut epic = crate::models::Phase::new(format!("EPIC-{}", i));

            for j in 0..50 {
                let task = crate::models::Task::new(
                    format!("task-{}-{}", i, j),
                    format!("Task {} of Epic {}", j, i),
                    format!("Description for task {}-{}", i, j),
                );
                epic.add_task(task);
            }

            tasks.insert(format!("EPIC-{}", i), epic);
        }

        // Save and load
        storage.save_tasks(&tasks).unwrap();
        let loaded_tasks = storage.load_tasks().unwrap();

        assert_eq!(loaded_tasks.len(), 100);
        for i in 0..100 {
            let epic = loaded_tasks.get(&format!("EPIC-{}", i)).unwrap();
            assert_eq!(epic.tasks.len(), 50);
        }
    }

    #[test]
    fn test_concurrent_read_and_write() {
        use std::sync::Arc;
        use std::thread;

        let (storage, _temp_dir) = create_test_storage();
        let storage = Arc::new(storage);

        // Initialize with some data
        let mut tasks = HashMap::new();
        let epic = crate::models::Phase::new("INITIAL".to_string());
        tasks.insert("INITIAL".to_string(), epic);
        storage.save_tasks(&tasks).unwrap();

        let mut handles = vec![];

        // Spawn 5 readers
        for _ in 0..5 {
            let storage_clone = Arc::clone(&storage);
            let handle = thread::spawn(move || {
                for _ in 0..10 {
                    let _ = storage_clone.load_tasks();
                    thread::sleep(Duration::from_millis(1));
                }
            });
            handles.push(handle);
        }

        // Spawn 2 writers
        for i in 0..2 {
            let storage_clone = Arc::clone(&storage);
            let handle = thread::spawn(move || {
                for j in 0..5 {
                    let mut tasks = HashMap::new();
                    let epic = crate::models::Phase::new(format!("WRITER-{}-{}", i, j));
                    tasks.insert(format!("WRITER-{}-{}", i, j), epic);
                    storage_clone.save_tasks(&tasks).unwrap();
                    thread::sleep(Duration::from_millis(2));
                }
            });
            handles.push(handle);
        }

        // Wait for all threads
        for handle in handles {
            handle.join().unwrap();
        }

        // File should still be valid
        let tasks = storage.load_tasks().unwrap();
        assert_eq!(tasks.len(), 1); // Last write wins
    }

    // ==================== Active Epic Cache Tests ====================

    #[test]
    fn test_active_epic_cached_on_second_call() {
        let (storage, _temp_dir) = create_test_storage();

        // Set active epic
        let mut tasks = HashMap::new();
        tasks.insert("TEST-1".to_string(), Phase::new("TEST-1".to_string()));
        storage.save_tasks(&tasks).unwrap();
        storage.set_active_group("TEST-1").unwrap();

        // First call - loads from file
        let active1 = storage.get_active_group().unwrap();
        assert_eq!(active1, Some("TEST-1".to_string()));

        // Modify file directly (bypass storage methods)
        let active_tag_file = storage.active_tag_file();
        fs::write(&active_tag_file, "DIFFERENT").unwrap();

        // Second call - should return cached value (not file value)
        let active2 = storage.get_active_group().unwrap();
        assert_eq!(active2, Some("TEST-1".to_string())); // Still cached

        // After cache clear - should reload from file
        storage.clear_cache();
        let active3 = storage.get_active_group().unwrap();
        assert_eq!(active3, Some("DIFFERENT".to_string())); // From file
    }

    #[test]
    fn test_cache_invalidated_on_set_active_epic() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        tasks.insert("EPIC-1".to_string(), Phase::new("EPIC-1".to_string()));
        tasks.insert("EPIC-2".to_string(), Phase::new("EPIC-2".to_string()));
        storage.save_tasks(&tasks).unwrap();

        storage.set_active_group("EPIC-1").unwrap();
        assert_eq!(
            storage.get_active_group().unwrap(),
            Some("EPIC-1".to_string())
        );

        // Change active epic - should update cache
        storage.set_active_group("EPIC-2").unwrap();
        assert_eq!(
            storage.get_active_group().unwrap(),
            Some("EPIC-2".to_string())
        );
    }

    #[test]
    fn test_cache_with_no_active_epic() {
        let (storage, _temp_dir) = create_test_storage();

        // Load when no active epic is set
        let active = storage.get_active_group().unwrap();
        assert_eq!(active, None);

        // Should cache the None value
        let active2 = storage.get_active_group().unwrap();
        assert_eq!(active2, None);
    }

    // ==================== Lazy Epic Loading Tests ====================

    #[test]
    fn test_load_single_epic_from_many() {
        let (storage, _temp_dir) = create_test_storage();

        // Create 50 epics
        let mut tasks = HashMap::new();
        for i in 0..50 {
            tasks.insert(format!("EPIC-{}", i), Phase::new(format!("EPIC-{}", i)));
        }
        storage.save_tasks(&tasks).unwrap();

        // Load a single epic by tag; only that phase is returned
        let epic = storage.load_group("EPIC-25").unwrap();
        assert_eq!(epic.name, "EPIC-25");
    }

    #[test]
    fn test_load_epic_not_found() {
        let (storage, _temp_dir) = create_test_storage();

        let tasks = HashMap::new();
        storage.save_tasks(&tasks).unwrap();

        let result = storage.load_group("NONEXISTENT");
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("not found"));
    }

    #[test]
    fn test_load_epic_matches_full_load() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        let mut epic = Phase::new("TEST-1".to_string());
        epic.add_task(crate::models::Task::new(
            "task-1".to_string(),
            "Test".to_string(),
            "Desc".to_string(),
        ));
        tasks.insert("TEST-1".to_string(), epic.clone());
        storage.save_tasks(&tasks).unwrap();

        // Load via both methods
        let epic_lazy = storage.load_group("TEST-1").unwrap();
        let tasks_full = storage.load_tasks().unwrap();
        let epic_full = tasks_full.get("TEST-1").unwrap();

        // Should be identical
        assert_eq!(epic_lazy.name, epic_full.name);
        assert_eq!(epic_lazy.tasks.len(), epic_full.tasks.len());
    }

    #[test]
    fn test_load_active_epic() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        let mut epic = Phase::new("ACTIVE-1".to_string());
        epic.add_task(crate::models::Task::new(
            "task-1".to_string(),
            "Test".to_string(),
            "Desc".to_string(),
        ));
        tasks.insert("ACTIVE-1".to_string(), epic);
        storage.save_tasks(&tasks).unwrap();
        storage.set_active_group("ACTIVE-1").unwrap();

        // Load active epic directly
        let epic = storage.load_active_group().unwrap();
        assert_eq!(epic.name, "ACTIVE-1");
        assert_eq!(epic.tasks.len(), 1);
    }

    #[test]
    fn test_load_active_epic_when_none_set() {
        let (storage, _temp_dir) = create_test_storage();

        // Should error when no active epic
        let result = storage.load_active_group();
        assert!(result.is_err());
        assert!(result
            .unwrap_err()
            .to_string()
            .contains("No active task group"));
    }

    #[test]
    fn test_update_epic_without_loading_all() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        tasks.insert("EPIC-1".to_string(), Phase::new("EPIC-1".to_string()));
        tasks.insert("EPIC-2".to_string(), Phase::new("EPIC-2".to_string()));
        storage.save_tasks(&tasks).unwrap();

        // Update only EPIC-1
        let mut epic1 = storage.load_group("EPIC-1").unwrap();
        epic1.add_task(crate::models::Task::new(
            "new-task".to_string(),
            "New".to_string(),
            "Desc".to_string(),
        ));
        storage.update_group("EPIC-1", &epic1).unwrap();

        // Verify update
        let loaded = storage.load_group("EPIC-1").unwrap();
        assert_eq!(loaded.tasks.len(), 1);

        // Verify EPIC-2 unchanged
        let epic2 = storage.load_group("EPIC-2").unwrap();
        assert_eq!(epic2.tasks.len(), 0);
    }
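
    #[test]
    fn test_clear_active_group_removes_file_and_cache() {
        // Minimal check for clear_active_group() (a sketch; uses only APIs already
        // exercised elsewhere in this module).
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        tasks.insert("EPIC-1".to_string(), Phase::new("EPIC-1".to_string()));
        storage.save_tasks(&tasks).unwrap();
        storage.set_active_group("EPIC-1").unwrap();
        assert_eq!(
            storage.get_active_group().unwrap(),
            Some("EPIC-1".to_string())
        );

        // Clearing removes the active-tag file and caches the "no active group" state
        storage.clear_active_group().unwrap();
        assert!(!storage.active_tag_file().exists());
        assert_eq!(storage.get_active_group().unwrap(), None);
    }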
}