scud/storage/mod.rs

use anyhow::{Context, Result};
use fs2::FileExt;
use std::collections::HashMap;
use std::fs::{self, File, OpenOptions};
use std::path::{Path, PathBuf};
use std::sync::RwLock;
use std::thread;
use std::time::Duration;

use crate::config::Config;
use crate::formats::{parse_scg, serialize_scg};
use crate::models::Phase;

pub struct Storage {
    project_root: PathBuf,
    /// Cache for active group to avoid repeated workflow state loads
    /// Option<Option<String>> represents: None = not cached, Some(None) = no active group, Some(Some(tag)) = cached tag
    /// Uses RwLock for thread safety (useful for tests and potential daemon mode)
    active_group_cache: RwLock<Option<Option<String>>>,
}

impl Storage {
    pub fn new(project_root: Option<PathBuf>) -> Self {
        let root = project_root.unwrap_or_else(|| std::env::current_dir().unwrap());
        Storage {
            project_root: root,
            active_group_cache: RwLock::new(None),
        }
    }

    /// Acquire an exclusive file lock with retry logic
    fn acquire_lock_with_retry(&self, file: &File, max_retries: u32) -> Result<()> {
        let mut retries = 0;
        let mut delay_ms = 10;
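        // Backoff schedule with 10 retries (as the callers below use): sleeps of
        // 10, 20, 40, 80, 160, 320, 640 ms, then capped at 1000 ms, i.e. roughly
        // 4.3 s of total waiting before giving up.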

        loop {
            match file.try_lock_exclusive() {
                Ok(_) => return Ok(()),
                Err(_) if retries < max_retries => {
                    retries += 1;
                    thread::sleep(Duration::from_millis(delay_ms));
                    delay_ms = (delay_ms * 2).min(1000); // Exponential backoff, max 1s
                }
                Err(e) => {
                    anyhow::bail!(
                        "Failed to acquire file lock after {} retries: {}",
                        max_retries,
                        e
                    )
                }
            }
        }
    }

    /// Perform a locked write operation on a file
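    ///
    /// Illustrative usage (a sketch; the path and closure body are placeholders):
    ///
    /// ```ignore
    /// storage.write_with_lock(&storage.config_file(), || Ok("# contents".to_string()))?;
    /// ```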
    fn write_with_lock<F>(&self, path: &Path, writer: F) -> Result<()>
    where
        F: FnOnce() -> Result<String>,
    {
        use std::io::Write;

        let dir = path.parent().unwrap();
        if !dir.exists() {
            fs::create_dir_all(dir)?;
        }

        // Open file for writing
        let mut file = OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .open(path)
            .with_context(|| format!("Failed to open file for writing: {}", path.display()))?;

        // Acquire lock with retry
        self.acquire_lock_with_retry(&file, 10)?;

        // Generate content and write through the locked handle
        let content = writer()?;
        file.write_all(content.as_bytes())
            .with_context(|| format!("Failed to write to {}", path.display()))?;
        file.flush()
            .with_context(|| format!("Failed to flush {}", path.display()))?;

        // Lock is automatically released when file is dropped
        Ok(())
    }

    /// Perform a locked read operation on a file
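    ///
    /// Illustrative usage (a sketch):
    ///
    /// ```ignore
    /// let content = storage.read_with_lock(&storage.tasks_file())?;
    /// ```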
    fn read_with_lock(&self, path: &Path) -> Result<String> {
        use std::io::Read;

        if !path.exists() {
            anyhow::bail!("File not found: {}", path.display());
        }

        // Open file for reading
        let mut file = OpenOptions::new()
            .read(true)
            .open(path)
            .with_context(|| format!("Failed to open file for reading: {}", path.display()))?;

        // Acquire shared lock (allows multiple readers)
        file.lock_shared()
            .with_context(|| format!("Failed to acquire read lock on {}", path.display()))?;

        // Read content through the locked handle
        let mut content = String::new();
        file.read_to_string(&mut content)
            .with_context(|| format!("Failed to read from {}", path.display()))?;

        // Lock is automatically released when file is dropped
        Ok(content)
    }

    pub fn scud_dir(&self) -> PathBuf {
        self.project_root.join(".scud")
    }

    pub fn tasks_file(&self) -> PathBuf {
        self.scud_dir().join("tasks").join("tasks.scg")
    }

    fn active_tag_file(&self) -> PathBuf {
        self.scud_dir().join("active-tag")
    }

    pub fn config_file(&self) -> PathBuf {
        self.scud_dir().join("config.toml")
    }

    pub fn docs_dir(&self) -> PathBuf {
        self.scud_dir().join("docs")
    }

    pub fn is_initialized(&self) -> bool {
        self.scud_dir().exists() && self.tasks_file().exists()
    }

    pub fn initialize(&self) -> Result<()> {
        let config = Config::default();
        self.initialize_with_config(&config)
    }

    pub fn initialize_with_config(&self, config: &Config) -> Result<()> {
        // Create .scud directory structure
        let scud_dir = self.scud_dir();
        fs::create_dir_all(scud_dir.join("tasks"))
            .context("Failed to create .scud/tasks directory")?;

        // Initialize config.toml
        let config_file = self.config_file();
        if !config_file.exists() {
            config.save(&config_file)?;
        }

        // Initialize tasks.scg with empty content
        let tasks_file = self.tasks_file();
        if !tasks_file.exists() {
            let empty_tasks: HashMap<String, Phase> = HashMap::new();
            self.save_tasks(&empty_tasks)?;
        }

        // Create docs directories
        let docs = self.docs_dir();
        fs::create_dir_all(docs.join("prd"))?;
        fs::create_dir_all(docs.join("phases"))?;
        fs::create_dir_all(docs.join("architecture"))?;
        fs::create_dir_all(docs.join("retrospectives"))?;

        // Create CLAUDE.md with agent instructions
        self.create_agent_instructions()?;

        Ok(())
    }

    pub fn load_config(&self) -> Result<Config> {
        let config_file = self.config_file();
        if !config_file.exists() {
            return Ok(Config::default());
        }
        Config::load(&config_file)
    }

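    /// Load every task group from tasks.scg, keyed by group tag.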
    pub fn load_tasks(&self) -> Result<HashMap<String, Phase>> {
        let path = self.tasks_file();
        if !path.exists() {
            anyhow::bail!("Tasks file not found: {}\nRun: scud init", path.display());
        }

        let content = self.read_with_lock(&path)?;
        self.parse_multi_phase_scg(&content)
    }

    /// Parse multi-phase SCG format (multiple phases separated by ---)
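    ///
    /// Roughly the on-disk layout looks like this (placeholders; the per-phase
    /// contents depend on `serialize_scg`):
    ///
    /// ```text
    /// <phase "alpha" as emitted by serialize_scg>
    /// ---
    ///
    /// <phase "beta" as emitted by serialize_scg>
    /// ```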
    fn parse_multi_phase_scg(&self, content: &str) -> Result<HashMap<String, Phase>> {
        let mut phases = HashMap::new();

        // Empty file returns empty map
        if content.trim().is_empty() {
            return Ok(phases);
        }

        // Split by phase separator (---)
        let sections: Vec<&str> = content.split("\n---\n").collect();

        for section in sections {
            let section = section.trim();
            if section.is_empty() {
                continue;
            }

            // Parse the phase section
            let phase = parse_scg(section).with_context(|| "Failed to parse SCG section")?;

            phases.insert(phase.name.clone(), phase);
        }

        Ok(phases)
    }

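    /// Serialize all task groups to tasks.scg, sorted by tag and separated by `---`.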
    pub fn save_tasks(&self, tasks: &HashMap<String, Phase>) -> Result<()> {
        let path = self.tasks_file();
        self.write_with_lock(&path, || {
            // Sort phases by tag for consistent output
            let mut sorted_tags: Vec<_> = tasks.keys().collect();
            sorted_tags.sort();

            let mut output = String::new();
            for (i, tag) in sorted_tags.iter().enumerate() {
                if i > 0 {
                    output.push_str("\n---\n\n");
                }
                let phase = tasks.get(*tag).unwrap();
                output.push_str(&serialize_scg(phase));
            }

            Ok(output)
        })
    }

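    /// Return the tag of the active task group, if one is set.
    /// The value is cached after the first read; see `clear_cache`.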
    pub fn get_active_group(&self) -> Result<Option<String>> {
        // Check cache first (read lock)
        {
            let cache = self.active_group_cache.read().unwrap();
            if let Some(cached) = cache.as_ref() {
                return Ok(cached.clone());
            }
        }

        // Load from active-tag file
        let active_tag_path = self.active_tag_file();
        let active = if active_tag_path.exists() {
            let content = fs::read_to_string(&active_tag_path)
                .with_context(|| format!("Failed to read {}", active_tag_path.display()))?;
            let tag = content.trim();
            if tag.is_empty() {
                None
            } else {
                Some(tag.to_string())
            }
        } else {
            None
        };

        // Store in cache
        *self.active_group_cache.write().unwrap() = Some(active.clone());

        Ok(active)
    }

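    /// Set the active task group, validating that the tag exists in the tasks file.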
    pub fn set_active_group(&self, group_tag: &str) -> Result<()> {
        let tasks = self.load_tasks()?;
        if !tasks.contains_key(group_tag) {
            anyhow::bail!("Task group '{}' not found", group_tag);
        }

        // Write to active-tag file
        let active_tag_path = self.active_tag_file();
        fs::write(&active_tag_path, group_tag)
            .with_context(|| format!("Failed to write {}", active_tag_path.display()))?;

        // Update cache
        *self.active_group_cache.write().unwrap() = Some(Some(group_tag.to_string()));

        Ok(())
    }

    /// Clear the active group cache
    /// Useful when workflow state is modified externally or for testing
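    /// (for example, after rewriting `.scud/active-tag` directly, as some tests do)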
    pub fn clear_cache(&self) {
        *self.active_group_cache.write().unwrap() = None;
    }

    /// Load a single task group by tag
    /// Parses the SCG file and extracts the requested group
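    ///
    /// Illustrative usage (a sketch; "EPIC-1" is a placeholder tag):
    ///
    /// ```ignore
    /// let group = storage.load_group("EPIC-1")?;
    /// ```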
    pub fn load_group(&self, group_tag: &str) -> Result<Phase> {
        let path = self.tasks_file();
        let content = self.read_with_lock(&path)?;

        let groups = self.parse_multi_phase_scg(&content)?;

        groups
            .get(group_tag)
            .cloned()
            .ok_or_else(|| anyhow::anyhow!("Task group '{}' not found", group_tag))
    }

    /// Load the active task group directly
    /// Convenience wrapper that combines get_active_group() and load_group() in one call
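    ///
    /// Illustrative usage (a sketch; requires an active tag set via `set_active_group`):
    ///
    /// ```ignore
    /// let group = storage.load_active_group()?;
    /// ```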
    pub fn load_active_group(&self) -> Result<Phase> {
        let active_tag = self
            .get_active_group()?
            .ok_or_else(|| anyhow::anyhow!("No active task group. Run: scud use-tag <tag>"))?;

        self.load_group(&active_tag)
    }

    /// Update a single task group atomically
    /// Holds an exclusive lock across the read-modify-write cycle to prevent races
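    ///
    /// Illustrative usage (a sketch; "EPIC-1" is a placeholder tag):
    ///
    /// ```ignore
    /// let mut group = storage.load_group("EPIC-1")?;
    /// // ...mutate `group`...
    /// storage.update_group("EPIC-1", &group)?;
    /// ```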
    pub fn update_group(&self, group_tag: &str, group: &Phase) -> Result<()> {
        use std::io::{Read, Seek, SeekFrom, Write};

        let path = self.tasks_file();

        let dir = path.parent().unwrap();
        if !dir.exists() {
            fs::create_dir_all(dir)?;
        }

        // Open file for read+write with exclusive lock held throughout
        // Note: truncate(false) is explicit - we read first, then truncate manually after
        let mut file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .truncate(false)
            .open(&path)
            .with_context(|| format!("Failed to open file: {}", path.display()))?;

        // Acquire exclusive lock with retry (held for entire operation)
        self.acquire_lock_with_retry(&file, 10)?;

        // Read current content while holding lock
        let mut content = String::new();
        file.read_to_string(&mut content)
            .with_context(|| format!("Failed to read from {}", path.display()))?;

        // Parse, modify, and serialize
        let mut groups = self.parse_multi_phase_scg(&content)?;
        groups.insert(group_tag.to_string(), group.clone());

        let mut sorted_tags: Vec<_> = groups.keys().collect();
        sorted_tags.sort();

        let mut output = String::new();
        for (i, tag) in sorted_tags.iter().enumerate() {
            if i > 0 {
                output.push_str("\n---\n\n");
            }
            let grp = groups.get(*tag).unwrap();
            output.push_str(&serialize_scg(grp));
        }

        // Truncate and write back while still holding lock
        file.seek(SeekFrom::Start(0))
            .with_context(|| "Failed to seek to beginning of file")?;
        file.set_len(0).with_context(|| "Failed to truncate file")?;
        file.write_all(output.as_bytes())
            .with_context(|| format!("Failed to write to {}", path.display()))?;
        file.flush()
            .with_context(|| format!("Failed to flush {}", path.display()))?;

        // Lock released when file is dropped
        Ok(())
    }

    pub fn read_file(&self, path: &Path) -> Result<String> {
        fs::read_to_string(path).with_context(|| format!("Failed to read file: {}", path.display()))
    }

    /// Create or update CLAUDE.md with SCUD agent instructions
    fn create_agent_instructions(&self) -> Result<()> {
        let claude_md_path = self.project_root.join("CLAUDE.md");

        let scud_instructions = r#"
## SCUD Task Management

This project uses SCUD (Sprint Cycle Unified Development) for task management.

### Session Workflow

1. **Start of session**: Run `scud warmup` to orient yourself
   - Shows current working directory and recent git history
   - Displays active tag, task counts, and any stale locks
   - Identifies the next available task

2. **Claim a task**: Use `/scud:task-next` or `scud next --claim --name "Claude"`
   - Always claim before starting work to prevent conflicts
   - Task context is stored in `.scud/current-task`

3. **Work on the task**: Implement the requirements
   - Reference task details with `/scud:task-show <id>`
   - Dependencies are automatically tracked by the DAG

4. **Commit with context**: Use `scud commit -m "message"` or `scud commit -a -m "message"`
   - Automatically prefixes commits with `[TASK-ID]`
   - Uses task title as default commit message if none provided

5. **Complete the task**: Mark done with `/scud:task-status <id> done`
   - The stop hook will prompt for task completion

### Progress Journaling

Keep a brief progress log during complex tasks:

```
## Progress Log

### Session: 2025-01-15
- Investigated auth module, found issue in token refresh
- Updated refresh logic to handle edge case
- Tests passing, ready for review
```

This helps maintain continuity across sessions and provides context for future work.

### Key Commands

- `scud warmup` - Session orientation
- `scud next` - Find next available task
- `scud show <id>` - View task details
- `scud set-status <id> <status>` - Update task status
- `scud commit` - Task-aware git commit
- `scud stats` - View completion statistics
"#;

        if claude_md_path.exists() {
            // Append to existing CLAUDE.md if SCUD section doesn't exist
            let content = fs::read_to_string(&claude_md_path)
                .with_context(|| "Failed to read existing CLAUDE.md")?;

            if !content.contains("## SCUD Task Management") {
                let mut new_content = content;
                new_content.push_str(scud_instructions);
                fs::write(&claude_md_path, new_content)
                    .with_context(|| "Failed to update CLAUDE.md")?;
            }
        } else {
            // Create new CLAUDE.md
            let content = format!("# Project Instructions\n{}", scud_instructions);
            fs::write(&claude_md_path, content).with_context(|| "Failed to create CLAUDE.md")?;
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;
    use tempfile::TempDir;

    fn create_test_storage() -> (Storage, TempDir) {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new(Some(temp_dir.path().to_path_buf()));
        storage.initialize().unwrap();
        (storage, temp_dir)
    }

    #[test]
    fn test_write_with_lock_creates_file() {
        let (storage, _temp_dir) = create_test_storage();
        let test_file = storage.scud_dir().join("test.json");

        storage
            .write_with_lock(&test_file, || Ok(r#"{"test": "data"}"#.to_string()))
            .unwrap();

        assert!(test_file.exists());
        let content = fs::read_to_string(&test_file).unwrap();
        assert_eq!(content, r#"{"test": "data"}"#);
    }

    #[test]
    fn test_read_with_lock_reads_existing_file() {
        let (storage, _temp_dir) = create_test_storage();
        let test_file = storage.scud_dir().join("test.json");

        // Create a file
        fs::write(&test_file, r#"{"test": "data"}"#).unwrap();

        // Read with lock
        let content = storage.read_with_lock(&test_file).unwrap();
        assert_eq!(content, r#"{"test": "data"}"#);
    }

    #[test]
    fn test_read_with_lock_fails_on_missing_file() {
        let (storage, _temp_dir) = create_test_storage();
        let test_file = storage.scud_dir().join("nonexistent.json");

        let result = storage.read_with_lock(&test_file);
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("File not found"));
    }

    #[test]
    fn test_save_and_load_tasks_with_locking() {
        let (storage, _temp_dir) = create_test_storage();
        let mut tasks = HashMap::new();

        let epic = crate::models::Phase::new("TEST-1".to_string());
        tasks.insert("TEST-1".to_string(), epic);

        // Save tasks
        storage.save_tasks(&tasks).unwrap();

        // Load tasks
        let loaded_tasks = storage.load_tasks().unwrap();

        assert_eq!(tasks.len(), loaded_tasks.len());
        assert!(loaded_tasks.contains_key("TEST-1"));
        assert_eq!(loaded_tasks.get("TEST-1").unwrap().name, "TEST-1");
    }

    #[test]
    fn test_concurrent_writes_dont_corrupt_data() {
        use std::sync::Arc;
        use std::thread;

        let (storage, _temp_dir) = create_test_storage();
        let storage = Arc::new(storage);
        let mut handles = vec![];

        // Spawn 10 threads that each write tasks
        for i in 0..10 {
            let storage_clone = Arc::clone(&storage);
            let handle = thread::spawn(move || {
                let mut tasks = HashMap::new();
                let epic = crate::models::Phase::new(format!("EPIC-{}", i));
                tasks.insert(format!("EPIC-{}", i), epic);

                // Each thread writes multiple times
                for _ in 0..5 {
                    storage_clone.save_tasks(&tasks).unwrap();
                    thread::sleep(Duration::from_millis(1));
                }
            });
            handles.push(handle);
        }

        // Wait for all threads to complete
        for handle in handles {
            handle.join().unwrap();
        }

        // Verify that the file still parses as valid SCG
        let tasks = storage.load_tasks().unwrap();
        // Should have the last written data (from one of the threads)
        assert_eq!(tasks.len(), 1);
    }

    #[test]
    fn test_lock_retry_on_contention() {
        use std::sync::Arc;

        let (storage, _temp_dir) = create_test_storage();
        let storage = Arc::new(storage);
        let test_file = storage.scud_dir().join("lock-test.json");

        // Create file
        storage
            .write_with_lock(&test_file, || Ok(r#"{"initial": "data"}"#.to_string()))
            .unwrap();

        // Open and lock the file
        let file = OpenOptions::new().write(true).open(&test_file).unwrap();
        file.lock_exclusive().unwrap();

        // Try to acquire lock with retry in another thread
        let storage_clone = Arc::clone(&storage);
        let test_file_clone = test_file.clone();
        let handle = thread::spawn(move || {
            // This should retry and succeed after lock release
            storage_clone.write_with_lock(&test_file_clone, || {
                Ok(r#"{"updated": "data"}"#.to_string())
            })
        });

        // Keep lock for a bit
        thread::sleep(Duration::from_millis(200));

        // Release lock
        file.unlock().unwrap();
        drop(file);

        // The write should have succeeded after retrying
        let result = handle.join().unwrap();
        assert!(result.is_ok());
    }

    // ==================== Error Handling Tests ====================

    #[test]
    fn test_load_tasks_with_malformed_json() {
        let (storage, _temp_dir) = create_test_storage();
        let tasks_file = storage.tasks_file();

        // Write malformed content that is not valid SCG
        fs::write(&tasks_file, r#"{"invalid": json here}"#).unwrap();

        // Should return error
        let result = storage.load_tasks();
        assert!(result.is_err());
    }

    #[test]
    fn test_load_tasks_with_empty_file() {
        let (storage, _temp_dir) = create_test_storage();
        let tasks_file = storage.tasks_file();

        // Write empty file
        fs::write(&tasks_file, "").unwrap();

        // Empty SCG file is valid and returns empty HashMap
        let result = storage.load_tasks();
        assert!(result.is_ok());
        assert!(result.unwrap().is_empty());
    }

    #[test]
    fn test_load_tasks_from_freshly_initialized_file() {
        let (storage, _temp_dir) = create_test_storage();
        // initialize() created an empty tasks file; no groups have been saved

        // Loading it should return an empty HashMap
        let tasks = storage.load_tasks().unwrap();
        assert_eq!(tasks.len(), 0);
    }

    #[test]
    fn test_save_tasks_creates_directory_if_missing() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new(Some(temp_dir.path().to_path_buf()));
        // Don't call initialize()

        let mut tasks = HashMap::new();
        let epic = crate::models::Phase::new("TEST-1".to_string());
        tasks.insert("TEST-1".to_string(), epic);

        // Should create directory and file
        let result = storage.save_tasks(&tasks);
        assert!(result.is_ok());

        assert!(storage.scud_dir().exists());
        assert!(storage.tasks_file().exists());
    }

    #[test]
    fn test_write_with_lock_handles_directory_creation() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new(Some(temp_dir.path().to_path_buf()));

        let nested_file = temp_dir
            .path()
            .join("deeply")
            .join("nested")
            .join("test.json");

        // Should create all parent directories
        let result = storage.write_with_lock(&nested_file, || Ok("{}".to_string()));
        assert!(result.is_ok());
        assert!(nested_file.exists());
    }

    #[test]
    fn test_load_tasks_with_invalid_structure() {
        let (storage, _temp_dir) = create_test_storage();
        let tasks_file = storage.tasks_file();

        // Write content that is well-formed JSON but not valid SCG
        fs::write(&tasks_file, r#"["not", "an", "object"]"#).unwrap();

        // Should return error
        let result = storage.load_tasks();
        assert!(result.is_err());
    }

    #[test]
    fn test_save_and_load_with_unicode_content() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        let mut epic = crate::models::Phase::new("TEST-UNICODE".to_string());

        // Add task with unicode content
        let task = crate::models::Task::new(
            "task-1".to_string(),
            "测试 Unicode 🚀".to_string(),
            "Descripción en español 日本語".to_string(),
        );
        epic.add_task(task);

        tasks.insert("TEST-UNICODE".to_string(), epic);

        // Save and load
        storage.save_tasks(&tasks).unwrap();
        let loaded_tasks = storage.load_tasks().unwrap();

        let loaded_epic = loaded_tasks.get("TEST-UNICODE").unwrap();
        let loaded_task = loaded_epic.get_task("task-1").unwrap();
        assert_eq!(loaded_task.title, "测试 Unicode 🚀");
        assert_eq!(loaded_task.description, "Descripción en español 日本語");
    }

    #[test]
    fn test_save_and_load_with_large_dataset() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();

        // Create 100 epics with 50 tasks each
        for i in 0..100 {
            let mut epic = crate::models::Phase::new(format!("EPIC-{}", i));

            for j in 0..50 {
                let task = crate::models::Task::new(
                    format!("task-{}-{}", i, j),
                    format!("Task {} of Epic {}", j, i),
                    format!("Description for task {}-{}", i, j),
                );
                epic.add_task(task);
            }

            tasks.insert(format!("EPIC-{}", i), epic);
        }

        // Save and load
        storage.save_tasks(&tasks).unwrap();
        let loaded_tasks = storage.load_tasks().unwrap();

        assert_eq!(loaded_tasks.len(), 100);
        for i in 0..100 {
            let epic = loaded_tasks.get(&format!("EPIC-{}", i)).unwrap();
            assert_eq!(epic.tasks.len(), 50);
        }
    }

    #[test]
    fn test_concurrent_read_and_write() {
        use std::sync::Arc;
        use std::thread;

        let (storage, _temp_dir) = create_test_storage();
        let storage = Arc::new(storage);

        // Initialize with some data
        let mut tasks = HashMap::new();
        let epic = crate::models::Phase::new("INITIAL".to_string());
        tasks.insert("INITIAL".to_string(), epic);
        storage.save_tasks(&tasks).unwrap();

        let mut handles = vec![];

        // Spawn 5 readers
        for _ in 0..5 {
            let storage_clone = Arc::clone(&storage);
            let handle = thread::spawn(move || {
                for _ in 0..10 {
                    let _ = storage_clone.load_tasks();
                    thread::sleep(Duration::from_millis(1));
                }
            });
            handles.push(handle);
        }

        // Spawn 2 writers
        for i in 0..2 {
            let storage_clone = Arc::clone(&storage);
            let handle = thread::spawn(move || {
                for j in 0..5 {
                    let mut tasks = HashMap::new();
                    let epic = crate::models::Phase::new(format!("WRITER-{}-{}", i, j));
                    tasks.insert(format!("WRITER-{}-{}", i, j), epic);
                    storage_clone.save_tasks(&tasks).unwrap();
                    thread::sleep(Duration::from_millis(2));
                }
            });
            handles.push(handle);
        }

        // Wait for all threads
        for handle in handles {
            handle.join().unwrap();
        }

        // File should still be valid
        let tasks = storage.load_tasks().unwrap();
        assert_eq!(tasks.len(), 1); // Last write wins
    }

    // ==================== Active Epic Cache Tests ====================

    #[test]
    fn test_active_epic_cached_on_second_call() {
        let (storage, _temp_dir) = create_test_storage();

        // Set active epic
        let mut tasks = HashMap::new();
        tasks.insert("TEST-1".to_string(), Phase::new("TEST-1".to_string()));
        storage.save_tasks(&tasks).unwrap();
        storage.set_active_group("TEST-1").unwrap();

        // First call - loads from file
        let active1 = storage.get_active_group().unwrap();
        assert_eq!(active1, Some("TEST-1".to_string()));

        // Modify file directly (bypass storage methods)
        let active_tag_file = storage.active_tag_file();
        fs::write(&active_tag_file, "DIFFERENT").unwrap();

        // Second call - should return cached value (not file value)
        let active2 = storage.get_active_group().unwrap();
        assert_eq!(active2, Some("TEST-1".to_string())); // Still cached

        // After cache clear - should reload from file
        storage.clear_cache();
        let active3 = storage.get_active_group().unwrap();
        assert_eq!(active3, Some("DIFFERENT".to_string())); // From file
    }

    #[test]
    fn test_cache_invalidated_on_set_active_epic() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        tasks.insert("EPIC-1".to_string(), Phase::new("EPIC-1".to_string()));
        tasks.insert("EPIC-2".to_string(), Phase::new("EPIC-2".to_string()));
        storage.save_tasks(&tasks).unwrap();

        storage.set_active_group("EPIC-1").unwrap();
        assert_eq!(
            storage.get_active_group().unwrap(),
            Some("EPIC-1".to_string())
        );

        // Change active epic - should update cache
        storage.set_active_group("EPIC-2").unwrap();
        assert_eq!(
            storage.get_active_group().unwrap(),
            Some("EPIC-2".to_string())
        );
    }

    #[test]
    fn test_cache_with_no_active_epic() {
        let (storage, _temp_dir) = create_test_storage();

        // Load when no active epic is set
        let active = storage.get_active_group().unwrap();
        assert_eq!(active, None);

        // Should cache the None value
        let active2 = storage.get_active_group().unwrap();
        assert_eq!(active2, None);
    }

    // ==================== Lazy Epic Loading Tests ====================

    #[test]
    fn test_load_single_epic_from_many() {
        let (storage, _temp_dir) = create_test_storage();

        // Create 50 epics
        let mut tasks = HashMap::new();
        for i in 0..50 {
            tasks.insert(format!("EPIC-{}", i), Phase::new(format!("EPIC-{}", i)));
        }
        storage.save_tasks(&tasks).unwrap();

        // Load a single epic by tag
        let epic = storage.load_group("EPIC-25").unwrap();
        assert_eq!(epic.name, "EPIC-25");
    }

    #[test]
    fn test_load_epic_not_found() {
        let (storage, _temp_dir) = create_test_storage();

        let tasks = HashMap::new();
        storage.save_tasks(&tasks).unwrap();

        let result = storage.load_group("NONEXISTENT");
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("not found"));
    }

    #[test]
    fn test_load_epic_matches_full_load() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        let mut epic = Phase::new("TEST-1".to_string());
        epic.add_task(crate::models::Task::new(
            "task-1".to_string(),
            "Test".to_string(),
            "Desc".to_string(),
        ));
        tasks.insert("TEST-1".to_string(), epic.clone());
        storage.save_tasks(&tasks).unwrap();

        // Load via both methods
        let epic_lazy = storage.load_group("TEST-1").unwrap();
        let tasks_full = storage.load_tasks().unwrap();
        let epic_full = tasks_full.get("TEST-1").unwrap();

        // Should be identical
        assert_eq!(epic_lazy.name, epic_full.name);
        assert_eq!(epic_lazy.tasks.len(), epic_full.tasks.len());
    }

    #[test]
    fn test_load_active_epic() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        let mut epic = Phase::new("ACTIVE-1".to_string());
        epic.add_task(crate::models::Task::new(
            "task-1".to_string(),
            "Test".to_string(),
            "Desc".to_string(),
        ));
        tasks.insert("ACTIVE-1".to_string(), epic);
        storage.save_tasks(&tasks).unwrap();
        storage.set_active_group("ACTIVE-1").unwrap();

        // Load active epic directly
        let epic = storage.load_active_group().unwrap();
        assert_eq!(epic.name, "ACTIVE-1");
        assert_eq!(epic.tasks.len(), 1);
    }

    #[test]
    fn test_load_active_epic_when_none_set() {
        let (storage, _temp_dir) = create_test_storage();

        // Should error when no active epic
        let result = storage.load_active_group();
        assert!(result.is_err());
        assert!(result
            .unwrap_err()
            .to_string()
            .contains("No active task group"));
    }

    #[test]
    fn test_update_epic_without_loading_all() {
        let (storage, _temp_dir) = create_test_storage();

        let mut tasks = HashMap::new();
        tasks.insert("EPIC-1".to_string(), Phase::new("EPIC-1".to_string()));
        tasks.insert("EPIC-2".to_string(), Phase::new("EPIC-2".to_string()));
        storage.save_tasks(&tasks).unwrap();

        // Update only EPIC-1
        let mut epic1 = storage.load_group("EPIC-1").unwrap();
        epic1.add_task(crate::models::Task::new(
            "new-task".to_string(),
            "New".to_string(),
            "Desc".to_string(),
        ));
        storage.update_group("EPIC-1", &epic1).unwrap();

        // Verify update
        let loaded = storage.load_group("EPIC-1").unwrap();
        assert_eq!(loaded.tasks.len(), 1);

        // Verify EPIC-2 unchanged
        let epic2 = storage.load_group("EPIC-2").unwrap();
        assert_eq!(epic2.tasks.len(), 0);
    }
}