//! aida_core/storage.rs — requirements persistence with file locking,
//! session tracking, and save-time conflict detection.
1// trace:FR-0153 | ai:claude:high
2use anyhow::{Context, Result};
3use chrono::{DateTime, Utc};
4use fs2::FileExt;
5use std::collections::HashMap;
6use std::fs::{self, File, OpenOptions};
7use std::io::{BufReader, Write};
8use std::path::{Path, PathBuf};
9use std::time::Duration;
10use uuid::Uuid;
11
12use crate::models::{Requirement, RequirementsStore};
13
/// Error type for storage operations.
///
/// NOTE(review): most methods in this module return `anyhow::Result`; this
/// enum is the typed surface for callers that need to distinguish failure
/// modes (in particular `Conflict`, which carries resolution data).
#[derive(Debug)]
pub enum StorageError {
    /// File is locked by another process
    FileLocked,
    /// Other IO error
    IoError(std::io::Error),
    /// Parse error (message text from the underlying parser)
    ParseError(String),
    /// Conflict detected during save; carries everything needed to resolve it
    Conflict(ConflictInfo),
}
26
/// Information about a conflict detected during save.
///
/// Produced by `Storage::save_with_conflict_detection` when a requirement
/// was modified both locally and externally.
#[derive(Debug, Clone)]
pub struct ConflictInfo {
    /// ID of the requirement with conflict
    pub requirement_id: Uuid,
    /// SPEC-ID for display
    pub spec_id: String,
    /// Fields that have conflicting changes
    pub conflicting_fields: Vec<FieldConflict>,
    /// The version from disk (external changes); boxed — presumably to keep
    /// the enclosing `StorageError::Conflict`/`SaveResult::Conflict`
    /// variants small, since `Requirement` looks sizable — TODO confirm
    pub disk_version: Box<Requirement>,
    /// The version we're trying to save (local changes)
    pub local_version: Box<Requirement>,
}
41
/// Describes a conflict in a specific field.
#[derive(Debug, Clone)]
pub struct FieldConflict {
    /// Name of the field
    pub field_name: String,
    /// Original value when we last loaded.
    /// NOTE: `detect_field_conflicts` has no snapshot of the original, so
    /// this is currently always populated with an empty string.
    pub original_value: String,
    /// Value on disk (external change)
    pub disk_value: String,
    /// Value we're trying to save (local change)
    pub local_value: String,
}
54
/// Result of attempting a save with conflict detection
/// (see `Storage::save_with_conflict_detection`).
#[derive(Debug)]
pub enum SaveResult {
    /// Save succeeded without conflicts
    Success,
    /// Save succeeded after auto-merging non-conflicting changes
    Merged {
        /// Number of requirements that were merged
        merged_count: usize,
    },
    /// Conflict detected - user action required; nothing was written
    Conflict(ConflictInfo),
}
68
/// Resolution strategy when a conflict is detected.
///
/// NOTE(review): the code that consumes this enum is not visible in this
/// chunk; the variant docs below restate the declared intent only.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ConflictResolution {
    /// Keep the local version, overwrite disk
    ForceLocal,
    /// Keep the disk version, discard local changes
    KeepDisk,
    /// Merge field by field (take local changes for modified fields)
    Merge,
}
79
/// Result of adding a new requirement atomically.
///
/// NOTE(review): the producing method is not visible in this chunk —
/// presumably an add-requirement variant of `update_atomically`; verify
/// against the rest of the file.
#[derive(Debug)]
pub struct AddResult {
    /// The updated store with all changes (including external)
    pub store: RequirementsStore,
    /// Number of external requirements that were merged in
    pub external_changes_merged: usize,
    /// The SPEC-ID assigned to the new requirement
    pub spec_id: String,
}
90
91impl std::fmt::Display for StorageError {
92    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
93        match self {
94            StorageError::FileLocked => write!(f, "File is locked by another user/process"),
95            StorageError::IoError(e) => write!(f, "IO error: {}", e),
96            StorageError::ParseError(s) => write!(f, "Parse error: {}", s),
97            StorageError::Conflict(info) => write!(
98                f,
99                "Conflict detected for {} ({}): {} field(s) changed externally",
100                info.spec_id,
101                info.requirement_id,
102                info.conflicting_fields.len()
103            ),
104        }
105    }
106}
107
108impl std::error::Error for StorageError {}
109
/// Information about a user session with an open requirements file.
///
/// Serialized as YAML into the `.yaml.lock` file alongside the data file;
/// sessions are expired by `LockFileInfo::remove_stale_sessions` when the
/// heartbeat stops.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct SessionInfo {
    /// Unique session ID (process-specific)
    pub session_id: String,
    /// User name/handle
    pub user_name: String,
    /// Hostname or machine identifier
    pub hostname: String,
    /// Process ID
    pub pid: u32,
    /// When this session started
    pub started_at: DateTime<Utc>,
    /// Last heartbeat timestamp; refreshed by `Storage::update_heartbeat`
    pub last_heartbeat: DateTime<Utc>,
    /// Requirement currently being edited (if any)
    pub editing_requirement: Option<EditLock>,
}
128
/// Information about a requirement being edited.
///
/// Advisory only: used to warn other sessions, not to enforce exclusivity.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct EditLock {
    /// ID of the requirement being edited
    pub requirement_id: Uuid,
    /// SPEC-ID for display
    pub spec_id: String,
    /// When edit mode started
    pub started_at: DateTime<Utc>,
}
139
/// Lock file contents tracking all active sessions.
///
/// This is the YAML payload of the `.yaml.lock` file; note the same file is
/// also used as the flock target by `Storage::acquire_write_lock`.
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)]
pub struct LockFileInfo {
    /// All active sessions (keyed by session_id)
    pub sessions: HashMap<String, SessionInfo>,
}
146
147impl LockFileInfo {
148    /// Create a new empty lock file info
149    pub fn new() -> Self {
150        Self {
151            sessions: HashMap::new(),
152        }
153    }
154
155    /// Remove stale sessions (no heartbeat in the last N seconds)
156    pub fn remove_stale_sessions(&mut self, stale_threshold_secs: i64) {
157        let now = Utc::now();
158        self.sessions.retain(|_, session| {
159            let elapsed = now.signed_duration_since(session.last_heartbeat);
160            elapsed.num_seconds() < stale_threshold_secs
161        });
162    }
163
164    /// Get sessions that have a specific requirement open for editing
165    pub fn get_editors(&self, requirement_id: Uuid) -> Vec<&SessionInfo> {
166        self.sessions
167            .values()
168            .filter(|s| {
169                s.editing_requirement
170                    .as_ref()
171                    .map(|e| e.requirement_id == requirement_id)
172                    .unwrap_or(false)
173            })
174            .collect()
175    }
176
177    /// Get all active sessions except the current one
178    pub fn get_other_sessions(&self, current_session_id: &str) -> Vec<&SessionInfo> {
179        self.sessions
180            .values()
181            .filter(|s| s.session_id != current_session_id)
182            .collect()
183    }
184}
185
186impl SessionInfo {
187    /// Check if this session's heartbeat is stale
188    pub fn is_stale(&self, threshold_secs: i64) -> bool {
189        let elapsed = Utc::now().signed_duration_since(self.last_heartbeat);
190        elapsed.num_seconds() > threshold_secs
191    }
192}
193
/// Handles saving and loading requirements from disk with file locking
/// for rudimentary multi-user support.
///
/// The lock file serves double duty: it is the flock target for
/// `acquire_write_lock`/`acquire_read_lock` AND holds the YAML session
/// registry read by `read_lock_info`.
pub struct Storage {
    // Path to the requirements data file (.yaml, or .db/.sqlite/.sqlite3 for SQLite)
    file_path: PathBuf,
    // Sibling ".yaml.lock" path derived in `new` via with_extension
    lock_file_path: PathBuf,
}
200
201impl Storage {
202    /// Creates a new Storage instance
203    pub fn new<P: AsRef<Path>>(file_path: P) -> Self {
204        let file_path = file_path.as_ref().to_path_buf();
205        let lock_file_path = file_path.with_extension("yaml.lock");
206        Self {
207            file_path,
208            lock_file_path,
209        }
210    }
211
212    /// Returns the path to the storage file
213    pub fn path(&self) -> &Path {
214        &self.file_path
215    }
216
217    /// Returns the path to the lock file
218    pub fn lock_file_path(&self) -> &Path {
219        &self.lock_file_path
220    }
221
222    /// Returns true if the storage file is a SQLite database (based on extension)
223    pub fn is_sqlite(&self) -> bool {
224        matches!(
225            self.file_path.extension().and_then(|e| e.to_str()),
226            Some("db") | Some("sqlite") | Some("sqlite3")
227        )
228    }
229
230    /// Read the current lock file info (session tracking)
231    pub fn read_lock_info(&self) -> Result<LockFileInfo> {
232        if !self.lock_file_path.exists() {
233            return Ok(LockFileInfo::new());
234        }
235
236        let content = fs::read_to_string(&self.lock_file_path)
237            .with_context(|| format!("Failed to read lock file: {:?}", self.lock_file_path))?;
238
239        // Try to parse as YAML, fall back to empty if invalid
240        Ok(serde_yaml::from_str(&content).unwrap_or_else(|_| LockFileInfo::new()))
241    }
242
243    /// Write lock file info (session tracking)
244    /// This atomically updates the lock file with current session info
245    pub fn write_lock_info(&self, info: &LockFileInfo) -> Result<()> {
246        // Create parent directories if needed
247        if let Some(parent) = self.lock_file_path.parent() {
248            fs::create_dir_all(parent)?;
249        }
250
251        let yaml = serde_yaml::to_string(info).context("Failed to serialize lock info")?;
252
253        fs::write(&self.lock_file_path, yaml)
254            .with_context(|| format!("Failed to write lock file: {:?}", self.lock_file_path))?;
255
256        Ok(())
257    }
258
259    /// Register a new session in the lock file
260    pub fn register_session(&self, session: SessionInfo) -> Result<LockFileInfo> {
261        let mut info = self.read_lock_info().unwrap_or_default();
262
263        // Clean up stale sessions (no heartbeat in 30 seconds)
264        info.remove_stale_sessions(30);
265
266        // Add/update our session
267        info.sessions.insert(session.session_id.clone(), session);
268
269        self.write_lock_info(&info)?;
270        Ok(info)
271    }
272
273    /// Update heartbeat for a session
274    pub fn update_heartbeat(
275        &self,
276        session_id: &str,
277        editing: Option<EditLock>,
278    ) -> Result<LockFileInfo> {
279        let mut info = self.read_lock_info().unwrap_or_default();
280
281        // Clean up stale sessions
282        info.remove_stale_sessions(30);
283
284        // Update our session's heartbeat
285        if let Some(session) = info.sessions.get_mut(session_id) {
286            session.last_heartbeat = Utc::now();
287            session.editing_requirement = editing;
288        }
289
290        self.write_lock_info(&info)?;
291        Ok(info)
292    }
293
294    /// Unregister a session from the lock file
295    pub fn unregister_session(&self, session_id: &str) -> Result<()> {
296        let mut info = self.read_lock_info().unwrap_or_default();
297        info.sessions.remove(session_id);
298
299        // If no sessions left, we can delete the lock file
300        if info.sessions.is_empty() {
301            let _ = fs::remove_file(&self.lock_file_path);
302        } else {
303            self.write_lock_info(&info)?;
304        }
305
306        Ok(())
307    }
308
309    /// Get current active sessions (for displaying warnings)
310    pub fn get_active_sessions(&self) -> Result<LockFileInfo> {
311        let mut info = self.read_lock_info().unwrap_or_default();
312        info.remove_stale_sessions(30);
313        Ok(info)
314    }
315
316    /// Acquire an exclusive lock on the file for writing
317    /// Returns the lock file handle which must be held during the operation
318    fn acquire_write_lock(&self) -> Result<File> {
319        // Create parent directories if needed
320        if let Some(parent) = self.lock_file_path.parent() {
321            fs::create_dir_all(parent)?;
322        }
323
324        let lock_file = OpenOptions::new()
325            .create(true)
326            .write(true)
327            .truncate(true)
328            .open(&self.lock_file_path)
329            .with_context(|| format!("Failed to create lock file: {:?}", self.lock_file_path))?;
330
331        // Try to acquire exclusive lock with timeout
332        let start = std::time::Instant::now();
333        let timeout = Duration::from_secs(5);
334
335        loop {
336            match lock_file.try_lock_exclusive() {
337                Ok(()) => return Ok(lock_file),
338                Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
339                    if start.elapsed() > timeout {
340                        anyhow::bail!(
341                            "Timeout waiting for file lock - another user may be editing: {:?}",
342                            self.file_path
343                        );
344                    }
345                    std::thread::sleep(Duration::from_millis(100));
346                }
347                Err(e) => {
348                    return Err(e).with_context(|| {
349                        format!("Failed to acquire lock on {:?}", self.lock_file_path)
350                    })
351                }
352            }
353        }
354    }
355
356    /// Acquire a shared lock on the file for reading
357    fn acquire_read_lock(&self) -> Result<Option<File>> {
358        if !self.lock_file_path.exists() {
359            return Ok(None);
360        }
361
362        let lock_file = OpenOptions::new()
363            .read(true)
364            .open(&self.lock_file_path)
365            .with_context(|| format!("Failed to open lock file: {:?}", self.lock_file_path))?;
366
367        // Try to acquire shared lock with timeout
368        let start = std::time::Instant::now();
369        let timeout = Duration::from_secs(5);
370
371        loop {
372            // Use fs2's try_lock_shared explicitly to avoid conflict with std::fs::File method
373            match fs2::FileExt::try_lock_shared(&lock_file) {
374                Ok(()) => return Ok(Some(lock_file)),
375                Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
376                    if start.elapsed() > timeout {
377                        anyhow::bail!(
378                            "Timeout waiting for file lock - another user may be editing: {:?}",
379                            self.file_path
380                        );
381                    }
382                    std::thread::sleep(Duration::from_millis(100));
383                }
384                Err(e) => {
385                    return Err(e).with_context(|| {
386                        format!("Failed to acquire lock on {:?}", self.lock_file_path)
387                    })
388                }
389            }
390        }
391    }
392
393    /// Loads requirements from file with file locking
394    /// Automatically detects file type from extension (.db/.sqlite for SQLite, otherwise YAML)
395    pub fn load(&self) -> Result<RequirementsStore> {
396        // Check if this is a SQLite database by extension
397        let is_sqlite = matches!(
398            self.file_path.extension().and_then(|e| e.to_str()),
399            Some("db") | Some("sqlite") | Some("sqlite3")
400        );
401
402        if is_sqlite {
403            // Use SQLite backend for .db files
404            return self.load_sqlite();
405        }
406
407        // Create the file if it doesn't exist
408        if !self.file_path.exists() {
409            let parent = self
410                .file_path
411                .parent()
412                .context("Failed to get parent directory")?;
413            fs::create_dir_all(parent)?;
414            let default_store = RequirementsStore::new();
415            self.save(&default_store)?;
416            return Ok(default_store);
417        }
418
419        // Acquire shared lock for reading
420        let _lock = self.acquire_read_lock()?;
421
422        // Open and read the file
423        let file = File::open(&self.file_path)
424            .with_context(|| format!("Failed to open file: {:?}", self.file_path))?;
425        let reader = BufReader::new(file);
426
427        // Parse the YAML content
428        let mut store: crate::models::RequirementsStore = serde_yaml::from_reader(reader)
429            .map_err(|e| {
430                eprintln!("YAML parse error details: {}", e);
431                e
432            })
433            .with_context(|| format!("Failed to parse YAML from {:?}", self.file_path))?;
434
435        // Migrate existing features to numbered format if needed
436        store.migrate_features();
437
438        // Assign SPEC-IDs to requirements that don't have them
439        let had_missing_spec_ids = store.requirements.iter().any(|r| r.spec_id.is_none());
440        store.assign_spec_ids();
441
442        // Migrate existing users to have $USER-XXX spec_ids
443        let had_missing_user_spec_ids = store.users.iter().any(|u| u.spec_id.is_none());
444        store.migrate_users_to_spec_ids();
445
446        // Add any missing built-in type definitions
447        let had_missing_types = store.migrate_type_definitions();
448
449        // Add any missing built-in types to id_config.requirement_types
450        // trace:FR-0309 | ai:claude:high
451        let had_missing_id_config_types = store.migrate_id_config_types();
452
453        // Repair any duplicate SPEC-IDs (auto-fix corruption)
454        let repaired_duplicates = store.repair_duplicate_spec_ids();
455
456        // Drop read lock before acquiring write lock for migration save
457        drop(_lock);
458
459        // Save back if we assigned any SPEC-IDs, added missing types, or repaired duplicates
460        if had_missing_spec_ids
461            || had_missing_user_spec_ids
462            || had_missing_types
463            || had_missing_id_config_types
464            || repaired_duplicates > 0
465        {
466            self.save(&store)?;
467        }
468
469        // Validate SPEC-ID uniqueness (should pass after repair, but let's be safe)
470        store.validate_unique_spec_ids()?;
471
472        Ok(store)
473    }
474
475    /// Loads requirements from a SQLite database file
476    fn load_sqlite(&self) -> Result<RequirementsStore> {
477        use crate::db::{DatabaseBackend, SqliteBackend};
478
479        let backend = SqliteBackend::new(&self.file_path)?;
480        let mut store = backend.load()?;
481
482        // Run migrations just like YAML loading
483        store.migrate_features();
484        let had_missing_spec_ids = store.requirements.iter().any(|r| r.spec_id.is_none());
485        store.assign_spec_ids();
486        let had_missing_user_spec_ids = store.users.iter().any(|u| u.spec_id.is_none());
487        store.migrate_users_to_spec_ids();
488        let had_missing_types = store.migrate_type_definitions();
489        // trace:FR-0309 | ai:claude:high
490        let had_missing_id_config_types = store.migrate_id_config_types();
491        let repaired_duplicates = store.repair_duplicate_spec_ids();
492
493        // Save back if we made any changes
494        if had_missing_spec_ids
495            || had_missing_user_spec_ids
496            || had_missing_types
497            || had_missing_id_config_types
498            || repaired_duplicates > 0
499        {
500            backend.save(&store)?;
501        }
502
503        store.validate_unique_spec_ids()?;
504        Ok(store)
505    }
506
507    /// Saves requirements to file with file locking
508    /// Automatically detects file type from extension (.db/.sqlite for SQLite, otherwise YAML)
509    pub fn save(&self, store: &RequirementsStore) -> Result<()> {
510        // Check if this is a SQLite database by extension
511        let is_sqlite = matches!(
512            self.file_path.extension().and_then(|e| e.to_str()),
513            Some("db") | Some("sqlite") | Some("sqlite3")
514        );
515
516        if is_sqlite {
517            return self.save_sqlite(store);
518        }
519
520        // Create parent directories if they don't exist
521        if let Some(parent) = self.file_path.parent() {
522            fs::create_dir_all(parent)?;
523        }
524
525        // Acquire exclusive lock for writing
526        let mut lock_file = self.acquire_write_lock()?;
527
528        // Write lock holder info (optional, for debugging)
529        let _ = writeln!(
530            lock_file,
531            "Locked by PID {} at {}",
532            std::process::id(),
533            chrono::Utc::now().to_rfc3339()
534        );
535
536        // Serialize and write to file
537        let yaml = serde_yaml::to_string(store)?;
538        fs::write(&self.file_path, yaml)?;
539
540        // Lock is automatically released when lock_file is dropped
541        Ok(())
542    }
543
544    /// Saves requirements to a SQLite database file
545    fn save_sqlite(&self, store: &RequirementsStore) -> Result<()> {
546        use crate::db::{DatabaseBackend, SqliteBackend};
547
548        let backend = SqliteBackend::new(&self.file_path)?;
549        backend.save(store)?;
550        Ok(())
551    }
552
553    /// Reload file from disk, detecting external changes
554    /// Returns (store, changed) where changed indicates if the file was modified externally
555    pub fn reload_if_changed(
556        &self,
557        current_store: &RequirementsStore,
558    ) -> Result<(RequirementsStore, bool)> {
559        let new_store = self.load()?;
560
561        // Simple check: compare requirement counts and last modification
562        // For more sophisticated detection, we could compare hashes
563        let changed = new_store.requirements.len() != current_store.requirements.len()
564            || new_store.users.len() != current_store.users.len()
565            || new_store.features.len() != current_store.features.len();
566
567        Ok((new_store, changed))
568    }
569
570    /// Perform an atomic update operation with proper locking
571    /// This reloads the file, applies changes, and saves atomically
572    pub fn update_atomically<F>(&self, update_fn: F) -> Result<RequirementsStore>
573    where
574        F: FnOnce(&mut RequirementsStore),
575    {
576        // Check if this is a SQLite database by extension
577        let is_sqlite = matches!(
578            self.file_path.extension().and_then(|e| e.to_str()),
579            Some("db") | Some("sqlite") | Some("sqlite3")
580        );
581
582        // For SQLite, use the existing load/save methods which handle SQLite properly
583        if is_sqlite {
584            let mut store = self.load()?;
585            update_fn(&mut store);
586            self.save(&store)?;
587            return Ok(store);
588        }
589
590        // YAML path: Acquire exclusive lock
591        let mut lock_file = self.acquire_write_lock()?;
592
593        // Write lock holder info
594        let _ = writeln!(
595            lock_file,
596            "Locked by PID {} at {}",
597            std::process::id(),
598            chrono::Utc::now().to_rfc3339()
599        );
600
601        // Load latest version from disk (YAML)
602        let file = File::open(&self.file_path)
603            .with_context(|| format!("Failed to open file: {:?}", self.file_path))?;
604        let reader = BufReader::new(file);
605        let mut store: RequirementsStore = serde_yaml::from_reader(reader)
606            .with_context(|| format!("Failed to parse YAML from {:?}", self.file_path))?;
607
608        // Apply the update
609        update_fn(&mut store);
610
611        // Save back
612        let yaml = serde_yaml::to_string(&store)?;
613        fs::write(&self.file_path, yaml)?;
614
615        // Lock is released when lock_file is dropped
616        Ok(store)
617    }
618
    // trace:FR-0153 | ai:claude:high
    /// Save with conflict detection for a specific requirement
    ///
    /// This method:
    /// 1. Reloads the file from disk
    /// 2. Checks if the requirement was modified externally (based on modified_at timestamp)
    /// 3. If no external changes, applies the update
    /// 4. If external changes exist, performs field-level conflict detection
    /// 5. Auto-merges non-conflicting changes, returns conflict info for conflicts
    ///
    /// # Arguments
    /// * `local_store` - The local copy of the store with pending changes
    /// * `original_timestamps` - Map of requirement IDs to their modified_at timestamps when loaded
    /// * `modified_requirement_ids` - Set of requirement IDs that were modified locally
    ///
    /// # Returns
    /// * `SaveResult::Success` - No conflicts, save completed
    /// * `SaveResult::Merged` - External changes merged, save completed
    /// * `SaveResult::Conflict` - Conflict detected, user action required
    ///
    /// NOTE(review): the method returns on the FIRST conflicting requirement;
    /// in that case nothing is written to disk, and any merges applied to
    /// earlier IDs in the same call are discarded along with `final_store`.
    pub fn save_with_conflict_detection(
        &self,
        local_store: &RequirementsStore,
        original_timestamps: &HashMap<Uuid, DateTime<Utc>>,
        modified_requirement_ids: &[Uuid],
    ) -> Result<SaveResult> {
        // Check if this is a SQLite database by extension
        let is_sqlite = matches!(
            self.file_path.extension().and_then(|e| e.to_str()),
            Some("db") | Some("sqlite") | Some("sqlite3")
        );

        // For SQLite, use standard save - SQLite handles concurrency natively
        if is_sqlite {
            self.save(local_store)?;
            return Ok(SaveResult::Success);
        }

        // Acquire exclusive lock
        let mut lock_file = self.acquire_write_lock()?;

        // Write lock holder info
        let _ = writeln!(
            lock_file,
            "Locked by PID {} at {}",
            std::process::id(),
            chrono::Utc::now().to_rfc3339()
        );

        // Load latest version from disk (YAML path)
        let disk_store = if self.file_path.exists() {
            let file = File::open(&self.file_path)
                .with_context(|| format!("Failed to open file: {:?}", self.file_path))?;
            let reader = BufReader::new(file);
            serde_yaml::from_reader(reader)
                .with_context(|| format!("Failed to parse YAML from {:?}", self.file_path))?
        } else {
            RequirementsStore::new()
        };

        // Check for conflicts in modified requirements.
        // `final_store` starts as the disk state so external additions and
        // edits we didn't touch are preserved automatically.
        let mut merged_count = 0;
        let mut final_store = disk_store.clone();

        for &req_id in modified_requirement_ids {
            let local_req = local_store.requirements.iter().find(|r| r.id == req_id);
            let disk_req = disk_store.requirements.iter().find(|r| r.id == req_id);
            let original_timestamp = original_timestamps.get(&req_id);

            match (local_req, disk_req, original_timestamp) {
                // New requirement (not on disk yet)
                (Some(local), None, _) => {
                    final_store.requirements.push(local.clone());
                }

                // Requirement exists on disk - check for conflicts
                (Some(local), Some(disk), Some(&orig_ts)) => {
                    // Check if disk version was modified after we loaded
                    if disk.modified_at > orig_ts {
                        // External modification detected - check for field conflicts
                        let conflicts = Self::detect_field_conflicts(local, disk, &orig_ts);

                        if !conflicts.is_empty() {
                            // Real conflict - fields we want to modify were also modified externally
                            // (early return: nothing is persisted for this call)
                            return Ok(SaveResult::Conflict(ConflictInfo {
                                requirement_id: req_id,
                                spec_id: disk.spec_id.clone().unwrap_or_else(|| req_id.to_string()),
                                conflicting_fields: conflicts,
                                disk_version: Box::new(disk.clone()),
                                local_version: Box::new(local.clone()),
                            }));
                        }

                        // No real conflicts - merge: take our changes + disk's other changes
                        let merged = Self::merge_requirement(local, disk);
                        if let Some(idx) =
                            final_store.requirements.iter().position(|r| r.id == req_id)
                        {
                            final_store.requirements[idx] = merged;
                        }
                        merged_count += 1;
                    } else {
                        // No external changes - just use our version
                        if let Some(idx) =
                            final_store.requirements.iter().position(|r| r.id == req_id)
                        {
                            final_store.requirements[idx] = local.clone();
                        }
                    }
                }

                // Requirement exists on disk but we don't have original timestamp
                // This is a fallback - just overwrite (legacy behavior)
                (Some(local), Some(_disk), None) => {
                    if let Some(idx) = final_store.requirements.iter().position(|r| r.id == req_id)
                    {
                        final_store.requirements[idx] = local.clone();
                    }
                }

                // Local deletion (requirement exists on disk but not in local)
                (None, Some(_), _) => {
                    // Keep deletion - remove from final store
                    final_store.requirements.retain(|r| r.id != req_id);
                }

                // Already deleted on both sides
                (None, None, _) => {}
            }
        }

        // Copy over non-requirement changes from local_store
        // (users, features, id_config, etc. - these don't have per-item conflict detection yet)
        // NOTE(review): this is last-writer-wins for every collection below —
        // external edits to users/features/etc. are overwritten by our copy.
        final_store.name = local_store.name.clone();
        final_store.title = local_store.title.clone();
        final_store.description = local_store.description.clone();
        final_store.users = local_store.users.clone();
        final_store.id_config = local_store.id_config.clone();
        final_store.features = local_store.features.clone();
        final_store.relationship_definitions = local_store.relationship_definitions.clone();
        final_store.reaction_definitions = local_store.reaction_definitions.clone();
        final_store.type_definitions = local_store.type_definitions.clone();
        final_store.ai_prompts = local_store.ai_prompts.clone();
        final_store.allowed_prefixes = local_store.allowed_prefixes.clone();
        final_store.restrict_prefixes = local_store.restrict_prefixes;

        // Save the merged/updated store
        let yaml = serde_yaml::to_string(&final_store)?;
        fs::write(&self.file_path, yaml)?;

        if merged_count > 0 {
            Ok(SaveResult::Merged { merged_count })
        } else {
            Ok(SaveResult::Success)
        }
    }
774
775    /// Detect field-level conflicts between local and disk versions
776    /// Returns list of fields that were modified both locally and externally
777    fn detect_field_conflicts(
778        local: &Requirement,
779        disk: &Requirement,
780        _original_timestamp: &DateTime<Utc>,
781    ) -> Vec<FieldConflict> {
782        let mut conflicts = Vec::new();
783
784        // We consider a conflict if:
785        // 1. Local changed a field from its original value
786        // 2. Disk also changed that same field from its original value
787        // 3. The final values are different
788        //
789        // Since we don't have the original values stored separately,
790        // we compare local vs disk and flag as conflict if different
791        // This is a simpler but slightly more conservative approach
792
793        // Compare key fields
794        if local.title != disk.title {
795            conflicts.push(FieldConflict {
796                field_name: "title".to_string(),
797                original_value: String::new(), // Unknown without snapshot
798                disk_value: disk.title.clone(),
799                local_value: local.title.clone(),
800            });
801        }
802
803        if local.description != disk.description {
804            conflicts.push(FieldConflict {
805                field_name: "description".to_string(),
806                original_value: String::new(),
807                disk_value: disk.description.clone(),
808                local_value: local.description.clone(),
809            });
810        }
811
812        if local.status != disk.status {
813            conflicts.push(FieldConflict {
814                field_name: "status".to_string(),
815                original_value: String::new(),
816                disk_value: disk.status.to_string(),
817                local_value: local.status.to_string(),
818            });
819        }
820
821        if local.priority != disk.priority {
822            conflicts.push(FieldConflict {
823                field_name: "priority".to_string(),
824                original_value: String::new(),
825                disk_value: disk.priority.to_string(),
826                local_value: local.priority.to_string(),
827            });
828        }
829
830        if local.owner != disk.owner {
831            conflicts.push(FieldConflict {
832                field_name: "owner".to_string(),
833                original_value: String::new(),
834                disk_value: disk.owner.clone(),
835                local_value: local.owner.clone(),
836            });
837        }
838
839        if local.feature != disk.feature {
840            conflicts.push(FieldConflict {
841                field_name: "feature".to_string(),
842                original_value: String::new(),
843                disk_value: disk.feature.clone(),
844                local_value: local.feature.clone(),
845            });
846        }
847
848        if local.req_type != disk.req_type {
849            conflicts.push(FieldConflict {
850                field_name: "type".to_string(),
851                original_value: String::new(),
852                disk_value: disk.req_type.to_string(),
853                local_value: local.req_type.to_string(),
854            });
855        }
856
857        if local.tags != disk.tags {
858            conflicts.push(FieldConflict {
859                field_name: "tags".to_string(),
860                original_value: String::new(),
861                disk_value: disk.tags.iter().cloned().collect::<Vec<_>>().join(", "),
862                local_value: local.tags.iter().cloned().collect::<Vec<_>>().join(", "),
863            });
864        }
865
866        conflicts
867    }
868
869    /// Merge two versions of a requirement
870    /// Takes local changes and merges with disk version
871    fn merge_requirement(local: &Requirement, disk: &Requirement) -> Requirement {
872        // Start with local (our changes)
873        let mut merged = local.clone();
874
875        // Merge comments: include all comments from both versions
876        // Use a set to deduplicate by comment ID
877        let mut comment_ids: std::collections::HashSet<Uuid> =
878            merged.comments.iter().map(|c| c.id).collect();
879        for comment in &disk.comments {
880            if comment_ids.insert(comment.id) {
881                merged.comments.push(comment.clone());
882            }
883        }
884        // Sort comments by created_at
885        merged.comments.sort_by_key(|c| c.created_at);
886
887        // Merge history: include all history entries from both versions
888        let mut history_ids: std::collections::HashSet<Uuid> =
889            merged.history.iter().map(|h| h.id).collect();
890        for entry in &disk.history {
891            if history_ids.insert(entry.id) {
892                merged.history.push(entry.clone());
893            }
894        }
895        // Sort history by timestamp
896        merged.history.sort_by_key(|h| h.timestamp);
897
898        // Merge relationships: include all from both (dedupe by target_id + rel_type)
899        let existing_rels: std::collections::HashSet<_> = merged
900            .relationships
901            .iter()
902            .map(|r| (r.target_id, r.rel_type.clone()))
903            .collect();
904        for rel in &disk.relationships {
905            if !existing_rels.contains(&(rel.target_id, rel.rel_type.clone())) {
906                merged.relationships.push(rel.clone());
907            }
908        }
909
910        // Merge URLs: include all from both (dedupe by URL)
911        let existing_urls: std::collections::HashSet<_> =
912            merged.urls.iter().map(|u| u.url.clone()).collect();
913        for url in &disk.urls {
914            if !existing_urls.contains(&url.url) {
915                merged.urls.push(url.clone());
916            }
917        }
918
919        // Keep the later modified_at timestamp
920        if disk.modified_at > merged.modified_at {
921            merged.modified_at = disk.modified_at;
922        }
923
924        // If disk has AI evaluation and we don't, take it
925        if merged.ai_evaluation.is_none() && disk.ai_evaluation.is_some() {
926            merged.ai_evaluation = disk.ai_evaluation.clone();
927        }
928
929        merged
930    }
931
932    /// Force save with a specific conflict resolution strategy
933    pub fn save_with_resolution(
934        &self,
935        local_store: &RequirementsStore,
936        requirement_id: Uuid,
937        resolution: ConflictResolution,
938    ) -> Result<RequirementsStore> {
939        // Acquire exclusive lock
940        let mut lock_file = self.acquire_write_lock()?;
941
942        let _ = writeln!(
943            lock_file,
944            "Locked by PID {} at {}",
945            std::process::id(),
946            chrono::Utc::now().to_rfc3339()
947        );
948
949        // Load disk version
950        let mut disk_store: RequirementsStore = if self.file_path.exists() {
951            let file = File::open(&self.file_path)?;
952            let reader = BufReader::new(file);
953            serde_yaml::from_reader(reader)?
954        } else {
955            RequirementsStore::new()
956        };
957
958        match resolution {
959            ConflictResolution::ForceLocal => {
960                // Replace disk requirement with local version
961                if let Some(local_req) = local_store
962                    .requirements
963                    .iter()
964                    .find(|r| r.id == requirement_id)
965                {
966                    if let Some(idx) = disk_store
967                        .requirements
968                        .iter()
969                        .position(|r| r.id == requirement_id)
970                    {
971                        disk_store.requirements[idx] = local_req.clone();
972                    } else {
973                        disk_store.requirements.push(local_req.clone());
974                    }
975                }
976            }
977            ConflictResolution::KeepDisk => {
978                // Do nothing - disk version is already in disk_store
979            }
980            ConflictResolution::Merge => {
981                // Merge local changes into disk version
982                if let Some(local_req) = local_store
983                    .requirements
984                    .iter()
985                    .find(|r| r.id == requirement_id)
986                {
987                    if let Some(disk_req) = disk_store
988                        .requirements
989                        .iter()
990                        .find(|r| r.id == requirement_id)
991                    {
992                        let merged = Self::merge_requirement(local_req, disk_req);
993                        if let Some(idx) = disk_store
994                            .requirements
995                            .iter()
996                            .position(|r| r.id == requirement_id)
997                        {
998                            disk_store.requirements[idx] = merged;
999                        }
1000                    } else {
1001                        disk_store.requirements.push(local_req.clone());
1002                    }
1003                }
1004            }
1005        }
1006
1007        // Save the updated store
1008        let yaml = serde_yaml::to_string(&disk_store)?;
1009        fs::write(&self.file_path, yaml)?;
1010
1011        Ok(disk_store)
1012    }
1013
1014    /// Get a snapshot of requirement timestamps for conflict detection
1015    pub fn get_requirement_timestamps(store: &RequirementsStore) -> HashMap<Uuid, DateTime<Utc>> {
1016        store
1017            .requirements
1018            .iter()
1019            .map(|r| (r.id, r.modified_at))
1020            .collect()
1021    }
1022
    /// Atomically add a new requirement with reload-before-save
    ///
    /// This method:
    /// 1. Acquires exclusive lock
    /// 2. Reloads fresh data from disk
    /// 3. Merges local non-requirement changes (features, users, config, etc.)
    /// 4. Generates a new SPEC-ID based on fresh state
    /// 5. Adds the new requirement
    /// 6. Saves to disk
    /// 7. Returns updated store with all changes
    ///
    /// This ensures:
    /// - No duplicate SPEC-IDs even with concurrent modifications
    /// - External requirement additions are preserved
    /// - Local config/feature/user changes are applied
    ///
    /// # Arguments
    /// * `local_store` - The local store with pending config/feature changes
    /// * `new_req` - The new requirement to add (without SPEC-ID yet)
    /// * `feature_prefix` - Optional feature prefix for ID generation
    /// * `type_prefix` - Optional type prefix for ID generation
    ///
    /// # Returns
    /// * `Ok(AddResult)` - Contains updated store, merge count, and assigned SPEC-ID
    // trace:FR-0183 | ai:claude:high
    pub fn add_requirement_atomic(
        &self,
        local_store: &RequirementsStore,
        new_req: Requirement,
        feature_prefix: Option<&str>,
        type_prefix: Option<&str>,
    ) -> Result<AddResult> {
        // Use appropriate backend based on file extension
        if self.is_sqlite() {
            return self.add_requirement_atomic_sqlite(
                local_store,
                new_req,
                feature_prefix,
                type_prefix,
            );
        }

        // YAML implementation follows
        // Acquire exclusive lock
        let mut lock_file = self.acquire_write_lock()?;

        // Write a PID/timestamp marker into the lock file for diagnostics;
        // this is best-effort, so the write result is deliberately ignored.
        let _ = writeln!(
            lock_file,
            "Locked by PID {} at {}",
            std::process::id(),
            chrono::Utc::now().to_rfc3339()
        );

        // Load fresh data from disk; a missing file means a brand-new store.
        let mut disk_store: RequirementsStore = if self.file_path.exists() {
            let file = File::open(&self.file_path)?;
            let reader = BufReader::new(file);
            serde_yaml::from_reader(reader)?
        } else {
            RequirementsStore::new()
        };

        // Count external requirement additions (requirements in disk but not in local)
        // so the caller can report how many concurrent changes were absorbed.
        let local_req_ids: std::collections::HashSet<Uuid> =
            local_store.requirements.iter().map(|r| r.id).collect();
        let external_changes_merged = disk_store
            .requirements
            .iter()
            .filter(|r| !local_req_ids.contains(&r.id))
            .count();

        // Apply local non-requirement changes to the fresh disk store
        // (users, features, config, etc. - these are simpler to just overwrite)
        disk_store.name = local_store.name.clone();
        disk_store.title = local_store.title.clone();
        disk_store.description = local_store.description.clone();
        disk_store.users = local_store.users.clone();
        disk_store.id_config = local_store.id_config.clone();
        disk_store.features = local_store.features.clone();
        disk_store.relationship_definitions = local_store.relationship_definitions.clone();
        disk_store.reaction_definitions = local_store.reaction_definitions.clone();
        disk_store.type_definitions = local_store.type_definitions.clone();
        disk_store.ai_prompts = local_store.ai_prompts.clone();
        disk_store.allowed_prefixes = local_store.allowed_prefixes.clone();
        disk_store.restrict_prefixes = local_store.restrict_prefixes;

        // Generate SPEC-ID using the fresh disk store state (which has all existing IDs)
        // This ensures we don't create duplicates
        disk_store.add_requirement_with_id(new_req, feature_prefix, type_prefix);

        // Get the SPEC-ID from the added requirement (last one in store)
        // NOTE(review): assumes add_requirement_with_id appends at the end of
        // the requirements vec - confirm if its insertion order ever changes.
        let spec_id = disk_store
            .requirements
            .last()
            .and_then(|r| r.spec_id.clone())
            .unwrap_or_default();

        // Save the updated store. On serialization failure, log which string
        // fields contain unexpected control characters, then propagate.
        let yaml = serde_yaml::to_string(&disk_store).map_err(|e| {
            // Log which field might contain control characters for debugging
            let check_ctrl = |s: &str, name: &str| {
                for (i, c) in s.chars().enumerate() {
                    // Newline, tab and CR are acceptable; anything else is suspect.
                    if c.is_control() && c != '\n' && c != '\t' && c != '\r' {
                        eprintln!(
                            "Control char in {}: position {}, char code {}",
                            name, i, c as u32
                        );
                    }
                }
            };
            check_ctrl(&disk_store.name, "store.name");
            check_ctrl(&disk_store.title, "store.title");
            check_ctrl(&disk_store.description, "store.description");
            if let Some(req) = disk_store.requirements.last() {
                check_ctrl(&req.title, "new_req.title");
                check_ctrl(&req.description, "new_req.description");
                check_ctrl(&req.owner, "new_req.owner");
                check_ctrl(&req.feature, "new_req.feature");
                if let Some(ref created_by) = req.created_by {
                    check_ctrl(created_by, "new_req.created_by");
                }
            }
            e
        })?;
        fs::write(&self.file_path, yaml)?;

        Ok(AddResult {
            store: disk_store,
            external_changes_merged,
            spec_id,
        })
    }
1155
1156    /// SQLite implementation of add_requirement_atomic
1157    /// Uses JSON serialization (via SQLite backend) instead of YAML
1158    fn add_requirement_atomic_sqlite(
1159        &self,
1160        local_store: &RequirementsStore,
1161        new_req: Requirement,
1162        feature_prefix: Option<&str>,
1163        type_prefix: Option<&str>,
1164    ) -> Result<AddResult> {
1165        use crate::db::{DatabaseBackend, SqliteBackend};
1166
1167        let backend = SqliteBackend::new(&self.file_path)?;
1168
1169        // Load fresh data from database
1170        let mut disk_store = backend.load()?;
1171
1172        // Count external requirement additions (requirements in disk but not in local)
1173        let local_req_ids: std::collections::HashSet<Uuid> =
1174            local_store.requirements.iter().map(|r| r.id).collect();
1175        let external_changes_merged = disk_store
1176            .requirements
1177            .iter()
1178            .filter(|r| !local_req_ids.contains(&r.id))
1179            .count();
1180
1181        // Apply local non-requirement changes to the fresh disk store
1182        // (users, features, config, etc. - these are simpler to just overwrite)
1183        disk_store.name = local_store.name.clone();
1184        disk_store.title = local_store.title.clone();
1185        disk_store.description = local_store.description.clone();
1186        disk_store.users = local_store.users.clone();
1187        disk_store.id_config = local_store.id_config.clone();
1188        disk_store.features = local_store.features.clone();
1189        disk_store.relationship_definitions = local_store.relationship_definitions.clone();
1190        disk_store.reaction_definitions = local_store.reaction_definitions.clone();
1191        disk_store.type_definitions = local_store.type_definitions.clone();
1192        disk_store.ai_prompts = local_store.ai_prompts.clone();
1193        disk_store.allowed_prefixes = local_store.allowed_prefixes.clone();
1194        disk_store.restrict_prefixes = local_store.restrict_prefixes;
1195
1196        // Generate SPEC-ID using the fresh disk store state (which has all existing IDs)
1197        // This ensures we don't create duplicates
1198        disk_store.add_requirement_with_id(new_req, feature_prefix, type_prefix);
1199
1200        // Get the SPEC-ID from the added requirement (last one in store)
1201        let spec_id = disk_store
1202            .requirements
1203            .last()
1204            .and_then(|r| r.spec_id.clone())
1205            .unwrap_or_default();
1206
1207        // Save the updated store to SQLite
1208        backend.save(&disk_store)?;
1209
1210        Ok(AddResult {
1211            store: disk_store,
1212            external_changes_merged,
1213            spec_id,
1214        })
1215    }
1216
1217    /// Returns the directory for storing attachments for a given spec_id
1218    /// Creates the directory if it doesn't exist
1219    pub fn get_attachments_dir(&self, spec_id: &str) -> Result<PathBuf> {
1220        let parent = self.file_path.parent().unwrap_or(Path::new("."));
1221        let attachments_dir = parent.join("attachments").join(spec_id);
1222
1223        if !attachments_dir.exists() {
1224            fs::create_dir_all(&attachments_dir).with_context(|| {
1225                format!(
1226                    "Failed to create attachments directory: {:?}",
1227                    attachments_dir
1228                )
1229            })?;
1230        }
1231
1232        Ok(attachments_dir)
1233    }
1234
1235    /// Copies a file to the attachments directory for a requirement
1236    /// Returns the relative path to the stored file and the file size
1237    pub fn store_attachment_file(
1238        &self,
1239        spec_id: &str,
1240        source_path: &Path,
1241    ) -> Result<(String, u64)> {
1242        let attachments_dir = self.get_attachments_dir(spec_id)?;
1243
1244        // Get the filename from the source path
1245        let filename = source_path
1246            .file_name()
1247            .ok_or_else(|| anyhow::anyhow!("Invalid source path: no filename"))?
1248            .to_string_lossy()
1249            .to_string();
1250
1251        // Handle potential filename conflicts by adding a suffix
1252        let dest_path = {
1253            let initial_path = attachments_dir.join(&filename);
1254            if !initial_path.exists() {
1255                initial_path
1256            } else {
1257                // Find a unique filename
1258                let stem = source_path
1259                    .file_stem()
1260                    .map(|s| s.to_string_lossy().to_string())
1261                    .unwrap_or_else(|| "file".to_string());
1262                let ext = source_path
1263                    .extension()
1264                    .map(|s| format!(".{}", s.to_string_lossy()))
1265                    .unwrap_or_default();
1266
1267                let mut counter = 1;
1268                loop {
1269                    let new_name = format!("{}_{}{}", stem, counter, ext);
1270                    let new_path = attachments_dir.join(&new_name);
1271                    if !new_path.exists() {
1272                        break new_path;
1273                    }
1274                    counter += 1;
1275                }
1276            }
1277        };
1278
1279        // Copy the file
1280        fs::copy(source_path, &dest_path)
1281            .with_context(|| format!("Failed to copy file to {:?}", dest_path))?;
1282
1283        // Get file size
1284        let metadata = fs::metadata(&dest_path)
1285            .with_context(|| format!("Failed to read file metadata: {:?}", dest_path))?;
1286        let size = metadata.len();
1287
1288        // Build relative path
1289        let rel_path = format!(
1290            "attachments/{}/{}",
1291            spec_id,
1292            dest_path.file_name().unwrap().to_string_lossy()
1293        );
1294
1295        Ok((rel_path, size))
1296    }
1297
1298    /// Stores attachment data from bytes (for drag-drop where path isn't available)
1299    pub fn store_attachment_bytes(
1300        &self,
1301        spec_id: &str,
1302        filename: &str,
1303        data: &[u8],
1304    ) -> Result<(String, u64)> {
1305        let attachments_dir = self.get_attachments_dir(spec_id)?;
1306
1307        // Handle potential filename conflicts
1308        let dest_path = {
1309            let initial_path = attachments_dir.join(filename);
1310            if !initial_path.exists() {
1311                initial_path
1312            } else {
1313                // Find a unique filename
1314                let path = Path::new(filename);
1315                let stem = path
1316                    .file_stem()
1317                    .map(|s| s.to_string_lossy().to_string())
1318                    .unwrap_or_else(|| "file".to_string());
1319                let ext = path
1320                    .extension()
1321                    .map(|s| format!(".{}", s.to_string_lossy()))
1322                    .unwrap_or_default();
1323
1324                let mut counter = 1;
1325                loop {
1326                    let new_name = format!("{}_{}{}", stem, counter, ext);
1327                    let new_path = attachments_dir.join(&new_name);
1328                    if !new_path.exists() {
1329                        break new_path;
1330                    }
1331                    counter += 1;
1332                }
1333            }
1334        };
1335
1336        // Write the file
1337        fs::write(&dest_path, data)
1338            .with_context(|| format!("Failed to write attachment file: {:?}", dest_path))?;
1339
1340        // Build relative path
1341        let rel_path = format!(
1342            "attachments/{}/{}",
1343            spec_id,
1344            dest_path.file_name().unwrap().to_string_lossy()
1345        );
1346
1347        Ok((rel_path, data.len() as u64))
1348    }
1349
1350    /// Removes an attachment file from disk
1351    pub fn remove_attachment_file(&self, spec_id: &str, stored_path: &str) -> Result<()> {
1352        let parent = self.file_path.parent().unwrap_or(Path::new("."));
1353        let full_path = parent.join(stored_path);
1354
1355        if full_path.exists() {
1356            fs::remove_file(&full_path)
1357                .with_context(|| format!("Failed to remove attachment file: {:?}", full_path))?;
1358        }
1359
1360        // Try to clean up empty directories
1361        let attachments_dir = parent.join("attachments").join(spec_id);
1362        if attachments_dir.exists() {
1363            // Only remove if empty
1364            if let Ok(mut entries) = fs::read_dir(&attachments_dir) {
1365                if entries.next().is_none() {
1366                    let _ = fs::remove_dir(&attachments_dir);
1367                }
1368            }
1369        }
1370
1371        Ok(())
1372    }
1373
1374    /// Gets the full path to an attachment file
1375    pub fn get_attachment_full_path(&self, stored_path: &str) -> PathBuf {
1376        let parent = self.file_path.parent().unwrap_or(Path::new("."));
1377        parent.join(stored_path)
1378    }
1379
1380    /// Checks if an attachment file exists
1381    pub fn attachment_exists(&self, stored_path: &str) -> bool {
1382        self.get_attachment_full_path(stored_path).exists()
1383    }
1384
1385    // --- GitLab Sync State Methods (STORY-0325) ---
1386
1387    /// Save a GitLab sync state record
1388    /// trace:STORY-0325 | ai:claude
1389    pub fn save_sync_state(&self, state: &crate::models::GitLabSyncState) -> Result<()> {
1390        if !self.is_sqlite() {
1391            anyhow::bail!("GitLab sync state is only supported for SQLite databases");
1392        }
1393
1394        use crate::db::SqliteBackend;
1395        let backend = SqliteBackend::new(&self.file_path)?;
1396        backend.save_sync_state(state)
1397    }
1398
1399    /// Load a GitLab sync state by requirement ID and issue IID
1400    /// trace:STORY-0325 | ai:claude
1401    pub fn load_sync_state(
1402        &self,
1403        requirement_id: uuid::Uuid,
1404        issue_iid: u64,
1405    ) -> Result<Option<crate::models::GitLabSyncState>> {
1406        if !self.is_sqlite() {
1407            anyhow::bail!("GitLab sync state is only supported for SQLite databases");
1408        }
1409
1410        use crate::db::SqliteBackend;
1411        let backend = SqliteBackend::new(&self.file_path)?;
1412        backend.load_sync_state(requirement_id, issue_iid)
1413    }
1414
1415    /// Load all GitLab sync states for a requirement
1416    /// trace:STORY-0325 | ai:claude
1417    pub fn load_sync_states_for_requirement(
1418        &self,
1419        requirement_id: uuid::Uuid,
1420    ) -> Result<Vec<crate::models::GitLabSyncState>> {
1421        if !self.is_sqlite() {
1422            anyhow::bail!("GitLab sync state is only supported for SQLite databases");
1423        }
1424
1425        use crate::db::SqliteBackend;
1426        let backend = SqliteBackend::new(&self.file_path)?;
1427        backend.load_sync_states_for_requirement(requirement_id)
1428    }
1429
1430    /// Load all GitLab sync states
1431    /// trace:STORY-0325 | ai:claude
1432    pub fn load_all_sync_states(&self) -> Result<Vec<crate::models::GitLabSyncState>> {
1433        if !self.is_sqlite() {
1434            anyhow::bail!("GitLab sync state is only supported for SQLite databases");
1435        }
1436
1437        use crate::db::SqliteBackend;
1438        let backend = SqliteBackend::new(&self.file_path)?;
1439        backend.load_all_sync_states()
1440    }
1441
1442    /// Load GitLab sync states by status
1443    /// trace:STORY-0325 | ai:claude
1444    pub fn load_sync_states_by_status(
1445        &self,
1446        status: crate::models::SyncStatus,
1447    ) -> Result<Vec<crate::models::GitLabSyncState>> {
1448        if !self.is_sqlite() {
1449            anyhow::bail!("GitLab sync state is only supported for SQLite databases");
1450        }
1451
1452        use crate::db::SqliteBackend;
1453        let backend = SqliteBackend::new(&self.file_path)?;
1454        backend.load_sync_states_by_status(status)
1455    }
1456
1457    /// Delete a GitLab sync state
1458    /// trace:STORY-0325 | ai:claude
1459    pub fn delete_sync_state(&self, requirement_id: uuid::Uuid, issue_iid: u64) -> Result<bool> {
1460        if !self.is_sqlite() {
1461            anyhow::bail!("GitLab sync state is only supported for SQLite databases");
1462        }
1463
1464        use crate::db::SqliteBackend;
1465        let backend = SqliteBackend::new(&self.file_path)?;
1466        backend.delete_sync_state(requirement_id, issue_iid)
1467    }
1468
1469    // =========================================================================
1470    // Queue Operations (STORY-0366)
1471    // =========================================================================
1472    // trace:STORY-0366 | ai:claude
1473
1474    /// List queue entries for a user
1475    pub fn queue_list(
1476        &self,
1477        user_id: &str,
1478        include_completed: bool,
1479    ) -> Result<Vec<crate::models::QueueEntry>> {
1480        if !self.is_sqlite() {
1481            anyhow::bail!("Queue is only supported for SQLite databases");
1482        }
1483        use crate::db::{DatabaseBackend, SqliteBackend};
1484        let backend = SqliteBackend::new(&self.file_path)?;
1485        backend.queue_list(user_id, include_completed)
1486    }
1487
1488    /// Add an entry to a user's queue
1489    pub fn queue_add(&self, entry: crate::models::QueueEntry) -> Result<()> {
1490        if !self.is_sqlite() {
1491            anyhow::bail!("Queue is only supported for SQLite databases");
1492        }
1493        use crate::db::{DatabaseBackend, SqliteBackend};
1494        let backend = SqliteBackend::new(&self.file_path)?;
1495        backend.queue_add(entry)
1496    }
1497
1498    /// Remove an entry from a user's queue
1499    pub fn queue_remove(&self, user_id: &str, requirement_id: &uuid::Uuid) -> Result<()> {
1500        if !self.is_sqlite() {
1501            anyhow::bail!("Queue is only supported for SQLite databases");
1502        }
1503        use crate::db::{DatabaseBackend, SqliteBackend};
1504        let backend = SqliteBackend::new(&self.file_path)?;
1505        backend.queue_remove(user_id, requirement_id)
1506    }
1507
1508    /// Reorder queue entries
1509    pub fn queue_reorder(&self, user_id: &str, items: &[(uuid::Uuid, i64)]) -> Result<()> {
1510        if !self.is_sqlite() {
1511            anyhow::bail!("Queue is only supported for SQLite databases");
1512        }
1513        use crate::db::{DatabaseBackend, SqliteBackend};
1514        let backend = SqliteBackend::new(&self.file_path)?;
1515        backend.queue_reorder(user_id, items)
1516    }
1517
1518    /// Clear queue entries
1519    pub fn queue_clear(&self, user_id: &str, completed_only: bool) -> Result<()> {
1520        if !self.is_sqlite() {
1521            anyhow::bail!("Queue is only supported for SQLite databases");
1522        }
1523        use crate::db::{DatabaseBackend, SqliteBackend};
1524        let backend = SqliteBackend::new(&self.file_path)?;
1525        backend.queue_clear(user_id, completed_only)
1526    }
1527}
1528
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// Builds an empty `RequirementsStore` named "test" for use as a fixture.
    fn create_test_store() -> RequirementsStore {
        let mut store = RequirementsStore::new();
        store.name = "test".to_string();
        store
    }

    /// Builds a `Requirement` with the given title and a description derived from it.
    fn create_test_requirement(title: &str) -> Requirement {
        Requirement::new(title.to_string(), format!("Description for {}", title))
    }

    /// Round-trip: a store saved to disk loads back with the same contents.
    #[test]
    fn test_save_and_load() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        let mut store = create_test_store();
        store.requirements.push(create_test_requirement("Test Req"));

        // Save
        storage.save(&store).unwrap();

        // Load
        let loaded = storage.load().unwrap();
        assert_eq!(loaded.requirements.len(), 1);
        assert_eq!(loaded.requirements[0].title, "Test Req");
    }

    /// When no external change happened between load and save, conflict
    /// detection must report `SaveResult::Success`.
    #[test]
    fn test_conflict_detection_no_conflict() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        // Create initial store with one requirement
        let mut store = create_test_store();
        let mut req = create_test_requirement("Test Req");
        req.spec_id = Some("FR-0001".to_string());
        let req_id = req.id;
        store.requirements.push(req);

        // Save initial version
        storage.save(&store).unwrap();

        // Capture timestamps (the baseline the save will compare against)
        let timestamps = Storage::get_requirement_timestamps(&store);

        // Modify the requirement locally; no other writer touches the file
        store.requirements[0].title = "Modified Title".to_string();
        store.requirements[0].modified_at = Utc::now();

        // Save with conflict detection - should succeed
        let result = storage
            .save_with_conflict_detection(&store, &timestamps, &[req_id])
            .unwrap();

        match result {
            SaveResult::Success => {} // Expected
            _ => panic!("Expected SaveResult::Success"),
        }
    }

    /// When an external writer and the local editor both change the SAME field
    /// of the same requirement, the save must report `SaveResult::Conflict`
    /// naming that field.
    #[test]
    fn test_conflict_detection_with_external_change() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        // Create initial store with one requirement
        let mut store = create_test_store();
        let mut req = create_test_requirement("Test Req");
        req.spec_id = Some("FR-0001".to_string());
        let req_id = req.id;
        store.requirements.push(req);

        // Save initial version
        storage.save(&store).unwrap();

        // Capture timestamps (simulating GUI loading the store)
        let timestamps = Storage::get_requirement_timestamps(&store);
        let mut local_store = store.clone();

        // Simulate external change (another tool modifies the file)
        store.requirements[0].title = "External Change".to_string();
        store.requirements[0].modified_at = Utc::now();
        storage.save(&store).unwrap();

        // Local change to the same field (`title`) that the external change
        // touched — this overlap is what must trigger the conflict
        local_store.requirements[0].title = "Local Change".to_string();
        local_store.requirements[0].modified_at = Utc::now();

        // Save with conflict detection - should detect conflict
        let result = storage
            .save_with_conflict_detection(&local_store, &timestamps, &[req_id])
            .unwrap();

        match result {
            SaveResult::Conflict(info) => {
                assert_eq!(info.requirement_id, req_id);
                assert!(!info.conflicting_fields.is_empty());
                // The conflicting field list must include the field both sides edited
                assert!(info
                    .conflicting_fields
                    .iter()
                    .any(|f| f.field_name == "title"));
            }
            _ => panic!("Expected SaveResult::Conflict"),
        }
    }

    /// Resolving a title conflict with `ForceLocal` keeps the local edit.
    #[test]
    fn test_conflict_resolution_force_local() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        // Create initial store
        let mut store = create_test_store();
        let mut req = create_test_requirement("Test Req");
        req.spec_id = Some("FR-0001".to_string());
        let req_id = req.id;
        store.requirements.push(req);
        storage.save(&store).unwrap();

        // External change
        // NOTE(review): unlike the detection tests, `modified_at` is not bumped
        // here — presumably save_with_resolution compares field values rather
        // than timestamps; confirm against the implementation.
        store.requirements[0].title = "External".to_string();
        storage.save(&store).unwrap();

        // Local version with different title
        let mut local_store = store.clone();
        local_store.requirements[0].title = "Local".to_string();

        // Resolve with ForceLocal
        let result = storage
            .save_with_resolution(&local_store, req_id, ConflictResolution::ForceLocal)
            .unwrap();

        assert_eq!(result.requirements[0].title, "Local");
    }

    /// Resolving a title conflict with `KeepDisk` discards the local edit and
    /// keeps the version already on disk.
    #[test]
    fn test_conflict_resolution_keep_disk() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        // Create initial store
        let mut store = create_test_store();
        let mut req = create_test_requirement("Test Req");
        req.spec_id = Some("FR-0001".to_string());
        let req_id = req.id;
        store.requirements.push(req);
        storage.save(&store).unwrap();

        // External change (see NOTE in test_conflict_resolution_force_local
        // about `modified_at` not being bumped in the resolution tests)
        store.requirements[0].title = "External".to_string();
        storage.save(&store).unwrap();

        // Local version with different title
        let mut local_store = store.clone();
        local_store.requirements[0].title = "Local".to_string();

        // Resolve with KeepDisk
        let result = storage
            .save_with_resolution(&local_store, req_id, ConflictResolution::KeepDisk)
            .unwrap();

        assert_eq!(result.requirements[0].title, "External");
    }

    /// `get_requirement_timestamps` maps each requirement id to its
    /// `modified_at`, one entry per requirement.
    #[test]
    fn test_get_requirement_timestamps() {
        let mut store = create_test_store();
        let req1 = create_test_requirement("Req1");
        let req2 = create_test_requirement("Req2");
        let id1 = req1.id;
        let id2 = req2.id;
        let ts1 = req1.modified_at;
        let ts2 = req2.modified_at;

        store.requirements.push(req1);
        store.requirements.push(req2);

        let timestamps = Storage::get_requirement_timestamps(&store);
        assert_eq!(timestamps.len(), 2);
        assert_eq!(timestamps.get(&id1), Some(&ts1));
        assert_eq!(timestamps.get(&id2), Some(&ts2));
    }

    #[test]
    fn test_new_requirement_preserves_external_additions() {
        // Scenario: Instance A adds R1, Instance B adds R2 (not knowing about R1)
        // Both should be preserved
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        // Instance A: Create initial store with R1
        let mut store_a = create_test_store();
        let req1 = create_test_requirement("Req1 from Instance A");
        let req1_id = req1.id;
        store_a.requirements.push(req1);
        storage.save(&store_a).unwrap();

        // Instance B: Started with empty store (before R1 was added)
        // Now adds R2 without knowing about R1
        let mut store_b = create_test_store();
        let req2 = create_test_requirement("Req2 from Instance B");
        let req2_id = req2.id;
        store_b.requirements.push(req2);

        // Instance B saves with conflict detection.
        // Empty timestamp baseline models "B never saw R1 on disk";
        // modified_requirement_ids contains only R2 (the new one)
        let original_timestamps: HashMap<Uuid, DateTime<Utc>> = HashMap::new();
        let modified_ids = vec![req2_id];

        let result = storage
            .save_with_conflict_detection(&store_b, &original_timestamps, &modified_ids)
            .unwrap();

        // Should succeed (no conflict - R2 is new); either a clean Success or
        // a Merged result is acceptable here — only Conflict is a failure
        match result {
            SaveResult::Success => {}
            SaveResult::Merged { .. } => {}
            SaveResult::Conflict(_) => panic!("Should not have conflict"),
        }

        // Verify both requirements are preserved
        let final_store = storage.load().unwrap();
        assert_eq!(final_store.requirements.len(), 2);
        assert!(final_store.requirements.iter().any(|r| r.id == req1_id));
        assert!(final_store.requirements.iter().any(|r| r.id == req2_id));
    }

    #[test]
    fn test_deletion_preserves_external_additions() {
        // Scenario: Instance A adds R1, Instance B deletes R2 (not knowing about R1)
        // R1 should be preserved, R2 should be deleted
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        // Initial store with R2
        let mut initial_store = create_test_store();
        let req2 = create_test_requirement("Req2");
        let req2_id = req2.id;
        initial_store.requirements.push(req2);
        storage.save(&initial_store).unwrap();

        // Instance A: Adds R1 externally
        let req1 = create_test_requirement("Req1 from Instance A");
        let req1_id = req1.id;
        let mut store_a = initial_store.clone();
        store_a.requirements.push(req1);
        storage.save(&store_a).unwrap();

        // Instance B: Started before R1, now deletes R2.
        // An empty store plus req2_id in modified_ids expresses the deletion.
        let store_b = create_test_store(); // Empty - R2 was deleted
        let original_timestamps: HashMap<Uuid, DateTime<Utc>> = HashMap::new();
        let modified_ids = vec![req2_id]; // Marking R2 as modified (deleted)

        let result = storage
            .save_with_conflict_detection(&store_b, &original_timestamps, &modified_ids)
            .unwrap();

        match result {
            SaveResult::Success => {}
            SaveResult::Merged { .. } => {}
            SaveResult::Conflict(_) => panic!("Should not have conflict"),
        }

        // Verify R1 is preserved, R2 is deleted
        let final_store = storage.load().unwrap();
        assert_eq!(final_store.requirements.len(), 1);
        assert!(final_store.requirements.iter().any(|r| r.id == req1_id));
        assert!(!final_store.requirements.iter().any(|r| r.id == req2_id));
    }
}