1use anyhow::{Context, Result};
3use chrono::{DateTime, Utc};
4use fs2::FileExt;
5use std::collections::HashMap;
6use std::fs::{self, File, OpenOptions};
7use std::io::{BufReader, Write};
8use std::path::{Path, PathBuf};
9use std::time::Duration;
10use uuid::Uuid;
11
12use crate::models::{Requirement, RequirementsStore};
13
/// Errors surfaced by [`Storage`] operations.
#[derive(Debug)]
pub enum StorageError {
    /// The backing file is locked by another user/process.
    FileLocked,
    /// Underlying filesystem failure.
    IoError(std::io::Error),
    /// The YAML on disk could not be parsed.
    ParseError(String),
    /// A concurrent edit conflicts with unsaved local changes.
    Conflict(ConflictInfo),
}
26
/// Details about a requirement whose disk copy changed while it was being
/// edited locally (see `Storage::save_with_conflict_detection`).
#[derive(Debug, Clone)]
pub struct ConflictInfo {
    /// Stable UUID of the conflicted requirement.
    pub requirement_id: Uuid,
    /// Human-facing spec id (falls back to the UUID string when absent).
    pub spec_id: String,
    /// Per-field differences between the disk and local versions.
    pub conflicting_fields: Vec<FieldConflict>,
    /// Full on-disk version; boxed to keep this struct small.
    pub disk_version: Box<Requirement>,
    /// Full local (unsaved) version; boxed to keep this struct small.
    pub local_version: Box<Requirement>,
}
41
/// One field that differs between the on-disk and local requirement versions.
#[derive(Debug, Clone)]
pub struct FieldConflict {
    pub field_name: String,
    /// Value in the common ancestor; currently always empty — the base
    /// version is not tracked (see `Storage::detect_field_conflicts`).
    pub original_value: String,
    pub disk_value: String,
    pub local_value: String,
}
54
/// Outcome of a conflict-aware save.
#[derive(Debug)]
pub enum SaveResult {
    /// Saved; no external changes were detected.
    Success,
    /// Saved after auto-merging externally modified requirements.
    Merged {
        /// Number of requirements that were auto-merged.
        merged_count: usize,
    },
    /// Save aborted: a field-level conflict needs user resolution.
    Conflict(ConflictInfo),
}
68
/// User's choice for resolving a detected save conflict.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ConflictResolution {
    /// Overwrite the disk version with the local one.
    ForceLocal,
    /// Discard local changes and keep what is on disk.
    KeepDisk,
    /// Combine both versions via `Storage::merge_requirement`.
    Merge,
}
79
/// Result of atomically adding a requirement
/// (see `Storage::add_requirement_atomic`).
#[derive(Debug)]
pub struct AddResult {
    /// The post-add store, exactly as written to disk.
    pub store: RequirementsStore,
    /// Count of requirements that existed on disk but not locally.
    pub external_changes_merged: usize,
    /// Spec id assigned to the newly added requirement.
    pub spec_id: String,
}
90
91impl std::fmt::Display for StorageError {
92 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
93 match self {
94 StorageError::FileLocked => write!(f, "File is locked by another user/process"),
95 StorageError::IoError(e) => write!(f, "IO error: {}", e),
96 StorageError::ParseError(s) => write!(f, "Parse error: {}", s),
97 StorageError::Conflict(info) => write!(
98 f,
99 "Conflict detected for {} ({}): {} field(s) changed externally",
100 info.spec_id,
101 info.requirement_id,
102 info.conflicting_fields.len()
103 ),
104 }
105 }
106}
107
// Marker impl so StorageError can flow through `?`, `anyhow`, and
// `Box<dyn Error>` like any standard error type.
impl std::error::Error for StorageError {}
109
/// One live editing session, persisted in the shared lock file so peer
/// processes can see who else has the store open.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct SessionInfo {
    pub session_id: String,
    pub user_name: String,
    pub hostname: String,
    /// OS process id, useful for diagnosing stale sessions.
    pub pid: u32,
    pub started_at: DateTime<Utc>,
    /// Refreshed periodically; sessions with old heartbeats get pruned
    /// (see `LockFileInfo::remove_stale_sessions`).
    pub last_heartbeat: DateTime<Utc>,
    /// Set while this session is editing one specific requirement.
    pub editing_requirement: Option<EditLock>,
}
128
/// Advisory marker that a session is currently editing one requirement.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct EditLock {
    pub requirement_id: Uuid,
    pub spec_id: String,
    pub started_at: DateTime<Utc>,
}
139
/// Contents of the `.yaml.lock` companion file: every known session,
/// keyed by its session id.
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)]
pub struct LockFileInfo {
    pub sessions: HashMap<String, SessionInfo>,
}
146
147impl LockFileInfo {
148 pub fn new() -> Self {
150 Self {
151 sessions: HashMap::new(),
152 }
153 }
154
155 pub fn remove_stale_sessions(&mut self, stale_threshold_secs: i64) {
157 let now = Utc::now();
158 self.sessions.retain(|_, session| {
159 let elapsed = now.signed_duration_since(session.last_heartbeat);
160 elapsed.num_seconds() < stale_threshold_secs
161 });
162 }
163
164 pub fn get_editors(&self, requirement_id: Uuid) -> Vec<&SessionInfo> {
166 self.sessions
167 .values()
168 .filter(|s| {
169 s.editing_requirement
170 .as_ref()
171 .map(|e| e.requirement_id == requirement_id)
172 .unwrap_or(false)
173 })
174 .collect()
175 }
176
177 pub fn get_other_sessions(&self, current_session_id: &str) -> Vec<&SessionInfo> {
179 self.sessions
180 .values()
181 .filter(|s| s.session_id != current_session_id)
182 .collect()
183 }
184}
185
186impl SessionInfo {
187 pub fn is_stale(&self, threshold_secs: i64) -> bool {
189 let elapsed = Utc::now().signed_duration_since(self.last_heartbeat);
190 elapsed.num_seconds() > threshold_secs
191 }
192}
193
/// Handle to a requirements store on disk — a YAML file or a SQLite
/// database, selected by file extension — plus its companion lock file.
pub struct Storage {
    // Path of the store itself (.yaml or .db/.sqlite/.sqlite3).
    file_path: PathBuf,
    // Sibling file used both as a flock target and a session registry.
    lock_file_path: PathBuf,
}
200
201impl Storage {
202 pub fn new<P: AsRef<Path>>(file_path: P) -> Self {
204 let file_path = file_path.as_ref().to_path_buf();
205 let lock_file_path = file_path.with_extension("yaml.lock");
206 Self {
207 file_path,
208 lock_file_path,
209 }
210 }
211
    /// Path of the backing store file.
    pub fn path(&self) -> &Path {
        &self.file_path
    }
216
    /// Path of the companion lock/session file.
    pub fn lock_file_path(&self) -> &Path {
        &self.lock_file_path
    }
221
    /// True when the extension selects the SQLite backend
    /// (`.db`, `.sqlite`, `.sqlite3`); anything else is treated as YAML.
    pub fn is_sqlite(&self) -> bool {
        matches!(
            self.file_path.extension().and_then(|e| e.to_str()),
            Some("db") | Some("sqlite") | Some("sqlite3")
        )
    }
229
230 pub fn read_lock_info(&self) -> Result<LockFileInfo> {
232 if !self.lock_file_path.exists() {
233 return Ok(LockFileInfo::new());
234 }
235
236 let content = fs::read_to_string(&self.lock_file_path)
237 .with_context(|| format!("Failed to read lock file: {:?}", self.lock_file_path))?;
238
239 Ok(serde_yaml::from_str(&content).unwrap_or_else(|_| LockFileInfo::new()))
241 }
242
243 pub fn write_lock_info(&self, info: &LockFileInfo) -> Result<()> {
246 if let Some(parent) = self.lock_file_path.parent() {
248 fs::create_dir_all(parent)?;
249 }
250
251 let yaml = serde_yaml::to_string(info).context("Failed to serialize lock info")?;
252
253 fs::write(&self.lock_file_path, yaml)
254 .with_context(|| format!("Failed to write lock file: {:?}", self.lock_file_path))?;
255
256 Ok(())
257 }
258
259 pub fn register_session(&self, session: SessionInfo) -> Result<LockFileInfo> {
261 let mut info = self.read_lock_info().unwrap_or_default();
262
263 info.remove_stale_sessions(30);
265
266 info.sessions.insert(session.session_id.clone(), session);
268
269 self.write_lock_info(&info)?;
270 Ok(info)
271 }
272
273 pub fn update_heartbeat(
275 &self,
276 session_id: &str,
277 editing: Option<EditLock>,
278 ) -> Result<LockFileInfo> {
279 let mut info = self.read_lock_info().unwrap_or_default();
280
281 info.remove_stale_sessions(30);
283
284 if let Some(session) = info.sessions.get_mut(session_id) {
286 session.last_heartbeat = Utc::now();
287 session.editing_requirement = editing;
288 }
289
290 self.write_lock_info(&info)?;
291 Ok(info)
292 }
293
294 pub fn unregister_session(&self, session_id: &str) -> Result<()> {
296 let mut info = self.read_lock_info().unwrap_or_default();
297 info.sessions.remove(session_id);
298
299 if info.sessions.is_empty() {
301 let _ = fs::remove_file(&self.lock_file_path);
302 } else {
303 self.write_lock_info(&info)?;
304 }
305
306 Ok(())
307 }
308
309 pub fn get_active_sessions(&self) -> Result<LockFileInfo> {
311 let mut info = self.read_lock_info().unwrap_or_default();
312 info.remove_stale_sessions(30);
313 Ok(info)
314 }
315
    /// Opens (creating and truncating) the lock file and acquires an
    /// exclusive advisory lock on it, polling every 100 ms for up to 5 s.
    ///
    /// Returns the locked `File`; the lock is released when the handle is
    /// dropped.
    ///
    /// NOTE(review): `truncate(true)` wipes the YAML session registry that
    /// `write_lock_info` stores in this same file — confirm that sharing one
    /// path between the session registry and the flock target is intentional.
    fn acquire_write_lock(&self) -> Result<File> {
        if let Some(parent) = self.lock_file_path.parent() {
            fs::create_dir_all(parent)?;
        }

        let lock_file = OpenOptions::new()
            .create(true)
            .write(true)
            .truncate(true)
            .open(&self.lock_file_path)
            .with_context(|| format!("Failed to create lock file: {:?}", self.lock_file_path))?;

        let start = std::time::Instant::now();
        let timeout = Duration::from_secs(5);

        loop {
            match lock_file.try_lock_exclusive() {
                Ok(()) => return Ok(lock_file),
                // Contended: keep retrying until the 5 s timeout elapses.
                Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
                    if start.elapsed() > timeout {
                        anyhow::bail!(
                            "Timeout waiting for file lock - another user may be editing: {:?}",
                            self.file_path
                        );
                    }
                    std::thread::sleep(Duration::from_millis(100));
                }
                // Any other error (permissions, etc.) is fatal immediately.
                Err(e) => {
                    return Err(e).with_context(|| {
                        format!("Failed to acquire lock on {:?}", self.lock_file_path)
                    })
                }
            }
        }
    }
355
356 fn acquire_read_lock(&self) -> Result<Option<File>> {
358 if !self.lock_file_path.exists() {
359 return Ok(None);
360 }
361
362 let lock_file = OpenOptions::new()
363 .read(true)
364 .open(&self.lock_file_path)
365 .with_context(|| format!("Failed to open lock file: {:?}", self.lock_file_path))?;
366
367 let start = std::time::Instant::now();
369 let timeout = Duration::from_secs(5);
370
371 loop {
372 match fs2::FileExt::try_lock_shared(&lock_file) {
374 Ok(()) => return Ok(Some(lock_file)),
375 Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
376 if start.elapsed() > timeout {
377 anyhow::bail!(
378 "Timeout waiting for file lock - another user may be editing: {:?}",
379 self.file_path
380 );
381 }
382 std::thread::sleep(Duration::from_millis(100));
383 }
384 Err(e) => {
385 return Err(e).with_context(|| {
386 format!("Failed to acquire lock on {:?}", self.lock_file_path)
387 })
388 }
389 }
390 }
391 }
392
393 pub fn load(&self) -> Result<RequirementsStore> {
396 let is_sqlite = matches!(
398 self.file_path.extension().and_then(|e| e.to_str()),
399 Some("db") | Some("sqlite") | Some("sqlite3")
400 );
401
402 if is_sqlite {
403 return self.load_sqlite();
405 }
406
407 if !self.file_path.exists() {
409 let parent = self
410 .file_path
411 .parent()
412 .context("Failed to get parent directory")?;
413 fs::create_dir_all(parent)?;
414 let default_store = RequirementsStore::new();
415 self.save(&default_store)?;
416 return Ok(default_store);
417 }
418
419 let _lock = self.acquire_read_lock()?;
421
422 let file = File::open(&self.file_path)
424 .with_context(|| format!("Failed to open file: {:?}", self.file_path))?;
425 let reader = BufReader::new(file);
426
427 let mut store: crate::models::RequirementsStore = serde_yaml::from_reader(reader)
429 .map_err(|e| {
430 eprintln!("YAML parse error details: {}", e);
431 e
432 })
433 .with_context(|| format!("Failed to parse YAML from {:?}", self.file_path))?;
434
435 store.migrate_features();
437
438 let had_missing_spec_ids = store.requirements.iter().any(|r| r.spec_id.is_none());
440 store.assign_spec_ids();
441
442 let had_missing_user_spec_ids = store.users.iter().any(|u| u.spec_id.is_none());
444 store.migrate_users_to_spec_ids();
445
446 let had_missing_types = store.migrate_type_definitions();
448
449 let had_missing_id_config_types = store.migrate_id_config_types();
452
453 let repaired_duplicates = store.repair_duplicate_spec_ids();
455
456 drop(_lock);
458
459 if had_missing_spec_ids
461 || had_missing_user_spec_ids
462 || had_missing_types
463 || had_missing_id_config_types
464 || repaired_duplicates > 0
465 {
466 self.save(&store)?;
467 }
468
469 store.validate_unique_spec_ids()?;
471
472 Ok(store)
473 }
474
    /// Loads from the SQLite backend and applies the same legacy migrations
    /// as the YAML path in `load`, saving back if anything changed.
    fn load_sqlite(&self) -> Result<RequirementsStore> {
        use crate::db::{DatabaseBackend, SqliteBackend};

        let backend = SqliteBackend::new(&self.file_path)?;
        let mut store = backend.load()?;

        // Same migration sequence as the YAML loader.
        store.migrate_features();
        let had_missing_spec_ids = store.requirements.iter().any(|r| r.spec_id.is_none());
        store.assign_spec_ids();
        let had_missing_user_spec_ids = store.users.iter().any(|u| u.spec_id.is_none());
        store.migrate_users_to_spec_ids();
        let had_missing_types = store.migrate_type_definitions();
        let had_missing_id_config_types = store.migrate_id_config_types();
        let repaired_duplicates = store.repair_duplicate_spec_ids();

        // Persist only when some migration actually mutated the store.
        if had_missing_spec_ids
            || had_missing_user_spec_ids
            || had_missing_types
            || had_missing_id_config_types
            || repaired_duplicates > 0
        {
            backend.save(&store)?;
        }

        store.validate_unique_spec_ids()?;
        Ok(store)
    }
506
507 pub fn save(&self, store: &RequirementsStore) -> Result<()> {
510 let is_sqlite = matches!(
512 self.file_path.extension().and_then(|e| e.to_str()),
513 Some("db") | Some("sqlite") | Some("sqlite3")
514 );
515
516 if is_sqlite {
517 return self.save_sqlite(store);
518 }
519
520 if let Some(parent) = self.file_path.parent() {
522 fs::create_dir_all(parent)?;
523 }
524
525 let mut lock_file = self.acquire_write_lock()?;
527
528 let _ = writeln!(
530 lock_file,
531 "Locked by PID {} at {}",
532 std::process::id(),
533 chrono::Utc::now().to_rfc3339()
534 );
535
536 let yaml = serde_yaml::to_string(store)?;
538 fs::write(&self.file_path, yaml)?;
539
540 Ok(())
542 }
543
544 fn save_sqlite(&self, store: &RequirementsStore) -> Result<()> {
546 use crate::db::{DatabaseBackend, SqliteBackend};
547
548 let backend = SqliteBackend::new(&self.file_path)?;
549 backend.save(store)?;
550 Ok(())
551 }
552
    /// Reloads the store from disk and reports whether it appears to differ
    /// from `current_store`.
    ///
    /// NOTE(review): "changed" compares only requirement/user/feature
    /// counts, so in-place edits that keep the counts equal report `false`
    /// — confirm this coarse heuristic is sufficient for callers.
    pub fn reload_if_changed(
        &self,
        current_store: &RequirementsStore,
    ) -> Result<(RequirementsStore, bool)> {
        let new_store = self.load()?;

        let changed = new_store.requirements.len() != current_store.requirements.len()
            || new_store.users.len() != current_store.users.len()
            || new_store.features.len() != current_store.features.len();

        Ok((new_store, changed))
    }
569
570 pub fn update_atomically<F>(&self, update_fn: F) -> Result<RequirementsStore>
573 where
574 F: FnOnce(&mut RequirementsStore),
575 {
576 let is_sqlite = matches!(
578 self.file_path.extension().and_then(|e| e.to_str()),
579 Some("db") | Some("sqlite") | Some("sqlite3")
580 );
581
582 if is_sqlite {
584 let mut store = self.load()?;
585 update_fn(&mut store);
586 self.save(&store)?;
587 return Ok(store);
588 }
589
590 let mut lock_file = self.acquire_write_lock()?;
592
593 let _ = writeln!(
595 lock_file,
596 "Locked by PID {} at {}",
597 std::process::id(),
598 chrono::Utc::now().to_rfc3339()
599 );
600
601 let file = File::open(&self.file_path)
603 .with_context(|| format!("Failed to open file: {:?}", self.file_path))?;
604 let reader = BufReader::new(file);
605 let mut store: RequirementsStore = serde_yaml::from_reader(reader)
606 .with_context(|| format!("Failed to parse YAML from {:?}", self.file_path))?;
607
608 update_fn(&mut store);
610
611 let yaml = serde_yaml::to_string(&store)?;
613 fs::write(&self.file_path, yaml)?;
614
615 Ok(store)
617 }
618
619 pub fn save_with_conflict_detection(
639 &self,
640 local_store: &RequirementsStore,
641 original_timestamps: &HashMap<Uuid, DateTime<Utc>>,
642 modified_requirement_ids: &[Uuid],
643 ) -> Result<SaveResult> {
644 let is_sqlite = matches!(
646 self.file_path.extension().and_then(|e| e.to_str()),
647 Some("db") | Some("sqlite") | Some("sqlite3")
648 );
649
650 if is_sqlite {
652 self.save(local_store)?;
653 return Ok(SaveResult::Success);
654 }
655
656 let mut lock_file = self.acquire_write_lock()?;
658
659 let _ = writeln!(
661 lock_file,
662 "Locked by PID {} at {}",
663 std::process::id(),
664 chrono::Utc::now().to_rfc3339()
665 );
666
667 let disk_store = if self.file_path.exists() {
669 let file = File::open(&self.file_path)
670 .with_context(|| format!("Failed to open file: {:?}", self.file_path))?;
671 let reader = BufReader::new(file);
672 serde_yaml::from_reader(reader)
673 .with_context(|| format!("Failed to parse YAML from {:?}", self.file_path))?
674 } else {
675 RequirementsStore::new()
676 };
677
678 let mut merged_count = 0;
680 let mut final_store = disk_store.clone();
681
682 for &req_id in modified_requirement_ids {
683 let local_req = local_store.requirements.iter().find(|r| r.id == req_id);
684 let disk_req = disk_store.requirements.iter().find(|r| r.id == req_id);
685 let original_timestamp = original_timestamps.get(&req_id);
686
687 match (local_req, disk_req, original_timestamp) {
688 (Some(local), None, _) => {
690 final_store.requirements.push(local.clone());
691 }
692
693 (Some(local), Some(disk), Some(&orig_ts)) => {
695 if disk.modified_at > orig_ts {
697 let conflicts = Self::detect_field_conflicts(local, disk, &orig_ts);
699
700 if !conflicts.is_empty() {
701 return Ok(SaveResult::Conflict(ConflictInfo {
703 requirement_id: req_id,
704 spec_id: disk.spec_id.clone().unwrap_or_else(|| req_id.to_string()),
705 conflicting_fields: conflicts,
706 disk_version: Box::new(disk.clone()),
707 local_version: Box::new(local.clone()),
708 }));
709 }
710
711 let merged = Self::merge_requirement(local, disk);
713 if let Some(idx) =
714 final_store.requirements.iter().position(|r| r.id == req_id)
715 {
716 final_store.requirements[idx] = merged;
717 }
718 merged_count += 1;
719 } else {
720 if let Some(idx) =
722 final_store.requirements.iter().position(|r| r.id == req_id)
723 {
724 final_store.requirements[idx] = local.clone();
725 }
726 }
727 }
728
729 (Some(local), Some(_disk), None) => {
732 if let Some(idx) = final_store.requirements.iter().position(|r| r.id == req_id)
733 {
734 final_store.requirements[idx] = local.clone();
735 }
736 }
737
738 (None, Some(_), _) => {
740 final_store.requirements.retain(|r| r.id != req_id);
742 }
743
744 (None, None, _) => {}
746 }
747 }
748
749 final_store.name = local_store.name.clone();
752 final_store.title = local_store.title.clone();
753 final_store.description = local_store.description.clone();
754 final_store.users = local_store.users.clone();
755 final_store.id_config = local_store.id_config.clone();
756 final_store.features = local_store.features.clone();
757 final_store.relationship_definitions = local_store.relationship_definitions.clone();
758 final_store.reaction_definitions = local_store.reaction_definitions.clone();
759 final_store.type_definitions = local_store.type_definitions.clone();
760 final_store.ai_prompts = local_store.ai_prompts.clone();
761 final_store.allowed_prefixes = local_store.allowed_prefixes.clone();
762 final_store.restrict_prefixes = local_store.restrict_prefixes;
763
764 let yaml = serde_yaml::to_string(&final_store)?;
766 fs::write(&self.file_path, yaml)?;
767
768 if merged_count > 0 {
769 Ok(SaveResult::Merged { merged_count })
770 } else {
771 Ok(SaveResult::Success)
772 }
773 }
774
775 fn detect_field_conflicts(
778 local: &Requirement,
779 disk: &Requirement,
780 _original_timestamp: &DateTime<Utc>,
781 ) -> Vec<FieldConflict> {
782 let mut conflicts = Vec::new();
783
784 if local.title != disk.title {
795 conflicts.push(FieldConflict {
796 field_name: "title".to_string(),
797 original_value: String::new(), disk_value: disk.title.clone(),
799 local_value: local.title.clone(),
800 });
801 }
802
803 if local.description != disk.description {
804 conflicts.push(FieldConflict {
805 field_name: "description".to_string(),
806 original_value: String::new(),
807 disk_value: disk.description.clone(),
808 local_value: local.description.clone(),
809 });
810 }
811
812 if local.status != disk.status {
813 conflicts.push(FieldConflict {
814 field_name: "status".to_string(),
815 original_value: String::new(),
816 disk_value: disk.status.to_string(),
817 local_value: local.status.to_string(),
818 });
819 }
820
821 if local.priority != disk.priority {
822 conflicts.push(FieldConflict {
823 field_name: "priority".to_string(),
824 original_value: String::new(),
825 disk_value: disk.priority.to_string(),
826 local_value: local.priority.to_string(),
827 });
828 }
829
830 if local.owner != disk.owner {
831 conflicts.push(FieldConflict {
832 field_name: "owner".to_string(),
833 original_value: String::new(),
834 disk_value: disk.owner.clone(),
835 local_value: local.owner.clone(),
836 });
837 }
838
839 if local.feature != disk.feature {
840 conflicts.push(FieldConflict {
841 field_name: "feature".to_string(),
842 original_value: String::new(),
843 disk_value: disk.feature.clone(),
844 local_value: local.feature.clone(),
845 });
846 }
847
848 if local.req_type != disk.req_type {
849 conflicts.push(FieldConflict {
850 field_name: "type".to_string(),
851 original_value: String::new(),
852 disk_value: disk.req_type.to_string(),
853 local_value: local.req_type.to_string(),
854 });
855 }
856
857 if local.tags != disk.tags {
858 conflicts.push(FieldConflict {
859 field_name: "tags".to_string(),
860 original_value: String::new(),
861 disk_value: disk.tags.iter().cloned().collect::<Vec<_>>().join(", "),
862 local_value: local.tags.iter().cloned().collect::<Vec<_>>().join(", "),
863 });
864 }
865
866 conflicts
867 }
868
    /// Merges `disk` into a clone of `local` after conflicts were ruled out:
    /// unions comments, history, relationships and urls (deduped by id,
    /// (target, type) and url respectively), keeps the newer `modified_at`,
    /// and adopts the disk AI evaluation only when the local one is absent.
    /// Scalar fields always come from `local`.
    fn merge_requirement(local: &Requirement, disk: &Requirement) -> Requirement {
        let mut merged = local.clone();

        // Union comments by id, then restore chronological order.
        let mut comment_ids: std::collections::HashSet<Uuid> =
            merged.comments.iter().map(|c| c.id).collect();
        for comment in &disk.comments {
            if comment_ids.insert(comment.id) {
                merged.comments.push(comment.clone());
            }
        }
        merged.comments.sort_by_key(|c| c.created_at);

        // Union history entries by id, ordered by timestamp.
        let mut history_ids: std::collections::HashSet<Uuid> =
            merged.history.iter().map(|h| h.id).collect();
        for entry in &disk.history {
            if history_ids.insert(entry.id) {
                merged.history.push(entry.clone());
            }
        }
        merged.history.sort_by_key(|h| h.timestamp);

        // Union relationships keyed by (target, relationship type).
        let existing_rels: std::collections::HashSet<_> = merged
            .relationships
            .iter()
            .map(|r| (r.target_id, r.rel_type.clone()))
            .collect();
        for rel in &disk.relationships {
            if !existing_rels.contains(&(rel.target_id, rel.rel_type.clone())) {
                merged.relationships.push(rel.clone());
            }
        }

        // Union urls by the url string itself.
        let existing_urls: std::collections::HashSet<_> =
            merged.urls.iter().map(|u| u.url.clone()).collect();
        for url in &disk.urls {
            if !existing_urls.contains(&url.url) {
                merged.urls.push(url.clone());
            }
        }

        // Keep whichever side was modified most recently.
        if disk.modified_at > merged.modified_at {
            merged.modified_at = disk.modified_at;
        }

        // Local AI evaluation wins; otherwise fall back to the disk one.
        if merged.ai_evaluation.is_none() && disk.ai_evaluation.is_some() {
            merged.ai_evaluation = disk.ai_evaluation.clone();
        }

        merged
    }
931
    /// Writes `local_store`'s version of `requirement_id` to disk according
    /// to the chosen `resolution`, under the exclusive lock.
    ///
    /// Returns the resulting on-disk store (which also keeps any external
    /// requirements not present locally).
    ///
    /// NOTE(review): unlike the other save paths this always uses the YAML
    /// backend — confirm it is never reached for SQLite stores.
    pub fn save_with_resolution(
        &self,
        local_store: &RequirementsStore,
        requirement_id: Uuid,
        resolution: ConflictResolution,
    ) -> Result<RequirementsStore> {
        let mut lock_file = self.acquire_write_lock()?;

        // Best-effort breadcrumb for humans inspecting the lock file.
        let _ = writeln!(
            lock_file,
            "Locked by PID {} at {}",
            std::process::id(),
            chrono::Utc::now().to_rfc3339()
        );

        // Current on-disk state, read while holding the lock.
        let mut disk_store: RequirementsStore = if self.file_path.exists() {
            let file = File::open(&self.file_path)?;
            let reader = BufReader::new(file);
            serde_yaml::from_reader(reader)?
        } else {
            RequirementsStore::new()
        };

        match resolution {
            // Local copy replaces (or is appended to) the disk store.
            ConflictResolution::ForceLocal => {
                if let Some(local_req) = local_store
                    .requirements
                    .iter()
                    .find(|r| r.id == requirement_id)
                {
                    if let Some(idx) = disk_store
                        .requirements
                        .iter()
                        .position(|r| r.id == requirement_id)
                    {
                        disk_store.requirements[idx] = local_req.clone();
                    } else {
                        disk_store.requirements.push(local_req.clone());
                    }
                }
            }
            // Disk already holds the chosen version; nothing to change.
            ConflictResolution::KeepDisk => {
            }
            // Combine both versions via merge_requirement.
            ConflictResolution::Merge => {
                if let Some(local_req) = local_store
                    .requirements
                    .iter()
                    .find(|r| r.id == requirement_id)
                {
                    if let Some(disk_req) = disk_store
                        .requirements
                        .iter()
                        .find(|r| r.id == requirement_id)
                    {
                        let merged = Self::merge_requirement(local_req, disk_req);
                        if let Some(idx) = disk_store
                            .requirements
                            .iter()
                            .position(|r| r.id == requirement_id)
                        {
                            disk_store.requirements[idx] = merged;
                        }
                    } else {
                        // Nothing on disk to merge with: treat as addition.
                        disk_store.requirements.push(local_req.clone());
                    }
                }
            }
        }

        let yaml = serde_yaml::to_string(&disk_store)?;
        fs::write(&self.file_path, yaml)?;

        Ok(disk_store)
    }
1013
1014 pub fn get_requirement_timestamps(store: &RequirementsStore) -> HashMap<Uuid, DateTime<Utc>> {
1016 store
1017 .requirements
1018 .iter()
1019 .map(|r| (r.id, r.modified_at))
1020 .collect()
1021 }
1022
    /// Atomically adds `new_req` to the freshest on-disk store.
    ///
    /// Under the exclusive lock the disk store is re-read so requirements
    /// added by other processes are preserved (their count is reported as
    /// `external_changes_merged`); store-level settings come from the local
    /// store; `add_requirement_with_id` then assigns the spec id against the
    /// merged store and everything is written back.
    pub fn add_requirement_atomic(
        &self,
        local_store: &RequirementsStore,
        new_req: Requirement,
        feature_prefix: Option<&str>,
        type_prefix: Option<&str>,
    ) -> Result<AddResult> {
        if self.is_sqlite() {
            return self.add_requirement_atomic_sqlite(
                local_store,
                new_req,
                feature_prefix,
                type_prefix,
            );
        }

        let mut lock_file = self.acquire_write_lock()?;

        // Best-effort breadcrumb for humans inspecting the lock file.
        let _ = writeln!(
            lock_file,
            "Locked by PID {} at {}",
            std::process::id(),
            chrono::Utc::now().to_rfc3339()
        );

        // Freshest on-disk state, read while holding the lock.
        let mut disk_store: RequirementsStore = if self.file_path.exists() {
            let file = File::open(&self.file_path)?;
            let reader = BufReader::new(file);
            serde_yaml::from_reader(reader)?
        } else {
            RequirementsStore::new()
        };

        // Requirements on disk but unknown locally = external edits we keep.
        let local_req_ids: std::collections::HashSet<Uuid> =
            local_store.requirements.iter().map(|r| r.id).collect();
        let external_changes_merged = disk_store
            .requirements
            .iter()
            .filter(|r| !local_req_ids.contains(&r.id))
            .count();

        // Store-level settings always come from the local store.
        disk_store.name = local_store.name.clone();
        disk_store.title = local_store.title.clone();
        disk_store.description = local_store.description.clone();
        disk_store.users = local_store.users.clone();
        disk_store.id_config = local_store.id_config.clone();
        disk_store.features = local_store.features.clone();
        disk_store.relationship_definitions = local_store.relationship_definitions.clone();
        disk_store.reaction_definitions = local_store.reaction_definitions.clone();
        disk_store.type_definitions = local_store.type_definitions.clone();
        disk_store.ai_prompts = local_store.ai_prompts.clone();
        disk_store.allowed_prefixes = local_store.allowed_prefixes.clone();
        disk_store.restrict_prefixes = local_store.restrict_prefixes;

        disk_store.add_requirement_with_id(new_req, feature_prefix, type_prefix);

        // Read back the assigned spec id — assumes add_requirement_with_id
        // appends the new requirement last (TODO confirm).
        let spec_id = disk_store
            .requirements
            .last()
            .and_then(|r| r.spec_id.clone())
            .unwrap_or_default();

        // On serialization failure, dump diagnostics about stray control
        // characters (a known trigger for serde_yaml errors) before
        // propagating the original error.
        let yaml = serde_yaml::to_string(&disk_store).map_err(|e| {
            let check_ctrl = |s: &str, name: &str| {
                for (i, c) in s.chars().enumerate() {
                    if c.is_control() && c != '\n' && c != '\t' && c != '\r' {
                        eprintln!(
                            "Control char in {}: position {}, char code {}",
                            name, i, c as u32
                        );
                    }
                }
            };
            check_ctrl(&disk_store.name, "store.name");
            check_ctrl(&disk_store.title, "store.title");
            check_ctrl(&disk_store.description, "store.description");
            if let Some(req) = disk_store.requirements.last() {
                check_ctrl(&req.title, "new_req.title");
                check_ctrl(&req.description, "new_req.description");
                check_ctrl(&req.owner, "new_req.owner");
                check_ctrl(&req.feature, "new_req.feature");
                if let Some(ref created_by) = req.created_by {
                    check_ctrl(created_by, "new_req.created_by");
                }
            }
            e
        })?;
        fs::write(&self.file_path, yaml)?;

        Ok(AddResult {
            store: disk_store,
            external_changes_merged,
            spec_id,
        })
    }
1155
    /// SQLite counterpart of `add_requirement_atomic`: reload the backend
    /// store, keep external additions, overlay local store-level settings,
    /// add the requirement, and save.
    fn add_requirement_atomic_sqlite(
        &self,
        local_store: &RequirementsStore,
        new_req: Requirement,
        feature_prefix: Option<&str>,
        type_prefix: Option<&str>,
    ) -> Result<AddResult> {
        use crate::db::{DatabaseBackend, SqliteBackend};

        let backend = SqliteBackend::new(&self.file_path)?;

        let mut disk_store = backend.load()?;

        // Requirements on disk but unknown locally = external edits we keep.
        let local_req_ids: std::collections::HashSet<Uuid> =
            local_store.requirements.iter().map(|r| r.id).collect();
        let external_changes_merged = disk_store
            .requirements
            .iter()
            .filter(|r| !local_req_ids.contains(&r.id))
            .count();

        // Store-level settings always come from the local store.
        disk_store.name = local_store.name.clone();
        disk_store.title = local_store.title.clone();
        disk_store.description = local_store.description.clone();
        disk_store.users = local_store.users.clone();
        disk_store.id_config = local_store.id_config.clone();
        disk_store.features = local_store.features.clone();
        disk_store.relationship_definitions = local_store.relationship_definitions.clone();
        disk_store.reaction_definitions = local_store.reaction_definitions.clone();
        disk_store.type_definitions = local_store.type_definitions.clone();
        disk_store.ai_prompts = local_store.ai_prompts.clone();
        disk_store.allowed_prefixes = local_store.allowed_prefixes.clone();
        disk_store.restrict_prefixes = local_store.restrict_prefixes;

        disk_store.add_requirement_with_id(new_req, feature_prefix, type_prefix);

        // Read back the assigned spec id — assumes add_requirement_with_id
        // appends the new requirement last (TODO confirm).
        let spec_id = disk_store
            .requirements
            .last()
            .and_then(|r| r.spec_id.clone())
            .unwrap_or_default();

        backend.save(&disk_store)?;

        Ok(AddResult {
            store: disk_store,
            external_changes_merged,
            spec_id,
        })
    }
1216
1217 pub fn get_attachments_dir(&self, spec_id: &str) -> Result<PathBuf> {
1220 let parent = self.file_path.parent().unwrap_or(Path::new("."));
1221 let attachments_dir = parent.join("attachments").join(spec_id);
1222
1223 if !attachments_dir.exists() {
1224 fs::create_dir_all(&attachments_dir).with_context(|| {
1225 format!(
1226 "Failed to create attachments directory: {:?}",
1227 attachments_dir
1228 )
1229 })?;
1230 }
1231
1232 Ok(attachments_dir)
1233 }
1234
    /// Copies `source_path` into the spec's attachments directory, renaming
    /// to `stem_N.ext` on name collisions.
    ///
    /// Returns the store-relative path (`attachments/<spec>/<name>`) and the
    /// copied file's size in bytes.
    ///
    /// NOTE(review): the exists-check/copy sequence is not atomic — two
    /// processes could pick the same candidate name concurrently.
    pub fn store_attachment_file(
        &self,
        spec_id: &str,
        source_path: &Path,
    ) -> Result<(String, u64)> {
        let attachments_dir = self.get_attachments_dir(spec_id)?;

        let filename = source_path
            .file_name()
            .ok_or_else(|| anyhow::anyhow!("Invalid source path: no filename"))?
            .to_string_lossy()
            .to_string();

        // Pick a destination that does not clash with existing attachments.
        let dest_path = {
            let initial_path = attachments_dir.join(&filename);
            if !initial_path.exists() {
                initial_path
            } else {
                // Collision: append _1, _2, … before the extension.
                let stem = source_path
                    .file_stem()
                    .map(|s| s.to_string_lossy().to_string())
                    .unwrap_or_else(|| "file".to_string());
                let ext = source_path
                    .extension()
                    .map(|s| format!(".{}", s.to_string_lossy()))
                    .unwrap_or_default();

                let mut counter = 1;
                loop {
                    let new_name = format!("{}_{}{}", stem, counter, ext);
                    let new_path = attachments_dir.join(&new_name);
                    if !new_path.exists() {
                        break new_path;
                    }
                    counter += 1;
                }
            }
        };

        fs::copy(source_path, &dest_path)
            .with_context(|| format!("Failed to copy file to {:?}", dest_path))?;

        let metadata = fs::metadata(&dest_path)
            .with_context(|| format!("Failed to read file metadata: {:?}", dest_path))?;
        let size = metadata.len();

        // The path stored on the requirement is relative to the store file.
        let rel_path = format!(
            "attachments/{}/{}",
            spec_id,
            dest_path.file_name().unwrap().to_string_lossy()
        );

        Ok((rel_path, size))
    }
1297
1298 pub fn store_attachment_bytes(
1300 &self,
1301 spec_id: &str,
1302 filename: &str,
1303 data: &[u8],
1304 ) -> Result<(String, u64)> {
1305 let attachments_dir = self.get_attachments_dir(spec_id)?;
1306
1307 let dest_path = {
1309 let initial_path = attachments_dir.join(filename);
1310 if !initial_path.exists() {
1311 initial_path
1312 } else {
1313 let path = Path::new(filename);
1315 let stem = path
1316 .file_stem()
1317 .map(|s| s.to_string_lossy().to_string())
1318 .unwrap_or_else(|| "file".to_string());
1319 let ext = path
1320 .extension()
1321 .map(|s| format!(".{}", s.to_string_lossy()))
1322 .unwrap_or_default();
1323
1324 let mut counter = 1;
1325 loop {
1326 let new_name = format!("{}_{}{}", stem, counter, ext);
1327 let new_path = attachments_dir.join(&new_name);
1328 if !new_path.exists() {
1329 break new_path;
1330 }
1331 counter += 1;
1332 }
1333 }
1334 };
1335
1336 fs::write(&dest_path, data)
1338 .with_context(|| format!("Failed to write attachment file: {:?}", dest_path))?;
1339
1340 let rel_path = format!(
1342 "attachments/{}/{}",
1343 spec_id,
1344 dest_path.file_name().unwrap().to_string_lossy()
1345 );
1346
1347 Ok((rel_path, data.len() as u64))
1348 }
1349
1350 pub fn remove_attachment_file(&self, spec_id: &str, stored_path: &str) -> Result<()> {
1352 let parent = self.file_path.parent().unwrap_or(Path::new("."));
1353 let full_path = parent.join(stored_path);
1354
1355 if full_path.exists() {
1356 fs::remove_file(&full_path)
1357 .with_context(|| format!("Failed to remove attachment file: {:?}", full_path))?;
1358 }
1359
1360 let attachments_dir = parent.join("attachments").join(spec_id);
1362 if attachments_dir.exists() {
1363 if let Ok(mut entries) = fs::read_dir(&attachments_dir) {
1365 if entries.next().is_none() {
1366 let _ = fs::remove_dir(&attachments_dir);
1367 }
1368 }
1369 }
1370
1371 Ok(())
1372 }
1373
    /// Resolves a store-relative attachment path against the store's
    /// directory (falls back to "." when the store has no parent).
    pub fn get_attachment_full_path(&self, stored_path: &str) -> PathBuf {
        let parent = self.file_path.parent().unwrap_or(Path::new("."));
        parent.join(stored_path)
    }
1379
    /// True when the attachment file is present on disk.
    pub fn attachment_exists(&self, stored_path: &str) -> bool {
        self.get_attachment_full_path(stored_path).exists()
    }
1384
1385 pub fn save_sync_state(&self, state: &crate::models::GitLabSyncState) -> Result<()> {
1390 if !self.is_sqlite() {
1391 anyhow::bail!("GitLab sync state is only supported for SQLite databases");
1392 }
1393
1394 use crate::db::SqliteBackend;
1395 let backend = SqliteBackend::new(&self.file_path)?;
1396 backend.save_sync_state(state)
1397 }
1398
1399 pub fn load_sync_state(
1402 &self,
1403 requirement_id: uuid::Uuid,
1404 issue_iid: u64,
1405 ) -> Result<Option<crate::models::GitLabSyncState>> {
1406 if !self.is_sqlite() {
1407 anyhow::bail!("GitLab sync state is only supported for SQLite databases");
1408 }
1409
1410 use crate::db::SqliteBackend;
1411 let backend = SqliteBackend::new(&self.file_path)?;
1412 backend.load_sync_state(requirement_id, issue_iid)
1413 }
1414
1415 pub fn load_sync_states_for_requirement(
1418 &self,
1419 requirement_id: uuid::Uuid,
1420 ) -> Result<Vec<crate::models::GitLabSyncState>> {
1421 if !self.is_sqlite() {
1422 anyhow::bail!("GitLab sync state is only supported for SQLite databases");
1423 }
1424
1425 use crate::db::SqliteBackend;
1426 let backend = SqliteBackend::new(&self.file_path)?;
1427 backend.load_sync_states_for_requirement(requirement_id)
1428 }
1429
1430 pub fn load_all_sync_states(&self) -> Result<Vec<crate::models::GitLabSyncState>> {
1433 if !self.is_sqlite() {
1434 anyhow::bail!("GitLab sync state is only supported for SQLite databases");
1435 }
1436
1437 use crate::db::SqliteBackend;
1438 let backend = SqliteBackend::new(&self.file_path)?;
1439 backend.load_all_sync_states()
1440 }
1441
1442 pub fn load_sync_states_by_status(
1445 &self,
1446 status: crate::models::SyncStatus,
1447 ) -> Result<Vec<crate::models::GitLabSyncState>> {
1448 if !self.is_sqlite() {
1449 anyhow::bail!("GitLab sync state is only supported for SQLite databases");
1450 }
1451
1452 use crate::db::SqliteBackend;
1453 let backend = SqliteBackend::new(&self.file_path)?;
1454 backend.load_sync_states_by_status(status)
1455 }
1456
1457 pub fn delete_sync_state(&self, requirement_id: uuid::Uuid, issue_iid: u64) -> Result<bool> {
1460 if !self.is_sqlite() {
1461 anyhow::bail!("GitLab sync state is only supported for SQLite databases");
1462 }
1463
1464 use crate::db::SqliteBackend;
1465 let backend = SqliteBackend::new(&self.file_path)?;
1466 backend.delete_sync_state(requirement_id, issue_iid)
1467 }
1468
1469 pub fn queue_list(
1476 &self,
1477 user_id: &str,
1478 include_completed: bool,
1479 ) -> Result<Vec<crate::models::QueueEntry>> {
1480 if !self.is_sqlite() {
1481 anyhow::bail!("Queue is only supported for SQLite databases");
1482 }
1483 use crate::db::{DatabaseBackend, SqliteBackend};
1484 let backend = SqliteBackend::new(&self.file_path)?;
1485 backend.queue_list(user_id, include_completed)
1486 }
1487
1488 pub fn queue_add(&self, entry: crate::models::QueueEntry) -> Result<()> {
1490 if !self.is_sqlite() {
1491 anyhow::bail!("Queue is only supported for SQLite databases");
1492 }
1493 use crate::db::{DatabaseBackend, SqliteBackend};
1494 let backend = SqliteBackend::new(&self.file_path)?;
1495 backend.queue_add(entry)
1496 }
1497
1498 pub fn queue_remove(&self, user_id: &str, requirement_id: &uuid::Uuid) -> Result<()> {
1500 if !self.is_sqlite() {
1501 anyhow::bail!("Queue is only supported for SQLite databases");
1502 }
1503 use crate::db::{DatabaseBackend, SqliteBackend};
1504 let backend = SqliteBackend::new(&self.file_path)?;
1505 backend.queue_remove(user_id, requirement_id)
1506 }
1507
1508 pub fn queue_reorder(&self, user_id: &str, items: &[(uuid::Uuid, i64)]) -> Result<()> {
1510 if !self.is_sqlite() {
1511 anyhow::bail!("Queue is only supported for SQLite databases");
1512 }
1513 use crate::db::{DatabaseBackend, SqliteBackend};
1514 let backend = SqliteBackend::new(&self.file_path)?;
1515 backend.queue_reorder(user_id, items)
1516 }
1517
1518 pub fn queue_clear(&self, user_id: &str, completed_only: bool) -> Result<()> {
1520 if !self.is_sqlite() {
1521 anyhow::bail!("Queue is only supported for SQLite databases");
1522 }
1523 use crate::db::{DatabaseBackend, SqliteBackend};
1524 let backend = SqliteBackend::new(&self.file_path)?;
1525 backend.queue_clear(user_id, completed_only)
1526 }
1527}
1528
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// Build an empty store with a recognizable name for assertions.
    fn create_test_store() -> RequirementsStore {
        let mut store = RequirementsStore::new();
        store.name = "test".to_string();
        store
    }

    /// Build a requirement whose description is derived from its title.
    fn create_test_requirement(title: &str) -> Requirement {
        Requirement::new(title.to_string(), format!("Description for {}", title))
    }

    #[test]
    fn test_save_and_load() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        let mut store = create_test_store();
        store.requirements.push(create_test_requirement("Test Req"));

        storage.save(&store).unwrap();

        // Round-trip: what we saved must come back intact.
        let loaded = storage.load().unwrap();
        assert_eq!(loaded.requirements.len(), 1);
        assert_eq!(loaded.requirements[0].title, "Test Req");
    }

    #[test]
    fn test_conflict_detection_no_conflict() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        let mut store = create_test_store();
        let mut req = create_test_requirement("Test Req");
        req.spec_id = Some("FR-0001".to_string());
        let req_id = req.id;
        store.requirements.push(req);

        storage.save(&store).unwrap();

        // Snapshot timestamps before the local edit; no external writer runs,
        // so the save must succeed without conflict.
        let timestamps = Storage::get_requirement_timestamps(&store);

        store.requirements[0].title = "Modified Title".to_string();
        store.requirements[0].modified_at = Utc::now();

        let result = storage
            .save_with_conflict_detection(&store, &timestamps, &[req_id])
            .unwrap();

        match result {
            SaveResult::Success => {}
            _ => panic!("Expected SaveResult::Success"),
        }
    }

    #[test]
    fn test_conflict_detection_with_external_change() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        let mut store = create_test_store();
        let mut req = create_test_requirement("Test Req");
        req.spec_id = Some("FR-0001".to_string());
        let req_id = req.id;
        store.requirements.push(req);

        storage.save(&store).unwrap();

        let timestamps = Storage::get_requirement_timestamps(&store);
        let mut local_store = store.clone();

        // Simulate another process writing to disk after our snapshot.
        store.requirements[0].title = "External Change".to_string();
        store.requirements[0].modified_at = Utc::now();
        storage.save(&store).unwrap();

        // A local edit to the same field must now be reported as a conflict.
        local_store.requirements[0].title = "Local Change".to_string();
        local_store.requirements[0].modified_at = Utc::now();

        let result = storage
            .save_with_conflict_detection(&local_store, &timestamps, &[req_id])
            .unwrap();

        match result {
            SaveResult::Conflict(info) => {
                assert_eq!(info.requirement_id, req_id);
                assert!(!info.conflicting_fields.is_empty());
                assert!(info
                    .conflicting_fields
                    .iter()
                    .any(|f| f.field_name == "title"));
            }
            _ => panic!("Expected SaveResult::Conflict"),
        }
    }

    #[test]
    fn test_conflict_resolution_force_local() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        let mut store = create_test_store();
        let mut req = create_test_requirement("Test Req");
        req.spec_id = Some("FR-0001".to_string());
        let req_id = req.id;
        store.requirements.push(req);
        storage.save(&store).unwrap();

        store.requirements[0].title = "External".to_string();
        storage.save(&store).unwrap();

        let mut local_store = store.clone();
        local_store.requirements[0].title = "Local".to_string();

        let result = storage
            .save_with_resolution(&local_store, req_id, ConflictResolution::ForceLocal)
            .unwrap();

        // ForceLocal keeps the in-memory edit over the disk version.
        assert_eq!(result.requirements[0].title, "Local");
    }

    #[test]
    fn test_conflict_resolution_keep_disk() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        let mut store = create_test_store();
        let mut req = create_test_requirement("Test Req");
        req.spec_id = Some("FR-0001".to_string());
        let req_id = req.id;
        store.requirements.push(req);
        storage.save(&store).unwrap();

        store.requirements[0].title = "External".to_string();
        storage.save(&store).unwrap();

        let mut local_store = store.clone();
        local_store.requirements[0].title = "Local".to_string();

        let result = storage
            .save_with_resolution(&local_store, req_id, ConflictResolution::KeepDisk)
            .unwrap();

        // KeepDisk discards the local edit in favor of the disk version.
        assert_eq!(result.requirements[0].title, "External");
    }

    #[test]
    fn test_get_requirement_timestamps() {
        let mut store = create_test_store();
        let req1 = create_test_requirement("Req1");
        let req2 = create_test_requirement("Req2");
        let id1 = req1.id;
        let id2 = req2.id;
        let ts1 = req1.modified_at;
        let ts2 = req2.modified_at;

        store.requirements.push(req1);
        store.requirements.push(req2);

        // The map must contain exactly one entry per requirement,
        // keyed by id with its modified_at value.
        let timestamps = Storage::get_requirement_timestamps(&store);
        assert_eq!(timestamps.len(), 2);
        assert_eq!(timestamps.get(&id1), Some(&ts1));
        assert_eq!(timestamps.get(&id2), Some(&ts2));
    }

    #[test]
    fn test_new_requirement_preserves_external_additions() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        // Instance A writes Req1 to disk.
        let mut store_a = create_test_store();
        let req1 = create_test_requirement("Req1 from Instance A");
        let req1_id = req1.id;
        store_a.requirements.push(req1);
        storage.save(&store_a).unwrap();

        // Instance B never saw Req1; it only knows about its own Req2.
        let mut store_b = create_test_store();
        let req2 = create_test_requirement("Req2 from Instance B");
        let req2_id = req2.id;
        store_b.requirements.push(req2);

        // Empty baseline: B loaded before A's requirement existed.
        let original_timestamps: HashMap<Uuid, DateTime<Utc>> = HashMap::new();
        let modified_ids = vec![req2_id];

        let result = storage
            .save_with_conflict_detection(&store_b, &original_timestamps, &modified_ids)
            .unwrap();

        match result {
            SaveResult::Success => {}
            SaveResult::Merged { .. } => {}
            SaveResult::Conflict(_) => panic!("Should not have conflict"),
        }

        // Both requirements must survive the merge.
        let final_store = storage.load().unwrap();
        assert_eq!(final_store.requirements.len(), 2);
        assert!(final_store.requirements.iter().any(|r| r.id == req1_id));
        assert!(final_store.requirements.iter().any(|r| r.id == req2_id));
    }

    #[test]
    fn test_deletion_preserves_external_additions() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = temp_dir.path().join("test.yaml");
        let storage = Storage::new(&file_path);

        // Start with Req2 on disk.
        let mut initial_store = create_test_store();
        let req2 = create_test_requirement("Req2");
        let req2_id = req2.id;
        initial_store.requirements.push(req2);
        storage.save(&initial_store).unwrap();

        // Instance A adds Req1 externally.
        let req1 = create_test_requirement("Req1 from Instance A");
        let req1_id = req1.id;
        let mut store_a = initial_store.clone();
        store_a.requirements.push(req1);
        storage.save(&store_a).unwrap();

        // Instance B deletes Req2 (its store is empty) without knowing about Req1.
        let store_b = create_test_store();
        let original_timestamps: HashMap<Uuid, DateTime<Utc>> = HashMap::new();
        let modified_ids = vec![req2_id];

        let result = storage
            .save_with_conflict_detection(&store_b, &original_timestamps, &modified_ids)
            .unwrap();

        match result {
            SaveResult::Success => {}
            SaveResult::Merged { .. } => {}
            SaveResult::Conflict(_) => panic!("Should not have conflict"),
        }

        // The external addition (Req1) survives; the local deletion (Req2) sticks.
        let final_store = storage.load().unwrap();
        assert_eq!(final_store.requirements.len(), 1);
        assert!(final_store.requirements.iter().any(|r| r.id == req1_id));
        assert!(!final_store.requirements.iter().any(|r| r.id == req2_id));
    }
}