// suture_core/repository/repo_impl.rs
1//! The Suture Repository — high-level API for version control operations.
2//!
3//! A Repository combines:
4//! - `BlobStore` (CAS) for content-addressed blob storage
5//! - `PatchDag` (in-memory) for patch history
6//! - `MetadataStore` (SQLite) for persistent metadata
7//! - `Patch Application Engine` for reconstructing file trees
8//!
9//! # Repository Layout
10//!
11//! ```text
12//! my-project/
13//!   .suture/
14//!     objects/        # CAS blob storage
15//!     metadata.db     # SQLite metadata
16//!     HEAD            # Current branch reference
17//! ```
18//!
19//! .sutureignore (in repo root):
20//!   build/
21//!   *.o
22//!   target/
23
24use crate::cas::store::{BlobStore, CasError};
25use crate::dag::graph::{DagError, PatchDag};
26use crate::engine::apply::{ApplyError, apply_patch_chain, resolve_payload_to_hash};
27use crate::engine::diff::{DiffEntry, DiffType, diff_trees};
28use crate::engine::tree::FileTree;
29use crate::metadata::MetaError;
30use crate::patch::conflict::Conflict;
31use crate::patch::merge::MergeResult;
32use crate::patch::types::{FileChange, OperationType, Patch, PatchId, TouchSet};
33use serde::{Deserialize, Serialize};
34use std::cell::RefCell;
35use std::collections::{HashMap, HashSet, VecDeque};
36use std::fs;
37use std::io;
38use std::path::{Path, PathBuf};
39use suture_common::{BranchName, CommonError, FileStatus, Hash, RepoPath};
40use thiserror::Error;
41
/// Repository errors.
///
/// Wraps errors from every backing subsystem (`#[from]` gives automatic
/// `?`-conversion from CAS, DAG, metadata, I/O, apply, and common errors).
#[derive(Error, Debug)]
pub enum RepoError {
    /// The given path is not a Suture repository (no `.suture/` directory).
    #[error("not a suture repository: {0}")]
    NotARepository(PathBuf),

    /// A Suture repository already exists at the given path.
    #[error("repository already exists: {0}")]
    AlreadyExists(PathBuf),

    /// An error occurred in the Content Addressable Storage.
    #[error("CAS error: {0}")]
    Cas(#[from] CasError),

    /// An error occurred in the Patch DAG.
    #[error("DAG error: {0}")]
    Dag(#[from] DagError),

    /// An error occurred in the metadata store.
    #[error("metadata error: {0}")]
    Meta(#[from] MetaError),

    /// An I/O error occurred.
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),

    /// An error occurred during patch application.
    #[error("patch application error: {0}")]
    Apply(#[from] ApplyError),

    /// A patch-related error occurred (free-form description).
    #[error("patch error: {0}")]
    Patch(String),

    /// No changes are staged for commit.
    #[error("nothing to commit")]
    NothingToCommit,

    /// A merge is in progress with unresolved conflicts.
    #[error("merge in progress — resolve conflicts first")]
    MergeInProgress,

    /// Uncommitted staged changes would be overwritten by this operation.
    /// The payload is the number of staged entries at risk.
    #[error("uncommitted changes would be overwritten (staged: {0})")]
    DirtyWorkingTree(usize),

    /// The specified branch was not found.
    #[error("branch not found: {0}")]
    BranchNotFound(String),

    /// An error from the `suture-common` crate.
    #[error("common error: {0}")]
    Common(#[from] CommonError),

    /// A generic custom error; the string is shown verbatim to the user.
    #[error("{0}")]
    Custom(String),

    /// The requested operation is not supported.
    #[error("unsupported operation: {0}")]
    Unsupported(String),
}
105
/// Reset mode for the `reset` command.
///
/// Analogous to Git's `--soft` / `--mixed` / `--hard` reset modes.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ResetMode {
    /// Move branch pointer only; keep staging and working tree.
    Soft,
    /// Move branch pointer and clear staging; keep working tree.
    Mixed,
    /// Move branch pointer, clear staging, and restore working tree.
    Hard,
}
116
/// The Suture Repository.
///
/// Owns all backing stores (CAS, in-memory DAG, SQLite metadata). The
/// `cached_head_*` fields are `RefCell`s so read-only (`&self`) methods
/// can memoize HEAD lookups.
pub struct Repository {
    /// Path to the repository root (the directory containing `.suture/`).
    root: PathBuf,
    /// Path to the `.suture/` directory.
    // NOTE(review): this field is read by read_head_branch/write_head_branch,
    // so the allow(dead_code) below looks stale — confirm and remove.
    #[allow(dead_code)]
    suture_dir: PathBuf,
    /// Content Addressable Storage.
    cas: BlobStore,
    /// In-memory Patch DAG.
    dag: PatchDag,
    /// Persistent metadata store.
    meta: crate::metadata::MetadataStore,
    /// Current author name.
    author: String,
    /// Parsed ignore patterns.
    ignore_patterns: Vec<String>,
    /// Pending merge parents (set during a conflicting merge).
    pending_merge_parents: Vec<PatchId>,
    /// Cached FileTree snapshot for the current HEAD.
    cached_head_snapshot: RefCell<Option<FileTree>>,
    /// The patch ID that the cached snapshot corresponds to.
    cached_head_id: RefCell<Option<PatchId>>,
    /// The branch name that HEAD points to (cached).
    cached_head_branch: RefCell<Option<String>>,
    /// Per-repo configuration loaded from `.suture/config`.
    repo_config: crate::metadata::repo_config::RepoConfig,
    /// Whether this repository is a worktree (linked to a main repo).
    is_worktree: bool,
}
147
148impl Repository {
149    /// Initialize a new Suture repository at the given path.
150    pub fn init(path: &Path, author: &str) -> Result<Self, RepoError> {
151        let suture_dir = path.join(".suture");
152        if suture_dir.exists() {
153            return Err(RepoError::AlreadyExists(path.to_path_buf()));
154        }
155
156        // Create directory structure
157        fs::create_dir_all(suture_dir.join("objects"))?;
158
159        // Initialize CAS (disable per-read hash verification for performance;
160        // content addressing already ensures correctness by construction)
161        let mut cas = BlobStore::new(&suture_dir)?;
162        cas.set_verify_on_read(false);
163
164        // Initialize metadata
165        let meta = crate::metadata::MetadataStore::open(&suture_dir.join("metadata.db"))?;
166
167        // Create the in-memory DAG
168        let mut dag = PatchDag::new();
169
170        // Create root commit
171        let root_patch = Patch::new(
172            OperationType::Create,
173            TouchSet::empty(),
174            None,
175            vec![],
176            vec![],
177            author.to_string(),
178            "Initial commit".to_string(),
179        );
180        let root_id = dag.add_patch(root_patch.clone(), vec![])?;
181
182        // Persist root patch
183        meta.store_patch(&root_patch)?;
184
185        // Create default branch
186        let main_branch = BranchName::new("main").expect("hardcoded 'main' is always valid");
187        dag.create_branch(main_branch.clone(), root_id)?;
188        meta.set_branch(&main_branch, &root_id)?;
189
190        // Store author config
191        meta.set_config("author", author)?;
192
193        // Load ignore patterns
194        let ignore_patterns = load_ignore_patterns(path);
195
196        Ok(Self {
197            root: path.to_path_buf(),
198            suture_dir,
199            cas,
200            dag,
201            meta,
202            author: author.to_string(),
203            ignore_patterns,
204            pending_merge_parents: Vec::new(),
205            cached_head_snapshot: RefCell::new(None),
206            cached_head_id: RefCell::new(None),
207            cached_head_branch: RefCell::new(None),
208            repo_config: crate::metadata::repo_config::RepoConfig::default(),
209            is_worktree: false,
210        })
211    }
    /// Open an existing Suture repository at the given path.
    ///
    /// Reconstructs the full DAG from the metadata database by loading
    /// all stored patches and their edges.
    pub fn open(path: &Path) -> Result<Self, RepoError> {
        let suture_dir = path.join(".suture");
        if !suture_dir.exists() {
            return Err(RepoError::NotARepository(path.to_path_buf()));
        }

        // A `.suture/worktree` marker distinguishes linked worktrees from
        // the main repository.
        let is_worktree = suture_dir.join("worktree").exists();

        // Initialize CAS (disable per-read hash verification for performance)
        let mut cas = BlobStore::new(&suture_dir)?;
        cas.set_verify_on_read(false);
        let meta = crate::metadata::MetadataStore::open(&suture_dir.join("metadata.db"))?;

        // Reconstruct DAG from metadata — load ALL patches
        let mut dag = PatchDag::new();

        // Collect all patch IDs from the patches table. Rows that fail to
        // read or to parse as a hash are skipped silently.
        let all_patch_ids: Vec<PatchId> = {
            let mut stmt = meta
                .conn()
                .prepare("SELECT id FROM patches ORDER BY id")
                .map_err(|e: rusqlite::Error| RepoError::Custom(e.to_string()))?;
            let rows = stmt
                .query_map([], |row: &rusqlite::Row| row.get::<_, String>(0))
                .map_err(|e: rusqlite::Error| RepoError::Custom(e.to_string()))?;
            rows.filter_map(|r: Result<String, _>| r.ok())
                .filter_map(|hex| Hash::from_hex(&hex).ok())
                .collect()
        };

        // Load each patch and add to DAG, parents first.
        // Fixed-point loop: each pass admits every patch whose parents are
        // already loaded. The `attempts` bound (len + 1 passes) guarantees
        // termination even if some parents never become available.
        let mut loaded: HashSet<PatchId> = HashSet::new();
        let mut attempts = 0;
        while loaded.len() < all_patch_ids.len() && attempts < all_patch_ids.len() + 1 {
            for patch_id in &all_patch_ids {
                if loaded.contains(patch_id) {
                    continue;
                }
                if let Ok(patch) = meta.get_patch(patch_id) {
                    // Check if all parents are loaded (Hash::ZERO is treated
                    // as "no parent" and never blocks readiness).
                    let parents_ready = patch
                        .parent_ids
                        .iter()
                        .all(|pid| loaded.contains(pid) || *pid == Hash::ZERO);
                    if parents_ready {
                        // Filter out non-existent parents (root commits)
                        let valid_parents: Vec<PatchId> = patch
                            .parent_ids
                            .iter()
                            .filter(|pid| loaded.contains(pid))
                            .copied()
                            .collect();
                        // NOTE(review): add_patch errors are swallowed but the
                        // id is still marked loaded — confirm this best-effort
                        // recovery is intended.
                        let _ = dag.add_patch(patch, valid_parents);
                        loaded.insert(*patch_id);
                    }
                }
            }
            attempts += 1;
        }

        // Recreate branches; rows with invalid names are skipped.
        let branches = meta.list_branches()?;
        for (name, target_id) in &branches {
            let branch_name = match BranchName::new(name) {
                Ok(b) => b,
                Err(_) => continue,
            };
            if !dag.branch_exists(&branch_name) {
                let _ = dag.create_branch(branch_name, *target_id);
            }
        }

        // Author preference order: `user.name`, then legacy `author`,
        // then the literal "unknown".
        let author = meta
            .get_config("user.name")
            .unwrap_or(None)
            .or_else(|| meta.get_config("author").unwrap_or(None))
            .unwrap_or_else(|| "unknown".to_string());

        // Restore pending merge parents if a merge was in progress
        let restored_parents = restore_pending_merge_parents(&meta);

        // Load ignore patterns
        let ignore_patterns = load_ignore_patterns(path);

        // Load per-repo config from .suture/config
        let repo_config = crate::metadata::repo_config::RepoConfig::load(path);

        Ok(Self {
            root: path.to_path_buf(),
            suture_dir,
            cas,
            dag,
            meta,
            author,
            ignore_patterns,
            pending_merge_parents: restored_parents,
            cached_head_snapshot: RefCell::new(None),
            cached_head_id: RefCell::new(None),
            cached_head_branch: RefCell::new(None),
            repo_config,
            is_worktree,
        })
    }
    /// Open an in-memory repository for testing or embedded use.
    ///
    /// Metadata lives in an in-memory SQLite database; the CAS still needs
    /// a real directory, so a fresh temp dir is created for it.
    ///
    /// NOTE(review): `TempDir::keep()` disables auto-deletion, so the temp
    /// directory is NOT cleaned up on drop — it persists until removed
    /// externally. Confirm this leak is acceptable for the intended uses.
    pub fn open_in_memory() -> Result<Self, RepoError> {
        let temp_root = tempfile::tempdir().map_err(RepoError::Io)?.keep();
        let suture_dir = temp_root.join(".suture");
        fs::create_dir_all(&suture_dir)?;

        // Per-read verification off, as in `init`/`open`.
        let mut cas = BlobStore::new(&suture_dir)?;
        cas.set_verify_on_read(false);
        let meta = crate::metadata::MetadataStore::open_in_memory()?;

        // Seed the DAG with an empty root commit authored by "suture".
        let mut dag = PatchDag::new();
        let root_patch = Patch::new(
            OperationType::Create,
            TouchSet::empty(),
            None,
            vec![],
            vec![],
            "suture".to_string(),
            "Initial commit".to_string(),
        );
        let root_id = dag.add_patch(root_patch.clone(), vec![])?;
        meta.store_patch(&root_patch)?;

        let main_branch = BranchName::new("main").expect("hardcoded 'main' is always valid");
        dag.create_branch(main_branch.clone(), root_id)?;
        meta.set_branch(&main_branch, &root_id)?;
        meta.set_config("author", "suture")?;

        Ok(Self {
            root: temp_root,
            suture_dir,
            cas,
            dag,
            meta,
            author: "suture".to_string(),
            ignore_patterns: Vec::new(),
            pending_merge_parents: Vec::new(),
            cached_head_snapshot: RefCell::new(None),
            cached_head_id: RefCell::new(None),
            cached_head_branch: RefCell::new(None),
            repo_config: crate::metadata::repo_config::RepoConfig::default(),
            is_worktree: false,
        })
    }
366
367    // =========================================================================
368    // Branch Operations
369    // =========================================================================
370
371    /// Create a new branch.
372    pub fn create_branch(&mut self, name: &str, target: Option<&str>) -> Result<(), RepoError> {
373        let branch = BranchName::new(name)?;
374        let target_id = match target {
375            Some(t) => {
376                if let Ok(bn) = BranchName::new(t) {
377                    self.dag
378                        .get_branch(&bn)
379                        .ok_or_else(|| RepoError::BranchNotFound(t.to_string()))?
380                } else {
381                    Hash::from_hex(t)
382                        .map_err(|_| RepoError::Custom(format!("invalid target: {}", t)))?
383                }
384            }
385            None => {
386                let head = self
387                    .dag
388                    .head()
389                    .ok_or_else(|| RepoError::Custom("no HEAD branch".to_string()))?;
390                head.1
391            }
392        };
393
394        self.dag.create_branch(branch.clone(), target_id)?;
395        self.meta.set_branch(&branch, &target_id)?;
396        Ok(())
397    }
398
399    /// Get the current branch and its target.
400    ///
401    /// Reads the `head_branch` config key to determine which branch is
402    /// currently checked out. Falls back to "main" if not set.
403    pub fn head(&self) -> Result<(String, PatchId), RepoError> {
404        if let Some(ref cached) = *self.cached_head_id.borrow()
405            && let Some(ref branch) = *self.cached_head_branch.borrow()
406        {
407            return Ok((branch.clone(), *cached));
408        }
409        let branch_name = self.read_head_branch()?;
410
411        let bn = BranchName::new(&branch_name)?;
412        let target_id = self
413            .dag
414            .get_branch(&bn)
415            .ok_or_else(|| RepoError::BranchNotFound(branch_name.clone()))?;
416
417        *self.cached_head_branch.borrow_mut() = Some(branch_name.clone());
418        *self.cached_head_id.borrow_mut() = Some(target_id);
419        Ok((branch_name, target_id))
420    }
421
    /// List all branches known to the in-memory DAG as (name, tip patch ID).
    pub fn list_branches(&self) -> Vec<(String, PatchId)> {
        self.dag.list_branches()
    }
426
427    /// Delete a branch. Cannot delete the currently checked-out branch.
428    pub fn delete_branch(&mut self, name: &str) -> Result<(), RepoError> {
429        let (current_branch, _) = self.head()?;
430        if current_branch == name {
431            return Err(RepoError::Custom(format!(
432                "cannot delete the current branch '{}'",
433                name
434            )));
435        }
436        let branch = BranchName::new(name)?;
437        self.dag.delete_branch(&branch)?;
438        // Also remove from metadata
439        self.meta
440            .conn()
441            .execute(
442                "DELETE FROM branches WHERE name = ?1",
443                rusqlite::params![name],
444            )
445            .map_err(|e| RepoError::Custom(e.to_string()))?;
446        Ok(())
447    }
448
449    // =========================================================================
450    // Config
451    // =========================================================================
452
453    /// Get a configuration value.
454    ///
455    /// Lookup order:
456    /// 1. `.suture/config` file (repo-level TOML config)
457    /// 2. SQLite config table (set via `suture config key=value`)
458    /// 3. Global config `~/.config/suture/config.toml`
459    pub fn get_config(&self, key: &str) -> Result<Option<String>, RepoError> {
460        // 1. Check repo-level config file
461        if let Some(val) = self.repo_config.get(key) {
462            return Ok(Some(val));
463        }
464        // 2. Check SQLite config
465        if let Some(val) = self.meta.get_config(key).map_err(RepoError::from)? {
466            return Ok(Some(val));
467        }
468        // 3. Check global config
469        let global = crate::metadata::global_config::GlobalConfig::load();
470        Ok(global.get(key))
471    }
472
473    /// Set a configuration value.
474    pub fn set_config(&mut self, key: &str, value: &str) -> Result<(), RepoError> {
475        self.meta.set_config(key, value).map_err(RepoError::from)
476    }
477
478    /// List all configuration key-value pairs.
479    pub fn list_config(&self) -> Result<Vec<(String, String)>, RepoError> {
480        self.meta.list_config().map_err(RepoError::from)
481    }
482
483    // =========================================================================
484    // Worktree HEAD (per-worktree branch pointer)
485    // =========================================================================
486
487    fn read_head_branch(&self) -> Result<String, RepoError> {
488        if self.is_worktree {
489            let head_path = self.suture_dir.join("HEAD");
490            if head_path.exists() {
491                Ok(fs::read_to_string(&head_path)?.trim().to_string())
492            } else {
493                Ok("main".to_string())
494            }
495        } else {
496            Ok(self
497                .meta
498                .get_config("head_branch")
499                .unwrap_or(None)
500                .unwrap_or_else(|| "main".to_string()))
501        }
502    }
503
504    fn write_head_branch(&self, branch: &str) -> Result<(), RepoError> {
505        if self.is_worktree {
506            fs::write(self.suture_dir.join("HEAD"), branch)?;
507        } else {
508            self.meta
509                .set_config("head_branch", branch)
510                .map_err(RepoError::Meta)?;
511        }
512        Ok(())
513    }
514
515    // =========================================================================
516    // Tag Operations
517    // =========================================================================
518
519    /// Create a tag pointing to a patch ID (or HEAD).
520    ///
521    /// Tags are stored as config entries `tag.<name>` pointing to a patch hash.
522    pub fn create_tag(&mut self, name: &str, target: Option<&str>) -> Result<(), RepoError> {
523        let target_id = match target {
524            Some(t) => {
525                if let Ok(bn) = BranchName::new(t) {
526                    self.dag
527                        .get_branch(&bn)
528                        .ok_or_else(|| RepoError::BranchNotFound(t.to_string()))?
529                } else {
530                    Hash::from_hex(t)
531                        .map_err(|_| RepoError::Custom(format!("invalid target: {}", t)))?
532                }
533            }
534            None => {
535                let (_, head_id) = self.head()?;
536                head_id
537            }
538        };
539        self.set_config(&format!("tag.{name}"), &target_id.to_hex())
540    }
541
542    /// Delete a tag.
543    pub fn delete_tag(&mut self, name: &str) -> Result<(), RepoError> {
544        self.meta
545            .conn()
546            .execute(
547                "DELETE FROM config WHERE key = ?1",
548                rusqlite::params![format!("tag.{name}")],
549            )
550            .map_err(|e| RepoError::Custom(e.to_string()))?;
551        Ok(())
552    }
553
554    /// List all tags as (name, target_patch_id).
555    pub fn list_tags(&self) -> Result<Vec<(String, PatchId)>, RepoError> {
556        let config = self.list_config()?;
557        let mut tags = Vec::new();
558        for (key, value) in config {
559            if let Some(name) = key.strip_prefix("tag.")
560                && let Ok(id) = Hash::from_hex(&value)
561            {
562                tags.push((name.to_string(), id));
563            }
564        }
565        tags.sort_by(|a, b| a.0.cmp(&b.0));
566        Ok(tags)
567    }
568
569    /// Resolve a tag name to a patch ID.
570    pub fn resolve_tag(&self, name: &str) -> Result<Option<PatchId>, RepoError> {
571        let val = self.get_config(&format!("tag.{name}"))?;
572        match val {
573            Some(hex) => Ok(Some(Hash::from_hex(&hex)?)),
574            None => Ok(None),
575        }
576    }
577
578    // =========================================================================
579    // Notes
580    // =========================================================================
581
582    /// Add a note to a commit.
583    pub fn add_note(&self, patch_id: &PatchId, note: &str) -> Result<(), RepoError> {
584        let existing = self.list_notes(patch_id)?;
585        let next_idx = existing.len();
586        let key = format!("note.{}.{}", patch_id, next_idx);
587        self.meta.set_config(&key, note).map_err(RepoError::Meta)
588    }
589
590    /// List notes for a commit.
591    pub fn list_notes(&self, patch_id: &PatchId) -> Result<Vec<String>, RepoError> {
592        let prefix = format!("note.{}.", patch_id);
593        let all_config = self.meta.list_config().map_err(RepoError::Meta)?;
594        let mut notes: Vec<(usize, String)> = Vec::new();
595        for (key, value) in &all_config {
596            if let Some(idx_str) = key.strip_prefix(&prefix)
597                && let Ok(idx) = idx_str.parse::<usize>()
598            {
599                notes.push((idx, value.clone()));
600            }
601        }
602        notes.sort_by_key(|(idx, _)| *idx);
603        Ok(notes.into_iter().map(|(_, v)| v).collect())
604    }
605
606    /// Remove a note from a commit.
607    pub fn remove_note(&self, patch_id: &PatchId, index: usize) -> Result<(), RepoError> {
608        let key = format!("note.{}.{}", patch_id, index);
609        self.meta.delete_config(&key).map_err(RepoError::Meta)
610    }
611
612    // =========================================================================
613    // Incremental Push Support
614    // =========================================================================
615
    /// Get patches created after a given patch ID (ancestry walk).
    ///
    /// Returns patches reachable from branch tips but NOT ancestors of
    /// `since_id`, sorted so parents precede children.
    ///
    /// NOTE(review): the relative order of incomparable patches follows
    /// HashMap iteration order and is not deterministic across runs —
    /// confirm callers rely only on the parent-before-child guarantee.
    pub fn patches_since(&self, since_id: &PatchId) -> Vec<Patch> {
        let since_ancestors = self.dag.ancestors(since_id);
        // Include since_id itself in the "already known" set
        let mut known = since_ancestors;
        known.insert(*since_id);

        // Walk from all branch tips, collect patches not in `known`
        let mut new_ids: HashSet<PatchId> = HashSet::new();
        let mut stack: Vec<PatchId> = self.dag.list_branches().iter().map(|(_, id)| *id).collect();

        while let Some(id) = stack.pop() {
            // `insert` returns false for already-visited IDs, so each node
            // is expanded at most once.
            if !known.contains(&id)
                && new_ids.insert(id)
                && let Some(node) = self.dag.get_node(&id)
            {
                for parent in &node.patch.parent_ids {
                    if !known.contains(parent) && !new_ids.contains(parent) {
                        stack.push(*parent);
                    }
                }
            }
        }

        // Topological sort: parents before children (Kahn's algorithm)
        let patches: HashMap<PatchId, Patch> = new_ids
            .into_iter()
            .filter_map(|id| self.dag.get_patch(&id).map(|p| (id, p.clone())))
            .collect();

        // Count in-edges from within our set; parents outside the set do
        // not contribute (the receiver already has them).
        let mut in_degree: HashMap<PatchId, usize> = HashMap::new();
        let mut children: HashMap<PatchId, Vec<PatchId>> = HashMap::new();
        for (&id, patch) in &patches {
            in_degree.entry(id).or_insert(0);
            for parent_id in &patch.parent_ids {
                if patches.contains_key(parent_id) {
                    children.entry(*parent_id).or_default().push(id);
                    *in_degree.entry(id).or_insert(0) += 1;
                }
            }
        }

        // Seed the queue with roots (no in-set parents).
        let mut queue: VecDeque<PatchId> = in_degree
            .iter()
            .filter(|&(_, deg)| *deg == 0)
            .map(|(&id, _)| id)
            .collect();
        let mut sorted_ids: Vec<PatchId> = Vec::with_capacity(patches.len());

        while let Some(id) = queue.pop_front() {
            sorted_ids.push(id);
            if let Some(kids) = children.get(&id) {
                for &child in kids {
                    let deg = in_degree
                        .get_mut(&child)
                        .expect("in-degree entry exists for child in topo sort");
                    *deg -= 1;
                    if *deg == 0 {
                        queue.push_back(child);
                    }
                }
            }
        }

        sorted_ids
            .into_iter()
            .filter_map(|id| patches.get(&id).cloned())
            .collect()
    }
688
689    // =========================================================================
690    // Staging & Commit
691    // =========================================================================
692
693    /// Get repository status.
694    pub fn status(&self) -> Result<RepoStatus, RepoError> {
695        let working_set = self.meta.working_set()?;
696        let branches = self.list_branches();
697        let head = self.head()?;
698
699        Ok(RepoStatus {
700            head_branch: Some(head.0),
701            head_patch: Some(head.1),
702            branch_count: branches.len(),
703            staged_files: working_set
704                .iter()
705                .filter(|(_, s)| {
706                    matches!(
707                        s,
708                        FileStatus::Added | FileStatus::Modified | FileStatus::Deleted
709                    )
710                })
711                .map(|(p, s)| (p.clone(), *s))
712                .collect(),
713            patch_count: self.dag.patch_count(),
714        })
715    }
716
717    /// Add a file to the staging area (working set).
718    pub fn add(&self, path: &str) -> Result<(), RepoError> {
719        let repo_path = RepoPath::new(path)?;
720        let full_path = self.root.join(path);
721
722        if !full_path.exists() {
723            if self.is_tracked(path)? {
724                self.meta.working_set_add(&repo_path, FileStatus::Deleted)?;
725                return Ok(());
726            }
727            return Err(RepoError::Io(io::Error::new(
728                io::ErrorKind::NotFound,
729                format!("file not found: {}", path),
730            )));
731        }
732
733        let status = if self.is_tracked(path)? {
734            FileStatus::Modified
735        } else {
736            FileStatus::Added
737        };
738
739        self.meta.working_set_add(&repo_path, status)?;
740        Ok(())
741    }
742
743    /// Add all files (respecting .sutureignore).
744    pub fn add_all(&self) -> Result<usize, RepoError> {
745        let tree = self.snapshot_head()?;
746        let mut count = 0;
747
748        for entry in walk_dir(&self.root, &self.ignore_patterns)? {
749            let rel_path = entry.relative;
750            let full_path = self.root.join(&rel_path);
751
752            let is_tracked = tree.contains(&rel_path);
753
754            // Check if file has changed
755            if is_tracked
756                && let Ok(data) = fs::read(&full_path)
757                && let Some(old_hash) = tree.get(&rel_path)
758                && Hash::from_data(&data) == *old_hash
759            {
760                continue; // Unchanged
761            }
762
763            let status = if is_tracked {
764                FileStatus::Modified
765            } else {
766                FileStatus::Added
767            };
768
769            let repo_path = RepoPath::new(&rel_path)?;
770            self.meta.working_set_add(&repo_path, status)?;
771            count += 1;
772        }
773
774        Ok(count)
775    }
776
777    /// Check if a path is tracked.
778    ///
779    /// Uses the SQLite file_trees table for O(1) lookups when HEAD is cached,
780    /// falling back to the in-memory DAG walk only on cold start.
781    fn is_tracked(&self, path: &str) -> Result<bool, RepoError> {
782        // Fast path: use in-memory cache if available
783        if let Some(ref tree) = *self.cached_head_snapshot.borrow() {
784            return Ok(tree.contains(path));
785        }
786        // Medium path: use SQLite file_trees table
787        if let Ok((_, head_id)) = self.head()
788            && let Ok(result) = self.meta.file_tree_contains(&head_id, path)
789        {
790            return Ok(result);
791        }
792        // Slow path: walk the DAG (shouldn't happen after first commit)
793        for id in self.dag.patch_ids() {
794            if let Some(node) = self.dag.get_node(&id)
795                && node.patch.target_path.as_deref() == Some(path)
796            {
797                return Ok(true);
798            }
799        }
800        Ok(false)
801    }
802
803    /// Create a commit from the working set.
804    pub fn commit(&mut self, message: &str) -> Result<PatchId, RepoError> {
805        let old_head = self.head().map(|(_, id)| id).unwrap_or(Hash::ZERO);
806        let working_set = self.meta.working_set()?;
807
808        let staged: Vec<_> = working_set
809            .iter()
810            .filter(|(_, s)| {
811                matches!(
812                    s,
813                    FileStatus::Added | FileStatus::Modified | FileStatus::Deleted
814                )
815            })
816            .collect();
817
818        if staged.is_empty() {
819            return Err(RepoError::NothingToCommit);
820        }
821
822        let (branch_name, head_id) = self.head()?;
823        let is_merge_resolution = !self.pending_merge_parents.is_empty();
824
825        let parent_ids = if self.pending_merge_parents.is_empty() {
826            vec![head_id]
827        } else {
828            std::mem::take(&mut self.pending_merge_parents)
829        };
830
831        // Clear persisted merge state on commit
832        let _ = self
833            .meta
834            .conn()
835            .execute("DELETE FROM config WHERE key = 'pending_merge_parents'", []);
836
837        // Build batched file changes
838        let mut file_changes = Vec::new();
839        for (path, status) in &staged {
840            let full_path = self.root.join(path);
841
842            let (op_type, payload) = match status {
843                FileStatus::Added | FileStatus::Modified => {
844                    let data = fs::read(&full_path)?;
845                    let hash = self.cas.put_blob(&data)?;
846                    let payload = hash.to_hex().as_bytes().to_vec();
847                    (OperationType::Modify, payload)
848                }
849                FileStatus::Deleted => (OperationType::Delete, Vec::new()),
850                _ => continue,
851            };
852            file_changes.push(FileChange {
853                op: op_type,
854                path: path.clone(),
855                payload,
856            });
857        }
858
859        if file_changes.is_empty() {
860            return Err(RepoError::NothingToCommit);
861        }
862
863        // Create single batched patch
864        let batch_patch = Patch::new_batch(
865            file_changes,
866            parent_ids.clone(),
867            self.author.clone(),
868            message.to_string(),
869        );
870
871        let patch_id = self.dag.add_patch(batch_patch.clone(), parent_ids)?;
872        self.meta.store_patch(&batch_patch)?;
873
874        // Clear working set entries
875        for (path, _) in &staged {
876            let repo_path = RepoPath::new(path.clone())?;
877            self.meta.working_set_remove(&repo_path)?;
878        }
879
880        let branch = BranchName::new(&branch_name)?;
881        self.dag.update_branch(&branch, patch_id)?;
882        self.meta.set_branch(&branch, &patch_id)?;
883
884        // Persist the file tree for this commit tip (enables O(1) cold-load later).
885        // Build the tree directly from patches (not from the stale cache).
886        if let Ok(tree) = self.snapshot_uncached(&patch_id) {
887            let tree_hash = tree.content_hash();
888            let _ = self.meta.set_config("head_tree_hash", &tree_hash.to_hex());
889            let _ = self.meta.store_file_tree(&patch_id, &tree);
890        }
891
892        self.invalidate_head_cache();
893
894        let _ = self.record_reflog(&old_head, &patch_id, &format!("commit: {}", message));
895
896        // If this was a merge resolution, update merge commit's parent_ids
897        if is_merge_resolution {
898            // The batch patch already has the correct merge parents
899            // (already handled above via pending_merge_parents)
900        }
901
902        Ok(patch_id)
903    }
904
905    // =========================================================================
906    // Stash
907    // =========================================================================
908
909    pub fn has_uncommitted_changes(&self) -> Result<bool, RepoError> {
910        let working_set = self.meta.working_set()?;
911
912        let has_staged = working_set.iter().any(|(_, s)| {
913            matches!(
914                s,
915                FileStatus::Added | FileStatus::Modified | FileStatus::Deleted
916            )
917        });
918        if has_staged {
919            return Ok(true);
920        }
921
922        if let Ok(head_tree) = self.snapshot_head() {
923            for (path, hash) in head_tree.iter() {
924                let full_path = self.root.join(path);
925                if let Ok(data) = fs::read(&full_path) {
926                    let current_hash = Hash::from_data(&data);
927                    if &current_hash != hash {
928                        return Ok(true);
929                    }
930                } else {
931                    return Ok(true);
932                }
933            }
934        }
935
936        Ok(false)
937    }
938
    /// Save all uncommitted changes into a new stash entry, then restore the
    /// working tree to the HEAD state.
    ///
    /// Stashed file contents are written to the CAS; the entry itself is
    /// persisted as `stash.<index>.*` keys in the config table. Returns the
    /// index of the newly created stash.
    ///
    /// # Errors
    /// Returns `RepoError::NothingToCommit` when there is nothing to stash,
    /// plus CAS/metadata errors from the underlying stores.
    pub fn stash_push(&mut self, message: Option<&str>) -> Result<usize, RepoError> {
        if !self.has_uncommitted_changes()? {
            return Err(RepoError::NothingToCommit);
        }

        let working_set = self.meta.working_set()?;
        // (path, Some(blob-hash-hex)) for saved contents;
        // (path, None) for deletions or files that could not be read.
        let mut files: Vec<(String, Option<String>)> = Vec::new();

        // Phase 1: capture staged entries.
        for (path, status) in &working_set {
            match status {
                FileStatus::Added | FileStatus::Modified => {
                    let full_path = self.root.join(path);
                    if let Ok(data) = fs::read(&full_path) {
                        let hash = self.cas.put_blob(&data)?;
                        files.push((path.clone(), Some(hash.to_hex())));
                    } else {
                        // Unreadable on disk — recorded like a deletion.
                        files.push((path.clone(), None));
                    }
                }
                FileStatus::Deleted => {
                    files.push((path.clone(), None));
                }
                _ => {}
            }
        }

        // Phase 2: capture unstaged edits — tracked files whose on-disk
        // content differs from HEAD and were not already captured above.
        if let Ok(head_tree) = self.snapshot_head() {
            for (path, _hash) in head_tree.iter() {
                let full_path = self.root.join(path);
                if let Ok(data) = fs::read(&full_path) {
                    let current_hash = Hash::from_data(&data);
                    if &current_hash != _hash {
                        let already = files.iter().any(|(p, _)| p == path);
                        if !already {
                            let hash = self.cas.put_blob(&data)?;
                            files.push((path.clone(), Some(hash.to_hex())));
                        }
                    }
                }
            }
        }

        // Phase 3: find the first free stash index (lowest unused slot).
        let mut index: usize = 0;
        loop {
            let key = format!("stash.{}.message", index);
            if self.meta.get_config(&key)?.is_none() {
                break;
            }
            index += 1;
        }

        // Phase 4: persist the stash entry under config keys.
        let (branch_name, head_id) = self.head()?;
        let msg = message.unwrap_or("WIP").to_string();
        let files_json = serde_json::to_string(&files).unwrap_or_else(|_| "[]".to_string());

        self.set_config(&format!("stash.{}.message", index), &msg)?;
        self.set_config(&format!("stash.{}.head_branch", index), &branch_name)?;
        self.set_config(&format!("stash.{}.head_id", index), &head_id.to_hex())?;
        self.set_config(&format!("stash.{}.files", index), &files_json)?;

        // Phase 5: clear the staging area.
        self.meta
            .conn()
            .execute("DELETE FROM working_set", [])
            .map_err(|e| RepoError::Meta(crate::metadata::MetaError::Database(e)))?;

        // Phase 6: reset the working tree to the HEAD snapshot — delete every
        // tracked file, then re-materialize each blob from the CAS.
        // Best-effort: individual remove/write failures are ignored.
        if let Ok(head_tree) = self.snapshot_head() {
            let current_tree = head_tree;
            for (path, _) in current_tree.iter() {
                let full_path = self.root.join(path);
                if full_path.exists() {
                    let _ = fs::remove_file(&full_path);
                }
            }
            for (path, hash) in current_tree.iter() {
                let full_path = self.root.join(path);
                if let Some(parent) = full_path.parent() {
                    let _ = fs::create_dir_all(parent);
                }
                if let Ok(blob) = self.cas.get_blob(hash) {
                    let _ = fs::write(&full_path, &blob);
                }
            }
        }

        Ok(index)
    }
1025
1026    pub fn stash_pop(&mut self) -> Result<(), RepoError> {
1027        let stashes = self.stash_list()?;
1028        if stashes.is_empty() {
1029            return Err(RepoError::Custom("No stashes found".to_string()));
1030        }
1031        let highest = stashes
1032            .iter()
1033            .map(|s| s.index)
1034            .max()
1035            .expect("stash list is non-empty (checked above)");
1036        self.stash_apply(highest)?;
1037        self.stash_drop(highest)?;
1038        Ok(())
1039    }
1040
1041    pub fn stash_apply(&mut self, index: usize) -> Result<(), RepoError> {
1042        let files_key = format!("stash.{}.files", index);
1043        let files_json = self
1044            .meta
1045            .get_config(&files_key)?
1046            .ok_or_else(|| RepoError::Custom(format!("stash@{{{}}} not found", index)))?;
1047
1048        let head_id_key = format!("stash.{}.head_id", index);
1049        let stash_head_id = self.meta.get_config(&head_id_key)?.unwrap_or_default();
1050
1051        if let Ok((_, current_head_id)) = self.head()
1052            && current_head_id.to_hex() != stash_head_id
1053        {
1054            tracing::warn!(
1055                "Warning: HEAD has moved since stash@{{{}}} was created",
1056                index
1057            );
1058        }
1059
1060        let files: Vec<(String, Option<String>)> =
1061            serde_json::from_str(&files_json).unwrap_or_default();
1062
1063        for (path, hash_opt) in &files {
1064            let full_path = self.root.join(path);
1065            match hash_opt {
1066                Some(hex_hash) => {
1067                    let hash = Hash::from_hex(hex_hash)
1068                        .map_err(|e| RepoError::Custom(format!("invalid hash in stash: {}", e)))?;
1069                    let blob = self.cas.get_blob(&hash)?;
1070                    if let Some(parent) = full_path.parent() {
1071                        fs::create_dir_all(parent)?;
1072                    }
1073                    fs::write(&full_path, &blob)?;
1074                    let repo_path = RepoPath::new(path.clone())?;
1075                    self.meta
1076                        .working_set_add(&repo_path, FileStatus::Modified)?;
1077                }
1078                None => {
1079                    if full_path.exists() {
1080                        fs::remove_file(&full_path)?;
1081                    }
1082                    let repo_path = RepoPath::new(path.clone())?;
1083                    self.meta.working_set_add(&repo_path, FileStatus::Deleted)?;
1084                }
1085            }
1086        }
1087
1088        Ok(())
1089    }
1090
1091    pub fn stash_list(&self) -> Result<Vec<StashEntry>, RepoError> {
1092        let all_config = self.list_config()?;
1093        let mut entries = Vec::new();
1094
1095        for (key, value) in &all_config {
1096            if let Some(rest) = key.strip_prefix("stash.")
1097                && let Some(idx_str) = rest.strip_suffix(".message")
1098                && let Ok(idx) = idx_str.parse::<usize>()
1099            {
1100                let branch_key = format!("stash.{}.head_branch", idx);
1101                let head_id_key = format!("stash.{}.head_id", idx);
1102                let branch = self.meta.get_config(&branch_key)?.unwrap_or_default();
1103                let head_id = self.meta.get_config(&head_id_key)?.unwrap_or_default();
1104                entries.push(StashEntry {
1105                    index: idx,
1106                    message: value.clone(),
1107                    branch,
1108                    head_id,
1109                });
1110            }
1111        }
1112
1113        entries.sort_by_key(|e| e.index);
1114        Ok(entries)
1115    }
1116
1117    pub fn stash_drop(&mut self, index: usize) -> Result<(), RepoError> {
1118        let prefix = format!("stash.{}.", index);
1119        let all_config = self.list_config()?;
1120        let keys_to_delete: Vec<String> = all_config
1121            .iter()
1122            .filter(|(k, _)| k.starts_with(&prefix))
1123            .map(|(k, _)| k.clone())
1124            .collect();
1125
1126        if keys_to_delete.is_empty() {
1127            return Err(RepoError::Custom(format!("stash@{{{}}} not found", index)));
1128        }
1129
1130        for key in &keys_to_delete {
1131            self.meta
1132                .conn()
1133                .execute("DELETE FROM config WHERE key = ?1", rusqlite::params![key])
1134                .map_err(|e| RepoError::Meta(crate::metadata::MetaError::Database(e)))?;
1135        }
1136
1137        Ok(())
1138    }
1139
1140    // =========================================================================
1141    // Snapshot & Checkout
1142    // =========================================================================
1143
    /// Build a FileTree snapshot for the HEAD commit.
    ///
    /// Returns a cached snapshot if the HEAD has not changed since the last
    /// call, making this O(1) instead of O(n) where n = total patches.
    ///
    /// Lookup order: in-memory cache → persisted SQLite tree (verified
    /// against the stored `head_tree_hash` when one exists) → full patch
    /// replay (cold path), with results persisted for the next cold start.
    pub fn snapshot_head(&self) -> Result<FileTree, RepoError> {
        // Always get the fresh head_id from the DAG (branch pointers may have
        // been updated externally, e.g., by do_fetch). Only use the in-memory
        // cache if the IDs match.
        // NOTE(review): the snapshot cache below is consulted without
        // comparing against head_id — correctness relies on
        // invalidate_head_cache() being called on every HEAD move; confirm.
        let (branch_name, head_id) = {
            let branch_name = self.read_head_branch()?;
            let bn = BranchName::new(&branch_name)?;
            let target_id = self
                .dag
                .get_branch(&bn)
                .ok_or_else(|| RepoError::BranchNotFound(branch_name.clone()))?;
            (branch_name, target_id)
        };

        // Update head caches
        *self.cached_head_branch.borrow_mut() = Some(branch_name.clone());
        *self.cached_head_id.borrow_mut() = Some(head_id);

        if let Some(ref tree) = *self.cached_head_snapshot.borrow() {
            return Ok(tree.clone());
        }

        // Try loading from SQLite (O(1) — no patch replay needed)
        if let Some(tree) = self
            .meta
            .load_file_tree(&head_id)
            .map_err(RepoError::Meta)?
        {
            // Verify the stored tree matches the expected hash
            let tree_hash = tree.content_hash();
            let stored_hash = self
                .meta
                .get_config("head_tree_hash")
                .ok()
                .flatten()
                .and_then(|h| Hash::from_hex(&h).ok());

            // A missing stored hash is accepted (and backfilled below);
            // only an explicit mismatch forces a recompute.
            if stored_hash.is_none_or(|h| h == tree_hash) {
                // Update stored hash if needed
                if stored_hash.is_none() {
                    let _ = self.meta.set_config("head_tree_hash", &tree_hash.to_hex());
                }

                *self.cached_head_snapshot.borrow_mut() = Some(tree.clone());
                return Ok(tree);
            }
            // Hash mismatch — fall through to recompute
        }

        // Cold path: replay all patches (expensive, but correct)
        let tree = self.snapshot_uncached(&head_id)?;
        let tree_hash = tree.content_hash();

        let _ = self.meta.set_config("head_tree_hash", &tree_hash.to_hex());

        // Persist the tree for next cold start
        let _ = self.meta.store_file_tree(&head_id, &tree);

        *self.cached_head_snapshot.borrow_mut() = Some(tree.clone());
        Ok(tree)
    }
1209
    /// Invalidate the cached HEAD snapshot and branch name.
    ///
    /// Must be called after any operation that changes the HEAD pointer,
    /// including branch updates from external sources (e.g., fetch/pull).
    pub fn invalidate_head_cache(&self) {
        // Drop all three in-memory HEAD caches.
        *self.cached_head_snapshot.borrow_mut() = None;
        *self.cached_head_id.borrow_mut() = None;
        *self.cached_head_branch.borrow_mut() = None;
        // Also drop the persisted tree-hash marker; best-effort — a DB error
        // here only costs snapshot_head() a re-verification/backfill.
        let _ = self
            .meta
            .conn()
            .execute("DELETE FROM config WHERE key = 'head_tree_hash'", []);
    }
1223
1224    /// Build a FileTree snapshot for a specific patch (uncached).
1225    fn snapshot_uncached(&self, patch_id: &PatchId) -> Result<FileTree, RepoError> {
1226        let mut chain = self.dag.patch_chain(patch_id);
1227        // patch_chain returns [tip, parent, ..., root] — reverse for oldest-first
1228        chain.reverse();
1229        let patches: Vec<Patch> = chain
1230            .iter()
1231            .filter_map(|id| self.dag.get_patch(id).cloned())
1232            .collect();
1233
1234        let tree = apply_patch_chain(&patches, resolve_payload_to_hash)?;
1235        Ok(tree)
1236    }
1237
1238    /// Build a FileTree snapshot for a specific patch.
1239    ///
1240    /// Tries loading from SQLite first (O(1)), falls back to patch replay (O(n)).
1241    pub fn snapshot(&self, patch_id: &PatchId) -> Result<FileTree, RepoError> {
1242        // Try SQLite first
1243        if let Some(tree) = self
1244            .meta
1245            .load_file_tree(patch_id)
1246            .map_err(RepoError::Meta)?
1247        {
1248            return Ok(tree);
1249        }
1250        // Fall back to patch replay, then persist
1251        let tree = self.snapshot_uncached(patch_id)?;
1252        let _ = self.meta.store_file_tree(patch_id, &tree);
1253        Ok(tree)
1254    }
1255
    /// Sync the working tree to match the current HEAD snapshot.
    ///
    /// Compares `old_tree` (the state before the operation) against the
    /// current HEAD snapshot and applies file additions, modifications,
    /// deletions, and renames to disk. Blob fetches and file writes run in
    /// parallel via rayon; deletions and renames are applied sequentially.
    pub fn sync_working_tree(&self, old_tree: &FileTree) -> Result<(), RepoError> {
        use rayon::prelude::*;

        let new_tree = self.snapshot_head()?;
        let diffs = diff_trees(old_tree, &new_tree);

        // Extract fields needed by parallel closures (BlobStore is Send + Sync)
        let cas = &self.cas;
        let root = &self.root;

        // Phase 1: Pre-fetch all blobs in parallel (the I/O-heavy part).
        // The first CAS failure short-circuits via `collect::<Result<..>>`.
        let blob_results: Result<Vec<(String, Vec<u8>)>, CasError> = diffs
            .par_iter()
            .filter_map(|entry| {
                if let (DiffType::Added | DiffType::Modified, Some(new_hash)) =
                    (&entry.diff_type, &entry.new_hash)
                {
                    Some((entry.path.clone(), *new_hash))
                } else {
                    None
                }
            })
            .map(|(path, hash)| {
                let blob = cas.get_blob(&hash)?;
                Ok((path, blob))
            })
            .collect();

        let blobs: Vec<(String, Vec<u8>)> = blob_results?;

        // Phase 2: Ensure all parent directories exist (sequential, idempotent)
        for (path, _) in &blobs {
            let full_path = root.join(path);
            if let Some(parent) = full_path.parent() {
                fs::create_dir_all(parent)?;
            }
        }

        // Phase 3: Write all files in parallel
        blobs
            .par_iter()
            .map(|(path, data)| {
                let full_path = root.join(path);
                fs::write(&full_path, data).map_err(RepoError::Io)
            })
            .collect::<Result<Vec<()>, RepoError>>()?;

        // Phase 4: Handle deletions and renames (sequential — filesystem rename is not parallelizable)
        for entry in &diffs {
            let full_path = root.join(&entry.path);
            match &entry.diff_type {
                DiffType::Deleted => {
                    if full_path.exists() {
                        fs::remove_file(&full_path)?;
                    }
                }
                DiffType::Renamed { old_path, .. } => {
                    let old_full = root.join(old_path);
                    if old_full.exists() {
                        if let Some(parent) = full_path.parent() {
                            fs::create_dir_all(parent)?;
                        }
                        fs::rename(&old_full, &full_path)?;
                    }
                }
                DiffType::Added | DiffType::Modified => {
                    // Already handled in parallel phases above
                }
            }
        }

        // Phase 5: Clean up files in old_tree but not in new_tree
        // (best-effort: individual removal failures are ignored).
        for (path, _) in old_tree.iter() {
            if !new_tree.contains(path) {
                let full_path = root.join(path);
                if full_path.exists() {
                    let _ = fs::remove_file(&full_path);
                }
            }
        }

        Ok(())
    }
1349
    /// Checkout a branch, updating the working tree to match its tip state.
    ///
    /// This operation:
    /// 1. Builds the target FileTree from the branch's patch chain
    /// 2. Compares against the current working tree
    /// 3. Updates files (add/modify/delete) to match the target
    /// 4. Updates the HEAD reference
    ///
    /// Uncommitted changes are auto-stashed before the switch and re-applied
    /// afterwards (only a warning is logged if restoration fails).
    pub fn checkout(&mut self, branch_name: &str) -> Result<FileTree, RepoError> {
        // Capture the pre-checkout state for the reflog entry at the end.
        let old_head = self.head().map(|(_, id)| id).unwrap_or(Hash::ZERO);
        let old_branch = self.head().ok().map(|(n, _)| n);
        let target = BranchName::new(branch_name)?;

        let target_id = self
            .dag
            .get_branch(&target)
            .ok_or_else(|| RepoError::BranchNotFound(branch_name.to_string()))?;

        // Auto-stash dirty state so the tree rewrite below starts clean.
        let has_changes = self.has_uncommitted_changes()?;
        if has_changes {
            self.stash_push(Some("auto-stash before checkout"))?;
        }

        let target_tree = self.snapshot(&target_id)?;

        // A missing HEAD (e.g. fresh repo) is treated as an empty tree.
        let current_tree = self.snapshot_head().unwrap_or_else(|_| FileTree::empty());

        let diffs = diff_trees(&current_tree, &target_tree);

        // Apply each diff entry to the working directory.
        for entry in &diffs {
            let full_path = self.root.join(&entry.path);
            match &entry.diff_type {
                DiffType::Added | DiffType::Modified => {
                    if let Some(new_hash) = &entry.new_hash {
                        let blob = self.cas.get_blob(new_hash)?;
                        if let Some(parent) = full_path.parent() {
                            fs::create_dir_all(parent)?;
                        }
                        fs::write(&full_path, &blob)?;
                    }
                }
                DiffType::Deleted => {
                    if full_path.exists() {
                        fs::remove_file(&full_path)?;
                    }
                }
                DiffType::Renamed { old_path, .. } => {
                    let old_full = self.root.join(old_path);
                    if old_full.exists() {
                        if let Some(parent) = full_path.parent() {
                            fs::create_dir_all(parent)?;
                        }
                        fs::rename(&old_full, &full_path)?;
                    }
                }
            }
        }

        // Remove files tracked on the old branch but absent from the target
        // (best-effort: removal failures are ignored).
        for (path, _) in current_tree.iter() {
            if !target_tree.contains(path) {
                let full_path = self.root.join(path);
                if full_path.exists() {
                    let _ = fs::remove_file(&full_path);
                }
            }
        }

        self.write_head_branch(branch_name)?;

        self.invalidate_head_cache();

        let _ = self.record_reflog(
            &old_head,
            &target_id,
            &format!(
                "checkout: moving from {} to {}",
                old_branch.as_deref().unwrap_or("HEAD"),
                branch_name
            ),
        );

        // Re-apply the auto-stash; a failure here is non-fatal by design.
        if has_changes && let Err(e) = self.stash_pop() {
            tracing::warn!("Warning: could not restore stashed changes: {}", e);
        }

        Ok(target_tree)
    }
1438
1439    // =========================================================================
1440    // Diff
1441    // =========================================================================
1442
    /// Compute the diff between two commits or branches.
    ///
    /// Each endpoint may be `HEAD`, `HEAD~N`, a full hex patch hash, a tag,
    /// or a branch name (tried in that order). If `from` is None, compares
    /// the empty tree to `to`; if `to` is None, uses HEAD. When *both* are
    /// None, diffs HEAD against the working directory instead.
    pub fn diff(&self, from: Option<&str>, to: Option<&str>) -> Result<Vec<DiffEntry>, RepoError> {
        // Resolve a revision string to a patch ID.
        let resolve_id = |name: &str| -> Result<PatchId, RepoError> {
            if name == "HEAD" || name.starts_with("HEAD~") {
                let (_, head_id) = self.head()?;
                let mut target_id = head_id;
                // HEAD~N: walk N steps up the first-parent chain.
                if let Some(n_str) = name.strip_prefix("HEAD~") {
                    let n: usize = n_str
                        .parse()
                        .map_err(|_| RepoError::Custom(format!("invalid HEAD~N: {}", name)))?;
                    for _ in 0..n {
                        let patch = self.dag.get_patch(&target_id).ok_or_else(|| {
                            RepoError::Custom("HEAD ancestor not found".to_string())
                        })?;
                        target_id = patch
                            .parent_ids
                            .first()
                            .ok_or_else(|| RepoError::Custom("HEAD has no parent".to_string()))?
                            .to_owned();
                    }
                }
                return Ok(target_id);
            }
            // Try hex hash first (patch IDs are 64-char hex strings that
            // also happen to pass BranchName validation, so we must try
            // hex before branch name to avoid false branch lookups).
            if let Ok(hash) = Hash::from_hex(name)
                && self.dag.has_patch(&hash)
            {
                return Ok(hash);
            }
            // Try tag
            if let Ok(Some(tag_id)) = self.resolve_tag(name) {
                return Ok(tag_id);
            }
            // Fall back to branch name
            let bn = BranchName::new(name)?;
            self.dag
                .get_branch(&bn)
                .ok_or_else(|| RepoError::BranchNotFound(name.to_string()))
        };

        // When both from and to are None, diff HEAD vs working tree
        // to show all uncommitted changes.
        if from.is_none() && to.is_none() {
            let head_tree = self.snapshot_head()?;
            let working_tree = self.build_working_tree()?;
            return Ok(diff_trees(&head_tree, &working_tree));
        }

        // `from` defaults to the empty tree (shows `to` as all-added).
        let old_tree = match from {
            Some(f) => self.snapshot(&resolve_id(f)?)?,
            None => FileTree::empty(),
        };

        // `to` defaults to the current HEAD snapshot.
        let new_tree = match to {
            Some(t) => self.snapshot(&resolve_id(t)?)?,
            None => self.snapshot_head()?,
        };

        Ok(diff_trees(&old_tree, &new_tree))
    }
1507
1508    /// Build a FileTree from the current working directory files.
1509    fn build_working_tree(&self) -> Result<FileTree, RepoError> {
1510        let mut tree = FileTree::empty();
1511        let entries = walk_dir(&self.root, &self.ignore_patterns)?;
1512        for entry in &entries {
1513            if let Ok(data) = fs::read(&entry.full_path) {
1514                let hash = Hash::from_data(&data);
1515                tree.insert(entry.relative.clone(), hash);
1516            }
1517        }
1518        Ok(tree)
1519    }
1520
1521    /// Show staged changes (diff of staged files vs HEAD).
1522    pub fn diff_staged(&self) -> Result<Vec<DiffEntry>, RepoError> {
1523        let head_tree = self.snapshot_head()?;
1524        let mut staged_tree = FileTree::empty();
1525        let working_set = self.meta.working_set()?;
1526        for (path, status) in &working_set {
1527            match status {
1528                FileStatus::Added | FileStatus::Modified => {
1529                    let full_path = self.root.join(path);
1530                    if let Ok(data) = fs::read(&full_path) {
1531                        let hash = Hash::from_data(&data);
1532                        staged_tree.insert(path.clone(), hash);
1533                    }
1534                }
1535                FileStatus::Deleted => {
1536                    // File is staged for deletion — it exists in HEAD but not in staged tree
1537                }
1538                _ => {}
1539            }
1540        }
1541        Ok(diff_trees(&head_tree, &staged_tree))
1542    }
1543
1544    // =========================================================================
1545    // Reset
1546    // =========================================================================
1547
    /// Reset HEAD to a specific commit.
    ///
    /// Resolves `target` (`HEAD`, `HEAD~N`, hex hash, or branch name), moves
    /// the current branch pointer, and optionally clears staging and/or
    /// restores the working tree depending on `mode`:
    /// - `Soft`: re-stages the differences so the old content stays staged
    /// - `Mixed`: clears the staging area, leaves the working tree alone
    /// - `Hard`: clears staging and rewrites the working tree to the target
    ///
    /// Returns the resolved target patch ID.
    pub fn reset(&mut self, target: &str, mode: ResetMode) -> Result<PatchId, RepoError> {
        // Captured for the reflog entry written at the end.
        let old_head = self.head().map(|(_, id)| id).unwrap_or(Hash::ZERO);
        // Resolve the target: HEAD / HEAD~N / hex hash / branch name.
        let target_id = if target == "HEAD" {
            let (_, id) = self.head()?;
            id
        } else if let Some(rest) = target.strip_prefix("HEAD~") {
            let n: usize = rest
                .parse()
                .map_err(|_| RepoError::Custom(format!("invalid HEAD~N: {}", target)))?;
            let (_, head_id) = self.head()?;
            let mut current = head_id;
            // Walk N steps up the first-parent chain.
            for _ in 0..n {
                let patch = self
                    .dag
                    .get_patch(&current)
                    .ok_or_else(|| RepoError::Custom("HEAD ancestor not found".to_string()))?;
                current = patch
                    .parent_ids
                    .first()
                    .ok_or_else(|| RepoError::Custom("HEAD has no parent".to_string()))?
                    .to_owned();
            }
            current
        } else if let Ok(hash) = Hash::from_hex(target)
            && self.dag.has_patch(&hash)
        {
            hash
        } else {
            let bn = BranchName::new(target)?;
            self.dag
                .get_branch(&bn)
                .ok_or_else(|| RepoError::BranchNotFound(target.to_string()))?
        };

        let (branch_name, _) = self.head()?;
        // Snapshot of the pre-reset state, used below to compute what to
        // re-stage (Soft) or how to rewrite the tree (Hard).
        let old_tree = self.snapshot_head().unwrap_or_else(|_| FileTree::empty());

        // Move the branch pointer in both the DAG and the metadata store.
        let branch = BranchName::new(&branch_name)?;
        self.dag.update_branch(&branch, target_id)?;
        self.meta.set_branch(&branch, &target_id)?;
        self.invalidate_head_cache();

        match mode {
            ResetMode::Soft => {
                // Diff new→old so the pre-reset content shows up as staged
                // changes on top of the new HEAD.
                let new_tree = self.snapshot_head().unwrap_or_else(|_| FileTree::empty());
                let diffs = diff_trees(&new_tree, &old_tree);
                for entry in &diffs {
                    match &entry.diff_type {
                        DiffType::Added | DiffType::Modified => {
                            let repo_path = RepoPath::new(entry.path.clone())?;
                            self.meta
                                .working_set_add(&repo_path, FileStatus::Modified)?;
                        }
                        DiffType::Deleted => {
                            let repo_path = RepoPath::new(entry.path.clone())?;
                            self.meta.working_set_add(&repo_path, FileStatus::Deleted)?;
                        }
                        DiffType::Renamed { old_path, .. } => {
                            // A rename is staged as delete-old + add-new.
                            let repo_path = RepoPath::new(old_path.clone())?;
                            self.meta.working_set_add(&repo_path, FileStatus::Deleted)?;
                            let repo_path = RepoPath::new(entry.path.clone())?;
                            self.meta.working_set_add(&repo_path, FileStatus::Added)?;
                        }
                    }
                }
            }
            ResetMode::Mixed | ResetMode::Hard => {
                // Both modes clear the staging area; Hard additionally
                // rewrites the working tree to match the new HEAD.
                self.meta
                    .conn()
                    .execute("DELETE FROM working_set", [])
                    .map_err(|e| RepoError::Meta(crate::metadata::MetaError::Database(e)))?;
                if mode == ResetMode::Hard {
                    self.sync_working_tree(&old_tree)?;
                }
            }
        }

        let _ = self.record_reflog(
            &old_head,
            &target_id,
            &format!("reset: moving to {}", target),
        );

        Ok(target_id)
    }
1640
1641    // =========================================================================
1642    // Revert
1643    // =========================================================================
1644
1645    /// Revert a commit by creating a new patch that undoes its changes.
1646    ///
1647    /// The revert creates inverse patches (Delete for Create, etc.)
1648    /// and commits them on top of HEAD, then syncs the working tree.
1649    pub fn revert(
1650        &mut self,
1651        patch_id: &PatchId,
1652        message: Option<&str>,
1653    ) -> Result<PatchId, RepoError> {
1654        let patch = self
1655            .dag
1656            .get_patch(patch_id)
1657            .ok_or_else(|| RepoError::Custom(format!("patch not found: {}", patch_id)))?;
1658
1659        let (branch_name, head_id) = self.head()?;
1660        let msg = message
1661            .map(|m| m.to_string())
1662            .unwrap_or_else(|| format!("Revert {}", patch_id));
1663
1664        let old_tree = self.snapshot_head().unwrap_or_else(|_| FileTree::empty());
1665
1666        match &patch.operation_type {
1667            OperationType::Batch => {
1668                let changes = patch.file_changes().ok_or_else(|| {
1669                    RepoError::Custom("batch patch has invalid file changes".into())
1670                })?;
1671                if changes.is_empty() {
1672                    return Err(RepoError::Custom("cannot revert empty batch".into()));
1673                }
1674                let parent_tree = patch
1675                    .parent_ids
1676                    .first()
1677                    .map(|pid| self.snapshot(pid).unwrap_or_else(|_| FileTree::empty()))
1678                    .unwrap_or_else(FileTree::empty);
1679                let mut revert_changes = Vec::new();
1680                for change in &changes {
1681                    match change.op {
1682                        OperationType::Create | OperationType::Modify => {
1683                            revert_changes.push(FileChange {
1684                                op: OperationType::Delete,
1685                                path: change.path.clone(),
1686                                payload: Vec::new(),
1687                            });
1688                        }
1689                        OperationType::Delete => {
1690                            if let Some(hash) = parent_tree.get(&change.path) {
1691                                revert_changes.push(FileChange {
1692                                    op: OperationType::Modify,
1693                                    path: change.path.clone(),
1694                                    payload: hash.to_hex().as_bytes().to_vec(),
1695                                });
1696                            }
1697                        }
1698                        _ => {}
1699                    }
1700                }
1701                if revert_changes.is_empty() {
1702                    return Err(RepoError::Custom("nothing to revert in batch".into()));
1703                }
1704                let revert_patch =
1705                    Patch::new_batch(revert_changes, vec![head_id], self.author.clone(), msg);
1706                let revert_id = self.dag.add_patch(revert_patch.clone(), vec![head_id])?;
1707                self.meta.store_patch(&revert_patch)?;
1708
1709                let branch = BranchName::new(&branch_name)?;
1710                self.dag.update_branch(&branch, revert_id)?;
1711                self.meta.set_branch(&branch, &revert_id)?;
1712
1713                self.invalidate_head_cache();
1714
1715                self.sync_working_tree(&old_tree)?;
1716                Ok(revert_id)
1717            }
1718            OperationType::Create | OperationType::Modify => {
1719                let revert_patch = Patch::new(
1720                    OperationType::Delete,
1721                    patch.touch_set.clone(),
1722                    patch.target_path.clone(),
1723                    vec![],
1724                    vec![head_id],
1725                    self.author.clone(),
1726                    msg,
1727                );
1728
1729                let revert_id = self.dag.add_patch(revert_patch.clone(), vec![head_id])?;
1730                self.meta.store_patch(&revert_patch)?;
1731
1732                let branch = BranchName::new(&branch_name)?;
1733                self.dag.update_branch(&branch, revert_id)?;
1734                self.meta.set_branch(&branch, &revert_id)?;
1735
1736                self.invalidate_head_cache();
1737
1738                self.sync_working_tree(&old_tree)?;
1739                Ok(revert_id)
1740            }
1741            OperationType::Delete => {
1742                if let Some(parent_id) = patch.parent_ids.first() {
1743                    let parent_tree = self.snapshot(parent_id)?;
1744                    if let Some(path) = &patch.target_path
1745                        && let Some(hash) = parent_tree.get(path)
1746                    {
1747                        let payload = hash.to_hex().as_bytes().to_vec();
1748                        let revert_patch = Patch::new(
1749                            OperationType::Modify,
1750                            patch.touch_set.clone(),
1751                            patch.target_path.clone(),
1752                            payload,
1753                            vec![head_id],
1754                            self.author.clone(),
1755                            msg,
1756                        );
1757
1758                        let revert_id = self.dag.add_patch(revert_patch.clone(), vec![head_id])?;
1759                        self.meta.store_patch(&revert_patch)?;
1760
1761                        let branch = BranchName::new(&branch_name)?;
1762                        self.dag.update_branch(&branch, revert_id)?;
1763                        self.meta.set_branch(&branch, &revert_id)?;
1764
1765                        self.invalidate_head_cache();
1766
1767                        self.sync_working_tree(&old_tree)?;
1768                        return Ok(revert_id);
1769                    }
1770                }
1771                Err(RepoError::Custom(
1772                    "cannot revert delete: original file content not found".into(),
1773                ))
1774            }
1775            _ => Err(RepoError::Custom(format!(
1776                "cannot revert {:?} patches",
1777                patch.operation_type
1778            ))),
1779        }
1780    }
1781
1782    // =========================================================================
1783    // Squash
1784    // =========================================================================
1785
1786    /// Squash the last N patches on the current branch into a single patch.
1787    ///
1788    /// Returns the new tip patch ID.
1789    pub fn squash(&mut self, count: usize, message: &str) -> Result<PatchId, RepoError> {
1790        if count < 2 {
1791            return Err(RepoError::Custom(
1792                "need at least 2 patches to squash".into(),
1793            ));
1794        }
1795
1796        let (branch_name, tip_id) = self.head()?;
1797        let chain = self.dag().patch_chain(&tip_id);
1798
1799        // chain is tip-first, so the last N patches are chain[0..count]
1800        if chain.len() < count + 1 {
1801            return Err(RepoError::Custom(format!(
1802                "only {} patches on branch, cannot squash {}",
1803                chain.len(),
1804                count
1805            )));
1806        }
1807
1808        // Extract patches to squash (reversed to get oldest-first)
1809        let mut to_squash = Vec::new();
1810        for i in (0..count).rev() {
1811            let pid = &chain[i];
1812            let patch = self
1813                .dag()
1814                .get_patch(pid)
1815                .ok_or_else(|| RepoError::Custom(format!("patch not found: {}", pid.to_hex())))?;
1816            to_squash.push(patch.clone());
1817        }
1818
1819        let parent_of_first = *to_squash[0]
1820            .parent_ids
1821            .first()
1822            .ok_or_else(|| RepoError::Custom("cannot squash root patch".into()))?;
1823
1824        let result = crate::patch::compose::compose_chain(&to_squash, &self.author, message)
1825            .map_err(|e| RepoError::Custom(e.to_string()))?;
1826
1827        let new_id = self
1828            .dag_mut()
1829            .add_patch(result.patch.clone(), vec![parent_of_first])?;
1830        self.meta().store_patch(&result.patch)?;
1831
1832        let branch = BranchName::new(&branch_name).map_err(|e| RepoError::Custom(e.to_string()))?;
1833        self.dag_mut().update_branch(&branch, new_id)?;
1834        self.meta().set_branch(&branch, &new_id)?;
1835
1836        self.record_reflog(
1837            to_squash.last().map(|p| &p.id).unwrap_or(&parent_of_first),
1838            &new_id,
1839            &format!("squash: {} patches into one", count),
1840        )?;
1841
1842        self.invalidate_head_cache();
1843
1844        Ok(new_id)
1845    }
1846
1847    // =========================================================================
1848    // Merge
1849    // =========================================================================
1850
1851    /// Compute a merge plan between two branches.
1852    pub fn merge_plan(&self, branch_a: &str, branch_b: &str) -> Result<MergeResult, RepoError> {
1853        let ba = BranchName::new(branch_a)?;
1854        let bb = BranchName::new(branch_b)?;
1855        self.dag.merge_branches(&ba, &bb).map_err(RepoError::Dag)
1856    }
1857
1858    /// Execute a merge of `source_branch` into the current HEAD branch.
1859    ///
1860    /// For clean merges (no conflicts):
1861    /// 1. Collect unique patches from both branches (after LCA)
1862    /// 2. Apply the source branch's tree onto HEAD's working tree
1863    /// 3. Create a merge commit (patch with two parents)
1864    /// 4. Update the working tree to reflect the merge result
1865    ///
1866    /// For merges with conflicts:
1867    /// 1. Apply all non-conflicting patches from source
1868    /// 2. Return a `MergeExecutionResult` with conflict details
1869    /// 3. The caller can then resolve conflicts and commit
1870    pub fn execute_merge(
1871        &mut self,
1872        source_branch: &str,
1873    ) -> Result<MergeExecutionResult, RepoError> {
1874        if !self.pending_merge_parents.is_empty() {
1875            return Err(RepoError::MergeInProgress);
1876        }
1877
1878        let (head_branch, head_id) = self.head()?;
1879        let source_bn = BranchName::new(source_branch)?;
1880        let source_tip = self
1881            .dag
1882            .get_branch(&source_bn)
1883            .ok_or_else(|| RepoError::BranchNotFound(source_branch.to_string()))?;
1884
1885        let head_bn = BranchName::new(&head_branch)?;
1886
1887        let merge_result = self.dag.merge_branches(&head_bn, &source_bn)?;
1888
1889        if head_id == source_tip {
1890            return Ok(MergeExecutionResult {
1891                is_clean: true,
1892                merged_tree: self.snapshot_head()?,
1893                merge_patch_id: None,
1894                unresolved_conflicts: Vec::new(),
1895                patches_applied: 0,
1896            });
1897        }
1898
1899        if merge_result.patches_b_only.is_empty() && merge_result.patches_a_only.is_empty() {
1900            return Ok(MergeExecutionResult {
1901                is_clean: true,
1902                merged_tree: self.snapshot_head()?,
1903                merge_patch_id: None,
1904                unresolved_conflicts: Vec::new(),
1905                patches_applied: 0,
1906            });
1907        }
1908
1909        if merge_result.is_clean {
1910            self.execute_clean_merge(&head_id, &source_tip, &head_branch, &merge_result)
1911        } else {
1912            self.execute_conflicting_merge(
1913                &head_id,
1914                &source_tip,
1915                source_branch,
1916                &head_branch,
1917                &merge_result,
1918            )
1919        }
1920    }
1921
    /// Perform a conflict-free merge: replay the source branch's tree changes
    /// onto the working directory, then record a two-parent merge patch and
    /// advance the head branch to it.
    fn execute_clean_merge(
        &mut self,
        head_id: &PatchId,
        source_tip: &PatchId,
        head_branch: &str,
        merge_result: &MergeResult,
    ) -> Result<MergeExecutionResult, RepoError> {
        let head_tree = self.snapshot(head_id)?;
        let source_tree = self.snapshot(source_tip)?;
        // Diff base: the latest common ancestor of both tips.
        let lca_id = self
            .dag
            .lca(head_id, source_tip)
            .ok_or_else(|| RepoError::Custom("no common ancestor found".to_string()))?;
        let lca_tree = self.snapshot(&lca_id).unwrap_or_else(|_| FileTree::empty());

        // Changes the source branch made relative to the common ancestor.
        let source_diffs = diff_trees(&lca_tree, &source_tree);
        let mut merged_tree = head_tree.clone();

        // Apply each source-side change to the working directory and to the
        // in-memory merged tree.
        for entry in &source_diffs {
            let full_path = self.root.join(&entry.path);
            match &entry.diff_type {
                DiffType::Added | DiffType::Modified => {
                    if let Some(new_hash) = &entry.new_hash {
                        let blob = self.cas.get_blob(new_hash)?;
                        if let Some(parent) = full_path.parent() {
                            fs::create_dir_all(parent)?;
                        }
                        fs::write(&full_path, &blob)?;
                        merged_tree.insert(entry.path.clone(), *new_hash);
                    }
                }
                DiffType::Deleted => {
                    if full_path.exists() {
                        fs::remove_file(&full_path)?;
                    }
                    merged_tree.remove(&entry.path);
                }
                DiffType::Renamed { old_path, .. } => {
                    let old_full = self.root.join(old_path);
                    if old_full.exists() {
                        if let Some(parent) = full_path.parent() {
                            fs::create_dir_all(parent)?;
                        }
                        fs::rename(&old_full, &full_path)?;
                    }
                    // NOTE(review): the renamed entry keeps `old_hash` — this
                    // assumes a rename never changes content; confirm against
                    // diff_trees' rename detection.
                    if let Some(old_hash) = entry.old_hash {
                        merged_tree.remove(old_path);
                        merged_tree.insert(entry.path.clone(), old_hash);
                    }
                }
            }
        }

        // Record the merge as a patch with both tips as parents.
        // NOTE(review): the message interpolates `source_tip` (a patch id),
        // not the source branch name — likely meant to be the branch name,
        // but fixing it needs the name plumbed through; confirm with callers.
        let merge_patch = Patch::new(
            OperationType::Merge,
            TouchSet::empty(),
            None,
            vec![],
            vec![*head_id, *source_tip],
            self.author.clone(),
            format!("Merge branch '{}' into {}", source_tip, head_branch),
        );

        let merge_id = self
            .dag
            .add_patch(merge_patch.clone(), vec![*head_id, *source_tip])?;
        self.meta.store_patch(&merge_patch)?;

        // Advance the head branch to the merge patch and drop the cached head.
        let branch = BranchName::new(head_branch)?;
        self.dag.update_branch(&branch, merge_id)?;
        self.meta.set_branch(&branch, &merge_id)?;

        self.invalidate_head_cache();

        Ok(MergeExecutionResult {
            is_clean: true,
            merged_tree,
            merge_patch_id: Some(merge_id),
            unresolved_conflicts: Vec::new(),
            patches_applied: merge_result.patches_b_only.len(),
        })
    }
2004
    /// Perform a merge that has conflicts: apply the non-conflicting source
    /// patches, write conflict markers for the rest, and record the pending
    /// merge parents so a later commit can finalize the merge.
    fn execute_conflicting_merge(
        &mut self,
        head_id: &PatchId,
        source_tip: &PatchId,
        source_branch: &str,
        head_branch: &str,
        merge_result: &MergeResult,
    ) -> Result<MergeExecutionResult, RepoError> {
        let head_tree = self.snapshot(head_id)?;
        let source_tree = self.snapshot(source_tip)?;

        // Diff base: the latest common ancestor of both tips.
        let lca_id = self
            .dag
            .lca(head_id, source_tip)
            .ok_or_else(|| RepoError::Custom("no common ancestor found".to_string()))?;
        let lca_tree = self.snapshot(&lca_id).unwrap_or_else(|_| FileTree::empty());

        // Patches involved in any conflict (from either side); these are
        // excluded from the automatic application below.
        let conflicting_patch_ids: HashSet<PatchId> = merge_result
            .conflicts
            .iter()
            .flat_map(|c| [c.patch_a_id, c.patch_b_id])
            .collect();

        let mut merged_tree = head_tree.clone();
        let mut patches_applied = 0;

        // Apply each non-conflicting, source-only patch directly to the
        // working directory and the in-memory merged tree.
        for entry in &merge_result.patches_b_only {
            if conflicting_patch_ids.contains(entry) {
                continue;
            }
            if let Some(patch) = self.dag.get_patch(entry) {
                if patch.is_identity() || patch.operation_type == OperationType::Merge {
                    continue;
                }
                if let Some(path) = &patch.target_path {
                    let full_path = self.root.join(path);
                    match patch.operation_type {
                        OperationType::Create | OperationType::Modify => {
                            // Only materialize content whose blob exists in CAS.
                            if let Some(blob_hash) = resolve_payload_to_hash(patch)
                                && self.cas.has_blob(&blob_hash)
                            {
                                let blob = self.cas.get_blob(&blob_hash)?;
                                if let Some(parent) = full_path.parent() {
                                    fs::create_dir_all(parent)?;
                                }
                                fs::write(&full_path, &blob)?;
                                merged_tree.insert(path.clone(), blob_hash);
                            }
                        }
                        OperationType::Delete => {
                            if full_path.exists() {
                                fs::remove_file(&full_path)?;
                            }
                            merged_tree.remove(path);
                        }
                        _ => {}
                    }
                }
                // Counted even when the patch had no target path or its blob
                // was unavailable.
                patches_applied += 1;
            }
        }

        let mut unresolved_conflicts = Vec::new();

        // Write conflict-marked content for each conflict, and register the
        // marked blob in CAS so the merged tree stays content-addressed.
        for conflict in &merge_result.conflicts {
            let conflict_info =
                self.build_conflict_info(conflict, &head_tree, &source_tree, &lca_tree);
            if let Some(info) = conflict_info {
                let full_path = self.root.join(&info.path);
                if let Some(parent) = full_path.parent() {
                    fs::create_dir_all(parent)?;
                }
                let conflict_content =
                    self.write_conflict_markers(&info, source_branch, head_branch)?;
                fs::write(&full_path, conflict_content.as_bytes())?;
                let hash = self.cas.put_blob(conflict_content.as_bytes())?;
                merged_tree.insert(info.path.clone(), hash);
                unresolved_conflicts.push(info);
            }
        }

        // Remember both parents so the eventual resolution commit becomes a
        // two-parent merge patch.
        self.pending_merge_parents = vec![*head_id, *source_tip];

        // Persist merge state so it survives repo reopen (best-effort).
        let parents_json = serde_json::to_string(&self.pending_merge_parents).unwrap_or_default();
        let _ = self.meta.set_config("pending_merge_parents", &parents_json);

        Ok(MergeExecutionResult {
            is_clean: false,
            merged_tree,
            merge_patch_id: None,
            unresolved_conflicts,
            patches_applied,
        })
    }
2100
2101    fn build_conflict_info(
2102        &self,
2103        conflict: &Conflict,
2104        head_tree: &FileTree,
2105        source_tree: &FileTree,
2106        lca_tree: &FileTree,
2107    ) -> Option<ConflictInfo> {
2108        let patch_a = self.dag.get_patch(&conflict.patch_a_id)?;
2109        let patch_b = self.dag.get_patch(&conflict.patch_b_id)?;
2110
2111        let path = patch_a
2112            .target_path
2113            .clone()
2114            .or_else(|| patch_b.target_path.clone())
2115            .or_else(|| {
2116                // For batch patches, find the conflicting path from the conflict addresses
2117                conflict.conflict_addresses.first().cloned()
2118            })?;
2119
2120        let our_content_hash = head_tree.get(&path).copied();
2121        let their_content_hash = source_tree.get(&path).copied();
2122        let base_content_hash = lca_tree.get(&path).copied();
2123
2124        Some(ConflictInfo {
2125            path,
2126            our_patch_id: conflict.patch_a_id,
2127            their_patch_id: conflict.patch_b_id,
2128            our_content_hash,
2129            their_content_hash,
2130            base_content_hash,
2131        })
2132    }
2133
2134    fn write_conflict_markers(
2135        &self,
2136        info: &ConflictInfo,
2137        source_branch: &str,
2138        head_branch: &str,
2139    ) -> Result<String, RepoError> {
2140        let our_content = match info.our_content_hash {
2141            Some(hash) => String::from_utf8(self.cas.get_blob(&hash)?).unwrap_or_default(),
2142            None => String::new(),
2143        };
2144
2145        let their_content = match info.their_content_hash {
2146            Some(hash) => String::from_utf8(self.cas.get_blob(&hash)?).unwrap_or_default(),
2147            None => String::new(),
2148        };
2149
2150        let base_content = match info.base_content_hash {
2151            Some(hash) => Some(String::from_utf8(self.cas.get_blob(&hash)?).unwrap_or_default()),
2152            None => None,
2153        };
2154
2155        let merged = three_way_merge(
2156            base_content.as_deref(),
2157            &our_content,
2158            &their_content,
2159            head_branch,
2160            source_branch,
2161        );
2162
2163        match merged {
2164            Ok(content) => Ok(content),
2165            Err(conflict_lines) => {
2166                let mut result = String::new();
2167                for line in conflict_lines {
2168                    result.push_str(&line);
2169                    result.push('\n');
2170                }
2171                Ok(result)
2172            }
2173        }
2174    }
2175
2176    // =========================================================================
2177    // Cherry-pick
2178    // =========================================================================
2179
    /// Cherry-pick a patch onto the current HEAD branch.
    ///
    /// Creates a new patch with the same changes (operation_type, touch_set,
    /// target_path, payload) but with the current HEAD as its parent.
    ///
    /// Identity, Merge, and Create patches are refused. If the re-created
    /// patch already exists in the DAG and is reachable from HEAD, the pick
    /// is a no-op and the existing id is returned.
    pub fn cherry_pick(&mut self, patch_id: &PatchId) -> Result<PatchId, RepoError> {
        // Pre-pick head for the reflog; Hash::ZERO when there is no head yet.
        let old_head = self.head().map(|(_, id)| id).unwrap_or(Hash::ZERO);
        let patch = self
            .dag
            .get_patch(patch_id)
            .ok_or_else(|| RepoError::Custom(format!("patch not found: {}", patch_id)))?;

        // Only content-bearing patch kinds can be picked.
        if patch.operation_type == OperationType::Identity
            || patch.operation_type == OperationType::Merge
            || patch.operation_type == OperationType::Create
        {
            return Err(RepoError::Custom(format!(
                "cannot cherry-pick {:?} patches",
                patch.operation_type
            )));
        }

        let (branch_name, head_id) = self.head()?;

        // Rebuild the patch with HEAD as its sole parent, keeping its changes
        // and message but re-attributing it to the current author.
        let new_patch = if patch.operation_type == OperationType::Batch {
            let changes = patch
                .file_changes()
                .ok_or_else(|| RepoError::Custom("batch patch has invalid file changes".into()))?;
            Patch::new_batch(
                changes,
                vec![head_id],
                self.author.clone(),
                patch.message.clone(),
            )
        } else {
            Patch::new(
                patch.operation_type.clone(),
                patch.touch_set.clone(),
                patch.target_path.clone(),
                patch.payload.clone(),
                vec![head_id],
                self.author.clone(),
                patch.message.clone(),
            )
        };

        let new_id = match self.dag.add_patch(new_patch.clone(), vec![head_id]) {
            Ok(id) => id,
            Err(DagError::DuplicatePatch(_)) => {
                // The identical patch already exists. If it is already part of
                // HEAD's history the pick is a harmless no-op; otherwise the
                // duplicate lives on another branch and we refuse.
                let head_ancestors = self.dag.ancestors(&head_id);
                let new_patch_id = new_patch.id;
                if head_ancestors.contains(&new_patch_id) {
                    return Ok(new_patch_id);
                }
                return Err(RepoError::Custom(
                    "patch already exists in DAG and is not reachable from HEAD".to_string(),
                ));
            }
            Err(e) => return Err(RepoError::Dag(e)),
        };
        self.meta.store_patch(&new_patch)?;

        // Advance the branch; the old head tree is snapshotted first so the
        // working directory can be synced to the new head afterwards.
        let branch = BranchName::new(&branch_name)?;
        let old_tree = self.snapshot_head().unwrap_or_else(|_| FileTree::empty());
        self.dag.update_branch(&branch, new_id)?;
        self.meta.set_branch(&branch, &new_id)?;

        self.invalidate_head_cache();

        // Best-effort reflog; failure does not fail the cherry-pick.
        let _ = self.record_reflog(&old_head, &new_id, &format!("cherry-pick: {}", patch_id));

        self.sync_working_tree(&old_tree)?;

        Ok(new_id)
    }
2254
2255    // =========================================================================
2256    // Rebase
2257    // =========================================================================
2258
    /// Rebase the current branch onto a target branch.
    ///
    /// Finds commits unique to the current branch (after the LCA with target),
    /// then replays them onto the target branch tip. Updates the current
    /// branch pointer to the new tip.
    ///
    /// Merge, Identity, and Create patches are skipped during replay.
    pub fn rebase(&mut self, target_branch: &str) -> Result<RebaseResult, RepoError> {
        // Pre-rebase head for the reflog; Hash::ZERO when there is no head.
        let old_head = self.head().map(|(_, id)| id).unwrap_or(Hash::ZERO);
        let (head_branch, head_id) = self.head()?;
        let target_bn = BranchName::new(target_branch)?;
        let target_tip = self
            .dag
            .get_branch(&target_bn)
            .ok_or_else(|| RepoError::BranchNotFound(target_branch.to_string()))?;

        // Already at the target tip: nothing to replay.
        if head_id == target_tip {
            return Ok(RebaseResult {
                patches_replayed: 0,
                new_tip: head_id,
            });
        }

        let lca_id = self
            .dag
            .lca(&head_id, &target_tip)
            .ok_or_else(|| RepoError::Custom("no common ancestor found".to_string()))?;

        // Fast-forward case: HEAD is an ancestor of the target, so just move
        // the branch pointer and sync the working tree.
        if lca_id == head_id {
            let branch = BranchName::new(&head_branch)?;
            let old_tree = self.snapshot_head().unwrap_or_else(|_| FileTree::empty());
            self.dag.update_branch(&branch, target_tip)?;
            self.meta.set_branch(&branch, &target_tip)?;
            self.invalidate_head_cache();

            self.sync_working_tree(&old_tree)?;

            return Ok(RebaseResult {
                patches_replayed: 0,
                new_tip: target_tip,
            });
        }

        // Everything at or below the LCA is shared history and must not be
        // replayed. NOTE(review): despite the name, this set holds the LCA's
        // ancestors plus the LCA itself, not HEAD's ancestors.
        let mut head_ancestors = self.dag.ancestors(&lca_id);
        head_ancestors.insert(lca_id);

        // Collect the patches unique to the current branch by walking back
        // from HEAD, stopping at shared history.
        let mut to_replay: Vec<Patch> = Vec::new();
        let mut visited = HashSet::new();
        let mut stack = vec![head_id];

        while let Some(id) = stack.pop() {
            if visited.contains(&id) || head_ancestors.contains(&id) {
                continue;
            }
            visited.insert(id);
            if let Some(patch) = self.dag.get_patch(&id) {
                to_replay.push(patch.clone());
                for parent_id in &patch.parent_ids {
                    if !visited.contains(parent_id) {
                        stack.push(*parent_id);
                    }
                }
            }
        }

        // Replay in chronological order.
        to_replay.sort_by_key(|p| p.timestamp);

        // Point the branch at the target tip before replaying on top of it.
        let branch = BranchName::new(&head_branch)?;
        let old_tree = self.snapshot_head().unwrap_or_else(|_| FileTree::empty());
        self.dag.update_branch(&branch, target_tip)?;
        self.meta.set_branch(&branch, &target_tip)?;
        self.invalidate_head_cache();

        let mut current_parent = target_tip;
        let mut last_new_id = target_tip;
        let mut replayed = 0usize;

        for patch in &to_replay {
            // NOTE(review): Create patches are skipped (mirroring cherry_pick),
            // so files created only on this branch are not replayed — confirm
            // this is the intended rebase semantics.
            if patch.operation_type == OperationType::Merge
                || patch.operation_type == OperationType::Identity
                || patch.operation_type == OperationType::Create
            {
                continue;
            }

            // Recreate the patch with the running tip as its sole parent.
            let new_patch = if patch.operation_type == OperationType::Batch {
                let changes = patch.file_changes().unwrap_or_default();
                Patch::new_batch(
                    changes,
                    vec![current_parent],
                    self.author.clone(),
                    patch.message.clone(),
                )
            } else {
                Patch::new(
                    patch.operation_type.clone(),
                    patch.touch_set.clone(),
                    patch.target_path.clone(),
                    patch.payload.clone(),
                    vec![current_parent],
                    self.author.clone(),
                    patch.message.clone(),
                )
            };

            let new_id = self
                .dag
                .add_patch(new_patch.clone(), vec![current_parent])?;
            self.meta.store_patch(&new_patch)?;

            last_new_id = new_id;
            current_parent = new_id;
            replayed += 1;
        }

        // Move the branch to the last replayed patch and sync the tree.
        self.dag.update_branch(&branch, last_new_id)?;
        self.meta.set_branch(&branch, &last_new_id)?;
        self.invalidate_head_cache();

        self.sync_working_tree(&old_tree)?;

        // Best-effort reflog entry; failure does not fail the rebase.
        let _ = self.record_reflog(
            &old_head,
            &last_new_id,
            &format!("rebase onto {}", target_branch),
        );

        Ok(RebaseResult {
            patches_replayed: replayed,
            new_tip: last_new_id,
        })
    }
2389
2390    // =========================================================================
2391    // Interactive Rebase
2392    // =========================================================================
2393
2394    /// Group a patch chain into logical commits.
2395    ///
2396    /// A "logical commit" is a contiguous chain of per-file patches that share
2397    /// the same message. Returns groups in oldest-first order (root to tip).
2398    pub fn commit_groups(&self, patches: &[Patch]) -> Vec<Vec<Patch>> {
2399        if patches.is_empty() {
2400            return Vec::new();
2401        }
2402
2403        // Sort oldest first
2404        let mut sorted: Vec<Patch> = patches.to_vec();
2405        sorted.sort_by_key(|p| p.timestamp);
2406
2407        let mut groups: Vec<Vec<Patch>> = Vec::new();
2408        let mut current_group: Vec<Patch> = Vec::new();
2409        let mut current_message: Option<String> = None;
2410
2411        for patch in &sorted {
2412            // Skip structural patches (same as the rebase skip logic)
2413            if patch.operation_type == OperationType::Merge
2414                || patch.operation_type == OperationType::Identity
2415                || patch.operation_type == OperationType::Create
2416            {
2417                continue;
2418            }
2419
2420            match &current_message {
2421                None => {
2422                    current_message = Some(patch.message.clone());
2423                    current_group.push(patch.clone());
2424                }
2425                Some(msg) if msg == &patch.message => {
2426                    // Same message — same logical commit
2427                    current_group.push(patch.clone());
2428                }
2429                Some(_) => {
2430                    // Different message — new logical commit
2431                    if !current_group.is_empty() {
2432                        groups.push(std::mem::take(&mut current_group));
2433                    }
2434                    current_message = Some(patch.message.clone());
2435                    current_group.push(patch.clone());
2436                }
2437            }
2438        }
2439
2440        if !current_group.is_empty() {
2441            groups.push(current_group);
2442        }
2443
2444        groups
2445    }
2446
2447    /// Get patches between a base commit and HEAD (exclusive of base).
2448    ///
2449    /// Walks the first-parent chain from HEAD back to `base`, collecting
2450    /// all patches that are NOT ancestors of `base`.
2451    pub fn patches_since_base(&self, base: &PatchId) -> Vec<Patch> {
2452        let base_ancestors = self.dag.ancestors(base);
2453        let mut exclusion = base_ancestors;
2454        exclusion.insert(*base);
2455
2456        let (_, head_id) = self
2457            .head()
2458            .unwrap_or_else(|_| ("main".to_string(), Hash::ZERO));
2459        let chain = self.dag.patch_chain(&head_id);
2460
2461        chain
2462            .into_iter()
2463            .filter(|id| !exclusion.contains(id))
2464            .filter_map(|id| self.dag.get_patch(&id).cloned())
2465            .collect()
2466    }
2467
2468    /// Generate a TODO file for interactive rebase.
2469    ///
2470    /// Returns the TODO file content as a string.
2471    pub fn generate_rebase_todo(&self, base: &PatchId) -> Result<String, RepoError> {
2472        let patches = self.patches_since_base(base);
2473        let groups = self.commit_groups(&patches);
2474
2475        let mut lines = vec![
2476            String::new(),
2477            "# Interactive Rebase TODO".to_string(),
2478            "#".to_string(),
2479            "# Commands:".to_string(),
2480            "#  pick   = use commit".to_string(),
2481            "#  reword = use commit, but edit the commit message".to_string(),
2482            "#  edit   = use commit, but stop for amending".to_string(),
2483            "#  squash = use commit, but meld into previous commit".to_string(),
2484            "#  drop   = remove commit".to_string(),
2485            String::new(),
2486        ];
2487
2488        for group in &groups {
2489            if let Some(patch) = group.first() {
2490                let short_hash = patch.id.to_hex().chars().take(8).collect::<String>();
2491                lines.push(format!("pick {} {}", short_hash, patch.message));
2492            }
2493        }
2494
2495        lines.push(String::new());
2496        Ok(lines.join("\n"))
2497    }
2498
2499    /// Parse a TODO file into a rebase plan.
2500    pub fn parse_rebase_todo(
2501        &self,
2502        todo_content: &str,
2503        base: &PatchId,
2504    ) -> Result<RebasePlan, RepoError> {
2505        let patches = self.patches_since_base(base);
2506        let groups = self.commit_groups(&patches);
2507
2508        // Build a map from short hash -> commit group
2509        let mut group_map: HashMap<String, (String, Vec<PatchId>)> = HashMap::new();
2510        for group in &groups {
2511            if let Some(first) = group.first() {
2512                let short_hash = first.id.to_hex().chars().take(8).collect::<String>();
2513                let patch_ids: Vec<PatchId> = group.iter().map(|p| p.id).collect();
2514                group_map.insert(short_hash, (first.message.clone(), patch_ids));
2515            }
2516        }
2517
2518        let mut entries = Vec::new();
2519
2520        for line in todo_content.lines() {
2521            let line = line.trim();
2522            if line.is_empty() || line.starts_with('#') {
2523                continue;
2524            }
2525
2526            let mut parts = line.splitn(3, ' ');
2527            let action_str = match parts.next() {
2528                Some(a) => a,
2529                None => continue,
2530            };
2531            let short_hash = match parts.next() {
2532                Some(h) => h,
2533                None => continue,
2534            };
2535            let message = parts.next().unwrap_or("").to_string();
2536
2537            let action = match action_str {
2538                "pick" | "p" => RebaseAction::Pick,
2539                "reword" | "r" => RebaseAction::Reword,
2540                "edit" | "e" => RebaseAction::Edit,
2541                "squash" | "s" => RebaseAction::Squash,
2542                "drop" | "d" => RebaseAction::Drop,
2543                _ => continue, // Skip unknown actions
2544            };
2545
2546            // Look up the commit group by short hash
2547            let (group_message, patch_ids) = group_map
2548                .get(short_hash)
2549                .cloned()
2550                .unwrap_or_else(|| (message.clone(), Vec::new()));
2551
2552            // Use the message from the TODO if the user changed it (for reword)
2553            let effective_message = if action == RebaseAction::Reword {
2554                message
2555            } else {
2556                group_message
2557            };
2558
2559            let commit_tip = patch_ids.last().copied().unwrap_or(Hash::ZERO);
2560
2561            entries.push(RebasePlanEntry {
2562                action,
2563                commit_tip,
2564                message: effective_message,
2565                patch_ids,
2566            });
2567        }
2568
2569        Ok(RebasePlan { entries })
2570    }
2571
    /// Execute an interactive rebase plan.
    ///
    /// Replays commits according to the plan, handling pick/reword/edit/squash/drop.
    /// Returns the new tip patch ID.
    ///
    /// Flow: the branch is first detached onto `onto`, then each plan entry is
    /// replayed in order on top of a moving `current_parent`. `Edit` saves a
    /// `RebaseState` and returns early so the user can amend; otherwise the
    /// branch is pointed at the final replayed patch, the working tree is
    /// synced, a reflog entry is recorded, and the saved state is cleared.
    ///
    /// NOTE(review): if the plan ends with a `Squash` entry, its accumulated
    /// message is dropped (see the comment near the end) — the squashed
    /// commits are silently omitted from the replay in that case.
    pub fn rebase_interactive(
        &mut self,
        plan: &RebasePlan,
        onto: &PatchId,
    ) -> Result<PatchId, RepoError> {
        // Remember where we started, for the reflog and for rebase --abort.
        let old_head = self.head().map(|(_, id)| id).unwrap_or(Hash::ZERO);
        let (head_branch, _head_id) = self.head()?;

        // Detach HEAD to point at the onto target
        let branch = BranchName::new(&head_branch)?;
        // Snapshot the pre-rebase tree so the working directory can be synced
        // against it afterwards.
        let old_tree = self.snapshot_head().unwrap_or_else(|_| FileTree::empty());
        self.dag.update_branch(&branch, *onto)?;
        self.meta.set_branch(&branch, onto)?;
        self.invalidate_head_cache();

        // `current_parent` advances as each replayed patch is created;
        // `last_new_id` tracks the tip to publish at the end.
        let mut current_parent = *onto;
        let mut last_new_id = *onto;
        let mut squash_message_acc: Option<String> = None;

        for entry in &plan.entries {
            match entry.action {
                RebaseAction::Drop => {
                    // Skip this commit entirely
                    continue;
                }
                RebaseAction::Pick
                | RebaseAction::Reword
                | RebaseAction::Edit
                | RebaseAction::Squash => {
                    // Get the original patches for this commit group
                    let patches: Vec<Patch> = entry
                        .patch_ids
                        .iter()
                        .filter_map(|id| self.dag.get_patch(id).cloned())
                        .collect();

                    if patches.is_empty() {
                        continue;
                    }

                    // Determine the message to use
                    let message = if entry.action == RebaseAction::Squash {
                        // For squash, we accumulate messages and use them later
                        let mut msg = squash_message_acc.take().unwrap_or_default();
                        if !msg.is_empty() {
                            msg.push('\n');
                        }
                        msg.push_str(&entry.message);
                        squash_message_acc = Some(msg);
                        continue; // Don't create patches yet — wait for next pick/edit/reword
                    } else {
                        // For pick/reword/edit: use accumulated squash message if any
                        if let Some(sq_msg) = squash_message_acc.take() {
                            let mut combined = sq_msg;
                            if !combined.is_empty() && !entry.message.is_empty() {
                                combined.push('\n');
                            }
                            combined.push_str(&entry.message);
                            combined
                        } else {
                            entry.message.clone()
                        }
                    };

                    // Replay each patch in the commit group
                    for patch in &patches {
                        // Structural patches are never replayed (same skip set
                        // as `commit_groups`).
                        if patch.operation_type == OperationType::Merge
                            || patch.operation_type == OperationType::Identity
                            || patch.operation_type == OperationType::Create
                        {
                            continue;
                        }

                        // Re-create the patch with the new parent, the current
                        // author, and the (possibly squashed/reworded) message.
                        let new_patch = if patch.operation_type == OperationType::Batch {
                            let changes = patch.file_changes().unwrap_or_default();
                            Patch::new_batch(
                                changes,
                                vec![current_parent],
                                self.author.clone(),
                                message.clone(),
                            )
                        } else {
                            Patch::new(
                                patch.operation_type.clone(),
                                patch.touch_set.clone(),
                                patch.target_path.clone(),
                                patch.payload.clone(),
                                vec![current_parent],
                                self.author.clone(),
                                message.clone(),
                            )
                        };

                        let new_id = self
                            .dag
                            .add_patch(new_patch.clone(), vec![current_parent])?;
                        self.meta.store_patch(&new_patch)?;

                        last_new_id = new_id;
                        current_parent = new_id;
                    }

                    // Handle edit: save state and return
                    if entry.action == RebaseAction::Edit {
                        // NOTE(review): `next_entry` and `plan` are persisted
                        // as placeholders here; `rebase_abort` only reads
                        // `original_*` and `current_parent`, so --abort works,
                        // but a --continue path would need the caller to
                        // re-save these fields as the comments state.
                        let state = RebaseState {
                            original_head: old_head,
                            original_branch: head_branch.clone(),
                            onto: *onto,
                            next_entry: 0, // Will be set by caller
                            current_parent,
                            squash_message: None,
                            plan: Vec::new(), // Will be set by caller
                        };
                        let _ = self.save_rebase_state(&state);
                        // Point branch to current state so user can amend
                        self.dag.update_branch(&branch, last_new_id)?;
                        self.meta.set_branch(&branch, &last_new_id)?;
                        self.invalidate_head_cache();
                        self.sync_working_tree(&old_tree)?;
                        return Ok(last_new_id);
                    }
                }
            }
        }

        // If there's an unflushed squash message, apply it to the last commit
        // (This shouldn't normally happen — squash should be followed by another action)

        // Point branch to new tip and sync working tree
        self.dag.update_branch(&branch, last_new_id)?;
        self.meta.set_branch(&branch, &last_new_id)?;
        self.invalidate_head_cache();
        self.sync_working_tree(&old_tree)?;

        // Reflog failure is deliberately non-fatal.
        let _ = self.record_reflog(&old_head, &last_new_id, "interactive rebase");

        // Clean up rebase state
        let _ = self.clear_rebase_state();

        Ok(last_new_id)
    }
2717
2718    /// Save interactive rebase state for --continue / --abort.
2719    fn save_rebase_state(&self, state: &RebaseState) -> Result<(), RepoError> {
2720        let serialized = serde_json::to_string(state)
2721            .map_err(|e| RepoError::Custom(format!("failed to serialize rebase state: {}", e)))?;
2722        self.meta
2723            .set_config("rebase_state", &serialized)
2724            .map_err(RepoError::Meta)?;
2725        Ok(())
2726    }
2727
2728    /// Load interactive rebase state.
2729    pub fn load_rebase_state(&self) -> Result<Option<RebaseState>, RepoError> {
2730        match self
2731            .meta
2732            .get_config("rebase_state")
2733            .map_err(RepoError::Meta)?
2734        {
2735            Some(json) => {
2736                let state: RebaseState = serde_json::from_str(&json).map_err(|e| {
2737                    RepoError::Custom(format!("failed to parse rebase state: {}", e))
2738                })?;
2739                Ok(Some(state))
2740            }
2741            None => Ok(None),
2742        }
2743    }
2744
2745    /// Clear interactive rebase state.
2746    fn clear_rebase_state(&self) -> Result<(), RepoError> {
2747        let _ = self
2748            .meta
2749            .conn()
2750            .execute("DELETE FROM config WHERE key = 'rebase_state'", []);
2751        Ok(())
2752    }
2753
2754    /// Abort an in-progress interactive rebase.
2755    ///
2756    /// Restores the branch to its original position before rebase started.
2757    pub fn rebase_abort(&mut self) -> Result<(), RepoError> {
2758        let state = self
2759            .load_rebase_state()?
2760            .ok_or_else(|| RepoError::Custom("no rebase in progress".to_string()))?;
2761
2762        let branch = BranchName::new(&state.original_branch)?;
2763        let old_tree = self.snapshot_head().unwrap_or_else(|_| FileTree::empty());
2764        self.dag.update_branch(&branch, state.original_head)?;
2765        self.meta.set_branch(&branch, &state.original_head)?;
2766        self.invalidate_head_cache();
2767        self.sync_working_tree(&old_tree)?;
2768
2769        let _ = self.record_reflog(
2770            &state.current_parent,
2771            &state.original_head,
2772            "rebase --abort",
2773        );
2774
2775        self.clear_rebase_state()?;
2776        Ok(())
2777    }
2778
2779    // =========================================================================
2780    // Blame
2781    // =========================================================================
2782
2783    /// Show per-line commit attribution for a file.
2784    ///
2785    /// Returns a vector of `BlameEntry` tuples, one per line in the file at HEAD.
2786    pub fn blame(&self, path: &str) -> Result<Vec<BlameEntry>, RepoError> {
2787        let head_tree = self.snapshot_head()?;
2788        let hash = head_tree
2789            .get(path)
2790            .ok_or_else(|| RepoError::Custom(format!("file not found in HEAD: {}", path)))?;
2791
2792        let blob = self.cas.get_blob(hash)?;
2793        let content = String::from_utf8_lossy(&blob);
2794        let lines: Vec<&str> = content.lines().collect();
2795
2796        let (_, head_id) = self.head()?;
2797        let chain = self.dag.patch_chain(&head_id);
2798
2799        let mut patches: Vec<Patch> = chain
2800            .iter()
2801            .filter_map(|id| self.dag.get_patch(id).cloned())
2802            .collect();
2803        patches.reverse();
2804
2805        let mut line_author: Vec<Option<(PatchId, String, String)>> = vec![None; lines.len()];
2806        let mut current_lines: Vec<String> = Vec::new();
2807
2808        for patch in &patches {
2809            match &patch.operation_type {
2810                OperationType::Batch => {
2811                    if let Some(changes) = patch.file_changes()
2812                        && let Some(change) = changes.iter().find(|c| c.path == path)
2813                    {
2814                        match change.op {
2815                            OperationType::Create | OperationType::Modify => {
2816                                let payload_hex = String::from_utf8_lossy(&change.payload);
2817                                let new_content =
2818                                    if let Ok(blob_hash) = Hash::from_hex(&payload_hex) {
2819                                        if let Ok(blob_data) = self.cas.get_blob(&blob_hash) {
2820                                            String::from_utf8_lossy(&blob_data).to_string()
2821                                        } else {
2822                                            continue;
2823                                        }
2824                                    } else {
2825                                        continue;
2826                                    };
2827
2828                                let old_refs: Vec<&str> =
2829                                    current_lines.iter().map(|s| s.as_str()).collect();
2830                                let new_refs: Vec<&str> = new_content.lines().collect();
2831                                let changes_diff =
2832                                    crate::engine::merge::diff_lines(&old_refs, &new_refs);
2833
2834                                let mut new_line_author: Vec<Option<(PatchId, String, String)>> =
2835                                    Vec::new();
2836                                let mut old_idx = 0usize;
2837
2838                                for change_diff in &changes_diff {
2839                                    match change_diff {
2840                                        crate::engine::merge::LineChange::Unchanged(clines) => {
2841                                            for i in 0..clines.len() {
2842                                                if old_idx + i < line_author.len() {
2843                                                    new_line_author
2844                                                        .push(line_author[old_idx + i].clone());
2845                                                } else {
2846                                                    new_line_author.push(None);
2847                                                }
2848                                            }
2849                                            old_idx += clines.len();
2850                                        }
2851                                        crate::engine::merge::LineChange::Deleted(clines) => {
2852                                            old_idx += clines.len();
2853                                        }
2854                                        crate::engine::merge::LineChange::Inserted(clines) => {
2855                                            for _ in 0..clines.len() {
2856                                                new_line_author.push(Some((
2857                                                    patch.id,
2858                                                    patch.message.clone(),
2859                                                    patch.author.clone(),
2860                                                )));
2861                                            }
2862                                        }
2863                                    }
2864                                }
2865
2866                                line_author = new_line_author;
2867                                current_lines =
2868                                    new_content.lines().map(|s| s.to_string()).collect();
2869                            }
2870                            OperationType::Delete => {
2871                                line_author.clear();
2872                                current_lines.clear();
2873                                break;
2874                            }
2875                            _ => {}
2876                        }
2877                    }
2878                }
2879                _ => {
2880                    let targets_file = patch.target_path.as_deref() == Some(path);
2881
2882                    match patch.operation_type {
2883                        OperationType::Create | OperationType::Modify if targets_file => {
2884                            let new_content = if !patch.payload.is_empty() {
2885                                let payload_hex = String::from_utf8_lossy(&patch.payload);
2886                                if let Ok(blob_hash) = Hash::from_hex(&payload_hex) {
2887                                    if let Ok(blob_data) = self.cas.get_blob(&blob_hash) {
2888                                        String::from_utf8_lossy(&blob_data).to_string()
2889                                    } else {
2890                                        continue;
2891                                    }
2892                                } else {
2893                                    continue;
2894                                }
2895                            } else {
2896                                continue;
2897                            };
2898
2899                            let old_refs: Vec<&str> =
2900                                current_lines.iter().map(|s| s.as_str()).collect();
2901                            let new_refs: Vec<&str> = new_content.lines().collect();
2902                            let changes = crate::engine::merge::diff_lines(&old_refs, &new_refs);
2903
2904                            let mut new_line_author: Vec<Option<(PatchId, String, String)>> =
2905                                Vec::new();
2906                            let mut old_idx = 0usize;
2907
2908                            for change in &changes {
2909                                match change {
2910                                    crate::engine::merge::LineChange::Unchanged(clines) => {
2911                                        for i in 0..clines.len() {
2912                                            if old_idx + i < line_author.len() {
2913                                                new_line_author
2914                                                    .push(line_author[old_idx + i].clone());
2915                                            } else {
2916                                                new_line_author.push(None);
2917                                            }
2918                                        }
2919                                        old_idx += clines.len();
2920                                    }
2921                                    crate::engine::merge::LineChange::Deleted(clines) => {
2922                                        old_idx += clines.len();
2923                                    }
2924                                    crate::engine::merge::LineChange::Inserted(clines) => {
2925                                        for _ in 0..clines.len() {
2926                                            new_line_author.push(Some((
2927                                                patch.id,
2928                                                patch.message.clone(),
2929                                                patch.author.clone(),
2930                                            )));
2931                                        }
2932                                    }
2933                                }
2934                            }
2935
2936                            line_author = new_line_author;
2937                            current_lines = new_content.lines().map(|s| s.to_string()).collect();
2938                        }
2939                        OperationType::Delete if targets_file => {
2940                            line_author.clear();
2941                            current_lines.clear();
2942                            break;
2943                        }
2944                        _ => {}
2945                    }
2946                }
2947            }
2948        }
2949
2950        let mut result = Vec::new();
2951        for (i, entry) in line_author.iter().enumerate() {
2952            let line_content = lines.get(i).unwrap_or(&"").to_string();
2953            if let Some((pid, msg, author)) = entry {
2954                result.push(BlameEntry {
2955                    patch_id: *pid,
2956                    message: msg.clone(),
2957                    author: author.clone(),
2958                    line: line_content,
2959                    line_number: i + 1,
2960                });
2961            } else {
2962                result.push(BlameEntry {
2963                    patch_id: Hash::ZERO,
2964                    message: String::new(),
2965                    author: String::new(),
2966                    line: line_content,
2967                    line_number: i + 1,
2968                });
2969            }
2970        }
2971
2972        Ok(result)
2973    }
2974
2975    // =========================================================================
2976    // Log
2977    // =========================================================================
2978
2979    /// Get the patch history (log) for a branch (first-parent chain only).
2980    pub fn log(&self, branch: Option<&str>) -> Result<Vec<Patch>, RepoError> {
2981        let target_id = match branch {
2982            Some(name) => {
2983                let bn = BranchName::new(name)?;
2984                self.dag
2985                    .get_branch(&bn)
2986                    .ok_or_else(|| RepoError::BranchNotFound(name.to_string()))?
2987            }
2988            None => {
2989                let (_, id) = self.head()?;
2990                id
2991            }
2992        };
2993
2994        let chain = self.dag.patch_chain(&target_id);
2995        let mut patches = Vec::new();
2996        for id in chain {
2997            if let Some(node) = self.dag.get_node(&id) {
2998                patches.push(node.patch.clone());
2999            }
3000        }
3001        Ok(patches)
3002    }
3003
3004    /// Get the full patch history for a branch, including all reachable commits
3005    /// (not just the first-parent chain). Merged branch commits are included.
3006    pub fn log_all(&self, branch: Option<&str>) -> Result<Vec<Patch>, RepoError> {
3007        let target_id = match branch {
3008            Some(name) => {
3009                let bn = BranchName::new(name)?;
3010                self.dag
3011                    .get_branch(&bn)
3012                    .ok_or_else(|| RepoError::BranchNotFound(name.to_string()))?
3013            }
3014            None => {
3015                let (_, id) = self.head()?;
3016                id
3017            }
3018        };
3019
3020        let mut patches = self.dag.reachable_patches(&target_id);
3021        patches.sort_by(|a, b| b.timestamp.cmp(&a.timestamp).then_with(|| a.id.cmp(&b.id)));
3022        Ok(patches)
3023    }
3024
3025    // =========================================================================
3026    // Accessors
3027    // =========================================================================
3028
    /// Get the repository root path.
    ///
    /// Returns a borrowed `Path`; clone into a `PathBuf` if ownership is
    /// needed. (Presumably the project directory containing `.suture/`, per
    /// the module-level layout doc — confirm against the constructor.)
    pub fn root(&self) -> &Path {
        &self.root
    }
3033
    /// Get a reference to the DAG.
    ///
    /// Read-only access to the in-memory patch graph.
    pub fn dag(&self) -> &PatchDag {
        &self.dag
    }
3038
    /// Get a mutable reference to the DAG.
    ///
    /// NOTE(review): mutating the DAG through this handle bypasses the
    /// metadata persistence done by the repository's own operations (which
    /// pair `dag` updates with `meta` writes) — callers presumably must keep
    /// the two in sync themselves; confirm before using for writes.
    pub fn dag_mut(&mut self) -> &mut PatchDag {
        &mut self.dag
    }
3043
    /// Get a reference to the metadata store.
    ///
    /// Read-only access to the persistent (SQLite-backed) metadata.
    pub fn meta(&self) -> &crate::metadata::MetadataStore {
        &self.meta
    }
3048
    /// Get a reference to the CAS.
    ///
    /// Read-only access to the content-addressed blob store.
    pub fn cas(&self) -> &BlobStore {
        &self.cas
    }
3053
3054    // =========================================================================
3055    // Remote Operations
3056    // =========================================================================
3057
3058    /// Add a remote Hub.
3059    /// Stores the remote URL in metadata config as "remote.<name>.url".
3060    pub fn add_remote(&self, name: &str, url: &str) -> Result<(), RepoError> {
3061        let key = format!("remote.{}.url", name);
3062        self.meta.set_config(&key, url).map_err(RepoError::Meta)
3063    }
3064
3065    /// List configured remotes.
3066    pub fn list_remotes(&self) -> Result<Vec<(String, String)>, RepoError> {
3067        let mut remotes = Vec::new();
3068        for (key, value) in self.meta.list_config()? {
3069            if let Some(name) = key
3070                .strip_prefix("remote.")
3071                .and_then(|n| n.strip_suffix(".url"))
3072            {
3073                remotes.push((name.to_string(), value));
3074            }
3075        }
3076        Ok(remotes)
3077    }
3078
3079    /// Remove a configured remote.
3080    pub fn remove_remote(&self, name: &str) -> Result<(), RepoError> {
3081        let key = format!("remote.{}.url", name);
3082        if self.meta.get_config(&key)?.is_none() {
3083            return Err(RepoError::Custom(format!("remote '{}' not found", name)));
3084        }
3085        self.meta.delete_config(&key)?;
3086        if let Ok(Some(_)) = self
3087            .meta
3088            .get_config(&format!("remote.{}.last_pushed", name))
3089        {
3090            self.meta
3091                .delete_config(&format!("remote.{}.last_pushed", name))?;
3092        }
3093        Ok(())
3094    }
3095
3096    // =========================================================================
3097    // Worktree Operations
3098    // =========================================================================
3099
    /// Check whether this repository is a linked worktree.
    ///
    /// The flag is set when the repository is opened; presumably it is
    /// derived from the `.suture/worktree` marker file written by
    /// `add_worktree` — confirm in `Repository::open`.
    pub fn is_worktree(&self) -> bool {
        self.is_worktree
    }
3104
3105    /// Add a worktree. Creates a new directory linked to this repo's data.
3106    pub fn add_worktree(
3107        &mut self,
3108        name: &str,
3109        path: &Path,
3110        branch: Option<&str>,
3111    ) -> Result<(), RepoError> {
3112        if name.is_empty()
3113            || name.contains('/')
3114            || name.contains('\\')
3115            || name.contains("..")
3116            || name.contains('\0')
3117        {
3118            return Err(RepoError::Custom("invalid worktree name".into()));
3119        }
3120        if path.exists() {
3121            return Err(RepoError::Custom(format!(
3122                "path '{}' already exists",
3123                path.display()
3124            )));
3125        }
3126        if self.is_worktree {
3127            return Err(RepoError::Custom(
3128                "cannot add worktree from a linked worktree; use the main repo".into(),
3129            ));
3130        }
3131
3132        let abs_path = if path.is_relative() {
3133            std::env::current_dir()?.join(path)
3134        } else {
3135            path.to_path_buf()
3136        };
3137
3138        fs::create_dir_all(&abs_path)?;
3139        let new_suture_dir = abs_path.join(".suture");
3140        fs::create_dir_all(&new_suture_dir)?;
3141
3142        #[cfg(unix)]
3143        {
3144            std::os::unix::fs::symlink(
3145                self.suture_dir.join("metadata.db"),
3146                new_suture_dir.join("metadata.db"),
3147            )?;
3148            if self.suture_dir.join("objects").exists() {
3149                std::os::unix::fs::symlink(
3150                    self.suture_dir.join("objects"),
3151                    new_suture_dir.join("objects"),
3152                )?;
3153            }
3154            if self.suture_dir.join("keys").exists() {
3155                std::os::unix::fs::symlink(
3156                    self.suture_dir.join("keys"),
3157                    new_suture_dir.join("keys"),
3158                )?;
3159            }
3160        }
3161        #[cfg(not(unix))]
3162        {
3163            return Err(RepoError::Unsupported(
3164                "worktrees require symlink support (Unix only)".into(),
3165            ));
3166        }
3167
3168        fs::write(
3169            new_suture_dir.join("worktree"),
3170            self.root.to_string_lossy().as_ref(),
3171        )?;
3172
3173        let branch_name = branch.unwrap_or("main");
3174        fs::write(new_suture_dir.join("HEAD"), branch_name)?;
3175
3176        self.set_config(
3177            &format!("worktree.{}.path", name),
3178            &abs_path.to_string_lossy(),
3179        )?;
3180        self.set_config(&format!("worktree.{}.branch", name), branch_name)?;
3181
3182        let mut wt_repo = Repository::open(&abs_path)?;
3183        wt_repo.checkout(branch_name)?;
3184
3185        Ok(())
3186    }
3187
3188    /// List all worktrees. Returns the main worktree plus any linked worktrees.
3189    pub fn list_worktrees(&self) -> Result<Vec<WorktreeEntry>, RepoError> {
3190        let mut worktrees = Vec::new();
3191
3192        let main_branch = self
3193            .head()
3194            .map(|(n, _)| n)
3195            .unwrap_or_else(|_| "main".to_string());
3196        worktrees.push(WorktreeEntry {
3197            name: String::new(),
3198            path: self.root.to_string_lossy().to_string(),
3199            branch: main_branch,
3200            is_main: true,
3201        });
3202
3203        let config = self.list_config()?;
3204        let mut names: Vec<&str> = Vec::new();
3205        for (key, _value) in &config {
3206            if let Some(n) = key
3207                .strip_prefix("worktree.")
3208                .and_then(|n| n.strip_suffix(".path"))
3209            {
3210                names.push(n);
3211            }
3212        }
3213        names.sort();
3214
3215        for name in names {
3216            let path_key = format!("worktree.{}.path", name);
3217            let branch_key = format!("worktree.{}.branch", name);
3218            let path_val = self
3219                .meta
3220                .get_config(&path_key)
3221                .unwrap_or(None)
3222                .unwrap_or_default();
3223            let branch_val = self
3224                .meta
3225                .get_config(&branch_key)
3226                .unwrap_or(None)
3227                .unwrap_or_default();
3228            worktrees.push(WorktreeEntry {
3229                name: name.to_string(),
3230                path: path_val,
3231                branch: branch_val,
3232                is_main: false,
3233            });
3234        }
3235
3236        Ok(worktrees)
3237    }
3238
3239    /// Remove a worktree by name. Deletes the worktree directory and cleans
3240    /// up the main repo's config entries.
3241    pub fn remove_worktree(&mut self, name: &str) -> Result<(), RepoError> {
3242        let path_key = format!("worktree.{}.path", name);
3243        let path_val = self
3244            .meta
3245            .get_config(&path_key)?
3246            .ok_or_else(|| RepoError::Custom(format!("worktree '{}' not found", name)))?;
3247
3248        let wt_path = Path::new(&path_val);
3249        if wt_path.exists() {
3250            fs::remove_dir_all(wt_path)?;
3251        }
3252
3253        self.meta.delete_config(&path_key)?;
3254        self.meta
3255            .delete_config(&format!("worktree.{}.branch", name))?;
3256
3257        Ok(())
3258    }
3259
3260    /// Rename a tracked file. Stages both the deletion of the old path
3261    /// and the addition of the new path.
3262    pub fn rename_file(&self, old_path: &str, new_path: &str) -> Result<(), RepoError> {
3263        let old = self.root.join(old_path);
3264        let new = self.root.join(new_path);
3265
3266        if !old.exists() {
3267            return Err(RepoError::Custom(format!("path not found: {}", old_path)));
3268        }
3269
3270        if new.exists() {
3271            return Err(RepoError::Custom(format!(
3272                "path already exists: {}",
3273                new_path
3274            )));
3275        }
3276
3277        fs::rename(old, new).map_err(|e| RepoError::Custom(format!("rename failed: {}", e)))?;
3278
3279        self.add(old_path)?;
3280        self.add(new_path)?;
3281
3282        Ok(())
3283    }
3284
3285    /// Get the URL for a remote.
3286    pub fn get_remote_url(&self, name: &str) -> Result<String, RepoError> {
3287        let key = format!("remote.{}.url", name);
3288        self.meta
3289            .get_config(&key)
3290            .unwrap_or(None)
3291            .ok_or_else(|| RepoError::Custom(format!("remote '{}' not found", name)))
3292    }
3293
3294    /// Get all patches in the DAG as a Vec.
3295    pub fn all_patches(&self) -> Vec<Patch> {
3296        self.dag
3297            .patch_ids()
3298            .iter()
3299            .filter_map(|id| self.dag.get_patch(id).cloned())
3300            .collect()
3301    }
3302
3303    // =========================================================================
3304    // Garbage Collection
3305    // =========================================================================
3306
3307    /// Remove unreachable patches from the repository.
3308    ///
3309    /// Patches not reachable from any branch tip are deleted from the
3310    /// metadata store (patches, edges, signatures tables). The in-memory
3311    /// DAG is not updated; reopen the repository after GC to get a clean DAG.
3312    pub fn gc(&self) -> Result<GcResult, RepoError> {
3313        let branches = self.dag.list_branches();
3314        let all_ids: HashSet<PatchId> = self.dag.patch_ids().into_iter().collect();
3315
3316        let mut reachable: HashSet<PatchId> = HashSet::new();
3317        for (_name, tip_id) in &branches {
3318            reachable.insert(*tip_id);
3319            for anc in self.dag.ancestors(tip_id) {
3320                reachable.insert(anc);
3321            }
3322        }
3323
3324        let unreachable: Vec<&PatchId> = all_ids
3325            .iter()
3326            .filter(|id| !reachable.contains(id))
3327            .collect();
3328        let conn = self.meta().conn();
3329
3330        for id in &unreachable {
3331            let hex = id.to_hex();
3332            conn.execute(
3333                "DELETE FROM signatures WHERE patch_id = ?1",
3334                rusqlite::params![hex],
3335            )
3336            .map_err(|e| RepoError::Custom(e.to_string()))?;
3337            conn.execute(
3338                "DELETE FROM edges WHERE parent_id = ?1 OR child_id = ?1",
3339                rusqlite::params![hex],
3340            )
3341            .map_err(|e| RepoError::Custom(e.to_string()))?;
3342            conn.execute("DELETE FROM patches WHERE id = ?1", rusqlite::params![hex])
3343                .map_err(|e| RepoError::Custom(e.to_string()))?;
3344        }
3345
3346        Ok(GcResult {
3347            patches_removed: unreachable.len(),
3348        })
3349    }
3350
3351    // =========================================================================
3352    // Filesystem Check
3353    // =========================================================================
3354
3355    /// Verify repository integrity.
3356    ///
3357    /// Checks DAG consistency (parent references), branch integrity
3358    /// (branch targets exist), blob references (CAS has blobs referenced
3359    /// by patches), and HEAD consistency.
3360    pub fn fsck(&self) -> Result<FsckResult, RepoError> {
3361        let mut checks_passed = 0usize;
3362        let mut warnings = Vec::new();
3363        let mut errors = Vec::new();
3364
3365        // 1. DAG consistency: every patch's parents exist in the DAG
3366        let all_ids: HashSet<PatchId> = self.dag.patch_ids().into_iter().collect();
3367        let mut parent_ok = true;
3368        for id in &all_ids {
3369            if let Some(node) = self.dag.get_node(id) {
3370                for parent_id in &node.parent_ids {
3371                    if !all_ids.contains(parent_id) {
3372                        errors.push(format!(
3373                            "patch {} references missing parent {}",
3374                            id.to_hex(),
3375                            parent_id.to_hex()
3376                        ));
3377                        parent_ok = false;
3378                    }
3379                }
3380            }
3381        }
3382        if parent_ok {
3383            checks_passed += 1;
3384        }
3385
3386        // 2. Branch integrity: every branch target exists in the DAG
3387        let branches = self.dag.list_branches();
3388        let mut branch_ok = true;
3389        for (name, target_id) in &branches {
3390            if !all_ids.contains(target_id) {
3391                errors.push(format!(
3392                    "branch '{}' targets non-existent patch {}",
3393                    name,
3394                    target_id.to_hex()
3395                ));
3396                branch_ok = false;
3397            }
3398        }
3399        if branch_ok {
3400            checks_passed += 1;
3401        }
3402
3403        // 3. Blob references: non-empty payloads should reference CAS blobs
3404        let mut blob_ok = true;
3405        let all_patches = self.all_patches();
3406        for patch in &all_patches {
3407            if patch.is_batch() {
3408                if let Some(changes) = patch.file_changes() {
3409                    for change in &changes {
3410                        if change.payload.is_empty() {
3411                            continue;
3412                        }
3413                        let hex = String::from_utf8_lossy(&change.payload);
3414                        if let Ok(hash) = Hash::from_hex(&hex)
3415                            && !self.cas().has_blob(&hash)
3416                        {
3417                            warnings.push(format!(
3418                                "batch patch {} references missing blob {} for path {}",
3419                                patch.id.to_hex(),
3420                                hash.to_hex(),
3421                                change.path
3422                            ));
3423                            blob_ok = false;
3424                        }
3425                    }
3426                }
3427                continue;
3428            }
3429            if patch.payload.is_empty() {
3430                continue;
3431            }
3432            if let Some(hash) = resolve_payload_to_hash(patch) {
3433                if !self.cas().has_blob(&hash) {
3434                    warnings.push(format!(
3435                        "patch {} references missing blob {}",
3436                        patch.id.to_hex(),
3437                        hash.to_hex()
3438                    ));
3439                    blob_ok = false;
3440                }
3441            } else {
3442                warnings.push(format!(
3443                    "patch {} has non-UTF-8 payload, cannot verify blob reference",
3444                    patch.id.to_hex()
3445                ));
3446                blob_ok = false;
3447            }
3448        }
3449        if blob_ok {
3450            checks_passed += 1;
3451        }
3452
3453        // 4. HEAD consistency: the current HEAD branch exists
3454        let mut head_ok = false;
3455        match self.head() {
3456            Ok((branch_name, _target_id)) => {
3457                if branches.iter().any(|(n, _)| n == &branch_name) {
3458                    head_ok = true;
3459                    checks_passed += 1;
3460                } else {
3461                    errors.push(format!(
3462                        "HEAD branch '{}' does not exist in branch list",
3463                        branch_name
3464                    ));
3465                }
3466            }
3467            Err(e) => {
3468                errors.push(format!("HEAD is invalid: {}", e));
3469            }
3470        }
3471        if head_ok {
3472            checks_passed += 1;
3473        }
3474
3475        Ok(FsckResult {
3476            checks_passed,
3477            warnings,
3478            errors,
3479        })
3480    }
3481
3482    // =========================================================================
3483    // Reflog
3484    // =========================================================================
3485
3486    fn record_reflog(
3487        &self,
3488        old_head: &PatchId,
3489        new_head: &PatchId,
3490        message: &str,
3491    ) -> Result<(), RepoError> {
3492        // Use the SQLite reflog table (O(1) append, no full-rewrite)
3493        self.meta
3494            .reflog_push(old_head, new_head, message)
3495            .map_err(RepoError::Meta)?;
3496        Ok(())
3497    }
3498
3499    /// Get reflog entries as (head_hash, entry_string) pairs.
3500    pub fn reflog_entries(&self) -> Result<Vec<(String, String)>, RepoError> {
3501        // Try the SQLite reflog table first
3502        let sqlite_entries = self.meta.reflog_list().map_err(RepoError::Meta)?;
3503
3504        if !sqlite_entries.is_empty() {
3505            // Convert (old_head, new_head, message) → (new_head, formatted_entry)
3506            let entries: Vec<(String, String)> = sqlite_entries
3507                .into_iter()
3508                .map(|(old_head, new_head, message)| {
3509                    let ts = std::time::SystemTime::now()
3510                        .duration_since(std::time::UNIX_EPOCH)
3511                        .unwrap_or_default()
3512                        .as_secs();
3513                    (new_head, format!("{}:{}:{}", ts, old_head, message))
3514                })
3515                .collect();
3516            return Ok(entries);
3517        }
3518
3519        // Fallback: migrate from legacy config-based reflog
3520        match self.meta.get_config("reflog").map_err(RepoError::Meta)? {
3521            Some(json) => {
3522                let legacy: Vec<(String, String)> = serde_json::from_str(&json).unwrap_or_default();
3523                // Migrate legacy entries to SQLite
3524                for (new_head, entry) in &legacy {
3525                    let parts: Vec<&str> = entry.splitn(3, ':').collect();
3526                    if parts.len() >= 3 {
3527                        let old_head = parts[1];
3528                        let msg = parts[2];
3529                        if let (Ok(old), Ok(new)) =
3530                            (Hash::from_hex(old_head), Hash::from_hex(new_head))
3531                        {
3532                            let _ = self.meta.reflog_push(&old, &new, msg);
3533                        }
3534                    }
3535                }
3536                // Clear legacy config after migration
3537                let _ = self.meta.delete_config("reflog");
3538                // Reload from SQLite
3539                let sqlite_entries = self.meta.reflog_list().map_err(RepoError::Meta)?;
3540                let entries: Vec<(String, String)> = sqlite_entries
3541                    .into_iter()
3542                    .map(|(old_head, new_head, message)| {
3543                        (new_head, format!("{}:{}:{}", 0, old_head, message))
3544                    })
3545                    .collect();
3546                Ok(entries)
3547            }
3548            None => Ok(Vec::new()),
3549        }
3550    }
3551}
3552
3553// =============================================================================
3554// .sutureignore Support
3555// =============================================================================
3556
/// Load and parse `.sutureignore` patterns from the repository root.
///
/// A missing or unreadable file simply means "ignore nothing". Blank
/// lines and `#` comment lines are dropped; surrounding whitespace is
/// trimmed from each pattern.
fn load_ignore_patterns(root: &Path) -> Vec<String> {
    let Ok(contents) = fs::read_to_string(root.join(".sutureignore")) else {
        return Vec::new();
    };
    contents
        .lines()
        .map(str::trim)
        .filter(|line| !line.is_empty() && !line.starts_with('#'))
        .map(str::to_string)
        .collect()
}
3571
/// Check if a relative path matches any ignore pattern.
///
/// Three pattern forms are supported:
/// - `*suffix` — matches any path ending in `suffix` (e.g. `*.o`);
/// - `dir/` — matches everything under that directory prefix;
/// - bare name — matches the exact path or any path under it as a
///   directory component.
fn is_ignored(rel_path: &str, patterns: &[String]) -> bool {
    patterns.iter().any(|pattern| {
        if let Some(suffix) = pattern.strip_prefix('*') {
            // Suffix match: "*.o" matches "foo.o"
            rel_path.ends_with(suffix)
        } else if pattern.ends_with('/') {
            // Directory prefix match: "build/" matches "build/output.o"
            rel_path.starts_with(pattern.as_str())
        } else {
            // Exact match or path component match
            rel_path == pattern.as_str() || rel_path.starts_with(&format!("{}/", pattern))
        }
    })
}
3594
/// A file entry found while walking the repository.
struct WalkEntry {
    /// Path relative to the repo root, with `/` separators on all platforms.
    relative: String,
    /// Full on-disk path of the file; currently unused (hence the `allow`).
    #[allow(dead_code)]
    full_path: PathBuf,
}
3601
3602/// Walk the repository directory, collecting files and respecting .sutureignore.
3603fn walk_dir(root: &Path, ignore_patterns: &[String]) -> Result<Vec<WalkEntry>, io::Error> {
3604    let mut entries = Vec::new();
3605    walk_dir_recursive(root, root, ignore_patterns, &mut entries)?;
3606    Ok(entries)
3607}
3608
3609fn walk_dir_recursive(
3610    root: &Path,
3611    current: &Path,
3612    ignore_patterns: &[String],
3613    entries: &mut Vec<WalkEntry>,
3614) -> Result<(), io::Error> {
3615    if !current.is_dir() {
3616        return Ok(());
3617    }
3618
3619    let mut dir_entries: Vec<_> = fs::read_dir(current)?
3620        .filter_map(|e| e.ok())
3621        .filter(|e| {
3622            // Skip .suture directory
3623            let name = e.file_name();
3624            name != ".suture"
3625        })
3626        .collect();
3627
3628    dir_entries.sort_by_key(|e| e.file_name());
3629
3630    for entry in dir_entries {
3631        let path = entry.path();
3632        let rel = path
3633            .strip_prefix(root)
3634            .unwrap_or(&path)
3635            .to_string_lossy()
3636            .replace('\\', "/");
3637
3638        // Skip ignored paths
3639        if is_ignored(&rel, ignore_patterns) {
3640            continue;
3641        }
3642
3643        if path.is_dir() {
3644            walk_dir_recursive(root, &path, ignore_patterns, entries)?;
3645        } else if path.is_file() {
3646            entries.push(WalkEntry {
3647                relative: rel,
3648                full_path: path,
3649            });
3650        }
3651    }
3652
3653    Ok(())
3654}
3655
3656/// Restore pending merge parents from config (persisted across repo reopens).
3657fn restore_pending_merge_parents(meta: &crate::metadata::MetadataStore) -> Vec<PatchId> {
3658    let Ok(Some(json)) = meta.get_config("pending_merge_parents") else {
3659        return Vec::new();
3660    };
3661    serde_json::from_str::<Vec<PatchId>>(&json).unwrap_or_default()
3662}
3663
3664// =============================================================================
3665// Repository Status
3666// =============================================================================
3667
/// A single stash entry.
#[derive(Debug, Clone)]
pub struct StashEntry {
    /// Position of this entry in the stash list.
    pub index: usize,
    /// Message supplied when the stash was created.
    pub message: String,
    /// Branch that was checked out when the stash was created.
    pub branch: String,
    /// HEAD patch ID at stash time, as a string — presumably hex; confirm
    /// at the stash creation site.
    pub head_id: String,
}
3676
/// Information about a worktree.
#[derive(Debug, Clone)]
pub struct WorktreeEntry {
    /// Worktree name; empty string for the main worktree.
    pub name: String,
    /// Filesystem path of the worktree root.
    pub path: String,
    /// Branch checked out in this worktree.
    pub branch: String,
    /// True for the main repository's own checkout, false for linked worktrees.
    pub is_main: bool,
}
3685
/// A single blame entry for one line of a file.
#[derive(Debug, Clone)]
pub struct BlameEntry {
    /// The patch ID that last modified this line.
    pub patch_id: PatchId,
    /// The commit message of that patch.
    pub message: String,
    /// The author of that patch.
    pub author: String,
    /// The line content (without trailing newline — confirm at the blame site).
    pub line: String,
    /// The 1-based line number within the file.
    pub line_number: usize,
}
3700
/// Result of a rebase operation.
#[derive(Debug, Clone)]
pub struct RebaseResult {
    /// Number of patches that were replayed onto the new base.
    pub patches_replayed: usize,
    /// The new tip patch ID after rebase.
    pub new_tip: PatchId,
}
3709
/// Actions available during interactive rebase (mirrors git's rebase verbs).
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum RebaseAction {
    /// Apply the commit as-is.
    Pick,
    /// Apply the commit but edit the message.
    Reword,
    /// Apply the commit and pause for amending.
    Edit,
    /// Combine with the previous commit.
    Squash,
    /// Skip this commit entirely.
    Drop,
}
3724
/// A single entry in the interactive rebase plan.
#[derive(Debug, Clone)]
pub struct RebasePlanEntry {
    /// The action to perform on this logical commit.
    pub action: RebaseAction,
    /// The tip patch ID of the logical commit (last patch in the per-file chain).
    pub commit_tip: PatchId,
    /// The commit message (for display and reword).
    pub message: String,
    /// All patch IDs in this logical commit's chain.
    pub patch_ids: Vec<PatchId>,
}
3737
/// A complete interactive rebase plan.
#[derive(Debug, Clone)]
pub struct RebasePlan {
    /// The ordered list of plan entries to process.
    pub entries: Vec<RebasePlanEntry>,
}
3743
/// State persisted during an interactive rebase for --continue / --abort.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RebaseState {
    /// Original HEAD before rebase started (restored on abort).
    pub original_head: PatchId,
    /// Original branch name.
    pub original_branch: String,
    /// Target we're rebasing onto.
    pub onto: PatchId,
    /// Index of the next entry to process on --continue.
    pub next_entry: usize,
    /// The plan entries remaining, in serialized form.
    pub plan: Vec<RebasePlanEntrySerialized>,
    /// Current parent for chaining newly created patches.
    pub current_parent: PatchId,
    /// Accumulated squash messages (for combining with squash action).
    pub squash_message: Option<String>,
}
3762
/// Serialized form of a plan entry (for state persistence).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RebasePlanEntrySerialized {
    /// Action name as a string — presumably lowercased `RebaseAction`;
    /// confirm against the (de)serialization code.
    pub action: String,
    /// Commit tip patch ID as a string (presumably hex).
    pub commit_tip: String,
    /// The commit message.
    pub message: String,
    /// Patch IDs of the commit's chain, as strings.
    pub patch_ids: Vec<String>,
}
3771
/// Repository status information.
#[derive(Debug, Clone)]
pub struct RepoStatus {
    /// Current HEAD branch name (None if HEAD is unset or unreadable).
    pub head_branch: Option<String>,
    /// Current HEAD patch ID (None if HEAD is unset or unreadable).
    pub head_patch: Option<PatchId>,
    /// Number of branches.
    pub branch_count: usize,
    /// Staged files as (path, status) pairs.
    pub staged_files: Vec<(String, FileStatus)>,
    /// Total number of patches in the DAG.
    pub patch_count: usize,
}
3786
3787// =============================================================================
3788// Merge Execution Types
3789// =============================================================================
3790
/// Result of executing a merge.
#[derive(Debug, Clone)]
pub struct MergeExecutionResult {
    /// Whether the merge was fully clean (no conflicts).
    pub is_clean: bool,
    /// The resulting file tree after the merge.
    pub merged_tree: FileTree,
    /// The merge commit patch ID (set if `is_clean` or all conflicts resolved).
    pub merge_patch_id: Option<PatchId>,
    /// Unresolved conflicts (empty if `is_clean`).
    pub unresolved_conflicts: Vec<ConflictInfo>,
    /// Number of patches applied from the source branch.
    pub patches_applied: usize,
}
3805
/// Information about an unresolved merge conflict.
#[derive(Debug, Clone)]
pub struct ConflictInfo {
    /// The path where the conflict occurs.
    pub path: String,
    /// The conflicting patch ID from the current branch.
    pub our_patch_id: PatchId,
    /// The conflicting patch ID from the source branch.
    pub their_patch_id: PatchId,
    /// Our version of the file (CAS blob hash); None if absent on our side.
    pub our_content_hash: Option<Hash>,
    /// Their version of the file (CAS blob hash); None if absent on their side.
    pub their_content_hash: Option<Hash>,
    /// The base version of the file (CAS blob hash from the LCA); None if
    /// the file did not exist at the merge base.
    pub base_content_hash: Option<Hash>,
}
3822
/// Result of a garbage collection pass (see `Repository::gc`).
#[derive(Debug, Clone)]
pub struct GcResult {
    /// Number of unreachable patches removed from the metadata store.
    pub patches_removed: usize,
}
3829
/// Result of a filesystem check (see `Repository::fsck`).
#[derive(Debug, Clone)]
pub struct FsckResult {
    /// Number of checks that passed without issues.
    pub checks_passed: usize,
    /// Non-fatal warnings encountered (e.g. missing blobs).
    pub warnings: Vec<String>,
    /// Fatal errors encountered (e.g. dangling parents or branches).
    pub errors: Vec<String>,
}
3840
3841/// Line-level three-way merge using diff3 algorithm.
3842///
3843/// Returns `Ok(merged_content)` if clean, `Err(conflict_marker_lines)` if conflicts.
3844fn three_way_merge(
3845    base: Option<&str>,
3846    ours: &str,
3847    theirs: &str,
3848    head_branch: &str,
3849    source_branch: &str,
3850) -> Result<String, Vec<String>> {
3851    use crate::engine::merge::three_way_merge_lines;
3852
3853    let base_lines: Vec<&str> = base.map(|s| s.lines().collect()).unwrap_or_default();
3854    let ours_lines: Vec<&str> = ours.lines().collect();
3855    let theirs_lines: Vec<&str> = theirs.lines().collect();
3856
3857    let ours_label = if head_branch.is_empty() {
3858        "HEAD".to_string()
3859    } else {
3860        format!("{head_branch} (HEAD)")
3861    };
3862    let theirs_label = if source_branch.is_empty() {
3863        "theirs".to_string()
3864    } else {
3865        source_branch.to_string()
3866    };
3867
3868    let result = three_way_merge_lines(
3869        &base_lines,
3870        &ours_lines,
3871        &theirs_lines,
3872        &ours_label,
3873        &theirs_label,
3874    );
3875
3876    if result.is_clean {
3877        Ok(result.lines.join("\n"))
3878    } else {
3879        Err(result.lines)
3880    }
3881}
3882
3883// =============================================================================
3884// Tests
3885// =============================================================================
3886
3887#[cfg(test)]
3888mod tests {
3889    use super::*;
3890
3891    #[test]
3892    fn test_init_and_open() {
3893        let dir = tempfile::tempdir().unwrap();
3894        let repo_path = dir.path();
3895
3896        let _repo = Repository::init(repo_path, "alice").unwrap();
3897        assert!(repo_path.join(".suture").exists());
3898        assert!(repo_path.join(".suture/metadata.db").exists());
3899
3900        // Open the same repo
3901        let repo2 = Repository::open(repo_path).unwrap();
3902        assert_eq!(repo2.list_branches().len(), 1);
3903    }
3904
3905    #[test]
3906    fn test_init_already_exists() {
3907        let dir = tempfile::tempdir().unwrap();
3908        Repository::init(dir.path(), "alice").unwrap();
3909        let result = Repository::init(dir.path(), "alice");
3910        assert!(matches!(result, Err(RepoError::AlreadyExists(_))));
3911    }
3912
3913    #[test]
3914    fn test_create_branch() {
3915        let dir = tempfile::tempdir().unwrap();
3916        let mut repo = Repository::init(dir.path(), "alice").unwrap();
3917
3918        repo.create_branch("feature", None).unwrap();
3919        assert_eq!(repo.list_branches().len(), 2);
3920
3921        let result = repo.create_branch("feature", None);
3922        assert!(result.is_err());
3923    }
3924
    // Staging a new file reflects it in `status` as Added.
    #[test]
    fn test_add_and_status() {
        let dir = tempfile::tempdir().unwrap();
        let repo = Repository::init(dir.path(), "alice").unwrap();

        let test_file = dir.path().join("hello.txt");
        fs::write(&test_file, "hello, suture!").unwrap();

        repo.add("hello.txt").unwrap();
        let status = repo.status().unwrap();
        // Exactly one staged entry: ("hello.txt", Added).
        assert_eq!(status.staged_files.len(), 1);
        assert_eq!(status.staged_files[0].0, "hello.txt");
        assert_eq!(status.staged_files[0].1, FileStatus::Added);
    }

    // Adding a path that does not exist on disk must error.
    #[test]
    fn test_add_nonexistent_file() {
        let dir = tempfile::tempdir().unwrap();
        let repo = Repository::init(dir.path(), "alice").unwrap();
        let result = repo.add("does_not_exist.txt");
        assert!(result.is_err());
    }
3947
    // Committing staged changes clears the staging area and records a patch.
    #[test]
    fn test_commit() {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice").unwrap();

        let test_file = dir.path().join("test.txt");
        fs::write(&test_file, "test content").unwrap();
        repo.add("test.txt").unwrap();

        let patch_id = repo.commit("initial file").unwrap();

        let status = repo.status().unwrap();
        // Commit consumes the staged files.
        assert!(status.staged_files.is_empty());
        assert!(repo.dag.has_patch(&patch_id));
        // 2 = root patch created by init + this commit.
        assert_eq!(repo.dag.patch_count(), 2);
    }

    // Committing with nothing staged yields NothingToCommit.
    #[test]
    fn test_commit_nothing() {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice").unwrap();
        let result = repo.commit("empty commit");
        assert!(matches!(result, Err(RepoError::NothingToCommit)));
    }
3972
    // `log` returns the root patch plus every commit made so far.
    #[test]
    fn test_log() {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice").unwrap();

        let test_file = dir.path().join("test.txt");
        fs::write(&test_file, "v1").unwrap();
        repo.add("test.txt").unwrap();
        repo.commit("first commit").unwrap();

        fs::write(&test_file, "v2").unwrap();
        repo.add("test.txt").unwrap();
        repo.commit("second commit").unwrap();

        let log = repo.log(None).unwrap();
        assert_eq!(log.len(), 3); // root + 2 commits
    }
3990
    // `snapshot_head` reconstructs the committed tree; file content is keyed
    // by its content hash.
    #[test]
    fn test_snapshot_head() {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice").unwrap();

        let test_file = dir.path().join("test.txt");
        fs::write(&test_file, "hello world").unwrap();
        repo.add("test.txt").unwrap();
        repo.commit("add test.txt").unwrap();

        let tree = repo.snapshot_head().unwrap();
        assert!(tree.contains("test.txt"));
        // The tree entry is the hash of the file's bytes.
        assert_eq!(tree.get("test.txt"), Some(&Hash::from_data(b"hello world")));
    }

    // A repo with no commits snapshots to an empty tree.
    #[test]
    fn test_snapshot_empty_repo() {
        let dir = tempfile::tempdir().unwrap();
        let repo = Repository::init(dir.path(), "alice").unwrap();

        let tree = repo.snapshot_head().unwrap();
        assert!(tree.is_empty());
    }
4014
    // Checkout materializes the target branch's tree and removes files that
    // belong only to other branches. The "feature" patch is injected directly
    // into the DAG/metadata (bypassing commit) to diverge from main.
    #[test]
    fn test_checkout() {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice").unwrap();

        // Commit a file on main
        let main_file = dir.path().join("main.txt");
        fs::write(&main_file, "main content").unwrap();
        repo.add("main.txt").unwrap();
        repo.commit("add main.txt").unwrap();

        // Create feature branch and add different file
        let (_, head_id) = repo.head().unwrap();
        // Payload is the hex hash of the content (resolved via CAS on apply)
        // — NOTE(review): payload format assumed from resolve_payload_to_hash; confirm.
        let feat_patch = Patch::new(
            OperationType::Modify,
            TouchSet::single("feature.txt"),
            Some("feature.txt".to_string()),
            Hash::from_data(b"feature content")
                .to_hex()
                .as_bytes()
                .to_vec(),
            vec![head_id],
            "alice".to_string(),
            "add feature.txt".to_string(),
        );
        let _feat_id = repo
            .dag_mut()
            .add_patch(feat_patch.clone(), vec![head_id])
            .unwrap();
        repo.meta.store_patch(&feat_patch).unwrap();

        // Checkout main (should remove feature.txt if it exists)
        repo.checkout("main").unwrap();
        assert!(!dir.path().join("feature.txt").exists());
        assert!(dir.path().join("main.txt").exists());
    }
4051
4052    #[test]
4053    fn test_checkout_refuses_dirty() {
4054        let dir = tempfile::tempdir().unwrap();
4055        let mut repo = Repository::init(dir.path(), "alice").unwrap();
4056
4057        // Stage a file but don't commit
4058        let staged = dir.path().join("staged.txt");
4059        fs::write(&staged, "staged").unwrap();
4060        repo.add("staged.txt").unwrap();
4061
4062        // Checkout now auto-stashes instead of refusing
4063        let result = repo.checkout("main");
4064        assert!(result.is_ok());
4065
4066        // After auto-stash pop, the stashed changes should be restored to the working set
4067        let working_set = repo.meta.working_set().unwrap();
4068        assert!(working_set.iter().any(|(p, _)| p == "staged.txt"));
4069    }
4070
    // Diffing an earlier commit against HEAD reports the modified file.
    #[test]
    fn test_diff() {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice").unwrap();

        let test_file = dir.path().join("test.txt");
        fs::write(&test_file, "v1").unwrap();
        repo.add("test.txt").unwrap();
        let first_commit = repo.commit("first").unwrap();

        fs::write(&test_file, "v2").unwrap();
        repo.add("test.txt").unwrap();
        repo.commit("second").unwrap();

        // Diff between first commit and HEAD
        let diffs = repo.diff(Some(&first_commit.to_hex()), None).unwrap();
        assert_eq!(diffs.len(), 1);
        assert_eq!(diffs[0].diff_type, DiffType::Modified);
    }
4090
    // Reverting a commit that added a file removes it from both the head
    // snapshot and the working tree.
    #[test]
    fn test_revert() {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice").unwrap();

        let test_file = dir.path().join("test.txt");
        fs::write(&test_file, "original").unwrap();
        repo.add("test.txt").unwrap();
        let commit_id = repo.commit("add file").unwrap();

        // Revert the commit — should remove the file from disk
        repo.revert(&commit_id, None).unwrap();

        let tree = repo.snapshot_head().unwrap();
        assert!(!tree.contains("test.txt"));
        assert!(
            !test_file.exists(),
            "revert should remove the file from the working tree"
        );
    }
4111
    // Re-opening a repo rebuilds the in-memory DAG from persistent metadata
    // with the same patch count and full history.
    #[test]
    fn test_open_reconstructs_full_dag() {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice").unwrap();

        // Create a chain of commits
        let f = dir.path().join("f.txt");
        fs::write(&f, "v1").unwrap();
        repo.add("f.txt").unwrap();
        repo.commit("first").unwrap();

        fs::write(&f, "v2").unwrap();
        repo.add("f.txt").unwrap();
        repo.commit("second").unwrap();

        fs::write(&f, "v3").unwrap();
        repo.add("f.txt").unwrap();
        repo.commit("third").unwrap();

        let original_count = repo.dag.patch_count();

        // Open and verify full DAG is reconstructed
        let repo2 = Repository::open(dir.path()).unwrap();
        assert_eq!(repo2.dag.patch_count(), original_count);

        let log = repo2.log(None).unwrap();
        assert_eq!(log.len(), 4); // root + 3 commits
    }
4140
4141    #[test]
4142    fn test_ignore_patterns() {
4143        let patterns = vec![
4144            "target/".to_string(),
4145            "*.o".to_string(),
4146            "build".to_string(),
4147        ];
4148
4149        assert!(is_ignored("target/debug/main", &patterns));
4150        assert!(is_ignored("foo.o", &patterns));
4151        assert!(is_ignored("build/output", &patterns));
4152        assert!(is_ignored("build", &patterns));
4153        assert!(!is_ignored("src/main.rs", &patterns));
4154        assert!(!is_ignored("main.rs", &patterns));
4155    }
4156
    // End-to-end: commit on main, branch, keep committing on main, then
    // checkout the branch and verify it still shows the pre-branch state.
    #[test]
    fn test_full_workflow_with_checkout() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        // Commit file A on main
        fs::write(dir.path().join("a.txt"), "version 1")?;
        repo.add("a.txt")?;
        repo.commit("add a.txt v1")?;

        // Create feature branch
        repo.create_branch("feature", None)?;

        // Modify A and add B on main
        fs::write(dir.path().join("a.txt"), "version 2")?;
        fs::write(dir.path().join("b.txt"), "new file")?;
        repo.add("a.txt")?;
        repo.add("b.txt")?;
        repo.commit("modify a, add b")?;

        // Checkout feature (should have a.txt v1, no b.txt)
        repo.checkout("feature")?;
        let content = fs::read_to_string(dir.path().join("a.txt"))?;
        assert_eq!(content, "version 1");
        assert!(!dir.path().join("b.txt").exists());

        Ok(())
    }
4185
    // `add_all` stages every non-ignored file and reports the count.
    #[test]
    fn test_add_all() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let repo = Repository::init(dir.path(), "alice").unwrap();

        fs::write(dir.path().join("a.txt"), "a")?;
        fs::write(dir.path().join("b.txt"), "b")?;
        // .suture is auto-ignored
        let count = repo.add_all().unwrap();
        assert_eq!(count, 2);
        Ok(())
    }
4198
    // A merge of branches that touched disjoint files is clean: it produces a
    // two-parent Merge patch and all files from both sides end up on disk.
    #[test]
    fn test_execute_merge_clean() {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice").unwrap();

        // Shared base commit.
        fs::write(dir.path().join("base.txt"), "base").unwrap();
        repo.add("base.txt").unwrap();
        repo.commit("add base").unwrap();

        repo.create_branch("feature", None).unwrap();

        // Diverge main with a new file.
        fs::write(dir.path().join("main_file.txt"), "main content").unwrap();
        repo.add("main_file.txt").unwrap();
        repo.commit("add main file").unwrap();

        repo.checkout("feature").unwrap();

        // Diverge feature with a different new file.
        fs::write(dir.path().join("feat_file.txt"), "feature content").unwrap();
        repo.add("feat_file.txt").unwrap();
        repo.commit("add feature file").unwrap();

        let result = repo.execute_merge("main").unwrap();
        assert!(result.is_clean);
        assert!(result.merge_patch_id.is_some());
        assert!(result.unresolved_conflicts.is_empty());
        assert!(dir.path().join("main_file.txt").exists());
        assert!(dir.path().join("feat_file.txt").exists());
        assert!(dir.path().join("base.txt").exists());

        // The merge commit is recorded with both branch heads as parents.
        let log = repo.log(None).unwrap();
        let merge_patch = log
            .iter()
            .find(|p| p.operation_type == OperationType::Merge);
        assert!(merge_patch.is_some());
        assert_eq!(merge_patch.unwrap().parent_ids.len(), 2);
    }
4235
    // Both branches editing the same file conflicts: no merge patch is made
    // and the file is rewritten with <<<<<<< / >>>>>>> conflict markers.
    #[test]
    fn test_execute_merge_conflicting() {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice").unwrap();

        fs::write(dir.path().join("shared.txt"), "original").unwrap();
        repo.add("shared.txt").unwrap();
        repo.commit("add shared").unwrap();

        repo.create_branch("feature", None).unwrap();

        // Main edits shared.txt one way...
        fs::write(dir.path().join("shared.txt"), "main version").unwrap();
        repo.add("shared.txt").unwrap();
        repo.commit("modify on main").unwrap();

        repo.checkout("feature").unwrap();

        // ...feature edits it a different way.
        fs::write(dir.path().join("shared.txt"), "feature version").unwrap();
        repo.add("shared.txt").unwrap();
        repo.commit("modify on feature").unwrap();

        let result = repo.execute_merge("main").unwrap();
        assert!(!result.is_clean);
        assert!(result.merge_patch_id.is_none());
        assert_eq!(result.unresolved_conflicts.len(), 1);
        assert_eq!(result.unresolved_conflicts[0].path, "shared.txt");

        // HEAD is "feature" here, so its side is labeled as HEAD.
        let content = fs::read_to_string(dir.path().join("shared.txt")).unwrap();
        assert!(content.contains("<<<<<<< feature (HEAD)"));
        assert!(content.contains("main version"));
        assert!(content.contains("feature version"));
        assert!(content.contains(">>>>>>> main"));
    }
4269
    // Merging a strictly-ahead branch into an unchanged one is clean and
    // brings the new file into the working tree (fast-forward case).
    #[test]
    fn test_execute_merge_fast_forward() {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice").unwrap();

        fs::write(dir.path().join("base.txt"), "base").unwrap();
        repo.add("base.txt").unwrap();
        repo.commit("add base").unwrap();

        repo.create_branch("feature", None).unwrap();

        // Only "feature" advances; "main" stays at the base commit.
        repo.checkout("feature").unwrap();
        fs::write(dir.path().join("new_file.txt"), "new content").unwrap();
        repo.add("new_file.txt").unwrap();
        repo.commit("add new file on feature").unwrap();

        repo.checkout("main").unwrap();

        let result = repo.execute_merge("feature").unwrap();
        assert!(result.is_clean);
        assert!(dir.path().join("new_file.txt").exists());
    }
4292
    // After a conflicting merge, committing the manually-fixed file produces
    // a two-parent resolution commit and clears the pending merge state.
    #[test]
    fn test_resolve_merge_conflict() {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice").unwrap();

        fs::write(dir.path().join("shared.txt"), "original").unwrap();
        repo.add("shared.txt").unwrap();
        repo.commit("add shared").unwrap();

        repo.create_branch("feature", None).unwrap();

        fs::write(dir.path().join("shared.txt"), "main version").unwrap();
        repo.add("shared.txt").unwrap();
        repo.commit("modify on main").unwrap();

        repo.checkout("feature").unwrap();

        fs::write(dir.path().join("shared.txt"), "feature version").unwrap();
        repo.add("shared.txt").unwrap();
        repo.commit("modify on feature").unwrap();

        // Conflicting merge leaves the repo in a pending-merge state.
        let _result = repo.execute_merge("main").unwrap();

        // Resolve by writing the final content and committing.
        fs::write(dir.path().join("shared.txt"), "resolved content").unwrap();
        repo.add("shared.txt").unwrap();
        let commit_id = repo.commit("resolve merge conflict").unwrap();

        // Committing the resolution consumes the pending merge parents...
        assert!(repo.pending_merge_parents.is_empty());

        // ...and the resolution commit records both sides as parents.
        let log = repo.log(None).unwrap();
        let resolve_patch = log.iter().find(|p| p.id == commit_id).unwrap();
        assert_eq!(resolve_patch.parent_ids.len(), 2);
    }
4326
    // `three_way_merge` unit cases: identical edits, one-sided edits, and a
    // no-base conflict (Err carries the marker lines).
    #[test]
    fn test_three_way_merge() {
        // Both sides made the same edit — result is that edit.
        let ours = "line1\nline2-modified\nline3";
        let theirs = "line1\nline2-modified\nline3";
        let result = three_way_merge(Some("line1\nline2\nline3"), ours, theirs, "main", "feature");
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), ours);

        // Only "theirs" changed — their change wins.
        let result = three_way_merge(Some("base"), "base", "changed", "main", "feature");
        assert_eq!(result.unwrap(), "changed");

        // Only "ours" changed — our change wins.
        let result = three_way_merge(Some("base"), "changed", "base", "main", "feature");
        assert_eq!(result.unwrap(), "changed");

        // No common base and divergent content — Err with conflict markers.
        let result = three_way_merge(None, "ours content", "theirs content", "main", "feature");
        assert!(result.is_err());
        let lines = result.unwrap_err();
        assert!(lines[0].contains("<<<<<<<"));
        assert!(lines.last().unwrap().contains(">>>>>>>"));
    }
4347
    // Config round-trip: unset keys read None, set keys read back, and
    // list_config exposes entries including the internal "author" key.
    #[test]
    fn test_config_get_set() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        assert!(repo.get_config("user.name")?.is_none());
        assert!(repo.get_config("user.email")?.is_none());

        repo.set_config("user.name", "Alice")?;
        repo.set_config("user.email", "alice@example.com")?;

        assert_eq!(repo.get_config("user.name")?.unwrap(), "Alice");
        assert_eq!(repo.get_config("user.email")?.unwrap(), "alice@example.com");

        // List config (filters internal keys)
        let config = repo.list_config()?;
        assert!(config.iter().any(|(k, v)| k == "user.name" && v == "Alice"));
        assert!(
            config
                .iter()
                .any(|(k, v)| k == "user.email" && v == "alice@example.com")
        );
        // Internal keys should be present in raw list
        assert!(config.iter().any(|(k, _)| k == "author"));

        Ok(())
    }
4375
    // Deleting branches: the current branch is protected, others are removed.
    #[test]
    fn test_delete_branch() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        repo.create_branch("feature", None)?;
        repo.create_branch("develop", None)?;
        assert_eq!(repo.list_branches().len(), 3);

        // Cannot delete current branch
        let result = repo.delete_branch("main");
        assert!(result.is_err());

        // Can delete other branches
        repo.delete_branch("feature")?;
        assert_eq!(repo.list_branches().len(), 2);

        repo.delete_branch("develop")?;
        assert_eq!(repo.list_branches().len(), 1);

        Ok(())
    }
4398
    // Creating a tag at HEAD (target = None) makes it appear in list_tags.
    #[test]
    fn test_tags() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        fs::write(dir.path().join("a.txt"), "v1")?;
        repo.add("a.txt")?;
        let _commit_id = repo.commit("first commit")?;

        // Create tag at HEAD
        repo.create_tag("v1.0", None)?;
        let tags = repo.list_tags()?;
        assert_eq!(tags.len(), 1);

        Ok(())
    }
4415
    // `patches_since(id)` returns the commits strictly after `id` in order,
    // excluding `id` itself and the root patch.
    #[test]
    fn test_patches_since() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        // Commit 1
        fs::write(dir.path().join("a.txt"), "v1")?;
        repo.add("a.txt")?;
        let id1 = repo.commit("first")?;

        // Commit 2
        fs::write(dir.path().join("a.txt"), "v2")?;
        repo.add("a.txt")?;
        let id2 = repo.commit("second")?;

        // Commit 3
        fs::write(dir.path().join("b.txt"), "new")?;
        repo.add("b.txt")?;
        let id3 = repo.commit("third")?;

        // patches_since(id1) should return [id2, id3]
        let since = repo.patches_since(&id1);
        assert_eq!(since.len(), 2);
        assert_eq!(since[0].id, id2);
        assert_eq!(since[1].id, id3);

        // patches_since(id3) should return []
        let since = repo.patches_since(&id3);
        assert!(since.is_empty());

        // patches_since(root_patch) should return [id1, id2, id3] (3 file patches)
        // Get the root patch (Initial commit)
        // log() is newest-first, so .last() is the root patch.
        let root_id = repo.log(None)?.last().unwrap().id;
        let since = repo.patches_since(&root_id);
        assert_eq!(since.len(), 3);
        assert_eq!(since[0].id, id1);
        assert_eq!(since[1].id, id2);
        assert_eq!(since[2].id, id3);

        Ok(())
    }
4457
    // Pending-merge parents from a conflicting merge survive a repo close and
    // reopen, so the resolution commit still gets both parents.
    #[test]
    fn test_pending_merge_persistence() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        fs::write(dir.path().join("shared.txt"), "original")?;
        repo.add("shared.txt")?;
        repo.commit("add shared")?;

        repo.create_branch("feature", None)?;

        fs::write(dir.path().join("shared.txt"), "main version")?;
        repo.add("shared.txt")?;
        repo.commit("modify on main")?;

        repo.checkout("feature")?;

        fs::write(dir.path().join("shared.txt"), "feature version")?;
        repo.add("shared.txt")?;
        repo.commit("modify on feature")?;

        // Trigger conflicting merge — should persist parents
        let _ = repo.execute_merge("main")?;
        assert_eq!(repo.pending_merge_parents.len(), 2);

        // Simulate repo close + reopen
        drop(repo);
        let mut repo2 = Repository::open(dir.path())?;
        // The pending parents were reloaded from persistent metadata.
        assert_eq!(repo2.pending_merge_parents.len(), 2);

        // Resolve the merge
        fs::write(dir.path().join("shared.txt"), "resolved")?;
        repo2.add("shared.txt")?;
        let resolve_id = repo2.commit("resolve")?;
        assert!(repo2.pending_merge_parents.is_empty());

        // Verify merge commit has 2 parents
        let patch = repo2
            .log(None)?
            .into_iter()
            .find(|p| p.id == resolve_id)
            .unwrap();
        assert_eq!(patch.parent_ids.len(), 2);

        Ok(())
    }
4504
    // A freshly-initialized repo reports no uncommitted changes.
    #[test]
    fn test_has_uncommitted_changes_clean() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let repo = Repository::init(dir.path(), "alice")?;

        assert!(!repo.has_uncommitted_changes()?);

        Ok(())
    }

    // Staged-but-uncommitted files count as uncommitted changes.
    #[test]
    fn test_has_uncommitted_changes_staged() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let repo = Repository::init(dir.path(), "alice")?;

        fs::write(dir.path().join("a.txt"), "content")?;
        repo.add("a.txt")?;

        assert!(repo.has_uncommitted_changes()?);

        Ok(())
    }

    // On-disk edits that were never staged also count as uncommitted changes.
    #[test]
    fn test_has_uncommitted_changes_unstaged() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        fs::write(dir.path().join("a.txt"), "original")?;
        repo.add("a.txt")?;
        repo.commit("initial")?;

        // Modify the file on disk without re-staging it.
        fs::write(dir.path().join("a.txt"), "modified on disk")?;

        assert!(repo.has_uncommitted_changes()?);

        Ok(())
    }
4543
    // stash push reverts disk + staging to HEAD; pop restores both.
    #[test]
    fn test_stash_push_pop() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        fs::write(dir.path().join("a.txt"), "original")?;
        repo.add("a.txt")?;
        repo.commit("initial")?;

        fs::write(dir.path().join("a.txt"), "staged changes")?;
        repo.add("a.txt")?;

        // First stash gets index 0.
        let stash_index = repo.stash_push(Some("my stash"))?;
        assert_eq!(stash_index, 0);

        // Push clears the working set and restores committed content on disk.
        assert!(repo.meta.working_set()?.is_empty());
        let on_disk = fs::read_to_string(dir.path().join("a.txt"))?;
        assert_eq!(on_disk, "original");

        repo.stash_pop()?;

        // Pop brings back the stashed file content...
        let on_disk = fs::read_to_string(dir.path().join("a.txt"))?;
        assert_eq!(on_disk, "staged changes");

        // ...and re-stages it as Modified.
        let ws = repo.meta.working_set()?;
        assert_eq!(ws.len(), 1);
        assert_eq!(ws[0].0, "a.txt");
        assert_eq!(ws[0].1, FileStatus::Modified);

        Ok(())
    }
4575
    // Multiple stashes list in push order with sequential indices + messages.
    #[test]
    fn test_stash_list() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        fs::write(dir.path().join("a.txt"), "original")?;
        repo.add("a.txt")?;
        repo.commit("initial")?;

        fs::write(dir.path().join("a.txt"), "change 1")?;
        repo.add("a.txt")?;
        let idx0 = repo.stash_push(Some("first stash"))?;
        assert_eq!(idx0, 0);

        fs::write(dir.path().join("a.txt"), "change 2")?;
        repo.add("a.txt")?;
        let idx1 = repo.stash_push(Some("second stash"))?;
        assert_eq!(idx1, 1);

        let list = repo.stash_list()?;
        assert_eq!(list.len(), 2);
        assert_eq!(list[0].index, 0);
        assert_eq!(list[0].message, "first stash");
        assert_eq!(list[1].index, 1);
        assert_eq!(list[1].message, "second stash");

        Ok(())
    }
4604
    // `stash_apply` restores the content but — unlike pop — keeps the entry.
    #[test]
    fn test_stash_apply_keeps_entry() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        fs::write(dir.path().join("a.txt"), "original")?;
        repo.add("a.txt")?;
        repo.commit("initial")?;

        fs::write(dir.path().join("a.txt"), "changes to apply")?;
        repo.add("a.txt")?;
        let idx = repo.stash_push(Some("keep me"))?;
        assert_eq!(idx, 0);

        repo.stash_apply(0)?;

        // Content restored on disk...
        let on_disk = fs::read_to_string(dir.path().join("a.txt"))?;
        assert_eq!(on_disk, "changes to apply");

        // ...but the stash entry is still listed.
        let list = repo.stash_list()?;
        assert_eq!(list.len(), 1);
        assert_eq!(list[0].index, 0);
        assert_eq!(list[0].message, "keep me");

        Ok(())
    }
4631
    // `stash_drop` removes the entry; dropping a missing index errors.
    #[test]
    fn test_stash_drop() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        fs::write(dir.path().join("a.txt"), "original")?;
        repo.add("a.txt")?;
        repo.commit("initial")?;

        fs::write(dir.path().join("a.txt"), "stashed content")?;
        repo.add("a.txt")?;
        repo.stash_push(Some("droppable"))?;

        repo.stash_drop(0)?;

        let list = repo.stash_list()?;
        assert!(list.is_empty());

        // Dropping the same index again must fail — nothing left at 0.
        let result = repo.stash_drop(0);
        assert!(result.is_err());

        Ok(())
    }
4655
    // Popping with no stash entries is an error.
    #[test]
    fn test_stash_pop_empty() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        let result = repo.stash_pop();
        assert!(result.is_err());

        Ok(())
    }

    // Pushing with a clean working set errors with a "nothing to commit"
    // message.
    #[test]
    fn test_stash_push_nothing() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        let result = repo.stash_push(None);
        assert!(result.is_err());
        let err = result.unwrap_err().to_string();
        assert!(err.contains("nothing to commit"));

        Ok(())
    }
4679
    // Soft reset moves HEAD only: working tree and staging area are untouched.
    #[test]
    fn test_reset_soft() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        fs::write(dir.path().join("file1.txt"), "first content")?;
        repo.add("file1.txt")?;
        let first_commit = repo.commit("first commit")?;

        fs::write(dir.path().join("file2.txt"), "second content")?;
        repo.add("file2.txt")?;
        repo.commit("second commit")?;

        // Stage a modification before reset to verify soft preserves staging
        fs::write(dir.path().join("file2.txt"), "modified second")?;
        repo.add("file2.txt")?;

        let result = repo.reset(&first_commit.to_hex(), ResetMode::Soft)?;
        assert_eq!(result, first_commit);

        // HEAD points to first commit
        let (_, head_id) = repo.head()?;
        assert_eq!(head_id, first_commit);

        // Working tree still has file2 (soft doesn't touch working tree)
        assert!(dir.path().join("file2.txt").exists());
        assert_eq!(
            fs::read_to_string(dir.path().join("file2.txt"))?,
            "modified second"
        );

        // Staging area still has the staged changes (soft doesn't clear staging)
        let status = repo.status()?;
        assert_eq!(status.staged_files.len(), 1);
        assert_eq!(status.staged_files[0].0, "file2.txt");

        Ok(())
    }
4718
    // Mixed reset moves HEAD and clears staging but leaves the working tree.
    #[test]
    fn test_reset_mixed() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        fs::write(dir.path().join("file1.txt"), "first content")?;
        repo.add("file1.txt")?;
        let first_commit = repo.commit("first commit")?;

        fs::write(dir.path().join("file2.txt"), "second content")?;
        repo.add("file2.txt")?;
        repo.commit("second commit")?;

        // Stage a modification before reset to verify mixed clears staging
        fs::write(dir.path().join("file2.txt"), "modified second")?;
        repo.add("file2.txt")?;

        let result = repo.reset(&first_commit.to_hex(), ResetMode::Mixed)?;
        assert_eq!(result, first_commit);

        // HEAD points to first commit
        let (_, head_id) = repo.head()?;
        assert_eq!(head_id, first_commit);

        // Working tree still has file2 content on disk (mixed doesn't touch working tree)
        assert!(dir.path().join("file2.txt").exists());
        assert_eq!(
            fs::read_to_string(dir.path().join("file2.txt"))?,
            "modified second"
        );

        // Staging area is cleared
        let status = repo.status()?;
        assert!(status.staged_files.is_empty());

        Ok(())
    }
4756
    // Hard reset moves HEAD and rewrites the working tree to match it.
    #[test]
    fn test_reset_hard() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        fs::write(dir.path().join("file1.txt"), "first content")?;
        repo.add("file1.txt")?;
        let first_commit = repo.commit("first commit")?;

        fs::write(dir.path().join("file2.txt"), "second content")?;
        repo.add("file2.txt")?;
        repo.commit("second commit")?;

        let result = repo.reset(&first_commit.to_hex(), ResetMode::Hard)?;
        assert_eq!(result, first_commit);

        // HEAD points to first commit
        let (_, head_id) = repo.head()?;
        assert_eq!(head_id, first_commit);

        // Working tree matches first commit (file2 removed from disk)
        assert!(dir.path().join("file1.txt").exists());
        assert!(!dir.path().join("file2.txt").exists());

        // Head snapshot agrees with the on-disk state.
        let tree = repo.snapshot_head()?;
        assert!(tree.contains("file1.txt"));
        assert!(!tree.contains("file2.txt"));

        Ok(())
    }
4787
    // Cherry-picking a commit from another branch replays its change (b.txt)
    // onto the current branch, keeping the original message.
    #[test]
    fn test_cherry_pick() -> Result<(), Box<dyn std::error::Error>> {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice")?;

        fs::write(dir.path().join("a.txt"), "content of a")?;
        repo.add("a.txt")?;
        repo.commit("add a.txt")?;

        repo.create_branch("feature", None)?;

        // Commit b.txt on main — this is the commit we will cherry-pick.
        fs::write(dir.path().join("b.txt"), "content of b")?;
        repo.add("b.txt")?;
        let b_commit = repo.commit("add b.txt")?;

        repo.checkout("feature")?;

        // Add a commit on feature so parent_ids differ from the original b.txt commit
        fs::write(dir.path().join("c.txt"), "content of c")?;
        repo.add("c.txt")?;
        repo.commit("add c.txt on feature")?;

        repo.cherry_pick(&b_commit)?;

        // The picked file is materialized with the original content...
        assert!(dir.path().join("b.txt").exists());
        let content = fs::read_to_string(dir.path().join("b.txt"))?;
        assert_eq!(content, "content of b");

        // ...and the original commit message appears in this branch's log.
        let log = repo.log(None)?;
        assert!(log.iter().any(|p| p.message == "add b.txt"));

        Ok(())
    }

    // Cherry-picking an unknown patch id must error.
    #[test]
    fn test_cherry_pick_nonexistent() {
        let dir = tempfile::tempdir().unwrap();
        let mut repo = Repository::init(dir.path(), "alice").unwrap();

        let fake_hash = Hash::from_data(b"nonexistent");
        let result = repo.cherry_pick(&fake_hash);
        assert!(result.is_err());
    }
4831
4832    #[test]
4833    fn test_rebase() -> Result<(), Box<dyn std::error::Error>> {
4834        let dir = tempfile::tempdir().unwrap();
4835        let mut repo = Repository::init(dir.path(), "alice")?;
4836
4837        fs::write(dir.path().join("a.txt"), "content of a")?;
4838        repo.add("a.txt")?;
4839        repo.commit("add a.txt")?;
4840
4841        repo.create_branch("feature", None)?;
4842
4843        repo.checkout("feature")?;
4844        fs::write(dir.path().join("b.txt"), "content of b")?;
4845        repo.add("b.txt")?;
4846        repo.commit("add b.txt on feature")?;
4847
4848        repo.checkout("main")?;
4849        fs::write(dir.path().join("c.txt"), "content of c")?;
4850        repo.add("c.txt")?;
4851        repo.commit("add c.txt on main")?;
4852
4853        repo.checkout("feature")?;
4854
4855        let result = repo.rebase("main")?;
4856        assert!(result.patches_replayed > 0);
4857
4858        assert!(dir.path().join("b.txt").exists());
4859        assert!(dir.path().join("c.txt").exists());
4860
4861        let log = repo.log(None)?;
4862        assert!(log.iter().any(|p| p.message == "add b.txt on feature"));
4863        assert!(log.iter().any(|p| p.message == "add c.txt on main"));
4864
4865        Ok(())
4866    }
4867
4868    #[test]
4869    fn test_rebase_fast_forward() -> Result<(), Box<dyn std::error::Error>> {
4870        let dir = tempfile::tempdir().unwrap();
4871        let mut repo = Repository::init(dir.path(), "alice")?;
4872
4873        fs::write(dir.path().join("a.txt"), "content of a")?;
4874        repo.add("a.txt")?;
4875        repo.commit("add a.txt")?;
4876
4877        repo.create_branch("feature", None)?;
4878
4879        fs::write(dir.path().join("b.txt"), "content of b")?;
4880        repo.add("b.txt")?;
4881        repo.commit("add b.txt")?;
4882
4883        repo.checkout("feature")?;
4884
4885        let result = repo.rebase("main")?;
4886        assert_eq!(result.patches_replayed, 0);
4887
4888        assert!(dir.path().join("b.txt").exists());
4889
4890        Ok(())
4891    }
4892
4893    #[test]
4894    fn test_blame() -> Result<(), Box<dyn std::error::Error>> {
4895        let dir = tempfile::tempdir().unwrap();
4896        let mut repo = Repository::init(dir.path(), "alice")?;
4897
4898        fs::write(dir.path().join("test.txt"), "line1\nline2\nline3")?;
4899        repo.add("test.txt")?;
4900        let first_commit = repo.commit("initial content")?;
4901
4902        fs::write(dir.path().join("test.txt"), "line1\nline2-modified\nline3")?;
4903        repo.add("test.txt")?;
4904        let second_commit = repo.commit("modify line2")?;
4905
4906        let blame = repo.blame("test.txt")?;
4907
4908        assert_eq!(blame.len(), 3);
4909        assert_eq!(blame[0].line, "line1");
4910        assert_eq!(blame[0].patch_id, first_commit);
4911
4912        assert_eq!(blame[1].line, "line2-modified");
4913        assert_eq!(blame[1].patch_id, second_commit);
4914
4915        assert_eq!(blame[2].line, "line3");
4916        assert_eq!(blame[2].patch_id, first_commit);
4917
4918        Ok(())
4919    }
4920
4921    #[test]
4922    fn test_blame_nonexistent_file() {
4923        let dir = tempfile::tempdir().unwrap();
4924        let repo = Repository::init(dir.path(), "alice").unwrap();
4925
4926        let result = repo.blame("nonexistent.txt");
4927        assert!(result.is_err());
4928    }
4929
4930    #[test]
4931    fn test_rm_file() -> Result<(), Box<dyn std::error::Error>> {
4932        let dir = tempfile::tempdir().unwrap();
4933        let mut repo = Repository::init(dir.path(), "alice")?;
4934
4935        fs::write(dir.path().join("test.txt"), "content")?;
4936        repo.add("test.txt")?;
4937        repo.commit("initial")?;
4938
4939        fs::remove_file(dir.path().join("test.txt"))?;
4940        repo.add("test.txt")?;
4941
4942        assert!(!dir.path().join("test.txt").exists());
4943
4944        let ws = repo.meta.working_set()?;
4945        assert_eq!(ws.len(), 1);
4946        assert_eq!(ws[0].0, "test.txt");
4947        assert_eq!(ws[0].1, FileStatus::Deleted);
4948
4949        Ok(())
4950    }
4951
4952    #[test]
4953    fn test_rm_cached() -> Result<(), Box<dyn std::error::Error>> {
4954        let dir = tempfile::tempdir().unwrap();
4955        let mut repo = Repository::init(dir.path(), "alice")?;
4956
4957        fs::write(dir.path().join("test.txt"), "content")?;
4958        repo.add("test.txt")?;
4959        repo.commit("initial")?;
4960
4961        let repo_path = RepoPath::new("test.txt")?;
4962        repo.meta.working_set_add(&repo_path, FileStatus::Deleted)?;
4963
4964        assert!(dir.path().join("test.txt").exists());
4965
4966        let ws = repo.meta.working_set()?;
4967        assert_eq!(ws.len(), 1);
4968        assert_eq!(ws[0].0, "test.txt");
4969        assert_eq!(ws[0].1, FileStatus::Deleted);
4970
4971        Ok(())
4972    }
4973
4974    #[test]
4975    fn test_mv_file() -> Result<(), Box<dyn std::error::Error>> {
4976        let dir = tempfile::tempdir().unwrap();
4977        let mut repo = Repository::init(dir.path(), "alice")?;
4978
4979        fs::write(dir.path().join("old.txt"), "content")?;
4980        repo.add("old.txt")?;
4981        repo.commit("initial")?;
4982
4983        repo.rename_file("old.txt", "new.txt")?;
4984
4985        assert!(!dir.path().join("old.txt").exists());
4986        assert!(dir.path().join("new.txt").exists());
4987
4988        let ws = repo.meta.working_set()?;
4989        assert!(
4990            ws.iter()
4991                .any(|(p, s)| p == "old.txt" && *s == FileStatus::Deleted)
4992        );
4993        assert!(
4994            ws.iter()
4995                .any(|(p, s)| p == "new.txt" && *s == FileStatus::Added)
4996        );
4997
4998        Ok(())
4999    }
5000
5001    #[test]
5002    fn test_mv_nonexistent() {
5003        let dir = tempfile::tempdir().unwrap();
5004        let repo = Repository::init(dir.path(), "alice").unwrap();
5005
5006        let result = repo.rename_file("nonexistent.txt", "new.txt");
5007        assert!(result.is_err());
5008        let err = result.unwrap_err().to_string();
5009        assert!(err.contains("path not found"));
5010    }
5011
5012    #[test]
5013    fn test_remove_remote() -> Result<(), Box<dyn std::error::Error>> {
5014        let dir = tempfile::tempdir().unwrap();
5015        let repo = Repository::init(dir.path(), "alice")?;
5016
5017        repo.add_remote("origin", "http://example.com")?;
5018
5019        let remotes = repo.list_remotes()?;
5020        assert_eq!(remotes.len(), 1);
5021        assert_eq!(remotes[0].0, "origin");
5022
5023        repo.remove_remote("origin")?;
5024
5025        let remotes = repo.list_remotes()?;
5026        assert!(remotes.is_empty());
5027
5028        let result = repo.remove_remote("nonexistent");
5029        assert!(result.is_err());
5030
5031        Ok(())
5032    }
5033}