// grex_core/fs/lock.rs
1//! File-scoped read/write lock backed by `fd-lock`.
2//!
3//! # Semantics
4//!
5//! Both Unix and Windows lock a **dedicated sidecar file** (`lock_path`),
6//! not the manifest itself. Cooperating parties serialize through the
7//! sidecar; reads and writes to the manifest happen by path inside the
8//! critical section.
9//!
10//! * **Unix**: advisory `flock(2)`-style lock.
11//! * **Windows**: `LockFileEx` — mandatory on the locked handle. Because
12//! the locked handle is the sidecar (not the manifest), the mandatory
13//! semantics do **not** propagate to the manifest file itself.
14//!
15//! # Known gap (Windows)
16//!
17//! A non-grex writer on Windows that opens the manifest directly — bypassing
18//! `ManifestLock` — is **not** blocked. An earlier attempt (Fix 4) tried to
19//! lock the manifest handle itself so `LockFileEx` would exclude bypass
20//! writers, but that breaks our cooperating `append_event(path)` API, which
21//! reopens the manifest inside the critical section; the in-process append
22//! would itself be denied by the mandatory byte-range lock. Until the append
23//! API changes to write through the locked handle, the Windows lock is
24//! effectively advisory for bypass writers. Pinned by
25//! `windows_advisory_vs_mandatory_lock`.
26//!
27//! The lock is released when the held guard is dropped.
28
29use fd_lock::RwLock;
30use std::fs::{File, OpenOptions};
31use std::io;
32use std::path::{Path, PathBuf};
33
34/// A cross-process lock guarding manifest reads and writes.
35///
36/// Open once per process, then call [`ManifestLock::read`] /
37/// [`ManifestLock::write`] around the critical section.
pub struct ManifestLock {
    // Open handle to the sidecar lock file, wrapped by `fd-lock`. The
    // guards produced in `read`/`write` borrow this handle; the OS-level
    // lock is released when a guard drops.
    inner: RwLock<File>,
}
41
42impl ManifestLock {
43 /// Open a [`ManifestLock`] that serializes on `lock_path`.
44 ///
45 /// `manifest_path` is accepted for API symmetry with the write path and
46 /// for a future migration to locking the manifest handle directly. It
47 /// is currently unused.
48 pub fn open(manifest_path: &Path, lock_path: &Path) -> io::Result<Self> {
49 let _ = manifest_path;
50 let file = OpenOptions::new()
51 .read(true)
52 .write(true)
53 .create(true)
54 .truncate(false)
55 .open(lock_path)?;
56 Ok(Self { inner: RwLock::new(file) })
57 }
58
59 /// Run `f` while holding a **shared** read lock.
60 ///
61 /// Blocks until the lock is acquired. Other readers may hold the lock
62 /// concurrently; writers are excluded.
63 pub fn read<R>(&mut self, f: impl FnOnce() -> R) -> io::Result<R> {
64 let _guard = self.inner.read()?;
65 Ok(f())
66 }
67
68 /// Run `f` while holding an **exclusive** write lock.
69 ///
70 /// Blocks until the lock is acquired. All other readers and writers are
71 /// excluded.
72 pub fn write<R>(&mut self, f: impl FnOnce() -> R) -> io::Result<R> {
73 let _guard = self.inner.write()?;
74 Ok(f())
75 }
76}
77
78/// A non-blocking cross-process exclusive lock used to serialise
79/// operations on a path-keyed resource (a workspace, a per-repo directory).
80///
81/// Unlike [`ManifestLock`] (which blocks on contention because the critical
82/// section is small and cooperating), `ScopedLock` uses `try_lock_write` and
83/// surfaces the busy condition to the caller. Callers decide whether to
84/// fail fast or retry.
85///
86/// The lock file is created (`O_CREAT`) if missing and kept open for the
87/// lifetime of the struct. A `.lock` suffix is conventional but not required
88/// — any path will do. The lock is released on drop.
89///
90/// # Layering vs `ManifestLock`
91///
92/// `ManifestLock` wraps a blocking read/write critical section around a
93/// manifest append path. `ScopedLock` is a try-lock guard held for an entire
94/// operation (e.g. `sync::run`, `GixBackend::checkout`) where waiting would
95/// be the wrong UX — the user likely launched two processes by accident and
96/// needs to see the collision, not block on a second terminal they forgot
97/// about.
pub struct ScopedLock {
    // Open handle to the sidecar lock file, wrapped by `fd-lock`. Guards
    // returned by `acquire`/`try_acquire` borrow this handle.
    inner: RwLock<File>,
    // Retained so `path()` can report the sidecar's location in error
    // messages (e.g. "remove `<path>` if stale"); not re-opened afterwards.
    path: PathBuf,
}
102
103impl ScopedLock {
104 /// Open (and create if missing) the sidecar lock file at `lock_path`.
105 /// Does **not** acquire the lock — call [`ScopedLock::try_acquire`].
106 ///
107 /// # Errors
108 ///
109 /// Returns any [`io::Error`] from `OpenOptions::open`.
110 pub fn open(lock_path: &Path) -> io::Result<Self> {
111 if let Some(parent) = lock_path.parent() {
112 std::fs::create_dir_all(parent)?;
113 }
114 let file = OpenOptions::new()
115 .read(true)
116 .write(true)
117 .create(true)
118 .truncate(false)
119 .open(lock_path)?;
120 Ok(Self { inner: RwLock::new(file), path: lock_path.to_path_buf() })
121 }
122
123 /// Acquire the exclusive write lock, blocking until it is free.
124 ///
125 /// Use for per-resource serialisation where the right behaviour under
126 /// contention is to wait (e.g. two syncs both wanting to `fetch` the
127 /// same clone — the second simply runs after the first finishes).
128 ///
129 /// # Errors
130 ///
131 /// Propagates any OS-level lock error from `fd-lock`.
132 pub fn acquire(&mut self) -> io::Result<fd_lock::RwLockWriteGuard<'_, File>> {
133 self.inner.write()
134 }
135
136 /// Try to acquire the exclusive write lock without blocking.
137 ///
138 /// Returns `Ok(Some(guard))` on success, `Ok(None)` if another process /
139 /// thread already holds the lock, or `Err(e)` on an unexpected OS error.
140 pub fn try_acquire(&mut self) -> io::Result<Option<fd_lock::RwLockWriteGuard<'_, File>>> {
141 match self.inner.try_write() {
142 Ok(guard) => Ok(Some(guard)),
143 Err(e) => {
144 // fd-lock exposes the contended condition via `WouldBlock`
145 // on Unix and `ERROR_LOCK_VIOLATION`/`WouldBlock` on Windows.
146 // Map both to `Ok(None)` so callers distinguish "busy" from
147 // "I/O went wrong".
148 if e.kind() == io::ErrorKind::WouldBlock {
149 Ok(None)
150 } else {
151 Err(e)
152 }
153 }
154 }
155 }
156
157 /// Return the filesystem path of the sidecar lock file. Useful for
158 /// error messages — e.g. "remove `<path>` if stale".
159 #[must_use]
160 pub fn path(&self) -> &Path {
161 &self.path
162 }
163}
164
165impl std::fmt::Debug for ScopedLock {
166 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
167 f.debug_struct("ScopedLock").field("path", &self.path).finish()
168 }
169}
170
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    #[test]
    fn open_creates_lock_file() {
        let tmp = tempdir().unwrap();
        let manifest = tmp.path().join(".grex/events.jsonl");
        let sidecar = tmp.path().join(".grex.lock");
        let _lock = ManifestLock::open(&manifest, &sidecar).unwrap();
        assert!(sidecar.exists());
    }

    #[test]
    fn read_runs_closure() {
        let tmp = tempdir().unwrap();
        let manifest = tmp.path().join(".grex/events.jsonl");
        let sidecar = tmp.path().join(".grex.lock");
        let mut lock = ManifestLock::open(&manifest, &sidecar).unwrap();
        let got = lock.read(|| 42u32).unwrap();
        assert_eq!(got, 42);
    }

    #[test]
    fn write_runs_closure() {
        let tmp = tempdir().unwrap();
        let manifest = tmp.path().join(".grex/events.jsonl");
        let sidecar = tmp.path().join(".grex.lock");
        let mut lock = ManifestLock::open(&manifest, &sidecar).unwrap();
        let got = lock.write(|| "ok").unwrap();
        assert_eq!(got, "ok");
    }

    #[test]
    fn scoped_lock_creates_parent() {
        let tmp = tempdir().unwrap();
        // Parent directory does not exist yet; `open` must create it.
        let sidecar = tmp.path().join("nested").join(".grex.sync.lock");
        let _lock = ScopedLock::open(&sidecar).unwrap();
        assert!(sidecar.exists());
    }

    #[test]
    fn scoped_lock_try_acquire_succeeds_once() {
        let tmp = tempdir().unwrap();
        let sidecar = tmp.path().join(".grex.sync.lock");
        let mut lock = ScopedLock::open(&sidecar).unwrap();
        let guard = lock.try_acquire().unwrap();
        assert!(guard.is_some(), "first acquire must succeed");
    }

    #[test]
    fn scoped_lock_second_acquire_reports_busy() {
        let tmp = tempdir().unwrap();
        let sidecar = tmp.path().join(".grex.sync.lock");
        let mut first = ScopedLock::open(&sidecar).unwrap();
        let mut second = ScopedLock::open(&sidecar).unwrap();
        let _held = first.try_acquire().unwrap().expect("first acquires");
        let busy = second.try_acquire().unwrap();
        assert!(busy.is_none(), "second acquire must report busy while first held");
    }

    #[test]
    fn scoped_lock_reacquire_after_drop() {
        let tmp = tempdir().unwrap();
        let sidecar = tmp.path().join(".grex.sync.lock");
        let mut first = ScopedLock::open(&sidecar).unwrap();
        {
            // Guard dropped at end of this scope, releasing the lock.
            let _held = first.try_acquire().unwrap().expect("held");
        }
        let mut second = ScopedLock::open(&sidecar).unwrap();
        let reacquired = second.try_acquire().unwrap();
        assert!(reacquired.is_some(), "lock reacquires after first guard drops");
    }
}
245}