// reddb_server/storage/wal/transaction.rs
1//! Transaction Manager
2//!
3//! Provides ACID transaction support for the RedDB storage engine.
4//!
5//! # Transaction Lifecycle
6//!
7//! 1. Begin: Allocate transaction ID, write Begin record to WAL
8//! 2. Read/Write: Track page reads and buffer page writes
9//! 3. Commit: Write Commit record to WAL, sync WAL
10//! 4. Rollback: Write Rollback record to WAL, discard buffered writes
11//!
12//! # Isolation Level
13//!
14//! Currently implements Read Committed isolation:
15//! - Reads see committed data at the start of the statement
16//! - No dirty reads
17//! - Possible non-repeatable reads
18//!
19//! # References
20//!
21//! - Turso `core/transaction.rs` - Transaction implementation
22//! - SQLite transaction documentation
23
24use std::collections::HashMap;
25use std::io;
26use std::path::{Path, PathBuf};
27use std::sync::atomic::{AtomicU64, Ordering};
28use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard};
29
30use parking_lot::{Mutex, MutexGuard};
31
32use super::append_coordinator::WalAppendCoordinator;
33use super::record::WalRecord;
34use super::writer::WalWriter;
35use crate::storage::engine::{Page, Pager, PAGE_SIZE};
36
/// Global transaction ID counter.
///
/// Monotonically increasing for the lifetime of the process; IDs are
/// not persisted, so numbering restarts at 1 on every process start.
static NEXT_TX_ID: AtomicU64 = AtomicU64::new(1);

/// Generate a new unique transaction ID.
///
/// `Relaxed` ordering is sufficient: the counter only has to hand out
/// distinct values atomically — it does not publish or synchronise any
/// other memory, so the full `SeqCst` fence previously paid here bought
/// nothing. Uniqueness is guaranteed by the atomic RMW itself.
fn next_transaction_id() -> u64 {
    NEXT_TX_ID.fetch_add(1, Ordering::Relaxed)
}
44
/// Transaction state
///
/// A transaction starts `Active` and moves to exactly one terminal
/// state (`Committed` or `Aborted`); no further transitions occur.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TxState {
    /// Transaction is active and can perform operations
    Active,
    /// Transaction has been committed (terminal)
    Committed,
    /// Transaction has been rolled back (terminal)
    Aborted,
}
55
/// Transaction error types
#[derive(Debug)]
pub enum TxError {
    /// I/O error (WAL append/sync path, or converted via `From<io::Error>`)
    Io(io::Error),
    /// Pager error (stringified from the pager's own error type)
    Pager(String),
    /// Internal lock was poisoned by a panic.
    ///
    /// NOTE(review): since the WAL mutex moved to `parking_lot`,
    /// `wal_writer()` documents this error arm as unreachable in normal
    /// operation — presumably the variant is retained so existing
    /// matches keep compiling. Confirm before removing.
    LockPoisoned(&'static str),
    /// Transaction is not active
    NotActive,
    /// Transaction already committed
    AlreadyCommitted,
    /// Transaction already aborted
    AlreadyAborted,
    /// Write conflict on the given page id.
    ///
    /// NOTE(review): not constructed anywhere in this file yet —
    /// presumably reserved for future conflict detection.
    WriteConflict(u32),
    /// Invalid page data
    InvalidPage(String),
}
76
77impl std::fmt::Display for TxError {
78    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
79        match self {
80            Self::Io(e) => write!(f, "I/O error: {}", e),
81            Self::Pager(msg) => write!(f, "Pager error: {}", msg),
82            Self::LockPoisoned(name) => write!(f, "Lock poisoned: {}", name),
83            Self::NotActive => write!(f, "Transaction is not active"),
84            Self::AlreadyCommitted => write!(f, "Transaction already committed"),
85            Self::AlreadyAborted => write!(f, "Transaction already aborted"),
86            Self::WriteConflict(page_id) => write!(f, "Write conflict on page {}", page_id),
87            Self::InvalidPage(msg) => write!(f, "Invalid page: {}", msg),
88        }
89    }
90}
91
92impl std::error::Error for TxError {}
93
94impl From<io::Error> for TxError {
95    fn from(e: io::Error) -> Self {
96        Self::Io(e)
97    }
98}
99
/// A buffered page write
///
/// Stored in `Transaction::write_set`, keyed by page id.
///
/// NOTE(review): `page_id` duplicates the map key — the commit loop
/// iterates the map and uses the *key*, not this field. Presumably kept
/// so the record is self-describing; confirm before removing.
#[derive(Clone)]
struct BufferedWrite {
    // Page this image belongs to (mirrors the write_set key).
    page_id: u32,
    // Full page image; becomes a WAL `PageWrite` record on commit.
    data: [u8; PAGE_SIZE],
}
106
/// A single transaction
///
/// Transactions buffer writes and commit them atomically to the WAL.
/// The handle is consumed by `commit`/`rollback`; dropping a still
/// active transaction performs a best-effort rollback (see `Drop`).
pub struct Transaction {
    /// Transaction ID
    id: u64,
    /// Transaction state
    state: TxState,
    /// Buffered page writes (page_id -> page data)
    write_set: HashMap<u32, BufferedWrite>,
    /// Pages read in this transaction (for conflict detection).
    ///
    /// NOTE(review): nothing in this file reads the set back — commit
    /// does not yet perform conflict detection. Verify whether a later
    /// layer consumes it or it is groundwork for future work.
    read_set: Vec<u32>,
    /// Reference to the transaction manager
    manager: Arc<TransactionManager>,
}
122
123impl Transaction {
124    /// Get transaction ID
125    pub fn id(&self) -> u64 {
126        self.id
127    }
128
129    /// Get transaction state
130    pub fn state(&self) -> TxState {
131        self.state
132    }
133
134    /// Check if transaction is active
135    pub fn is_active(&self) -> bool {
136        self.state == TxState::Active
137    }
138
139    /// Read a page through this transaction
140    ///
141    /// If the page has been written in this transaction, returns the buffered version.
142    /// Otherwise, reads from the pager.
143    pub fn read_page(&mut self, page_id: u32) -> Result<Page, TxError> {
144        if self.state != TxState::Active {
145            return Err(TxError::NotActive);
146        }
147
148        // Check write set first
149        if let Some(buffered) = self.write_set.get(&page_id) {
150            return Ok(Page::from_bytes(buffered.data));
151        }
152
153        // Track the read
154        self.read_set.push(page_id);
155
156        // Read from pager
157        self.manager
158            .pager
159            .read_page(page_id)
160            .map_err(|e| TxError::Pager(e.to_string()))
161    }
162
163    /// Write a page through this transaction
164    ///
165    /// The write is buffered and will be committed to the WAL on commit.
166    pub fn write_page(&mut self, page_id: u32, page: Page) -> Result<(), TxError> {
167        if self.state != TxState::Active {
168            return Err(TxError::NotActive);
169        }
170
171        // Buffer the write
172        let mut data = [0u8; PAGE_SIZE];
173        data.copy_from_slice(page.as_bytes());
174
175        self.write_set
176            .insert(page_id, BufferedWrite { page_id, data });
177
178        Ok(())
179    }
180
181    /// Commit the transaction
182    ///
183    /// Writes all buffered pages to the WAL, then writes a Commit record.
184    ///
185    /// **Read-only fast path:** when `write_set` is empty, the
186    /// transaction wrote nothing, so there is nothing to make
187    /// durable. We skip the WAL append, the `wal.sync()` (which costs
188    /// ~100 µs of fsync), and the pager apply loop entirely. The
189    /// transaction still transitions to `Committed` and unregisters
190    /// from the manager so subsequent state checks work correctly.
191    /// This mirrors postgres' optimisation in `RecordTransactionCommit`
192    /// (`xact.c`) which skips `XLogFlush` when nothing was written.
193    pub fn commit(mut self) -> Result<(), TxError> {
194        if self.state != TxState::Active {
195            return match self.state {
196                TxState::Committed => Err(TxError::AlreadyCommitted),
197                TxState::Aborted => Err(TxError::AlreadyAborted),
198                _ => Err(TxError::NotActive),
199            };
200        }
201
202        // ── Read-only fast path ─────────────────────────────────────
203        // No writes → no WAL record → no fsync. Saves ~100 µs per
204        // read-only commit and removes contention on the WAL writer
205        // mutex for read-heavy workloads.
206        if self.write_set.is_empty() {
207            self.state = TxState::Committed;
208            self.manager.unregister_transaction(self.id);
209            return Ok(());
210        }
211
212        // ── Encode phase (no lock) ──────────────────────────────────
213        // Encode every WAL record into one contiguous byte blob
214        // OUTSIDE any lock. This is the bulk of the per-commit work
215        // and pays no contention cost — the old path encoded each
216        // record while holding `Mutex<WalWriter>`, which under 16-way
217        // concurrency produced the park-convoy that bottlenecked
218        // `concurrent` and `insert_sequential`.
219        let mut blob = Vec::with_capacity(64 + self.write_set.len() * (PAGE_SIZE + 32));
220        for (page_id, buffered) in &self.write_set {
221            let record = WalRecord::PageWrite {
222                tx_id: self.id,
223                page_id: *page_id,
224                data: buffered.data.to_vec(),
225            };
226            blob.extend_from_slice(&record.encode());
227        }
228        blob.extend_from_slice(&WalRecord::Commit { tx_id: self.id }.encode());
229
230        // ── Reserve + enqueue (lock-free) ───────────────────────────
231        // One atomic fetch_add reserves our LSN range; one
232        // SegQueue::push hands the bytes to the leader. Both
233        // operations are wait-free for the writer.
234        let commit_lsn = self.manager.coordinator.reserve_and_enqueue(blob);
235
236        // ── Wait for durability ─────────────────────────────────────
237        // If `durable_lsn >= commit_lsn` already, the leader (some
238        // earlier thread) covered us — return immediately. Otherwise
239        // we either become the leader and drive the drain, or park
240        // on the coordinator's parking_lot::Condvar until a leader
241        // publishes a `durable_lsn` past our target.
242        self.manager
243            .coordinator
244            .commit_at_least(commit_lsn, &self.manager.wal)
245            .map_err(TxError::Io)?;
246
247        // Apply writes to pager cache (for immediate visibility)
248        for (page_id, buffered) in &self.write_set {
249            let page = Page::from_bytes(buffered.data);
250            self.manager
251                .pager
252                .write_page(*page_id, page)
253                .map_err(|e| TxError::Pager(e.to_string()))?;
254        }
255
256        self.state = TxState::Committed;
257
258        // Unregister from manager
259        self.manager.unregister_transaction(self.id);
260
261        Ok(())
262    }
263
264    /// Rollback the transaction
265    ///
266    /// Discards all buffered writes and writes a Rollback record to the WAL.
267    pub fn rollback(mut self) -> Result<(), TxError> {
268        if self.state != TxState::Active {
269            return match self.state {
270                TxState::Committed => Err(TxError::AlreadyCommitted),
271                TxState::Aborted => Err(TxError::AlreadyAborted),
272                _ => Err(TxError::NotActive),
273            };
274        }
275
276        // Route rollback through the coordinator so its bytes land
277        // in LSN order with any concurrent commits. Going around the
278        // coordinator (direct `wal.lock().append`) would race with
279        // the leader's `append_bytes` and corrupt the file.
280        let blob = WalRecord::Rollback { tx_id: self.id }.encode();
281        let target = self.manager.coordinator.reserve_and_enqueue(blob);
282        self.manager
283            .coordinator
284            .commit_at_least(target, &self.manager.wal)
285            .map_err(TxError::Io)?;
286
287        // Clear write set
288        self.write_set.clear();
289        self.state = TxState::Aborted;
290
291        // Unregister from manager
292        self.manager.unregister_transaction(self.id);
293
294        Ok(())
295    }
296}
297
298impl Drop for Transaction {
299    fn drop(&mut self) {
300        // If transaction is still active when dropped, it means it was neither
301        // committed nor rolled back. This is a bug, but we'll clean up anyway.
302        if self.state == TxState::Active {
303            // Best-effort rollback through the coordinator. We can't
304            // bypass the coordinator with a direct `wal.lock()` here:
305            // any in-flight reservations would still be holding LSN
306            // slots ahead of us and the file would gain a hole. The
307            // coordinator handles ordering correctly even from Drop.
308            let blob = WalRecord::Rollback { tx_id: self.id }.encode();
309            let target = self.manager.coordinator.reserve_and_enqueue(blob);
310            let _ = self
311                .manager
312                .coordinator
313                .commit_at_least(target, &self.manager.wal);
314            self.manager.unregister_transaction(self.id);
315        }
316    }
317}
318
/// Transaction Manager
///
/// Coordinates transactions and manages the WAL. Shared behind an
/// `Arc` — see `begin`, which takes `self: &Arc<Self>` so each
/// `Transaction` can hold a reference back to its manager.
pub struct TransactionManager {
    /// Pager for reading/writing pages
    pager: Arc<Pager>,
    /// WAL writer protected by a `parking_lot::Mutex`. The mutex is
    /// taken only by the leader-flush path inside the coordinator;
    /// concurrent writers no longer contend on it during the
    /// encode-and-append step. See [`WalAppendCoordinator`].
    wal: Mutex<WalWriter>,
    /// WAL file path
    wal_path: PathBuf,
    /// Active transaction IDs. A `std::sync::RwLock`; the accessors
    /// recover from poisoning rather than panicking, since the id
    /// list is always internally consistent.
    active_transactions: RwLock<Vec<u64>>,
    /// Lock-free append coordinator. Replaces the per-commit
    /// `wal.lock()` that used to serialise 16 concurrent writers
    /// (Roadmap #2 / issue #157). Writers reserve an LSN range via
    /// atomic fetch_add, push their encoded bytes onto a SegQueue,
    /// then call `commit_at_least` to wait for durability. The
    /// first thread into `commit_at_least` becomes the leader and
    /// drives the WAL drain + fsync; everyone else parks on the
    /// coordinator's condvar.
    coordinator: WalAppendCoordinator,
}
344
345impl TransactionManager {
346    /// Create a new transaction manager
347    ///
348    /// # Arguments
349    ///
350    /// * `pager` - The pager to use for page I/O
351    /// * `wal_path` - Path to the WAL file
352    pub fn new(pager: Arc<Pager>, wal_path: impl AsRef<Path>) -> io::Result<Self> {
353        let wal_path = wal_path.as_ref().to_path_buf();
354        let wal = WalWriter::open(&wal_path)?;
355        let initial_current = wal.current_lsn();
356        let initial_durable = wal.durable_lsn();
357
358        Ok(Self {
359            pager,
360            wal: Mutex::new(wal),
361            wal_path,
362            active_transactions: RwLock::new(Vec::new()),
363            coordinator: WalAppendCoordinator::new(initial_current, initial_durable),
364        })
365    }
366
367    fn wal_writer(&self) -> Result<MutexGuard<'_, WalWriter>, TxError> {
368        // parking_lot::Mutex does not poison on panic, so this is
369        // infallible. We keep the `Result` return type to avoid
370        // touching every call site, but the error variant is now
371        // unreachable in normal operation. Tests that previously
372        // poisoned the std::sync::Mutex deliberately have been
373        // adjusted to assert the new non-poisoning behaviour.
374        Ok(self.wal.lock())
375    }
376
377    fn active_transactions_write(&self) -> RwLockWriteGuard<'_, Vec<u64>> {
378        self.active_transactions
379            .write()
380            .unwrap_or_else(|poisoned| poisoned.into_inner())
381    }
382
383    fn active_transactions_read(&self) -> RwLockReadGuard<'_, Vec<u64>> {
384        self.active_transactions
385            .read()
386            .unwrap_or_else(|poisoned| poisoned.into_inner())
387    }
388
389    /// Begin a new transaction
390    pub fn begin(self: &Arc<Self>) -> Result<Transaction, TxError> {
391        let tx_id = next_transaction_id();
392
393        // Route the Begin record through the coordinator (same
394        // ordering guarantees as commit/rollback). Begin is not
395        // strictly required to be durable before subsequent appends
396        // — recovery treats a Begin without a matching Commit as a
397        // rolled-back txn — so we don't wait on `commit_at_least`.
398        // The bytes are queued and the next leader picks them up
399        // alongside our own Commit record when we eventually commit.
400        let blob = WalRecord::Begin { tx_id }.encode();
401        let _begin_lsn = self.coordinator.reserve_and_enqueue(blob);
402
403        // Register transaction
404        {
405            let mut active = self.active_transactions_write();
406            active.push(tx_id);
407        }
408
409        Ok(Transaction {
410            id: tx_id,
411            state: TxState::Active,
412            write_set: HashMap::new(),
413            read_set: Vec::new(),
414            manager: Arc::clone(self),
415        })
416    }
417
418    /// Unregister a transaction (called on commit/rollback)
419    fn unregister_transaction(&self, tx_id: u64) {
420        let mut active = self.active_transactions_write();
421        active.retain(|&id| id != tx_id);
422    }
423
424    /// Get list of active transaction IDs
425    pub fn active_transactions(&self) -> Vec<u64> {
426        self.active_transactions_read().clone()
427    }
428
429    /// Get WAL file path
430    pub fn wal_path(&self) -> &Path {
431        &self.wal_path
432    }
433
434    /// Get reference to pager
435    pub fn pager(&self) -> &Arc<Pager> {
436        &self.pager
437    }
438
439    /// Sync WAL to disk. Drains every byte that has been reserved
440    /// via the coordinator — i.e. waits until the coordinator's
441    /// `next_lsn` is durable.
442    pub fn sync_wal(&self) -> io::Result<()> {
443        let target = self.coordinator.next_lsn();
444        self.coordinator.commit_at_least(target, &self.wal)
445    }
446
447    /// Check if there are active transactions
448    pub fn has_active_transactions(&self) -> bool {
449        !self.active_transactions_read().is_empty()
450    }
451}
452
#[cfg(test)]
mod tests {
    use super::*;
    use crate::storage::engine::PageType;
    use std::fs;
    use std::time::{SystemTime, UNIX_EPOCH};

    /// Build a unique temp-directory path from a nanosecond timestamp.
    /// The directory is NOT created here — each test `create_dir_all`s it.
    fn temp_dir() -> PathBuf {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_nanos();
        std::env::temp_dir().join(format!("reddb_tx_test_{}", timestamp))
    }

    /// Best-effort removal of a test directory; errors are ignored.
    fn cleanup(dir: &Path) {
        let _ = fs::remove_dir_all(dir);
    }

    #[test]
    fn test_transaction_commit() {
        let dir = temp_dir();
        let _ = fs::create_dir_all(&dir);
        let db_path = dir.join("test.db");
        let wal_path = dir.join("test.wal");

        // Create pager
        let pager = Arc::new(Pager::open_default(&db_path).unwrap());

        // Allocate a page
        let page = pager.allocate_page(PageType::BTreeLeaf).unwrap();
        let page_id = page.page_id();

        // Create transaction manager
        let tm = Arc::new(TransactionManager::new(Arc::clone(&pager), &wal_path).unwrap());

        // Begin transaction
        let mut tx = tm.begin().unwrap();
        assert!(tx.is_active());

        // Write through transaction
        let mut page = Page::new(PageType::BTreeLeaf, page_id);
        page.as_bytes_mut()[100] = 0xAB;
        tx.write_page(page_id, page).unwrap();

        // Read through transaction (should see buffered write)
        let read_page = tx.read_page(page_id).unwrap();
        assert_eq!(read_page.as_bytes()[100], 0xAB);

        // Commit
        tx.commit().unwrap();

        // Verify write is visible through pager
        let final_page = pager.read_page(page_id).unwrap();
        assert_eq!(final_page.as_bytes()[100], 0xAB);

        cleanup(&dir);
    }

    #[test]
    fn test_transaction_rollback() {
        let dir = temp_dir();
        let _ = fs::create_dir_all(&dir);
        let db_path = dir.join("test.db");
        let wal_path = dir.join("test.wal");

        // Create pager
        let pager = Arc::new(Pager::open_default(&db_path).unwrap());

        // Allocate a page and write initial value
        let mut page = pager.allocate_page(PageType::BTreeLeaf).unwrap();
        let page_id = page.page_id();
        page.as_bytes_mut()[100] = 0x11;
        pager.write_page(page_id, page).unwrap();

        // Create transaction manager
        let tm = Arc::new(TransactionManager::new(Arc::clone(&pager), &wal_path).unwrap());

        // Begin transaction
        let mut tx = tm.begin().unwrap();

        // Write through transaction
        let mut page = Page::new(PageType::BTreeLeaf, page_id);
        page.as_bytes_mut()[100] = 0xAB;
        tx.write_page(page_id, page).unwrap();

        // Read through transaction (should see buffered write)
        let read_page = tx.read_page(page_id).unwrap();
        assert_eq!(read_page.as_bytes()[100], 0xAB);

        // Rollback: buffered write must never reach the pager
        tx.rollback().unwrap();

        // Original value should be preserved
        let final_page = pager.read_page(page_id).unwrap();
        assert_eq!(final_page.as_bytes()[100], 0x11);

        cleanup(&dir);
    }

    #[test]
    fn test_multiple_transactions() {
        let dir = temp_dir();
        let _ = fs::create_dir_all(&dir);
        let db_path = dir.join("test.db");
        let wal_path = dir.join("test.wal");

        // Create pager
        let pager = Arc::new(Pager::open_default(&db_path).unwrap());

        // Allocate two pages
        let page1 = pager.allocate_page(PageType::BTreeLeaf).unwrap();
        let page2 = pager.allocate_page(PageType::BTreeLeaf).unwrap();
        let page1_id = page1.page_id();
        let page2_id = page2.page_id();

        // Create transaction manager
        let tm = Arc::new(TransactionManager::new(Arc::clone(&pager), &wal_path).unwrap());

        // Transaction 1: Write to page 1
        let mut tx1 = tm.begin().unwrap();
        let mut page1 = Page::new(PageType::BTreeLeaf, page1_id);
        page1.as_bytes_mut()[100] = 0x11;
        tx1.write_page(page1_id, page1).unwrap();
        tx1.commit().unwrap();

        // Transaction 2: Write to page 2
        let mut tx2 = tm.begin().unwrap();
        let mut page2 = Page::new(PageType::BTreeLeaf, page2_id);
        page2.as_bytes_mut()[100] = 0x22;
        tx2.write_page(page2_id, page2).unwrap();
        tx2.commit().unwrap();

        // Verify both writes landed independently
        let final_page1 = pager.read_page(page1_id).unwrap();
        let final_page2 = pager.read_page(page2_id).unwrap();
        assert_eq!(final_page1.as_bytes()[100], 0x11);
        assert_eq!(final_page2.as_bytes()[100], 0x22);

        cleanup(&dir);
    }

    #[test]
    fn test_transaction_isolation() {
        let dir = temp_dir();
        let _ = fs::create_dir_all(&dir);
        let db_path = dir.join("test.db");
        let wal_path = dir.join("test.wal");

        // Create pager
        let pager = Arc::new(Pager::open_default(&db_path).unwrap());

        // Allocate a page with initial value
        let mut page = pager.allocate_page(PageType::BTreeLeaf).unwrap();
        let page_id = page.page_id();
        page.as_bytes_mut()[100] = 0x00;
        pager.write_page(page_id, page).unwrap();

        // Create transaction manager
        let tm = Arc::new(TransactionManager::new(Arc::clone(&pager), &wal_path).unwrap());

        // Transaction 1: Begin and write (but don't commit yet)
        let mut tx1 = tm.begin().unwrap();
        let mut page1 = Page::new(PageType::BTreeLeaf, page_id);
        page1.as_bytes_mut()[100] = 0x11;
        tx1.write_page(page_id, page1).unwrap();

        // Transaction 1 should see its own write (read-your-own-writes)
        let tx1_read = tx1.read_page(page_id).unwrap();
        assert_eq!(tx1_read.as_bytes()[100], 0x11);

        // Another read from pager should not see uncommitted write
        // (no dirty reads — Read Committed)
        let pager_read = pager.read_page(page_id).unwrap();
        assert_eq!(pager_read.as_bytes()[100], 0x00);

        // Commit tx1
        tx1.commit().unwrap();

        // Now pager should see the write
        let final_read = pager.read_page(page_id).unwrap();
        assert_eq!(final_read.as_bytes()[100], 0x11);

        cleanup(&dir);
    }

    #[test]
    fn test_active_transaction_tracking() {
        let dir = temp_dir();
        let _ = fs::create_dir_all(&dir);
        let db_path = dir.join("test.db");
        let wal_path = dir.join("test.wal");

        let pager = Arc::new(Pager::open_default(&db_path).unwrap());
        let tm = Arc::new(TransactionManager::new(Arc::clone(&pager), &wal_path).unwrap());

        assert!(!tm.has_active_transactions());

        let tx1 = tm.begin().unwrap();
        let tx1_id = tx1.id();
        assert!(tm.has_active_transactions());
        assert!(tm.active_transactions().contains(&tx1_id));

        let tx2 = tm.begin().unwrap();
        let tx2_id = tx2.id();
        assert_eq!(tm.active_transactions().len(), 2);

        // Commit unregisters tx1 but leaves tx2 registered
        tx1.commit().unwrap();
        assert!(!tm.active_transactions().contains(&tx1_id));
        assert!(tm.active_transactions().contains(&tx2_id));

        // Rollback also unregisters
        tx2.rollback().unwrap();
        assert!(!tm.has_active_transactions());

        cleanup(&dir);
    }

    #[test]
    fn test_transaction_double_commit() {
        let dir = temp_dir();
        let _ = fs::create_dir_all(&dir);
        let db_path = dir.join("test.db");
        let wal_path = dir.join("test.wal");

        let pager = Arc::new(Pager::open_default(&db_path).unwrap());
        let tm = Arc::new(TransactionManager::new(Arc::clone(&pager), &wal_path).unwrap());

        // The transaction is consumed on commit, so double commit is impossible at compile time
        // This test just verifies commit works
        let tx = tm.begin().unwrap();
        tx.commit().unwrap();

        cleanup(&dir);
    }

    #[test]
    fn test_begin_succeeds_after_panic_in_lock_holder() {
        // After Roadmap #2 the WAL mutex is `parking_lot::Mutex`,
        // which does NOT poison on panic. A previous version of this
        // test asserted that a panicking thread holding the lock
        // produced a `TxError::LockPoisoned` on the next `begin()` —
        // that was a property of `std::sync::Mutex`. The new
        // contract is the opposite: writers continue uninterrupted.
        let dir = temp_dir();
        let _ = fs::create_dir_all(&dir);
        let db_path = dir.join("test.db");
        let wal_path = dir.join("test.wal");

        let pager = Arc::new(Pager::open_default(&db_path).unwrap());
        let tm = Arc::new(TransactionManager::new(Arc::clone(&pager), &wal_path).unwrap());

        // Take the WAL mutex on another thread and panic while holding it.
        let poison_target = Arc::clone(&tm);
        let _ = std::thread::spawn(move || {
            let _guard = poison_target.wal.lock();
            panic!("would-poison the wal mutex on std::sync");
        })
        .join();

        // parking_lot recovers transparently. begin() must succeed.
        match tm.begin() {
            Ok(_) => {}
            Err(err) => panic!("begin must succeed despite prior panic: {err:?}"),
        }

        cleanup(&dir);
    }

    // ---------------------------------------------------------------
    // Perf 1.2: read-only commit fast path
    // ---------------------------------------------------------------

    #[test]
    fn read_only_commit_does_not_advance_durable_lsn() {
        let dir = temp_dir();
        let _ = fs::create_dir_all(&dir);
        let db_path = dir.join("ro_durable.db");
        let wal_path = dir.join("ro_durable.wal");

        let pager = Arc::new(Pager::open_default(&db_path).unwrap());
        let tm = Arc::new(TransactionManager::new(Arc::clone(&pager), &wal_path).unwrap());

        // Snapshot the WAL durable_lsn BEFORE the txn.
        let before = {
            let wal = tm.wal_writer().unwrap();
            wal.durable_lsn()
        };

        let tx = tm.begin().unwrap();
        // Empty write_set on purpose — read-only.
        tx.commit().unwrap();

        // After RO commit, the WAL durable_lsn must NOT have advanced.
        // Note: begin() DID enqueue a Begin record with the coordinator,
        // but the read-only commit never drains the queue — no Commit
        // record, no fsync, so nothing reaches the writer and
        // durable_lsn stays put.
        let after = {
            let wal = tm.wal_writer().unwrap();
            wal.durable_lsn()
        };
        assert_eq!(
            before, after,
            "read-only commit must not advance durable_lsn (was {} → {})",
            before, after
        );

        cleanup(&dir);
    }

    #[test]
    fn read_only_commit_does_not_grow_wal_file() {
        let dir = temp_dir();
        let _ = fs::create_dir_all(&dir);
        let db_path = dir.join("ro_size.db");
        let wal_path = dir.join("ro_size.wal");

        let pager = Arc::new(Pager::open_default(&db_path).unwrap());
        let tm = Arc::new(TransactionManager::new(Arc::clone(&pager), &wal_path).unwrap());

        // Snapshot file size after WAL header.
        let size_before = std::fs::metadata(&wal_path).unwrap().len();
        assert_eq!(
            size_before, 8,
            "fresh WAL must be exactly the 8-byte header"
        );

        // 100 read-only commits in a loop.
        // NOTE(review): each begin() queues a Begin record with the
        // coordinator, but no drain runs because no commit waits on
        // durability — which is why the file stays untouched here.
        for _ in 0..100 {
            let tx = tm.begin().unwrap();
            tx.commit().unwrap();
        }

        let size_after = std::fs::metadata(&wal_path).unwrap().len();
        assert_eq!(
            size_after, size_before,
            "100 read-only commits should not have written any WAL bytes"
        );
        cleanup(&dir);
    }

    #[test]
    fn read_only_commit_marks_transaction_committed() {
        let dir = temp_dir();
        let _ = fs::create_dir_all(&dir);
        let db_path = dir.join("ro_state.db");
        let wal_path = dir.join("ro_state.wal");

        let pager = Arc::new(Pager::open_default(&db_path).unwrap());
        let tm = Arc::new(TransactionManager::new(Arc::clone(&pager), &wal_path).unwrap());

        let tx = tm.begin().unwrap();
        let id = tx.id();
        tx.commit().unwrap();

        // Manager must have unregistered this txn — the active list
        // no longer contains its id.
        assert!(
            !tm.active_transactions().contains(&id),
            "RO-committed txn {id} must no longer be active in the manager"
        );

        cleanup(&dir);
    }

    #[test]
    fn writing_commit_still_syncs_after_ro_fast_path() {
        // Sanity: the fast path must NOT short-circuit a transaction
        // that did write something. Verify the writing commit path
        // still flushes WAL and the value lands in the pager.
        let dir = temp_dir();
        let _ = fs::create_dir_all(&dir);
        let db_path = dir.join("rw_after_ro.db");
        let wal_path = dir.join("rw_after_ro.wal");

        let pager = Arc::new(Pager::open_default(&db_path).unwrap());
        let allocated = pager.allocate_page(PageType::BTreeLeaf).unwrap();
        let page_id = allocated.page_id();
        let tm = Arc::new(TransactionManager::new(Arc::clone(&pager), &wal_path).unwrap());

        // First a RO commit (must take the fast path).
        let ro = tm.begin().unwrap();
        ro.commit().unwrap();

        // Then a real writing commit.
        let mut rw = tm.begin().unwrap();
        let mut page = Page::new(PageType::BTreeLeaf, page_id);
        page.as_bytes_mut()[42] = 0x77;
        rw.write_page(page_id, page).unwrap();
        rw.commit().unwrap();

        // The WAL file must now contain bytes (PageWrite + Commit
        // records, and the BufWriter has been flushed by sync()).
        let size = std::fs::metadata(&wal_path).unwrap().len();
        assert!(size > 8, "writing commit should grow the WAL");

        // The pager cache must reflect the write.
        let read_back = pager.read_page(page_id).unwrap();
        assert_eq!(read_back.as_bytes()[42], 0x77);

        cleanup(&dir);
    }
}