// fsqlite_types/glossary.rs
1//! Glossary types (§0.3).
2//!
3//! This module defines (or re-exports) the core cross-cutting types referenced
4//! throughout the FrankenSQLite specification: MVCC identifiers, SSI witness
5//! keys, and ECS content-addressed identities.
6
7use std::fmt;
8use std::num::NonZeroU64;
9
10use crate::encoding::{
11    append_u16_le, append_u32_le, append_u64_le, read_u16_le, read_u32_le, read_u64_le,
12};
13use crate::{ObjectId, PageData, PageNumber};
14
/// Monotonically increasing transaction identifier.
///
/// Domain: `1..=(2^62 - 1)`.
///
/// The top two bits are reserved for TxnSlot sentinel encoding (CLAIMING /
/// CLEANING) per §5.6.2; sentinel values are *not* represented as `TxnId`.
///
/// Backed by [`NonZeroU64`] so `Option<TxnId>` is niche-filled and the same
/// size as a bare `u64`.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct TxnId(NonZeroU64);
26
27impl TxnId {
28    /// Maximum raw value representable by a real transaction id.
29    pub const MAX_RAW: u64 = (1_u64 << 62) - 1;
30
31    /// Construct a `TxnId` if `raw` is in-domain.
32    #[inline]
33    pub const fn new(raw: u64) -> Option<Self> {
34        if raw > Self::MAX_RAW {
35            return None;
36        }
37        match NonZeroU64::new(raw) {
38            Some(nz) => Some(Self(nz)),
39            None => None,
40        }
41    }
42
43    /// Get the raw u64 value.
44    #[inline]
45    pub const fn get(self) -> u64 {
46        self.0.get()
47    }
48
49    /// Return the next transaction id if it stays in-domain.
50    #[inline]
51    pub const fn checked_next(self) -> Option<Self> {
52        Self::new(self.get().wrapping_add(1))
53    }
54}
55
56impl fmt::Display for TxnId {
57    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
58        write!(f, "txn#{}", self.get())
59    }
60}
61
62impl TryFrom<u64> for TxnId {
63    type Error = InvalidTxnId;
64
65    fn try_from(value: u64) -> Result<Self, Self::Error> {
66        Self::new(value).ok_or(InvalidTxnId { raw: value })
67    }
68}
69
/// Error returned when attempting to construct an out-of-domain `TxnId`.
///
/// Produced by the `TryFrom<u64>` impl; carries the rejected raw value for
/// diagnostics (surfaced via `Display`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct InvalidTxnId {
    // The out-of-domain value the caller supplied (0 or > TxnId::MAX_RAW).
    raw: u64,
}
75
76impl fmt::Display for InvalidTxnId {
77    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
78        write!(
79            f,
80            "invalid TxnId {} (must satisfy 1 <= id <= {})",
81            self.raw,
82            TxnId::MAX_RAW
83        )
84    }
85}
86
87impl std::error::Error for InvalidTxnId {}
88
/// Monotonically increasing global commit sequence number ("commit clock").
///
/// Plain `u64` newtype; `Ord` gives the visibility comparison used by
/// [`Snapshot`] checks.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct CommitSeq(u64);
95
96impl CommitSeq {
97    pub const ZERO: Self = Self(0);
98
99    #[inline]
100    pub const fn new(raw: u64) -> Self {
101        Self(raw)
102    }
103
104    #[inline]
105    pub const fn get(self) -> u64 {
106        self.0
107    }
108
109    #[inline]
110    #[must_use]
111    pub const fn next(self) -> Self {
112        Self(self.0.wrapping_add(1))
113    }
114}
115
116impl fmt::Display for CommitSeq {
117    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
118        write!(f, "cs#{}", self.get())
119    }
120}
121
/// Per-transaction epoch used to disambiguate slot reuse across crashes.
///
/// Paired with a [`TxnId`] in [`TxnToken`] to form a stable identity.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct TxnEpoch(u32);
126
127impl TxnEpoch {
128    #[inline]
129    pub const fn new(raw: u32) -> Self {
130        Self(raw)
131    }
132
133    #[inline]
134    pub const fn get(self) -> u32 {
135        self.0
136    }
137}
138
/// A stable transaction identity pair: (TxnId, TxnEpoch).
///
/// The epoch disambiguates a reused slot id across crashes, so the pair is
/// unique over the database's lifetime.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct TxnToken {
    pub id: TxnId,
    pub epoch: TxnEpoch,
}
145
146impl TxnToken {
147    #[inline]
148    pub const fn new(id: TxnId, epoch: TxnEpoch) -> Self {
149        Self { id, epoch }
150    }
151}
152
/// Monotonically increasing schema epoch (invalidates prepared statements).
///
/// Captured in [`Snapshot`] at BEGIN; a mismatch later signals a DDL change.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct SchemaEpoch(u64);
159
160impl SchemaEpoch {
161    pub const ZERO: Self = Self(0);
162
163    #[inline]
164    pub const fn new(raw: u64) -> Self {
165        Self(raw)
166    }
167
168    #[inline]
169    pub const fn get(self) -> u64 {
170        self.0
171    }
172}
173
/// A frozen view of the database at BEGIN time.
///
/// Visibility check is a single integer comparison: `version.commit_seq <= snapshot.high`.
/// `schema_epoch` additionally pins the schema the transaction planned against.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct Snapshot {
    pub high: CommitSeq,
    pub schema_epoch: SchemaEpoch,
}
182
183impl Snapshot {
184    #[inline]
185    pub const fn new(high: CommitSeq, schema_epoch: SchemaEpoch) -> Self {
186        Self { high, schema_epoch }
187    }
188}
189
/// Opaque pointer to a previous page version in a version chain.
///
/// In the implementation this is expected to be an arena index or object
/// locator, not a raw pointer. The `u64` payload is not interpreted here.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct VersionPointer(u64);
197
198impl VersionPointer {
199    #[inline]
200    pub const fn new(raw: u64) -> Self {
201        Self(raw)
202    }
203
204    #[inline]
205    pub const fn get(self) -> u64 {
206        self.0
207    }
208}
209
/// A single committed version of a database page.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PageVersion {
    /// Page this version belongs to.
    pub pgno: PageNumber,
    /// Commit sequence at which this version became visible.
    pub commit_seq: CommitSeq,
    /// Identity of the committing transaction.
    pub created_by: TxnToken,
    // Presumably the materialized page bytes (`PageData` is declared
    // elsewhere in the crate) — TODO confirm.
    pub data: PageData,
    /// Older version in the chain, `None` for the first version.
    pub prev: Option<VersionPointer>,
}
219
/// Database operating mode (§7.10).
///
/// Selectable via `PRAGMA fsqlite.mode = compatibility | native`.
/// Per-database (not per-connection). Default: [`Compatibility`](Self::Compatibility)
/// (via the `Default` derive + `#[default]` attribute).
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, Hash, Default, serde::Serialize, serde::Deserialize,
)]
pub enum OperatingMode {
    /// Standard SQLite WAL format. Legacy reader interop, single coordinator
    /// holds `WAL_WRITE_LOCK`. Sidecars (`.wal-fec`, `.db-fec`) present but
    /// core `.db` stays compatible when checkpointed.
    #[default]
    Compatibility,
    /// ECS-based storage. `CommitCapsules` + `CommitMarkers`, no legacy
    /// interop, full concurrent writes.
    Native,
}
237
238impl fmt::Display for OperatingMode {
239    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
240        match self {
241            Self::Compatibility => f.write_str("compatibility"),
242            Self::Native => f.write_str("native"),
243        }
244    }
245}
246
247impl OperatingMode {
248    /// Parse from the PRAGMA string value (case-insensitive).
249    #[must_use]
250    pub fn from_pragma(s: &str) -> Option<Self> {
251        let lower = s.trim().to_ascii_lowercase();
252        match lower.as_str() {
253            "compatibility" | "compat" => Some(Self::Compatibility),
254            "native" => Some(Self::Native),
255            _ => None,
256        }
257    }
258
259    /// Whether this mode uses ECS-based storage.
260    #[must_use]
261    pub const fn is_native(self) -> bool {
262        matches!(self, Self::Native)
263    }
264
265    /// Whether legacy SQLite readers can attach.
266    #[must_use]
267    pub const fn legacy_readers_allowed(self) -> bool {
268        matches!(self, Self::Compatibility)
269    }
270}
271
/// A commit capsule is the durable ECS object that a native-mode commit
/// refers to (§7.11.1).
///
/// Contains the transaction's intent log, page deltas, snapshot basis,
/// and SSI witness-plane evidence references. Built deterministically by the
/// writer before submission to the `WriteCoordinator`.
///
/// Note: derives `PartialEq` but not `Eq`/`Hash` (unlike `CommitMarker`) —
/// presumably because some referenced type lacks them; confirm before adding.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct CommitCapsule {
    /// Content-addressed identity of this capsule ECS object.
    pub object_id: ObjectId,
    /// The commit-seq snapshot this transaction read from.
    pub snapshot_basis: CommitSeq,
    /// Semantic intent log (ordered operations).
    pub intent_log: Vec<IntentOp>,
    /// Page-level deltas: `(page_number, delta_bytes)`.
    pub page_deltas: Vec<(PageNumber, Vec<u8>)>,
    /// BLAKE3 digest of the transaction's read set.
    pub read_set_digest: [u8; 32],
    /// BLAKE3 digest of the transaction's write set.
    pub write_set_digest: [u8; 32],
    /// ECS `ObjectId` refs to `ReadWitness` objects.
    pub read_witness_refs: Vec<ObjectId>,
    /// ECS `ObjectId` refs to `WriteWitness` objects.
    pub write_witness_refs: Vec<ObjectId>,
    /// ECS `ObjectId` refs to `DependencyEdge` objects.
    pub dependency_edge_refs: Vec<ObjectId>,
    /// ECS `ObjectId` refs to `MergeWitness` objects.
    pub merge_witness_refs: Vec<ObjectId>,
}
301
/// Commit marker persisted in the commit chain (§7.11.2).
///
/// The marker is the point of no return: a transaction is committed if and
/// only if its marker is durable. The marker stream is append-only and
/// sequential; each record is small (~88 bytes V1, see
/// `COMMIT_MARKER_RECORD_V1_SIZE`) so fsync latency is minimized.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct CommitMarker {
    /// Position of this commit in the global commit clock.
    pub commit_seq: CommitSeq,
    /// Monotonic non-decreasing: `max(now_unix_ns(), prev + 1)`.
    pub commit_time_unix_ns: u64,
    /// The `CommitCapsule` this marker makes durable.
    pub capsule_object_id: ObjectId,
    /// The `CommitProof` backing the commit decision.
    pub proof_object_id: ObjectId,
    /// Previous marker in the chain (`None` for the genesis marker).
    pub prev_marker: Option<ObjectId>,
    /// XXH3-128 integrity hash covering all preceding fields.
    pub integrity_hash: [u8; 16],
}
320
/// Wire size of a `CommitMarkerRecord` V1: 88 bytes.
///
/// Layout: `version(1) + flags(1) + commit_seq(8) + commit_time_unix_ns(8)
/// + capsule_oid(16) + proof_oid(16) + prev_marker_oid(16) + has_prev(1)
/// + integrity_hash(16) + reserved(5) = 88`.
pub const COMMIT_MARKER_RECORD_V1_SIZE: usize = 88;

/// Version byte for the current marker record format.
/// Stored at offset 0 of every record; `from_record_bytes` rejects others.
const COMMIT_MARKER_RECORD_VERSION: u8 = 1;
330
331impl CommitMarker {
332    /// Serialize to the canonical 88-byte V1 wire format (little-endian).
333    #[must_use]
334    pub fn to_record_bytes(&self) -> [u8; COMMIT_MARKER_RECORD_V1_SIZE] {
335        let mut buf = [0u8; COMMIT_MARKER_RECORD_V1_SIZE];
336        buf[0] = COMMIT_MARKER_RECORD_VERSION;
337        buf[1] = 0; // flags (reserved)
338
339        // commit_seq at offset 2
340        buf[2..10].copy_from_slice(&self.commit_seq.get().to_le_bytes());
341        // commit_time_unix_ns at offset 10
342        buf[10..18].copy_from_slice(&self.commit_time_unix_ns.to_le_bytes());
343        // capsule_object_id at offset 18
344        buf[18..34].copy_from_slice(self.capsule_object_id.as_bytes());
345        // proof_object_id at offset 34
346        buf[34..50].copy_from_slice(self.proof_object_id.as_bytes());
347        // prev_marker at offset 50 (16 bytes, all-zero if None)
348        if let Some(prev) = self.prev_marker {
349            buf[50..66].copy_from_slice(prev.as_bytes());
350        }
351        // has_prev flag at offset 66
352        buf[66] = u8::from(self.prev_marker.is_some());
353        // integrity_hash at offset 67
354        buf[67..83].copy_from_slice(&self.integrity_hash);
355        // bytes 83..88 are reserved (zero)
356        buf
357    }
358
359    /// Deserialize from the canonical 88-byte V1 wire format.
360    #[must_use]
361    pub fn from_record_bytes(data: &[u8; COMMIT_MARKER_RECORD_V1_SIZE]) -> Option<Self> {
362        if data[0] != COMMIT_MARKER_RECORD_VERSION {
363            return None;
364        }
365
366        let commit_seq = CommitSeq::new(u64::from_le_bytes(data[2..10].try_into().ok()?));
367        let commit_time_unix_ns = u64::from_le_bytes(data[10..18].try_into().ok()?);
368        let capsule_object_id = ObjectId::from_bytes(data[18..34].try_into().ok()?);
369        let proof_object_id = ObjectId::from_bytes(data[34..50].try_into().ok()?);
370        let has_prev = data[66] != 0;
371        let prev_marker = if has_prev {
372            Some(ObjectId::from_bytes(data[50..66].try_into().ok()?))
373        } else {
374            None
375        };
376        let mut integrity_hash = [0u8; 16];
377        integrity_hash.copy_from_slice(&data[67..83]);
378
379        Some(Self {
380            commit_seq,
381            commit_time_unix_ns,
382            capsule_object_id,
383            proof_object_id,
384            prev_marker,
385            integrity_hash,
386        })
387    }
388
389    /// Compute the integrity hash (XXH3-128) over all fields except the
390    /// integrity hash itself.
391    #[must_use]
392    pub fn compute_integrity_hash(&self) -> [u8; 16] {
393        let mut buf = Vec::with_capacity(74);
394        append_u64_le(&mut buf, self.commit_seq.get());
395        append_u64_le(&mut buf, self.commit_time_unix_ns);
396        buf.extend_from_slice(self.capsule_object_id.as_bytes());
397        buf.extend_from_slice(self.proof_object_id.as_bytes());
398        if let Some(prev) = self.prev_marker {
399            buf.push(1);
400            buf.extend_from_slice(prev.as_bytes());
401        } else {
402            buf.push(0);
403            buf.extend_from_slice(&[0u8; 16]);
404        }
405        let hash128 = xxhash_rust::xxh3::xxh3_128(&buf);
406        hash128.to_le_bytes()
407    }
408
409    /// Build a marker with the integrity hash computed automatically.
410    #[must_use]
411    pub fn new(
412        commit_seq: CommitSeq,
413        commit_time_unix_ns: u64,
414        capsule_object_id: ObjectId,
415        proof_object_id: ObjectId,
416        prev_marker: Option<ObjectId>,
417    ) -> Self {
418        let mut marker = Self {
419            commit_seq,
420            commit_time_unix_ns,
421            capsule_object_id,
422            proof_object_id,
423            prev_marker,
424            integrity_hash: [0u8; 16],
425        };
426        marker.integrity_hash = marker.compute_integrity_hash();
427        marker
428    }
429
430    /// Verify the integrity hash.
431    #[must_use]
432    pub fn verify_integrity(&self) -> bool {
433        self.integrity_hash == self.compute_integrity_hash()
434    }
435}
436
/// Object Transmission Information (RaptorQ / RFC 6330).
///
/// This is an internal encoding, NOT the RFC 6330 Common FEC OTI wire format.
/// Field widths are widened for implementation convenience:
/// - `f` is `u64` (RFC: 40-bit)
/// - `t` is `u32` (RFC: 16-bit) -- supports `page_size = 65_536`
/// - `z` is `u32` (RFC: 12-bit)
/// - `n` is `u32` (RFC: 8-bit)
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct Oti {
    /// Transfer length (bytes).
    pub f: u64,
    /// Alignment parameter.
    pub al: u16,
    /// Symbol size (bytes). `u32` to represent all valid SQLite page sizes.
    pub t: u32,
    /// Number of source blocks.
    pub z: u32,
    /// Number of sub-blocks.
    pub n: u32,
}

/// Serialized size of [`Oti`] on the wire: `8 + 2 + 4 + 4 + 4 = 22` bytes.
/// Fields appear in declaration order (`f`, `al`, `t`, `z`, `n`), little-endian.
pub const OTI_WIRE_SIZE: usize = 22;
461
462impl Oti {
463    /// Serialize to canonical little-endian bytes.
464    #[must_use]
465    pub fn to_bytes(self) -> [u8; OTI_WIRE_SIZE] {
466        let mut as_vec = Vec::with_capacity(OTI_WIRE_SIZE);
467        append_u64_le(&mut as_vec, self.f);
468        append_u16_le(&mut as_vec, self.al);
469        append_u32_le(&mut as_vec, self.t);
470        append_u32_le(&mut as_vec, self.z);
471        append_u32_le(&mut as_vec, self.n);
472
473        let mut buf = [0u8; OTI_WIRE_SIZE];
474        buf.copy_from_slice(&as_vec);
475        buf
476    }
477
478    /// Deserialize from canonical little-endian bytes.
479    ///
480    /// Returns `None` if `data` is shorter than [`OTI_WIRE_SIZE`].
481    #[must_use]
482    pub fn from_bytes(data: &[u8]) -> Option<Self> {
483        if data.len() < OTI_WIRE_SIZE {
484            return None;
485        }
486        Some(Self {
487            f: read_u64_le(&data[0..8])?,
488            al: read_u16_le(&data[8..10])?,
489            t: read_u32_le(&data[10..14])?,
490            z: read_u32_le(&data[14..18])?,
491            n: read_u32_le(&data[18..22])?,
492        })
493    }
494}
495
/// Proof that a decode was correct (structure depends on codec mode).
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct DecodeProof {
    /// The decoded object this proof attests to.
    pub object_id: ObjectId,
    /// Transmission parameters the decode was performed under.
    pub oti: Oti,
}
502
503/// Capability context + cooperative budget types.
504///
505/// Canonical definitions live in `crate::cx` (per `bd-3go.1`).
506pub use crate::cx::{Budget, Cx};
507
/// Result outcome lattice for cooperative cancellation and failure.
///
/// `Ord` follows declaration order: `Ok < Err < Cancelled < Panicked`.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
pub enum Outcome {
    Ok,
    Err,
    Cancelled,
    Panicked,
}
518
/// Global epoch identifier (monotonically increasing).
///
/// Used as the bounds of [`SymbolValidityWindow`].
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct EpochId(u64);
525
526impl EpochId {
527    /// The zero epoch (initial/bootstrap).
528    pub const ZERO: Self = Self(0);
529
530    #[inline]
531    pub const fn new(raw: u64) -> Self {
532        Self(raw)
533    }
534
535    #[inline]
536    pub const fn get(self) -> u64 {
537        self.0
538    }
539
540    /// Return the next epoch (current + 1).
541    ///
542    /// Returns `None` on overflow (saturated at `u64::MAX`).
543    #[must_use]
544    pub const fn next(self) -> Option<Self> {
545        match self.0.checked_add(1) {
546            Some(val) => Some(Self(val)),
547            None => None,
548        }
549    }
550}
551
/// Validity window for symbols or proofs (inclusive bounds).
///
/// An empty window (`from_epoch > to_epoch`) contains no epoch.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct SymbolValidityWindow {
    pub from_epoch: EpochId,
    pub to_epoch: EpochId,
}
558
559impl SymbolValidityWindow {
560    #[must_use]
561    pub const fn new(from_epoch: EpochId, to_epoch: EpochId) -> Self {
562        Self {
563            from_epoch,
564            to_epoch,
565        }
566    }
567
568    /// Build the default validity window `[0, current_epoch]` per §4.18.1.
569    #[must_use]
570    pub const fn default_window(current_epoch: EpochId) -> Self {
571        Self {
572            from_epoch: EpochId::ZERO,
573            to_epoch: current_epoch,
574        }
575    }
576
577    /// Check whether `epoch` falls within this window (inclusive bounds).
578    ///
579    /// Fail-closed: returns `false` for any epoch outside the window,
580    /// including future epochs (§4.18.1 normative requirement).
581    #[must_use]
582    pub const fn contains(&self, epoch: EpochId) -> bool {
583        epoch.0 >= self.from_epoch.0 && epoch.0 <= self.to_epoch.0
584    }
585}
586
/// Capability token authorizing access to a remote endpoint.
///
/// Opaque 16-byte value; this module does not interpret the contents.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct RemoteCap([u8; 16]);
591
592impl RemoteCap {
593    #[must_use]
594    pub const fn from_bytes(bytes: [u8; 16]) -> Self {
595        Self(bytes)
596    }
597
598    #[must_use]
599    pub const fn as_bytes(&self) -> &[u8; 16] {
600        &self.0
601    }
602}
603
/// Capability token for the symbol authentication master key.
///
/// Opaque 32-byte value; key material is never interpreted here.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct SymbolAuthMasterKeyCap([u8; 32]);
608
609impl SymbolAuthMasterKeyCap {
610    #[must_use]
611    pub const fn from_bytes(bytes: [u8; 32]) -> Self {
612        Self(bytes)
613    }
614
615    #[must_use]
616    pub const fn as_bytes(&self) -> &[u8; 32] {
617        &self.0
618    }
619}
620
/// Stable idempotency key for retry-safe operations.
///
/// 16 bytes; may be supplied directly or derived deterministically via
/// [`Self::derive`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct IdempotencyKey([u8; 16]);
625
626impl IdempotencyKey {
627    #[must_use]
628    pub const fn from_bytes(bytes: [u8; 16]) -> Self {
629        Self(bytes)
630    }
631
632    #[must_use]
633    pub const fn as_bytes(&self) -> &[u8; 16] {
634        &self.0
635    }
636}
637
/// Saga identifier (ties together a multi-step idempotent workflow).
///
/// Thin wrapper over an [`IdempotencyKey`]; see `Saga::new` / `Saga::key`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct Saga {
    pub key: IdempotencyKey,
}
643
644impl IdempotencyKey {
645    /// Deterministically derive a key from request bytes + ECS epoch.
646    ///
647    /// Domain separation:
648    /// `BLAKE3("fsqlite:idempotency:v1" || le_u64(ecs_epoch) || request_bytes)`.
649    #[must_use]
650    pub fn derive(ecs_epoch: u64, request_bytes: &[u8]) -> Self {
651        let mut hasher = blake3::Hasher::new();
652        hasher.update(b"fsqlite:idempotency:v1");
653        hasher.update(&ecs_epoch.to_le_bytes());
654        hasher.update(request_bytes);
655        let digest = hasher.finalize();
656        let mut out = [0_u8; 16];
657        out.copy_from_slice(&digest.as_bytes()[..16]);
658        Self(out)
659    }
660}
661
662impl Saga {
663    /// Create a saga identifier from an idempotency key.
664    #[must_use]
665    pub const fn new(key: IdempotencyKey) -> Self {
666        Self { key }
667    }
668
669    /// Access the saga idempotency key.
670    #[must_use]
671    pub const fn key(self) -> IdempotencyKey {
672        self.key
673    }
674}
675
/// Logical region identifier (tiering / placement / replication scope).
///
/// Plain `u32` newtype; ordering follows the raw value.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct Region(u32);
682
683impl Region {
684    #[inline]
685    pub const fn new(raw: u32) -> Self {
686        Self(raw)
687    }
688
689    #[inline]
690    pub const fn get(self) -> u32 {
691        self.0
692    }
693}
694
/// SSI witness key basis (§5.6.4.3).
///
/// Canonical key space for SSI rw-antidependency tracking. Always valid to
/// fall back to `Page(pgno)` — finer keys reduce false positives but never
/// compromise correctness.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum WitnessKey {
    /// Coarse witness: entire page.
    Page(PageNumber),
    /// Semantic witness: specific B-tree cell identified by domain-separated hash.
    ///
    /// The spec defines `tag` as
    /// `low32(xxh3_64("fsqlite:witness:cell:v1" || le_u32(btree_root) || key_bytes))`;
    /// [`WitnessKey::cell_tag`] stores the full 64-bit hash and leaves the
    /// low32 reduction to comparison sites.
    Cell { btree_root: PageNumber, tag: u64 },
    /// Semantic witness: structured byte range on a page.
    ByteRange {
        page: PageNumber,
        start: u32,
        len: u32,
    },
    /// Key range witness for reduced false positives on range scans (optional, advanced).
    KeyRange {
        btree_root: PageNumber,
        lo: Vec<u8>,
        hi: Vec<u8>,
    },
    /// Custom namespace witness (extensibility point).
    Custom { namespace: u32, bytes: Vec<u8> },
}
723
724impl WitnessKey {
725    /// Derive a deterministic cell tag from a B-tree root page and canonical key bytes.
726    ///
727    /// Uses domain-separated xxh3_64 (§5.6.4.3):
728    /// `cell_tag = low32(xxh3_64("fsqlite:witness:cell:v1" || le_u32(btree_root_pgno) || key_bytes))`
729    #[must_use]
730    pub fn cell_tag(btree_root: PageNumber, canonical_key_bytes: &[u8]) -> u64 {
731        use xxhash_rust::xxh3::xxh3_64;
732        let mut buf =
733            Vec::with_capacity(b"fsqlite:witness:cell:v1".len() + 4 + canonical_key_bytes.len());
734        buf.extend_from_slice(b"fsqlite:witness:cell:v1");
735        buf.extend_from_slice(&btree_root.get().to_le_bytes());
736        buf.extend_from_slice(canonical_key_bytes);
737        // Store full 64-bit hash; low32 extraction done at comparison site if needed.
738        xxh3_64(&buf)
739    }
740
741    /// Create a cell witness for a point read/uniqueness check.
742    #[must_use]
743    pub fn for_cell_read(btree_root: PageNumber, canonical_key_bytes: &[u8]) -> Self {
744        Self::Cell {
745            btree_root,
746            tag: Self::cell_tag(btree_root, canonical_key_bytes),
747        }
748    }
749
750    /// Create page-level witnesses for a range scan (phantom protection).
751    ///
752    /// Returns one `Page(leaf_pgno)` witness per visited leaf page (§5.6.4.3).
753    #[must_use]
754    pub fn for_range_scan(leaf_pages: &[PageNumber]) -> Vec<Self> {
755        leaf_pages.iter().copied().map(Self::Page).collect()
756    }
757
758    /// Create a cell + page witness pair for a point write.
759    ///
760    /// Writes register both `Cell(btree_root, cell_tag)` AND `Page(leaf_pgno)`
761    /// as write witnesses (§5.6.4.3).
762    #[must_use]
763    pub fn for_point_write(
764        btree_root: PageNumber,
765        canonical_key_bytes: &[u8],
766        leaf_pgno: PageNumber,
767    ) -> (Self, Self) {
768        let cell = Self::Cell {
769            btree_root,
770            tag: Self::cell_tag(btree_root, canonical_key_bytes),
771        };
772        let page = Self::Page(leaf_pgno);
773        (cell, page)
774    }
775
776    /// Returns `true` if this is a coarse page-level witness.
777    #[must_use]
778    pub fn is_page(&self) -> bool {
779        matches!(self, Self::Page(_))
780    }
781
782    /// Returns `true` if this is a cell-level semantic witness.
783    #[must_use]
784    pub fn is_cell(&self) -> bool {
785        matches!(self, Self::Cell { .. })
786    }
787}
788
/// Witness hierarchy range key (prefix-based bucketing).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct RangeKey {
    // Presumably depth in the witness hierarchy — TODO confirm semantics.
    pub level: u8,
    // Leading 32 bits of a witness-key hash used as the bucket index.
    pub hash_prefix: u32,
}
795
/// A recorded SSI read witness.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct ReadWitness {
    /// Transaction that performed the read.
    pub txn: TxnId,
    /// What was read, at page/cell/range granularity.
    pub key: WitnessKey,
}
802
/// A recorded SSI write witness.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct WriteWitness {
    /// Transaction that performed the write.
    pub txn: TxnId,
    /// What was written, at page/cell/range granularity.
    pub key: WitnessKey,
}
809
/// A persisted segment of witness index updates.
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct WitnessIndexSegment {
    /// Epoch the segment belongs to (see [`SymbolValidityWindow`]).
    pub epoch: EpochId,
    /// Read witnesses recorded during this segment.
    pub reads: Vec<ReadWitness>,
    /// Write witnesses recorded during this segment.
    pub writes: Vec<WriteWitness>,
}
817
/// A dependency edge in the SSI serialization graph.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct DependencyEdge {
    /// Source transaction of the edge.
    pub from: TxnId,
    /// Target transaction of the edge.
    pub to: TxnId,
    /// Witness key on which the dependency was detected.
    pub key_basis: WitnessKey,
    // Presumably the transaction that detected/recorded the edge, which may
    // differ from `from`/`to` — TODO confirm against the SSI validator.
    pub observed_by: TxnId,
}
826
/// Proof object tying together the dependency edges relevant to a commit
/// decision (§7.11.2 step 3).
///
/// Persisted as an ECS object by the `WriteCoordinator` after FCW + SSI
/// re-validation succeeds. Referenced by the corresponding `CommitMarker`
/// (its `proof_object_id` field).
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct CommitProof {
    /// The commit sequence this proof was generated for.
    pub commit_seq: CommitSeq,
    /// SSI dependency edges that were validated.
    pub edges: Vec<DependencyEdge>,
    /// ECS `ObjectId` refs to witness evidence objects.
    pub evidence_refs: Vec<ObjectId>,
}
841
/// Identifier for a table b-tree root (logical, not physical file page).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct TableId(u32);
846
847impl TableId {
848    #[inline]
849    pub const fn new(raw: u32) -> Self {
850        Self(raw)
851    }
852
853    #[inline]
854    pub const fn get(self) -> u32 {
855        self.0
856    }
857}
858
/// Identifier for an index b-tree root (logical, not physical file page).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct IndexId(u32);
863
864impl IndexId {
865    #[inline]
866    pub const fn new(raw: u32) -> Self {
867        Self(raw)
868    }
869
870    #[inline]
871    pub const fn get(self) -> u32 {
872        self.0
873    }
874}
875
/// RowId / INTEGER PRIMARY KEY key space (SQLite uses signed 64-bit).
///
/// Negative values are representable, matching SQLite's rowid domain.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct RowId(i64);
882
883impl RowId {
884    /// Maximum RowId value: 2^63 - 1.
885    pub const MAX: Self = Self(i64::MAX);
886
887    #[inline]
888    pub const fn new(raw: i64) -> Self {
889        Self(raw)
890    }
891
892    #[inline]
893    pub const fn get(self) -> i64 {
894        self.0
895    }
896}
897
/// Rowid allocation mode for a table.
///
/// Note: intentionally no serde derives, unlike most types in this module —
/// this is in-memory allocator state, not a persisted wire type.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum RowIdMode {
    /// Normal rowid: max(rowid)+1, deleted rowids may be reused.
    Normal,
    /// AUTOINCREMENT: never reuse deleted rowids. Uses sqlite_sequence
    /// high-water mark. Returns error at MAX_ROWID.
    AutoIncrement,
}
907
/// Rowid allocator implementing SQLite's allocation semantics.
///
/// - Normal mode: next rowid = max(existing) + 1. Deleted rowids may be reused
///   when max rowid is not the table maximum.
/// - AUTOINCREMENT mode: next rowid = max(max_existing, sqlite_sequence) + 1.
///   Rowids are never reused. When MAX_ROWID is reached, allocation fails.
#[derive(Debug, Clone)]
pub struct RowIdAllocator {
    // Allocation policy, fixed at construction.
    mode: RowIdMode,
    /// High-water mark from sqlite_sequence (AUTOINCREMENT only; starts at 0,
    /// load persisted state via `set_sequence_high_water`).
    sequence_high_water: i64,
}
920
921impl RowIdAllocator {
922    /// Create a new allocator.
923    pub const fn new(mode: RowIdMode) -> Self {
924        Self {
925            mode,
926            sequence_high_water: 0,
927        }
928    }
929
930    /// Allocate the next rowid given the current maximum rowid in the table.
931    ///
932    /// `max_existing` is `None` if the table is empty.
933    ///
934    /// Returns `Ok(rowid)` or `Err` if MAX_ROWID is exhausted (AUTOINCREMENT only).
935    pub fn allocate(&mut self, max_existing: Option<RowId>) -> Result<RowId, RowIdExhausted> {
936        let max_val = max_existing.map_or(0, RowId::get);
937
938        match self.mode {
939            RowIdMode::Normal => {
940                if max_val < i64::MAX {
941                    Ok(RowId::new(max_val + 1))
942                } else {
943                    // MAX_ROWID reached: SQLite tries random probing.
944                    // For the type-level implementation, we signal exhaustion.
945                    Err(RowIdExhausted)
946                }
947            }
948            RowIdMode::AutoIncrement => {
949                let base = max_val.max(self.sequence_high_water);
950                if base == i64::MAX {
951                    return Err(RowIdExhausted);
952                }
953                let next = base + 1;
954                self.sequence_high_water = next;
955                Ok(RowId::new(next))
956            }
957        }
958    }
959
960    /// Get the current sqlite_sequence high-water mark.
961    pub const fn sequence_high_water(&self) -> i64 {
962        self.sequence_high_water
963    }
964
965    /// Set the sqlite_sequence high-water mark (loaded from DB).
966    pub fn set_sequence_high_water(&mut self, val: i64) {
967        self.sequence_high_water = val;
968    }
969}
970
/// Error when rowid space is exhausted.
///
/// Displays with SQLite's "database or object is full" wording so callers can
/// surface it like the upstream `SQLITE_FULL` condition.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RowIdExhausted;

impl std::fmt::Display for RowIdExhausted {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("database or object is full (rowid exhausted)")
    }
}

// An error type should interoperate with `?`, `Box<dyn Error>`, and
// error-chaining consumers; `Debug + Display` already satisfy the trait,
// so the impl body is empty.
impl std::error::Error for RowIdExhausted {}
980
/// Column index within a table (0-based).
///
/// Newtype over `u32`; used to address columns in rebase expressions
/// (`RebaseExpr::ColumnRef`) and column-level intent updates.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct ColumnIdx(u32);
987
988impl ColumnIdx {
989    #[inline]
990    pub const fn new(raw: u32) -> Self {
991        Self(raw)
992    }
993
994    #[inline]
995    pub const fn get(self) -> u32 {
996        self.0
997    }
998}
999
1000// ---------------------------------------------------------------------------
1001// §5.10.1 Intent Logs — Semantic Operations + Footprints
1002// ---------------------------------------------------------------------------
1003
/// Reference to a B-tree (either table or index).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum BtreeRef {
    /// A table B-tree, identified by its `TableId`.
    Table(TableId),
    /// An index B-tree, identified by its `IndexId`.
    Index(IndexId),
}
1010
/// Kind of semantic key reference.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum SemanticKeyKind {
    /// A row in a table B-tree (digest tag byte 0).
    TableRow,
    /// An entry in an index B-tree (digest tag byte 1).
    IndexEntry,
}
1017
/// Semantic key reference with a stable BLAKE3-based digest.
///
/// `key_digest = Trunc128(BLAKE3("fsqlite:btree:key:v1" || kind || btree_id || canonical_key_bytes))`
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct SemanticKeyRef {
    /// The B-tree (table or index) this key belongs to.
    pub btree: BtreeRef,
    /// Whether the key names a table row or an index entry.
    pub kind: SemanticKeyKind,
    /// 128-bit truncated BLAKE3 digest of the canonical key (see type docs).
    pub key_digest: [u8; 16],
}
1027
1028impl SemanticKeyRef {
1029    /// Domain separation prefix for the key digest.
1030    const DOMAIN_SEP: &'static [u8] = b"fsqlite:btree:key:v1";
1031
1032    /// Compute the key digest from kind, btree id, and canonical key bytes.
1033    #[must_use]
1034    pub fn compute_digest(
1035        kind: SemanticKeyKind,
1036        btree: BtreeRef,
1037        canonical_key_bytes: &[u8],
1038    ) -> [u8; 16] {
1039        let mut hasher = blake3::Hasher::new();
1040        hasher.update(Self::DOMAIN_SEP);
1041        hasher.update(&[match kind {
1042            SemanticKeyKind::TableRow => 0,
1043            SemanticKeyKind::IndexEntry => 1,
1044        }]);
1045        match btree {
1046            BtreeRef::Table(id) => {
1047                hasher.update(&[0]);
1048                hasher.update(&id.get().to_le_bytes());
1049            }
1050            BtreeRef::Index(id) => {
1051                hasher.update(&[1]);
1052                hasher.update(&id.get().to_le_bytes());
1053            }
1054        }
1055        hasher.update(canonical_key_bytes);
1056        let hash = hasher.finalize();
1057        let bytes = hash.as_bytes();
1058        let mut digest = [0u8; 16];
1059        digest.copy_from_slice(&bytes[..16]);
1060        digest
1061    }
1062
1063    /// Construct a `SemanticKeyRef` by computing the digest.
1064    #[must_use]
1065    pub fn new(btree: BtreeRef, kind: SemanticKeyKind, canonical_key_bytes: &[u8]) -> Self {
1066        let key_digest = Self::compute_digest(kind, btree, canonical_key_bytes);
1067        Self {
1068            btree,
1069            kind,
1070            key_digest,
1071        }
1072    }
1073}
1074
bitflags::bitflags! {
    /// Structural side effects that make operations non-commutative.
    ///
    /// Recorded in `IntentFootprint::structural`; each flag is a distinct
    /// bit, so any combination can be OR-ed together.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct StructuralEffects: u32 {
        /// No structural effects (simple leaf operations).
        const NONE = 0;
        /// A B-tree page was split.
        const PAGE_SPLIT = 1;
        /// A B-tree page was merged.
        const PAGE_MERGE = 2;
        /// Multi-page balance operation.
        const BALANCE_MULTI_PAGE = 4;
        /// An overflow page was allocated.
        const OVERFLOW_ALLOC = 8;
        /// An overflow chain was mutated.
        const OVERFLOW_MUTATE = 16;
        /// The freelist was modified.
        const FREELIST_MUTATE = 32;
        /// The pointer map was modified.
        const POINTER_MAP_MUTATE = 64;
        /// Cells were moved during defragmentation.
        const DEFRAG_MOVE_CELLS = 128;
    }
}
1099
1100impl serde::Serialize for StructuralEffects {
1101    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
1102        self.bits().serialize(serializer)
1103    }
1104}
1105
1106impl<'de> serde::Deserialize<'de> for StructuralEffects {
1107    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
1108        let bits = u32::deserialize(deserializer)?;
1109        Self::from_bits(bits).ok_or_else(|| {
1110            serde::de::Error::custom(format!("invalid StructuralEffects bits: {bits:#x}"))
1111        })
1112    }
1113}
1114
1115impl Default for StructuralEffects {
1116    fn default() -> Self {
1117        Self::NONE
1118    }
1119}
1120
/// Semantic read/write footprint of an intent operation (§5.10.1).
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct IntentFootprint {
    /// Semantic keys the operation read.
    pub reads: Vec<SemanticKeyRef>,
    /// Semantic keys the operation wrote.
    pub writes: Vec<SemanticKeyRef>,
    /// Structural side effects (splits, merges, freelist churn, ...).
    pub structural: StructuralEffects,
}
1128
1129impl IntentFootprint {
1130    /// Create an empty footprint with no effects.
1131    #[must_use]
1132    pub fn empty() -> Self {
1133        Self {
1134            reads: Vec::new(),
1135            writes: Vec::new(),
1136            structural: StructuralEffects::NONE,
1137        }
1138    }
1139}
1140
1141impl Default for IntentFootprint {
1142    fn default() -> Self {
1143        Self::empty()
1144    }
1145}
1146
/// Replayable expression AST for deterministic rebase (§5.10.1).
///
/// Allowed forms are intentionally strict: only proven-deterministic
/// expressions may appear. Enforced by `expr_is_rebase_safe()`.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub enum RebaseExpr {
    /// Reference to a column in the current row.
    ColumnRef(ColumnIdx),
    /// A literal value.
    Literal(crate::SqliteValue),
    /// A unary operation.
    UnaryOp {
        op: RebaseUnaryOp,
        operand: Box<Self>,
    },
    /// A binary operation.
    BinaryOp {
        op: RebaseBinaryOp,
        left: Box<Self>,
        right: Box<Self>,
    },
    /// A deterministic function call (determinism is checked by
    /// `expr_is_rebase_safe()`, not enforced by construction).
    FunctionCall { name: String, args: Vec<Self> },
    /// CAST(expr AS type).
    Cast { expr: Box<Self>, type_name: String },
    /// CASE WHEN ... THEN ... ELSE ... END.
    Case {
        /// Optional operand for the `CASE expr WHEN ...` form.
        operand: Option<Box<Self>>,
        /// Ordered `(WHEN, THEN)` pairs.
        when_clauses: Vec<(Self, Self)>,
        /// Optional `ELSE` arm.
        else_clause: Option<Box<Self>>,
    },
    /// COALESCE(expr, expr, ...).
    Coalesce(Vec<Self>),
    /// NULLIF(expr, expr).
    NullIf { left: Box<Self>, right: Box<Self> },
    /// String concatenation (||).
    Concat { left: Box<Self>, right: Box<Self> },
}
1185
/// Unary operators allowed in rebase expressions.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum RebaseUnaryOp {
    /// Arithmetic negation (SQL unary `-`).
    Negate,
    /// Bitwise complement (SQL `~`).
    BitwiseNot,
    /// Logical negation (SQL `NOT`).
    Not,
}
1193
/// Binary operators allowed in rebase expressions.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum RebaseBinaryOp {
    /// SQL `+`.
    Add,
    /// SQL `-`.
    Subtract,
    /// SQL `*`.
    Multiply,
    /// SQL `/`.
    Divide,
    /// SQL `%`.
    Remainder,
    /// SQL `&`.
    BitwiseAnd,
    /// SQL `|`.
    BitwiseOr,
    /// SQL `<<`.
    ShiftLeft,
    /// SQL `>>`.
    ShiftRight,
}
1207
/// The kind of semantic operation in an intent log entry.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub enum IntentOpKind {
    /// Insert a table row at `key` with the serialized `record` payload.
    Insert {
        table: TableId,
        key: RowId,
        record: Vec<u8>,
    },
    /// Delete the table row at `key`.
    Delete {
        table: TableId,
        key: RowId,
    },
    /// Replace the record of the table row at `key` with `new_record`.
    Update {
        table: TableId,
        key: RowId,
        new_record: Vec<u8>,
    },
    /// Insert an index entry mapping `key` bytes to `rowid`.
    IndexInsert {
        index: IndexId,
        key: Vec<u8>,
        rowid: RowId,
    },
    /// Delete the index entry mapping `key` bytes to `rowid`.
    IndexDelete {
        index: IndexId,
        key: Vec<u8>,
        rowid: RowId,
    },
    /// Column-level rebase expressions for deterministic rebase (§5.10.1).
    UpdateExpression {
        table: TableId,
        key: RowId,
        column_updates: Vec<(ColumnIdx, RebaseExpr)>,
    },
}
1242
/// A single entry in the transaction intent log (§5.10.1).
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct IntentOp {
    /// Schema epoch the operation was recorded under.
    pub schema_epoch: u64,
    /// Semantic read/write footprint of the operation.
    pub footprint: IntentFootprint,
    /// The semantic operation itself.
    pub op: IntentOpKind,
}
1250
/// Transaction intent log: an ordered sequence of semantic operations.
///
/// Order is significant: the `Vec` preserves the sequence in which the
/// operations were recorded.
pub type IntentLog = Vec<IntentOp>;
1253
/// History of versions for a page, used by debugging and invariant checks.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PageHistory {
    /// The page this history describes.
    pub pgno: PageNumber,
    /// Recorded versions of the page.
    pub versions: Vec<PageVersion>,
}
1260
/// ARC cache placeholder type (Adaptive Replacement Cache).
///
/// The actual ARC algorithm lives in `fsqlite-pager`; this type exists to keep
/// glossary terminology stable across crates.
///
/// Zero-sized: carries no state of its own.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct ArcCache;
1267
/// Root manifest tying together the durable roots of the database state.
///
/// `ecs_epoch` is the monotone epoch counter stored durably here and mirrored
/// in `SharedMemoryLayout.ecs_epoch` (§4.18, §5.6.1).
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct RootManifest {
    /// Current schema epoch of the database.
    pub schema_epoch: SchemaEpoch,
    /// Page number of the root page.
    pub root_page: PageNumber,
    /// Global ECS epoch — monotonically increasing, never reused (§4.18).
    pub ecs_epoch: EpochId,
}
1279
/// Transaction slot index (cross-process shared memory slot).
///
/// Newtype over `u32`.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct TxnSlot(u32);
1286
1287impl TxnSlot {
1288    #[inline]
1289    pub const fn new(raw: u32) -> Self {
1290        Self(raw)
1291    }
1292
1293    #[inline]
1294    pub const fn get(self) -> u32 {
1295        self.0
1296    }
1297}
1298
1299#[cfg(test)]
1300mod tests {
1301    use std::collections::HashSet;
1302    use std::time::Duration;
1303
1304    use proptest::prelude::*;
1305
1306    use crate::PayloadHash;
1307
1308    use super::*;
1309
    #[test]
    fn test_txn_id_nonzero_enforced() {
        // 0 is outside the TxnId domain (NonZeroU64 newtype).
        assert!(TxnId::new(0).is_none());
        assert!(TxnId::try_from(0_u64).is_err());
        // Both ends of the valid domain are accepted.
        assert!(TxnId::new(1).is_some());
        assert!(TxnId::new(TxnId::MAX_RAW).is_some());
    }
1317
    #[test]
    fn test_txn_id_62_bit_max() {
        // Values above 2^62 - 1 are rejected by both constructors.
        assert!(TxnId::new(TxnId::MAX_RAW + 1).is_none());
        assert!(TxnId::try_from(TxnId::MAX_RAW + 1).is_err());
    }
1323
    #[test]
    fn test_object_id_16_bytes_blake3_truncation() {
        // A derived id is exactly ObjectId::LEN bytes long.
        let header = b"hdr:v1";
        let payload = b"payload";
        let oid = ObjectId::derive(header, PayloadHash::blake3(payload));
        assert_eq!(oid.as_bytes().len(), ObjectId::LEN);
    }
1331
    #[test]
    fn test_object_id_content_addressed() {
        let header = b"hdr:v1";
        let payload = b"payload";
        // Same (header, payload) must derive the same id ...
        let a = ObjectId::derive(header, PayloadHash::blake3(payload));
        let b = ObjectId::derive(header, PayloadHash::blake3(payload));
        assert_eq!(a, b);

        // ... and a different payload must derive a different id.
        let c = ObjectId::derive(header, PayloadHash::blake3(b"payload2"));
        assert_ne!(a, c);
    }
1343
    #[test]
    fn prop_object_id_collision_resistance() {
        // 10k distinct pseudo-random payloads must yield 10k distinct ids.
        let header = b"hdr:v1";
        let mut ids = HashSet::<ObjectId>::with_capacity(10_000);

        // LCG state, fixed seed so the test is fully deterministic.
        let mut state: u64 = 0xD6E8_FEB8_6659_FD93;
        for i in 0..10_000_u64 {
            // Deterministic pseudo-randomness, but ensure distinct inputs by embedding i.
            state = state
                .wrapping_mul(6_364_136_223_846_793_005_u64)
                .wrapping_add(1_442_695_040_888_963_407_u64);

            let mut payload = [0_u8; 32];
            payload[..8].copy_from_slice(&i.to_le_bytes());
            payload[8..16].copy_from_slice(&state.to_le_bytes());
            payload[16..24].copy_from_slice(&state.rotate_left(17).to_le_bytes());
            payload[24..32].copy_from_slice(&state.rotate_left(41).to_le_bytes());

            let oid = ObjectId::derive(header, PayloadHash::blake3(&payload));
            assert!(ids.insert(oid), "ObjectId collision at i={i}");
        }
    }
1366
    #[test]
    fn test_snapshot_fields() {
        // The constructor stores `high` and `schema_epoch` verbatim.
        let snap = Snapshot::new(CommitSeq::new(7), SchemaEpoch::new(9));
        assert_eq!(snap.high.get(), 7);
        assert_eq!(snap.schema_epoch.get(), 9);
    }
1373
    #[test]
    fn test_oti_field_widths_allow_large_symbol_size() {
        // §3.5.2 requires T/Z/N to represent values >= 65536.
        let oti = Oti {
            f: 1,
            al: 4,
            t: 65_536,
            z: 1,
            n: 1,
        };
        // 65_536 does not fit in a u16, so `t` must be wider than 16 bits.
        assert_eq!(oti.t, 65_536);
    }
1386
    #[test]
    fn test_budget_product_lattice_semantics() {
        let a = Budget {
            deadline: Some(Duration::from_millis(100)),
            poll_quota: 10,
            cost_quota: Some(500),
            priority: 1,
        };
        let b = Budget {
            deadline: Some(Duration::from_millis(50)),
            poll_quota: 20,
            cost_quota: Some(400),
            priority: 9,
        };
        // Component-wise meet: the tighter deadline/poll/cost quota wins,
        // while the higher priority wins.
        let c = a.meet(b);
        assert_eq!(c.deadline, Some(Duration::from_millis(50)));
        assert_eq!(c.poll_quota, 10);
        assert_eq!(c.cost_quota, Some(400));
        assert_eq!(c.priority, 9);
    }
1407
    #[test]
    fn test_outcome_ordering_lattice() {
        // Severity order: Ok < Err < Cancelled < Panicked.
        assert!(Outcome::Ok < Outcome::Err);
        assert!(Outcome::Err < Outcome::Cancelled);
        assert!(Outcome::Cancelled < Outcome::Panicked);
    }
1414
    #[test]
    fn test_witness_key_variants_exhaustive() {
        let pn = PageNumber::new(1).unwrap();

        // Construct one value of each WitnessKey variant.
        let a = WitnessKey::Page(pn);
        let b = WitnessKey::Cell {
            btree_root: pn,
            tag: 7,
        };
        let c = WitnessKey::ByteRange {
            page: pn,
            start: 0,
            len: 16,
        };

        assert!(matches!(a, WitnessKey::Page(_)));
        assert!(matches!(b, WitnessKey::Cell { .. }));
        assert!(matches!(c, WitnessKey::ByteRange { .. }));
    }
1434
    #[test]
    fn test_all_glossary_types_derive_debug_clone() {
        // Compile-time check: every glossary type is Debug + Clone. The
        // function body is empty — instantiating it is the assertion.
        fn assert_debug_clone<T: fmt::Debug + Clone>() {}

        assert_debug_clone::<TxnId>();
        assert_debug_clone::<CommitSeq>();
        assert_debug_clone::<TxnEpoch>();
        assert_debug_clone::<TxnToken>();
        assert_debug_clone::<SchemaEpoch>();
        assert_debug_clone::<Snapshot>();
        assert_debug_clone::<VersionPointer>();
        assert_debug_clone::<PageVersion>();
        assert_debug_clone::<ObjectId>();
        assert_debug_clone::<CommitCapsule>();
        assert_debug_clone::<CommitMarker>();
        assert_debug_clone::<Oti>();
        assert_debug_clone::<DecodeProof>();
        assert_debug_clone::<Cx<crate::cx::ComputeCaps>>();
        assert_debug_clone::<Budget>();
        assert_debug_clone::<Outcome>();
        assert_debug_clone::<EpochId>();
        assert_debug_clone::<SymbolValidityWindow>();
        assert_debug_clone::<RemoteCap>();
        assert_debug_clone::<SymbolAuthMasterKeyCap>();
        assert_debug_clone::<IdempotencyKey>();
        assert_debug_clone::<Saga>();
        assert_debug_clone::<Region>();
        assert_debug_clone::<WitnessKey>();
        assert_debug_clone::<RangeKey>();
        assert_debug_clone::<ReadWitness>();
        assert_debug_clone::<WriteWitness>();
        assert_debug_clone::<WitnessIndexSegment>();
        assert_debug_clone::<DependencyEdge>();
        assert_debug_clone::<CommitProof>();
        assert_debug_clone::<TableId>();
        assert_debug_clone::<IndexId>();
        assert_debug_clone::<RowId>();
        assert_debug_clone::<ColumnIdx>();
        assert_debug_clone::<BtreeRef>();
        assert_debug_clone::<SemanticKeyKind>();
        assert_debug_clone::<SemanticKeyRef>();
        assert_debug_clone::<StructuralEffects>();
        assert_debug_clone::<IntentFootprint>();
        assert_debug_clone::<RebaseExpr>();
        assert_debug_clone::<RebaseUnaryOp>();
        assert_debug_clone::<RebaseBinaryOp>();
        assert_debug_clone::<IntentOpKind>();
        assert_debug_clone::<IntentOp>();
        assert_debug_clone::<PageHistory>();
        assert_debug_clone::<ArcCache>();
        assert_debug_clone::<RootManifest>();
        assert_debug_clone::<TxnSlot>();
        assert_debug_clone::<OperatingMode>();
    }
1489
    #[test]
    fn test_remote_cap_from_bytes_roundtrip() {
        // from_bytes/as_bytes must round-trip the raw 16-byte token.
        let raw = [0xAB_u8; 16];
        let cap = RemoteCap::from_bytes(raw);
        assert_eq!(cap.as_bytes(), &raw);
    }
1496
    #[test]
    fn test_idempotency_key_derivation_is_deterministic() {
        let req = b"fetch:object=42";
        // Identical inputs derive identical keys; changing the first
        // argument changes the key.
        let a = IdempotencyKey::derive(7, req);
        let b = IdempotencyKey::derive(7, req);
        let c = IdempotencyKey::derive(8, req);
        assert_eq!(a, b);
        assert_ne!(a, c);
    }
1506
1507    #[test]
1508    fn test_remote_cap_roundtrip() {
1509        let raw = [0xAB_u8; 16];
1510        let cap = RemoteCap::from_bytes(raw);
1511        assert_eq!(cap.as_bytes(), &raw);
1512    }
1513
    #[test]
    fn test_symbol_auth_master_key_cap_roundtrip() {
        // from_bytes/as_bytes must round-trip the raw 32-byte key.
        let raw = [0xCD_u8; 32];
        let cap = SymbolAuthMasterKeyCap::from_bytes(raw);
        assert_eq!(cap.as_bytes(), &raw);
    }
1520
    #[test]
    fn test_idempotency_key_roundtrip() {
        // from_bytes/as_bytes must round-trip the raw 16-byte key.
        let raw = [0x11_u8; 16];
        let key = IdempotencyKey::from_bytes(raw);
        assert_eq!(key.as_bytes(), &raw);
    }
1527
    #[test]
    fn test_saga_constructor() {
        // Saga::new stores the idempotency key it was given.
        let key = IdempotencyKey::from_bytes([0x22_u8; 16]);
        let saga = Saga::new(key);
        assert_eq!(saga.key(), key);
    }
1534
    /// Proptest strategy generating arbitrary `Budget` values; the optional
    /// deadline is interpreted as whole milliseconds.
    fn arb_budget() -> impl Strategy<Value = Budget> {
        (
            prop::option::of(any::<u64>()),
            any::<u32>(),
            prop::option::of(any::<u64>()),
            any::<u8>(),
        )
            .prop_map(|(deadline_ms, poll_quota, cost_quota, priority)| Budget {
                deadline: deadline_ms.map(Duration::from_millis),
                poll_quota,
                cost_quota,
                priority,
            })
    }
1549
    proptest! {
        // Lattice law: meet is associative.
        #[test]
        fn prop_budget_combine_associative(a in arb_budget(), b in arb_budget(), c in arb_budget()) {
            prop_assert_eq!(a.meet(b).meet(c), a.meet(b.meet(c)));
        }

        // Lattice law: meet is commutative.
        #[test]
        fn prop_budget_combine_commutative(a in arb_budget(), b in arb_budget()) {
            prop_assert_eq!(a.meet(b), b.meet(a));
        }
    }
1561
1562    // ── bd-13r.5: RowId + AUTOINCREMENT Semantics ──
1563
    #[test]
    fn test_rowid_reuse_without_autoincrement() {
        // Normal tables track only max(existing), so freed ids below the
        // maximum can recur.
        let mut alloc = RowIdAllocator::new(RowIdMode::Normal);
        // Table has max rowid 5 → next is 6.
        let r = alloc.allocate(Some(RowId::new(5))).unwrap();
        assert_eq!(r.get(), 6);

        // After deleting row 6, if max existing drops to 3, next is 4 (reuse).
        let r = alloc.allocate(Some(RowId::new(3))).unwrap();
        assert_eq!(r.get(), 4);
    }
1575
    #[test]
    fn test_autoincrement_no_reuse() {
        // AUTOINCREMENT consults the sqlite_sequence high-water mark, never
        // just the current table maximum.
        let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);
        // First allocation, table max is 5.
        let r = alloc.allocate(Some(RowId::new(5))).unwrap();
        assert_eq!(r.get(), 6);

        // After deleting row 6, max existing drops to 3. But AUTOINCREMENT
        // uses high-water mark (6), so next is 7 (no reuse).
        let r = alloc.allocate(Some(RowId::new(3))).unwrap();
        assert_eq!(r.get(), 7);
    }
1588
    #[test]
    fn test_sqlite_sequence_updates() {
        // The high-water mark starts at 0 and tracks every successful
        // allocation.
        let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);
        assert_eq!(alloc.sequence_high_water(), 0);

        let _ = alloc.allocate(Some(RowId::new(10))).unwrap();
        assert_eq!(alloc.sequence_high_water(), 11);

        // Loading from DB.
        alloc.set_sequence_high_water(100);
        let r = alloc.allocate(Some(RowId::new(50))).unwrap();
        assert_eq!(r.get(), 101);
        assert_eq!(alloc.sequence_high_water(), 101);
    }
1603
    #[test]
    fn test_max_rowid_exhausted_autoincrement() {
        let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);
        // MAX_ROWID reached: AUTOINCREMENT must fail (it never falls back to
        // reuse or probing).
        let result = alloc.allocate(Some(RowId::MAX));
        assert!(result.is_err());
    }
1611
    #[test]
    fn test_max_rowid_exhausted_normal() {
        let mut alloc = RowIdAllocator::new(RowIdMode::Normal);
        // MAX_ROWID reached in normal mode: also fails (random probing
        // would happen at the B-tree level, not in the type allocator).
        let result = alloc.allocate(Some(RowId::MAX));
        assert!(result.is_err());
    }
1620
    #[test]
    fn test_rowid_allocate_empty_table() {
        // Both modes start at rowid 1 for an empty table.
        let mut alloc = RowIdAllocator::new(RowIdMode::Normal);
        let r = alloc.allocate(None).unwrap();
        assert_eq!(r.get(), 1);

        let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);
        let r = alloc.allocate(None).unwrap();
        assert_eq!(r.get(), 1);
    }
1631
1632    // ── bd-2blq: IntentOpKind, SemanticKeyRef, StructuralEffects, RowId ──
1633
    #[test]
    fn test_intent_op_all_variants_encode_decode_roundtrip() {
        use crate::SqliteValue;

        // One instance of every IntentOpKind variant; each must survive a
        // serde JSON encode/decode round-trip unchanged.
        let variants: Vec<IntentOpKind> = vec![
            IntentOpKind::Insert {
                table: TableId::new(1),
                key: RowId::new(100),
                record: vec![0x01, 0x02, 0x03],
            },
            IntentOpKind::Delete {
                table: TableId::new(2),
                key: RowId::new(200),
            },
            IntentOpKind::Update {
                table: TableId::new(3),
                key: RowId::new(300),
                new_record: vec![0x04, 0x05],
            },
            IntentOpKind::IndexInsert {
                index: IndexId::new(10),
                key: vec![0xAA, 0xBB],
                rowid: RowId::new(400),
            },
            IntentOpKind::IndexDelete {
                index: IndexId::new(11),
                key: vec![0xCC],
                rowid: RowId::new(500),
            },
            IntentOpKind::UpdateExpression {
                table: TableId::new(4),
                key: RowId::new(600),
                column_updates: vec![
                    (
                        ColumnIdx::new(0),
                        RebaseExpr::BinaryOp {
                            op: RebaseBinaryOp::Add,
                            left: Box::new(RebaseExpr::ColumnRef(ColumnIdx::new(0))),
                            right: Box::new(RebaseExpr::Literal(SqliteValue::Integer(1))),
                        },
                    ),
                    (
                        ColumnIdx::new(2),
                        RebaseExpr::Coalesce(vec![
                            RebaseExpr::ColumnRef(ColumnIdx::new(2)),
                            RebaseExpr::Literal(SqliteValue::Integer(0)),
                        ]),
                    ),
                ],
            },
        ];

        for variant in &variants {
            let op = IntentOp {
                schema_epoch: 42,
                footprint: IntentFootprint::empty(),
                op: variant.clone(),
            };

            let json = serde_json::to_string(&op).expect("serialize must succeed");
            let decoded: IntentOp = serde_json::from_str(&json).expect("deserialize must succeed");

            assert_eq!(decoded, op, "roundtrip failed for variant: {variant:?}");
        }
    }
1699
    #[test]
    fn test_semantic_key_ref_digest_stable() {
        // Digest must be stable across calls and sensitive to every input
        // component (key bytes, kind, btree).
        let table = BtreeRef::Table(TableId::new(42));
        let key_bytes = b"canonical_key_data";

        // Compute digest twice — must be identical.
        let d1 = SemanticKeyRef::compute_digest(SemanticKeyKind::TableRow, table, key_bytes);
        let d2 = SemanticKeyRef::compute_digest(SemanticKeyKind::TableRow, table, key_bytes);
        assert_eq!(d1, d2, "digest must be stable across calls");

        // Construct via `new()` — digest must match.
        let skr = SemanticKeyRef::new(table, SemanticKeyKind::TableRow, key_bytes);
        assert_eq!(skr.key_digest, d1);

        // Different key bytes produce different digest.
        let d3 = SemanticKeyRef::compute_digest(SemanticKeyKind::TableRow, table, b"different_key");
        assert_ne!(d1, d3);

        // Different kind produces different digest.
        let d4 = SemanticKeyRef::compute_digest(SemanticKeyKind::IndexEntry, table, key_bytes);
        assert_ne!(d1, d4);

        // Different btree produces different digest.
        let index = BtreeRef::Index(IndexId::new(42));
        let d5 = SemanticKeyRef::compute_digest(SemanticKeyKind::TableRow, index, key_bytes);
        assert_ne!(d1, d5);

        // Digest is 16 bytes (Trunc128).
        assert_eq!(d1.len(), 16);
    }
1730
    #[test]
    fn test_structural_effects_bitflags() {
        // Flag arithmetic, containment checks, and the u32 serde encoding.
        // NONE = 0.
        assert_eq!(StructuralEffects::NONE.bits(), 0);
        assert!(StructuralEffects::NONE.is_empty());

        // Simple leaf operations have no structural effects.
        let leaf = StructuralEffects::NONE;
        assert!(!leaf.contains(StructuralEffects::PAGE_SPLIT));
        assert!(!leaf.contains(StructuralEffects::FREELIST_MUTATE));

        // Page split + overflow alloc.
        let split_overflow = StructuralEffects::PAGE_SPLIT | StructuralEffects::OVERFLOW_ALLOC;
        assert!(split_overflow.contains(StructuralEffects::PAGE_SPLIT));
        assert!(split_overflow.contains(StructuralEffects::OVERFLOW_ALLOC));
        assert!(!split_overflow.contains(StructuralEffects::PAGE_MERGE));

        // All flags can be combined.
        let all = StructuralEffects::PAGE_SPLIT
            | StructuralEffects::PAGE_MERGE
            | StructuralEffects::BALANCE_MULTI_PAGE
            | StructuralEffects::OVERFLOW_ALLOC
            | StructuralEffects::OVERFLOW_MUTATE
            | StructuralEffects::FREELIST_MUTATE
            | StructuralEffects::POINTER_MAP_MUTATE
            | StructuralEffects::DEFRAG_MOVE_CELLS;
        assert!(all.contains(StructuralEffects::FREELIST_MUTATE));
        assert!(all.contains(StructuralEffects::DEFRAG_MOVE_CELLS));

        // Serde roundtrip.
        let json = serde_json::to_string(&split_overflow).expect("serialize");
        let decoded: StructuralEffects = serde_json::from_str(&json).expect("deserialize");
        assert_eq!(decoded, split_overflow);
    }
1765
    #[test]
    fn test_rowid_allocator_monotone_no_collision() {
        // Two writers taking turns on the same allocator (simulated
        // sequentially here) must produce disjoint, monotonically
        // increasing rowids.
        let mut alloc = RowIdAllocator::new(RowIdMode::Normal);
        let mut ids: Vec<RowId> = Vec::new();

        // Writer A gets range.
        for _ in 0..5 {
            let max_existing = ids.last().copied();
            let r = alloc.allocate(max_existing).unwrap();
            ids.push(r);
        }

        // Writer B continues from same state.
        for _ in 0..5 {
            let max_existing = ids.last().copied();
            let r = alloc.allocate(max_existing).unwrap();
            ids.push(r);
        }

        // Verify monotonic and disjoint.
        let raw_ids: Vec<i64> = ids.iter().map(|r| r.get()).collect();
        for window in raw_ids.windows(2) {
            assert!(
                window[1] > window[0],
                "RowIds must be strictly monotonically increasing: {} <= {}",
                window[0],
                window[1]
            );
        }

        // Verify no duplicates.
        let unique: HashSet<i64> = raw_ids.iter().copied().collect();
        assert_eq!(unique.len(), raw_ids.len(), "RowIds must be disjoint");
    }
1802
    #[test]
    fn test_rowid_allocator_bump_on_explicit_rowid() {
        // Inserting an explicit rowid is modeled here by raising the
        // sqlite_sequence high-water mark directly.
        let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);

        // Normal allocation: start at 1.
        let r1 = alloc.allocate(None).unwrap();
        assert_eq!(r1.get(), 1);

        // Explicit rowid 1000 bumps the high-water mark.
        alloc.set_sequence_high_water(1000);

        // Next allocation must be at least 1001.
        let r2 = alloc.allocate(Some(RowId::new(999))).unwrap();
        assert!(
            r2.get() >= 1001,
            "allocator must bump past explicit rowid 1000, got {}",
            r2.get()
        );

        // Verify subsequent allocations continue above.
        let r3 = alloc.allocate(Some(r2)).unwrap();
        assert!(r3.get() > r2.get());
    }
1826}