Skip to main content

fsqlite_types/
glossary.rs

1//! Glossary types (§0.3).
2//!
3//! This module defines (or re-exports) the core cross-cutting types referenced
4//! throughout the FrankenSQLite specification: MVCC identifiers, SSI witness
5//! keys, and ECS content-addressed identities.
6
7use std::fmt;
8use std::num::NonZeroU64;
9
10use crate::encoding::{
11    append_u16_le, append_u32_le, append_u64_le, read_u16_le, read_u32_le, read_u64_le,
12};
13use crate::{ObjectId, PageData, PageNumber};
14
15/// Monotonically increasing transaction identifier.
16///
17/// Domain: `1..=(2^62 - 1)`.
18///
19/// The top two bits are reserved for TxnSlot sentinel encoding (CLAIMING /
20/// CLEANING) per §5.6.2; sentinel values are *not* represented as `TxnId`.
21#[derive(
22    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
23)]
24#[repr(transparent)]
25pub struct TxnId(NonZeroU64);
26
27impl TxnId {
28    /// Maximum raw value representable by a real transaction id.
29    pub const MAX_RAW: u64 = (1_u64 << 62) - 1;
30
31    /// Construct a `TxnId` if `raw` is in-domain.
32    #[inline]
33    pub const fn new(raw: u64) -> Option<Self> {
34        if raw > Self::MAX_RAW {
35            return None;
36        }
37        match NonZeroU64::new(raw) {
38            Some(nz) => Some(Self(nz)),
39            None => None,
40        }
41    }
42
43    /// Get the raw u64 value.
44    #[inline]
45    pub const fn get(self) -> u64 {
46        self.0.get()
47    }
48
49    /// Return the next transaction id if it stays in-domain.
50    #[inline]
51    pub const fn checked_next(self) -> Option<Self> {
52        Self::new(self.get().wrapping_add(1))
53    }
54}
55
56impl fmt::Display for TxnId {
57    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
58        write!(f, "txn#{}", self.get())
59    }
60}
61
62impl TryFrom<u64> for TxnId {
63    type Error = InvalidTxnId;
64
65    fn try_from(value: u64) -> Result<Self, Self::Error> {
66        Self::new(value).ok_or(InvalidTxnId { raw: value })
67    }
68}
69
70/// Error returned when attempting to construct an out-of-domain `TxnId`.
71#[derive(Debug, Clone, Copy, PartialEq, Eq)]
72pub struct InvalidTxnId {
73    raw: u64,
74}
75
76impl fmt::Display for InvalidTxnId {
77    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
78        write!(
79            f,
80            "invalid TxnId {} (must satisfy 1 <= id <= {})",
81            self.raw,
82            TxnId::MAX_RAW
83        )
84    }
85}
86
87impl std::error::Error for InvalidTxnId {}
88
89/// Monotonically increasing global commit sequence number ("commit clock").
90#[derive(
91    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
92)]
93#[repr(transparent)]
94pub struct CommitSeq(u64);
95
96impl CommitSeq {
97    pub const ZERO: Self = Self(0);
98
99    #[inline]
100    pub const fn new(raw: u64) -> Self {
101        Self(raw)
102    }
103
104    #[inline]
105    pub const fn get(self) -> u64 {
106        self.0
107    }
108
109    #[inline]
110    #[must_use]
111    pub const fn next(self) -> Self {
112        Self(self.0.wrapping_add(1))
113    }
114}
115
116impl fmt::Display for CommitSeq {
117    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
118        write!(f, "cs#{}", self.get())
119    }
120}
121
122/// Per-transaction epoch used to disambiguate slot reuse across crashes.
123#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
124#[repr(transparent)]
125pub struct TxnEpoch(u32);
126
127impl TxnEpoch {
128    #[inline]
129    pub const fn new(raw: u32) -> Self {
130        Self(raw)
131    }
132
133    #[inline]
134    pub const fn get(self) -> u32 {
135        self.0
136    }
137}
138
139/// A stable transaction identity pair: (TxnId, TxnEpoch).
140#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
141pub struct TxnToken {
142    pub id: TxnId,
143    pub epoch: TxnEpoch,
144}
145
146impl TxnToken {
147    #[inline]
148    pub const fn new(id: TxnId, epoch: TxnEpoch) -> Self {
149        Self { id, epoch }
150    }
151}
152
153/// Monotonically increasing schema epoch (invalidates prepared statements).
154#[derive(
155    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
156)]
157#[repr(transparent)]
158pub struct SchemaEpoch(u64);
159
160impl SchemaEpoch {
161    pub const ZERO: Self = Self(0);
162
163    #[inline]
164    pub const fn new(raw: u64) -> Self {
165        Self(raw)
166    }
167
168    #[inline]
169    pub const fn get(self) -> u64 {
170        self.0
171    }
172}
173
174/// A frozen view of the database at BEGIN time.
175///
176/// Visibility check is a single integer comparison: `version.commit_seq <= snapshot.high`.
177#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
178pub struct Snapshot {
179    pub high: CommitSeq,
180    pub schema_epoch: SchemaEpoch,
181}
182
183impl Snapshot {
184    #[inline]
185    pub const fn new(high: CommitSeq, schema_epoch: SchemaEpoch) -> Self {
186        Self { high, schema_epoch }
187    }
188}
189
190/// Opaque pointer to a previous page version in a version chain.
191///
192/// In the implementation this is expected to be an arena index or object
193/// locator, not a raw pointer.
194#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
195#[repr(transparent)]
196pub struct VersionPointer(u64);
197
198impl VersionPointer {
199    #[inline]
200    pub const fn new(raw: u64) -> Self {
201        Self(raw)
202    }
203
204    #[inline]
205    pub const fn get(self) -> u64 {
206        self.0
207    }
208}
209
/// A single committed version of a database page.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PageVersion {
    /// Page this version belongs to.
    pub pgno: PageNumber,
    /// Commit sequence at which this version became visible.
    pub commit_seq: CommitSeq,
    /// Identity (id + epoch) of the transaction that created this version.
    pub created_by: TxnToken,
    /// Page payload for this version.
    pub data: PageData,
    /// Link to the prior version in the chain, `None` at the chain end.
    pub prev: Option<VersionPointer>,
}
219
/// Database operating mode (§7.10).
///
/// Selectable via `PRAGMA fsqlite.mode = compatibility | native`.
/// Per-database (not per-connection). Default: [`Compatibility`](Self::Compatibility)
/// (via the derived `Default` and the `#[default]` attribute below).
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, Hash, Default, serde::Serialize, serde::Deserialize,
)]
pub enum OperatingMode {
    /// Standard SQLite WAL format. Legacy reader interop, single coordinator
    /// holds `WAL_WRITE_LOCK`. Sidecars (`.wal-fec`, `.db-fec`) present but
    /// core `.db` stays compatible when checkpointed.
    #[default]
    Compatibility,
    /// ECS-based storage. `CommitCapsules` + `CommitMarkers`, no legacy
    /// interop, full concurrent writes.
    Native,
}
237
238impl fmt::Display for OperatingMode {
239    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
240        match self {
241            Self::Compatibility => f.write_str("compatibility"),
242            Self::Native => f.write_str("native"),
243        }
244    }
245}
246
247impl OperatingMode {
248    /// Parse from the PRAGMA string value (case-insensitive).
249    #[must_use]
250    pub fn from_pragma(s: &str) -> Option<Self> {
251        let lower = s.trim().to_ascii_lowercase();
252        match lower.as_str() {
253            "compatibility" | "compat" => Some(Self::Compatibility),
254            "native" => Some(Self::Native),
255            _ => None,
256        }
257    }
258
259    /// Whether this mode uses ECS-based storage.
260    #[must_use]
261    pub const fn is_native(self) -> bool {
262        matches!(self, Self::Native)
263    }
264
265    /// Whether legacy SQLite readers can attach.
266    #[must_use]
267    pub const fn legacy_readers_allowed(self) -> bool {
268        matches!(self, Self::Compatibility)
269    }
270}
271
/// A commit capsule is the durable ECS object that a native-mode commit
/// refers to (§7.11.1).
///
/// Contains the transaction's intent log, page deltas, snapshot basis,
/// and SSI witness-plane evidence references. Built deterministically by the
/// writer before submission to the `WriteCoordinator`.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct CommitCapsule {
    /// Content-addressed identity of this capsule ECS object.
    pub object_id: ObjectId,
    /// The commit-seq snapshot this transaction read from.
    pub snapshot_basis: CommitSeq,
    /// Semantic intent log (ordered operations).
    pub intent_log: Vec<IntentOp>,
    /// Page-level deltas: `(page_number, delta_bytes)`, in application order.
    pub page_deltas: Vec<(PageNumber, Vec<u8>)>,
    /// BLAKE3 digest of the transaction's read set.
    pub read_set_digest: [u8; 32],
    /// BLAKE3 digest of the transaction's write set.
    pub write_set_digest: [u8; 32],
    /// ECS `ObjectId` refs to `ReadWitness` objects.
    pub read_witness_refs: Vec<ObjectId>,
    /// ECS `ObjectId` refs to `WriteWitness` objects.
    pub write_witness_refs: Vec<ObjectId>,
    /// ECS `ObjectId` refs to `DependencyEdge` objects.
    pub dependency_edge_refs: Vec<ObjectId>,
    /// ECS `ObjectId` refs to `MergeWitness` objects.
    pub merge_witness_refs: Vec<ObjectId>,
}
301
/// Commit marker persisted in the commit chain (§7.11.2).
///
/// The marker is the point of no return: a transaction is committed if and
/// only if its marker is durable. The marker stream is append-only and
/// sequential; each record is small (~88 bytes V1) so fsync latency is
/// minimized.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct CommitMarker {
    /// Position of this commit on the global commit clock.
    pub commit_seq: CommitSeq,
    /// Monotonic non-decreasing: `max(now_unix_ns(), prev + 1)`.
    pub commit_time_unix_ns: u64,
    /// ECS identity of the `CommitCapsule` this marker commits.
    pub capsule_object_id: ObjectId,
    /// ECS identity of the associated `CommitProof` object.
    pub proof_object_id: ObjectId,
    /// Previous marker in the chain (`None` for the genesis marker).
    pub prev_marker: Option<ObjectId>,
    /// XXH3-128 integrity hash covering all preceding fields.
    pub integrity_hash: [u8; 16],
}
320
/// Wire size of a `CommitMarkerRecord` V1: 88 bytes.
///
/// Layout: `version(1) + flags(1) + commit_seq(8) + commit_time_unix_ns(8)
/// + capsule_oid(16) + proof_oid(16) + prev_marker_oid(16) + has_prev(1)
/// + integrity_hash(16) + reserved(5) = 88`.
pub const COMMIT_MARKER_RECORD_V1_SIZE: usize = 88;

/// Version byte for the current marker record format.
///
/// Stored at offset 0 of every record; `CommitMarker::from_record_bytes`
/// rejects records carrying any other version byte.
const COMMIT_MARKER_RECORD_VERSION: u8 = 1;
330
331impl CommitMarker {
332    /// Serialize to the canonical 88-byte V1 wire format (little-endian).
333    #[must_use]
334    pub fn to_record_bytes(&self) -> [u8; COMMIT_MARKER_RECORD_V1_SIZE] {
335        let mut buf = [0u8; COMMIT_MARKER_RECORD_V1_SIZE];
336        buf[0] = COMMIT_MARKER_RECORD_VERSION;
337        buf[1] = 0; // flags (reserved)
338
339        // commit_seq at offset 2
340        buf[2..10].copy_from_slice(&self.commit_seq.get().to_le_bytes());
341        // commit_time_unix_ns at offset 10
342        buf[10..18].copy_from_slice(&self.commit_time_unix_ns.to_le_bytes());
343        // capsule_object_id at offset 18
344        buf[18..34].copy_from_slice(self.capsule_object_id.as_bytes());
345        // proof_object_id at offset 34
346        buf[34..50].copy_from_slice(self.proof_object_id.as_bytes());
347        // prev_marker at offset 50 (16 bytes, all-zero if None)
348        if let Some(prev) = self.prev_marker {
349            buf[50..66].copy_from_slice(prev.as_bytes());
350        }
351        // has_prev flag at offset 66
352        buf[66] = u8::from(self.prev_marker.is_some());
353        // integrity_hash at offset 67
354        buf[67..83].copy_from_slice(&self.integrity_hash);
355        // bytes 83..88 are reserved (zero)
356        buf
357    }
358
359    /// Deserialize from the canonical 88-byte V1 wire format.
360    #[must_use]
361    pub fn from_record_bytes(data: &[u8; COMMIT_MARKER_RECORD_V1_SIZE]) -> Option<Self> {
362        if data[0] != COMMIT_MARKER_RECORD_VERSION {
363            return None;
364        }
365
366        let commit_seq = CommitSeq::new(u64::from_le_bytes(data[2..10].try_into().ok()?));
367        let commit_time_unix_ns = u64::from_le_bytes(data[10..18].try_into().ok()?);
368        let capsule_object_id = ObjectId::from_bytes(data[18..34].try_into().ok()?);
369        let proof_object_id = ObjectId::from_bytes(data[34..50].try_into().ok()?);
370        let has_prev = data[66] != 0;
371        let prev_marker = if has_prev {
372            Some(ObjectId::from_bytes(data[50..66].try_into().ok()?))
373        } else {
374            None
375        };
376        let mut integrity_hash = [0u8; 16];
377        integrity_hash.copy_from_slice(&data[67..83]);
378
379        Some(Self {
380            commit_seq,
381            commit_time_unix_ns,
382            capsule_object_id,
383            proof_object_id,
384            prev_marker,
385            integrity_hash,
386        })
387    }
388
389    /// Compute the integrity hash (XXH3-128) over all fields except the
390    /// integrity hash itself.
391    #[must_use]
392    pub fn compute_integrity_hash(&self) -> [u8; 16] {
393        let mut buf = Vec::with_capacity(74);
394        append_u64_le(&mut buf, self.commit_seq.get());
395        append_u64_le(&mut buf, self.commit_time_unix_ns);
396        buf.extend_from_slice(self.capsule_object_id.as_bytes());
397        buf.extend_from_slice(self.proof_object_id.as_bytes());
398        if let Some(prev) = self.prev_marker {
399            buf.push(1);
400            buf.extend_from_slice(prev.as_bytes());
401        } else {
402            buf.push(0);
403            buf.extend_from_slice(&[0u8; 16]);
404        }
405        let hash128 = xxhash_rust::xxh3::xxh3_128(&buf);
406        hash128.to_le_bytes()
407    }
408
409    /// Build a marker with the integrity hash computed automatically.
410    #[must_use]
411    pub fn new(
412        commit_seq: CommitSeq,
413        commit_time_unix_ns: u64,
414        capsule_object_id: ObjectId,
415        proof_object_id: ObjectId,
416        prev_marker: Option<ObjectId>,
417    ) -> Self {
418        let mut marker = Self {
419            commit_seq,
420            commit_time_unix_ns,
421            capsule_object_id,
422            proof_object_id,
423            prev_marker,
424            integrity_hash: [0u8; 16],
425        };
426        marker.integrity_hash = marker.compute_integrity_hash();
427        marker
428    }
429
430    /// Verify the integrity hash.
431    #[must_use]
432    pub fn verify_integrity(&self) -> bool {
433        self.integrity_hash == self.compute_integrity_hash()
434    }
435}
436
/// Object Transmission Information (RaptorQ / RFC 6330).
///
/// This is an internal encoding, NOT the RFC 6330 Common FEC OTI wire format.
/// Field widths are widened for implementation convenience:
/// - `f` is `u64` (RFC: 40-bit)
/// - `t` is `u32` (RFC: 16-bit) -- supports `page_size = 65_536`
/// - `z` is `u32` (RFC: 12-bit)
/// - `n` is `u32` (RFC: 8-bit)
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct Oti {
    /// Transfer length (bytes). RFC 6330 parameter `F`.
    pub f: u64,
    /// Alignment parameter. RFC 6330 parameter `Al`.
    pub al: u16,
    /// Symbol size (bytes). `u32` to represent all valid SQLite page sizes.
    pub t: u32,
    /// Number of source blocks. RFC 6330 parameter `Z`.
    pub z: u32,
    /// Number of sub-blocks. RFC 6330 parameter `N`.
    pub n: u32,
}
458
459/// Serialized size of [`Oti`] on the wire: `8 + 2 + 4 + 4 + 4 = 22` bytes.
460pub const OTI_WIRE_SIZE: usize = 22;
461
462impl Oti {
463    /// Serialize to canonical little-endian bytes.
464    #[must_use]
465    pub fn to_bytes(self) -> [u8; OTI_WIRE_SIZE] {
466        let mut as_vec = Vec::with_capacity(OTI_WIRE_SIZE);
467        append_u64_le(&mut as_vec, self.f);
468        append_u16_le(&mut as_vec, self.al);
469        append_u32_le(&mut as_vec, self.t);
470        append_u32_le(&mut as_vec, self.z);
471        append_u32_le(&mut as_vec, self.n);
472
473        let mut buf = [0u8; OTI_WIRE_SIZE];
474        buf.copy_from_slice(&as_vec);
475        buf
476    }
477
478    /// Deserialize from canonical little-endian bytes.
479    ///
480    /// Returns `None` if `data` is shorter than [`OTI_WIRE_SIZE`].
481    #[must_use]
482    pub fn from_bytes(data: &[u8]) -> Option<Self> {
483        if data.len() < OTI_WIRE_SIZE {
484            return None;
485        }
486        Some(Self {
487            f: read_u64_le(&data[0..8])?,
488            al: read_u16_le(&data[8..10])?,
489            t: read_u32_le(&data[10..14])?,
490            z: read_u32_le(&data[14..18])?,
491            n: read_u32_le(&data[18..22])?,
492        })
493    }
494}
495
/// Proof that a decode was correct (structure depends on codec mode).
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct DecodeProof {
    /// Identity of the decoded ECS object.
    pub object_id: ObjectId,
    /// Transmission parameters the decode was performed under.
    pub oti: Oti,
}
502
503/// Capability context + cooperative budget types.
504///
505/// Canonical definitions live in `crate::cx` (per `bd-3go.1`).
506pub use crate::cx::{Budget, Cx};
507
/// Result outcome lattice for cooperative cancellation and failure.
///
/// The derived `Ord` follows declaration order, so
/// `Ok < Err < Cancelled < Panicked` — i.e. later variants are "worse".
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
pub enum Outcome {
    /// Completed successfully.
    Ok,
    /// Completed with an error.
    Err,
    /// Stopped by cooperative cancellation.
    Cancelled,
    /// Terminated by a panic.
    Panicked,
}
518
/// Global epoch identifier (monotonically increasing).
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct EpochId(u64);

impl EpochId {
    /// The zero epoch (initial/bootstrap).
    pub const ZERO: Self = Self(0);

    /// Wrap a raw epoch counter.
    #[inline]
    pub const fn new(raw: u64) -> Self {
        Self(raw)
    }

    /// Get the raw epoch counter.
    #[inline]
    pub const fn get(self) -> u64 {
        self.0
    }

    /// Return the next epoch (current + 1).
    ///
    /// Returns `None` if the counter is already `u64::MAX`; the epoch is
    /// never wrapped or saturated.
    #[must_use]
    pub const fn next(self) -> Option<Self> {
        // `match` instead of `Option::map` because this is a `const fn`.
        match self.0.checked_add(1) {
            Some(val) => Some(Self(val)),
            None => None,
        }
    }
}
551
552/// Validity window for symbols or proofs (inclusive bounds).
553#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
554pub struct SymbolValidityWindow {
555    pub from_epoch: EpochId,
556    pub to_epoch: EpochId,
557}
558
559impl SymbolValidityWindow {
560    #[must_use]
561    pub const fn new(from_epoch: EpochId, to_epoch: EpochId) -> Self {
562        Self {
563            from_epoch,
564            to_epoch,
565        }
566    }
567
568    /// Build the default validity window `[0, current_epoch]` per §4.18.1.
569    #[must_use]
570    pub const fn default_window(current_epoch: EpochId) -> Self {
571        Self {
572            from_epoch: EpochId::ZERO,
573            to_epoch: current_epoch,
574        }
575    }
576
577    /// Check whether `epoch` falls within this window (inclusive bounds).
578    ///
579    /// Fail-closed: returns `false` for any epoch outside the window,
580    /// including future epochs (§4.18.1 normative requirement).
581    #[must_use]
582    pub const fn contains(&self, epoch: EpochId) -> bool {
583        epoch.0 >= self.from_epoch.0 && epoch.0 <= self.to_epoch.0
584    }
585}
586
587/// Capability token authorizing access to a remote endpoint.
588#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
589#[repr(transparent)]
590pub struct RemoteCap([u8; 16]);
591
592impl RemoteCap {
593    #[must_use]
594    pub const fn from_bytes(bytes: [u8; 16]) -> Self {
595        Self(bytes)
596    }
597
598    #[must_use]
599    pub const fn as_bytes(&self) -> &[u8; 16] {
600        &self.0
601    }
602}
603
604/// Capability token for the symbol authentication master key.
605#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
606#[repr(transparent)]
607pub struct SymbolAuthMasterKeyCap([u8; 32]);
608
609impl SymbolAuthMasterKeyCap {
610    #[must_use]
611    pub const fn from_bytes(bytes: [u8; 32]) -> Self {
612        Self(bytes)
613    }
614
615    #[must_use]
616    pub const fn as_bytes(&self) -> &[u8; 32] {
617        &self.0
618    }
619}
620
621/// Stable idempotency key for retry-safe operations.
622#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
623#[repr(transparent)]
624pub struct IdempotencyKey([u8; 16]);
625
626impl IdempotencyKey {
627    #[must_use]
628    pub const fn from_bytes(bytes: [u8; 16]) -> Self {
629        Self(bytes)
630    }
631
632    #[must_use]
633    pub const fn as_bytes(&self) -> &[u8; 16] {
634        &self.0
635    }
636}
637
638/// Saga identifier (ties together a multi-step idempotent workflow).
639#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
640pub struct Saga {
641    pub key: IdempotencyKey,
642}
643
644impl IdempotencyKey {
645    /// Deterministically derive a key from request bytes + ECS epoch.
646    ///
647    /// Domain separation:
648    /// `BLAKE3("fsqlite:idempotency:v1" || le_u64(ecs_epoch) || request_bytes)`.
649    #[must_use]
650    pub fn derive(ecs_epoch: u64, request_bytes: &[u8]) -> Self {
651        let mut hasher = blake3::Hasher::new();
652        hasher.update(b"fsqlite:idempotency:v1");
653        hasher.update(&ecs_epoch.to_le_bytes());
654        hasher.update(request_bytes);
655        let digest = hasher.finalize();
656        let mut out = [0_u8; 16];
657        out.copy_from_slice(&digest.as_bytes()[..16]);
658        Self(out)
659    }
660}
661
662impl Saga {
663    /// Create a saga identifier from an idempotency key.
664    #[must_use]
665    pub const fn new(key: IdempotencyKey) -> Self {
666        Self { key }
667    }
668
669    /// Access the saga idempotency key.
670    #[must_use]
671    pub const fn key(self) -> IdempotencyKey {
672        self.key
673    }
674}
675
676/// Logical region identifier (tiering / placement / replication scope).
677#[derive(
678    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
679)]
680#[repr(transparent)]
681pub struct Region(u32);
682
683impl Region {
684    #[inline]
685    pub const fn new(raw: u32) -> Self {
686        Self(raw)
687    }
688
689    #[inline]
690    pub const fn get(self) -> u32 {
691        self.0
692    }
693}
694
/// SSI witness key basis (§5.6.4.3).
///
/// Canonical key space for SSI rw-antidependency tracking. Always valid to
/// fall back to `Page(pgno)` — finer keys reduce false positives but never
/// compromise correctness.
#[derive(
    Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
pub enum WitnessKey {
    /// Coarse witness: entire page.
    Page(PageNumber),
    /// Semantic witness: specific B-tree cell identified by domain-separated hash.
    ///
    /// `tag` stores the full
    /// `xxh3_64("fsqlite:witness:cell:v1" || le_u32(btree_root) || key_bytes)`
    /// (see [`WitnessKey::cell_tag`]); the spec's low32 truncation (§5.6.4.3)
    /// is applied at the comparison site if needed.
    Cell {
        /// Root page of the b-tree the cell belongs to.
        btree_root: PageNumber,
        /// Leaf page that physically holds the cell.
        leaf_page: PageNumber,
        /// Domain-separated hash of the cell's canonical key bytes.
        tag: u64,
    },
    /// Semantic witness: structured byte range on a page.
    ByteRange {
        /// Page containing the range.
        page: PageNumber,
        /// Byte offset of the range start within the page.
        start: u32,
        /// Length of the range in bytes.
        len: u32,
    },
    /// Key range witness for reduced false positives on range scans (optional, advanced).
    KeyRange {
        /// Root page of the scanned b-tree.
        btree_root: PageNumber,
        /// Lower bound of the scanned key range (canonical key bytes).
        lo: Vec<u8>,
        /// Upper bound of the scanned key range (canonical key bytes).
        hi: Vec<u8>,
    },
    /// Custom namespace witness (extensibility point).
    Custom { namespace: u32, bytes: Vec<u8> },
}
729
730impl WitnessKey {
731    /// Derive a deterministic cell tag from a B-tree root page and canonical key bytes.
732    ///
733    /// Uses domain-separated xxh3_64 (§5.6.4.3):
734    /// `cell_tag = low32(xxh3_64("fsqlite:witness:cell:v1" || le_u32(btree_root_pgno) || key_bytes))`
735    #[must_use]
736    pub fn cell_tag(btree_root: PageNumber, canonical_key_bytes: &[u8]) -> u64 {
737        use xxhash_rust::xxh3::xxh3_64;
738        let mut buf =
739            Vec::with_capacity(b"fsqlite:witness:cell:v1".len() + 4 + canonical_key_bytes.len());
740        buf.extend_from_slice(b"fsqlite:witness:cell:v1");
741        buf.extend_from_slice(&btree_root.get().to_le_bytes());
742        buf.extend_from_slice(canonical_key_bytes);
743        // Store full 64-bit hash; low32 extraction done at comparison site if needed.
744        xxh3_64(&buf)
745    }
746
747    /// Create a cell witness for a point read/uniqueness check.
748    #[must_use]
749    pub fn for_cell_read(
750        btree_root: PageNumber,
751        leaf_page: PageNumber,
752        canonical_key_bytes: &[u8],
753    ) -> Self {
754        Self::Cell {
755            btree_root,
756            leaf_page,
757            tag: Self::cell_tag(btree_root, canonical_key_bytes),
758        }
759    }
760
761    /// Create page-level witnesses for a range scan (phantom protection).
762    ///
763    /// Returns one `Page(leaf_pgno)` witness per visited leaf page (§5.6.4.3).
764    #[must_use]
765    pub fn for_range_scan(leaf_pages: &[PageNumber]) -> Vec<Self> {
766        leaf_pages.iter().copied().map(Self::Page).collect()
767    }
768
769    /// Create a cell + page witness pair for a point write.
770    ///
771    /// Writes register both `Cell(btree_root, leaf_page, cell_tag)` AND
772    /// `Page(leaf_pgno)` as write witnesses (§5.6.4.3).
773    #[must_use]
774    pub fn for_point_write(
775        btree_root: PageNumber,
776        canonical_key_bytes: &[u8],
777        leaf_pgno: PageNumber,
778    ) -> (Self, Self) {
779        let cell = Self::Cell {
780            btree_root,
781            leaf_page: leaf_pgno,
782            tag: Self::cell_tag(btree_root, canonical_key_bytes),
783        };
784        let page = Self::Page(leaf_pgno);
785        (cell, page)
786    }
787
788    /// Return the page-like bucket associated with this witness.
789    ///
790    /// Page and byte-range witnesses map to their physical page. Cell and
791    /// key-range witnesses are bucketed by their B-tree root page so higher
792    /// layers can retain O(1) page/root-level witness indexes. Custom
793    /// witnesses are intentionally left unbucketed.
794    #[must_use]
795    pub const fn page_number(&self) -> Option<PageNumber> {
796        match self {
797            Self::Page(page) | Self::ByteRange { page, .. } => Some(*page),
798            Self::Cell { btree_root, .. } | Self::KeyRange { btree_root, .. } => Some(*btree_root),
799            Self::Custom { .. } => None,
800        }
801    }
802
803    /// Returns `true` if this is a coarse page-level witness.
804    #[must_use]
805    pub fn is_page(&self) -> bool {
806        matches!(self, Self::Page(_))
807    }
808
809    /// Returns `true` if this is a cell-level semantic witness.
810    #[must_use]
811    pub fn is_cell(&self) -> bool {
812        matches!(self, Self::Cell { .. })
813    }
814}
815
/// Witness hierarchy range key (prefix-based bucketing).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct RangeKey {
    /// Hierarchy level of this bucket.
    pub level: u8,
    /// Hash prefix identifying the bucket at that level.
    pub hash_prefix: u32,
}
822
/// A recorded SSI read witness.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct ReadWitness {
    /// Transaction that performed the read.
    pub txn: TxnId,
    /// Key basis describing what was read.
    pub key: WitnessKey,
}

/// A recorded SSI write witness.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct WriteWitness {
    /// Transaction that performed the write.
    pub txn: TxnId,
    /// Key basis describing what was written.
    pub key: WitnessKey,
}

/// A persisted segment of witness index updates.
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct WitnessIndexSegment {
    /// Epoch this segment belongs to.
    pub epoch: EpochId,
    /// Read witnesses recorded in this segment.
    pub reads: Vec<ReadWitness>,
    /// Write witnesses recorded in this segment.
    pub writes: Vec<WriteWitness>,
}
844
/// A dependency edge in the SSI serialization graph.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct DependencyEdge {
    /// Source transaction of the edge.
    pub from: TxnId,
    /// Destination transaction of the edge.
    pub to: TxnId,
    /// Witness key on which the dependency was detected.
    pub key_basis: WitnessKey,
    /// Transaction that recorded this edge.
    // NOTE(review): presumably `from -> to` is an rw-antidependency
    // (reader -> overwriter) per the SSI witness plane — confirm against §5.6.
    pub observed_by: TxnId,
}
853
/// Proof object tying together the dependency edges relevant to a commit
/// decision (§7.11.2 step 3).
///
/// Persisted as an ECS object by the `WriteCoordinator` after FCW + SSI
/// re-validation succeeds. Referenced by the corresponding `CommitMarker`.
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct CommitProof {
    /// The commit sequence this proof was generated for.
    pub commit_seq: CommitSeq,
    /// SSI dependency edges that were validated.
    pub edges: Vec<DependencyEdge>,
    /// ECS `ObjectId` refs to witness evidence objects.
    pub evidence_refs: Vec<ObjectId>,
}
868
869/// Identifier for a table b-tree root (logical, not physical file page).
870#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
871#[repr(transparent)]
872pub struct TableId(u32);
873
874impl TableId {
875    #[inline]
876    pub const fn new(raw: u32) -> Self {
877        Self(raw)
878    }
879
880    #[inline]
881    pub const fn get(self) -> u32 {
882        self.0
883    }
884}
885
886/// Identifier for an index b-tree root (logical, not physical file page).
887#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
888#[repr(transparent)]
889pub struct IndexId(u32);
890
891impl IndexId {
892    #[inline]
893    pub const fn new(raw: u32) -> Self {
894        Self(raw)
895    }
896
897    #[inline]
898    pub const fn get(self) -> u32 {
899        self.0
900    }
901}
902
903/// RowId / INTEGER PRIMARY KEY key space (SQLite uses signed 64-bit).
904#[derive(
905    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
906)]
907#[repr(transparent)]
908pub struct RowId(i64);
909
910impl RowId {
911    /// Maximum RowId value: 2^63 - 1.
912    pub const MAX: Self = Self(i64::MAX);
913
914    #[inline]
915    pub const fn new(raw: i64) -> Self {
916        Self(raw)
917    }
918
919    #[inline]
920    pub const fn get(self) -> i64 {
921        self.0
922    }
923}
924
/// Rowid allocation mode for a table.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum RowIdMode {
    /// Normal rowid: max(rowid)+1, deleted rowids may be reused.
    Normal,
    /// AUTOINCREMENT: never reuse deleted rowids. Uses sqlite_sequence
    /// high-water mark. Returns error at MAX_ROWID (`i64::MAX`).
    AutoIncrement,
}
934
/// Rowid allocator implementing SQLite's allocation semantics.
///
/// - Normal mode: next rowid = max(existing) + 1. Deleted rowids may be reused
///   when max rowid is not the table maximum.
/// - AUTOINCREMENT mode: next rowid = max(max_existing, sqlite_sequence) + 1.
///   Rowids are never reused. When MAX_ROWID is reached, allocation fails.
#[derive(Debug, Clone)]
pub struct RowIdAllocator {
    /// Allocation strategy for the table this allocator serves.
    mode: RowIdMode,
    /// High-water mark from sqlite_sequence (AUTOINCREMENT only).
    sequence_high_water: i64,
}
947
948impl RowIdAllocator {
949    /// Create a new allocator.
950    pub const fn new(mode: RowIdMode) -> Self {
951        Self {
952            mode,
953            sequence_high_water: 0,
954        }
955    }
956
957    /// Allocate the next rowid given the current maximum rowid in the table.
958    ///
959    /// `max_existing` is `None` if the table is empty.
960    ///
961    /// Returns `Ok(rowid)` or `Err` if MAX_ROWID is exhausted (AUTOINCREMENT only).
962    pub fn allocate(&mut self, max_existing: Option<RowId>) -> Result<RowId, RowIdExhausted> {
963        let max_val = max_existing.map_or(0, RowId::get);
964
965        match self.mode {
966            RowIdMode::Normal => {
967                if max_val < i64::MAX {
968                    Ok(RowId::new(max_val + 1))
969                } else {
970                    // MAX_ROWID reached: SQLite tries random probing.
971                    // For the type-level implementation, we signal exhaustion.
972                    Err(RowIdExhausted)
973                }
974            }
975            RowIdMode::AutoIncrement => {
976                let base = max_val.max(self.sequence_high_water);
977                if base == i64::MAX {
978                    return Err(RowIdExhausted);
979                }
980                let next = base + 1;
981                self.sequence_high_water = next;
982                Ok(RowId::new(next))
983            }
984        }
985    }
986
987    /// Get the current sqlite_sequence high-water mark.
988    pub const fn sequence_high_water(&self) -> i64 {
989        self.sequence_high_water
990    }
991
992    /// Set the sqlite_sequence high-water mark (loaded from DB).
993    pub fn set_sequence_high_water(&mut self, val: i64) {
994        self.sequence_high_water = val;
995    }
996}
997
/// Error when rowid space is exhausted.
///
/// Zero-sized marker error; its `Display` text mirrors SQLite's
/// "database or object is full" message.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RowIdExhausted;
1001
1002impl std::fmt::Display for RowIdExhausted {
1003    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1004        f.write_str("database or object is full (rowid exhausted)")
1005    }
1006}
1007
/// Column index within a table (0-based).
///
/// Plain newtype over `u32`; no bounds checking against the table's actual
/// column count is performed here.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct ColumnIdx(u32);
1014
1015impl ColumnIdx {
1016    #[inline]
1017    pub const fn new(raw: u32) -> Self {
1018        Self(raw)
1019    }
1020
1021    #[inline]
1022    pub const fn get(self) -> u32 {
1023        self.0
1024    }
1025}
1026
1027// ---------------------------------------------------------------------------
1028// §5.10.1 Intent Logs — Semantic Operations + Footprints
1029// ---------------------------------------------------------------------------
1030
/// Reference to a B-tree (either table or index).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum BtreeRef {
    /// A table B-tree, identified by its `TableId`.
    Table(TableId),
    /// An index B-tree, identified by its `IndexId`.
    Index(IndexId),
}
1037
/// Kind of semantic key reference.
///
/// Encoded as a one-byte discriminant (0 / 1) in the key digest; see
/// `SemanticKeyRef::compute_digest`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum SemanticKeyKind {
    /// A row in a table B-tree.
    TableRow,
    /// An entry in an index B-tree.
    IndexEntry,
}
1044
/// Semantic key reference with a stable BLAKE3-based digest.
///
/// `key_digest = Trunc128(BLAKE3("fsqlite:btree:key:v1" || kind || btree_id || canonical_key_bytes))`
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct SemanticKeyRef {
    /// Which B-tree the key lives in.
    pub btree: BtreeRef,
    /// Whether the key names a table row or an index entry.
    pub kind: SemanticKeyKind,
    /// 16-byte truncated BLAKE3 digest of the canonical key bytes.
    pub key_digest: [u8; 16],
}
1054
1055impl SemanticKeyRef {
1056    /// Domain separation prefix for the key digest.
1057    const DOMAIN_SEP: &'static [u8] = b"fsqlite:btree:key:v1";
1058
1059    /// Compute the key digest from kind, btree id, and canonical key bytes.
1060    #[must_use]
1061    pub fn compute_digest(
1062        kind: SemanticKeyKind,
1063        btree: BtreeRef,
1064        canonical_key_bytes: &[u8],
1065    ) -> [u8; 16] {
1066        let mut hasher = blake3::Hasher::new();
1067        hasher.update(Self::DOMAIN_SEP);
1068        hasher.update(&[match kind {
1069            SemanticKeyKind::TableRow => 0,
1070            SemanticKeyKind::IndexEntry => 1,
1071        }]);
1072        match btree {
1073            BtreeRef::Table(id) => {
1074                hasher.update(&[0]);
1075                hasher.update(&id.get().to_le_bytes());
1076            }
1077            BtreeRef::Index(id) => {
1078                hasher.update(&[1]);
1079                hasher.update(&id.get().to_le_bytes());
1080            }
1081        }
1082        hasher.update(canonical_key_bytes);
1083        let hash = hasher.finalize();
1084        let bytes = hash.as_bytes();
1085        let mut digest = [0u8; 16];
1086        digest.copy_from_slice(&bytes[..16]);
1087        digest
1088    }
1089
1090    /// Construct a `SemanticKeyRef` by computing the digest.
1091    #[must_use]
1092    pub fn new(btree: BtreeRef, kind: SemanticKeyKind, canonical_key_bytes: &[u8]) -> Self {
1093        let key_digest = Self::compute_digest(kind, btree, canonical_key_bytes);
1094        Self {
1095            btree,
1096            kind,
1097            key_digest,
1098        }
1099    }
1100}
1101
bitflags::bitflags! {
    /// Structural side effects that make operations non-commutative.
    ///
    /// The raw `u32` bit pattern is what gets serialized (see the serde
    /// impls for this type), so existing bit assignments must never be
    /// renumbered.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct StructuralEffects: u32 {
        /// No structural effects (simple leaf operations).
        const NONE = 0;
        /// A B-tree page was split.
        const PAGE_SPLIT = 1;
        /// A B-tree page was merged.
        const PAGE_MERGE = 2;
        /// Multi-page balance operation.
        const BALANCE_MULTI_PAGE = 4;
        /// An overflow page was allocated.
        const OVERFLOW_ALLOC = 8;
        /// An overflow chain was mutated.
        const OVERFLOW_MUTATE = 16;
        /// The freelist was modified.
        const FREELIST_MUTATE = 32;
        /// The pointer map was modified.
        const POINTER_MAP_MUTATE = 64;
        /// Cells were moved during defragmentation.
        const DEFRAG_MOVE_CELLS = 128;
    }
}
1126
1127impl serde::Serialize for StructuralEffects {
1128    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
1129        self.bits().serialize(serializer)
1130    }
1131}
1132
1133impl<'de> serde::Deserialize<'de> for StructuralEffects {
1134    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
1135        let bits = u32::deserialize(deserializer)?;
1136        Self::from_bits(bits).ok_or_else(|| {
1137            serde::de::Error::custom(format!("invalid StructuralEffects bits: {bits:#x}"))
1138        })
1139    }
1140}
1141
1142impl Default for StructuralEffects {
1143    fn default() -> Self {
1144        Self::NONE
1145    }
1146}
1147
/// Semantic read/write footprint of an intent operation (§5.10.1).
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct IntentFootprint {
    /// Semantic keys the operation read.
    pub reads: Vec<SemanticKeyRef>,
    /// Semantic keys the operation wrote.
    pub writes: Vec<SemanticKeyRef>,
    /// Structural side effects (splits, merges, freelist changes, ...).
    pub structural: StructuralEffects,
}
1155
1156impl IntentFootprint {
1157    /// Create an empty footprint with no effects.
1158    #[must_use]
1159    pub fn empty() -> Self {
1160        Self {
1161            reads: Vec::new(),
1162            writes: Vec::new(),
1163            structural: StructuralEffects::NONE,
1164        }
1165    }
1166}
1167
1168impl Default for IntentFootprint {
1169    fn default() -> Self {
1170        Self::empty()
1171    }
1172}
1173
/// Replayable expression AST for deterministic rebase (§5.10.1).
///
/// Allowed forms are intentionally strict: only proven-deterministic
/// expressions may appear. Enforced by `expr_is_rebase_safe()`.
///
/// NOTE(review): `Eq` is deliberately not derived — presumably because
/// `SqliteValue` literals can hold SQL REALs (floats); confirm.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub enum RebaseExpr {
    /// Reference to a column in the current row.
    ColumnRef(ColumnIdx),
    /// A literal value.
    Literal(crate::SqliteValue),
    /// A unary operation.
    UnaryOp {
        op: RebaseUnaryOp,
        operand: Box<Self>,
    },
    /// A binary operation.
    BinaryOp {
        op: RebaseBinaryOp,
        left: Box<Self>,
        right: Box<Self>,
    },
    /// A deterministic function call.
    FunctionCall { name: String, args: Vec<Self> },
    /// CAST(expr AS type).
    Cast { expr: Box<Self>, type_name: String },
    /// CASE WHEN ... THEN ... ELSE ... END.
    Case {
        operand: Option<Box<Self>>,
        when_clauses: Vec<(Self, Self)>,
        else_clause: Option<Box<Self>>,
    },
    /// COALESCE(expr, expr, ...).
    Coalesce(Vec<Self>),
    /// NULLIF(expr, expr).
    NullIf { left: Box<Self>, right: Box<Self> },
    /// String concatenation (||).
    Concat { left: Box<Self>, right: Box<Self> },
}
1212
/// Unary operators allowed in rebase expressions.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum RebaseUnaryOp {
    /// Arithmetic negation (`-x`).
    Negate,
    /// Bitwise complement (`~x`).
    BitwiseNot,
    /// Logical NOT.
    Not,
}
1220
/// Binary operators allowed in rebase expressions.
///
/// All arithmetic/bitwise; comparison operators are intentionally absent
/// from this list.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum RebaseBinaryOp {
    Add,
    Subtract,
    Multiply,
    Divide,
    Remainder,
    BitwiseAnd,
    BitwiseOr,
    ShiftLeft,
    ShiftRight,
}
1234
/// The kind of semantic operation in an intent log entry.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub enum IntentOpKind {
    /// Insert row `key` into `table` with the given record bytes.
    Insert {
        table: TableId,
        key: RowId,
        record: Vec<u8>,
    },
    /// Delete row `key` from `table`.
    Delete {
        table: TableId,
        key: RowId,
    },
    /// Replace the record of row `key` in `table`.
    Update {
        table: TableId,
        key: RowId,
        new_record: Vec<u8>,
    },
    /// Insert an index entry mapping `key` bytes to `rowid`.
    IndexInsert {
        index: IndexId,
        key: Vec<u8>,
        rowid: RowId,
    },
    /// Delete the index entry for `key` / `rowid`.
    IndexDelete {
        index: IndexId,
        key: Vec<u8>,
        rowid: RowId,
    },
    /// Column-level rebase expressions for deterministic rebase (§5.10.1).
    UpdateExpression {
        table: TableId,
        key: RowId,
        column_updates: Vec<(ColumnIdx, RebaseExpr)>,
    },
}
1269
/// A single entry in the transaction intent log (§5.10.1).
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct IntentOp {
    /// Schema epoch captured with the operation.
    pub schema_epoch: u64,
    /// Semantic read/write footprint of `op`.
    pub footprint: IntentFootprint,
    /// The semantic operation itself.
    pub op: IntentOpKind,
}
1277
/// Transaction intent log: an ordered sequence of semantic operations.
///
/// Order matters: entries are recorded in execution order.
pub type IntentLog = Vec<IntentOp>;
1280
/// History of versions for a page, used by debugging and invariant checks.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PageHistory {
    /// The page this history describes.
    pub pgno: PageNumber,
    /// Recorded versions of the page (ordering maintained by the producer).
    pub versions: Vec<PageVersion>,
}
1287
/// ARC cache placeholder type (Adaptive Replacement Cache).
///
/// The actual ARC algorithm lives in `fsqlite-pager`; this type exists to keep
/// glossary terminology stable across crates. Zero-sized marker — it carries
/// no state.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct ArcCache;
1294
/// Root manifest tying together the durable roots of the database state.
///
/// `ecs_epoch` is the monotone epoch counter stored durably here and mirrored
/// in `SharedMemoryLayout.ecs_epoch` (§4.18, §5.6.1).
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct RootManifest {
    /// Schema epoch in effect for this manifest.
    pub schema_epoch: SchemaEpoch,
    /// Page number of the database root page.
    pub root_page: PageNumber,
    /// Global ECS epoch — monotonically increasing, never reused (§4.18).
    pub ecs_epoch: EpochId,
}
1306
/// Transaction slot index (cross-process shared memory slot).
///
/// Plain index newtype; no validation is performed at construction.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct TxnSlot(u32);
1313
1314impl TxnSlot {
1315    #[inline]
1316    pub const fn new(raw: u32) -> Self {
1317        Self(raw)
1318    }
1319
1320    #[inline]
1321    pub const fn get(self) -> u32 {
1322        self.0
1323    }
1324}
1325
// Unit and property tests for the glossary types defined in this module.
#[cfg(test)]
mod tests {
    use std::collections::HashSet;
    use std::time::Duration;

    use proptest::prelude::*;

    use crate::PayloadHash;

    use super::*;

    #[test]
    fn test_txn_id_nonzero_enforced() {
        assert!(TxnId::new(0).is_none());
        assert!(TxnId::try_from(0_u64).is_err());
        assert!(TxnId::new(1).is_some());
        assert!(TxnId::new(TxnId::MAX_RAW).is_some());
    }

    #[test]
    fn test_txn_id_62_bit_max() {
        assert!(TxnId::new(TxnId::MAX_RAW + 1).is_none());
        assert!(TxnId::try_from(TxnId::MAX_RAW + 1).is_err());
    }

    #[test]
    fn test_object_id_16_bytes_blake3_truncation() {
        let header = b"hdr:v1";
        let payload = b"payload";
        let oid = ObjectId::derive(header, PayloadHash::blake3(payload));
        assert_eq!(oid.as_bytes().len(), ObjectId::LEN);
    }

    #[test]
    fn test_object_id_content_addressed() {
        let header = b"hdr:v1";
        let payload = b"payload";
        let a = ObjectId::derive(header, PayloadHash::blake3(payload));
        let b = ObjectId::derive(header, PayloadHash::blake3(payload));
        assert_eq!(a, b);

        let c = ObjectId::derive(header, PayloadHash::blake3(b"payload2"));
        assert_ne!(a, c);
    }

    #[test]
    fn prop_object_id_collision_resistance() {
        let header = b"hdr:v1";
        let mut ids = HashSet::<ObjectId>::with_capacity(10_000);

        // Simple 64-bit LCG; generator quality is irrelevant here — only
        // determinism and distinct inputs matter.
        let mut state: u64 = 0xD6E8_FEB8_6659_FD93;
        for i in 0..10_000_u64 {
            // Deterministic pseudo-randomness, but ensure distinct inputs by embedding i.
            state = state
                .wrapping_mul(6_364_136_223_846_793_005_u64)
                .wrapping_add(1_442_695_040_888_963_407_u64);

            let mut payload = [0_u8; 32];
            payload[..8].copy_from_slice(&i.to_le_bytes());
            payload[8..16].copy_from_slice(&state.to_le_bytes());
            payload[16..24].copy_from_slice(&state.rotate_left(17).to_le_bytes());
            payload[24..32].copy_from_slice(&state.rotate_left(41).to_le_bytes());

            let oid = ObjectId::derive(header, PayloadHash::blake3(&payload));
            assert!(ids.insert(oid), "ObjectId collision at i={i}");
        }
    }

    #[test]
    fn test_snapshot_fields() {
        let snap = Snapshot::new(CommitSeq::new(7), SchemaEpoch::new(9));
        assert_eq!(snap.high.get(), 7);
        assert_eq!(snap.schema_epoch.get(), 9);
    }

    #[test]
    fn test_oti_field_widths_allow_large_symbol_size() {
        // §3.5.2 requires T/Z/N to represent values >= 65536.
        let oti = Oti {
            f: 1,
            al: 4,
            t: 65_536,
            z: 1,
            n: 1,
        };
        assert_eq!(oti.t, 65_536);
    }

    #[test]
    fn test_budget_product_lattice_semantics() {
        // meet() takes the tighter deadline/quotas and the higher priority,
        // component-wise, as asserted below.
        let a = Budget {
            deadline: Some(Duration::from_millis(100)),
            poll_quota: 10,
            cost_quota: Some(500),
            priority: 1,
        };
        let b = Budget {
            deadline: Some(Duration::from_millis(50)),
            poll_quota: 20,
            cost_quota: Some(400),
            priority: 9,
        };
        let c = a.meet(b);
        assert_eq!(c.deadline, Some(Duration::from_millis(50)));
        assert_eq!(c.poll_quota, 10);
        assert_eq!(c.cost_quota, Some(400));
        assert_eq!(c.priority, 9);
    }

    #[test]
    fn test_outcome_ordering_lattice() {
        assert!(Outcome::Ok < Outcome::Err);
        assert!(Outcome::Err < Outcome::Cancelled);
        assert!(Outcome::Cancelled < Outcome::Panicked);
    }

    #[test]
    fn test_witness_key_variants_exhaustive() {
        let pn = PageNumber::new(1).unwrap();
        let a = WitnessKey::Page(pn);
        let b = WitnessKey::Cell {
            btree_root: pn,
            leaf_page: pn,
            tag: 7,
        };
        let c = WitnessKey::ByteRange {
            page: pn,
            start: 0,
            len: 16,
        };

        assert!(matches!(a, WitnessKey::Page(_)));
        assert!(matches!(b, WitnessKey::Cell { .. }));
        assert!(matches!(c, WitnessKey::ByteRange { .. }));
    }

    #[test]
    fn test_all_glossary_types_derive_debug_clone() {
        // Monomorphization-only check: this compiles iff T: Debug + Clone.
        fn assert_debug_clone<T: fmt::Debug + Clone>() {}

        assert_debug_clone::<TxnId>();
        assert_debug_clone::<CommitSeq>();
        assert_debug_clone::<TxnEpoch>();
        assert_debug_clone::<TxnToken>();
        assert_debug_clone::<SchemaEpoch>();
        assert_debug_clone::<Snapshot>();
        assert_debug_clone::<VersionPointer>();
        assert_debug_clone::<PageVersion>();
        assert_debug_clone::<ObjectId>();
        assert_debug_clone::<CommitCapsule>();
        assert_debug_clone::<CommitMarker>();
        assert_debug_clone::<Oti>();
        assert_debug_clone::<DecodeProof>();
        assert_debug_clone::<Cx<crate::cx::ComputeCaps>>();
        assert_debug_clone::<Budget>();
        assert_debug_clone::<Outcome>();
        assert_debug_clone::<EpochId>();
        assert_debug_clone::<SymbolValidityWindow>();
        assert_debug_clone::<RemoteCap>();
        assert_debug_clone::<SymbolAuthMasterKeyCap>();
        assert_debug_clone::<IdempotencyKey>();
        assert_debug_clone::<Saga>();
        assert_debug_clone::<Region>();
        assert_debug_clone::<WitnessKey>();
        assert_debug_clone::<RangeKey>();
        assert_debug_clone::<ReadWitness>();
        assert_debug_clone::<WriteWitness>();
        assert_debug_clone::<WitnessIndexSegment>();
        assert_debug_clone::<DependencyEdge>();
        assert_debug_clone::<CommitProof>();
        assert_debug_clone::<TableId>();
        assert_debug_clone::<IndexId>();
        assert_debug_clone::<RowId>();
        assert_debug_clone::<ColumnIdx>();
        assert_debug_clone::<BtreeRef>();
        assert_debug_clone::<SemanticKeyKind>();
        assert_debug_clone::<SemanticKeyRef>();
        assert_debug_clone::<StructuralEffects>();
        assert_debug_clone::<IntentFootprint>();
        assert_debug_clone::<RebaseExpr>();
        assert_debug_clone::<RebaseUnaryOp>();
        assert_debug_clone::<RebaseBinaryOp>();
        assert_debug_clone::<IntentOpKind>();
        assert_debug_clone::<IntentOp>();
        assert_debug_clone::<PageHistory>();
        assert_debug_clone::<ArcCache>();
        assert_debug_clone::<RootManifest>();
        assert_debug_clone::<TxnSlot>();
        assert_debug_clone::<OperatingMode>();
    }

    #[test]
    fn test_remote_cap_from_bytes_roundtrip() {
        let raw = [0xAB_u8; 16];
        let cap = RemoteCap::from_bytes(raw);
        assert_eq!(cap.as_bytes(), &raw);
    }

    #[test]
    fn test_idempotency_key_derivation_is_deterministic() {
        let req = b"fetch:object=42";
        let a = IdempotencyKey::derive(7, req);
        let b = IdempotencyKey::derive(7, req);
        let c = IdempotencyKey::derive(8, req);
        assert_eq!(a, b);
        assert_ne!(a, c);
    }

    #[test]
    fn test_remote_cap_roundtrip() {
        let raw = [0xAB_u8; 16];
        let cap = RemoteCap::from_bytes(raw);
        assert_eq!(cap.as_bytes(), &raw);
    }

    #[test]
    fn test_symbol_auth_master_key_cap_roundtrip() {
        let raw = [0xCD_u8; 32];
        let cap = SymbolAuthMasterKeyCap::from_bytes(raw);
        assert_eq!(cap.as_bytes(), &raw);
    }

    #[test]
    fn test_idempotency_key_roundtrip() {
        let raw = [0x11_u8; 16];
        let key = IdempotencyKey::from_bytes(raw);
        assert_eq!(key.as_bytes(), &raw);
    }

    #[test]
    fn test_saga_constructor() {
        let key = IdempotencyKey::from_bytes([0x22_u8; 16]);
        let saga = Saga::new(key);
        assert_eq!(saga.key(), key);
    }

    // Strategy producing arbitrary `Budget`s for the lattice-law properties.
    fn arb_budget() -> impl Strategy<Value = Budget> {
        (
            prop::option::of(any::<u64>()),
            any::<u32>(),
            prop::option::of(any::<u64>()),
            any::<u8>(),
        )
            .prop_map(|(deadline_ms, poll_quota, cost_quota, priority)| Budget {
                deadline: deadline_ms.map(Duration::from_millis),
                poll_quota,
                cost_quota,
                priority,
            })
    }

    proptest! {
        #[test]
        fn prop_budget_combine_associative(a in arb_budget(), b in arb_budget(), c in arb_budget()) {
            prop_assert_eq!(a.meet(b).meet(c), a.meet(b.meet(c)));
        }

        #[test]
        fn prop_budget_combine_commutative(a in arb_budget(), b in arb_budget()) {
            prop_assert_eq!(a.meet(b), b.meet(a));
        }
    }

    // ── bd-13r.5: RowId + AUTOINCREMENT Semantics ──

    #[test]
    fn test_rowid_reuse_without_autoincrement() {
        let mut alloc = RowIdAllocator::new(RowIdMode::Normal);
        // Table has max rowid 5 → next is 6.
        let r = alloc.allocate(Some(RowId::new(5))).unwrap();
        assert_eq!(r.get(), 6);

        // After deleting row 6, if max existing drops to 3, next is 4 (reuse).
        let r = alloc.allocate(Some(RowId::new(3))).unwrap();
        assert_eq!(r.get(), 4);
    }

    #[test]
    fn test_autoincrement_no_reuse() {
        let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);
        // First allocation, table max is 5.
        let r = alloc.allocate(Some(RowId::new(5))).unwrap();
        assert_eq!(r.get(), 6);

        // After deleting row 6, max existing drops to 3. But AUTOINCREMENT
        // uses high-water mark (6), so next is 7 (no reuse).
        let r = alloc.allocate(Some(RowId::new(3))).unwrap();
        assert_eq!(r.get(), 7);
    }

    #[test]
    fn test_sqlite_sequence_updates() {
        let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);
        assert_eq!(alloc.sequence_high_water(), 0);

        let _ = alloc.allocate(Some(RowId::new(10))).unwrap();
        assert_eq!(alloc.sequence_high_water(), 11);

        // Loading from DB.
        alloc.set_sequence_high_water(100);
        let r = alloc.allocate(Some(RowId::new(50))).unwrap();
        assert_eq!(r.get(), 101);
        assert_eq!(alloc.sequence_high_water(), 101);
    }

    #[test]
    fn test_max_rowid_exhausted_autoincrement() {
        let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);
        // MAX_ROWID reached: AUTOINCREMENT must fail.
        let result = alloc.allocate(Some(RowId::MAX));
        assert!(result.is_err());
    }

    #[test]
    fn test_max_rowid_exhausted_normal() {
        let mut alloc = RowIdAllocator::new(RowIdMode::Normal);
        // MAX_ROWID reached in normal mode: also fails (random probing
        // would happen at the B-tree level, not in the type allocator).
        let result = alloc.allocate(Some(RowId::MAX));
        assert!(result.is_err());
    }

    #[test]
    fn test_rowid_allocate_empty_table() {
        let mut alloc = RowIdAllocator::new(RowIdMode::Normal);
        let r = alloc.allocate(None).unwrap();
        assert_eq!(r.get(), 1);

        let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);
        let r = alloc.allocate(None).unwrap();
        assert_eq!(r.get(), 1);
    }

    // ── bd-2blq: IntentOpKind, SemanticKeyRef, StructuralEffects, RowId ──

    #[test]
    fn test_intent_op_all_variants_encode_decode_roundtrip() {
        use crate::SqliteValue;

        // One instance of every IntentOpKind variant; each must survive a
        // serde JSON roundtrip unchanged.
        let variants: Vec<IntentOpKind> = vec![
            IntentOpKind::Insert {
                table: TableId::new(1),
                key: RowId::new(100),
                record: vec![0x01, 0x02, 0x03],
            },
            IntentOpKind::Delete {
                table: TableId::new(2),
                key: RowId::new(200),
            },
            IntentOpKind::Update {
                table: TableId::new(3),
                key: RowId::new(300),
                new_record: vec![0x04, 0x05],
            },
            IntentOpKind::IndexInsert {
                index: IndexId::new(10),
                key: vec![0xAA, 0xBB],
                rowid: RowId::new(400),
            },
            IntentOpKind::IndexDelete {
                index: IndexId::new(11),
                key: vec![0xCC],
                rowid: RowId::new(500),
            },
            IntentOpKind::UpdateExpression {
                table: TableId::new(4),
                key: RowId::new(600),
                column_updates: vec![
                    (
                        ColumnIdx::new(0),
                        RebaseExpr::BinaryOp {
                            op: RebaseBinaryOp::Add,
                            left: Box::new(RebaseExpr::ColumnRef(ColumnIdx::new(0))),
                            right: Box::new(RebaseExpr::Literal(SqliteValue::Integer(1))),
                        },
                    ),
                    (
                        ColumnIdx::new(2),
                        RebaseExpr::Coalesce(vec![
                            RebaseExpr::ColumnRef(ColumnIdx::new(2)),
                            RebaseExpr::Literal(SqliteValue::Integer(0)),
                        ]),
                    ),
                ],
            },
        ];

        for variant in &variants {
            let op = IntentOp {
                schema_epoch: 42,
                footprint: IntentFootprint::empty(),
                op: variant.clone(),
            };

            let json = serde_json::to_string(&op).expect("serialize must succeed");
            let decoded: IntentOp = serde_json::from_str(&json).expect("deserialize must succeed");

            assert_eq!(decoded, op, "roundtrip failed for variant: {variant:?}");
        }
    }

    #[test]
    fn test_semantic_key_ref_digest_stable() {
        let table = BtreeRef::Table(TableId::new(42));
        let key_bytes = b"canonical_key_data";

        // Compute digest twice — must be identical.
        let d1 = SemanticKeyRef::compute_digest(SemanticKeyKind::TableRow, table, key_bytes);
        let d2 = SemanticKeyRef::compute_digest(SemanticKeyKind::TableRow, table, key_bytes);
        assert_eq!(d1, d2, "digest must be stable across calls");

        // Construct via `new()` — digest must match.
        let skr = SemanticKeyRef::new(table, SemanticKeyKind::TableRow, key_bytes);
        assert_eq!(skr.key_digest, d1);

        // Different key bytes produce different digest.
        let d3 = SemanticKeyRef::compute_digest(SemanticKeyKind::TableRow, table, b"different_key");
        assert_ne!(d1, d3);

        // Different kind produces different digest.
        let d4 = SemanticKeyRef::compute_digest(SemanticKeyKind::IndexEntry, table, key_bytes);
        assert_ne!(d1, d4);

        // Different btree produces different digest.
        let index = BtreeRef::Index(IndexId::new(42));
        let d5 = SemanticKeyRef::compute_digest(SemanticKeyKind::TableRow, index, key_bytes);
        assert_ne!(d1, d5);

        // Digest is 16 bytes (Trunc128).
        assert_eq!(d1.len(), 16);
    }

    #[test]
    fn test_structural_effects_bitflags() {
        // NONE = 0.
        assert_eq!(StructuralEffects::NONE.bits(), 0);
        assert!(StructuralEffects::NONE.is_empty());

        // Simple leaf operations have no structural effects.
        let leaf = StructuralEffects::NONE;
        assert!(!leaf.contains(StructuralEffects::PAGE_SPLIT));
        assert!(!leaf.contains(StructuralEffects::FREELIST_MUTATE));

        // Page split + overflow alloc.
        let split_overflow = StructuralEffects::PAGE_SPLIT | StructuralEffects::OVERFLOW_ALLOC;
        assert!(split_overflow.contains(StructuralEffects::PAGE_SPLIT));
        assert!(split_overflow.contains(StructuralEffects::OVERFLOW_ALLOC));
        assert!(!split_overflow.contains(StructuralEffects::PAGE_MERGE));

        // All flags can be combined.
        let all = StructuralEffects::PAGE_SPLIT
            | StructuralEffects::PAGE_MERGE
            | StructuralEffects::BALANCE_MULTI_PAGE
            | StructuralEffects::OVERFLOW_ALLOC
            | StructuralEffects::OVERFLOW_MUTATE
            | StructuralEffects::FREELIST_MUTATE
            | StructuralEffects::POINTER_MAP_MUTATE
            | StructuralEffects::DEFRAG_MOVE_CELLS;
        assert!(all.contains(StructuralEffects::FREELIST_MUTATE));
        assert!(all.contains(StructuralEffects::DEFRAG_MOVE_CELLS));

        // Serde roundtrip.
        let json = serde_json::to_string(&split_overflow).expect("serialize");
        let decoded: StructuralEffects = serde_json::from_str(&json).expect("deserialize");
        assert_eq!(decoded, split_overflow);
    }

    #[test]
    fn test_rowid_allocator_monotone_no_collision() {
        // Two "concurrent writers" allocating from the same allocator must
        // produce disjoint, monotonically increasing rowids.
        let mut alloc = RowIdAllocator::new(RowIdMode::Normal);
        let mut ids: Vec<RowId> = Vec::new();

        // Writer A gets range.
        for _ in 0..5 {
            let max_existing = ids.last().copied();
            let r = alloc.allocate(max_existing).unwrap();
            ids.push(r);
        }

        // Writer B continues from same state.
        for _ in 0..5 {
            let max_existing = ids.last().copied();
            let r = alloc.allocate(max_existing).unwrap();
            ids.push(r);
        }

        // Verify monotonic and disjoint.
        let raw_ids: Vec<i64> = ids.iter().map(|r| r.get()).collect();
        for window in raw_ids.windows(2) {
            assert!(
                window[1] > window[0],
                "RowIds must be strictly monotonically increasing: {} <= {}",
                window[0],
                window[1]
            );
        }

        // Verify no duplicates.
        let unique: HashSet<i64> = raw_ids.iter().copied().collect();
        assert_eq!(unique.len(), raw_ids.len(), "RowIds must be disjoint");
    }

    #[test]
    fn test_rowid_allocator_bump_on_explicit_rowid() {
        let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);

        // Normal allocation: start at 1.
        let r1 = alloc.allocate(None).unwrap();
        assert_eq!(r1.get(), 1);

        // Explicit rowid 1000 bumps the high-water mark.
        alloc.set_sequence_high_water(1000);

        // Next allocation must be at least 1001.
        let r2 = alloc.allocate(Some(RowId::new(999))).unwrap();
        assert!(
            r2.get() >= 1001,
            "allocator must bump past explicit rowid 1000, got {}",
            r2.get()
        );

        // Verify subsequent allocations continue above.
        let r3 = alloc.allocate(Some(r2)).unwrap();
        assert!(r3.get() > r2.get());
    }
}