1use std::fmt;
8use std::num::NonZeroU64;
9
10use crate::encoding::{
11 append_u16_le, append_u32_le, append_u64_le, read_u16_le, read_u32_le, read_u64_le,
12};
13use crate::{ObjectId, PageData, PageNumber};
14
/// Transaction identifier: non-zero and at most `MAX_RAW` (2^62 - 1), as
/// enforced by [`TxnId::new`]. The `NonZeroU64` niche makes `Option<TxnId>`
/// the same size as `u64`.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct TxnId(NonZeroU64);
26
27impl TxnId {
28 pub const MAX_RAW: u64 = (1_u64 << 62) - 1;
30
31 #[inline]
33 pub const fn new(raw: u64) -> Option<Self> {
34 if raw > Self::MAX_RAW {
35 return None;
36 }
37 match NonZeroU64::new(raw) {
38 Some(nz) => Some(Self(nz)),
39 None => None,
40 }
41 }
42
43 #[inline]
45 pub const fn get(self) -> u64 {
46 self.0.get()
47 }
48
49 #[inline]
51 pub const fn checked_next(self) -> Option<Self> {
52 Self::new(self.get().wrapping_add(1))
53 }
54}
55
56impl fmt::Display for TxnId {
57 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
58 write!(f, "txn#{}", self.get())
59 }
60}
61
62impl TryFrom<u64> for TxnId {
63 type Error = InvalidTxnId;
64
65 fn try_from(value: u64) -> Result<Self, Self::Error> {
66 Self::new(value).ok_or(InvalidTxnId { raw: value })
67 }
68}
69
/// Error returned by `TryFrom<u64> for TxnId` when the raw value is zero or
/// exceeds `TxnId::MAX_RAW`; carries the rejected value for the message.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct InvalidTxnId {
    raw: u64,
}
75
76impl fmt::Display for InvalidTxnId {
77 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
78 write!(
79 f,
80 "invalid TxnId {} (must satisfy 1 <= id <= {})",
81 self.raw,
82 TxnId::MAX_RAW
83 )
84 }
85}
86
// Marker impl: no source error to expose, but this lets `InvalidTxnId` flow
// through `?` and `Box<dyn Error>` like any other error type.
impl std::error::Error for InvalidTxnId {}
88
/// Monotonically increasing commit sequence number; `ZERO` denotes the state
/// before any commit. Plain `u64` newtype (zero is valid, unlike `TxnId`).
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct CommitSeq(u64);
95
96impl CommitSeq {
97 pub const ZERO: Self = Self(0);
98
99 #[inline]
100 pub const fn new(raw: u64) -> Self {
101 Self(raw)
102 }
103
104 #[inline]
105 pub const fn get(self) -> u64 {
106 self.0
107 }
108
109 #[inline]
110 #[must_use]
111 pub const fn next(self) -> Self {
112 Self(self.0.wrapping_add(1))
113 }
114}
115
116impl fmt::Display for CommitSeq {
117 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
118 write!(f, "cs#{}", self.get())
119 }
120}
121
/// Epoch counter paired with a `TxnId` in `TxnToken`; presumably guards
/// against slot/id reuse — TODO confirm against the slot manager.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct TxnEpoch(u32);
126
127impl TxnEpoch {
128 #[inline]
129 pub const fn new(raw: u32) -> Self {
130 Self(raw)
131 }
132
133 #[inline]
134 pub const fn get(self) -> u32 {
135 self.0
136 }
137}
138
/// A transaction id together with its epoch; `PageVersion::created_by`
/// records one of these per page version.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct TxnToken {
    pub id: TxnId,
    pub epoch: TxnEpoch,
}
145
146impl TxnToken {
147 #[inline]
148 pub const fn new(id: TxnId, epoch: TxnEpoch) -> Self {
149 Self { id, epoch }
150 }
151}
152
/// Schema version counter; snapshots carry one (`Snapshot::schema_epoch`) and
/// each `IntentOp` records the epoch it was built against.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct SchemaEpoch(u64);
159
160impl SchemaEpoch {
161 pub const ZERO: Self = Self(0);
162
163 #[inline]
164 pub const fn new(raw: u64) -> Self {
165 Self(raw)
166 }
167
168 #[inline]
169 pub const fn get(self) -> u64 {
170 self.0
171 }
172}
173
/// A consistent read position: the highest visible commit sequence plus the
/// schema epoch in force at that point.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct Snapshot {
    pub high: CommitSeq,
    pub schema_epoch: SchemaEpoch,
}
182
183impl Snapshot {
184 #[inline]
185 pub const fn new(high: CommitSeq, schema_epoch: SchemaEpoch) -> Self {
186 Self { high, schema_epoch }
187 }
188}
189
/// Opaque handle to a prior page version (see `PageVersion::prev`); the raw
/// `u64` encoding is interpreted by the version store, not here.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct VersionPointer(u64);
197
198impl VersionPointer {
199 #[inline]
200 pub const fn new(raw: u64) -> Self {
201 Self(raw)
202 }
203
204 #[inline]
205 pub const fn get(self) -> u64 {
206 self.0
207 }
208}
209
/// One MVCC version of a page: its contents, the commit that produced it,
/// the creating transaction, and a link to the previous version (None for
/// the oldest retained version).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PageVersion {
    pub pgno: PageNumber,
    pub commit_seq: CommitSeq,
    pub created_by: TxnToken,
    pub data: PageData,
    pub prev: Option<VersionPointer>,
}
219
/// Engine operating mode; defaults to `Compatibility`.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, Hash, Default, serde::Serialize, serde::Deserialize,
)]
pub enum OperatingMode {
    /// Default mode; the only mode in which `legacy_readers_allowed` is true.
    #[default]
    Compatibility,
    /// Mode reported by `is_native`; legacy readers are not allowed.
    Native,
}
237
238impl fmt::Display for OperatingMode {
239 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
240 match self {
241 Self::Compatibility => f.write_str("compatibility"),
242 Self::Native => f.write_str("native"),
243 }
244 }
245}
246
247impl OperatingMode {
248 #[must_use]
250 pub fn from_pragma(s: &str) -> Option<Self> {
251 let lower = s.trim().to_ascii_lowercase();
252 match lower.as_str() {
253 "compatibility" | "compat" => Some(Self::Compatibility),
254 "native" => Some(Self::Native),
255 _ => None,
256 }
257 }
258
259 #[must_use]
261 pub const fn is_native(self) -> bool {
262 matches!(self, Self::Native)
263 }
264
265 #[must_use]
267 pub const fn legacy_readers_allowed(self) -> bool {
268 matches!(self, Self::Compatibility)
269 }
270}
271
/// Everything a transaction submits at commit time: its logical intent log,
/// raw page deltas, read/write-set digests, and ECS object references for
/// witnesses, dependency edges, and merge evidence.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct CommitCapsule {
    // Content-addressed identity of this capsule.
    pub object_id: ObjectId,
    // Snapshot the transaction read from.
    pub snapshot_basis: CommitSeq,
    // Logical operations, replayable for rebase.
    pub intent_log: Vec<IntentOp>,
    // Physical page images keyed by page number.
    pub page_deltas: Vec<(PageNumber, Vec<u8>)>,
    // 32-byte digests summarizing the read and write sets.
    pub read_set_digest: [u8; 32],
    pub write_set_digest: [u8; 32],
    // References to supporting ECS objects.
    pub read_witness_refs: Vec<ObjectId>,
    pub write_witness_refs: Vec<ObjectId>,
    pub dependency_edge_refs: Vec<ObjectId>,
    pub merge_witness_refs: Vec<ObjectId>,
}
301
/// Durable record of a single commit: sequence, wall-clock time, references
/// to the capsule and proof objects, an optional link to the previous marker
/// (forming a chain), and a 128-bit integrity hash over the other fields.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct CommitMarker {
    pub commit_seq: CommitSeq,
    pub commit_time_unix_ns: u64,
    pub capsule_object_id: ObjectId,
    pub proof_object_id: ObjectId,
    pub prev_marker: Option<ObjectId>,
    // XXH3-128 over all preceding fields; see `compute_integrity_hash`.
    pub integrity_hash: [u8; 16],
}
320
/// Fixed on-disk size of a v1 commit-marker record (83 payload bytes plus
/// 5 bytes of zero padding; see `CommitMarker::to_record_bytes`).
pub const COMMIT_MARKER_RECORD_V1_SIZE: usize = 88;

/// Format version byte written at offset 0 of each record.
const COMMIT_MARKER_RECORD_VERSION: u8 = 1;
330
331impl CommitMarker {
332 #[must_use]
334 pub fn to_record_bytes(&self) -> [u8; COMMIT_MARKER_RECORD_V1_SIZE] {
335 let mut buf = [0u8; COMMIT_MARKER_RECORD_V1_SIZE];
336 buf[0] = COMMIT_MARKER_RECORD_VERSION;
337 buf[1] = 0; buf[2..10].copy_from_slice(&self.commit_seq.get().to_le_bytes());
341 buf[10..18].copy_from_slice(&self.commit_time_unix_ns.to_le_bytes());
343 buf[18..34].copy_from_slice(self.capsule_object_id.as_bytes());
345 buf[34..50].copy_from_slice(self.proof_object_id.as_bytes());
347 if let Some(prev) = self.prev_marker {
349 buf[50..66].copy_from_slice(prev.as_bytes());
350 }
351 buf[66] = u8::from(self.prev_marker.is_some());
353 buf[67..83].copy_from_slice(&self.integrity_hash);
355 buf
357 }
358
359 #[must_use]
361 pub fn from_record_bytes(data: &[u8; COMMIT_MARKER_RECORD_V1_SIZE]) -> Option<Self> {
362 if data[0] != COMMIT_MARKER_RECORD_VERSION {
363 return None;
364 }
365
366 let commit_seq = CommitSeq::new(u64::from_le_bytes(data[2..10].try_into().ok()?));
367 let commit_time_unix_ns = u64::from_le_bytes(data[10..18].try_into().ok()?);
368 let capsule_object_id = ObjectId::from_bytes(data[18..34].try_into().ok()?);
369 let proof_object_id = ObjectId::from_bytes(data[34..50].try_into().ok()?);
370 let has_prev = data[66] != 0;
371 let prev_marker = if has_prev {
372 Some(ObjectId::from_bytes(data[50..66].try_into().ok()?))
373 } else {
374 None
375 };
376 let mut integrity_hash = [0u8; 16];
377 integrity_hash.copy_from_slice(&data[67..83]);
378
379 Some(Self {
380 commit_seq,
381 commit_time_unix_ns,
382 capsule_object_id,
383 proof_object_id,
384 prev_marker,
385 integrity_hash,
386 })
387 }
388
389 #[must_use]
392 pub fn compute_integrity_hash(&self) -> [u8; 16] {
393 let mut buf = Vec::with_capacity(74);
394 append_u64_le(&mut buf, self.commit_seq.get());
395 append_u64_le(&mut buf, self.commit_time_unix_ns);
396 buf.extend_from_slice(self.capsule_object_id.as_bytes());
397 buf.extend_from_slice(self.proof_object_id.as_bytes());
398 if let Some(prev) = self.prev_marker {
399 buf.push(1);
400 buf.extend_from_slice(prev.as_bytes());
401 } else {
402 buf.push(0);
403 buf.extend_from_slice(&[0u8; 16]);
404 }
405 let hash128 = xxhash_rust::xxh3::xxh3_128(&buf);
406 hash128.to_le_bytes()
407 }
408
409 #[must_use]
411 pub fn new(
412 commit_seq: CommitSeq,
413 commit_time_unix_ns: u64,
414 capsule_object_id: ObjectId,
415 proof_object_id: ObjectId,
416 prev_marker: Option<ObjectId>,
417 ) -> Self {
418 let mut marker = Self {
419 commit_seq,
420 commit_time_unix_ns,
421 capsule_object_id,
422 proof_object_id,
423 prev_marker,
424 integrity_hash: [0u8; 16],
425 };
426 marker.integrity_hash = marker.compute_integrity_hash();
427 marker
428 }
429
430 #[must_use]
432 pub fn verify_integrity(&self) -> bool {
433 self.integrity_hash == self.compute_integrity_hash()
434 }
435}
436
/// Object transmission information. Field names follow the erasure-coding
/// convention (f/al/t/z/n) — presumably RaptorQ-style OTI; confirm against
/// the codec that consumes this.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct Oti {
    // Transfer length.
    pub f: u64,
    // Symbol alignment.
    pub al: u16,
    // Symbol size (u32, so sizes >= 64 KiB are representable).
    pub t: u32,
    // Number of source blocks.
    pub z: u32,
    // Number of sub-blocks.
    pub n: u32,
}

/// Wire size of an encoded `Oti`: 8 + 2 + 4 + 4 + 4 bytes.
pub const OTI_WIRE_SIZE: usize = 22;
461
462impl Oti {
463 #[must_use]
465 pub fn to_bytes(self) -> [u8; OTI_WIRE_SIZE] {
466 let mut as_vec = Vec::with_capacity(OTI_WIRE_SIZE);
467 append_u64_le(&mut as_vec, self.f);
468 append_u16_le(&mut as_vec, self.al);
469 append_u32_le(&mut as_vec, self.t);
470 append_u32_le(&mut as_vec, self.z);
471 append_u32_le(&mut as_vec, self.n);
472
473 let mut buf = [0u8; OTI_WIRE_SIZE];
474 buf.copy_from_slice(&as_vec);
475 buf
476 }
477
478 #[must_use]
482 pub fn from_bytes(data: &[u8]) -> Option<Self> {
483 if data.len() < OTI_WIRE_SIZE {
484 return None;
485 }
486 Some(Self {
487 f: read_u64_le(&data[0..8])?,
488 al: read_u16_le(&data[8..10])?,
489 t: read_u32_le(&data[10..14])?,
490 z: read_u32_le(&data[14..18])?,
491 n: read_u32_le(&data[18..22])?,
492 })
493 }
494}
495
/// Pairs a decoded object's id with the OTI used to decode it.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct DecodeProof {
    pub object_id: ObjectId,
    pub oti: Oti,
}
502
503pub use crate::cx::{Budget, Cx};
507
/// Task completion outcome. Variant order is significant: the derived `Ord`
/// gives the severity lattice Ok < Err < Cancelled < Panicked — do not
/// reorder variants.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
pub enum Outcome {
    Ok,
    Err,
    Cancelled,
    Panicked,
}
518
/// ECS epoch counter; `SymbolValidityWindow` bounds are expressed in these.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct EpochId(u64);
525
526impl EpochId {
527 pub const ZERO: Self = Self(0);
529
530 #[inline]
531 pub const fn new(raw: u64) -> Self {
532 Self(raw)
533 }
534
535 #[inline]
536 pub const fn get(self) -> u64 {
537 self.0
538 }
539
540 #[must_use]
544 pub const fn next(self) -> Option<Self> {
545 match self.0.checked_add(1) {
546 Some(val) => Some(Self(val)),
547 None => None,
548 }
549 }
550}
551
/// Inclusive epoch range `[from_epoch, to_epoch]` in which a symbol is valid
/// (see `contains`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct SymbolValidityWindow {
    pub from_epoch: EpochId,
    pub to_epoch: EpochId,
}
558
559impl SymbolValidityWindow {
560 #[must_use]
561 pub const fn new(from_epoch: EpochId, to_epoch: EpochId) -> Self {
562 Self {
563 from_epoch,
564 to_epoch,
565 }
566 }
567
568 #[must_use]
570 pub const fn default_window(current_epoch: EpochId) -> Self {
571 Self {
572 from_epoch: EpochId::ZERO,
573 to_epoch: current_epoch,
574 }
575 }
576
577 #[must_use]
582 pub const fn contains(&self, epoch: EpochId) -> bool {
583 epoch.0 >= self.from_epoch.0 && epoch.0 <= self.to_epoch.0
584 }
585}
586
/// 16-byte remote-access capability token; opaque at this layer.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct RemoteCap([u8; 16]);
591
592impl RemoteCap {
593 #[must_use]
594 pub const fn from_bytes(bytes: [u8; 16]) -> Self {
595 Self(bytes)
596 }
597
598 #[must_use]
599 pub const fn as_bytes(&self) -> &[u8; 16] {
600 &self.0
601 }
602}
603
/// 32-byte master-key capability for symbol authentication; opaque here.
/// NOTE(review): key material derives `Debug`/`Serialize` — consider whether
/// it should be redacted from logs and serialized forms.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct SymbolAuthMasterKeyCap([u8; 32]);
608
609impl SymbolAuthMasterKeyCap {
610 #[must_use]
611 pub const fn from_bytes(bytes: [u8; 32]) -> Self {
612 Self(bytes)
613 }
614
615 #[must_use]
616 pub const fn as_bytes(&self) -> &[u8; 32] {
617 &self.0
618 }
619}
620
/// 16-byte idempotency key; either supplied raw (`from_bytes`) or derived
/// deterministically from a request (`derive`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct IdempotencyKey([u8; 16]);
625
626impl IdempotencyKey {
627 #[must_use]
628 pub const fn from_bytes(bytes: [u8; 16]) -> Self {
629 Self(bytes)
630 }
631
632 #[must_use]
633 pub const fn as_bytes(&self) -> &[u8; 16] {
634 &self.0
635 }
636}
637
/// A saga identified solely by its idempotency key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct Saga {
    pub key: IdempotencyKey,
}
643
644impl IdempotencyKey {
645 #[must_use]
650 pub fn derive(ecs_epoch: u64, request_bytes: &[u8]) -> Self {
651 let mut hasher = blake3::Hasher::new();
652 hasher.update(b"fsqlite:idempotency:v1");
653 hasher.update(&ecs_epoch.to_le_bytes());
654 hasher.update(request_bytes);
655 let digest = hasher.finalize();
656 let mut out = [0_u8; 16];
657 out.copy_from_slice(&digest.as_bytes()[..16]);
658 Self(out)
659 }
660}
661
662impl Saga {
663 #[must_use]
665 pub const fn new(key: IdempotencyKey) -> Self {
666 Self { key }
667 }
668
669 #[must_use]
671 pub const fn key(self) -> IdempotencyKey {
672 self.key
673 }
674}
675
/// Opaque region index newtype; semantics are defined by the consumer —
/// TODO confirm what a region partitions.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct Region(u32);
682
683impl Region {
684 #[inline]
685 pub const fn new(raw: u32) -> Self {
686 Self(raw)
687 }
688
689 #[inline]
690 pub const fn get(self) -> u32 {
691 self.0
692 }
693}
694
/// Granularity-tagged key identifying what a transaction read or wrote, from
/// whole pages down to individual cells, byte ranges, key ranges, or
/// caller-defined namespaces.
#[derive(
    Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
pub enum WitnessKey {
    /// An entire page.
    Page(PageNumber),
    /// A single cell, identified by its b-tree root, the leaf page holding
    /// it, and a hash tag of the cell's canonical key (see `cell_tag`).
    Cell {
        btree_root: PageNumber,
        leaf_page: PageNumber,
        tag: u64,
    },
    /// A `[start, start + len)` byte span within one page.
    ByteRange {
        page: PageNumber,
        start: u32,
        len: u32,
    },
    /// A key interval within one b-tree; bounds are raw encoded keys.
    KeyRange {
        btree_root: PageNumber,
        lo: Vec<u8>,
        hi: Vec<u8>,
    },
    /// Escape hatch: caller-defined key under a numeric namespace.
    Custom { namespace: u32, bytes: Vec<u8> },
}
729
730impl WitnessKey {
731 #[must_use]
736 pub fn cell_tag(btree_root: PageNumber, canonical_key_bytes: &[u8]) -> u64 {
737 use xxhash_rust::xxh3::xxh3_64;
738 let mut buf =
739 Vec::with_capacity(b"fsqlite:witness:cell:v1".len() + 4 + canonical_key_bytes.len());
740 buf.extend_from_slice(b"fsqlite:witness:cell:v1");
741 buf.extend_from_slice(&btree_root.get().to_le_bytes());
742 buf.extend_from_slice(canonical_key_bytes);
743 xxh3_64(&buf)
745 }
746
747 #[must_use]
749 pub fn for_cell_read(
750 btree_root: PageNumber,
751 leaf_page: PageNumber,
752 canonical_key_bytes: &[u8],
753 ) -> Self {
754 Self::Cell {
755 btree_root,
756 leaf_page,
757 tag: Self::cell_tag(btree_root, canonical_key_bytes),
758 }
759 }
760
761 #[must_use]
765 pub fn for_range_scan(leaf_pages: &[PageNumber]) -> Vec<Self> {
766 leaf_pages.iter().copied().map(Self::Page).collect()
767 }
768
769 #[must_use]
774 pub fn for_point_write(
775 btree_root: PageNumber,
776 canonical_key_bytes: &[u8],
777 leaf_pgno: PageNumber,
778 ) -> (Self, Self) {
779 let cell = Self::Cell {
780 btree_root,
781 leaf_page: leaf_pgno,
782 tag: Self::cell_tag(btree_root, canonical_key_bytes),
783 };
784 let page = Self::Page(leaf_pgno);
785 (cell, page)
786 }
787
788 #[must_use]
795 pub const fn page_number(&self) -> Option<PageNumber> {
796 match self {
797 Self::Page(page) | Self::ByteRange { page, .. } => Some(*page),
798 Self::Cell { btree_root, .. } | Self::KeyRange { btree_root, .. } => Some(*btree_root),
799 Self::Custom { .. } => None,
800 }
801 }
802
803 #[must_use]
805 pub fn is_page(&self) -> bool {
806 matches!(self, Self::Page(_))
807 }
808
809 #[must_use]
811 pub fn is_cell(&self) -> bool {
812 matches!(self, Self::Cell { .. })
813 }
814}
815
/// Compact range bucket: a level plus a 32-bit hash prefix — presumably for
/// hierarchical range indexing; confirm against the index that uses it.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct RangeKey {
    pub level: u8,
    pub hash_prefix: u32,
}
822
/// Record that transaction `txn` read the item identified by `key`.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct ReadWitness {
    pub txn: TxnId,
    pub key: WitnessKey,
}
829
/// Record that transaction `txn` wrote the item identified by `key`.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct WriteWitness {
    pub txn: TxnId,
    pub key: WitnessKey,
}
836
/// One epoch's worth of read and write witnesses, grouped for indexing.
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct WitnessIndexSegment {
    pub epoch: EpochId,
    pub reads: Vec<ReadWitness>,
    pub writes: Vec<WriteWitness>,
}
844
/// Directed dependency `from -> to` between transactions, justified by the
/// witness key both touched, and attributed to the transaction that observed it.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct DependencyEdge {
    pub from: TxnId,
    pub to: TxnId,
    pub key_basis: WitnessKey,
    pub observed_by: TxnId,
}
853
/// Evidence attached to a commit: its sequence number, the dependency edges
/// that justify its ordering, and references to supporting ECS objects.
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct CommitProof {
    pub commit_seq: CommitSeq,
    pub edges: Vec<DependencyEdge>,
    pub evidence_refs: Vec<ObjectId>,
}
868
/// Numeric table identifier (see `BtreeRef::Table`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct TableId(u32);
873
874impl TableId {
875 #[inline]
876 pub const fn new(raw: u32) -> Self {
877 Self(raw)
878 }
879
880 #[inline]
881 pub const fn get(self) -> u32 {
882 self.0
883 }
884}
885
/// Numeric index identifier (see `BtreeRef::Index`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
pub struct IndexId(u32);
890
891impl IndexId {
892 #[inline]
893 pub const fn new(raw: u32) -> Self {
894 Self(raw)
895 }
896
897 #[inline]
898 pub const fn get(self) -> u32 {
899 self.0
900 }
901}
902
/// Signed 64-bit row identifier, matching SQLite's rowid type; `MAX` is
/// `i64::MAX`.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct RowId(i64);
909
910impl RowId {
911 pub const MAX: Self = Self(i64::MAX);
913
914 #[inline]
915 pub const fn new(raw: i64) -> Self {
916 Self(raw)
917 }
918
919 #[inline]
920 pub const fn get(self) -> i64 {
921 self.0
922 }
923}
924
/// Rowid allocation policy (see `RowIdAllocator::allocate`): `Normal` uses
/// max-existing + 1; `AutoIncrement` additionally never re-issues values at
/// or below the persisted high-water mark.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum RowIdMode {
    Normal,
    AutoIncrement,
}
934
/// Stateful rowid allocator: a policy plus the high-water mark consulted
/// only in `AutoIncrement` mode.
#[derive(Debug, Clone)]
pub struct RowIdAllocator {
    mode: RowIdMode,
    // Highest rowid ever handed out in AutoIncrement mode; persisted
    // externally via `sequence_high_water` / `set_sequence_high_water`.
    sequence_high_water: i64,
}
947
948impl RowIdAllocator {
949 pub const fn new(mode: RowIdMode) -> Self {
951 Self {
952 mode,
953 sequence_high_water: 0,
954 }
955 }
956
957 pub fn allocate(&mut self, max_existing: Option<RowId>) -> Result<RowId, RowIdExhausted> {
963 let max_val = max_existing.map_or(0, RowId::get);
964
965 match self.mode {
966 RowIdMode::Normal => {
967 if max_val < i64::MAX {
968 Ok(RowId::new(max_val + 1))
969 } else {
970 Err(RowIdExhausted)
973 }
974 }
975 RowIdMode::AutoIncrement => {
976 let base = max_val.max(self.sequence_high_water);
977 if base == i64::MAX {
978 return Err(RowIdExhausted);
979 }
980 let next = base + 1;
981 self.sequence_high_water = next;
982 Ok(RowId::new(next))
983 }
984 }
985 }
986
987 pub const fn sequence_high_water(&self) -> i64 {
989 self.sequence_high_water
990 }
991
992 pub fn set_sequence_high_water(&mut self, val: i64) {
994 self.sequence_high_water = val;
995 }
996}
997
/// Error returned by `RowIdAllocator::allocate` when no rowid above the
/// current maximum / high-water mark exists.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RowIdExhausted;

impl std::fmt::Display for RowIdExhausted {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Message text mirrors SQLite's SQLITE_FULL wording.
        f.write_str("database or object is full (rowid exhausted)")
    }
}

// Added for consistency with `InvalidTxnId`: lets callers propagate this via
// `?` into `Box<dyn Error>` / anyhow-style error chains.
impl std::error::Error for RowIdExhausted {}
1007
/// Zero-based column index within a table row (see `RebaseExpr::ColumnRef`).
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct ColumnIdx(u32);
1014
1015impl ColumnIdx {
1016 #[inline]
1017 pub const fn new(raw: u32) -> Self {
1018 Self(raw)
1019 }
1020
1021 #[inline]
1022 pub const fn get(self) -> u32 {
1023 self.0
1024 }
1025}
1026
/// Reference to a b-tree by what it stores: a table or an index.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum BtreeRef {
    Table(TableId),
    Index(IndexId),
}
1037
/// Kind of semantic key: a table row or an index entry. Digest encoding maps
/// TableRow -> 0, IndexEntry -> 1 (see `SemanticKeyRef::compute_digest`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum SemanticKeyKind {
    TableRow,
    IndexEntry,
}
1044
/// Content-addressed reference to a logical key: the b-tree, the key kind,
/// and a 16-byte truncated BLAKE3 digest of the canonical key bytes.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct SemanticKeyRef {
    pub btree: BtreeRef,
    pub kind: SemanticKeyKind,
    pub key_digest: [u8; 16],
}
1054
1055impl SemanticKeyRef {
1056 const DOMAIN_SEP: &'static [u8] = b"fsqlite:btree:key:v1";
1058
1059 #[must_use]
1061 pub fn compute_digest(
1062 kind: SemanticKeyKind,
1063 btree: BtreeRef,
1064 canonical_key_bytes: &[u8],
1065 ) -> [u8; 16] {
1066 let mut hasher = blake3::Hasher::new();
1067 hasher.update(Self::DOMAIN_SEP);
1068 hasher.update(&[match kind {
1069 SemanticKeyKind::TableRow => 0,
1070 SemanticKeyKind::IndexEntry => 1,
1071 }]);
1072 match btree {
1073 BtreeRef::Table(id) => {
1074 hasher.update(&[0]);
1075 hasher.update(&id.get().to_le_bytes());
1076 }
1077 BtreeRef::Index(id) => {
1078 hasher.update(&[1]);
1079 hasher.update(&id.get().to_le_bytes());
1080 }
1081 }
1082 hasher.update(canonical_key_bytes);
1083 let hash = hasher.finalize();
1084 let bytes = hash.as_bytes();
1085 let mut digest = [0u8; 16];
1086 digest.copy_from_slice(&bytes[..16]);
1087 digest
1088 }
1089
1090 #[must_use]
1092 pub fn new(btree: BtreeRef, kind: SemanticKeyKind, canonical_key_bytes: &[u8]) -> Self {
1093 let key_digest = Self::compute_digest(kind, btree, canonical_key_bytes);
1094 Self {
1095 btree,
1096 kind,
1097 key_digest,
1098 }
1099 }
1100}
1101
bitflags::bitflags! {
    /// Bit set of structural b-tree side effects produced by an operation.
    /// Serialized as the raw `u32` bits.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct StructuralEffects: u32 {
        /// No structural change.
        const NONE = 0;
        /// A page was split.
        const PAGE_SPLIT = 1;
        /// Two pages were merged.
        const PAGE_MERGE = 2;
        /// Rebalancing touched more than one page.
        const BALANCE_MULTI_PAGE = 4;
        /// An overflow page was allocated.
        const OVERFLOW_ALLOC = 8;
        /// An existing overflow chain was mutated.
        const OVERFLOW_MUTATE = 16;
        /// The freelist was mutated.
        const FREELIST_MUTATE = 32;
        /// The pointer map was mutated.
        const POINTER_MAP_MUTATE = 64;
        /// Defragmentation moved cells within a page.
        const DEFRAG_MOVE_CELLS = 128;
    }
}
1126
1127impl serde::Serialize for StructuralEffects {
1128 fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
1129 self.bits().serialize(serializer)
1130 }
1131}
1132
1133impl<'de> serde::Deserialize<'de> for StructuralEffects {
1134 fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
1135 let bits = u32::deserialize(deserializer)?;
1136 Self::from_bits(bits).ok_or_else(|| {
1137 serde::de::Error::custom(format!("invalid StructuralEffects bits: {bits:#x}"))
1138 })
1139 }
1140}
1141
1142impl Default for StructuralEffects {
1143 fn default() -> Self {
1144 Self::NONE
1145 }
1146}
1147
/// Semantic read/write footprint of one intent operation, plus any
/// structural effects it caused.
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct IntentFootprint {
    pub reads: Vec<SemanticKeyRef>,
    pub writes: Vec<SemanticKeyRef>,
    pub structural: StructuralEffects,
}
1155
1156impl IntentFootprint {
1157 #[must_use]
1159 pub fn empty() -> Self {
1160 Self {
1161 reads: Vec::new(),
1162 writes: Vec::new(),
1163 structural: StructuralEffects::NONE,
1164 }
1165 }
1166}
1167
1168impl Default for IntentFootprint {
1169 fn default() -> Self {
1170 Self::empty()
1171 }
1172}
1173
/// Expression AST used by `IntentOpKind::UpdateExpression` so column updates
/// can be re-evaluated (rebased) against new row contents.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub enum RebaseExpr {
    /// Value of a column in the current row.
    ColumnRef(ColumnIdx),
    /// Constant SQL value.
    Literal(crate::SqliteValue),
    /// Unary operator application.
    UnaryOp {
        op: RebaseUnaryOp,
        operand: Box<Self>,
    },
    /// Binary operator application.
    BinaryOp {
        op: RebaseBinaryOp,
        left: Box<Self>,
        right: Box<Self>,
    },
    /// Named function call with positional arguments.
    FunctionCall { name: String, args: Vec<Self> },
    /// `CAST(expr AS type_name)`.
    Cast { expr: Box<Self>, type_name: String },
    /// `CASE [operand] WHEN … THEN … [ELSE …] END`.
    Case {
        operand: Option<Box<Self>>,
        when_clauses: Vec<(Self, Self)>,
        else_clause: Option<Box<Self>>,
    },
    /// `COALESCE(…)` over the listed expressions.
    Coalesce(Vec<Self>),
    /// `NULLIF(left, right)`.
    NullIf { left: Box<Self>, right: Box<Self> },
    /// String concatenation (`left || right`).
    Concat { left: Box<Self>, right: Box<Self> },
}
1212
/// Unary operators available in `RebaseExpr::UnaryOp`: arithmetic negation,
/// bitwise NOT (`~`), and logical NOT.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum RebaseUnaryOp {
    Negate,
    BitwiseNot,
    Not,
}
1220
/// Arithmetic and bitwise binary operators available in
/// `RebaseExpr::BinaryOp` (string concatenation has its own variant).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum RebaseBinaryOp {
    Add,
    Subtract,
    Multiply,
    Divide,
    Remainder,
    BitwiseAnd,
    BitwiseOr,
    ShiftLeft,
    ShiftRight,
}
1234
/// One logical mutation recorded in the intent log: row-level table changes,
/// index maintenance, or an expression-based update that can be re-evaluated
/// on rebase.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub enum IntentOpKind {
    /// Insert a row with an encoded record payload.
    Insert {
        table: TableId,
        key: RowId,
        record: Vec<u8>,
    },
    /// Delete the row with the given rowid.
    Delete {
        table: TableId,
        key: RowId,
    },
    /// Replace the row's record with `new_record`.
    Update {
        table: TableId,
        key: RowId,
        new_record: Vec<u8>,
    },
    /// Add an index entry mapping an encoded key to a rowid.
    IndexInsert {
        index: IndexId,
        key: Vec<u8>,
        rowid: RowId,
    },
    /// Remove an index entry.
    IndexDelete {
        index: IndexId,
        key: Vec<u8>,
        rowid: RowId,
    },
    /// Per-column updates expressed as `RebaseExpr`s, so they can be
    /// re-applied against a newer row image instead of a fixed record.
    UpdateExpression {
        table: TableId,
        key: RowId,
        column_updates: Vec<(ColumnIdx, RebaseExpr)>,
    },
}
1269
/// One intent-log entry: the operation, its semantic footprint, and the
/// schema epoch it was built against (raw `u64`, cf. `SchemaEpoch`).
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct IntentOp {
    pub schema_epoch: u64,
    pub footprint: IntentFootprint,
    pub op: IntentOpKind,
}
1277
/// Ordered sequence of intent operations recorded by a transaction.
pub type IntentLog = Vec<IntentOp>;
1280
/// All retained MVCC versions of a single page.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PageHistory {
    pub pgno: PageNumber,
    pub versions: Vec<PageVersion>,
}
1287
/// Placeholder unit type for the ARC page cache; carries no state here —
/// presumably the real implementation lives elsewhere. TODO confirm.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct ArcCache;
1294
/// Top-level manifest: current schema epoch, the root page, and the ECS epoch.
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct RootManifest {
    pub schema_epoch: SchemaEpoch,
    pub root_page: PageNumber,
    pub ecs_epoch: EpochId,
}
1306
/// Index of a transaction slot — presumably into a fixed slot table; confirm
/// against the slot manager.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(transparent)]
pub struct TxnSlot(u32);
1313
1314impl TxnSlot {
1315 #[inline]
1316 pub const fn new(raw: u32) -> Self {
1317 Self(raw)
1318 }
1319
1320 #[inline]
1321 pub const fn get(self) -> u32 {
1322 self.0
1323 }
1324}
1325
1326#[cfg(test)]
1327mod tests {
1328 use std::collections::HashSet;
1329 use std::time::Duration;
1330
1331 use proptest::prelude::*;
1332
1333 use crate::PayloadHash;
1334
1335 use super::*;
1336
1337 #[test]
1338 fn test_txn_id_nonzero_enforced() {
1339 assert!(TxnId::new(0).is_none());
1340 assert!(TxnId::try_from(0_u64).is_err());
1341 assert!(TxnId::new(1).is_some());
1342 assert!(TxnId::new(TxnId::MAX_RAW).is_some());
1343 }
1344
1345 #[test]
1346 fn test_txn_id_62_bit_max() {
1347 assert!(TxnId::new(TxnId::MAX_RAW + 1).is_none());
1348 assert!(TxnId::try_from(TxnId::MAX_RAW + 1).is_err());
1349 }
1350
1351 #[test]
1352 fn test_object_id_16_bytes_blake3_truncation() {
1353 let header = b"hdr:v1";
1354 let payload = b"payload";
1355 let oid = ObjectId::derive(header, PayloadHash::blake3(payload));
1356 assert_eq!(oid.as_bytes().len(), ObjectId::LEN);
1357 }
1358
1359 #[test]
1360 fn test_object_id_content_addressed() {
1361 let header = b"hdr:v1";
1362 let payload = b"payload";
1363 let a = ObjectId::derive(header, PayloadHash::blake3(payload));
1364 let b = ObjectId::derive(header, PayloadHash::blake3(payload));
1365 assert_eq!(a, b);
1366
1367 let c = ObjectId::derive(header, PayloadHash::blake3(b"payload2"));
1368 assert_ne!(a, c);
1369 }
1370
1371 #[test]
1372 fn prop_object_id_collision_resistance() {
1373 let header = b"hdr:v1";
1374 let mut ids = HashSet::<ObjectId>::with_capacity(10_000);
1375
1376 let mut state: u64 = 0xD6E8_FEB8_6659_FD93;
1377 for i in 0..10_000_u64 {
1378 state = state
1380 .wrapping_mul(6_364_136_223_846_793_005_u64)
1381 .wrapping_add(1_442_695_040_888_963_407_u64);
1382
1383 let mut payload = [0_u8; 32];
1384 payload[..8].copy_from_slice(&i.to_le_bytes());
1385 payload[8..16].copy_from_slice(&state.to_le_bytes());
1386 payload[16..24].copy_from_slice(&state.rotate_left(17).to_le_bytes());
1387 payload[24..32].copy_from_slice(&state.rotate_left(41).to_le_bytes());
1388
1389 let oid = ObjectId::derive(header, PayloadHash::blake3(&payload));
1390 assert!(ids.insert(oid), "ObjectId collision at i={i}");
1391 }
1392 }
1393
1394 #[test]
1395 fn test_snapshot_fields() {
1396 let snap = Snapshot::new(CommitSeq::new(7), SchemaEpoch::new(9));
1397 assert_eq!(snap.high.get(), 7);
1398 assert_eq!(snap.schema_epoch.get(), 9);
1399 }
1400
1401 #[test]
1402 fn test_oti_field_widths_allow_large_symbol_size() {
1403 let oti = Oti {
1405 f: 1,
1406 al: 4,
1407 t: 65_536,
1408 z: 1,
1409 n: 1,
1410 };
1411 assert_eq!(oti.t, 65_536);
1412 }
1413
1414 #[test]
1415 fn test_budget_product_lattice_semantics() {
1416 let a = Budget {
1417 deadline: Some(Duration::from_millis(100)),
1418 poll_quota: 10,
1419 cost_quota: Some(500),
1420 priority: 1,
1421 };
1422 let b = Budget {
1423 deadline: Some(Duration::from_millis(50)),
1424 poll_quota: 20,
1425 cost_quota: Some(400),
1426 priority: 9,
1427 };
1428 let c = a.meet(b);
1429 assert_eq!(c.deadline, Some(Duration::from_millis(50)));
1430 assert_eq!(c.poll_quota, 10);
1431 assert_eq!(c.cost_quota, Some(400));
1432 assert_eq!(c.priority, 9);
1433 }
1434
1435 #[test]
1436 fn test_outcome_ordering_lattice() {
1437 assert!(Outcome::Ok < Outcome::Err);
1438 assert!(Outcome::Err < Outcome::Cancelled);
1439 assert!(Outcome::Cancelled < Outcome::Panicked);
1440 }
1441
1442 #[test]
1443 fn test_witness_key_variants_exhaustive() {
1444 let pn = PageNumber::new(1).unwrap();
1445 let a = WitnessKey::Page(pn);
1446 let b = WitnessKey::Cell {
1447 btree_root: pn,
1448 leaf_page: pn,
1449 tag: 7,
1450 };
1451 let c = WitnessKey::ByteRange {
1452 page: pn,
1453 start: 0,
1454 len: 16,
1455 };
1456
1457 assert!(matches!(a, WitnessKey::Page(_)));
1458 assert!(matches!(b, WitnessKey::Cell { .. }));
1459 assert!(matches!(c, WitnessKey::ByteRange { .. }));
1460 }
1461
1462 #[test]
1463 fn test_all_glossary_types_derive_debug_clone() {
1464 fn assert_debug_clone<T: fmt::Debug + Clone>() {}
1465
1466 assert_debug_clone::<TxnId>();
1467 assert_debug_clone::<CommitSeq>();
1468 assert_debug_clone::<TxnEpoch>();
1469 assert_debug_clone::<TxnToken>();
1470 assert_debug_clone::<SchemaEpoch>();
1471 assert_debug_clone::<Snapshot>();
1472 assert_debug_clone::<VersionPointer>();
1473 assert_debug_clone::<PageVersion>();
1474 assert_debug_clone::<ObjectId>();
1475 assert_debug_clone::<CommitCapsule>();
1476 assert_debug_clone::<CommitMarker>();
1477 assert_debug_clone::<Oti>();
1478 assert_debug_clone::<DecodeProof>();
1479 assert_debug_clone::<Cx<crate::cx::ComputeCaps>>();
1480 assert_debug_clone::<Budget>();
1481 assert_debug_clone::<Outcome>();
1482 assert_debug_clone::<EpochId>();
1483 assert_debug_clone::<SymbolValidityWindow>();
1484 assert_debug_clone::<RemoteCap>();
1485 assert_debug_clone::<SymbolAuthMasterKeyCap>();
1486 assert_debug_clone::<IdempotencyKey>();
1487 assert_debug_clone::<Saga>();
1488 assert_debug_clone::<Region>();
1489 assert_debug_clone::<WitnessKey>();
1490 assert_debug_clone::<RangeKey>();
1491 assert_debug_clone::<ReadWitness>();
1492 assert_debug_clone::<WriteWitness>();
1493 assert_debug_clone::<WitnessIndexSegment>();
1494 assert_debug_clone::<DependencyEdge>();
1495 assert_debug_clone::<CommitProof>();
1496 assert_debug_clone::<TableId>();
1497 assert_debug_clone::<IndexId>();
1498 assert_debug_clone::<RowId>();
1499 assert_debug_clone::<ColumnIdx>();
1500 assert_debug_clone::<BtreeRef>();
1501 assert_debug_clone::<SemanticKeyKind>();
1502 assert_debug_clone::<SemanticKeyRef>();
1503 assert_debug_clone::<StructuralEffects>();
1504 assert_debug_clone::<IntentFootprint>();
1505 assert_debug_clone::<RebaseExpr>();
1506 assert_debug_clone::<RebaseUnaryOp>();
1507 assert_debug_clone::<RebaseBinaryOp>();
1508 assert_debug_clone::<IntentOpKind>();
1509 assert_debug_clone::<IntentOp>();
1510 assert_debug_clone::<PageHistory>();
1511 assert_debug_clone::<ArcCache>();
1512 assert_debug_clone::<RootManifest>();
1513 assert_debug_clone::<TxnSlot>();
1514 assert_debug_clone::<OperatingMode>();
1515 }
1516
1517 #[test]
1518 fn test_remote_cap_from_bytes_roundtrip() {
1519 let raw = [0xAB_u8; 16];
1520 let cap = RemoteCap::from_bytes(raw);
1521 assert_eq!(cap.as_bytes(), &raw);
1522 }
1523
1524 #[test]
1525 fn test_idempotency_key_derivation_is_deterministic() {
1526 let req = b"fetch:object=42";
1527 let a = IdempotencyKey::derive(7, req);
1528 let b = IdempotencyKey::derive(7, req);
1529 let c = IdempotencyKey::derive(8, req);
1530 assert_eq!(a, b);
1531 assert_ne!(a, c);
1532 }
1533
1534 #[test]
1535 fn test_remote_cap_roundtrip() {
1536 let raw = [0xAB_u8; 16];
1537 let cap = RemoteCap::from_bytes(raw);
1538 assert_eq!(cap.as_bytes(), &raw);
1539 }
1540
1541 #[test]
1542 fn test_symbol_auth_master_key_cap_roundtrip() {
1543 let raw = [0xCD_u8; 32];
1544 let cap = SymbolAuthMasterKeyCap::from_bytes(raw);
1545 assert_eq!(cap.as_bytes(), &raw);
1546 }
1547
1548 #[test]
1549 fn test_idempotency_key_roundtrip() {
1550 let raw = [0x11_u8; 16];
1551 let key = IdempotencyKey::from_bytes(raw);
1552 assert_eq!(key.as_bytes(), &raw);
1553 }
1554
1555 #[test]
1556 fn test_saga_constructor() {
1557 let key = IdempotencyKey::from_bytes([0x22_u8; 16]);
1558 let saga = Saga::new(key);
1559 assert_eq!(saga.key(), key);
1560 }
1561
1562 fn arb_budget() -> impl Strategy<Value = Budget> {
1563 (
1564 prop::option::of(any::<u64>()),
1565 any::<u32>(),
1566 prop::option::of(any::<u64>()),
1567 any::<u8>(),
1568 )
1569 .prop_map(|(deadline_ms, poll_quota, cost_quota, priority)| Budget {
1570 deadline: deadline_ms.map(Duration::from_millis),
1571 poll_quota,
1572 cost_quota,
1573 priority,
1574 })
1575 }
1576
    // Algebraic laws for `Budget::meet`: together with commutativity,
    // associativity makes the combined budget of a set of constraints
    // independent of the order in which they are merged.
    proptest! {
        #[test]
        fn prop_budget_combine_associative(a in arb_budget(), b in arb_budget(), c in arb_budget()) {
            prop_assert_eq!(a.meet(b).meet(c), a.meet(b.meet(c)));
        }

        #[test]
        fn prop_budget_combine_commutative(a in arb_budget(), b in arb_budget()) {
            prop_assert_eq!(a.meet(b), b.meet(a));
        }
    }
1588
1589 #[test]
1592 fn test_rowid_reuse_without_autoincrement() {
1593 let mut alloc = RowIdAllocator::new(RowIdMode::Normal);
1594 let r = alloc.allocate(Some(RowId::new(5))).unwrap();
1596 assert_eq!(r.get(), 6);
1597
1598 let r = alloc.allocate(Some(RowId::new(3))).unwrap();
1600 assert_eq!(r.get(), 4);
1601 }
1602
1603 #[test]
1604 fn test_autoincrement_no_reuse() {
1605 let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);
1606 let r = alloc.allocate(Some(RowId::new(5))).unwrap();
1608 assert_eq!(r.get(), 6);
1609
1610 let r = alloc.allocate(Some(RowId::new(3))).unwrap();
1613 assert_eq!(r.get(), 7);
1614 }
1615
1616 #[test]
1617 fn test_sqlite_sequence_updates() {
1618 let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);
1619 assert_eq!(alloc.sequence_high_water(), 0);
1620
1621 let _ = alloc.allocate(Some(RowId::new(10))).unwrap();
1622 assert_eq!(alloc.sequence_high_water(), 11);
1623
1624 alloc.set_sequence_high_water(100);
1626 let r = alloc.allocate(Some(RowId::new(50))).unwrap();
1627 assert_eq!(r.get(), 101);
1628 assert_eq!(alloc.sequence_high_water(), 101);
1629 }
1630
1631 #[test]
1632 fn test_max_rowid_exhausted_autoincrement() {
1633 let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);
1634 let result = alloc.allocate(Some(RowId::MAX));
1636 assert!(result.is_err());
1637 }
1638
1639 #[test]
1640 fn test_max_rowid_exhausted_normal() {
1641 let mut alloc = RowIdAllocator::new(RowIdMode::Normal);
1642 let result = alloc.allocate(Some(RowId::MAX));
1645 assert!(result.is_err());
1646 }
1647
1648 #[test]
1649 fn test_rowid_allocate_empty_table() {
1650 let mut alloc = RowIdAllocator::new(RowIdMode::Normal);
1651 let r = alloc.allocate(None).unwrap();
1652 assert_eq!(r.get(), 1);
1653
1654 let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);
1655 let r = alloc.allocate(None).unwrap();
1656 assert_eq!(r.get(), 1);
1657 }
1658
1659 #[test]
1662 fn test_intent_op_all_variants_encode_decode_roundtrip() {
1663 use crate::SqliteValue;
1664
1665 let variants: Vec<IntentOpKind> = vec![
1666 IntentOpKind::Insert {
1667 table: TableId::new(1),
1668 key: RowId::new(100),
1669 record: vec![0x01, 0x02, 0x03],
1670 },
1671 IntentOpKind::Delete {
1672 table: TableId::new(2),
1673 key: RowId::new(200),
1674 },
1675 IntentOpKind::Update {
1676 table: TableId::new(3),
1677 key: RowId::new(300),
1678 new_record: vec![0x04, 0x05],
1679 },
1680 IntentOpKind::IndexInsert {
1681 index: IndexId::new(10),
1682 key: vec![0xAA, 0xBB],
1683 rowid: RowId::new(400),
1684 },
1685 IntentOpKind::IndexDelete {
1686 index: IndexId::new(11),
1687 key: vec![0xCC],
1688 rowid: RowId::new(500),
1689 },
1690 IntentOpKind::UpdateExpression {
1691 table: TableId::new(4),
1692 key: RowId::new(600),
1693 column_updates: vec![
1694 (
1695 ColumnIdx::new(0),
1696 RebaseExpr::BinaryOp {
1697 op: RebaseBinaryOp::Add,
1698 left: Box::new(RebaseExpr::ColumnRef(ColumnIdx::new(0))),
1699 right: Box::new(RebaseExpr::Literal(SqliteValue::Integer(1))),
1700 },
1701 ),
1702 (
1703 ColumnIdx::new(2),
1704 RebaseExpr::Coalesce(vec![
1705 RebaseExpr::ColumnRef(ColumnIdx::new(2)),
1706 RebaseExpr::Literal(SqliteValue::Integer(0)),
1707 ]),
1708 ),
1709 ],
1710 },
1711 ];
1712
1713 for variant in &variants {
1714 let op = IntentOp {
1715 schema_epoch: 42,
1716 footprint: IntentFootprint::empty(),
1717 op: variant.clone(),
1718 };
1719
1720 let json = serde_json::to_string(&op).expect("serialize must succeed");
1721 let decoded: IntentOp = serde_json::from_str(&json).expect("deserialize must succeed");
1722
1723 assert_eq!(decoded, op, "roundtrip failed for variant: {variant:?}");
1724 }
1725 }
1726
    #[test]
    // The semantic-key digest must be a pure function of (kind, btree, key
    // bytes): stable across calls, and sensitive to each input separately.
    fn test_semantic_key_ref_digest_stable() {
        let table = BtreeRef::Table(TableId::new(42));
        let key_bytes = b"canonical_key_data";

        // Same inputs twice -> identical digest (determinism).
        let d1 = SemanticKeyRef::compute_digest(SemanticKeyKind::TableRow, table, key_bytes);
        let d2 = SemanticKeyRef::compute_digest(SemanticKeyKind::TableRow, table, key_bytes);
        assert_eq!(d1, d2, "digest must be stable across calls");

        // The constructor must store the same digest compute_digest yields.
        let skr = SemanticKeyRef::new(table, SemanticKeyKind::TableRow, key_bytes);
        assert_eq!(skr.key_digest, d1);

        // Varying the key bytes changes the digest.
        let d3 = SemanticKeyRef::compute_digest(SemanticKeyKind::TableRow, table, b"different_key");
        assert_ne!(d1, d3);

        // Varying the kind changes the digest.
        let d4 = SemanticKeyRef::compute_digest(SemanticKeyKind::IndexEntry, table, key_bytes);
        assert_ne!(d1, d4);

        // Varying the btree (index vs. table, same numeric id) changes it too.
        let index = BtreeRef::Index(IndexId::new(42));
        let d5 = SemanticKeyRef::compute_digest(SemanticKeyKind::TableRow, index, key_bytes);
        assert_ne!(d1, d5);

        // Digest width is fixed at 16 bytes (128 bits).
        assert_eq!(d1.len(), 16);
    }
1757
    #[test]
    // Bitflag semantics of StructuralEffects: empty set, containment of
    // unions, combining all known flags, and serde round-trip of a set.
    fn test_structural_effects_bitflags() {
        // NONE is the empty set with a zero bit pattern.
        assert_eq!(StructuralEffects::NONE.bits(), 0);
        assert!(StructuralEffects::NONE.is_empty());

        // The empty set contains no flags.
        let leaf = StructuralEffects::NONE;
        assert!(!leaf.contains(StructuralEffects::PAGE_SPLIT));
        assert!(!leaf.contains(StructuralEffects::FREELIST_MUTATE));

        // A union contains exactly its members.
        let split_overflow = StructuralEffects::PAGE_SPLIT | StructuralEffects::OVERFLOW_ALLOC;
        assert!(split_overflow.contains(StructuralEffects::PAGE_SPLIT));
        assert!(split_overflow.contains(StructuralEffects::OVERFLOW_ALLOC));
        assert!(!split_overflow.contains(StructuralEffects::PAGE_MERGE));

        // OR-ing every flag yields a set containing each of them.
        let all = StructuralEffects::PAGE_SPLIT
            | StructuralEffects::PAGE_MERGE
            | StructuralEffects::BALANCE_MULTI_PAGE
            | StructuralEffects::OVERFLOW_ALLOC
            | StructuralEffects::OVERFLOW_MUTATE
            | StructuralEffects::FREELIST_MUTATE
            | StructuralEffects::POINTER_MAP_MUTATE
            | StructuralEffects::DEFRAG_MOVE_CELLS;
        assert!(all.contains(StructuralEffects::FREELIST_MUTATE));
        assert!(all.contains(StructuralEffects::DEFRAG_MOVE_CELLS));

        // Flag sets must survive a serde (JSON) round-trip unchanged.
        let json = serde_json::to_string(&split_overflow).expect("serialize");
        let decoded: StructuralEffects = serde_json::from_str(&json).expect("deserialize");
        assert_eq!(decoded, split_overflow);
    }
1792
1793 #[test]
1794 fn test_rowid_allocator_monotone_no_collision() {
1795 let mut alloc = RowIdAllocator::new(RowIdMode::Normal);
1798 let mut ids: Vec<RowId> = Vec::new();
1799
1800 for _ in 0..5 {
1802 let max_existing = ids.last().copied();
1803 let r = alloc.allocate(max_existing).unwrap();
1804 ids.push(r);
1805 }
1806
1807 for _ in 0..5 {
1809 let max_existing = ids.last().copied();
1810 let r = alloc.allocate(max_existing).unwrap();
1811 ids.push(r);
1812 }
1813
1814 let raw_ids: Vec<i64> = ids.iter().map(|r| r.get()).collect();
1816 for window in raw_ids.windows(2) {
1817 assert!(
1818 window[1] > window[0],
1819 "RowIds must be strictly monotonically increasing: {} <= {}",
1820 window[0],
1821 window[1]
1822 );
1823 }
1824
1825 let unique: HashSet<i64> = raw_ids.iter().copied().collect();
1827 assert_eq!(unique.len(), raw_ids.len(), "RowIds must be disjoint");
1828 }
1829
1830 #[test]
1831 fn test_rowid_allocator_bump_on_explicit_rowid() {
1832 let mut alloc = RowIdAllocator::new(RowIdMode::AutoIncrement);
1833
1834 let r1 = alloc.allocate(None).unwrap();
1836 assert_eq!(r1.get(), 1);
1837
1838 alloc.set_sequence_high_water(1000);
1840
1841 let r2 = alloc.allocate(Some(RowId::new(999))).unwrap();
1843 assert!(
1844 r2.get() >= 1001,
1845 "allocator must bump past explicit rowid 1000, got {}",
1846 r2.get()
1847 );
1848
1849 let r3 = alloc.allocate(Some(r2)).unwrap();
1851 assert!(r3.get() > r2.get());
1852 }
1853}