// azoth_core/config/canonical.rs
use serde::{Deserialize, Serialize};
use std::path::PathBuf;

4/// Configuration for read connection pooling
5///
6/// When enabled, maintains a pool of read-only connections/transactions
7/// for concurrent read access without blocking writes.
8#[derive(Debug, Clone, Serialize, Deserialize)]
9pub struct ReadPoolConfig {
10    /// Whether pooling is enabled (default: false, opt-in)
11    #[serde(default)]
12    pub enabled: bool,
13
14    /// Number of read connections/slots in the pool (default: 4)
15    ///
16    /// For LMDB: controls concurrent read transaction slots
17    /// For SQLite: controls number of read-only connections
18    #[serde(default = "default_pool_size")]
19    pub pool_size: usize,
20
21    /// Timeout in milliseconds when acquiring a pooled connection (default: 5000)
22    ///
23    /// If no connection is available within this time, an error is returned.
24    #[serde(default = "default_acquire_timeout")]
25    pub acquire_timeout_ms: u64,
26}
27
28impl Default for ReadPoolConfig {
29    fn default() -> Self {
30        Self {
31            enabled: false,
32            pool_size: default_pool_size(),
33            acquire_timeout_ms: default_acquire_timeout(),
34        }
35    }
36}
37
38impl ReadPoolConfig {
39    /// Create a new enabled read pool configuration
40    pub fn enabled(pool_size: usize) -> Self {
41        Self {
42            enabled: true,
43            pool_size,
44            acquire_timeout_ms: default_acquire_timeout(),
45        }
46    }
47
48    /// Set the acquire timeout
49    pub fn with_timeout(mut self, timeout_ms: u64) -> Self {
50        self.acquire_timeout_ms = timeout_ms;
51        self
52    }
53}
54
/// serde default for `ReadPoolConfig::pool_size`.
fn default_pool_size() -> usize {
    4
}

/// serde default for `ReadPoolConfig::acquire_timeout_ms`, in milliseconds.
fn default_acquire_timeout() -> u64 {
    5000
}

63/// Configuration for canonical store
64#[derive(Debug, Clone, Serialize, Deserialize)]
65pub struct CanonicalConfig {
66    /// Path to the canonical store directory
67    pub path: PathBuf,
68
69    /// Maximum map size for LMDB (in bytes)
70    /// Default: 10GB
71    #[serde(default = "default_map_size")]
72    pub map_size: usize,
73
74    /// Sync mode for durability
75    #[serde(default)]
76    pub sync_mode: SyncMode,
77
78    /// Number of stripes for lock manager
79    /// Default: 256
80    #[serde(default = "default_stripe_count")]
81    pub stripe_count: usize,
82
83    /// Maximum number of readers (LMDB specific)
84    /// Default: 126
85    #[serde(default = "default_max_readers")]
86    pub max_readers: u32,
87
88    /// Use read-only transactions for preflight (default: true)
89    ///
90    /// When enabled, preflight validation uses concurrent read-only transactions
91    /// instead of write transactions, improving throughput and reducing contention.
92    #[serde(default = "default_true")]
93    pub preflight_read_only: bool,
94
95    /// Chunk size for state iteration (default: 1000)
96    ///
97    /// State iterators fetch data in chunks to maintain constant memory usage.
98    /// Larger chunks improve throughput but use more memory temporarily.
99    #[serde(default = "default_chunk_size")]
100    pub state_iter_chunk_size: usize,
101
102    /// Enable in-memory cache for preflight validation (default: true)
103    ///
104    /// When enabled, frequently accessed state keys are cached in memory during
105    /// preflight validation, reducing LMDB reads for hot keys.
106    #[serde(default = "default_true")]
107    pub preflight_cache_enabled: bool,
108
109    /// Maximum number of entries in the preflight cache (default: 10,000)
110    ///
111    /// Each entry uses approximately 120 bytes + value size.
112    /// Default of 10,000 entries ≈ 1-6 MB memory overhead.
113    #[serde(default = "default_preflight_cache_size")]
114    pub preflight_cache_size: usize,
115
116    /// Time-to-live for preflight cache entries in seconds (default: 60)
117    ///
118    /// Entries older than this will be evicted on access.
119    #[serde(default = "default_preflight_cache_ttl")]
120    pub preflight_cache_ttl_secs: u64,
121
122    /// Read pool configuration (optional, disabled by default)
123    ///
124    /// When enabled, maintains a pool of read-only connections for
125    /// concurrent read access without blocking writes.
126    #[serde(default)]
127    pub read_pool: ReadPoolConfig,
128
129    /// Lock acquisition timeout in milliseconds (default: 5000)
130    ///
131    /// When acquiring stripe locks for transaction preflight, if a lock
132    /// cannot be acquired within this timeout, the transaction fails with
133    /// `LockTimeout` error instead of blocking indefinitely.
134    #[serde(default = "default_lock_timeout")]
135    pub lock_timeout_ms: u64,
136
137    /// Maximum size of a single event payload in bytes (default: 4MB)
138    #[serde(default = "default_event_max_size")]
139    pub event_max_size_bytes: usize,
140
141    /// Maximum total size for a single event batch append in bytes (default: 64MB)
142    #[serde(default = "default_event_batch_max_bytes")]
143    pub event_batch_max_bytes: usize,
144}
145
146#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Default)]
147pub enum SyncMode {
148    /// Full durability – calls `fsync()` on every commit.
149    ///
150    /// Guarantees that committed data survives power loss and OS crashes.
151    /// This is the safest option but has the highest write latency (~2-5x slower
152    /// than `NoMetaSync`).
153    Full,
154
155    /// Skips syncing the LMDB meta-page on each commit (default).
156    ///
157    /// Data pages are still synced, so committed data is durable against process
158    /// crashes. In the rare event of an OS crash or power failure, the last
159    /// transaction _may_ be lost, but the database will remain consistent.
160    /// This is a good balance of durability and performance for most workloads.
161    #[default]
162    NoMetaSync,
163
164    /// Disables `fsync()` entirely – the OS page cache decides when to flush.
165    ///
166    /// **WARNING**: This is the fastest mode but offers no durability guarantees
167    /// beyond normal process lifetime. A power failure or OS crash can lose an
168    /// unbounded number of recent transactions or, in the worst case, corrupt the
169    /// database file. Only use this for ephemeral, reproducible, or test workloads.
170    NoSync,
171}
172
/// serde default for `CanonicalConfig::map_size`: 10 GiB.
///
/// NOTE(review): this literal exceeds `u32::MAX`, so it cannot compile
/// where `usize` is 32 bits — confirm such targets are unsupported.
fn default_map_size() -> usize {
    10 * 1024 * 1024 * 1024
}

/// serde default for `CanonicalConfig::stripe_count`.
fn default_stripe_count() -> usize {
    256
}

/// serde default for `CanonicalConfig::max_readers`.
fn default_max_readers() -> u32 {
    126
}

/// serde default for boolean fields that are on unless configured off.
fn default_true() -> bool {
    true
}

/// serde default for `CanonicalConfig::state_iter_chunk_size`.
fn default_chunk_size() -> usize {
    1000
}

/// serde default for `CanonicalConfig::preflight_cache_size`, in entries.
fn default_preflight_cache_size() -> usize {
    10_000
}

/// serde default for `CanonicalConfig::preflight_cache_ttl_secs`.
fn default_preflight_cache_ttl() -> u64 {
    60
}

/// serde default for `CanonicalConfig::lock_timeout_ms`.
fn default_lock_timeout() -> u64 {
    5000
}

/// serde default for `CanonicalConfig::event_max_size_bytes`: 4 MiB.
fn default_event_max_size() -> usize {
    4 * 1024 * 1024
}

/// serde default for `CanonicalConfig::event_batch_max_bytes`: 64 MiB.
fn default_event_batch_max_bytes() -> usize {
    64 * 1024 * 1024
}

213impl CanonicalConfig {
214    pub fn new(path: PathBuf) -> Self {
215        Self {
216            path,
217            map_size: default_map_size(),
218            sync_mode: SyncMode::default(),
219            stripe_count: default_stripe_count(),
220            max_readers: default_max_readers(),
221            preflight_read_only: default_true(),
222            state_iter_chunk_size: default_chunk_size(),
223            preflight_cache_enabled: default_true(),
224            preflight_cache_size: default_preflight_cache_size(),
225            preflight_cache_ttl_secs: default_preflight_cache_ttl(),
226            read_pool: ReadPoolConfig::default(),
227            lock_timeout_ms: default_lock_timeout(),
228            event_max_size_bytes: default_event_max_size(),
229            event_batch_max_bytes: default_event_batch_max_bytes(),
230        }
231    }
232
233    pub fn with_map_size(mut self, map_size: usize) -> Self {
234        self.map_size = map_size;
235        self
236    }
237
238    pub fn with_sync_mode(mut self, sync_mode: SyncMode) -> Self {
239        self.sync_mode = sync_mode;
240        self
241    }
242
243    pub fn with_stripe_count(mut self, stripe_count: usize) -> Self {
244        self.stripe_count = stripe_count;
245        self
246    }
247
248    pub fn with_preflight_cache(mut self, enabled: bool) -> Self {
249        self.preflight_cache_enabled = enabled;
250        self
251    }
252
253    pub fn with_preflight_cache_size(mut self, size: usize) -> Self {
254        self.preflight_cache_size = size;
255        self
256    }
257
258    pub fn with_preflight_cache_ttl(mut self, ttl_secs: u64) -> Self {
259        self.preflight_cache_ttl_secs = ttl_secs;
260        self
261    }
262
263    /// Configure read connection pooling
264    pub fn with_read_pool(mut self, config: ReadPoolConfig) -> Self {
265        self.read_pool = config;
266        self
267    }
268
269    /// Enable read pooling with the specified pool size
270    pub fn with_read_pool_size(mut self, pool_size: usize) -> Self {
271        self.read_pool = ReadPoolConfig::enabled(pool_size);
272        self
273    }
274
275    /// Set lock acquisition timeout in milliseconds
276    ///
277    /// This controls how long to wait when acquiring stripe locks for
278    /// transaction preflight. Default is 5000ms.
279    pub fn with_lock_timeout(mut self, timeout_ms: u64) -> Self {
280        self.lock_timeout_ms = timeout_ms;
281        self
282    }
283
284    /// Set maximum event payload size in bytes.
285    pub fn with_event_max_size(mut self, size_bytes: usize) -> Self {
286        self.event_max_size_bytes = size_bytes;
287        self
288    }
289
290    /// Set maximum event batch size in bytes.
291    pub fn with_event_batch_max_bytes(mut self, size_bytes: usize) -> Self {
292        self.event_batch_max_bytes = size_bytes;
293        self
294    }
295}