quill_sql/config/mod.rs

use std::path::PathBuf;
use std::time::Duration;

#[derive(Debug, Clone, Copy)]
pub struct IOSchedulerConfig {
    /// Number of io_uring worker threads (or fallback thread pool workers).
    pub workers: usize,
    /// io_uring queue depth (Linux only; this field is compiled out elsewhere).
    #[cfg(target_os = "linux")]
    pub iouring_queue_depth: usize,
    /// Number of registered fixed buffers for io_uring (Linux only).
    #[cfg(target_os = "linux")]
    pub iouring_fixed_buffers: usize,
    /// Optional SQPOLL idle time in milliseconds (Linux only).
    #[cfg(target_os = "linux")]
    pub iouring_sqpoll_idle_ms: Option<u32>,
    /// Whether the IO backend should force an fsync/fdatasync after writes.
    pub fsync_on_write: bool,
    /// WAL handler worker threads (buffered I/O).
    pub wal_workers: usize,
}

impl IOSchedulerConfig {
    /// Default worker count: one worker per available hardware thread,
    /// falling back to 1 when parallelism cannot be queried.
    pub fn default_workers() -> usize {
        std::thread::available_parallelism()
            .map(|n| n.get())
            .unwrap_or(1)
    }
}

impl Default for IOSchedulerConfig {
    fn default() -> Self {
        IOSchedulerConfig {
            workers: Self::default_workers(),
            #[cfg(target_os = "linux")]
            iouring_queue_depth: 256,
            #[cfg(target_os = "linux")]
            iouring_fixed_buffers: 256,
            #[cfg(target_os = "linux")]
            iouring_sqpoll_idle_ms: None,
            fsync_on_write: true,
            // Half the IO workers, but always at least one.
            wal_workers: std::cmp::max(1, Self::default_workers() / 2),
        }
    }
}
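
// Illustrative only: a sketch of overriding selected defaults with struct
// update syntax. The values below are assumptions for the example, not
// tuned recommendations.
#[allow(dead_code)]
fn example_io_scheduler_config() -> IOSchedulerConfig {
    IOSchedulerConfig {
        // Pin the worker count instead of deriving it from the host.
        workers: 4,
        // Keep every remaining field at its documented default.
        ..IOSchedulerConfig::default()
    }
}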

#[derive(Debug, Clone, Copy)]
pub struct BufferPoolConfig {
    /// Number of frames the buffer pool can hold.
    pub buffer_pool_size: usize,
    /// The K parameter of the LRU-K replacement policy.
    pub lru_k_k: usize,
    /// Enable the TinyLFU frequency sketch.
    pub tiny_lfu_enable: bool,
    /// Number of counters used by the TinyLFU sketch.
    pub tiny_lfu_counters: usize,
    /// Enable frequency-based admission control for the pool.
    pub admission_enable: bool,
}

impl Default for BufferPoolConfig {
    fn default() -> Self {
        BufferPoolConfig {
            buffer_pool_size: 5000,
            lru_k_k: 2,
            tiny_lfu_enable: true,
            tiny_lfu_counters: 4,
            admission_enable: true,
        }
    }
}

#[derive(Debug, Clone)]
pub struct WalConfig {
    /// Directory where WAL segments are stored.
    pub directory: PathBuf,
    /// Maximum size of a single WAL segment, in bytes.
    pub segment_size: u64,
    /// Whether to fsync segments when flushing.
    pub sync_on_flush: bool,
    /// Background WAL writer interval in milliseconds; 0 or `None` disables it.
    pub writer_interval_ms: Option<u64>,
    /// Capacity of the in-memory WAL buffer.
    pub buffer_capacity: usize,
    /// Coalesce queued writes into flushes of up to this many bytes.
    pub flush_coalesce_bytes: usize,
    /// Whether commits wait for the WAL flush to complete.
    pub synchronous_commit: bool,
    /// Checkpoint interval in milliseconds; 0 or `None` disables checkpointing.
    pub checkpoint_interval_ms: Option<u64>,
    /// Number of old WAL segments to retain.
    pub retain_segments: usize,
}

#[derive(Debug, Clone, Copy)]
pub struct BackgroundConfig {
    /// Interval for the background WAL writer; `None` disables it.
    pub wal_writer_interval: Option<Duration>,
    /// Interval for periodic checkpoints; `None` disables them.
    pub checkpoint_interval: Option<Duration>,
    /// Interval for the background writer / index vacuum; `None` disables it.
    pub bg_writer_interval: Option<Duration>,
    /// Index vacuum tuning parameters.
    pub vacuum: IndexVacuumConfig,
}

/// Derives the background-task schedule from the WAL and vacuum configs.
/// The `QUILL_BG_WRITER_INTERVAL_MS` environment variable, when set to a
/// parseable integer, overrides the vacuum interval; an interval of 0
/// disables the corresponding task.
pub fn background_config(
    wal_config: &WalConfig,
    vacuum_cfg: IndexVacuumConfig,
) -> BackgroundConfig {
    let wal_writer_interval = wal_config.writer_interval_ms.and_then(duration_from_ms);
    let checkpoint_interval = wal_config.checkpoint_interval_ms.and_then(duration_from_ms);

    let env_interval = std::env::var("QUILL_BG_WRITER_INTERVAL_MS")
        .ok()
        .and_then(|raw| raw.parse::<u64>().ok());
    let bg_interval_ms = env_interval.unwrap_or(vacuum_cfg.interval_ms);
    let bg_writer_interval = duration_from_ms(bg_interval_ms);

    BackgroundConfig {
        wal_writer_interval,
        checkpoint_interval,
        bg_writer_interval,
        vacuum: vacuum_cfg,
    }
}
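
// Illustrative only: a sketch of wiring a WalConfig into BackgroundConfig.
// With the defaults, writer_interval_ms = Some(50) and
// checkpoint_interval_ms = Some(5000) surface as `Some(Duration)` values,
// while a value of 0 would be mapped to `None` (task disabled).
#[allow(dead_code)]
fn example_background_wiring() -> BackgroundConfig {
    let wal = WalConfig::default();
    background_config(&wal, IndexVacuumConfig::default())
}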

/// Maps an interval in milliseconds to a `Duration`, treating 0 as "disabled".
fn duration_from_ms(ms: u64) -> Option<Duration> {
    if ms == 0 {
        None
    } else {
        Some(Duration::from_millis(ms))
    }
}

impl Default for WalConfig {
    fn default() -> Self {
        WalConfig {
            directory: PathBuf::from("wal"),
            segment_size: 16 * 1024 * 1024, // 16 MiB segments by default
            sync_on_flush: true,
            writer_interval_ms: Some(50),
            buffer_capacity: 256,
            flush_coalesce_bytes: 2 * 1024 * 1024,
            synchronous_commit: true,
            checkpoint_interval_ms: Some(5000),
            retain_segments: 8,
        }
    }
}
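
// Illustrative only: worked arithmetic on the defaults above. Retained WAL
// disk usage is bounded by retain_segments * segment_size =
// 8 * 16 MiB = 128 MiB (assuming full segments).
#[allow(dead_code)]
fn example_wal_retention_bound_bytes() -> u64 {
    let cfg = WalConfig::default();
    cfg.segment_size * cfg.retain_segments as u64
}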

#[derive(Debug, Clone, Copy)]
pub struct BTreeConfig {
    /// Enable batching of sequential (append-order) inserts.
    pub seq_batch_enable: bool,
    /// Window size used to detect and batch sequential inserts.
    pub seq_window: usize,
    /// Enable page prefetching.
    pub prefetch_enable: bool,
    /// Number of pages to prefetch ahead.
    pub prefetch_window: usize,
    /// Debug verbosity for the find path (0 = off).
    pub debug_find_level: u8,
    /// Debug verbosity for the insert path (0 = off).
    pub debug_insert_level: u8,
    /// Debug verbosity for the split path (0 = off).
    pub debug_split_level: u8,
}

impl Default for BTreeConfig {
    fn default() -> Self {
        BTreeConfig {
            seq_batch_enable: true,
            seq_window: 32,
            prefetch_enable: true,
            prefetch_window: 2,
            debug_find_level: 0,
            debug_insert_level: 0,
            debug_split_level: 0,
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub struct TableScanConfig {
    /// Enable streaming table scans.
    pub stream_scan_enable: bool,
    /// Table size, in pages, above which streaming is used;
    /// `None` means derive the threshold from the pool size.
    pub stream_threshold_pages: Option<u32>,
    /// Number of pages to read ahead during a scan.
    pub readahead_pages: usize,
}

impl Default for TableScanConfig {
    fn default() -> Self {
        TableScanConfig {
            stream_scan_enable: false,
            stream_threshold_pages: None, // None => use pool_size/4
            readahead_pages: 2,
        }
    }
}
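
// Illustrative only: a sketch of resolving the `None` threshold to
// pool_size / 4, as the default's comment suggests. The helper name and the
// `pool_size` parameter are hypothetical, not part of this module.
#[allow(dead_code)]
fn resolve_stream_threshold(cfg: &TableScanConfig, pool_size: u32) -> u32 {
    cfg.stream_threshold_pages.unwrap_or(pool_size / 4)
}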

#[derive(Debug, Clone, Copy)]
pub struct IndexVacuumConfig {
    /// Background vacuum interval in milliseconds.
    pub interval_ms: u64,
    /// Pending garbage counter threshold that triggers a cleanup batch.
    pub trigger_threshold: usize,
    /// Maximum number of entries to clean up in one batch.
    pub batch_limit: usize,
}

impl Default for IndexVacuumConfig {
    fn default() -> Self {
        Self {
            interval_ms: 10_000,     // 10s
            trigger_threshold: 4096, // pending count to trigger
            batch_limit: 128,        // small batch to avoid stalls
        }
    }
}
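
// Illustrative only: a minimal test sketch exercising the defaults and the
// "0 disables" convention of `duration_from_ms`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn zero_interval_disables_task() {
        assert_eq!(duration_from_ms(0), None);
        assert_eq!(duration_from_ms(50), Some(Duration::from_millis(50)));
    }

    #[test]
    fn default_wal_intervals_surface_in_background_config() {
        let bg = background_config(&WalConfig::default(), IndexVacuumConfig::default());
        // writer_interval_ms = Some(50) becomes a 50 ms Duration;
        // bg_writer_interval is not asserted because it can be overridden
        // by the QUILL_BG_WRITER_INTERVAL_MS environment variable.
        assert_eq!(bg.wal_writer_interval, Some(Duration::from_millis(50)));
        assert_eq!(bg.checkpoint_interval, Some(Duration::from_millis(5000)));
    }
}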