1use std::{
5 fs,
6 path::{Path, PathBuf},
7 sync::{atomic::Ordering, Arc},
8 time::Duration,
9};
10
11use serde::Deserialize;
12use std::sync::atomic::AtomicUsize;
13
// Default promotion rates; debug and release builds use different values.
// NOTE(review): "RLEASE" is a typo for "RELEASE" — kept as-is because call
// sites below reference this name; renaming must be done file-wide at once.
const DEFAULT_PROMOTION_RATE_DEBUG: usize = 50;
const DEFAULT_PROMOTION_RATE_RLEASE: usize = 30;
/// Upper bound on a mini page's size, in bytes.
const DEFAULT_MAX_MINI_PAGE_SIZE: usize = 2048;
/// Default copy-on-access ratio for the circular buffer.
const DEFAULT_COPY_ON_ACCESS_RATIO: f64 = 0.1;
/// Circular-buffer capacity: 32 MiB.
const DEFAULT_CIRCULAR_BUFFER_SIZE: usize = 1024 * 1024 * 32;
/// Smallest record size accepted by the circular buffer, in bytes.
const DEFAULT_MIN_RECORD_SIZE: usize = 32;
/// Largest record size accepted by the circular buffer, in bytes.
const DEFAULT_MAX_RECORD_SIZE: usize = 1952;
/// Leaf page size, in bytes.
const DEFAULT_LEAF_PAGE_SIZE: usize = 4096;
/// Maximum key length; fences are sized at twice this value.
const DEFAULT_MAX_KEY_LEN: usize = 16;
23
/// Top-level runtime configuration for the index / storage engine.
///
/// The two promotion rates are `AtomicUsize` so they can be adjusted in place
/// through the setter methods; everything else is plain data. Because of the
/// atomics, `Clone` is implemented by hand (see the impl below).
#[derive(Debug)]
pub struct Config {
    // Promotion rate applied to point reads. The unit of the number (e.g.
    // 1-in-N vs. percent) is not visible in this file — confirm at the reader.
    pub(crate) read_promotion_rate: AtomicUsize,
    // Promotion rate applied to scans; same unit caveat as above.
    pub(crate) scan_promotion_rate: AtomicUsize,
    // How pages are persisted (memory, std file I/O, io_uring, ...).
    pub(crate) storage_backend: StorageBackend,
    // Circular-buffer ("cb") capacity in bytes (default 32 MiB).
    pub(crate) cb_size_byte: usize,
    // Smallest record size admitted to the circular buffer, in bytes.
    pub(crate) cb_min_record_size: usize,
    // Largest record size admitted to the circular buffer, in bytes.
    pub(crate) cb_max_record_size: usize,
    // Size of a leaf page in bytes (default 4096).
    pub(crate) leaf_page_size: usize,
    // Maximum fence length; always derived as 2 * max key length.
    pub(crate) max_fence_len: usize,
    // Copy-on-access ratio for the circular buffer (default 0.1).
    pub(crate) cb_copy_on_access_ratio: f64,
    // Whether the record cache is used on the read path (default true).
    pub(crate) read_record_cache: bool,
    // Path of the index file; ":memory:"/":cache:" prefixes select in-memory.
    pub(crate) file_path: PathBuf,
    // Upper bound on a mini page's size in bytes (default 2048).
    pub(crate) max_mini_page_size: usize,
    // Use binary (vs. presumably linear) search inside mini pages — confirm.
    pub(crate) mini_page_binary_search: bool,
    // Write-ahead-log settings; `None` disables the WAL entirely.
    pub(crate) write_ahead_log: Option<Arc<WalConfig>>,
    // Whether a write loads the full page (default true); exact semantics are
    // not visible here — confirm at the write path.
    pub(crate) write_load_full_page: bool,
    // Operate purely as a cache (set via the ":cache:" path prefix).
    pub(crate) cache_only: bool,
}
46
/// Hand-written because `AtomicUsize` does not implement `Clone`: the two
/// promotion counters are read with `Ordering::Relaxed` and re-wrapped in
/// fresh atomics, so the clone carries an independent snapshot of their
/// current values. All other fields clone/copy normally.
impl Clone for Config {
    fn clone(&self) -> Self {
        Self {
            read_promotion_rate: AtomicUsize::new(self.read_promotion_rate.load(Ordering::Relaxed)),
            scan_promotion_rate: AtomicUsize::new(self.scan_promotion_rate.load(Ordering::Relaxed)),
            storage_backend: self.storage_backend.clone(),
            cb_size_byte: self.cb_size_byte,
            cb_min_record_size: self.cb_min_record_size,
            cb_max_record_size: self.cb_max_record_size,
            leaf_page_size: self.leaf_page_size,
            max_fence_len: self.max_fence_len,
            cb_copy_on_access_ratio: self.cb_copy_on_access_ratio,
            read_record_cache: self.read_record_cache,
            file_path: self.file_path.clone(),
            max_mini_page_size: self.max_mini_page_size,
            mini_page_binary_search: self.mini_page_binary_search,
            write_ahead_log: self.write_ahead_log.clone(),
            write_load_full_page: self.write_load_full_page,
            cache_only: self.cache_only,
        }
    }
}
69
/// Backend used for persisting pages.
///
/// The Linux-only variants are gated with `#[cfg]`; `Spdk` additionally
/// requires the `spdk` cargo feature.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum StorageBackend {
    /// No file I/O; everything stays in memory.
    Memory,
    /// Standard-library file I/O (the `Default`).
    Std,
    /// Std file I/O, presumably with O_DIRECT — confirm at the backend impl.
    #[cfg(target_os = "linux")]
    StdDirect,
    /// io_uring in polling mode.
    #[cfg(target_os = "linux")]
    IoUringPolling,
    /// io_uring in blocking mode.
    #[cfg(target_os = "linux")]
    IoUringBlocking,
    /// SPDK userspace NVMe driver (requires the `spdk` feature).
    #[cfg(all(target_os = "linux", feature = "spdk"))]
    Spdk,
}
84
85impl Default for StorageBackend {
86 fn default() -> Self {
87 Self::Std
88 }
89}
90
/// TOML schema accepted by [`Config::new_with_config_file`].
///
/// Field names are the keys expected in the config file. Settings absent from
/// this schema (scan promotion rate, copy-on-access ratio, mini-page options,
/// WAL) fall back to built-in defaults when the `Config` is built.
#[derive(Debug, Deserialize)]
pub struct ConfigFile {
    // Circular-buffer capacity in bytes.
    pub(crate) cb_size_byte: usize,
    // Minimum record size for the circular buffer, in bytes.
    pub(crate) cb_min_record_size: usize,
    // Maximum record size for the circular buffer, in bytes.
    pub(crate) cb_max_record_size: usize,
    // Doubled to produce `Config::max_fence_len`.
    pub(crate) cb_max_key_len: usize,
    // Leaf page size in bytes.
    pub(crate) leaf_page_size: usize,
    // Stored into `Config::file_path`.
    pub(crate) index_file_path: String,
    // "disk" selects the platform default backend; any other value → memory.
    pub(crate) backend_storage: String,
    // Promotion rate applied to point reads.
    pub(crate) read_promotion_rate: usize,
    // Whether a write loads the full page.
    pub(crate) write_load_full_page: bool,
    // Operate purely as a cache.
    pub(crate) cache_only: bool,
}
104
105impl Default for Config {
108 fn default() -> Self {
109 let read_promotion_rate = if cfg!(debug_assertions) {
110 DEFAULT_PROMOTION_RATE_DEBUG
111 } else {
112 DEFAULT_PROMOTION_RATE_RLEASE
113 };
114 let scan_promotion_rate = if cfg!(debug_assertions) {
115 DEFAULT_PROMOTION_RATE_DEBUG
116 } else {
117 DEFAULT_PROMOTION_RATE_RLEASE
118 };
119
120 Self {
121 read_promotion_rate: AtomicUsize::new(read_promotion_rate),
122 scan_promotion_rate: AtomicUsize::new(scan_promotion_rate),
123 cb_size_byte: DEFAULT_CIRCULAR_BUFFER_SIZE,
124 cb_min_record_size: DEFAULT_MIN_RECORD_SIZE,
125 cb_max_record_size: DEFAULT_MAX_RECORD_SIZE,
126 leaf_page_size: DEFAULT_LEAF_PAGE_SIZE,
127 max_fence_len: DEFAULT_MAX_KEY_LEN * 2,
128 cb_copy_on_access_ratio: DEFAULT_COPY_ON_ACCESS_RATIO,
129 file_path: PathBuf::new(),
130 read_record_cache: true,
131 max_mini_page_size: DEFAULT_MAX_MINI_PAGE_SIZE,
132 mini_page_binary_search: true,
133 storage_backend: StorageBackend::Memory,
134 write_ahead_log: None,
135 write_load_full_page: true,
136 cache_only: false,
137 }
138 }
139}
140impl Config {
141 pub fn new(file_path: impl AsRef<Path>, circular_buffer_size: usize) -> Self {
142 let mut config = Self::default();
143 let mut cache_only = false;
144 let storage_backend = if file_path.as_ref().to_str().unwrap().starts_with(":memory:") {
145 StorageBackend::Memory
146 } else if file_path.as_ref().to_str().unwrap().starts_with(":cache:") {
147 cache_only = true;
148 StorageBackend::Memory
149 } else {
150 StorageBackend::default()
151 };
152
153 config
154 .storage_backend(storage_backend)
155 .cache_only(cache_only)
156 .cb_size_byte(circular_buffer_size)
157 .file_path(file_path);
158
159 config
160 }
161
162 pub fn new_with_config_file<P: AsRef<Path>>(config_file_path: P) -> Self {
165 let config_file_str =
166 fs::read_to_string(config_file_path).expect("couldn't read config file");
167 let config_file: ConfigFile =
168 toml::from_str(&config_file_str).expect("Fail to parse config file");
169 let scan_promotion_rate = if cfg!(debug_assertions) {
170 DEFAULT_PROMOTION_RATE_DEBUG
171 } else {
172 DEFAULT_PROMOTION_RATE_RLEASE
173 };
174 let mut storage = StorageBackend::Memory;
175 if config_file.backend_storage == "disk" {
176 storage = StorageBackend::default();
177 }
178
179 Self {
181 read_promotion_rate: AtomicUsize::new(config_file.read_promotion_rate),
182 scan_promotion_rate: AtomicUsize::new(scan_promotion_rate),
183 cb_size_byte: config_file.cb_size_byte,
184 cb_min_record_size: config_file.cb_min_record_size,
185 cb_max_record_size: config_file.cb_max_record_size,
186 leaf_page_size: config_file.leaf_page_size,
187 max_fence_len: config_file.cb_max_key_len * 2,
188 cb_copy_on_access_ratio: DEFAULT_COPY_ON_ACCESS_RATIO,
189 file_path: PathBuf::from(config_file.index_file_path),
190 read_record_cache: true,
191 max_mini_page_size: DEFAULT_MAX_MINI_PAGE_SIZE,
192 mini_page_binary_search: true,
193 storage_backend: storage,
194 write_ahead_log: None,
195 write_load_full_page: config_file.write_load_full_page,
196 cache_only: config_file.cache_only,
197 }
198 }
199
200 pub fn storage_backend(&mut self, backend: StorageBackend) -> &mut Self {
205 self.storage_backend = backend;
206 self
207 }
208
209 pub fn scan_promotion_rate(&mut self, prob: usize) -> &mut Self {
213 self.scan_promotion_rate.store(prob, Ordering::Relaxed);
214 self
215 }
216
217 pub fn read_record_cache(&mut self, read_full_page_cache: bool) -> &mut Self {
222 self.read_record_cache = read_full_page_cache;
223 self
224 }
225
226 pub fn max_mini_page_size(&mut self, size: usize) -> &mut Self {
230 self.max_mini_page_size = size;
231 self
232 }
233
234 pub fn mini_page_binary_search(&mut self, binary_search: bool) -> &mut Self {
238 self.mini_page_binary_search = binary_search;
239 self
240 }
241
242 pub fn read_promotion_rate(&mut self, prob: usize) -> &mut Self {
246 self.read_promotion_rate.store(prob, Ordering::Relaxed);
247 self
248 }
249
250 pub fn cb_copy_on_access_ratio(&mut self, ratio: f64) -> &mut Self {
258 self.cb_copy_on_access_ratio = ratio;
259 self
260 }
261
262 pub fn enable_write_ahead_log(&mut self, wal_config: Arc<WalConfig>) -> &mut Self {
266 self.write_ahead_log = Some(wal_config);
268 self
269 }
270
271 pub fn enable_write_ahead_log_default(&mut self) -> &mut Self {
279 let wal_config = WalConfig::new(self.file_path.parent().unwrap().join("wal.log"));
280 self.write_ahead_log = Some(Arc::new(wal_config));
281 self
282 }
283
284 pub fn cache_only(&mut self, cache_only: bool) -> &mut Self {
286 self.cache_only = cache_only;
287 self
288 }
289
290 pub fn cb_size_byte(&mut self, cb_size_byte: usize) -> &mut Self {
292 self.cb_size_byte = cb_size_byte;
293 self
294 }
295
296 pub fn file_path<P: AsRef<Path>>(&mut self, file_path: P) -> &mut Self {
297 self.file_path = file_path.as_ref().to_path_buf();
298 self
299 }
300
301 pub fn cb_max_key_len(&mut self, max_key_len: usize) -> &mut Self {
302 self.max_fence_len = max_key_len * 2;
303 self
304 }
305
306 pub fn cb_min_record_size(&mut self, min_record_size: usize) -> &mut Self {
307 self.cb_min_record_size = min_record_size;
308 self
309 }
310
311 pub fn cb_max_record_size(&mut self, max_record_size: usize) -> &mut Self {
312 self.cb_max_record_size = max_record_size;
313 self
314 }
315
316 pub fn leaf_page_size(&mut self, leaf_page_size: usize) -> &mut Self {
317 self.leaf_page_size = leaf_page_size;
318 self
319 }
320}
321
/// Settings for the write-ahead log, installed via
/// [`Config::enable_write_ahead_log`] (wrapped in an `Arc`).
#[derive(Clone, Debug)]
pub struct WalConfig {
    // Path of the WAL file.
    pub(crate) file_path: PathBuf,
    // Interval between WAL flushes (default 1 ms).
    pub(crate) flush_interval: Duration,
    // Maximum size of one WAL segment, in bytes (default 1 GiB).
    pub(crate) segment_size: usize,
    // Backend used for WAL writes (default `Std`).
    pub(crate) storage_backend: StorageBackend,
}
330
331impl WalConfig {
332 pub fn new(file_path: impl AsRef<Path>) -> Self {
339 Self {
340 file_path: file_path.as_ref().to_path_buf(),
341 flush_interval: Duration::from_millis(1),
342 segment_size: 1024 * 1024 * 1024,
343 storage_backend: StorageBackend::Std,
344 }
345 }
346
347 pub fn flush_interval(&mut self, interval: Duration) -> &mut Self {
349 self.flush_interval = interval;
350 self
351 }
352
353 pub fn segment_size(&mut self, size: usize) -> &mut Self {
355 self.segment_size = size;
356 self
357 }
358
359 pub fn storage_backend(&mut self, backend: StorageBackend) -> &mut Self {
363 self.storage_backend = backend;
364 self
365 }
366}
367
#[cfg(test)]
mod tests {
    use super::*;

    /// TOML fixture checked into the source tree; the assertions below pin
    /// the values it contains.
    const SAMPLE_CONFIG_FILE: &str = "src/sample_config.toml";

    #[test]
    fn test_new_with_config_file() {
        let config = Config::new_with_config_file(SAMPLE_CONFIG_FILE);

        assert_eq!(config.cb_size_byte, 8192);
        assert_eq!(config.read_promotion_rate.load(Ordering::Relaxed), 100);
        // `assert!` instead of `assert_eq!(_, true/false)` — clippy
        // `bool_assert_comparison`.
        assert!(config.write_load_full_page);
        assert_eq!(config.file_path, PathBuf::from("c/d/E"));
        assert!(!config.cache_only);
    }
}