// seerdb 0.0.10 — "Research-grade storage engine with learned data structures".
// (The original text here was a docs.rs page header plus a ~450-line
// line-number gutter left over from HTML extraction — not source code.
// Folded into this comment so the file parses.)
//! Power Failure Tests for seerdb
//!
//! Tests crash consistency under simulated power failures using dm-flakey.
//! These tests verify that seerdb correctly recovers after unexpected crashes.
//!
//! # Requirements
//!
//! - Linux with device mapper support
//! - Root/sudo access for dm-flakey setup
//! - Run with: `sudo -E cargo test --test power_failure_tests`
//!
//! # How dm-flakey works
//!
//! dm-flakey creates a virtual block device that can:
//! 1. Drop writes (simulating power loss during write)
//! 2. Corrupt specific bytes (simulating partial writes)
//! 3. Return errors for a period (simulating device failure)
//!
//! Test flow:
//! 1. Create loopback device from file
//! 2. Create dm-flakey device on top
//! 3. Write data to seerdb on dm-flakey device
//! 4. Trigger "crash" by switching dm-flakey to drop_writes mode
//! 5. Destroy dm-flakey, recreate in normal mode
//! 6. Verify seerdb recovers correctly

#![cfg(target_os = "linux")]

use seerdb::{DBOptions, DB};
use std::fs::{self, File};
use std::path::{Path, PathBuf};
use std::process::Command;

/// Check if we have root privileges
fn is_root() -> bool {
    // SAFETY: geteuid takes no arguments, has no preconditions, and cannot
    // fail; the unsafe block exists only because it is an FFI call.
    let euid = unsafe { libc::geteuid() };
    euid == 0
}

/// Size of the test loopback device (64MB) — large enough for an ext4
/// filesystem plus all test data, small enough to create quickly in /tmp.
const LOOP_SIZE_MB: usize = 64;

/// Helper to run shell commands
///
/// Runs `cmd` with `args` and returns its stdout on success, or an error
/// string containing stderr (or the spawn failure) otherwise.
fn run_cmd(cmd: &str, args: &[&str]) -> Result<String, String> {
    match Command::new(cmd).args(args).output() {
        Err(e) => Err(format!("Failed to run {}: {}", cmd, e)),
        Ok(output) if !output.status.success() => Err(format!(
            "{} failed: {}",
            cmd,
            String::from_utf8_lossy(&output.stderr)
        )),
        Ok(output) => Ok(String::from_utf8_lossy(&output.stdout).to_string()),
    }
}

/// Get device size in sectors
///
/// Asks `blockdev --getsz` for the 512-byte sector count of `loop_dev`,
/// as needed for the dmsetup table.
fn get_sectors(loop_dev: &str) -> Result<u64, String> {
    run_cmd("blockdev", &["--getsz", loop_dev]).and_then(|out| {
        out.trim()
            .parse()
            .map_err(|e| format!("Failed to parse sector count: {}", e))
    })
}

/// Convert a `Path` to `&str` with error handling.
///
/// # Errors
///
/// Returns an error when the path is not valid UTF-8 (possible on Linux,
/// where paths are raw bytes). The path itself is included in the message so
/// a failing mount/umount step is diagnosable.
fn path_str(path: &Path) -> Result<&str, String> {
    path.to_str()
        .ok_or_else(|| format!("Invalid path (not UTF-8): {}", path.display()))
}

/// dm-flakey test harness
///
/// Owns the full device stack built by `setup()`:
/// backing file → loop device → dm-flakey → ext4 mount.
/// Everything is torn down best-effort in `cleanup()` (also run on `Drop`).
struct DmFlakeyHarness {
    /// Path to the backing file
    backing_file: PathBuf,
    /// Loop device (e.g., /dev/loop0); `None` until `setup()` has run
    loop_device: Option<String>,
    /// dm-flakey device name
    dm_name: String,
    /// Mount point for the filesystem
    mount_point: PathBuf,
    /// Whether the device is in "crash" mode (drop_writes active)
    in_crash_mode: bool,
}

impl DmFlakeyHarness {
    /// Create a new dm-flakey test harness
    ///
    /// Only computes per-test paths/names under /tmp/seerdb_power_test; no
    /// devices are created until `setup()`. Fails fast without root, since
    /// every later step (losetup/dmsetup/mkfs/mount) requires it anyway.
    fn new(test_name: &str) -> Result<Self, String> {
        if !is_root() {
            return Err("Root privileges required for dm-flakey tests".to_string());
        }

        let base_dir = PathBuf::from("/tmp/seerdb_power_test");
        fs::create_dir_all(&base_dir).map_err(|e| format!("Failed to create test dir: {}", e))?;

        // Names are derived from test_name so different tests don't collide
        // on backing files, mount points, or device-mapper names.
        let backing_file = base_dir.join(format!("{}.img", test_name));
        let mount_point = base_dir.join(format!("{}_mount", test_name));
        let dm_name = format!("seerdb_test_{}", test_name);

        Ok(Self {
            backing_file,
            loop_device: None,
            dm_name,
            mount_point,
            in_crash_mode: false,
        })
    }

    /// Set up the loopback device and dm-flakey
    ///
    /// Builds, in order: sparse backing file → loop device → dm-flakey
    /// device (normal mode) → ext4 filesystem → mount. The ordering matters;
    /// each step consumes the previous step's device path.
    fn setup(&mut self) -> Result<(), String> {
        // Create backing file (set_len makes it sparse; blocks allocate on write)
        let file = File::create(&self.backing_file)
            .map_err(|e| format!("Failed to create backing file: {}", e))?;
        file.set_len((LOOP_SIZE_MB * 1024 * 1024) as u64)
            .map_err(|e| format!("Failed to set file size: {}", e))?;

        // Set up loop device; `-f --show` picks the first free /dev/loopN
        // and prints its path on stdout.
        let output = run_cmd("losetup", &["-f", "--show", path_str(&self.backing_file)?])?;
        let loop_dev = output.trim().to_string();
        self.loop_device = Some(loop_dev.clone());

        // Get device size in sectors (dmsetup tables are sector-addressed)
        let sectors = get_sectors(&loop_dev)?;

        // Create dm-flakey device (starts in normal mode)
        // Table: "0 <sectors> flakey <dev> 0 <up_interval> <down_interval>"
        // up_interval=3600 (1 hour), down_interval=0 (no failures initially)
        let table = format!("0 {} flakey {} 0 3600 0", sectors, loop_dev);
        run_cmd("dmsetup", &["create", &self.dm_name, "--table", &table])?;

        // Format with ext4 (simple filesystem for testing)
        let dm_path = format!("/dev/mapper/{}", self.dm_name);
        run_cmd("mkfs.ext4", &["-q", &dm_path])?;

        // Create and mount
        fs::create_dir_all(&self.mount_point)
            .map_err(|e| format!("Failed to create mount point: {}", e))?;
        run_cmd("mount", &[&dm_path, path_str(&self.mount_point)?])?;

        Ok(())
    }

    /// Get the path where seerdb should store data
    fn data_path(&self) -> PathBuf {
        self.mount_point.join("seerdb_data")
    }

    /// Simulate a crash by switching dm-flakey to drop_writes mode
    ///
    /// After this call, writes to the device are silently discarded — data
    /// not already on the backing store is lost, mimicking power loss.
    /// Idempotent: a second call while already in crash mode is a no-op.
    fn simulate_crash(&mut self) -> Result<(), String> {
        if self.in_crash_mode {
            return Ok(());
        }

        // Unmount first (simulates dirty unmount). Best-effort (`-l` = lazy,
        // errors ignored): the mount may be busy, which is fine for a "crash".
        let _ = run_cmd("umount", &["-l", path_str(&self.mount_point)?]);

        // Reload dm-flakey with drop_writes
        let loop_dev = self.loop_device.as_ref().ok_or("No loop device")?;
        let sectors = get_sectors(loop_dev)?;

        // Switch to drop_writes mode: up_interval=0, down_interval=3600,
        // one feature arg ("drop_writes") — the device stays "down" and
        // silently drops all writes.
        let table = format!("0 {} flakey {} 0 0 3600 1 drop_writes", sectors, loop_dev);
        // reload only stages the new table; suspend+resume makes it live.
        run_cmd("dmsetup", &["reload", &self.dm_name, "--table", &table])?;
        run_cmd("dmsetup", &["suspend", &self.dm_name])?;
        run_cmd("dmsetup", &["resume", &self.dm_name])?;

        self.in_crash_mode = true;
        Ok(())
    }

    /// Recover from crash (switch back to normal mode and remount)
    ///
    /// Mirrors `simulate_crash`: stages the normal (always-up) table,
    /// activates it via suspend/resume, then remounts the filesystem.
    /// No-op if not currently in crash mode.
    fn recover(&mut self) -> Result<(), String> {
        if !self.in_crash_mode {
            return Ok(());
        }

        // Reload dm-flakey in normal mode
        let loop_dev = self.loop_device.as_ref().ok_or("No loop device")?;
        let sectors = get_sectors(loop_dev)?;

        // Same table shape as setup(): up for 3600s, never down.
        let table = format!("0 {} flakey {} 0 3600 0", sectors, loop_dev);
        run_cmd("dmsetup", &["reload", &self.dm_name, "--table", &table])?;
        run_cmd("dmsetup", &["suspend", &self.dm_name])?;
        run_cmd("dmsetup", &["resume", &self.dm_name])?;

        // Remount
        let dm_path = format!("/dev/mapper/{}", self.dm_name);
        run_cmd("mount", &[&dm_path, path_str(&self.mount_point)?])?;

        self.in_crash_mode = false;
        Ok(())
    }

    /// Clean up all resources
    ///
    /// Teardown order is the reverse of setup: unmount → remove dm device →
    /// detach loop device → delete files. Each step is best-effort so a
    /// failure partway through setup still releases what was created.
    fn cleanup(&mut self) {
        // Best-effort cleanup (ignore errors, path_str failure just skips that step)
        if let Ok(mount_path) = path_str(&self.mount_point) {
            let _ = run_cmd("umount", &["-l", mount_path]);
        }
        let _ = run_cmd("dmsetup", &["remove", &self.dm_name]);
        if let Some(ref loop_dev) = self.loop_device {
            let _ = run_cmd("losetup", &["-d", loop_dev]);
        }
        let _ = fs::remove_file(&self.backing_file);
        let _ = fs::remove_dir(&self.mount_point);
    }
}

// Ensure devices/mounts are released even when a test panics mid-way.
impl Drop for DmFlakeyHarness {
    fn drop(&mut self) {
        // cleanup() is best-effort and swallows errors — nothing can be
        // propagated out of drop anyway.
        self.cleanup();
    }
}

// =============================================================================
// Tests (only run on Linux with root)
// =============================================================================

#[test]
#[ignore] // Run manually with: sudo -E cargo test --test power_failure_tests -- --ignored
fn test_crash_during_put() {
    if !is_root() {
        eprintln!("Skipping: requires root privileges");
        return;
    }

    let mut harness = DmFlakeyHarness::new("crash_put").expect("Failed to create harness");
    harness.setup().expect("Failed to setup harness");

    // Phase 1: write a flushed batch, then an unflushed batch, then crash.
    {
        let db = DB::open(&harness.data_path()).expect("Failed to open DB");

        // Durable batch: written and flushed before the crash.
        (0..100).for_each(|i| {
            db.put(format!("committed_{:04}", i).as_bytes(), b"value")
                .expect("Put failed");
        });
        db.flush().expect("Flush failed");

        // Volatile batch: never flushed, so it may not survive.
        (0..50).for_each(|i| {
            db.put(format!("uncommitted_{:04}", i).as_bytes(), b"value")
                .expect("Put failed");
        });

        // Pull the plug BEFORE flushing the second batch.
        harness.simulate_crash().expect("Failed to simulate crash");
    }

    // Phase 2: bring the device back and check what survived.
    harness.recover().expect("Failed to recover");
    {
        let db = DB::open(&harness.data_path()).expect("Failed to reopen DB after crash");

        // Every flushed key must still be readable.
        for i in 0..100 {
            let key = format!("committed_{:04}", i);
            let value = db.get(key.as_bytes()).expect("Get failed");
            assert!(value.is_some(), "Committed key {} missing after crash", key);
        }

        // Unflushed keys may or may not be present (depends on WAL state).
        // The invariant under test is only: no corruption, consistent state.
        println!("✓ Crash during put: recovery successful");
    }
}

#[test]
#[ignore]
fn test_crash_during_flush() {
    if !is_root() {
        eprintln!("Skipping: requires root privileges");
        return;
    }

    let mut harness = DmFlakeyHarness::new("crash_flush").expect("Failed to create harness");
    harness.setup().expect("Failed to setup harness");

    // Phase 1: load data, flush, then crash the device.
    {
        let db = DB::open(&harness.data_path()).expect("Failed to open DB");

        let value = vec![b'v'; 100];
        for i in 0..200 {
            let key = format!("key_{:04}", i);
            db.put(key.as_bytes(), &value).expect("Put failed");
        }

        // The crash lands after the flush completes (tests post-flush
        // recovery). A true mid-flush crash would need failpoints instead.
        db.flush().expect("Flush failed");
        harness.simulate_crash().expect("Failed to simulate crash");
    }

    // Phase 2: recover and count survivors.
    harness.recover().expect("Failed to recover");
    {
        let db = DB::open(&harness.data_path()).expect("Failed to reopen DB after crash");

        let recovered = (0..200)
            .filter(|i| {
                let key = format!("key_{:04}", i);
                db.get(key.as_bytes()).expect("Get failed").is_some()
            })
            .count();

        // Since the flush finished before the crash, near-total recovery is expected.
        println!("✓ Crash during flush: recovered {}/200 keys", recovered);
        assert!(
            recovered >= 190,
            "Too much data loss: only {}/200 recovered",
            recovered
        );
    }
}

#[test]
#[ignore]
fn test_repeated_crash_recovery() {
    if !is_root() {
        eprintln!("Skipping: requires root privileges");
        return;
    }

    let mut harness = DmFlakeyHarness::new("repeated_crash").expect("Failed to create harness");
    harness.setup().expect("Failed to setup harness");

    // Run several write → flush → crash → recover cycles back to back.
    for cycle in 0..5 {
        {
            let db = DB::open(&harness.data_path()).expect("Failed to open DB");

            (0..50).for_each(|i| {
                db.put(format!("cycle{}_{:04}", cycle, i).as_bytes(), b"value")
                    .expect("Put failed");
            });
            db.flush().expect("Flush failed");
        }

        harness.simulate_crash().expect("Failed to simulate crash");
        harness.recover().expect("Failed to recover");
    }

    // After the final recovery, every flushed key from every cycle must be readable.
    {
        let db = DB::open(&harness.data_path()).expect("Failed to open DB");

        for cycle in 0..5 {
            for i in 0..50 {
                let key = format!("cycle{}_{:04}", cycle, i);
                let value = db.get(key.as_bytes()).expect("Get failed");
                assert!(
                    value.is_some(),
                    "Key {} missing after repeated crashes",
                    key
                );
            }
        }
        println!("✓ Repeated crash recovery: all 250 keys recovered");
    }
}

#[test]
#[ignore]
fn test_crash_during_compaction() {
    if !is_root() {
        eprintln!("Skipping: requires root privileges");
        return;
    }

    let mut harness = DmFlakeyHarness::new("crash_compact").expect("Failed to create harness");
    harness.setup().expect("Failed to setup harness");

    // Phase 1: force many small flushes so background compaction kicks in.
    {
        let db = DBOptions::default()
            .memtable_capacity(1024 * 64) // 64KB memtable for faster flushes
            .open(&harness.data_path())
            .expect("Failed to open DB");

        let value = vec![b'v'; 100];
        for batch in 0..10 {
            for i in 0..100 {
                db.put(format!("batch{}_{:04}", batch, i).as_bytes(), &value)
                    .expect("Put failed");
            }
            db.flush().expect("Flush failed");
        }

        // The crash lands after the flushes (may catch post-compaction state).
        // A true mid-compaction crash would need failpoints instead.
        harness.simulate_crash().expect("Failed to simulate crash");
    }

    // Phase 2: recover and count how many of the 1000 keys survived.
    harness.recover().expect("Failed to recover");
    {
        let db = DB::open(&harness.data_path()).expect("Failed to reopen DB after crash");

        let found = (0..10)
            .flat_map(|batch| (0..100).map(move |i| format!("batch{}_{:04}", batch, i)))
            .filter(|key| db.get(key.as_bytes()).expect("Get failed").is_some())
            .count();

        println!("✓ Crash during compaction: recovered {}/1000 keys", found);
        assert!(
            found >= 950,
            "Too much data loss: only {}/1000 recovered",
            found
        );
    }
}

/// Verify helper: summarize on-disk DB state after a crash.
///
/// Opens the DB (which exercises recovery — only the side effect is needed)
/// and returns `(sst_count, wal_count)` for files in `data_path`.
///
/// # Errors
///
/// Returns an error string if the DB cannot be opened or the directory
/// cannot be read.
#[allow(dead_code)]
fn verify_db_state(data_path: &Path) -> Result<(usize, usize), String> {
    let _db = DB::open(data_path).map_err(|e| format!("Failed to open DB: {}", e))?;

    // Single directory scan counting both file kinds (the original code read
    // the directory twice, once per extension).
    let mut sst_count = 0;
    let mut wal_count = 0;
    for entry in fs::read_dir(data_path).map_err(|e| format!("Failed to read dir: {}", e))? {
        // Skip unreadable entries, matching the previous filter_map(ok) behavior.
        if let Ok(entry) = entry {
            let name = entry.file_name();
            let name = name.to_string_lossy();
            if name.ends_with(".sst") {
                sst_count += 1;
            } else if name.ends_with(".wal") {
                wal_count += 1;
            }
        }
    }

    Ok((sst_count, wal_count))
}