//! armdb 0.1.12 — sharded bitcask key-value storage optimized for NVMe.
//!
//! Compaction concurrency tests: readers and writers racing a compaction
//! pass must always observe consistent, up-to-date values.
#[cfg(feature = "var-collections")]
use armdb::VarTree;
use armdb::{Config, ConstTree};
use std::sync::Arc;
use std::thread;
use tempfile::tempdir;

#[test]
fn test_compaction_with_concurrent_reads() {
    let dir = tempdir().unwrap();
    let mut config = Config::test();
    config.shard_count = 2;
    config.max_file_size = 4096;
    config.compaction_threshold = 0.1;

    let tree = Arc::new(ConstTree::<[u8; 8], 8>::open(dir.path(), config).unwrap());

    // Write 1000 entries.
    for i in 0..1000u64 {
        tree.put(&i.to_be_bytes(), &(i * 7).to_be_bytes()).unwrap();
    }

    // Overwrite the first 500 to create dead bytes for compaction to reclaim.
    for i in 0..500u64 {
        tree.put(&i.to_be_bytes(), &(i * 7 + 1).to_be_bytes()).unwrap();
    }

    // Spawn reader threads. Every write above happens-before the spawns, so a
    // correct store must return exactly the latest value for each key while
    // compaction runs: it must neither drop keys nor resurrect stale values.
    // (The earlier version tolerated both the old and new value and silently
    // skipped missing keys, which would have masked compaction bugs.)
    let mut handles = Vec::new();
    for _ in 0..3 {
        let tree = Arc::clone(&tree);
        handles.push(thread::spawn(move || {
            for iter in 0..1000u64 {
                let i = iter % 1000;
                let key = i.to_be_bytes();
                let val = tree
                    .get(&key)
                    .unwrap_or_else(|| panic!("key {} missing during compaction", i));
                let v = u64::from_be_bytes(val);
                let expected = if i < 500 { i * 7 + 1 } else { i * 7 };
                assert_eq!(v, expected, "stale or wrong value for key {}", i);
            }
        }));
    }

    // Compact in the main thread while the readers are running.
    tree.compact().unwrap();

    for handle in handles {
        handle.join().expect("reader thread panicked");
    }

    // Verify final state: all 1000 keys with their latest values.
    for i in 0..1000u64 {
        let key = i.to_be_bytes();
        let expected = if i < 500 {
            (i * 7 + 1).to_be_bytes()
        } else {
            (i * 7).to_be_bytes()
        };
        let val = tree
            .get(&key)
            .unwrap_or_else(|| panic!("key {} not found after compaction", i));
        assert_eq!(val, expected, "value mismatch for key {}", i);
    }
}

#[test]
fn test_compaction_with_concurrent_writes() {
    let dir = tempdir().unwrap();
    let mut config = Config::test();
    config.shard_count = 2;
    config.max_file_size = 4096;
    config.compaction_threshold = 0.1;

    let tree = Arc::new(ConstTree::<[u8; 8], 8>::open(dir.path(), config).unwrap());

    // Seed 1000 entries, then overwrite the first half so compaction has
    // dead bytes to reclaim.
    for i in 0..1000u64 {
        tree.put(&i.to_be_bytes(), &i.to_be_bytes()).unwrap();
    }
    for i in 0..500u64 {
        tree.put(&i.to_be_bytes(), &(i + 1).to_be_bytes()).unwrap();
    }

    // Background writer inserts fresh keys 1000..2000 while we compact.
    let writer = {
        let tree = Arc::clone(&tree);
        thread::spawn(move || {
            for i in 1000..2000u64 {
                tree.put(&i.to_be_bytes(), &(i * 3).to_be_bytes()).unwrap();
            }
        })
    };

    // Compact in the main thread, racing the writer.
    tree.compact().unwrap();

    writer.join().expect("writer thread panicked");

    // Every key — overwritten, untouched, and concurrently written — must
    // hold its latest value.
    for i in 0..2000u64 {
        let expected = match i {
            0..=499 => i + 1,
            500..=999 => i,
            _ => i * 3,
        };
        let val = tree
            .get(&i.to_be_bytes())
            .unwrap_or_else(|| panic!("key {} not found", i));
        assert_eq!(val, expected.to_be_bytes(), "value mismatch for key {}", i);
    }
}

#[cfg(feature = "var-collections")]
#[test]
fn test_compaction_with_concurrent_reads_var() {
    let dir = tempdir().unwrap();
    let mut config = Config::test();
    config.shard_count = 2;
    config.max_file_size = 4096;
    config.compaction_threshold = 0.1;

    let tree = Arc::new(VarTree::<[u8; 8]>::open(dir.path(), config).unwrap());

    // Write 1000 variable-length entries.
    for i in 0..1000u64 {
        tree.put(&i.to_be_bytes(), format!("val_{i}").as_bytes()).unwrap();
    }

    // Overwrite the first 500 to create dead bytes for compaction to reclaim.
    for i in 0..500u64 {
        tree.put(&i.to_be_bytes(), format!("new_{i}").as_bytes()).unwrap();
    }

    // Every write above happens-before the reader spawns, so readers must see
    // exactly the latest value for each key while compaction runs: compaction
    // must neither drop keys nor resurrect overwritten values. (The earlier
    // version accepted both the old and new value and silently skipped missing
    // keys, which would have masked compaction bugs.)
    let mut handles = Vec::new();
    for _ in 0..3 {
        let tree = Arc::clone(&tree);
        handles.push(thread::spawn(move || {
            for iter in 0..1000u64 {
                let i = iter % 1000;
                let val = tree
                    .get(&i.to_be_bytes())
                    .unwrap_or_else(|| panic!("key {} missing during compaction", i));
                let expected = if i < 500 {
                    format!("new_{i}")
                } else {
                    format!("val_{i}")
                };
                assert_eq!(
                    val.as_ref(),
                    expected.as_bytes(),
                    "stale or wrong value for key {}",
                    i
                );
            }
        }));
    }

    // Compact in the main thread while the readers are running.
    tree.compact().unwrap();

    for handle in handles {
        handle.join().expect("reader thread panicked");
    }

    // Verify final state: all 1000 keys with their latest values.
    for i in 0..1000u64 {
        let expected = if i < 500 {
            format!("new_{i}")
        } else {
            format!("val_{i}")
        };
        let val = tree
            .get(&i.to_be_bytes())
            .unwrap_or_else(|| panic!("key {} not found after compaction", i));
        assert_eq!(val.as_ref(), expected.as_bytes(), "value mismatch for key {}", i);
    }
}

#[test]
fn test_double_compaction() {
    let dir = tempdir().unwrap();
    let mut config = Config::test();
    config.max_file_size = 4096;
    config.compaction_threshold = 0.1;

    let tree = ConstTree::<[u8; 8], 8>::open(dir.path(), config).unwrap();

    // Phase 1: seed 500 keys, overwrite the first half, then compact.
    for i in 0..500u64 {
        tree.put(&i.to_be_bytes(), &i.to_be_bytes()).unwrap();
    }
    for i in 0..250u64 {
        tree.put(&i.to_be_bytes(), &(i + 1000).to_be_bytes()).unwrap();
    }
    tree.compact().unwrap();

    // Phase 2: overwrite the second half, then compact a second time — a
    // previously-compacted store must survive another full pass.
    for i in 250..500u64 {
        tree.put(&i.to_be_bytes(), &(i + 2000).to_be_bytes()).unwrap();
    }
    tree.compact().unwrap();

    // All 500 keys must remain, each with its most recent value.
    assert_eq!(tree.len(), 500);
    for i in 0..500u64 {
        let expected = if i < 250 { i + 1000 } else { i + 2000 };
        assert_eq!(
            tree.get(&i.to_be_bytes()).unwrap(),
            expected.to_be_bytes(),
            "double compaction: value mismatch at key {}",
            i
        );
    }
}