use std::sync::Arc;
use std::thread;
use mmdb::{DB, DbOptions, ReadOptions, WriteBatch, WriteOptions};
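// Test helper: open a database at `dir`, creating it if missing, with otherwise default options.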
fn make_db(dir: &std::path::Path) -> DB {
DB::open(
DbOptions {
create_if_missing: true,
..Default::default()
},
dir,
)
.unwrap()
}
#[test]
fn test_large_values() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
let large_value = vec![0xAB_u8; 1024 * 1024];
db.put(b"large_key", &large_value).unwrap();
let retrieved = db.get(b"large_key").unwrap().unwrap();
assert_eq!(retrieved.len(), large_value.len());
assert_eq!(retrieved, large_value);
}
#[test]
fn test_empty_value() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
db.put(b"key", b"").unwrap();
assert_eq!(db.get(b"key").unwrap(), Some(vec![]));
}
#[test]
fn test_binary_keys_and_values() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
let binary_key = vec![0, 1, 2, 255, 254, 253];
let binary_value = vec![128, 0, 0, 1, 128, 255];
db.put(&binary_key, &binary_value).unwrap();
assert_eq!(db.get(&binary_key).unwrap(), Some(binary_value));
}
#[test]
fn test_sequential_write_read_100() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for i in 0..100 {
let key = format!("{:08}", i);
let val = format!("value_{}", i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
for i in 0..100 {
let key = format!("{:08}", i);
let val = format!("value_{}", i);
assert_eq!(db.get(key.as_bytes()).unwrap(), Some(val.into_bytes()));
}
}
#[test]
fn test_overwrite_pattern() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
db.put(b"key", b"v1").unwrap();
assert_eq!(db.get(b"key").unwrap(), Some(b"v1".to_vec()));
db.put(b"key", b"v2").unwrap();
assert_eq!(db.get(b"key").unwrap(), Some(b"v2".to_vec()));
db.delete(b"key").unwrap();
assert_eq!(db.get(b"key").unwrap(), None);
db.put(b"key", b"v3").unwrap();
assert_eq!(db.get(b"key").unwrap(), Some(b"v3".to_vec()));
}
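// snapshot_seq() captures a sequence number; reads pinned to it via
// ReadOptions::snapshot must see exactly the state at that point, while
// unpinned reads observe the latest writes.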
#[test]
fn test_snapshot_isolation() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
db.put(b"a", b"1").unwrap();
db.put(b"b", b"2").unwrap();
let snap1 = db.snapshot_seq();
db.put(b"a", b"10").unwrap();
db.delete(b"b").unwrap();
db.put(b"c", b"3").unwrap();
let snap2 = db.snapshot_seq();
db.put(b"a", b"100").unwrap();
let r1 = ReadOptions {
snapshot: Some(snap1),
..Default::default()
};
assert_eq!(db.get_with_options(&r1, b"a").unwrap(), Some(b"1".to_vec()));
assert_eq!(db.get_with_options(&r1, b"b").unwrap(), Some(b"2".to_vec()));
assert_eq!(db.get_with_options(&r1, b"c").unwrap(), None);
let r2 = ReadOptions {
snapshot: Some(snap2),
..Default::default()
};
assert_eq!(
db.get_with_options(&r2, b"a").unwrap(),
Some(b"10".to_vec())
);
assert_eq!(db.get_with_options(&r2, b"b").unwrap(), None);
assert_eq!(db.get_with_options(&r2, b"c").unwrap(), Some(b"3".to_vec()));
assert_eq!(db.get(b"a").unwrap(), Some(b"100".to_vec()));
}
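// Four writer threads insert disjoint key ranges concurrently; every key must be
// readable once all writers have joined.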
#[test]
fn test_concurrent_stress() {
let dir = tempfile::tempdir().unwrap();
let db = Arc::new(make_db(dir.path()));
let num_threads = 4;
let ops_per_thread = 50;
let mut handles = vec![];
for t in 0..num_threads {
let db = db.clone();
handles.push(thread::spawn(move || {
for i in 0..ops_per_thread {
let key = format!("t{}_{:06}", t, i);
let val = format!("v{}_{}", t, i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
}));
}
for h in handles {
h.join().unwrap();
}
for t in 0..num_threads {
for i in 0..ops_per_thread {
let key = format!("t{}_{:06}", t, i);
let val = format!("v{}_{}", t, i);
assert_eq!(
db.get(key.as_bytes()).unwrap(),
Some(val.into_bytes()),
"missing key: {}",
key
);
}
}
}
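// A tiny write buffer forces memtable flushes; all data must survive an explicit
// flush + compact + close and remain readable after reopening the same directory.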
#[test]
fn test_flush_compact_reopen() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().to_path_buf();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 1024,
l0_compaction_trigger: 3,
..Default::default()
};
{
let db = DB::open(opts.clone(), &path).unwrap();
for i in 0..200 {
let key = format!("key_{:06}", i);
let val = format!("val_{:040}", i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
db.flush().unwrap();
db.compact().unwrap();
db.close().unwrap();
}
{
let db = DB::open(opts, &path).unwrap();
for i in 0..200 {
let key = format!("key_{:06}", i);
let val = format!("val_{:040}", i);
assert_eq!(
db.get(key.as_bytes()).unwrap(),
Some(val.into_bytes()),
"key {} missing after compact+reopen",
i
);
}
}
}
#[test]
fn test_iterator_with_flush_and_compact() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 1024,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
for round in 0..3 {
for i in 0..20 {
let key = format!("key_{:04}", round * 20 + i);
let val = format!("val_{}", round * 20 + i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
db.flush().unwrap();
}
db.compact().unwrap();
let entries: Vec<_> = db.iter().unwrap().collect();
assert_eq!(entries.len(), 60);
for i in 1..entries.len() {
assert!(
entries[i].0 > entries[i - 1].0,
"iterator not sorted at position {}",
i
);
}
}
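// Point deletes issued before flush + compact must keep hiding the deleted keys
// afterwards, while the untouched half of the keyspace stays readable.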
#[test]
fn test_delete_then_compact() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 512,
num_levels: 2,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
for i in 0..30 {
let key = format!("key_{:04}", i);
db.put(key.as_bytes(), b"alive").unwrap();
}
db.flush().unwrap();
for i in 0..15 {
let key = format!("key_{:04}", i);
db.delete(key.as_bytes()).unwrap();
}
db.flush().unwrap();
db.compact().unwrap();
for i in 0..15 {
let key = format!("key_{:04}", i);
assert_eq!(
db.get(key.as_bytes()).unwrap(),
None,
"key {} should be deleted",
i
);
}
for i in 15..30 {
let key = format!("key_{:04}", i);
assert_eq!(
db.get(key.as_bytes()).unwrap(),
Some(b"alive".to_vec()),
"key {} should exist",
i
);
}
let entries: Vec<_> = db.iter().unwrap().collect();
assert_eq!(entries.len(), 15);
}
#[test]
fn test_large_scale_write_recovery() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().to_path_buf();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 64 * 1024,
l0_compaction_trigger: 4,
..Default::default()
};
{
let db = DB::open(opts.clone(), &path).unwrap();
for i in 0..5_000u64 {
let key = format!("k{:08}", i);
let val = format!("v{:08}", i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
db.close().unwrap();
}
{
let db = DB::open(opts, &path).unwrap();
for i in 0..5_000u64 {
let key = format!("k{:08}", i);
let val = format!("v{:08}", i);
assert_eq!(
db.get(key.as_bytes()).unwrap(),
Some(val.into_bytes()),
"key {} missing after reopen",
key
);
}
}
}
#[test]
fn test_mixed_put_delete_range() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for i in 0..100 {
let key = format!("key_{:04}", i);
let val = format!("val_{:04}", i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
db.flush().unwrap();
db.delete_range(b"key_0020", b"key_0050").unwrap();
db.flush().unwrap();
let entries: Vec<(Vec<u8>, Vec<u8>)> = db.iter().unwrap().collect();
let keys: Vec<String> = entries
.iter()
.map(|(k, _)| String::from_utf8(k.clone()).unwrap())
.collect();
for i in 20..50 {
let key = format!("key_{:04}", i);
assert!(
!keys.contains(&key),
"key {} should have been range-deleted (iterator)",
key
);
}
for i in (0..20).chain(50..100) {
let key = format!("key_{:04}", i);
assert!(
keys.contains(&key),
"key {} should still exist in iterator",
key
);
}
assert_eq!(entries.len(), 70, "expected 70 surviving keys in iterator");
}
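// A snapshot taken before flush() must keep serving pre-flush values even after
// the memtable contents have been written out to SSTs.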
#[test]
fn test_snapshot_across_flush() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 1024,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
db.put(b"alpha", b"1").unwrap();
db.put(b"beta", b"2").unwrap();
let snap = db.snapshot_seq();
db.flush().unwrap();
db.put(b"alpha", b"10").unwrap();
db.put(b"gamma", b"3").unwrap();
let r = ReadOptions {
snapshot: Some(snap),
..Default::default()
};
assert_eq!(
db.get_with_options(&r, b"alpha").unwrap(),
Some(b"1".to_vec())
);
assert_eq!(
db.get_with_options(&r, b"beta").unwrap(),
Some(b"2".to_vec())
);
assert_eq!(db.get_with_options(&r, b"gamma").unwrap(), None);
assert_eq!(db.get(b"alpha").unwrap(), Some(b"10".to_vec()));
assert_eq!(db.get(b"gamma").unwrap(), Some(b"3".to_vec()));
}
#[test]
fn test_multi_snapshot_coexistence() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
db.put(b"x", b"v1").unwrap();
let snap1 = db.snapshot_seq();
db.put(b"x", b"v2").unwrap();
db.put(b"y", b"v2").unwrap();
let snap2 = db.snapshot_seq();
db.put(b"x", b"v3").unwrap();
db.delete(b"y").unwrap();
db.put(b"z", b"v3").unwrap();
let snap3 = db.snapshot_seq();
db.put(b"x", b"v4").unwrap();
let r1 = ReadOptions {
snapshot: Some(snap1),
..Default::default()
};
let r2 = ReadOptions {
snapshot: Some(snap2),
..Default::default()
};
let r3 = ReadOptions {
snapshot: Some(snap3),
..Default::default()
};
assert_eq!(
db.get_with_options(&r1, b"x").unwrap(),
Some(b"v1".to_vec())
);
assert_eq!(db.get_with_options(&r1, b"y").unwrap(), None);
assert_eq!(db.get_with_options(&r1, b"z").unwrap(), None);
assert_eq!(
db.get_with_options(&r2, b"x").unwrap(),
Some(b"v2".to_vec())
);
assert_eq!(
db.get_with_options(&r2, b"y").unwrap(),
Some(b"v2".to_vec())
);
assert_eq!(db.get_with_options(&r2, b"z").unwrap(), None);
assert_eq!(
db.get_with_options(&r3, b"x").unwrap(),
Some(b"v3".to_vec())
);
assert_eq!(db.get_with_options(&r3, b"y").unwrap(), None);
assert_eq!(
db.get_with_options(&r3, b"z").unwrap(),
Some(b"v3".to_vec())
);
assert_eq!(db.get(b"x").unwrap(), Some(b"v4".to_vec()));
}
#[test]
fn test_compaction_preserves_snapshots() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 512,
l0_compaction_trigger: 2,
num_levels: 3,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
for i in 0..50 {
let key = format!("key_{:04}", i);
db.put(key.as_bytes(), b"original").unwrap();
}
db.flush().unwrap();
let snap = db.snapshot_seq();
let r = ReadOptions {
snapshot: Some(snap),
..Default::default()
};
for i in 0..50 {
let key = format!("key_{:04}", i);
assert_eq!(
db.get_with_options(&r, key.as_bytes()).unwrap(),
Some(b"original".to_vec()),
"key {} should be 'original' in snapshot before compaction",
key
);
}
for i in 0..50 {
let key = format!("key_{:04}", i);
db.put(key.as_bytes(), b"updated").unwrap();
}
db.flush().unwrap();
db.compact().unwrap();
for i in 0..50 {
let key = format!("key_{:04}", i);
assert_eq!(
db.get(key.as_bytes()).unwrap(),
Some(b"updated".to_vec()),
"key {} should be 'updated' in current view after compaction",
key
);
}
let entries: Vec<_> = db.iter().unwrap().collect();
assert_eq!(entries.len(), 50, "expected 50 keys after compaction");
for (k, v) in &entries {
assert_eq!(v.as_slice(), b"updated", "key {:?} should be 'updated'", k);
}
}
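// Writers and readers run concurrently against a small write buffer so flushes
// and compactions happen under load; readers spin until the writers finish.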
#[test]
fn test_concurrent_high_pressure() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 32 * 1024,
l0_compaction_trigger: 4,
..Default::default()
};
let db = Arc::new(DB::open(opts, dir.path()).unwrap());
let writer_threads = 4;
let ops_per_writer = 500;
let reader_threads = 2;
let done = Arc::new(std::sync::atomic::AtomicBool::new(false));
let mut handles = Vec::new();
for t in 0..writer_threads {
let db = db.clone();
handles.push(thread::spawn(move || {
for i in 0..ops_per_writer {
let key = format!("w{}_{:06}", t, i);
let val = format!("val{}_{}", t, i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
}));
}
for _ in 0..reader_threads {
let db = db.clone();
let done = done.clone();
handles.push(thread::spawn(move || {
let mut reads = 0u64;
while !done.load(std::sync::atomic::Ordering::Relaxed) {
let key = format!("w0_{:06}", reads % ops_per_writer as u64);
let _ = db.get(key.as_bytes());
reads += 1;
}
}));
}
for h in handles.drain(..writer_threads) {
h.join().unwrap();
}
done.store(true, std::sync::atomic::Ordering::Relaxed);
for h in handles {
h.join().unwrap();
}
for t in 0..writer_threads {
for i in 0..ops_per_writer {
let key = format!("w{}_{:06}", t, i);
let val = format!("val{}_{}", t, i);
assert_eq!(
db.get(key.as_bytes()).unwrap(),
Some(val.into_bytes()),
"missing key {} after concurrent stress",
key
);
}
}
}
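// The full listing is captured before compact() and compared with a fresh listing
// taken afterwards; compaction must not change iterator contents or ordering.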
#[test]
fn test_iterator_during_compaction() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 1024,
l0_compaction_trigger: 2,
num_levels: 3,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
for round in 0..5 {
for i in 0..30 {
let key = format!("iter_{:04}", round * 30 + i);
let val = format!("v_{}", round * 30 + i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
db.flush().unwrap();
}
let pre_compact: Vec<(Vec<u8>, Vec<u8>)> = db.iter().unwrap().collect();
db.compact().unwrap();
let post_compact: Vec<(Vec<u8>, Vec<u8>)> = db.iter().unwrap().collect();
assert_eq!(
pre_compact.len(),
post_compact.len(),
"iterator length mismatch after compaction"
);
for (a, b) in pre_compact.iter().zip(post_compact.iter()) {
assert_eq!(a.0, b.0, "key mismatch after compaction");
assert_eq!(a.1, b.1, "value mismatch after compaction");
}
for i in 1..post_compact.len() {
assert!(
post_compact[i].0 > post_compact[i - 1].0,
"iterator not sorted at position {}",
i
);
}
}
#[test]
fn test_compact_range() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 1024,
l0_compaction_trigger: 8,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
for i in 0..100 {
let key = format!("cr_{:04}", i);
let val = format!("val_{}", i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
db.flush().unwrap();
db.compact_range(Some(b"cr_0020"), Some(b"cr_0060"))
.unwrap();
for i in 0..100 {
let key = format!("cr_{:04}", i);
let val = format!("val_{}", i);
assert_eq!(
db.get(key.as_bytes()).unwrap(),
Some(val.into_bytes()),
"key {} missing after compact_range",
key
);
}
db.compact_range(None, None).unwrap();
for i in 0..100 {
let key = format!("cr_{:04}", i);
let val = format!("val_{}", i);
assert_eq!(
db.get(key.as_bytes()).unwrap(),
Some(val.into_bytes()),
"key {} missing after full compact_range",
key
);
}
}
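// Writes issued with disable_wal must be readable in the same session; only the
// WAL-backed write is asserted to survive close() + reopen here.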
#[test]
fn test_write_options_disable_wal() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().to_path_buf();
let opts = DbOptions {
create_if_missing: true,
..Default::default()
};
{
let db = DB::open(opts.clone(), &path).unwrap();
let wo = WriteOptions {
disable_wal: true,
..Default::default()
};
for i in 0..50 {
let key = format!("nowal_{:04}", i);
let val = format!("val_{}", i);
db.put_with_options(&wo, key.as_bytes(), val.as_bytes())
.unwrap();
}
for i in 0..50 {
let key = format!("nowal_{:04}", i);
let val = format!("val_{}", i);
assert_eq!(
db.get(key.as_bytes()).unwrap(),
Some(val.into_bytes()),
"key {} should be readable with disable_wal in same session",
key
);
}
db.put(b"wal_key", b"wal_val").unwrap();
db.close().unwrap();
}
{
let db = DB::open(opts, &path).unwrap();
assert_eq!(
db.get(b"wal_key").unwrap(),
Some(b"wal_val".to_vec()),
"WAL-backed key should survive reopen"
);
}
}
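// Low l0_slowdown_trigger/l0_stop_trigger values make a write stall plausible
// under the flood of puts below.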
#[test]
fn test_write_options_no_slowdown() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 512,
l0_compaction_trigger: 2,
l0_slowdown_trigger: 3,
l0_stop_trigger: 5,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
let wo = WriteOptions {
no_slowdown: true,
..Default::default()
};
db.put_with_options(&wo, b"ns_key", b"ns_val").unwrap();
assert_eq!(db.get(b"ns_key").unwrap(), Some(b"ns_val".to_vec()));
let mut hit_error = false;
for i in 0..100 {
let key = format!("flood_{:06}", i);
let val = vec![0xABu8; 256];
let _ = db.put(key.as_bytes(), &val);
if i % 20 == 19 {
let key = format!("ns_{:04}", i);
if db.put_with_options(&wo, key.as_bytes(), b"test").is_err() {
hit_error = true;
break;
}
}
}
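// Whether a no_slowdown write actually hit a stall is timing-dependent, so the
// flag is recorded but not asserted.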
let _ = hit_error;
}
#[test]
fn test_db_reopen_cycle() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().to_path_buf();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 4096,
l0_compaction_trigger: 3,
..Default::default()
};
let cycles = 5;
let keys_per_cycle = 20;
for cycle in 0..cycles {
let db = DB::open(opts.clone(), &path).unwrap();
for i in 0..keys_per_cycle {
let key = format!("c{}_k{:04}", cycle, i);
let val = format!("c{}_v{}", cycle, i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
for prev_cycle in 0..cycle {
for i in 0..keys_per_cycle {
let key = format!("c{}_k{:04}", prev_cycle, i);
let val = format!("c{}_v{}", prev_cycle, i);
assert_eq!(
db.get(key.as_bytes()).unwrap(),
Some(val.into_bytes()),
"key {} from cycle {} missing in cycle {}",
key,
prev_cycle,
cycle
);
}
}
db.close().unwrap();
}
{
let db = DB::open(opts, &path).unwrap();
for cycle in 0..cycles {
for i in 0..keys_per_cycle {
let key = format!("c{}_k{:04}", cycle, i);
let val = format!("c{}_v{}", cycle, i);
assert_eq!(
db.get(key.as_bytes()).unwrap(),
Some(val.into_bytes()),
"key {} from cycle {} missing in final verify",
key,
cycle
);
}
}
}
}
#[test]
fn test_large_value_flush_compact() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 256 * 1024,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
let large_val = vec![0x42u8; 100 * 1024];
db.put(b"big", &large_val).unwrap();
let got = db.get(b"big").unwrap().unwrap();
assert_eq!(got.len(), 100 * 1024);
assert_eq!(got, large_val);
db.flush().unwrap();
let got2 = db.get(b"big").unwrap().unwrap();
assert_eq!(got2, large_val);
db.compact().unwrap();
let got3 = db.get(b"big").unwrap().unwrap();
assert_eq!(got3, large_val);
}
#[test]
fn test_empty_value_flush_and_delete() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
db.put(b"nonempty", b"").unwrap();
assert_eq!(db.get(b"nonempty").unwrap(), Some(vec![]));
db.flush().unwrap();
assert_eq!(db.get(b"nonempty").unwrap(), Some(vec![]));
db.delete(b"nonempty").unwrap();
assert_eq!(db.get(b"nonempty").unwrap(), None);
}
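// Properties are returned as strings; counters start at zero on a fresh DB, grow
// after a flush, and unknown property names return None.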
#[test]
fn test_get_property() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 1024,
l0_compaction_trigger: 10,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
assert_eq!(
db.get_property("num-files-at-level0"),
Some("0".to_string())
);
assert_eq!(db.get_property("total-sst-size"), Some("0".to_string()));
let cp = db.get_property("compaction-pending");
assert!(cp.is_some());
assert_eq!(cp.unwrap(), "0");
let bcu = db.get_property("block-cache-usage");
assert!(bcu.is_some());
let _: u64 = bcu
.unwrap()
.parse()
.expect("block-cache-usage should be numeric");
for i in 0..30 {
let key = format!("prop_{:04}", i);
db.put(key.as_bytes(), b"data_data_data_data").unwrap();
}
db.flush().unwrap();
let l0_count: usize = db
.get_property("num-files-at-level0")
.unwrap()
.parse()
.unwrap();
assert!(
l0_count >= 1,
"expected at least 1 L0 file, got {}",
l0_count
);
let total_size: u64 = db.get_property("total-sst-size").unwrap().parse().unwrap();
assert!(total_size > 0, "total-sst-size should be > 0 after flush");
assert_eq!(db.get_property("unknown-property"), None);
assert_eq!(db.get_property("num-files-at-level999"), None);
}
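// delete_range upper bounds are exclusive: deleting [c, f) removes c, d, e but
// keeps f. Also covers re-inserting into a deleted range and a range delete
// applied through a WriteBatch.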
#[test]
fn test_delete_range_basic() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 1024,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
for ch in b'a'..=b'g' {
db.put(&[ch], &[ch]).unwrap();
}
db.delete_range(b"c", b"f").unwrap();
let entries: Vec<_> = db.iter().unwrap().collect();
let keys: Vec<Vec<u8>> = entries.iter().map(|(k, _)| k.clone()).collect();
assert_eq!(
keys,
vec![vec![b'a'], vec![b'b'], vec![b'f'], vec![b'g']],
"iterator should exclude range-deleted keys c, d, e"
);
db.flush().unwrap();
let entries2: Vec<_> = db.iter().unwrap().collect();
let keys2: Vec<Vec<u8>> = entries2.iter().map(|(k, _)| k.clone()).collect();
assert_eq!(
keys2,
vec![vec![b'a'], vec![b'b'], vec![b'f'], vec![b'g']],
"iterator after flush should still exclude range-deleted keys"
);
db.put(b"d", b"revived").unwrap();
assert_eq!(db.get(b"d").unwrap(), Some(b"revived".to_vec()));
let entries3: Vec<_> = db.iter().unwrap().collect();
let keys3: Vec<Vec<u8>> = entries3.iter().map(|(k, _)| k.clone()).collect();
assert!(
keys3.contains(&vec![b'd']),
"re-inserted key 'd' should appear in iterator"
);
let mut batch = WriteBatch::new();
batch.delete_range(b"a", b"c");
batch.put(b"h", b"new");
db.write(batch).unwrap();
let entries4: Vec<_> = db.iter().unwrap().collect();
let keys4: Vec<Vec<u8>> = entries4.iter().map(|(k, _)| k.clone()).collect();
assert!(
!keys4.contains(&vec![b'a']),
"key 'a' should be range-deleted by batch"
);
assert!(
!keys4.contains(&vec![b'b']),
"key 'b' should be range-deleted by batch"
);
assert_eq!(db.get(b"h").unwrap(), Some(b"new".to_vec()));
}
#[test]
fn test_prefix_iterator() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for i in 0..10u8 {
let mut key_a = b"AAA".to_vec();
key_a.push(i);
db.put(&key_a, &[i]).unwrap();
let mut key_b = b"BBB".to_vec();
key_b.push(i);
db.put(&key_b, &[i + 100]).unwrap();
}
let entries: Vec<_> = db
.iter_with_prefix(b"AAA", &mmdb::ReadOptions::default())
.unwrap()
.collect();
assert_eq!(entries.len(), 10, "expected 10 AAA-prefixed keys");
for (k, v) in &entries {
assert!(k.starts_with(b"AAA"), "key should start with AAA");
assert!(v[0] < 10, "value should be 0..9");
}
let entries: Vec<_> = db
.iter_with_prefix(b"BBB", &mmdb::ReadOptions::default())
.unwrap()
.collect();
assert_eq!(entries.len(), 10, "expected 10 BBB-prefixed keys");
for (k, _) in &entries {
assert!(k.starts_with(b"BBB"), "key should start with BBB");
}
let entries: Vec<_> = db
.iter_with_prefix(b"CCC", &mmdb::ReadOptions::default())
.unwrap()
.collect();
assert_eq!(entries.len(), 0);
}
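// Range iterators are half-open [start, end); bounds are applied by seeking to
// the start key and setting the upper bound on the returned iterator.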
#[test]
fn test_range_iterator_bounds() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for i in 0..20 {
let key = format!("key_{:04}", i);
let val = format!("val_{:04}", i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
{
let mut iter = db
.iter_with_range(
&mmdb::ReadOptions::default(),
Some(b"key_0005"),
Some(b"key_0010"),
)
.unwrap();
iter.seek(b"key_0005");
iter.set_upper_bound(b"key_0010".to_vec());
let entries: Vec<_> = iter.collect();
assert_eq!(entries.len(), 5, "range [5,10) should have 5 entries");
assert_eq!(entries[0].0, b"key_0005");
assert_eq!(entries[4].0, b"key_0009");
}
{
let mut iter = db
.iter_with_range(
&mmdb::ReadOptions::default(),
Some(b"key_0005"),
Some(b"key_0011"),
)
.unwrap();
iter.seek(b"key_0005");
iter.set_upper_bound(b"key_0011".to_vec());
let entries: Vec<_> = iter.collect();
assert_eq!(entries.len(), 6, "range [5,10] should have 6 entries");
}
{
let mut iter = db
.iter_with_range(&mmdb::ReadOptions::default(), None, Some(b"key_0003"))
.unwrap();
iter.set_upper_bound(b"key_0003".to_vec());
let entries: Vec<_> = iter.collect();
assert_eq!(entries.len(), 3, "range [..,3) should have 3 entries");
}
{
let mut iter = db
.iter_with_range(&mmdb::ReadOptions::default(), Some(b"key_0018"), None)
.unwrap();
iter.seek(b"key_0018");
let entries: Vec<_> = iter.collect();
assert_eq!(entries.len(), 2, "range [18,..] should have 2 entries");
}
{
let entries: Vec<_> = db.iter().unwrap().collect();
assert_eq!(entries.len(), 20, "full range should have 20 entries");
}
}
#[test]
fn test_bidi_iterator_forward_reverse() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for i in 0..10 {
let key = format!("key_{:04}", i);
let val = format!("val_{:04}", i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
let forward: Vec<_> = mmdb::BidiIterator::lazy(db.iter().unwrap()).collect();
assert_eq!(forward.len(), 10);
assert_eq!(forward[0].0, b"key_0000");
assert_eq!(forward[9].0, b"key_0009");
let reverse: Vec<_> = mmdb::BidiIterator::lazy(db.iter().unwrap()).rev().collect();
assert_eq!(reverse.len(), 10);
assert_eq!(reverse[0].0, b"key_0009");
assert_eq!(reverse[9].0, b"key_0000");
for (f, r) in forward.iter().zip(reverse.iter().rev()) {
assert_eq!(f.0, r.0);
assert_eq!(f.1, r.1);
}
}
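// Alternating next()/next_back() calls must converge in the middle without
// skipping or repeating keys.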
#[test]
fn test_bidi_iterator_interleaved() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for ch in b'a'..=b'f' {
db.put(&[ch], &[ch]).unwrap();
}
let mut it = mmdb::BidiIterator::lazy(db.iter().unwrap());
assert_eq!(it.next().unwrap().0, vec![b'a']);
assert_eq!(it.next_back().unwrap().0, vec![b'f']);
assert_eq!(it.next().unwrap().0, vec![b'b']);
assert_eq!(it.next_back().unwrap().0, vec![b'e']);
assert_eq!(it.next().unwrap().0, vec![b'c']);
assert_eq!(it.next_back().unwrap().0, vec![b'd']);
assert!(it.next().is_none());
assert!(it.next_back().is_none());
}
#[test]
fn test_bidi_range_reverse() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for i in 0..20 {
let key = format!("r_{:04}", i);
db.put(key.as_bytes(), b"v").unwrap();
}
let mut iter = db
.iter_with_range(
&mmdb::ReadOptions::default(),
Some(b"r_0005"),
Some(b"r_0015"),
)
.unwrap();
iter.seek(b"r_0005");
iter.set_upper_bound(b"r_0015".to_vec());
let entries: Vec<_> = iter.collect();
assert_eq!(entries.len(), 10);
let mut bidi = mmdb::BidiIterator::new(entries);
let reverse: Vec<_> = std::iter::from_fn(|| bidi.next_back()).collect();
assert_eq!(reverse.len(), 10);
assert_eq!(reverse[0].0, b"r_0014");
assert_eq!(reverse[9].0, b"r_0005");
}
#[test]
fn test_iter_rev_after_flush_compact() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 1024,
l0_compaction_trigger: 3,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
for round in 0..4 {
for i in 0..15 {
let key = format!("rev_{:04}", round * 15 + i);
let val = format!("v_{}", round * 15 + i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
db.flush().unwrap();
}
db.compact().unwrap();
let forward: Vec<_> = mmdb::BidiIterator::lazy(db.iter().unwrap()).collect();
assert_eq!(forward.len(), 60);
let reverse: Vec<_> = mmdb::BidiIterator::lazy(db.iter().unwrap()).rev().collect();
assert_eq!(reverse.len(), 60);
assert_eq!(reverse[0].0, forward[59].0);
assert_eq!(reverse[59].0, forward[0].0);
for i in 1..forward.len() {
assert!(forward[i].0 > forward[i - 1].0, "not sorted at {}", i);
}
}
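// "Multi-thread" refers to background compaction threads enabled via
// max_background_compactions, not to multiple writer threads in the test itself.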
#[test]
fn test_multi_thread_compaction() {
let dir = tempfile::tempdir().unwrap();
let db = DB::open(
DbOptions {
create_if_missing: true,
write_buffer_size: 4 * 1024,
l0_compaction_trigger: 2,
max_background_compactions: 4,
..Default::default()
},
dir.path(),
)
.unwrap();
for i in 0..500u64 {
let key = format!("mt_{:06}", i);
db.put(key.as_bytes(), b"value").unwrap();
}
db.flush().unwrap();
db.compact().unwrap();
for i in 0..500u64 {
let key = format!("mt_{:06}", i);
assert_eq!(
db.get(key.as_bytes()).unwrap(),
Some(b"value".to_vec()),
"missing key {}",
i
);
}
let count = db.iter().unwrap().count();
assert_eq!(count, 500);
}
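// Seeks into a flushed SST: exact matches, a seek between keys that lands on the
// next key, and a seek past the last key that exhausts the iterator.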
#[test]
fn test_deferred_block_read_correctness() {
let dir = tempfile::tempdir().unwrap();
let db = DB::open(
DbOptions {
create_if_missing: true,
write_buffer_size: 4 * 1024,
l0_compaction_trigger: 100,
..Default::default()
},
dir.path(),
)
.unwrap();
for i in 0..200u64 {
let key = format!("df_{:06}", i);
let val = format!("val_{:06}", i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
db.flush().unwrap();
let mut iter = db.iter().unwrap();
iter.seek(b"df_000000");
let (k, v) = iter.next().unwrap();
assert_eq!(k, b"df_000000");
assert_eq!(v, b"val_000000");
let mut iter = db.iter().unwrap();
iter.seek(b"df_000100");
let (k, _) = iter.next().unwrap();
assert_eq!(k, b"df_000100");
let mut iter = db.iter().unwrap();
iter.seek(b"df_000100x");
let (k, _) = iter.next().unwrap();
assert_eq!(k, b"df_000101");
let mut iter = db.iter().unwrap();
iter.seek(b"zz");
assert!(iter.next().is_none());
assert_eq!(db.iter().unwrap().count(), 200);
}
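// With prefix_len configured, prefix scans must still return every matching key
// after the data has been flushed and compacted across levels.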
#[test]
fn test_prefix_scan_across_levels() {
let dir = tempfile::tempdir().unwrap();
let db = DB::open(
DbOptions {
create_if_missing: true,
write_buffer_size: 4 * 1024,
l0_compaction_trigger: 2,
prefix_len: 4,
..Default::default()
},
dir.path(),
)
.unwrap();
for prefix in &["aaa_", "bbb_", "ccc_"] {
for i in 0..100u64 {
let key = format!("{}{:06}", prefix, i);
db.put(key.as_bytes(), b"v").unwrap();
}
}
db.flush().unwrap();
db.compact().unwrap();
let aaa: Vec<_> = db
.iter_with_prefix(b"aaa_", &mmdb::ReadOptions::default())
.unwrap()
.collect();
assert_eq!(aaa.len(), 100, "aaa_ prefix should have 100 entries");
assert!(aaa[0].0.starts_with(b"aaa_"));
assert!(aaa[99].0.starts_with(b"aaa_"));
let bbb: Vec<_> = db
.iter_with_prefix(b"bbb_", &mmdb::ReadOptions::default())
.unwrap()
.collect();
assert_eq!(bbb.len(), 100);
let zzz: Vec<_> = db
.iter_with_prefix(b"zzz_", &mmdb::ReadOptions::default())
.unwrap()
.collect();
assert_eq!(zzz.len(), 0);
}
#[test]
fn test_new_options_accepted() {
let dir = tempfile::tempdir().unwrap();
let db = DB::open(
DbOptions {
create_if_missing: true,
max_background_compactions: 4,
max_subcompactions: 2,
pin_l0_filter_and_index_blocks_in_cache: true,
cache_index_and_filter_blocks: true,
max_write_buffer_number: 8,
level_compaction_dynamic_level_bytes: false,
allow_concurrent_memtable_write: false,
memtable_prefix_bloom_ratio: 0.0,
..Default::default()
},
dir.path(),
)
.unwrap();
db.put(b"k1", b"v1").unwrap();
assert_eq!(db.get(b"k1").unwrap(), Some(b"v1".to_vec()));
db.flush().unwrap();
assert_eq!(db.get(b"k1").unwrap(), Some(b"v1".to_vec()));
}
#[test]
fn test_range_delete_then_get_sequence_ordering() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
db.put(b"d", b"v1").unwrap();
db.delete_range(b"c", b"f").unwrap();
assert_eq!(
db.get(b"d").unwrap(),
None,
"S1: put then delete_range — tombstone should win"
);
db.delete_range(b"x", b"z").unwrap();
db.put(b"y", b"v2").unwrap();
assert_eq!(
db.get(b"y").unwrap(),
Some(b"v2".to_vec()),
"S2: delete_range then put — put should win"
);
let entries: Vec<_> = db.iter().unwrap().collect();
assert!(
!entries.iter().any(|(k, _)| k == b"d"),
"d should not appear in iterator"
);
assert!(
entries.iter().any(|(k, _)| k == b"y"),
"y should appear in iterator"
);
}
#[test]
fn test_range_delete_survives_flush() {
let dir = tempfile::tempdir().unwrap();
let db = DB::open(
DbOptions {
create_if_missing: true,
write_buffer_size: 4 * 1024,
l0_compaction_trigger: 100,
..Default::default()
},
dir.path(),
)
.unwrap();
db.put(b"a", b"1").unwrap();
db.put(b"b", b"2").unwrap();
db.put(b"c", b"3").unwrap();
db.put(b"d", b"4").unwrap();
db.put(b"e", b"5").unwrap();
db.delete_range(b"b", b"e").unwrap();
db.flush().unwrap();
assert_eq!(
db.get(b"a").unwrap(),
Some(b"1".to_vec()),
"a should survive"
);
assert_eq!(
db.get(b"e").unwrap(),
Some(b"5".to_vec()),
"e should survive"
);
assert_eq!(
db.get(b"b").unwrap(),
None,
"b should be deleted by range tombstone"
);
assert_eq!(
db.get(b"c").unwrap(),
None,
"c should be deleted by range tombstone"
);
assert_eq!(
db.get(b"d").unwrap(),
None,
"d should be deleted by range tombstone"
);
let keys: Vec<_> = db.iter().unwrap().map(|(k, _)| k).collect();
assert!(
!keys.contains(&b"b".to_vec()),
"b should be deleted in iter"
);
assert!(
!keys.contains(&b"c".to_vec()),
"c should be deleted in iter"
);
assert!(
!keys.contains(&b"d".to_vec()),
"d should be deleted in iter"
);
}
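// The large write buffer and high L0 trigger keep everything in one flush; the
// range tombstone must still suppress point reads once it lives in an SST.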
#[test]
fn test_sst_range_tombstone_in_get_after_flush() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 64 * 1024 * 1024,
l0_compaction_trigger: 100,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
db.put(b"d", b"v1").unwrap();
db.delete_range(b"c", b"f").unwrap();
db.flush().unwrap();
assert_eq!(
db.get(b"d").unwrap(),
None,
"d should be deleted by range tombstone in SST"
);
db.put(b"a", b"alive").unwrap();
db.put(b"z", b"alive").unwrap();
db.flush().unwrap();
assert_eq!(db.get(b"a").unwrap(), Some(b"alive".to_vec()));
assert_eq!(db.get(b"z").unwrap(), Some(b"alive".to_vec()));
}
#[test]
fn test_sst_range_tombstone_in_get_after_compact() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 64 * 1024 * 1024,
l0_compaction_trigger: 100,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
db.put(b"d", b"v1").unwrap();
db.delete_range(b"c", b"f").unwrap();
db.flush().unwrap();
db.compact().unwrap();
assert_eq!(
db.get(b"d").unwrap(),
None,
"d should be deleted by range tombstone after compaction"
);
}
#[test]
fn test_sst_range_tombstone_overridden_by_newer_put() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 64 * 1024 * 1024,
l0_compaction_trigger: 100,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
db.put(b"d", b"v1").unwrap();
db.delete_range(b"c", b"f").unwrap();
db.flush().unwrap();
db.put(b"d", b"v2").unwrap();
assert_eq!(
db.get(b"d").unwrap(),
Some(b"v2".to_vec()),
"newer put should override range tombstone"
);
db.flush().unwrap();
db.compact().unwrap();
assert_eq!(
db.get(b"d").unwrap(),
Some(b"v2".to_vec()),
"newer put should survive compaction"
);
}
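// The point entry and the range tombstone are flushed into separate L0 files; the
// newer tombstone must shadow the older entry.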
#[test]
fn test_sst_range_tombstone_cross_l0_files() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 64 * 1024 * 1024,
l0_compaction_trigger: 100,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
db.put(b"d", b"v1").unwrap();
db.flush().unwrap();
db.delete_range(b"c", b"f").unwrap();
db.flush().unwrap();
assert_eq!(
db.get(b"d").unwrap(),
None,
"range tombstone in newer L0 file should shadow older point entry"
);
}
#[test]
fn test_compact_range_filters_files() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 512,
l0_compaction_trigger: 100,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
for i in 0..50 {
let key = format!("a_{:04}", i);
db.put(key.as_bytes(), b"val").unwrap();
}
db.flush().unwrap();
for i in 0..50 {
let key = format!("m_{:04}", i);
db.put(key.as_bytes(), b"val").unwrap();
}
db.flush().unwrap();
for i in 0..50 {
let key = format!("z_{:04}", i);
db.put(key.as_bytes(), b"val").unwrap();
}
db.flush().unwrap();
db.compact_range(Some(b"m_0000"), Some(b"m_9999")).unwrap();
for i in 0..50 {
let key = format!("a_{:04}", i);
assert_eq!(db.get(key.as_bytes()).unwrap(), Some(b"val".to_vec()));
}
for i in 0..50 {
let key = format!("m_{:04}", i);
assert_eq!(db.get(key.as_bytes()).unwrap(), Some(b"val".to_vec()));
}
for i in 0..50 {
let key = format!("z_{:04}", i);
assert_eq!(db.get(key.as_bytes()).unwrap(), Some(b"val".to_vec()));
}
}
#[test]
fn test_compact_range_none_does_full_compaction() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 512,
l0_compaction_trigger: 100,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
for i in 0..100 {
let key = format!("key_{:04}", i);
db.put(key.as_bytes(), b"val").unwrap();
}
db.flush().unwrap();
db.compact_range(None, None).unwrap();
for i in 0..100 {
let key = format!("key_{:04}", i);
assert_eq!(db.get(key.as_bytes()).unwrap(), Some(b"val".to_vec()));
}
}
#[test]
fn test_stats_bytes_written_and_read() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for i in 0..100 {
let key = format!("key_{:06}", i);
let val = format!("value_{:06}", i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
let bytes_written: u64 = db
.get_property("stats.bytes_written")
.unwrap()
.parse()
.unwrap();
assert!(
bytes_written > 0,
"bytes_written should be > 0 after writes"
);
for i in 0..50 {
let key = format!("key_{:06}", i);
let _ = db.get(key.as_bytes()).unwrap();
}
let bytes_read: u64 = db
.get_property("stats.bytes_read")
.unwrap()
.parse()
.unwrap();
assert!(bytes_read > 0, "bytes_read should be > 0 after reads");
}
#[test]
fn test_stats_flush_and_compaction() {
let dir = tempfile::tempdir().unwrap();
let opts = DbOptions {
create_if_missing: true,
write_buffer_size: 512,
l0_compaction_trigger: 100,
..Default::default()
};
let db = DB::open(opts, dir.path()).unwrap();
for i in 0..50 {
let key = format!("key_{:04}", i);
db.put(key.as_bytes(), b"val").unwrap();
}
db.flush().unwrap();
let flushes: u64 = db
.get_property("stats.flushes_completed")
.unwrap()
.parse()
.unwrap();
assert!(flushes >= 1, "should have at least 1 flush");
db.compact().unwrap();
let compactions: u64 = db
.get_property("stats.compactions_completed")
.unwrap()
.parse()
.unwrap();
assert!(compactions >= 1, "should have at least 1 compaction");
let compaction_bytes: u64 = db
.get_property("stats.compaction_bytes_written")
.unwrap()
.parse()
.unwrap();
assert!(
compaction_bytes > 0,
"compaction_bytes_written should be > 0"
);
}
#[test]
fn test_stats_cache_hit_rate() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for i in 0..100 {
let key = format!("key_{:06}", i);
db.put(key.as_bytes(), b"value").unwrap();
}
db.flush().unwrap();
for i in 0..100 {
let key = format!("key_{:06}", i);
let _ = db.get(key.as_bytes()).unwrap();
}
for i in 0..100 {
let key = format!("key_{:06}", i);
let _ = db.get(key.as_bytes()).unwrap();
}
let hit_rate = db.get_property("stats.cache_hit_rate").unwrap();
let rate: f64 = hit_rate.parse().unwrap();
assert!(
rate > 0.0,
"cache hit rate should be > 0 after repeated reads"
);
}
#[test]
fn test_vsdb_seek_to_last_pattern() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for i in 0..20u64 {
let key = format!("k_{:04}", i);
db.put(key.as_bytes(), format!("v{}", i).as_bytes())
.unwrap();
}
{
let mut iter = db
.iter_with_range(&ReadOptions::default(), None, None)
.unwrap();
iter.seek_to_last();
assert!(iter.valid(), "seek_to_last should be valid (memtable)");
assert_eq!(iter.key().unwrap(), b"k_0019");
iter.prev();
assert!(iter.valid());
assert_eq!(iter.key().unwrap(), b"k_0018");
}
db.flush().unwrap();
{
let mut iter = db
.iter_with_range(&ReadOptions::default(), None, None)
.unwrap();
iter.seek_to_last();
assert!(iter.valid(), "seek_to_last should be valid (SST)");
assert_eq!(iter.key().unwrap(), b"k_0019");
iter.prev();
assert!(iter.valid());
assert_eq!(iter.key().unwrap(), b"k_0018");
}
{
let start = b"k_".as_slice();
// '`' (0x60) sorts immediately after '_' (0x5F), so "k`" is an exclusive upper
// bound that still covers every "k_..." key.
let end = b"k`".as_slice();
let mut iter = db
.iter_with_range(&ReadOptions::default(), Some(start), Some(end))
.unwrap();
iter.seek_to_last();
assert!(iter.valid(), "seek_to_last should be valid (range-bounded)");
assert_eq!(iter.key().unwrap(), b"k_0019");
}
}
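// Reverse iteration must not resurrect keys covered by a range tombstone.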
#[test]
fn test_range_tombstone_backward_no_resurrection() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for c in b'a'..=b'f' {
db.put(&[c], &[c]).unwrap();
}
db.delete_range(b"c", b"e").unwrap();
let mut iter = db.iter().unwrap();
let fwd: Vec<Vec<u8>> = std::iter::from_fn(|| iter.next().map(|(k, _)| k)).collect();
assert_eq!(fwd, vec![b"a", b"b", b"e", b"f"]);
let bidi = mmdb::BidiIterator::lazy(db.iter().unwrap());
let rev: Vec<Vec<u8>> = bidi.rev().map(|(k, _)| k).collect();
assert_eq!(
rev,
vec![b"f".to_vec(), b"e".to_vec(), b"b".to_vec(), b"a".to_vec()]
);
}
#[test]
fn test_range_tombstone_backward_after_flush() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for c in b'a'..=b'f' {
db.put(&[c], &[c]).unwrap();
}
db.delete_range(b"b", b"e").unwrap();
db.flush().unwrap();
let bidi = mmdb::BidiIterator::lazy(db.iter().unwrap());
let rev: Vec<Vec<u8>> = bidi.rev().map(|(k, _)| k).collect();
assert_eq!(rev, vec![b"f".to_vec(), b"e".to_vec(), b"a".to_vec()]);
}
#[test]
fn test_bidi_streaming_next_back() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
let count = 200;
for i in 0..count {
let key = format!("key_{:06}", i);
let val = format!("val_{}", i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
db.flush().unwrap();
let bidi = mmdb::BidiIterator::lazy(db.iter().unwrap());
let rev: Vec<Vec<u8>> = bidi.rev().map(|(k, _)| k).collect();
assert_eq!(rev.len(), count);
for (i, entry) in rev.iter().enumerate() {
let expected = format!("key_{:06}", count - 1 - i);
assert_eq!(entry, expected.as_bytes(), "mismatch at position {}", i);
}
}
#[test]
fn test_prev_multi_version_tombstones() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
db.put(b"a", b"a1").unwrap();
db.put(b"b", b"b1").unwrap();
db.put(b"c", b"c1").unwrap();
db.put(b"d", b"d1").unwrap();
db.delete(b"b").unwrap();
db.put(b"c", b"c2").unwrap();
let mut iter = db.iter().unwrap();
iter.seek(b"d");
assert!(iter.valid());
assert_eq!(iter.key().unwrap(), b"d");
iter.prev();
assert!(iter.valid());
assert_eq!(iter.key().unwrap(), b"c");
assert_eq!(iter.value().unwrap(), b"c2");
iter.prev();
assert!(iter.valid());
assert_eq!(iter.key().unwrap(), b"a");
iter.prev();
assert!(!iter.valid());
}
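// seek_for_prev positions on the greatest key <= target; stepping forward and
// then back again must land on the same key.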
#[test]
fn test_seek_for_prev_round_trip() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for i in 0..10 {
let key = format!("key_{:02}", i);
let val = format!("val_{}", i);
db.put(key.as_bytes(), val.as_bytes()).unwrap();
}
let mut iter = db.iter().unwrap();
iter.seek_for_prev(b"key_04x");
assert!(iter.valid());
assert_eq!(iter.key().unwrap(), b"key_04");
iter.advance();
assert!(iter.valid());
assert_eq!(iter.key().unwrap(), b"key_05");
iter.prev();
assert!(iter.valid());
assert_eq!(iter.key().unwrap(), b"key_04");
}
#[test]
fn test_compact_range_then_iter() {
let dir = tempfile::tempdir().unwrap();
let db = make_db(dir.path());
for i in 0..50 {
let key = format!("key_{:04}", i);
db.put(key.as_bytes(), key.as_bytes()).unwrap();
}
db.flush().unwrap();
db.compact_range(Some(b"key_0000"), Some(b"key_0050"))
.unwrap();
let mut iter = db.iter().unwrap();
let entries: Vec<Vec<u8>> = std::iter::from_fn(|| iter.next().map(|(k, _)| k)).collect();
assert_eq!(entries.len(), 50);
for (i, entry) in entries.iter().enumerate() {
let expected = format!("key_{:04}", i);
assert_eq!(entry, expected.as_bytes());
}
}