/// Sanity check that an empty byte buffer and an empty string slice both
/// report empty via `is_empty` — the basis for the key byte-size check.
#[test]
#[allow(clippy::const_is_empty)] // intentionally asserting on compile-time-empty values
fn test_empty_binary_vs_empty_string() {
    let no_bytes = Vec::<u8>::new();
    let no_text = "";
    assert!(no_bytes.is_empty());
    assert!(no_text.is_empty());
}
/// Models the start-key comparison: an empty gRPC key is detected by its
/// byte length, while a populated key must never be treated as empty.
#[test]
fn test_start_key_should_use_byte_size_check() {
    let empty_grpc_key = Vec::<u8>::new();
    let real_key = vec![1u8, 2, 3];
    assert!(empty_grpc_key.is_empty());
    assert!(!real_key.is_empty());
}
/// Simulates a `phash2`-style partitioner: hashes `term` with the standard
/// library's `DefaultHasher` and folds the digest into `0..range` via modulo.
/// Deterministic within a single process run (DefaultHasher's algorithm is
/// unspecified across Rust releases, but stable for one execution).
fn phash2_simulate(term: &str, range: u32) -> u32 {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    let digest = {
        let mut state = DefaultHasher::new();
        term.hash(&mut state);
        state.finish()
    };
    (digest as u32) % range
}
/// A table name must resolve to exactly one region id in `[0, num_regions)`,
/// and resolving it twice must yield the same id (stable placement).
#[test]
fn test_table_gets_single_region_not_all() {
    let num_regions = 16;
    let first = phash2_simulate("users", num_regions);
    assert!(first < num_regions);
    // Re-hashing the same table must be stable.
    let second = phash2_simulate("users", num_regions);
    assert_eq!(first, second, "Same table must map to same region");
}
/// Distinct tables may land on distinct regions; a full collision is rare
/// but legal for a hash, so it is merely logged rather than failed.
#[test]
fn test_different_tables_may_get_different_regions() {
    let num_regions = 16;
    let assigned = [
        phash2_simulate("users", num_regions),
        phash2_simulate("orders", num_regions),
        phash2_simulate("products", num_regions),
    ];
    for &region in &assigned {
        assert!(region < num_regions);
    }
    if assigned[0] == assigned[1] && assigned[1] == assigned[2] {
        println!("Warning: All tables hashed to same region (rare but valid)");
    }
}
/// The fixed region-lookup helper must return exactly one region id,
/// shifted to the 1-based range `1..=16`.
#[test]
fn test_region_count_is_one_per_table() {
    // Fixed lookup: hash to a 0-based slot, shift to 1-based, wrap in a Vec.
    fn get_table_regions_fixed(table_name: &str) -> Vec<u32> {
        let num_regions = 16;
        vec![phash2_simulate(table_name, num_regions) + 1]
    }
    let regions = get_table_regions_fixed("users");
    assert_eq!(
        regions.len(),
        1,
        "Each table must map to exactly ONE region"
    );
    assert!((1..=16).contains(&regions[0]));
}
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// Test double for a query-result cache. Tracks only the number of cached
/// entries and whether `invalidate` was ever called; atomics let it be
/// shared behind an `Arc` without any locking.
struct MockQueryCache {
    /// Count of cached query results; reset to 0 by `invalidate`.
    entries: AtomicUsize,
    /// Latched to true once `invalidate` runs; never cleared.
    invalidated: AtomicBool,
}

impl MockQueryCache {
    /// Creates an empty, not-yet-invalidated cache.
    fn new() -> Self {
        Self {
            entries: AtomicUsize::new(0),
            invalidated: AtomicBool::new(false),
        }
    }

    /// Records one cached query result (the mock ignores both arguments).
    fn insert(&self, _query: &str, _result: &str) {
        self.entries.fetch_add(1, Ordering::SeqCst);
    }

    /// Drops every entry and latches the invalidation flag.
    fn invalidate(&self) {
        self.entries.store(0, Ordering::SeqCst);
        self.invalidated.store(true, Ordering::SeqCst);
    }

    /// Current number of cached entries.
    fn len(&self) -> usize {
        self.entries.load(Ordering::SeqCst)
    }

    /// Whether `invalidate` has ever been called.
    fn was_invalidated(&self) -> bool {
        self.invalidated.load(Ordering::SeqCst)
    }
}
/// After an INSERT, the query cache must be flushed and the invalidation
/// flag must be set.
#[test]
fn test_cache_invalidated_after_insert() {
    // A plain local is enough: the cache is never cloned or sent to another
    // thread, so the previous `Arc::new` wrapper was pure overhead.
    let cache = MockQueryCache::new();
    cache.insert("SELECT * FROM users", "row1, row2");
    assert_eq!(cache.len(), 1);
    cache.invalidate();
    assert_eq!(cache.len(), 0, "Cache must be empty after INSERT");
    assert!(
        cache.was_invalidated(),
        "Cache invalidation must be called after INSERT"
    );
}
/// After an UPDATE, every cached result — even multiple entries — must be
/// flushed.
#[test]
fn test_cache_invalidated_after_update() {
    // A plain local is enough: the cache is never cloned or sent to another
    // thread, so the previous `Arc::new` wrapper was pure overhead.
    let cache = MockQueryCache::new();
    cache.insert("SELECT * FROM users", "row1");
    cache.insert("SELECT name FROM users", "name1");
    assert_eq!(cache.len(), 2);
    cache.invalidate();
    assert_eq!(cache.len(), 0, "Cache must be empty after UPDATE");
}
/// After a DELETE, the cache must be flushed and the invalidation flag set.
#[test]
fn test_cache_invalidated_after_delete() {
    // A plain local is enough: the cache is never cloned or sent to another
    // thread, so the previous `Arc::new` wrapper was pure overhead.
    let cache = MockQueryCache::new();
    cache.insert("SELECT * FROM users", "row1");
    cache.invalidate();
    assert_eq!(cache.len(), 0, "Cache must be empty after DELETE");
    assert!(cache.was_invalidated());
}
/// Every worker must register its tables exactly once, so the counter must
/// end equal to the worker count.
#[test]
fn test_all_workers_should_register_tables() {
    let num_workers = 4;
    // A plain atomic counter is enough: no threads are spawned here, so the
    // former `Arc` plus per-iteration `Arc::clone` was pure overhead.
    let tables_registered = AtomicUsize::new(0);
    for worker_id in 0..num_workers {
        tables_registered.fetch_add(1, Ordering::SeqCst);
        assert!(
            worker_id < num_workers,
            "Worker {} should register tables",
            worker_id
        );
    }
    assert_eq!(tables_registered.load(Ordering::SeqCst), num_workers);
}
/// The table-metadata refresh interval must be short (< 5s) and within a
/// sane positive bound.
#[test]
fn test_table_refresh_interval_is_short() {
    let refresh_interval_secs = 2;
    assert!(refresh_interval_secs < 5, "Refresh interval should be < 5s");
    assert!((1..=10).contains(&refresh_interval_secs));
}