use std::collections::HashMap;
use std::hash::Hash;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
/// Eviction strategy applied by `TripleCache` when an insert would exceed
/// its configured capacity.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum CachePolicy {
    /// Evict the entry with the oldest `last_accessed` time.
    Lru,
    /// Evict the entry with the smallest `access_count`.
    Lfu,
    /// Evict the entry with the smallest insertion sequence number.
    Fifo,
    /// Time-to-live mode; victim selection falls back to LRU order
    /// (see `TripleCacheInner::select_victim`).
    Ttl,
}
/// Thread-safe hit/miss/eviction/insertion tallies for a cache.
///
/// Every counter is a monotonically increasing atomic updated with
/// `Ordering::Relaxed`: the counts never synchronize other memory, so no
/// stronger ordering is needed.
#[derive(Debug, Default)]
pub struct CacheStats {
    hits: AtomicU64,
    misses: AtomicU64,
    evictions: AtomicU64,
    insertions: AtomicU64,
}

impl CacheStats {
    /// Creates a zeroed counter set, pre-wrapped in an [`Arc`] so it can be
    /// shared between a cache and its observers.
    pub fn new() -> Arc<Self> {
        Arc::new(CacheStats::default())
    }

    /// Counts one successful lookup.
    pub fn record_hit(&self) {
        self.hits.fetch_add(1, Ordering::Relaxed);
    }

    /// Counts one failed lookup.
    pub fn record_miss(&self) {
        self.misses.fetch_add(1, Ordering::Relaxed);
    }

    /// Counts one capacity-driven eviction.
    pub fn record_eviction(&self) {
        self.evictions.fetch_add(1, Ordering::Relaxed);
    }

    /// Counts one insertion.
    pub fn record_insertion(&self) {
        self.insertions.fetch_add(1, Ordering::Relaxed);
    }

    /// Total hits recorded so far.
    pub fn hits(&self) -> u64 {
        self.hits.load(Ordering::Relaxed)
    }

    /// Total misses recorded so far.
    pub fn misses(&self) -> u64 {
        self.misses.load(Ordering::Relaxed)
    }

    /// Total evictions recorded so far.
    pub fn evictions(&self) -> u64 {
        self.evictions.load(Ordering::Relaxed)
    }

    /// Total insertions recorded so far.
    pub fn insertions(&self) -> u64 {
        self.insertions.load(Ordering::Relaxed)
    }

    /// Fraction of lookups that hit, in `[0.0, 1.0]`; `0.0` when no lookup
    /// has been recorded yet.
    ///
    /// Note: hits and misses are loaded separately, so under concurrent
    /// updates the ratio is only approximate.
    pub fn hit_rate(&self) -> f64 {
        let hits = self.hits() as f64;
        let attempts = hits + self.misses() as f64;
        if attempts > 0.0 {
            hits / attempts
        } else {
            0.0
        }
    }

    /// Zeroes every counter. Not atomic as a group: a concurrent update may
    /// land between the individual stores.
    pub fn reset(&self) {
        for counter in [&self.hits, &self.misses, &self.evictions, &self.insertions] {
            counter.store(0, Ordering::Relaxed);
        }
    }
}
/// One cache slot: the stored value plus the bookkeeping every eviction
/// policy needs (recency, frequency, insertion order, TTL deadline).
///
/// `created_at` is currently only written, never read — hence `dead_code`.
#[allow(dead_code)]
struct Entry<V> {
    value: V,
    created_at: Instant,
    expires_at: Option<Instant>,
    last_accessed: Instant,
    access_count: u64,
    insertion_seq: u64,
}

// Fixed: dropped the spurious `V: Clone` bound — none of these methods copy
// the value, so the bookkeeping works for any payload type. (Loosening a
// bound is backward-compatible for all existing callers.)
impl<V> Entry<V> {
    /// Wraps `value`, stamping the creation/access times and deriving the
    /// optional expiry deadline from `ttl`. `seq` is the cache-wide
    /// insertion sequence number used by FIFO eviction.
    fn new(value: V, ttl: Option<Duration>, seq: u64) -> Self {
        let now = Instant::now();
        Self {
            value,
            created_at: now,
            expires_at: ttl.map(|d| now + d),
            last_accessed: now,
            access_count: 0,
            insertion_seq: seq,
        }
    }

    /// True once the TTL deadline has passed; entries created without a TTL
    /// never expire.
    fn is_expired(&self) -> bool {
        self.expires_at
            .map(|deadline| Instant::now() >= deadline)
            .unwrap_or(false)
    }

    /// Records an access for the LRU (recency) and LFU (frequency) bookkeeping.
    fn touch(&mut self) {
        self.last_accessed = Instant::now();
        self.access_count += 1;
    }
}
/// Policy-agnostic cache storage: a key → [`Entry`] map plus the knobs
/// (capacity, policy, default TTL) that drive eviction.
///
/// Not thread-safe by itself; `TripleCache` wraps it in a `Mutex`.
struct TripleCacheInner<K, V> {
    entries: HashMap<K, Entry<V>>,
    policy: CachePolicy,
    capacity: usize,
    ttl: Option<Duration>,
    // Monotonic insertion counter; drives FIFO victim selection.
    seq_counter: u64,
}

impl<K: Eq + Hash + Clone, V: Clone> TripleCacheInner<K, V> {
    /// Empty cache with the given capacity, eviction policy and optional
    /// per-entry TTL.
    fn new(capacity: usize, policy: CachePolicy, ttl: Option<Duration>) -> Self {
        Self {
            entries: HashMap::new(),
            policy,
            capacity,
            ttl,
            seq_counter: 0,
        }
    }

    /// Looks up `key`, refreshing its recency/frequency bookkeeping on a hit.
    ///
    /// Fixed two issues in the previous version: (1) after `touch()` it
    /// re-indexed the map (`&self.entries[key].value`), paying a second hash
    /// lookup plus an avoidable panic path; (2) an expired entry was reported
    /// as a miss but left in the map. Expired entries are now removed lazily
    /// here (insert still purges them eagerly, so eviction behavior is
    /// unchanged).
    fn get_mut(&mut self, key: &K) -> Option<&V> {
        if self.entries.get(key).map_or(false, |e| e.is_expired()) {
            self.entries.remove(key);
            return None;
        }
        let entry = self.entries.get_mut(key)?;
        entry.touch();
        Some(&entry.value)
    }

    /// Inserts (or overwrites) `key`, evicting one victim first if the cache
    /// is full. Evictions — but not insertions of existing keys — are
    /// reported to `stats`.
    fn insert(&mut self, key: K, value: V, stats: &CacheStats) {
        // Purge expired entries first so they free capacity instead of
        // triggering a needless policy eviction.
        self.entries.retain(|_, e| !e.is_expired());
        if !self.entries.contains_key(&key) && self.entries.len() >= self.capacity {
            if let Some(victim) = self.select_victim() {
                self.entries.remove(&victim);
                stats.record_eviction();
            }
        }
        self.seq_counter += 1;
        let entry = Entry::new(value, self.ttl, self.seq_counter);
        self.entries.insert(key, entry);
        stats.record_insertion();
    }

    /// Chooses the key to evict under the configured policy. `Ttl` falls
    /// back to LRU order. O(n) scan; `None` only when the cache is empty.
    fn select_victim(&self) -> Option<K> {
        match self.policy {
            CachePolicy::Lru | CachePolicy::Ttl => self
                .entries
                .iter()
                .min_by_key(|(_, e)| e.last_accessed)
                .map(|(k, _)| k.clone()),
            CachePolicy::Lfu => self
                .entries
                .iter()
                .min_by_key(|(_, e)| e.access_count)
                .map(|(k, _)| k.clone()),
            CachePolicy::Fifo => self
                .entries
                .iter()
                .min_by_key(|(_, e)| e.insertion_seq)
                .map(|(k, _)| k.clone()),
        }
    }

    /// Removes `key`; returns whether it was present (expired or not).
    fn remove(&mut self, key: &K) -> bool {
        self.entries.remove(key).is_some()
    }

    /// Number of live (non-expired) entries. O(n): expired entries may still
    /// occupy the map until the next insert or lookup.
    fn len(&self) -> usize {
        self.entries.iter().filter(|(_, e)| !e.is_expired()).count()
    }

    /// Drops every entry; the insertion sequence counter keeps running.
    fn clear(&mut self) {
        self.entries.clear();
    }

    /// True if `key` is present and not expired. Does not touch recency.
    fn contains_key(&self, key: &K) -> bool {
        self.entries
            .get(key)
            .map(|e| !e.is_expired())
            .unwrap_or(false)
    }

    /// Clones of all live (non-expired) keys, in arbitrary map order.
    fn keys(&self) -> Vec<K> {
        self.entries
            .iter()
            .filter(|(_, e)| !e.is_expired())
            .map(|(k, _)| k.clone())
            .collect()
    }
}
/// A thread-safe, policy-driven cache: a `Mutex`-guarded
/// `TripleCacheInner` paired with shared `CacheStats`.
///
/// Values are returned by clone, so `V: Clone` is required; keys are cloned
/// for victim selection and `keys()`.
pub struct TripleCache<K, V> {
    inner: Mutex<TripleCacheInner<K, V>>,
    stats: Arc<CacheStats>,
}

impl<K: Eq + Hash + Clone, V: Clone> TripleCache<K, V> {
    /// Builds a cache with the given capacity, eviction policy and optional
    /// per-entry TTL.
    pub fn new(capacity: usize, policy: CachePolicy, ttl: Option<Duration>) -> Self {
        Self {
            inner: Mutex::new(TripleCacheInner::new(capacity, policy, ttl)),
            stats: CacheStats::new(),
        }
    }

    /// Convenience constructor: LRU eviction, entries never expire.
    pub fn lru(capacity: usize) -> Self {
        Self::new(capacity, CachePolicy::Lru, None)
    }

    /// Convenience constructor: TTL policy (LRU-style victim selection) with
    /// the given per-entry lifetime.
    pub fn lru_ttl(capacity: usize, ttl: Duration) -> Self {
        Self::new(capacity, CachePolicy::Ttl, Some(ttl))
    }

    /// Acquires the interior lock; panics only if a previous holder panicked.
    fn locked(&self) -> std::sync::MutexGuard<'_, TripleCacheInner<K, V>> {
        self.inner.lock().expect("TripleCache lock poisoned")
    }

    /// Returns a clone of the cached value, recording a hit or a miss.
    pub fn get(&self, key: &K) -> Option<V> {
        let found = self.locked().get_mut(key).cloned();
        match found {
            Some(_) => self.stats.record_hit(),
            None => self.stats.record_miss(),
        }
        found
    }

    /// Inserts (or overwrites) `key`, evicting per policy when full.
    pub fn put(&self, key: K, value: V) {
        self.locked().insert(key, value, &self.stats);
    }

    /// Removes `key`; returns whether it was present.
    pub fn remove(&self, key: &K) -> bool {
        self.locked().remove(key)
    }

    /// True if `key` is present and not expired; does not touch recency.
    pub fn contains(&self, key: &K) -> bool {
        self.locked().contains_key(key)
    }

    /// Number of live (non-expired) entries.
    pub fn len(&self) -> usize {
        self.locked().len()
    }

    /// True when no live entries remain.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Drops every entry; statistics are left untouched.
    pub fn clear(&self) {
        self.locked().clear();
    }

    /// Clones of all live keys, in arbitrary order.
    pub fn keys(&self) -> Vec<K> {
        self.locked().keys()
    }

    /// Shared handle to this cache's statistics.
    pub fn stats(&self) -> Arc<CacheStats> {
        Arc::clone(&self.stats)
    }
}
/// One SPARQL solution row: variable name → bound value, both as strings.
pub type SparqlRow = HashMap<String, String>;

/// A cached SPARQL result set, stored under a `(dataset_id, query_hash)` key
/// by `QueryResultCache`.
#[derive(Debug, Clone)]
pub struct QueryCacheEntry {
    /// FNV-1a hash of the query text (see `QueryResultCache::hash_query`).
    pub query_hash: u64,
    /// The materialized solution rows.
    pub rows: Vec<SparqlRow>,
    /// Result variable names, as supplied by the caller at `put` time.
    pub variables: Vec<String>,
    /// Predicates the query touched; consulted by
    /// `QueryResultCache::invalidate_by_predicate`.
    pub accessed_predicates: Vec<String>,
    /// Monotonic creation stamp (informational; expiry is handled by the
    /// underlying cache's TTL, not by this field).
    pub created_at: Instant,
}
/// Caches SPARQL query results per `(dataset_id, query-hash)` pair, backed
/// by a TTL-evicting `TripleCache` so stale results age out even without
/// explicit invalidation.
pub struct QueryResultCache {
    cache: TripleCache<(String, u64), QueryCacheEntry>,
}

impl QueryResultCache {
    /// Builds a result cache holding up to `capacity` entries, each living
    /// at most `ttl`.
    pub fn new(capacity: usize, ttl: Duration) -> Self {
        let cache = TripleCache::new(capacity, CachePolicy::Ttl, Some(ttl));
        Self { cache }
    }

    /// 64-bit FNV-1a over the raw query bytes.
    fn hash_query(query: &str) -> u64 {
        const FNV_OFFSET: u64 = 14_695_981_039_346_656_037;
        const FNV_PRIME: u64 = 1_099_511_628_211;
        query
            .bytes()
            .fold(FNV_OFFSET, |h, byte| (h ^ u64::from(byte)).wrapping_mul(FNV_PRIME))
    }

    /// Stores a result set for `(dataset_id, query)`.
    pub fn put(
        &self,
        dataset_id: &str,
        query: &str,
        rows: Vec<SparqlRow>,
        variables: Vec<String>,
        accessed_predicates: Vec<String>,
    ) {
        let query_hash = Self::hash_query(query);
        let entry = QueryCacheEntry {
            query_hash,
            rows,
            variables,
            accessed_predicates,
            created_at: Instant::now(),
        };
        self.cache.put((dataset_id.to_string(), query_hash), entry);
    }

    /// Fetches the cached result set for `(dataset_id, query)`, if present
    /// and not expired.
    pub fn get(&self, dataset_id: &str, query: &str) -> Option<QueryCacheEntry> {
        let key = (dataset_id.to_string(), Self::hash_query(query));
        self.cache.get(&key)
    }

    /// Drops every cached result in `dataset_id` whose query touched
    /// `predicate`; returns how many entries were dropped.
    pub fn invalidate_by_predicate(&self, dataset_id: &str, predicate: &str) -> usize {
        let mut invalidated = 0;
        for key in self.cache.keys() {
            if key.0 != dataset_id {
                continue;
            }
            let touches_predicate = self
                .cache
                .get(&key)
                .map(|entry| entry.accessed_predicates.iter().any(|p| p == predicate))
                .unwrap_or(false);
            if touches_predicate {
                self.cache.remove(&key);
                invalidated += 1;
            }
        }
        invalidated
    }

    /// Drops every cached result belonging to `dataset_id`; returns how many
    /// entries were dropped.
    pub fn invalidate_dataset(&self, dataset_id: &str) -> usize {
        let mut invalidated = 0;
        for key in self.cache.keys() {
            if key.0 == dataset_id {
                self.cache.remove(&key);
                invalidated += 1;
            }
        }
        invalidated
    }

    /// Number of live cached result sets.
    pub fn len(&self) -> usize {
        self.cache.len()
    }

    /// True when no live result sets remain.
    pub fn is_empty(&self) -> bool {
        self.cache.is_empty()
    }

    /// Shared handle to the underlying cache's statistics.
    pub fn stats(&self) -> Arc<CacheStats> {
        self.cache.stats()
    }
}
/// Bidirectional prefix ↔ namespace-IRI table for CURIE handling
/// (e.g. `rdf` ↔ `http://www.w3.org/1999/02/22-rdf-syntax-ns#`).
///
/// `prefix_to_iri` is authoritative. `iri_to_prefix` is a reverse index;
/// when several prefixes share one IRI, the most recently registered prefix
/// owns the reverse entry.
#[derive(Debug, Default, Clone)]
pub struct PrefixCache {
    prefix_to_iri: HashMap<String, String>,
    iri_to_prefix: HashMap<String, String>,
}

impl PrefixCache {
    /// Empty table.
    pub fn new() -> Self {
        Self::default()
    }

    /// Registers (or re-binds) `prefix` to the namespace `iri`.
    ///
    /// Fixed: the reverse entry for the prefix's previous IRI is now dropped
    /// only if it still points at this prefix. Previously it was dropped
    /// unconditionally, which could delete a reverse mapping owned by a
    /// *different* prefix that shares the same IRI.
    pub fn register(&mut self, prefix: &str, iri: &str) {
        if let Some(old_iri) = self.prefix_to_iri.get(prefix) {
            let owns_reverse = self
                .iri_to_prefix
                .get(old_iri)
                .map(|p| p == prefix)
                .unwrap_or(false);
            if owns_reverse {
                self.iri_to_prefix.remove(old_iri.as_str());
            }
        }
        self.prefix_to_iri
            .insert(prefix.to_string(), iri.to_string());
        self.iri_to_prefix
            .insert(iri.to_string(), prefix.to_string());
    }

    /// Namespace IRI registered for `prefix`, if any.
    pub fn resolve_prefix(&self, prefix: &str) -> Option<&str> {
        self.prefix_to_iri.get(prefix).map(|s| s.as_str())
    }

    /// Prefix that currently owns the reverse entry for `iri`, if any.
    pub fn resolve_iri(&self, iri: &str) -> Option<&str> {
        self.iri_to_prefix.get(iri).map(|s| s.as_str())
    }

    /// Expands a CURIE like `owl:Class` to a full IRI. Returns `None` when
    /// there is no `:` or the prefix is unknown.
    pub fn expand(&self, prefixed: &str) -> Option<String> {
        let colon = prefixed.find(':')?;
        let prefix = &prefixed[..colon];
        let local = &prefixed[colon + 1..];
        let namespace = self.prefix_to_iri.get(prefix)?;
        Some(format!("{namespace}{local}"))
    }

    /// Compacts a full IRI to `prefix:local` form using the longest
    /// registered namespace that is a prefix of `iri`; `None` if no
    /// namespace matches.
    pub fn compact(&self, iri: &str) -> Option<String> {
        let mut best: Option<(&str, &str)> = None;
        for (namespace, prefix) in &self.iri_to_prefix {
            if iri.starts_with(namespace.as_str())
                && best.map(|(ns, _)| ns.len()).unwrap_or(0) < namespace.len()
            {
                best = Some((namespace.as_str(), prefix.as_str()));
            }
        }
        best.map(|(ns, pfx)| format!("{}:{}", pfx, &iri[ns.len()..]))
    }

    /// Unregisters `prefix`; returns whether it was present.
    ///
    /// Fixed: the reverse mapping is removed only when it still points at
    /// this prefix, so removing one of several prefixes that share an IRI
    /// no longer orphans the surviving prefix's reverse entry.
    pub fn remove(&mut self, prefix: &str) -> bool {
        match self.prefix_to_iri.remove(prefix) {
            Some(iri) => {
                let owns_reverse = self
                    .iri_to_prefix
                    .get(&iri)
                    .map(|p| p == prefix)
                    .unwrap_or(false);
                if owns_reverse {
                    self.iri_to_prefix.remove(&iri);
                }
                true
            }
            None => false,
        }
    }

    /// Number of registered prefixes.
    pub fn len(&self) -> usize {
        self.prefix_to_iri.len()
    }

    /// True when no prefix is registered.
    pub fn is_empty(&self) -> bool {
        self.prefix_to_iri.is_empty()
    }

    /// All `(prefix, iri)` pairs, in arbitrary map order.
    pub fn entries(&self) -> Vec<(&str, &str)> {
        self.prefix_to_iri
            .iter()
            .map(|(p, i)| (p.as_str(), i.as_str()))
            .collect()
    }

    /// Builder-style helper that pre-loads the common RDF vocabularies.
    pub fn with_standard_prefixes(mut self) -> Self {
        self.register("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#");
        self.register("rdfs", "http://www.w3.org/2000/01/rdf-schema#");
        self.register("owl", "http://www.w3.org/2002/07/owl#");
        self.register("xsd", "http://www.w3.org/2001/XMLSchema#");
        self.register("dc", "http://purl.org/dc/elements/1.1/");
        self.register("dcterms", "http://purl.org/dc/terms/");
        self.register("foaf", "http://xmlns.com/foaf/0.1/");
        self.register("skos", "http://www.w3.org/2004/02/skos/core#");
        self
    }
}
// Unit tests: grouped by the type under test (CacheStats, TripleCache per
// policy, QueryResultCache, PrefixCache).
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;
    use std::time::Duration;

    // --- CachePolicy / CacheStats ---

    #[test]
    fn test_cache_policy_variants_are_distinct() {
        assert_ne!(CachePolicy::Lru, CachePolicy::Lfu);
        assert_ne!(CachePolicy::Lfu, CachePolicy::Fifo);
        assert_ne!(CachePolicy::Fifo, CachePolicy::Ttl);
    }

    #[test]
    fn test_cache_stats_initial_zero() {
        let s = CacheStats::new();
        assert_eq!(s.hits(), 0);
        assert_eq!(s.misses(), 0);
        assert_eq!(s.evictions(), 0);
        assert_eq!(s.insertions(), 0);
    }

    #[test]
    fn test_cache_stats_hit_rate_empty() {
        // No lookups recorded → rate is defined as 0.0, not NaN.
        let s = CacheStats::new();
        assert_eq!(s.hit_rate(), 0.0);
    }

    #[test]
    fn test_cache_stats_hit_rate_all_hits() {
        let s = CacheStats::new();
        s.record_hit();
        s.record_hit();
        assert_eq!(s.hit_rate(), 1.0);
    }

    #[test]
    fn test_cache_stats_hit_rate_half() {
        let s = CacheStats::new();
        s.record_hit();
        s.record_miss();
        assert!((s.hit_rate() - 0.5).abs() < 1e-9);
    }

    #[test]
    fn test_cache_stats_reset() {
        let s = CacheStats::new();
        s.record_hit();
        s.record_miss();
        s.record_eviction();
        s.reset();
        assert_eq!(s.hits(), 0);
        assert_eq!(s.misses(), 0);
        assert_eq!(s.evictions(), 0);
    }

    #[test]
    fn test_cache_stats_concurrent_updates() {
        // 4 threads × 100 increments each; totals must be exact because the
        // counters are atomic.
        let s = Arc::new(CacheStats::default());
        let mut handles = vec![];
        for _ in 0..4 {
            let sc = Arc::clone(&s);
            handles.push(thread::spawn(move || {
                for _ in 0..100 {
                    sc.record_hit();
                    sc.record_miss();
                }
            }));
        }
        for h in handles {
            h.join().expect("thread panicked");
        }
        assert_eq!(s.hits(), 400);
        assert_eq!(s.misses(), 400);
    }

    // --- TripleCache: LRU policy ---

    #[test]
    fn test_triple_cache_lru_empty_get_returns_none() {
        let cache: TripleCache<String, String> = TripleCache::lru(10);
        assert!(cache.get(&"missing".to_string()).is_none());
    }

    #[test]
    fn test_triple_cache_lru_put_and_get() {
        let cache = TripleCache::lru(10);
        cache.put("key1".to_string(), "val1".to_string());
        assert_eq!(cache.get(&"key1".to_string()), Some("val1".to_string()));
    }

    #[test]
    fn test_triple_cache_lru_overwrite() {
        let cache = TripleCache::lru(10);
        cache.put("k".to_string(), "v1".to_string());
        cache.put("k".to_string(), "v2".to_string());
        assert_eq!(cache.get(&"k".to_string()), Some("v2".to_string()));
    }

    #[test]
    fn test_triple_cache_lru_eviction_at_capacity() {
        // Keys 1 and 2 are touched after insertion, so 3 is the LRU victim
        // when 4 arrives.
        let cache: TripleCache<usize, usize> = TripleCache::lru(3);
        cache.put(1, 10);
        cache.put(2, 20);
        cache.put(3, 30);
        let _ = cache.get(&1);
        let _ = cache.get(&2);
        cache.put(4, 40);
        assert_eq!(cache.len(), 3);
        assert!(cache.contains(&1));
        assert!(cache.contains(&2));
        assert!(cache.contains(&4));
    }

    #[test]
    fn test_triple_cache_lru_remove() {
        let cache = TripleCache::lru(10);
        cache.put("a".to_string(), 1i32);
        assert!(cache.remove(&"a".to_string()));
        // Second remove of the same key reports absence.
        assert!(!cache.remove(&"a".to_string()));
        assert!(cache.get(&"a".to_string()).is_none());
    }

    #[test]
    fn test_triple_cache_lru_clear() {
        let cache: TripleCache<i32, i32> = TripleCache::lru(10);
        for i in 0..5 {
            cache.put(i, i * 2);
        }
        cache.clear();
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
    }

    #[test]
    fn test_triple_cache_lru_contains() {
        let cache: TripleCache<i32, i32> = TripleCache::lru(10);
        cache.put(42, 84);
        assert!(cache.contains(&42));
        assert!(!cache.contains(&43));
    }

    #[test]
    fn test_triple_cache_lru_len() {
        let cache: TripleCache<i32, i32> = TripleCache::lru(10);
        assert_eq!(cache.len(), 0);
        cache.put(1, 1);
        assert_eq!(cache.len(), 1);
        cache.put(2, 2);
        assert_eq!(cache.len(), 2);
    }

    #[test]
    fn test_triple_cache_lru_keys() {
        let cache: TripleCache<i32, i32> = TripleCache::lru(10);
        cache.put(1, 10);
        cache.put(2, 20);
        // keys() order is unspecified (HashMap), so sort before comparing.
        let mut keys = cache.keys();
        keys.sort();
        assert_eq!(keys, vec![1, 2]);
    }

    #[test]
    fn test_triple_cache_lru_stats_incremented() {
        let cache: TripleCache<i32, i32> = TripleCache::lru(10);
        cache.put(1, 100);
        let _ = cache.get(&1);
        let _ = cache.get(&99);
        let s = cache.stats();
        assert_eq!(s.hits(), 1);
        assert_eq!(s.misses(), 1);
        assert_eq!(s.insertions(), 1);
    }

    // --- TripleCache: LFU / FIFO / TTL policies ---

    #[test]
    fn test_triple_cache_lfu_evicts_least_used() {
        // Key 1 is by far the most used, so it must survive the eviction.
        let cache: TripleCache<i32, i32> = TripleCache::new(3, CachePolicy::Lfu, None);
        cache.put(1, 1);
        cache.put(2, 2);
        cache.put(3, 3);
        for _ in 0..5 {
            let _ = cache.get(&1);
        }
        let _ = cache.get(&2);
        cache.put(4, 4);
        assert!(cache.contains(&1));
    }

    #[test]
    fn test_triple_cache_fifo_evicts_oldest() {
        // FIFO ignores accesses: key 1 is oldest by insertion and is evicted
        // even though it was read twice.
        let cache: TripleCache<i32, i32> = TripleCache::new(3, CachePolicy::Fifo, None);
        cache.put(1, 1);
        cache.put(2, 2);
        cache.put(3, 3);
        let _ = cache.get(&1);
        let _ = cache.get(&1);
        cache.put(4, 4);
        assert_eq!(cache.len(), 3);
        assert!(!cache.contains(&1));
    }

    #[test]
    fn test_triple_cache_ttl_expiry() {
        let ttl = Duration::from_millis(50);
        let cache: TripleCache<i32, i32> = TripleCache::lru_ttl(10, ttl);
        cache.put(1, 100);
        assert!(cache.get(&1).is_some());
        // Sleep past the TTL; the entry must then read as absent.
        thread::sleep(Duration::from_millis(60));
        assert!(cache.get(&1).is_none());
    }

    // --- QueryResultCache ---

    #[test]
    fn test_query_result_cache_miss_on_empty() {
        let qrc = QueryResultCache::new(100, Duration::from_secs(60));
        assert!(qrc.get("ds", "SELECT * WHERE { ?s ?p ?o }").is_none());
    }

    #[test]
    fn test_query_result_cache_put_and_get() {
        let qrc = QueryResultCache::new(100, Duration::from_secs(60));
        let rows = vec![[("s".to_string(), "Alice".to_string())]
            .into_iter()
            .collect()];
        qrc.put(
            "ds1",
            "SELECT ?s WHERE {?s a :Person}",
            rows.clone(),
            vec!["s".to_string()],
            vec![],
        );
        let entry = qrc
            .get("ds1", "SELECT ?s WHERE {?s a :Person}")
            .expect("should hit");
        assert_eq!(entry.rows.len(), 1);
    }

    #[test]
    fn test_query_result_cache_different_datasets_no_collision() {
        // Same query text in two datasets must resolve to distinct entries.
        let qrc = QueryResultCache::new(100, Duration::from_secs(60));
        let q = "SELECT * WHERE { ?s ?p ?o }";
        let rows1 = vec![[("x".to_string(), "A".to_string())].into_iter().collect()];
        let rows2 = vec![[("x".to_string(), "B".to_string())].into_iter().collect()];
        qrc.put("ds1", q, rows1, vec![], vec![]);
        qrc.put("ds2", q, rows2, vec![], vec![]);
        let e1 = qrc.get("ds1", q).expect("hit");
        let e2 = qrc.get("ds2", q).expect("hit");
        assert_ne!(e1.rows[0]["x"], e2.rows[0]["x"],);
    }

    #[test]
    fn test_query_result_cache_invalidate_by_predicate() {
        let qrc = QueryResultCache::new(100, Duration::from_secs(60));
        qrc.put("ds", "q1", vec![], vec![], vec!["http://p/age".to_string()]);
        qrc.put(
            "ds",
            "q2",
            vec![],
            vec![],
            vec!["http://p/name".to_string()],
        );
        let removed = qrc.invalidate_by_predicate("ds", "http://p/age");
        assert_eq!(removed, 1);
        assert!(qrc.get("ds", "q1").is_none());
        assert!(qrc.get("ds", "q2").is_some());
    }

    #[test]
    fn test_query_result_cache_invalidate_dataset() {
        let qrc = QueryResultCache::new(100, Duration::from_secs(60));
        qrc.put("ds", "q1", vec![], vec![], vec![]);
        qrc.put("ds", "q2", vec![], vec![], vec![]);
        qrc.put("other_ds", "q1", vec![], vec![], vec![]);
        let removed = qrc.invalidate_dataset("ds");
        assert_eq!(removed, 2);
        assert!(qrc.get("other_ds", "q1").is_some());
    }

    #[test]
    fn test_query_result_cache_len() {
        let qrc = QueryResultCache::new(100, Duration::from_secs(60));
        assert_eq!(qrc.len(), 0);
        qrc.put("ds", "q1", vec![], vec![], vec![]);
        assert_eq!(qrc.len(), 1);
    }

    #[test]
    fn test_query_result_cache_is_empty() {
        let qrc = QueryResultCache::new(100, Duration::from_secs(60));
        assert!(qrc.is_empty());
        qrc.put("ds", "q", vec![], vec![], vec![]);
        assert!(!qrc.is_empty());
    }

    #[test]
    fn test_query_result_cache_variables_preserved() {
        let qrc = QueryResultCache::new(100, Duration::from_secs(60));
        let vars = vec!["?s".to_string(), "?p".to_string(), "?o".to_string()];
        qrc.put("ds", "q", vec![], vars.clone(), vec![]);
        let entry = qrc.get("ds", "q").expect("hit");
        assert_eq!(entry.variables, vars);
    }

    // --- PrefixCache ---

    #[test]
    fn test_prefix_cache_empty() {
        let pc = PrefixCache::new();
        assert!(pc.is_empty());
        assert_eq!(pc.len(), 0);
    }

    #[test]
    fn test_prefix_cache_register_and_resolve_prefix() {
        let mut pc = PrefixCache::new();
        pc.register("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#");
        assert_eq!(
            pc.resolve_prefix("rdf"),
            Some("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
        );
    }

    #[test]
    fn test_prefix_cache_register_and_resolve_iri() {
        let mut pc = PrefixCache::new();
        pc.register("rdfs", "http://www.w3.org/2000/01/rdf-schema#");
        assert_eq!(
            pc.resolve_iri("http://www.w3.org/2000/01/rdf-schema#"),
            Some("rdfs")
        );
    }

    #[test]
    fn test_prefix_cache_expand() {
        let mut pc = PrefixCache::new();
        pc.register("owl", "http://www.w3.org/2002/07/owl#");
        assert_eq!(
            pc.expand("owl:Class"),
            Some("http://www.w3.org/2002/07/owl#Class".to_string())
        );
    }

    #[test]
    fn test_prefix_cache_compact() {
        let mut pc = PrefixCache::new();
        pc.register("xsd", "http://www.w3.org/2001/XMLSchema#");
        assert_eq!(
            pc.compact("http://www.w3.org/2001/XMLSchema#string"),
            Some("xsd:string".to_string())
        );
    }

    #[test]
    fn test_prefix_cache_expand_unknown_prefix_returns_none() {
        let pc = PrefixCache::new();
        assert!(pc.expand("unknown:Term").is_none());
    }

    #[test]
    fn test_prefix_cache_compact_no_match_returns_none() {
        let pc = PrefixCache::new();
        assert!(pc.compact("http://example.org/x").is_none());
    }

    #[test]
    fn test_prefix_cache_overwrite_prefix() {
        let mut pc = PrefixCache::new();
        pc.register("ex", "http://example.org/");
        pc.register("ex", "http://example.com/");
        assert_eq!(pc.resolve_prefix("ex"), Some("http://example.com/"));
        assert_eq!(pc.len(), 1);
    }

    #[test]
    fn test_prefix_cache_remove() {
        let mut pc = PrefixCache::new();
        pc.register("ex", "http://example.org/");
        assert!(pc.remove("ex"));
        assert!(pc.resolve_prefix("ex").is_none());
        assert!(!pc.remove("ex"));
    }

    #[test]
    fn test_prefix_cache_standard_prefixes() {
        let pc = PrefixCache::new().with_standard_prefixes();
        assert!(pc.resolve_prefix("rdf").is_some());
        assert!(pc.resolve_prefix("rdfs").is_some());
        assert!(pc.resolve_prefix("owl").is_some());
        assert!(pc.resolve_prefix("xsd").is_some());
        assert!(pc.resolve_prefix("foaf").is_some());
    }

    #[test]
    fn test_prefix_cache_longest_namespace_wins_on_compact() {
        let mut pc = PrefixCache::new();
        pc.register("schema", "http://schema.org/");
        pc.register("schema_person", "http://schema.org/Person/");
        let result = pc.compact("http://schema.org/Person/name");
        assert_eq!(result, Some("schema_person:name".to_string()));
    }

    #[test]
    fn test_prefix_cache_entries_returns_all() {
        let mut pc = PrefixCache::new();
        pc.register("a", "http://a.org/");
        pc.register("b", "http://b.org/");
        let mut entries: Vec<_> = pc.entries().into_iter().map(|(p, _)| p).collect();
        entries.sort();
        assert_eq!(entries, vec!["a", "b"]);
    }
}