use std::any::TypeId;
use std::sync::Arc;
use ahash::HashMap;
use re_byte_size::{MemUsageTree, MemUsageTreeCapture};
use re_chunk_store::ChunkStoreEvent;
use re_entity_db::EntityDb;
use re_log_types::StoreId;
use re_mutex::Mutex;
/// A single type-erased cache behind its own lock, tagged with a
/// human-readable name used for profiling scopes and memory reports.
struct SharedCache {
    // Display name (from `Cache::name`); `'static`, so it can be read
    // without taking the cache's own lock.
    name: &'static str,
    // The cache proper; locked independently of the `Caches` master map.
    cache: Mutex<Box<dyn Cache>>,
}
impl SharedCache {
    /// Wraps a default-constructed cache of type `C` behind a mutex,
    /// remembering its display name so it can be read without locking.
    fn new<C: Cache + Default>() -> Self {
        let boxed: Box<dyn Cache> = Box::new(C::default());
        let name = boxed.name();
        Self {
            name,
            cache: Mutex::new(boxed),
        }
    }

    /// Acquires exclusive access to the wrapped cache.
    fn lock(&self) -> re_mutex::MutexGuard<'_, Box<dyn Cache>> {
        self.cache.lock()
    }
}
/// Registry of all caches belonging to a single store, keyed by the
/// concrete cache type.
pub struct Caches {
    // Master map: one `SharedCache` per concrete `Cache` type.
    // Entries are `Arc` so they can be cloned out and used after the
    // map lock has been released (see `entry`).
    caches: Mutex<HashMap<TypeId, Arc<SharedCache>>>,
    // The store these caches belong to; events for other stores are ignored.
    pub store_id: StoreId,
    // RAM usage measured right after the most recent `purge_memory` call.
    memory_use_after_last_purge: u64,
}
impl Caches {
    /// Creates an empty cache registry for the given store.
    pub fn new(store_id: StoreId) -> Self {
        Self {
            caches: Mutex::new(HashMap::default()),
            store_id,
            memory_use_after_last_purge: 0,
        }
    }

    /// Gives every registered cache a chance to do per-frame housekeeping.
    ///
    /// NOTE(review): this holds the master map lock while locking each
    /// individual cache. That is deadlock-free as long as nothing acquires
    /// the locks in the reverse order — `entry` below deliberately releases
    /// the map lock before locking an individual cache.
    pub fn begin_frame(&self) {
        re_tracing::profile_function!();
        // Iteration order over the hash map doesn't matter here.
        #[expect(clippy::iter_over_hash_type)] for cache in self.caches.lock().values() {
            re_tracing::profile_scope!(cache.name);
            cache.lock().begin_frame();
        }
    }

    /// RAM usage measured right after the last `purge_memory` call
    /// (zero if memory was never purged).
    pub fn memory_use_after_last_purge(&self) -> u64 {
        self.memory_use_after_last_purge
    }

    /// Total GPU memory use of all caches, as one tree with one child per
    /// cache, sorted by cache name for a stable display order.
    pub fn vram_usage(&self) -> MemUsageTree {
        re_tracing::profile_function!();
        let mut node = re_byte_size::MemUsageNode::new();
        let mut cache_vram: Vec<_> = self
            .caches
            .lock()
            .values()
            .map(|cache| (cache.name, cache.lock().vram_usage()))
            .collect();
        // Sort by name so the report order doesn't depend on hash-map order.
        cache_vram.sort_by_key(|(cache_name, _)| *cache_name);
        for (cache_name, vram_tree) in cache_vram {
            node.add(cache_name, vram_tree);
        }
        node.into_tree()
    }

    /// Asks every cache to free as much RAM as possible, then records the
    /// remaining memory use in `memory_use_after_last_purge`.
    pub fn purge_memory(&mut self) {
        re_tracing::profile_function!();
        #[expect(clippy::iter_over_hash_type)] for cache in self.caches.lock().values() {
            re_tracing::profile_scope!(cache.name);
            cache.lock().purge_memory();
        }
        self.memory_use_after_last_purge = self.capture_mem_usage_tree().size_bytes();
    }

    /// Forwards an RRD-manifest notification to every cache, but only if the
    /// entity db belongs to this registry's store.
    pub fn on_rrd_manifest(&self, entity_db: &EntityDb) {
        re_tracing::profile_function!();
        if self.store_id != *entity_db.store_id() {
            return;
        }
        #[expect(clippy::iter_over_hash_type)] for cache in self.caches.lock().values() {
            re_tracing::profile_scope!(cache.name);
            cache.lock().on_rrd_manifest(entity_db);
        }
    }

    /// Forwards chunk-store events to every cache, pre-filtered down to the
    /// events that concern this registry's store. Caches are not touched at
    /// all if no event is relevant.
    pub fn on_store_events(&self, events: &[ChunkStoreEvent], entity_db: &EntityDb) {
        re_tracing::profile_function!();
        let relevant_events = events
            .iter()
            .filter(|event| event.store_id == self.store_id)
            .collect::<Vec<_>>();
        if relevant_events.is_empty() {
            return;
        }
        #[expect(clippy::iter_over_hash_type)] for cache in self.caches.lock().values() {
            re_tracing::profile_scope!(cache.name);
            cache.lock().on_store_events(&relevant_events, entity_db);
        }
    }

    /// Accesses the cache of type `C`, lazily creating it on first use, and
    /// runs `f` with exclusive access to it.
    ///
    /// Locking happens in two phases: the master map lock is held only long
    /// enough to fetch (or insert) the `Arc<SharedCache>`, and is released
    /// before the individual cache's lock is taken — so a slow `f` does not
    /// block access to unrelated caches.
    pub fn entry<C: Cache + Default, R>(&self, f: impl FnOnce(&mut C) -> R) -> R {
        let shared_cache = {
            re_tracing::profile_wait!("master-cache-lock");
            let mut guard = self.caches.lock();
            guard
                .entry(TypeId::of::<C>())
                .or_insert_with(|| Arc::new(SharedCache::new::<C>()))
                .clone()
        }; // <- master map lock released here, before the cache lock below.
        let mut cache_guard = {
            re_tracing::profile_wait!("cache-lock", shared_cache.name);
            shared_cache.lock()
        };
        let cache = cache_guard.as_mut();
        // The map is keyed by `TypeId::of::<C>()`, so the value stored under
        // that key is always a `C`; a failed downcast means the bookkeeping
        // above is broken.
        f((cache as &mut dyn std::any::Any)
            .downcast_mut::<C>()
            .expect("Downcast failed, this indicates a bug in how `Caches` adds new cache types."))
    }
}
/// Interface every cache registered in [`Caches`] must implement.
///
/// `std::any::Any` is a supertrait so `Caches::entry` can downcast the
/// type-erased `Box<dyn Cache>` back to the concrete type.
pub trait Cache: std::any::Any + Send + Sync + re_byte_size::MemUsageTreeCapture {
    /// Display name, used for profiling scopes and memory reports.
    fn name(&self) -> &'static str;

    /// Called once at the start of each frame; no-op by default.
    fn begin_frame(&mut self) {}

    /// Drops as much cached data as possible to free RAM.
    fn purge_memory(&mut self);

    /// GPU memory used by this cache; defaults to zero for CPU-only caches.
    fn vram_usage(&self) -> MemUsageTree {
        MemUsageTree::Bytes(0)
    }

    /// Reacts to chunk-store events (already filtered to this cache's store
    /// by `Caches::on_store_events`); ignored by default.
    fn on_store_events(&mut self, events: &[&ChunkStoreEvent], entity_db: &EntityDb) {
        _ = events;
        _ = entity_db;
    }

    /// Called when an RRD manifest arrives for this store; no-op by default.
    fn on_rrd_manifest(&mut self, entity_db: &EntityDb) {
        _ = entity_db;
    }
}
impl MemUsageTreeCapture for Caches {
    /// Captures one memory-usage subtree per cache and merges them into a
    /// single tree, with children sorted by cache name so the report order
    /// doesn't depend on hash-map iteration order.
    fn capture_mem_usage_tree(&self) -> MemUsageTree {
        re_tracing::profile_function!();

        let guard = self.caches.lock();
        let mut per_cache = Vec::with_capacity(guard.len());
        #[expect(clippy::iter_over_hash_type)]
        for shared in guard.values() {
            per_cache.push((shared.name, shared.lock().capture_mem_usage_tree()));
        }
        drop(guard);

        // Stable sort by name keeps equal-name entries in insertion order.
        per_cache.sort_by_key(|&(name, _)| name);

        let mut root = re_byte_size::MemUsageNode::new();
        for (name, tree) in per_cache {
            root.add(name, tree);
        }
        root.into_tree()
    }
}