nodedb_cluster/metadata_group/cache.rs
//! Per-node in-memory view of the replicated metadata state.
//!
//! The cache tracks everything `nodedb-cluster` natively understands:
//! the applied raft index, the HLC watermark, topology / routing
//! change history, descriptor leases, and cluster version. It
//! **does not** maintain per-DDL-object descriptor state — that
//! lives on the host side via
//! `nodedb::control::catalog_entry::CatalogEntry::apply_to` writing
//! into `SystemCatalog` redb. The `CatalogDdl { payload }` variant
//! is opaque here: the cache tracks its applied index and forwards
//! the payload to the host's `MetadataCommitApplier`.
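//!
//! A rough usage sketch (illustrative only; `ddl_payload` stands in
//! for whatever opaque bytes the host's DDL encoder produces):
//!
//! ```ignore
//! let mut cache = MetadataCache::new();
//!
//! // The cache only bumps its counter for a DDL entry; the payload
//! // itself is handed to the host's `MetadataCommitApplier`.
//! let entry = MetadataEntry::CatalogDdl { payload: ddl_payload };
//! cache.apply(1, &entry);
//! assert_eq!(cache.catalog_entries_applied, 1);
//!
//! // Re-applying at or below the watermark is ignored.
//! cache.apply(1, &entry);
//! assert_eq!(cache.catalog_entries_applied, 1);
//! ```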

use std::collections::HashMap;

use nodedb_types::Hlc;
use tracing::{debug, warn};

use crate::metadata_group::descriptors::{DescriptorId, DescriptorLease};
use crate::metadata_group::entry::{MetadataEntry, RoutingChange, TopologyChange};

/// In-memory view of the committed metadata state.
#[derive(Debug, Default)]
pub struct MetadataCache {
    pub applied_index: u64,
    pub last_applied_hlc: Hlc,

    /// `(descriptor_id, node_id) -> lease`.
    pub leases: HashMap<(DescriptorId, u64), DescriptorLease>,

    /// Topology mutations applied so far.
    pub topology_log: Vec<TopologyChange>,
    pub routing_log: Vec<RoutingChange>,

    pub cluster_version: u16,

    /// Monotonically increasing count of committed `CatalogDdl`
    /// entries. Exposed for tests and metrics — planners read
    /// catalog state through the host-side `SystemCatalog`, not
    /// this counter.
    pub catalog_entries_applied: u64,
}

impl MetadataCache {
    pub fn new() -> Self {
        Self::default()
    }

    /// Apply a committed entry. Idempotent by `applied_index`: entries
    /// with a non-zero index at or below the current watermark are
    /// ignored.
    pub fn apply(&mut self, index: u64, entry: &MetadataEntry) {
        if index != 0 && index <= self.applied_index {
            debug!(
                index,
                watermark = self.applied_index,
                "metadata cache: skipping already-applied entry"
            );
            return;
        }
        self.applied_index = index;

        match entry {
            MetadataEntry::CatalogDdl { payload: _ } => {
                // Opaque to the cluster crate. The host-side applier
                // decodes the payload and writes through to
                // `SystemCatalog`. We just count it.
                self.catalog_entries_applied += 1;
            }
            MetadataEntry::TopologyChange(change) => self.topology_log.push(change.clone()),
            MetadataEntry::RoutingChange(change) => self.routing_log.push(change.clone()),

            MetadataEntry::ClusterVersionBump { from, to } => {
                if *from != self.cluster_version && self.cluster_version != 0 {
                    warn!(
                        expected = self.cluster_version,
                        got = *from,
                        "cluster version bump mismatch"
                    );
                }
                self.cluster_version = *to;
            }

            MetadataEntry::DescriptorLeaseGrant(lease) => {
                if lease.expires_at > self.last_applied_hlc {
                    self.last_applied_hlc = lease.expires_at;
                }
                self.leases
                    .insert((lease.descriptor_id.clone(), lease.node_id), lease.clone());
            }
            MetadataEntry::DescriptorLeaseRelease {
                node_id,
                descriptor_ids,
            } => {
                for id in descriptor_ids {
                    self.leases.remove(&(id.clone(), *node_id));
                }
            }
            // Drain state is host-side (lives in
            // `nodedb::control::lease::DescriptorDrainTracker`);
            // the cluster-side cache only tracks lease state
            // directly. `DescriptorDrainStart` merely advances the
            // HLC watermark and `DescriptorDrainEnd` is a no-op, but
            // spelling both arms out (no `_` wildcard) means adding
            // new variants is a compile-time error here too.
            MetadataEntry::DescriptorDrainStart { expires_at, .. } => {
                if *expires_at > self.last_applied_hlc {
                    self.last_applied_hlc = *expires_at;
                }
            }
            MetadataEntry::DescriptorDrainEnd { .. } => {}
        }
    }
}
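
// Illustrative tests sketching the `apply` contract. Only
// `ClusterVersionBump` is constructed, on the assumption that its
// `from`/`to` fields are plain `u16`s (as the version-bump arm above
// implies); the other variants need types defined outside this module.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn version_bump_updates_cluster_version_and_watermark() {
        let mut cache = MetadataCache::new();

        cache.apply(5, &MetadataEntry::ClusterVersionBump { from: 0, to: 2 });

        assert_eq!(cache.applied_index, 5);
        assert_eq!(cache.cluster_version, 2);
    }

    #[test]
    fn stale_index_is_ignored() {
        let mut cache = MetadataCache::new();
        cache.apply(5, &MetadataEntry::ClusterVersionBump { from: 0, to: 2 });

        // An entry at or below the watermark must leave the cache untouched.
        cache.apply(3, &MetadataEntry::ClusterVersionBump { from: 2, to: 7 });

        assert_eq!(cache.applied_index, 5);
        assert_eq!(cache.cluster_version, 2);
    }
}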