nodedb_cluster/metadata_group/cache.rs
1//! Per-node in-memory view of the replicated metadata state.
2//!
3//! The cache tracks everything `nodedb-cluster` natively understands:
4//! the applied raft index, the HLC watermark, topology / routing
5//! change history, descriptor leases, and cluster version. It
6//! **does not** maintain per-DDL-object descriptor state — that
7//! lives on the host side via
8//! `nodedb::control::catalog_entry::CatalogEntry::apply_to` writing
9//! into `SystemCatalog` redb. The `CatalogDdl { payload }` variant
10//! is opaque here: the cache tracks its applied index and forwards
11//! the payload to the host's `MetadataCommitApplier`.
12
13use std::collections::HashMap;
14
15use nodedb_types::Hlc;
16use tracing::{debug, warn};
17
18use crate::metadata_group::descriptors::{DescriptorId, DescriptorLease};
19use crate::metadata_group::entry::{MetadataEntry, RoutingChange, TopologyChange};
20
21/// In-memory view of the committed metadata state.
#[derive(Debug, Default)]
pub struct MetadataCache {
    /// Raft log index of the last entry applied to this cache.
    /// `apply` ignores entries at or below this watermark (except
    /// index 0, which is always applied).
    pub applied_index: u64,
    /// Highest HLC observed from applied entries; advanced by lease
    /// grants and drain-start entries (see `apply`).
    pub last_applied_hlc: Hlc,

    /// `(descriptor_id, node_id) -> lease`.
    pub leases: HashMap<(DescriptorId, u64), DescriptorLease>,

    /// Topology mutations applied so far.
    pub topology_log: Vec<TopologyChange>,
    /// Routing mutations applied so far.
    pub routing_log: Vec<RoutingChange>,

    /// Current cluster version; updated by `ClusterVersionBump`.
    /// 0 means "never set" and suppresses the bump-mismatch warning.
    pub cluster_version: u16,

    /// Monotonically-increasing count of committed `CatalogDdl`
    /// entries. Exposed for tests and metrics — planners read
    /// catalog state through the host-side `SystemCatalog`, not
    /// this counter.
    pub catalog_entries_applied: u64,
}
42
43impl MetadataCache {
44 pub fn new() -> Self {
45 Self::default()
46 }
47
48 /// Apply a committed entry. Idempotent by `applied_index`:
49 /// entries at or below the current watermark are ignored.
50 pub fn apply(&mut self, index: u64, entry: &MetadataEntry) {
51 if index != 0 && index <= self.applied_index {
52 debug!(
53 index,
54 watermark = self.applied_index,
55 "metadata cache: skipping already-applied entry"
56 );
57 return;
58 }
59 self.applied_index = index;
60
61 match entry {
62 MetadataEntry::CatalogDdl { payload: _ }
63 | MetadataEntry::CatalogDdlAudited { payload: _, .. } => {
64 // Opaque to the cluster crate. The host-side applier
65 // decodes the payload and writes through to
66 // `SystemCatalog`. We just count it — both DDL
67 // shapes contribute to `catalog_entries_applied`.
68 self.catalog_entries_applied += 1;
69 }
70 MetadataEntry::TopologyChange(change) => self.topology_log.push(change.clone()),
71 MetadataEntry::RoutingChange(change) => self.routing_log.push(change.clone()),
72
73 MetadataEntry::ClusterVersionBump { from, to } => {
74 if *from != self.cluster_version && self.cluster_version != 0 {
75 warn!(
76 expected = self.cluster_version,
77 got = *from,
78 "cluster version bump mismatch"
79 );
80 }
81 self.cluster_version = *to;
82 }
83
84 MetadataEntry::DescriptorLeaseGrant(lease) => {
85 if lease.expires_at > self.last_applied_hlc {
86 self.last_applied_hlc = lease.expires_at;
87 }
88 self.leases
89 .insert((lease.descriptor_id.clone(), lease.node_id), lease.clone());
90 }
91 MetadataEntry::DescriptorLeaseRelease {
92 node_id,
93 descriptor_ids,
94 } => {
95 for id in descriptor_ids {
96 self.leases.remove(&(id.clone(), *node_id));
97 }
98 }
99 // Drain state is host-side (lives in
100 // `nodedb::control::lease::DescriptorDrainTracker`);
101 // the cluster-side cache only tracks lease state
102 // directly. These no-op arms keep the exhaustive
103 // match coverage so adding new variants is a
104 // compile-time error here too.
105 MetadataEntry::DescriptorDrainStart { expires_at, .. } => {
106 if *expires_at > self.last_applied_hlc {
107 self.last_applied_hlc = *expires_at;
108 }
109 }
110 MetadataEntry::DescriptorDrainEnd { .. } => {}
111 MetadataEntry::CaTrustChange { .. } => {
112 // CA trust mutations are host-side only: the production
113 // applier in the nodedb crate writes/deletes
114 // `tls/ca.d/<fp>.crt` and rebuilds the rustls config.
115 // Cluster cache has nothing to track.
116 }
117 MetadataEntry::Batch { entries } => {
118 for sub in entries {
119 self.apply(index, sub);
120 }
121 }
122 }
123 }
124}