nodedb_cluster/metadata_group/entry.rs
1//! The canonical wire-type for every entry proposed to the metadata Raft group.
2
3use serde::{Deserialize, Serialize};
4
5use nodedb_types::Hlc;
6
7use crate::metadata_group::descriptors::{DescriptorId, DescriptorLease};
8
/// An entry in the replicated metadata log.
///
/// Every mutation to cluster-wide state — DDL, topology, routing,
/// descriptor leases, cluster version bumps — is encoded as one of
/// these variants, proposed against the metadata Raft group, and
/// applied on every node by a
/// [`crate::metadata_group::applier::MetadataApplier`].
///
/// The `CatalogDdl` variant is the single wire shape for every DDL
/// mutation. Its `payload` is an opaque, host-serialized
/// `nodedb::control::catalog_entry::CatalogEntry` value — the
/// `nodedb-cluster` crate is deliberately ignorant of the host's
/// per-DDL-object struct shapes. This keeps the cluster crate
/// layering-clean and makes adding new DDL object types on the
/// host side a zero-wire-change operation.
///
/// NOTE(review): this is a wire type (serde + zerompk derives).
/// Variant names — and, depending on zerompk's enum encoding,
/// variant order — are part of the serialized contract. Append new
/// variants at the end rather than reordering, and confirm the
/// zerompk encoding before renaming anything here.
#[derive(
    Debug,
    Clone,
    PartialEq,
    Eq,
    Serialize,
    Deserialize,
    zerompk::ToMessagePack,
    zerompk::FromMessagePack,
)]
pub enum MetadataEntry {
    /// Single generic DDL entry carrying an opaque host-side payload.
    /// Produced by every pgwire DDL handler via
    /// `nodedb::control::metadata_proposer::propose_catalog_entry`.
    CatalogDdl {
        /// Host-serialized `CatalogEntry` bytes. This crate never
        /// inspects them; it only replicates them and hands them
        /// back to the host-side applier.
        payload: Vec<u8>,
    },

    // ── Topology / routing ─────────────────────────────────────────────
    /// Cluster membership / topology mutation; see [`TopologyChange`].
    TopologyChange(TopologyChange),
    /// Routing-table mutation; see [`RoutingChange`].
    RoutingChange(RoutingChange),

    // ── Cluster version ────────────────────────────────────────────────
    /// Move the cluster-wide version from `from` to `to`.
    /// NOTE(review): `from` presumably enables CAS-style rejection of
    /// stale bumps in the applier — confirm against `MetadataApplier`.
    ClusterVersionBump {
        from: u16,
        to: u16,
    },

    // ── Descriptor leases ──────────────────────────────────────────────
    /// Grant a descriptor lease; the full lease record travels on the
    /// wire (see [`DescriptorLease`]).
    DescriptorLeaseGrant(DescriptorLease),
    /// Release the leases that `node_id` holds on `descriptor_ids`.
    DescriptorLeaseRelease {
        /// Node giving up its leases.
        node_id: u64,
        /// Descriptors whose leases are released; batched so one log
        /// entry can release many leases at once.
        descriptor_ids: Vec<DescriptorId>,
    },

    // ── Descriptor lease drain ────────────────────────────────────────
    /// Begin draining leases on a descriptor. While a drain entry
    /// is active, any `acquire_descriptor_lease` at
    /// `version <= up_to_version` must be rejected cluster-wide so
    /// the in-flight DDL that bumps the version can make progress.
    ///
    /// `expires_at` is the HLC at which this drain entry is
    /// considered stale and ignored by `is_draining` checks on
    /// read. Acts as a TTL that prevents a crashed proposer from
    /// leaving an orphaned drain that blocks the cluster forever.
    DescriptorDrainStart {
        /// Descriptor whose leases are being drained.
        descriptor_id: DescriptorId,
        /// Lease acquisitions at or below this version are rejected
        /// while the drain is active.
        up_to_version: u64,
        /// HLC deadline after which the drain is treated as stale.
        expires_at: Hlc,
    },
    /// End draining on a descriptor. Emitted explicitly on drain
    /// timeout so the cluster can make progress. On the happy
    /// path (successful `Put*` apply), the host-side applier
    /// clears drain implicitly — this variant is the escape
    /// hatch for the failure path.
    DescriptorDrainEnd {
        /// Descriptor whose drain is being cleared.
        descriptor_id: DescriptorId,
    },
}
83
/// Topology mutations proposed through the metadata group.
///
/// NOTE(review): wire type — variant names (and possibly order,
/// depending on zerompk's enum encoding) are part of the serialized
/// contract; append new variants rather than reordering.
#[derive(
    Debug,
    Clone,
    PartialEq,
    Eq,
    Serialize,
    Deserialize,
    zerompk::ToMessagePack,
    zerompk::FromMessagePack,
)]
pub enum TopologyChange {
    /// Add a node to the cluster, reachable at `addr`.
    /// NOTE(review): presumably joins as a non-voter first and is
    /// upgraded via `PromoteToVoter` — confirm against the applier.
    Join { node_id: u64, addr: String },
    /// Remove a node from the cluster.
    Leave { node_id: u64 },
    /// Promote an existing member to a voting member.
    PromoteToVoter { node_id: u64 },
    /// Mark a node as beginning decommission.
    StartDecommission { node_id: u64 },
    /// Mark a node's decommission as complete.
    FinishDecommission { node_id: u64 },
}
102
/// Routing-table mutations proposed through the metadata group.
///
/// NOTE(review): wire type — variant names (and possibly order,
/// depending on zerompk's enum encoding) are part of the serialized
/// contract; append new variants rather than reordering.
#[derive(
    Debug,
    Clone,
    PartialEq,
    Eq,
    Serialize,
    Deserialize,
    zerompk::ToMessagePack,
    zerompk::FromMessagePack,
)]
pub enum RoutingChange {
    /// Move a vShard to a new raft group leaseholder.
    ReassignVShard {
        /// The vShard being moved.
        vshard_id: u16,
        /// Raft group that now owns the vShard.
        new_group_id: u64,
        /// Node that holds the lease for the vShard after the move.
        new_leaseholder_node_id: u64,
    },
    /// Record a leadership transfer within an existing group.
    LeadershipTransfer {
        /// Group whose leadership changed.
        group_id: u64,
        /// Node that is now the group's leader.
        new_leader_node_id: u64,
    },
}
126}