nodedb_cluster/raft_loop/handle_rpc.rs

//! Inbound Raft RPC dispatch — `impl RaftRpcHandler for RaftLoop`.
//!
//! Each RPC variant is either handled inline (Raft consensus RPCs that
//! just lock `MultiRaft`) or delegated to a helper module — health,
//! forwarding, VShard envelopes, or (for `JoinRequest`) the async
//! orchestration in [`super::join`].

use crate::error::{ClusterError, Result};
use crate::forward::RequestForwarder;
use crate::health;
use crate::rpc_codec::RaftRpc;
use crate::transport::RaftRpcHandler;

use super::loop_core::{CommitApplier, RaftLoop};

/// The Raft group that owns cluster topology / membership.
///
/// Group 0 is the "metadata" group and is the authoritative source of
/// truth for who is in the cluster. Joins must be processed by its
/// leader; this constant is also used by the join orchestration in
/// [`super::join`].
pub(super) const TOPOLOGY_GROUP_ID: u64 = 0;

/// Outcome of the leader-check phase of the join flow.
///
/// Extracted as a pure enum so the decision logic can be unit-tested
/// without spinning up a real `MultiRaft` just to observe its leader id.
#[derive(Debug, PartialEq, Eq)]
pub(super) enum JoinDecision {
    /// This node is the group-0 leader (or the founding seed with no leader
    /// elected yet). Admit the join locally.
    Admit,
    /// Another node is the group-0 leader. The client should retry at
    /// `leader_addr`.
    Redirect { leader_addr: String },
}

/// Pure decision: given the observed group-0 leader, this node's id, and
/// the leader's address (as known to the local topology), should we
/// admit the join or redirect?
///
/// - `group0_leader == 0` means "no elected leader yet". On a freshly
///   bootstrapped single-seed cluster this is normal — the founding node
///   is the only possible leader, so we accept.
/// - `group0_leader == self_node_id` means we are the leader — accept.
/// - Otherwise redirect. If the leader's address is unknown to topology
///   (an operator error that shouldn't happen in practice), we still
///   redirect with an empty string so the client at least sees the
///   `"not leader"` prefix and can decide to try the next seed.
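///
/// # Examples
///
/// A quick sketch of the three outcomes (these mirror the unit tests at the
/// bottom of this file; marked `ignore` because `decide_join` is `pub(super)`
/// and not reachable from a doctest):
///
/// ```ignore
/// assert_eq!(decide_join(0, 7, None), JoinDecision::Admit);
/// assert_eq!(decide_join(7, 7, None), JoinDecision::Admit);
/// assert_eq!(
///     decide_join(1, 7, Some("10.0.0.1:9400".into())),
///     JoinDecision::Redirect { leader_addr: "10.0.0.1:9400".into() }
/// );
/// ```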
pub(super) fn decide_join(
    group0_leader: u64,
    self_node_id: u64,
    leader_addr: Option<String>,
) -> JoinDecision {
    if group0_leader == 0 || group0_leader == self_node_id {
        JoinDecision::Admit
    } else {
        JoinDecision::Redirect {
            leader_addr: leader_addr.unwrap_or_default(),
        }
    }
}

impl<A: CommitApplier, F: RequestForwarder> RaftRpcHandler for RaftLoop<A, F> {
    async fn handle_rpc(&self, rpc: RaftRpc) -> Result<RaftRpc> {
        match rpc {
            // Raft consensus RPCs — lock MultiRaft (sync, never across await).
            RaftRpc::AppendEntriesRequest(req) => {
                let mut mr = self.multi_raft.lock().unwrap_or_else(|p| p.into_inner());
                let resp = mr.handle_append_entries(&req)?;
                Ok(RaftRpc::AppendEntriesResponse(resp))
            }
            RaftRpc::RequestVoteRequest(req) => {
                let mut mr = self.multi_raft.lock().unwrap_or_else(|p| p.into_inner());
                let resp = mr.handle_request_vote(&req)?;
                Ok(RaftRpc::RequestVoteResponse(resp))
            }
            RaftRpc::InstallSnapshotRequest(req) => {
                let mut mr = self.multi_raft.lock().unwrap_or_else(|p| p.into_inner());
                let resp = mr.handle_install_snapshot(&req)?;
                Ok(RaftRpc::InstallSnapshotResponse(resp))
            }
            // Cluster join — full orchestration in `super::join`.
            RaftRpc::JoinRequest(req) => Ok(RaftRpc::JoinResponse(self.join_flow(req).await)),
            // Health check.
            RaftRpc::Ping(req) => {
                let topo_version = {
                    let topo = self.topology.read().unwrap_or_else(|p| p.into_inner());
                    topo.version()
                };
                Ok(health::handle_ping(self.node_id, topo_version, &req))
            }
            // Topology broadcast.
            RaftRpc::TopologyUpdate(update) => {
                let (updated, ack) =
                    health::handle_topology_update(self.node_id, &self.topology, &update);
                if updated {
                    // Register every member's address with the transport
                    // so raft RPCs to newly-learned peers actually have
                    // a destination. Without this, a node that joined
                    // early and then learned about a later joiner via
                    // broadcast would hold a stale peer set in its
                    // transport and AppendEntries to the new peer would
                    // fail until the circuit breaker opened permanently.
                    for node in &update.nodes {
                        if node.node_id == self.node_id {
                            continue;
                        }
                        match node.addr.parse::<std::net::SocketAddr>() {
                            Ok(addr) => self.transport.register_peer(node.node_id, addr),
                            Err(e) => tracing::warn!(
                                node_id = node.node_id,
                                addr = %node.addr,
                                error = %e,
                                "topology update contains unparseable peer address; skipping register_peer"
                            ),
                        }
                    }
                    // Persist the adopted topology so a subsequent
                    // restart reads the latest member set from catalog
                    // rather than the stale snapshot taken at join
                    // time. Persist only when a catalog is attached;
                    // failures are logged but never propagate — the
                    // next TopologyUpdate will retry.
                    if let Some(catalog) = self.catalog.as_ref() {
                        let snap = self
                            .topology
                            .read()
                            .unwrap_or_else(|p| p.into_inner())
                            .clone();
                        if let Err(e) = catalog.save_topology(&snap) {
                            tracing::warn!(error = %e, "failed to persist topology update to catalog");
                        }
                    }
                }
                Ok(ack)
            }
            // Query forwarding — execute locally via the RequestForwarder.
            RaftRpc::ForwardRequest(req) => {
                let resp = self.forwarder.execute_forwarded(req).await;
                Ok(RaftRpc::ForwardResponse(resp))
            }
            // Metadata-group proposal forwarding — apply locally if
            // we're the metadata leader, otherwise return a
            // NotLeader response with a leader hint so the
            // forwarder can chase the redirect.
            RaftRpc::MetadataProposeRequest(req) => {
                let resp = match self.propose_to_metadata_group(req.bytes) {
                    Ok(log_index) => crate::rpc_codec::MetadataProposeResponse::ok(log_index),
                    Err(crate::error::ClusterError::Raft(nodedb_raft::RaftError::NotLeader {
                        leader_hint,
                    })) => {
                        crate::rpc_codec::MetadataProposeResponse::err("not leader", leader_hint)
                    }
                    Err(e) => crate::rpc_codec::MetadataProposeResponse::err(e.to_string(), None),
                };
                Ok(RaftRpc::MetadataProposeResponse(resp))
            }
            // VShardEnvelope — dispatch to registered handler (Event Plane, etc.).
            RaftRpc::VShardEnvelope(bytes) => {
                if let Some(ref handler) = self.vshard_handler {
                    let response_bytes = handler(bytes).await?;
                    Ok(RaftRpc::VShardEnvelope(response_bytes))
                } else {
                    Err(ClusterError::Transport {
                        detail: "VShardEnvelope handler not configured".into(),
                    })
                }
            }
            other => Err(ClusterError::Transport {
                detail: format!("unexpected request type in RPC handler: {other:?}"),
            }),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::multi_raft::MultiRaft;
    use crate::routing::RoutingTable;
    use crate::topology::{ClusterTopology, NodeInfo, NodeState};
    use crate::transport::NexarTransport;
    use nodedb_raft::message::LogEntry;
    use std::sync::{Arc, RwLock};
    use std::time::{Duration, Instant};

    /// No-op applier for tests that don't care about state machine output.
    struct NoopApplier;
    impl CommitApplier for NoopApplier {
        fn apply_committed(&self, _group_id: u64, entries: &[LogEntry]) -> u64 {
            entries.last().map(|e| e.index).unwrap_or(0)
        }
    }

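    /// Test helper: binds a `NexarTransport` for `node_id` on an ephemeral
    /// loopback port (`127.0.0.1:0`) so concurrent tests never collide on
    /// addresses.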
    fn make_transport(node_id: u64) -> Arc<NexarTransport> {
        Arc::new(NexarTransport::new(node_id, "127.0.0.1:0".parse().unwrap()).unwrap())
    }

    #[tokio::test]
    async fn rpc_handler_routes_append_entries() {
        let dir = tempfile::tempdir().unwrap();
        let transport = make_transport(1);
        let rt = RoutingTable::uniform(1, &[1], 1);
        let mut mr = MultiRaft::new(1, rt, dir.path().to_path_buf());
        mr.add_group(0, vec![]).unwrap();

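        // Expire the election deadline so group 0 holds an election on the
        // first tick (same setup as the bootstrap-seed test below).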
        for node in mr.groups_mut().values_mut() {
            node.election_deadline_override(Instant::now() - Duration::from_millis(1));
        }

        let topo = Arc::new(RwLock::new(ClusterTopology::new()));
        let raft_loop = RaftLoop::new(mr, transport, topo, NoopApplier);

        raft_loop.do_tick();
        tokio::time::sleep(Duration::from_millis(20)).await;

        let req = RaftRpc::AppendEntriesRequest(nodedb_raft::AppendEntriesRequest {
            term: 99,
            leader_id: 2,
            prev_log_index: 0,
            prev_log_term: 0,
            entries: vec![],
            leader_commit: 0,
            group_id: 0,
        });

        let resp = raft_loop.handle_rpc(req).await.unwrap();
        match resp {
            RaftRpc::AppendEntriesResponse(r) => {
                assert!(r.success);
                assert_eq!(r.term, 99);
            }
            other => panic!("expected AppendEntriesResponse, got {other:?}"),
        }
    }

    #[tokio::test]
    async fn rpc_handler_routes_request_vote() {
        let dir = tempfile::tempdir().unwrap();
        let transport = make_transport(1);
        let rt = RoutingTable::uniform(1, &[1, 2, 3], 3);
        let mut mr = MultiRaft::new(1, rt, dir.path().to_path_buf());
        mr.add_group(0, vec![2, 3]).unwrap();

        let topo = Arc::new(RwLock::new(ClusterTopology::new()));
        let raft_loop = RaftLoop::new(mr, transport, topo, NoopApplier);

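        // No tick has run, so node 1 is still a fresh follower with an empty
        // log and no vote cast; candidate 2's term-1 request should therefore
        // be granted.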
        let req = RaftRpc::RequestVoteRequest(nodedb_raft::RequestVoteRequest {
            term: 1,
            candidate_id: 2,
            last_log_index: 0,
            last_log_term: 0,
            group_id: 0,
        });

        let resp = raft_loop.handle_rpc(req).await.unwrap();
        match resp {
            RaftRpc::RequestVoteResponse(r) => {
                assert!(r.vote_granted);
                assert_eq!(r.term, 1);
            }
            other => panic!("expected RequestVoteResponse, got {other:?}"),
        }
    }

    /// JoinRequest on a freshly-bootstrapped single-seed RaftLoop is
    /// admitted locally: this node is leader of every group, so
    /// `AddLearner` conf-changes are proposed and (because the groups
    /// are single-voter) commit instantly.
    #[tokio::test]
    async fn rpc_handler_accepts_join_on_bootstrap_seed() {
        let dir = tempfile::tempdir().unwrap();
        let transport = make_transport(1);
        let rt = RoutingTable::uniform(2, &[1], 1);
        let mut mr = MultiRaft::new(1, rt, dir.path().to_path_buf());
        mr.add_group(0, vec![]).unwrap();
        mr.add_group(1, vec![]).unwrap();
        // Force immediate election so both groups reach Leader before
        // the join flow proposes AddLearner.
        for node in mr.groups_mut().values_mut() {
            node.election_deadline_override(Instant::now() - Duration::from_millis(1));
        }

        let mut topology = ClusterTopology::new();
        topology.add_node(NodeInfo::new(
            1,
            "127.0.0.1:9400".parse().unwrap(),
            NodeState::Active,
        ));
        let topo = Arc::new(RwLock::new(topology));

        let raft_loop = RaftLoop::new(mr, transport, topo.clone(), NoopApplier);
        raft_loop.do_tick();
        tokio::time::sleep(Duration::from_millis(20)).await;

        let req = RaftRpc::JoinRequest(crate::rpc_codec::JoinRequest {
            node_id: 2,
            listen_addr: "127.0.0.1:9401".into(),
            wire_version: crate::topology::CLUSTER_WIRE_FORMAT_VERSION,
        });

        let resp = raft_loop.handle_rpc(req).await.unwrap();
        match resp {
            RaftRpc::JoinResponse(r) => {
                assert!(
                    r.success,
                    "join should succeed on bootstrap seed: {}",
                    r.error
                );
                assert_eq!(r.nodes.len(), 2);
                assert_eq!(r.groups.len(), 2);
                assert_eq!(r.vshard_to_group.len(), 1024);
                // The new node should appear as a learner on every group,
                // not as a voter — voter promotion happens asynchronously
                // via the tick loop's promotion phase.
                for g in &r.groups {
                    assert!(
                        g.learners.contains(&2),
                        "expected node 2 as learner in group {}, got learners={:?} members={:?}",
                        g.group_id,
                        g.learners,
                        g.members
                    );
                }
            }
            other => panic!("expected JoinResponse, got {other:?}"),
        }

        let topo_guard = topo.read().unwrap();
        assert_eq!(topo_guard.node_count(), 2);
        assert!(topo_guard.contains(2));
    }

    #[test]
    fn decide_join_self_leader_admits() {
        assert_eq!(
            decide_join(7, 7, Some("10.0.0.7:9400".into())),
            JoinDecision::Admit
        );
    }

    #[test]
    fn decide_join_no_leader_yet_admits() {
        assert_eq!(decide_join(0, 7, None), JoinDecision::Admit);
    }

    #[test]
    fn decide_join_other_leader_redirects() {
        assert_eq!(
            decide_join(1, 7, Some("10.0.0.1:9400".into())),
            JoinDecision::Redirect {
                leader_addr: "10.0.0.1:9400".into()
            }
        );
    }

    #[test]
    fn decide_join_other_leader_unknown_addr_still_redirects() {
        assert_eq!(
            decide_join(1, 7, None),
            JoinDecision::Redirect {
                leader_addr: String::new()
            }
        );
    }
}