nodedb_cluster/raft_loop/
handle_rpc.rs1use crate::error::{ClusterError, Result};
9use crate::forward::RequestForwarder;
10use crate::health;
11use crate::rpc_codec::RaftRpc;
12use crate::transport::RaftRpcHandler;
13
14use super::loop_core::{CommitApplier, RaftLoop};
15
/// Raft group id reserved for cluster-wide topology coordination ("group 0").
/// NOTE(review): join admission appears keyed off this group's leader (see
/// `decide_join`'s `group0_leader` parameter) — confirm against callers.
pub(super) const TOPOLOGY_GROUP_ID: u64 = 0;
23
/// Outcome of evaluating an incoming join request against the current
/// group-0 leadership (computed by `decide_join`).
#[derive(Debug, PartialEq, Eq)]
pub(super) enum JoinDecision {
    /// Admit the joiner locally: either this node leads group 0, or no
    /// group-0 leader is known yet (bootstrap).
    Admit,
    /// Another node leads group 0; the joiner should retry against
    /// `leader_addr` (empty string when the leader's address is unknown).
    Redirect { leader_addr: String },
}
37
38pub(super) fn decide_join(
51 group0_leader: u64,
52 self_node_id: u64,
53 leader_addr: Option<String>,
54) -> JoinDecision {
55 if group0_leader == 0 || group0_leader == self_node_id {
56 JoinDecision::Admit
57 } else {
58 JoinDecision::Redirect {
59 leader_addr: leader_addr.unwrap_or_default(),
60 }
61 }
62}
63
impl<A: CommitApplier, F: RequestForwarder> RaftRpcHandler for RaftLoop<A, F> {
    /// Dispatch a single inbound cluster RPC to the matching subsystem and
    /// build the corresponding response variant.
    ///
    /// Raft protocol traffic (append-entries / request-vote / snapshot) is
    /// served under the `multi_raft` mutex. A poisoned lock is recovered via
    /// `PoisonError::into_inner` so one panicked holder cannot permanently
    /// wedge the RPC path.
    ///
    /// # Errors
    /// Propagates raft-handler and vshard-handler errors, and returns a
    /// `Transport` error for RPC variants this handler never expects to
    /// receive as requests (e.g. response variants).
    async fn handle_rpc(&self, rpc: RaftRpc) -> Result<RaftRpc> {
        match rpc {
            RaftRpc::AppendEntriesRequest(req) => {
                // Raft log replication: lock, handle one message, respond.
                let mut mr = self.multi_raft.lock().unwrap_or_else(|p| p.into_inner());
                let resp = mr.handle_append_entries(&req)?;
                Ok(RaftRpc::AppendEntriesResponse(resp))
            }
            RaftRpc::RequestVoteRequest(req) => {
                // Leader-election vote handling.
                let mut mr = self.multi_raft.lock().unwrap_or_else(|p| p.into_inner());
                let resp = mr.handle_request_vote(&req)?;
                Ok(RaftRpc::RequestVoteResponse(resp))
            }
            RaftRpc::InstallSnapshotRequest(req) => {
                // Snapshot transfer to a lagging follower.
                let mut mr = self.multi_raft.lock().unwrap_or_else(|p| p.into_inner());
                let resp = mr.handle_install_snapshot(&req)?;
                Ok(RaftRpc::InstallSnapshotResponse(resp))
            }
            // Cluster membership: the full join handshake lives in `join_flow`.
            RaftRpc::JoinRequest(req) => Ok(RaftRpc::JoinResponse(self.join_flow(req).await)),
            RaftRpc::Ping(req) => {
                // Health probe. Read the topology version in a tight scope so
                // the read guard is dropped before the reply is built.
                let topo_version = {
                    let topo = self.topology.read().unwrap_or_else(|p| p.into_inner());
                    topo.version()
                };
                Ok(health::handle_ping(self.node_id, topo_version, &req))
            }
            RaftRpc::TopologyUpdate(update) => {
                // Apply the pushed topology; only on an actual change do we
                // refresh transport peers and persist the new snapshot.
                let (updated, ack) =
                    health::handle_topology_update(self.node_id, &self.topology, &update);
                if updated {
                    for node in &update.nodes {
                        // No transport entry is needed for ourselves.
                        if node.node_id == self.node_id {
                            continue;
                        }
                        // A bad address only skips that one peer; the rest of
                        // the update still takes effect.
                        match node.addr.parse::<std::net::SocketAddr>() {
                            Ok(addr) => self.transport.register_peer(node.node_id, addr),
                            Err(e) => tracing::warn!(
                                node_id = node.node_id,
                                addr = %node.addr,
                                error = %e,
                                "topology update contains unparseable peer address; skipping register_peer"
                            ),
                        }
                    }
                    // Best-effort persistence: a save failure is logged but
                    // never surfaced to the sender.
                    if let Some(catalog) = self.catalog.as_ref() {
                        let snap = self
                            .topology
                            .read()
                            .unwrap_or_else(|p| p.into_inner())
                            .clone();
                        if let Err(e) = catalog.save_topology(&snap) {
                            tracing::warn!(error = %e, "failed to persist topology update to catalog");
                        }
                    }
                }
                Ok(ack)
            }
            RaftRpc::ForwardRequest(req) => {
                // Request relayed from a non-leader node; delegate to the
                // configured forwarder.
                let resp = self.forwarder.execute_forwarded(req).await;
                Ok(RaftRpc::ForwardResponse(resp))
            }
            RaftRpc::MetadataProposeRequest(req) => {
                // Propose raw bytes to the metadata group; translate NotLeader
                // into an error response carrying the leader hint so the
                // caller can redirect.
                let resp = match self.propose_to_metadata_group(req.bytes) {
                    Ok(log_index) => crate::rpc_codec::MetadataProposeResponse::ok(log_index),
                    Err(crate::error::ClusterError::Raft(nodedb_raft::RaftError::NotLeader {
                        leader_hint,
                    })) => {
                        crate::rpc_codec::MetadataProposeResponse::err("not leader", leader_hint)
                    }
                    Err(e) => crate::rpc_codec::MetadataProposeResponse::err(e.to_string(), None),
                };
                Ok(RaftRpc::MetadataProposeResponse(resp))
            }
            RaftRpc::VShardEnvelope(bytes) => {
                // Opaque vshard payload: hand it to the registered handler,
                // if any, and echo its response bytes back.
                if let Some(ref handler) = self.vshard_handler {
                    let response_bytes = handler(bytes).await?;
                    Ok(RaftRpc::VShardEnvelope(response_bytes))
                } else {
                    Err(ClusterError::Transport {
                        detail: "VShardEnvelope handler not configured".into(),
                    })
                }
            }
            // Anything else (response variants, unknown messages) is not a
            // valid request here.
            other => Err(ClusterError::Transport {
                detail: format!("unexpected request type in RPC handler: {other:?}"),
            }),
        }
    }
}
176
#[cfg(test)]
mod tests {
    use super::*;
    use crate::multi_raft::MultiRaft;
    use crate::routing::RoutingTable;
    use crate::topology::{ClusterTopology, NodeInfo, NodeState};
    use crate::transport::NexarTransport;
    use nodedb_raft::message::LogEntry;
    use std::sync::{Arc, RwLock};
    use std::time::{Duration, Instant};

    // Minimal applier: performs no real work, just reports the index of the
    // last committed entry (0 when the batch is empty).
    struct NoopApplier;
    impl CommitApplier for NoopApplier {
        fn apply_committed(&self, _group_id: u64, entries: &[LogEntry]) -> u64 {
            entries.last().map(|e| e.index).unwrap_or(0)
        }
    }

    // Binds to an ephemeral port (port 0) so parallel tests never collide
    // on addresses.
    fn make_transport(node_id: u64) -> Arc<NexarTransport> {
        Arc::new(NexarTransport::new(node_id, "127.0.0.1:0".parse().unwrap()).unwrap())
    }

    // AppendEntriesRequest must reach the raft group and produce a success
    // response echoing the (higher) leader term.
    #[tokio::test]
    async fn rpc_handler_routes_append_entries() {
        let dir = tempfile::tempdir().unwrap();
        let transport = make_transport(1);
        let rt = RoutingTable::uniform(1, &[1], 1);
        let mut mr = MultiRaft::new(1, rt, dir.path().to_path_buf());
        mr.add_group(0, vec![]).unwrap();

        // Expire the election deadline so the next tick triggers an
        // immediate election in the single-node group.
        for node in mr.groups_mut().values_mut() {
            node.election_deadline_override(Instant::now() - Duration::from_millis(1));
        }

        let topo = Arc::new(RwLock::new(ClusterTopology::new()));
        let raft_loop = RaftLoop::new(mr, transport, topo, NoopApplier);

        raft_loop.do_tick();
        tokio::time::sleep(Duration::from_millis(20)).await;

        // Term 99 exceeds the local term, so the node should step down and
        // accept the (empty) append from "leader" node 2.
        let req = RaftRpc::AppendEntriesRequest(nodedb_raft::AppendEntriesRequest {
            term: 99,
            leader_id: 2,
            prev_log_index: 0,
            prev_log_term: 0,
            entries: vec![],
            leader_commit: 0,
            group_id: 0,
        });

        let resp = raft_loop.handle_rpc(req).await.unwrap();
        match resp {
            RaftRpc::AppendEntriesResponse(r) => {
                assert!(r.success);
                assert_eq!(r.term, 99);
            }
            other => panic!("expected AppendEntriesResponse, got {other:?}"),
        }
    }

    // RequestVoteRequest must reach the raft group; a fresh follower with an
    // empty log grants the vote to candidate 2 at term 1.
    #[tokio::test]
    async fn rpc_handler_routes_request_vote() {
        let dir = tempfile::tempdir().unwrap();
        let transport = make_transport(1);
        let rt = RoutingTable::uniform(1, &[1, 2, 3], 3);
        let mut mr = MultiRaft::new(1, rt, dir.path().to_path_buf());
        mr.add_group(0, vec![2, 3]).unwrap();

        let topo = Arc::new(RwLock::new(ClusterTopology::new()));
        let raft_loop = RaftLoop::new(mr, transport, topo, NoopApplier);

        let req = RaftRpc::RequestVoteRequest(nodedb_raft::RequestVoteRequest {
            term: 1,
            candidate_id: 2,
            last_log_index: 0,
            last_log_term: 0,
            group_id: 0,
        });

        let resp = raft_loop.handle_rpc(req).await.unwrap();
        match resp {
            RaftRpc::RequestVoteResponse(r) => {
                assert!(r.vote_granted);
                assert_eq!(r.term, 1);
            }
            other => panic!("expected RequestVoteResponse, got {other:?}"),
        }
    }

    // A single seed node that leads all groups must admit a joiner: the
    // response carries the full topology/group map and the joiner is added
    // as a learner to every group.
    #[tokio::test]
    async fn rpc_handler_accepts_join_on_bootstrap_seed() {
        let dir = tempfile::tempdir().unwrap();
        let transport = make_transport(1);
        let rt = RoutingTable::uniform(2, &[1], 1);
        let mut mr = MultiRaft::new(1, rt, dir.path().to_path_buf());
        mr.add_group(0, vec![]).unwrap();
        mr.add_group(1, vec![]).unwrap();
        // Force immediate elections so node 1 leads both groups before the
        // join request arrives.
        for node in mr.groups_mut().values_mut() {
            node.election_deadline_override(Instant::now() - Duration::from_millis(1));
        }

        let mut topology = ClusterTopology::new();
        topology.add_node(NodeInfo::new(
            1,
            "127.0.0.1:9400".parse().unwrap(),
            NodeState::Active,
        ));
        let topo = Arc::new(RwLock::new(topology));

        let raft_loop = RaftLoop::new(mr, transport, topo.clone(), NoopApplier);
        raft_loop.do_tick();
        tokio::time::sleep(Duration::from_millis(20)).await;

        let req = RaftRpc::JoinRequest(crate::rpc_codec::JoinRequest {
            node_id: 2,
            listen_addr: "127.0.0.1:9401".into(),
            wire_version: crate::topology::CLUSTER_WIRE_FORMAT_VERSION,
        });

        let resp = raft_loop.handle_rpc(req).await.unwrap();
        match resp {
            RaftRpc::JoinResponse(r) => {
                assert!(
                    r.success,
                    "join should succeed on bootstrap seed: {}",
                    r.error
                );
                assert_eq!(r.nodes.len(), 2);
                assert_eq!(r.groups.len(), 2);
                assert_eq!(r.vshard_to_group.len(), 1024);
                // The joiner starts as a learner (not a voting member) in
                // every group.
                for g in &r.groups {
                    assert!(
                        g.learners.contains(&2),
                        "expected node 2 as learner in group {}, got learners={:?} members={:?}",
                        g.group_id,
                        g.learners,
                        g.members
                    );
                }
            }
            other => panic!("expected JoinResponse, got {other:?}"),
        }

        // The seed's own topology view must also reflect the new node.
        let topo_guard = topo.read().unwrap();
        assert_eq!(topo_guard.node_count(), 2);
        assert!(topo_guard.contains(2));
    }

    // Leader is ourselves -> admit regardless of the advertised address.
    #[test]
    fn decide_join_self_leader_admits() {
        assert_eq!(
            decide_join(7, 7, Some("10.0.0.7:9400".into())),
            JoinDecision::Admit
        );
    }

    // No group-0 leader yet (bootstrap) -> admit.
    #[test]
    fn decide_join_no_leader_yet_admits() {
        assert_eq!(decide_join(0, 7, None), JoinDecision::Admit);
    }

    // A different leader with a known address -> redirect there.
    #[test]
    fn decide_join_other_leader_redirects() {
        assert_eq!(
            decide_join(1, 7, Some("10.0.0.1:9400".into())),
            JoinDecision::Redirect {
                leader_addr: "10.0.0.1:9400".into()
            }
        );
    }

    // A different leader with an unknown address still redirects, with an
    // empty address string.
    #[test]
    fn decide_join_other_leader_unknown_addr_still_redirects() {
        assert_eq!(
            decide_join(1, 7, None),
            JoinDecision::Redirect {
                leader_addr: String::new()
            }
        );
    }
}