1use crate::action::{Action, StatusActions};
10use crate::connection_mode::ConnectionMode;
11use ant_bootstrap::InitialPeersConfig;
12use ant_evm::{EvmNetwork, RewardsAddress};
13use ant_node_manager::{VerbosityLevel, add_services::config::PortRange};
14use ant_releases::{self, AntReleaseRepoActions, ReleaseType};
15use ant_service_management::{NodeRegistryManager, ServiceStatus};
16use color_eyre::Result;
17use color_eyre::eyre::eyre;
18use service_manager::RestartPolicy;
19use std::{path::PathBuf, str::FromStr};
20use tokio::runtime::Builder;
21use tokio::sync::mpsc::{self, UnboundedSender};
22use tokio::task::LocalSet;
23
/// Upper bound of the node port search range (top of the u16 port space).
pub const PORT_MAX: u32 = 65535;
/// Lower bound of the node port search range; ports below 1024 are
/// conventionally reserved for privileged services.
pub const PORT_MIN: u32 = 1024;

/// Maximum attempts made in `add_nodes` before giving up and reporting
/// `ErrorScalingUpNodes` to the UI.
const NODE_ADD_MAX_RETRIES: u32 = 5;

/// Interval passed to `node::upgrade` in `upgrade_nodes`
/// (presumably milliseconds — TODO confirm against the node-manager API).
pub const FIXED_INTERVAL: u64 = 60_000;
/// Connection timeout passed to `node::start` and
/// `node::maintain_n_running_nodes` (presumably seconds — TODO confirm).
pub const CONNECTION_TIMEOUT_START: u64 = 120;

/// Sentinel service name used in `StartNodesCompleted` when the action
/// applies to all nodes rather than a single service.
pub const NODES_ALL: &str = "NODES_ALL";
33
/// Work items accepted by the node-management background thread.
///
/// Each variant carries the channel (directly or inside its args struct) used
/// to report progress/errors back to the UI as [`Action`]s.
#[derive(Debug)]
pub enum NodeManagementTask {
    /// Ensure the configured number of nodes are running (scale up or down).
    MaintainNodes {
        args: MaintainNodesArgs,
    },
    /// Reset (wipe) all nodes, optionally starting fresh nodes afterwards.
    ResetNodes {
        start_nodes_after_reset: bool,
        action_sender: UnboundedSender<Action>,
    },
    /// Stop the named services.
    StopNodes {
        services: Vec<String>,
        action_sender: UnboundedSender<Action>,
    },
    /// Upgrade nodes to a new antnode version.
    UpgradeNodes {
        args: UpgradeNodesArgs,
    },
    /// Add a single node (reuses the maintain-args shape).
    AddNode {
        args: MaintainNodesArgs,
    },
    /// Stop and remove the named services.
    RemoveNodes {
        services: Vec<String>,
        action_sender: UnboundedSender<Action>,
    },
    /// Start the named services.
    StartNode {
        services: Vec<String>,
        action_sender: UnboundedSender<Action>,
    },
}
62
/// Cheap-to-clone handle for submitting [`NodeManagementTask`]s to the
/// dedicated node-management thread created by [`NodeManagement::new`].
#[derive(Clone)]
pub struct NodeManagement {
    // Sender half of the task queue; the receiver lives on the worker thread.
    task_sender: mpsc::UnboundedSender<NodeManagementTask>,
}
67
impl NodeManagement {
    /// Creates the handle and spawns a dedicated OS thread running a
    /// current-thread Tokio runtime with a [`LocalSet`].
    ///
    /// Tasks are executed via `spawn_local`, so the futures involved do not
    /// need to be `Send`. The receive loop handles one task at a time, in the
    /// order they were sent.
    ///
    /// # Errors
    ///
    /// Returns an error if the Tokio runtime cannot be built.
    pub fn new(node_registry: NodeRegistryManager) -> Result<Self> {
        let (send, mut recv) = mpsc::unbounded_channel();

        let rt = Builder::new_current_thread().enable_all().build()?;

        std::thread::spawn(move || {
            let local = LocalSet::new();

            local.spawn_local(async move {
                // Loop ends (and the thread winds down) once every sender is
                // dropped and the channel closes.
                while let Some(new_task) = recv.recv().await {
                    match new_task {
                        NodeManagementTask::MaintainNodes { args } => {
                            maintain_n_running_nodes(args, node_registry.clone()).await;
                        }
                        NodeManagementTask::ResetNodes {
                            start_nodes_after_reset,
                            action_sender,
                        } => {
                            reset_nodes(
                                action_sender,
                                node_registry.clone(),
                                start_nodes_after_reset,
                            )
                            .await;
                        }
                        NodeManagementTask::StopNodes {
                            services,
                            action_sender,
                        } => {
                            stop_nodes(services, action_sender, node_registry.clone()).await;
                        }
                        NodeManagementTask::UpgradeNodes { args } => {
                            upgrade_nodes(args, node_registry.clone()).await
                        }
                        NodeManagementTask::RemoveNodes {
                            services,
                            action_sender,
                        } => remove_nodes(services, action_sender, node_registry.clone()).await,
                        NodeManagementTask::StartNode {
                            services,
                            action_sender,
                        } => start_nodes(services, action_sender, node_registry.clone()).await,
                        NodeManagementTask::AddNode { args } => {
                            add_node(args, node_registry.clone()).await
                        }
                    }
                }
            });

            // Drive the LocalSet (and with it the receive loop) to completion
            // on this thread.
            rt.block_on(local);
        });

        Ok(Self { task_sender: send })
    }

    /// Queues a task for the management thread.
    ///
    /// # Errors
    ///
    /// Returns an error if the worker's receive loop has shut down (channel
    /// closed).
    pub fn send_task(&self, task: NodeManagementTask) -> Result<()> {
        self.task_sender
            .send(task)
            .inspect_err(|err| error!("The node management local set is down {err:?}"))
            .map_err(|_| eyre!("Failed to send task to the node management local set"))?;
        Ok(())
    }
}
142
143async fn stop_nodes(
145 services: Vec<String>,
146 action_sender: UnboundedSender<Action>,
147 node_registry: NodeRegistryManager,
148) {
149 if let Err(err) = ant_node_manager::cmd::node::stop(
150 None,
151 node_registry.clone(),
152 vec![],
153 services.clone(),
154 VerbosityLevel::Minimal,
155 )
156 .await
157 {
158 error!("Error while stopping services {err:?}");
159 send_action(
160 action_sender,
161 Action::StatusActions(StatusActions::ErrorStoppingNodes {
162 services,
163 raw_error: err.to_string(),
164 }),
165 );
166 } else {
167 info!("Successfully stopped services");
168 for service in services {
169 send_action(
170 action_sender.clone(),
171 Action::StatusActions(StatusActions::StopNodesCompleted {
172 service_name: service,
173 all_nodes_data: node_registry.get_node_service_data().await,
174 is_nat_status_determined: node_registry.nat_status.read().await.is_some(),
175 }),
176 );
177 }
178 }
179}
180
/// Arguments for maintaining a target number of nodes (also reused when
/// adding a single node).
#[derive(Debug)]
pub struct MaintainNodesArgs {
    /// Channel used to report progress/errors back to the UI.
    pub action_sender: UnboundedSender<Action>,
    /// Custom antnode binary path, if any.
    pub antnode_path: Option<PathBuf>,
    /// Connection mode; drives NAT flags, relay, UPnP and custom-port choices.
    pub connection_mode: ConnectionMode,
    /// Target number of nodes to keep running.
    pub count: u16,
    /// Custom data directory for node data, if any.
    pub data_dir_path: Option<PathBuf>,
    pub network_id: Option<u8>,
    /// Owner name; an empty string is treated as "no owner" downstream.
    pub owner: String,
    pub init_peers_config: InitialPeersConfig,
    /// Only honoured when `connection_mode` is `CustomPorts`.
    pub port_range: Option<PortRange>,
    /// Rewards address as a string; parsed (and currently unwrapped) later.
    pub rewards_address: String,
    /// Whether to run NAT detection before touching nodes.
    pub run_nat_detection: bool,
}
195
196async fn maintain_n_running_nodes(args: MaintainNodesArgs, node_registry: NodeRegistryManager) {
198 debug!("Maintaining {} nodes", args.count);
199 if args.run_nat_detection {
200 run_nat_detection(&args.action_sender).await;
201 }
202
203 let config = prepare_node_config(&args);
204 debug_log_config(&config, &args);
205
206 let mut used_ports = get_used_ports(&node_registry).await;
207 let (mut current_port, max_port) = get_port_range(&config.custom_ports);
208
209 let mut non_removed_nodes = 0;
210 for node in node_registry.nodes.read().await.iter() {
211 let node = node.read().await;
212 if node.status != ServiceStatus::Removed {
213 non_removed_nodes += 1;
214 }
215 }
216 let nodes_to_add = args.count as i32 - non_removed_nodes;
217
218 if nodes_to_add <= 0 {
219 debug!("Scaling down nodes to {}", nodes_to_add);
220 scale_down_nodes(&config, args.count, node_registry.clone()).await;
221 } else {
222 debug!("Scaling up nodes to {}", nodes_to_add);
223 add_nodes(
224 &args.action_sender,
225 &config,
226 nodes_to_add,
227 &mut used_ports,
228 &mut current_port,
229 max_port,
230 node_registry.clone(),
231 )
232 .await;
233 }
234
235 debug!("Finished maintaining {} nodes", args.count);
236 send_action(
237 args.action_sender,
238 Action::StatusActions(StatusActions::StartNodesCompleted {
239 service_name: NODES_ALL.to_string(),
240 all_nodes_data: node_registry.get_node_service_data().await,
241 is_nat_status_determined: node_registry.nat_status.read().await.is_some(),
242 }),
243 );
244}
245
246async fn reset_nodes(
248 action_sender: UnboundedSender<Action>,
249 node_registry: NodeRegistryManager,
250 start_nodes_after_reset: bool,
251) {
252 if let Err(err) =
253 ant_node_manager::cmd::node::reset(true, node_registry.clone(), VerbosityLevel::Minimal)
254 .await
255 {
256 error!("Error while resetting services {err:?}");
257 send_action(
258 action_sender,
259 Action::StatusActions(StatusActions::ErrorResettingNodes {
260 raw_error: err.to_string(),
261 }),
262 );
263 } else {
264 info!("Successfully reset services");
265 send_action(
266 action_sender,
267 Action::StatusActions(StatusActions::ResetNodesCompleted {
268 trigger_start_node: start_nodes_after_reset,
269 all_nodes_data: node_registry.get_node_service_data().await,
270 is_nat_status_determined: node_registry.nat_status.read().await.is_some(),
271 }),
272 );
273 }
274}
275
/// Arguments for upgrading node services.
#[derive(Debug)]
pub struct UpgradeNodesArgs {
    /// Channel used to report progress/errors back to the UI.
    pub action_sender: UnboundedSender<Action>,
    // NOTE(review): `upgrade_nodes` currently passes a literal 0 instead of
    // this field — confirm whether that is intentional.
    pub connection_timeout_s: u64,
    /// If set, upgraded services are not restarted.
    pub do_not_start: bool,
    /// Custom antnode binary to upgrade to, if any.
    pub custom_bin_path: Option<PathBuf>,
    /// Force the upgrade even when already up to date.
    pub force: bool,
    // NOTE(review): `upgrade_nodes` passes the FIXED_INTERVAL constant instead
    // of this field — confirm whether that is intentional.
    pub fixed_interval: Option<u64>,
    pub peer_ids: Vec<String>,
    /// Extra environment variables forwarded to the upgraded services.
    pub provided_env_variables: Option<Vec<(String, String)>>,
    /// Names of the services to upgrade.
    pub service_names: Vec<String>,
    /// Custom download URL for the new binary, if any.
    pub url: Option<String>,
    /// Specific version to upgrade to, if any.
    pub version: Option<String>,
}
290
/// Stops the target services, then upgrades them, reporting errors from both
/// stages and `UpdateNodesCompleted` on success.
///
/// A failure while stopping is reported but does not abort the upgrade
/// attempt below.
async fn upgrade_nodes(args: UpgradeNodesArgs, node_registry: NodeRegistryManager) {
    if let Err(err) = ant_node_manager::cmd::node::stop(
        None,
        node_registry.clone(),
        vec![],
        args.service_names.clone(),
        VerbosityLevel::Minimal,
    )
    .await
    {
        error!("Error while stopping services {err:?}");
        send_action(
            args.action_sender.clone(),
            Action::StatusActions(StatusActions::ErrorUpdatingNodes {
                raw_error: err.to_string(),
            }),
        );
    }

    // NOTE(review): positional args — the leading 0 stands in for
    // `args.connection_timeout_s` and `Some(FIXED_INTERVAL)` for
    // `args.fixed_interval`; confirm both against the node-manager signature.
    if let Err(err) = ant_node_manager::cmd::node::upgrade(
        0, args.do_not_start,
        args.custom_bin_path,
        args.force,
        Some(FIXED_INTERVAL),
        node_registry.clone(),
        args.peer_ids,
        args.provided_env_variables,
        args.service_names,
        args.url,
        args.version,
        VerbosityLevel::Minimal,
    )
    .await
    {
        error!("Error while updating services {err:?}");
        send_action(
            args.action_sender,
            Action::StatusActions(StatusActions::ErrorUpdatingNodes {
                raw_error: err.to_string(),
            }),
        );
    } else {
        info!("Successfully updated services");
        send_action(
            args.action_sender,
            Action::StatusActions(StatusActions::UpdateNodesCompleted {
                all_nodes_data: node_registry.get_node_service_data().await,
                is_nat_status_determined: node_registry.nat_status.read().await.is_some(),
            }),
        );
    }
}
345
346async fn remove_nodes(
347 services: Vec<String>,
348 action_sender: UnboundedSender<Action>,
349 node_registry: NodeRegistryManager,
350) {
351 if let Err(err) = ant_node_manager::cmd::node::stop(
353 None,
354 node_registry.clone(),
355 vec![],
356 services.clone(),
357 VerbosityLevel::Minimal,
358 )
359 .await
360 {
361 error!("Error while stopping services {err:?}");
362 send_action(
363 action_sender.clone(),
364 Action::StatusActions(StatusActions::ErrorRemovingNodes {
365 services: services.clone(),
366 raw_error: err.to_string(),
367 }),
368 );
369 }
370
371 if let Err(err) = ant_node_manager::cmd::node::remove(
372 false,
373 vec![],
374 node_registry.clone(),
375 services.clone(),
376 VerbosityLevel::Minimal,
377 )
378 .await
379 {
380 error!("Error while removing services {err:?}");
381 send_action(
382 action_sender,
383 Action::StatusActions(StatusActions::ErrorRemovingNodes {
384 services,
385 raw_error: err.to_string(),
386 }),
387 );
388 } else {
389 info!("Successfully removed services {:?}", services);
390 for service in services {
391 send_action(
392 action_sender.clone(),
393 Action::StatusActions(StatusActions::RemoveNodesCompleted {
394 service_name: service,
395 all_nodes_data: node_registry.get_node_service_data().await,
396 is_nat_status_determined: node_registry.nat_status.read().await.is_some(),
397 }),
398 );
399 }
400 }
401}
402
403async fn add_node(args: MaintainNodesArgs, node_registry: NodeRegistryManager) {
404 debug!("Adding node");
405
406 if args.run_nat_detection {
407 run_nat_detection(&args.action_sender).await;
408 }
409
410 let config = prepare_node_config(&args);
411
412 let used_ports = get_used_ports(&node_registry).await;
413 let (mut current_port, max_port) = get_port_range(&config.custom_ports);
414
415 while used_ports.contains(¤t_port) && current_port <= max_port {
416 current_port += 1;
417 }
418
419 if current_port > max_port {
420 error!("Reached maximum port number. Unable to find an available port.");
421 send_action(
422 args.action_sender.clone(),
423 Action::StatusActions(StatusActions::ErrorAddingNodes {
424 raw_error: format!(
425 "When adding a new node we reached maximum port number ({max_port}).\nUnable to find an available port."
426 ),
427 }),
428 );
429 }
430
431 let port_range = Some(PortRange::Single(current_port));
432 match ant_node_manager::cmd::node::add(
433 false, false, config.auto_set_nat_flags,
436 Some(config.count),
437 config.data_dir_path,
438 true, None, None, None, None, None, None, None, None, None, port_range, node_registry.clone(),
450 config.init_peers_config.clone(),
451 config.relay, get_restart_policy(),
453 RewardsAddress::from_str(config.rewards_address.as_str()).unwrap(),
454 None, None, config.antnode_path.clone(), !config.upnp,
458 None, None, None, VerbosityLevel::Minimal,
462 false, )
464 .await
465 {
466 Err(err) => {
467 error!("Error while adding services {err:?}");
468 send_action(
469 args.action_sender,
470 Action::StatusActions(StatusActions::ErrorAddingNodes {
471 raw_error: err.to_string(),
472 }),
473 );
474 }
475 Ok(services) => {
476 info!("Successfully added services: {:?}", services);
477 for service in services {
478 send_action(
479 args.action_sender.clone(),
480 Action::StatusActions(StatusActions::AddNodesCompleted {
481 service_name: service,
482 all_nodes_data: node_registry.get_node_service_data().await,
483 is_nat_status_determined: node_registry.nat_status.read().await.is_some(),
484 }),
485 );
486 }
487 }
488 }
489}
490
491async fn start_nodes(
492 services: Vec<String>,
493 action_sender: UnboundedSender<Action>,
494 node_registry: NodeRegistryManager,
495) {
496 debug!("Starting node {:?}", services);
497 if let Err(err) = ant_node_manager::cmd::node::start(
498 CONNECTION_TIMEOUT_START,
499 None,
500 node_registry.clone(),
501 vec![],
502 services.clone(),
503 VerbosityLevel::Minimal,
504 )
505 .await
506 {
507 error!("Error while starting services {err:?}");
508 send_action(
509 action_sender,
510 Action::StatusActions(StatusActions::ErrorStartingNodes {
511 services,
512 raw_error: err.to_string(),
513 }),
514 );
515 } else {
516 info!("Successfully started services {:?}", services);
517 for service in services {
518 send_action(
519 action_sender.clone(),
520 Action::StatusActions(StatusActions::StartNodesCompleted {
521 service_name: service,
522 all_nodes_data: node_registry.get_node_service_data().await,
523 is_nat_status_determined: node_registry.nat_status.read().await.is_some(),
524 }),
525 );
526 }
527 }
528}
529
/// Restart policy applied to node services on Unix-like systems — restarts
/// on a successful exit (presumably so a cleanly-exiting node comes back up;
/// confirm against `service_manager` semantics).
#[cfg(unix)]
fn get_restart_policy() -> RestartPolicy {
    RestartPolicy::OnSuccess { delay_secs: None }
}
537
/// Restart policy applied to node services on Windows — restarts on failure
/// with no delay, retry cap, or reset window configured.
#[cfg(windows)]
fn get_restart_policy() -> RestartPolicy {
    RestartPolicy::OnFailure {
        delay_secs: None,
        max_retries: None,
        reset_after_secs: None,
    }
}
548
549fn send_action(action_sender: UnboundedSender<Action>, action: Action) {
550 if let Err(err) = action_sender.send(action) {
551 error!("Error while sending action: {err:?}");
552 }
553}
554
/// Internal config derived from [`MaintainNodesArgs`] by
/// `prepare_node_config` and consumed by the add/scale helpers.
struct NodeConfig {
    antnode_path: Option<PathBuf>,
    /// True when the connection mode is `Automatic`.
    auto_set_nat_flags: bool,
    /// Target node count.
    count: u16,
    /// Set only when the connection mode is `CustomPorts`.
    custom_ports: Option<PortRange>,
    data_dir_path: Option<PathBuf>,
    /// True when the connection mode is `HomeNetwork`.
    relay: bool,
    network_id: Option<u8>,
    /// `None` when the original owner string was empty.
    owner: Option<String>,
    init_peers_config: InitialPeersConfig,
    rewards_address: String,
    /// True when the connection mode is `UPnP`.
    upnp: bool,
}
568
569async fn run_nat_detection(action_sender: &UnboundedSender<Action>) {
571 info!("Running nat detection....");
572
573 if let Err(err) = action_sender.send(Action::StatusActions(StatusActions::NatDetectionStarted))
575 {
576 error!("Error while sending action: {err:?}");
577 }
578
579 let release_repo = <dyn AntReleaseRepoActions>::default_config();
580 let version = match release_repo
581 .get_latest_version(&ReleaseType::NatDetection)
582 .await
583 {
584 Ok(v) => {
585 info!("Using NAT detection version {}", v.to_string());
586 v.to_string()
587 }
588 Err(err) => {
589 info!("No NAT detection release found, using fallback version 0.1.0");
590 info!("Error: {err}");
591 "0.1.0".to_string()
592 }
593 };
594
595 if let Err(err) = ant_node_manager::cmd::nat_detection::run_nat_detection(
596 None,
597 true,
598 None,
599 None,
600 Some(version),
601 VerbosityLevel::Minimal,
602 )
603 .await
604 {
605 error!("Error while running nat detection {err:?}. Registering the error.");
606 if let Err(err) = action_sender.send(Action::StatusActions(
607 StatusActions::ErrorWhileRunningNatDetection,
608 )) {
609 error!("Error while sending action: {err:?}");
610 }
611 } else {
612 info!("Successfully ran nat detection.");
613 if let Err(err) = action_sender.send(Action::StatusActions(
614 StatusActions::SuccessfullyDetectedNatStatus,
615 )) {
616 error!("Error while sending action: {err:?}");
617 }
618 }
619}
620
621fn prepare_node_config(args: &MaintainNodesArgs) -> NodeConfig {
622 NodeConfig {
623 antnode_path: args.antnode_path.clone(),
624 auto_set_nat_flags: args.connection_mode == ConnectionMode::Automatic,
625 data_dir_path: args.data_dir_path.clone(),
626 count: args.count,
627 custom_ports: if args.connection_mode == ConnectionMode::CustomPorts {
628 args.port_range.clone()
629 } else {
630 None
631 },
632 owner: if args.owner.is_empty() {
633 None
634 } else {
635 Some(args.owner.clone())
636 },
637 relay: args.connection_mode == ConnectionMode::HomeNetwork,
638 network_id: args.network_id,
639 init_peers_config: args.init_peers_config.clone(),
640 rewards_address: args.rewards_address.clone(),
641 upnp: args.connection_mode == ConnectionMode::UPnP,
642 }
643}
644
/// Emits a debug-level dump of the effective node configuration before a
/// maintenance run.
fn debug_log_config(config: &NodeConfig, args: &MaintainNodesArgs) {
    debug!("************ STARTING NODE MAINTENANCE ************");
    debug!(
        "Maintaining {} running nodes with the following args:",
        config.count
    );
    debug!(
        " owner: {:?}, init_peers_config: {:?}, antnode_path: {:?}, network_id: {:?}",
        config.owner, config.init_peers_config, config.antnode_path, args.network_id
    );
    debug!(
        " data_dir_path: {:?}, connection_mode: {:?}",
        config.data_dir_path, args.connection_mode
    );
    debug!(
        " auto_set_nat_flags: {:?}, custom_ports: {:?}, upnp: {}, relay: {}",
        config.auto_set_nat_flags, config.custom_ports, config.upnp, config.relay
    );
}
665
666async fn get_used_ports(node_registry: &NodeRegistryManager) -> Vec<u16> {
668 let mut used_ports = Vec::new();
669 for node in node_registry.nodes.read().await.iter() {
670 let node = node.read().await;
671 if let Some(port) = node.node_port {
672 used_ports.push(port);
673 }
674 }
675 debug!("Currently used ports: {:?}", used_ports);
676 used_ports
677}
678
679fn get_port_range(custom_ports: &Option<PortRange>) -> (u16, u16) {
681 match custom_ports {
682 Some(PortRange::Single(port)) => (*port, *port),
683 Some(PortRange::Range(start, end)) => (*start, *end),
684 None => (PORT_MIN as u16, PORT_MAX as u16),
685 }
686}
687
/// Asks the node manager to keep exactly `count` nodes running, which removes
/// the surplus. Outcomes are only logged; no UI action is sent from here.
///
/// NOTE(review): long positional argument list — confirm ordering against the
/// `ant_node_manager::cmd::node::maintain_n_running_nodes` signature when
/// updating. The rewards-address parse panics on an invalid address, matching
/// the other call sites in this file.
async fn scale_down_nodes(config: &NodeConfig, count: u16, node_registry: NodeRegistryManager) {
    match ant_node_manager::cmd::node::maintain_n_running_nodes(
        false,
        false,
        config.auto_set_nat_flags,
        CONNECTION_TIMEOUT_START,
        count,
        config.data_dir_path.clone(),
        true,
        None,
        Some(EvmNetwork::default()),
        None,
        None,
        None,
        None,
        None,
        config.network_id,
        None,
        None, node_registry,
        config.init_peers_config.clone(),
        config.relay,
        RewardsAddress::from_str(config.rewards_address.as_str()).unwrap(),
        get_restart_policy(),
        None,
        None,
        config.antnode_path.clone(),
        None,
        !config.upnp,
        None,
        None,
        VerbosityLevel::Minimal,
        None,
        false,
    )
    .await
    {
        Ok(_) => {
            info!("Scaling down to {} nodes", count);
        }
        Err(err) => {
            error!("Error while scaling down to {} nodes: {err:?}", count);
        }
    }
}
734
/// Adds `nodes_to_add` nodes one at a time, scanning `current_port` upward
/// past `used_ports` for each, retrying up to `NODE_ADD_MAX_RETRIES` times on
/// failure. Reports `ErrorScalingUpNodes` to the UI when the port range or
/// the retry budget is exhausted.
///
/// `used_ports` and `current_port` are mutated so the caller observes which
/// ports were consumed.
async fn add_nodes(
    action_sender: &UnboundedSender<Action>,
    config: &NodeConfig,
    mut nodes_to_add: i32,
    used_ports: &mut Vec<u16>,
    current_port: &mut u16,
    max_port: u16,
    node_registry: NodeRegistryManager,
) {
    let mut retry_count = 0;

    while nodes_to_add > 0 && retry_count < NODE_ADD_MAX_RETRIES {
        // Skip ports already claimed by existing nodes.
        while used_ports.contains(current_port) && *current_port <= max_port {
            *current_port += 1;
        }

        if *current_port > max_port {
            // Port range exhausted — report and stop trying entirely.
            error!("Reached maximum port number. Unable to find an available port.");
            send_action(
                action_sender.clone(),
                Action::StatusActions(StatusActions::ErrorScalingUpNodes {
                    raw_error: format!(
                        "Reached maximum port number ({max_port}).\nUnable to find an available port."
                    ),
                }),
            );
            break;
        }

        // NOTE(review): long positional argument list — confirm ordering
        // against `ant_node_manager::cmd::node::maintain_n_running_nodes`
        // when updating.
        let port_range = Some(PortRange::Single(*current_port));
        match ant_node_manager::cmd::node::maintain_n_running_nodes(
            false,
            false,
            config.auto_set_nat_flags,
            CONNECTION_TIMEOUT_START,
            config.count,
            config.data_dir_path.clone(),
            true,
            None,
            Some(EvmNetwork::default()),
            None,
            None,
            None,
            None,
            None,
            config.network_id,
            None,
            port_range,
            node_registry.clone(),
            config.init_peers_config.clone(),
            config.relay,
            RewardsAddress::from_str(config.rewards_address.as_str()).unwrap(),
            get_restart_policy(),
            None,
            None,
            config.antnode_path.clone(),
            None,
            !config.upnp,
            None,
            None,
            VerbosityLevel::Minimal,
            None,
            false,
        )
        .await
        {
            Ok(_) => {
                info!("Successfully added a node on port {}", current_port);
                used_ports.push(*current_port);
                nodes_to_add -= 1;
                *current_port += 1;
                // A success resets the retry budget for the remaining nodes.
                retry_count = 0; }
            Err(err) => {
                // Port collisions just advance the port; the two "give up"
                // messages only fire once the retry budget is spent.
                // NOTE(review): `retry_count >= NODE_ADD_MAX_RETRIES` can only
                // hold here on the final allowed attempt — confirm these
                // branches are reachable as intended.
                if err.to_string().contains("is being used by another service") {
                    warn!(
                        "Port {} is being used, retrying with a different port. Attempt {}/{}",
                        current_port,
                        retry_count + 1,
                        NODE_ADD_MAX_RETRIES
                    );
                } else if err
                    .to_string()
                    .contains("Failed to add one or more services")
                    && retry_count >= NODE_ADD_MAX_RETRIES
                {
                    send_action(
                        action_sender.clone(),
                        Action::StatusActions(StatusActions::ErrorScalingUpNodes {
                            raw_error: "When trying to add a node, we failed.\n\
                            Maybe you ran out of disk space?\n\
                            Maybe you need to change the port range?"
                                .to_string(),
                        }),
                    );
                } else if err
                    .to_string()
                    .contains("contains a virus or potentially unwanted software")
                    && retry_count >= NODE_ADD_MAX_RETRIES
                {
                    send_action(
                        action_sender.clone(),
                        Action::StatusActions(StatusActions::ErrorScalingUpNodes {
                            raw_error: "When trying to add a node, we failed.\n\
                            You may be running an old version of antnode service?\n\
                            Did you whitelisted antnode and the launchpad?"
                                .to_string(),
                        }),
                    );
                } else {
                    error!("Range of ports to be used {:?}", *current_port..max_port);
                    error!("Error while adding node on port {}: {err:?}", current_port);
                }
                // Move on to the next port and burn one retry.
                *current_port += 1;
                retry_count += 1;
            }
        }
    }
    // Retry budget exhausted without placing all nodes — tell the UI.
    if retry_count >= NODE_ADD_MAX_RETRIES {
        send_action(
            action_sender.clone(),
            Action::StatusActions(StatusActions::ErrorScalingUpNodes {
                raw_error: format!(
                    "When trying to start a node, we reached the maximum amount of retries ({NODE_ADD_MAX_RETRIES}).\n\
                    Could this be a firewall blocking nodes starting or ports on your router already in use?"
                ),
            }),
        );
    }
}