sn_testnet_deploy/lib.rs

// Copyright (c) 2023, MaidSafe.
// All rights reserved.
//
// This SAFE Network Software is licensed under the BSD-3-Clause license.
// Please see the LICENSE file for more details.

pub mod ansible;
pub mod bootstrap;
pub mod clients;
pub mod deploy;
pub mod digital_ocean;
pub mod error;
pub mod funding;
pub mod infra;
pub mod inventory;
pub mod logs;
pub mod reserved_ip;
pub mod rpc_client;
pub mod s3;
pub mod safe;
pub mod setup;
pub mod ssh;
pub mod terraform;
pub mod upscale;

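/// Disk space each node is assumed to require; used when sizing attached volumes.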
const STORAGE_REQUIRED_PER_NODE: u16 = 7;

use crate::{
    ansible::{
        extra_vars::ExtraVarsDocBuilder,
        inventory::{cleanup_environment_inventory, AnsibleInventoryType},
        provisioning::AnsibleProvisioner,
        AnsibleRunner,
    },
    error::{Error, Result},
    inventory::{DeploymentInventory, VirtualMachine},
    rpc_client::RpcClient,
    s3::S3Repository,
    ssh::SshClient,
    terraform::TerraformRunner,
};
use ant_service_management::ServiceStatus;
use flate2::read::GzDecoder;
use indicatif::{ProgressBar, ProgressStyle};
use infra::{build_terraform_args, InfraRunOptions};
use log::{debug, trace};
use semver::Version;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::{
    fs::File,
    io::{BufRead, BufReader, BufWriter, Write},
    net::IpAddr,
    path::{Path, PathBuf},
    process::{Command, Stdio},
    str::FromStr,
    time::Duration,
};
use tar::Archive;

const ANSIBLE_DEFAULT_FORKS: usize = 50;

#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub enum DeploymentType {
    /// The deployment has been bootstrapped from an existing network.
    Bootstrap,
    /// Client deployment.
    Client,
    /// The deployment is a new network.
    #[default]
    New,
}

#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct AnvilNodeData {
    pub data_payments_address: String,
    pub deployer_wallet_private_key: String,
    pub payment_token_address: String,
    pub rpc_url: String,
}

impl std::fmt::Display for DeploymentType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            DeploymentType::Bootstrap => write!(f, "bootstrap"),
            DeploymentType::Client => write!(f, "clients"),
            DeploymentType::New => write!(f, "new"),
        }
    }
}

impl std::str::FromStr for DeploymentType {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "bootstrap" => Ok(DeploymentType::Bootstrap),
            "clients" => Ok(DeploymentType::Client),
            "new" => Ok(DeploymentType::New),
            _ => Err(format!("Invalid deployment type: {}", s)),
        }
    }
}

#[derive(Debug, Clone)]
pub enum NodeType {
    FullConePrivateNode,
    Generic,
    Genesis,
    PeerCache,
    SymmetricPrivateNode,
}

impl std::fmt::Display for NodeType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            NodeType::FullConePrivateNode => write!(f, "full-cone-private"),
            NodeType::Generic => write!(f, "generic"),
            NodeType::Genesis => write!(f, "genesis"),
            NodeType::PeerCache => write!(f, "peer-cache"),
            NodeType::SymmetricPrivateNode => write!(f, "symmetric-private"),
        }
    }
}

impl std::str::FromStr for NodeType {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "full-cone-private" => Ok(NodeType::FullConePrivateNode),
            "generic" => Ok(NodeType::Generic),
            "genesis" => Ok(NodeType::Genesis),
            "peer-cache" => Ok(NodeType::PeerCache),
            "symmetric-private" => Ok(NodeType::SymmetricPrivateNode),
            _ => Err(format!("Invalid node type: {}", s)),
        }
    }
}

impl NodeType {
    pub fn telegraf_role(&self) -> &'static str {
        match self {
            NodeType::FullConePrivateNode => "NAT_FULL_CONE_NODE",
            NodeType::Generic => "GENERIC_NODE",
            NodeType::Genesis => "GENESIS_NODE",
            NodeType::PeerCache => "PEER_CACHE_NODE",
            NodeType::SymmetricPrivateNode => "NAT_RANDOMIZED_NODE",
        }
    }

    pub fn to_ansible_inventory_type(&self) -> AnsibleInventoryType {
        match self {
            NodeType::FullConePrivateNode => AnsibleInventoryType::FullConePrivateNodes,
            NodeType::Generic => AnsibleInventoryType::Nodes,
            NodeType::Genesis => AnsibleInventoryType::Genesis,
            NodeType::PeerCache => AnsibleInventoryType::PeerCacheNodes,
            NodeType::SymmetricPrivateNode => AnsibleInventoryType::SymmetricPrivateNodes,
        }
    }
}

#[derive(Clone, Debug, Default, Eq, Serialize, Deserialize, PartialEq)]
pub enum EvmNetwork {
    #[default]
    Anvil,
    ArbitrumOne,
    ArbitrumSepoliaTest,
    Custom,
}

impl std::fmt::Display for EvmNetwork {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
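            // Note that Anvil is rendered as "evm-custom", the same string as `Custom`.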
            EvmNetwork::Anvil => write!(f, "evm-custom"),
            EvmNetwork::ArbitrumOne => write!(f, "evm-arbitrum-one"),
            EvmNetwork::ArbitrumSepoliaTest => write!(f, "evm-arbitrum-sepolia-test"),
            EvmNetwork::Custom => write!(f, "evm-custom"),
        }
    }
}

impl std::str::FromStr for EvmNetwork {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "anvil" => Ok(EvmNetwork::Anvil),
            "arbitrum-one" => Ok(EvmNetwork::ArbitrumOne),
            "arbitrum-sepolia-test" => Ok(EvmNetwork::ArbitrumSepoliaTest),
            "custom" => Ok(EvmNetwork::Custom),
            _ => Err(format!("Invalid EVM network type: {}", s)),
        }
    }
}

#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct EvmDetails {
    pub network: EvmNetwork,
    pub data_payments_address: Option<String>,
    pub payment_token_address: Option<String>,
    pub rpc_url: Option<String>,
}

#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct EnvironmentDetails {
    pub deployment_type: DeploymentType,
    pub environment_type: EnvironmentType,
    pub evm_details: EvmDetails,
    pub funding_wallet_address: Option<String>,
    pub network_id: Option<u8>,
    pub region: String,
    pub rewards_address: Option<String>,
}

#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub enum EnvironmentType {
    #[default]
    Development,
    Production,
    Staging,
}

impl EnvironmentType {
    pub fn get_tfvars_filenames(&self, name: &str, region: &str) -> Vec<String> {
        match self {
            EnvironmentType::Development => vec![
                "dev.tfvars".to_string(),
                format!("dev-images-{region}.tfvars"),
            ],
            EnvironmentType::Staging => vec![
                "staging.tfvars".to_string(),
                format!("staging-images-{region}.tfvars"),
            ],
            EnvironmentType::Production => vec![
                format!("{name}.tfvars"),
                format!("production-images-{region}.tfvars"),
            ],
        }
    }

    pub fn get_default_peer_cache_node_count(&self) -> u16 {
        match self {
            EnvironmentType::Development => 5,
            EnvironmentType::Production => 5,
            EnvironmentType::Staging => 5,
        }
    }

    pub fn get_default_node_count(&self) -> u16 {
        match self {
            EnvironmentType::Development => 25,
            EnvironmentType::Production => 25,
            EnvironmentType::Staging => 25,
        }
    }

    pub fn get_default_symmetric_private_node_count(&self) -> u16 {
        self.get_default_node_count()
    }

    pub fn get_default_full_cone_private_node_count(&self) -> u16 {
        self.get_default_node_count()
    }
}

impl std::fmt::Display for EnvironmentType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            EnvironmentType::Development => write!(f, "development"),
            EnvironmentType::Production => write!(f, "production"),
            EnvironmentType::Staging => write!(f, "staging"),
        }
    }
}

impl FromStr for EnvironmentType {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "development" => Ok(EnvironmentType::Development),
            "production" => Ok(EnvironmentType::Production),
            "staging" => Ok(EnvironmentType::Staging),
            _ => Err(Error::EnvironmentNameFromStringError(s.to_string())),
        }
    }
}

/// Specify the binary option for the deployment.
///
/// There are several binaries involved in the deployment:
/// * antnode
/// * antctl
/// * ant
///
/// The `ant` binary is only used for smoke testing the deployment, although we don't really do
/// that at the moment.
///
/// The options are to build from source, or to supply pre-built, versioned binaries, which will
/// be fetched from S3. Building from source adds significant time to the deployment.
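///
/// A minimal sketch of selecting pre-built binaries (the version numbers here are purely
/// illustrative):
/// ```ignore
/// use semver::Version;
///
/// let binaries = BinaryOption::Versioned {
///     ant_version: None,
///     antctl_version: Some(Version::new(0, 11, 0)),
///     antnode_version: Some(Version::new(0, 112, 0)),
/// };
/// binaries.print();
/// ```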
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum BinaryOption {
    /// Binaries will be built from source.
    BuildFromSource {
        /// A comma-separated list that will be passed to the `--features` argument.
        antnode_features: Option<String>,
        branch: String,
        repo_owner: String,
    },
    /// Pre-built, versioned binaries will be fetched from S3.
    Versioned {
        ant_version: Option<Version>,
        antctl_version: Option<Version>,
        antnode_version: Option<Version>,
    },
}

impl BinaryOption {
    pub fn print(&self) {
        match self {
            BinaryOption::BuildFromSource {
                antnode_features,
                branch,
                repo_owner,
            } => {
                println!("Source configuration:");
                println!("  Repository owner: {}", repo_owner);
                println!("  Branch: {}", branch);
                if let Some(features) = antnode_features {
                    println!("  Antnode features: {}", features);
                }
            }
            BinaryOption::Versioned {
                ant_version,
                antctl_version,
                antnode_version,
            } => {
                println!("Versioned binaries configuration:");
                if let Some(version) = ant_version {
                    println!("  ant version: {}", version);
                }
                if let Some(version) = antctl_version {
                    println!("  antctl version: {}", version);
                }
                if let Some(version) = antnode_version {
                    println!("  antnode version: {}", version);
                }
            }
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub enum CloudProvider {
    Aws,
    DigitalOcean,
}

impl std::fmt::Display for CloudProvider {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            CloudProvider::Aws => write!(f, "aws"),
            CloudProvider::DigitalOcean => write!(f, "digital-ocean"),
        }
    }
}

impl CloudProvider {
    pub fn get_ssh_user(&self) -> String {
        match self {
            CloudProvider::Aws => "ubuntu".to_string(),
            CloudProvider::DigitalOcean => "root".to_string(),
        }
    }
}

#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum LogFormat {
    Default,
    Json,
}

impl LogFormat {
    pub fn parse_from_str(val: &str) -> Result<Self> {
        match val {
            "default" => Ok(LogFormat::Default),
            "json" => Ok(LogFormat::Json),
            _ => Err(Error::LoggingConfiguration(
                "The only valid values for this argument are \"default\" or \"json\"".to_string(),
            )),
        }
    }

    pub fn as_str(&self) -> &'static str {
        match self {
            LogFormat::Default => "default",
            LogFormat::Json => "json",
        }
    }
}

#[derive(Clone)]
pub struct UpgradeOptions {
    pub ansible_verbose: bool,
    pub custom_inventory: Option<Vec<VirtualMachine>>,
    pub env_variables: Option<Vec<(String, String)>>,
    pub force: bool,
    pub forks: usize,
    pub interval: Duration,
    pub name: String,
    pub node_type: Option<NodeType>,
    pub pre_upgrade_delay: Option<u64>,
    pub provider: CloudProvider,
    pub version: Option<String>,
}

impl UpgradeOptions {
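    /// Render these options as an Ansible extra-vars document for the upgrade playbook.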
    pub fn get_ansible_vars(&self) -> String {
        let mut extra_vars = ExtraVarsDocBuilder::default();
        extra_vars.add_variable("interval", &self.interval.as_millis().to_string());
        if let Some(env_variables) = &self.env_variables {
            extra_vars.add_env_variable_list("env_variables", env_variables.clone());
        }
        if self.force {
            extra_vars.add_variable("force", &self.force.to_string());
        }
        if let Some(version) = &self.version {
            extra_vars.add_variable("antnode_version", version);
        }
        if let Some(pre_upgrade_delay) = &self.pre_upgrade_delay {
            extra_vars.add_variable("pre_upgrade_delay", &pre_upgrade_delay.to_string());
        }
        extra_vars.build()
    }
}

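/// Builder for [`TestnetDeployer`].
///
/// A minimal sketch of typical usage (the environment name is illustrative; `DO_PAT` and the
/// other environment variables read by `build` must be set):
/// ```ignore
/// let deployer = TestnetDeployBuilder::new()
///     .environment_name("my-testnet")
///     .provider(CloudProvider::DigitalOcean)
///     .build()?;
/// ```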
#[derive(Default)]
pub struct TestnetDeployBuilder {
    ansible_forks: Option<usize>,
    ansible_verbose_mode: bool,
    deployment_type: EnvironmentType,
    environment_name: String,
    provider: Option<CloudProvider>,
    region: Option<String>,
    ssh_secret_key_path: Option<PathBuf>,
    state_bucket_name: Option<String>,
    terraform_binary_path: Option<PathBuf>,
    vault_password_path: Option<PathBuf>,
    working_directory_path: Option<PathBuf>,
}

impl TestnetDeployBuilder {
    pub fn new() -> Self {
        Default::default()
    }

    pub fn ansible_verbose_mode(&mut self, ansible_verbose_mode: bool) -> &mut Self {
        self.ansible_verbose_mode = ansible_verbose_mode;
        self
    }

    pub fn ansible_forks(&mut self, ansible_forks: usize) -> &mut Self {
        self.ansible_forks = Some(ansible_forks);
        self
    }

    pub fn deployment_type(&mut self, deployment_type: EnvironmentType) -> &mut Self {
        self.deployment_type = deployment_type;
        self
    }

    pub fn environment_name(&mut self, name: &str) -> &mut Self {
        self.environment_name = name.to_string();
        self
    }

    pub fn provider(&mut self, provider: CloudProvider) -> &mut Self {
        self.provider = Some(provider);
        self
    }

    pub fn state_bucket_name(&mut self, state_bucket_name: String) -> &mut Self {
        self.state_bucket_name = Some(state_bucket_name);
        self
    }

    pub fn terraform_binary_path(&mut self, terraform_binary_path: PathBuf) -> &mut Self {
        self.terraform_binary_path = Some(terraform_binary_path);
        self
    }

    pub fn working_directory(&mut self, working_directory_path: PathBuf) -> &mut Self {
        self.working_directory_path = Some(working_directory_path);
        self
    }

    pub fn ssh_secret_key_path(&mut self, ssh_secret_key_path: PathBuf) -> &mut Self {
        self.ssh_secret_key_path = Some(ssh_secret_key_path);
        self
    }

    pub fn vault_password_path(&mut self, vault_password_path: PathBuf) -> &mut Self {
        self.vault_password_path = Some(vault_password_path);
        self
    }

    pub fn region(&mut self, region: String) -> &mut Self {
        self.region = Some(region);
        self
    }

    pub fn build(&self) -> Result<TestnetDeployer> {
        let provider = self.provider.unwrap_or(CloudProvider::DigitalOcean);
        match provider {
            CloudProvider::DigitalOcean => {
                let digital_ocean_pat = std::env::var("DO_PAT").map_err(|_| {
                    Error::CloudProviderCredentialsNotSupplied("DO_PAT".to_string())
                })?;
                // The DO_PAT variable is not actually read by either Terraform or Ansible.
                // Each tool uses a different variable, so instead we set each of those variables
                // to the value of DO_PAT. This means the user only needs to set one variable.
                std::env::set_var("DIGITALOCEAN_TOKEN", digital_ocean_pat.clone());
                std::env::set_var("DO_API_TOKEN", digital_ocean_pat);
            }
            _ => {
                return Err(Error::CloudProviderNotSupported(provider.to_string()));
            }
        }

        let state_bucket_name = match self.state_bucket_name {
            Some(ref bucket_name) => bucket_name.clone(),
            None => std::env::var("TERRAFORM_STATE_BUCKET_NAME")?,
        };

        let default_terraform_bin_path = PathBuf::from("terraform");
        let terraform_binary_path = self
            .terraform_binary_path
            .as_ref()
            .unwrap_or(&default_terraform_bin_path);

        let working_directory_path = match self.working_directory_path {
            Some(ref work_dir_path) => work_dir_path.clone(),
            None => std::env::current_dir()?.join("resources"),
        };

        let ssh_secret_key_path = match self.ssh_secret_key_path {
            Some(ref ssh_sk_path) => ssh_sk_path.clone(),
            None => PathBuf::from(std::env::var("SSH_KEY_PATH")?),
        };

        let vault_password_path = match self.vault_password_path {
            Some(ref vault_pw_path) => vault_pw_path.clone(),
            None => PathBuf::from(std::env::var("ANSIBLE_VAULT_PASSWORD_PATH")?),
        };

        let region = match self.region {
            Some(ref region) => region.clone(),
            None => "lon1".to_string(),
        };

        let terraform_runner = TerraformRunner::new(
            terraform_binary_path.to_path_buf(),
            working_directory_path
                .join("terraform")
                .join("testnet")
                .join(provider.to_string()),
            provider,
            &state_bucket_name,
        )?;
        let ansible_runner = AnsibleRunner::new(
            self.ansible_forks.unwrap_or(ANSIBLE_DEFAULT_FORKS),
            self.ansible_verbose_mode,
            &self.environment_name,
            provider,
            ssh_secret_key_path.clone(),
            vault_password_path,
            working_directory_path.join("ansible"),
        )?;
        let ssh_client = SshClient::new(ssh_secret_key_path);
        let ansible_provisioner =
            AnsibleProvisioner::new(ansible_runner, provider, ssh_client.clone());
        let rpc_client = RpcClient::new(
            PathBuf::from("/usr/local/bin/safenode_rpc_client"),
            working_directory_path.clone(),
        );

        // Remove any `safe` binary from a previous deployment. Otherwise you can end up with
        // mismatched binaries.
        let safe_path = working_directory_path.join("safe");
        if safe_path.exists() {
            std::fs::remove_file(safe_path)?;
        }

        let testnet = TestnetDeployer::new(
            ansible_provisioner,
            provider,
            self.deployment_type.clone(),
            &self.environment_name,
            rpc_client,
            S3Repository {},
            ssh_client,
            terraform_runner,
            working_directory_path,
            region,
        )?;

        Ok(testnet)
    }
}

#[derive(Clone)]
pub struct TestnetDeployer {
    pub ansible_provisioner: AnsibleProvisioner,
    pub cloud_provider: CloudProvider,
    pub deployment_type: EnvironmentType,
    pub environment_name: String,
    pub inventory_file_path: PathBuf,
    pub region: String,
    pub rpc_client: RpcClient,
    pub s3_repository: S3Repository,
    pub ssh_client: SshClient,
    pub terraform_runner: TerraformRunner,
    pub working_directory_path: PathBuf,
}

impl TestnetDeployer {
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        ansible_provisioner: AnsibleProvisioner,
        cloud_provider: CloudProvider,
        deployment_type: EnvironmentType,
        environment_name: &str,
        rpc_client: RpcClient,
        s3_repository: S3Repository,
        ssh_client: SshClient,
        terraform_runner: TerraformRunner,
        working_directory_path: PathBuf,
        region: String,
    ) -> Result<TestnetDeployer> {
        if environment_name.is_empty() {
            return Err(Error::EnvironmentNameRequired);
        }
        let inventory_file_path = working_directory_path
            .join("ansible")
            .join("inventory")
            .join("dev_inventory_digital_ocean.yml");
        Ok(TestnetDeployer {
            ansible_provisioner,
            cloud_provider,
            deployment_type,
            environment_name: environment_name.to_string(),
            inventory_file_path,
            region,
            rpc_client,
            ssh_client,
            s3_repository,
            terraform_runner,
            working_directory_path,
        })
    }

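    /// Initialize the environment: fail if logs from a previous testnet with the same name still
    /// exist, create the Terraform workspace if it does not already exist, and download the RPC
    /// client binary if it is not already present.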
    pub async fn init(&self) -> Result<()> {
        if self
            .s3_repository
            .folder_exists(
                "sn-testnet",
                &format!("testnet-logs/{}", self.environment_name),
            )
            .await?
        {
            return Err(Error::LogsForPreviousTestnetExist(
                self.environment_name.clone(),
            ));
        }

        self.terraform_runner.init()?;
        let workspaces = self.terraform_runner.workspace_list()?;
        if !workspaces.contains(&self.environment_name) {
            self.terraform_runner
                .workspace_new(&self.environment_name)?;
        } else {
            println!("Workspace {} already exists", self.environment_name);
        }

        let rpc_client_path = self.working_directory_path.join("safenode_rpc_client");
        if !rpc_client_path.is_file() {
            println!("Downloading the rpc client for safenode...");
            let archive_name = "safenode_rpc_client-latest-x86_64-unknown-linux-musl.tar.gz";
            get_and_extract_archive_from_s3(
                &self.s3_repository,
                "sn-node-rpc-client",
                archive_name,
                &self.working_directory_path,
            )
            .await?;
            #[cfg(unix)]
            {
                use std::os::unix::fs::PermissionsExt;
                let mut permissions = std::fs::metadata(&rpc_client_path)?.permissions();
                permissions.set_mode(0o755); // rwxr-xr-x
                std::fs::set_permissions(&rpc_client_path, permissions)?;
            }
        }

        Ok(())
    }

    pub fn plan(&self, options: &InfraRunOptions) -> Result<()> {
        println!("Selecting {} workspace...", options.name);
        self.terraform_runner.workspace_select(&options.name)?;

        let args = build_terraform_args(options)?;

        self.terraform_runner
            .plan(Some(args), options.tfvars_filenames.clone())?;
        Ok(())
    }

    pub fn start(
        &self,
        interval: Duration,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.start_nodes(
            &self.environment_name,
            interval,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }

    /// Get the status of all nodes in a network.
    ///
    /// First, a playbook runs `safenode-manager status` against all the machines, to get the
    /// current state of all the nodes. Then all the node registry files are retrieved and
    /// deserialized to a `NodeRegistry`, allowing us to output the status of each node on each VM.
    pub fn status(&self) -> Result<()> {
        self.ansible_provisioner.status()?;

        let peer_cache_node_registries = self
            .ansible_provisioner
            .get_node_registries(&AnsibleInventoryType::PeerCacheNodes)?;
        let generic_node_registries = self
            .ansible_provisioner
            .get_node_registries(&AnsibleInventoryType::Nodes)?;
        let symmetric_private_node_registries = self
            .ansible_provisioner
            .get_node_registries(&AnsibleInventoryType::SymmetricPrivateNodes)?;
        let full_cone_private_node_registries = self
            .ansible_provisioner
            .get_node_registries(&AnsibleInventoryType::FullConePrivateNodes)?;
        let genesis_node_registry = self
            .ansible_provisioner
            .get_node_registries(&AnsibleInventoryType::Genesis)?;

        peer_cache_node_registries.print();
        generic_node_registries.print();
        symmetric_private_node_registries.print();
        full_cone_private_node_registries.print();
        genesis_node_registry.print();

        let all_registries = [
            &peer_cache_node_registries,
            &generic_node_registries,
            &symmetric_private_node_registries,
            &full_cone_private_node_registries,
            &genesis_node_registry,
        ];

        let mut total_nodes = 0;
        let mut running_nodes = 0;
        let mut stopped_nodes = 0;
        let mut added_nodes = 0;
        let mut removed_nodes = 0;

        for (_, registry) in all_registries
            .iter()
            .flat_map(|r| r.retrieved_registries.iter())
        {
            for node in registry.nodes.iter() {
                total_nodes += 1;
                match node.status {
                    ServiceStatus::Running => running_nodes += 1,
                    ServiceStatus::Stopped => stopped_nodes += 1,
                    ServiceStatus::Added => added_nodes += 1,
                    ServiceStatus::Removed => removed_nodes += 1,
                }
            }
        }

        let peer_cache_hosts = peer_cache_node_registries.retrieved_registries.len();
        let generic_hosts = generic_node_registries.retrieved_registries.len();
        let symmetric_private_hosts = symmetric_private_node_registries.retrieved_registries.len();
        let full_cone_private_hosts = full_cone_private_node_registries.retrieved_registries.len();

        let peer_cache_nodes = peer_cache_node_registries
            .retrieved_registries
            .iter()
            .flat_map(|(_, n)| n.nodes.iter())
            .count();
        let generic_nodes = generic_node_registries
            .retrieved_registries
            .iter()
            .flat_map(|(_, n)| n.nodes.iter())
            .count();
        let symmetric_private_nodes = symmetric_private_node_registries
            .retrieved_registries
            .iter()
            .flat_map(|(_, n)| n.nodes.iter())
            .count();
        let full_cone_private_nodes = full_cone_private_node_registries
            .retrieved_registries
            .iter()
            .flat_map(|(_, n)| n.nodes.iter())
            .count();

        println!("-------");
        println!("Summary");
        println!("-------");
        println!(
            "Total peer cache nodes ({}x{}): {}",
            peer_cache_hosts,
            if peer_cache_hosts > 0 {
                peer_cache_nodes / peer_cache_hosts
            } else {
                0
            },
            peer_cache_nodes
        );
        println!(
            "Total generic nodes ({}x{}): {}",
            generic_hosts,
            if generic_hosts > 0 {
                generic_nodes / generic_hosts
            } else {
                0
            },
            generic_nodes
        );
        println!(
            "Total symmetric private nodes ({}x{}): {}",
            symmetric_private_hosts,
            if symmetric_private_hosts > 0 {
                symmetric_private_nodes / symmetric_private_hosts
            } else {
                0
            },
            symmetric_private_nodes
        );
        println!(
            "Total full cone private nodes ({}x{}): {}",
            full_cone_private_hosts,
            if full_cone_private_hosts > 0 {
                full_cone_private_nodes / full_cone_private_hosts
            } else {
                0
            },
            full_cone_private_nodes
        );
        println!("Total nodes: {}", total_nodes);
        println!("Running nodes: {}", running_nodes);
        println!("Stopped nodes: {}", stopped_nodes);
        println!("Added nodes: {}", added_nodes);
        println!("Removed nodes: {}", removed_nodes);

        Ok(())
    }

    pub fn cleanup_node_logs(&self, setup_cron: bool) -> Result<()> {
        self.ansible_provisioner.cleanup_node_logs(setup_cron)?;
        Ok(())
    }

    pub fn start_telegraf(
        &self,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.start_telegraf(
            &self.environment_name,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }

    pub fn stop(
        &self,
        interval: Duration,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
        delay: Option<u64>,
        service_names: Option<Vec<String>>,
    ) -> Result<()> {
        self.ansible_provisioner.stop_nodes(
            &self.environment_name,
            interval,
            node_type,
            custom_inventory,
            delay,
            service_names,
        )?;
        Ok(())
    }

    pub fn stop_telegraf(
        &self,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.stop_telegraf(
            &self.environment_name,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }

    pub fn upgrade(&self, options: UpgradeOptions) -> Result<()> {
        self.ansible_provisioner.upgrade_nodes(&options)?;
        Ok(())
    }

    pub fn upgrade_antctl(
        &self,
        version: Version,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.upgrade_antctl(
            &self.environment_name,
            &version,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }

    pub fn upgrade_geoip_telegraf(&self, name: &str) -> Result<()> {
        self.ansible_provisioner.upgrade_geoip_telegraf(name)?;
        Ok(())
    }

    pub fn upgrade_node_telegraf(&self, name: &str) -> Result<()> {
        self.ansible_provisioner.upgrade_node_telegraf(name)?;
        Ok(())
    }

    pub fn upgrade_client_telegraf(&self, name: &str) -> Result<()> {
        self.ansible_provisioner.upgrade_client_telegraf(name)?;
        Ok(())
    }

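    /// Tear down the environment: drain any remaining funds, destroy the infrastructure, and
    /// remove the Ansible inventory and the environment type record from S3.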
    pub async fn clean(&self) -> Result<()> {
        let environment_details =
            get_environment_details(&self.environment_name, &self.s3_repository)
                .await
                .inspect_err(|err| {
                    println!("Failed to get environment details: {err}. Continuing cleanup...");
                })
                .ok();
        if let Some(environment_details) = &environment_details {
            funding::drain_funds(&self.ansible_provisioner, environment_details).await?;
        }

        self.destroy_infra(environment_details).await?;

        cleanup_environment_inventory(
            &self.environment_name,
            &self
                .working_directory_path
                .join("ansible")
                .join("inventory"),
            None,
        )?;

        println!("Deleted Ansible inventory for {}", self.environment_name);

        if let Err(err) = self
            .s3_repository
            .delete_object("sn-environment-type", &self.environment_name)
            .await
        {
            println!("Failed to delete environment type: {err}. Continuing cleanup...");
        }
        Ok(())
    }

    async fn destroy_infra(&self, environment_details: Option<EnvironmentDetails>) -> Result<()> {
        infra::select_workspace(&self.terraform_runner, &self.environment_name)?;

        let options = InfraRunOptions::generate_existing(
            &self.environment_name,
            &self.region,
            &self.terraform_runner,
            environment_details.as_ref(),
        )
        .await?;

        let args = build_terraform_args(&options)?;
        let tfvars_filenames = if let Some(environment_details) = &environment_details {
            environment_details
                .environment_type
                .get_tfvars_filenames(&self.environment_name, &self.region)
        } else {
            vec![]
        };

        self.terraform_runner
            .destroy(Some(args), Some(tfvars_filenames))?;

        infra::delete_workspace(&self.terraform_runner, &self.environment_name)?;

        Ok(())
    }
}

//
// Shared Helpers
//

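/// Get a multiaddr for the genesis node, along with its public IP address.
///
/// The node registry on the genesis host is queried first for a node that was started with the
/// `--first` flag; if none is found, any node with a non-localhost `quic-v1` listen address is
/// used.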
pub fn get_genesis_multiaddr(
    ansible_runner: &AnsibleRunner,
    ssh_client: &SshClient,
) -> Result<(String, IpAddr)> {
    let genesis_inventory = ansible_runner.get_inventory(AnsibleInventoryType::Genesis, true)?;
    let genesis_ip = genesis_inventory[0].public_ip_addr;

    // The genesis host can be altered from its original state, in which case the node that was
    // started with the `--first` flag may no longer exist.
    // First attempt: try to find a node with first=true.
    let multiaddr = ssh_client
        .run_command(
            &genesis_ip,
            "root",
            "jq -r '.nodes[] | select(.initial_peers_config.first == true) | .listen_addr[] | select(contains(\"127.0.0.1\") | not) | select(contains(\"quic-v1\"))' /var/antctl/node_registry.json | head -n 1",
            false,
        )
        .map(|output| output.first().cloned())
        .unwrap_or_else(|err| {
            log::error!("Failed to find first node with quic-v1 protocol: {err:?}");
            None
        });

    // Second attempt: if the first attempt failed, see if any node is available.
    let multiaddr = match multiaddr {
        Some(addr) => addr,
        None => ssh_client
            .run_command(
                &genesis_ip,
                "root",
                "jq -r '.nodes[] | .listen_addr[] | select(contains(\"127.0.0.1\") | not) | select(contains(\"quic-v1\"))' /var/antctl/node_registry.json | head -n 1",
                false,
            )?
            .first()
            .cloned()
            .ok_or_else(|| Error::GenesisListenAddress)?,
    };

    Ok((multiaddr, genesis_ip))
}

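/// Retrieve the Anvil node configuration from the first EVM node in the environment.
///
/// The data is read from a CSV file on the host, whose four fields are the RPC URL, the payment
/// token address, the data payments address, and the deployer wallet private key. The read is
/// retried a few times before giving up.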
pub fn get_anvil_node_data(
    ansible_runner: &AnsibleRunner,
    ssh_client: &SshClient,
) -> Result<AnvilNodeData> {
    let evm_inventory = ansible_runner.get_inventory(AnsibleInventoryType::EvmNodes, true)?;
    if evm_inventory.is_empty() {
        return Err(Error::EvmNodeNotFound);
    }

    let evm_ip = evm_inventory[0].public_ip_addr;
    debug!("Retrieved IP address for EVM node: {evm_ip}");
    let csv_file_path = "/home/ant/.local/share/autonomi/evm_testnet_data.csv";

    const MAX_ATTEMPTS: u8 = 5;
    const RETRY_DELAY: Duration = Duration::from_secs(5);

    for attempt in 1..=MAX_ATTEMPTS {
        match ssh_client.run_command(&evm_ip, "ant", &format!("cat {}", csv_file_path), false) {
            Ok(output) => {
                if let Some(csv_contents) = output.first() {
                    let parts: Vec<&str> = csv_contents.split(',').collect();
                    if parts.len() != 4 {
                        return Err(Error::EvmTestnetDataParsingError(
                            "Expected 4 fields in the CSV".to_string(),
                        ));
                    }

                    let evm_testnet_data = AnvilNodeData {
                        rpc_url: parts[0].trim().to_string(),
                        payment_token_address: parts[1].trim().to_string(),
                        data_payments_address: parts[2].trim().to_string(),
                        deployer_wallet_private_key: parts[3].trim().to_string(),
                    };
                    return Ok(evm_testnet_data);
                }
            }
            Err(e) => {
                if attempt == MAX_ATTEMPTS {
                    return Err(e);
                }
                println!(
                    "Attempt {} failed to read EVM testnet data. Retrying in {} seconds...",
                    attempt,
                    RETRY_DELAY.as_secs()
                );
            }
        }
        std::thread::sleep(RETRY_DELAY);
    }

    Err(Error::EvmTestnetDataNotFound)
}

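/// Get a multiaddr from the first generic node, along with that node's public IP address.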
pub fn get_multiaddr(
    ansible_runner: &AnsibleRunner,
    ssh_client: &SshClient,
) -> Result<(String, IpAddr)> {
    let node_inventory = ansible_runner.get_inventory(AnsibleInventoryType::Nodes, true)?;
    // For upscaling a bootstrap deployment, we'd need to select one of the nodes that's already
    // provisioned. So just try the first one.
    let node_ip = node_inventory
        .iter()
        .find(|vm| vm.name.ends_with("-node-1"))
        .ok_or_else(|| Error::NodeAddressNotFound)?
        .public_ip_addr;

    debug!("Getting multiaddr from node {node_ip}");

    let multiaddr = ssh_client
        .run_command(
            &node_ip,
            "root",
            // Fetch the first multiaddr that does not contain the localhost address.
            "jq -r '.nodes[] | .listen_addr[] | select(contains(\"127.0.0.1\") | not)' /var/antctl/node_registry.json | head -n 1",
            false,
        )?
        .first()
        .cloned()
        .ok_or_else(|| Error::NodeAddressNotFound)?;

    // The node_ip is obviously inside the multiaddr, but it's just being returned as a
    // separate item for convenience.
    Ok((multiaddr, node_ip))
}

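/// Download a gzipped tar archive from S3 and extract it to the destination path.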
pub async fn get_and_extract_archive_from_s3(
    s3_repository: &S3Repository,
    bucket_name: &str,
    archive_bucket_path: &str,
    dest_path: &Path,
) -> Result<()> {
    // In this case, not using unwrap leads to having to provide a very trivial error variant that
    // doesn't seem very valuable.
    let archive_file_name = archive_bucket_path.split('/').next_back().unwrap();
    let archive_dest_path = dest_path.join(archive_file_name);
    s3_repository
        .download_object(bucket_name, archive_bucket_path, &archive_dest_path)
        .await?;
    extract_archive(&archive_dest_path, dest_path)?;
    Ok(())
}

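/// Extract a gzipped tar archive to the destination path, deleting the archive afterwards.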
pub fn extract_archive(archive_path: &Path, dest_path: &Path) -> Result<()> {
    let archive_file = File::open(archive_path)?;
    let decoder = GzDecoder::new(archive_file);
    let mut archive = Archive::new(decoder);
    let entries = archive.entries()?;
    for entry_result in entries {
        let mut entry = entry_result?;
        let extract_path = dest_path.join(entry.path()?);
        if entry.header().entry_type() == tar::EntryType::Directory {
            std::fs::create_dir_all(extract_path)?;
            continue;
        }
        let mut file = BufWriter::new(File::create(extract_path)?);
        std::io::copy(&mut entry, &mut file)?;
    }
    std::fs::remove_file(archive_path)?;
    Ok(())
}

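/// Run an external binary, echoing its stdout and stderr unless suppressed, and return the
/// combined output lines. Fails with `Error::ExternalCommandRunFailed` if the command exits with
/// a non-zero status.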
pub fn run_external_command(
    binary_path: PathBuf,
    working_directory_path: PathBuf,
    args: Vec<String>,
    suppress_stdout: bool,
    suppress_stderr: bool,
) -> Result<Vec<String>> {
    let mut command = Command::new(binary_path.clone());
    command.args(&args);
    command.stdout(Stdio::piped());
    command.stderr(Stdio::piped());
    command.current_dir(working_directory_path.clone());
    debug!("Running {binary_path:#?} with args {args:#?}");
    debug!("Working directory set to {working_directory_path:#?}");

    let mut child = command.spawn()?;
    let mut output_lines = Vec::new();

    if let Some(ref mut stdout) = child.stdout {
        let reader = BufReader::new(stdout);
        for line in reader.lines() {
            let line = line?;
            if !suppress_stdout {
                println!("{line}");
            }
            output_lines.push(line);
        }
    }

    if let Some(ref mut stderr) = child.stderr {
        let reader = BufReader::new(stderr);
        for line in reader.lines() {
            let line = line?;
            if !suppress_stderr {
                eprintln!("{line}");
            }
            output_lines.push(line);
        }
    }

    let output = child.wait()?;
    if !output.success() {
        // Using `unwrap` here avoids introducing another error variant, which seems excessive.
        let binary_path = binary_path.to_str().unwrap();
        return Err(Error::ExternalCommandRunFailed {
            binary: binary_path.to_string(),
            exit_status: output,
        });
    }

    Ok(output_lines)
}

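/// Check whether a binary is available on `PATH`. Splitting on `:` means a Unix-style `PATH` is
/// assumed.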
pub fn is_binary_on_path(binary_name: &str) -> bool {
    if let Ok(path) = std::env::var("PATH") {
        for dir in path.split(':') {
            let mut full_path = PathBuf::from(dir);
            full_path.push(binary_name);
            if full_path.exists() {
                return true;
            }
        }
    }
    false
}

pub fn get_wallet_directory() -> Result<PathBuf> {
    Ok(dirs_next::data_dir()
        .ok_or_else(|| Error::CouldNotRetrieveDataDirectory)?
        .join("safe")
        .join("client")
        .join("wallet"))
}

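/// Post a summary of the deployment inventory to Slack. The webhook URL is read from the
/// `SLACK_WEBHOOK_URL` environment variable.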
pub async fn notify_slack(inventory: DeploymentInventory) -> Result<()> {
    let webhook_url =
        std::env::var("SLACK_WEBHOOK_URL").map_err(|_| Error::SlackWebhookUrlNotSupplied)?;

    let mut message = String::new();
    message.push_str("*Testnet Details*\n");
    message.push_str(&format!("Name: {}\n", inventory.name));
    message.push_str(&format!("Node count: {}\n", inventory.peers().len()));
    message.push_str(&format!("Faucet address: {:?}\n", inventory.faucet_address));
    match inventory.binary_option {
        BinaryOption::BuildFromSource {
            ref repo_owner,
            ref branch,
            ..
        } => {
            message.push_str("*Branch Details*\n");
            message.push_str(&format!("Repo owner: {}\n", repo_owner));
            message.push_str(&format!("Branch: {}\n", branch));
        }
        BinaryOption::Versioned {
            ref ant_version,
            ref antctl_version,
            ref antnode_version,
        } => {
            message.push_str("*Version Details*\n");
            message.push_str(&format!(
                "ant version: {}\n",
                ant_version
                    .as_ref()
                    .map_or("None".to_string(), |v| v.to_string())
            ));
            message.push_str(&format!(
                "antnode version: {}\n",
                antnode_version
                    .as_ref()
                    .map_or("None".to_string(), |v| v.to_string())
            ));
            message.push_str(&format!(
                "antctl version: {}\n",
                antctl_version
                    .as_ref()
                    .map_or("None".to_string(), |v| v.to_string())
            ));
        }
    }

    message.push_str("*Sample Peers*\n");
    message.push_str("```\n");
    for peer in inventory.peers().iter().take(20) {
        message.push_str(&format!("{peer}\n"));
    }
    message.push_str("```\n");
    message.push_str("*Available Files*\n");
    message.push_str("```\n");
    for (addr, file_name) in inventory.uploaded_files.iter() {
        message.push_str(&format!("{}: {}\n", addr, file_name));
    }
    message.push_str("```\n");

    let payload = json!({
        "text": message,
    });
    reqwest::Client::new()
        .post(webhook_url)
        .json(&payload)
        .send()
        .await?;
    println!("{message}");
    println!("Posted notification to Slack");
    Ok(())
}

fn print_duration(duration: Duration) {
    let total_seconds = duration.as_secs();
    let minutes = total_seconds / 60;
    let seconds = total_seconds % 60;
    debug!("Time taken: {} minutes and {} seconds", minutes, seconds);
}

pub fn get_progress_bar(length: u64) -> Result<ProgressBar> {
    let progress_bar = ProgressBar::new(length);
    progress_bar.set_style(
        ProgressStyle::default_bar()
            .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len}")?
            .progress_chars("#>-"),
    );
    progress_bar.enable_steady_tick(Duration::from_millis(100));
    Ok(progress_bar)
}

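/// Fetch the environment details for an environment from S3, retrying the download, read, and
/// parse steps a few times before giving up.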
pub async fn get_environment_details(
    environment_name: &str,
    s3_repository: &S3Repository,
) -> Result<EnvironmentDetails> {
    let temp_file = tempfile::NamedTempFile::new()?;

    let max_retries = 3;
    let mut retries = 0;
    let env_details = loop {
        debug!("Downloading the environment details file for {environment_name} from S3");
        match s3_repository
            .download_object("sn-environment-type", environment_name, temp_file.path())
            .await
        {
            Ok(_) => {
                debug!("Downloaded the environment details file for {environment_name} from S3");
                let content = match std::fs::read_to_string(temp_file.path()) {
                    Ok(content) => content,
                    Err(err) => {
                        log::error!("Could not read the environment details file: {err:?}");
                        if retries < max_retries {
                            debug!("Retrying the read of the environment details file");
                            retries += 1;
                            continue;
                        } else {
                            return Err(Error::EnvironmentDetailsNotFound(
                                environment_name.to_string(),
                            ));
                        }
                    }
                };
                trace!("Content of the environment details file: {}", content);

                match serde_json::from_str(&content) {
                    Ok(environment_details) => break environment_details,
                    Err(err) => {
                        log::error!("Could not parse the environment details file: {err:?}");
                        if retries < max_retries {
                            debug!("Retrying the parse of the environment details file");
                            retries += 1;
                            continue;
                        } else {
                            return Err(Error::EnvironmentDetailsNotFound(
                                environment_name.to_string(),
                            ));
                        }
                    }
                }
            }
            Err(err) => {
                log::error!(
                    "Could not download the environment details file for {environment_name} from S3: {err:?}"
                );
                if retries < max_retries {
                    retries += 1;
                    continue;
                } else {
                    return Err(Error::EnvironmentDetailsNotFound(
                        environment_name.to_string(),
                    ));
                }
            }
        }
    };

    debug!("Fetched environment details: {env_details:?}");

    Ok(env_details)
}

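/// Serialize the environment details to JSON and upload them to S3, keyed by the environment
/// name.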
pub async fn write_environment_details(
    s3_repository: &S3Repository,
    environment_name: &str,
    environment_details: &EnvironmentDetails,
) -> Result<()> {
    let temp_dir = tempfile::tempdir()?;
    let path = temp_dir.path().join(environment_name);
    let mut file = File::create(&path)?;
    let json = serde_json::to_string(environment_details)?;
    file.write_all(json.as_bytes())?;
    s3_repository
        .upload_file("sn-environment-type", &path, true)
        .await?;
    Ok(())
}

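/// Calculate the size of each attached volume, assuming each VM has 7 attached volumes.
///
/// For example, with 25 nodes and `STORAGE_REQUIRED_PER_NODE` at 7, the total requirement is
/// 175, which split across 7 volumes gives a size of 25 per volume.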
pub fn calculate_size_per_attached_volume(node_count: u16) -> u16 {
    if node_count == 0 {
        return 0;
    }
    let total_volume_required = node_count * STORAGE_REQUIRED_PER_NODE;

    // Each VM has 7 attached volumes, so split the total requirement across them.
    (total_volume_required as f64 / 7.0).ceil() as u16
}

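/// Build the URL at which a host serves its bootstrap cache file.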
pub fn get_bootstrap_cache_url(ip_addr: &IpAddr) -> String {
    format!("http://{ip_addr}/bootstrap_cache.json")
}