sn_testnet_deploy/
lib.rs

1// Copyright (c) 2023, MaidSafe.
2// All rights reserved.
3//
4// This SAFE Network Software is licensed under the BSD-3-Clause license.
5// Please see the LICENSE file for more details.
6
7pub mod ansible;
8pub mod bootstrap;
9pub mod clients;
10pub mod deploy;
11pub mod digital_ocean;
12pub mod error;
13pub mod funding;
14pub mod infra;
15pub mod inventory;
16pub mod logs;
17pub mod reserved_ip;
18pub mod rpc_client;
19pub mod s3;
20pub mod safe;
21pub mod setup;
22pub mod ssh;
23pub mod symlinked_antnode;
24pub mod terraform;
25pub mod upscale;
26
27pub use symlinked_antnode::SymlinkedAntnodeDeployer;
28
/// Storage required per node. NOTE(review): units not stated here — presumably GB; confirm
/// against the infra provisioning code that consumes this constant.
const STORAGE_REQUIRED_PER_NODE: u16 = 7;
30
31use crate::{
32    ansible::{
33        extra_vars::ExtraVarsDocBuilder,
34        inventory::{cleanup_environment_inventory, AnsibleInventoryType},
35        provisioning::AnsibleProvisioner,
36        AnsibleRunner,
37    },
38    error::{Error, Result},
39    inventory::{DeploymentInventory, VirtualMachine},
40    rpc_client::RpcClient,
41    s3::S3Repository,
42    ssh::SshClient,
43    terraform::TerraformRunner,
44};
45use ant_service_management::ServiceStatus;
46use flate2::read::GzDecoder;
47use indicatif::{ProgressBar, ProgressStyle};
48use infra::{build_terraform_args, InfraRunOptions};
49use log::{debug, trace};
50use semver::Version;
51use serde::{Deserialize, Serialize};
52use serde_json::json;
53use std::{
54    fs::File,
55    io::{BufRead, BufReader, BufWriter, Write},
56    net::IpAddr,
57    path::{Path, PathBuf},
58    process::{Command, Stdio},
59    str::FromStr,
60    time::Duration,
61};
62use tar::Archive;
63
/// Default number of parallel forks for Ansible runs when the builder does not override it.
const ANSIBLE_DEFAULT_FORKS: usize = 50;
65
/// The kind of deployment an environment was created as.
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub enum DeploymentType {
    /// The deployment has been bootstrapped from an existing network.
    Bootstrap,
    /// Client deployment.
    Client,
    /// The deployment is a new network.
    #[default]
    New,
}
76
/// Details of a deployed Anvil (local EVM) node.
///
/// NOTE(review): field semantics inferred from names — confirm against the
/// provisioning code that populates this struct.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct AnvilNodeData {
    /// Address of the data payments contract.
    pub data_payments_address: String,
    /// Private key of the wallet used by the deployer.
    pub deployer_wallet_private_key: String,
    /// Address of the merkle payments contract.
    pub merkle_payments_address: String,
    /// Address of the payment token contract.
    pub payment_token_address: String,
    /// RPC endpoint of the EVM node.
    pub rpc_url: String,
}
85
86impl std::fmt::Display for DeploymentType {
87    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
88        match self {
89            DeploymentType::Bootstrap => write!(f, "bootstrap"),
90            DeploymentType::Client => write!(f, "clients"),
91            DeploymentType::New => write!(f, "new"),
92        }
93    }
94}
95
96impl std::str::FromStr for DeploymentType {
97    type Err = String;
98
99    fn from_str(s: &str) -> Result<Self, Self::Err> {
100        match s.to_lowercase().as_str() {
101            "bootstrap" => Ok(DeploymentType::Bootstrap),
102            "clients" => Ok(DeploymentType::Client),
103            "new" => Ok(DeploymentType::New),
104            _ => Err(format!("Invalid deployment type: {s}")),
105        }
106    }
107}
108
/// The categories of node VMs that make up a deployment.
#[derive(Debug, Clone)]
pub enum NodeType {
    /// Private node behind a full cone NAT.
    FullConePrivateNode,
    /// Private node behind a port-restricted cone NAT.
    PortRestrictedConePrivateNode,
    /// A generic node.
    Generic,
    /// The genesis node.
    Genesis,
    /// A peer cache node.
    PeerCache,
    /// Private node behind a symmetric NAT.
    SymmetricPrivateNode,
    /// Node using UPnP.
    Upnp,
}
119
120impl std::fmt::Display for NodeType {
121    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
122        match self {
123            NodeType::FullConePrivateNode => write!(f, "full-cone-private"),
124            NodeType::PortRestrictedConePrivateNode => write!(f, "port-restricted-cone-private"),
125            NodeType::Generic => write!(f, "generic"),
126            NodeType::Genesis => write!(f, "genesis"),
127            NodeType::PeerCache => write!(f, "peer-cache"),
128            NodeType::SymmetricPrivateNode => write!(f, "symmetric-private"),
129            NodeType::Upnp => write!(f, "upnp"),
130        }
131    }
132}
133
134impl std::str::FromStr for NodeType {
135    type Err = String;
136
137    fn from_str(s: &str) -> Result<Self, Self::Err> {
138        match s.to_lowercase().as_str() {
139            "full-cone-private" => Ok(NodeType::FullConePrivateNode),
140            "port-restricted-cone-private" => Ok(NodeType::PortRestrictedConePrivateNode),
141            "generic" => Ok(NodeType::Generic),
142            "genesis" => Ok(NodeType::Genesis),
143            "peer-cache" => Ok(NodeType::PeerCache),
144            "symmetric-private" => Ok(NodeType::SymmetricPrivateNode),
145            "upnp" => Ok(NodeType::Upnp),
146            _ => Err(format!("Invalid node type: {s}")),
147        }
148    }
149}
150
151impl NodeType {
152    pub fn telegraf_role(&self) -> &'static str {
153        match self {
154            NodeType::FullConePrivateNode => "NAT_STATIC_FULL_CONE_NODE",
155            NodeType::PortRestrictedConePrivateNode => "PORT_RESTRICTED_CONE_NODE",
156            NodeType::Generic => "GENERIC_NODE",
157            NodeType::Genesis => "GENESIS_NODE",
158            NodeType::PeerCache => "PEER_CACHE_NODE",
159            NodeType::SymmetricPrivateNode => "NAT_RANDOMIZED_NODE",
160            NodeType::Upnp => "UPNP_NODE",
161        }
162    }
163
164    pub fn to_ansible_inventory_type(&self) -> AnsibleInventoryType {
165        match self {
166            NodeType::FullConePrivateNode => AnsibleInventoryType::FullConePrivateNodes,
167            NodeType::PortRestrictedConePrivateNode => {
168                AnsibleInventoryType::PortRestrictedConePrivateNodes
169            }
170            NodeType::Generic => AnsibleInventoryType::Nodes,
171            NodeType::Genesis => AnsibleInventoryType::Genesis,
172            NodeType::PeerCache => AnsibleInventoryType::PeerCacheNodes,
173            NodeType::SymmetricPrivateNode => AnsibleInventoryType::SymmetricPrivateNodes,
174            NodeType::Upnp => AnsibleInventoryType::Upnp,
175        }
176    }
177}
178
/// The EVM network a deployment is configured against.
#[derive(Clone, Debug, Default, Eq, Serialize, Deserialize, PartialEq)]
pub enum EvmNetwork {
    /// A locally run Anvil node.
    #[default]
    Anvil,
    /// The Arbitrum One network.
    ArbitrumOne,
    /// The Arbitrum Sepolia test network.
    ArbitrumSepoliaTest,
    /// A custom EVM network, configured via explicit contract/RPC details.
    Custom,
}
187
impl std::fmt::Display for EvmNetwork {
    /// Writes the network as an `evm-*` string.
    ///
    /// NOTE(review): `Anvil` and `Custom` both render as "evm-custom", and this
    /// output does not round-trip through `FromStr` (which parses "anvil",
    /// "custom", etc.). This looks intentional — an Anvil node being a form of
    /// custom network — but confirm with the consumers of this string before
    /// relying on it to distinguish the two variants.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            EvmNetwork::Anvil => write!(f, "evm-custom"),
            EvmNetwork::ArbitrumOne => write!(f, "evm-arbitrum-one"),
            EvmNetwork::ArbitrumSepoliaTest => write!(f, "evm-arbitrum-sepolia-test"),
            EvmNetwork::Custom => write!(f, "evm-custom"),
        }
    }
}
198
199impl std::str::FromStr for EvmNetwork {
200    type Err = String;
201
202    fn from_str(s: &str) -> Result<Self, Self::Err> {
203        match s.to_lowercase().as_str() {
204            "anvil" => Ok(EvmNetwork::Anvil),
205            "arbitrum-one" => Ok(EvmNetwork::ArbitrumOne),
206            "arbitrum-sepolia-test" => Ok(EvmNetwork::ArbitrumSepoliaTest),
207            "custom" => Ok(EvmNetwork::Custom),
208            _ => Err(format!("Invalid EVM network type: {s}")),
209        }
210    }
211}
212
/// EVM configuration for a deployment.
///
/// The optional fields carry explicit contract addresses and an RPC endpoint;
/// they are `None` when the chosen network does not require them.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct EvmDetails {
    pub network: EvmNetwork,
    pub data_payments_address: Option<String>,
    pub merkle_payments_address: Option<String>,
    pub payment_token_address: Option<String>,
    pub rpc_url: Option<String>,
}
221
/// Metadata describing a deployed environment.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct EnvironmentDetails {
    pub deployment_type: DeploymentType,
    pub environment_type: EnvironmentType,
    pub evm_details: EvmDetails,
    pub funding_wallet_address: Option<String>,
    /// Custom network id, if one was set for this environment.
    pub network_id: Option<u8>,
    /// Cloud provider region the environment was deployed to.
    pub region: String,
    pub rewards_address: Option<String>,
}
232
/// The tier of an environment, which determines default node counts and the
/// tfvars files applied by Terraform.
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub enum EnvironmentType {
    #[default]
    Development,
    Production,
    Staging,
}
240
241impl EnvironmentType {
242    pub fn get_tfvars_filenames(&self, name: &str, region: &str) -> Vec<String> {
243        match self {
244            EnvironmentType::Development => vec![
245                "dev.tfvars".to_string(),
246                format!("dev-images-{region}.tfvars", region = region),
247            ],
248            EnvironmentType::Staging => vec![
249                "staging.tfvars".to_string(),
250                format!("staging-images-{region}.tfvars", region = region),
251            ],
252            EnvironmentType::Production => {
253                vec![
254                    format!("{name}.tfvars", name = name),
255                    format!("production-images-{region}.tfvars", region = region),
256                ]
257            }
258        }
259    }
260
261    pub fn get_default_peer_cache_node_count(&self) -> u16 {
262        match self {
263            EnvironmentType::Development => 5,
264            EnvironmentType::Production => 5,
265            EnvironmentType::Staging => 5,
266        }
267    }
268
269    pub fn get_default_node_count(&self) -> u16 {
270        match self {
271            EnvironmentType::Development => 25,
272            EnvironmentType::Production => 25,
273            EnvironmentType::Staging => 25,
274        }
275    }
276
277    pub fn get_default_symmetric_private_node_count(&self) -> u16 {
278        self.get_default_node_count()
279    }
280
281    pub fn get_default_full_cone_private_node_count(&self) -> u16 {
282        self.get_default_node_count()
283    }
284    pub fn get_default_upnp_private_node_count(&self) -> u16 {
285        self.get_default_node_count()
286    }
287}
288
289impl std::fmt::Display for EnvironmentType {
290    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
291        match self {
292            EnvironmentType::Development => write!(f, "development"),
293            EnvironmentType::Production => write!(f, "production"),
294            EnvironmentType::Staging => write!(f, "staging"),
295        }
296    }
297}
298
299impl FromStr for EnvironmentType {
300    type Err = Error;
301
302    fn from_str(s: &str) -> Result<Self, Self::Err> {
303        match s.to_lowercase().as_str() {
304            "development" => Ok(EnvironmentType::Development),
305            "production" => Ok(EnvironmentType::Production),
306            "staging" => Ok(EnvironmentType::Staging),
307            _ => Err(Error::EnvironmentNameFromStringError(s.to_string())),
308        }
309    }
310}
311
/// Specify the binary option for the deployment.
///
/// Several binaries are involved in a deployment; the versioned variant below
/// covers `ant`, `antctl` and `antnode`.
/// NOTE(review): this doc previously listed the old `safenode`/
/// `safenode_rpc_client`/`faucet`/`safe` binary names, which no longer match
/// the fields — confirm the current binary set with the provisioning code.
///
/// The options are to build from source, or supply a pre-built, versioned
/// binary, which will be fetched from S3. Building from source adds
/// significant time to the deployment.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum BinaryOption {
    /// Binaries will be built from source.
    BuildFromSource {
        /// A comma-separated list that will be passed to the `--features` argument.
        antnode_features: Option<String>,
        branch: String,
        repo_owner: String,
        /// Skip building the binaries, if they were already built during the previous run using the same
        /// branch, repo owner and testnet name.
        skip_binary_build: bool,
    },
    /// Pre-built, versioned binaries will be fetched from S3.
    Versioned {
        ant_version: Option<Version>,
        antctl_version: Option<Version>,
        antnode_version: Option<Version>,
    },
}
344
345impl BinaryOption {
346    pub fn should_provision_build_machine(&self) -> bool {
347        match self {
348            BinaryOption::BuildFromSource {
349                skip_binary_build, ..
350            } => !skip_binary_build,
351            BinaryOption::Versioned { .. } => false,
352        }
353    }
354
355    pub fn print(&self) {
356        match self {
357            BinaryOption::BuildFromSource {
358                antnode_features,
359                branch,
360                repo_owner,
361                skip_binary_build: _,
362            } => {
363                println!("Source configuration:");
364                println!("  Repository owner: {repo_owner}");
365                println!("  Branch: {branch}");
366                if let Some(features) = antnode_features {
367                    println!("  Antnode features: {features}");
368                }
369            }
370            BinaryOption::Versioned {
371                ant_version,
372                antctl_version,
373                antnode_version,
374            } => {
375                println!("Versioned binaries configuration:");
376                if let Some(version) = ant_version {
377                    println!("  ant version: {version}");
378                }
379                if let Some(version) = antctl_version {
380                    println!("  antctl version: {version}");
381                }
382                if let Some(version) = antnode_version {
383                    println!("  antnode version: {version}");
384                }
385            }
386        }
387    }
388}
389
/// The cloud provider an environment is deployed on.
///
/// Only Digital Ocean is currently accepted by `TestnetDeployBuilder::build`;
/// other providers are rejected with `Error::CloudProviderNotSupported`.
#[derive(Debug, Clone, Copy)]
pub enum CloudProvider {
    Aws,
    DigitalOcean,
}
395
396impl std::fmt::Display for CloudProvider {
397    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
398        match self {
399            CloudProvider::Aws => write!(f, "aws"),
400            CloudProvider::DigitalOcean => write!(f, "digital-ocean"),
401        }
402    }
403}
404
405impl CloudProvider {
406    pub fn get_ssh_user(&self) -> String {
407        match self {
408            CloudProvider::Aws => "ubuntu".to_string(),
409            CloudProvider::DigitalOcean => "root".to_string(),
410        }
411    }
412}
413
/// The log output format for node services.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum LogFormat {
    Default,
    Json,
}
419
420impl LogFormat {
421    pub fn parse_from_str(val: &str) -> Result<Self> {
422        match val {
423            "default" => Ok(LogFormat::Default),
424            "json" => Ok(LogFormat::Json),
425            _ => Err(Error::LoggingConfiguration(
426                "The only valid values for this argument are \"default\" or \"json\"".to_string(),
427            )),
428        }
429    }
430
431    pub fn as_str(&self) -> &'static str {
432        match self {
433            LogFormat::Default => "default",
434            LogFormat::Json => "json",
435        }
436    }
437}
438
/// Options controlling an upgrade of node services across an environment.
#[derive(Clone)]
pub struct UpgradeOptions {
    /// Run Ansible with verbose output.
    pub ansible_verbose: bool,
    /// Branch for a custom-built antnode (paired with `repo_owner`).
    pub branch: Option<String>,
    /// Restrict the upgrade to a specific set of VMs.
    pub custom_inventory: Option<Vec<VirtualMachine>>,
    /// Environment variables to apply to the node services.
    pub env_variables: Option<Vec<(String, String)>>,
    /// Force the upgrade; forwarded to the playbook as the `force` variable.
    pub force: bool,
    /// Number of parallel Ansible forks.
    pub forks: usize,
    /// Interval between upgrades; forwarded to the playbook in milliseconds.
    pub interval: Duration,
    /// Name of the environment being upgraded.
    pub name: String,
    /// Restrict the upgrade to one node type.
    pub node_type: Option<NodeType>,
    /// Delay applied before each upgrade. NOTE(review): units not visible
    /// here — confirm against the playbook's `pre_upgrade_delay` handling.
    pub pre_upgrade_delay: Option<u64>,
    /// Cloud provider hosting the environment.
    pub provider: CloudProvider,
    /// Repo owner for a custom-built antnode (paired with `branch`).
    pub repo_owner: Option<String>,
    /// Specific antnode version to upgrade to.
    pub version: Option<String>,
}
455
impl UpgradeOptions {
    /// Builds the extra-vars document passed to the Ansible upgrade playbook.
    ///
    /// The interval is always included (as milliseconds); the optional
    /// settings (environment variables, force flag, pinned antnode version,
    /// pre-upgrade delay) are added only when set. When both a repo owner and
    /// a branch are supplied, a `BuildFromSource` binary option is passed to
    /// the builder, which presumably resolves to a branch-built antnode URL
    /// rather than a released version — confirm in `ExtraVarsDocBuilder`.
    pub fn get_ansible_vars(&self) -> String {
        let mut extra_vars = ExtraVarsDocBuilder::default();
        extra_vars.add_variable("interval", &self.interval.as_millis().to_string());
        if let Some(env_variables) = &self.env_variables {
            extra_vars.add_env_variable_list("env_variables", env_variables.clone());
        }
        // Only emit `force` when it is actually set, so the playbook default applies otherwise.
        if self.force {
            extra_vars.add_variable("force", &self.force.to_string());
        }
        if let Some(version) = &self.version {
            extra_vars.add_variable("antnode_version", version);
        }
        if let Some(pre_upgrade_delay) = &self.pre_upgrade_delay {
            extra_vars.add_variable("pre_upgrade_delay", &pre_upgrade_delay.to_string());
        }

        if let (Some(repo_owner), Some(branch)) = (&self.repo_owner, &self.branch) {
            let binary_option = BinaryOption::BuildFromSource {
                antnode_features: None,
                branch: branch.clone(),
                repo_owner: repo_owner.clone(),
                // The build already happened (or is not wanted) by upgrade time.
                skip_binary_build: true,
            };
            extra_vars.add_node_url_or_version(&self.name, &binary_option);
        }

        extra_vars.build()
    }
}
486
/// Builder for [`TestnetDeployer`].
///
/// Unset options fall back to environment variables or defaults when
/// `build` is called.
#[derive(Default)]
pub struct TestnetDeployBuilder {
    ansible_forks: Option<usize>,
    ansible_verbose_mode: bool,
    deployment_type: EnvironmentType,
    environment_name: String,
    provider: Option<CloudProvider>,
    region: Option<String>,
    ssh_secret_key_path: Option<PathBuf>,
    state_bucket_name: Option<String>,
    terraform_binary_path: Option<PathBuf>,
    vault_password_path: Option<PathBuf>,
    working_directory_path: Option<PathBuf>,
}
501
impl TestnetDeployBuilder {
    /// Creates a builder with all options unset.
    pub fn new() -> Self {
        Default::default()
    }

    /// Runs Ansible in verbose mode when set.
    pub fn ansible_verbose_mode(&mut self, ansible_verbose_mode: bool) -> &mut Self {
        self.ansible_verbose_mode = ansible_verbose_mode;
        self
    }

    /// Overrides the number of parallel Ansible forks.
    pub fn ansible_forks(&mut self, ansible_forks: usize) -> &mut Self {
        self.ansible_forks = Some(ansible_forks);
        self
    }

    /// Sets the environment type (development/staging/production).
    pub fn deployment_type(&mut self, deployment_type: EnvironmentType) -> &mut Self {
        self.deployment_type = deployment_type;
        self
    }

    /// Sets the name of the environment to deploy.
    pub fn environment_name(&mut self, name: &str) -> &mut Self {
        self.environment_name = name.to_string();
        self
    }

    /// Sets the cloud provider (defaults to Digital Ocean).
    pub fn provider(&mut self, provider: CloudProvider) -> &mut Self {
        self.provider = Some(provider);
        self
    }

    /// Sets the Terraform state bucket (default: `TERRAFORM_STATE_BUCKET_NAME` env var).
    pub fn state_bucket_name(&mut self, state_bucket_name: String) -> &mut Self {
        self.state_bucket_name = Some(state_bucket_name);
        self
    }

    /// Sets the path to the terraform binary (default: `terraform` on PATH).
    pub fn terraform_binary_path(&mut self, terraform_binary_path: PathBuf) -> &mut Self {
        self.terraform_binary_path = Some(terraform_binary_path);
        self
    }

    /// Sets the working directory (default: `<cwd>/resources`).
    pub fn working_directory(&mut self, working_directory_path: PathBuf) -> &mut Self {
        self.working_directory_path = Some(working_directory_path);
        self
    }

    /// Sets the SSH secret key path (default: `SSH_KEY_PATH` env var).
    pub fn ssh_secret_key_path(&mut self, ssh_secret_key_path: PathBuf) -> &mut Self {
        self.ssh_secret_key_path = Some(ssh_secret_key_path);
        self
    }

    /// Sets the Ansible vault password path (default: `ANSIBLE_VAULT_PASSWORD_PATH` env var).
    pub fn vault_password_path(&mut self, vault_password_path: PathBuf) -> &mut Self {
        self.vault_password_path = Some(vault_password_path);
        self
    }

    /// Sets the provider region (default: "lon1").
    pub fn region(&mut self, region: String) -> &mut Self {
        self.region = Some(region);
        self
    }

    /// Builds the [`TestnetDeployer`].
    ///
    /// Validates provider credentials, resolves unset options from
    /// environment variables or defaults, and constructs the Terraform,
    /// Ansible, SSH and RPC components the deployer wraps. As a side effect,
    /// sets the `DIGITALOCEAN_TOKEN` and `DO_API_TOKEN` process environment
    /// variables from `DO_PAT`.
    ///
    /// # Errors
    ///
    /// Fails if the provider is not Digital Ocean, if a required environment
    /// variable is missing, or if any component fails to initialise.
    pub fn build(&self) -> Result<TestnetDeployer> {
        let provider = self.provider.unwrap_or(CloudProvider::DigitalOcean);
        match provider {
            CloudProvider::DigitalOcean => {
                let digital_ocean_pat = std::env::var("DO_PAT").map_err(|_| {
                    Error::CloudProviderCredentialsNotSupplied("DO_PAT".to_string())
                })?;
                // The DO_PAT variable is not actually read by either Terraform or Ansible.
                // Each tool uses a different variable, so instead we set each of those variables
                // to the value of DO_PAT. This means the user only needs to set one variable.
                std::env::set_var("DIGITALOCEAN_TOKEN", digital_ocean_pat.clone());
                std::env::set_var("DO_API_TOKEN", digital_ocean_pat);
            }
            _ => {
                return Err(Error::CloudProviderNotSupported(provider.to_string()));
            }
        }

        // Each option below falls back to an environment variable or a fixed default.
        let state_bucket_name = match self.state_bucket_name {
            Some(ref bucket_name) => bucket_name.clone(),
            None => std::env::var("TERRAFORM_STATE_BUCKET_NAME")?,
        };

        let default_terraform_bin_path = PathBuf::from("terraform");
        let terraform_binary_path = self
            .terraform_binary_path
            .as_ref()
            .unwrap_or(&default_terraform_bin_path);

        let working_directory_path = match self.working_directory_path {
            Some(ref work_dir_path) => work_dir_path.clone(),
            None => std::env::current_dir()?.join("resources"),
        };

        let ssh_secret_key_path = match self.ssh_secret_key_path {
            Some(ref ssh_sk_path) => ssh_sk_path.clone(),
            None => PathBuf::from(std::env::var("SSH_KEY_PATH")?),
        };

        let vault_password_path = match self.vault_password_path {
            Some(ref vault_pw_path) => vault_pw_path.clone(),
            None => PathBuf::from(std::env::var("ANSIBLE_VAULT_PASSWORD_PATH")?),
        };

        let region = match self.region {
            Some(ref region) => region.clone(),
            None => "lon1".to_string(),
        };

        let terraform_runner = TerraformRunner::new(
            terraform_binary_path.to_path_buf(),
            working_directory_path
                .join("terraform")
                .join("testnet")
                .join(provider.to_string()),
            provider,
            &state_bucket_name,
        )?;
        let ansible_runner = AnsibleRunner::new(
            self.ansible_forks.unwrap_or(ANSIBLE_DEFAULT_FORKS),
            self.ansible_verbose_mode,
            &self.environment_name,
            provider,
            ssh_secret_key_path.clone(),
            vault_password_path,
            working_directory_path.join("ansible"),
        )?;
        let ssh_client = SshClient::new(ssh_secret_key_path);
        let ansible_provisioner =
            AnsibleProvisioner::new(ansible_runner, provider, ssh_client.clone());
        let rpc_client = RpcClient::new(
            PathBuf::from("/usr/local/bin/safenode_rpc_client"),
            working_directory_path.clone(),
        );

        // Remove any `safe` binary from a previous deployment. Otherwise you can end up with
        // mismatched binaries.
        let safe_path = working_directory_path.join("safe");
        if safe_path.exists() {
            std::fs::remove_file(safe_path)?;
        }

        let testnet = TestnetDeployer::new(
            ansible_provisioner,
            provider,
            self.deployment_type.clone(),
            &self.environment_name,
            rpc_client,
            S3Repository {},
            ssh_client,
            terraform_runner,
            working_directory_path,
            region,
        )?;

        Ok(testnet)
    }
}
660
/// Orchestrates deployment of a testnet environment.
///
/// Wraps the Terraform, Ansible, SSH and RPC components; construct via
/// [`TestnetDeployBuilder`].
#[derive(Clone)]
pub struct TestnetDeployer {
    pub ansible_provisioner: AnsibleProvisioner,
    pub cloud_provider: CloudProvider,
    pub deployment_type: EnvironmentType,
    pub environment_name: String,
    pub inventory_file_path: PathBuf,
    pub region: String,
    pub rpc_client: RpcClient,
    pub s3_repository: S3Repository,
    pub ssh_client: SshClient,
    pub terraform_runner: TerraformRunner,
    pub working_directory_path: PathBuf,
}
675
676impl TestnetDeployer {
    /// Constructs a `TestnetDeployer` from pre-built components.
    ///
    /// The inventory file path is derived from the working directory
    /// (`ansible/inventory/dev_inventory_digital_ocean.yml`).
    ///
    /// # Errors
    ///
    /// Returns `Error::EnvironmentNameRequired` if `environment_name` is
    /// empty.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        ansible_provisioner: AnsibleProvisioner,
        cloud_provider: CloudProvider,
        deployment_type: EnvironmentType,
        environment_name: &str,
        rpc_client: RpcClient,
        s3_repository: S3Repository,
        ssh_client: SshClient,
        terraform_runner: TerraformRunner,
        working_directory_path: PathBuf,
        region: String,
    ) -> Result<TestnetDeployer> {
        if environment_name.is_empty() {
            return Err(Error::EnvironmentNameRequired);
        }
        let inventory_file_path = working_directory_path
            .join("ansible")
            .join("inventory")
            .join("dev_inventory_digital_ocean.yml");
        Ok(TestnetDeployer {
            ansible_provisioner,
            cloud_provider,
            deployment_type,
            environment_name: environment_name.to_string(),
            inventory_file_path,
            region,
            rpc_client,
            ssh_client,
            s3_repository,
            terraform_runner,
            working_directory_path,
        })
    }
711
    /// Initialises the environment before a deployment.
    ///
    /// Refuses to proceed if logs for a previous testnet with the same name
    /// still exist in S3, runs `terraform init`, creates the Terraform
    /// workspace if it does not already exist, and downloads the safenode
    /// RPC client binary into the working directory if it is missing.
    ///
    /// # Errors
    ///
    /// Returns `Error::LogsForPreviousTestnetExist` when the S3 logs folder
    /// for this environment name is already present, or propagates Terraform,
    /// S3 and filesystem errors.
    pub async fn init(&self) -> Result<()> {
        if self
            .s3_repository
            .folder_exists(
                "sn-testnet",
                &format!("testnet-logs/{}", self.environment_name),
            )
            .await?
        {
            return Err(Error::LogsForPreviousTestnetExist(
                self.environment_name.clone(),
            ));
        }

        self.terraform_runner.init()?;
        let workspaces = self.terraform_runner.workspace_list()?;
        if !workspaces.contains(&self.environment_name) {
            self.terraform_runner
                .workspace_new(&self.environment_name)?;
        } else {
            println!("Workspace {} already exists", self.environment_name);
        }

        let rpc_client_path = self.working_directory_path.join("safenode_rpc_client");
        if !rpc_client_path.is_file() {
            println!("Downloading the rpc client for safenode...");
            let archive_name = "safenode_rpc_client-latest-x86_64-unknown-linux-musl.tar.gz";
            get_and_extract_archive_from_s3(
                &self.s3_repository,
                "sn-node-rpc-client",
                archive_name,
                &self.working_directory_path,
            )
            .await?;
            // The extracted binary is not guaranteed to be executable; make it so on unix.
            #[cfg(unix)]
            {
                use std::os::unix::fs::PermissionsExt;
                let mut permissions = std::fs::metadata(&rpc_client_path)?.permissions();
                permissions.set_mode(0o755); // rwxr-xr-x
                std::fs::set_permissions(&rpc_client_path, permissions)?;
            }
        }

        Ok(())
    }
757
    /// Runs `terraform plan` for the environment described by `options`,
    /// selecting its workspace first.
    pub fn plan(&self, options: &InfraRunOptions) -> Result<()> {
        println!("Selecting {} workspace...", options.name);
        self.terraform_runner.workspace_select(&options.name)?;

        let args = build_terraform_args(options)?;

        self.terraform_runner
            .plan(Some(args), options.tfvars_filenames.clone())?;
        Ok(())
    }
768
    /// Starts node services across the environment, optionally limited to one
    /// node type and/or a custom set of VMs. `interval` spaces out the starts.
    pub fn start(
        &self,
        interval: Duration,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.start_nodes(
            &self.environment_name,
            interval,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }
783
    /// Applies a cron job that deletes node records, optionally limited to
    /// one node type and/or a custom set of VMs.
    pub fn apply_delete_node_records_cron(
        &self,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.apply_delete_node_records_cron(
            &self.environment_name,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }
796
    /// Resets node services across the environment, optionally limited to one
    /// node type and/or a custom set of VMs.
    pub fn reset(
        &self,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.reset_nodes(
            &self.environment_name,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }
809
810    /// Get the status of all nodes in a network.
811    ///
812    /// First, a playbook runs `safenode-manager status` against all the machines, to get the
813    /// current state of all the nodes. Then all the node registry files are retrieved and
814    /// deserialized to a `NodeRegistry`, allowing us to output the status of each node on each VM.
815    pub async fn status(&self) -> Result<()> {
816        self.ansible_provisioner.status()?;
817
818        let peer_cache_node_registries = self
819            .ansible_provisioner
820            .get_node_registries(&AnsibleInventoryType::PeerCacheNodes)
821            .await?;
822        let generic_node_registries = self
823            .ansible_provisioner
824            .get_node_registries(&AnsibleInventoryType::Nodes)
825            .await?;
826        let symmetric_private_node_registries = self
827            .ansible_provisioner
828            .get_node_registries(&AnsibleInventoryType::SymmetricPrivateNodes)
829            .await?;
830        let full_cone_private_node_registries = self
831            .ansible_provisioner
832            .get_node_registries(&AnsibleInventoryType::FullConePrivateNodes)
833            .await?;
834        let upnp_private_node_registries = self
835            .ansible_provisioner
836            .get_node_registries(&AnsibleInventoryType::Upnp)
837            .await?;
838        let port_restricted_cone_private_node_registries = self
839            .ansible_provisioner
840            .get_node_registries(&AnsibleInventoryType::PortRestrictedConePrivateNodes)
841            .await?;
842        let genesis_node_registry = self
843            .ansible_provisioner
844            .get_node_registries(&AnsibleInventoryType::Genesis)
845            .await?
846            .clone();
847
848        peer_cache_node_registries.print().await;
849        generic_node_registries.print().await;
850        symmetric_private_node_registries.print().await;
851        full_cone_private_node_registries.print().await;
852        upnp_private_node_registries.print().await;
853        genesis_node_registry.print().await;
854
855        let all_registries = [
856            &peer_cache_node_registries,
857            &generic_node_registries,
858            &symmetric_private_node_registries,
859            &full_cone_private_node_registries,
860            &upnp_private_node_registries,
861            &genesis_node_registry,
862        ];
863
864        let mut total_nodes = 0;
865        let mut running_nodes = 0;
866        let mut stopped_nodes = 0;
867        let mut added_nodes = 0;
868        let mut removed_nodes = 0;
869
870        for (_, registry) in all_registries
871            .iter()
872            .flat_map(|r| r.retrieved_registries.iter())
873        {
874            for node in registry.nodes.read().await.iter() {
875                total_nodes += 1;
876                match node.read().await.status {
877                    ServiceStatus::Running => running_nodes += 1,
878                    ServiceStatus::Stopped => stopped_nodes += 1,
879                    ServiceStatus::Added => added_nodes += 1,
880                    ServiceStatus::Removed => removed_nodes += 1,
881                }
882            }
883        }
884
885        let peer_cache_hosts = peer_cache_node_registries.retrieved_registries.len();
886        let generic_hosts = generic_node_registries.retrieved_registries.len();
887        let symmetric_private_hosts = symmetric_private_node_registries.retrieved_registries.len();
888        let full_cone_private_hosts = full_cone_private_node_registries.retrieved_registries.len();
889        let upnp_private_hosts = upnp_private_node_registries.retrieved_registries.len();
890        let port_restricted_cone_private_hosts = port_restricted_cone_private_node_registries
891            .retrieved_registries
892            .len();
893
894        let peer_cache_nodes = peer_cache_node_registries.get_node_count().await;
895        let generic_nodes = generic_node_registries.get_node_count().await;
896        let symmetric_private_nodes = symmetric_private_node_registries.get_node_count().await;
897        let full_cone_private_nodes = full_cone_private_node_registries.get_node_count().await;
898        let upnp_private_nodes = upnp_private_node_registries.get_node_count().await;
899        let port_restricted_cone_private_nodes = port_restricted_cone_private_node_registries
900            .get_node_count()
901            .await;
902
903        println!("-------");
904        println!("Summary");
905        println!("-------");
906        println!(
907            "Total peer cache nodes ({}x{}): {}",
908            peer_cache_hosts,
909            if peer_cache_hosts > 0 {
910                peer_cache_nodes / peer_cache_hosts
911            } else {
912                0
913            },
914            peer_cache_nodes
915        );
916        println!(
917            "Total generic nodes ({}x{}): {}",
918            generic_hosts,
919            if generic_hosts > 0 {
920                generic_nodes / generic_hosts
921            } else {
922                0
923            },
924            generic_nodes
925        );
926        println!(
927            "Total symmetric private nodes ({}x{}): {}",
928            symmetric_private_hosts,
929            if symmetric_private_hosts > 0 {
930                symmetric_private_nodes / symmetric_private_hosts
931            } else {
932                0
933            },
934            symmetric_private_nodes
935        );
936        println!(
937            "Total full cone private nodes ({}x{}): {}",
938            full_cone_private_hosts,
939            if full_cone_private_hosts > 0 {
940                full_cone_private_nodes / full_cone_private_hosts
941            } else {
942                0
943            },
944            full_cone_private_nodes
945        );
946        println!(
947            "Total UPnP private nodes ({}x{}): {}",
948            upnp_private_hosts,
949            if upnp_private_hosts > 0 {
950                upnp_private_nodes / upnp_private_hosts
951            } else {
952                0
953            },
954            upnp_private_nodes
955        );
956        println!(
957            "Total port restricted cone private nodes ({}x{}): {}",
958            port_restricted_cone_private_hosts,
959            if port_restricted_cone_private_hosts > 0 {
960                port_restricted_cone_private_nodes / port_restricted_cone_private_hosts
961            } else {
962                0
963            },
964            port_restricted_cone_private_nodes
965        );
966        println!("Total nodes: {total_nodes}");
967        println!("Running nodes: {running_nodes}");
968        println!("Stopped nodes: {stopped_nodes}");
969        println!("Added nodes: {added_nodes}");
970        println!("Removed nodes: {removed_nodes}");
971
972        Ok(())
973    }
974
    /// Remove node log files on the deployed hosts.
    ///
    /// Delegates to the Ansible provisioner. `setup_cron` is forwarded as-is;
    /// judging by the name it installs a recurring cleanup job on the hosts —
    /// confirm against `AnsibleProvisioner::cleanup_node_logs`.
    pub fn cleanup_node_logs(&self, setup_cron: bool) -> Result<()> {
        self.ansible_provisioner.cleanup_node_logs(setup_cron)?;
        Ok(())
    }
979
    /// Start the Telegraf service on node hosts via the Ansible provisioner.
    ///
    /// `node_type` optionally restricts the operation to one class of node;
    /// `custom_inventory` optionally restricts it to an explicit set of VMs.
    /// Both are forwarded unchanged along with the environment name.
    pub fn start_telegraf(
        &self,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.start_telegraf(
            &self.environment_name,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }
992
    /// Stop node services across the environment.
    ///
    /// All arguments are forwarded unchanged to
    /// `AnsibleProvisioner::stop_nodes`:
    /// * `interval` — judging by the name, a pause applied between stops;
    ///   confirm semantics in the provisioner.
    /// * `node_type` / `custom_inventory` — optionally restrict the operation
    ///   to one node type or an explicit set of VMs.
    /// * `delay` — optional delay value, forwarded as-is.
    /// * `service_names` — optionally restrict to specific named services.
    pub fn stop(
        &self,
        interval: Duration,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
        delay: Option<u64>,
        service_names: Option<Vec<String>>,
    ) -> Result<()> {
        self.ansible_provisioner.stop_nodes(
            &self.environment_name,
            interval,
            node_type,
            custom_inventory,
            delay,
            service_names,
        )?;
        Ok(())
    }
1011
    /// Stop the Telegraf service on node hosts via the Ansible provisioner.
    ///
    /// `node_type` and `custom_inventory` optionally restrict the operation to
    /// one node type or an explicit set of VMs; both are forwarded unchanged.
    pub fn stop_telegraf(
        &self,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.stop_telegraf(
            &self.environment_name,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }
1024
    /// Upgrade node binaries as described by `options`.
    ///
    /// Thin wrapper over `AnsibleProvisioner::upgrade_nodes`.
    pub fn upgrade(&self, options: UpgradeOptions) -> Result<()> {
        self.ansible_provisioner.upgrade_nodes(&options)?;
        Ok(())
    }
1029
    /// Upgrade the `antctl` binary on the node hosts to the given `version`.
    ///
    /// `node_type` and `custom_inventory` optionally restrict the operation to
    /// one node type or an explicit set of VMs; all arguments are forwarded
    /// unchanged to the Ansible provisioner.
    pub fn upgrade_antctl(
        &self,
        version: Version,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.upgrade_antctl(
            &self.environment_name,
            &version,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }
1044
    /// Upgrade the GeoIP Telegraf configuration for the named environment.
    ///
    /// Thin wrapper over `AnsibleProvisioner::upgrade_geoip_telegraf`.
    pub fn upgrade_geoip_telegraf(&self, name: &str) -> Result<()> {
        self.ansible_provisioner.upgrade_geoip_telegraf(name)?;
        Ok(())
    }
1049
    /// Upgrade the node Telegraf configuration for the named environment.
    ///
    /// Thin wrapper over `AnsibleProvisioner::upgrade_node_telegraf`.
    pub fn upgrade_node_telegraf(&self, name: &str) -> Result<()> {
        self.ansible_provisioner.upgrade_node_telegraf(name)?;
        Ok(())
    }
1054
    /// Upgrade the client Telegraf configuration for the named environment.
    ///
    /// Thin wrapper over `AnsibleProvisioner::upgrade_client_telegraf`.
    pub fn upgrade_client_telegraf(&self, name: &str) -> Result<()> {
        self.ansible_provisioner.upgrade_client_telegraf(name)?;
        Ok(())
    }
1059
    /// Tear down the environment: drain funds, destroy the infrastructure,
    /// and delete the local Ansible inventory and the S3 environment record.
    ///
    /// Cleanup is best-effort where possible: a failure to fetch the
    /// environment details or to delete the S3 record is logged and cleanup
    /// continues, whereas a failure to drain funds or destroy infra aborts
    /// with an error.
    pub async fn clean(&self) -> Result<()> {
        // Missing details are tolerated so that a partially-created
        // environment can still be destroyed.
        let environment_details =
            get_environment_details(&self.environment_name, &self.s3_repository)
                .await
                .inspect_err(|err| {
                    println!("Failed to get environment details: {err}. Continuing cleanup...");
                })
                .ok();
        // Funds can only be drained when we know the environment details.
        if let Some(environment_details) = &environment_details {
            funding::drain_funds(&self.ansible_provisioner, environment_details).await?;
        }

        self.destroy_infra(environment_details).await?;

        cleanup_environment_inventory(
            &self.environment_name,
            &self
                .working_directory_path
                .join("ansible")
                .join("inventory"),
            None,
        )?;

        println!("Deleted Ansible inventory for {}", self.environment_name);

        // Best-effort: the S3 record is only metadata, so a deletion failure
        // should not fail the whole cleanup.
        if let Err(err) = self
            .s3_repository
            .delete_object("sn-environment-type", &self.environment_name)
            .await
        {
            println!("Failed to delete environment type: {err}. Continuing cleanup...");
        }
        Ok(())
    }
1094
    /// Destroy the environment's Terraform-managed infrastructure and delete
    /// its Terraform workspace.
    ///
    /// When `environment_details` is `None` (e.g. the S3 record could not be
    /// fetched), the destroy runs without tfvars files.
    async fn destroy_infra(&self, environment_details: Option<EnvironmentDetails>) -> Result<()> {
        // The correct workspace must be selected before any terraform command.
        infra::select_workspace(&self.terraform_runner, &self.environment_name)?;

        // Derive the run options from the state of the existing deployment so
        // the destroy matches what was actually provisioned.
        let options = InfraRunOptions::generate_existing(
            &self.environment_name,
            &self.region,
            &self.terraform_runner,
            environment_details.as_ref(),
        )
        .await?;

        let args = build_terraform_args(&options)?;
        // The tfvars file set depends on the environment type, which is only
        // known when the environment details were retrievable.
        let tfvars_filenames = if let Some(environment_details) = &environment_details {
            environment_details
                .environment_type
                .get_tfvars_filenames(&self.environment_name, &self.region)
        } else {
            vec![]
        };

        self.terraform_runner
            .destroy(Some(args), Some(tfvars_filenames))?;

        infra::delete_workspace(&self.terraform_runner, &self.environment_name)?;

        Ok(())
    }
1122}
1123
1124//
1125// Shared Helpers
1126//
1127
/// Obtain the genesis node's multiaddr and public IP.
///
/// Returns `Ok(None)` when the environment has no genesis inventory at all.
/// Otherwise the genesis host's node registry is queried over SSH with `jq`,
/// in two attempts:
/// 1. a node started with `first == true`, taking its first non-localhost
///    `quic-v1` listen address;
/// 2. failing that, any node's first non-localhost `quic-v1` listen address.
///
/// Errors with [`Error::GenesisListenAddress`] when neither attempt yields an
/// address.
pub fn get_genesis_multiaddr(
    ansible_runner: &AnsibleRunner,
    ssh_client: &SshClient,
) -> Result<Option<(String, IpAddr)>> {
    let genesis_inventory = ansible_runner.get_inventory(AnsibleInventoryType::Genesis, true)?;
    if genesis_inventory.is_empty() {
        return Ok(None);
    }
    let genesis_ip = genesis_inventory[0].public_ip_addr;

    // It's possible for the genesis host to be altered from its original state where a node was
    // started with the `--first` flag.
    // First attempt: try to find node with first=true
    let multiaddr = ssh_client
        .run_command(
            &genesis_ip,
            "root",
            "jq -r '.nodes[] | select(.initial_peers_config.first == true) | .listen_addr[] | select(contains(\"127.0.0.1\") | not) | select(contains(\"quic-v1\"))' /var/antctl/node_registry.json | head -n 1",
            false,
        )
        .map(|output| output.first().cloned())
        .unwrap_or_else(|err| {
            // A failed first attempt is not fatal; fall through to the
            // second, less specific query below.
            log::error!("Failed to find first node with quic-v1 protocol: {err:?}");
            None
        });

    // Second attempt: if first attempt failed, see if any node is available.
    let multiaddr = match multiaddr {
        Some(addr) => addr,
        None => ssh_client
            .run_command(
                &genesis_ip,
                "root",
                "jq -r '.nodes[] | .listen_addr[] | select(contains(\"127.0.0.1\") | not) | select(contains(\"quic-v1\"))' /var/antctl/node_registry.json | head -n 1",
                false,
            )?
            .first()
            .cloned()
            .ok_or_else(|| Error::GenesisListenAddress)?,
    };

    Ok(Some((multiaddr, genesis_ip)))
}
1171
1172pub fn get_anvil_node_data_hardcoded(ansible_runner: &AnsibleRunner) -> Result<AnvilNodeData> {
1173    let evm_inventory = ansible_runner.get_inventory(AnsibleInventoryType::EvmNodes, true)?;
1174    if evm_inventory.is_empty() {
1175        return Err(Error::EvmNodeNotFound);
1176    }
1177    let evm_ip = evm_inventory[0].public_ip_addr;
1178
1179    Ok(AnvilNodeData {
1180        data_payments_address: "0x8464135c8F25Da09e49BC8782676a84730C318bC".to_string(),
1181        deployer_wallet_private_key:
1182            "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80".to_string(),
1183        merkle_payments_address: "0x663F3ad617193148711d28f5334eE4Ed07016602".to_string(),
1184        payment_token_address: "0x5FbDB2315678afecb367f032d93F642f64180aa3".to_string(),
1185        rpc_url: format!("http://{evm_ip}:61611"),
1186    })
1187}
1188
/// Obtain a multiaddr and public IP from an already-provisioned node.
///
/// Picks the VM whose name ends in `-node-1`, then queries its node registry
/// over SSH with `jq` for the first listen address that does not contain
/// `127.0.0.1`. Errors with [`Error::NodeAddressNotFound`] when no such VM or
/// address exists.
pub fn get_multiaddr(
    ansible_runner: &AnsibleRunner,
    ssh_client: &SshClient,
) -> Result<(String, IpAddr)> {
    let node_inventory = ansible_runner.get_inventory(AnsibleInventoryType::Nodes, true)?;
    // For upscaling a bootstrap deployment, we'd need to select one of the nodes that's already
    // provisioned. So just try the first one.
    let node_ip = node_inventory
        .iter()
        .find(|vm| vm.name.ends_with("-node-1"))
        .ok_or_else(|| Error::NodeAddressNotFound)?
        .public_ip_addr;

    debug!("Getting multiaddr from node {node_ip}");

    let multiaddr =
        ssh_client
        .run_command(
            &node_ip,
            "root",
            // fetch the first multiaddr which does not contain the localhost addr.
            "jq -r '.nodes[] | .listen_addr[] | select(contains(\"127.0.0.1\") | not)' /var/antctl/node_registry.json | head -n 1",
            false,
        )?.first()
        .cloned()
        .ok_or_else(|| Error::NodeAddressNotFound)?;

    // The node_ip is obviously inside the multiaddr, but it's just being returned as a
    // separate item for convenience.
    Ok((multiaddr, node_ip))
}
1220
1221pub async fn get_and_extract_archive_from_s3(
1222    s3_repository: &S3Repository,
1223    bucket_name: &str,
1224    archive_bucket_path: &str,
1225    dest_path: &Path,
1226) -> Result<()> {
1227    // In this case, not using unwrap leads to having to provide a very trivial error variant that
1228    // doesn't seem very valuable.
1229    let archive_file_name = archive_bucket_path.split('/').next_back().unwrap();
1230    let archive_dest_path = dest_path.join(archive_file_name);
1231    s3_repository
1232        .download_object(bucket_name, archive_bucket_path, &archive_dest_path)
1233        .await?;
1234    extract_archive(&archive_dest_path, dest_path)?;
1235    Ok(())
1236}
1237
1238pub fn extract_archive(archive_path: &Path, dest_path: &Path) -> Result<()> {
1239    let archive_file = File::open(archive_path)?;
1240    let decoder = GzDecoder::new(archive_file);
1241    let mut archive = Archive::new(decoder);
1242    let entries = archive.entries()?;
1243    for entry_result in entries {
1244        let mut entry = entry_result?;
1245        let extract_path = dest_path.join(entry.path()?);
1246        if entry.header().entry_type() == tar::EntryType::Directory {
1247            std::fs::create_dir_all(extract_path)?;
1248            continue;
1249        }
1250        let mut file = BufWriter::new(File::create(extract_path)?);
1251        std::io::copy(&mut entry, &mut file)?;
1252    }
1253    std::fs::remove_file(archive_path)?;
1254    Ok(())
1255}
1256
1257pub fn run_external_command(
1258    binary_path: PathBuf,
1259    working_directory_path: PathBuf,
1260    args: Vec<String>,
1261    suppress_stdout: bool,
1262    suppress_stderr: bool,
1263) -> Result<Vec<String>> {
1264    let mut command = Command::new(binary_path.clone());
1265    for arg in &args {
1266        command.arg(arg);
1267    }
1268    command.stdout(Stdio::piped());
1269    command.stderr(Stdio::piped());
1270    command.current_dir(working_directory_path.clone());
1271    debug!("Running {binary_path:#?} with args {args:#?}");
1272    debug!("Working directory set to {working_directory_path:#?}");
1273
1274    let mut child = command.spawn()?;
1275    let mut output_lines = Vec::new();
1276
1277    if let Some(ref mut stdout) = child.stdout {
1278        let reader = BufReader::new(stdout);
1279        for line in reader.lines() {
1280            let line = line?;
1281            if !suppress_stdout {
1282                println!("{line}");
1283            }
1284            output_lines.push(line);
1285        }
1286    }
1287
1288    if let Some(ref mut stderr) = child.stderr {
1289        let reader = BufReader::new(stderr);
1290        for line in reader.lines() {
1291            let line = line?;
1292            if !suppress_stderr {
1293                eprintln!("{line}");
1294            }
1295            output_lines.push(line);
1296        }
1297    }
1298
1299    let output = child.wait()?;
1300    if !output.success() {
1301        // Using `unwrap` here avoids introducing another error variant, which seems excessive.
1302        let binary_path = binary_path.to_str().unwrap();
1303        return Err(Error::ExternalCommandRunFailed {
1304            binary: binary_path.to_string(),
1305            exit_status: output,
1306        });
1307    }
1308
1309    Ok(output_lines)
1310}
1311
/// Check whether a file named `binary_name` exists in any directory on the
/// `PATH` environment variable.
///
/// Uses `env::split_paths`, which honours the platform's path separator
/// (`:` on Unix, `;` on Windows) instead of a hard-coded `':'`, and
/// `env::var_os`, which does not fail on a non-UTF-8 `PATH`. Returns `false`
/// when `PATH` is unset. Note this only checks existence, not whether the
/// file is executable.
pub fn is_binary_on_path(binary_name: &str) -> bool {
    match std::env::var_os("PATH") {
        Some(path) => std::env::split_paths(&path).any(|dir| dir.join(binary_name).exists()),
        None => false,
    }
}
1324
1325pub fn get_wallet_directory() -> Result<PathBuf> {
1326    Ok(dirs_next::data_dir()
1327        .ok_or_else(|| Error::CouldNotRetrieveDataDirectory)?
1328        .join("safe")
1329        .join("client")
1330        .join("wallet"))
1331}
1332
1333pub async fn notify_slack(inventory: DeploymentInventory) -> Result<()> {
1334    let webhook_url =
1335        std::env::var("SLACK_WEBHOOK_URL").map_err(|_| Error::SlackWebhookUrlNotSupplied)?;
1336
1337    let mut message = String::new();
1338    message.push_str("*Testnet Details*\n");
1339    message.push_str(&format!("Name: {}\n", inventory.name));
1340    message.push_str(&format!("Node count: {}\n", inventory.peers().len()));
1341    message.push_str(&format!("Faucet address: {:?}\n", inventory.faucet_address));
1342    match inventory.binary_option {
1343        BinaryOption::BuildFromSource {
1344            ref repo_owner,
1345            ref branch,
1346            ..
1347        } => {
1348            message.push_str("*Branch Details*\n");
1349            message.push_str(&format!("Repo owner: {repo_owner}\n"));
1350            message.push_str(&format!("Branch: {branch}\n"));
1351        }
1352        BinaryOption::Versioned {
1353            ant_version: ref safe_version,
1354            antnode_version: ref safenode_version,
1355            antctl_version: ref safenode_manager_version,
1356            ..
1357        } => {
1358            message.push_str("*Version Details*\n");
1359            message.push_str(&format!(
1360                "ant version: {}\n",
1361                safe_version
1362                    .as_ref()
1363                    .map_or("None".to_string(), |v| v.to_string())
1364            ));
1365            message.push_str(&format!(
1366                "safenode version: {}\n",
1367                safenode_version
1368                    .as_ref()
1369                    .map_or("None".to_string(), |v| v.to_string())
1370            ));
1371            message.push_str(&format!(
1372                "antctl version: {}\n",
1373                safenode_manager_version
1374                    .as_ref()
1375                    .map_or("None".to_string(), |v| v.to_string())
1376            ));
1377        }
1378    }
1379
1380    message.push_str("*Sample Peers*\n");
1381    message.push_str("```\n");
1382    for peer in inventory.peers().iter().take(20) {
1383        message.push_str(&format!("{peer}\n"));
1384    }
1385    message.push_str("```\n");
1386    message.push_str("*Available Files*\n");
1387    message.push_str("```\n");
1388    for (addr, file_name) in inventory.uploaded_files.iter() {
1389        message.push_str(&format!("{addr}: {file_name}\n"))
1390    }
1391    message.push_str("```\n");
1392
1393    let payload = json!({
1394        "text": message,
1395    });
1396    reqwest::Client::new()
1397        .post(webhook_url)
1398        .json(&payload)
1399        .send()
1400        .await?;
1401    println!("{message}");
1402    println!("Posted notification to Slack");
1403    Ok(())
1404}
1405
1406fn print_duration(duration: Duration) {
1407    let total_seconds = duration.as_secs();
1408    let minutes = total_seconds / 60;
1409    let seconds = total_seconds % 60;
1410    debug!("Time taken: {minutes} minutes and {seconds} seconds");
1411}
1412
1413pub fn get_progress_bar(length: u64) -> Result<ProgressBar> {
1414    let progress_bar = ProgressBar::new(length);
1415    progress_bar.set_style(
1416        ProgressStyle::default_bar()
1417            .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len}")?
1418            .progress_chars("#>-"),
1419    );
1420    progress_bar.enable_steady_tick(Duration::from_millis(100));
1421    Ok(progress_bar)
1422}
1423
/// Fetch and parse the environment details record for `environment_name`
/// from the `sn-environment-type` S3 bucket.
///
/// The download, file read, and JSON parse are each retried by restarting the
/// whole loop (re-downloading) up to `max_retries` additional times. When all
/// attempts are exhausted at any stage, errors with
/// [`Error::EnvironmentDetailsNotFound`]. Retries are immediate, with no
/// backoff.
pub async fn get_environment_details(
    environment_name: &str,
    s3_repository: &S3Repository,
) -> Result<EnvironmentDetails> {
    // The object is downloaded into a temp file that is removed on drop.
    let temp_file = tempfile::NamedTempFile::new()?;

    let max_retries = 3;
    let mut retries = 0;
    let env_details = loop {
        debug!("Downloading the environment details file for {environment_name} from S3");
        match s3_repository
            .download_object("sn-environment-type", environment_name, temp_file.path())
            .await
        {
            Ok(_) => {
                debug!("Downloaded the environment details file for {environment_name} from S3");
                let content = match std::fs::read_to_string(temp_file.path()) {
                    Ok(content) => content,
                    Err(err) => {
                        log::error!("Could not read the environment details file: {err:?}");
                        if retries < max_retries {
                            debug!("Retrying to read the environment details file");
                            retries += 1;
                            // `continue` restarts the loop, which re-downloads
                            // the object before reading again.
                            continue;
                        } else {
                            return Err(Error::EnvironmentDetailsNotFound(
                                environment_name.to_string(),
                            ));
                        }
                    }
                };
                trace!("Content of the environment details file: {content}");

                match serde_json::from_str(&content) {
                    Ok(environment_details) => break environment_details,
                    Err(err) => {
                        log::error!("Could not parse the environment details file: {err:?}");
                        if retries < max_retries {
                            debug!("Retrying to parse the environment details file");
                            retries += 1;
                            continue;
                        } else {
                            return Err(Error::EnvironmentDetailsNotFound(
                                environment_name.to_string(),
                            ));
                        }
                    }
                }
            }
            Err(err) => {
                log::error!(
                    "Could not download the environment details file for {environment_name} from S3: {err:?}"
                );
                if retries < max_retries {
                    retries += 1;
                    continue;
                } else {
                    return Err(Error::EnvironmentDetailsNotFound(
                        environment_name.to_string(),
                    ));
                }
            }
        }
    };

    debug!("Fetched environment details: {env_details:?}");

    Ok(env_details)
}
1493
1494pub async fn write_environment_details(
1495    s3_repository: &S3Repository,
1496    environment_name: &str,
1497    environment_details: &EnvironmentDetails,
1498) -> Result<()> {
1499    let temp_dir = tempfile::tempdir()?;
1500    let path = temp_dir.path().to_path_buf().join(environment_name);
1501    let mut file = File::create(&path)?;
1502    let json = serde_json::to_string(environment_details)?;
1503    file.write_all(json.as_bytes())?;
1504    s3_repository
1505        .upload_file("sn-environment-type", &path, true)
1506        .await?;
1507    Ok(())
1508}
1509
1510pub fn calculate_size_per_attached_volume(node_count: u16) -> u16 {
1511    if node_count == 0 {
1512        return 0;
1513    }
1514    let total_volume_required = node_count * STORAGE_REQUIRED_PER_NODE;
1515
1516    // 7 attached volumes per VM
1517    (total_volume_required as f64 / 7.0).ceil() as u16
1518}
1519
/// Build the URL of a host's bootstrap cache file from its IP address.
///
/// IPv6 literals are wrapped in brackets, as RFC 3986 requires for the URL
/// authority component; formatting an IPv6 address bare (`http://::1/...`)
/// produces an invalid URL. IPv4 output is unchanged.
pub fn get_bootstrap_cache_url(ip_addr: &IpAddr) -> String {
    match ip_addr {
        IpAddr::V6(_) => format!("http://[{ip_addr}]/bootstrap_cache.json"),
        IpAddr::V4(_) => format!("http://{ip_addr}/bootstrap_cache.json"),
    }
}