sn_testnet_deploy/lib.rs

1// Copyright (c) 2023, MaidSafe.
2// All rights reserved.
3//
4// This SAFE Network Software is licensed under the BSD-3-Clause license.
5// Please see the LICENSE file for more details.
6
7pub mod ansible;
8pub mod bootstrap;
9pub mod clients;
10pub mod deploy;
11pub mod digital_ocean;
12pub mod error;
13pub mod funding;
14pub mod infra;
15pub mod inventory;
16pub mod logs;
17pub mod reserved_ip;
18pub mod rpc_client;
19pub mod s3;
20pub mod safe;
21pub mod setup;
22pub mod ssh;
23pub mod symlinked_antnode;
24pub mod terraform;
25pub mod upscale;
26
27pub use symlinked_antnode::SymlinkedAntnodeDeployer;
28
29const STORAGE_REQUIRED_PER_NODE: u16 = 7;
30
31use crate::{
32    ansible::{
33        extra_vars::ExtraVarsDocBuilder,
34        inventory::{cleanup_environment_inventory, AnsibleInventoryType},
35        provisioning::AnsibleProvisioner,
36        AnsibleRunner,
37    },
38    error::{Error, Result},
39    inventory::{DeploymentInventory, VirtualMachine},
40    rpc_client::RpcClient,
41    s3::S3Repository,
42    ssh::SshClient,
43    terraform::TerraformRunner,
44};
45use ant_service_management::ServiceStatus;
46use flate2::read::GzDecoder;
47use indicatif::{ProgressBar, ProgressStyle};
48use infra::{build_terraform_args, InfraRunOptions};
49use log::{debug, trace};
50use semver::Version;
51use serde::{Deserialize, Serialize};
52use serde_json::json;
53use std::{
54    fs::File,
55    io::{BufRead, BufReader, BufWriter, Write},
56    net::IpAddr,
57    path::{Path, PathBuf},
58    process::{Command, Stdio},
59    str::FromStr,
60    time::Duration,
61};
62use tar::Archive;
63
64const ANSIBLE_DEFAULT_FORKS: usize = 50;
65
66#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
67pub enum DeploymentType {
68    /// The deployment has been bootstrapped from an existing network.
69    Bootstrap,
70    /// Client deployment.
71    Client,
72    /// The deployment is a new network.
73    #[default]
74    New,
75}
76
77#[derive(Debug, Clone, Default, Serialize, Deserialize)]
78pub struct AnvilNodeData {
79    pub data_payments_address: String,
80    pub deployer_wallet_private_key: String,
81    pub merkle_payments_address: String,
82    pub payment_token_address: String,
83    pub rpc_url: String,
84}
85
86impl std::fmt::Display for DeploymentType {
87    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
88        match self {
89            DeploymentType::Bootstrap => write!(f, "bootstrap"),
90            DeploymentType::Client => write!(f, "clients"),
91            DeploymentType::New => write!(f, "new"),
92        }
93    }
94}
95
96impl std::str::FromStr for DeploymentType {
97    type Err = String;
98
99    fn from_str(s: &str) -> Result<Self, Self::Err> {
100        match s.to_lowercase().as_str() {
101            "bootstrap" => Ok(DeploymentType::Bootstrap),
102            "clients" => Ok(DeploymentType::Client),
103            "new" => Ok(DeploymentType::New),
104            _ => Err(format!("Invalid deployment type: {s}")),
105        }
106    }
107}
108
109#[derive(Debug, Clone)]
110pub enum NodeType {
111    FullConePrivateNode,
112    PortRestrictedConePrivateNode,
113    Generic,
114    Genesis,
115    PeerCache,
116    SymmetricPrivateNode,
117    Upnp,
118}
119
120impl std::fmt::Display for NodeType {
121    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
122        match self {
123            NodeType::FullConePrivateNode => write!(f, "full-cone-private"),
124            NodeType::PortRestrictedConePrivateNode => write!(f, "port-restricted-cone-private"),
125            NodeType::Generic => write!(f, "generic"),
126            NodeType::Genesis => write!(f, "genesis"),
127            NodeType::PeerCache => write!(f, "peer-cache"),
128            NodeType::SymmetricPrivateNode => write!(f, "symmetric-private"),
129            NodeType::Upnp => write!(f, "upnp"),
130        }
131    }
132}
133
134impl std::str::FromStr for NodeType {
135    type Err = String;
136
137    fn from_str(s: &str) -> Result<Self, Self::Err> {
138        match s.to_lowercase().as_str() {
139            "full-cone-private" => Ok(NodeType::FullConePrivateNode),
140            "port-restricted-cone-private" => Ok(NodeType::PortRestrictedConePrivateNode),
141            "generic" => Ok(NodeType::Generic),
142            "genesis" => Ok(NodeType::Genesis),
143            "peer-cache" => Ok(NodeType::PeerCache),
144            "symmetric-private" => Ok(NodeType::SymmetricPrivateNode),
145            "upnp" => Ok(NodeType::Upnp),
146            _ => Err(format!("Invalid node type: {s}")),
147        }
148    }
149}
150
151impl NodeType {
152    pub fn telegraf_role(&self) -> &'static str {
153        match self {
154            NodeType::FullConePrivateNode => "NAT_STATIC_FULL_CONE_NODE",
155            NodeType::PortRestrictedConePrivateNode => "PORT_RESTRICTED_CONE_NODE",
156            NodeType::Generic => "GENERIC_NODE",
157            NodeType::Genesis => "GENESIS_NODE",
158            NodeType::PeerCache => "PEER_CACHE_NODE",
159            NodeType::SymmetricPrivateNode => "NAT_RANDOMIZED_NODE",
160            NodeType::Upnp => "UPNP_NODE",
161        }
162    }
163
164    pub fn to_ansible_inventory_type(&self) -> AnsibleInventoryType {
165        match self {
166            NodeType::FullConePrivateNode => AnsibleInventoryType::FullConePrivateNodes,
167            NodeType::PortRestrictedConePrivateNode => {
168                AnsibleInventoryType::PortRestrictedConePrivateNodes
169            }
170            NodeType::Generic => AnsibleInventoryType::Nodes,
171            NodeType::Genesis => AnsibleInventoryType::Genesis,
172            NodeType::PeerCache => AnsibleInventoryType::PeerCacheNodes,
173            NodeType::SymmetricPrivateNode => AnsibleInventoryType::SymmetricPrivateNodes,
174            NodeType::Upnp => AnsibleInventoryType::Upnp,
175        }
176    }
177}
178
179#[derive(Clone, Debug, Default, Eq, Serialize, Deserialize, PartialEq)]
180pub enum EvmNetwork {
181    #[default]
182    Anvil,
183    ArbitrumOne,
184    ArbitrumSepoliaTest,
185    Custom,
186}
187
188impl std::fmt::Display for EvmNetwork {
189    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
190        match self {
191            EvmNetwork::Anvil => write!(f, "evm-custom"),
192            EvmNetwork::ArbitrumOne => write!(f, "evm-arbitrum-one"),
193            EvmNetwork::ArbitrumSepoliaTest => write!(f, "evm-arbitrum-sepolia-test"),
194            EvmNetwork::Custom => write!(f, "evm-custom"),
195        }
196    }
197}
198
199impl std::str::FromStr for EvmNetwork {
200    type Err = String;
201
202    fn from_str(s: &str) -> Result<Self, Self::Err> {
203        match s.to_lowercase().as_str() {
204            "anvil" => Ok(EvmNetwork::Anvil),
205            "arbitrum-one" => Ok(EvmNetwork::ArbitrumOne),
206            "arbitrum-sepolia-test" => Ok(EvmNetwork::ArbitrumSepoliaTest),
207            "custom" => Ok(EvmNetwork::Custom),
208            _ => Err(format!("Invalid EVM network type: {s}")),
209        }
210    }
211}
212
213#[derive(Clone, Debug, Default, Serialize, Deserialize)]
214pub struct EvmDetails {
215    pub network: EvmNetwork,
216    pub data_payments_address: Option<String>,
217    pub merkle_payments_address: Option<String>,
218    pub payment_token_address: Option<String>,
219    pub rpc_url: Option<String>,
220}
221
222#[derive(Clone, Debug, Default, Serialize, Deserialize)]
223pub struct EnvironmentDetails {
224    pub deployment_type: DeploymentType,
225    pub environment_type: EnvironmentType,
226    pub evm_details: EvmDetails,
227    pub funding_wallet_address: Option<String>,
228    pub network_id: Option<u8>,
229    pub region: String,
230    pub rewards_address: Option<String>,
231}
232
233#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
234pub enum EnvironmentType {
235    #[default]
236    Development,
237    Production,
238    Staging,
239}
240
241impl EnvironmentType {
    pub fn get_tfvars_filenames(&self, name: &str, region: &str) -> Vec<String> {
        match self {
            EnvironmentType::Development => vec![
                "dev.tfvars".to_string(),
                format!("dev-images-{region}.tfvars"),
            ],
            EnvironmentType::Staging => vec![
                "staging.tfvars".to_string(),
                format!("staging-images-{region}.tfvars"),
            ],
            EnvironmentType::Production => {
                vec![
                    format!("{name}.tfvars"),
                    format!("production-images-{region}.tfvars"),
                ]
            }
        }
    }
260
261    pub fn get_tfvars_filenames_with_fallback(
262        &self,
263        name: &str,
264        region: &str,
265        terraform_dir: &Path,
266    ) -> Vec<String> {
267        match self {
268            EnvironmentType::Development | EnvironmentType::Staging => {
269                self.get_tfvars_filenames(name, region)
270            }
271            EnvironmentType::Production => {
272                let named_tfvars = format!("{name}.tfvars");
273                let tfvars_file = if terraform_dir.join(&named_tfvars).exists() {
274                    named_tfvars
275                } else {
276                    "production.tfvars".to_string()
277                };
278                vec![tfvars_file, format!("production-images-{region}.tfvars")]
279            }
280        }
281    }
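
    // Illustrative sketch (not part of the original source): for a hypothetical
    // environment named "beta" in region "lon1", these helpers resolve to roughly:
    //
    //   Development => ["dev.tfvars", "dev-images-lon1.tfvars"]
    //   Staging     => ["staging.tfvars", "staging-images-lon1.tfvars"]
    //   Production  => ["beta.tfvars", "production-images-lon1.tfvars"]
    //
    // with `get_tfvars_filenames_with_fallback` substituting "production.tfvars"
    // when "beta.tfvars" does not exist in the Terraform directory.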
282
283    pub fn get_default_peer_cache_node_count(&self) -> u16 {
284        match self {
285            EnvironmentType::Development => 5,
286            EnvironmentType::Production => 5,
287            EnvironmentType::Staging => 5,
288        }
289    }
290
291    pub fn get_default_node_count(&self) -> u16 {
292        match self {
293            EnvironmentType::Development => 25,
294            EnvironmentType::Production => 25,
295            EnvironmentType::Staging => 25,
296        }
297    }
298
299    pub fn get_default_symmetric_private_node_count(&self) -> u16 {
300        self.get_default_node_count()
301    }
302
303    pub fn get_default_full_cone_private_node_count(&self) -> u16 {
304        self.get_default_node_count()
    }

    pub fn get_default_upnp_private_node_count(&self) -> u16 {
307        self.get_default_node_count()
308    }
309}
310
311impl std::fmt::Display for EnvironmentType {
312    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
313        match self {
314            EnvironmentType::Development => write!(f, "development"),
315            EnvironmentType::Production => write!(f, "production"),
316            EnvironmentType::Staging => write!(f, "staging"),
317        }
318    }
319}
320
321impl FromStr for EnvironmentType {
322    type Err = Error;
323
324    fn from_str(s: &str) -> Result<Self, Self::Err> {
325        match s.to_lowercase().as_str() {
326            "development" => Ok(EnvironmentType::Development),
327            "production" => Ok(EnvironmentType::Production),
328            "staging" => Ok(EnvironmentType::Staging),
329            _ => Err(Error::EnvironmentNameFromStringError(s.to_string())),
330        }
331    }
332}
333
334/// Specify the binary option for the deployment.
335///
336/// There are several binaries involved in the deployment:
337/// * safenode
338/// * safenode_rpc_client
339/// * faucet
340/// * safe
341///
342/// The `safe` binary is only used for smoke testing the deployment, although we don't really do
343/// that at the moment.
344///
/// The options are to build from source, or to supply pre-built, versioned binaries, which will
/// be fetched from S3. Building from source adds significant time to the deployment.
347#[derive(Clone, Debug, Serialize, Deserialize)]
348pub enum BinaryOption {
349    /// Binaries will be built from source.
350    BuildFromSource {
351        /// A comma-separated list that will be passed to the `--features` argument.
352        antnode_features: Option<String>,
353        branch: String,
354        repo_owner: String,
        /// Skip building the binaries if they were already built during the previous run using the
        /// same branch, repo owner, and testnet name.
357        skip_binary_build: bool,
358    },
359    /// Pre-built, versioned binaries will be fetched from S3.
360    Versioned {
361        ant_version: Option<Version>,
362        antctl_version: Option<Version>,
363        antnode_version: Option<Version>,
364    },
365}
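
// Illustrative sketch (not part of the original source): constructing each variant.
// The feature, branch, owner and version values below are hypothetical.
//
//     let from_source = BinaryOption::BuildFromSource {
//         antnode_features: Some("open-metrics".to_string()),
//         branch: "main".to_string(),
//         repo_owner: "maidsafe".to_string(),
//         skip_binary_build: false,
//     };
//     let versioned = BinaryOption::Versioned {
//         ant_version: None,
//         antctl_version: None,
//         antnode_version: Some("0.1.0".parse()?),
//     };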
366
367impl BinaryOption {
368    pub fn should_provision_build_machine(&self) -> bool {
369        match self {
370            BinaryOption::BuildFromSource {
371                skip_binary_build, ..
372            } => !skip_binary_build,
373            BinaryOption::Versioned { .. } => false,
374        }
375    }
376
377    pub fn print(&self) {
378        match self {
379            BinaryOption::BuildFromSource {
380                antnode_features,
381                branch,
382                repo_owner,
383                skip_binary_build: _,
384            } => {
385                println!("Source configuration:");
386                println!("  Repository owner: {repo_owner}");
387                println!("  Branch: {branch}");
388                if let Some(features) = antnode_features {
389                    println!("  Antnode features: {features}");
390                }
391            }
392            BinaryOption::Versioned {
393                ant_version,
394                antctl_version,
395                antnode_version,
396            } => {
397                println!("Versioned binaries configuration:");
398                if let Some(version) = ant_version {
399                    println!("  ant version: {version}");
400                }
401                if let Some(version) = antctl_version {
402                    println!("  antctl version: {version}");
403                }
404                if let Some(version) = antnode_version {
405                    println!("  antnode version: {version}");
406                }
407            }
408        }
409    }
410}
411
412#[derive(Debug, Clone, Copy)]
413pub enum CloudProvider {
414    Aws,
415    DigitalOcean,
416}
417
418impl std::fmt::Display for CloudProvider {
419    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
420        match self {
421            CloudProvider::Aws => write!(f, "aws"),
422            CloudProvider::DigitalOcean => write!(f, "digital-ocean"),
423        }
424    }
425}
426
427impl CloudProvider {
428    pub fn get_ssh_user(&self) -> String {
429        match self {
430            CloudProvider::Aws => "ubuntu".to_string(),
431            CloudProvider::DigitalOcean => "root".to_string(),
432        }
433    }
434}
435
436#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
437pub enum LogFormat {
438    Default,
439    Json,
440}
441
442impl LogFormat {
443    pub fn parse_from_str(val: &str) -> Result<Self> {
444        match val {
445            "default" => Ok(LogFormat::Default),
446            "json" => Ok(LogFormat::Json),
447            _ => Err(Error::LoggingConfiguration(
448                "The only valid values for this argument are \"default\" or \"json\"".to_string(),
449            )),
450        }
451    }
452
453    pub fn as_str(&self) -> &'static str {
454        match self {
455            LogFormat::Default => "default",
456            LogFormat::Json => "json",
457        }
458    }
459}
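
// Illustrative sketch (not part of the original source): parsing a log format from
// a CLI argument and converting it back to its string form.
//
//     let format = LogFormat::parse_from_str("json")?;
//     assert_eq!(format.as_str(), "json");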
460
461#[derive(Clone)]
462pub struct UpgradeOptions {
463    pub ansible_verbose: bool,
464    pub branch: Option<String>,
465    pub custom_inventory: Option<Vec<VirtualMachine>>,
466    pub env_variables: Option<Vec<(String, String)>>,
467    pub force: bool,
468    pub forks: usize,
469    pub interval: Duration,
470    pub name: String,
471    pub node_type: Option<NodeType>,
472    pub pre_upgrade_delay: Option<u64>,
473    pub provider: CloudProvider,
474    pub repo_owner: Option<String>,
475    pub version: Option<String>,
476}
477
478impl UpgradeOptions {
479    pub fn get_ansible_vars(&self) -> String {
480        let mut extra_vars = ExtraVarsDocBuilder::default();
481        extra_vars.add_variable("interval", &self.interval.as_millis().to_string());
482        if let Some(env_variables) = &self.env_variables {
483            extra_vars.add_env_variable_list("env_variables", env_variables.clone());
484        }
485        if self.force {
486            extra_vars.add_variable("force", &self.force.to_string());
487        }
488        if let Some(version) = &self.version {
489            extra_vars.add_variable("antnode_version", version);
490        }
491        if let Some(pre_upgrade_delay) = &self.pre_upgrade_delay {
492            extra_vars.add_variable("pre_upgrade_delay", &pre_upgrade_delay.to_string());
493        }
494
495        if let (Some(repo_owner), Some(branch)) = (&self.repo_owner, &self.branch) {
496            let binary_option = BinaryOption::BuildFromSource {
497                antnode_features: None,
498                branch: branch.clone(),
499                repo_owner: repo_owner.clone(),
500                skip_binary_build: true,
501            };
502            extra_vars.add_node_url_or_version(&self.name, &binary_option);
503        }
504
505        extra_vars.build()
506    }
507}
508
509#[derive(Default)]
510pub struct TestnetDeployBuilder {
511    ansible_forks: Option<usize>,
512    ansible_verbose_mode: bool,
513    deployment_type: EnvironmentType,
514    environment_name: String,
515    provider: Option<CloudProvider>,
516    region: Option<String>,
517    ssh_secret_key_path: Option<PathBuf>,
518    state_bucket_name: Option<String>,
519    terraform_binary_path: Option<PathBuf>,
520    vault_password_path: Option<PathBuf>,
521    working_directory_path: Option<PathBuf>,
522}
523
524impl TestnetDeployBuilder {
525    pub fn new() -> Self {
526        Default::default()
527    }
528
529    pub fn ansible_verbose_mode(&mut self, ansible_verbose_mode: bool) -> &mut Self {
530        self.ansible_verbose_mode = ansible_verbose_mode;
531        self
532    }
533
534    pub fn ansible_forks(&mut self, ansible_forks: usize) -> &mut Self {
535        self.ansible_forks = Some(ansible_forks);
536        self
537    }
538
539    pub fn deployment_type(&mut self, deployment_type: EnvironmentType) -> &mut Self {
540        self.deployment_type = deployment_type;
541        self
542    }
543
544    pub fn environment_name(&mut self, name: &str) -> &mut Self {
545        self.environment_name = name.to_string();
546        self
547    }
548
549    pub fn provider(&mut self, provider: CloudProvider) -> &mut Self {
550        self.provider = Some(provider);
551        self
552    }
553
554    pub fn state_bucket_name(&mut self, state_bucket_name: String) -> &mut Self {
555        self.state_bucket_name = Some(state_bucket_name);
556        self
557    }
558
559    pub fn terraform_binary_path(&mut self, terraform_binary_path: PathBuf) -> &mut Self {
560        self.terraform_binary_path = Some(terraform_binary_path);
561        self
562    }
563
564    pub fn working_directory(&mut self, working_directory_path: PathBuf) -> &mut Self {
565        self.working_directory_path = Some(working_directory_path);
566        self
567    }
568
569    pub fn ssh_secret_key_path(&mut self, ssh_secret_key_path: PathBuf) -> &mut Self {
570        self.ssh_secret_key_path = Some(ssh_secret_key_path);
571        self
572    }
573
574    pub fn vault_password_path(&mut self, vault_password_path: PathBuf) -> &mut Self {
575        self.vault_password_path = Some(vault_password_path);
576        self
577    }
578
579    pub fn region(&mut self, region: String) -> &mut Self {
580        self.region = Some(region);
581        self
582    }
583
584    pub fn build(&self) -> Result<TestnetDeployer> {
585        let provider = self.provider.unwrap_or(CloudProvider::DigitalOcean);
586        match provider {
587            CloudProvider::DigitalOcean => {
588                let digital_ocean_pat = std::env::var("DO_PAT").map_err(|_| {
589                    Error::CloudProviderCredentialsNotSupplied("DO_PAT".to_string())
590                })?;
591                // The DO_PAT variable is not actually read by either Terraform or Ansible.
592                // Each tool uses a different variable, so instead we set each of those variables
593                // to the value of DO_PAT. This means the user only needs to set one variable.
594                std::env::set_var("DIGITALOCEAN_TOKEN", digital_ocean_pat.clone());
595                std::env::set_var("DO_API_TOKEN", digital_ocean_pat);
596            }
597            _ => {
598                return Err(Error::CloudProviderNotSupported(provider.to_string()));
599            }
600        }
601
602        let state_bucket_name = match self.state_bucket_name {
603            Some(ref bucket_name) => bucket_name.clone(),
604            None => std::env::var("TERRAFORM_STATE_BUCKET_NAME")?,
605        };
606
607        let default_terraform_bin_path = PathBuf::from("terraform");
608        let terraform_binary_path = self
609            .terraform_binary_path
610            .as_ref()
611            .unwrap_or(&default_terraform_bin_path);
612
613        let working_directory_path = match self.working_directory_path {
614            Some(ref work_dir_path) => work_dir_path.clone(),
615            None => std::env::current_dir()?.join("resources"),
616        };
617
618        let ssh_secret_key_path = match self.ssh_secret_key_path {
619            Some(ref ssh_sk_path) => ssh_sk_path.clone(),
620            None => PathBuf::from(std::env::var("SSH_KEY_PATH")?),
621        };
622
623        let vault_password_path = match self.vault_password_path {
624            Some(ref vault_pw_path) => vault_pw_path.clone(),
625            None => PathBuf::from(std::env::var("ANSIBLE_VAULT_PASSWORD_PATH")?),
626        };
627
628        let region = match self.region {
629            Some(ref region) => region.clone(),
630            None => "lon1".to_string(),
631        };
632
633        let terraform_runner = TerraformRunner::new(
634            terraform_binary_path.to_path_buf(),
635            working_directory_path
636                .join("terraform")
637                .join("testnet")
638                .join(provider.to_string()),
639            provider,
640            &state_bucket_name,
641        )?;
642        let ansible_runner = AnsibleRunner::new(
643            self.ansible_forks.unwrap_or(ANSIBLE_DEFAULT_FORKS),
644            self.ansible_verbose_mode,
645            &self.environment_name,
646            provider,
647            ssh_secret_key_path.clone(),
648            vault_password_path,
649            working_directory_path.join("ansible"),
650        )?;
651        let ssh_client = SshClient::new(ssh_secret_key_path);
652        let ansible_provisioner =
653            AnsibleProvisioner::new(ansible_runner, provider, ssh_client.clone());
654        let rpc_client = RpcClient::new(
655            PathBuf::from("/usr/local/bin/safenode_rpc_client"),
656            working_directory_path.clone(),
657        );
658
659        // Remove any `safe` binary from a previous deployment. Otherwise you can end up with
660        // mismatched binaries.
661        let safe_path = working_directory_path.join("safe");
662        if safe_path.exists() {
663            std::fs::remove_file(safe_path)?;
664        }
665
666        let testnet = TestnetDeployer::new(
667            ansible_provisioner,
668            provider,
669            self.deployment_type.clone(),
670            &self.environment_name,
671            rpc_client,
672            S3Repository {},
673            ssh_client,
674            terraform_runner,
675            working_directory_path,
676            region,
677        )?;
678
679        Ok(testnet)
680    }
681}
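
// Illustrative sketch (not part of the original source): a typical builder invocation.
// It assumes the environment variables read by `build` (DO_PAT, TERRAFORM_STATE_BUCKET_NAME,
// SSH_KEY_PATH and ANSIBLE_VAULT_PASSWORD_PATH) are set; the environment name is hypothetical.
//
//     let deployer = TestnetDeployBuilder::default()
//         .environment_name("example-dev")
//         .deployment_type(EnvironmentType::Development)
//         .provider(CloudProvider::DigitalOcean)
//         .build()?;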
682
683#[derive(Clone)]
684pub struct TestnetDeployer {
685    pub ansible_provisioner: AnsibleProvisioner,
686    pub cloud_provider: CloudProvider,
687    pub deployment_type: EnvironmentType,
688    pub environment_name: String,
689    pub inventory_file_path: PathBuf,
690    pub region: String,
691    pub rpc_client: RpcClient,
692    pub s3_repository: S3Repository,
693    pub ssh_client: SshClient,
694    pub terraform_runner: TerraformRunner,
695    pub working_directory_path: PathBuf,
696}
697
698impl TestnetDeployer {
699    #[allow(clippy::too_many_arguments)]
700    pub fn new(
701        ansible_provisioner: AnsibleProvisioner,
702        cloud_provider: CloudProvider,
703        deployment_type: EnvironmentType,
704        environment_name: &str,
705        rpc_client: RpcClient,
706        s3_repository: S3Repository,
707        ssh_client: SshClient,
708        terraform_runner: TerraformRunner,
709        working_directory_path: PathBuf,
710        region: String,
711    ) -> Result<TestnetDeployer> {
712        if environment_name.is_empty() {
713            return Err(Error::EnvironmentNameRequired);
714        }
715        let inventory_file_path = working_directory_path
716            .join("ansible")
717            .join("inventory")
718            .join("dev_inventory_digital_ocean.yml");
719        Ok(TestnetDeployer {
720            ansible_provisioner,
721            cloud_provider,
722            deployment_type,
723            environment_name: environment_name.to_string(),
724            inventory_file_path,
725            region,
726            rpc_client,
727            ssh_client,
728            s3_repository,
729            terraform_runner,
730            working_directory_path,
731        })
732    }
733
734    pub async fn init(&self) -> Result<()> {
735        if self
736            .s3_repository
737            .folder_exists(
738                "sn-testnet",
739                &format!("testnet-logs/{}", self.environment_name),
740            )
741            .await?
742        {
743            return Err(Error::LogsForPreviousTestnetExist(
744                self.environment_name.clone(),
745            ));
746        }
747
748        self.terraform_runner.init()?;
749        let workspaces = self.terraform_runner.workspace_list()?;
750        if !workspaces.contains(&self.environment_name) {
751            self.terraform_runner
752                .workspace_new(&self.environment_name)?;
753        } else {
754            println!("Workspace {} already exists", self.environment_name);
755        }
756
757        let rpc_client_path = self.working_directory_path.join("safenode_rpc_client");
758        if !rpc_client_path.is_file() {
759            println!("Downloading the rpc client for safenode...");
760            let archive_name = "safenode_rpc_client-latest-x86_64-unknown-linux-musl.tar.gz";
761            get_and_extract_archive_from_s3(
762                &self.s3_repository,
763                "sn-node-rpc-client",
764                archive_name,
765                &self.working_directory_path,
766            )
767            .await?;
768            #[cfg(unix)]
769            {
770                use std::os::unix::fs::PermissionsExt;
771                let mut permissions = std::fs::metadata(&rpc_client_path)?.permissions();
772                permissions.set_mode(0o755); // rwxr-xr-x
773                std::fs::set_permissions(&rpc_client_path, permissions)?;
774            }
775        }
776
777        Ok(())
778    }
779
780    pub fn plan(&self, options: &InfraRunOptions) -> Result<()> {
781        println!("Selecting {} workspace...", options.name);
782        self.terraform_runner.workspace_select(&options.name)?;
783
784        let args = build_terraform_args(options)?;
785
786        self.terraform_runner
787            .plan(Some(args), options.tfvars_filenames.clone())?;
788        Ok(())
789    }
790
791    pub fn start(
792        &self,
793        interval: Duration,
794        node_type: Option<NodeType>,
795        custom_inventory: Option<Vec<VirtualMachine>>,
796    ) -> Result<()> {
797        self.ansible_provisioner.start_nodes(
798            &self.environment_name,
799            interval,
800            node_type,
801            custom_inventory,
802        )?;
803        Ok(())
804    }
805
806    pub fn apply_delete_node_records_cron(
807        &self,
808        node_type: Option<NodeType>,
809        custom_inventory: Option<Vec<VirtualMachine>>,
810    ) -> Result<()> {
811        self.ansible_provisioner.apply_delete_node_records_cron(
812            &self.environment_name,
813            node_type,
814            custom_inventory,
815        )?;
816        Ok(())
817    }
818
819    pub fn reset(
820        &self,
821        node_type: Option<NodeType>,
822        custom_inventory: Option<Vec<VirtualMachine>>,
823    ) -> Result<()> {
824        self.ansible_provisioner.reset_nodes(
825            &self.environment_name,
826            node_type,
827            custom_inventory,
828        )?;
829        Ok(())
830    }
831
832    /// Get the status of all nodes in a network.
833    ///
834    /// First, a playbook runs `safenode-manager status` against all the machines, to get the
835    /// current state of all the nodes. Then all the node registry files are retrieved and
836    /// deserialized to a `NodeRegistry`, allowing us to output the status of each node on each VM.
837    pub async fn status(&self) -> Result<()> {
838        self.ansible_provisioner.status()?;
839
840        let peer_cache_node_registries = self
841            .ansible_provisioner
842            .get_node_registries(&AnsibleInventoryType::PeerCacheNodes)
843            .await?;
844        let generic_node_registries = self
845            .ansible_provisioner
846            .get_node_registries(&AnsibleInventoryType::Nodes)
847            .await?;
848        let symmetric_private_node_registries = self
849            .ansible_provisioner
850            .get_node_registries(&AnsibleInventoryType::SymmetricPrivateNodes)
851            .await?;
852        let full_cone_private_node_registries = self
853            .ansible_provisioner
854            .get_node_registries(&AnsibleInventoryType::FullConePrivateNodes)
855            .await?;
856        let upnp_private_node_registries = self
857            .ansible_provisioner
858            .get_node_registries(&AnsibleInventoryType::Upnp)
859            .await?;
860        let port_restricted_cone_private_node_registries = self
861            .ansible_provisioner
862            .get_node_registries(&AnsibleInventoryType::PortRestrictedConePrivateNodes)
863            .await?;
864        let genesis_node_registry = self
865            .ansible_provisioner
866            .get_node_registries(&AnsibleInventoryType::Genesis)
867            .await?
868            .clone();
869
        peer_cache_node_registries.print().await;
        generic_node_registries.print().await;
        symmetric_private_node_registries.print().await;
        full_cone_private_node_registries.print().await;
        upnp_private_node_registries.print().await;
        port_restricted_cone_private_node_registries.print().await;
        genesis_node_registry.print().await;

        let all_registries = [
            &peer_cache_node_registries,
            &generic_node_registries,
            &symmetric_private_node_registries,
            &full_cone_private_node_registries,
            &upnp_private_node_registries,
            &port_restricted_cone_private_node_registries,
            &genesis_node_registry,
        ];
885
886        let mut total_nodes = 0;
887        let mut running_nodes = 0;
888        let mut stopped_nodes = 0;
889        let mut added_nodes = 0;
890        let mut removed_nodes = 0;
891
892        for (_, registry) in all_registries
893            .iter()
894            .flat_map(|r| r.retrieved_registries.iter())
895        {
896            for node in registry.nodes.read().await.iter() {
897                total_nodes += 1;
898                match node.read().await.status {
899                    ServiceStatus::Running => running_nodes += 1,
900                    ServiceStatus::Stopped => stopped_nodes += 1,
901                    ServiceStatus::Added => added_nodes += 1,
902                    ServiceStatus::Removed => removed_nodes += 1,
903                }
904            }
905        }
906
907        let peer_cache_hosts = peer_cache_node_registries.retrieved_registries.len();
908        let generic_hosts = generic_node_registries.retrieved_registries.len();
909        let symmetric_private_hosts = symmetric_private_node_registries.retrieved_registries.len();
910        let full_cone_private_hosts = full_cone_private_node_registries.retrieved_registries.len();
911        let upnp_private_hosts = upnp_private_node_registries.retrieved_registries.len();
912        let port_restricted_cone_private_hosts = port_restricted_cone_private_node_registries
913            .retrieved_registries
914            .len();
915
916        let peer_cache_nodes = peer_cache_node_registries.get_node_count().await;
917        let generic_nodes = generic_node_registries.get_node_count().await;
918        let symmetric_private_nodes = symmetric_private_node_registries.get_node_count().await;
919        let full_cone_private_nodes = full_cone_private_node_registries.get_node_count().await;
920        let upnp_private_nodes = upnp_private_node_registries.get_node_count().await;
921        let port_restricted_cone_private_nodes = port_restricted_cone_private_node_registries
922            .get_node_count()
923            .await;
924
925        println!("-------");
926        println!("Summary");
927        println!("-------");
928        println!(
929            "Total peer cache nodes ({}x{}): {}",
930            peer_cache_hosts,
931            if peer_cache_hosts > 0 {
932                peer_cache_nodes / peer_cache_hosts
933            } else {
934                0
935            },
936            peer_cache_nodes
937        );
938        println!(
939            "Total generic nodes ({}x{}): {}",
940            generic_hosts,
941            if generic_hosts > 0 {
942                generic_nodes / generic_hosts
943            } else {
944                0
945            },
946            generic_nodes
947        );
948        println!(
949            "Total symmetric private nodes ({}x{}): {}",
950            symmetric_private_hosts,
951            if symmetric_private_hosts > 0 {
952                symmetric_private_nodes / symmetric_private_hosts
953            } else {
954                0
955            },
956            symmetric_private_nodes
957        );
958        println!(
959            "Total full cone private nodes ({}x{}): {}",
960            full_cone_private_hosts,
961            if full_cone_private_hosts > 0 {
962                full_cone_private_nodes / full_cone_private_hosts
963            } else {
964                0
965            },
966            full_cone_private_nodes
967        );
968        println!(
969            "Total UPnP private nodes ({}x{}): {}",
970            upnp_private_hosts,
971            if upnp_private_hosts > 0 {
972                upnp_private_nodes / upnp_private_hosts
973            } else {
974                0
975            },
976            upnp_private_nodes
977        );
978        println!(
979            "Total port restricted cone private nodes ({}x{}): {}",
980            port_restricted_cone_private_hosts,
981            if port_restricted_cone_private_hosts > 0 {
982                port_restricted_cone_private_nodes / port_restricted_cone_private_hosts
983            } else {
984                0
985            },
986            port_restricted_cone_private_nodes
987        );
988        println!("Total nodes: {total_nodes}");
989        println!("Running nodes: {running_nodes}");
990        println!("Stopped nodes: {stopped_nodes}");
991        println!("Added nodes: {added_nodes}");
992        println!("Removed nodes: {removed_nodes}");
993
994        Ok(())
995    }
996
997    pub fn cleanup_node_logs(&self, setup_cron: bool) -> Result<()> {
998        self.ansible_provisioner.cleanup_node_logs(setup_cron)?;
999        Ok(())
1000    }
1001
1002    pub fn start_telegraf(
1003        &self,
1004        node_type: Option<NodeType>,
1005        custom_inventory: Option<Vec<VirtualMachine>>,
1006    ) -> Result<()> {
1007        self.ansible_provisioner.start_telegraf(
1008            &self.environment_name,
1009            node_type,
1010            custom_inventory,
1011        )?;
1012        Ok(())
1013    }
1014
1015    pub fn stop(
1016        &self,
1017        interval: Duration,
1018        node_type: Option<NodeType>,
1019        custom_inventory: Option<Vec<VirtualMachine>>,
1020        delay: Option<u64>,
1021        service_names: Option<Vec<String>>,
1022    ) -> Result<()> {
1023        self.ansible_provisioner.stop_nodes(
1024            &self.environment_name,
1025            interval,
1026            node_type,
1027            custom_inventory,
1028            delay,
1029            service_names,
1030        )?;
1031        Ok(())
1032    }
1033
1034    pub fn stop_telegraf(
1035        &self,
1036        node_type: Option<NodeType>,
1037        custom_inventory: Option<Vec<VirtualMachine>>,
1038    ) -> Result<()> {
1039        self.ansible_provisioner.stop_telegraf(
1040            &self.environment_name,
1041            node_type,
1042            custom_inventory,
1043        )?;
1044        Ok(())
1045    }
1046
1047    pub fn upgrade(&self, options: UpgradeOptions) -> Result<()> {
1048        self.ansible_provisioner.upgrade_nodes(&options)?;
1049        Ok(())
1050    }
1051
1052    pub fn upgrade_antctl(
1053        &self,
1054        version: Version,
1055        node_type: Option<NodeType>,
1056        custom_inventory: Option<Vec<VirtualMachine>>,
1057    ) -> Result<()> {
1058        self.ansible_provisioner.upgrade_antctl(
1059            &self.environment_name,
1060            &version,
1061            node_type,
1062            custom_inventory,
1063        )?;
1064        Ok(())
1065    }
1066
1067    pub fn upgrade_geoip_telegraf(&self, name: &str) -> Result<()> {
1068        self.ansible_provisioner.upgrade_geoip_telegraf(name)?;
1069        Ok(())
1070    }
1071
1072    pub fn upgrade_node_telegraf(&self, name: &str) -> Result<()> {
1073        self.ansible_provisioner.upgrade_node_telegraf(name)?;
1074        Ok(())
1075    }
1076
1077    pub fn upgrade_client_telegraf(&self, name: &str) -> Result<()> {
1078        self.ansible_provisioner.upgrade_client_telegraf(name)?;
1079        Ok(())
1080    }
1081
1082    pub async fn clean(&self) -> Result<()> {
1083        let environment_details =
1084            get_environment_details(&self.environment_name, &self.s3_repository)
1085                .await
1086                .inspect_err(|err| {
1087                    println!("Failed to get environment details: {err}. Continuing cleanup...");
1088                })
1089                .ok();
1090        if let Some(environment_details) = &environment_details {
1091            funding::drain_funds(&self.ansible_provisioner, environment_details).await?;
1092        }
1093
1094        self.destroy_infra(environment_details).await?;
1095
1096        cleanup_environment_inventory(
1097            &self.environment_name,
1098            &self
1099                .working_directory_path
1100                .join("ansible")
1101                .join("inventory"),
1102            None,
1103        )?;
1104
1105        println!("Deleted Ansible inventory for {}", self.environment_name);
1106
1107        if let Err(err) = self
1108            .s3_repository
1109            .delete_object("sn-environment-type", &self.environment_name)
1110            .await
1111        {
1112            println!("Failed to delete environment type: {err}. Continuing cleanup...");
1113        }
1114        Ok(())
1115    }
1116
1117    async fn destroy_infra(&self, environment_details: Option<EnvironmentDetails>) -> Result<()> {
1118        infra::select_workspace(&self.terraform_runner, &self.environment_name)?;
1119
1120        let options = InfraRunOptions::generate_existing(
1121            &self.environment_name,
1122            &self.region,
1123            &self.terraform_runner,
1124            environment_details.as_ref(),
1125        )
1126        .await?;
1127
1128        let args = build_terraform_args(&options)?;
1129        let tfvars_filenames = if let Some(environment_details) = &environment_details {
1130            environment_details
1131                .environment_type
1132                .get_tfvars_filenames(&self.environment_name, &self.region)
1133        } else {
1134            vec![]
1135        };
1136
1137        self.terraform_runner
1138            .destroy(Some(args), Some(tfvars_filenames))?;
1139
1140        infra::delete_workspace(&self.terraform_runner, &self.environment_name)?;
1141
1142        Ok(())
1143    }
1144}
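
// Illustrative sketch (not part of the original source): the rough lifecycle of an
// environment using the methods above. The `options` value is an `InfraRunOptions`
// assumed to have been constructed elsewhere.
//
//     deployer.init().await?;
//     deployer.plan(&options)?;
//     // ...provision the network, then later...
//     deployer.status().await?;
//     deployer.clean().await?;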
1145
1146//
1147// Shared Helpers
1148//
1149
1150pub fn get_genesis_multiaddr(
1151    ansible_runner: &AnsibleRunner,
1152    ssh_client: &SshClient,
1153) -> Result<Option<(String, IpAddr)>> {
1154    let genesis_inventory = ansible_runner.get_inventory(AnsibleInventoryType::Genesis, true)?;
1155    if genesis_inventory.is_empty() {
1156        return Ok(None);
1157    }
1158    let genesis_ip = genesis_inventory[0].public_ip_addr;
1159
    // It's possible for the genesis host to have been altered from its original state, in which a
    // node was started with the `--first` flag.
    // First attempt: try to find a node with `first == true`.
1163    let multiaddr = ssh_client
1164        .run_command(
1165            &genesis_ip,
1166            "root",
1167            "jq -r '.nodes[] | select(.initial_peers_config.first == true) | .listen_addr[] | select(contains(\"127.0.0.1\") | not) | select(contains(\"quic-v1\"))' /var/antctl/node_registry.json | head -n 1",
1168            false,
1169        )
1170        .map(|output| output.first().cloned())
1171        .unwrap_or_else(|err| {
1172            log::error!("Failed to find first node with quic-v1 protocol: {err:?}");
1173            None
1174        });
1175
1176    // Second attempt: if first attempt failed, see if any node is available.
1177    let multiaddr = match multiaddr {
1178        Some(addr) => addr,
1179        None => ssh_client
1180            .run_command(
1181                &genesis_ip,
1182                "root",
1183                "jq -r '.nodes[] | .listen_addr[] | select(contains(\"127.0.0.1\") | not) | select(contains(\"quic-v1\"))' /var/antctl/node_registry.json | head -n 1",
1184                false,
1185            )?
1186            .first()
1187            .cloned()
1188            .ok_or_else(|| Error::GenesisListenAddress)?,
1189    };
1190
1191    Ok(Some((multiaddr, genesis_ip)))
1192}
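
// Illustrative sketch (not part of the original source): callers need to handle the
// case where the environment has no genesis VM.
//
//     match get_genesis_multiaddr(&ansible_runner, &ssh_client)? {
//         Some((multiaddr, ip)) => println!("genesis is {multiaddr} on {ip}"),
//         None => println!("no genesis node in this environment"),
//     }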
1193
1194pub fn get_anvil_node_data_hardcoded(ansible_runner: &AnsibleRunner) -> Result<AnvilNodeData> {
1195    let evm_inventory = ansible_runner.get_inventory(AnsibleInventoryType::EvmNodes, true)?;
1196    if evm_inventory.is_empty() {
1197        return Err(Error::EvmNodeNotFound);
1198    }
1199    let evm_ip = evm_inventory[0].public_ip_addr;
1200
1201    Ok(AnvilNodeData {
1202        data_payments_address: "0x8464135c8F25Da09e49BC8782676a84730C318bC".to_string(),
1203        deployer_wallet_private_key:
1204            "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80".to_string(),
1205        merkle_payments_address: "0x663F3ad617193148711d28f5334eE4Ed07016602".to_string(),
1206        payment_token_address: "0x5FbDB2315678afecb367f032d93F642f64180aa3".to_string(),
1207        rpc_url: format!("http://{evm_ip}:61611"),
1208    })
1209}
1210
1211pub fn get_multiaddr(
1212    ansible_runner: &AnsibleRunner,
1213    ssh_client: &SshClient,
1214) -> Result<(String, IpAddr)> {
1215    let node_inventory = ansible_runner.get_inventory(AnsibleInventoryType::Nodes, true)?;
    // For upscaling a bootstrap deployment, we'd need to select one of the nodes that's already
    // provisioned, so use the first node VM (the one whose name ends with `-node-1`).
1218    let node_ip = node_inventory
1219        .iter()
1220        .find(|vm| vm.name.ends_with("-node-1"))
1221        .ok_or_else(|| Error::NodeAddressNotFound)?
1222        .public_ip_addr;
1223
1224    debug!("Getting multiaddr from node {node_ip}");
1225
    let multiaddr = ssh_client
        .run_command(
            &node_ip,
            "root",
            // Fetch the first multiaddr which does not contain the localhost address.
            "jq -r '.nodes[] | .listen_addr[] | select(contains(\"127.0.0.1\") | not)' /var/antctl/node_registry.json | head -n 1",
            false,
        )?
        .first()
        .cloned()
        .ok_or_else(|| Error::NodeAddressNotFound)?;
1237
1238    // The node_ip is obviously inside the multiaddr, but it's just being returned as a
1239    // separate item for convenience.
1240    Ok((multiaddr, node_ip))
1241}
1242
1243pub async fn get_and_extract_archive_from_s3(
1244    s3_repository: &S3Repository,
1245    bucket_name: &str,
1246    archive_bucket_path: &str,
1247    dest_path: &Path,
1248) -> Result<()> {
    // Using `unwrap` here is fine: `split` always yields at least one item, and avoiding it would
    // only mean introducing a trivial error variant that doesn't seem very valuable.
1251    let archive_file_name = archive_bucket_path.split('/').next_back().unwrap();
1252    let archive_dest_path = dest_path.join(archive_file_name);
1253    s3_repository
1254        .download_object(bucket_name, archive_bucket_path, &archive_dest_path)
1255        .await?;
1256    extract_archive(&archive_dest_path, dest_path)?;
1257    Ok(())
1258}
1259
1260pub fn extract_archive(archive_path: &Path, dest_path: &Path) -> Result<()> {
1261    let archive_file = File::open(archive_path)?;
1262    let decoder = GzDecoder::new(archive_file);
1263    let mut archive = Archive::new(decoder);
1264    let entries = archive.entries()?;
1265    for entry_result in entries {
1266        let mut entry = entry_result?;
1267        let extract_path = dest_path.join(entry.path()?);
1268        if entry.header().entry_type() == tar::EntryType::Directory {
1269            std::fs::create_dir_all(extract_path)?;
1270            continue;
1271        }
1272        let mut file = BufWriter::new(File::create(extract_path)?);
1273        std::io::copy(&mut entry, &mut file)?;
1274    }
1275    std::fs::remove_file(archive_path)?;
1276    Ok(())
1277}
1278
1279pub fn run_external_command(
1280    binary_path: PathBuf,
1281    working_directory_path: PathBuf,
1282    args: Vec<String>,
1283    suppress_stdout: bool,
1284    suppress_stderr: bool,
1285) -> Result<Vec<String>> {
1286    let mut command = Command::new(binary_path.clone());
1287    for arg in &args {
1288        command.arg(arg);
1289    }
1290    command.stdout(Stdio::piped());
1291    command.stderr(Stdio::piped());
1292    command.current_dir(working_directory_path.clone());
1293    debug!("Running {binary_path:#?} with args {args:#?}");
1294    debug!("Working directory set to {working_directory_path:#?}");
1295
1296    let mut child = command.spawn()?;
1297    let mut output_lines = Vec::new();
1298
1299    if let Some(ref mut stdout) = child.stdout {
1300        let reader = BufReader::new(stdout);
1301        for line in reader.lines() {
1302            let line = line?;
1303            if !suppress_stdout {
1304                println!("{line}");
1305            }
1306            output_lines.push(line);
1307        }
1308    }
1309
1310    if let Some(ref mut stderr) = child.stderr {
1311        let reader = BufReader::new(stderr);
1312        for line in reader.lines() {
1313            let line = line?;
1314            if !suppress_stderr {
1315                eprintln!("{line}");
1316            }
1317            output_lines.push(line);
1318        }
1319    }
1320
1321    let output = child.wait()?;
1322    if !output.success() {
1323        // Using `unwrap` here avoids introducing another error variant, which seems excessive.
1324        let binary_path = binary_path.to_str().unwrap();
1325        return Err(Error::ExternalCommandRunFailed {
1326            binary: binary_path.to_string(),
1327            exit_status: output,
1328        });
1329    }
1330
1331    Ok(output_lines)
1332}
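
// Illustrative sketch (not part of the original source): invoking an external binary
// and capturing its combined output. The binary and arguments shown are hypothetical.
//
//     let output_lines = run_external_command(
//         PathBuf::from("terraform"),
//         std::env::current_dir()?,
//         vec!["version".to_string()],
//         true,
//         false,
//     )?;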
1333
1334pub fn is_binary_on_path(binary_name: &str) -> bool {
1335    if let Ok(path) = std::env::var("PATH") {
1336        for dir in path.split(':') {
1337            let mut full_path = PathBuf::from(dir);
1338            full_path.push(binary_name);
1339            if full_path.exists() {
1340                return true;
1341            }
1342        }
1343    }
1344    false
1345}
1346
1347pub fn get_wallet_directory() -> Result<PathBuf> {
1348    Ok(dirs_next::data_dir()
1349        .ok_or_else(|| Error::CouldNotRetrieveDataDirectory)?
1350        .join("safe")
1351        .join("client")
1352        .join("wallet"))
1353}
1354
1355pub async fn notify_slack(inventory: DeploymentInventory) -> Result<()> {
1356    let webhook_url =
1357        std::env::var("SLACK_WEBHOOK_URL").map_err(|_| Error::SlackWebhookUrlNotSupplied)?;
1358
1359    let mut message = String::new();
1360    message.push_str("*Testnet Details*\n");
1361    message.push_str(&format!("Name: {}\n", inventory.name));
1362    message.push_str(&format!("Node count: {}\n", inventory.peers().len()));
1363    message.push_str(&format!("Faucet address: {:?}\n", inventory.faucet_address));
1364    match inventory.binary_option {
1365        BinaryOption::BuildFromSource {
1366            ref repo_owner,
1367            ref branch,
1368            ..
1369        } => {
1370            message.push_str("*Branch Details*\n");
1371            message.push_str(&format!("Repo owner: {repo_owner}\n"));
1372            message.push_str(&format!("Branch: {branch}\n"));
1373        }
        BinaryOption::Versioned {
            ref ant_version,
            ref antnode_version,
            ref antctl_version,
        } => {
            message.push_str("*Version Details*\n");
            message.push_str(&format!(
                "ant version: {}\n",
                ant_version
                    .as_ref()
                    .map_or("None".to_string(), |v| v.to_string())
            ));
            message.push_str(&format!(
                "antnode version: {}\n",
                antnode_version
                    .as_ref()
                    .map_or("None".to_string(), |v| v.to_string())
            ));
            message.push_str(&format!(
                "antctl version: {}\n",
                antctl_version
                    .as_ref()
                    .map_or("None".to_string(), |v| v.to_string())
            ));
        }
1400    }
1401
1402    message.push_str("*Sample Peers*\n");
1403    message.push_str("```\n");
1404    for peer in inventory.peers().iter().take(20) {
1405        message.push_str(&format!("{peer}\n"));
1406    }
1407    message.push_str("```\n");
1408    message.push_str("*Available Files*\n");
1409    message.push_str("```\n");
1410    for (addr, file_name) in inventory.uploaded_files.iter() {
1411        message.push_str(&format!("{addr}: {file_name}\n"))
1412    }
1413    message.push_str("```\n");
1414
1415    let payload = json!({
1416        "text": message,
1417    });
1418    reqwest::Client::new()
1419        .post(webhook_url)
1420        .json(&payload)
1421        .send()
1422        .await?;
1423    println!("{message}");
1424    println!("Posted notification to Slack");
1425    Ok(())
1426}
1427
1428fn print_duration(duration: Duration) {
1429    let total_seconds = duration.as_secs();
1430    let minutes = total_seconds / 60;
1431    let seconds = total_seconds % 60;
1432    debug!("Time taken: {minutes} minutes and {seconds} seconds");
1433}
1434
1435pub fn get_progress_bar(length: u64) -> Result<ProgressBar> {
1436    let progress_bar = ProgressBar::new(length);
1437    progress_bar.set_style(
1438        ProgressStyle::default_bar()
1439            .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len}")?
1440            .progress_chars("#>-"),
1441    );
1442    progress_bar.enable_steady_tick(Duration::from_millis(100));
1443    Ok(progress_bar)
1444}
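
// Illustrative sketch (not part of the original source): driving the progress bar over
// a collection of items. `items` and `process` are placeholders.
//
//     let pb = get_progress_bar(items.len() as u64)?;
//     for item in &items {
//         process(item);
//         pb.inc(1);
//     }
//     pb.finish_and_clear();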
1445
1446pub async fn get_environment_details(
1447    environment_name: &str,
1448    s3_repository: &S3Repository,
1449) -> Result<EnvironmentDetails> {
1450    let temp_file = tempfile::NamedTempFile::new()?;
1451
1452    let max_retries = 3;
1453    let mut retries = 0;
1454    let env_details = loop {
1455        debug!("Downloading the environment details file for {environment_name} from S3");
1456        match s3_repository
1457            .download_object("sn-environment-type", environment_name, temp_file.path())
1458            .await
1459        {
1460            Ok(_) => {
1461                debug!("Downloaded the environment details file for {environment_name} from S3");
1462                let content = match std::fs::read_to_string(temp_file.path()) {
1463                    Ok(content) => content,
1464                    Err(err) => {
1465                        log::error!("Could not read the environment details file: {err:?}");
1466                        if retries < max_retries {
1467                            debug!("Retrying to read the environment details file");
1468                            retries += 1;
1469                            continue;
1470                        } else {
1471                            return Err(Error::EnvironmentDetailsNotFound(
1472                                environment_name.to_string(),
1473                            ));
1474                        }
1475                    }
1476                };
1477                trace!("Content of the environment details file: {content}");
1478
1479                match serde_json::from_str(&content) {
1480                    Ok(environment_details) => break environment_details,
1481                    Err(err) => {
1482                        log::error!("Could not parse the environment details file: {err:?}");
1483                        if retries < max_retries {
1484                            debug!("Retrying to parse the environment details file");
1485                            retries += 1;
1486                            continue;
1487                        } else {
1488                            return Err(Error::EnvironmentDetailsNotFound(
1489                                environment_name.to_string(),
1490                            ));
1491                        }
1492                    }
1493                }
1494            }
1495            Err(err) => {
1496                log::error!(
1497                    "Could not download the environment details file for {environment_name} from S3: {err:?}"
1498                );
1499                if retries < max_retries {
1500                    retries += 1;
1501                    continue;
1502                } else {
1503                    return Err(Error::EnvironmentDetailsNotFound(
1504                        environment_name.to_string(),
1505                    ));
1506                }
1507            }
1508        }
1509    };
1510
1511    debug!("Fetched environment details: {env_details:?}");
1512
1513    Ok(env_details)
1514}
1515
1516pub async fn write_environment_details(
1517    s3_repository: &S3Repository,
1518    environment_name: &str,
1519    environment_details: &EnvironmentDetails,
1520) -> Result<()> {
1521    let temp_dir = tempfile::tempdir()?;
1522    let path = temp_dir.path().to_path_buf().join(environment_name);
1523    let mut file = File::create(&path)?;
1524    let json = serde_json::to_string(environment_details)?;
1525    file.write_all(json.as_bytes())?;
1526    s3_repository
1527        .upload_file("sn-environment-type", &path, true)
1528        .await?;
1529    Ok(())
1530}
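
// Illustrative sketch (not part of the original source): the serialized `EnvironmentDetails`
// document stored in the `sn-environment-type` bucket looks roughly like this (all values
// below are hypothetical):
//
//     {
//       "deployment_type": "New",
//       "environment_type": "Development",
//       "evm_details": {
//         "network": "ArbitrumSepoliaTest",
//         "data_payments_address": null,
//         "merkle_payments_address": null,
//         "payment_token_address": null,
//         "rpc_url": null
//       },
//       "funding_wallet_address": null,
//       "network_id": 2,
//       "region": "lon1",
//       "rewards_address": null
//     }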
1531
1532pub fn calculate_size_per_attached_volume(node_count: u16) -> u16 {
1533    if node_count == 0 {
1534        return 0;
1535    }
1536    let total_volume_required = node_count * STORAGE_REQUIRED_PER_NODE;
1537
1538    // 7 attached volumes per VM
1539    (total_volume_required as f64 / 7.0).ceil() as u16
1540}
1541
1542pub fn get_bootstrap_cache_url(ip_addr: &IpAddr) -> String {
1543    format!("http://{ip_addr}/bootstrap_cache.json")
1544}
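
// Illustrative sketch (not part of the original source): a couple of minimal unit tests
// exercising the pure helpers defined above.
#[cfg(test)]
mod lib_examples {
    use super::*;

    #[test]
    fn deployment_type_round_trips_between_display_and_from_str() {
        let parsed: DeploymentType = "clients".parse().unwrap();
        assert_eq!(parsed, DeploymentType::Client);
        assert_eq!(DeploymentType::Client.to_string(), "clients");
    }

    #[test]
    fn size_per_attached_volume_scales_with_node_count() {
        // 25 nodes * 7 (STORAGE_REQUIRED_PER_NODE) = 175, spread across 7 volumes = 25 each.
        assert_eq!(calculate_size_per_attached_volume(25), 25);
        assert_eq!(calculate_size_per_attached_volume(0), 0);
    }
}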