Skip to main content

sn_testnet_deploy/
lib.rs

1// Copyright (c) 2023, MaidSafe.
2// All rights reserved.
3//
4// This SAFE Network Software is licensed under the BSD-3-Clause license.
5// Please see the LICENSE file for more details.
6
7pub mod ansible;
8pub mod bootstrap;
9pub mod clients;
10pub mod deploy;
11pub mod digital_ocean;
12pub mod error;
13pub mod funding;
14pub mod infra;
15pub mod inventory;
16pub mod logs;
17pub mod rpc_client;
18pub mod s3;
19pub mod safe;
20pub mod setup;
21pub mod ssh;
22pub mod symlinked_antnode;
23pub mod terraform;
24pub mod upscale;
25
26pub use symlinked_antnode::SymlinkedAntnodeDeployer;
27
/// Storage required per node. The unit is not stated here (presumably GB, for sizing
/// the attached volumes) — TODO confirm against the infra code that consumes this.
const STORAGE_REQUIRED_PER_NODE: u16 = 7;
29
30use crate::{
31    ansible::{
32        extra_vars::ExtraVarsDocBuilder,
33        inventory::{cleanup_environment_inventory, AnsibleInventoryType},
34        provisioning::AnsibleProvisioner,
35        AnsibleRunner,
36    },
37    error::{Error, Result},
38    inventory::{DeploymentInventory, VirtualMachine},
39    rpc_client::RpcClient,
40    s3::S3Repository,
41    ssh::SshClient,
42    terraform::TerraformRunner,
43};
44use ant_service_management::ServiceStatus;
45use flate2::read::GzDecoder;
46use indicatif::{ProgressBar, ProgressStyle};
47use infra::{build_terraform_args, InfraRunOptions};
48use log::{debug, trace};
49use semver::Version;
50use serde::{Deserialize, Serialize};
51use serde_json::json;
52use std::{
53    fs::File,
54    io::{BufRead, BufReader, BufWriter, Write},
55    net::IpAddr,
56    path::{Path, PathBuf},
57    process::{Command, Stdio},
58    str::FromStr,
59    time::Duration,
60};
61use tar::Archive;
62
/// Default number of parallel forks for Ansible runs, used when the caller does not
/// override it via `TestnetDeployBuilder::ansible_forks`.
const ANSIBLE_DEFAULT_FORKS: usize = 50;
64
/// The category of deployment an environment was created as.
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub enum DeploymentType {
    /// The deployment has been bootstrapped from an existing network.
    Bootstrap,
    /// Client deployment.
    Client,
    /// The deployment is a new network.
    #[default]
    New,
}
75
/// Details of a deployed Anvil (local EVM test) node.
///
/// NOTE(review): field semantics are inferred from their names — confirm against the
/// provisioning code that populates this struct.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct AnvilNodeData {
    // Address of the data payments contract.
    pub data_payments_address: String,
    // Private key of the wallet used by the deployer.
    pub deployer_wallet_private_key: String,
    // Address of the merkle payments contract.
    pub merkle_payments_address: String,
    // Address of the payment token contract.
    pub payment_token_address: String,
    // RPC endpoint of the Anvil node.
    pub rpc_url: String,
}
84
85impl std::fmt::Display for DeploymentType {
86    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
87        match self {
88            DeploymentType::Bootstrap => write!(f, "bootstrap"),
89            DeploymentType::Client => write!(f, "clients"),
90            DeploymentType::New => write!(f, "new"),
91        }
92    }
93}
94
95impl std::str::FromStr for DeploymentType {
96    type Err = String;
97
98    fn from_str(s: &str) -> Result<Self, Self::Err> {
99        match s.to_lowercase().as_str() {
100            "bootstrap" => Ok(DeploymentType::Bootstrap),
101            "clients" => Ok(DeploymentType::Client),
102            "new" => Ok(DeploymentType::New),
103            _ => Err(format!("Invalid deployment type: {s}")),
104        }
105    }
106}
107
/// The role a node VM plays in the network topology.
#[derive(Debug, Clone)]
pub enum NodeType {
    /// Node behind a full-cone NAT.
    FullConePrivateNode,
    /// Node behind a port-restricted-cone NAT.
    PortRestrictedConePrivateNode,
    /// Standard publicly reachable node.
    Generic,
    /// The genesis node that starts a new network.
    Genesis,
    /// Node serving the peer cache.
    PeerCache,
    /// Node behind a symmetric NAT.
    SymmetricPrivateNode,
    /// Node using UPnP for port mapping.
    Upnp,
}
118
119impl std::fmt::Display for NodeType {
120    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
121        match self {
122            NodeType::FullConePrivateNode => write!(f, "full-cone-private"),
123            NodeType::PortRestrictedConePrivateNode => write!(f, "port-restricted-cone-private"),
124            NodeType::Generic => write!(f, "generic"),
125            NodeType::Genesis => write!(f, "genesis"),
126            NodeType::PeerCache => write!(f, "peer-cache"),
127            NodeType::SymmetricPrivateNode => write!(f, "symmetric-private"),
128            NodeType::Upnp => write!(f, "upnp"),
129        }
130    }
131}
132
133impl std::str::FromStr for NodeType {
134    type Err = String;
135
136    fn from_str(s: &str) -> Result<Self, Self::Err> {
137        match s.to_lowercase().as_str() {
138            "full-cone-private" => Ok(NodeType::FullConePrivateNode),
139            "port-restricted-cone-private" => Ok(NodeType::PortRestrictedConePrivateNode),
140            "generic" => Ok(NodeType::Generic),
141            "genesis" => Ok(NodeType::Genesis),
142            "peer-cache" => Ok(NodeType::PeerCache),
143            "symmetric-private" => Ok(NodeType::SymmetricPrivateNode),
144            "upnp" => Ok(NodeType::Upnp),
145            _ => Err(format!("Invalid node type: {s}")),
146        }
147    }
148}
149
150impl NodeType {
151    pub fn telegraf_role(&self) -> &'static str {
152        match self {
153            NodeType::FullConePrivateNode => "NAT_STATIC_FULL_CONE_NODE",
154            NodeType::PortRestrictedConePrivateNode => "PORT_RESTRICTED_CONE_NODE",
155            NodeType::Generic => "GENERIC_NODE",
156            NodeType::Genesis => "GENESIS_NODE",
157            NodeType::PeerCache => "PEER_CACHE_NODE",
158            NodeType::SymmetricPrivateNode => "NAT_RANDOMIZED_NODE",
159            NodeType::Upnp => "UPNP_NODE",
160        }
161    }
162
163    pub fn to_ansible_inventory_type(&self) -> AnsibleInventoryType {
164        match self {
165            NodeType::FullConePrivateNode => AnsibleInventoryType::FullConePrivateNodes,
166            NodeType::PortRestrictedConePrivateNode => {
167                AnsibleInventoryType::PortRestrictedConePrivateNodes
168            }
169            NodeType::Generic => AnsibleInventoryType::Nodes,
170            NodeType::Genesis => AnsibleInventoryType::Genesis,
171            NodeType::PeerCache => AnsibleInventoryType::PeerCacheNodes,
172            NodeType::SymmetricPrivateNode => AnsibleInventoryType::SymmetricPrivateNodes,
173            NodeType::Upnp => AnsibleInventoryType::Upnp,
174        }
175    }
176}
177
/// The EVM network a deployment pays against.
#[derive(Clone, Debug, Default, Eq, Serialize, Deserialize, PartialEq)]
pub enum EvmNetwork {
    /// A locally deployed Anvil test node.
    #[default]
    Anvil,
    /// Arbitrum One mainnet.
    ArbitrumOne,
    /// Arbitrum Sepolia test network.
    ArbitrumSepoliaTest,
    /// A user-supplied custom network (see `EvmDetails` for its parameters).
    Custom,
}
186
187impl std::fmt::Display for EvmNetwork {
188    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
189        match self {
190            EvmNetwork::Anvil => write!(f, "evm-custom"),
191            EvmNetwork::ArbitrumOne => write!(f, "evm-arbitrum-one"),
192            EvmNetwork::ArbitrumSepoliaTest => write!(f, "evm-arbitrum-sepolia-test"),
193            EvmNetwork::Custom => write!(f, "evm-custom"),
194        }
195    }
196}
197
198impl std::str::FromStr for EvmNetwork {
199    type Err = String;
200
201    fn from_str(s: &str) -> Result<Self, Self::Err> {
202        match s.to_lowercase().as_str() {
203            "anvil" => Ok(EvmNetwork::Anvil),
204            "arbitrum-one" => Ok(EvmNetwork::ArbitrumOne),
205            "arbitrum-sepolia-test" => Ok(EvmNetwork::ArbitrumSepoliaTest),
206            "custom" => Ok(EvmNetwork::Custom),
207            _ => Err(format!("Invalid EVM network type: {s}")),
208        }
209    }
210}
211
/// EVM configuration for a deployment.
///
/// NOTE(review): the optional fields appear to be needed only for custom/Anvil networks
/// (the well-known networks would imply their own contract addresses) — confirm against
/// the code that consumes these values.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct EvmDetails {
    // Which EVM network payments run against.
    pub network: EvmNetwork,
    // Address of the data payments contract, if supplied.
    pub data_payments_address: Option<String>,
    // Address of the merkle payments contract, if supplied.
    pub merkle_payments_address: Option<String>,
    // Address of the payment token contract, if supplied.
    pub payment_token_address: Option<String>,
    // RPC endpoint for the network, if supplied.
    pub rpc_url: Option<String>,
}
220
/// Persisted description of a deployed environment.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct EnvironmentDetails {
    // How the environment was created (new, bootstrap, or client-only).
    pub deployment_type: DeploymentType,
    // The class of environment (development/staging/production).
    pub environment_type: EnvironmentType,
    // EVM payment configuration for the environment.
    pub evm_details: EvmDetails,
    // Address of the funding wallet, when one was configured.
    pub funding_wallet_address: Option<String>,
    // Network identifier — semantics not visible in this file; TODO confirm its range/meaning.
    pub network_id: Option<u8>,
    // Cloud region the environment is deployed in (e.g. "lon1").
    pub region: String,
    // Rewards address for nodes, when one was configured.
    pub rewards_address: Option<String>,
}
231
/// The class of environment, which determines default node counts and which tfvars
/// files Terraform applies.
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub enum EnvironmentType {
    #[default]
    Development,
    Production,
    Staging,
}
239
240impl EnvironmentType {
241    pub fn get_tfvars_filenames(&self, name: &str, region: &str) -> Vec<String> {
242        match self {
243            EnvironmentType::Development => vec![
244                "dev.tfvars".to_string(),
245                format!("dev-images-{region}.tfvars", region = region),
246            ],
247            EnvironmentType::Staging => vec![
248                "staging.tfvars".to_string(),
249                format!("staging-images-{region}.tfvars", region = region),
250            ],
251            EnvironmentType::Production => {
252                vec![
253                    format!("{name}.tfvars", name = name),
254                    format!("production-images-{region}.tfvars", region = region),
255                ]
256            }
257        }
258    }
259
260    pub fn get_tfvars_filenames_with_fallback(
261        &self,
262        name: &str,
263        region: &str,
264        terraform_dir: &Path,
265    ) -> Vec<String> {
266        match self {
267            EnvironmentType::Development | EnvironmentType::Staging => {
268                self.get_tfvars_filenames(name, region)
269            }
270            EnvironmentType::Production => {
271                let named_tfvars = format!("{name}.tfvars");
272                let tfvars_file = if terraform_dir.join(&named_tfvars).exists() {
273                    named_tfvars
274                } else {
275                    "production.tfvars".to_string()
276                };
277                vec![tfvars_file, format!("production-images-{region}.tfvars")]
278            }
279        }
280    }
281
282    pub fn get_default_peer_cache_node_count(&self) -> u16 {
283        match self {
284            EnvironmentType::Development => 5,
285            EnvironmentType::Production => 5,
286            EnvironmentType::Staging => 5,
287        }
288    }
289
290    pub fn get_default_node_count(&self) -> u16 {
291        match self {
292            EnvironmentType::Development => 25,
293            EnvironmentType::Production => 25,
294            EnvironmentType::Staging => 25,
295        }
296    }
297
298    pub fn get_default_symmetric_private_node_count(&self) -> u16 {
299        self.get_default_node_count()
300    }
301
302    pub fn get_default_full_cone_private_node_count(&self) -> u16 {
303        self.get_default_node_count()
304    }
305    pub fn get_default_upnp_private_node_count(&self) -> u16 {
306        self.get_default_node_count()
307    }
308}
309
310impl std::fmt::Display for EnvironmentType {
311    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
312        match self {
313            EnvironmentType::Development => write!(f, "development"),
314            EnvironmentType::Production => write!(f, "production"),
315            EnvironmentType::Staging => write!(f, "staging"),
316        }
317    }
318}
319
320impl FromStr for EnvironmentType {
321    type Err = Error;
322
323    fn from_str(s: &str) -> Result<Self, Self::Err> {
324        match s.to_lowercase().as_str() {
325            "development" => Ok(EnvironmentType::Development),
326            "production" => Ok(EnvironmentType::Production),
327            "staging" => Ok(EnvironmentType::Staging),
328            _ => Err(Error::EnvironmentNameFromStringError(s.to_string())),
329        }
330    }
331}
332
/// Specify the binary option for the deployment.
///
/// The options are to build from source, or supply pre-built, versioned binaries, which
/// will be fetched from S3. Building from source adds significant time to the
/// deployment.
///
/// NOTE(review): a previous version of this comment listed `safenode`,
/// `safenode_rpc_client`, `faucet` and `safe` as the binaries involved, but the
/// variants below reference `ant`, `antctl` and `antnode` — confirm the current set of
/// binaries against the provisioning playbooks.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum BinaryOption {
    /// Binaries will be built from source.
    BuildFromSource {
        /// A comma-separated list that will be passed to the `--features` argument.
        antnode_features: Option<String>,
        /// Branch to build from.
        branch: String,
        /// Owner of the repository to build from.
        repo_owner: String,
        /// Skip building the binaries, if they were already built during the previous run using the same
        /// branch, repo owner and testnet name.
        skip_binary_build: bool,
    },
    /// Pre-built, versioned binaries will be fetched from S3.
    Versioned {
        ant_version: Option<Version>,
        antctl_version: Option<Version>,
        antnode_version: Option<Version>,
    },
}
365
366impl BinaryOption {
367    pub fn should_provision_build_machine(&self) -> bool {
368        match self {
369            BinaryOption::BuildFromSource {
370                skip_binary_build, ..
371            } => !skip_binary_build,
372            BinaryOption::Versioned { .. } => false,
373        }
374    }
375
376    pub fn print(&self) {
377        match self {
378            BinaryOption::BuildFromSource {
379                antnode_features,
380                branch,
381                repo_owner,
382                skip_binary_build: _,
383            } => {
384                println!("Source configuration:");
385                println!("  Repository owner: {repo_owner}");
386                println!("  Branch: {branch}");
387                if let Some(features) = antnode_features {
388                    println!("  Antnode features: {features}");
389                }
390            }
391            BinaryOption::Versioned {
392                ant_version,
393                antctl_version,
394                antnode_version,
395            } => {
396                println!("Versioned binaries configuration:");
397                if let Some(version) = ant_version {
398                    println!("  ant version: {version}");
399                }
400                if let Some(version) = antctl_version {
401                    println!("  antctl version: {version}");
402                }
403                if let Some(version) = antnode_version {
404                    println!("  antnode version: {version}");
405                }
406            }
407        }
408    }
409}
410
/// Cloud provider hosting the deployment.
///
/// Note: `TestnetDeployBuilder::build` currently rejects anything other than
/// `DigitalOcean`.
#[derive(Debug, Clone, Copy)]
pub enum CloudProvider {
    Aws,
    DigitalOcean,
}
416
417impl std::fmt::Display for CloudProvider {
418    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
419        match self {
420            CloudProvider::Aws => write!(f, "aws"),
421            CloudProvider::DigitalOcean => write!(f, "digital-ocean"),
422        }
423    }
424}
425
426impl CloudProvider {
427    pub fn get_ssh_user(&self) -> String {
428        match self {
429            CloudProvider::Aws => "ubuntu".to_string(),
430            CloudProvider::DigitalOcean => "root".to_string(),
431        }
432    }
433}
434
/// The log output format passed through to node services.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum LogFormat {
    Default,
    Json,
}
440
441impl LogFormat {
442    pub fn parse_from_str(val: &str) -> Result<Self> {
443        match val {
444            "default" => Ok(LogFormat::Default),
445            "json" => Ok(LogFormat::Json),
446            _ => Err(Error::LoggingConfiguration(
447                "The only valid values for this argument are \"default\" or \"json\"".to_string(),
448            )),
449        }
450    }
451
452    pub fn as_str(&self) -> &'static str {
453        match self {
454            LogFormat::Default => "default",
455            LogFormat::Json => "json",
456        }
457    }
458}
459
/// Options controlling an antnode upgrade run across an environment.
#[derive(Clone)]
pub struct UpgradeOptions {
    // Pass verbose output flags to Ansible.
    pub ansible_verbose: bool,
    // Branch to source binaries from; only used together with `repo_owner`.
    pub branch: Option<String>,
    // Restrict the upgrade to this specific set of VMs.
    pub custom_inventory: Option<Vec<VirtualMachine>>,
    // Environment variables to apply to the node services.
    pub env_variables: Option<Vec<(String, String)>>,
    // Forwarded to the playbook as the `force` extra var.
    pub force: bool,
    // Number of parallel Ansible forks to use.
    pub forks: usize,
    // Interval between node upgrades; serialised as milliseconds in the extra vars.
    pub interval: Duration,
    // Name of the environment being upgraded.
    pub name: String,
    // Restrict the upgrade to a single node type.
    pub node_type: Option<NodeType>,
    // Delay before each upgrade — units not visible here; TODO confirm with the playbook.
    pub pre_upgrade_delay: Option<u64>,
    // Cloud provider hosting the environment.
    pub provider: CloudProvider,
    // Repository owner to source binaries from; only used together with `branch`.
    pub repo_owner: Option<String>,
    // Specific antnode version to upgrade to (forwarded as `antnode_version`).
    pub version: Option<String>,
}
476
477impl UpgradeOptions {
478    pub fn get_ansible_vars(&self) -> String {
479        let mut extra_vars = ExtraVarsDocBuilder::default();
480        extra_vars.add_variable("interval", &self.interval.as_millis().to_string());
481        if let Some(env_variables) = &self.env_variables {
482            extra_vars.add_env_variable_list("env_variables", env_variables.clone());
483        }
484        if self.force {
485            extra_vars.add_variable("force", &self.force.to_string());
486        }
487        if let Some(version) = &self.version {
488            extra_vars.add_variable("antnode_version", version);
489        }
490        if let Some(pre_upgrade_delay) = &self.pre_upgrade_delay {
491            extra_vars.add_variable("pre_upgrade_delay", &pre_upgrade_delay.to_string());
492        }
493
494        if let (Some(repo_owner), Some(branch)) = (&self.repo_owner, &self.branch) {
495            let binary_option = BinaryOption::BuildFromSource {
496                antnode_features: None,
497                branch: branch.clone(),
498                repo_owner: repo_owner.clone(),
499                skip_binary_build: true,
500            };
501            extra_vars.add_node_url_or_version(&self.name, &binary_option);
502        }
503
504        extra_vars.build()
505    }
506}
507
/// Builder for `TestnetDeployer`; unset options fall back to environment variables or
/// defaults in `build` (see the setter docs there).
#[derive(Default)]
pub struct TestnetDeployBuilder {
    // Parallel forks for Ansible; defaults to ANSIBLE_DEFAULT_FORKS.
    ansible_forks: Option<usize>,
    // Verbose Ansible output.
    ansible_verbose_mode: bool,
    // Environment type (development/staging/production).
    deployment_type: EnvironmentType,
    // Name of the environment; required by `TestnetDeployer::new`.
    environment_name: String,
    // Cloud provider; defaults to Digital Ocean.
    provider: Option<CloudProvider>,
    // Cloud region; defaults to "lon1".
    region: Option<String>,
    // SSH key path; falls back to the SSH_KEY_PATH env var.
    ssh_secret_key_path: Option<PathBuf>,
    // Terraform state bucket; falls back to TERRAFORM_STATE_BUCKET_NAME.
    state_bucket_name: Option<String>,
    // Path to the terraform binary; defaults to "terraform" on PATH.
    terraform_binary_path: Option<PathBuf>,
    // Ansible vault password file; falls back to ANSIBLE_VAULT_PASSWORD_PATH.
    vault_password_path: Option<PathBuf>,
    // Directory holding the terraform/ansible resources; defaults to ./resources.
    working_directory_path: Option<PathBuf>,
}
522
impl TestnetDeployBuilder {
    /// Creates a builder with all options unset; `build` falls back to environment
    /// variables and defaults for anything not supplied.
    pub fn new() -> Self {
        Default::default()
    }

    /// Enables verbose output for Ansible playbook runs.
    pub fn ansible_verbose_mode(&mut self, ansible_verbose_mode: bool) -> &mut Self {
        self.ansible_verbose_mode = ansible_verbose_mode;
        self
    }

    /// Sets the number of parallel forks Ansible may use.
    pub fn ansible_forks(&mut self, ansible_forks: usize) -> &mut Self {
        self.ansible_forks = Some(ansible_forks);
        self
    }

    /// Sets the environment type (development, staging or production).
    pub fn deployment_type(&mut self, deployment_type: EnvironmentType) -> &mut Self {
        self.deployment_type = deployment_type;
        self
    }

    /// Sets the name of the environment to deploy.
    pub fn environment_name(&mut self, name: &str) -> &mut Self {
        self.environment_name = name.to_string();
        self
    }

    /// Sets the cloud provider. `build` defaults to Digital Ocean when not supplied.
    pub fn provider(&mut self, provider: CloudProvider) -> &mut Self {
        self.provider = Some(provider);
        self
    }

    /// Sets the S3 bucket holding the Terraform state. Falls back to the
    /// `TERRAFORM_STATE_BUCKET_NAME` environment variable in `build`.
    pub fn state_bucket_name(&mut self, state_bucket_name: String) -> &mut Self {
        self.state_bucket_name = Some(state_bucket_name);
        self
    }

    /// Sets the path to the `terraform` binary. Defaults to resolving `terraform` on
    /// the PATH.
    pub fn terraform_binary_path(&mut self, terraform_binary_path: PathBuf) -> &mut Self {
        self.terraform_binary_path = Some(terraform_binary_path);
        self
    }

    /// Sets the working directory containing the terraform/ansible resources. Defaults
    /// to `./resources` under the current directory.
    pub fn working_directory(&mut self, working_directory_path: PathBuf) -> &mut Self {
        self.working_directory_path = Some(working_directory_path);
        self
    }

    /// Sets the SSH private key path. Falls back to the `SSH_KEY_PATH` environment
    /// variable in `build`.
    pub fn ssh_secret_key_path(&mut self, ssh_secret_key_path: PathBuf) -> &mut Self {
        self.ssh_secret_key_path = Some(ssh_secret_key_path);
        self
    }

    /// Sets the Ansible vault password file path. Falls back to the
    /// `ANSIBLE_VAULT_PASSWORD_PATH` environment variable in `build`.
    pub fn vault_password_path(&mut self, vault_password_path: PathBuf) -> &mut Self {
        self.vault_password_path = Some(vault_password_path);
        self
    }

    /// Sets the cloud region. Defaults to "lon1" in `build`.
    pub fn region(&mut self, region: String) -> &mut Self {
        self.region = Some(region);
        self
    }

    /// Validates the configuration and assembles a `TestnetDeployer`.
    ///
    /// # Errors
    ///
    /// Returns an error when the provider is not Digital Ocean, when required
    /// credentials or fallback environment variables are missing, or when any of the
    /// tool runners fail to initialise.
    pub fn build(&self) -> Result<TestnetDeployer> {
        let provider = self.provider.unwrap_or(CloudProvider::DigitalOcean);
        match provider {
            CloudProvider::DigitalOcean => {
                let digital_ocean_pat = std::env::var("DO_PAT").map_err(|_| {
                    Error::CloudProviderCredentialsNotSupplied("DO_PAT".to_string())
                })?;
                // The DO_PAT variable is not actually read by either Terraform or Ansible.
                // Each tool uses a different variable, so instead we set each of those variables
                // to the value of DO_PAT. This means the user only needs to set one variable.
                std::env::set_var("DIGITALOCEAN_TOKEN", digital_ocean_pat.clone());
                std::env::set_var("DO_API_TOKEN", digital_ocean_pat);
            }
            _ => {
                return Err(Error::CloudProviderNotSupported(provider.to_string()));
            }
        }

        let state_bucket_name = match self.state_bucket_name {
            Some(ref bucket_name) => bucket_name.clone(),
            None => std::env::var("TERRAFORM_STATE_BUCKET_NAME")?,
        };

        let default_terraform_bin_path = PathBuf::from("terraform");
        let terraform_binary_path = self
            .terraform_binary_path
            .as_ref()
            .unwrap_or(&default_terraform_bin_path);

        let working_directory_path = match self.working_directory_path {
            Some(ref work_dir_path) => work_dir_path.clone(),
            None => std::env::current_dir()?.join("resources"),
        };

        let ssh_secret_key_path = match self.ssh_secret_key_path {
            Some(ref ssh_sk_path) => ssh_sk_path.clone(),
            None => PathBuf::from(std::env::var("SSH_KEY_PATH")?),
        };

        let vault_password_path = match self.vault_password_path {
            Some(ref vault_pw_path) => vault_pw_path.clone(),
            None => PathBuf::from(std::env::var("ANSIBLE_VAULT_PASSWORD_PATH")?),
        };

        let region = match self.region {
            Some(ref region) => region.clone(),
            None => "lon1".to_string(),
        };

        let terraform_runner = TerraformRunner::new(
            terraform_binary_path.to_path_buf(),
            working_directory_path
                .join("terraform")
                .join("testnet")
                .join(provider.to_string()),
            provider,
            &state_bucket_name,
        )?;
        let ansible_runner = AnsibleRunner::new(
            self.ansible_forks.unwrap_or(ANSIBLE_DEFAULT_FORKS),
            self.ansible_verbose_mode,
            &self.environment_name,
            provider,
            ssh_secret_key_path.clone(),
            vault_password_path,
            working_directory_path.join("ansible"),
        )?;
        let ssh_client = SshClient::new(ssh_secret_key_path);
        let ansible_provisioner =
            AnsibleProvisioner::new(ansible_runner, provider, ssh_client.clone());
        let rpc_client = RpcClient::new(
            PathBuf::from("/usr/local/bin/safenode_rpc_client"),
            working_directory_path.clone(),
        );

        // Remove any `safe` binary from a previous deployment. Otherwise you can end up with
        // mismatched binaries.
        let safe_path = working_directory_path.join("safe");
        if safe_path.exists() {
            std::fs::remove_file(safe_path)?;
        }

        let testnet = TestnetDeployer::new(
            ansible_provisioner,
            provider,
            self.deployment_type.clone(),
            &self.environment_name,
            rpc_client,
            S3Repository {},
            ssh_client,
            terraform_runner,
            working_directory_path,
            region,
        )?;

        Ok(testnet)
    }
}
681
/// Orchestrates a testnet deployment: Terraform for infrastructure, Ansible for
/// provisioning, plus SSH/S3/RPC helpers. Construct via `TestnetDeployBuilder`.
#[derive(Clone)]
pub struct TestnetDeployer {
    pub ansible_provisioner: AnsibleProvisioner,
    pub cloud_provider: CloudProvider,
    pub deployment_type: EnvironmentType,
    pub environment_name: String,
    // Path to the Ansible inventory template (set in `new`).
    pub inventory_file_path: PathBuf,
    pub region: String,
    pub rpc_client: RpcClient,
    pub s3_repository: S3Repository,
    pub ssh_client: SshClient,
    pub terraform_runner: TerraformRunner,
    pub working_directory_path: PathBuf,
}
696
697impl TestnetDeployer {
698    #[allow(clippy::too_many_arguments)]
699    pub fn new(
700        ansible_provisioner: AnsibleProvisioner,
701        cloud_provider: CloudProvider,
702        deployment_type: EnvironmentType,
703        environment_name: &str,
704        rpc_client: RpcClient,
705        s3_repository: S3Repository,
706        ssh_client: SshClient,
707        terraform_runner: TerraformRunner,
708        working_directory_path: PathBuf,
709        region: String,
710    ) -> Result<TestnetDeployer> {
711        if environment_name.is_empty() {
712            return Err(Error::EnvironmentNameRequired);
713        }
714        let inventory_file_path = working_directory_path
715            .join("ansible")
716            .join("inventory")
717            .join("dev_inventory_digital_ocean.yml");
718        Ok(TestnetDeployer {
719            ansible_provisioner,
720            cloud_provider,
721            deployment_type,
722            environment_name: environment_name.to_string(),
723            inventory_file_path,
724            region,
725            rpc_client,
726            ssh_client,
727            s3_repository,
728            terraform_runner,
729            working_directory_path,
730        })
731    }
732
    /// Prepares the environment for a fresh deployment.
    ///
    /// Initialises Terraform, creates the workspace for this environment if it does not
    /// already exist, and downloads the `safenode_rpc_client` binary when it is missing
    /// from the working directory.
    ///
    /// # Errors
    ///
    /// Fails if logs for a previous testnet with the same name still exist in the
    /// `sn-testnet` S3 bucket, which indicates the old environment was not cleaned up.
    pub async fn init(&self) -> Result<()> {
        if self
            .s3_repository
            .folder_exists(
                "sn-testnet",
                &format!("testnet-logs/{}", self.environment_name),
            )
            .await?
        {
            return Err(Error::LogsForPreviousTestnetExist(
                self.environment_name.clone(),
            ));
        }

        self.terraform_runner.init()?;
        let workspaces = self.terraform_runner.workspace_list()?;
        if !workspaces.contains(&self.environment_name) {
            self.terraform_runner
                .workspace_new(&self.environment_name)?;
        } else {
            println!("Workspace {} already exists", self.environment_name);
        }

        // The RPC client is only fetched once; subsequent runs reuse the cached binary.
        let rpc_client_path = self.working_directory_path.join("safenode_rpc_client");
        if !rpc_client_path.is_file() {
            println!("Downloading the rpc client for safenode...");
            let archive_name = "safenode_rpc_client-latest-x86_64-unknown-linux-musl.tar.gz";
            get_and_extract_archive_from_s3(
                &self.s3_repository,
                "sn-node-rpc-client",
                archive_name,
                &self.working_directory_path,
            )
            .await?;
            #[cfg(unix)]
            {
                use std::os::unix::fs::PermissionsExt;
                // The extracted binary must be marked executable.
                let mut permissions = std::fs::metadata(&rpc_client_path)?.permissions();
                permissions.set_mode(0o755); // rwxr-xr-x
                std::fs::set_permissions(&rpc_client_path, permissions)?;
            }
        }

        Ok(())
    }
778
779    pub fn plan(&self, options: &InfraRunOptions) -> Result<()> {
780        println!("Selecting {} workspace...", options.name);
781        self.terraform_runner.workspace_select(&options.name)?;
782
783        let args = build_terraform_args(options)?;
784
785        self.terraform_runner
786            .plan(Some(args), options.tfvars_filenames.clone())?;
787        Ok(())
788    }
789
790    pub fn start(
791        &self,
792        interval: Duration,
793        node_type: Option<NodeType>,
794        custom_inventory: Option<Vec<VirtualMachine>>,
795    ) -> Result<()> {
796        self.ansible_provisioner.start_nodes(
797            &self.environment_name,
798            interval,
799            node_type,
800            custom_inventory,
801        )?;
802        Ok(())
803    }
804
805    pub fn apply_delete_node_records_cron(
806        &self,
807        node_type: Option<NodeType>,
808        custom_inventory: Option<Vec<VirtualMachine>>,
809    ) -> Result<()> {
810        self.ansible_provisioner.apply_delete_node_records_cron(
811            &self.environment_name,
812            node_type,
813            custom_inventory,
814        )?;
815        Ok(())
816    }
817
818    pub fn reset(
819        &self,
820        node_type: Option<NodeType>,
821        custom_inventory: Option<Vec<VirtualMachine>>,
822    ) -> Result<()> {
823        self.ansible_provisioner.reset_nodes(
824            &self.environment_name,
825            node_type,
826            custom_inventory,
827        )?;
828        Ok(())
829    }
830
831    /// Get the status of all nodes in a network.
832    ///
833    /// First, a playbook runs `safenode-manager status` against all the machines, to get the
834    /// current state of all the nodes. Then all the node registry files are retrieved and
835    /// deserialized to a `NodeRegistry`, allowing us to output the status of each node on each VM.
836    pub async fn status(&self) -> Result<()> {
837        self.ansible_provisioner.status()?;
838
839        let peer_cache_node_registries = self
840            .ansible_provisioner
841            .get_node_registries(&AnsibleInventoryType::PeerCacheNodes)
842            .await?;
843        let generic_node_registries = self
844            .ansible_provisioner
845            .get_node_registries(&AnsibleInventoryType::Nodes)
846            .await?;
847        let symmetric_private_node_registries = self
848            .ansible_provisioner
849            .get_node_registries(&AnsibleInventoryType::SymmetricPrivateNodes)
850            .await?;
851        let full_cone_private_node_registries = self
852            .ansible_provisioner
853            .get_node_registries(&AnsibleInventoryType::FullConePrivateNodes)
854            .await?;
855        let upnp_private_node_registries = self
856            .ansible_provisioner
857            .get_node_registries(&AnsibleInventoryType::Upnp)
858            .await?;
859        let port_restricted_cone_private_node_registries = self
860            .ansible_provisioner
861            .get_node_registries(&AnsibleInventoryType::PortRestrictedConePrivateNodes)
862            .await?;
863        let genesis_node_registry = self
864            .ansible_provisioner
865            .get_node_registries(&AnsibleInventoryType::Genesis)
866            .await?
867            .clone();
868
869        peer_cache_node_registries.print().await;
870        generic_node_registries.print().await;
871        symmetric_private_node_registries.print().await;
872        full_cone_private_node_registries.print().await;
873        upnp_private_node_registries.print().await;
874        genesis_node_registry.print().await;
875
876        let all_registries = [
877            &peer_cache_node_registries,
878            &generic_node_registries,
879            &symmetric_private_node_registries,
880            &full_cone_private_node_registries,
881            &upnp_private_node_registries,
882            &genesis_node_registry,
883        ];
884
885        let mut total_nodes = 0;
886        let mut running_nodes = 0;
887        let mut stopped_nodes = 0;
888        let mut added_nodes = 0;
889        let mut removed_nodes = 0;
890
891        for (_, registry) in all_registries
892            .iter()
893            .flat_map(|r| r.retrieved_registries.iter())
894        {
895            for node in registry.nodes.read().await.iter() {
896                total_nodes += 1;
897                match node.read().await.status {
898                    ServiceStatus::Running => running_nodes += 1,
899                    ServiceStatus::Stopped => stopped_nodes += 1,
900                    ServiceStatus::Added => added_nodes += 1,
901                    ServiceStatus::Removed => removed_nodes += 1,
902                }
903            }
904        }
905
906        let peer_cache_hosts = peer_cache_node_registries.retrieved_registries.len();
907        let generic_hosts = generic_node_registries.retrieved_registries.len();
908        let symmetric_private_hosts = symmetric_private_node_registries.retrieved_registries.len();
909        let full_cone_private_hosts = full_cone_private_node_registries.retrieved_registries.len();
910        let upnp_private_hosts = upnp_private_node_registries.retrieved_registries.len();
911        let port_restricted_cone_private_hosts = port_restricted_cone_private_node_registries
912            .retrieved_registries
913            .len();
914
915        let peer_cache_nodes = peer_cache_node_registries.get_node_count().await;
916        let generic_nodes = generic_node_registries.get_node_count().await;
917        let symmetric_private_nodes = symmetric_private_node_registries.get_node_count().await;
918        let full_cone_private_nodes = full_cone_private_node_registries.get_node_count().await;
919        let upnp_private_nodes = upnp_private_node_registries.get_node_count().await;
920        let port_restricted_cone_private_nodes = port_restricted_cone_private_node_registries
921            .get_node_count()
922            .await;
923
924        println!("-------");
925        println!("Summary");
926        println!("-------");
927        println!(
928            "Total peer cache nodes ({}x{}): {}",
929            peer_cache_hosts,
930            if peer_cache_hosts > 0 {
931                peer_cache_nodes / peer_cache_hosts
932            } else {
933                0
934            },
935            peer_cache_nodes
936        );
937        println!(
938            "Total generic nodes ({}x{}): {}",
939            generic_hosts,
940            if generic_hosts > 0 {
941                generic_nodes / generic_hosts
942            } else {
943                0
944            },
945            generic_nodes
946        );
947        println!(
948            "Total symmetric private nodes ({}x{}): {}",
949            symmetric_private_hosts,
950            if symmetric_private_hosts > 0 {
951                symmetric_private_nodes / symmetric_private_hosts
952            } else {
953                0
954            },
955            symmetric_private_nodes
956        );
957        println!(
958            "Total full cone private nodes ({}x{}): {}",
959            full_cone_private_hosts,
960            if full_cone_private_hosts > 0 {
961                full_cone_private_nodes / full_cone_private_hosts
962            } else {
963                0
964            },
965            full_cone_private_nodes
966        );
967        println!(
968            "Total UPnP private nodes ({}x{}): {}",
969            upnp_private_hosts,
970            if upnp_private_hosts > 0 {
971                upnp_private_nodes / upnp_private_hosts
972            } else {
973                0
974            },
975            upnp_private_nodes
976        );
977        println!(
978            "Total port restricted cone private nodes ({}x{}): {}",
979            port_restricted_cone_private_hosts,
980            if port_restricted_cone_private_hosts > 0 {
981                port_restricted_cone_private_nodes / port_restricted_cone_private_hosts
982            } else {
983                0
984            },
985            port_restricted_cone_private_nodes
986        );
987        println!("Total nodes: {total_nodes}");
988        println!("Running nodes: {running_nodes}");
989        println!("Stopped nodes: {stopped_nodes}");
990        println!("Added nodes: {added_nodes}");
991        println!("Removed nodes: {removed_nodes}");
992
993        Ok(())
994    }
995
    /// Clean up node logs on the environment's hosts.
    ///
    /// When `setup_cron` is true, the provisioner also sets up a cron job so the cleanup
    /// recurs. Delegates entirely to the Ansible provisioner.
    pub fn cleanup_node_logs(&self, setup_cron: bool) -> Result<()> {
        self.ansible_provisioner.cleanup_node_logs(setup_cron)?;
        Ok(())
    }
1000
    /// Start the Telegraf service on the environment's hosts.
    ///
    /// `node_type` optionally restricts the operation to a single node type, and
    /// `custom_inventory` optionally restricts it to a specific set of VMs; when both are
    /// `None` the provisioner targets its default host set.
    pub fn start_telegraf(
        &self,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.start_telegraf(
            &self.environment_name,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }
1013
    /// Stop node services on the environment's hosts.
    ///
    /// All arguments are forwarded to the provisioner's `stop_nodes`:
    /// * `interval` - presumably the pause applied between stopping nodes — confirm
    ///   against the provisioner implementation.
    /// * `node_type` - optionally restrict the operation to a single node type.
    /// * `custom_inventory` - optionally restrict the operation to specific VMs.
    /// * `delay` - an optional delay value, in units defined by the provisioner.
    /// * `service_names` - optionally restrict the operation to named services.
    pub fn stop(
        &self,
        interval: Duration,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
        delay: Option<u64>,
        service_names: Option<Vec<String>>,
    ) -> Result<()> {
        self.ansible_provisioner.stop_nodes(
            &self.environment_name,
            interval,
            node_type,
            custom_inventory,
            delay,
            service_names,
        )?;
        Ok(())
    }
1032
    /// Stop the Telegraf service on the environment's hosts.
    ///
    /// `node_type` optionally restricts the operation to a single node type, and
    /// `custom_inventory` optionally restricts it to a specific set of VMs.
    pub fn stop_telegraf(
        &self,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.stop_telegraf(
            &self.environment_name,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }
1045
    /// Upgrade the nodes in the environment using the supplied options.
    ///
    /// Delegates entirely to the provisioner's `upgrade_nodes`.
    pub fn upgrade(&self, options: UpgradeOptions) -> Result<()> {
        self.ansible_provisioner.upgrade_nodes(&options)?;
        Ok(())
    }
1050
    /// Upgrade the `antctl` binary on the environment's hosts to the given version.
    ///
    /// `node_type` optionally restricts the operation to a single node type, and
    /// `custom_inventory` optionally restricts it to a specific set of VMs.
    pub fn upgrade_antctl(
        &self,
        version: Version,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.upgrade_antctl(
            &self.environment_name,
            &version,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }
1065
    /// Upgrade the GeoIP Telegraf configuration for the named environment.
    ///
    /// Delegates entirely to the provisioner.
    pub fn upgrade_geoip_telegraf(&self, name: &str) -> Result<()> {
        self.ansible_provisioner.upgrade_geoip_telegraf(name)?;
        Ok(())
    }
1070
    /// Upgrade the node Telegraf configuration for the named environment.
    ///
    /// Delegates entirely to the provisioner.
    pub fn upgrade_node_telegraf(&self, name: &str) -> Result<()> {
        self.ansible_provisioner.upgrade_node_telegraf(name)?;
        Ok(())
    }
1075
    /// Upgrade the client Telegraf configuration for the named environment.
    ///
    /// Delegates entirely to the provisioner.
    pub fn upgrade_client_telegraf(&self, name: &str) -> Result<()> {
        self.ansible_provisioner.upgrade_client_telegraf(name)?;
        Ok(())
    }
1080
    /// Tear down the environment completely.
    ///
    /// Steps, in order:
    /// 1. Fetch the environment details from S3. This is best effort: a failure is
    ///    printed and cleanup continues without the details.
    /// 2. If details were obtained, drain remaining funds from the deployment.
    /// 3. Destroy the Terraform-managed infrastructure.
    /// 4. Delete the local Ansible inventory files for the environment.
    /// 5. Delete the environment type record from S3 (also best effort).
    pub async fn clean(&self) -> Result<()> {
        let environment_details =
            get_environment_details(&self.environment_name, &self.s3_repository)
                .await
                .inspect_err(|err| {
                    println!("Failed to get environment details: {err}. Continuing cleanup...");
                })
                .ok();
        if let Some(environment_details) = &environment_details {
            funding::drain_funds(&self.ansible_provisioner, environment_details).await?;
        }

        self.destroy_infra(environment_details).await?;

        cleanup_environment_inventory(
            &self.environment_name,
            &self
                .working_directory_path
                .join("ansible")
                .join("inventory"),
            None,
        )?;

        println!("Deleted Ansible inventory for {}", self.environment_name);

        // A failure to delete the S3 record is not fatal; cleanup has already completed.
        if let Err(err) = self
            .s3_repository
            .delete_object("sn-environment-type", &self.environment_name)
            .await
        {
            println!("Failed to delete environment type: {err}. Continuing cleanup...");
        }
        Ok(())
    }
1115
    /// Destroy the Terraform-managed infrastructure for this environment.
    ///
    /// Selects the environment's Terraform workspace, regenerates the run options from
    /// the existing infrastructure, runs `terraform destroy` with the derived arguments,
    /// and finally deletes the workspace. When no environment details are available, the
    /// destroy runs without any tfvars files.
    async fn destroy_infra(&self, environment_details: Option<EnvironmentDetails>) -> Result<()> {
        infra::select_workspace(&self.terraform_runner, &self.environment_name)?;

        let options = InfraRunOptions::generate_existing(
            &self.environment_name,
            &self.region,
            &self.terraform_runner,
            environment_details.as_ref(),
        )
        .await?;

        let args = build_terraform_args(&options)?;
        // Without environment details there is no environment type to resolve tfvars
        // files from, so the destroy runs with an empty list.
        let tfvars_filenames = if let Some(environment_details) = &environment_details {
            environment_details
                .environment_type
                .get_tfvars_filenames_with_fallback(
                    &self.environment_name,
                    &self.region,
                    &self.terraform_runner.working_directory_path,
                )
        } else {
            vec![]
        };

        self.terraform_runner
            .destroy(Some(args), Some(tfvars_filenames))?;

        infra::delete_workspace(&self.terraform_runner, &self.environment_name)?;

        Ok(())
    }
1147}
1148
1149//
1150// Shared Helpers
1151//
1152
1153pub fn get_genesis_multiaddr(
1154    ansible_runner: &AnsibleRunner,
1155    ssh_client: &SshClient,
1156) -> Result<Option<(String, IpAddr)>> {
1157    let genesis_inventory = ansible_runner.get_inventory(AnsibleInventoryType::Genesis, true)?;
1158    if genesis_inventory.is_empty() {
1159        return Ok(None);
1160    }
1161    let genesis_ip = genesis_inventory[0].public_ip_addr;
1162
1163    // It's possible for the genesis host to be altered from its original state where a node was
1164    // started with the `--first` flag.
1165    // First attempt: try to find node with first=true
1166    let multiaddr = ssh_client
1167        .run_command(
1168            &genesis_ip,
1169            "root",
1170            "jq -r '.nodes[] | select(.initial_peers_config.first == true) | .listen_addr[] | select(contains(\"127.0.0.1\") | not) | select(contains(\"quic-v1\"))' /var/antctl/node_registry.json | head -n 1",
1171            false,
1172        )
1173        .map(|output| output.first().cloned())
1174        .unwrap_or_else(|err| {
1175            log::error!("Failed to find first node with quic-v1 protocol: {err:?}");
1176            None
1177        });
1178
1179    // Second attempt: if first attempt failed, see if any node is available.
1180    let multiaddr = match multiaddr {
1181        Some(addr) => addr,
1182        None => ssh_client
1183            .run_command(
1184                &genesis_ip,
1185                "root",
1186                "jq -r '.nodes[] | .listen_addr[] | select(contains(\"127.0.0.1\") | not) | select(contains(\"quic-v1\"))' /var/antctl/node_registry.json | head -n 1",
1187                false,
1188            )?
1189            .first()
1190            .cloned()
1191            .ok_or_else(|| Error::GenesisListenAddress)?,
1192    };
1193
1194    Ok(Some((multiaddr, genesis_ip)))
1195}
1196
1197pub fn get_anvil_node_data_hardcoded(ansible_runner: &AnsibleRunner) -> Result<AnvilNodeData> {
1198    let evm_inventory = ansible_runner.get_inventory(AnsibleInventoryType::EvmNodes, true)?;
1199    if evm_inventory.is_empty() {
1200        return Err(Error::EvmNodeNotFound);
1201    }
1202    let evm_ip = evm_inventory[0].public_ip_addr;
1203
1204    Ok(AnvilNodeData {
1205        data_payments_address: "0x8464135c8F25Da09e49BC8782676a84730C318bC".to_string(),
1206        deployer_wallet_private_key:
1207            "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80".to_string(),
1208        merkle_payments_address: "0x663F3ad617193148711d28f5334eE4Ed07016602".to_string(),
1209        payment_token_address: "0x5FbDB2315678afecb367f032d93F642f64180aa3".to_string(),
1210        rpc_url: format!("http://{evm_ip}:61611"),
1211    })
1212}
1213
1214pub fn get_multiaddr(
1215    ansible_runner: &AnsibleRunner,
1216    ssh_client: &SshClient,
1217) -> Result<(String, IpAddr)> {
1218    let node_inventory = ansible_runner.get_inventory(AnsibleInventoryType::Nodes, true)?;
1219    // For upscaling a bootstrap deployment, we'd need to select one of the nodes that's already
1220    // provisioned. So just try the first one.
1221    let node_ip = node_inventory
1222        .iter()
1223        .find(|vm| vm.name.ends_with("-node-1"))
1224        .ok_or_else(|| Error::NodeAddressNotFound)?
1225        .public_ip_addr;
1226
1227    debug!("Getting multiaddr from node {node_ip}");
1228
1229    let multiaddr =
1230        ssh_client
1231        .run_command(
1232            &node_ip,
1233            "root",
1234            // fetch the first multiaddr which does not contain the localhost addr.
1235            "jq -r '.nodes[] | .listen_addr[] | select(contains(\"127.0.0.1\") | not)' /var/antctl/node_registry.json | head -n 1",
1236            false,
1237        )?.first()
1238        .cloned()
1239        .ok_or_else(|| Error::NodeAddressNotFound)?;
1240
1241    // The node_ip is obviously inside the multiaddr, but it's just being returned as a
1242    // separate item for convenience.
1243    Ok((multiaddr, node_ip))
1244}
1245
1246pub async fn get_and_extract_archive_from_s3(
1247    s3_repository: &S3Repository,
1248    bucket_name: &str,
1249    archive_bucket_path: &str,
1250    dest_path: &Path,
1251) -> Result<()> {
1252    // In this case, not using unwrap leads to having to provide a very trivial error variant that
1253    // doesn't seem very valuable.
1254    let archive_file_name = archive_bucket_path.split('/').next_back().unwrap();
1255    let archive_dest_path = dest_path.join(archive_file_name);
1256    s3_repository
1257        .download_object(bucket_name, archive_bucket_path, &archive_dest_path)
1258        .await?;
1259    extract_archive(&archive_dest_path, dest_path)?;
1260    Ok(())
1261}
1262
1263pub fn extract_archive(archive_path: &Path, dest_path: &Path) -> Result<()> {
1264    let archive_file = File::open(archive_path)?;
1265    let decoder = GzDecoder::new(archive_file);
1266    let mut archive = Archive::new(decoder);
1267    let entries = archive.entries()?;
1268    for entry_result in entries {
1269        let mut entry = entry_result?;
1270        let extract_path = dest_path.join(entry.path()?);
1271        if entry.header().entry_type() == tar::EntryType::Directory {
1272            std::fs::create_dir_all(extract_path)?;
1273            continue;
1274        }
1275        let mut file = BufWriter::new(File::create(extract_path)?);
1276        std::io::copy(&mut entry, &mut file)?;
1277    }
1278    std::fs::remove_file(archive_path)?;
1279    Ok(())
1280}
1281
1282pub fn run_external_command(
1283    binary_path: PathBuf,
1284    working_directory_path: PathBuf,
1285    args: Vec<String>,
1286    suppress_stdout: bool,
1287    suppress_stderr: bool,
1288) -> Result<Vec<String>> {
1289    let mut command = Command::new(binary_path.clone());
1290    for arg in &args {
1291        command.arg(arg);
1292    }
1293    command.stdout(Stdio::piped());
1294    command.stderr(Stdio::piped());
1295    command.current_dir(working_directory_path.clone());
1296    debug!("Running {binary_path:#?} with args {args:#?}");
1297    debug!("Working directory set to {working_directory_path:#?}");
1298
1299    let mut child = command.spawn()?;
1300    let mut output_lines = Vec::new();
1301
1302    if let Some(ref mut stdout) = child.stdout {
1303        let reader = BufReader::new(stdout);
1304        for line in reader.lines() {
1305            let line = line?;
1306            if !suppress_stdout {
1307                println!("{line}");
1308            }
1309            output_lines.push(line);
1310        }
1311    }
1312
1313    if let Some(ref mut stderr) = child.stderr {
1314        let reader = BufReader::new(stderr);
1315        for line in reader.lines() {
1316            let line = line?;
1317            if !suppress_stderr {
1318                eprintln!("{line}");
1319            }
1320            output_lines.push(line);
1321        }
1322    }
1323
1324    let output = child.wait()?;
1325    if !output.success() {
1326        // Using `unwrap` here avoids introducing another error variant, which seems excessive.
1327        let binary_path = binary_path.to_str().unwrap();
1328        return Err(Error::ExternalCommandRunFailed {
1329            binary: binary_path.to_string(),
1330            exit_status: output,
1331        });
1332    }
1333
1334    Ok(output_lines)
1335}
1336
/// Report whether a file named `binary_name` exists in any directory on `PATH`.
///
/// Uses `std::env::split_paths`, which handles the platform-specific separator (`:` on
/// Unix, `;` on Windows) instead of a hard-coded `':'` split, and `var_os` so a
/// non-UTF-8 `PATH` value does not make the lookup fail. Note this only checks existence,
/// not whether the file is executable.
pub fn is_binary_on_path(binary_name: &str) -> bool {
    match std::env::var_os("PATH") {
        Some(path) => std::env::split_paths(&path).any(|dir| dir.join(binary_name).exists()),
        None => false,
    }
}
1349
1350pub fn get_wallet_directory() -> Result<PathBuf> {
1351    Ok(dirs_next::data_dir()
1352        .ok_or_else(|| Error::CouldNotRetrieveDataDirectory)?
1353        .join("safe")
1354        .join("client")
1355        .join("wallet"))
1356}
1357
1358pub async fn notify_slack(inventory: DeploymentInventory) -> Result<()> {
1359    let webhook_url =
1360        std::env::var("SLACK_WEBHOOK_URL").map_err(|_| Error::SlackWebhookUrlNotSupplied)?;
1361
1362    let mut message = String::new();
1363    message.push_str("*Testnet Details*\n");
1364    message.push_str(&format!("Name: {}\n", inventory.name));
1365    message.push_str(&format!("Node count: {}\n", inventory.peers().len()));
1366    message.push_str(&format!("Faucet address: {:?}\n", inventory.faucet_address));
1367    match inventory.binary_option {
1368        BinaryOption::BuildFromSource {
1369            ref repo_owner,
1370            ref branch,
1371            ..
1372        } => {
1373            message.push_str("*Branch Details*\n");
1374            message.push_str(&format!("Repo owner: {repo_owner}\n"));
1375            message.push_str(&format!("Branch: {branch}\n"));
1376        }
1377        BinaryOption::Versioned {
1378            ant_version: ref safe_version,
1379            antnode_version: ref safenode_version,
1380            antctl_version: ref safenode_manager_version,
1381            ..
1382        } => {
1383            message.push_str("*Version Details*\n");
1384            message.push_str(&format!(
1385                "ant version: {}\n",
1386                safe_version
1387                    .as_ref()
1388                    .map_or("None".to_string(), |v| v.to_string())
1389            ));
1390            message.push_str(&format!(
1391                "safenode version: {}\n",
1392                safenode_version
1393                    .as_ref()
1394                    .map_or("None".to_string(), |v| v.to_string())
1395            ));
1396            message.push_str(&format!(
1397                "antctl version: {}\n",
1398                safenode_manager_version
1399                    .as_ref()
1400                    .map_or("None".to_string(), |v| v.to_string())
1401            ));
1402        }
1403    }
1404
1405    message.push_str("*Sample Peers*\n");
1406    message.push_str("```\n");
1407    for peer in inventory.peers().iter().take(20) {
1408        message.push_str(&format!("{peer}\n"));
1409    }
1410    message.push_str("```\n");
1411    message.push_str("*Available Files*\n");
1412    message.push_str("```\n");
1413    for (addr, file_name) in inventory.uploaded_files.iter() {
1414        message.push_str(&format!("{addr}: {file_name}\n"))
1415    }
1416    message.push_str("```\n");
1417
1418    let payload = json!({
1419        "text": message,
1420    });
1421    reqwest::Client::new()
1422        .post(webhook_url)
1423        .json(&payload)
1424        .send()
1425        .await?;
1426    println!("{message}");
1427    println!("Posted notification to Slack");
1428    Ok(())
1429}
1430
1431fn print_duration(duration: Duration) {
1432    let total_seconds = duration.as_secs();
1433    let minutes = total_seconds / 60;
1434    let seconds = total_seconds % 60;
1435    debug!("Time taken: {minutes} minutes and {seconds} seconds");
1436}
1437
1438pub fn get_progress_bar(length: u64) -> Result<ProgressBar> {
1439    let progress_bar = ProgressBar::new(length);
1440    progress_bar.set_style(
1441        ProgressStyle::default_bar()
1442            .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len}")?
1443            .progress_chars("#>-"),
1444    );
1445    progress_bar.enable_steady_tick(Duration::from_millis(100));
1446    Ok(progress_bar)
1447}
1448
/// Download and parse the environment details for `environment_name` from the
/// `sn-environment-type` S3 bucket.
///
/// There are three failure points — the S3 download, reading the downloaded file, and
/// parsing its JSON — and each one retries by restarting the whole loop (including the
/// download). A single `retries` counter is shared across all failure kinds, allowing up
/// to `max_retries` extra attempts in total; when it is exhausted,
/// `Error::EnvironmentDetailsNotFound` is returned. NOTE(review): there is no backoff
/// between attempts.
pub async fn get_environment_details(
    environment_name: &str,
    s3_repository: &S3Repository,
) -> Result<EnvironmentDetails> {
    let temp_file = tempfile::NamedTempFile::new()?;

    let max_retries = 3;
    let mut retries = 0;
    let env_details = loop {
        debug!("Downloading the environment details file for {environment_name} from S3");
        match s3_repository
            .download_object("sn-environment-type", environment_name, temp_file.path())
            .await
        {
            Ok(_) => {
                debug!("Downloaded the environment details file for {environment_name} from S3");
                let content = match std::fs::read_to_string(temp_file.path()) {
                    Ok(content) => content,
                    Err(err) => {
                        log::error!("Could not read the environment details file: {err:?}");
                        if retries < max_retries {
                            debug!("Retrying to read the environment details file");
                            retries += 1;
                            // Restarts the loop, which re-downloads the file.
                            continue;
                        } else {
                            return Err(Error::EnvironmentDetailsNotFound(
                                environment_name.to_string(),
                            ));
                        }
                    }
                };
                trace!("Content of the environment details file: {content}");

                match serde_json::from_str(&content) {
                    Ok(environment_details) => break environment_details,
                    Err(err) => {
                        log::error!("Could not parse the environment details file: {err:?}");
                        if retries < max_retries {
                            debug!("Retrying to parse the environment details file");
                            retries += 1;
                            // Restarts the loop, which re-downloads the file.
                            continue;
                        } else {
                            return Err(Error::EnvironmentDetailsNotFound(
                                environment_name.to_string(),
                            ));
                        }
                    }
                }
            }
            Err(err) => {
                log::error!(
                    "Could not download the environment details file for {environment_name} from S3: {err:?}"
                );
                if retries < max_retries {
                    retries += 1;
                    continue;
                } else {
                    return Err(Error::EnvironmentDetailsNotFound(
                        environment_name.to_string(),
                    ));
                }
            }
        }
    };

    debug!("Fetched environment details: {env_details:?}");

    Ok(env_details)
}
1518
1519pub async fn write_environment_details(
1520    s3_repository: &S3Repository,
1521    environment_name: &str,
1522    environment_details: &EnvironmentDetails,
1523) -> Result<()> {
1524    let temp_dir = tempfile::tempdir()?;
1525    let path = temp_dir.path().to_path_buf().join(environment_name);
1526    let mut file = File::create(&path)?;
1527    let json = serde_json::to_string(environment_details)?;
1528    file.write_all(json.as_bytes())?;
1529    s3_repository
1530        .upload_file("sn-environment-type", &path, true)
1531        .await?;
1532    Ok(())
1533}
1534
1535pub fn calculate_size_per_attached_volume(node_count: u16) -> u16 {
1536    if node_count == 0 {
1537        return 0;
1538    }
1539    let total_volume_required = node_count * STORAGE_REQUIRED_PER_NODE;
1540
1541    // 7 attached volumes per VM
1542    (total_volume_required as f64 / 7.0).ceil() as u16
1543}
1544
/// Build the URL of the bootstrap cache JSON served by the host at `ip_addr`.
pub fn get_bootstrap_cache_url(ip_addr: &IpAddr) -> String {
    format!("http://{}/bootstrap_cache.json", ip_addr)
}