pub mod ansible;
pub mod bootstrap;
pub mod clients;
pub mod deploy;
pub mod digital_ocean;
pub mod error;
pub mod funding;
pub mod infra;
pub mod inventory;
pub mod logs;
pub mod reserved_ip;
pub mod rpc_client;
pub mod s3;
pub mod safe;
pub mod setup;
pub mod ssh;
pub mod terraform;
pub mod upscale;

const STORAGE_REQUIRED_PER_NODE: u16 = 7;

use crate::{
    ansible::{
        extra_vars::ExtraVarsDocBuilder,
        inventory::{cleanup_environment_inventory, AnsibleInventoryType},
        provisioning::AnsibleProvisioner,
        AnsibleRunner,
    },
    error::{Error, Result},
    inventory::{DeploymentInventory, VirtualMachine},
    rpc_client::RpcClient,
    s3::S3Repository,
    ssh::SshClient,
    terraform::TerraformRunner,
};
use ant_service_management::ServiceStatus;
use flate2::read::GzDecoder;
use indicatif::{ProgressBar, ProgressStyle};
use infra::{build_terraform_args, InfraRunOptions};
use log::{debug, trace};
use semver::Version;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::{
    fs::File,
    io::{BufRead, BufReader, BufWriter, Write},
    net::IpAddr,
    path::{Path, PathBuf},
    process::{Command, Stdio},
    str::FromStr,
    time::Duration,
};
use tar::Archive;

const ANSIBLE_DEFAULT_FORKS: usize = 50;

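/// The kind of deployment an environment was created for.
///
/// A minimal round-trip sketch (fence marked `ignore` because crate paths are
/// elided here):
///
/// ```ignore
/// let kind: DeploymentType = "clients".parse()?;
/// assert_eq!(kind, DeploymentType::Client);
/// // Note the asymmetry: the `Client` variant parses from and displays as
/// // "clients".
/// assert_eq!(kind.to_string(), "clients");
/// ```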
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub enum DeploymentType {
    Bootstrap,
    Client,
    #[default]
    New,
}

#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct AnvilNodeData {
    pub data_payments_address: String,
    pub deployer_wallet_private_key: String,
    pub payment_token_address: String,
    pub rpc_url: String,
}

impl std::fmt::Display for DeploymentType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            DeploymentType::Bootstrap => write!(f, "bootstrap"),
            DeploymentType::Client => write!(f, "clients"),
            DeploymentType::New => write!(f, "new"),
        }
    }
}

impl std::str::FromStr for DeploymentType {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "bootstrap" => Ok(DeploymentType::Bootstrap),
            "clients" => Ok(DeploymentType::Client),
            "new" => Ok(DeploymentType::New),
            _ => Err(format!("Invalid deployment type: {s}")),
        }
    }
}

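/// The role a node plays in the network.
///
/// Each variant maps to an Ansible inventory and a Telegraf role, e.g.
/// (`ignore`d, crate paths elided):
///
/// ```ignore
/// let node_type: NodeType = "peer-cache".parse()?;
/// assert_eq!(node_type.telegraf_role(), "PEER_CACHE_NODE");
/// ```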
#[derive(Debug, Clone)]
pub enum NodeType {
    FullConePrivateNode,
    PortRestrictedConePrivateNode,
    Generic,
    Genesis,
    PeerCache,
    SymmetricPrivateNode,
    Upnp,
}

impl std::fmt::Display for NodeType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            NodeType::FullConePrivateNode => write!(f, "full-cone-private"),
            NodeType::PortRestrictedConePrivateNode => write!(f, "port-restricted-cone-private"),
            NodeType::Generic => write!(f, "generic"),
            NodeType::Genesis => write!(f, "genesis"),
            NodeType::PeerCache => write!(f, "peer-cache"),
            NodeType::SymmetricPrivateNode => write!(f, "symmetric-private"),
            NodeType::Upnp => write!(f, "upnp"),
        }
    }
}

impl std::str::FromStr for NodeType {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "full-cone-private" => Ok(NodeType::FullConePrivateNode),
            "port-restricted-cone-private" => Ok(NodeType::PortRestrictedConePrivateNode),
            "generic" => Ok(NodeType::Generic),
            "genesis" => Ok(NodeType::Genesis),
            "peer-cache" => Ok(NodeType::PeerCache),
            "symmetric-private" => Ok(NodeType::SymmetricPrivateNode),
            "upnp" => Ok(NodeType::Upnp),
            _ => Err(format!("Invalid node type: {s}")),
        }
    }
}

impl NodeType {
    pub fn telegraf_role(&self) -> &'static str {
        match self {
            NodeType::FullConePrivateNode => "NAT_STATIC_FULL_CONE_NODE",
            NodeType::PortRestrictedConePrivateNode => "PORT_RESTRICTED_CONE_NODE",
            NodeType::Generic => "GENERIC_NODE",
            NodeType::Genesis => "GENESIS_NODE",
            NodeType::PeerCache => "PEER_CACHE_NODE",
            NodeType::SymmetricPrivateNode => "NAT_RANDOMIZED_NODE",
            NodeType::Upnp => "UPNP_NODE",
        }
    }

    pub fn to_ansible_inventory_type(&self) -> AnsibleInventoryType {
        match self {
            NodeType::FullConePrivateNode => AnsibleInventoryType::FullConePrivateNodes,
            NodeType::PortRestrictedConePrivateNode => {
                AnsibleInventoryType::PortRestrictedConePrivateNodes
            }
            NodeType::Generic => AnsibleInventoryType::Nodes,
            NodeType::Genesis => AnsibleInventoryType::Genesis,
            NodeType::PeerCache => AnsibleInventoryType::PeerCacheNodes,
            NodeType::SymmetricPrivateNode => AnsibleInventoryType::SymmetricPrivateNodes,
            NodeType::Upnp => AnsibleInventoryType::Upnp,
        }
    }
}

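/// The EVM network a deployment pays against.
///
/// Note that both `Anvil` and `Custom` display as "evm-custom"; an Anvil node
/// appears to be treated as a custom EVM network downstream (assumed
/// intentional).
///
/// ```ignore
/// let net: EvmNetwork = "arbitrum-one".parse()?;
/// assert_eq!(net.to_string(), "evm-arbitrum-one");
/// ```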
#[derive(Clone, Debug, Default, Eq, Serialize, Deserialize, PartialEq)]
pub enum EvmNetwork {
    #[default]
    Anvil,
    ArbitrumOne,
    ArbitrumSepoliaTest,
    Custom,
}

impl std::fmt::Display for EvmNetwork {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            EvmNetwork::Anvil => write!(f, "evm-custom"),
            EvmNetwork::ArbitrumOne => write!(f, "evm-arbitrum-one"),
            EvmNetwork::ArbitrumSepoliaTest => write!(f, "evm-arbitrum-sepolia-test"),
            EvmNetwork::Custom => write!(f, "evm-custom"),
        }
    }
}

impl std::str::FromStr for EvmNetwork {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "anvil" => Ok(EvmNetwork::Anvil),
            "arbitrum-one" => Ok(EvmNetwork::ArbitrumOne),
            "arbitrum-sepolia-test" => Ok(EvmNetwork::ArbitrumSepoliaTest),
            "custom" => Ok(EvmNetwork::Custom),
            _ => Err(format!("Invalid EVM network type: {s}")),
        }
    }
}

#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct EvmDetails {
    pub network: EvmNetwork,
    pub data_payments_address: Option<String>,
    pub payment_token_address: Option<String>,
    pub rpc_url: Option<String>,
}

#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct EnvironmentDetails {
    pub deployment_type: DeploymentType,
    pub environment_type: EnvironmentType,
    pub evm_details: EvmDetails,
    pub funding_wallet_address: Option<String>,
    pub network_id: Option<u8>,
    pub region: String,
    pub rewards_address: Option<String>,
}

#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub enum EnvironmentType {
    #[default]
    Development,
    Production,
    Staging,
}

impl EnvironmentType {
    pub fn get_tfvars_filenames(&self, name: &str, region: &str) -> Vec<String> {
        match self {
            EnvironmentType::Development => vec![
                "dev.tfvars".to_string(),
                format!("dev-images-{region}.tfvars"),
            ],
            EnvironmentType::Staging => vec![
                "staging.tfvars".to_string(),
                format!("staging-images-{region}.tfvars"),
            ],
            EnvironmentType::Production => vec![
                format!("{name}.tfvars"),
                format!("production-images-{region}.tfvars"),
            ],
        }
    }

    pub fn get_default_peer_cache_node_count(&self) -> u16 {
        match self {
            EnvironmentType::Development => 5,
            EnvironmentType::Production => 5,
            EnvironmentType::Staging => 5,
        }
    }

    pub fn get_default_node_count(&self) -> u16 {
        match self {
            EnvironmentType::Development => 25,
            EnvironmentType::Production => 25,
            EnvironmentType::Staging => 25,
        }
    }

    pub fn get_default_symmetric_private_node_count(&self) -> u16 {
        self.get_default_node_count()
    }

    pub fn get_default_full_cone_private_node_count(&self) -> u16 {
        self.get_default_node_count()
    }

    pub fn get_default_upnp_private_node_count(&self) -> u16 {
        self.get_default_node_count()
    }
}

impl std::fmt::Display for EnvironmentType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            EnvironmentType::Development => write!(f, "development"),
            EnvironmentType::Production => write!(f, "production"),
            EnvironmentType::Staging => write!(f, "staging"),
        }
    }
}

impl FromStr for EnvironmentType {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "development" => Ok(EnvironmentType::Development),
            "production" => Ok(EnvironmentType::Production),
            "staging" => Ok(EnvironmentType::Staging),
            _ => Err(Error::EnvironmentNameFromStringError(s.to_string())),
        }
    }
}

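/// How the binaries for a deployment are obtained: built from a source branch
/// on a dedicated build machine, or fetched as pre-built released versions.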
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum BinaryOption {
    BuildFromSource {
        antnode_features: Option<String>,
        branch: String,
        repo_owner: String,
        skip_binary_build: bool,
    },
    Versioned {
        ant_version: Option<Version>,
        antctl_version: Option<Version>,
        antnode_version: Option<Version>,
    },
}

impl BinaryOption {
    pub fn should_provision_build_machine(&self) -> bool {
        match self {
            BinaryOption::BuildFromSource {
                skip_binary_build, ..
            } => !skip_binary_build,
            BinaryOption::Versioned { .. } => false,
        }
    }

    pub fn print(&self) {
        match self {
            BinaryOption::BuildFromSource {
                antnode_features,
                branch,
                repo_owner,
                skip_binary_build: _,
            } => {
                println!("Source configuration:");
                println!("  Repository owner: {repo_owner}");
                println!("  Branch: {branch}");
                if let Some(features) = antnode_features {
                    println!("  Antnode features: {features}");
                }
            }
            BinaryOption::Versioned {
                ant_version,
                antctl_version,
                antnode_version,
            } => {
                println!("Versioned binaries configuration:");
                if let Some(version) = ant_version {
                    println!("  ant version: {version}");
                }
                if let Some(version) = antctl_version {
                    println!("  antctl version: {version}");
                }
                if let Some(version) = antnode_version {
                    println!("  antnode version: {version}");
                }
            }
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub enum CloudProvider {
    Aws,
    DigitalOcean,
}

impl std::fmt::Display for CloudProvider {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            CloudProvider::Aws => write!(f, "aws"),
            CloudProvider::DigitalOcean => write!(f, "digital-ocean"),
        }
    }
}

impl CloudProvider {
    pub fn get_ssh_user(&self) -> String {
        match self {
            CloudProvider::Aws => "ubuntu".to_string(),
            CloudProvider::DigitalOcean => "root".to_string(),
        }
    }
}

#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum LogFormat {
    Default,
    Json,
}

impl LogFormat {
    pub fn parse_from_str(val: &str) -> Result<Self> {
        match val {
            "default" => Ok(LogFormat::Default),
            "json" => Ok(LogFormat::Json),
            _ => Err(Error::LoggingConfiguration(
                "The only valid values for this argument are \"default\" or \"json\"".to_string(),
            )),
        }
    }

    pub fn as_str(&self) -> &'static str {
        match self {
            LogFormat::Default => "default",
            LogFormat::Json => "json",
        }
    }
}

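/// Options for a rolling upgrade of the node services.
///
/// `get_ansible_vars` serialises these as Ansible extra vars; when both
/// `repo_owner` and `branch` are set, the upgrade targets a custom-built
/// binary rather than a released version.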
#[derive(Clone)]
pub struct UpgradeOptions {
    pub ansible_verbose: bool,
    pub branch: Option<String>,
    pub custom_inventory: Option<Vec<VirtualMachine>>,
    pub env_variables: Option<Vec<(String, String)>>,
    pub force: bool,
    pub forks: usize,
    pub interval: Duration,
    pub name: String,
    pub node_type: Option<NodeType>,
    pub pre_upgrade_delay: Option<u64>,
    pub provider: CloudProvider,
    pub repo_owner: Option<String>,
    pub version: Option<String>,
}

impl UpgradeOptions {
    pub fn get_ansible_vars(&self) -> String {
        let mut extra_vars = ExtraVarsDocBuilder::default();
        extra_vars.add_variable("interval", &self.interval.as_millis().to_string());
        if let Some(env_variables) = &self.env_variables {
            extra_vars.add_env_variable_list("env_variables", env_variables.clone());
        }
        if self.force {
            extra_vars.add_variable("force", &self.force.to_string());
        }
        if let Some(version) = &self.version {
            extra_vars.add_variable("antnode_version", version);
        }
        if let Some(pre_upgrade_delay) = &self.pre_upgrade_delay {
            extra_vars.add_variable("pre_upgrade_delay", &pre_upgrade_delay.to_string());
        }

        if let (Some(repo_owner), Some(branch)) = (&self.repo_owner, &self.branch) {
            let binary_option = BinaryOption::BuildFromSource {
                antnode_features: None,
                branch: branch.clone(),
                repo_owner: repo_owner.clone(),
                skip_binary_build: true,
            };
            extra_vars.add_node_url_or_version(&self.name, &binary_option);
        }

        extra_vars.build()
    }
}

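/// Builder for [`TestnetDeployer`].
///
/// A minimal usage sketch, assuming the `DO_PAT`, `TERRAFORM_STATE_BUCKET_NAME`,
/// `SSH_KEY_PATH` and `ANSIBLE_VAULT_PASSWORD_PATH` environment variables are
/// set; the environment name is illustrative (`ignore`d because it needs real
/// credentials):
///
/// ```ignore
/// let deployer = TestnetDeployBuilder::new()
///     .environment_name("DEV-01")
///     .provider(CloudProvider::DigitalOcean)
///     .deployment_type(EnvironmentType::Development)
///     .region("lon1".to_string())
///     .build()?;
/// ```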
#[derive(Default)]
pub struct TestnetDeployBuilder {
    ansible_forks: Option<usize>,
    ansible_verbose_mode: bool,
    deployment_type: EnvironmentType,
    environment_name: String,
    provider: Option<CloudProvider>,
    region: Option<String>,
    ssh_secret_key_path: Option<PathBuf>,
    state_bucket_name: Option<String>,
    terraform_binary_path: Option<PathBuf>,
    vault_password_path: Option<PathBuf>,
    working_directory_path: Option<PathBuf>,
}

impl TestnetDeployBuilder {
    pub fn new() -> Self {
        Default::default()
    }

    pub fn ansible_verbose_mode(&mut self, ansible_verbose_mode: bool) -> &mut Self {
        self.ansible_verbose_mode = ansible_verbose_mode;
        self
    }

    pub fn ansible_forks(&mut self, ansible_forks: usize) -> &mut Self {
        self.ansible_forks = Some(ansible_forks);
        self
    }

    pub fn deployment_type(&mut self, deployment_type: EnvironmentType) -> &mut Self {
        self.deployment_type = deployment_type;
        self
    }

    pub fn environment_name(&mut self, name: &str) -> &mut Self {
        self.environment_name = name.to_string();
        self
    }

    pub fn provider(&mut self, provider: CloudProvider) -> &mut Self {
        self.provider = Some(provider);
        self
    }

    pub fn state_bucket_name(&mut self, state_bucket_name: String) -> &mut Self {
        self.state_bucket_name = Some(state_bucket_name);
        self
    }

    pub fn terraform_binary_path(&mut self, terraform_binary_path: PathBuf) -> &mut Self {
        self.terraform_binary_path = Some(terraform_binary_path);
        self
    }

    pub fn working_directory(&mut self, working_directory_path: PathBuf) -> &mut Self {
        self.working_directory_path = Some(working_directory_path);
        self
    }

    pub fn ssh_secret_key_path(&mut self, ssh_secret_key_path: PathBuf) -> &mut Self {
        self.ssh_secret_key_path = Some(ssh_secret_key_path);
        self
    }

    pub fn vault_password_path(&mut self, vault_password_path: PathBuf) -> &mut Self {
        self.vault_password_path = Some(vault_password_path);
        self
    }

    pub fn region(&mut self, region: String) -> &mut Self {
        self.region = Some(region);
        self
    }

    pub fn build(&self) -> Result<TestnetDeployer> {
        let provider = self.provider.unwrap_or(CloudProvider::DigitalOcean);
        match provider {
            CloudProvider::DigitalOcean => {
                let digital_ocean_pat = std::env::var("DO_PAT").map_err(|_| {
                    Error::CloudProviderCredentialsNotSupplied("DO_PAT".to_string())
                })?;
                // The token is exposed under both names because different
                // tools in the stack read different variables.
                std::env::set_var("DIGITALOCEAN_TOKEN", digital_ocean_pat.clone());
                std::env::set_var("DO_API_TOKEN", digital_ocean_pat);
            }
            _ => {
                return Err(Error::CloudProviderNotSupported(provider.to_string()));
            }
        }

        let state_bucket_name = match self.state_bucket_name {
            Some(ref bucket_name) => bucket_name.clone(),
            None => std::env::var("TERRAFORM_STATE_BUCKET_NAME")?,
        };

        let default_terraform_bin_path = PathBuf::from("terraform");
        let terraform_binary_path = self
            .terraform_binary_path
            .as_ref()
            .unwrap_or(&default_terraform_bin_path);

        let working_directory_path = match self.working_directory_path {
            Some(ref work_dir_path) => work_dir_path.clone(),
            None => std::env::current_dir()?.join("resources"),
        };

        let ssh_secret_key_path = match self.ssh_secret_key_path {
            Some(ref ssh_sk_path) => ssh_sk_path.clone(),
            None => PathBuf::from(std::env::var("SSH_KEY_PATH")?),
        };

        let vault_password_path = match self.vault_password_path {
            Some(ref vault_pw_path) => vault_pw_path.clone(),
            None => PathBuf::from(std::env::var("ANSIBLE_VAULT_PASSWORD_PATH")?),
        };

        let region = match self.region {
            Some(ref region) => region.clone(),
            None => "lon1".to_string(),
        };

        let terraform_runner = TerraformRunner::new(
            terraform_binary_path.to_path_buf(),
            working_directory_path
                .join("terraform")
                .join("testnet")
                .join(provider.to_string()),
            provider,
            &state_bucket_name,
        )?;
        let ansible_runner = AnsibleRunner::new(
            self.ansible_forks.unwrap_or(ANSIBLE_DEFAULT_FORKS),
            self.ansible_verbose_mode,
            &self.environment_name,
            provider,
            ssh_secret_key_path.clone(),
            vault_password_path,
            working_directory_path.join("ansible"),
        )?;
        let ssh_client = SshClient::new(ssh_secret_key_path);
        let ansible_provisioner =
            AnsibleProvisioner::new(ansible_runner, provider, ssh_client.clone());
        let rpc_client = RpcClient::new(
            PathBuf::from("/usr/local/bin/safenode_rpc_client"),
            working_directory_path.clone(),
        );

        // Remove any `safe` binary left over from a previous run.
        let safe_path = working_directory_path.join("safe");
        if safe_path.exists() {
            std::fs::remove_file(safe_path)?;
        }

        let testnet = TestnetDeployer::new(
            ansible_provisioner,
            provider,
            self.deployment_type.clone(),
            &self.environment_name,
            rpc_client,
            S3Repository {},
            ssh_client,
            terraform_runner,
            working_directory_path,
            region,
        )?;

        Ok(testnet)
    }
}

#[derive(Clone)]
pub struct TestnetDeployer {
    pub ansible_provisioner: AnsibleProvisioner,
    pub cloud_provider: CloudProvider,
    pub deployment_type: EnvironmentType,
    pub environment_name: String,
    pub inventory_file_path: PathBuf,
    pub region: String,
    pub rpc_client: RpcClient,
    pub s3_repository: S3Repository,
    pub ssh_client: SshClient,
    pub terraform_runner: TerraformRunner,
    pub working_directory_path: PathBuf,
}

impl TestnetDeployer {
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        ansible_provisioner: AnsibleProvisioner,
        cloud_provider: CloudProvider,
        deployment_type: EnvironmentType,
        environment_name: &str,
        rpc_client: RpcClient,
        s3_repository: S3Repository,
        ssh_client: SshClient,
        terraform_runner: TerraformRunner,
        working_directory_path: PathBuf,
        region: String,
    ) -> Result<TestnetDeployer> {
        if environment_name.is_empty() {
            return Err(Error::EnvironmentNameRequired);
        }
        let inventory_file_path = working_directory_path
            .join("ansible")
            .join("inventory")
            .join("dev_inventory_digital_ocean.yml");
        Ok(TestnetDeployer {
            ansible_provisioner,
            cloud_provider,
            deployment_type,
            environment_name: environment_name.to_string(),
            inventory_file_path,
            region,
            rpc_client,
            ssh_client,
            s3_repository,
            terraform_runner,
            working_directory_path,
        })
    }

    pub async fn init(&self) -> Result<()> {
        if self
            .s3_repository
            .folder_exists(
                "sn-testnet",
                &format!("testnet-logs/{}", self.environment_name),
            )
            .await?
        {
            return Err(Error::LogsForPreviousTestnetExist(
                self.environment_name.clone(),
            ));
        }

        self.terraform_runner.init()?;
        let workspaces = self.terraform_runner.workspace_list()?;
        if !workspaces.contains(&self.environment_name) {
            self.terraform_runner
                .workspace_new(&self.environment_name)?;
        } else {
            println!("Workspace {} already exists", self.environment_name);
        }

        let rpc_client_path = self.working_directory_path.join("safenode_rpc_client");
        if !rpc_client_path.is_file() {
            println!("Downloading the rpc client for safenode...");
            let archive_name = "safenode_rpc_client-latest-x86_64-unknown-linux-musl.tar.gz";
            get_and_extract_archive_from_s3(
                &self.s3_repository,
                "sn-node-rpc-client",
                archive_name,
                &self.working_directory_path,
            )
            .await?;
            #[cfg(unix)]
            {
                use std::os::unix::fs::PermissionsExt;
                let mut permissions = std::fs::metadata(&rpc_client_path)?.permissions();
                permissions.set_mode(0o755);
                std::fs::set_permissions(&rpc_client_path, permissions)?;
            }
        }

        Ok(())
    }

    pub fn plan(&self, options: &InfraRunOptions) -> Result<()> {
        println!("Selecting {} workspace...", options.name);
        self.terraform_runner.workspace_select(&options.name)?;

        let args = build_terraform_args(options)?;

        self.terraform_runner
            .plan(Some(args), options.tfvars_filenames.clone())?;
        Ok(())
    }

    pub fn start(
        &self,
        interval: Duration,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.start_nodes(
            &self.environment_name,
            interval,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }

    pub fn apply_delete_node_records_cron(
        &self,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.apply_delete_node_records_cron(
            &self.environment_name,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }

    pub fn reset(
        &self,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.reset_nodes(
            &self.environment_name,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }

    /// Prints the status of every node registry in the environment, followed
    /// by a summary of host and node counts.
    pub fn status(&self) -> Result<()> {
        self.ansible_provisioner.status()?;

        let peer_cache_node_registries = self
            .ansible_provisioner
            .get_node_registries(&AnsibleInventoryType::PeerCacheNodes)?;
        let generic_node_registries = self
            .ansible_provisioner
            .get_node_registries(&AnsibleInventoryType::Nodes)?;
        let symmetric_private_node_registries = self
            .ansible_provisioner
            .get_node_registries(&AnsibleInventoryType::SymmetricPrivateNodes)?;
        let full_cone_private_node_registries = self
            .ansible_provisioner
            .get_node_registries(&AnsibleInventoryType::FullConePrivateNodes)?;
        let upnp_private_node_registries = self
            .ansible_provisioner
            .get_node_registries(&AnsibleInventoryType::Upnp)?;
        let genesis_node_registry = self
            .ansible_provisioner
            .get_node_registries(&AnsibleInventoryType::Genesis)?
            .clone();

        peer_cache_node_registries.print();
        generic_node_registries.print();
        symmetric_private_node_registries.print();
        full_cone_private_node_registries.print();
        upnp_private_node_registries.print();
        genesis_node_registry.print();

        let all_registries = [
            &peer_cache_node_registries,
            &generic_node_registries,
            &symmetric_private_node_registries,
            &full_cone_private_node_registries,
            &upnp_private_node_registries,
            &genesis_node_registry,
        ];

        let mut total_nodes = 0;
        let mut running_nodes = 0;
        let mut stopped_nodes = 0;
        let mut added_nodes = 0;
        let mut removed_nodes = 0;

        for (_, registry) in all_registries
            .iter()
            .flat_map(|r| r.retrieved_registries.iter())
        {
            for node in registry.nodes.iter() {
                total_nodes += 1;
                match node.status {
                    ServiceStatus::Running => running_nodes += 1,
                    ServiceStatus::Stopped => stopped_nodes += 1,
                    ServiceStatus::Added => added_nodes += 1,
                    ServiceStatus::Removed => removed_nodes += 1,
                }
            }
        }

        let peer_cache_hosts = peer_cache_node_registries.retrieved_registries.len();
        let generic_hosts = generic_node_registries.retrieved_registries.len();
        let symmetric_private_hosts = symmetric_private_node_registries.retrieved_registries.len();
        let full_cone_private_hosts = full_cone_private_node_registries.retrieved_registries.len();
        let upnp_private_hosts = upnp_private_node_registries.retrieved_registries.len();

        let peer_cache_nodes = peer_cache_node_registries
            .retrieved_registries
            .iter()
            .flat_map(|(_, n)| n.nodes.iter())
            .count();
        let generic_nodes = generic_node_registries
            .retrieved_registries
            .iter()
            .flat_map(|(_, n)| n.nodes.iter())
            .count();
        let symmetric_private_nodes = symmetric_private_node_registries
            .retrieved_registries
            .iter()
            .flat_map(|(_, n)| n.nodes.iter())
            .count();
        let full_cone_private_nodes = full_cone_private_node_registries
            .retrieved_registries
            .iter()
            .flat_map(|(_, n)| n.nodes.iter())
            .count();
        let upnp_private_nodes = upnp_private_node_registries
            .retrieved_registries
            .iter()
            .flat_map(|(_, n)| n.nodes.iter())
            .count();

        println!("-------");
        println!("Summary");
        println!("-------");
        println!(
            "Total peer cache nodes ({}x{}): {}",
            peer_cache_hosts,
            if peer_cache_hosts > 0 {
                peer_cache_nodes / peer_cache_hosts
            } else {
                0
            },
            peer_cache_nodes
        );
        println!(
            "Total generic nodes ({}x{}): {}",
            generic_hosts,
            if generic_hosts > 0 {
                generic_nodes / generic_hosts
            } else {
                0
            },
            generic_nodes
        );
        println!(
            "Total symmetric private nodes ({}x{}): {}",
            symmetric_private_hosts,
            if symmetric_private_hosts > 0 {
                symmetric_private_nodes / symmetric_private_hosts
            } else {
                0
            },
            symmetric_private_nodes
        );
        println!(
            "Total full cone private nodes ({}x{}): {}",
            full_cone_private_hosts,
            if full_cone_private_hosts > 0 {
                full_cone_private_nodes / full_cone_private_hosts
            } else {
                0
            },
            full_cone_private_nodes
        );
        println!(
            "Total UPnP private nodes ({}x{}): {}",
            upnp_private_hosts,
            if upnp_private_hosts > 0 {
                upnp_private_nodes / upnp_private_hosts
            } else {
                0
            },
            upnp_private_nodes
        );
        println!("Total nodes: {total_nodes}");
        println!("Running nodes: {running_nodes}");
        println!("Stopped nodes: {stopped_nodes}");
        println!("Added nodes: {added_nodes}");
        println!("Removed nodes: {removed_nodes}");

        Ok(())
    }

    pub fn cleanup_node_logs(&self, setup_cron: bool) -> Result<()> {
        self.ansible_provisioner.cleanup_node_logs(setup_cron)?;
        Ok(())
    }

    pub fn start_telegraf(
        &self,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.start_telegraf(
            &self.environment_name,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }

    pub fn stop(
        &self,
        interval: Duration,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
        delay: Option<u64>,
        service_names: Option<Vec<String>>,
    ) -> Result<()> {
        self.ansible_provisioner.stop_nodes(
            &self.environment_name,
            interval,
            node_type,
            custom_inventory,
            delay,
            service_names,
        )?;
        Ok(())
    }

    pub fn stop_telegraf(
        &self,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.stop_telegraf(
            &self.environment_name,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }

    pub fn upgrade(&self, options: UpgradeOptions) -> Result<()> {
        self.ansible_provisioner.upgrade_nodes(&options)?;
        Ok(())
    }

    pub fn upgrade_antctl(
        &self,
        version: Version,
        node_type: Option<NodeType>,
        custom_inventory: Option<Vec<VirtualMachine>>,
    ) -> Result<()> {
        self.ansible_provisioner.upgrade_antctl(
            &self.environment_name,
            &version,
            node_type,
            custom_inventory,
        )?;
        Ok(())
    }

    pub fn upgrade_geoip_telegraf(&self, name: &str) -> Result<()> {
        self.ansible_provisioner.upgrade_geoip_telegraf(name)?;
        Ok(())
    }

    pub fn upgrade_node_telegraf(&self, name: &str) -> Result<()> {
        self.ansible_provisioner.upgrade_node_telegraf(name)?;
        Ok(())
    }

    pub fn upgrade_client_telegraf(&self, name: &str) -> Result<()> {
        self.ansible_provisioner.upgrade_client_telegraf(name)?;
        Ok(())
    }

    pub async fn clean(&self) -> Result<()> {
        let environment_details =
            get_environment_details(&self.environment_name, &self.s3_repository)
                .await
                .inspect_err(|err| {
                    println!("Failed to get environment details: {err}. Continuing cleanup...");
                })
                .ok();
        if let Some(environment_details) = &environment_details {
            funding::drain_funds(&self.ansible_provisioner, environment_details).await?;
        }

        self.destroy_infra(environment_details).await?;

        cleanup_environment_inventory(
            &self.environment_name,
            &self
                .working_directory_path
                .join("ansible")
                .join("inventory"),
            None,
        )?;

        println!("Deleted Ansible inventory for {}", self.environment_name);

        if let Err(err) = self
            .s3_repository
            .delete_object("sn-environment-type", &self.environment_name)
            .await
        {
            println!("Failed to delete environment type: {err}. Continuing cleanup...");
        }
        Ok(())
    }

    async fn destroy_infra(&self, environment_details: Option<EnvironmentDetails>) -> Result<()> {
        infra::select_workspace(&self.terraform_runner, &self.environment_name)?;

        let options = InfraRunOptions::generate_existing(
            &self.environment_name,
            &self.region,
            &self.terraform_runner,
            environment_details.as_ref(),
        )
        .await?;

        let args = build_terraform_args(&options)?;
        let tfvars_filenames = if let Some(environment_details) = &environment_details {
            environment_details
                .environment_type
                .get_tfvars_filenames(&self.environment_name, &self.region)
        } else {
            vec![]
        };

        self.terraform_runner
            .destroy(Some(args), Some(tfvars_filenames))?;

        infra::delete_workspace(&self.terraform_runner, &self.environment_name)?;

        Ok(())
    }
}

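/// Retrieves the genesis node's multiaddr and public IP by reading
/// `/var/antctl/node_registry.json` on the genesis VM over SSH, preferring a
/// non-loopback `quic-v1` listen address from the first node and falling back
/// to any node's `quic-v1` address.
///
/// Returns `Ok(None)` when the environment has no genesis VM.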
pub fn get_genesis_multiaddr(
    ansible_runner: &AnsibleRunner,
    ssh_client: &SshClient,
) -> Result<Option<(String, IpAddr)>> {
    let genesis_inventory = ansible_runner.get_inventory(AnsibleInventoryType::Genesis, true)?;
    if genesis_inventory.is_empty() {
        return Ok(None);
    }
    let genesis_ip = genesis_inventory[0].public_ip_addr;

    let multiaddr = ssh_client
        .run_command(
            &genesis_ip,
            "root",
            "jq -r '.nodes[] | select(.initial_peers_config.first == true) | .listen_addr[] | select(contains(\"127.0.0.1\") | not) | select(contains(\"quic-v1\"))' /var/antctl/node_registry.json | head -n 1",
            false,
        )
        .map(|output| output.first().cloned())
        .unwrap_or_else(|err| {
            log::error!("Failed to find first node with quic-v1 protocol: {err:?}");
            None
        });

    let multiaddr = match multiaddr {
        Some(addr) => addr,
        None => ssh_client
            .run_command(
                &genesis_ip,
                "root",
                "jq -r '.nodes[] | .listen_addr[] | select(contains(\"127.0.0.1\") | not) | select(contains(\"quic-v1\"))' /var/antctl/node_registry.json | head -n 1",
                false,
            )?
            .first()
            .cloned()
            .ok_or_else(|| Error::GenesisListenAddress)?,
    };

    Ok(Some((multiaddr, genesis_ip)))
}

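/// Reads the Anvil (EVM test node) configuration from the CSV that the node
/// writes to `/home/ant/.local/share/autonomi/evm_testnet_data.csv`, retrying
/// up to five times with a five-second delay.
///
/// The expected field order is:
/// `rpc_url,payment_token_address,data_payments_address,deployer_wallet_private_key`.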
pub fn get_anvil_node_data(
    ansible_runner: &AnsibleRunner,
    ssh_client: &SshClient,
) -> Result<AnvilNodeData> {
    let evm_inventory = ansible_runner.get_inventory(AnsibleInventoryType::EvmNodes, true)?;
    if evm_inventory.is_empty() {
        return Err(Error::EvmNodeNotFound);
    }

    let evm_ip = evm_inventory[0].public_ip_addr;
    debug!("Retrieved IP address for EVM node: {evm_ip}");
    let csv_file_path = "/home/ant/.local/share/autonomi/evm_testnet_data.csv";

    const MAX_ATTEMPTS: u8 = 5;
    const RETRY_DELAY: Duration = Duration::from_secs(5);

    for attempt in 1..=MAX_ATTEMPTS {
        match ssh_client.run_command(&evm_ip, "ant", &format!("cat {csv_file_path}"), false) {
            Ok(output) => {
                if let Some(csv_contents) = output.first() {
                    let parts: Vec<&str> = csv_contents.split(',').collect();
                    if parts.len() != 4 {
                        return Err(Error::EvmTestnetDataParsingError(
                            "Expected 4 fields in the CSV".to_string(),
                        ));
                    }

                    let evm_testnet_data = AnvilNodeData {
                        rpc_url: parts[0].trim().to_string(),
                        payment_token_address: parts[1].trim().to_string(),
                        data_payments_address: parts[2].trim().to_string(),
                        deployer_wallet_private_key: parts[3].trim().to_string(),
                    };
                    return Ok(evm_testnet_data);
                }
            }
            Err(e) => {
                if attempt == MAX_ATTEMPTS {
                    return Err(e);
                }
                println!(
                    "Attempt {} failed to read EVM testnet data. Retrying in {} seconds...",
                    attempt,
                    RETRY_DELAY.as_secs()
                );
            }
        }
        std::thread::sleep(RETRY_DELAY);
    }

    Err(Error::EvmTestnetDataNotFound)
}

pub fn get_multiaddr(
    ansible_runner: &AnsibleRunner,
    ssh_client: &SshClient,
) -> Result<(String, IpAddr)> {
    let node_inventory = ansible_runner.get_inventory(AnsibleInventoryType::Nodes, true)?;
    let node_ip = node_inventory
        .iter()
        .find(|vm| vm.name.ends_with("-node-1"))
        .ok_or_else(|| Error::NodeAddressNotFound)?
        .public_ip_addr;

    debug!("Getting multiaddr from node {node_ip}");

    let multiaddr = ssh_client
        .run_command(
            &node_ip,
            "root",
            "jq -r '.nodes[] | .listen_addr[] | select(contains(\"127.0.0.1\") | not)' /var/antctl/node_registry.json | head -n 1",
            false,
        )?
        .first()
        .cloned()
        .ok_or_else(|| Error::NodeAddressNotFound)?;

    Ok((multiaddr, node_ip))
}

pub async fn get_and_extract_archive_from_s3(
    s3_repository: &S3Repository,
    bucket_name: &str,
    archive_bucket_path: &str,
    dest_path: &Path,
) -> Result<()> {
    let archive_file_name = archive_bucket_path.split('/').next_back().unwrap();
    let archive_dest_path = dest_path.join(archive_file_name);
    s3_repository
        .download_object(bucket_name, archive_bucket_path, &archive_dest_path)
        .await?;
    extract_archive(&archive_dest_path, dest_path)?;
    Ok(())
}

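/// Extracts a gzipped tar archive into `dest_path`, then deletes the archive
/// file itself.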
pub fn extract_archive(archive_path: &Path, dest_path: &Path) -> Result<()> {
    let archive_file = File::open(archive_path)?;
    let decoder = GzDecoder::new(archive_file);
    let mut archive = Archive::new(decoder);
    let entries = archive.entries()?;
    for entry_result in entries {
        let mut entry = entry_result?;
        let extract_path = dest_path.join(entry.path()?);
        if entry.header().entry_type() == tar::EntryType::Directory {
            std::fs::create_dir_all(extract_path)?;
            continue;
        }
        let mut file = BufWriter::new(File::create(extract_path)?);
        std::io::copy(&mut entry, &mut file)?;
    }
    std::fs::remove_file(archive_path)?;
    Ok(())
}

pub fn run_external_command(
    binary_path: PathBuf,
    working_directory_path: PathBuf,
    args: Vec<String>,
    suppress_stdout: bool,
    suppress_stderr: bool,
) -> Result<Vec<String>> {
    let mut command = Command::new(binary_path.clone());
    for arg in &args {
        command.arg(arg);
    }
    command.stdout(Stdio::piped());
    command.stderr(Stdio::piped());
    command.current_dir(working_directory_path.clone());
    debug!("Running {binary_path:#?} with args {args:#?}");
    debug!("Working directory set to {working_directory_path:#?}");

    let mut child = command.spawn()?;
    let mut output_lines = Vec::new();

    if let Some(ref mut stdout) = child.stdout {
        let reader = BufReader::new(stdout);
        for line in reader.lines() {
            let line = line?;
            if !suppress_stdout {
                println!("{line}");
            }
            output_lines.push(line);
        }
    }

    if let Some(ref mut stderr) = child.stderr {
        let reader = BufReader::new(stderr);
        for line in reader.lines() {
            let line = line?;
            if !suppress_stderr {
                eprintln!("{line}");
            }
            output_lines.push(line);
        }
    }

    let output = child.wait()?;
    if !output.success() {
        let binary_path = binary_path.to_str().unwrap();
        return Err(Error::ExternalCommandRunFailed {
            binary: binary_path.to_string(),
            exit_status: output,
        });
    }

    Ok(output_lines)
}

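/// Returns whether `binary_name` can be found in any directory on `PATH`
/// (Unix-style `:`-separated `PATH` is assumed).
///
/// ```ignore
/// if !is_binary_on_path("terraform") {
///     eprintln!("terraform must be installed");
/// }
/// ```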
pub fn is_binary_on_path(binary_name: &str) -> bool {
    if let Ok(path) = std::env::var("PATH") {
        for dir in path.split(':') {
            let mut full_path = PathBuf::from(dir);
            full_path.push(binary_name);
            if full_path.exists() {
                return true;
            }
        }
    }
    false
}

pub fn get_wallet_directory() -> Result<PathBuf> {
    Ok(dirs_next::data_dir()
        .ok_or_else(|| Error::CouldNotRetrieveDataDirectory)?
        .join("safe")
        .join("client")
        .join("wallet"))
}

pub async fn notify_slack(inventory: DeploymentInventory) -> Result<()> {
    let webhook_url =
        std::env::var("SLACK_WEBHOOK_URL").map_err(|_| Error::SlackWebhookUrlNotSupplied)?;

    let mut message = String::new();
    message.push_str("*Testnet Details*\n");
    message.push_str(&format!("Name: {}\n", inventory.name));
    message.push_str(&format!("Node count: {}\n", inventory.peers().len()));
    message.push_str(&format!("Faucet address: {:?}\n", inventory.faucet_address));
    match inventory.binary_option {
        BinaryOption::BuildFromSource {
            ref repo_owner,
            ref branch,
            ..
        } => {
            message.push_str("*Branch Details*\n");
            message.push_str(&format!("Repo owner: {repo_owner}\n"));
            message.push_str(&format!("Branch: {branch}\n"));
        }
        BinaryOption::Versioned {
            ref ant_version,
            ref antctl_version,
            ref antnode_version,
        } => {
            message.push_str("*Version Details*\n");
            message.push_str(&format!(
                "ant version: {}\n",
                ant_version
                    .as_ref()
                    .map_or("None".to_string(), |v| v.to_string())
            ));
            message.push_str(&format!(
                "antnode version: {}\n",
                antnode_version
                    .as_ref()
                    .map_or("None".to_string(), |v| v.to_string())
            ));
            message.push_str(&format!(
                "antctl version: {}\n",
                antctl_version
                    .as_ref()
                    .map_or("None".to_string(), |v| v.to_string())
            ));
        }
    }

    message.push_str("*Sample Peers*\n");
    message.push_str("```\n");
    for peer in inventory.peers().iter().take(20) {
        message.push_str(&format!("{peer}\n"));
    }
    message.push_str("```\n");
    message.push_str("*Available Files*\n");
    message.push_str("```\n");
    for (addr, file_name) in inventory.uploaded_files.iter() {
        message.push_str(&format!("{addr}: {file_name}\n"));
    }
    message.push_str("```\n");

    let payload = json!({
        "text": message,
    });
    reqwest::Client::new()
        .post(webhook_url)
        .json(&payload)
        .send()
        .await?;
    println!("{message}");
    println!("Posted notification to Slack");
    Ok(())
}

fn print_duration(duration: Duration) {
    let total_seconds = duration.as_secs();
    let minutes = total_seconds / 60;
    let seconds = total_seconds % 60;
    debug!("Time taken: {minutes} minutes and {seconds} seconds");
}

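/// Builds a progress bar with an elapsed-time/position template and a steady
/// 100ms tick.
///
/// ```ignore
/// let pb = get_progress_bar(10)?;
/// for _ in 0..10 {
///     pb.inc(1);
/// }
/// pb.finish_and_clear();
/// ```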
pub fn get_progress_bar(length: u64) -> Result<ProgressBar> {
    let progress_bar = ProgressBar::new(length);
    progress_bar.set_style(
        ProgressStyle::default_bar()
            .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len}")?
            .progress_chars("#>-"),
    );
    progress_bar.enable_steady_tick(Duration::from_millis(100));
    Ok(progress_bar)
}

pub async fn get_environment_details(
    environment_name: &str,
    s3_repository: &S3Repository,
) -> Result<EnvironmentDetails> {
    let temp_file = tempfile::NamedTempFile::new()?;

    let max_retries = 3;
    let mut retries = 0;
    let env_details = loop {
        debug!("Downloading the environment details file for {environment_name} from S3");
        match s3_repository
            .download_object("sn-environment-type", environment_name, temp_file.path())
            .await
        {
            Ok(_) => {
                debug!("Downloaded the environment details file for {environment_name} from S3");
                let content = match std::fs::read_to_string(temp_file.path()) {
                    Ok(content) => content,
                    Err(err) => {
                        log::error!("Could not read the environment details file: {err:?}");
                        if retries < max_retries {
                            debug!("Retrying to read the environment details file");
                            retries += 1;
                            continue;
                        } else {
                            return Err(Error::EnvironmentDetailsNotFound(
                                environment_name.to_string(),
                            ));
                        }
                    }
                };
                trace!("Content of the environment details file: {content}");

                match serde_json::from_str(&content) {
                    Ok(environment_details) => break environment_details,
                    Err(err) => {
                        log::error!("Could not parse the environment details file: {err:?}");
                        if retries < max_retries {
                            debug!("Retrying to parse the environment details file");
                            retries += 1;
                            continue;
                        } else {
                            return Err(Error::EnvironmentDetailsNotFound(
                                environment_name.to_string(),
                            ));
                        }
                    }
                }
            }
            Err(err) => {
                log::error!(
                    "Could not download the environment details file for {environment_name} from S3: {err:?}"
                );
                if retries < max_retries {
                    retries += 1;
                    continue;
                } else {
                    return Err(Error::EnvironmentDetailsNotFound(
                        environment_name.to_string(),
                    ));
                }
            }
        }
    };

    debug!("Fetched environment details: {env_details:?}");

    Ok(env_details)
}

pub async fn write_environment_details(
    s3_repository: &S3Repository,
    environment_name: &str,
    environment_details: &EnvironmentDetails,
) -> Result<()> {
    let temp_dir = tempfile::tempdir()?;
    let path = temp_dir.path().to_path_buf().join(environment_name);
    let mut file = File::create(&path)?;
    let json = serde_json::to_string(environment_details)?;
    file.write_all(json.as_bytes())?;
    s3_repository
        .upload_file("sn-environment-type", &path, true)
        .await?;
    Ok(())
}

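/// Returns the size required per attached volume for `node_count` nodes,
/// assuming the total storage is spread across seven attached volumes per VM
/// (inferred from the divisor below) and that `STORAGE_REQUIRED_PER_NODE` is
/// expressed in GB.
///
/// ```ignore
/// // 25 nodes * 7 per node = 175 total, over 7 volumes = 25 per volume.
/// assert_eq!(calculate_size_per_attached_volume(25), 25);
/// ```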
pub fn calculate_size_per_attached_volume(node_count: u16) -> u16 {
    if node_count == 0 {
        return 0;
    }
    let total_volume_required = node_count * STORAGE_REQUIRED_PER_NODE;
    // Spread the total across the attached volumes, rounding up.
    (total_volume_required as f64 / 7.0).ceil() as u16
}

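/// Builds the URL at which a node's bootstrap cache file is expected to be
/// served (assumed: plain HTTP on the default port).
///
/// ```ignore
/// let ip: std::net::IpAddr = "203.0.113.7".parse()?;
/// assert_eq!(
///     get_bootstrap_cache_url(&ip),
///     "http://203.0.113.7/bootstrap_cache.json"
/// );
/// ```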
pub fn get_bootstrap_cache_url(ip_addr: &IpAddr) -> String {
    format!("http://{ip_addr}/bootstrap_cache.json")
}