soroban_cli/commands/snapshot/create.rs

use async_compression::tokio::bufread::GzipDecoder;
use bytesize::ByteSize;
use clap::{arg, Parser, ValueEnum};
use futures::StreamExt;
use humantime::format_duration;
use itertools::{Either, Itertools};
use sha2::{Digest, Sha256};
use soroban_ledger_snapshot::LedgerSnapshot;
use std::{
    collections::HashSet,
    fs,
    io::{self},
    path::PathBuf,
    str::FromStr,
    time::{Duration, Instant},
};
use stellar_xdr::curr::{
    self as xdr, AccountId, Asset, BucketEntry, ConfigSettingEntry, ContractExecutable, Frame,
    Hash, LedgerEntryData, LedgerHeaderHistoryEntry, LedgerKey, Limited, Limits, ReadXdr,
    ScAddress, ScContractInstance, ScVal,
};
use tokio::fs::OpenOptions;
use tokio::io::BufReader;
use tokio_util::io::StreamReader;
use url::Url;

use crate::{
    commands::{config::data, global, HEADING_ARCHIVE},
    config::{self, locator, network::passphrase},
    print,
    tx::builder,
    utils::get_name_from_stellar_asset_contract_storage,
};
use crate::{config::address::UnresolvedMuxedAccount, utils::http};

#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, ValueEnum, Default)]
pub enum Output {
    #[default]
    Json,
}

fn default_out_path() -> PathBuf {
    PathBuf::new().join("snapshot.json")
}

/// Create a ledger snapshot using a history archive.
///
/// Filters (address, wasm-hash) specify what ledger entries to include.
///
/// Account addresses include the account and its trustlines.
///
/// Contract addresses include the related Wasm and contract data.
///
/// If a contract is a Stellar asset contract, the snapshot includes the asset
/// issuer's account and trustlines, but not the trustlines of other accounts
/// holding the asset. To include them, specify the addresses of the relevant
/// accounts.
///
/// Any invalid contract id passed as `--address` will be ignored.
///
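/// Example invocation (illustrative only: the ledger number and the address
/// are placeholders, and the binary name assumes the `stellar` CLI build):
///
///     stellar snapshot create --ledger 1000063 --address <CONTRACT_OR_ACCOUNT> --out snapshot.json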
#[derive(Parser, Debug, Clone)]
#[group(skip)]
pub struct Cmd {
    /// The ledger sequence number to snapshot. Defaults to latest history archived ledger.
    #[arg(long)]
    ledger: Option<u32>,

    /// Account or contract address/alias to include in the snapshot.
    #[arg(long = "address", help_heading = "Filter Options")]
    address: Vec<String>,

    /// WASM hashes to include in the snapshot.
    #[arg(long = "wasm-hash", help_heading = "Filter Options")]
    wasm_hashes: Vec<Hash>,

    /// Format of the out file.
    #[arg(long, value_enum, default_value_t)]
    output: Output,

    /// Out path that the snapshot is written to.
    #[arg(long, default_value=default_out_path().into_os_string())]
    out: PathBuf,

    /// Archive URL
    #[arg(long, help_heading = HEADING_ARCHIVE, env = "STELLAR_ARCHIVE_URL")]
    archive_url: Option<Url>,

    #[command(flatten)]
    locator: locator::Args,

    #[command(flatten)]
    network: config::network::Args,
}

#[derive(thiserror::Error, Debug)]
pub enum Error {
    #[error("wasm hash invalid: {0}")]
    WasmHashInvalid(String),

    #[error("downloading history: {0}")]
    DownloadingHistory(reqwest::Error),

    #[error("downloading history: got status code {0}")]
    DownloadingHistoryGotStatusCode(reqwest::StatusCode),

    #[error("json decoding history: {0}")]
    JsonDecodingHistory(serde_json::Error),

    #[error("opening cached bucket to read: {0}")]
    ReadOpeningCachedBucket(io::Error),

    #[error("parsing bucket url: {0}")]
    ParsingBucketUrl(url::ParseError),

    #[error("getting bucket: {0}")]
    GettingBucket(reqwest::Error),

    #[error("getting bucket: got status code {0}")]
    GettingBucketGotStatusCode(reqwest::StatusCode),

    #[error("opening cached bucket to write: {0}")]
    WriteOpeningCachedBucket(io::Error),

    #[error("streaming bucket: {0}")]
    StreamingBucket(io::Error),

    #[error("read XDR frame bucket entry: {0}")]
    ReadXdrFrameBucketEntry(xdr::Error),

    #[error("renaming temporary downloaded file to final destination: {0}")]
    RenameDownloadFile(io::Error),

    #[error("getting bucket directory: {0}")]
    GetBucketDir(data::Error),

    #[error("reading history http stream: {0}")]
    ReadHistoryHttpStream(reqwest::Error),

    #[error("writing ledger snapshot: {0}")]
    WriteLedgerSnapshot(soroban_ledger_snapshot::Error),

    #[error(transparent)]
    Join(#[from] tokio::task::JoinError),

    #[error(transparent)]
    Network(#[from] config::network::Error),

    #[error(transparent)]
    Locator(#[from] locator::Error),

    #[error(transparent)]
    Config(#[from] config::Error),

    #[error("archive url not configured")]
    ArchiveUrlNotConfigured,

    #[error("parsing asset name: {0}")]
    ParseAssetName(String),

    #[error(transparent)]
    Asset(#[from] builder::asset::Error),

    #[error("ledger not found in archive")]
    LedgerNotFound,

    #[error("xdr parsing error: {0}")]
    Xdr(#[from] xdr::Error),

    #[error("corrupted bucket file: expected hash {expected}, got {actual}")]
    CorruptedBucket { expected: String, actual: String },
}

/// Checkpoint frequency is usually 64 ledgers, but on local test networks it's
/// often 8. There's no simple way to detect which frequency to expect, so it is
/// hardcoded at 64, and this value is used only to help the user select a valid
/// ledger number when the one they chose doesn't exist in the archive.
const CHECKPOINT_FREQUENCY: u32 = 64;

impl Cmd {
    #[allow(clippy::too_many_lines)]
    pub async fn run(&self, global_args: &global::Args) -> Result<(), Error> {
        let print = print::Print::new(global_args.quiet);
        let start = Instant::now();

        let archive_url = self.archive_url()?;
        let history = get_history(&print, &archive_url, self.ledger).await?;

        let ledger = history.current_ledger;
        let network_passphrase = &history.network_passphrase;
        let network_id = Sha256::digest(network_passphrase);

        print.infoln(format!("Ledger: {ledger}"));
        print.infoln(format!("Network Passphrase: {network_passphrase}"));
        print.infoln(format!("Network id: {}", hex::encode(network_id)));

        // Get ledger close time and base reserve from archive
        let (ledger_close_time, base_reserve) =
            match get_ledger_metadata_from_archive(&print, &archive_url, ledger).await {
                Ok((close_time, reserve)) => {
                    print.infoln(format!("Ledger Close Time: {close_time}"));
                    print.infoln(format!("Base Reserve: {reserve}"));
                    (close_time, reserve)
                }
                Err(e) => {
                    print.warnln(format!("Failed to get ledger metadata from archive: {e}"));
                    print.infoln("Using default values: close_time=0, base_reserve=1");
                    (0u64, 1u32) // Default values
                }
            };

        // Prepare a flat list of buckets to read, ordered by level so that they
        // can be iterated from higher level to lower level.
        let buckets = history
            .current_buckets
            .iter()
            .flat_map(|h| [h.curr.clone(), h.snap.clone()])
            .filter(|b| b != "0000000000000000000000000000000000000000000000000000000000000000")
            .collect::<Vec<_>>();

        // Pre-cache the buckets.
        for (i, bucket) in buckets.iter().enumerate() {
            cache_bucket(&print, &archive_url, i, bucket).await?;
        }

        // The snapshot is what will be written to file at the end. Fields will
        // be updated while parsing the history archive.
        let mut snapshot = LedgerSnapshot {
            protocol_version: 0,
            sequence_number: ledger,
            timestamp: ledger_close_time,
            network_id: network_id.into(),
            base_reserve,
            min_persistent_entry_ttl: 0,
            min_temp_entry_ttl: 0,
            max_entry_ttl: 0,
            ledger_entries: Vec::new(),
        };

        // Track ledger keys seen, so that we can ignore old versions of
        // entries. Entries can appear in both higher level and lower level
        // buckets, and to get the latest version of the entry the version in
        // the higher level bucket should be used.
        let mut seen = HashSet::new();

        #[allow(clippy::items_after_statements)]
        #[derive(Default)]
        struct SearchInputs {
            account_ids: HashSet<AccountId>,
            contract_ids: HashSet<ScAddress>,
            wasm_hashes: HashSet<Hash>,
        }
        impl SearchInputs {
            pub fn is_empty(&self) -> bool {
                self.account_ids.is_empty()
                    && self.contract_ids.is_empty()
                    && self.wasm_hashes.is_empty()
            }
        }

        // Search the buckets using the user inputs as the starting inputs.
        let (account_ids, contract_ids): (HashSet<AccountId>, HashSet<ScAddress>) = self
            .address
            .iter()
            .cloned()
            .filter_map(|a| self.resolve_address_sync(&a, network_passphrase))
            .partition_map(|a| a);

        let mut current = SearchInputs {
            account_ids,
            contract_ids,
            wasm_hashes: self.wasm_hashes.iter().cloned().collect(),
        };
        let mut next = SearchInputs::default();

        loop {
            if current.is_empty() {
                break;
            }

            print.infoln(format!(
                "Searching for {} accounts, {} contracts, {} wasms",
                current.account_ids.len(),
                current.contract_ids.len(),
                current.wasm_hashes.len(),
            ));

            for (i, bucket) in buckets.iter().enumerate() {
                // Determine where the bucket will be read from: the cache on
                // disk, downloading it from the archive first if needed.
                let cache_path = cache_bucket(&print, &archive_url, i, bucket).await?;
                let file = std::fs::OpenOptions::new()
                    .read(true)
                    .open(&cache_path)
                    .map_err(Error::ReadOpeningCachedBucket)?;

                let message = format!("Searching bucket {i} {bucket}");
                print.searchln(format!("{message}…"));

                if let Ok(metadata) = file.metadata() {
                    print.clear_previous_line();
                    print.searchln(format!("{message} ({})", ByteSize(metadata.len())));
                }

                // Stream the entries from the bucket, identifying those that
                // match the filters and including only the matching entries in
                // the snapshot.
                let limited = &mut Limited::new(file, Limits::none());
                let entries = Frame::<BucketEntry>::read_xdr_iter(limited);
                let mut count_saved = 0;
                for entry in entries {
                    let Frame(entry) = entry.map_err(Error::ReadXdrFrameBucketEntry)?;
                    let (key, val) = match entry {
                        BucketEntry::Liveentry(l) | BucketEntry::Initentry(l) => {
                            let k = l.to_key();
                            (k, Some(l))
                        }
                        BucketEntry::Deadentry(k) => (k, None),
                        BucketEntry::Metaentry(m) => {
                            if m.ledger_version > snapshot.protocol_version {
                                snapshot.protocol_version = m.ledger_version;
                                print.infoln(format!(
                                    "Protocol version: {}",
                                    snapshot.protocol_version
                                ));
                            }
                            continue;
                        }
                    };

                    if seen.contains(&key) {
                        continue;
                    }

                    let keep = match &key {
                        LedgerKey::Account(k) => current.account_ids.contains(&k.account_id),
                        LedgerKey::Trustline(k) => current.account_ids.contains(&k.account_id),
                        LedgerKey::ContractData(k) => current.contract_ids.contains(&k.contract),
                        LedgerKey::ContractCode(e) => current.wasm_hashes.contains(&e.hash),
                        LedgerKey::ConfigSetting(_) => true,
                        _ => false,
                    };

                    if !keep {
                        continue;
                    }

                    seen.insert(key.clone());

                    let Some(val) = val else {
                        continue;
                    };

                    match &val.data {
                        LedgerEntryData::ConfigSetting(ConfigSettingEntry::StateArchival(
                            state_archival,
                        )) => {
                            snapshot.min_persistent_entry_ttl = state_archival.min_persistent_ttl;
                            snapshot.min_temp_entry_ttl = state_archival.min_temporary_ttl;
                            snapshot.max_entry_ttl = state_archival.max_entry_ttl;
                            false
                        }

                        LedgerEntryData::ContractData(e) => {
                            // If a contract instance references contract
                            // executable stored in another ledger entry, add
                            // that ledger entry to the filter so that Wasm for
                            // any filtered contract is collected too in the
                            // second pass.
                            if e.key == ScVal::LedgerKeyContractInstance {
                                match &e.val {
                                    ScVal::ContractInstance(ScContractInstance {
                                        executable: ContractExecutable::Wasm(hash),
                                        ..
                                    }) => {
                                        if !current.wasm_hashes.contains(hash) {
                                            next.wasm_hashes.insert(hash.clone());
                                            print.infoln(format!(
                                                "Adding wasm {} to search",
                                                hex::encode(hash)
                                            ));
                                        }
                                    }
                                    ScVal::ContractInstance(ScContractInstance {
                                        executable: ContractExecutable::StellarAsset,
                                        storage: Some(storage),
                                    }) => {
                                        if let Some(name) =
                                            get_name_from_stellar_asset_contract_storage(storage)
                                        {
                                            let asset: builder::Asset = name.parse()?;
                                            if let Some(issuer) = match asset
                                                .resolve(&global_args.locator)?
                                            {
                                                Asset::Native => None,
                                                Asset::CreditAlphanum4(a4) => Some(a4.issuer),
                                                Asset::CreditAlphanum12(a12) => Some(a12.issuer),
                                            } {
                                                print.infoln(format!(
                                                    "Adding asset issuer {issuer} to search"
                                                ));
                                                next.account_ids.insert(issuer);
                                            }
                                        }
                                    }
                                    _ => {}
                                }
                            }
                            keep
                        }
                        _ => false,
                    };
                    snapshot
                        .ledger_entries
                        .push((Box::new(key), (Box::new(val), Some(u32::MAX))));
                    count_saved += 1;
                }
                if count_saved > 0 {
                    print.infoln(format!("Found {count_saved} entries"));
                }
            }
            current = next;
            next = SearchInputs::default();
        }

        // Write the snapshot to file.
        snapshot
            .write_file(&self.out)
            .map_err(Error::WriteLedgerSnapshot)?;
        print.saveln(format!(
            "Saved {} entries to {:?}",
            snapshot.ledger_entries.len(),
            self.out
        ));

        let duration = Duration::from_secs(start.elapsed().as_secs());
        print.checkln(format!("Completed in {}", format_duration(duration)));

        Ok(())
    }

    fn archive_url(&self) -> Result<Url, Error> {
        // Return the configured archive URL, or if one is not configured, guess
        // at an appropriate archive URL given the network passphrase.
        self.archive_url
            .clone()
            .or_else(|| {
                self.network.get(&self.locator).ok().and_then(|network| {
                    match network.network_passphrase.as_str() {
                        passphrase::MAINNET => {
                            Some("https://history.stellar.org/prd/core-live/core_live_001")
                        }
                        passphrase::TESTNET => {
                            Some("https://history.stellar.org/prd/core-testnet/core_testnet_001")
                        }
                        passphrase::FUTURENET => Some("https://history-futurenet.stellar.org"),
                        passphrase::LOCAL => Some("http://localhost:8000/archive"),
                        _ => None,
                    }
                    .map(|s| Url::from_str(s).expect("archive url valid"))
                })
            })
            .ok_or(Error::ArchiveUrlNotConfigured)
    }

    fn resolve_address_sync(
        &self,
        address: &str,
        network_passphrase: &str,
    ) -> Option<Either<AccountId, ScAddress>> {
        if let Some(contract) = self.resolve_contract(address, network_passphrase) {
            Some(Either::Right(contract))
        } else {
            self.resolve_account_sync(address).map(Either::Left)
        }
    }

    // Resolve an account address to an account id. The address can be a
    // G-address or a key name (as in `stellar keys address NAME`).
    fn resolve_account_sync(&self, address: &str) -> Option<AccountId> {
        let address: UnresolvedMuxedAccount = address.parse().ok()?;
        let muxed_account = address
            .resolve_muxed_account_sync(&self.locator, None)
            .ok()?;
        Some(muxed_account.account_id())
    }

    // Resolve a contract address to a contract id. The contract can be a
    // C-address or a contract alias.
    fn resolve_contract(&self, address: &str, network_passphrase: &str) -> Option<ScAddress> {
        address.parse().ok().or_else(|| {
            Some(ScAddress::Contract(stellar_xdr::curr::ContractId(
                self.locator
                    .resolve_contract_id(address, network_passphrase)
                    .ok()?
                    .0
                    .into(),
            )))
        })
    }
}

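/// Splits a ledger sequence number into the path components used by the
/// history archive layout: the zero-padded 8-character hex form of the ledger
/// plus its first three 2-character prefixes. For example, ledger 123456
/// (hex 0001e240) yields ("0001e240", "00", "01", "e2").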
fn ledger_to_path_components(ledger: u32) -> (String, String, String, String) {
    let ledger_hex = format!("{ledger:08x}");
    let ledger_hex_0 = ledger_hex[0..=1].to_string();
    let ledger_hex_1 = ledger_hex[2..=3].to_string();
    let ledger_hex_2 = ledger_hex[4..=5].to_string();
    (ledger_hex, ledger_hex_0, ledger_hex_1, ledger_hex_2)
}

async fn get_history(
    print: &print::Print,
    archive_url: &Url,
    ledger: Option<u32>,
) -> Result<History, Error> {
    let archive_url = archive_url.to_string();
    let archive_url = archive_url.strip_suffix('/').unwrap_or(&archive_url);
    let history_url = if let Some(ledger) = ledger {
        let (ledger_hex, ledger_hex_0, ledger_hex_1, ledger_hex_2) =
            ledger_to_path_components(ledger);
        format!("{archive_url}/history/{ledger_hex_0}/{ledger_hex_1}/{ledger_hex_2}/history-{ledger_hex}.json")
    } else {
        format!("{archive_url}/.well-known/stellar-history.json")
    };
    let history_url = Url::from_str(&history_url).unwrap();

    print.globeln(format!("Downloading history {history_url}"));

    let response = http::client()
        .get(history_url.as_str())
        .send()
        .await
        .map_err(Error::DownloadingHistory)?;

    if !response.status().is_success() {
        // Check whether the ledger is a checkpoint ledger that exists in the archive.
        if let Some(ledger) = ledger {
            let ledger_offset = (ledger + 1) % CHECKPOINT_FREQUENCY;
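            // A checkpoint ledger is one where (ledger + 1) is a multiple of
            // the checkpoint frequency, i.e. ledgers 63, 127, 191, … at the
            // default frequency of 64. For example, for ledger 100 the offset
            // is 101 % 64 = 37, so the suggestions below are 100 - 37 = 63 and
            // 100 + (64 - 37) = 127.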

            if ledger_offset != 0 {
                print.errorln(format!(
                    "Ledger {ledger} may not be a checkpoint ledger, try {} or {}",
                    ledger - ledger_offset,
                    ledger + (CHECKPOINT_FREQUENCY - ledger_offset),
                ));
            }
        }
        return Err(Error::DownloadingHistoryGotStatusCode(response.status()));
    }

    let body = response
        .bytes()
        .await
        .map_err(Error::ReadHistoryHttpStream)?;

    print.clear_previous_line();
    print.globeln(format!("Downloaded history {}", &history_url));

    serde_json::from_slice::<History>(&body).map_err(Error::JsonDecodingHistory)
}

async fn get_ledger_metadata_from_archive(
    print: &print::Print,
    archive_url: &Url,
    ledger: u32,
) -> Result<(u64, u32), Error> {
    let archive_url = archive_url.to_string();
    let archive_url = archive_url.strip_suffix('/').unwrap_or(&archive_url);

    // Calculate the path to the ledger header file
    let (ledger_hex, ledger_hex_0, ledger_hex_1, ledger_hex_2) = ledger_to_path_components(ledger);
    let ledger_url = format!(
        "{archive_url}/ledger/{ledger_hex_0}/{ledger_hex_1}/{ledger_hex_2}/ledger-{ledger_hex}.xdr.gz"
    );

    print.globeln(format!("Downloading ledger headers {ledger_url}"));

    let ledger_url = Url::from_str(&ledger_url).map_err(Error::ParsingBucketUrl)?;
    let response = http::client()
        .get(ledger_url.as_str())
        .send()
        .await
        .map_err(Error::DownloadingHistory)?;

    if !response.status().is_success() {
        return Err(Error::DownloadingHistoryGotStatusCode(response.status()));
    }

    // Cache the ledger file to disk in the same way as bucket files.
    let ledger_dir = data::bucket_dir().map_err(Error::GetBucketDir)?;
    let cache_path = ledger_dir.join(format!("ledger-{ledger_hex}.xdr"));
    let dl_path = cache_path.with_extension("dl");

    let stream = response
        .bytes_stream()
        .map(|result| result.map_err(std::io::Error::other));
    let stream_reader = StreamReader::new(stream);
    let buf_reader = BufReader::new(stream_reader);
    let mut decoder = GzipDecoder::new(buf_reader);

    let mut file = OpenOptions::new()
        .create(true)
        .truncate(true)
        .write(true)
        .open(&dl_path)
        .await
        .map_err(Error::WriteOpeningCachedBucket)?;

    tokio::io::copy(&mut decoder, &mut file)
        .await
        .map_err(Error::StreamingBucket)?;

    fs::rename(&dl_path, &cache_path).map_err(Error::RenameDownloadFile)?;

    print.clear_previous_line();
    print.globeln(format!("Downloaded ledger headers for ledger {ledger}"));

    // Now read the cached file
    let file = std::fs::File::open(&cache_path).map_err(Error::ReadOpeningCachedBucket)?;
    let limited = &mut Limited::new(file, Limits::none());

    // Find the specific ledger header entry we need
    let entries = Frame::<LedgerHeaderHistoryEntry>::read_xdr_iter(limited);
    for entry in entries {
        let Frame(header_entry) = entry.map_err(Error::Xdr)?;

        if header_entry.header.ledger_seq == ledger {
            let close_time = header_entry.header.scp_value.close_time.0;
            let base_reserve = header_entry.header.base_reserve;

            return Ok((close_time, base_reserve));
        }
    }

    Err(Error::LedgerNotFound)
}

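/// Buckets in a history archive are content-addressed: a bucket's file name is
/// the hex-encoded SHA-256 of its uncompressed XDR contents. Re-hashing the
/// cached file and comparing it against the expected name therefore detects a
/// corrupted or truncated download.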
fn validate_bucket_hash(cache_path: &PathBuf, expected_hash: &str) -> Result<(), Error> {
    let file = std::fs::File::open(cache_path).map_err(Error::ReadOpeningCachedBucket)?;
    let mut hasher = Sha256::new();
    std::io::copy(&mut std::io::BufReader::new(file), &mut hasher)
        .map_err(Error::ReadOpeningCachedBucket)?;
    let actual_hash = hex::encode(hasher.finalize());

    if actual_hash != expected_hash {
        return Err(Error::CorruptedBucket {
            expected: expected_hash.to_string(),
            actual: actual_hash,
        });
    }

    Ok(())
}

async fn cache_bucket(
    print: &print::Print,
    archive_url: &Url,
    bucket_index: usize,
    bucket: &str,
) -> Result<PathBuf, Error> {
    let bucket_dir = data::bucket_dir().map_err(Error::GetBucketDir)?;
    let cache_path = bucket_dir.join(format!("bucket-{bucket}.xdr"));

    // Validate cached bucket if it exists
    if cache_path.exists() {
        if validate_bucket_hash(&cache_path, bucket).is_err() {
            print.warnln(format!(
                "Cached bucket {bucket} is corrupted, re-downloading"
            ));
            std::fs::remove_file(&cache_path).ok();
        } else {
            return Ok(cache_path);
        }
    }

    if !cache_path.exists() {
        let bucket_0 = &bucket[0..=1];
        let bucket_1 = &bucket[2..=3];
        let bucket_2 = &bucket[4..=5];
        let bucket_url =
            format!("{archive_url}/bucket/{bucket_0}/{bucket_1}/{bucket_2}/bucket-{bucket}.xdr.gz");

        print.globeln(format!("Downloading bucket {bucket_index} {bucket}…"));

        let bucket_url = Url::from_str(&bucket_url).map_err(Error::ParsingBucketUrl)?;

        let response = http::client()
            .get(bucket_url.as_str())
            .send()
            .await
            .map_err(Error::GettingBucket)?;

        if !response.status().is_success() {
            print.println("");
            return Err(Error::GettingBucketGotStatusCode(response.status()));
        }

        if let Some(len) = response.content_length() {
            print.clear_previous_line();
            print.globeln(format!(
                "Downloaded bucket {bucket_index} {bucket} ({})",
                ByteSize(len)
            ));
        }

        let stream = response
            .bytes_stream()
            .map(|result| result.map_err(std::io::Error::other));
        let stream_reader = StreamReader::new(stream);
        let buf_reader = BufReader::new(stream_reader);
        let mut decoder = GzipDecoder::new(buf_reader);
        let dl_path = cache_path.with_extension("dl");
        let mut file = OpenOptions::new()
            .create(true)
            .truncate(true)
            .write(true)
            .open(&dl_path)
            .await
            .map_err(Error::WriteOpeningCachedBucket)?;
        tokio::io::copy(&mut decoder, &mut file)
            .await
            .map_err(Error::StreamingBucket)?;
        fs::rename(&dl_path, &cache_path).map_err(Error::RenameDownloadFile)?;
    }
    Ok(cache_path)
}

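/// Subset of the history archive state file (`.well-known/stellar-history.json`,
/// or `history-<hex>.json` for a specific checkpoint) that this command needs.
/// The relevant fields look roughly like this (abbreviated, illustrative values
/// only):
///
/// ```json
/// {
///   "currentLedger": 1000063,
///   "networkPassphrase": "Test SDF Network ; September 2015",
///   "currentBuckets": [
///     { "curr": "<hex bucket hash>", "snap": "<hex bucket hash>" }
///   ]
/// }
/// ```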
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Deserialize)]
#[serde(rename_all = "camelCase")]
struct History {
    current_ledger: u32,
    current_buckets: Vec<HistoryBucket>,
    network_passphrase: String,
}

#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Deserialize)]
#[serde(rename_all = "camelCase")]
struct HistoryBucket {
    curr: String,
    snap: String,
}
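
// Small illustrative tests pinning down the hex path layout produced by
// `ledger_to_path_components` and the checkpoint arithmetic documented on
// `CHECKPOINT_FREQUENCY`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn ledger_path_components_split_the_hex_sequence() {
        // 123456 decimal is 0x0001e240, zero-padded to 8 hex characters.
        let (hex, p0, p1, p2) = ledger_to_path_components(123_456);
        assert_eq!(hex, "0001e240");
        assert_eq!((p0.as_str(), p1.as_str(), p2.as_str()), ("00", "01", "e2"));
    }

    #[test]
    fn checkpoint_ledgers_precede_frequency_boundaries() {
        // Checkpoint ledgers satisfy (ledger + 1) % CHECKPOINT_FREQUENCY == 0.
        assert_eq!((63 + 1) % CHECKPOINT_FREQUENCY, 0);
        assert_eq!((1_000_063 + 1) % CHECKPOINT_FREQUENCY, 0);
        assert_ne!((100 + 1) % CHECKPOINT_FREQUENCY, 0);
    }
}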