// solana_runtime/serde_snapshot.rs

1#[cfg(target_os = "linux")]
2use std::ffi::{CStr, CString};
3use {
4    crate::{
5        bank::{Bank, BankFieldsToDeserialize, BankFieldsToSerialize, BankHashStats, BankRc},
6        epoch_stakes::{EpochStakes, VersionedEpochStakes},
7        runtime_config::RuntimeConfig,
8        serde_snapshot::storage::SerializableAccountStorageEntry,
9        snapshot_utils::{SnapshotError, StorageAndNextAccountsFileId},
10        stakes::{serde_stakes_to_delegation_format, Stakes, StakesEnum},
11    },
12    bincode::{self, config::Options, Error},
13    log::*,
14    serde::{de::DeserializeOwned, Deserialize, Serialize},
15    solana_accounts_db::{
16        accounts::Accounts,
17        accounts_db::{
18            AccountStorageEntry, AccountsDb, AccountsDbConfig, AccountsFileId,
19            AtomicAccountsFileId, DuplicatesLtHash, IndexGenerationInfo,
20        },
21        accounts_file::{AccountsFile, StorageAccess},
22        accounts_hash::{AccountsDeltaHash, AccountsHash},
23        accounts_update_notifier_interface::AccountsUpdateNotifier,
24        ancestors::AncestorsForSerialization,
25        blockhash_queue::BlockhashQueue,
26        epoch_accounts_hash::EpochAccountsHash,
27    },
28    solana_builtins::prototype::BuiltinPrototype,
29    solana_clock::{Epoch, Slot, UnixTimestamp},
30    solana_epoch_schedule::EpochSchedule,
31    solana_fee_calculator::{FeeCalculator, FeeRateGovernor},
32    solana_genesis_config::GenesisConfig,
33    solana_hard_forks::HardForks,
34    solana_hash::Hash,
35    solana_inflation::Inflation,
36    solana_measure::measure::Measure,
37    solana_pubkey::Pubkey,
38    solana_rent_collector::RentCollector,
39    solana_serde::default_on_eof,
40    solana_stake_interface::state::Delegation,
41    std::{
42        cell::RefCell,
43        collections::{HashMap, HashSet},
44        io::{self, BufReader, BufWriter, Read, Write},
45        path::{Path, PathBuf},
46        result::Result,
47        sync::{
48            atomic::{AtomicBool, AtomicUsize, Ordering},
49            Arc,
50        },
51        thread::Builder,
52    },
53    storage::SerializableStorage,
54    types::SerdeAccountsLtHash,
55};
56
57mod storage;
58mod tests;
59mod types;
60mod utils;
61
62pub(crate) use {
63    solana_accounts_db::accounts_hash::{
64        SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash,
65    },
66    storage::SerializedAccountsFileId,
67};
68
/// Upper bound (32 GiB) on bytes read when deserializing snapshot data with
/// bincode; guards against unbounded allocation from corrupt or oversized input.
const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024;
70
/// AccountsDb fields as they appear in a serialized snapshot.
///
/// This is a positional (tuple) struct: bincode writes/reads the fields in
/// declaration order, so the order here is part of the snapshot wire format.
/// The trailing `default_on_eof` fields were appended later and default to
/// empty when deserializing older snapshots.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
pub struct AccountsDbFields<T>(
    /// map of slot -> storage entries for that slot
    HashMap<Slot, Vec<T>>,
    u64, // obsolete, formerly write_version
    /// the slot this snapshot was taken at
    Slot,
    /// accounts-hash data for the bank at this slot
    BankHashInfo,
    /// all slots that were roots within the last epoch
    #[serde(deserialize_with = "default_on_eof")]
    Vec<Slot>,
    /// slots that were roots within the last epoch for which we care about the hash value
    #[serde(deserialize_with = "default_on_eof")]
    Vec<(Slot, Hash)>,
);
85
/// Incremental snapshots only calculate their accounts hash based on the
/// account changes WITHIN the incremental slot range. So, we need to keep track
/// of the full snapshot expected accounts hash results. We also need to keep
/// track of the hash and capitalization specific to the incremental snapshot
/// slot range. The capitalization we calculate for the incremental slot will
/// NOT be consistent with the bank's capitalization. It is not feasible to
/// calculate a capitalization delta that is correct given just incremental
/// slots account data and the full snapshot's capitalization.
///
/// Serialized as one of the snapshot "extra fields"; field order is part of
/// the wire format.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)]
pub struct BankIncrementalSnapshotPersistence {
    /// slot of full snapshot
    pub full_slot: Slot,
    /// accounts hash from the full snapshot
    pub full_hash: SerdeAccountsHash,
    /// capitalization from the full snapshot
    pub full_capitalization: u64,
    /// hash of the accounts in the incremental snapshot slot range, including zero-lamport accounts
    pub incremental_hash: SerdeIncrementalAccountsHash,
    /// capitalization of the accounts in the incremental snapshot slot range
    pub incremental_capitalization: u64,
}
108
/// Accounts-hash related data stored inside [`AccountsDbFields`];
/// field order is part of the snapshot wire format.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq)]
struct BankHashInfo {
    accounts_delta_hash: SerdeAccountsDeltaHash,
    accounts_hash: SerdeAccountsHash,
    stats: BankHashStats,
}
116
/// Obsolete fields retained only to keep the serialized bank layout unchanged;
/// always written as empty collections (see `UnusedAccounts::default()` in
/// `SerializableVersionedBank::from`) and ignored on deserialization.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Default, Clone, PartialEq, Eq, Debug, Deserialize, Serialize)]
struct UnusedAccounts {
    unused1: HashSet<Pubkey>,
    unused2: HashSet<Pubkey>,
    unused3: HashMap<Pubkey, u64>,
}
124
// Deserializable version of Bank which need not be serializable,
// because it's handled by SerializableVersionedBank.
// So, sync fields with it!
//
// Field order is the wire format: bincode reads these fields in declaration
// order, so do not reorder, add, or remove fields here without a corresponding
// snapshot format change.
#[derive(Clone, Deserialize)]
struct DeserializableVersionedBank {
    blockhash_queue: BlockhashQueue,
    ancestors: AncestorsForSerialization,
    hash: Hash,
    parent_hash: Hash,
    parent_slot: Slot,
    hard_forks: HardForks,
    transaction_count: u64,
    tick_height: u64,
    signature_count: u64,
    capitalization: u64,
    max_tick_height: u64,
    hashes_per_tick: Option<u64>,
    ticks_per_slot: u64,
    ns_per_slot: u128,
    genesis_creation_time: UnixTimestamp,
    slots_per_year: f64,
    accounts_data_len: u64,
    slot: Slot,
    epoch: Epoch,
    block_height: u64,
    collector_id: Pubkey,
    collector_fees: u64,
    // retained only for wire-format compatibility; never read (see the
    // `From<DeserializableVersionedBank>` impl, which drops it)
    _fee_calculator: FeeCalculator,
    fee_rate_governor: FeeRateGovernor,
    collected_rent: u64,
    rent_collector: RentCollector,
    epoch_schedule: EpochSchedule,
    inflation: Inflation,
    stakes: Stakes<Delegation>,
    // obsolete placeholder, kept so older snapshots still parse
    #[allow(dead_code)]
    unused_accounts: UnusedAccounts,
    epoch_stakes: HashMap<Epoch, EpochStakes>,
    is_delta: bool,
}
164
165impl From<DeserializableVersionedBank> for BankFieldsToDeserialize {
166    fn from(dvb: DeserializableVersionedBank) -> Self {
167        BankFieldsToDeserialize {
168            blockhash_queue: dvb.blockhash_queue,
169            ancestors: dvb.ancestors,
170            hash: dvb.hash,
171            parent_hash: dvb.parent_hash,
172            parent_slot: dvb.parent_slot,
173            hard_forks: dvb.hard_forks,
174            transaction_count: dvb.transaction_count,
175            tick_height: dvb.tick_height,
176            signature_count: dvb.signature_count,
177            capitalization: dvb.capitalization,
178            max_tick_height: dvb.max_tick_height,
179            hashes_per_tick: dvb.hashes_per_tick,
180            ticks_per_slot: dvb.ticks_per_slot,
181            ns_per_slot: dvb.ns_per_slot,
182            genesis_creation_time: dvb.genesis_creation_time,
183            slots_per_year: dvb.slots_per_year,
184            accounts_data_len: dvb.accounts_data_len,
185            slot: dvb.slot,
186            epoch: dvb.epoch,
187            block_height: dvb.block_height,
188            collector_id: dvb.collector_id,
189            collector_fees: dvb.collector_fees,
190            fee_rate_governor: dvb.fee_rate_governor,
191            collected_rent: dvb.collected_rent,
192            rent_collector: dvb.rent_collector,
193            epoch_schedule: dvb.epoch_schedule,
194            inflation: dvb.inflation,
195            stakes: dvb.stakes,
196            epoch_stakes: dvb.epoch_stakes,
197            is_delta: dvb.is_delta,
198            incremental_snapshot_persistence: None,
199            epoch_accounts_hash: None,
200            accounts_lt_hash: None, // populated from ExtraFieldsToDeserialize
201            bank_hash_stats: BankHashStats::default(), // populated from AccountsDbFields
202        }
203    }
204}
205
// Serializable version of Bank, not Deserializable to avoid cloning by using refs.
// Sync fields with DeserializableVersionedBank!
//
// Field order is the wire format: bincode writes these fields in declaration
// order, so do not reorder, add, or remove fields here without a corresponding
// snapshot format change.
#[derive(Serialize)]
struct SerializableVersionedBank {
    blockhash_queue: BlockhashQueue,
    ancestors: AncestorsForSerialization,
    hash: Hash,
    parent_hash: Hash,
    parent_slot: Slot,
    hard_forks: HardForks,
    transaction_count: u64,
    tick_height: u64,
    signature_count: u64,
    capitalization: u64,
    max_tick_height: u64,
    hashes_per_tick: Option<u64>,
    ticks_per_slot: u64,
    ns_per_slot: u128,
    genesis_creation_time: UnixTimestamp,
    slots_per_year: f64,
    accounts_data_len: u64,
    slot: Slot,
    epoch: Epoch,
    block_height: u64,
    collector_id: Pubkey,
    collector_fees: u64,
    // always written as `FeeCalculator::default()` (see the From impl below);
    // kept only for wire-format compatibility
    fee_calculator: FeeCalculator,
    fee_rate_governor: FeeRateGovernor,
    collected_rent: u64,
    rent_collector: RentCollector,
    epoch_schedule: EpochSchedule,
    inflation: Inflation,
    #[serde(serialize_with = "serde_stakes_to_delegation_format::serialize")]
    stakes: StakesEnum,
    // always written as `UnusedAccounts::default()`; obsolete placeholder
    unused_accounts: UnusedAccounts,
    epoch_stakes: HashMap<Epoch, EpochStakes>,
    is_delta: bool,
}
244
245impl From<BankFieldsToSerialize> for SerializableVersionedBank {
246    fn from(rhs: BankFieldsToSerialize) -> Self {
247        Self {
248            blockhash_queue: rhs.blockhash_queue,
249            ancestors: rhs.ancestors,
250            hash: rhs.hash,
251            parent_hash: rhs.parent_hash,
252            parent_slot: rhs.parent_slot,
253            hard_forks: rhs.hard_forks,
254            transaction_count: rhs.transaction_count,
255            tick_height: rhs.tick_height,
256            signature_count: rhs.signature_count,
257            capitalization: rhs.capitalization,
258            max_tick_height: rhs.max_tick_height,
259            hashes_per_tick: rhs.hashes_per_tick,
260            ticks_per_slot: rhs.ticks_per_slot,
261            ns_per_slot: rhs.ns_per_slot,
262            genesis_creation_time: rhs.genesis_creation_time,
263            slots_per_year: rhs.slots_per_year,
264            accounts_data_len: rhs.accounts_data_len,
265            slot: rhs.slot,
266            epoch: rhs.epoch,
267            block_height: rhs.block_height,
268            collector_id: rhs.collector_id,
269            collector_fees: rhs.collector_fees,
270            fee_calculator: FeeCalculator::default(),
271            fee_rate_governor: rhs.fee_rate_governor,
272            collected_rent: rhs.collected_rent,
273            rent_collector: rhs.rent_collector,
274            epoch_schedule: rhs.epoch_schedule,
275            inflation: rhs.inflation,
276            stakes: rhs.stakes,
277            unused_accounts: UnusedAccounts::default(),
278            epoch_stakes: rhs.epoch_stakes,
279            is_delta: rhs.is_delta,
280        }
281    }
282}
283
// frozen-abi marker impl (no methods); only compiled with the frozen-abi feature
#[cfg(feature = "frozen-abi")]
impl solana_frozen_abi::abi_example::TransparentAsHelper for SerializableVersionedBank {}
286
/// Helper type to wrap BufReader streams when deserializing and reconstructing from either just a
/// full snapshot, or both a full and incremental snapshot
pub struct SnapshotStreams<'a, R> {
    /// stream over the full snapshot (always present)
    pub full_snapshot_stream: &'a mut BufReader<R>,
    /// stream over the incremental snapshot, if one is being loaded
    pub incremental_snapshot_stream: Option<&'a mut BufReader<R>>,
}
293
/// Helper type to wrap BankFields when reconstructing Bank from either just a full
/// snapshot, or both a full and incremental snapshot
#[derive(Debug)]
pub struct SnapshotBankFields {
    /// bank fields from the full snapshot
    full: BankFieldsToDeserialize,
    /// bank fields from the incremental snapshot, if one was loaded
    incremental: Option<BankFieldsToDeserialize>,
}
301
302impl SnapshotBankFields {
303    /// Collapse the SnapshotBankFields into a single (the latest) BankFieldsToDeserialize.
304    pub fn collapse_into(self) -> BankFieldsToDeserialize {
305        self.incremental.unwrap_or(self.full)
306    }
307}
308
/// Helper type to wrap AccountsDbFields when reconstructing AccountsDb from either just a full
/// snapshot, or both a full and incremental snapshot
#[derive(Debug)]
pub struct SnapshotAccountsDbFields<T> {
    /// accounts-db fields from the full snapshot
    full_snapshot_accounts_db_fields: AccountsDbFields<T>,
    /// accounts-db fields from the incremental snapshot, if one was loaded
    incremental_snapshot_accounts_db_fields: Option<AccountsDbFields<T>>,
}
316
317impl<T> SnapshotAccountsDbFields<T> {
318    /// Collapse the SnapshotAccountsDbFields into a single AccountsDbFields.  If there is no
319    /// incremental snapshot, this returns the AccountsDbFields from the full snapshot.
320    /// Otherwise, use the AccountsDbFields from the incremental snapshot, and a combination
321    /// of the storages from both the full and incremental snapshots.
322    fn collapse_into(self) -> Result<AccountsDbFields<T>, Error> {
323        match self.incremental_snapshot_accounts_db_fields {
324            None => Ok(self.full_snapshot_accounts_db_fields),
325            Some(AccountsDbFields(
326                mut incremental_snapshot_storages,
327                incremental_snapshot_version,
328                incremental_snapshot_slot,
329                incremental_snapshot_bank_hash_info,
330                incremental_snapshot_historical_roots,
331                incremental_snapshot_historical_roots_with_hash,
332            )) => {
333                let full_snapshot_storages = self.full_snapshot_accounts_db_fields.0;
334                let full_snapshot_slot = self.full_snapshot_accounts_db_fields.2;
335
336                // filter out incremental snapshot storages with slot <= full snapshot slot
337                incremental_snapshot_storages.retain(|slot, _| *slot > full_snapshot_slot);
338
339                // There must not be any overlap in the slots of storages between the full snapshot and the incremental snapshot
340                incremental_snapshot_storages
341                    .iter()
342                    .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then_some(()).ok_or_else(|| {
343                        io::Error::new(io::ErrorKind::InvalidData, "Snapshots are incompatible: There are storages for the same slot in both the full snapshot and the incremental snapshot!")
344                    })?;
345
346                let mut combined_storages = full_snapshot_storages;
347                combined_storages.extend(incremental_snapshot_storages);
348
349                Ok(AccountsDbFields(
350                    combined_storages,
351                    incremental_snapshot_version,
352                    incremental_snapshot_slot,
353                    incremental_snapshot_bank_hash_info,
354                    incremental_snapshot_historical_roots,
355                    incremental_snapshot_historical_roots_with_hash,
356                ))
357            }
358        }
359    }
360}
361
362fn deserialize_from<R, T>(reader: R) -> bincode::Result<T>
363where
364    R: Read,
365    T: DeserializeOwned,
366{
367    bincode::options()
368        .with_limit(MAX_STREAM_SIZE)
369        .with_fixint_encoding()
370        .allow_trailing_bytes()
371        .deserialize_from::<R, T>(reader)
372}
373
374fn deserialize_accounts_db_fields<R>(
375    stream: &mut BufReader<R>,
376) -> Result<AccountsDbFields<SerializableAccountStorageEntry>, Error>
377where
378    R: Read,
379{
380    deserialize_from::<_, _>(stream)
381}
382
/// Extra fields that are deserialized from the end of snapshots.
///
/// Note that this struct's fields should stay synced with the fields in
/// ExtraFieldsToSerialize with the exception that new "extra fields" should be
/// added to this struct a minor release before they are added to the serialize
/// struct.
///
/// Every field uses `default_on_eof`, so snapshots written before a field
/// existed deserialize with that field's `Default` value.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[cfg_attr(feature = "dev-context-only-utils", derive(PartialEq))]
#[derive(Clone, Debug, Deserialize)]
struct ExtraFieldsToDeserialize {
    #[serde(deserialize_with = "default_on_eof")]
    lamports_per_signature: u64,
    #[serde(deserialize_with = "default_on_eof")]
    incremental_snapshot_persistence: Option<BankIncrementalSnapshotPersistence>,
    #[serde(deserialize_with = "default_on_eof")]
    epoch_accounts_hash: Option<Hash>,
    #[serde(deserialize_with = "default_on_eof")]
    versioned_epoch_stakes: HashMap<u64, VersionedEpochStakes>,
    #[serde(deserialize_with = "default_on_eof")]
    accounts_lt_hash: Option<SerdeAccountsLtHash>,
}
404
/// Extra fields that are serialized at the end of snapshots.
///
/// Note that this struct's fields should stay synced with the fields in
/// ExtraFieldsToDeserialize with the exception that new "extra fields" should
/// be added to the deserialize struct a minor release before they are added to
/// this one.
///
/// Field order is the wire format; serialized immediately after the
/// accounts-db fields (see `serialize_bank_snapshot_with`).
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[cfg_attr(feature = "dev-context-only-utils", derive(Default, PartialEq))]
#[derive(Debug, Serialize)]
pub struct ExtraFieldsToSerialize<'a> {
    pub lamports_per_signature: u64,
    pub incremental_snapshot_persistence: Option<&'a BankIncrementalSnapshotPersistence>,
    pub epoch_accounts_hash: Option<EpochAccountsHash>,
    pub versioned_epoch_stakes: HashMap<u64, VersionedEpochStakes>,
    pub accounts_lt_hash: Option<SerdeAccountsLtHash>,
}
421
/// Deserializes the three consecutive snapshot sections from `stream` -- the
/// versioned bank, the accounts-db fields, and the trailing extra fields --
/// then folds the extra fields into the returned bank fields.
///
/// The three reads MUST stay in this order: they consume the stream
/// sequentially, mirroring the order the sections are serialized in.
fn deserialize_bank_fields<R>(
    mut stream: &mut BufReader<R>,
) -> Result<
    (
        BankFieldsToDeserialize,
        AccountsDbFields<SerializableAccountStorageEntry>,
    ),
    Error,
>
where
    R: Read,
{
    let mut bank_fields: BankFieldsToDeserialize =
        deserialize_from::<_, DeserializableVersionedBank>(&mut stream)?.into();
    let accounts_db_fields = deserialize_accounts_db_fields(stream)?;
    let extra_fields = deserialize_from(stream)?;

    // Process extra fields
    let ExtraFieldsToDeserialize {
        lamports_per_signature,
        incremental_snapshot_persistence,
        epoch_accounts_hash,
        versioned_epoch_stakes,
        accounts_lt_hash,
    } = extra_fields;

    // lamports_per_signature travels separately from the fee rate governor in
    // the extra fields, so re-attach it here
    bank_fields.fee_rate_governor = bank_fields
        .fee_rate_governor
        .clone_with_lamports_per_signature(lamports_per_signature);
    bank_fields.incremental_snapshot_persistence = incremental_snapshot_persistence;
    bank_fields.epoch_accounts_hash = epoch_accounts_hash;

    // If we deserialize the new epoch stakes, add all of the entries into the
    // other deserialized map which could still have old epoch stakes entries
    bank_fields.epoch_stakes.extend(
        versioned_epoch_stakes
            .into_iter()
            .map(|(epoch, versioned_epoch_stakes)| (epoch, versioned_epoch_stakes.into())),
    );

    bank_fields.accounts_lt_hash = accounts_lt_hash.map(Into::into);

    Ok((bank_fields, accounts_db_fields))
}
466
/// used by tests to compare contents of serialized bank fields
/// serialized format is not deterministic - likely due to randomness in structs like hashmaps
#[cfg(feature = "dev-context-only-utils")]
pub(crate) fn compare_two_serialized_banks(
    path1: impl AsRef<Path>,
    path2: impl AsRef<Path>,
) -> std::result::Result<bool, Error> {
    use std::fs::File;
    // Open each snapshot file behind a buffered reader and deserialize its
    // bank + accounts-db fields, then compare the two field tuples.
    let mut stream1 = BufReader::new(File::open(path1)?);
    let fields1 = deserialize_bank_fields(&mut stream1)?;
    let mut stream2 = BufReader::new(File::open(path2)?);
    let fields2 = deserialize_bank_fields(&mut stream2)?;
    Ok(fields1 == fields2)
}
484
485/// Get snapshot storage lengths from accounts_db_fields
486pub(crate) fn snapshot_storage_lengths_from_fields(
487    accounts_db_fields: &AccountsDbFields<SerializableAccountStorageEntry>,
488) -> HashMap<Slot, HashMap<SerializedAccountsFileId, usize>> {
489    let AccountsDbFields(snapshot_storage, ..) = &accounts_db_fields;
490    snapshot_storage
491        .iter()
492        .map(|(slot, slot_storage)| {
493            (
494                *slot,
495                slot_storage
496                    .iter()
497                    .map(|storage_entry| (storage_entry.id(), storage_entry.current_len()))
498                    .collect(),
499            )
500        })
501        .collect()
502}
503
/// Reads the bank fields and accounts-db fields from a single snapshot stream.
///
/// Thin wrapper around `deserialize_bank_fields`.
pub(crate) fn fields_from_stream<R: Read>(
    snapshot_stream: &mut BufReader<R>,
) -> std::result::Result<
    (
        BankFieldsToDeserialize,
        AccountsDbFields<SerializableAccountStorageEntry>,
    ),
    Error,
> {
    deserialize_bank_fields(snapshot_stream)
}
515
516pub(crate) fn fields_from_streams(
517    snapshot_streams: &mut SnapshotStreams<impl Read>,
518) -> std::result::Result<
519    (
520        SnapshotBankFields,
521        SnapshotAccountsDbFields<SerializableAccountStorageEntry>,
522    ),
523    Error,
524> {
525    let (full_snapshot_bank_fields, full_snapshot_accounts_db_fields) =
526        fields_from_stream(snapshot_streams.full_snapshot_stream)?;
527    let (incremental_snapshot_bank_fields, incremental_snapshot_accounts_db_fields) =
528        snapshot_streams
529            .incremental_snapshot_stream
530            .as_mut()
531            .map(|stream| fields_from_stream(stream))
532            .transpose()?
533            .unzip();
534
535    let snapshot_bank_fields = SnapshotBankFields {
536        full: full_snapshot_bank_fields,
537        incremental: incremental_snapshot_bank_fields,
538    };
539    let snapshot_accounts_db_fields = SnapshotAccountsDbFields {
540        full_snapshot_accounts_db_fields,
541        incremental_snapshot_accounts_db_fields,
542    };
543    Ok((snapshot_bank_fields, snapshot_accounts_db_fields))
544}
545
/// This struct contains side-info while reconstructing the bank from streams
#[derive(Debug)]
pub struct BankFromStreamsInfo {
    /// lattice hash of duplicate accounts found during index generation,
    /// forwarded from accounts-db reconstruction (None when not produced)
    pub duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
}
551
/// Deserializes the full (and optional incremental) snapshot streams and
/// reconstructs a `Bank` from them.
///
/// Deserialization happens in `fields_from_streams`; all remaining arguments
/// are forwarded to `reconstruct_bank_from_fields`. Returns the bank together
/// with side-info produced during reconstruction.
#[allow(clippy::too_many_arguments)]
pub(crate) fn bank_from_streams<R>(
    snapshot_streams: &mut SnapshotStreams<R>,
    account_paths: &[PathBuf],
    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
    genesis_config: &GenesisConfig,
    runtime_config: &RuntimeConfig,
    debug_keys: Option<Arc<HashSet<Pubkey>>>,
    additional_builtins: Option<&[BuiltinPrototype]>,
    limit_load_slot_count_from_snapshot: Option<usize>,
    verify_index: bool,
    accounts_db_config: Option<AccountsDbConfig>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    exit: Arc<AtomicBool>,
) -> std::result::Result<(Bank, BankFromStreamsInfo), Error>
where
    R: Read,
{
    let (bank_fields, accounts_db_fields) = fields_from_streams(snapshot_streams)?;
    let (bank, info) = reconstruct_bank_from_fields(
        bank_fields,
        accounts_db_fields,
        genesis_config,
        runtime_config,
        account_paths,
        storage_and_next_append_vec_id,
        debug_keys,
        additional_builtins,
        limit_load_slot_count_from_snapshot,
        verify_index,
        accounts_db_config,
        accounts_update_notifier,
        exit,
    )?;
    // Re-wrap the internal reconstruction info into the public side-info type.
    Ok((
        bank,
        BankFromStreamsInfo {
            duplicates_lt_hash: info.duplicates_lt_hash,
        },
    ))
}
593
/// Serializes `bank` plus its snapshot storages into `stream` with bincode,
/// including the trailing extra fields (test-only).
#[cfg(test)]
pub(crate) fn bank_to_stream<W>(
    stream: &mut BufWriter<W>,
    bank: &Bank,
    snapshot_storages: &[Vec<Arc<AccountStorageEntry>>],
) -> Result<(), Error>
where
    W: Write,
{
    bincode::serialize_into(
        stream,
        &SerializableBankAndStorage {
            bank,
            snapshot_storages,
        },
    )
}
611
/// Serializes `bank` plus its snapshot storages into `stream` with bincode,
/// WITHOUT the trailing extra fields (test-only; exercises reading snapshots
/// that predate the extra fields).
#[cfg(test)]
pub(crate) fn bank_to_stream_no_extra_fields<W>(
    stream: &mut BufWriter<W>,
    bank: &Bank,
    snapshot_storages: &[Vec<Arc<AccountStorageEntry>>],
) -> Result<(), Error>
where
    W: Write,
{
    bincode::serialize_into(
        stream,
        &SerializableBankAndStorageNoExtra {
            bank,
            snapshot_storages,
        },
    )
}
629
/// Serializes bank snapshot into `stream` with bincode
///
/// Convenience wrapper over `serialize_bank_snapshot_with` that constructs a
/// fixed-int-encoding bincode serializer over the buffered writer.
pub fn serialize_bank_snapshot_into<W>(
    stream: &mut BufWriter<W>,
    bank_fields: BankFieldsToSerialize,
    bank_hash_stats: BankHashStats,
    accounts_delta_hash: AccountsDeltaHash,
    accounts_hash: AccountsHash,
    account_storage_entries: &[Vec<Arc<AccountStorageEntry>>],
    extra_fields: ExtraFieldsToSerialize,
    write_version: u64,
) -> Result<(), Error>
where
    W: Write,
{
    // Must match the options used when deserializing (fixed-int encoding).
    let mut serializer = bincode::Serializer::new(
        stream,
        bincode::DefaultOptions::new().with_fixint_encoding(),
    );
    serialize_bank_snapshot_with(
        &mut serializer,
        bank_fields,
        bank_hash_stats,
        accounts_delta_hash,
        accounts_hash,
        account_storage_entries,
        extra_fields,
        write_version,
    )
}
659
/// Serializes bank snapshot with `serializer`
///
/// Writes the three snapshot sections as a tuple, in the order they are read
/// back by `deserialize_bank_fields`: the versioned bank, the accounts-db
/// data, and the extra fields.
pub fn serialize_bank_snapshot_with<S>(
    serializer: S,
    bank_fields: BankFieldsToSerialize,
    bank_hash_stats: BankHashStats,
    accounts_delta_hash: AccountsDeltaHash,
    accounts_hash: AccountsHash,
    account_storage_entries: &[Vec<Arc<AccountStorageEntry>>],
    extra_fields: ExtraFieldsToSerialize,
    write_version: u64,
) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    // Capture the slot before `bank_fields` is consumed by the conversion.
    let slot = bank_fields.slot;
    let serializable_bank = SerializableVersionedBank::from(bank_fields);
    let serializable_accounts_db = SerializableAccountsDb::<'_> {
        slot,
        account_storage_entries,
        bank_hash_stats,
        accounts_delta_hash,
        accounts_hash,
        write_version,
    };
    (serializable_bank, serializable_accounts_db, extra_fields).serialize(serializer)
}
686
/// Test-only wrapper that serializes a bank, its storages, and the trailing
/// extra fields as one bincode value (see its `Serialize` impl).
#[cfg(test)]
struct SerializableBankAndStorage<'a> {
    bank: &'a Bank,
    snapshot_storages: &'a [Vec<Arc<AccountStorageEntry>>],
}
692
#[cfg(test)]
impl Serialize for SerializableBankAndStorage<'_> {
    /// Serializes the same (bank, accounts-db, extra-fields) tuple layout that
    /// `serialize_bank_snapshot_with` produces, pulling the pieces out of the
    /// live bank and its accounts-db.
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        let slot = self.bank.slot();
        let mut bank_fields = self.bank.get_fields_to_serialize();
        let accounts_db = &self.bank.rc.accounts.accounts_db;
        let bank_hash_stats = self.bank.get_bank_hash_stats();
        let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap();
        let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0;
        let write_version = accounts_db.write_version.load(Ordering::Acquire);
        let lamports_per_signature = bank_fields.fee_rate_governor.lamports_per_signature;
        // Move the versioned epoch stakes out of bank_fields: they are written
        // in the extra-fields section, not in the versioned-bank section.
        let versioned_epoch_stakes = std::mem::take(&mut bank_fields.versioned_epoch_stakes);
        let accounts_lt_hash = bank_fields.accounts_lt_hash.clone().map(Into::into);
        let bank_fields_to_serialize = (
            SerializableVersionedBank::from(bank_fields),
            SerializableAccountsDb::<'_> {
                slot,
                account_storage_entries: self.snapshot_storages,
                bank_hash_stats,
                accounts_delta_hash,
                accounts_hash,
                write_version,
            },
            ExtraFieldsToSerialize {
                lamports_per_signature,
                incremental_snapshot_persistence: None,
                epoch_accounts_hash: self.bank.get_epoch_accounts_hash_to_serialize(),
                versioned_epoch_stakes,
                accounts_lt_hash,
            },
        );
        bank_fields_to_serialize.serialize(serializer)
    }
}
730
/// Test-only wrapper like `SerializableBankAndStorage`, but whose `Serialize`
/// impl omits the trailing extra fields.
#[cfg(test)]
struct SerializableBankAndStorageNoExtra<'a> {
    bank: &'a Bank,
    snapshot_storages: &'a [Vec<Arc<AccountStorageEntry>>],
}
736
#[cfg(test)]
impl Serialize for SerializableBankAndStorageNoExtra<'_> {
    /// Serializes only the (bank, accounts-db) pair -- unlike
    /// `SerializableBankAndStorage`, no trailing extra fields are written.
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        let slot = self.bank.slot();
        let bank_fields = self.bank.get_fields_to_serialize();
        let accounts_db = &self.bank.rc.accounts.accounts_db;
        let bank_hash_stats = self.bank.get_bank_hash_stats();
        let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap();
        let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0;
        let write_version = accounts_db.write_version.load(Ordering::Acquire);
        (
            SerializableVersionedBank::from(bank_fields),
            SerializableAccountsDb::<'_> {
                slot,
                account_storage_entries: self.snapshot_storages,
                bank_hash_stats,
                accounts_delta_hash,
                accounts_hash,
                write_version,
            },
        )
            .serialize(serializer)
    }
}
764
#[cfg(test)]
impl<'a> From<SerializableBankAndStorageNoExtra<'a>> for SerializableBankAndStorage<'a> {
    /// Re-wraps the same bank and storage references; only the wrapper type
    /// (and thus the `Serialize` impl with extra fields) changes.
    fn from(s: SerializableBankAndStorageNoExtra<'a>) -> SerializableBankAndStorage<'a> {
        SerializableBankAndStorage {
            bank: s.bank,
            snapshot_storages: s.snapshot_storages,
        }
    }
}
778
/// Serialize-only view of the accounts-db data for one snapshot; written as
/// the second element of the snapshot tuple (its `Serialize` impl emits the
/// same positional layout `AccountsDbFields` deserializes).
struct SerializableAccountsDb<'a> {
    slot: Slot,
    account_storage_entries: &'a [Vec<Arc<AccountStorageEntry>>],
    bank_hash_stats: BankHashStats,
    accounts_delta_hash: AccountsDeltaHash,
    accounts_hash: AccountsHash,
    // obsolete write_version, still part of the wire format
    write_version: u64,
}
787
impl Serialize for SerializableAccountsDb<'_> {
    /// Serializes the accounts-db section in the positional order that
    /// `AccountsDbFields` deserializes: storages map, write_version, slot,
    /// bank hash info, then two (empty) historical-roots vectors.
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        // (1st of 3 elements) write the list of account storage entry lists out as a map
        // entry_count is accumulated through a RefCell because the closure runs
        // lazily inside the serializer; it feeds the datapoint below.
        let entry_count = RefCell::<usize>::new(0);
        let entries = utils::serialize_iter_as_map(self.account_storage_entries.iter().map(|x| {
            *entry_count.borrow_mut() += x.len();
            (
                // NOTE(review): panics if any inner storage list is empty --
                // callers appear to always supply non-empty lists; confirm.
                x.first().unwrap().slot(),
                utils::serialize_iter_as_seq(
                    x.iter()
                        .map(|x| SerializableAccountStorageEntry::new(x.as_ref(), self.slot)),
                ),
            )
        }));
        let bank_hash_info = BankHashInfo {
            accounts_delta_hash: self.accounts_delta_hash.into(),
            accounts_hash: self.accounts_hash.into(),
            stats: self.bank_hash_stats.clone(),
        };

        // always written as empty; retained for wire-format compatibility
        let historical_roots = Vec::<Slot>::default();
        let historical_roots_with_hash = Vec::<(Slot, Hash)>::default();

        let mut serialize_account_storage_timer = Measure::start("serialize_account_storage_ms");
        let result = (
            entries,
            self.write_version,
            self.slot,
            bank_hash_info,
            historical_roots,
            historical_roots_with_hash,
        )
            .serialize(serializer);
        serialize_account_storage_timer.stop();
        datapoint_info!(
            "serialize_account_storage_ms",
            ("duration", serialize_account_storage_timer.as_ms(), i64),
            ("num_entries", *entry_count.borrow(), i64),
        );
        result
    }
}
833
// NOTE(review): marker trait from solana_frozen_abi; presumably lets the ABI
// digest machinery treat this hand-serialized type as transparent rather than
// digesting its fields -- confirm against solana_frozen_abi's docs.
#[cfg(feature = "frozen-abi")]
impl solana_frozen_abi::abi_example::TransparentAsHelper for SerializableAccountsDb<'_> {}
836
/// This struct contains side-info while reconstructing the bank from fields
#[derive(Debug)]
struct ReconstructedBankInfo {
    // Propagated from `ReconstructedAccountsDbInfo::duplicates_lt_hash`, which
    // is produced by accounts-index generation; handed back to the caller of
    // `reconstruct_bank_from_fields` alongside the bank.
    duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
}
842
843#[allow(clippy::too_many_arguments)]
844fn reconstruct_bank_from_fields<E>(
845    bank_fields: SnapshotBankFields,
846    snapshot_accounts_db_fields: SnapshotAccountsDbFields<E>,
847    genesis_config: &GenesisConfig,
848    runtime_config: &RuntimeConfig,
849    account_paths: &[PathBuf],
850    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
851    debug_keys: Option<Arc<HashSet<Pubkey>>>,
852    additional_builtins: Option<&[BuiltinPrototype]>,
853    limit_load_slot_count_from_snapshot: Option<usize>,
854    verify_index: bool,
855    accounts_db_config: Option<AccountsDbConfig>,
856    accounts_update_notifier: Option<AccountsUpdateNotifier>,
857    exit: Arc<AtomicBool>,
858) -> Result<(Bank, ReconstructedBankInfo), Error>
859where
860    E: SerializableStorage + std::marker::Sync,
861{
862    let capitalizations = (
863        bank_fields.full.capitalization,
864        bank_fields
865            .incremental
866            .as_ref()
867            .map(|bank_fields| bank_fields.capitalization),
868    );
869    let mut bank_fields = bank_fields.collapse_into();
870    let (accounts_db, reconstructed_accounts_db_info) = reconstruct_accountsdb_from_fields(
871        snapshot_accounts_db_fields,
872        account_paths,
873        storage_and_next_append_vec_id,
874        genesis_config,
875        limit_load_slot_count_from_snapshot,
876        verify_index,
877        accounts_db_config,
878        accounts_update_notifier,
879        exit,
880        bank_fields.epoch_accounts_hash,
881        capitalizations,
882        bank_fields.incremental_snapshot_persistence.as_ref(),
883        bank_fields.accounts_lt_hash.is_some(),
884    )?;
885    bank_fields.bank_hash_stats = reconstructed_accounts_db_info.bank_hash_stats;
886
887    let bank_rc = BankRc::new(Accounts::new(Arc::new(accounts_db)));
888    let runtime_config = Arc::new(runtime_config.clone());
889
890    // if limit_load_slot_count_from_snapshot is set, then we need to side-step some correctness checks beneath this call
891    let debug_do_not_add_builtins = limit_load_slot_count_from_snapshot.is_some();
892    let bank = Bank::new_from_fields(
893        bank_rc,
894        genesis_config,
895        runtime_config,
896        bank_fields,
897        debug_keys,
898        additional_builtins,
899        debug_do_not_add_builtins,
900        reconstructed_accounts_db_info.accounts_data_len,
901    );
902
903    info!("rent_collector: {:?}", bank.rent_collector());
904    Ok((
905        bank,
906        ReconstructedBankInfo {
907            duplicates_lt_hash: reconstructed_accounts_db_info.duplicates_lt_hash,
908        },
909    ))
910}
911
912pub(crate) fn reconstruct_single_storage(
913    slot: &Slot,
914    append_vec_path: &Path,
915    current_len: usize,
916    append_vec_id: AccountsFileId,
917    storage_access: StorageAccess,
918) -> Result<Arc<AccountStorageEntry>, SnapshotError> {
919    let accounts_file =
920        AccountsFile::new_for_startup(append_vec_path, current_len, storage_access)?;
921    Ok(Arc::new(AccountStorageEntry::new_existing(
922        *slot,
923        append_vec_id,
924        accounts_file,
925    )))
926}
927
/// Remaps the AppendVec ID to handle any duplicate IDs that may have
/// previously existed due to full snapshots and incremental snapshots
/// generated from different nodes.
///
/// Returns the remapped ID and the (possibly renamed) path of the file.
pub(crate) fn remap_append_vec_file(
    slot: Slot,
    old_append_vec_id: SerializedAccountsFileId,
    append_vec_path: &Path,
    next_append_vec_id: &AtomicAccountsFileId,
    num_collisions: &AtomicUsize,
) -> io::Result<(AccountsFileId, PathBuf)> {
    // Pre-convert the source path for the renameat2() fast path used below.
    // NOTE(review): this is built on every linux target but only consumed on
    // linux-gnu; on other linux targets it ends up unused -- consider
    // tightening the cfg to all(target_os = "linux", target_env = "gnu").
    #[cfg(target_os = "linux")]
    let append_vec_path_cstr = cstring_from_path(append_vec_path)?;

    let mut remapped_append_vec_path = append_vec_path.to_path_buf();

    // Break out of the loop in the following situations:
    // 1. The new ID is the same as the original ID.  This means we do not need to
    //    rename the file, since the ID is the "correct" one already.
    // 2. There is not a file already at the new path.  This means it is safe to
    //    rename the file to this new path.
    let (remapped_append_vec_id, remapped_append_vec_path) = loop {
        // Claim the next candidate ID atomically, so concurrent remaps never
        // try to take the same ID.
        let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::AcqRel);

        // this can only happen in the first iteration of the loop
        if old_append_vec_id == remapped_append_vec_id as SerializedAccountsFileId {
            break (remapped_append_vec_id, remapped_append_vec_path);
        }

        let remapped_file_name = AccountsFile::file_name(slot, remapped_append_vec_id);
        remapped_append_vec_path = append_vec_path.parent().unwrap().join(remapped_file_name);

        #[cfg(all(target_os = "linux", target_env = "gnu"))]
        {
            let remapped_append_vec_path_cstr = cstring_from_path(&remapped_append_vec_path)?;

            // On linux we use renameat2(NO_REPLACE) instead of IF metadata(path).is_err() THEN
            // rename() in order to save a statx() syscall.
            match rename_no_replace(&append_vec_path_cstr, &remapped_append_vec_path_cstr) {
                // If the file was successfully renamed, break out of the loop
                Ok(_) => break (remapped_append_vec_id, remapped_append_vec_path),
                // If there's already a file at the new path, continue so we try
                // the next ID
                Err(e) if e.kind() == io::ErrorKind::AlreadyExists => {}
                Err(e) => return Err(e),
            }
        }

        // Fallback for targets without renameat2: probe for an existing file
        // first, and defer the actual rename until after the loop.
        #[cfg(any(
            not(target_os = "linux"),
            all(target_os = "linux", not(target_env = "gnu"))
        ))]
        if std::fs::metadata(&remapped_append_vec_path).is_err() {
            break (remapped_append_vec_id, remapped_append_vec_path);
        }

        // If we made it this far, a file exists at the new path.  Record the collision
        // and try again.
        num_collisions.fetch_add(1, Ordering::Relaxed);
    };

    // Only rename the file if the new ID is actually different from the original. In the target_os
    // = linux case, we have already renamed if necessary.
    #[cfg(any(
        not(target_os = "linux"),
        all(target_os = "linux", not(target_env = "gnu"))
    ))]
    if old_append_vec_id != remapped_append_vec_id as SerializedAccountsFileId {
        std::fs::rename(append_vec_path, &remapped_append_vec_path)?;
    }

    Ok((remapped_append_vec_id, remapped_append_vec_path))
}
1000
1001pub(crate) fn remap_and_reconstruct_single_storage(
1002    slot: Slot,
1003    old_append_vec_id: SerializedAccountsFileId,
1004    current_len: usize,
1005    append_vec_path: &Path,
1006    next_append_vec_id: &AtomicAccountsFileId,
1007    num_collisions: &AtomicUsize,
1008    storage_access: StorageAccess,
1009) -> Result<Arc<AccountStorageEntry>, SnapshotError> {
1010    let (remapped_append_vec_id, remapped_append_vec_path) = remap_append_vec_file(
1011        slot,
1012        old_append_vec_id,
1013        append_vec_path,
1014        next_append_vec_id,
1015        num_collisions,
1016    )?;
1017    let storage = reconstruct_single_storage(
1018        &slot,
1019        &remapped_append_vec_path,
1020        current_len,
1021        remapped_append_vec_id,
1022        storage_access,
1023    )?;
1024    Ok(storage)
1025}
1026
/// This struct contains side-info while reconstructing the accounts DB from fields.
#[derive(Debug, Default, Clone)]
pub struct ReconstructedAccountsDbInfo {
    /// Total accounts data length, as reported by accounts-index generation
    pub accounts_data_len: u64,
    /// Lattice-hash info for duplicate accounts, produced by index generation
    /// when the accounts lt hash is enabled (otherwise None)
    pub duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
    /// Bank hash stats recovered from the snapshot's `BankHashInfo`
    pub bank_hash_stats: BankHashStats,
}
1034
/// Reconstructs an [`AccountsDb`] from deserialized snapshot fields.
///
/// Seeds the new accounts db with the accounts hashes and capitalizations from
/// the full (and optional incremental) snapshot, installs the deserialized
/// storages, then generates the accounts index while a background thread runs
/// the account-restore notification.  Returns the db plus side-info needed to
/// finish building the bank.
#[allow(clippy::too_many_arguments)]
fn reconstruct_accountsdb_from_fields<E>(
    snapshot_accounts_db_fields: SnapshotAccountsDbFields<E>,
    account_paths: &[PathBuf],
    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
    genesis_config: &GenesisConfig,
    limit_load_slot_count_from_snapshot: Option<usize>,
    verify_index: bool,
    accounts_db_config: Option<AccountsDbConfig>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    exit: Arc<AtomicBool>,
    epoch_accounts_hash: Option<Hash>,
    // (full snapshot capitalization, optional incremental snapshot capitalization)
    capitalizations: (u64, Option<u64>),
    incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>,
    has_accounts_lt_hash: bool,
) -> Result<(AccountsDb, ReconstructedAccountsDbInfo), Error>
where
    E: SerializableStorage + std::marker::Sync,
{
    let mut accounts_db = AccountsDb::new_with_config(
        account_paths.to_vec(),
        accounts_db_config,
        accounts_update_notifier,
        exit,
    );

    if let Some(epoch_accounts_hash) = epoch_accounts_hash {
        accounts_db
            .epoch_accounts_hash_manager
            .set_valid(EpochAccountsHash::new(epoch_accounts_hash), 0);
    }

    // Store the accounts hash & capitalization, from the full snapshot, in the new AccountsDb
    {
        let AccountsDbFields(_, _, slot, bank_hash_info, _, _) =
            &snapshot_accounts_db_fields.full_snapshot_accounts_db_fields;

        if let Some(incremental_snapshot_persistence) = incremental_snapshot_persistence {
            // If we've booted from local state that was originally intended to be an incremental
            // snapshot, then we will use the incremental snapshot persistence field to set the
            // initial accounts hashes in accounts db.
            let old_accounts_hash = accounts_db.set_accounts_hash_from_snapshot(
                incremental_snapshot_persistence.full_slot,
                incremental_snapshot_persistence.full_hash.clone(),
                incremental_snapshot_persistence.full_capitalization,
            );
            assert!(
                old_accounts_hash.is_none(),
                "There should not already be an AccountsHash at slot {slot}: {old_accounts_hash:?}",
            );
            let old_incremental_accounts_hash = accounts_db
                .set_incremental_accounts_hash_from_snapshot(
                    *slot,
                    incremental_snapshot_persistence.incremental_hash.clone(),
                    incremental_snapshot_persistence.incremental_capitalization,
                );
            assert!(
                old_incremental_accounts_hash.is_none(),
                "There should not already be an IncrementalAccountsHash at slot {slot}: {old_incremental_accounts_hash:?}",
            );
        } else {
            // Otherwise, we've booted from a snapshot archive, or from local state that was *not*
            // intended to be an incremental snapshot.
            let old_accounts_hash = accounts_db.set_accounts_hash_from_snapshot(
                *slot,
                bank_hash_info.accounts_hash.clone(),
                capitalizations.0,
            );
            assert!(
                old_accounts_hash.is_none(),
                "There should not already be an AccountsHash at slot {slot}: {old_accounts_hash:?}",
            );
        }
    }

    // Store the accounts hash & capitalization, from the incremental snapshot, in the new AccountsDb
    {
        if let Some(AccountsDbFields(_, _, slot, bank_hash_info, _, _)) =
            snapshot_accounts_db_fields
                .incremental_snapshot_accounts_db_fields
                .as_ref()
        {
            if let Some(incremental_snapshot_persistence) = incremental_snapshot_persistence {
                // Use the presence of a BankIncrementalSnapshotPersistence to indicate the
                // Incremental Accounts Hash feature is enabled, and use its accounts hashes
                // instead of `BankHashInfo`'s.
                let AccountsDbFields(_, _, full_slot, full_bank_hash_info, _, _) =
                    &snapshot_accounts_db_fields.full_snapshot_accounts_db_fields;
                let full_accounts_hash = &full_bank_hash_info.accounts_hash;
                assert_eq!(
                    incremental_snapshot_persistence.full_slot, *full_slot,
                    "The incremental snapshot's base slot ({}) must match the full snapshot's slot ({full_slot})!",
                    incremental_snapshot_persistence.full_slot,
                );
                assert_eq!(
                    &incremental_snapshot_persistence.full_hash, full_accounts_hash,
                    "The incremental snapshot's base accounts hash ({}) must match the full snapshot's accounts hash ({})!",
                    &incremental_snapshot_persistence.full_hash.0, full_accounts_hash.0,
                );
                assert_eq!(
                    incremental_snapshot_persistence.full_capitalization, capitalizations.0,
                    "The incremental snapshot's base capitalization ({}) must match the full snapshot's capitalization ({})!",
                    incremental_snapshot_persistence.full_capitalization, capitalizations.0,
                );
                let old_incremental_accounts_hash = accounts_db
                    .set_incremental_accounts_hash_from_snapshot(
                        *slot,
                        incremental_snapshot_persistence.incremental_hash.clone(),
                        incremental_snapshot_persistence.incremental_capitalization,
                    );
                assert!(
                    old_incremental_accounts_hash.is_none(),
                    "There should not already be an IncrementalAccountsHash at slot {slot}: {old_incremental_accounts_hash:?}",
                );
            } else {
                // ..and without a BankIncrementalSnapshotPersistence then the Incremental Accounts
                // Hash feature is disabled; the accounts hash in `BankHashInfo` is valid.
                let old_accounts_hash = accounts_db.set_accounts_hash_from_snapshot(
                    *slot,
                    bank_hash_info.accounts_hash.clone(),
                    capitalizations
                        .1
                        .expect("capitalization from incremental snapshot"),
                );
                assert!(
                    old_accounts_hash.is_none(),
                    "There should not already be an AccountsHash at slot {slot}: {old_accounts_hash:?}",
                );
            };
        }
    }

    // Collapse the full + optional incremental fields into the single set of
    // accounts-db fields used for the rest of reconstruction.  The historical
    // roots fields are ignored.
    let AccountsDbFields(
        _snapshot_storages,
        snapshot_version,
        snapshot_slot,
        snapshot_bank_hash_info,
        _snapshot_historical_roots,
        _snapshot_historical_roots_with_hash,
    ) = snapshot_accounts_db_fields.collapse_into()?;

    // Ensure all account paths exist
    for path in &accounts_db.paths {
        std::fs::create_dir_all(path)
            .unwrap_or_else(|err| panic!("Failed to create directory {}: {}", path.display(), err));
    }

    let StorageAndNextAccountsFileId {
        storage,
        next_append_vec_id,
    } = storage_and_next_append_vec_id;

    assert!(
        !storage.is_empty(),
        "At least one storage entry must exist from deserializing stream"
    );

    let next_append_vec_id = next_append_vec_id.load(Ordering::Acquire);
    // Safe: the assert above guarantees at least one id was allocated.
    let max_append_vec_id = next_append_vec_id - 1;
    assert!(
        max_append_vec_id <= AccountsFileId::MAX / 2,
        "Storage id {max_append_vec_id} larger than allowed max"
    );

    // Process deserialized data, set necessary fields in self
    let old_accounts_delta_hash = accounts_db.set_accounts_delta_hash_from_snapshot(
        snapshot_slot,
        snapshot_bank_hash_info.accounts_delta_hash,
    );
    assert!(
        old_accounts_delta_hash.is_none(),
        "There should not already be an AccountsDeltaHash at slot {snapshot_slot}: {old_accounts_delta_hash:?}",
        );
    accounts_db.storage.initialize(storage);
    accounts_db
        .next_id
        .store(next_append_vec_id, Ordering::Release);
    accounts_db
        .write_version
        .fetch_add(snapshot_version, Ordering::Release);

    let mut measure_notify = Measure::start("accounts_notify");

    // Run the account-restore notification on a background thread while this
    // thread generates the accounts index; joined below before returning.
    let accounts_db = Arc::new(accounts_db);
    let accounts_db_clone = accounts_db.clone();
    let handle = Builder::new()
        .name("solNfyAccRestor".to_string())
        .spawn(move || {
            accounts_db_clone.notify_account_restore_from_snapshot();
        })
        .unwrap();

    // When generating the index, we want to calculate the duplicates lt hash value (needed to do
    // the lattice-based verification of the accounts in the background) optimistically.
    // This means, either when the cli arg is set, or when the snapshot has an accounts lt hash.
    let is_accounts_lt_hash_enabled =
        accounts_db.is_experimental_accumulator_hash_enabled() || has_accounts_lt_hash;
    let IndexGenerationInfo {
        accounts_data_len,
        rent_paying_accounts_by_partition,
        duplicates_lt_hash,
    } = accounts_db.generate_index(
        limit_load_slot_count_from_snapshot,
        verify_index,
        genesis_config,
        is_accounts_lt_hash_enabled,
    );
    accounts_db
        .accounts_index
        .rent_paying_accounts_by_partition
        .set(rent_paying_accounts_by_partition)
        .unwrap();

    handle.join().unwrap();
    measure_notify.stop();

    datapoint_info!(
        "reconstruct_accountsdb_from_fields()",
        ("accountsdb-notify-at-start-us", measure_notify.as_us(), i64),
    );

    // Safe: the notification thread has been joined, so this Arc is the only
    // remaining reference.
    Ok((
        Arc::try_unwrap(accounts_db).unwrap(),
        ReconstructedAccountsDbInfo {
            accounts_data_len,
            duplicates_lt_hash,
            bank_hash_stats: snapshot_bank_hash_info.stats,
        },
    ))
}
1265
1266// Rename `src` to `dest` only if `dest` doesn't already exist.
1267#[cfg(all(target_os = "linux", target_env = "gnu"))]
1268fn rename_no_replace(src: &CStr, dest: &CStr) -> io::Result<()> {
1269    let ret = unsafe {
1270        libc::renameat2(
1271            libc::AT_FDCWD,
1272            src.as_ptr() as *const _,
1273            libc::AT_FDCWD,
1274            dest.as_ptr() as *const _,
1275            libc::RENAME_NOREPLACE,
1276        )
1277    };
1278    if ret == -1 {
1279        return Err(io::Error::last_os_error());
1280    }
1281
1282    Ok(())
1283}
1284
/// Converts `path` into a heap-allocated [`CString`].
///
/// Returns an `InvalidInput` error if the path contains an interior NUL byte.
#[cfg(target_os = "linux")]
fn cstring_from_path(path: &Path) -> io::Result<CString> {
    // It is better to allocate here than use the stack. Jemalloc is going to give us a chunk of a
    // preallocated small arena anyway. Instead if we used the stack since PATH_MAX=4096 it would
    // result in LLVM inserting a stack probe, see
    // https://docs.rs/compiler_builtins/latest/compiler_builtins/probestack/index.html.
    let path_bytes = path.as_os_str().as_encoded_bytes();
    CString::new(path_bytes)
        .map_err(|nul_error| io::Error::new(io::ErrorKind::InvalidInput, nul_error))
}