solana_runtime/
serde_snapshot.rs

1#[cfg(target_os = "linux")]
2use std::ffi::{CStr, CString};
3use {
4    crate::{
5        bank::{Bank, BankFieldsToDeserialize, BankFieldsToSerialize, BankHashStats, BankRc},
6        epoch_stakes::{EpochStakes, VersionedEpochStakes},
7        runtime_config::RuntimeConfig,
8        serde_snapshot::storage::SerializableAccountStorageEntry,
9        snapshot_utils::{SnapshotError, StorageAndNextAccountsFileId},
10        stakes::{serde_stakes_to_delegation_format, Stakes, StakesEnum},
11    },
12    bincode::{self, config::Options, Error},
13    log::*,
14    serde::{de::DeserializeOwned, Deserialize, Serialize},
15    solana_accounts_db::{
16        account_storage::meta::StoredMetaWriteVersion,
17        accounts::Accounts,
18        accounts_db::{
19            AccountStorageEntry, AccountsDb, AccountsDbConfig, AccountsFileId,
20            AtomicAccountsFileId, DuplicatesLtHash, IndexGenerationInfo,
21        },
22        accounts_file::{AccountsFile, StorageAccess},
23        accounts_hash::{AccountsDeltaHash, AccountsHash},
24        accounts_update_notifier_interface::AccountsUpdateNotifier,
25        ancestors::AncestorsForSerialization,
26        blockhash_queue::BlockhashQueue,
27        epoch_accounts_hash::EpochAccountsHash,
28    },
29    solana_builtins::prototype::BuiltinPrototype,
30    solana_measure::measure::Measure,
31    solana_sdk::{
32        clock::{Epoch, Slot, UnixTimestamp},
33        deserialize_utils::default_on_eof,
34        epoch_schedule::EpochSchedule,
35        fee_calculator::{FeeCalculator, FeeRateGovernor},
36        genesis_config::GenesisConfig,
37        hard_forks::HardForks,
38        hash::Hash,
39        inflation::Inflation,
40        pubkey::Pubkey,
41        rent_collector::RentCollector,
42        stake::state::Delegation,
43    },
44    std::{
45        cell::RefCell,
46        collections::{HashMap, HashSet},
47        io::{self, BufReader, BufWriter, Read, Write},
48        path::{Path, PathBuf},
49        result::Result,
50        sync::{
51            atomic::{AtomicBool, AtomicUsize, Ordering},
52            Arc,
53        },
54        thread::Builder,
55    },
56    storage::SerializableStorage,
57    types::SerdeAccountsLtHash,
58};
59
60mod storage;
61mod tests;
62mod types;
63mod utils;
64
65pub(crate) use {
66    solana_accounts_db::accounts_hash::{
67        SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash,
68    },
69    storage::SerializedAccountsFileId,
70};
71
/// Upper bound (32 GiB) passed to bincode's `with_limit` when deserializing
/// snapshot streams, guarding against unbounded reads/allocations.
const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024;
73
/// The AccountsDb portion of a snapshot, as laid out on disk.
///
/// NOTE: this is a tuple struct serialized with serde/bincode, so the order
/// (and count) of the elements *is* the snapshot format — do not reorder.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
pub struct AccountsDbFields<T>(
    /// map from slot to that slot's storage entries
    HashMap<Slot, Vec<T>>,
    /// accounts-db write version at serialization time
    StoredMetaWriteVersion,
    /// the snapshot slot
    Slot,
    /// delta hash, accounts hash, and stats for the snapshot slot
    BankHashInfo,
    /// all slots that were roots within the last epoch
    #[serde(deserialize_with = "default_on_eof")]
    Vec<Slot>,
    /// slots that were roots within the last epoch for which we care about the hash value
    #[serde(deserialize_with = "default_on_eof")]
    Vec<(Slot, Hash)>,
);
88
/// Incremental snapshots only calculate their accounts hash based on the
/// account changes WITHIN the incremental slot range. So, we need to keep track
/// of the full snapshot expected accounts hash results. We also need to keep
/// track of the hash and capitalization specific to the incremental snapshot
/// slot range. The capitalization we calculate for the incremental slot will
/// NOT be consistent with the bank's capitalization. It is not feasible to
/// calculate a capitalization delta that is correct given just incremental
/// slots account data and the full snapshot's capitalization.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)]
// NOTE: field order defines the bincode layout in snapshots; do not reorder.
pub struct BankIncrementalSnapshotPersistence {
    /// slot of full snapshot
    pub full_slot: Slot,
    /// accounts hash from the full snapshot
    pub full_hash: SerdeAccountsHash,
    /// capitalization from the full snapshot
    pub full_capitalization: u64,
    /// hash of the accounts in the incremental snapshot slot range, including zero-lamport accounts
    pub incremental_hash: SerdeIncrementalAccountsHash,
    /// capitalization of the accounts in the incremental snapshot slot range
    pub incremental_capitalization: u64,
}
111
/// Per-slot hash information stored inside `AccountsDbFields`.
/// Field order defines the bincode layout; do not reorder.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq)]
struct BankHashInfo {
    // hash of the accounts changed in the snapshot slot
    accounts_delta_hash: SerdeAccountsDeltaHash,
    // full accounts hash for the snapshot slot
    accounts_hash: SerdeAccountsHash,
    // stats accumulated while hashing the bank's accounts
    stats: BankHashStats,
}

/// Placeholder fields kept so the serialized bank layout stays compatible;
/// always written out as empty defaults (see `SerializableVersionedBank::from`).
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Default, Clone, PartialEq, Eq, Debug, Deserialize, Serialize)]
struct UnusedAccounts {
    unused1: HashSet<Pubkey>,
    unused2: HashSet<Pubkey>,
    unused3: HashMap<Pubkey, u64>,
}
127
// Deserializable version of Bank which need not be serializable,
// because it's handled by SerializableVersionedBank.
// So, sync fields with it!
//
// NOTE: serde/bincode reads these fields in declaration order, so the order
// here must match SerializableVersionedBank exactly or deserialization will
// silently produce garbage.
#[derive(Clone, Deserialize)]
struct DeserializableVersionedBank {
    blockhash_queue: BlockhashQueue,
    ancestors: AncestorsForSerialization,
    hash: Hash,
    parent_hash: Hash,
    parent_slot: Slot,
    hard_forks: HardForks,
    transaction_count: u64,
    tick_height: u64,
    signature_count: u64,
    capitalization: u64,
    max_tick_height: u64,
    hashes_per_tick: Option<u64>,
    ticks_per_slot: u64,
    ns_per_slot: u128,
    genesis_creation_time: UnixTimestamp,
    slots_per_year: f64,
    accounts_data_len: u64,
    slot: Slot,
    epoch: Epoch,
    block_height: u64,
    collector_id: Pubkey,
    collector_fees: u64,
    // legacy field: must remain so the layout matches serialized banks, but
    // its value is discarded (not copied in `From<DeserializableVersionedBank>`)
    _fee_calculator: FeeCalculator,
    fee_rate_governor: FeeRateGovernor,
    collected_rent: u64,
    rent_collector: RentCollector,
    epoch_schedule: EpochSchedule,
    inflation: Inflation,
    stakes: Stakes<Delegation>,
    // legacy placeholder: deserialized for layout compatibility, never read
    #[allow(dead_code)]
    unused_accounts: UnusedAccounts,
    epoch_stakes: HashMap<Epoch, EpochStakes>,
    is_delta: bool,
}
167
168impl From<DeserializableVersionedBank> for BankFieldsToDeserialize {
169    fn from(dvb: DeserializableVersionedBank) -> Self {
170        BankFieldsToDeserialize {
171            blockhash_queue: dvb.blockhash_queue,
172            ancestors: dvb.ancestors,
173            hash: dvb.hash,
174            parent_hash: dvb.parent_hash,
175            parent_slot: dvb.parent_slot,
176            hard_forks: dvb.hard_forks,
177            transaction_count: dvb.transaction_count,
178            tick_height: dvb.tick_height,
179            signature_count: dvb.signature_count,
180            capitalization: dvb.capitalization,
181            max_tick_height: dvb.max_tick_height,
182            hashes_per_tick: dvb.hashes_per_tick,
183            ticks_per_slot: dvb.ticks_per_slot,
184            ns_per_slot: dvb.ns_per_slot,
185            genesis_creation_time: dvb.genesis_creation_time,
186            slots_per_year: dvb.slots_per_year,
187            accounts_data_len: dvb.accounts_data_len,
188            slot: dvb.slot,
189            epoch: dvb.epoch,
190            block_height: dvb.block_height,
191            collector_id: dvb.collector_id,
192            collector_fees: dvb.collector_fees,
193            fee_rate_governor: dvb.fee_rate_governor,
194            collected_rent: dvb.collected_rent,
195            rent_collector: dvb.rent_collector,
196            epoch_schedule: dvb.epoch_schedule,
197            inflation: dvb.inflation,
198            stakes: dvb.stakes,
199            epoch_stakes: dvb.epoch_stakes,
200            is_delta: dvb.is_delta,
201            incremental_snapshot_persistence: None,
202            epoch_accounts_hash: None,
203            accounts_lt_hash: None, // populated from ExtraFieldsToDeserialize
204            bank_hash_stats: BankHashStats::default(), // populated from AccountsDbFields
205        }
206    }
207}
208
// Serializable version of Bank, not Deserializable to avoid cloning by using refs.
// Sync fields with DeserializableVersionedBank!
//
// NOTE: serde/bincode writes these fields in declaration order, so the order
// here must match DeserializableVersionedBank exactly — it defines the
// snapshot format.
#[derive(Serialize)]
struct SerializableVersionedBank {
    blockhash_queue: BlockhashQueue,
    ancestors: AncestorsForSerialization,
    hash: Hash,
    parent_hash: Hash,
    parent_slot: Slot,
    hard_forks: HardForks,
    transaction_count: u64,
    tick_height: u64,
    signature_count: u64,
    capitalization: u64,
    max_tick_height: u64,
    hashes_per_tick: Option<u64>,
    ticks_per_slot: u64,
    ns_per_slot: u128,
    genesis_creation_time: UnixTimestamp,
    slots_per_year: f64,
    accounts_data_len: u64,
    slot: Slot,
    epoch: Epoch,
    block_height: u64,
    collector_id: Pubkey,
    collector_fees: u64,
    // legacy field: always serialized as FeeCalculator::default() (see the
    // From<BankFieldsToSerialize> impl); kept for layout compatibility
    fee_calculator: FeeCalculator,
    fee_rate_governor: FeeRateGovernor,
    collected_rent: u64,
    rent_collector: RentCollector,
    epoch_schedule: EpochSchedule,
    inflation: Inflation,
    #[serde(serialize_with = "serde_stakes_to_delegation_format::serialize")]
    stakes: StakesEnum,
    // legacy placeholder: always serialized as empty defaults
    unused_accounts: UnusedAccounts,
    epoch_stakes: HashMap<Epoch, EpochStakes>,
    is_delta: bool,
}
247
248impl From<BankFieldsToSerialize> for SerializableVersionedBank {
249    fn from(rhs: BankFieldsToSerialize) -> Self {
250        Self {
251            blockhash_queue: rhs.blockhash_queue,
252            ancestors: rhs.ancestors,
253            hash: rhs.hash,
254            parent_hash: rhs.parent_hash,
255            parent_slot: rhs.parent_slot,
256            hard_forks: rhs.hard_forks,
257            transaction_count: rhs.transaction_count,
258            tick_height: rhs.tick_height,
259            signature_count: rhs.signature_count,
260            capitalization: rhs.capitalization,
261            max_tick_height: rhs.max_tick_height,
262            hashes_per_tick: rhs.hashes_per_tick,
263            ticks_per_slot: rhs.ticks_per_slot,
264            ns_per_slot: rhs.ns_per_slot,
265            genesis_creation_time: rhs.genesis_creation_time,
266            slots_per_year: rhs.slots_per_year,
267            accounts_data_len: rhs.accounts_data_len,
268            slot: rhs.slot,
269            epoch: rhs.epoch,
270            block_height: rhs.block_height,
271            collector_id: rhs.collector_id,
272            collector_fees: rhs.collector_fees,
273            fee_calculator: FeeCalculator::default(),
274            fee_rate_governor: rhs.fee_rate_governor,
275            collected_rent: rhs.collected_rent,
276            rent_collector: rhs.rent_collector,
277            epoch_schedule: rhs.epoch_schedule,
278            inflation: rhs.inflation,
279            stakes: rhs.stakes,
280            unused_accounts: UnusedAccounts::default(),
281            epoch_stakes: rhs.epoch_stakes,
282            is_delta: rhs.is_delta,
283        }
284    }
285}
286
// Frozen-ABI marker trait impl — see
// `solana_frozen_abi::abi_example::TransparentAsHelper` for its semantics.
#[cfg(feature = "frozen-abi")]
impl solana_frozen_abi::abi_example::TransparentAsHelper for SerializableVersionedBank {}
289
/// Helper type to wrap BufReader streams when deserializing and reconstructing from either just a
/// full snapshot, or both a full and incremental snapshot
pub struct SnapshotStreams<'a, R> {
    /// stream over the full snapshot archive contents (always present)
    pub full_snapshot_stream: &'a mut BufReader<R>,
    /// stream over the incremental snapshot contents, if one is being loaded
    pub incremental_snapshot_stream: Option<&'a mut BufReader<R>>,
}
296
297/// Helper type to wrap BankFields when reconstructing Bank from either just a full
298/// snapshot, or both a full and incremental snapshot
299#[derive(Debug)]
300pub struct SnapshotBankFields {
301    full: BankFieldsToDeserialize,
302    incremental: Option<BankFieldsToDeserialize>,
303}
304
305impl SnapshotBankFields {
306    /// Collapse the SnapshotBankFields into a single (the latest) BankFieldsToDeserialize.
307    pub fn collapse_into(self) -> BankFieldsToDeserialize {
308        self.incremental.unwrap_or(self.full)
309    }
310}
311
/// Helper type to wrap AccountsDbFields when reconstructing AccountsDb from either just a full
/// snapshot, or both a full and incremental snapshot
#[derive(Debug)]
pub struct SnapshotAccountsDbFields<T> {
    // fields from the full snapshot (always present)
    full_snapshot_accounts_db_fields: AccountsDbFields<T>,
    // fields from the incremental snapshot, if one is being loaded
    incremental_snapshot_accounts_db_fields: Option<AccountsDbFields<T>>,
}
319
320impl<T> SnapshotAccountsDbFields<T> {
321    /// Collapse the SnapshotAccountsDbFields into a single AccountsDbFields.  If there is no
322    /// incremental snapshot, this returns the AccountsDbFields from the full snapshot.
323    /// Otherwise, use the AccountsDbFields from the incremental snapshot, and a combination
324    /// of the storages from both the full and incremental snapshots.
325    fn collapse_into(self) -> Result<AccountsDbFields<T>, Error> {
326        match self.incremental_snapshot_accounts_db_fields {
327            None => Ok(self.full_snapshot_accounts_db_fields),
328            Some(AccountsDbFields(
329                mut incremental_snapshot_storages,
330                incremental_snapshot_version,
331                incremental_snapshot_slot,
332                incremental_snapshot_bank_hash_info,
333                incremental_snapshot_historical_roots,
334                incremental_snapshot_historical_roots_with_hash,
335            )) => {
336                let full_snapshot_storages = self.full_snapshot_accounts_db_fields.0;
337                let full_snapshot_slot = self.full_snapshot_accounts_db_fields.2;
338
339                // filter out incremental snapshot storages with slot <= full snapshot slot
340                incremental_snapshot_storages.retain(|slot, _| *slot > full_snapshot_slot);
341
342                // There must not be any overlap in the slots of storages between the full snapshot and the incremental snapshot
343                incremental_snapshot_storages
344                    .iter()
345                    .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then_some(()).ok_or_else(|| {
346                        io::Error::new(io::ErrorKind::InvalidData, "Snapshots are incompatible: There are storages for the same slot in both the full snapshot and the incremental snapshot!")
347                    })?;
348
349                let mut combined_storages = full_snapshot_storages;
350                combined_storages.extend(incremental_snapshot_storages);
351
352                Ok(AccountsDbFields(
353                    combined_storages,
354                    incremental_snapshot_version,
355                    incremental_snapshot_slot,
356                    incremental_snapshot_bank_hash_info,
357                    incremental_snapshot_historical_roots,
358                    incremental_snapshot_historical_roots_with_hash,
359                ))
360            }
361        }
362    }
363}
364
365fn deserialize_from<R, T>(reader: R) -> bincode::Result<T>
366where
367    R: Read,
368    T: DeserializeOwned,
369{
370    bincode::options()
371        .with_limit(MAX_STREAM_SIZE)
372        .with_fixint_encoding()
373        .allow_trailing_bytes()
374        .deserialize_from::<R, T>(reader)
375}
376
/// Deserializes the `AccountsDbFields` section of a snapshot from `stream`.
fn deserialize_accounts_db_fields<R>(
    stream: &mut BufReader<R>,
) -> Result<AccountsDbFields<SerializableAccountStorageEntry>, Error>
where
    R: Read,
{
    deserialize_from::<_, _>(stream)
}
385
/// Extra fields that are deserialized from the end of snapshots.
///
/// Note that this struct's fields should stay synced with the fields in
/// ExtraFieldsToSerialize with the exception that new "extra fields" should be
/// added to this struct a minor release before they are added to the serialize
/// struct.
///
/// Every field uses `default_on_eof` so that snapshots written before a given
/// field existed still deserialize (the missing trailing fields default).
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[cfg_attr(feature = "dev-context-only-utils", derive(PartialEq))]
#[derive(Clone, Debug, Deserialize)]
struct ExtraFieldsToDeserialize {
    #[serde(deserialize_with = "default_on_eof")]
    lamports_per_signature: u64,
    #[serde(deserialize_with = "default_on_eof")]
    incremental_snapshot_persistence: Option<BankIncrementalSnapshotPersistence>,
    #[serde(deserialize_with = "default_on_eof")]
    epoch_accounts_hash: Option<Hash>,
    #[serde(deserialize_with = "default_on_eof")]
    versioned_epoch_stakes: HashMap<u64, VersionedEpochStakes>,
    #[serde(deserialize_with = "default_on_eof")]
    accounts_lt_hash: Option<SerdeAccountsLtHash>,
}
407
/// Extra fields that are serialized at the end of snapshots.
///
/// Note that this struct's fields should stay synced with the fields in
/// ExtraFieldsToDeserialize with the exception that new "extra fields" should
/// be added to the deserialize struct a minor release before they are added to
/// this one.
///
/// Field order defines the serialized layout; it must match
/// ExtraFieldsToDeserialize.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[cfg_attr(feature = "dev-context-only-utils", derive(Default, PartialEq))]
#[derive(Debug, Serialize)]
pub struct ExtraFieldsToSerialize<'a> {
    pub lamports_per_signature: u64,
    pub incremental_snapshot_persistence: Option<&'a BankIncrementalSnapshotPersistence>,
    pub epoch_accounts_hash: Option<EpochAccountsHash>,
    pub versioned_epoch_stakes: HashMap<u64, VersionedEpochStakes>,
    pub accounts_lt_hash: Option<SerdeAccountsLtHash>,
}
424
425fn deserialize_bank_fields<R>(
426    mut stream: &mut BufReader<R>,
427) -> Result<
428    (
429        BankFieldsToDeserialize,
430        AccountsDbFields<SerializableAccountStorageEntry>,
431    ),
432    Error,
433>
434where
435    R: Read,
436{
437    let mut bank_fields: BankFieldsToDeserialize =
438        deserialize_from::<_, DeserializableVersionedBank>(&mut stream)?.into();
439    let accounts_db_fields = deserialize_accounts_db_fields(stream)?;
440    let extra_fields = deserialize_from(stream)?;
441
442    // Process extra fields
443    let ExtraFieldsToDeserialize {
444        lamports_per_signature,
445        incremental_snapshot_persistence,
446        epoch_accounts_hash,
447        versioned_epoch_stakes,
448        accounts_lt_hash,
449    } = extra_fields;
450
451    bank_fields.fee_rate_governor = bank_fields
452        .fee_rate_governor
453        .clone_with_lamports_per_signature(lamports_per_signature);
454    bank_fields.incremental_snapshot_persistence = incremental_snapshot_persistence;
455    bank_fields.epoch_accounts_hash = epoch_accounts_hash;
456
457    // If we deserialize the new epoch stakes, add all of the entries into the
458    // other deserialized map which could still have old epoch stakes entries
459    bank_fields.epoch_stakes.extend(
460        versioned_epoch_stakes
461            .into_iter()
462            .map(|(epoch, versioned_epoch_stakes)| (epoch, versioned_epoch_stakes.into())),
463    );
464
465    bank_fields.accounts_lt_hash = accounts_lt_hash.map(Into::into);
466
467    Ok((bank_fields, accounts_db_fields))
468}
469
/// used by tests to compare contents of serialized bank fields
/// serialized format is not deterministic - likely due to randomness in structs like hashmaps
#[cfg(feature = "dev-context-only-utils")]
pub(crate) fn compare_two_serialized_banks(
    path1: impl AsRef<Path>,
    path2: impl AsRef<Path>,
) -> std::result::Result<bool, Error> {
    use std::fs::File;

    // Deserialize both files into structured fields and compare those, since
    // the raw serialized bytes are not comparable.
    let mut reader1 = BufReader::new(File::open(path1)?);
    let mut reader2 = BufReader::new(File::open(path2)?);
    let fields1 = deserialize_bank_fields(&mut reader1)?;
    let fields2 = deserialize_bank_fields(&mut reader2)?;
    Ok(fields1 == fields2)
}
487
/// Get snapshot storage lengths from accounts_db_fields
pub(crate) fn snapshot_storage_lengths_from_fields(
    accounts_db_fields: &AccountsDbFields<SerializableAccountStorageEntry>,
) -> HashMap<Slot, HashMap<SerializedAccountsFileId, usize>> {
    // Only the first element (slot -> storage entries) is needed here.
    let AccountsDbFields(snapshot_storage, ..) = &accounts_db_fields;
    // For each slot, map each storage entry's file id to its current length.
    snapshot_storage
        .iter()
        .map(|(slot, slot_storage)| {
            (
                *slot,
                slot_storage
                    .iter()
                    .map(|storage_entry| (storage_entry.id(), storage_entry.current_len()))
                    .collect(),
            )
        })
        .collect()
}
506
/// Deserializes the bank fields and accounts-db fields from a single snapshot
/// stream.  Thin wrapper around `deserialize_bank_fields`.
pub(crate) fn fields_from_stream<R: Read>(
    snapshot_stream: &mut BufReader<R>,
) -> std::result::Result<
    (
        BankFieldsToDeserialize,
        AccountsDbFields<SerializableAccountStorageEntry>,
    ),
    Error,
> {
    deserialize_bank_fields(snapshot_stream)
}
518
519pub(crate) fn fields_from_streams(
520    snapshot_streams: &mut SnapshotStreams<impl Read>,
521) -> std::result::Result<
522    (
523        SnapshotBankFields,
524        SnapshotAccountsDbFields<SerializableAccountStorageEntry>,
525    ),
526    Error,
527> {
528    let (full_snapshot_bank_fields, full_snapshot_accounts_db_fields) =
529        fields_from_stream(snapshot_streams.full_snapshot_stream)?;
530    let (incremental_snapshot_bank_fields, incremental_snapshot_accounts_db_fields) =
531        snapshot_streams
532            .incremental_snapshot_stream
533            .as_mut()
534            .map(|stream| fields_from_stream(stream))
535            .transpose()?
536            .unzip();
537
538    let snapshot_bank_fields = SnapshotBankFields {
539        full: full_snapshot_bank_fields,
540        incremental: incremental_snapshot_bank_fields,
541    };
542    let snapshot_accounts_db_fields = SnapshotAccountsDbFields {
543        full_snapshot_accounts_db_fields,
544        incremental_snapshot_accounts_db_fields,
545    };
546    Ok((snapshot_bank_fields, snapshot_accounts_db_fields))
547}
548
/// This struct contains side-info while reconstructing the bank from streams
#[derive(Debug)]
pub struct BankFromStreamsInfo {
    // lt hash of duplicate accounts found during index generation, if any
    pub duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
}
554
/// Deserializes a bank from `snapshot_streams` (full snapshot, plus an
/// optional incremental snapshot) and reconstructs it together with its
/// accounts state.
///
/// The remaining arguments are passed through to
/// `reconstruct_bank_from_fields`, which performs the actual rebuild.
/// Returns the reconstructed `Bank` and side-info gathered during
/// reconstruction.
#[allow(clippy::too_many_arguments)]
pub(crate) fn bank_from_streams<R>(
    snapshot_streams: &mut SnapshotStreams<R>,
    account_paths: &[PathBuf],
    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
    genesis_config: &GenesisConfig,
    runtime_config: &RuntimeConfig,
    debug_keys: Option<Arc<HashSet<Pubkey>>>,
    additional_builtins: Option<&[BuiltinPrototype]>,
    limit_load_slot_count_from_snapshot: Option<usize>,
    verify_index: bool,
    accounts_db_config: Option<AccountsDbConfig>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    exit: Arc<AtomicBool>,
) -> std::result::Result<(Bank, BankFromStreamsInfo), Error>
where
    R: Read,
{
    let (bank_fields, accounts_db_fields) = fields_from_streams(snapshot_streams)?;
    let (bank, info) = reconstruct_bank_from_fields(
        bank_fields,
        accounts_db_fields,
        genesis_config,
        runtime_config,
        account_paths,
        storage_and_next_append_vec_id,
        debug_keys,
        additional_builtins,
        limit_load_slot_count_from_snapshot,
        verify_index,
        accounts_db_config,
        accounts_update_notifier,
        exit,
    )?;
    // Re-wrap the internal reconstruction info into the public return type.
    Ok((
        bank,
        BankFromStreamsInfo {
            duplicates_lt_hash: info.duplicates_lt_hash,
        },
    ))
}
596
/// Test helper: serializes `bank` and its snapshot storages into `stream`
/// with bincode, including the trailing extra fields.
#[cfg(test)]
pub(crate) fn bank_to_stream<W>(
    stream: &mut BufWriter<W>,
    bank: &Bank,
    snapshot_storages: &[Vec<Arc<AccountStorageEntry>>],
) -> Result<(), Error>
where
    W: Write,
{
    let serializable = SerializableBankAndStorage {
        bank,
        snapshot_storages,
    };
    bincode::serialize_into(stream, &serializable)
}
614
/// Test helper: serializes `bank` and its snapshot storages into `stream`
/// with bincode, omitting the trailing extra fields (emulates old snapshots).
#[cfg(test)]
pub(crate) fn bank_to_stream_no_extra_fields<W>(
    stream: &mut BufWriter<W>,
    bank: &Bank,
    snapshot_storages: &[Vec<Arc<AccountStorageEntry>>],
) -> Result<(), Error>
where
    W: Write,
{
    let serializable = SerializableBankAndStorageNoExtra {
        bank,
        snapshot_storages,
    };
    bincode::serialize_into(stream, &serializable)
}
632
633/// Serializes bank snapshot into `stream` with bincode
634pub fn serialize_bank_snapshot_into<W>(
635    stream: &mut BufWriter<W>,
636    bank_fields: BankFieldsToSerialize,
637    bank_hash_stats: BankHashStats,
638    accounts_delta_hash: AccountsDeltaHash,
639    accounts_hash: AccountsHash,
640    account_storage_entries: &[Vec<Arc<AccountStorageEntry>>],
641    extra_fields: ExtraFieldsToSerialize,
642    write_version: StoredMetaWriteVersion,
643) -> Result<(), Error>
644where
645    W: Write,
646{
647    let mut serializer = bincode::Serializer::new(
648        stream,
649        bincode::DefaultOptions::new().with_fixint_encoding(),
650    );
651    serialize_bank_snapshot_with(
652        &mut serializer,
653        bank_fields,
654        bank_hash_stats,
655        accounts_delta_hash,
656        accounts_hash,
657        account_storage_entries,
658        extra_fields,
659        write_version,
660    )
661}
662
663/// Serializes bank snapshot with `serializer`
664pub fn serialize_bank_snapshot_with<S>(
665    serializer: S,
666    bank_fields: BankFieldsToSerialize,
667    bank_hash_stats: BankHashStats,
668    accounts_delta_hash: AccountsDeltaHash,
669    accounts_hash: AccountsHash,
670    account_storage_entries: &[Vec<Arc<AccountStorageEntry>>],
671    extra_fields: ExtraFieldsToSerialize,
672    write_version: StoredMetaWriteVersion,
673) -> Result<S::Ok, S::Error>
674where
675    S: serde::Serializer,
676{
677    let slot = bank_fields.slot;
678    let serializable_bank = SerializableVersionedBank::from(bank_fields);
679    let serializable_accounts_db = SerializableAccountsDb::<'_> {
680        slot,
681        account_storage_entries,
682        bank_hash_stats,
683        accounts_delta_hash,
684        accounts_hash,
685        write_version,
686    };
687    (serializable_bank, serializable_accounts_db, extra_fields).serialize(serializer)
688}
689
/// Test-only wrapper that serializes a bank together with its snapshot
/// storages (including the trailing extra fields).
#[cfg(test)]
struct SerializableBankAndStorage<'a> {
    bank: &'a Bank,
    snapshot_storages: &'a [Vec<Arc<AccountStorageEntry>>],
}
695
#[cfg(test)]
impl Serialize for SerializableBankAndStorage<'_> {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        let slot = self.bank.slot();
        let mut bank_fields = self.bank.get_fields_to_serialize();
        let accounts_db = &self.bank.rc.accounts.accounts_db;
        let bank_hash_stats = self.bank.get_bank_hash_stats();
        let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap();
        let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0;
        let write_version = accounts_db.write_version.load(Ordering::Acquire);
        let lamports_per_signature = bank_fields.fee_rate_governor.lamports_per_signature;
        // Move the versioned epoch stakes out of `bank_fields` before it is
        // consumed by SerializableVersionedBank::from below; they are
        // serialized separately in the extra-fields section.
        let versioned_epoch_stakes = std::mem::take(&mut bank_fields.versioned_epoch_stakes);
        let accounts_lt_hash = bank_fields.accounts_lt_hash.clone().map(Into::into);
        // The snapshot layout is this 3-tuple: bank fields, accounts-db
        // fields, then the trailing extra fields.
        let bank_fields_to_serialize = (
            SerializableVersionedBank::from(bank_fields),
            SerializableAccountsDb::<'_> {
                slot,
                account_storage_entries: self.snapshot_storages,
                bank_hash_stats,
                accounts_delta_hash,
                accounts_hash,
                write_version,
            },
            ExtraFieldsToSerialize {
                lamports_per_signature,
                incremental_snapshot_persistence: None,
                epoch_accounts_hash: self.bank.get_epoch_accounts_hash_to_serialize(),
                versioned_epoch_stakes,
                accounts_lt_hash,
            },
        );
        bank_fields_to_serialize.serialize(serializer)
    }
}
733
/// Test-only wrapper that serializes a bank together with its snapshot
/// storages, but WITHOUT the trailing extra fields (emulates old snapshots).
#[cfg(test)]
struct SerializableBankAndStorageNoExtra<'a> {
    bank: &'a Bank,
    snapshot_storages: &'a [Vec<Arc<AccountStorageEntry>>],
}
739
#[cfg(test)]
impl Serialize for SerializableBankAndStorageNoExtra<'_> {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        let slot = self.bank.slot();
        let bank_fields = self.bank.get_fields_to_serialize();
        let accounts_db = &self.bank.rc.accounts.accounts_db;
        let bank_hash_stats = self.bank.get_bank_hash_stats();
        let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap();
        let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0;
        let write_version = accounts_db.write_version.load(Ordering::Acquire);
        // Only the first two sections (bank fields, accounts-db fields) are
        // written; the ExtraFieldsToSerialize section is deliberately omitted.
        (
            SerializableVersionedBank::from(bank_fields),
            SerializableAccountsDb::<'_> {
                slot,
                account_storage_entries: self.snapshot_storages,
                bank_hash_stats,
                accounts_delta_hash,
                accounts_hash,
                write_version,
            },
        )
            .serialize(serializer)
    }
}
767
#[cfg(test)]
impl<'a> From<SerializableBankAndStorageNoExtra<'a>> for SerializableBankAndStorage<'a> {
    /// Re-wraps the same bank and storages so they serialize WITH the
    /// trailing extra fields.
    fn from(s: SerializableBankAndStorageNoExtra<'a>) -> SerializableBankAndStorage<'a> {
        SerializableBankAndStorage {
            bank: s.bank,
            snapshot_storages: s.snapshot_storages,
        }
    }
}
781
/// Serializable form of the AccountsDb state; its Serialize impl writes the
/// same tuple layout that `AccountsDbFields` deserializes.
struct SerializableAccountsDb<'a> {
    slot: Slot,
    account_storage_entries: &'a [Vec<Arc<AccountStorageEntry>>],
    bank_hash_stats: BankHashStats,
    accounts_delta_hash: AccountsDeltaHash,
    accounts_hash: AccountsHash,
    write_version: StoredMetaWriteVersion,
}
790
impl Serialize for SerializableAccountsDb<'_> {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        // (1st of 3 elements) write the list of account storage entry lists out as a map
        // RefCell lets the lazily-evaluated serialization closure below count
        // entries while only holding a shared borrow.
        let entry_count = RefCell::<usize>::new(0);
        let entries = utils::serialize_iter_as_map(self.account_storage_entries.iter().map(|x| {
            *entry_count.borrow_mut() += x.len();
            (
                // each inner Vec holds entries for a single slot; key the map
                // by the first entry's slot
                x.first().unwrap().slot(),
                utils::serialize_iter_as_seq(
                    x.iter()
                        .map(|x| SerializableAccountStorageEntry::from(x.as_ref())),
                ),
            )
        }));
        let bank_hash_info = BankHashInfo {
            accounts_delta_hash: self.accounts_delta_hash.into(),
            accounts_hash: self.accounts_hash.into(),
            stats: self.bank_hash_stats.clone(),
        };

        // Always written empty; the deserialize side tolerates their absence
        // too (see the default_on_eof fields of AccountsDbFields).
        let historical_roots = Vec::<Slot>::default();
        let historical_roots_with_hash = Vec::<(Slot, Hash)>::default();

        let mut serialize_account_storage_timer = Measure::start("serialize_account_storage_ms");
        // This tuple order must match the AccountsDbFields element order.
        let result = (
            entries,
            self.write_version,
            self.slot,
            bank_hash_info,
            historical_roots,
            historical_roots_with_hash,
        )
            .serialize(serializer);
        serialize_account_storage_timer.stop();
        datapoint_info!(
            "serialize_account_storage_ms",
            ("duration", serialize_account_storage_timer.as_ms(), i64),
            ("num_entries", *entry_count.borrow(), i64),
        );
        result
    }
}
836
// Marker impl for the frozen-abi machinery; NOTE(review): the exact
// `TransparentAsHelper` contract lives in solana_frozen_abi — confirm there.
#[cfg(feature = "frozen-abi")]
impl solana_frozen_abi::abi_example::TransparentAsHelper for SerializableAccountsDb<'_> {}
839
/// This struct contains side-info while reconstructing the bank from fields
#[derive(Debug)]
struct ReconstructedBankInfo {
    // lt hash of duplicate accounts found while generating the index,
    // forwarded from ReconstructedAccountsDbInfo
    duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
}
845
846#[allow(clippy::too_many_arguments)]
847fn reconstruct_bank_from_fields<E>(
848    bank_fields: SnapshotBankFields,
849    snapshot_accounts_db_fields: SnapshotAccountsDbFields<E>,
850    genesis_config: &GenesisConfig,
851    runtime_config: &RuntimeConfig,
852    account_paths: &[PathBuf],
853    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
854    debug_keys: Option<Arc<HashSet<Pubkey>>>,
855    additional_builtins: Option<&[BuiltinPrototype]>,
856    limit_load_slot_count_from_snapshot: Option<usize>,
857    verify_index: bool,
858    accounts_db_config: Option<AccountsDbConfig>,
859    accounts_update_notifier: Option<AccountsUpdateNotifier>,
860    exit: Arc<AtomicBool>,
861) -> Result<(Bank, ReconstructedBankInfo), Error>
862where
863    E: SerializableStorage + std::marker::Sync,
864{
865    let capitalizations = (
866        bank_fields.full.capitalization,
867        bank_fields
868            .incremental
869            .as_ref()
870            .map(|bank_fields| bank_fields.capitalization),
871    );
872    let mut bank_fields = bank_fields.collapse_into();
873    let (accounts_db, reconstructed_accounts_db_info) = reconstruct_accountsdb_from_fields(
874        snapshot_accounts_db_fields,
875        account_paths,
876        storage_and_next_append_vec_id,
877        genesis_config,
878        limit_load_slot_count_from_snapshot,
879        verify_index,
880        accounts_db_config,
881        accounts_update_notifier,
882        exit,
883        bank_fields.epoch_accounts_hash,
884        capitalizations,
885        bank_fields.incremental_snapshot_persistence.as_ref(),
886        bank_fields.accounts_lt_hash.is_some(),
887    )?;
888    bank_fields.bank_hash_stats = reconstructed_accounts_db_info.bank_hash_stats;
889
890    let bank_rc = BankRc::new(Accounts::new(Arc::new(accounts_db)));
891    let runtime_config = Arc::new(runtime_config.clone());
892
893    // if limit_load_slot_count_from_snapshot is set, then we need to side-step some correctness checks beneath this call
894    let debug_do_not_add_builtins = limit_load_slot_count_from_snapshot.is_some();
895    let bank = Bank::new_from_fields(
896        bank_rc,
897        genesis_config,
898        runtime_config,
899        bank_fields,
900        debug_keys,
901        additional_builtins,
902        debug_do_not_add_builtins,
903        reconstructed_accounts_db_info.accounts_data_len,
904    );
905
906    info!("rent_collector: {:?}", bank.rent_collector());
907    Ok((
908        bank,
909        ReconstructedBankInfo {
910            duplicates_lt_hash: reconstructed_accounts_db_info.duplicates_lt_hash,
911        },
912    ))
913}
914
915pub(crate) fn reconstruct_single_storage(
916    slot: &Slot,
917    append_vec_path: &Path,
918    current_len: usize,
919    append_vec_id: AccountsFileId,
920    storage_access: StorageAccess,
921) -> Result<Arc<AccountStorageEntry>, SnapshotError> {
922    let (accounts_file, num_accounts) =
923        AccountsFile::new_from_file(append_vec_path, current_len, storage_access)?;
924    Ok(Arc::new(AccountStorageEntry::new_existing(
925        *slot,
926        append_vec_id,
927        accounts_file,
928        num_accounts,
929    )))
930}
931
// Remap the AppendVec ID to handle any duplicate IDs that may have previously
// existed due to full snapshots and incremental snapshots generated from
// different nodes
pub(crate) fn remap_append_vec_file(
    slot: Slot,
    old_append_vec_id: SerializedAccountsFileId,
    append_vec_path: &Path,
    next_append_vec_id: &AtomicAccountsFileId,
    num_collisions: &AtomicUsize,
) -> io::Result<(AccountsFileId, PathBuf)> {
    // NOTE(review): this cstring is only consumed by the gnu-only
    // rename_no_replace() branch below; on linux+musl it is computed but
    // unused — confirm whether that triggers an unused-variable warning there.
    #[cfg(target_os = "linux")]
    let append_vec_path_cstr = cstring_from_path(append_vec_path)?;

    let mut remapped_append_vec_path = append_vec_path.to_path_buf();

    // Break out of the loop in the following situations:
    // 1. The new ID is the same as the original ID.  This means we do not need to
    //    rename the file, since the ID is the "correct" one already.
    // 2. There is not a file already at the new path.  This means it is safe to
    //    rename the file to this new path.
    let (remapped_append_vec_id, remapped_append_vec_path) = loop {
        // Candidate IDs are drawn monotonically from the shared counter, so
        // each loop iteration tries a fresh ID.
        let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::AcqRel);

        // this can only happen in the first iteration of the loop
        if old_append_vec_id == remapped_append_vec_id as SerializedAccountsFileId {
            break (remapped_append_vec_id, remapped_append_vec_path);
        }

        let remapped_file_name = AccountsFile::file_name(slot, remapped_append_vec_id);
        remapped_append_vec_path = append_vec_path.parent().unwrap().join(remapped_file_name);

        #[cfg(all(target_os = "linux", target_env = "gnu"))]
        {
            let remapped_append_vec_path_cstr = cstring_from_path(&remapped_append_vec_path)?;

            // On linux we use renameat2(NO_REPLACE) instead of IF metadata(path).is_err() THEN
            // rename() in order to save a statx() syscall.
            match rename_no_replace(&append_vec_path_cstr, &remapped_append_vec_path_cstr) {
                // If the file was successfully renamed, break out of the loop
                Ok(_) => break (remapped_append_vec_id, remapped_append_vec_path),
                // If there's already a file at the new path, continue so we try
                // the next ID
                Err(e) if e.kind() == io::ErrorKind::AlreadyExists => {}
                Err(e) => return Err(e),
            }
        }

        // Non-gnu fallback: probe for an existing file, and rename after the
        // loop (see below).  Not atomic like renameat2(NO_REPLACE), but these
        // snapshot paths are not expected to be contended by other processes.
        #[cfg(any(
            not(target_os = "linux"),
            all(target_os = "linux", not(target_env = "gnu"))
        ))]
        if std::fs::metadata(&remapped_append_vec_path).is_err() {
            break (remapped_append_vec_id, remapped_append_vec_path);
        }

        // If we made it this far, a file exists at the new path.  Record the collision
        // and try again.
        num_collisions.fetch_add(1, Ordering::Relaxed);
    };

    // Only rename the file if the new ID is actually different from the original. In the
    // linux-gnu case, we have already renamed inside the loop if necessary.
    #[cfg(any(
        not(target_os = "linux"),
        all(target_os = "linux", not(target_env = "gnu"))
    ))]
    if old_append_vec_id != remapped_append_vec_id as SerializedAccountsFileId {
        std::fs::rename(append_vec_path, &remapped_append_vec_path)?;
    }

    Ok((remapped_append_vec_id, remapped_append_vec_path))
}
1004
1005pub(crate) fn remap_and_reconstruct_single_storage(
1006    slot: Slot,
1007    old_append_vec_id: SerializedAccountsFileId,
1008    current_len: usize,
1009    append_vec_path: &Path,
1010    next_append_vec_id: &AtomicAccountsFileId,
1011    num_collisions: &AtomicUsize,
1012    storage_access: StorageAccess,
1013) -> Result<Arc<AccountStorageEntry>, SnapshotError> {
1014    let (remapped_append_vec_id, remapped_append_vec_path) = remap_append_vec_file(
1015        slot,
1016        old_append_vec_id,
1017        append_vec_path,
1018        next_append_vec_id,
1019        num_collisions,
1020    )?;
1021    let storage = reconstruct_single_storage(
1022        &slot,
1023        &remapped_append_vec_path,
1024        current_len,
1025        remapped_append_vec_id,
1026        storage_access,
1027    )?;
1028    Ok(storage)
1029}
1030
/// This struct contains side-info while reconstructing the accounts DB from fields.
#[derive(Debug, Default, Clone)]
pub struct ReconstructedAccountsDbInfo {
    // total accounts data size, as computed during index generation
    pub accounts_data_len: u64,
    // lt hash of duplicate accounts found during index generation, if
    // lattice-hash verification was enabled
    pub duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
    // bank hash stats recovered from the snapshot's BankHashInfo
    pub bank_hash_stats: BankHashStats,
}
1038
/// Reconstructs an `AccountsDb` from deserialized snapshot fields.
///
/// Seeds the new accounts db with the accounts hashes and capitalizations from
/// the full (and optionally incremental) snapshot, installs the deserialized
/// storages, then generates the accounts index.  Returns the accounts db plus
/// side-info (`ReconstructedAccountsDbInfo`) gathered while rebuilding it.
#[allow(clippy::too_many_arguments)]
fn reconstruct_accountsdb_from_fields<E>(
    snapshot_accounts_db_fields: SnapshotAccountsDbFields<E>,
    account_paths: &[PathBuf],
    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
    genesis_config: &GenesisConfig,
    limit_load_slot_count_from_snapshot: Option<usize>,
    verify_index: bool,
    accounts_db_config: Option<AccountsDbConfig>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    exit: Arc<AtomicBool>,
    epoch_accounts_hash: Option<Hash>,
    // (full snapshot capitalization, optional incremental snapshot capitalization)
    capitalizations: (u64, Option<u64>),
    incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>,
    has_accounts_lt_hash: bool,
) -> Result<(AccountsDb, ReconstructedAccountsDbInfo), Error>
where
    E: SerializableStorage + std::marker::Sync,
{
    let mut accounts_db = AccountsDb::new_with_config(
        account_paths.to_vec(),
        accounts_db_config,
        accounts_update_notifier,
        exit,
    );

    if let Some(epoch_accounts_hash) = epoch_accounts_hash {
        // NOTE(review): the epoch accounts hash is installed with slot 0 here;
        // presumably the originating slot is not needed on restore — confirm
        // against epoch_accounts_hash_manager's contract.
        accounts_db
            .epoch_accounts_hash_manager
            .set_valid(EpochAccountsHash::new(epoch_accounts_hash), 0);
    }

    // Store the accounts hash & capitalization, from the full snapshot, in the new AccountsDb
    {
        let AccountsDbFields(_, _, slot, bank_hash_info, _, _) =
            &snapshot_accounts_db_fields.full_snapshot_accounts_db_fields;

        if let Some(incremental_snapshot_persistence) = incremental_snapshot_persistence {
            // If we've booted from local state that was originally intended to be an incremental
            // snapshot, then we will use the incremental snapshot persistence field to set the
            // initial accounts hashes in accounts db.
            let old_accounts_hash = accounts_db.set_accounts_hash_from_snapshot(
                incremental_snapshot_persistence.full_slot,
                incremental_snapshot_persistence.full_hash.clone(),
                incremental_snapshot_persistence.full_capitalization,
            );
            // The accounts db is freshly constructed above, so no hash can
            // already be present; a Some here indicates a logic bug.
            assert!(
                old_accounts_hash.is_none(),
                "There should not already be an AccountsHash at slot {slot}: {old_accounts_hash:?}",
            );
            let old_incremental_accounts_hash = accounts_db
                .set_incremental_accounts_hash_from_snapshot(
                    *slot,
                    incremental_snapshot_persistence.incremental_hash.clone(),
                    incremental_snapshot_persistence.incremental_capitalization,
                );
            assert!(
                old_incremental_accounts_hash.is_none(),
                "There should not already be an IncrementalAccountsHash at slot {slot}: {old_incremental_accounts_hash:?}",
            );
        } else {
            // Otherwise, we've booted from a snapshot archive, or from local state that was *not*
            // intended to be an incremental snapshot.
            let old_accounts_hash = accounts_db.set_accounts_hash_from_snapshot(
                *slot,
                bank_hash_info.accounts_hash.clone(),
                capitalizations.0,
            );
            assert!(
                old_accounts_hash.is_none(),
                "There should not already be an AccountsHash at slot {slot}: {old_accounts_hash:?}",
            );
        }
    }

    // Store the accounts hash & capitalization, from the incremental snapshot, in the new AccountsDb
    {
        if let Some(AccountsDbFields(_, _, slot, bank_hash_info, _, _)) =
            snapshot_accounts_db_fields
                .incremental_snapshot_accounts_db_fields
                .as_ref()
        {
            if let Some(incremental_snapshot_persistence) = incremental_snapshot_persistence {
                // Use the presence of a BankIncrementalSnapshotPersistence to indicate the
                // Incremental Accounts Hash feature is enabled, and use its accounts hashes
                // instead of `BankHashInfo`'s.
                let AccountsDbFields(_, _, full_slot, full_bank_hash_info, _, _) =
                    &snapshot_accounts_db_fields.full_snapshot_accounts_db_fields;
                let full_accounts_hash = &full_bank_hash_info.accounts_hash;
                // The persistence info must be internally consistent with the
                // full snapshot it claims to be based on: same slot, same
                // accounts hash, same capitalization.
                assert_eq!(
                    incremental_snapshot_persistence.full_slot, *full_slot,
                    "The incremental snapshot's base slot ({}) must match the full snapshot's slot ({full_slot})!",
                    incremental_snapshot_persistence.full_slot,
                );
                assert_eq!(
                    &incremental_snapshot_persistence.full_hash, full_accounts_hash,
                    "The incremental snapshot's base accounts hash ({}) must match the full snapshot's accounts hash ({})!",
                    &incremental_snapshot_persistence.full_hash.0, full_accounts_hash.0,
                );
                assert_eq!(
                    incremental_snapshot_persistence.full_capitalization, capitalizations.0,
                    "The incremental snapshot's base capitalization ({}) must match the full snapshot's capitalization ({})!",
                    incremental_snapshot_persistence.full_capitalization, capitalizations.0,
                );
                let old_incremental_accounts_hash = accounts_db
                    .set_incremental_accounts_hash_from_snapshot(
                        *slot,
                        incremental_snapshot_persistence.incremental_hash.clone(),
                        incremental_snapshot_persistence.incremental_capitalization,
                    );
                assert!(
                    old_incremental_accounts_hash.is_none(),
                    "There should not already be an IncrementalAccountsHash at slot {slot}: {old_incremental_accounts_hash:?}",
                );
            } else {
                // ..and without a BankIncrementalSnapshotPersistence then the Incremental Accounts
                // Hash feature is disabled; the accounts hash in `BankHashInfo` is valid.
                let old_accounts_hash = accounts_db.set_accounts_hash_from_snapshot(
                    *slot,
                    bank_hash_info.accounts_hash.clone(),
                    capitalizations
                        .1
                        .expect("capitalization from incremental snapshot"),
                );
                assert!(
                    old_accounts_hash.is_none(),
                    "There should not already be an AccountsHash at slot {slot}: {old_accounts_hash:?}",
                );
            };
        }
    }

    // Collapse full + incremental fields into the single effective field set
    // used from here on.
    let AccountsDbFields(
        _snapshot_storages,
        snapshot_version,
        snapshot_slot,
        snapshot_bank_hash_info,
        _snapshot_historical_roots,
        _snapshot_historical_roots_with_hash,
    ) = snapshot_accounts_db_fields.collapse_into()?;

    // Ensure all account paths exist
    for path in &accounts_db.paths {
        std::fs::create_dir_all(path)
            .unwrap_or_else(|err| panic!("Failed to create directory {}: {}", path.display(), err));
    }

    let StorageAndNextAccountsFileId {
        storage,
        next_append_vec_id,
    } = storage_and_next_append_vec_id;

    assert!(
        !storage.is_empty(),
        "At least one storage entry must exist from deserializing stream"
    );

    // `next_append_vec_id` was incremented once per reconstructed storage, and
    // storage is non-empty (asserted above), so the subtraction cannot
    // underflow.
    let next_append_vec_id = next_append_vec_id.load(Ordering::Acquire);
    let max_append_vec_id = next_append_vec_id - 1;
    // NOTE(review): the upper half of the id space appears to be reserved
    // (MAX / 2 bound) — confirm what the upper ids are reserved for.
    assert!(
        max_append_vec_id <= AccountsFileId::MAX / 2,
        "Storage id {max_append_vec_id} larger than allowed max"
    );

    // Process deserialized data, set necessary fields in self
    let old_accounts_delta_hash = accounts_db.set_accounts_delta_hash_from_snapshot(
        snapshot_slot,
        snapshot_bank_hash_info.accounts_delta_hash,
    );
    assert!(
        old_accounts_delta_hash.is_none(),
        "There should not already be an AccountsDeltaHash at slot {snapshot_slot}: {old_accounts_delta_hash:?}",
        );
    accounts_db.storage.initialize(storage);
    accounts_db
        .next_id
        .store(next_append_vec_id, Ordering::Release);
    // Fold the snapshot's write version into the db's running write version.
    accounts_db
        .write_version
        .fetch_add(snapshot_version, Ordering::Release);

    let mut measure_notify = Measure::start("accounts_notify");

    // Run the restore notification on a background thread so it overlaps with
    // index generation below; it is joined before the db is returned.
    let accounts_db = Arc::new(accounts_db);
    let accounts_db_clone = accounts_db.clone();
    let handle = Builder::new()
        .name("solNfyAccRestor".to_string())
        .spawn(move || {
            accounts_db_clone.notify_account_restore_from_snapshot();
        })
        .unwrap();

    // When generating the index, we want to calculate the duplicates lt hash value (needed to do
    // the lattice-based verification of the accounts in the background) optimistically.
    // This means, either when the cli arg is set, or when the snapshot has an accounts lt hash.
    let is_accounts_lt_hash_enabled =
        accounts_db.is_experimental_accumulator_hash_enabled() || has_accounts_lt_hash;
    let IndexGenerationInfo {
        accounts_data_len,
        rent_paying_accounts_by_partition,
        duplicates_lt_hash,
    } = accounts_db.generate_index(
        limit_load_slot_count_from_snapshot,
        verify_index,
        genesis_config,
        is_accounts_lt_hash_enabled,
    );
    // `set` on the OnceLock-like holder may only be called once; a second call
    // (an Err here) would indicate a logic bug.
    accounts_db
        .accounts_index
        .rent_paying_accounts_by_partition
        .set(rent_paying_accounts_by_partition)
        .unwrap();

    handle.join().unwrap();
    measure_notify.stop();

    datapoint_info!(
        "reconstruct_accountsdb_from_fields()",
        ("accountsdb-notify-at-start-us", measure_notify.as_us(), i64),
    );

    Ok((
        // The notify thread (the only other holder of the Arc) was joined
        // above, so this unwrap cannot observe another strong reference.
        Arc::try_unwrap(accounts_db).unwrap(),
        ReconstructedAccountsDbInfo {
            accounts_data_len,
            duplicates_lt_hash,
            bank_hash_stats: snapshot_bank_hash_info.stats,
        },
    ))
}
1269
// Rename `src` to `dest` only if `dest` doesn't already exist.
#[cfg(all(target_os = "linux", target_env = "gnu"))]
fn rename_no_replace(src: &CStr, dest: &CStr) -> io::Result<()> {
    // SAFETY: `src` and `dest` are valid NUL-terminated C strings for the
    // duration of the call, and renameat2() does not retain the pointers
    // after returning.
    let ret = unsafe {
        libc::renameat2(
            libc::AT_FDCWD,
            src.as_ptr() as *const _,
            libc::AT_FDCWD,
            dest.as_ptr() as *const _,
            libc::RENAME_NOREPLACE,
        )
    };
    // renameat2(2) returns -1 on failure, with the cause in errno
    // (EEXIST when `dest` already exists, given RENAME_NOREPLACE).
    if ret == -1 {
        return Err(io::Error::last_os_error());
    }

    Ok(())
}
1288
/// Converts `path` into a NUL-terminated `CString` for passing to libc calls.
///
/// Fails with `InvalidInput` if the path contains an interior NUL byte.
#[cfg(target_os = "linux")]
fn cstring_from_path(path: &Path) -> io::Result<CString> {
    // Heap-allocating here beats using the stack: jemalloc serves this from a
    // preallocated small arena anyway, whereas a PATH_MAX=4096 stack buffer
    // would make LLVM insert a stack probe, see
    // https://docs.rs/compiler_builtins/latest/compiler_builtins/probestack/index.html.
    let path_bytes = path.as_os_str().as_encoded_bytes();
    CString::new(path_bytes).map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))
}