// solana_runtime/serde_snapshot.rs

1#[cfg(target_os = "linux")]
2use std::ffi::{CStr, CString};
3use {
4    crate::{
5        bank::{
6            builtins::BuiltinPrototype, Bank, BankFieldsToDeserialize, BankFieldsToSerialize,
7            BankRc,
8        },
9        epoch_stakes::{EpochStakes, VersionedEpochStakes},
10        runtime_config::RuntimeConfig,
11        serde_snapshot::storage::SerializableAccountStorageEntry,
12        snapshot_utils::{SnapshotError, StorageAndNextAccountsFileId},
13        stakes::{serde_stakes_to_delegation_format, Stakes, StakesEnum},
14    },
15    bincode::{self, config::Options, Error},
16    log::*,
17    serde::{de::DeserializeOwned, Deserialize, Serialize},
18    solana_accounts_db::{
19        account_storage::meta::StoredMetaWriteVersion,
20        accounts::Accounts,
21        accounts_db::{
22            stats::BankHashStats, AccountStorageEntry, AccountsDb, AccountsDbConfig,
23            AccountsFileId, AtomicAccountsFileId, DuplicatesLtHash, IndexGenerationInfo,
24        },
25        accounts_file::{AccountsFile, StorageAccess},
26        accounts_hash::{AccountsDeltaHash, AccountsHash},
27        accounts_update_notifier_interface::AccountsUpdateNotifier,
28        ancestors::AncestorsForSerialization,
29        blockhash_queue::BlockhashQueue,
30        epoch_accounts_hash::EpochAccountsHash,
31    },
32    solana_measure::measure::Measure,
33    solana_sdk::{
34        clock::{Epoch, Slot, UnixTimestamp},
35        deserialize_utils::default_on_eof,
36        epoch_schedule::EpochSchedule,
37        fee_calculator::{FeeCalculator, FeeRateGovernor},
38        genesis_config::GenesisConfig,
39        hard_forks::HardForks,
40        hash::Hash,
41        inflation::Inflation,
42        pubkey::Pubkey,
43        rent_collector::RentCollector,
44        stake::state::Delegation,
45    },
46    std::{
47        cell::RefCell,
48        collections::{HashMap, HashSet},
49        io::{self, BufReader, BufWriter, Read, Write},
50        path::{Path, PathBuf},
51        result::Result,
52        sync::{
53            atomic::{AtomicBool, AtomicUsize, Ordering},
54            Arc,
55        },
56        thread::Builder,
57    },
58    storage::SerializableStorage,
59    types::SerdeAccountsLtHash,
60};
61
62mod storage;
63mod tests;
64mod types;
65mod utils;
66
67pub(crate) use {
68    solana_accounts_db::accounts_hash::{
69        SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash,
70    },
71    storage::SerializedAccountsFileId,
72};
73
/// Byte cap (32 GiB) passed to bincode's `with_limit` in `deserialize_from`;
/// bounds how much a single snapshot deserialization may read from a stream.
const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024;
75
/// On-disk representation of the AccountsDb section of a snapshot.
///
/// This is a positional tuple struct: bincode (de)serializes the fields in
/// declaration order, so the order here is part of the snapshot format and
/// must not change. See `SerializableAccountsDb::serialize` for the matching
/// write side.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
pub struct AccountsDbFields<T>(
    /// account storage entries, keyed by slot
    HashMap<Slot, Vec<T>>,
    /// accounts-db write version at the time the snapshot was taken
    StoredMetaWriteVersion,
    /// the slot this snapshot was taken at
    Slot,
    /// hashes and stats of the bank at the snapshot slot
    BankHashInfo,
    /// all slots that were roots within the last epoch
    #[serde(deserialize_with = "default_on_eof")]
    Vec<Slot>,
    /// slots that were roots within the last epoch for which we care about the hash value
    #[serde(deserialize_with = "default_on_eof")]
    Vec<(Slot, Hash)>,
);
90
/// Incremental snapshots only calculate their accounts hash based on the
/// account changes WITHIN the incremental slot range. So, we need to keep track
/// of the full snapshot expected accounts hash results. We also need to keep
/// track of the hash and capitalization specific to the incremental snapshot
/// slot range. The capitalization we calculate for the incremental slot will
/// NOT be consistent with the bank's capitalization. It is not feasible to
/// calculate a capitalization delta that is correct given just incremental
/// slots account data and the full snapshot's capitalization.
///
/// This struct travels as one of the snapshot's trailing "extra fields"
/// (see `ExtraFieldsToDeserialize` / `ExtraFieldsToSerialize`).
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)]
pub struct BankIncrementalSnapshotPersistence {
    /// slot of full snapshot
    pub full_slot: Slot,
    /// accounts hash from the full snapshot
    pub full_hash: SerdeAccountsHash,
    /// capitalization from the full snapshot
    pub full_capitalization: u64,
    /// hash of the accounts in the incremental snapshot slot range, including zero-lamport accounts
    pub incremental_hash: SerdeIncrementalAccountsHash,
    /// capitalization of the accounts in the incremental snapshot slot range
    pub incremental_capitalization: u64,
}
113
/// Hashes and stats for a bank at its snapshot slot, as embedded in the
/// serialized `AccountsDbFields`. Field order is part of the snapshot format.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq)]
struct BankHashInfo {
    accounts_delta_hash: SerdeAccountsDeltaHash,
    accounts_hash: SerdeAccountsHash,
    stats: BankHashStats,
}
121
/// Placeholder kept only for snapshot-format compatibility: the serializer
/// writes `UnusedAccounts::default()` and the deserializer ignores the value
/// (see the `#[allow(dead_code)]` on `DeserializableVersionedBank`), but the
/// field must still occupy its position in the byte stream.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Default, Clone, PartialEq, Eq, Debug, Deserialize, Serialize)]
struct UnusedAccounts {
    unused1: HashSet<Pubkey>,
    unused2: HashSet<Pubkey>,
    unused3: HashMap<Pubkey, u64>,
}
129
// Deserializable version of Bank which need not be serializable,
// because it's handled by SerializableVersionedBank.
// So, sync fields with it!
//
// Field order must match SerializableVersionedBank exactly: bincode decodes
// these fields positionally from the snapshot stream.
#[derive(Clone, Deserialize)]
struct DeserializableVersionedBank {
    blockhash_queue: BlockhashQueue,
    ancestors: AncestorsForSerialization,
    hash: Hash,
    parent_hash: Hash,
    parent_slot: Slot,
    hard_forks: HardForks,
    transaction_count: u64,
    tick_height: u64,
    signature_count: u64,
    capitalization: u64,
    max_tick_height: u64,
    hashes_per_tick: Option<u64>,
    ticks_per_slot: u64,
    ns_per_slot: u128,
    genesis_creation_time: UnixTimestamp,
    slots_per_year: f64,
    accounts_data_len: u64,
    slot: Slot,
    epoch: Epoch,
    block_height: u64,
    collector_id: Pubkey,
    collector_fees: u64,
    // consumed purely to advance the stream; the value is dropped (see the
    // From<DeserializableVersionedBank> impl, which never reads it)
    _fee_calculator: FeeCalculator,
    fee_rate_governor: FeeRateGovernor,
    collected_rent: u64,
    rent_collector: RentCollector,
    epoch_schedule: EpochSchedule,
    inflation: Inflation,
    stakes: Stakes<Delegation>,
    // format placeholder, ignored after deserialization
    #[allow(dead_code)]
    unused_accounts: UnusedAccounts,
    epoch_stakes: HashMap<Epoch, EpochStakes>,
    is_delta: bool,
}
169
/// Converts the on-disk bank representation into the field bundle used to
/// reconstruct a `Bank`. `_fee_calculator` and `unused_accounts` are dropped;
/// `incremental_snapshot_persistence` and `epoch_accounts_hash` start as
/// `None` and are filled in later from the snapshot's trailing extra fields
/// (see `deserialize_bank_fields`).
impl From<DeserializableVersionedBank> for BankFieldsToDeserialize {
    fn from(dvb: DeserializableVersionedBank) -> Self {
        BankFieldsToDeserialize {
            blockhash_queue: dvb.blockhash_queue,
            ancestors: dvb.ancestors,
            hash: dvb.hash,
            parent_hash: dvb.parent_hash,
            parent_slot: dvb.parent_slot,
            hard_forks: dvb.hard_forks,
            transaction_count: dvb.transaction_count,
            tick_height: dvb.tick_height,
            signature_count: dvb.signature_count,
            capitalization: dvb.capitalization,
            max_tick_height: dvb.max_tick_height,
            hashes_per_tick: dvb.hashes_per_tick,
            ticks_per_slot: dvb.ticks_per_slot,
            ns_per_slot: dvb.ns_per_slot,
            genesis_creation_time: dvb.genesis_creation_time,
            slots_per_year: dvb.slots_per_year,
            accounts_data_len: dvb.accounts_data_len,
            slot: dvb.slot,
            epoch: dvb.epoch,
            block_height: dvb.block_height,
            collector_id: dvb.collector_id,
            collector_fees: dvb.collector_fees,
            fee_rate_governor: dvb.fee_rate_governor,
            collected_rent: dvb.collected_rent,
            rent_collector: dvb.rent_collector,
            epoch_schedule: dvb.epoch_schedule,
            inflation: dvb.inflation,
            stakes: dvb.stakes,
            epoch_stakes: dvb.epoch_stakes,
            is_delta: dvb.is_delta,
            incremental_snapshot_persistence: None,
            epoch_accounts_hash: None,
        }
    }
}
208
// Serializable version of Bank, not Deserializable to avoid cloning by using refs.
// Sync fields with DeserializableVersionedBank!
//
// Field order must match DeserializableVersionedBank exactly: bincode encodes
// the fields positionally into the snapshot stream.
#[derive(Serialize)]
struct SerializableVersionedBank {
    blockhash_queue: BlockhashQueue,
    ancestors: AncestorsForSerialization,
    hash: Hash,
    parent_hash: Hash,
    parent_slot: Slot,
    hard_forks: HardForks,
    transaction_count: u64,
    tick_height: u64,
    signature_count: u64,
    capitalization: u64,
    max_tick_height: u64,
    hashes_per_tick: Option<u64>,
    ticks_per_slot: u64,
    ns_per_slot: u128,
    genesis_creation_time: UnixTimestamp,
    slots_per_year: f64,
    accounts_data_len: u64,
    slot: Slot,
    epoch: Epoch,
    block_height: u64,
    collector_id: Pubkey,
    collector_fees: u64,
    // always written as a default value; kept only for stream-layout
    // compatibility (see From<BankFieldsToSerialize>)
    fee_calculator: FeeCalculator,
    fee_rate_governor: FeeRateGovernor,
    collected_rent: u64,
    rent_collector: RentCollector,
    epoch_schedule: EpochSchedule,
    inflation: Inflation,
    // written in the legacy delegation format for backward compatibility
    #[serde(serialize_with = "serde_stakes_to_delegation_format::serialize")]
    stakes: StakesEnum,
    // always written as a default value; format placeholder
    unused_accounts: UnusedAccounts,
    epoch_stakes: HashMap<Epoch, EpochStakes>,
    is_delta: bool,
}
247
/// Builds the serializable bank wrapper from the in-memory field bundle.
/// `fee_calculator` and `unused_accounts` have no counterpart in
/// `BankFieldsToSerialize`, so default placeholders are written to keep the
/// snapshot stream layout stable.
impl From<BankFieldsToSerialize> for SerializableVersionedBank {
    fn from(rhs: BankFieldsToSerialize) -> Self {
        Self {
            blockhash_queue: rhs.blockhash_queue,
            ancestors: rhs.ancestors,
            hash: rhs.hash,
            parent_hash: rhs.parent_hash,
            parent_slot: rhs.parent_slot,
            hard_forks: rhs.hard_forks,
            transaction_count: rhs.transaction_count,
            tick_height: rhs.tick_height,
            signature_count: rhs.signature_count,
            capitalization: rhs.capitalization,
            max_tick_height: rhs.max_tick_height,
            hashes_per_tick: rhs.hashes_per_tick,
            ticks_per_slot: rhs.ticks_per_slot,
            ns_per_slot: rhs.ns_per_slot,
            genesis_creation_time: rhs.genesis_creation_time,
            slots_per_year: rhs.slots_per_year,
            accounts_data_len: rhs.accounts_data_len,
            slot: rhs.slot,
            epoch: rhs.epoch,
            block_height: rhs.block_height,
            collector_id: rhs.collector_id,
            collector_fees: rhs.collector_fees,
            // placeholder: the bank fields carry no fee calculator
            fee_calculator: FeeCalculator::default(),
            fee_rate_governor: rhs.fee_rate_governor,
            collected_rent: rhs.collected_rent,
            rent_collector: rhs.rent_collector,
            epoch_schedule: rhs.epoch_schedule,
            inflation: rhs.inflation,
            stakes: rhs.stakes,
            // placeholder: field is unused but still part of the format
            unused_accounts: UnusedAccounts::default(),
            epoch_stakes: rhs.epoch_stakes,
            is_delta: rhs.is_delta,
        }
    }
}
286
// NOTE(review): marker impl for the frozen-abi tooling; presumably it tells
// the ABI digester to treat this serialize-only wrapper as a transparent
// helper rather than a standalone ABI type — confirm against solana_frozen_abi.
#[cfg(feature = "frozen-abi")]
impl solana_frozen_abi::abi_example::TransparentAsHelper for SerializableVersionedBank {}
289
/// Helper type to wrap BufReader streams when deserializing and reconstructing from either just a
/// full snapshot, or both a full and incremental snapshot
pub struct SnapshotStreams<'a, R> {
    /// stream over the full snapshot (always present)
    pub full_snapshot_stream: &'a mut BufReader<R>,
    /// stream over the incremental snapshot, when one is being loaded
    pub incremental_snapshot_stream: Option<&'a mut BufReader<R>>,
}
296
/// Helper type to wrap BankFields when reconstructing Bank from either just a full
/// snapshot, or both a full and incremental snapshot
#[derive(Debug)]
pub struct SnapshotBankFields {
    /// fields deserialized from the full snapshot
    full: BankFieldsToDeserialize,
    /// fields deserialized from the incremental snapshot, when one was loaded
    incremental: Option<BankFieldsToDeserialize>,
}
304
305impl SnapshotBankFields {
306    /// Collapse the SnapshotBankFields into a single (the latest) BankFieldsToDeserialize.
307    pub fn collapse_into(self) -> BankFieldsToDeserialize {
308        self.incremental.unwrap_or(self.full)
309    }
310}
311
/// Helper type to wrap AccountsDbFields when reconstructing AccountsDb from either just a full
/// snapshot, or both a full and incremental snapshot
#[derive(Debug)]
pub struct SnapshotAccountsDbFields<T> {
    /// accounts-db fields deserialized from the full snapshot
    full_snapshot_accounts_db_fields: AccountsDbFields<T>,
    /// accounts-db fields deserialized from the incremental snapshot, if any
    incremental_snapshot_accounts_db_fields: Option<AccountsDbFields<T>>,
}
319
320impl<T> SnapshotAccountsDbFields<T> {
321    /// Collapse the SnapshotAccountsDbFields into a single AccountsDbFields.  If there is no
322    /// incremental snapshot, this returns the AccountsDbFields from the full snapshot.
323    /// Otherwise, use the AccountsDbFields from the incremental snapshot, and a combination
324    /// of the storages from both the full and incremental snapshots.
325    fn collapse_into(self) -> Result<AccountsDbFields<T>, Error> {
326        match self.incremental_snapshot_accounts_db_fields {
327            None => Ok(self.full_snapshot_accounts_db_fields),
328            Some(AccountsDbFields(
329                mut incremental_snapshot_storages,
330                incremental_snapshot_version,
331                incremental_snapshot_slot,
332                incremental_snapshot_bank_hash_info,
333                incremental_snapshot_historical_roots,
334                incremental_snapshot_historical_roots_with_hash,
335            )) => {
336                let full_snapshot_storages = self.full_snapshot_accounts_db_fields.0;
337                let full_snapshot_slot = self.full_snapshot_accounts_db_fields.2;
338
339                // filter out incremental snapshot storages with slot <= full snapshot slot
340                incremental_snapshot_storages.retain(|slot, _| *slot > full_snapshot_slot);
341
342                // There must not be any overlap in the slots of storages between the full snapshot and the incremental snapshot
343                incremental_snapshot_storages
344                    .iter()
345                    .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then_some(()).ok_or_else(|| {
346                        io::Error::new(io::ErrorKind::InvalidData, "Snapshots are incompatible: There are storages for the same slot in both the full snapshot and the incremental snapshot!")
347                    })?;
348
349                let mut combined_storages = full_snapshot_storages;
350                combined_storages.extend(incremental_snapshot_storages);
351
352                Ok(AccountsDbFields(
353                    combined_storages,
354                    incremental_snapshot_version,
355                    incremental_snapshot_slot,
356                    incremental_snapshot_bank_hash_info,
357                    incremental_snapshot_historical_roots,
358                    incremental_snapshot_historical_roots_with_hash,
359                ))
360            }
361        }
362    }
363}
364
365fn deserialize_from<R, T>(reader: R) -> bincode::Result<T>
366where
367    R: Read,
368    T: DeserializeOwned,
369{
370    bincode::options()
371        .with_limit(MAX_STREAM_SIZE)
372        .with_fixint_encoding()
373        .allow_trailing_bytes()
374        .deserialize_from::<R, T>(reader)
375}
376
377fn deserialize_accounts_db_fields<R>(
378    stream: &mut BufReader<R>,
379) -> Result<AccountsDbFields<SerializableAccountStorageEntry>, Error>
380where
381    R: Read,
382{
383    deserialize_from::<_, _>(stream)
384}
385
/// Extra fields that are deserialized from the end of snapshots.
///
/// Note that this struct's fields should stay synced with the fields in
/// ExtraFieldsToSerialize with the exception that new "extra fields" should be
/// added to this struct a minor release before they are added to the serialize
/// struct.
///
/// Every field uses `default_on_eof` so that snapshots written before a field
/// existed (where the stream simply ends sooner) still deserialize, with the
/// missing values defaulted.
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[cfg_attr(feature = "dev-context-only-utils", derive(PartialEq))]
#[derive(Clone, Debug, Deserialize)]
struct ExtraFieldsToDeserialize {
    #[serde(deserialize_with = "default_on_eof")]
    lamports_per_signature: u64,
    #[serde(deserialize_with = "default_on_eof")]
    incremental_snapshot_persistence: Option<BankIncrementalSnapshotPersistence>,
    #[serde(deserialize_with = "default_on_eof")]
    epoch_accounts_hash: Option<Hash>,
    #[serde(deserialize_with = "default_on_eof")]
    versioned_epoch_stakes: HashMap<u64, VersionedEpochStakes>,
    // read to consume the bytes, but currently unused by the bank rebuild
    #[serde(deserialize_with = "default_on_eof")]
    #[allow(dead_code)]
    accounts_lt_hash: Option<SerdeAccountsLtHash>,
}
408
/// Extra fields that are serialized at the end of snapshots.
///
/// Note that this struct's fields should stay synced with the fields in
/// ExtraFieldsToDeserialize with the exception that new "extra fields" should
/// be added to the deserialize struct a minor release before they are added to
/// this one.
///
/// Fields are written in declaration order; `accounts_lt_hash` is deliberately
/// absent here while it is still deserialize-side only (see above).
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[cfg_attr(feature = "dev-context-only-utils", derive(Default, PartialEq))]
#[derive(Debug, Serialize)]
pub struct ExtraFieldsToSerialize<'a> {
    pub lamports_per_signature: u64,
    pub incremental_snapshot_persistence: Option<&'a BankIncrementalSnapshotPersistence>,
    pub epoch_accounts_hash: Option<EpochAccountsHash>,
    pub versioned_epoch_stakes: HashMap<u64, VersionedEpochStakes>,
}
424
425fn deserialize_bank_fields<R>(
426    mut stream: &mut BufReader<R>,
427) -> Result<
428    (
429        BankFieldsToDeserialize,
430        AccountsDbFields<SerializableAccountStorageEntry>,
431    ),
432    Error,
433>
434where
435    R: Read,
436{
437    let mut bank_fields: BankFieldsToDeserialize =
438        deserialize_from::<_, DeserializableVersionedBank>(&mut stream)?.into();
439    let accounts_db_fields = deserialize_accounts_db_fields(stream)?;
440    let extra_fields = deserialize_from(stream)?;
441
442    // Process extra fields
443    let ExtraFieldsToDeserialize {
444        lamports_per_signature,
445        incremental_snapshot_persistence,
446        epoch_accounts_hash,
447        versioned_epoch_stakes,
448        accounts_lt_hash: _,
449    } = extra_fields;
450
451    bank_fields.fee_rate_governor = bank_fields
452        .fee_rate_governor
453        .clone_with_lamports_per_signature(lamports_per_signature);
454    bank_fields.incremental_snapshot_persistence = incremental_snapshot_persistence;
455    bank_fields.epoch_accounts_hash = epoch_accounts_hash;
456
457    // If we deserialize the new epoch stakes, add all of the entries into the
458    // other deserialized map which could still have old epoch stakes entries
459    bank_fields.epoch_stakes.extend(
460        versioned_epoch_stakes
461            .into_iter()
462            .map(|(epoch, versioned_epoch_stakes)| (epoch, versioned_epoch_stakes.into())),
463    );
464
465    Ok((bank_fields, accounts_db_fields))
466}
467
/// used by tests to compare contents of serialized bank fields
/// serialized format is not deterministic - likely due to randomness in structs like hashmaps
#[cfg(feature = "dev-context-only-utils")]
pub(crate) fn compare_two_serialized_banks(
    path1: impl AsRef<Path>,
    path2: impl AsRef<Path>,
) -> std::result::Result<bool, Error> {
    use std::fs::File;

    let mut reader1 = BufReader::new(File::open(path1)?);
    let mut reader2 = BufReader::new(File::open(path2)?);

    // Compare the parsed representations rather than raw bytes, since the
    // on-disk encoding is not byte-for-byte deterministic.
    let parsed1 = deserialize_bank_fields(&mut reader1)?;
    let parsed2 = deserialize_bank_fields(&mut reader2)?;
    Ok(parsed1 == parsed2)
}
485
486/// Get snapshot storage lengths from accounts_db_fields
487pub(crate) fn snapshot_storage_lengths_from_fields(
488    accounts_db_fields: &AccountsDbFields<SerializableAccountStorageEntry>,
489) -> HashMap<Slot, HashMap<SerializedAccountsFileId, usize>> {
490    let AccountsDbFields(snapshot_storage, ..) = &accounts_db_fields;
491    snapshot_storage
492        .iter()
493        .map(|(slot, slot_storage)| {
494            (
495                *slot,
496                slot_storage
497                    .iter()
498                    .map(|storage_entry| (storage_entry.id(), storage_entry.current_len()))
499                    .collect(),
500            )
501        })
502        .collect()
503}
504
/// Deserializes the bank fields and accounts-db fields from a single snapshot
/// stream (either a full or an incremental snapshot); thin wrapper over
/// `deserialize_bank_fields`.
pub(crate) fn fields_from_stream<R: Read>(
    snapshot_stream: &mut BufReader<R>,
) -> std::result::Result<
    (
        BankFieldsToDeserialize,
        AccountsDbFields<SerializableAccountStorageEntry>,
    ),
    Error,
> {
    deserialize_bank_fields(snapshot_stream)
}
516
517pub(crate) fn fields_from_streams(
518    snapshot_streams: &mut SnapshotStreams<impl Read>,
519) -> std::result::Result<
520    (
521        SnapshotBankFields,
522        SnapshotAccountsDbFields<SerializableAccountStorageEntry>,
523    ),
524    Error,
525> {
526    let (full_snapshot_bank_fields, full_snapshot_accounts_db_fields) =
527        fields_from_stream(snapshot_streams.full_snapshot_stream)?;
528    let (incremental_snapshot_bank_fields, incremental_snapshot_accounts_db_fields) =
529        snapshot_streams
530            .incremental_snapshot_stream
531            .as_mut()
532            .map(|stream| fields_from_stream(stream))
533            .transpose()?
534            .unzip();
535
536    let snapshot_bank_fields = SnapshotBankFields {
537        full: full_snapshot_bank_fields,
538        incremental: incremental_snapshot_bank_fields,
539    };
540    let snapshot_accounts_db_fields = SnapshotAccountsDbFields {
541        full_snapshot_accounts_db_fields,
542        incremental_snapshot_accounts_db_fields,
543    };
544    Ok((snapshot_bank_fields, snapshot_accounts_db_fields))
545}
546
/// This struct contains side-info while reconstructing the bank from streams
#[derive(Debug)]
pub struct BankFromStreamsInfo {
    // forwarded from ReconstructedBankInfo by bank_from_streams; presumably a
    // lattice hash covering duplicate accounts — confirm at the producer
    pub duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
}
552
/// Deserializes snapshot stream(s) and reconstructs a `Bank` from them.
///
/// Reads the bank/accounts-db fields from the full (and optional incremental)
/// snapshot streams, then delegates reconstruction to
/// `reconstruct_bank_from_fields`, re-wrapping its side-info into
/// `BankFromStreamsInfo` for callers.
#[allow(clippy::too_many_arguments)]
pub(crate) fn bank_from_streams<R>(
    snapshot_streams: &mut SnapshotStreams<R>,
    account_paths: &[PathBuf],
    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
    genesis_config: &GenesisConfig,
    runtime_config: &RuntimeConfig,
    debug_keys: Option<Arc<HashSet<Pubkey>>>,
    additional_builtins: Option<&[BuiltinPrototype]>,
    limit_load_slot_count_from_snapshot: Option<usize>,
    verify_index: bool,
    accounts_db_config: Option<AccountsDbConfig>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    exit: Arc<AtomicBool>,
) -> std::result::Result<(Bank, BankFromStreamsInfo), Error>
where
    R: Read,
{
    let (bank_fields, accounts_db_fields) = fields_from_streams(snapshot_streams)?;
    let (bank, info) = reconstruct_bank_from_fields(
        bank_fields,
        accounts_db_fields,
        genesis_config,
        runtime_config,
        account_paths,
        storage_and_next_append_vec_id,
        debug_keys,
        additional_builtins,
        limit_load_slot_count_from_snapshot,
        verify_index,
        accounts_db_config,
        accounts_update_notifier,
        exit,
    )?;
    Ok((
        bank,
        BankFromStreamsInfo {
            duplicates_lt_hash: info.duplicates_lt_hash,
        },
    ))
}
594
/// Test helper: bincode-serializes `bank` and its storages into `stream`,
/// including the trailing extra-fields section.
#[cfg(test)]
pub(crate) fn bank_to_stream<W>(
    stream: &mut BufWriter<W>,
    bank: &Bank,
    snapshot_storages: &[Vec<Arc<AccountStorageEntry>>],
) -> Result<(), Error>
where
    W: Write,
{
    let serializable = SerializableBankAndStorage {
        bank,
        snapshot_storages,
    };
    bincode::serialize_into(stream, &serializable)
}
612
/// Test helper: bincode-serializes `bank` and its storages into `stream`
/// WITHOUT the trailing extra-fields section (emulates older snapshots).
#[cfg(test)]
pub(crate) fn bank_to_stream_no_extra_fields<W>(
    stream: &mut BufWriter<W>,
    bank: &Bank,
    snapshot_storages: &[Vec<Arc<AccountStorageEntry>>],
) -> Result<(), Error>
where
    W: Write,
{
    let serializable = SerializableBankAndStorageNoExtra {
        bank,
        snapshot_storages,
    };
    bincode::serialize_into(stream, &serializable)
}
630
/// Serializes bank snapshot into `stream` with bincode
///
/// Builds a fixint-encoded bincode serializer over `stream` and delegates to
/// `serialize_bank_snapshot_with`; see that function for the field layout.
pub fn serialize_bank_snapshot_into<W>(
    stream: &mut BufWriter<W>,
    bank_fields: BankFieldsToSerialize,
    bank_hash_stats: BankHashStats,
    accounts_delta_hash: AccountsDeltaHash,
    accounts_hash: AccountsHash,
    account_storage_entries: &[Vec<Arc<AccountStorageEntry>>],
    extra_fields: ExtraFieldsToSerialize,
    write_version: StoredMetaWriteVersion,
) -> Result<(), Error>
where
    W: Write,
{
    // fixint encoding matches the options used by deserialize_from
    let mut serializer = bincode::Serializer::new(
        stream,
        bincode::DefaultOptions::new().with_fixint_encoding(),
    );
    serialize_bank_snapshot_with(
        &mut serializer,
        bank_fields,
        bank_hash_stats,
        accounts_delta_hash,
        accounts_hash,
        account_storage_entries,
        extra_fields,
        write_version,
    )
}
660
/// Serializes bank snapshot with `serializer`
///
/// Writes the three snapshot sections as a tuple — versioned bank,
/// accounts-db fields, extra fields — in the same order that
/// `deserialize_bank_fields` reads them.
pub fn serialize_bank_snapshot_with<S>(
    serializer: S,
    bank_fields: BankFieldsToSerialize,
    bank_hash_stats: BankHashStats,
    accounts_delta_hash: AccountsDeltaHash,
    accounts_hash: AccountsHash,
    account_storage_entries: &[Vec<Arc<AccountStorageEntry>>],
    extra_fields: ExtraFieldsToSerialize,
    write_version: StoredMetaWriteVersion,
) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    // capture the slot before bank_fields is moved into the wrapper
    let slot = bank_fields.slot;
    let serializable_bank = SerializableVersionedBank::from(bank_fields);
    let serializable_accounts_db = SerializableAccountsDb::<'_> {
        slot,
        account_storage_entries,
        bank_hash_stats,
        accounts_delta_hash,
        accounts_hash,
        write_version,
    };
    (serializable_bank, serializable_accounts_db, extra_fields).serialize(serializer)
}
687
/// Test-only wrapper that serializes a bank plus its snapshot storages in the
/// full snapshot layout (including the trailing extra fields).
#[cfg(test)]
struct SerializableBankAndStorage<'a> {
    bank: &'a Bank,
    snapshot_storages: &'a [Vec<Arc<AccountStorageEntry>>],
}
693
#[cfg(test)]
impl<'a> Serialize for SerializableBankAndStorage<'a> {
    // Writes the same 3-tuple layout as serialize_bank_snapshot_with:
    // (versioned bank, accounts-db fields, extra fields).
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        let slot = self.bank.slot();
        let mut bank_fields = self.bank.get_fields_to_serialize();
        let accounts_db = &self.bank.rc.accounts.accounts_db;
        // unwraps are acceptable here: this is test-only code and the bank is
        // expected to have these hashes computed for its own slot
        let bank_hash_stats = accounts_db.get_bank_hash_stats(slot).unwrap();
        let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap();
        let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0;
        let write_version = accounts_db.write_version.load(Ordering::Acquire);
        // capture before bank_fields is moved into SerializableVersionedBank
        let lamports_per_signature = bank_fields.fee_rate_governor.lamports_per_signature;
        // moved out so they can be written in the extra-fields section rather
        // than inside the versioned bank
        let versioned_epoch_stakes = std::mem::take(&mut bank_fields.versioned_epoch_stakes);
        let bank_fields_to_serialize = (
            SerializableVersionedBank::from(bank_fields),
            SerializableAccountsDb::<'_> {
                slot,
                account_storage_entries: self.snapshot_storages,
                bank_hash_stats,
                accounts_delta_hash,
                accounts_hash,
                write_version,
            },
            ExtraFieldsToSerialize {
                lamports_per_signature,
                incremental_snapshot_persistence: None,
                epoch_accounts_hash: self.bank.get_epoch_accounts_hash_to_serialize(),
                versioned_epoch_stakes,
            },
        );
        bank_fields_to_serialize.serialize(serializer)
    }
}
729
/// Test-only wrapper like `SerializableBankAndStorage`, but omitting the
/// trailing extra fields (emulates the pre-extra-fields snapshot format).
#[cfg(test)]
struct SerializableBankAndStorageNoExtra<'a> {
    bank: &'a Bank,
    snapshot_storages: &'a [Vec<Arc<AccountStorageEntry>>],
}
735
#[cfg(test)]
impl<'a> Serialize for SerializableBankAndStorageNoExtra<'a> {
    // Writes only the (versioned bank, accounts-db fields) 2-tuple — no
    // trailing extra fields. Deserialization still works because every extra
    // field uses default_on_eof.
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        let slot = self.bank.slot();
        let bank_fields = self.bank.get_fields_to_serialize();
        let accounts_db = &self.bank.rc.accounts.accounts_db;
        // test-only code: the bank is expected to have these hashes computed
        let bank_hash_stats = accounts_db.get_bank_hash_stats(slot).unwrap();
        let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap();
        let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0;
        let write_version = accounts_db.write_version.load(Ordering::Acquire);
        (
            SerializableVersionedBank::from(bank_fields),
            SerializableAccountsDb::<'_> {
                slot,
                account_storage_entries: self.snapshot_storages,
                bank_hash_stats,
                accounts_delta_hash,
                accounts_hash,
                write_version,
            },
        )
            .serialize(serializer)
    }
}
763
/// Test helper conversion: both wrappers carry the same references, so the
/// fields are simply carried over.
#[cfg(test)]
impl<'a> From<SerializableBankAndStorageNoExtra<'a>> for SerializableBankAndStorage<'a> {
    fn from(s: SerializableBankAndStorageNoExtra<'a>) -> SerializableBankAndStorage<'a> {
        SerializableBankAndStorage {
            bank: s.bank,
            snapshot_storages: s.snapshot_storages,
        }
    }
}
777
/// Serialize-side counterpart of `AccountsDbFields`: holds everything needed
/// to write the accounts-db section of a snapshot for one slot.
struct SerializableAccountsDb<'a> {
    slot: Slot,
    account_storage_entries: &'a [Vec<Arc<AccountStorageEntry>>],
    bank_hash_stats: BankHashStats,
    accounts_delta_hash: AccountsDeltaHash,
    accounts_hash: AccountsHash,
    write_version: StoredMetaWriteVersion,
}
786
impl<'a> Serialize for SerializableAccountsDb<'a> {
    // Writes the positional layout that `AccountsDbFields` deserializes:
    // (storages map, write_version, slot, bank_hash_info, historical_roots,
    // historical_roots_with_hash).
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        // (1st of 3 elements) write the list of account storage entry lists out as a map
        // RefCell lets the closure below (captured by the lazy map serializer)
        // tally entries while borrowing immutably.
        let entry_count = RefCell::<usize>::new(0);
        let entries = utils::serialize_iter_as_map(self.account_storage_entries.iter().map(|x| {
            *entry_count.borrow_mut() += x.len();
            (
                // NOTE(review): panics if a per-slot storage list is empty —
                // presumably callers guarantee each inner Vec is non-empty;
                // confirm at the call sites.
                x.first().unwrap().slot(),
                utils::serialize_iter_as_seq(
                    x.iter()
                        .map(|x| SerializableAccountStorageEntry::from(x.as_ref())),
                ),
            )
        }));
        let bank_hash_info = BankHashInfo {
            accounts_delta_hash: self.accounts_delta_hash.into(),
            accounts_hash: self.accounts_hash.into(),
            stats: self.bank_hash_stats.clone(),
        };

        // always written empty; kept for snapshot-format compatibility
        let historical_roots = Vec::<Slot>::default();
        let historical_roots_with_hash = Vec::<(Slot, Hash)>::default();

        let mut serialize_account_storage_timer = Measure::start("serialize_account_storage_ms");
        let result = (
            entries,
            self.write_version,
            self.slot,
            bank_hash_info,
            historical_roots,
            historical_roots_with_hash,
        )
            .serialize(serializer);
        serialize_account_storage_timer.stop();
        datapoint_info!(
            "serialize_account_storage_ms",
            ("duration", serialize_account_storage_timer.as_ms(), i64),
            ("num_entries", *entry_count.borrow(), i64),
        );
        result
    }
}
832
// Marker impl for the frozen-abi machinery (no methods to implement).
#[cfg(feature = "frozen-abi")]
impl<'a> solana_frozen_abi::abi_example::TransparentAsHelper for SerializableAccountsDb<'a> {}
835
/// This struct contains side-info while reconstructing the bank from fields
#[derive(Debug)]
struct ReconstructedBankInfo {
    // lattice-hash info for duplicate accounts found during index generation,
    // forwarded from `ReconstructedAccountsDbInfo`
    duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
}
841
/// Reconstructs a `Bank` (and its underlying accounts db) from deserialized
/// snapshot fields.
///
/// Steps: rebuild the `AccountsDb` from `snapshot_accounts_db_fields`, wrap it
/// in a `BankRc`, then build the bank via `Bank::new_from_fields`.  Returns the
/// bank plus side-info gathered while reconstructing the accounts db.
#[allow(clippy::too_many_arguments)]
fn reconstruct_bank_from_fields<E>(
    bank_fields: SnapshotBankFields,
    snapshot_accounts_db_fields: SnapshotAccountsDbFields<E>,
    genesis_config: &GenesisConfig,
    runtime_config: &RuntimeConfig,
    account_paths: &[PathBuf],
    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
    debug_keys: Option<Arc<HashSet<Pubkey>>>,
    additional_builtins: Option<&[BuiltinPrototype]>,
    limit_load_slot_count_from_snapshot: Option<usize>,
    verify_index: bool,
    accounts_db_config: Option<AccountsDbConfig>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    exit: Arc<AtomicBool>,
) -> Result<(Bank, ReconstructedBankInfo), Error>
where
    E: SerializableStorage + std::marker::Sync,
{
    // Capture the full and (optional) incremental capitalizations *before*
    // `collapse_into()` consumes `bank_fields`.
    let capitalizations = (
        bank_fields.full.capitalization,
        bank_fields
            .incremental
            .as_ref()
            .map(|bank_fields| bank_fields.capitalization),
    );
    let bank_fields = bank_fields.collapse_into();
    let (accounts_db, reconstructed_accounts_db_info) = reconstruct_accountsdb_from_fields(
        snapshot_accounts_db_fields,
        account_paths,
        storage_and_next_append_vec_id,
        genesis_config,
        limit_load_slot_count_from_snapshot,
        verify_index,
        accounts_db_config,
        accounts_update_notifier,
        exit,
        bank_fields.epoch_accounts_hash,
        capitalizations,
        bank_fields.incremental_snapshot_persistence.as_ref(),
    )?;

    let bank_rc = BankRc::new(Accounts::new(Arc::new(accounts_db)));
    let runtime_config = Arc::new(runtime_config.clone());

    // if limit_load_slot_count_from_snapshot is set, then we need to side-step some correctness checks beneath this call
    let debug_do_not_add_builtins = limit_load_slot_count_from_snapshot.is_some();
    let bank = Bank::new_from_fields(
        bank_rc,
        genesis_config,
        runtime_config,
        bank_fields,
        debug_keys,
        additional_builtins,
        debug_do_not_add_builtins,
        reconstructed_accounts_db_info.accounts_data_len,
    );

    info!("rent_collector: {:?}", bank.rent_collector());

    Ok((
        bank,
        ReconstructedBankInfo {
            duplicates_lt_hash: reconstructed_accounts_db_info.duplicates_lt_hash,
        },
    ))
}
909
910pub(crate) fn reconstruct_single_storage(
911    slot: &Slot,
912    append_vec_path: &Path,
913    current_len: usize,
914    append_vec_id: AccountsFileId,
915    storage_access: StorageAccess,
916) -> Result<Arc<AccountStorageEntry>, SnapshotError> {
917    let (accounts_file, num_accounts) =
918        AccountsFile::new_from_file(append_vec_path, current_len, storage_access)?;
919    Ok(Arc::new(AccountStorageEntry::new_existing(
920        *slot,
921        append_vec_id,
922        accounts_file,
923        num_accounts,
924    )))
925}
926
// Remap the AppendVec ID to handle any duplicate IDs that may previously existed
// due to full snapshots and incremental snapshots generated from different
// nodes
//
// Returns the (possibly new) ID and the (possibly renamed) path of the file.
pub(crate) fn remap_append_vec_file(
    slot: Slot,
    old_append_vec_id: SerializedAccountsFileId,
    append_vec_path: &Path,
    next_append_vec_id: &AtomicAccountsFileId,
    num_collisions: &AtomicUsize,
) -> io::Result<(AccountsFileId, PathBuf)> {
    // Pre-convert the source path once; only the linux-gnu renameat2() fast
    // path below consumes it.
    // NOTE(review): on linux with a non-gnu libc this binding looks unused —
    // confirm that does not trip an unused-variable warning on such targets.
    #[cfg(target_os = "linux")]
    let append_vec_path_cstr = cstring_from_path(append_vec_path)?;

    let mut remapped_append_vec_path = append_vec_path.to_path_buf();

    // Break out of the loop in the following situations:
    // 1. The new ID is the same as the original ID.  This means we do not need to
    //    rename the file, since the ID is the "correct" one already.
    // 2. There is not a file already at the new path.  This means it is safe to
    //    rename the file to this new path.
    let (remapped_append_vec_id, remapped_append_vec_path) = loop {
        // atomically claim the next candidate ID
        let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::AcqRel);

        // this can only happen in the first iteration of the loop
        if old_append_vec_id == remapped_append_vec_id as SerializedAccountsFileId {
            break (remapped_append_vec_id, remapped_append_vec_path);
        }

        let remapped_file_name = AccountsFile::file_name(slot, remapped_append_vec_id);
        remapped_append_vec_path = append_vec_path.parent().unwrap().join(remapped_file_name);

        #[cfg(all(target_os = "linux", target_env = "gnu"))]
        {
            let remapped_append_vec_path_cstr = cstring_from_path(&remapped_append_vec_path)?;

            // On linux we use renameat2(NO_REPLACE) instead of IF metadata(path).is_err() THEN
            // rename() in order to save a statx() syscall.
            match rename_no_replace(&append_vec_path_cstr, &remapped_append_vec_path_cstr) {
                // If the file was successfully renamed, break out of the loop
                Ok(_) => break (remapped_append_vec_id, remapped_append_vec_path),
                // If there's already a file at the new path, continue so we try
                // the next ID
                Err(e) if e.kind() == io::ErrorKind::AlreadyExists => {}
                Err(e) => return Err(e),
            }
        }

        // Fallback for non-linux-gnu targets: check-then-break, with the actual
        // rename deferred until after the loop.
        // NOTE(review): metadata()+rename() is check-then-act, so a concurrent
        // creator of the same path could race — presumably each storage file
        // has a single remapping owner; confirm.
        #[cfg(any(
            not(target_os = "linux"),
            all(target_os = "linux", not(target_env = "gnu"))
        ))]
        if std::fs::metadata(&remapped_append_vec_path).is_err() {
            break (remapped_append_vec_id, remapped_append_vec_path);
        }

        // If we made it this far, a file exists at the new path.  Record the collision
        // and try again.
        num_collisions.fetch_add(1, Ordering::Relaxed);
    };

    // Only rename the file if the new ID is actually different from the original. In the target_os
    // = linux case, we have already renamed if necessary.
    #[cfg(any(
        not(target_os = "linux"),
        all(target_os = "linux", not(target_env = "gnu"))
    ))]
    if old_append_vec_id != remapped_append_vec_id as SerializedAccountsFileId {
        std::fs::rename(append_vec_path, &remapped_append_vec_path)?;
    }

    Ok((remapped_append_vec_id, remapped_append_vec_path))
}
999
1000pub(crate) fn remap_and_reconstruct_single_storage(
1001    slot: Slot,
1002    old_append_vec_id: SerializedAccountsFileId,
1003    current_len: usize,
1004    append_vec_path: &Path,
1005    next_append_vec_id: &AtomicAccountsFileId,
1006    num_collisions: &AtomicUsize,
1007    storage_access: StorageAccess,
1008) -> Result<Arc<AccountStorageEntry>, SnapshotError> {
1009    let (remapped_append_vec_id, remapped_append_vec_path) = remap_append_vec_file(
1010        slot,
1011        old_append_vec_id,
1012        append_vec_path,
1013        next_append_vec_id,
1014        num_collisions,
1015    )?;
1016    let storage = reconstruct_single_storage(
1017        &slot,
1018        &remapped_append_vec_path,
1019        current_len,
1020        remapped_append_vec_id,
1021        storage_access,
1022    )?;
1023    Ok(storage)
1024}
1025
/// This struct contains side-info while reconstructing the accounts DB from fields.
#[derive(Debug, Default, Clone)]
pub struct ReconstructedAccountsDbInfo {
    // total accounts-data length as tallied by index generation
    pub accounts_data_len: u64,
    // lattice-hash info for duplicate accounts discovered during index
    // generation, if any
    pub duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
}
1032
/// Reconstructs an `AccountsDb` from deserialized snapshot fields.
///
/// Registers the accounts hashes/capitalizations from the full snapshot (and,
/// when present, the incremental snapshot), wires up the deserialized storage
/// entries, then generates the accounts index while a background thread sends
/// account-restore notifications.  Returns the db plus side-info gathered
/// while reconstructing it.
#[allow(clippy::too_many_arguments)]
fn reconstruct_accountsdb_from_fields<E>(
    snapshot_accounts_db_fields: SnapshotAccountsDbFields<E>,
    account_paths: &[PathBuf],
    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
    genesis_config: &GenesisConfig,
    limit_load_slot_count_from_snapshot: Option<usize>,
    verify_index: bool,
    accounts_db_config: Option<AccountsDbConfig>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    exit: Arc<AtomicBool>,
    epoch_accounts_hash: Option<Hash>,
    capitalizations: (u64, Option<u64>),
    incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>,
) -> Result<(AccountsDb, ReconstructedAccountsDbInfo), Error>
where
    E: SerializableStorage + std::marker::Sync,
{
    let mut accounts_db = AccountsDb::new_with_config(
        account_paths.to_vec(),
        accounts_db_config,
        accounts_update_notifier,
        exit,
    );

    if let Some(epoch_accounts_hash) = epoch_accounts_hash {
        accounts_db
            .epoch_accounts_hash_manager
            .set_valid(EpochAccountsHash::new(epoch_accounts_hash), 0);
    }

    // Store the accounts hash & capitalization, from the full snapshot, in the new AccountsDb
    {
        let AccountsDbFields(_, _, slot, bank_hash_info, _, _) =
            &snapshot_accounts_db_fields.full_snapshot_accounts_db_fields;

        if let Some(incremental_snapshot_persistence) = incremental_snapshot_persistence {
            // If we've booted from local state that was originally intended to be an incremental
            // snapshot, then we will use the incremental snapshot persistence field to set the
            // initial accounts hashes in accounts db.
            let old_accounts_hash = accounts_db.set_accounts_hash_from_snapshot(
                incremental_snapshot_persistence.full_slot,
                incremental_snapshot_persistence.full_hash.clone(),
                incremental_snapshot_persistence.full_capitalization,
            );
            assert!(
                old_accounts_hash.is_none(),
                "There should not already be an AccountsHash at slot {slot}: {old_accounts_hash:?}",
            );
            let old_incremental_accounts_hash = accounts_db
                .set_incremental_accounts_hash_from_snapshot(
                    *slot,
                    incremental_snapshot_persistence.incremental_hash.clone(),
                    incremental_snapshot_persistence.incremental_capitalization,
                );
            assert!(
                old_incremental_accounts_hash.is_none(),
                "There should not already be an IncrementalAccountsHash at slot {slot}: {old_incremental_accounts_hash:?}",
            );
        } else {
            // Otherwise, we've booted from a snapshot archive, or from local state that was *not*
            // intended to be an incremental snapshot.
            let old_accounts_hash = accounts_db.set_accounts_hash_from_snapshot(
                *slot,
                bank_hash_info.accounts_hash.clone(),
                capitalizations.0,
            );
            assert!(
                old_accounts_hash.is_none(),
                "There should not already be an AccountsHash at slot {slot}: {old_accounts_hash:?}",
            );
        }
    }

    // Store the accounts hash & capitalization, from the incremental snapshot, in the new AccountsDb
    {
        if let Some(AccountsDbFields(_, _, slot, bank_hash_info, _, _)) =
            snapshot_accounts_db_fields
                .incremental_snapshot_accounts_db_fields
                .as_ref()
        {
            if let Some(incremental_snapshot_persistence) = incremental_snapshot_persistence {
                // Use the presence of a BankIncrementalSnapshotPersistence to indicate the
                // Incremental Accounts Hash feature is enabled, and use its accounts hashes
                // instead of `BankHashInfo`'s.
                let AccountsDbFields(_, _, full_slot, full_bank_hash_info, _, _) =
                    &snapshot_accounts_db_fields.full_snapshot_accounts_db_fields;
                let full_accounts_hash = &full_bank_hash_info.accounts_hash;
                // Sanity-check that the persisted incremental info really is
                // based on this full snapshot before trusting its hashes.
                assert_eq!(
                    incremental_snapshot_persistence.full_slot, *full_slot,
                    "The incremental snapshot's base slot ({}) must match the full snapshot's slot ({full_slot})!",
                    incremental_snapshot_persistence.full_slot,
                );
                assert_eq!(
                    &incremental_snapshot_persistence.full_hash, full_accounts_hash,
                    "The incremental snapshot's base accounts hash ({}) must match the full snapshot's accounts hash ({})!",
                    &incremental_snapshot_persistence.full_hash.0, full_accounts_hash.0,
                );
                assert_eq!(
                    incremental_snapshot_persistence.full_capitalization, capitalizations.0,
                    "The incremental snapshot's base capitalization ({}) must match the full snapshot's capitalization ({})!",
                    incremental_snapshot_persistence.full_capitalization, capitalizations.0,
                );
                let old_incremental_accounts_hash = accounts_db
                    .set_incremental_accounts_hash_from_snapshot(
                        *slot,
                        incremental_snapshot_persistence.incremental_hash.clone(),
                        incremental_snapshot_persistence.incremental_capitalization,
                    );
                assert!(
                    old_incremental_accounts_hash.is_none(),
                    "There should not already be an IncrementalAccountsHash at slot {slot}: {old_incremental_accounts_hash:?}",
                );
            } else {
                // ..and without a BankIncrementalSnapshotPersistence then the Incremental Accounts
                // Hash feature is disabled; the accounts hash in `BankHashInfo` is valid.
                let old_accounts_hash = accounts_db.set_accounts_hash_from_snapshot(
                    *slot,
                    bank_hash_info.accounts_hash.clone(),
                    capitalizations
                        .1
                        .expect("capitalization from incremental snapshot"),
                );
                assert!(
                    old_accounts_hash.is_none(),
                    "There should not already be an AccountsHash at slot {slot}: {old_accounts_hash:?}",
                );
            };
        }
    }

    // Collapse full + incremental fields into the single set we reconstruct from.
    let AccountsDbFields(
        _snapshot_storages,
        snapshot_version,
        snapshot_slot,
        snapshot_bank_hash_info,
        _snapshot_historical_roots,
        _snapshot_historical_roots_with_hash,
    ) = snapshot_accounts_db_fields.collapse_into()?;

    // Ensure all account paths exist
    for path in &accounts_db.paths {
        std::fs::create_dir_all(path)
            .unwrap_or_else(|err| panic!("Failed to create directory {}: {}", path.display(), err));
    }

    let StorageAndNextAccountsFileId {
        storage,
        next_append_vec_id,
    } = storage_and_next_append_vec_id;

    assert!(
        !storage.is_empty(),
        "At least one storage entry must exist from deserializing stream"
    );

    // NOTE(review): the subtraction assumes at least one file id was assigned
    // (next id > 0); presumably guaranteed by the non-empty `storage` assert
    // above — confirm.
    let next_append_vec_id = next_append_vec_id.load(Ordering::Acquire);
    let max_append_vec_id = next_append_vec_id - 1;
    assert!(
        max_append_vec_id <= AccountsFileId::MAX / 2,
        "Storage id {max_append_vec_id} larger than allowed max"
    );

    // Process deserialized data, set necessary fields in self
    let old_accounts_delta_hash = accounts_db.set_accounts_delta_hash_from_snapshot(
        snapshot_slot,
        snapshot_bank_hash_info.accounts_delta_hash,
    );
    assert!(
        old_accounts_delta_hash.is_none(),
        "There should not already be an AccountsDeltaHash at slot {snapshot_slot}: {old_accounts_delta_hash:?}",
        );
    let old_stats = accounts_db
        .update_bank_hash_stats_from_snapshot(snapshot_slot, snapshot_bank_hash_info.stats);
    assert!(
        old_stats.is_none(),
        "There should not already be a BankHashStats at slot {snapshot_slot}: {old_stats:?}",
    );
    accounts_db.storage.initialize(storage);
    accounts_db
        .next_id
        .store(next_append_vec_id, Ordering::Release);
    accounts_db
        .write_version
        .fetch_add(snapshot_version, Ordering::Release);

    let mut measure_notify = Measure::start("accounts_notify");

    // Send restore notifications on a background thread so they overlap with
    // index generation below.
    let accounts_db = Arc::new(accounts_db);
    let accounts_db_clone = accounts_db.clone();
    let handle = Builder::new()
        .name("solNfyAccRestor".to_string())
        .spawn(move || {
            accounts_db_clone.notify_account_restore_from_snapshot();
        })
        .unwrap();

    let IndexGenerationInfo {
        accounts_data_len,
        rent_paying_accounts_by_partition,
        duplicates_lt_hash,
    } = accounts_db.generate_index(
        limit_load_slot_count_from_snapshot,
        verify_index,
        genesis_config,
    );
    accounts_db
        .accounts_index
        .rent_paying_accounts_by_partition
        .set(rent_paying_accounts_by_partition)
        .unwrap();

    handle.join().unwrap();
    measure_notify.stop();

    datapoint_info!(
        "reconstruct_accountsdb_from_fields()",
        ("accountsdb-notify-at-start-us", measure_notify.as_us(), i64),
    );

    // The notifier thread has been joined and its Arc clone dropped, so this
    // is the sole remaining reference and try_unwrap cannot fail.
    Ok((
        Arc::try_unwrap(accounts_db).unwrap(),
        ReconstructedAccountsDbInfo {
            accounts_data_len,
            duplicates_lt_hash,
        },
    ))
}
1261
// Rename `src` to `dest` only if `dest` doesn't already exist.
//
// Uses renameat2(2) with RENAME_NOREPLACE so existence-check and rename happen
// atomically in a single syscall; an existing `dest` surfaces as an io::Error
// (EEXIST, which maps to ErrorKind::AlreadyExists).
#[cfg(all(target_os = "linux", target_env = "gnu"))]
fn rename_no_replace(src: &CStr, dest: &CStr) -> io::Result<()> {
    // SAFETY: `src` and `dest` are valid, NUL-terminated C strings that
    // outlive the call, and renameat2 does not retain the pointers.
    let ret = unsafe {
        libc::renameat2(
            libc::AT_FDCWD,
            src.as_ptr() as *const _,
            libc::AT_FDCWD,
            dest.as_ptr() as *const _,
            libc::RENAME_NOREPLACE,
        )
    };
    // renameat2 returns -1 on failure with the reason in errno.
    if ret == -1 {
        return Err(io::Error::last_os_error());
    }

    Ok(())
}
1280
#[cfg(target_os = "linux")]
/// Converts `path` into a NUL-terminated `CString` suitable for libc calls.
///
/// We deliberately heap-allocate rather than using a stack buffer: jemalloc
/// serves this from a preallocated small arena anyway, whereas a
/// PATH_MAX=4096 stack buffer would make LLVM insert a stack probe, see
/// https://docs.rs/compiler_builtins/latest/compiler_builtins/probestack/index.html.
///
/// # Errors
///
/// Returns `io::ErrorKind::InvalidInput` if the path contains an interior NUL
/// byte.
fn cstring_from_path(path: &Path) -> io::Result<CString> {
    let raw_bytes = path.as_os_str().as_encoded_bytes();
    match CString::new(raw_bytes) {
        Ok(cstr) => Ok(cstr),
        Err(nul_error) => Err(io::Error::new(io::ErrorKind::InvalidInput, nul_error)),
    }
}