solana_runtime/
bank.rs

1//! The `bank` module tracks client accounts and the progress of on-chain
2//! programs.
3//!
4//! A single bank relates to a block produced by a single leader and each bank
5//! except for the genesis bank points back to a parent bank.
6//!
7//! The bank is the main entrypoint for processing verified transactions with the function
8//! `Bank::process_transactions`
9//!
10//! It does this by loading the accounts using the reference it holds on the account store,
11//! and then passing those to an InvokeContext which handles loading the programs specified
12//! by the Transaction and executing it.
13//!
14//! The bank then stores the results to the accounts store.
15//!
//! It then has APIs for retrieving whether a transaction has been processed and its status.
17//! See `get_signature_status` et al.
18//!
19//! Bank lifecycle:
20//!
21//! A bank is newly created and open to transactions. Transactions are applied
//! until either the bank reaches the tick count when the node is the leader for that slot, or the
23//! node has applied all transactions present in all `Entry`s in the slot.
24//!
25//! Once it is complete, the bank can then be frozen. After frozen, no more transactions can
26//! be applied or state changes made. At the frozen step, rent will be applied and various
27//! sysvar special accounts update to the new state of the system.
28//!
29//! After frozen, and the bank has had the appropriate number of votes on it, then it can become
30//! rooted. At this point, it will not be able to be removed from the chain and the
31//! state is finalized.
32//!
33//! It offers a high-level API that signs transactions
34//! on behalf of the caller, and a low-level API for when they have
35//! already been signed and verified.
36use {
37    crate::{
38        account_saver::collect_accounts_to_store,
39        bank::{
40            builtins::{BuiltinPrototype, BUILTINS, STATELESS_BUILTINS},
41            metrics::*,
42            partitioned_epoch_rewards::{EpochRewardStatus, StakeRewards, VoteRewardsAccounts},
43        },
44        bank_forks::BankForks,
45        epoch_stakes::{split_epoch_stakes, EpochStakes, NodeVoteAccounts, VersionedEpochStakes},
46        installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock},
47        rent_collector::RentCollectorWithMetrics,
48        runtime_config::RuntimeConfig,
49        serde_snapshot::BankIncrementalSnapshotPersistence,
50        snapshot_hash::SnapshotHash,
51        stake_account::StakeAccount,
52        stake_history::StakeHistory,
53        stake_weighted_timestamp::{
54            calculate_stake_weighted_timestamp, MaxAllowableDrift,
55            MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST, MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW_V2,
56        },
57        stakes::{InvalidCacheEntryReason, Stakes, StakesCache, StakesEnum},
58        status_cache::{SlotDelta, StatusCache},
59        transaction_batch::{OwnedOrBorrowed, TransactionBatch},
60        verify_precompiles::verify_precompiles,
61    },
62    accounts_lt_hash::InitialStateOfAccount,
63    ahash::AHashMap,
64    byteorder::{ByteOrder, LittleEndian},
65    dashmap::{DashMap, DashSet},
66    log::*,
67    rayon::{
68        iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator},
69        slice::ParallelSlice,
70        ThreadPool, ThreadPoolBuilder,
71    },
72    serde::Serialize,
73    solana_accounts_db::{
74        account_locks::validate_account_locks,
75        accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot},
76        accounts_db::{
77            AccountStorageEntry, AccountsDb, AccountsDbConfig, CalcAccountsHashDataSource,
78            DuplicatesLtHash, OldStoragesPolicy, PubkeyHashAccount,
79            VerifyAccountsHashAndLamportsConfig,
80        },
81        accounts_hash::{
82            AccountHash, AccountsHash, AccountsLtHash, CalcAccountsHashConfig, HashStats,
83            IncrementalAccountsHash,
84        },
85        accounts_index::{IndexKey, ScanConfig, ScanResult},
86        accounts_partition::{self, Partition, PartitionIndex},
87        accounts_update_notifier_interface::AccountsUpdateNotifier,
88        ancestors::{Ancestors, AncestorsForSerialization},
89        blockhash_queue::BlockhashQueue,
90        epoch_accounts_hash::EpochAccountsHash,
91        sorted_storages::SortedStorages,
92        stake_rewards::StakeReward,
93        storable_accounts::StorableAccounts,
94    },
95    solana_bpf_loader_program::syscalls::{
96        create_program_runtime_environment_v1, create_program_runtime_environment_v2,
97    },
98    solana_compute_budget::compute_budget::ComputeBudget,
99    solana_cost_model::{block_cost_limits::simd_0207_block_limits, cost_tracker::CostTracker},
100    solana_feature_set::{
101        self as feature_set, remove_rounding_in_fee_calculation, reward_full_priority_fee,
102        FeatureSet,
103    },
104    solana_lattice_hash::lt_hash::LtHash,
105    solana_measure::{meas_dur, measure::Measure, measure_time, measure_us},
106    solana_program_runtime::{
107        invoke_context::BuiltinFunctionWithContext, loaded_programs::ProgramCacheEntry,
108    },
109    solana_runtime_transaction::instructions_processor::process_compute_budget_instructions,
110    solana_sdk::{
111        account::{
112            create_account_shared_data_with_fields as create_account, from_account, Account,
113            AccountSharedData, InheritableAccountFields, ReadableAccount, WritableAccount,
114        },
115        bpf_loader_upgradeable,
116        clock::{
117            BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_HASHES_PER_TICK,
118            DEFAULT_TICKS_PER_SECOND, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE,
119            MAX_TRANSACTION_FORWARDING_DELAY, SECONDS_PER_DAY, UPDATED_HASHES_PER_TICK2,
120            UPDATED_HASHES_PER_TICK3, UPDATED_HASHES_PER_TICK4, UPDATED_HASHES_PER_TICK5,
121            UPDATED_HASHES_PER_TICK6,
122        },
123        epoch_info::EpochInfo,
124        epoch_schedule::EpochSchedule,
125        feature,
126        fee::{FeeBudgetLimits, FeeDetails, FeeStructure},
127        fee_calculator::FeeRateGovernor,
128        genesis_config::{ClusterType, GenesisConfig},
129        hard_forks::HardForks,
130        hash::{extend_and_hash, hashv, Hash},
131        incinerator,
132        inflation::Inflation,
133        inner_instruction::InnerInstructions,
134        message::{AccountKeys, SanitizedMessage},
135        native_loader,
136        native_token::LAMPORTS_PER_SOL,
137        packet::PACKET_DATA_SIZE,
138        precompiles::get_precompiles,
139        pubkey::Pubkey,
140        rent_collector::{CollectedInfo, RentCollector},
141        rent_debits::RentDebits,
142        reserved_account_keys::ReservedAccountKeys,
143        reward_info::RewardInfo,
144        signature::{Keypair, Signature},
145        slot_hashes::SlotHashes,
146        slot_history::{Check, SlotHistory},
147        stake::state::Delegation,
148        system_transaction,
149        sysvar::{self, last_restart_slot::LastRestartSlot, Sysvar, SysvarId},
150        timing::years_as_slots,
151        transaction::{
152            MessageHash, Result, SanitizedTransaction, Transaction, TransactionError,
153            TransactionVerificationMode, VersionedTransaction, MAX_TX_ACCOUNT_LOCKS,
154        },
155        transaction_context::{TransactionAccount, TransactionReturnData},
156    },
157    solana_stake_program::{
158        points::{InflationPointCalculationEvent, PointValue},
159        stake_state::StakeStateV2,
160    },
161    solana_svm::{
162        account_loader::{collect_rent_from_account, LoadedTransaction},
163        account_overrides::AccountOverrides,
164        transaction_commit_result::{CommittedTransaction, TransactionCommitResult},
165        transaction_error_metrics::TransactionErrorMetrics,
166        transaction_execution_result::{
167            TransactionExecutionDetails, TransactionLoadedAccountsStats,
168        },
169        transaction_processing_callback::{AccountState, TransactionProcessingCallback},
170        transaction_processing_result::{
171            ProcessedTransaction, TransactionProcessingResult,
172            TransactionProcessingResultExtensions,
173        },
174        transaction_processor::{
175            ExecutionRecordingConfig, TransactionBatchProcessor, TransactionLogMessages,
176            TransactionProcessingConfig, TransactionProcessingEnvironment,
177        },
178    },
179    solana_svm_transaction::svm_message::SVMMessage,
180    solana_timings::{ExecuteTimingType, ExecuteTimings},
181    solana_vote::vote_account::{VoteAccount, VoteAccountsHashMap},
182    solana_vote_program::vote_state::VoteState,
183    std::{
184        collections::{HashMap, HashSet},
185        convert::TryFrom,
186        fmt,
187        ops::{AddAssign, RangeFull, RangeInclusive},
188        path::PathBuf,
189        slice,
190        sync::{
191            atomic::{
192                AtomicBool, AtomicI64, AtomicU64, AtomicUsize,
193                Ordering::{AcqRel, Acquire, Relaxed},
194            },
195            Arc, LockResult, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, Weak,
196        },
197        thread::Builder,
198        time::{Duration, Instant},
199    },
200};
201pub use {
202    partitioned_epoch_rewards::KeyedRewardsAndNumPartitions, solana_sdk::reward_type::RewardType,
203};
204#[cfg(feature = "dev-context-only-utils")]
205use {
206    solana_accounts_db::accounts_db::{
207        ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING,
208    },
209    solana_program_runtime::{loaded_programs::ProgramCacheForTxBatch, sysvar_cache::SysvarCache},
210    solana_sdk::nonce,
211    solana_svm::program_loader::load_program_with_pubkey,
212    solana_system_program::{get_system_account_kind, SystemAccountKind},
213};
214
/// params to `verify_accounts_hash`
// NOTE(review): field semantics are inferred from names only —
// `verify_accounts_hash` itself is outside this chunk; confirm there.
struct VerifyAccountsHashConfig {
    // run the extra test calculation of the hash — TODO confirm
    test_hash_calculation: bool,
    // if set, a hash mismatch is logged/ignored rather than fatal — TODO confirm
    ignore_mismatch: bool,
    require_rooted_bank: bool,
    // perform verification on a background thread — TODO confirm
    run_in_background: bool,
    store_hash_raw_data_for_debug: bool,
}
223
224mod accounts_lt_hash;
225mod address_lookup_table;
226pub mod bank_hash_details;
227mod builtin_programs;
228pub mod builtins;
229mod check_transactions;
230pub mod epoch_accounts_hash_utils;
231mod fee_distribution;
232mod metrics;
233pub(crate) mod partitioned_epoch_rewards;
234mod recent_blockhashes_account;
235mod serde_snapshot;
236mod sysvar_cache;
237pub(crate) mod tests;
238
/// Seconds in a Julian year (365.25 days).
pub const SECONDS_PER_YEAR: f64 = 365.25 * 24.0 * 60.0 * 60.0;

/// Bound on epochs' worth of stakes kept for leader-schedule purposes.
/// NOTE(review): the use site is outside this chunk; confirm the exact meaning there.
pub const MAX_LEADER_SCHEDULE_STAKES: Epoch = 5;
242
/// Timing/counter metrics accumulated during rent collection.
///
/// All fields are atomics so they can be bumped without exclusive access;
/// the `_us` fields are presumably microsecond totals (per the suffix) —
/// confirm at the recording sites, which are outside this chunk.
#[derive(Default)]
struct RentMetrics {
    hold_range_us: AtomicU64,
    load_us: AtomicU64,
    collect_us: AtomicU64,
    hash_us: AtomicU64,
    store_us: AtomicU64,
    // number of metric-recording events folded in
    count: AtomicUsize,
}
252
/// Status cache specialized to transaction results.
pub type BankStatusCache = StatusCache<Result<()>>;
/// Per-slot delta of the status cache (ABI-frozen via `frozen_abi` when that
/// feature is enabled, so changes to its layout are caught at build time).
#[cfg_attr(
    feature = "frozen-abi",
    frozen_abi(digest = "BHg4qpwegtaJypLUqAdjQYzYeLfEGf6tA4U5cREbHMHi")
)]
pub type BankSlotDelta = SlotDelta<Result<()>>;
259
/// Millisecond timings accumulated across bank squash operations.
#[derive(Default, Copy, Clone, Debug, PartialEq, Eq)]
pub struct SquashTiming {
    pub squash_accounts_ms: u64,
    pub squash_accounts_cache_ms: u64,
    pub squash_accounts_index_ms: u64,
    pub squash_accounts_store_ms: u64,

    pub squash_cache_ms: u64,
}

impl AddAssign for SquashTiming {
    /// Folds every timing field of `rhs` into `self`.
    fn add_assign(&mut self, rhs: Self) {
        // Destructure so that adding a field to the struct forces this impl
        // to be updated as well (a `..` pattern would silently skip it).
        let Self {
            squash_accounts_ms,
            squash_accounts_cache_ms,
            squash_accounts_index_ms,
            squash_accounts_store_ms,
            squash_cache_ms,
        } = rhs;
        self.squash_accounts_ms += squash_accounts_ms;
        self.squash_accounts_cache_ms += squash_accounts_cache_ms;
        self.squash_accounts_index_ms += squash_accounts_index_ms;
        self.squash_accounts_store_ms += squash_accounts_store_ms;
        self.squash_cache_ms += squash_cache_ms;
    }
}
279
/// Running totals of fees collected by this bank's collector, split into the
/// base transaction fee and the prioritization fee.
#[derive(Debug, Default, PartialEq)]
pub(crate) struct CollectorFeeDetails {
    transaction_fee: u64,
    priority_fee: u64,
}
285
286impl CollectorFeeDetails {
287    pub(crate) fn accumulate(&mut self, fee_details: &FeeDetails) {
288        self.transaction_fee = self
289            .transaction_fee
290            .saturating_add(fee_details.transaction_fee());
291        self.priority_fee = self
292            .priority_fee
293            .saturating_add(fee_details.prioritization_fee());
294    }
295
296    pub(crate) fn total(&self) -> u64 {
297        self.transaction_fee.saturating_add(self.priority_fee)
298    }
299}
300
301impl From<FeeDetails> for CollectorFeeDetails {
302    fn from(fee_details: FeeDetails) -> Self {
303        CollectorFeeDetails {
304            transaction_fee: fee_details.transaction_fee(),
305            priority_fee: fee_details.prioritization_fee(),
306        }
307    }
308}
309
/// Shared, reference-counted pieces of a `Bank`.
#[derive(Debug)]
pub struct BankRc {
    /// where all the Accounts are stored
    pub accounts: Arc<Accounts>,

    /// Previous checkpoint of this bank
    pub(crate) parent: RwLock<Option<Arc<Bank>>>,

    /// Shared counter handing out fresh `BankId`s; starts at 0 (see `BankRc::new`).
    pub(crate) bank_id_generator: Arc<AtomicU64>,
}
320
321impl BankRc {
322    pub(crate) fn new(accounts: Accounts) -> Self {
323        Self {
324            accounts: Arc::new(accounts),
325            parent: RwLock::new(None),
326            bank_id_generator: Arc::new(AtomicU64::new(0)),
327        }
328    }
329}
330
/// Bundle of per-transaction processing results plus aggregate counts.
pub struct LoadAndExecuteTransactionsOutput {
    /// Vector of results indicating whether a transaction was processed or could not
    /// be processed. Note processed transactions can still have failed!
    pub processing_results: Vec<TransactionProcessingResult>,
    /// Processed transaction counts used to update bank transaction counts and
    /// for metrics reporting.
    pub processed_counts: ProcessedTransactionCounts,
}
339
/// Outcome of simulating a single transaction (no state is committed).
#[derive(Debug, PartialEq)]
pub struct TransactionSimulationResult {
    /// Overall success/failure of the simulated transaction.
    pub result: Result<()>,
    /// Log messages emitted during simulation.
    pub logs: TransactionLogMessages,
    /// Account states as they would be after the transaction.
    pub post_simulation_accounts: Vec<TransactionAccount>,
    /// Compute units consumed by the simulation.
    pub units_consumed: u64,
    /// Return data set by the transaction, if any.
    pub return_data: Option<TransactionReturnData>,
    /// Inner (cross-program) instructions, if recording was enabled.
    pub inner_instructions: Option<Vec<InnerInstructions>>,
}
349
/// Paired account balances captured before and after a batch of transactions
/// was processed.
#[derive(Clone)]
pub struct TransactionBalancesSet {
    pub pre_balances: TransactionBalances,
    pub post_balances: TransactionBalances,
}

impl TransactionBalancesSet {
    /// Pairs up pre- and post-execution balances.
    ///
    /// # Panics
    /// Panics if the two vectors cover a different number of transactions.
    pub fn new(pre_balances: TransactionBalances, post_balances: TransactionBalances) -> Self {
        assert_eq!(pre_balances.len(), post_balances.len());
        Self {
            pre_balances,
            post_balances,
        }
    }
}

/// Lamport balances of each account in each transaction: `balances[tx][account]`.
pub type TransactionBalances = Vec<Vec<u64>>;
366
367#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
368pub enum TransactionLogCollectorFilter {
369    All,
370    AllWithVotes,
371    None,
372    OnlyMentionedAddresses,
373}
374
375impl Default for TransactionLogCollectorFilter {
376    fn default() -> Self {
377        Self::None
378    }
379}
380
/// Configuration for `TransactionLogCollector`: which addresses to watch and
/// which filter policy to apply.
#[derive(Debug, Default)]
pub struct TransactionLogCollectorConfig {
    /// Addresses that make a transaction's logs eligible for collection
    /// under `OnlyMentionedAddresses` (and indexed under any filter).
    pub mentioned_addresses: HashSet<Pubkey>,
    /// Which transactions' logs to keep.
    pub filter: TransactionLogCollectorFilter,
}
386
/// Logs captured for a single transaction, together with its identity and result.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TransactionLogInfo {
    /// Signature identifying the transaction.
    pub signature: Signature,
    /// The transaction's execution result.
    pub result: Result<()>,
    /// Whether this was a vote transaction.
    pub is_vote: bool,
    /// The messages logged during execution.
    pub log_messages: TransactionLogMessages,
}
394
/// Accumulates transaction logs for a bank, with an index by mentioned address.
#[derive(Default, Debug)]
pub struct TransactionLogCollector {
    // All the logs collected from this Bank.  Exact contents depend on the
    // active `TransactionLogCollectorFilter`
    pub logs: Vec<TransactionLogInfo>,

    // For each `mentioned_addresses`, maintain a list of indices into `logs` to easily
    // locate the logs from transactions that included the mentioned addresses.
    pub mentioned_address_map: HashMap<Pubkey, Vec<usize>>,
}
405
406impl TransactionLogCollector {
407    pub fn get_logs_for_address(
408        &self,
409        address: Option<&Pubkey>,
410    ) -> Option<Vec<TransactionLogInfo>> {
411        match address {
412            None => Some(self.logs.clone()),
413            Some(address) => self.mentioned_address_map.get(address).map(|log_indices| {
414                log_indices
415                    .iter()
416                    .filter_map(|i| self.logs.get(*i).cloned())
417                    .collect()
418            }),
419        }
420    }
421}
422
/// Bank's common fields shared by all supported snapshot versions for deserialization.
/// Sync fields with BankFieldsToSerialize! This is paired with it.
/// All members are made public to remain Bank's members private and to make versioned deserializer workable on this
/// Note that some fields are missing from the serializer struct. This is because of fields added later.
/// Since it is difficult to insert fields to serialize/deserialize against existing code already deployed,
/// new fields can be optionally serialized and optionally deserialized. At some point, the serialization and
/// deserialization will use a new mechanism or otherwise be in sync more clearly.
#[derive(Clone, Debug, Default)]
#[cfg_attr(feature = "dev-context-only-utils", derive(PartialEq))]
pub struct BankFieldsToDeserialize {
    pub(crate) blockhash_queue: BlockhashQueue,
    pub(crate) ancestors: AncestorsForSerialization,
    pub(crate) hash: Hash,
    pub(crate) parent_hash: Hash,
    pub(crate) parent_slot: Slot,
    pub(crate) hard_forks: HardForks,
    pub(crate) transaction_count: u64,
    pub(crate) tick_height: u64,
    pub(crate) signature_count: u64,
    pub(crate) capitalization: u64,
    pub(crate) max_tick_height: u64,
    pub(crate) hashes_per_tick: Option<u64>,
    pub(crate) ticks_per_slot: u64,
    pub(crate) ns_per_slot: u128,
    pub(crate) genesis_creation_time: UnixTimestamp,
    pub(crate) slots_per_year: f64,
    pub(crate) slot: Slot,
    pub(crate) epoch: Epoch,
    pub(crate) block_height: u64,
    pub(crate) collector_id: Pubkey,
    pub(crate) collector_fees: u64,
    pub(crate) fee_rate_governor: FeeRateGovernor,
    pub(crate) collected_rent: u64,
    pub(crate) rent_collector: RentCollector,
    pub(crate) epoch_schedule: EpochSchedule,
    pub(crate) inflation: Inflation,
    pub(crate) stakes: Stakes<Delegation>,
    pub(crate) epoch_stakes: HashMap<Epoch, EpochStakes>,
    pub(crate) is_delta: bool,
    pub(crate) accounts_data_len: u64,
    // The two `Option` fields below were added after the original snapshot
    // format and may therefore be absent in older snapshots (see the note in
    // the struct-level docs about optionally-deserialized fields).
    pub(crate) incremental_snapshot_persistence: Option<BankIncrementalSnapshotPersistence>,
    pub(crate) epoch_accounts_hash: Option<Hash>,
}
466
/// Bank's common fields shared by all supported snapshot versions for serialization.
/// This was separated from BankFieldsToDeserialize to avoid cloning by using refs.
/// So, sync fields with BankFieldsToDeserialize!
/// all members are made public to keep Bank private and to make versioned serializer workable on this.
/// Note that some fields are missing from the serializer struct. This is because of fields added later.
/// Since it is difficult to insert fields to serialize/deserialize against existing code already deployed,
/// new fields can be optionally serialized and optionally deserialized. At some point, the serialization and
/// deserialization will use a new mechanism or otherwise be in sync more clearly.
#[derive(Debug)]
pub struct BankFieldsToSerialize {
    pub blockhash_queue: BlockhashQueue,
    pub ancestors: AncestorsForSerialization,
    pub hash: Hash,
    pub parent_hash: Hash,
    pub parent_slot: Slot,
    pub hard_forks: HardForks,
    pub transaction_count: u64,
    pub tick_height: u64,
    pub signature_count: u64,
    pub capitalization: u64,
    pub max_tick_height: u64,
    pub hashes_per_tick: Option<u64>,
    pub ticks_per_slot: u64,
    pub ns_per_slot: u128,
    pub genesis_creation_time: UnixTimestamp,
    pub slots_per_year: f64,
    pub slot: Slot,
    pub epoch: Epoch,
    pub block_height: u64,
    pub collector_id: Pubkey,
    pub collector_fees: u64,
    pub fee_rate_governor: FeeRateGovernor,
    pub collected_rent: u64,
    pub rent_collector: RentCollector,
    pub epoch_schedule: EpochSchedule,
    pub inflation: Inflation,
    // Unlike the deserialize side, stakes are type-erased here (`StakesEnum`).
    pub stakes: StakesEnum,
    pub epoch_stakes: HashMap<Epoch, EpochStakes>,
    pub is_delta: bool,
    pub accounts_data_len: u64,
    pub versioned_epoch_stakes: HashMap<u64, VersionedEpochStakes>,
}
509
// Can't derive PartialEq because RwLock doesn't implement PartialEq
#[cfg(feature = "dev-context-only-utils")]
impl PartialEq for Bank {
    /// Test-only structural equality: compares the consensus-relevant fields,
    /// deliberately ignoring caches, metrics, and runtime plumbing.
    /// The exhaustive destructure (no `..`) makes adding a Bank field a
    /// compile error here, forcing a conscious decision about its equality.
    fn eq(&self, other: &Self) -> bool {
        if std::ptr::eq(self, other) {
            return true;
        }
        // Suppress rustfmt until https://github.com/rust-lang/rustfmt/issues/5920 is fixed ...
        #[rustfmt::skip]
        let Self {
            skipped_rewrites: _,
            rc: _,
            status_cache: _,
            blockhash_queue,
            ancestors,
            hash,
            parent_hash,
            parent_slot,
            hard_forks,
            transaction_count,
            non_vote_transaction_count_since_restart: _,
            transaction_error_count: _,
            transaction_entries_count: _,
            transactions_per_entry_max: _,
            tick_height,
            signature_count,
            capitalization,
            max_tick_height,
            hashes_per_tick,
            ticks_per_slot,
            ns_per_slot,
            genesis_creation_time,
            slots_per_year,
            slot,
            bank_id: _,
            epoch,
            block_height,
            collector_id,
            collector_fees,
            fee_rate_governor,
            collected_rent,
            rent_collector,
            epoch_schedule,
            inflation,
            stakes_cache,
            epoch_stakes,
            is_delta,
            #[cfg(feature = "dev-context-only-utils")]
            hash_overrides,
            accounts_lt_hash,
            // TODO: Confirm if all these fields are intentionally ignored!
            rewards: _,
            cluster_type: _,
            lazy_rent_collection: _,
            rewards_pool_pubkeys: _,
            transaction_debug_keys: _,
            transaction_log_collector_config: _,
            transaction_log_collector: _,
            feature_set: _,
            reserved_account_keys: _,
            drop_callback: _,
            freeze_started: _,
            vote_only_bank: _,
            cost_tracker: _,
            accounts_data_size_initial: _,
            accounts_data_size_delta_on_chain: _,
            accounts_data_size_delta_off_chain: _,
            epoch_reward_status: _,
            transaction_processor: _,
            check_program_modification_slot: _,
            collector_fee_details: _,
            compute_budget: _,
            transaction_account_lock_limit: _,
            fee_structure: _,
            cache_for_accounts_lt_hash: _,
            block_id,
            // Ignore new fields explicitly if they do not impact PartialEq.
            // Adding ".." will remove compile-time checks that if a new field
            // is added to the struct, this PartialEq is accordingly updated.
        } = self;
        *blockhash_queue.read().unwrap() == *other.blockhash_queue.read().unwrap()
            && ancestors == &other.ancestors
            && *hash.read().unwrap() == *other.hash.read().unwrap()
            && parent_hash == &other.parent_hash
            && parent_slot == &other.parent_slot
            && *hard_forks.read().unwrap() == *other.hard_forks.read().unwrap()
            && transaction_count.load(Relaxed) == other.transaction_count.load(Relaxed)
            && tick_height.load(Relaxed) == other.tick_height.load(Relaxed)
            && signature_count.load(Relaxed) == other.signature_count.load(Relaxed)
            && capitalization.load(Relaxed) == other.capitalization.load(Relaxed)
            && max_tick_height == &other.max_tick_height
            && hashes_per_tick == &other.hashes_per_tick
            && ticks_per_slot == &other.ticks_per_slot
            && ns_per_slot == &other.ns_per_slot
            && genesis_creation_time == &other.genesis_creation_time
            && slots_per_year == &other.slots_per_year
            && slot == &other.slot
            && epoch == &other.epoch
            && block_height == &other.block_height
            && collector_id == &other.collector_id
            && collector_fees.load(Relaxed) == other.collector_fees.load(Relaxed)
            && fee_rate_governor == &other.fee_rate_governor
            && collected_rent.load(Relaxed) == other.collected_rent.load(Relaxed)
            && rent_collector == &other.rent_collector
            && epoch_schedule == &other.epoch_schedule
            && *inflation.read().unwrap() == *other.inflation.read().unwrap()
            && *stakes_cache.stakes() == *other.stakes_cache.stakes()
            && epoch_stakes == &other.epoch_stakes
            && is_delta.load(Relaxed) == other.is_delta.load(Relaxed)
            // No deadlock is possible, when Arc::ptr_eq() returns false, because of being
            // different Mutexes.
            && (Arc::ptr_eq(hash_overrides, &other.hash_overrides) ||
                *hash_overrides.lock().unwrap() == *other.hash_overrides.lock().unwrap())
            && !(self.is_accounts_lt_hash_enabled() && other.is_accounts_lt_hash_enabled()
                && *accounts_lt_hash.lock().unwrap() != *other.accounts_lt_hash.lock().unwrap())
            && *block_id.read().unwrap() == *other.block_id.read().unwrap()
    }
}
628
#[cfg(feature = "dev-context-only-utils")]
impl BankFieldsToSerialize {
    /// Create a new BankFieldsToSerialize where basically every field is defaulted.
    /// Only use for tests; many of the fields are invalid!
    pub fn default_for_tests() -> Self {
        Self {
            blockhash_queue: BlockhashQueue::default(),
            ancestors: AncestorsForSerialization::default(),
            hash: Hash::default(),
            parent_hash: Hash::default(),
            parent_slot: 0,
            hard_forks: HardForks::default(),
            transaction_count: 0,
            tick_height: 0,
            signature_count: 0,
            capitalization: 0,
            max_tick_height: 0,
            hashes_per_tick: None,
            ticks_per_slot: 0,
            ns_per_slot: 0,
            genesis_creation_time: 0,
            slots_per_year: 0.0,
            slot: 0,
            epoch: 0,
            block_height: 0,
            collector_id: Pubkey::default(),
            collector_fees: 0,
            fee_rate_governor: FeeRateGovernor::default(),
            collected_rent: 0,
            rent_collector: RentCollector::default(),
            epoch_schedule: EpochSchedule::default(),
            inflation: Inflation::default(),
            stakes: Stakes::<Delegation>::default().into(),
            epoch_stakes: HashMap::default(),
            is_delta: false,
            accounts_data_len: 0,
            versioned_epoch_stakes: HashMap::default(),
        }
    }
}
669
/// Event emitted to a `RewardCalcTracer` during reward calculation; currently
/// only staking-point calculation events for a given stake pubkey.
#[derive(Debug)]
pub enum RewardCalculationEvent<'a, 'b> {
    Staking(&'a Pubkey, &'b InflationPointCalculationEvent),
}
674
/// type alias is not supported for trait in rust yet. As a workaround, we define the
/// `RewardCalcTracer` trait explicitly and implement it on any type that implements
/// `Fn(&RewardCalculationEvent) + Send + Sync`.
pub trait RewardCalcTracer: Fn(&RewardCalculationEvent) + Send + Sync {}

// Blanket impl: any suitable closure automatically is a `RewardCalcTracer`.
impl<T: Fn(&RewardCalculationEvent) + Send + Sync> RewardCalcTracer for T {}

/// Convenience for passing "no tracer" while still naming a concrete type for
/// the `Option<impl RewardCalcTracer>` parameter.
fn null_tracer() -> Option<impl RewardCalcTracer> {
    None::<fn(&RewardCalculationEvent)>
}
685
/// Hook invoked with a reference to a `Bank`; stored in `Bank::drop_callback`
/// (presumably run when the bank is dropped — confirm at the call site, which
/// is outside this chunk). `clone_box` allows cloning the trait object.
pub trait DropCallback: fmt::Debug {
    fn callback(&self, b: &Bank);
    fn clone_box(&self) -> Box<dyn DropCallback + Send + Sync>;
}

/// Optional holder for a `DropCallback`; `None` means no callback installed.
#[derive(Debug, Default)]
pub struct OptionalDropCallback(Option<Box<dyn DropCallback + Send + Sync>>);
693
/// Dev/test-only map of per-slot hash overrides (blockhash + bank hash).
#[derive(Default, Debug, Clone, PartialEq)]
#[cfg(feature = "dev-context-only-utils")]
pub struct HashOverrides {
    hashes: HashMap<Slot, HashOverride>,
}
699
#[cfg(feature = "dev-context-only-utils")]
impl HashOverrides {
    /// Looks up the override pair recorded for `slot`, if any.
    fn get_hash_override(&self, slot: Slot) -> Option<&HashOverride> {
        self.hashes.get(&slot)
    }

    /// Blockhash override recorded for `slot`, if any.
    fn get_blockhash_override(&self, slot: Slot) -> Option<&Hash> {
        let hash_override = self.get_hash_override(slot)?;
        Some(&hash_override.blockhash)
    }

    /// Bank hash override recorded for `slot`, if any.
    fn get_bank_hash_override(&self, slot: Slot) -> Option<&Hash> {
        let hash_override = self.get_hash_override(slot)?;
        Some(&hash_override.bank_hash)
    }

    /// Records the override pair for `slot`.
    ///
    /// # Panics
    /// Panics if an override for `slot` was already recorded.
    pub fn add_override(&mut self, slot: Slot, blockhash: Hash, bank_hash: Hash) {
        let hash_override = HashOverride {
            blockhash,
            bank_hash,
        };
        let previous = self.hashes.insert(slot, hash_override);
        assert!(previous.is_none());
    }
}
730
/// A single slot's override pair: the blockhash and the bank hash to use.
#[derive(Debug, Clone, PartialEq)]
#[cfg(feature = "dev-context-only-utils")]
struct HashOverride {
    blockhash: Hash,
    bank_hash: Hash,
}
737
/// Manager for the state of all accounts and programs after processing its entries.
#[derive(Debug)]
pub struct Bank {
    /// References to accounts, parent and signature status
    pub rc: BankRc,

    /// A cache of signature statuses
    pub status_cache: Arc<RwLock<BankStatusCache>>,

    /// FIFO queue of `recent_blockhash` items
    blockhash_queue: RwLock<BlockhashQueue>,

    /// The set of parents including this bank
    pub ancestors: Ancestors,

    /// Hash of this Bank's state. Only meaningful after freezing.
    hash: RwLock<Hash>,

    /// Hash of this Bank's parent's state
    parent_hash: Hash,

    /// parent's slot
    parent_slot: Slot,

    /// slots to hard fork at
    hard_forks: Arc<RwLock<HardForks>>,

    /// The number of committed transactions since genesis.
    transaction_count: AtomicU64,

    /// The number of non-vote transactions committed since the most
    /// recent boot from snapshot or genesis. This value is only stored in
    /// blockstore for the RPC method "getPerformanceSamples". It is not
    /// retained within snapshots, but is preserved in `Bank::new_from_parent`.
    non_vote_transaction_count_since_restart: AtomicU64,

    /// The number of transaction errors in this slot
    transaction_error_count: AtomicU64,

    /// The number of transaction entries in this slot
    transaction_entries_count: AtomicU64,

    /// The max number of transaction in an entry in this slot
    transactions_per_entry_max: AtomicU64,

    /// Bank tick height
    tick_height: AtomicU64,

    /// The number of signatures from valid transactions in this slot
    signature_count: AtomicU64,

    /// Total capitalization, used to calculate inflation
    capitalization: AtomicU64,

    // Bank max_tick_height
    max_tick_height: u64,

    /// The number of hashes in each tick. None value means hashing is disabled.
    hashes_per_tick: Option<u64>,

    /// The number of ticks in each slot.
    ticks_per_slot: u64,

    /// length of a slot in ns
    pub ns_per_slot: u128,

    /// genesis time, used for computed clock
    genesis_creation_time: UnixTimestamp,

    /// The number of slots per year, used for inflation
    slots_per_year: f64,

    /// Bank slot (i.e. block)
    slot: Slot,

    /// Process-unique identifier for this bank instance, drawn from
    /// `BankRc::bank_id_generator` in `_new_from_parent`. Distinct from the
    /// slot: two banks on different forks may share a slot but not a bank id.
    bank_id: BankId,

    /// Bank epoch
    epoch: Epoch,

    /// Bank block_height
    block_height: u64,

    /// The pubkey to send transactions fees to.
    collector_id: Pubkey,

    /// Fees that have been collected
    collector_fees: AtomicU64,

    /// Track cluster signature throughput and adjust fee rate
    pub(crate) fee_rate_governor: FeeRateGovernor,

    /// Rent that has been collected
    collected_rent: AtomicU64,

    /// latest rent collector, knows the epoch
    rent_collector: RentCollector,

    /// initialized from genesis
    pub(crate) epoch_schedule: EpochSchedule,

    /// inflation specs
    inflation: Arc<RwLock<Inflation>>,

    /// cache of vote_account and stake_account state for this fork
    stakes_cache: StakesCache,

    /// staked nodes on epoch boundaries, saved off when a bank.slot() is at
    ///   a leader schedule calculation boundary
    epoch_stakes: HashMap<Epoch, EpochStakes>,

    /// A boolean reflecting whether any entries were recorded into the PoH
    /// stream for the slot == self.slot
    is_delta: AtomicBool,

    /// Protocol-level rewards that were distributed by this bank
    pub rewards: RwLock<Vec<(Pubkey, RewardInfo)>>,

    /// The cluster this bank belongs to; populated from the genesis config
    /// (see `new_with_paths`), `None` only for a default-constructed bank
    pub cluster_type: Option<ClusterType>,

    // NOTE(review): flag appears to defer/disable eager rent collection;
    // inherited from the parent in `_new_from_parent` — confirm semantics
    pub lazy_rent_collection: AtomicBool,

    // this is temporary field only to remove rewards_pool entirely
    pub rewards_pool_pubkeys: Arc<HashSet<Pubkey>>,

    /// Optional set of keys that receive extra transaction debug treatment
    /// (assumption from name; supplied via `new_with_paths`'s `debug_keys`)
    transaction_debug_keys: Option<Arc<HashSet<Pubkey>>>,

    // Global configuration for how transaction logs should be collected across all banks
    pub transaction_log_collector_config: Arc<RwLock<TransactionLogCollectorConfig>>,

    // Logs from transactions that this Bank executed collected according to the criteria in
    // `transaction_log_collector_config`
    pub transaction_log_collector: Arc<RwLock<TransactionLogCollector>>,

    /// Runtime feature activations in effect for this bank
    pub feature_set: Arc<FeatureSet>,

    /// Set of reserved account keys that cannot be write locked
    reserved_account_keys: Arc<ReservedAccountKeys>,

    /// callback function only to be called when dropping and should only be called once
    pub drop_callback: RwLock<OptionalDropCallback>,

    /// Whether freezing of this bank has started (false at creation)
    pub freeze_started: AtomicBool,

    /// Whether this bank was created as a vote-only bank
    /// (see `NewBankOptions::vote_only_bank`)
    vote_only_bank: bool,

    /// Tracks block cost usage; limits are re-derived from the parent's cost
    /// tracker in `_new_from_parent`
    cost_tracker: RwLock<CostTracker>,

    /// The initial accounts data size at the start of this Bank, before processing any transactions/etc
    accounts_data_size_initial: u64,
    /// The change to accounts data size in this Bank, due on-chain events (i.e. transactions)
    accounts_data_size_delta_on_chain: AtomicI64,
    /// The change to accounts data size in this Bank, due to off-chain events (i.e. rent collection)
    accounts_data_size_delta_off_chain: AtomicI64,

    /// until the skipped rewrites feature is activated, it is possible to skip rewrites and still include
    /// the account hash of the accounts that would have been rewritten as bank hash expects.
    skipped_rewrites: Mutex<HashMap<Pubkey, AccountHash>>,

    /// Progress of partitioned epoch-reward distribution; cloned from the
    /// parent in `_new_from_parent`
    epoch_reward_status: EpochRewardStatus,

    /// SVM transaction batch processor; owns the program cache and the sysvar
    /// cache used during transaction execution
    transaction_processor: TransactionBatchProcessor<BankForks>,

    // NOTE(review): appears to gate checks on a program's modification slot
    // during loading — confirm against the field's setters/readers
    check_program_modification_slot: bool,

    /// Collected fee details
    collector_fee_details: RwLock<CollectorFeeDetails>,

    /// The compute budget to use for transaction execution.
    compute_budget: Option<ComputeBudget>,

    /// The max number of accounts that a transaction may lock.
    transaction_account_lock_limit: Option<usize>,

    /// Fee structure to use for assessing transaction fees.
    fee_structure: FeeStructure,

    /// blockhash and bank_hash overrides keyed by slot for simulated block production.
    /// This _field_ was needed to be DCOU-ed to avoid 2 locks per bank freezing...
    #[cfg(feature = "dev-context-only-utils")]
    hash_overrides: Arc<Mutex<HashOverrides>>,

    /// The lattice hash of all accounts
    ///
    /// The value is only meaningful after freezing.
    accounts_lt_hash: Mutex<AccountsLtHash>,

    /// A cache of *the initial state* of accounts modified in this slot
    ///
    /// The accounts lt hash needs both the initial and final state of each
    /// account that was modified in this slot.  Cache the initial state here.
    cache_for_accounts_lt_hash: RwLock<AHashMap<Pubkey, InitialStateOfAccount>>,

    /// The unique identifier for the corresponding block for this bank.
    /// None for banks that have not yet completed replay or for leader banks as we cannot populate block_id
    /// until bankless leader. Can be computed directly from shreds without needing to execute transactions.
    block_id: RwLock<Option<Hash>>,
}
936
/// A vote account's deserialized state paired with the stake accounts
/// delegated to it (each keyed by the stake account's pubkey).
struct VoteWithStakeDelegations {
    // Deserialized vote state for the vote account
    vote_state: Arc<VoteState>,
    // The raw vote account data
    vote_account: AccountSharedData,
    // All stake delegations pointing at this vote account
    delegations: Vec<(Pubkey, StakeAccount<Delegation>)>,
}
942
/// Map of vote pubkey -> vote state plus its stake delegations.
type VoteWithStakeDelegationsMap = DashMap<Pubkey, VoteWithStakeDelegations>;

/// Map of pubkey -> reason the cached vote/stake entry was considered invalid.
type InvalidCacheKeyMap = DashMap<Pubkey, InvalidCacheEntryReason>;
946
/// Result of loading vote and stake accounts for reward calculation.
struct LoadVoteAndStakeAccountsResult {
    // Valid vote accounts together with their stake delegations
    vote_with_stake_delegations_map: VoteWithStakeDelegationsMap,
    // Vote keys that failed validation, with the reason they were rejected
    invalid_vote_keys: InvalidCacheKeyMap,
    // How many vote accounts were not found in the cache
    vote_accounts_cache_miss_count: usize,
}
952
/// Per-vote-account reward computed during epoch-boundary reward calculation.
#[derive(Debug)]
struct VoteReward {
    // The vote account state to (possibly) store back
    vote_account: AccountSharedData,
    // Validator commission — assumption from name; confirm units (percent)
    commission: u8,
    // Lamports of reward attributed to the vote account
    vote_rewards: u64,
    // Whether the vote account must be written back to the accounts store
    vote_needs_store: bool,
}
960
/// Map of vote pubkey -> its computed `VoteReward`.
type VoteRewards = DashMap<Pubkey, VoteReward>;
962
/// Optional knobs for creating a new bank
/// (see [`Bank::new_from_parent_with_options`]).
#[derive(Debug, Default)]
pub struct NewBankOptions {
    // Create the bank as a vote-only bank
    pub vote_only_bank: bool,
}
967
968#[cfg(feature = "dev-context-only-utils")]
969#[derive(Debug)]
970pub struct BankTestConfig {
971    pub accounts_db_config: AccountsDbConfig,
972}
973
974#[cfg(feature = "dev-context-only-utils")]
975impl Default for BankTestConfig {
976    fn default() -> Self {
977        Self {
978            accounts_db_config: ACCOUNTS_DB_CONFIG_FOR_TESTING,
979        }
980    }
981}
982
/// Inflation reward parameters computed for the previous epoch.
#[derive(Debug)]
struct PrevEpochInflationRewards {
    // Total lamports allocated to validator rewards
    validator_rewards: u64,
    // Length of the previous epoch expressed in years (used for inflation math)
    prev_epoch_duration_in_years: f64,
    // Inflation rate apportioned to validators
    validator_rate: f64,
    // Inflation rate apportioned to the foundation
    foundation_rate: f64,
}
990
/// Counts accumulated while processing a batch of transactions.
#[derive(Debug, Default, PartialEq)]
pub struct ProcessedTransactionCounts {
    // Total transactions processed
    pub processed_transactions_count: u64,
    // Subset of processed transactions that were not vote transactions
    pub processed_non_vote_transactions_count: u64,
    // Subset of processed transactions that executed successfully
    pub processed_with_successful_result_count: u64,
    // Total signatures across the processed transactions
    pub signature_count: u64,
}
998
999impl Bank {
    /// Build a `Bank` with every field defaulted except the account store,
    /// which is taken from `accounts`.
    ///
    /// This is the common starting point for the real constructors; callers
    /// are expected to fill in genesis/parent-derived state afterwards.
    fn default_with_accounts(accounts: Accounts) -> Self {
        let mut bank = Self {
            skipped_rewrites: Mutex::default(),
            rc: BankRc::new(accounts),
            status_cache: Arc::<RwLock<BankStatusCache>>::default(),
            blockhash_queue: RwLock::<BlockhashQueue>::default(),
            ancestors: Ancestors::default(),
            hash: RwLock::<Hash>::default(),
            parent_hash: Hash::default(),
            parent_slot: Slot::default(),
            hard_forks: Arc::<RwLock<HardForks>>::default(),
            transaction_count: AtomicU64::default(),
            non_vote_transaction_count_since_restart: AtomicU64::default(),
            transaction_error_count: AtomicU64::default(),
            transaction_entries_count: AtomicU64::default(),
            transactions_per_entry_max: AtomicU64::default(),
            tick_height: AtomicU64::default(),
            signature_count: AtomicU64::default(),
            capitalization: AtomicU64::default(),
            max_tick_height: u64::default(),
            hashes_per_tick: Option::<u64>::default(),
            ticks_per_slot: u64::default(),
            ns_per_slot: u128::default(),
            genesis_creation_time: UnixTimestamp::default(),
            slots_per_year: f64::default(),
            slot: Slot::default(),
            bank_id: BankId::default(),
            epoch: Epoch::default(),
            block_height: u64::default(),
            collector_id: Pubkey::default(),
            collector_fees: AtomicU64::default(),
            fee_rate_governor: FeeRateGovernor::default(),
            collected_rent: AtomicU64::default(),
            rent_collector: RentCollector::default(),
            epoch_schedule: EpochSchedule::default(),
            inflation: Arc::<RwLock<Inflation>>::default(),
            stakes_cache: StakesCache::default(),
            epoch_stakes: HashMap::<Epoch, EpochStakes>::default(),
            is_delta: AtomicBool::default(),
            rewards: RwLock::<Vec<(Pubkey, RewardInfo)>>::default(),
            cluster_type: Option::<ClusterType>::default(),
            lazy_rent_collection: AtomicBool::default(),
            rewards_pool_pubkeys: Arc::<HashSet<Pubkey>>::default(),
            transaction_debug_keys: Option::<Arc<HashSet<Pubkey>>>::default(),
            transaction_log_collector_config: Arc::<RwLock<TransactionLogCollectorConfig>>::default(
            ),
            transaction_log_collector: Arc::<RwLock<TransactionLogCollector>>::default(),
            feature_set: Arc::<FeatureSet>::default(),
            reserved_account_keys: Arc::<ReservedAccountKeys>::default(),
            drop_callback: RwLock::new(OptionalDropCallback(None)),
            freeze_started: AtomicBool::default(),
            vote_only_bank: false,
            cost_tracker: RwLock::<CostTracker>::default(),
            accounts_data_size_initial: 0,
            accounts_data_size_delta_on_chain: AtomicI64::new(0),
            accounts_data_size_delta_off_chain: AtomicI64::new(0),
            epoch_reward_status: EpochRewardStatus::default(),
            transaction_processor: TransactionBatchProcessor::default(),
            check_program_modification_slot: false,
            collector_fee_details: RwLock::new(CollectorFeeDetails::default()),
            compute_budget: None,
            transaction_account_lock_limit: None,
            fee_structure: FeeStructure::default(),
            #[cfg(feature = "dev-context-only-utils")]
            hash_overrides: Arc::new(Mutex::new(HashOverrides::default())),
            accounts_lt_hash: Mutex::new(AccountsLtHash(LtHash::identity())),
            cache_for_accounts_lt_hash: RwLock::new(AHashMap::new()),
            block_id: RwLock::new(None),
        };

        // The transaction processor is replaced after construction because it
        // needs the bank's slot and epoch (defaults here, but the pattern is
        // shared with the other constructors).
        bank.transaction_processor =
            TransactionBatchProcessor::new_uninitialized(bank.slot, bank.epoch);

        // Seed the initial accounts-data size from what is already in the store.
        let accounts_data_size_initial = bank.get_total_accounts_stats().unwrap().data_len as u64;
        bank.accounts_data_size_initial = accounts_data_size_initial;

        bank
    }
1078
    /// Create a genesis (slot 0) bank backed by a new accounts db at `paths`.
    ///
    /// Processes `genesis_config`, installs builtins (unless
    /// `debug_do_not_add_builtins`), seeds epoch stakes for every epoch
    /// reachable from slot 0, and primes the sysvars and the sysvar cache.
    /// The `collector_id_for_tests`, `genesis_hash`, and `feature_set`
    /// parameters are only consumed in dev-context-only builds.
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_paths(
        genesis_config: &GenesisConfig,
        runtime_config: Arc<RuntimeConfig>,
        paths: Vec<PathBuf>,
        debug_keys: Option<Arc<HashSet<Pubkey>>>,
        additional_builtins: Option<&[BuiltinPrototype]>,
        debug_do_not_add_builtins: bool,
        accounts_db_config: Option<AccountsDbConfig>,
        accounts_update_notifier: Option<AccountsUpdateNotifier>,
        #[allow(unused)] collector_id_for_tests: Option<Pubkey>,
        exit: Arc<AtomicBool>,
        #[allow(unused)] genesis_hash: Option<Hash>,
        #[allow(unused)] feature_set: Option<FeatureSet>,
    ) -> Self {
        let accounts_db =
            AccountsDb::new_with_config(paths, accounts_db_config, accounts_update_notifier, exit);
        let accounts = Accounts::new(Arc::new(accounts_db));
        let mut bank = Self::default_with_accounts(accounts);
        bank.ancestors = Ancestors::from(vec![bank.slot()]);
        bank.compute_budget = runtime_config.compute_budget;
        bank.transaction_account_lock_limit = runtime_config.transaction_account_lock_limit;
        bank.transaction_debug_keys = debug_keys;
        bank.cluster_type = Some(genesis_config.cluster_type);

        #[cfg(feature = "dev-context-only-utils")]
        {
            bank.feature_set = Arc::new(feature_set.unwrap_or_default());
        }

        // `process_genesis_config` has a different arity depending on whether
        // dev-context-only test hooks are compiled in.
        #[cfg(not(feature = "dev-context-only-utils"))]
        bank.process_genesis_config(genesis_config);
        #[cfg(feature = "dev-context-only-utils")]
        bank.process_genesis_config(genesis_config, collector_id_for_tests, genesis_hash);

        bank.finish_init(
            genesis_config,
            additional_builtins,
            debug_do_not_add_builtins,
        );

        // genesis needs stakes for all epochs up to the epoch implied by
        //  slot = 0 and genesis configuration
        {
            let stakes = bank.stakes_cache.stakes().clone();
            let stakes = Arc::new(StakesEnum::from(stakes));
            for epoch in 0..=bank.get_leader_schedule_epoch(bank.slot) {
                bank.epoch_stakes
                    .insert(epoch, EpochStakes::new(stakes.clone(), epoch));
            }
            bank.update_stake_history(None);
        }
        bank.update_clock(None);
        bank.update_rent();
        bank.update_epoch_schedule();
        bank.update_recent_blockhashes();
        bank.update_last_restart_slot();
        // Populate the sysvar cache so transaction execution can read sysvars.
        bank.transaction_processor
            .fill_missing_sysvar_cache_entries(&bank);
        bank
    }
1140
1141    /// Create a new bank that points to an immutable checkpoint of another bank.
1142    pub fn new_from_parent(parent: Arc<Bank>, collector_id: &Pubkey, slot: Slot) -> Self {
1143        Self::_new_from_parent(
1144            parent,
1145            collector_id,
1146            slot,
1147            null_tracer(),
1148            NewBankOptions::default(),
1149        )
1150    }
1151
1152    pub fn new_from_parent_with_options(
1153        parent: Arc<Bank>,
1154        collector_id: &Pubkey,
1155        slot: Slot,
1156        new_bank_options: NewBankOptions,
1157    ) -> Self {
1158        Self::_new_from_parent(parent, collector_id, slot, null_tracer(), new_bank_options)
1159    }
1160
1161    pub fn new_from_parent_with_tracer(
1162        parent: Arc<Bank>,
1163        collector_id: &Pubkey,
1164        slot: Slot,
1165        reward_calc_tracer: impl RewardCalcTracer,
1166    ) -> Self {
1167        Self::_new_from_parent(
1168            parent,
1169            collector_id,
1170            slot,
1171            Some(reward_calc_tracer),
1172            NewBankOptions::default(),
1173        )
1174    }
1175
    /// Derive a `RentCollector` for `epoch` from an existing collector
    /// (presumably copies everything but the epoch — see `clone_with_epoch`).
    fn get_rent_collector_from(rent_collector: &RentCollector, epoch: Epoch) -> RentCollector {
        rent_collector.clone_with_epoch(epoch)
    }
1179
    /// Shared implementation behind the `new_from_parent*` constructors.
    ///
    /// Freezes `parent`, then builds a child bank for `slot`, inheriting or
    /// re-deriving each piece of state from the parent. If the child crosses
    /// an epoch boundary, epoch processing (feature activations, stake
    /// activation, rewards) runs here. Each phase is timed and reported via
    /// `report_new_bank_metrics`.
    fn _new_from_parent(
        parent: Arc<Bank>,
        collector_id: &Pubkey,
        slot: Slot,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
        new_bank_options: NewBankOptions,
    ) -> Self {
        let mut time = Measure::start("bank::new_from_parent");
        let NewBankOptions { vote_only_bank } = new_bank_options;

        // The parent must be immutable (frozen) before its state is snapshotted.
        parent.freeze();
        assert_ne!(slot, parent.slot());

        let epoch_schedule = parent.epoch_schedule().clone();
        let epoch = epoch_schedule.get_epoch(slot);

        let (rc, bank_rc_creation_time_us) = measure_us!({
            let accounts_db = Arc::clone(&parent.rc.accounts.accounts_db);
            accounts_db.insert_default_bank_hash_stats(slot, parent.slot());
            BankRc {
                accounts: Arc::new(Accounts::new(accounts_db)),
                parent: RwLock::new(Some(Arc::clone(&parent))),
                bank_id_generator: Arc::clone(&parent.rc.bank_id_generator),
            }
        });

        // The status cache is shared with the parent, not copied.
        let (status_cache, status_cache_time_us) = measure_us!(Arc::clone(&parent.status_cache));

        let (fee_rate_governor, fee_components_time_us) = measure_us!(
            FeeRateGovernor::new_derived(&parent.fee_rate_governor, parent.signature_count())
        );

        // Process-unique id, independent of the slot.
        let bank_id = rc.bank_id_generator.fetch_add(1, Relaxed) + 1;
        let (blockhash_queue, blockhash_queue_time_us) =
            measure_us!(RwLock::new(parent.blockhash_queue.read().unwrap().clone()));

        let (stakes_cache, stakes_cache_time_us) =
            measure_us!(StakesCache::new(parent.stakes_cache.stakes().clone()));

        let (epoch_stakes, epoch_stakes_time_us) = measure_us!(parent.epoch_stakes.clone());

        let (transaction_processor, builtin_program_ids_time_us) = measure_us!(
            TransactionBatchProcessor::new_from(&parent.transaction_processor, slot, epoch)
        );

        let (rewards_pool_pubkeys, rewards_pool_pubkeys_time_us) =
            measure_us!(parent.rewards_pool_pubkeys.clone());

        let (transaction_debug_keys, transaction_debug_keys_time_us) =
            measure_us!(parent.transaction_debug_keys.clone());

        let (transaction_log_collector_config, transaction_log_collector_config_time_us) =
            measure_us!(parent.transaction_log_collector_config.clone());

        let (feature_set, feature_set_time_us) = measure_us!(parent.feature_set.clone());

        let accounts_data_size_initial = parent.load_accounts_data_size();
        let mut new = Self {
            skipped_rewrites: Mutex::default(),
            rc,
            status_cache,
            slot,
            bank_id,
            epoch,
            blockhash_queue,

            // TODO: clean this up, so much special-case copying...
            hashes_per_tick: parent.hashes_per_tick,
            ticks_per_slot: parent.ticks_per_slot,
            ns_per_slot: parent.ns_per_slot,
            genesis_creation_time: parent.genesis_creation_time,
            slots_per_year: parent.slots_per_year,
            epoch_schedule,
            collected_rent: AtomicU64::new(0),
            rent_collector: Self::get_rent_collector_from(&parent.rent_collector, epoch),
            max_tick_height: slot
                .checked_add(1)
                .expect("max tick height addition overflowed")
                .checked_mul(parent.ticks_per_slot)
                .expect("max tick height multiplication overflowed"),
            block_height: parent
                .block_height
                .checked_add(1)
                .expect("block height addition overflowed"),
            fee_rate_governor,
            capitalization: AtomicU64::new(parent.capitalization()),
            vote_only_bank,
            inflation: parent.inflation.clone(),
            transaction_count: AtomicU64::new(parent.transaction_count()),
            non_vote_transaction_count_since_restart: AtomicU64::new(
                parent.non_vote_transaction_count_since_restart(),
            ),
            transaction_error_count: AtomicU64::new(0),
            transaction_entries_count: AtomicU64::new(0),
            transactions_per_entry_max: AtomicU64::new(0),
            // we will .clone_with_epoch() this soon after stake data update; so just .clone() for now
            stakes_cache,
            epoch_stakes,
            parent_hash: parent.hash(),
            parent_slot: parent.slot(),
            collector_id: *collector_id,
            collector_fees: AtomicU64::new(0),
            // Placeholder; properly populated right after construction below.
            ancestors: Ancestors::default(),
            hash: RwLock::new(Hash::default()),
            is_delta: AtomicBool::new(false),
            tick_height: AtomicU64::new(parent.tick_height.load(Relaxed)),
            signature_count: AtomicU64::new(0),
            hard_forks: parent.hard_forks.clone(),
            rewards: RwLock::new(vec![]),
            cluster_type: parent.cluster_type,
            lazy_rent_collection: AtomicBool::new(parent.lazy_rent_collection.load(Relaxed)),
            rewards_pool_pubkeys,
            transaction_debug_keys,
            transaction_log_collector_config,
            transaction_log_collector: Arc::new(RwLock::new(TransactionLogCollector::default())),
            feature_set: Arc::clone(&feature_set),
            reserved_account_keys: parent.reserved_account_keys.clone(),
            drop_callback: RwLock::new(OptionalDropCallback(
                parent
                    .drop_callback
                    .read()
                    .unwrap()
                    .0
                    .as_ref()
                    .map(|drop_callback| drop_callback.clone_box()),
            )),
            freeze_started: AtomicBool::new(false),
            cost_tracker: RwLock::new(parent.read_cost_tracker().unwrap().new_from_parent_limits()),
            accounts_data_size_initial,
            accounts_data_size_delta_on_chain: AtomicI64::new(0),
            accounts_data_size_delta_off_chain: AtomicI64::new(0),
            epoch_reward_status: parent.epoch_reward_status.clone(),
            transaction_processor,
            check_program_modification_slot: false,
            collector_fee_details: RwLock::new(CollectorFeeDetails::default()),
            compute_budget: parent.compute_budget,
            transaction_account_lock_limit: parent.transaction_account_lock_limit,
            fee_structure: parent.fee_structure.clone(),
            #[cfg(feature = "dev-context-only-utils")]
            hash_overrides: parent.hash_overrides.clone(),
            accounts_lt_hash: Mutex::new(parent.accounts_lt_hash.lock().unwrap().clone()),
            cache_for_accounts_lt_hash: RwLock::new(AHashMap::new()),
            block_id: RwLock::new(None),
        };

        // Build the ancestor set: this bank's slot plus all parent slots.
        let (_, ancestors_time_us) = measure_us!({
            let mut ancestors = Vec::with_capacity(1 + new.parents().len());
            ancestors.push(new.slot());
            new.parents().iter().for_each(|p| {
                ancestors.push(p.slot());
            });
            new.ancestors = Ancestors::from(ancestors);
        });

        // Following code may touch AccountsDb, requiring proper ancestors
        let (_, update_epoch_time_us) = measure_us!({
            if parent.epoch() < new.epoch() {
                new.process_new_epoch(
                    parent.epoch(),
                    parent.slot(),
                    parent.block_height(),
                    reward_calc_tracer,
                );
            } else {
                // Save a snapshot of stakes for use in consensus and stake weighted networking
                let leader_schedule_epoch = new.epoch_schedule().get_leader_schedule_epoch(slot);
                new.update_epoch_stakes(leader_schedule_epoch);
            }
            if new.is_partitioned_rewards_code_enabled() {
                new.distribute_partitioned_epoch_rewards();
            }
        });

        let (_epoch, slot_index) = new.epoch_schedule.get_epoch_and_slot_index(new.slot);
        let slots_in_epoch = new.epoch_schedule.get_slots_in_epoch(new.epoch);

        let (_, cache_preparation_time_us) = measure_us!(new
            .transaction_processor
            .prepare_program_cache_for_upcoming_feature_set(
                &new,
                &new.compute_active_feature_set(true).0,
                &new.compute_budget.unwrap_or_default(),
                slot_index,
                slots_in_epoch,
            ));

        // Update sysvars before processing transactions
        let (_, update_sysvars_time_us) = measure_us!({
            new.update_slot_hashes();
            new.update_stake_history(Some(parent.epoch()));
            new.update_clock(Some(parent.epoch()));
            new.update_last_restart_slot()
        });

        let (_, fill_sysvar_cache_time_us) = measure_us!(new
            .transaction_processor
            .fill_missing_sysvar_cache_entries(&new));
        time.stop();

        report_new_bank_metrics(
            slot,
            parent.slot(),
            new.block_height,
            NewBankTimings {
                bank_rc_creation_time_us,
                total_elapsed_time_us: time.as_us(),
                status_cache_time_us,
                fee_components_time_us,
                blockhash_queue_time_us,
                stakes_cache_time_us,
                epoch_stakes_time_us,
                builtin_program_ids_time_us,
                rewards_pool_pubkeys_time_us,
                executor_cache_time_us: 0,
                transaction_debug_keys_time_us,
                transaction_log_collector_config_time_us,
                feature_set_time_us,
                ancestors_time_us,
                update_epoch_time_us,
                cache_preparation_time_us,
                update_sysvars_time_us,
                fill_sysvar_cache_time_us,
            },
        );

        report_loaded_programs_stats(
            &parent
                .transaction_processor
                .program_cache
                .read()
                .unwrap()
                .stats,
            parent.slot(),
        );

        // Program-cache stats are per-slot; reset them for the child bank.
        new.transaction_processor
            .program_cache
            .write()
            .unwrap()
            .stats
            .reset();
        new
    }
1423
1424    pub fn set_fork_graph_in_program_cache(&self, fork_graph: Weak<RwLock<BankForks>>) {
1425        self.transaction_processor
1426            .program_cache
1427            .write()
1428            .unwrap()
1429            .set_fork_graph(fork_graph);
1430    }
1431
1432    pub fn prune_program_cache(&self, new_root_slot: Slot, new_root_epoch: Epoch) {
1433        self.transaction_processor
1434            .program_cache
1435            .write()
1436            .unwrap()
1437            .prune(new_root_slot, new_root_epoch);
1438    }
1439
1440    pub fn prune_program_cache_by_deployment_slot(&self, deployment_slot: Slot) {
1441        self.transaction_processor
1442            .program_cache
1443            .write()
1444            .unwrap()
1445            .prune_by_deployment_slot(deployment_slot);
1446    }
1447
1448    /// Epoch in which the new cooldown warmup rate for stake was activated
1449    pub fn new_warmup_cooldown_rate_epoch(&self) -> Option<Epoch> {
1450        self.feature_set
1451            .new_warmup_cooldown_rate_epoch(&self.epoch_schedule)
1452    }
1453
    /// process for the start of a new epoch
    ///
    /// Called from `_new_from_parent` when this bank is the first bank of a
    /// new epoch: applies feature activations, activates warmed-up stake in
    /// the stakes cache, snapshots epoch stakes for leader scheduling, then
    /// pays out epoch rewards (partitioned or all-at-once). Phase timings are
    /// reported via `report_new_epoch_metrics`.
    fn process_new_epoch(
        &mut self,
        parent_epoch: Epoch,
        parent_slot: Slot,
        parent_height: u64,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
    ) {
        let epoch = self.epoch();
        let slot = self.slot();
        // Dedicated thread pool for the (parallelizable) epoch-boundary work.
        let (thread_pool, thread_pool_time_us) = measure_us!(ThreadPoolBuilder::new()
            .thread_name(|i| format!("solBnkNewEpch{i:02}"))
            .build()
            .expect("new rayon threadpool"));

        let (_, apply_feature_activations_time_us) = measure_us!(
            self.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false)
        );

        // Add new entry to stakes.stake_history, set appropriate epoch and
        // update vote accounts with warmed up stakes before saving a
        // snapshot of stakes in epoch stakes
        let (_, activate_epoch_time_us) = measure_us!(self.stakes_cache.activate_epoch(
            epoch,
            &thread_pool,
            self.new_warmup_cooldown_rate_epoch()
        ));

        // Save a snapshot of stakes for use in consensus and stake weighted networking
        let leader_schedule_epoch = self.epoch_schedule.get_leader_schedule_epoch(slot);
        let (_, update_epoch_stakes_time_us) =
            measure_us!(self.update_epoch_stakes(leader_schedule_epoch));

        let mut rewards_metrics = RewardsMetrics::default();
        // After saving a snapshot of stakes, apply stake rewards and commission
        let (_, update_rewards_with_thread_pool_time_us) =
            measure_us!(if self.is_partitioned_rewards_code_enabled() {
                self.begin_partitioned_rewards(
                    reward_calc_tracer,
                    &thread_pool,
                    parent_epoch,
                    parent_slot,
                    parent_height,
                    &mut rewards_metrics,
                );
            } else {
                self.update_rewards_with_thread_pool(
                    parent_epoch,
                    reward_calc_tracer,
                    &thread_pool,
                    &mut rewards_metrics,
                )
            });

        report_new_epoch_metrics(
            epoch,
            slot,
            parent_slot,
            NewEpochTimings {
                thread_pool_time_us,
                apply_feature_activations_time_us,
                activate_epoch_time_us,
                update_epoch_stakes_time_us,
                update_rewards_with_thread_pool_time_us,
            },
            rewards_metrics,
        );
    }
1522
1523    pub fn byte_limit_for_scans(&self) -> Option<usize> {
1524        self.rc
1525            .accounts
1526            .accounts_db
1527            .accounts_index
1528            .scan_results_limit_bytes
1529    }
1530
1531    pub fn proper_ancestors_set(&self) -> HashSet<Slot> {
1532        HashSet::from_iter(self.proper_ancestors())
1533    }
1534
1535    /// Returns all ancestors excluding self.slot.
1536    pub(crate) fn proper_ancestors(&self) -> impl Iterator<Item = Slot> + '_ {
1537        self.ancestors
1538            .keys()
1539            .into_iter()
1540            .filter(move |slot| *slot != self.slot)
1541    }
1542
1543    pub fn set_callback(&self, callback: Option<Box<dyn DropCallback + Send + Sync>>) {
1544        *self.drop_callback.write().unwrap() = OptionalDropCallback(callback);
1545    }
1546
    /// Whether this bank was created as a vote-only bank.
    pub fn vote_only_bank(&self) -> bool {
        self.vote_only_bank
    }
1550
    /// Like `new_from_parent` but additionally:
    /// * Doesn't assume that the parent is anywhere near `slot`, parent could be millions of slots
    ///   in the past
    /// * Adjusts the new bank's tick height to avoid having to run PoH for millions of slots
    /// * Freezes the new bank, assuming that the user will `Bank::new_from_parent` from this bank
    /// * Calculates and sets the epoch accounts hash from the parent
    pub fn warp_from_parent(
        parent: Arc<Bank>,
        collector_id: &Pubkey,
        slot: Slot,
        data_source: CalcAccountsHashDataSource,
    ) -> Self {
        // Freeze the parent so its state is final before hashing its accounts.
        parent.freeze();
        // Mark an epoch accounts hash calculation as in-flight for the
        // parent's slot, compute the accounts hash, then record it as valid.
        parent
            .rc
            .accounts
            .accounts_db
            .epoch_accounts_hash_manager
            .set_in_flight(parent.slot());
        let accounts_hash = parent.update_accounts_hash(data_source, false, true);
        let epoch_accounts_hash = accounts_hash.into();
        parent
            .rc
            .accounts
            .accounts_db
            .epoch_accounts_hash_manager
            .set_valid(epoch_accounts_hash, parent.slot());

        // Capture the parent's clock timestamp before `parent` is moved into
        // `new_from_parent` below.
        let parent_timestamp = parent.clock().unix_timestamp;
        let mut new = Bank::new_from_parent(parent, collector_id, slot);
        new.apply_feature_activations(ApplyFeatureActivationsCaller::WarpFromParent, false);
        new.update_epoch_stakes(new.epoch_schedule().get_epoch(slot));
        // Jump the tick height straight to the max so PoH doesn't have to be
        // run for the (potentially millions of) skipped slots.
        new.tick_height.store(new.max_tick_height(), Relaxed);

        // Rewrite the clock sysvar so both the epoch-start and current
        // timestamps carry over from the parent rather than being derived
        // from the warped slot.
        let mut clock = new.clock();
        clock.epoch_start_timestamp = parent_timestamp;
        clock.unix_timestamp = parent_timestamp;
        new.update_sysvar_account(&sysvar::clock::id(), |account| {
            create_account(
                &clock,
                new.inherit_specially_retained_account_fields(account),
            )
        });
        new.transaction_processor
            .fill_missing_sysvar_cache_entries(&new);
        new.freeze();
        new
    }
1599
    /// Create a bank from explicit arguments and deserialized fields from snapshot
    ///
    /// The resulting bank is sanity-checked against `genesis_config` (tick
    /// and epoch parameters must agree) before being returned.
    pub(crate) fn new_from_fields(
        bank_rc: BankRc,
        genesis_config: &GenesisConfig,
        runtime_config: Arc<RuntimeConfig>,
        fields: BankFieldsToDeserialize,
        debug_keys: Option<Arc<HashSet<Pubkey>>>,
        additional_builtins: Option<&[BuiltinPrototype]>,
        debug_do_not_add_builtins: bool,
        accounts_data_size_initial: u64,
    ) -> Self {
        let now = Instant::now();
        let ancestors = Ancestors::from(&fields.ancestors);
        // For backward compatibility, we can only serialize and deserialize
        // Stakes<Delegation> in BankFieldsTo{Serialize,Deserialize}. But Bank
        // caches Stakes<StakeAccount>. Below Stakes<StakeAccount> is obtained
        // from Stakes<Delegation> by reading the full account state from
        // accounts-db. Note that it is crucial that these accounts are loaded
        // at the right slot and match precisely with serialized Delegations.
        //
        // Note that we are disabling the read cache while we populate the stakes cache.
        // The stakes accounts will not be expected to be loaded again.
        // If we populate the read cache with these loads, then we'll just soon have to evict these.
        let (stakes, stakes_time) = measure_time!(Stakes::new(&fields.stakes, |pubkey| {
            let (account, _slot) = bank_rc
                .accounts
                .load_with_fixed_root_do_not_populate_read_cache(&ancestors, pubkey)?;
            Some(account)
        })
        .expect(
            "Stakes cache is inconsistent with accounts-db. This can indicate \
            a corrupted snapshot or bugs in cached accounts or accounts-db.",
        ));
        info!("Loading Stakes took: {stakes_time}");
        let stakes_accounts_load_duration = now.elapsed();
        // Fields not present in the snapshot are initialized to defaults;
        // snapshot-provided fields come straight from `fields`.
        let mut bank = Self {
            skipped_rewrites: Mutex::default(),
            rc: bank_rc,
            status_cache: Arc::<RwLock<BankStatusCache>>::default(),
            blockhash_queue: RwLock::new(fields.blockhash_queue),
            ancestors,
            hash: RwLock::new(fields.hash),
            parent_hash: fields.parent_hash,
            parent_slot: fields.parent_slot,
            hard_forks: Arc::new(RwLock::new(fields.hard_forks)),
            transaction_count: AtomicU64::new(fields.transaction_count),
            non_vote_transaction_count_since_restart: AtomicU64::default(),
            transaction_error_count: AtomicU64::default(),
            transaction_entries_count: AtomicU64::default(),
            transactions_per_entry_max: AtomicU64::default(),
            tick_height: AtomicU64::new(fields.tick_height),
            signature_count: AtomicU64::new(fields.signature_count),
            capitalization: AtomicU64::new(fields.capitalization),
            max_tick_height: fields.max_tick_height,
            hashes_per_tick: fields.hashes_per_tick,
            ticks_per_slot: fields.ticks_per_slot,
            ns_per_slot: fields.ns_per_slot,
            genesis_creation_time: fields.genesis_creation_time,
            slots_per_year: fields.slots_per_year,
            slot: fields.slot,
            bank_id: 0,
            epoch: fields.epoch,
            block_height: fields.block_height,
            collector_id: fields.collector_id,
            collector_fees: AtomicU64::new(fields.collector_fees),
            fee_rate_governor: fields.fee_rate_governor,
            collected_rent: AtomicU64::new(fields.collected_rent),
            // clone()-ing is needed to consider a gated behavior in rent_collector
            rent_collector: Self::get_rent_collector_from(&fields.rent_collector, fields.epoch),
            epoch_schedule: fields.epoch_schedule,
            inflation: Arc::new(RwLock::new(fields.inflation)),
            stakes_cache: StakesCache::new(stakes),
            epoch_stakes: fields.epoch_stakes,
            is_delta: AtomicBool::new(fields.is_delta),
            rewards: RwLock::new(vec![]),
            cluster_type: Some(genesis_config.cluster_type),
            lazy_rent_collection: AtomicBool::default(),
            rewards_pool_pubkeys: Arc::<HashSet<Pubkey>>::default(),
            transaction_debug_keys: debug_keys,
            transaction_log_collector_config: Arc::<RwLock<TransactionLogCollectorConfig>>::default(
            ),
            transaction_log_collector: Arc::<RwLock<TransactionLogCollector>>::default(),
            feature_set: Arc::<FeatureSet>::default(),
            reserved_account_keys: Arc::<ReservedAccountKeys>::default(),
            drop_callback: RwLock::new(OptionalDropCallback(None)),
            // A non-default hash means the snapshotted bank was frozen.
            freeze_started: AtomicBool::new(fields.hash != Hash::default()),
            vote_only_bank: false,
            cost_tracker: RwLock::new(CostTracker::default()),
            accounts_data_size_initial,
            accounts_data_size_delta_on_chain: AtomicI64::new(0),
            accounts_data_size_delta_off_chain: AtomicI64::new(0),
            epoch_reward_status: EpochRewardStatus::default(),
            transaction_processor: TransactionBatchProcessor::default(),
            check_program_modification_slot: false,
            // collector_fee_details is not serialized to snapshot
            collector_fee_details: RwLock::new(CollectorFeeDetails::default()),
            compute_budget: runtime_config.compute_budget,
            transaction_account_lock_limit: runtime_config.transaction_account_lock_limit,
            fee_structure: FeeStructure::default(),
            #[cfg(feature = "dev-context-only-utils")]
            hash_overrides: Arc::new(Mutex::new(HashOverrides::default())),
            // Placeholder value; recalculated below when the accounts lt hash
            // feature is enabled.
            accounts_lt_hash: Mutex::new(AccountsLtHash(LtHash([0xBAD1; LtHash::NUM_ELEMENTS]))),
            cache_for_accounts_lt_hash: RwLock::new(AHashMap::new()),
            block_id: RwLock::new(None),
        };

        // Replace the default processor with one keyed to this bank's
        // slot/epoch; its sysvar cache is filled after finish_init below.
        bank.transaction_processor =
            TransactionBatchProcessor::new_uninitialized(bank.slot, bank.epoch);

        let thread_pool = ThreadPoolBuilder::new()
            .thread_name(|i| format!("solBnkNewFlds{i:02}"))
            .build()
            .expect("new rayon threadpool");
        bank.recalculate_partitioned_rewards(null_tracer(), &thread_pool);

        bank.finish_init(
            genesis_config,
            additional_builtins,
            debug_do_not_add_builtins,
        );
        bank.transaction_processor
            .fill_missing_sysvar_cache_entries(&bank);
        bank.rebuild_skipped_rewrites();

        // Only computed when the accounts lt hash feature is enabled; the
        // duration is reported in the datapoint below.
        let calculate_accounts_lt_hash_duration = bank.is_accounts_lt_hash_enabled().then(|| {
            let (_, duration) = meas_dur!({
                *bank.accounts_lt_hash.get_mut().unwrap() = bank
                    .rc
                    .accounts
                    .accounts_db
                    .calculate_accounts_lt_hash_at_startup_from_index(&bank.ancestors, bank.slot());
            });
            duration
        });

        // Sanity assertions between bank snapshot and genesis config
        // Consider removing from serializable bank state
        // (BankFieldsToSerialize/BankFieldsToDeserialize) and initializing
        // from the passed in genesis_config instead (as new()/new_with_paths() already do)
        assert_eq!(
            bank.genesis_creation_time, genesis_config.creation_time,
            "Bank snapshot genesis creation time does not match genesis.bin creation time. \
             The snapshot and genesis.bin might pertain to different clusters"
        );
        assert_eq!(bank.ticks_per_slot, genesis_config.ticks_per_slot);
        assert_eq!(
            bank.ns_per_slot,
            genesis_config.poh_config.target_tick_duration.as_nanos()
                * genesis_config.ticks_per_slot as u128
        );
        assert_eq!(bank.max_tick_height, (bank.slot + 1) * bank.ticks_per_slot);
        assert_eq!(
            bank.slots_per_year,
            years_as_slots(
                1.0,
                &genesis_config.poh_config.target_tick_duration,
                bank.ticks_per_slot,
            )
        );
        assert_eq!(bank.epoch_schedule, genesis_config.epoch_schedule);
        assert_eq!(bank.epoch, bank.epoch_schedule.get_epoch(bank.slot));

        datapoint_info!(
            "bank-new-from-fields",
            (
                "accounts_data_len-from-snapshot",
                fields.accounts_data_len as i64,
                i64
            ),
            (
                "accounts_data_len-from-generate_index",
                accounts_data_size_initial as i64,
                i64
            ),
            (
                "stakes_accounts_load_duration_us",
                stakes_accounts_load_duration.as_micros(),
                i64
            ),
            (
                "calculate_accounts_lt_hash_us",
                calculate_accounts_lt_hash_duration.as_ref().map(Duration::as_micros),
                Option<i64>
            ),
        );
        bank
    }
1787
    /// Return subset of bank fields representing serializable state
    ///
    /// Atomic counters are sampled with `Relaxed` loads; interior-mutable
    /// fields are cloned out from behind their locks.
    pub(crate) fn get_fields_to_serialize(&self) -> BankFieldsToSerialize {
        // Epoch stakes are split into the legacy and versioned forms expected
        // by the serialized representation.
        let (epoch_stakes, versioned_epoch_stakes) = split_epoch_stakes(self.epoch_stakes.clone());
        BankFieldsToSerialize {
            blockhash_queue: self.blockhash_queue.read().unwrap().clone(),
            ancestors: AncestorsForSerialization::from(&self.ancestors),
            hash: *self.hash.read().unwrap(),
            parent_hash: self.parent_hash,
            parent_slot: self.parent_slot,
            hard_forks: self.hard_forks.read().unwrap().clone(),
            transaction_count: self.transaction_count.load(Relaxed),
            tick_height: self.tick_height.load(Relaxed),
            signature_count: self.signature_count.load(Relaxed),
            capitalization: self.capitalization.load(Relaxed),
            max_tick_height: self.max_tick_height,
            hashes_per_tick: self.hashes_per_tick,
            ticks_per_slot: self.ticks_per_slot,
            ns_per_slot: self.ns_per_slot,
            genesis_creation_time: self.genesis_creation_time,
            slots_per_year: self.slots_per_year,
            slot: self.slot,
            epoch: self.epoch,
            block_height: self.block_height,
            collector_id: self.collector_id,
            collector_fees: self.collector_fees.load(Relaxed),
            fee_rate_governor: self.fee_rate_governor.clone(),
            collected_rent: self.collected_rent.load(Relaxed),
            rent_collector: self.rent_collector.clone(),
            epoch_schedule: self.epoch_schedule.clone(),
            inflation: *self.inflation.read().unwrap(),
            // Serialized as Stakes<Delegation> for backward compatibility;
            // see `new_from_fields` for the inverse conversion.
            stakes: StakesEnum::from(self.stakes_cache.stakes().clone()),
            epoch_stakes,
            is_delta: self.is_delta.load(Relaxed),
            accounts_data_len: self.load_accounts_data_size(),
            versioned_epoch_stakes,
        }
    }
1825
    /// Returns this bank's collector id.
    pub fn collector_id(&self) -> &Pubkey {
        &self.collector_id
    }
1829
    /// Returns the cluster's genesis creation time.
    pub fn genesis_creation_time(&self) -> UnixTimestamp {
        self.genesis_creation_time
    }
1833
    /// Returns the slot this bank was created for.
    pub fn slot(&self) -> Slot {
        self.slot
    }
1837
    /// Returns this bank's id. Note that `new_from_fields` (snapshot restore)
    /// initializes this to 0.
    pub fn bank_id(&self) -> BankId {
        self.bank_id
    }
1841
    /// Returns the epoch this bank's slot belongs to.
    pub fn epoch(&self) -> Epoch {
        self.epoch
    }
1845
    /// Returns the first "normal" (full-length) epoch per the epoch schedule.
    pub fn first_normal_epoch(&self) -> Epoch {
        self.epoch_schedule().first_normal_epoch
    }
1849
    /// Acquires a read guard on the bank hash; holding the guard blocks
    /// writers to the hash for as long as it is held.
    pub fn freeze_lock(&self) -> RwLockReadGuard<Hash> {
        self.hash.read().unwrap()
    }
1853
    /// Returns this bank's hash (the default `Hash` until the bank is frozen;
    /// see `is_frozen`).
    pub fn hash(&self) -> Hash {
        *self.hash.read().unwrap()
    }
1857
1858    pub fn is_frozen(&self) -> bool {
1859        *self.hash.read().unwrap() != Hash::default()
1860    }
1861
    /// Whether freezing of this bank has begun.
    pub fn freeze_started(&self) -> bool {
        self.freeze_started.load(Relaxed)
    }
1865
1866    pub fn status_cache_ancestors(&self) -> Vec<u64> {
1867        let mut roots = self.status_cache.read().unwrap().roots().clone();
1868        let min = roots.iter().min().cloned().unwrap_or(0);
1869        for ancestor in self.ancestors.keys() {
1870            if ancestor >= min {
1871                roots.insert(ancestor);
1872            }
1873        }
1874
1875        let mut ancestors: Vec<_> = roots.into_iter().collect();
1876        #[allow(clippy::stable_sort_primitive)]
1877        ancestors.sort();
1878        ancestors
1879    }
1880
1881    /// computed unix_timestamp at this slot height
1882    pub fn unix_timestamp_from_genesis(&self) -> i64 {
1883        self.genesis_creation_time.saturating_add(
1884            (self.slot as u128)
1885                .saturating_mul(self.ns_per_slot)
1886                .saturating_div(1_000_000_000) as i64,
1887        )
1888    }
1889
    /// Loads the current account at `pubkey`, computes its replacement via
    /// `updater`, rent-adjusts the new account's balance, and stores it while
    /// keeping capitalization consistent.
    fn update_sysvar_account<F>(&self, pubkey: &Pubkey, updater: F)
    where
        F: Fn(&Option<AccountSharedData>) -> AccountSharedData,
    {
        // `None` when the sysvar account does not exist yet.
        let old_account = self.get_account_with_fixed_root(pubkey);
        let mut new_account = updater(&old_account);

        // When new sysvar comes into existence (with RENT_UNADJUSTED_INITIAL_BALANCE lamports),
        // this code ensures that the sysvar's balance is adjusted to be rent-exempt.
        //
        // More generally, this code always re-calculates for possible sysvar data size change,
        // although there is no such sysvars currently.
        self.adjust_sysvar_balance_for_rent(&mut new_account);
        self.store_account_and_update_capitalization(pubkey, &new_account);
    }
1905
1906    fn inherit_specially_retained_account_fields(
1907        &self,
1908        old_account: &Option<AccountSharedData>,
1909    ) -> InheritableAccountFields {
1910        const RENT_UNADJUSTED_INITIAL_BALANCE: u64 = 1;
1911
1912        (
1913            old_account
1914                .as_ref()
1915                .map(|a| a.lamports())
1916                .unwrap_or(RENT_UNADJUSTED_INITIAL_BALANCE),
1917            old_account
1918                .as_ref()
1919                .map(|a| a.rent_epoch())
1920                .unwrap_or(INITIAL_RENT_EPOCH),
1921        )
1922    }
1923
1924    pub fn clock(&self) -> sysvar::clock::Clock {
1925        from_account(&self.get_account(&sysvar::clock::id()).unwrap_or_default())
1926            .unwrap_or_default()
1927    }
1928
    /// Rebuilds and stores the clock sysvar for this bank, correcting the
    /// unix timestamp with a stake-weighted estimate when one is available.
    ///
    /// `parent_epoch` is `None` when there is no parent to inherit the epoch
    /// from (the epoch-start anchor then uses this bank's own epoch).
    fn update_clock(&self, parent_epoch: Option<Epoch>) {
        let mut unix_timestamp = self.clock().unix_timestamp;
        // set epoch_start_timestamp to None to warp timestamp
        let epoch_start_timestamp = {
            let epoch = if let Some(epoch) = parent_epoch {
                epoch
            } else {
                self.epoch()
            };
            let first_slot_in_epoch = self.epoch_schedule().get_first_slot_in_epoch(epoch);
            Some((first_slot_in_epoch, self.clock().epoch_start_timestamp))
        };
        let max_allowable_drift = MaxAllowableDrift {
            fast: MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST,
            slow: MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW_V2,
        };

        // The estimate is accepted only if it does not move time backwards
        // relative to the ancestor's timestamp.
        let ancestor_timestamp = self.clock().unix_timestamp;
        if let Some(timestamp_estimate) =
            self.get_timestamp_estimate(max_allowable_drift, epoch_start_timestamp)
        {
            unix_timestamp = timestamp_estimate;
            if timestamp_estimate < ancestor_timestamp {
                unix_timestamp = ancestor_timestamp;
            }
        }
        datapoint_info!(
            "bank-timestamp-correction",
            ("slot", self.slot(), i64),
            ("from_genesis", self.unix_timestamp_from_genesis(), i64),
            ("corrected", unix_timestamp, i64),
            ("ancestor_timestamp", ancestor_timestamp, i64),
        );
        let mut epoch_start_timestamp =
            // On epoch boundaries, update epoch_start_timestamp
            if parent_epoch.is_some() && parent_epoch.unwrap() != self.epoch() {
                unix_timestamp
            } else {
                self.clock().epoch_start_timestamp
            };
        // The genesis bank's timestamps always come straight from genesis.
        if self.slot == 0 {
            unix_timestamp = self.unix_timestamp_from_genesis();
            epoch_start_timestamp = self.unix_timestamp_from_genesis();
        }
        let clock = sysvar::clock::Clock {
            slot: self.slot,
            epoch_start_timestamp,
            epoch: self.epoch_schedule().get_epoch(self.slot),
            leader_schedule_epoch: self.epoch_schedule().get_leader_schedule_epoch(self.slot),
            unix_timestamp,
        };
        self.update_sysvar_account(&sysvar::clock::id(), |account| {
            create_account(
                &clock,
                self.inherit_specially_retained_account_fields(account),
            )
        });
    }
1987
1988    pub fn update_last_restart_slot(&self) {
1989        let feature_flag = self
1990            .feature_set
1991            .is_active(&feature_set::last_restart_slot_sysvar::id());
1992
1993        if feature_flag {
1994            // First, see what the currently stored last restart slot is. This
1995            // account may not exist yet if the feature was just activated.
1996            let current_last_restart_slot = self
1997                .get_account(&sysvar::last_restart_slot::id())
1998                .and_then(|account| {
1999                    let lrs: Option<LastRestartSlot> = from_account(&account);
2000                    lrs
2001                })
2002                .map(|account| account.last_restart_slot);
2003
2004            let last_restart_slot = {
2005                let slot = self.slot;
2006                let hard_forks_r = self.hard_forks.read().unwrap();
2007
2008                // Only consider hard forks <= this bank's slot to avoid prematurely applying
2009                // a hard fork that is set to occur in the future.
2010                hard_forks_r
2011                    .iter()
2012                    .rev()
2013                    .find(|(hard_fork, _)| *hard_fork <= slot)
2014                    .map(|(slot, _)| *slot)
2015                    .unwrap_or(0)
2016            };
2017
2018            // Only need to write if the last restart has changed
2019            if current_last_restart_slot != Some(last_restart_slot) {
2020                self.update_sysvar_account(&sysvar::last_restart_slot::id(), |account| {
2021                    create_account(
2022                        &LastRestartSlot { last_restart_slot },
2023                        self.inherit_specially_retained_account_fields(account),
2024                    )
2025                });
2026            }
2027        }
2028    }
2029
    /// Test helper: overwrites the account for sysvar `T` and rebuilds the
    /// entire sysvar cache so the processor sees the new value.
    pub fn set_sysvar_for_tests<T>(&self, sysvar: &T)
    where
        T: Sysvar + SysvarId,
    {
        self.update_sysvar_account(&T::id(), |account| {
            create_account(
                sysvar,
                self.inherit_specially_retained_account_fields(account),
            )
        });
        // Simply force fill sysvar cache rather than checking which sysvar was
        // actually updated since tests don't need to be optimized for performance.
        self.transaction_processor.reset_sysvar_cache();
        self.transaction_processor
            .fill_missing_sysvar_cache_entries(self);
    }
2046
    /// Adds this bank's slot to the slot-history sysvar account (creating a
    /// default history if the account does not exist yet).
    fn update_slot_history(&self) {
        self.update_sysvar_account(&sysvar::slot_history::id(), |account| {
            let mut slot_history = account
                .as_ref()
                .map(|account| from_account::<SlotHistory, _>(account).unwrap())
                .unwrap_or_default();
            slot_history.add(self.slot());
            create_account(
                &slot_history,
                self.inherit_specially_retained_account_fields(account),
            )
        });
    }
2060
    /// Records the parent's (slot, hash) pair in the slot-hashes sysvar
    /// account (creating a default list if the account does not exist yet).
    fn update_slot_hashes(&self) {
        self.update_sysvar_account(&sysvar::slot_hashes::id(), |account| {
            let mut slot_hashes = account
                .as_ref()
                .map(|account| from_account::<SlotHashes, _>(account).unwrap())
                .unwrap_or_default();
            slot_hashes.add(self.parent_slot, self.parent_hash);
            create_account(
                &slot_hashes,
                self.inherit_specially_retained_account_fields(account),
            )
        });
    }
2074
    /// Deserializes and returns the slot-history sysvar.
    ///
    /// # Panics
    /// Panics if the sysvar account is missing or fails to deserialize.
    pub fn get_slot_history(&self) -> SlotHistory {
        from_account(&self.get_account(&sysvar::slot_history::id()).unwrap()).unwrap()
    }
2078
    /// Ensures the epoch-stakes cache has an entry for `leader_schedule_epoch`.
    /// When the entry is missing (an epoch boundary was crossed), old entries
    /// are pruned to the last `MAX_LEADER_SCHEDULE_STAKES` epochs and a new
    /// entry is built from the current stakes cache.
    fn update_epoch_stakes(&mut self, leader_schedule_epoch: Epoch) {
        // update epoch_stakes cache
        //  if my parent didn't populate for this staker's epoch, we've
        //  crossed a boundary
        if !self.epoch_stakes.contains_key(&leader_schedule_epoch) {
            self.epoch_stakes.retain(|&epoch, _| {
                epoch >= leader_schedule_epoch.saturating_sub(MAX_LEADER_SCHEDULE_STAKES)
            });
            let stakes = self.stakes_cache.stakes().clone();
            let stakes = Arc::new(StakesEnum::from(stakes));
            let new_epoch_stakes = EpochStakes::new(stakes, leader_schedule_epoch);
            info!(
                "new epoch stakes, epoch: {}, total_stake: {}",
                leader_schedule_epoch,
                new_epoch_stakes.total_stake(),
            );

            // It is expensive to log the details of epoch stakes. Only log them at "trace"
            // level for debugging purpose.
            if log::log_enabled!(log::Level::Trace) {
                let vote_stakes: HashMap<_, _> = self
                    .stakes_cache
                    .stakes()
                    .vote_accounts()
                    .delegated_stakes()
                    .map(|(pubkey, stake)| (*pubkey, stake))
                    .collect();
                trace!("new epoch stakes, stakes: {vote_stakes:#?}");
            }
            self.epoch_stakes
                .insert(leader_schedule_epoch, new_epoch_stakes);
        }
    }
2112
    #[cfg(feature = "dev-context-only-utils")]
    /// Test helper: installs `stakes` for `epoch` directly, bypassing the
    /// normal epoch-boundary population in `update_epoch_stakes`.
    pub fn set_epoch_stakes_for_test(&mut self, epoch: Epoch, stakes: EpochStakes) {
        self.epoch_stakes.insert(epoch, stakes);
    }
2117
    /// Writes the rent sysvar account from this bank's rent collector.
    fn update_rent(&self) {
        self.update_sysvar_account(&sysvar::rent::id(), |account| {
            create_account(
                &self.rent_collector.rent,
                self.inherit_specially_retained_account_fields(account),
            )
        });
    }
2126
    /// Writes the epoch-schedule sysvar account from this bank's schedule.
    fn update_epoch_schedule(&self) {
        self.update_sysvar_account(&sysvar::epoch_schedule::id(), |account| {
            create_account(
                self.epoch_schedule(),
                self.inherit_specially_retained_account_fields(account),
            )
        });
    }
2135
    /// Writes the stake-history sysvar account, but only when this bank is
    /// the first in a new epoch (`epoch` is the parent's epoch; a match means
    /// no boundary was crossed and the update is skipped).
    fn update_stake_history(&self, epoch: Option<Epoch>) {
        if epoch == Some(self.epoch()) {
            return;
        }
        // if I'm the first Bank in an epoch, ensure stake_history is updated
        self.update_sysvar_account(&sysvar::stake_history::id(), |account| {
            create_account::<sysvar::stake_history::StakeHistory>(
                self.stakes_cache.stakes().history(),
                self.inherit_specially_retained_account_fields(account),
            )
        });
    }
2148
2149    pub fn epoch_duration_in_years(&self, prev_epoch: Epoch) -> f64 {
2150        // period: time that has passed as a fraction of a year, basically the length of
2151        //  an epoch as a fraction of a year
2152        //  calculated as: slots_elapsed / (slots / year)
2153        self.epoch_schedule().get_slots_in_epoch(prev_epoch) as f64 / self.slots_per_year
2154    }
2155
2156    // Calculates the starting-slot for inflation from the activation slot.
2157    // This method assumes that `pico_inflation` will be enabled before `full_inflation`, giving
2158    // precedence to the latter. However, since `pico_inflation` is fixed-rate Inflation, should
2159    // `pico_inflation` be enabled 2nd, the incorrect start slot provided here should have no
2160    // effect on the inflation calculation.
2161    fn get_inflation_start_slot(&self) -> Slot {
2162        let mut slots = self
2163            .feature_set
2164            .full_inflation_features_enabled()
2165            .iter()
2166            .filter_map(|id| self.feature_set.activated_slot(id))
2167            .collect::<Vec<_>>();
2168        slots.sort_unstable();
2169        slots.first().cloned().unwrap_or_else(|| {
2170            self.feature_set
2171                .activated_slot(&feature_set::pico_inflation::id())
2172                .unwrap_or(0)
2173        })
2174    }
2175
    /// Number of slots between the normalized inflation start (first slot of
    /// the epoch *before* the activation epoch) and the first slot of the
    /// current epoch.
    fn get_inflation_num_slots(&self) -> u64 {
        let inflation_activation_slot = self.get_inflation_start_slot();
        // Normalize inflation_start to align with the start of rewards accrual.
        let inflation_start_slot = self.epoch_schedule().get_first_slot_in_epoch(
            self.epoch_schedule()
                .get_epoch(inflation_activation_slot)
                .saturating_sub(1),
        );
        // NOTE(review): unchecked subtraction — assumes the activation slot
        // never lies in a future epoch (activated features are in the past),
        // so this cannot underflow; confirm if that invariant ever changes.
        self.epoch_schedule().get_first_slot_in_epoch(self.epoch()) - inflation_start_slot
    }
2186
2187    pub fn slot_in_year_for_inflation(&self) -> f64 {
2188        let num_slots = self.get_inflation_num_slots();
2189
2190        // calculated as: num_slots / (slots / year)
2191        num_slots as f64 / self.slots_per_year
2192    }
2193
2194    fn calculate_previous_epoch_inflation_rewards(
2195        &self,
2196        prev_epoch_capitalization: u64,
2197        prev_epoch: Epoch,
2198    ) -> PrevEpochInflationRewards {
2199        let slot_in_year = self.slot_in_year_for_inflation();
2200        let (validator_rate, foundation_rate) = {
2201            let inflation = self.inflation.read().unwrap();
2202            (
2203                (*inflation).validator(slot_in_year),
2204                (*inflation).foundation(slot_in_year),
2205            )
2206        };
2207
2208        let prev_epoch_duration_in_years = self.epoch_duration_in_years(prev_epoch);
2209        let validator_rewards = (validator_rate
2210            * prev_epoch_capitalization as f64
2211            * prev_epoch_duration_in_years) as u64;
2212
2213        PrevEpochInflationRewards {
2214            validator_rewards,
2215            prev_epoch_duration_in_years,
2216            validator_rate,
2217            foundation_rate,
2218        }
2219    }
2220
2221    fn assert_validator_rewards_paid(&self, validator_rewards_paid: u64) {
2222        assert_eq!(
2223            validator_rewards_paid,
2224            u64::try_from(
2225                self.rewards
2226                    .read()
2227                    .unwrap()
2228                    .par_iter()
2229                    .map(|(_address, reward_info)| {
2230                        match reward_info.reward_type {
2231                            RewardType::Voting | RewardType::Staking => reward_info.lamports,
2232                            _ => 0,
2233                        }
2234                    })
2235                    .sum::<i64>()
2236            )
2237            .unwrap()
2238        );
2239    }
2240
2241    // update rewards based on the previous epoch
2242    fn update_rewards_with_thread_pool(
2243        &mut self,
2244        prev_epoch: Epoch,
2245        reward_calc_tracer: Option<impl Fn(&RewardCalculationEvent) + Send + Sync>,
2246        thread_pool: &ThreadPool,
2247        metrics: &mut RewardsMetrics,
2248    ) {
2249        let capitalization = self.capitalization();
2250        let PrevEpochInflationRewards {
2251            validator_rewards,
2252            prev_epoch_duration_in_years,
2253            validator_rate,
2254            foundation_rate,
2255        } = self.calculate_previous_epoch_inflation_rewards(capitalization, prev_epoch);
2256
2257        let old_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked();
2258
2259        self.pay_validator_rewards_with_thread_pool(
2260            prev_epoch,
2261            validator_rewards,
2262            reward_calc_tracer,
2263            thread_pool,
2264            metrics,
2265        );
2266
2267        let new_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked();
2268        let validator_rewards_paid = new_vote_balance_and_staked - old_vote_balance_and_staked;
2269        assert_eq!(
2270            validator_rewards_paid,
2271            u64::try_from(
2272                self.rewards
2273                    .read()
2274                    .unwrap()
2275                    .iter()
2276                    .map(|(_address, reward_info)| {
2277                        match reward_info.reward_type {
2278                            RewardType::Voting | RewardType::Staking => reward_info.lamports,
2279                            _ => 0,
2280                        }
2281                    })
2282                    .sum::<i64>()
2283            )
2284            .unwrap()
2285        );
2286
2287        // verify that we didn't pay any more than we expected to
2288        assert!(validator_rewards >= validator_rewards_paid);
2289
2290        info!(
2291            "distributed inflation: {} (rounded from: {})",
2292            validator_rewards_paid, validator_rewards
2293        );
2294        let (num_stake_accounts, num_vote_accounts) = {
2295            let stakes = self.stakes_cache.stakes();
2296            (
2297                stakes.stake_delegations().len(),
2298                stakes.vote_accounts().len(),
2299            )
2300        };
2301        self.capitalization
2302            .fetch_add(validator_rewards_paid, Relaxed);
2303
2304        let active_stake = if let Some(stake_history_entry) =
2305            self.stakes_cache.stakes().history().get(prev_epoch)
2306        {
2307            stake_history_entry.effective
2308        } else {
2309            0
2310        };
2311
2312        datapoint_warn!(
2313            "epoch_rewards",
2314            ("slot", self.slot, i64),
2315            ("epoch", prev_epoch, i64),
2316            ("validator_rate", validator_rate, f64),
2317            ("foundation_rate", foundation_rate, f64),
2318            ("epoch_duration_in_years", prev_epoch_duration_in_years, f64),
2319            ("validator_rewards", validator_rewards_paid, i64),
2320            ("active_stake", active_stake, i64),
2321            ("pre_capitalization", capitalization, i64),
2322            ("post_capitalization", self.capitalization(), i64),
2323            ("num_stake_accounts", num_stake_accounts, i64),
2324            ("num_vote_accounts", num_vote_accounts, i64),
2325        );
2326    }
2327
2328    fn filter_stake_delegations<'a>(
2329        &self,
2330        stakes: &'a Stakes<StakeAccount<Delegation>>,
2331    ) -> Vec<(&'a Pubkey, &'a StakeAccount<Delegation>)> {
2332        if self
2333            .feature_set
2334            .is_active(&feature_set::stake_minimum_delegation_for_rewards::id())
2335        {
2336            let num_stake_delegations = stakes.stake_delegations().len();
2337            let min_stake_delegation =
2338                solana_stake_program::get_minimum_delegation(&self.feature_set)
2339                    .max(LAMPORTS_PER_SOL);
2340
2341            let (stake_delegations, filter_time_us) = measure_us!(stakes
2342                .stake_delegations()
2343                .iter()
2344                .filter(|(_stake_pubkey, cached_stake_account)| {
2345                    cached_stake_account.delegation().stake >= min_stake_delegation
2346                })
2347                .collect::<Vec<_>>());
2348
2349            datapoint_info!(
2350                "stake_account_filter_time",
2351                ("filter_time_us", filter_time_us, i64),
2352                ("num_stake_delegations_before", num_stake_delegations, i64),
2353                ("num_stake_delegations_after", stake_delegations.len(), i64)
2354            );
2355            stake_delegations
2356        } else {
2357            stakes.stake_delegations().iter().collect()
2358        }
2359    }
2360
    /// Build the map of vote accounts to the stake delegations that point at
    /// them, for the rewards calculation. Also reports vote pubkeys that could
    /// not be resolved to a valid vote account (`invalid_vote_keys`) and a
    /// count of vote accounts found in accounts-db but missing from the
    /// stakes cache (`vote_accounts_cache_miss_count`).
    fn _load_vote_and_stake_accounts(
        &self,
        thread_pool: &ThreadPool,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
    ) -> LoadVoteAndStakeAccountsResult {
        let stakes = self.stakes_cache.stakes();
        let stake_delegations = self.filter_stake_delegations(&stakes);

        // Obtain all unique voter pubkeys from stake delegations.
        // Always extend the larger set with the smaller one to minimize
        // re-hashing work during the parallel reduce.
        fn merge(mut acc: HashSet<Pubkey>, other: HashSet<Pubkey>) -> HashSet<Pubkey> {
            if acc.len() < other.len() {
                return merge(other, acc);
            }
            acc.extend(other);
            acc
        }
        let voter_pubkeys = thread_pool.install(|| {
            stake_delegations
                .par_iter()
                .fold(
                    HashSet::default,
                    |mut voter_pubkeys, (_stake_pubkey, stake_account)| {
                        voter_pubkeys.insert(stake_account.delegation().voter_pubkey);
                        voter_pubkeys
                    },
                )
                .reduce(HashSet::default, merge)
        });
        // Obtain vote-accounts for unique voter pubkeys.
        let cached_vote_accounts = stakes.vote_accounts();
        let solana_vote_program: Pubkey = solana_vote_program::id();
        let vote_accounts_cache_miss_count = AtomicUsize::default();
        let get_vote_account = |vote_pubkey: &Pubkey| -> Option<VoteAccount> {
            if let Some(vote_account) = cached_vote_accounts.get(vote_pubkey) {
                return Some(vote_account.clone());
            }
            // If accounts-db contains a valid vote account, then it should
            // already have been cached in cached_vote_accounts; so the code
            // below is only for sanity check, and can be removed once
            // vote_accounts_cache_miss_count is shown to be always zero.
            let account = self.get_account_with_fixed_root(vote_pubkey)?;
            if account.owner() == &solana_vote_program
                && VoteState::deserialize(account.data()).is_ok()
            {
                vote_accounts_cache_miss_count.fetch_add(1, Relaxed);
            }
            VoteAccount::try_from(account).ok()
        };
        let invalid_vote_keys = DashMap::<Pubkey, InvalidCacheEntryReason>::new();
        // Resolve one voter pubkey to a map entry; records the reason and
        // yields None when the vote account is missing or has the wrong owner.
        let make_vote_delegations_entry = |vote_pubkey| {
            let Some(vote_account) = get_vote_account(&vote_pubkey) else {
                invalid_vote_keys.insert(vote_pubkey, InvalidCacheEntryReason::Missing);
                return None;
            };
            if vote_account.owner() != &solana_vote_program {
                invalid_vote_keys.insert(vote_pubkey, InvalidCacheEntryReason::WrongOwner);
                return None;
            }
            let vote_with_stake_delegations = VoteWithStakeDelegations {
                vote_state: Arc::new(vote_account.vote_state().clone()),
                vote_account: AccountSharedData::from(vote_account),
                delegations: Vec::default(),
            };
            Some((vote_pubkey, vote_with_stake_delegations))
        };
        let vote_with_stake_delegations_map: DashMap<Pubkey, VoteWithStakeDelegations> =
            thread_pool.install(|| {
                voter_pubkeys
                    .into_par_iter()
                    .filter_map(make_vote_delegations_entry)
                    .collect()
            });
        // Join stake accounts with vote-accounts.
        // Delegations whose voter pubkey was rejected above are silently
        // dropped here (the map has no entry for them).
        let push_stake_delegation = |(stake_pubkey, stake_account): (&Pubkey, &StakeAccount<_>)| {
            let delegation = stake_account.delegation();
            let Some(mut vote_delegations) =
                vote_with_stake_delegations_map.get_mut(&delegation.voter_pubkey)
            else {
                return;
            };
            if let Some(reward_calc_tracer) = reward_calc_tracer.as_ref() {
                let delegation =
                    InflationPointCalculationEvent::Delegation(*delegation, solana_vote_program);
                let event = RewardCalculationEvent::Staking(stake_pubkey, &delegation);
                reward_calc_tracer(&event);
            }
            let stake_delegation = (*stake_pubkey, stake_account.clone());
            vote_delegations.delegations.push(stake_delegation);
        };
        thread_pool.install(|| {
            stake_delegations
                .into_par_iter()
                .for_each(push_stake_delegation);
        });
        LoadVoteAndStakeAccountsResult {
            vote_with_stake_delegations_map,
            invalid_vote_keys,
            vote_accounts_cache_miss_count: vote_accounts_cache_miss_count.into_inner(),
        }
    }
2461
    /// Load, calculate and payout epoch rewards for stake and vote accounts
    fn pay_validator_rewards_with_thread_pool(
        &mut self,
        rewarded_epoch: Epoch,
        rewards: u64,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
        thread_pool: &ThreadPool,
        metrics: &mut RewardsMetrics,
    ) {
        // Snapshot the stake history before any accounts are mutated below.
        let stake_history = self.stakes_cache.stakes().history().clone();
        let vote_with_stake_delegations_map =
            self.load_vote_and_stake_accounts(thread_pool, reward_calc_tracer.as_ref(), metrics);

        // `None` means zero points were earned this epoch; in that case
        // nothing is redeemed or stored.
        let point_value = self.calculate_reward_points(
            &vote_with_stake_delegations_map,
            rewards,
            &stake_history,
            thread_pool,
            metrics,
        );

        if let Some(point_value) = point_value {
            let (vote_account_rewards, stake_rewards) = self.redeem_rewards(
                vote_with_stake_delegations_map,
                rewarded_epoch,
                point_value,
                &stake_history,
                thread_pool,
                reward_calc_tracer.as_ref(),
                metrics,
            );

            // this checking of an unactivated feature can be enabled in tests or with a validator by passing `--partitioned-epoch-rewards-compare-calculation`
            if self
                .partitioned_epoch_rewards_config()
                .test_compare_partitioned_epoch_rewards
            {
                // immutable `&self` to avoid side effects
                (self as &Bank).compare_with_partitioned_rewards(
                    &stake_rewards,
                    &vote_account_rewards,
                    rewarded_epoch,
                    thread_pool,
                    null_tracer(),
                );
            }

            // Persist the results: stake accounts first, then vote accounts,
            // then append both to the bank's reward history.
            self.store_stake_accounts(thread_pool, &stake_rewards, metrics);
            let vote_rewards = self.store_vote_accounts(vote_account_rewards, metrics);
            self.update_reward_history(stake_rewards, vote_rewards);
        }
    }
2514
2515    fn load_vote_and_stake_accounts(
2516        &mut self,
2517        thread_pool: &ThreadPool,
2518        reward_calc_tracer: Option<impl RewardCalcTracer>,
2519        metrics: &mut RewardsMetrics,
2520    ) -> VoteWithStakeDelegationsMap {
2521        let (
2522            LoadVoteAndStakeAccountsResult {
2523                vote_with_stake_delegations_map,
2524                invalid_vote_keys,
2525                vote_accounts_cache_miss_count,
2526            },
2527            load_vote_and_stake_accounts_us,
2528        ) = measure_us!({
2529            self._load_vote_and_stake_accounts(thread_pool, reward_calc_tracer.as_ref())
2530        });
2531        metrics
2532            .load_vote_and_stake_accounts_us
2533            .fetch_add(load_vote_and_stake_accounts_us, Relaxed);
2534        metrics.vote_accounts_cache_miss_count += vote_accounts_cache_miss_count;
2535        self.stakes_cache
2536            .handle_invalid_keys(invalid_vote_keys, self.slot());
2537        vote_with_stake_delegations_map
2538    }
2539
    /// Sum the reward points earned by every stake delegation across all vote
    /// accounts, and pair that total with the epoch's reward lamports.
    /// Returns `None` when zero points were earned (nothing to pay out).
    fn calculate_reward_points(
        &self,
        vote_with_stake_delegations_map: &VoteWithStakeDelegationsMap,
        rewards: u64,
        stake_history: &StakeHistory,
        thread_pool: &ThreadPool,
        metrics: &RewardsMetrics,
    ) -> Option<PointValue> {
        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();
        let (points, calculate_points_us) = measure_us!(thread_pool.install(|| {
            vote_with_stake_delegations_map
                .par_iter()
                .map(|entry| {
                    let VoteWithStakeDelegations {
                        vote_state,
                        delegations,
                        ..
                    } = entry.value();

                    // Subtotal over this vote account's delegations; a
                    // delegation whose points cannot be calculated counts as 0.
                    delegations
                        .par_iter()
                        .map(|(_stake_pubkey, stake_account)| {
                            solana_stake_program::points::calculate_points(
                                stake_account.stake_state(),
                                vote_state,
                                stake_history,
                                new_warmup_cooldown_rate_epoch,
                            )
                            .unwrap_or(0)
                        })
                        .sum::<u128>()
                })
                .sum()
        }));
        metrics
            .calculate_points_us
            .fetch_add(calculate_points_us, Relaxed);

        (points > 0).then_some(PointValue { rewards, points })
    }
2580
    /// Redeem each delegation's share of the epoch rewards, mutating the
    /// in-memory stake accounts and accumulating per-vote-account commission
    /// totals. Returns the vote rewards keyed by vote pubkey and the list of
    /// rewarded stake accounts; nothing is stored here.
    fn redeem_rewards(
        &self,
        vote_with_stake_delegations_map: DashMap<Pubkey, VoteWithStakeDelegations>,
        rewarded_epoch: Epoch,
        point_value: PointValue,
        stake_history: &StakeHistory,
        thread_pool: &ThreadPool,
        reward_calc_tracer: Option<impl RewardCalcTracer>,
        metrics: &mut RewardsMetrics,
    ) -> (VoteRewards, StakeRewards) {
        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();
        let vote_account_rewards: VoteRewards =
            DashMap::with_capacity(vote_with_stake_delegations_map.len());
        // Flatten the map into one (vote_pubkey, vote_state, delegation)
        // stream, seeding a zero-lamport VoteReward entry for every vote
        // account along the way.
        let stake_delegation_iterator = vote_with_stake_delegations_map.into_par_iter().flat_map(
            |(
                vote_pubkey,
                VoteWithStakeDelegations {
                    vote_state,
                    vote_account,
                    delegations,
                },
            )| {
                vote_account_rewards.insert(
                    vote_pubkey,
                    VoteReward {
                        vote_account,
                        commission: vote_state.commission,
                        vote_rewards: 0,
                        vote_needs_store: false,
                    },
                );
                delegations
                    .into_par_iter()
                    .map(move |delegation| (vote_pubkey, Arc::clone(&vote_state), delegation))
            },
        );

        let (stake_rewards, redeem_rewards_us) = measure_us!(thread_pool.install(|| {
            stake_delegation_iterator
                .filter_map(|(vote_pubkey, vote_state, (stake_pubkey, stake_account))| {
                    // curry closure to add the contextual stake_pubkey
                    let reward_calc_tracer = reward_calc_tracer.as_ref().map(|outer| {
                        // inner
                        move |inner_event: &_| {
                            outer(&RewardCalculationEvent::Staking(&stake_pubkey, inner_event))
                        }
                    });
                    let (mut stake_account, stake_state) =
                        <(AccountSharedData, StakeStateV2)>::from(stake_account);
                    // Credits the staker's share to `stake_account` in place;
                    // the voter's share is returned for accumulation below.
                    let redeemed = solana_stake_program::rewards::redeem_rewards(
                        rewarded_epoch,
                        stake_state,
                        &mut stake_account,
                        &vote_state,
                        &point_value,
                        stake_history,
                        reward_calc_tracer.as_ref(),
                        new_warmup_cooldown_rate_epoch,
                    );
                    if let Ok((stakers_reward, voters_reward)) = redeemed {
                        // track voter rewards
                        if let Some(VoteReward {
                            vote_account: _,
                            commission: _,
                            vote_rewards: vote_rewards_sum,
                            vote_needs_store,
                        }) = vote_account_rewards.get_mut(&vote_pubkey).as_deref_mut()
                        {
                            *vote_needs_store = true;
                            *vote_rewards_sum = vote_rewards_sum.saturating_add(voters_reward);
                        }

                        let post_balance = stake_account.lamports();
                        return Some(StakeReward {
                            stake_pubkey,
                            stake_reward_info: RewardInfo {
                                reward_type: RewardType::Staking,
                                lamports: i64::try_from(stakers_reward).unwrap(),
                                post_balance,
                                commission: Some(vote_state.commission),
                            },
                            stake_account,
                        });
                    } else {
                        debug!(
                            "solana_stake_program::rewards::redeem_rewards() failed for {}: {:?}",
                            stake_pubkey, redeemed
                        );
                    }
                    None
                })
                .collect()
        }));
        metrics.redeem_rewards_us += redeem_rewards_us;
        (vote_account_rewards, stake_rewards)
    }
2677
2678    fn store_stake_accounts(
2679        &self,
2680        thread_pool: &ThreadPool,
2681        stake_rewards: &[StakeReward],
2682        metrics: &RewardsMetrics,
2683    ) {
2684        // store stake account even if stake_reward is 0
2685        // because credits observed has changed
2686        let now = Instant::now();
2687        let slot = self.slot();
2688        self.stakes_cache.update_stake_accounts(
2689            thread_pool,
2690            stake_rewards,
2691            self.new_warmup_cooldown_rate_epoch(),
2692        );
2693        assert!(!self.freeze_started());
2694        thread_pool.install(|| {
2695            stake_rewards
2696                .par_chunks(512)
2697                .for_each(|chunk| self.rc.accounts.store_accounts_cached((slot, chunk)))
2698        });
2699        metrics
2700            .store_stake_accounts_us
2701            .fetch_add(now.elapsed().as_micros() as u64, Relaxed);
2702    }
2703
2704    fn store_vote_accounts(
2705        &self,
2706        vote_account_rewards: VoteRewards,
2707        metrics: &RewardsMetrics,
2708    ) -> Vec<(Pubkey, RewardInfo)> {
2709        let (vote_rewards, store_vote_accounts_us) = measure_us!(vote_account_rewards
2710            .into_iter()
2711            .filter_map(
2712                |(
2713                    vote_pubkey,
2714                    VoteReward {
2715                        mut vote_account,
2716                        commission,
2717                        vote_rewards,
2718                        vote_needs_store,
2719                    },
2720                )| {
2721                    if let Err(err) = vote_account.checked_add_lamports(vote_rewards) {
2722                        debug!("reward redemption failed for {}: {:?}", vote_pubkey, err);
2723                        return None;
2724                    }
2725
2726                    if vote_needs_store {
2727                        self.store_account(&vote_pubkey, &vote_account);
2728                    }
2729
2730                    Some((
2731                        vote_pubkey,
2732                        RewardInfo {
2733                            reward_type: RewardType::Voting,
2734                            lamports: vote_rewards as i64,
2735                            post_balance: vote_account.lamports(),
2736                            commission: Some(commission),
2737                        },
2738                    ))
2739                },
2740            )
2741            .collect::<Vec<_>>());
2742
2743        metrics
2744            .store_vote_accounts_us
2745            .fetch_add(store_vote_accounts_us, Relaxed);
2746        vote_rewards
2747    }
2748
2749    /// return reward info for each vote account
2750    /// return account data for each vote account that needs to be stored
2751    /// This return value is a little awkward at the moment so that downstream existing code in the non-partitioned rewards code path can be re-used without duplication or modification.
2752    /// This function is copied from the existing code path's `store_vote_accounts`.
2753    /// The primary differences:
2754    /// - we want this fn to have no side effects (such as actually storing vote accounts) so that we
2755    ///   can compare the expected results with the current code path
2756    /// - we want to be able to batch store the vote accounts later for improved performance/cache updating
2757    fn calc_vote_accounts_to_store(
2758        vote_account_rewards: DashMap<Pubkey, VoteReward>,
2759    ) -> VoteRewardsAccounts {
2760        let len = vote_account_rewards.len();
2761        let mut result = VoteRewardsAccounts {
2762            rewards: Vec::with_capacity(len),
2763            accounts_to_store: Vec::with_capacity(len),
2764        };
2765        vote_account_rewards.into_iter().for_each(
2766            |(
2767                vote_pubkey,
2768                VoteReward {
2769                    mut vote_account,
2770                    commission,
2771                    vote_rewards,
2772                    vote_needs_store,
2773                },
2774            )| {
2775                if let Err(err) = vote_account.checked_add_lamports(vote_rewards) {
2776                    debug!("reward redemption failed for {}: {:?}", vote_pubkey, err);
2777                    return;
2778                }
2779
2780                result.rewards.push((
2781                    vote_pubkey,
2782                    RewardInfo {
2783                        reward_type: RewardType::Voting,
2784                        lamports: vote_rewards as i64,
2785                        post_balance: vote_account.lamports(),
2786                        commission: Some(commission),
2787                    },
2788                ));
2789                result
2790                    .accounts_to_store
2791                    .push(vote_needs_store.then_some(vote_account));
2792            },
2793        );
2794        result
2795    }
2796
2797    fn update_reward_history(
2798        &self,
2799        stake_rewards: StakeRewards,
2800        mut vote_rewards: Vec<(Pubkey, RewardInfo)>,
2801    ) {
2802        let additional_reserve = stake_rewards.len() + vote_rewards.len();
2803        let mut rewards = self.rewards.write().unwrap();
2804        rewards.reserve(additional_reserve);
2805        rewards.append(&mut vote_rewards);
2806        stake_rewards
2807            .into_iter()
2808            .filter(|x| x.get_stake_reward() > 0)
2809            .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info)));
2810    }
2811
    /// Rewrite the (deprecated) `RecentBlockhashes` sysvar account from the
    /// supplied, already-locked blockhash queue.
    fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) {
        #[allow(deprecated)]
        self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| {
            let recent_blockhash_iter = locked_blockhash_queue.get_recent_blockhashes();
            recent_blockhashes_account::create_account_with_data_and_fields(
                recent_blockhash_iter,
                // Preserve lamports/rent-epoch from the existing account.
                self.inherit_specially_retained_account_fields(account),
            )
        });
    }
2822
2823    pub fn update_recent_blockhashes(&self) {
2824        let blockhash_queue = self.blockhash_queue.read().unwrap();
2825        self.update_recent_blockhashes_locked(&blockhash_queue);
2826    }
2827
2828    fn get_timestamp_estimate(
2829        &self,
2830        max_allowable_drift: MaxAllowableDrift,
2831        epoch_start_timestamp: Option<(Slot, UnixTimestamp)>,
2832    ) -> Option<UnixTimestamp> {
2833        let mut get_timestamp_estimate_time = Measure::start("get_timestamp_estimate");
2834        let slots_per_epoch = self.epoch_schedule().slots_per_epoch;
2835        let vote_accounts = self.vote_accounts();
2836        let recent_timestamps = vote_accounts.iter().filter_map(|(pubkey, (_, account))| {
2837            let vote_state = account.vote_state();
2838            let slot_delta = self.slot().checked_sub(vote_state.last_timestamp.slot)?;
2839            (slot_delta <= slots_per_epoch).then_some({
2840                (
2841                    *pubkey,
2842                    (
2843                        vote_state.last_timestamp.slot,
2844                        vote_state.last_timestamp.timestamp,
2845                    ),
2846                )
2847            })
2848        });
2849        let slot_duration = Duration::from_nanos(self.ns_per_slot as u64);
2850        let epoch = self.epoch_schedule().get_epoch(self.slot());
2851        let stakes = self.epoch_vote_accounts(epoch)?;
2852        let stake_weighted_timestamp = calculate_stake_weighted_timestamp(
2853            recent_timestamps,
2854            stakes,
2855            self.slot(),
2856            slot_duration,
2857            epoch_start_timestamp,
2858            max_allowable_drift,
2859            self.feature_set
2860                .is_active(&feature_set::warp_timestamp_again::id()),
2861        );
2862        get_timestamp_estimate_time.stop();
2863        datapoint_info!(
2864            "bank-timestamp",
2865            (
2866                "get_timestamp_estimate_us",
2867                get_timestamp_estimate_time.as_us(),
2868                i64
2869            ),
2870        );
2871        stake_weighted_timestamp
2872    }
2873
    /// Recalculates the bank hash
    ///
    /// This is used by ledger-tool when creating a snapshot, which
    /// recalcuates the bank hash.
    ///
    /// Note that the account state is *not* allowed to change by rehashing.
    /// If it does, this function will panic.
    /// If modifying accounts in ledger-tool is needed, create a new bank.
    pub fn rehash(&self) {
        let get_delta_hash = || {
            self.rc
                .accounts
                .accounts_db
                .get_accounts_delta_hash(self.slot())
        };

        // Hold the hash write lock for the whole recalculation so nobody
        // observes a partially-updated hash.
        let mut hash = self.hash.write().unwrap();
        // Capture the accounts delta hash before and after recomputing the
        // bank hash; any difference means account state changed, which
        // rehashing must never do.
        let curr_accounts_delta_hash = get_delta_hash();
        let new = self.hash_internal_state();
        if let Some(curr_accounts_delta_hash) = curr_accounts_delta_hash {
            let new_accounts_delta_hash = get_delta_hash().unwrap();
            assert_eq!(
                new_accounts_delta_hash, curr_accounts_delta_hash,
                "rehashing is not allowed to change the account state",
            );
        }
        if new != *hash {
            warn!("Updating bank hash to {new}");
            *hash = new;
        }
    }
2905
    /// Freeze the bank: apply deferred state changes (rent collection, fee
    /// and rent distribution, slot history, incinerator) and compute the
    /// final bank hash. Idempotent; after the first call the bank accepts no
    /// further transactions.
    pub fn freeze(&self) {
        // This lock prevents any new commits from BankingStage
        // `Consumer::execute_and_commit_transactions_locked()` from
        // coming in after the last tick is observed. This is because in
        // BankingStage, any transaction successfully recorded in
        // `record_transactions()` is recorded after this `hash` lock
        // is grabbed. At the time of the successful record,
        // this means the PoH has not yet reached the last tick,
        // so this means freeze() hasn't been called yet. And because
        // BankingStage doesn't release this hash lock until both
        // record and commit are finished, those transactions will be
        // committed before this write lock can be obtained here.
        let mut hash = self.hash.write().unwrap();
        // A default hash means the bank has not been frozen yet.
        if *hash == Hash::default() {
            // finish up any deferred changes to account state
            self.collect_rent_eagerly();
            if self.feature_set.is_active(&reward_full_priority_fee::id()) {
                self.distribute_transaction_fee_details();
            } else {
                self.distribute_transaction_fees();
            }
            self.distribute_rent_fees();
            self.update_slot_history();
            self.run_incinerator();

            // freeze is a one-way trip, idempotent
            self.freeze_started.store(true, Relaxed);
            if self.is_accounts_lt_hash_enabled() {
                // updating the accounts lt hash must happen *outside* of hash_internal_state() so
                // that rehash() can be called and *not* modify self.accounts_lt_hash.
                self.update_accounts_lt_hash();
            }
            *hash = self.hash_internal_state();
            self.rc.accounts.accounts_db.mark_slot_frozen(self.slot());
        }
    }
2942
    // dangerous; don't use this; this is only needed for ledger-tool's special command
    // It reverses the one-way `freeze_started` flag, violating the normal
    // bank lifecycle invariant that a frozen bank never accepts writes.
    #[cfg(feature = "dev-context-only-utils")]
    pub fn unfreeze_for_ledger_tool(&self) {
        self.freeze_started.store(false, Relaxed);
    }
2948
    /// Returns the bank's epoch schedule.
    pub fn epoch_schedule(&self) -> &EpochSchedule {
        &self.epoch_schedule
    }
2952
2953    /// squash the parent's state up into this Bank,
2954    ///   this Bank becomes a root
2955    /// Note that this function is not thread-safe. If it is called concurrently on the same bank
2956    /// by multiple threads, the end result could be inconsistent.
2957    /// Calling code does not currently call this concurrently.
2958    pub fn squash(&self) -> SquashTiming {
2959        self.freeze();
2960
2961        //this bank and all its parents are now on the rooted path
2962        let mut roots = vec![self.slot()];
2963        roots.append(&mut self.parents().iter().map(|p| p.slot()).collect());
2964
2965        let mut total_index_us = 0;
2966        let mut total_cache_us = 0;
2967        let mut total_store_us = 0;
2968
2969        let mut squash_accounts_time = Measure::start("squash_accounts_time");
2970        for slot in roots.iter().rev() {
2971            // root forks cannot be purged
2972            let add_root_timing = self.rc.accounts.add_root(*slot);
2973            total_index_us += add_root_timing.index_us;
2974            total_cache_us += add_root_timing.cache_us;
2975            total_store_us += add_root_timing.store_us;
2976        }
2977        squash_accounts_time.stop();
2978
2979        *self.rc.parent.write().unwrap() = None;
2980
2981        let mut squash_cache_time = Measure::start("squash_cache_time");
2982        roots
2983            .iter()
2984            .for_each(|slot| self.status_cache.write().unwrap().add_root(*slot));
2985        squash_cache_time.stop();
2986
2987        SquashTiming {
2988            squash_accounts_ms: squash_accounts_time.as_ms(),
2989            squash_accounts_index_ms: total_index_us / 1000,
2990            squash_accounts_cache_ms: total_cache_us / 1000,
2991            squash_accounts_store_ms: total_store_us / 1000,
2992
2993            squash_cache_ms: squash_cache_time.as_ms(),
2994        }
2995    }
2996
    /// Return the more recent checkpoint of this bank instance.
    /// Returns `None` once this bank has been squashed (squash clears the parent link).
    pub fn parent(&self) -> Option<Arc<Bank>> {
        self.rc.parent.read().unwrap().clone()
    }
3001
    /// Slot of this bank's parent, recorded at construction time.
    pub fn parent_slot(&self) -> Slot {
        self.parent_slot
    }
3005
    /// Hash of this bank's parent, recorded at construction time.
    pub fn parent_hash(&self) -> Hash {
        self.parent_hash
    }
3009
    /// Initialize this bank's state from the genesis config: store genesis accounts,
    /// pick the collector id, seed the blockhash queue, and copy chain parameters
    /// (tick/slot timing, epoch schedule, inflation, rent) from the config.
    ///
    /// The two `dev-context-only-utils` parameters let tests override the collector id
    /// and the genesis hash; in production builds they do not exist.
    fn process_genesis_config(
        &mut self,
        genesis_config: &GenesisConfig,
        #[cfg(feature = "dev-context-only-utils")] collector_id_for_tests: Option<Pubkey>,
        #[cfg(feature = "dev-context-only-utils")] genesis_hash: Option<Hash>,
    ) {
        // Bootstrap validator collects fees until `new_from_parent` is called.
        self.fee_rate_governor = genesis_config.fee_rate_governor.clone();

        // Store every genesis account; duplicates are a config error. Each account's
        // lamports are added to capitalization and its data size to the initial
        // accounts-data-size tally.
        for (pubkey, account) in genesis_config.accounts.iter() {
            assert!(
                self.get_account(pubkey).is_none(),
                "{pubkey} repeated in genesis config"
            );
            self.store_account(pubkey, &account.to_account_shared_data());
            self.capitalization.fetch_add(account.lamports(), Relaxed);
            self.accounts_data_size_initial += account.data().len() as u64;
        }

        // Rewards pool accounts are stored like the above, but note: their lamports are
        // deliberately NOT added to capitalization (no fetch_add here).
        for (pubkey, account) in genesis_config.rewards_pools.iter() {
            assert!(
                self.get_account(pubkey).is_none(),
                "{pubkey} repeated in genesis config"
            );
            self.store_account(pubkey, &account.to_account_shared_data());
            self.accounts_data_size_initial += account.data().len() as u64;
        }

        // After storing genesis accounts, the bank stakes cache will be warmed
        // up and can be used to set the collector id to the highest staked
        // node. If no staked nodes exist, allow fallback to an unstaked test
        // collector id during tests.
        let collector_id = self.stakes_cache.stakes().highest_staked_node().copied();
        #[cfg(feature = "dev-context-only-utils")]
        let collector_id = collector_id.or(collector_id_for_tests);
        self.collector_id =
            collector_id.expect("genesis processing failed because no staked nodes exist");

        // Production builds always derive the genesis hash from the config; test builds
        // may override it.
        #[cfg(not(feature = "dev-context-only-utils"))]
        let genesis_hash = genesis_config.hash();
        #[cfg(feature = "dev-context-only-utils")]
        let genesis_hash = genesis_hash.unwrap_or(genesis_config.hash());

        self.blockhash_queue
            .write()
            .unwrap()
            .genesis_hash(&genesis_hash, self.fee_rate_governor.lamports_per_signature);

        // Copy chain timing/shape parameters out of the genesis config.
        self.hashes_per_tick = genesis_config.hashes_per_tick();
        self.ticks_per_slot = genesis_config.ticks_per_slot();
        self.ns_per_slot = genesis_config.ns_per_slot();
        self.genesis_creation_time = genesis_config.creation_time;
        self.max_tick_height = (self.slot + 1) * self.ticks_per_slot;
        self.slots_per_year = genesis_config.slots_per_year();

        self.epoch_schedule = genesis_config.epoch_schedule.clone();

        self.inflation = Arc::new(RwLock::new(genesis_config.inflation));

        self.rent_collector = RentCollector::new(
            self.epoch,
            self.epoch_schedule().clone(),
            self.slots_per_year,
            genesis_config.rent.clone(),
        );

        // Add additional builtin programs specified in the genesis config
        for (name, program_id) in &genesis_config.native_instruction_processors {
            self.add_builtin_account(name, program_id);
        }
    }
3081
3082    fn burn_and_purge_account(&self, program_id: &Pubkey, mut account: AccountSharedData) {
3083        let old_data_size = account.data().len();
3084        self.capitalization.fetch_sub(account.lamports(), Relaxed);
3085        // Both resetting account balance to 0 and zeroing the account data
3086        // is needed to really purge from AccountsDb and flush the Stakes cache
3087        account.set_lamports(0);
3088        account.data_as_mut_slice().fill(0);
3089        self.store_account(program_id, &account);
3090        self.calculate_and_update_accounts_data_size_delta_off_chain(old_data_size, 0);
3091    }
3092
    /// Add a precompiled program account
    ///
    /// Delegates to [`Self::add_precompiled_account_with_owner`] with the native
    /// loader as the owner.
    pub fn add_precompiled_account(&self, program_id: &Pubkey) {
        self.add_precompiled_account_with_owner(program_id, native_loader::id())
    }
3097
3098    // Used by tests to simulate clusters with precompiles that aren't owned by the native loader
3099    fn add_precompiled_account_with_owner(&self, program_id: &Pubkey, owner: Pubkey) {
3100        if let Some(account) = self.get_account_with_fixed_root(program_id) {
3101            if account.executable() {
3102                return;
3103            } else {
3104                // malicious account is pre-occupying at program_id
3105                self.burn_and_purge_account(program_id, account);
3106            }
3107        };
3108
3109        assert!(
3110            !self.freeze_started(),
3111            "Can't change frozen bank by adding not-existing new precompiled program ({program_id}). \
3112                Maybe, inconsistent program activation is detected on snapshot restore?"
3113        );
3114
3115        // Add a bogus executable account, which will be loaded and ignored.
3116        let (lamports, rent_epoch) = self.inherit_specially_retained_account_fields(&None);
3117
3118        let account = AccountSharedData::from(Account {
3119            lamports,
3120            owner,
3121            data: vec![],
3122            executable: true,
3123            rent_epoch,
3124        });
3125        self.store_account_and_update_capitalization(program_id, &account);
3126    }
3127
    /// Set the percentage of collected rent that is burned rather than distributed.
    pub fn set_rent_burn_percentage(&mut self, burn_percent: u8) {
        self.rent_collector.rent.burn_percent = burn_percent;
    }
3131
    /// Override the number of PoH hashes per tick (`None` disables hashing-per-tick).
    pub fn set_hashes_per_tick(&mut self, hashes_per_tick: Option<u64>) {
        self.hashes_per_tick = hashes_per_tick;
    }
3135
    /// Return the last block hash registered.
    pub fn last_blockhash(&self) -> Hash {
        self.blockhash_queue.read().unwrap().last_hash()
    }
3140
3141    pub fn last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) {
3142        let blockhash_queue = self.blockhash_queue.read().unwrap();
3143        let last_hash = blockhash_queue.last_hash();
3144        let last_lamports_per_signature = blockhash_queue
3145            .get_lamports_per_signature(&last_hash)
3146            .unwrap(); // safe so long as the BlockhashQueue is consistent
3147        (last_hash, last_lamports_per_signature)
3148    }
3149
3150    pub fn is_blockhash_valid(&self, hash: &Hash) -> bool {
3151        let blockhash_queue = self.blockhash_queue.read().unwrap();
3152        blockhash_queue.is_hash_valid_for_age(hash, MAX_PROCESSING_AGE)
3153    }
3154
3155    pub fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> u64 {
3156        self.rent_collector.rent.minimum_balance(data_len).max(1)
3157    }
3158
    /// Current lamports-per-signature fee from the fee rate governor.
    pub fn get_lamports_per_signature(&self) -> u64 {
        self.fee_rate_governor.lamports_per_signature
    }
3162
3163    pub fn get_lamports_per_signature_for_blockhash(&self, hash: &Hash) -> Option<u64> {
3164        let blockhash_queue = self.blockhash_queue.read().unwrap();
3165        blockhash_queue.get_lamports_per_signature(hash)
3166    }
3167
3168    pub fn get_fee_for_message(&self, message: &SanitizedMessage) -> Option<u64> {
3169        let lamports_per_signature = {
3170            let blockhash_queue = self.blockhash_queue.read().unwrap();
3171            blockhash_queue.get_lamports_per_signature(message.recent_blockhash())
3172        }
3173        .or_else(|| {
3174            self.load_message_nonce_account(message).map(
3175                |(_nonce_address, _nonce_account, nonce_data)| {
3176                    nonce_data.get_lamports_per_signature()
3177                },
3178            )
3179        })?;
3180        Some(self.get_fee_for_message_with_lamports_per_signature(message, lamports_per_signature))
3181    }
3182
    /// Returns true when startup accounts hash verification has completed or never had to run in background.
    ///
    /// The flag lives in accounts-db's background verification state and is shared
    /// via `Arc` so callers can observe completion asynchronously.
    pub fn get_startup_verification_complete(&self) -> &Arc<AtomicBool> {
        &self
            .rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .verified
    }
3192
    /// return true if bg hash verification is complete
    /// return false if bg hash verification has not completed yet
    /// if hash verification failed, a panic will occur
    pub fn is_startup_verification_complete(&self) -> bool {
        self.has_initial_accounts_hash_verification_completed()
    }
3199
    /// Mark startup accounts-hash verification as complete.
    /// This can occur because it completed in the background
    /// or if the verification was run in the foreground.
    pub fn set_startup_verification_complete(&self) {
        self.set_initial_accounts_hash_verification_completed();
    }
3205
3206    pub fn get_fee_for_message_with_lamports_per_signature(
3207        &self,
3208        message: &impl SVMMessage,
3209        lamports_per_signature: u64,
3210    ) -> u64 {
3211        let fee_budget_limits = FeeBudgetLimits::from(
3212            process_compute_budget_instructions(
3213                message.program_instructions_iter(),
3214                &self.feature_set,
3215            )
3216            .unwrap_or_default(),
3217        );
3218        solana_fee::calculate_fee(
3219            message,
3220            lamports_per_signature == 0,
3221            self.fee_structure().lamports_per_signature,
3222            fee_budget_limits.prioritization_fee,
3223            self.feature_set
3224                .is_active(&remove_rounding_in_fee_calculation::id()),
3225        )
3226    }
3227
3228    pub fn get_blockhash_last_valid_block_height(&self, blockhash: &Hash) -> Option<Slot> {
3229        let blockhash_queue = self.blockhash_queue.read().unwrap();
3230        // This calculation will need to be updated to consider epoch boundaries if BlockhashQueue
3231        // length is made variable by epoch
3232        blockhash_queue
3233            .get_hash_age(blockhash)
3234            .map(|age| self.block_height + MAX_PROCESSING_AGE as u64 - age)
3235    }
3236
3237    pub fn confirmed_last_blockhash(&self) -> Hash {
3238        const NUM_BLOCKHASH_CONFIRMATIONS: usize = 3;
3239
3240        let parents = self.parents();
3241        if parents.is_empty() {
3242            self.last_blockhash()
3243        } else {
3244            let index = NUM_BLOCKHASH_CONFIRMATIONS.min(parents.len() - 1);
3245            parents[index].last_blockhash()
3246        }
3247    }
3248
    /// Forget all signatures. Useful for benchmarking.
    pub fn clear_signatures(&self) {
        self.status_cache.write().unwrap().clear();
    }
3253
    /// Remove all status-cache entries recorded for `slot`.
    pub fn clear_slot_signatures(&self, slot: Slot) {
        self.status_cache.write().unwrap().clear_slot_entries(slot);
    }
3257
3258    fn update_transaction_statuses(
3259        &self,
3260        sanitized_txs: &[SanitizedTransaction],
3261        processing_results: &[TransactionProcessingResult],
3262    ) {
3263        let mut status_cache = self.status_cache.write().unwrap();
3264        assert_eq!(sanitized_txs.len(), processing_results.len());
3265        for (tx, processing_result) in sanitized_txs.iter().zip(processing_results) {
3266            if let Ok(processed_tx) = &processing_result {
3267                // Add the message hash to the status cache to ensure that this message
3268                // won't be processed again with a different signature.
3269                status_cache.insert(
3270                    tx.message().recent_blockhash(),
3271                    tx.message_hash(),
3272                    self.slot(),
3273                    processed_tx.status(),
3274                );
3275                // Add the transaction signature to the status cache so that transaction status
3276                // can be queried by transaction signature over RPC. In the future, this should
3277                // only be added for API nodes because voting validators don't need to do this.
3278                status_cache.insert(
3279                    tx.message().recent_blockhash(),
3280                    tx.signature(),
3281                    self.slot(),
3282                    processed_tx.status(),
3283                );
3284            }
3285        }
3286    }
3287
    /// Register a new recent blockhash in the bank's recent blockhash queue. Called when a bank
    /// reaches its max tick height. Can be called by tests to get new blockhashes for transaction
    /// processing without advancing to a new bank slot.
    fn register_recent_blockhash(&self, blockhash: &Hash, scheduler: &InstalledSchedulerRwLock) {
        // This is needed because recent_blockhash updates necessitate synchronizations for
        // consistent tx check_age handling.
        BankWithScheduler::wait_for_paused_scheduler(self, scheduler);

        // Only acquire the write lock for the blockhash queue on block boundaries because
        // readers can starve this write lock acquisition and ticks would be slowed down too
        // much if the write lock is acquired for each tick.
        let mut w_blockhash_queue = self.blockhash_queue.write().unwrap();

        // Dev/test-only hook: a per-slot override configured in `hash_overrides` may
        // replace the provided blockhash; a differing override is logged.
        #[cfg(feature = "dev-context-only-utils")]
        let blockhash_override = self
            .hash_overrides
            .lock()
            .unwrap()
            .get_blockhash_override(self.slot())
            .copied()
            .inspect(|blockhash_override| {
                if blockhash_override != blockhash {
                    info!(
                        "bank: slot: {}: overrode blockhash: {} with {}",
                        self.slot(),
                        blockhash,
                        blockhash_override
                    );
                }
            });
        #[cfg(feature = "dev-context-only-utils")]
        let blockhash = blockhash_override.as_ref().unwrap_or(blockhash);

        w_blockhash_queue.register_hash(blockhash, self.fee_rate_governor.lamports_per_signature);
        // Keep the recent-blockhashes sysvar in sync with the queue just updated,
        // while still holding the write lock.
        self.update_recent_blockhashes_locked(&w_blockhash_queue);
    }
3324
    // gating this under #[cfg(feature = "dev-context-only-utils")] isn't easy due to
    // solana-program-test's usage...
    //
    // Registers a freshly generated, unique blockhash with no scheduler installed.
    pub fn register_unique_recent_blockhash_for_test(&self) {
        self.register_recent_blockhash(
            &Hash::new_unique(),
            &BankWithScheduler::no_scheduler_available(),
        )
    }
3333
3334    #[cfg(feature = "dev-context-only-utils")]
3335    pub fn register_recent_blockhash_for_test(
3336        &self,
3337        blockhash: &Hash,
3338        lamports_per_signature: Option<u64>,
3339    ) {
3340        // Only acquire the write lock for the blockhash queue on block boundaries because
3341        // readers can starve this write lock acquisition and ticks would be slowed down too
3342        // much if the write lock is acquired for each tick.
3343        let mut w_blockhash_queue = self.blockhash_queue.write().unwrap();
3344        if let Some(lamports_per_signature) = lamports_per_signature {
3345            w_blockhash_queue.register_hash(blockhash, lamports_per_signature);
3346        } else {
3347            w_blockhash_queue
3348                .register_hash(blockhash, self.fee_rate_governor.lamports_per_signature);
3349        }
3350    }
3351
    /// Tell the bank which Entry IDs exist on the ledger. This function assumes subsequent calls
    /// correspond to later entries, and will boot the oldest ones once its internal cache is full.
    /// Once boot, the bank will reject transactions using that `hash`.
    ///
    /// This is NOT thread safe because if tick height is updated by two different threads, the
    /// block boundary condition could be missed.
    pub fn register_tick(&self, hash: &Hash, scheduler: &InstalledSchedulerRwLock) {
        // A frozen bank must not accept further ticks.
        assert!(
            !self.freeze_started(),
            "register_tick() working on a bank that is already frozen or is undergoing freezing!"
        );

        // Only the final tick of the block registers its hash as a recent blockhash.
        if self.is_block_boundary(self.tick_height.load(Relaxed) + 1) {
            self.register_recent_blockhash(hash, scheduler);
        }

        // ReplayStage will start computing the accounts delta hash when it
        // detects the tick height has reached the boundary, so the system
        // needs to guarantee all account updates for the slot have been
        // committed before this tick height is incremented (like the blockhash
        // sysvar above)
        self.tick_height.fetch_add(1, Relaxed);
    }
3375
    /// Test helper: register a tick with no scheduler installed.
    #[cfg(feature = "dev-context-only-utils")]
    pub fn register_tick_for_test(&self, hash: &Hash) {
        self.register_tick(hash, &BankWithScheduler::no_scheduler_available())
    }
3380
    /// Test helper: register a tick using the all-zeros default hash.
    #[cfg(feature = "dev-context-only-utils")]
    pub fn register_default_tick_for_test(&self) {
        self.register_tick_for_test(&Hash::default())
    }
3385
    /// Test helper: register a tick using a freshly generated, unique hash.
    #[cfg(feature = "dev-context-only-utils")]
    pub fn register_unique_tick(&self) {
        self.register_tick_for_test(&Hash::new_unique())
    }
3390
    /// True once this bank has registered its final tick.
    pub fn is_complete(&self) -> bool {
        self.tick_height() == self.max_tick_height()
    }
3394
    /// True when `tick_height` is the final tick of this bank's block.
    pub fn is_block_boundary(&self, tick_height: u64) -> bool {
        tick_height == self.max_tick_height
    }
3398
3399    /// Get the max number of accounts that a transaction may lock in this block
3400    pub fn get_transaction_account_lock_limit(&self) -> usize {
3401        if let Some(transaction_account_lock_limit) = self.transaction_account_lock_limit {
3402            transaction_account_lock_limit
3403        } else if self
3404            .feature_set
3405            .is_active(&feature_set::increase_tx_account_lock_limit::id())
3406        {
3407            MAX_TX_ACCOUNT_LOCKS
3408        } else {
3409            64
3410        }
3411    }
3412
3413    /// Prepare a transaction batch from a list of versioned transactions from
3414    /// an entry. Used for tests only.
3415    pub fn prepare_entry_batch(
3416        &self,
3417        txs: Vec<VersionedTransaction>,
3418    ) -> Result<TransactionBatch<SanitizedTransaction>> {
3419        let sanitized_txs = txs
3420            .into_iter()
3421            .map(|tx| {
3422                SanitizedTransaction::try_create(
3423                    tx,
3424                    MessageHash::Compute,
3425                    None,
3426                    self,
3427                    self.get_reserved_account_keys(),
3428                )
3429            })
3430            .collect::<Result<Vec<_>>>()?;
3431        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
3432        let lock_results = self
3433            .rc
3434            .accounts
3435            .lock_accounts(sanitized_txs.iter(), tx_account_lock_limit);
3436        Ok(TransactionBatch::new(
3437            lock_results,
3438            self,
3439            OwnedOrBorrowed::Owned(sanitized_txs),
3440        ))
3441    }
3442
3443    /// Attempt to take locks on the accounts in a transaction batch
3444    pub fn try_lock_accounts(&self, txs: &[SanitizedTransaction]) -> Vec<Result<()>> {
3445        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
3446        self.rc
3447            .accounts
3448            .lock_accounts(txs.iter(), tx_account_lock_limit)
3449    }
3450
3451    /// Prepare a locked transaction batch from a list of sanitized transactions.
3452    pub fn prepare_sanitized_batch<'a, 'b>(
3453        &'a self,
3454        txs: &'b [SanitizedTransaction],
3455    ) -> TransactionBatch<'a, 'b, SanitizedTransaction> {
3456        TransactionBatch::new(
3457            self.try_lock_accounts(txs),
3458            self,
3459            OwnedOrBorrowed::Borrowed(txs),
3460        )
3461    }
3462
3463    /// Prepare a locked transaction batch from a list of sanitized transactions, and their cost
3464    /// limited packing status
3465    pub fn prepare_sanitized_batch_with_results<'a, 'b>(
3466        &'a self,
3467        transactions: &'b [SanitizedTransaction],
3468        transaction_results: impl Iterator<Item = Result<()>>,
3469    ) -> TransactionBatch<'a, 'b, SanitizedTransaction> {
3470        // this lock_results could be: Ok, AccountInUse, WouldExceedBlockMaxLimit or WouldExceedAccountMaxLimit
3471        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
3472        let lock_results = self.rc.accounts.lock_accounts_with_results(
3473            transactions.iter(),
3474            transaction_results,
3475            tx_account_lock_limit,
3476        );
3477        TransactionBatch::new(lock_results, self, OwnedOrBorrowed::Borrowed(transactions))
3478    }
3479
3480    /// Prepare a transaction batch from a single transaction without locking accounts
3481    pub fn prepare_unlocked_batch_from_single_tx<'a>(
3482        &'a self,
3483        transaction: &'a SanitizedTransaction,
3484    ) -> TransactionBatch<'_, '_, SanitizedTransaction> {
3485        let tx_account_lock_limit = self.get_transaction_account_lock_limit();
3486        let lock_result =
3487            validate_account_locks(transaction.message().account_keys(), tx_account_lock_limit);
3488        let mut batch = TransactionBatch::new(
3489            vec![lock_result],
3490            self,
3491            OwnedOrBorrowed::Borrowed(slice::from_ref(transaction)),
3492        );
3493        batch.set_needs_unlock(false);
3494        batch
3495    }
3496
    /// Run transactions against a frozen bank without committing the results
    ///
    /// Panics if the bank is not frozen; use `simulate_transaction_unchecked`
    /// to skip that check.
    pub fn simulate_transaction(
        &self,
        transaction: &SanitizedTransaction,
        enable_cpi_recording: bool,
    ) -> TransactionSimulationResult {
        assert!(self.is_frozen(), "simulation bank must be frozen");

        self.simulate_transaction_unchecked(transaction, enable_cpi_recording)
    }
3507
    /// Run transactions against a bank without committing the results; does not check if the bank
    /// is frozen, enabling use in single-Bank test frameworks
    ///
    /// Returns the post-simulation account states (for the transaction's static
    /// account keys), the execution status, logs, return data, inner instructions,
    /// and the total compute units consumed.
    pub fn simulate_transaction_unchecked(
        &self,
        transaction: &SanitizedTransaction,
        enable_cpi_recording: bool,
    ) -> TransactionSimulationResult {
        let account_keys = transaction.message().account_keys();
        let number_of_accounts = account_keys.len();
        let account_overrides = self.get_account_overrides_for_simulation(&account_keys);
        // Single-transaction batch with no real account locks taken.
        let batch = self.prepare_unlocked_batch_from_single_tx(transaction);
        let mut timings = ExecuteTimings::default();

        let LoadAndExecuteTransactionsOutput {
            mut processing_results,
            ..
        } = self.load_and_execute_transactions(
            &batch,
            // After simulation, transactions will need to be forwarded to the leader
            // for processing. During forwarding, the transaction could expire if the
            // delay is not accounted for.
            MAX_PROCESSING_AGE - MAX_TRANSACTION_FORWARDING_DELAY,
            &mut timings,
            &mut TransactionErrorMetrics::default(),
            TransactionProcessingConfig {
                account_overrides: Some(&account_overrides),
                check_program_modification_slot: self.check_program_modification_slot,
                compute_budget: self.compute_budget(),
                log_messages_bytes_limit: None,
                limit_to_load_programs: true,
                recording_config: ExecutionRecordingConfig {
                    enable_cpi_recording,
                    enable_log_recording: true,
                    enable_return_data_recording: true,
                },
                transaction_account_lock_limit: Some(self.get_transaction_account_lock_limit()),
            },
        );

        // Sum compute units across all programs, counting both successful and
        // errored execution units; saturating to avoid overflow.
        let units_consumed =
            timings
                .details
                .per_program_timings
                .iter()
                .fold(0, |acc: u64, (_, program_timing)| {
                    acc.saturating_add(program_timing.accumulated_units)
                        .saturating_add(program_timing.total_errored_units)
                });

        debug!("simulate_transaction: {:?}", timings);

        // Exactly one result is expected (single-tx batch); an empty vec is
        // mapped to InvalidProgramForExecution.
        let processing_result = processing_results
            .pop()
            .unwrap_or(Err(TransactionError::InvalidProgramForExecution));
        let (post_simulation_accounts, result, logs, return_data, inner_instructions) =
            match processing_result {
                Ok(processed_tx) => match processed_tx {
                    ProcessedTransaction::Executed(executed_tx) => {
                        let details = executed_tx.execution_details;
                        // Only the first `number_of_accounts` loaded accounts map to
                        // the transaction's own account keys.
                        let post_simulation_accounts = executed_tx
                            .loaded_transaction
                            .accounts
                            .into_iter()
                            .take(number_of_accounts)
                            .collect::<Vec<_>>();
                        (
                            post_simulation_accounts,
                            details.status,
                            details.log_messages,
                            details.return_data,
                            details.inner_instructions,
                        )
                    }
                    // Fees-only transactions never executed, so surface the load error.
                    ProcessedTransaction::FeesOnly(fees_only_tx) => {
                        (vec![], Err(fees_only_tx.load_error), None, None, None)
                    }
                },
                Err(error) => (vec![], Err(error), None, None, None),
            };
        let logs = logs.unwrap_or_default();

        TransactionSimulationResult {
            result,
            logs,
            post_simulation_accounts,
            units_consumed,
            return_data,
            inner_instructions,
        }
    }
3598
3599    fn get_account_overrides_for_simulation(&self, account_keys: &AccountKeys) -> AccountOverrides {
3600        let mut account_overrides = AccountOverrides::default();
3601        let slot_history_id = sysvar::slot_history::id();
3602        if account_keys.iter().any(|pubkey| *pubkey == slot_history_id) {
3603            let current_account = self.get_account_with_fixed_root(&slot_history_id);
3604            let slot_history = current_account
3605                .as_ref()
3606                .map(|account| from_account::<SlotHistory, _>(account).unwrap())
3607                .unwrap_or_default();
3608            if slot_history.check(self.slot()) == Check::Found {
3609                let ancestors = Ancestors::from(self.proper_ancestors().collect::<Vec<_>>());
3610                if let Some((account, _)) =
3611                    self.load_slow_with_fixed_root(&ancestors, &slot_history_id)
3612                {
3613                    account_overrides.set_slot_history(Some(account));
3614                }
3615            }
3616        }
3617        account_overrides
3618    }
3619
    /// Release account locks for the given transactions, paired with their
    /// original lock results; delegates to the accounts store.
    pub fn unlock_accounts<'a, Tx: SVMMessage + 'a>(
        &self,
        txs_and_results: impl Iterator<Item = (&'a Tx, &'a Result<()>)> + Clone,
    ) {
        self.rc.accounts.unlock_accounts(txs_and_results)
    }
3626
    /// Remove the given never-rooted slots from accounts-db.
    pub fn remove_unrooted_slots(&self, slots: &[(Slot, BankId)]) {
        self.rc.accounts.accounts_db.remove_unrooted_slots(slots)
    }
3630
    /// Age of `hash` in the blockhash queue, or `None` if it is unknown.
    pub fn get_hash_age(&self, hash: &Hash) -> Option<u64> {
        self.blockhash_queue.read().unwrap().get_hash_age(hash)
    }
3634
3635    pub fn is_hash_valid_for_age(&self, hash: &Hash, max_age: usize) -> bool {
3636        self.blockhash_queue
3637            .read()
3638            .unwrap()
3639            .is_hash_valid_for_age(hash, max_age)
3640    }
3641
3642    pub fn collect_balances(
3643        &self,
3644        batch: &TransactionBatch<impl SVMMessage>,
3645    ) -> TransactionBalances {
3646        let mut balances: TransactionBalances = vec![];
3647        for transaction in batch.sanitized_transactions() {
3648            let mut transaction_balances: Vec<u64> = vec![];
3649            for account_key in transaction.account_keys().iter() {
3650                transaction_balances.push(self.get_balance(account_key));
3651            }
3652            balances.push(transaction_balances);
3653        }
3654        balances
3655    }
3656
    /// Check, load, and execute (but do not commit) all transactions in
    /// `batch`.
    ///
    /// Runs `check_transactions()` over the batch's lock results, builds a
    /// `TransactionProcessingEnvironment` from this bank's current state, and
    /// hands the batch to the SVM transaction processor. Per-transaction logs
    /// are then collected and aggregate counts and timings are updated.
    ///
    /// - `max_age`: forwarded to `check_transactions()`
    /// - `timings` / `error_counters`: accumulated in place with this batch's
    ///   metrics
    pub fn load_and_execute_transactions(
        &self,
        batch: &TransactionBatch<SanitizedTransaction>,
        max_age: usize,
        timings: &mut ExecuteTimings,
        error_counters: &mut TransactionErrorMetrics,
        processing_config: TransactionProcessingConfig,
    ) -> LoadAndExecuteTransactionsOutput {
        let sanitized_txs = batch.sanitized_transactions();

        let (check_results, check_us) = measure_us!(self.check_transactions(
            sanitized_txs,
            batch.lock_results(),
            max_age,
            error_counters,
        ));
        timings.saturating_add_in_place(ExecuteTimingType::CheckUs, check_us);

        let (blockhash, lamports_per_signature) = self.last_blockhash_and_lamports_per_signature();
        let rent_collector_with_metrics =
            RentCollectorWithMetrics::new(self.rent_collector.clone());
        let processing_environment = TransactionProcessingEnvironment {
            blockhash,
            epoch_total_stake: Some(self.get_current_epoch_total_stake()),
            epoch_vote_accounts: Some(self.get_current_epoch_vote_accounts()),
            feature_set: Arc::clone(&self.feature_set),
            fee_structure: Some(&self.fee_structure),
            lamports_per_signature,
            rent_collector: Some(&rent_collector_with_metrics),
        };

        let sanitized_output = self
            .transaction_processor
            .load_and_execute_sanitized_transactions(
                self,
                sanitized_txs,
                check_results,
                &processing_environment,
                &processing_config,
            );

        // Accumulate the errors returned by the batch processor.
        error_counters.accumulate(&sanitized_output.error_metrics);

        // Accumulate the transaction batch execution timings.
        timings.accumulate(&sanitized_output.execute_timings);

        let ((), collect_logs_us) =
            measure_us!(self.collect_logs(sanitized_txs, &sanitized_output.processing_results));
        timings.saturating_add_in_place(ExecuteTimingType::CollectLogsUs, collect_logs_us);

        let mut processed_counts = ProcessedTransactionCounts::default();
        let err_count = &mut error_counters.total;

        for (processing_result, tx) in sanitized_output
            .processing_results
            .iter()
            .zip(sanitized_txs)
        {
            // Log (at info level) any transaction touching one of the
            // configured debug keys.
            if let Some(debug_keys) = &self.transaction_debug_keys {
                for key in tx.message().account_keys().iter() {
                    if debug_keys.contains(key) {
                        let result = processing_result.flattened_result();
                        info!("slot: {} result: {:?} tx: {:?}", self.slot, result, tx);
                        break;
                    }
                }
            }

            if processing_result.was_processed() {
                // Signature count must be accumulated only if the transaction
                // is processed, otherwise a mismatched count between banking
                // and replay could occur
                processed_counts.signature_count +=
                    u64::from(tx.message().header().num_required_signatures);
                processed_counts.processed_transactions_count += 1;

                if !tx.is_simple_vote_transaction() {
                    processed_counts.processed_non_vote_transactions_count += 1;
                }
            }

            match processing_result.flattened_result() {
                Ok(()) => {
                    processed_counts.processed_with_successful_result_count += 1;
                }
                Err(err) => {
                    // Only log (at debug level) while the error counter is
                    // still zero, to limit log volume.
                    if *err_count == 0 {
                        debug!("tx error: {:?} {:?}", err, tx);
                    }
                    *err_count += 1;
                }
            }
        }

        LoadAndExecuteTransactionsOutput {
            processing_results: sanitized_output.processing_results,
            processed_counts,
        }
    }
3757
3758    fn collect_logs(
3759        &self,
3760        transactions: &[SanitizedTransaction],
3761        processing_results: &[TransactionProcessingResult],
3762    ) {
3763        let transaction_log_collector_config =
3764            self.transaction_log_collector_config.read().unwrap();
3765        if transaction_log_collector_config.filter == TransactionLogCollectorFilter::None {
3766            return;
3767        }
3768
3769        let collected_logs: Vec<_> = processing_results
3770            .iter()
3771            .zip(transactions)
3772            .filter_map(|(processing_result, transaction)| {
3773                // Skip log collection for unprocessed transactions
3774                let processed_tx = processing_result.processed_transaction()?;
3775                // Skip log collection for unexecuted transactions
3776                let execution_details = processed_tx.execution_details()?;
3777                Self::collect_transaction_logs(
3778                    &transaction_log_collector_config,
3779                    transaction,
3780                    execution_details,
3781                )
3782            })
3783            .collect();
3784
3785        if !collected_logs.is_empty() {
3786            let mut transaction_log_collector = self.transaction_log_collector.write().unwrap();
3787            for (log, filtered_mentioned_addresses) in collected_logs {
3788                let transaction_log_index = transaction_log_collector.logs.len();
3789                transaction_log_collector.logs.push(log);
3790                for key in filtered_mentioned_addresses.into_iter() {
3791                    transaction_log_collector
3792                        .mentioned_address_map
3793                        .entry(key)
3794                        .or_default()
3795                        .push(transaction_log_index);
3796                }
3797            }
3798        }
3799    }
3800
3801    fn collect_transaction_logs(
3802        transaction_log_collector_config: &TransactionLogCollectorConfig,
3803        transaction: &SanitizedTransaction,
3804        execution_details: &TransactionExecutionDetails,
3805    ) -> Option<(TransactionLogInfo, Vec<Pubkey>)> {
3806        // Skip log collection if no log messages were recorded
3807        let log_messages = execution_details.log_messages.as_ref()?;
3808
3809        let mut filtered_mentioned_addresses = Vec::new();
3810        if !transaction_log_collector_config
3811            .mentioned_addresses
3812            .is_empty()
3813        {
3814            for key in transaction.message().account_keys().iter() {
3815                if transaction_log_collector_config
3816                    .mentioned_addresses
3817                    .contains(key)
3818                {
3819                    filtered_mentioned_addresses.push(*key);
3820                }
3821            }
3822        }
3823
3824        let is_vote = transaction.is_simple_vote_transaction();
3825        let store = match transaction_log_collector_config.filter {
3826            TransactionLogCollectorFilter::All => {
3827                !is_vote || !filtered_mentioned_addresses.is_empty()
3828            }
3829            TransactionLogCollectorFilter::AllWithVotes => true,
3830            TransactionLogCollectorFilter::None => false,
3831            TransactionLogCollectorFilter::OnlyMentionedAddresses => {
3832                !filtered_mentioned_addresses.is_empty()
3833            }
3834        };
3835
3836        if store {
3837            Some((
3838                TransactionLogInfo {
3839                    signature: *transaction.signature(),
3840                    result: execution_details.status.clone(),
3841                    is_vote,
3842                    log_messages: log_messages.clone(),
3843                },
3844                filtered_mentioned_addresses,
3845            ))
3846        } else {
3847            None
3848        }
3849    }
3850
3851    /// Load the accounts data size, in bytes
3852    pub fn load_accounts_data_size(&self) -> u64 {
3853        self.accounts_data_size_initial
3854            .saturating_add_signed(self.load_accounts_data_size_delta())
3855    }
3856
3857    /// Load the change in accounts data size in this Bank, in bytes
3858    pub fn load_accounts_data_size_delta(&self) -> i64 {
3859        let delta_on_chain = self.load_accounts_data_size_delta_on_chain();
3860        let delta_off_chain = self.load_accounts_data_size_delta_off_chain();
3861        delta_on_chain.saturating_add(delta_off_chain)
3862    }
3863
    /// Load the change in accounts data size in this Bank, in bytes, from on-chain events
    /// i.e. transactions
    pub fn load_accounts_data_size_delta_on_chain(&self) -> i64 {
        // Acquire pairs with the AcqRel RMW in update_accounts_data_size_delta_on_chain()
        self.accounts_data_size_delta_on_chain.load(Acquire)
    }
3869
    /// Load the change in accounts data size in this Bank, in bytes, from off-chain events
    /// i.e. rent collection
    pub fn load_accounts_data_size_delta_off_chain(&self) -> i64 {
        // Acquire pairs with the AcqRel RMW in update_accounts_data_size_delta_off_chain()
        self.accounts_data_size_delta_off_chain.load(Acquire)
    }
3875
3876    /// Update the accounts data size delta from on-chain events by adding `amount`.
3877    /// The arithmetic saturates.
3878    fn update_accounts_data_size_delta_on_chain(&self, amount: i64) {
3879        if amount == 0 {
3880            return;
3881        }
3882
3883        self.accounts_data_size_delta_on_chain
3884            .fetch_update(AcqRel, Acquire, |accounts_data_size_delta_on_chain| {
3885                Some(accounts_data_size_delta_on_chain.saturating_add(amount))
3886            })
3887            // SAFETY: unwrap() is safe since our update fn always returns `Some`
3888            .unwrap();
3889    }
3890
3891    /// Update the accounts data size delta from off-chain events by adding `amount`.
3892    /// The arithmetic saturates.
3893    fn update_accounts_data_size_delta_off_chain(&self, amount: i64) {
3894        if amount == 0 {
3895            return;
3896        }
3897
3898        self.accounts_data_size_delta_off_chain
3899            .fetch_update(AcqRel, Acquire, |accounts_data_size_delta_off_chain| {
3900                Some(accounts_data_size_delta_off_chain.saturating_add(amount))
3901            })
3902            // SAFETY: unwrap() is safe since our update fn always returns `Some`
3903            .unwrap();
3904    }
3905
3906    /// Calculate the data size delta and update the off-chain accounts data size delta
3907    fn calculate_and_update_accounts_data_size_delta_off_chain(
3908        &self,
3909        old_data_size: usize,
3910        new_data_size: usize,
3911    ) {
3912        let data_size_delta = calculate_data_size_delta(old_data_size, new_data_size);
3913        self.update_accounts_data_size_delta_off_chain(data_size_delta);
3914    }
3915
3916    fn filter_program_errors_and_collect_fee(
3917        &self,
3918        processing_results: &[TransactionProcessingResult],
3919    ) {
3920        let mut fees = 0;
3921
3922        processing_results.iter().for_each(|processing_result| {
3923            if let Ok(processed_tx) = processing_result {
3924                fees += processed_tx.fee_details().total_fee();
3925            }
3926        });
3927
3928        self.collector_fees.fetch_add(fees, Relaxed);
3929    }
3930
3931    // Note: this function is not yet used; next PR will call it behind a feature gate
3932    fn filter_program_errors_and_collect_fee_details(
3933        &self,
3934        processing_results: &[TransactionProcessingResult],
3935    ) {
3936        let mut accumulated_fee_details = FeeDetails::default();
3937
3938        processing_results.iter().for_each(|processing_result| {
3939            if let Ok(processed_tx) = processing_result {
3940                accumulated_fee_details.accumulate(&processed_tx.fee_details());
3941            }
3942        });
3943
3944        self.collector_fee_details
3945            .write()
3946            .unwrap()
3947            .accumulate(&accumulated_fee_details);
3948    }
3949
    /// Commit previously-executed transactions to the bank.
    ///
    /// Stores the modified accounts, collects rent and fees, synchronizes the
    /// stakes cache and program cache, records transaction statuses, and
    /// accumulates the provided `processed_counts` into bank-wide counters.
    /// Returns one `TransactionCommitResult` per input transaction.
    ///
    /// # Panics
    ///
    /// Panics if the bank has started freezing — a frozen bank must not be
    /// mutated.
    pub fn commit_transactions(
        &self,
        sanitized_txs: &[SanitizedTransaction],
        processing_results: Vec<TransactionProcessingResult>,
        processed_counts: &ProcessedTransactionCounts,
        timings: &mut ExecuteTimings,
    ) -> Vec<TransactionCommitResult> {
        assert!(
            !self.freeze_started(),
            "commit_transactions() working on a bank that is already frozen or is undergoing freezing!"
        );

        let ProcessedTransactionCounts {
            processed_transactions_count,
            processed_non_vote_transactions_count,
            processed_with_successful_result_count,
            signature_count,
        } = *processed_counts;

        self.increment_transaction_count(processed_transactions_count);
        self.increment_non_vote_transaction_count_since_restart(
            processed_non_vote_transactions_count,
        );
        self.increment_signature_count(signature_count);

        let processed_with_failure_result_count =
            processed_transactions_count.saturating_sub(processed_with_successful_result_count);
        self.transaction_error_count
            .fetch_add(processed_with_failure_result_count, Relaxed);

        if processed_transactions_count > 0 {
            // Mark that this bank differs from its parent and track
            // per-entry transaction statistics.
            self.is_delta.store(true, Relaxed);
            self.transaction_entries_count.fetch_add(1, Relaxed);
            self.transactions_per_entry_max
                .fetch_max(processed_transactions_count, Relaxed);
        }

        let ((), store_accounts_us) = measure_us!({
            // If geyser is present, we must collect `SanitizedTransaction`
            // references in order to comply with that interface - until it
            // is changed.
            let maybe_transaction_refs = self
                .accounts()
                .accounts_db
                .has_accounts_update_notifier()
                .then(|| sanitized_txs.iter().collect::<Vec<_>>());

            let (accounts_to_store, transactions) = collect_accounts_to_store(
                sanitized_txs,
                &maybe_transaction_refs,
                &processing_results,
            );
            self.rc.accounts.store_cached(
                (self.slot(), accounts_to_store.as_slice()),
                transactions.as_deref(),
            );
        });

        self.collect_rent(&processing_results);

        // Cached vote and stake accounts are synchronized with accounts-db
        // after each transaction.
        let ((), update_stakes_cache_us) =
            measure_us!(self.update_stakes_cache(sanitized_txs, &processing_results));

        let ((), update_executors_us) = measure_us!({
            // Lazily take the program-cache write lock only if at least one
            // successful transaction actually modified a program.
            let mut cache = None;
            for processing_result in &processing_results {
                if let Some(ProcessedTransaction::Executed(executed_tx)) =
                    processing_result.processed_transaction()
                {
                    let programs_modified_by_tx = &executed_tx.programs_modified_by_tx;
                    if executed_tx.was_successful() && !programs_modified_by_tx.is_empty() {
                        cache
                            .get_or_insert_with(|| {
                                self.transaction_processor.program_cache.write().unwrap()
                            })
                            .merge(programs_modified_by_tx);
                    }
                }
            }
        });

        // Only count data-size changes from transactions whose execution
        // status is Ok.
        let accounts_data_len_delta = processing_results
            .iter()
            .filter_map(|processing_result| processing_result.processed_transaction())
            .filter_map(|processed_tx| processed_tx.execution_details())
            .filter_map(|details| {
                details
                    .status
                    .is_ok()
                    .then_some(details.accounts_data_len_delta)
            })
            .sum();
        self.update_accounts_data_size_delta_on_chain(accounts_data_len_delta);

        let ((), update_transaction_statuses_us) =
            measure_us!(self.update_transaction_statuses(sanitized_txs, &processing_results));

        // The fee collection path is feature-gated.
        if self.feature_set.is_active(&reward_full_priority_fee::id()) {
            self.filter_program_errors_and_collect_fee_details(&processing_results)
        } else {
            self.filter_program_errors_and_collect_fee(&processing_results)
        };

        timings.saturating_add_in_place(ExecuteTimingType::StoreUs, store_accounts_us);
        timings.saturating_add_in_place(
            ExecuteTimingType::UpdateStakesCacheUs,
            update_stakes_cache_us,
        );
        timings.saturating_add_in_place(ExecuteTimingType::UpdateExecutorsUs, update_executors_us);
        timings.saturating_add_in_place(
            ExecuteTimingType::UpdateTransactionStatuses,
            update_transaction_statuses_us,
        );

        Self::create_commit_results(processing_results)
    }
4068
4069    fn create_commit_results(
4070        processing_results: Vec<TransactionProcessingResult>,
4071    ) -> Vec<TransactionCommitResult> {
4072        processing_results
4073            .into_iter()
4074            .map(|processing_result| match processing_result? {
4075                ProcessedTransaction::Executed(executed_tx) => {
4076                    let execution_details = executed_tx.execution_details;
4077                    let LoadedTransaction {
4078                        rent_debits,
4079                        accounts: loaded_accounts,
4080                        loaded_accounts_data_size,
4081                        fee_details,
4082                        ..
4083                    } = executed_tx.loaded_transaction;
4084
4085                    // Rent is only collected for successfully executed transactions
4086                    let rent_debits = if execution_details.was_successful() {
4087                        rent_debits
4088                    } else {
4089                        RentDebits::default()
4090                    };
4091
4092                    Ok(CommittedTransaction {
4093                        status: execution_details.status,
4094                        log_messages: execution_details.log_messages,
4095                        inner_instructions: execution_details.inner_instructions,
4096                        return_data: execution_details.return_data,
4097                        executed_units: execution_details.executed_units,
4098                        fee_details,
4099                        rent_debits,
4100                        loaded_account_stats: TransactionLoadedAccountsStats {
4101                            loaded_accounts_count: loaded_accounts.len(),
4102                            loaded_accounts_data_size,
4103                        },
4104                    })
4105                }
4106                ProcessedTransaction::FeesOnly(fees_only_tx) => Ok(CommittedTransaction {
4107                    status: Err(fees_only_tx.load_error),
4108                    log_messages: None,
4109                    inner_instructions: None,
4110                    return_data: None,
4111                    executed_units: 0,
4112                    rent_debits: RentDebits::default(),
4113                    fee_details: fees_only_tx.fee_details,
4114                    loaded_account_stats: TransactionLoadedAccountsStats {
4115                        loaded_accounts_count: fees_only_tx.rollback_accounts.count(),
4116                        loaded_accounts_data_size: fees_only_tx.rollback_accounts.data_size()
4117                            as u32,
4118                    },
4119                }),
4120            })
4121            .collect()
4122    }
4123
4124    fn collect_rent(&self, processing_results: &[TransactionProcessingResult]) {
4125        let collected_rent = processing_results
4126            .iter()
4127            .filter_map(|processing_result| processing_result.processed_transaction())
4128            .filter_map(|processed_tx| processed_tx.executed_transaction())
4129            .filter(|executed_tx| executed_tx.was_successful())
4130            .map(|executed_tx| executed_tx.loaded_transaction.rent)
4131            .sum();
4132        self.collected_rent.fetch_add(collected_rent, Relaxed);
4133    }
4134
4135    fn run_incinerator(&self) {
4136        if let Some((account, _)) =
4137            self.get_account_modified_since_parent_with_fixed_root(&incinerator::id())
4138        {
4139            self.capitalization.fetch_sub(account.lamports(), Relaxed);
4140            self.store_account(&incinerator::id(), &AccountSharedData::default());
4141        }
4142    }
4143
4144    /// Get stake and stake node accounts
4145    pub(crate) fn get_stake_accounts(&self, minimized_account_set: &DashSet<Pubkey>) {
4146        self.stakes_cache
4147            .stakes()
4148            .stake_delegations()
4149            .iter()
4150            .for_each(|(pubkey, _)| {
4151                minimized_account_set.insert(*pubkey);
4152            });
4153
4154        self.stakes_cache
4155            .stakes()
4156            .staked_nodes()
4157            .par_iter()
4158            .for_each(|(pubkey, _)| {
4159                minimized_account_set.insert(*pubkey);
4160            });
4161    }
4162
4163    /// After deserialize, populate skipped rewrites with accounts that would normally
4164    /// have had their data rewritten in this slot due to rent collection (but didn't).
4165    ///
4166    /// This is required when starting up from a snapshot to verify the bank hash.
4167    ///
4168    /// A second usage is from the `bank_to_xxx_snapshot_archive()` functions.  These fns call
4169    /// `Bank::rehash()` to handle if the user manually modified any accounts and thus requires
4170    /// calculating the bank hash again.  Since calculating the bank hash *takes* the skipped
4171    /// rewrites, this second time will not have any skipped rewrites, and thus the hash would be
4172    /// updated to the wrong value.  So, rebuild the skipped rewrites before rehashing.
4173    fn rebuild_skipped_rewrites(&self) {
4174        // If the feature gate to *not* add rent collection rewrites to the bank hash is enabled,
4175        // then do *not* add anything to our skipped_rewrites.
4176        if self.bank_hash_skips_rent_rewrites() {
4177            return;
4178        }
4179
4180        let (skipped_rewrites, measure_skipped_rewrites) =
4181            measure_time!(self.calculate_skipped_rewrites());
4182        info!(
4183            "Rebuilding skipped rewrites of {} accounts{measure_skipped_rewrites}",
4184            skipped_rewrites.len()
4185        );
4186
4187        *self.skipped_rewrites.lock().unwrap() = skipped_rewrites;
4188    }
4189
4190    /// Calculates (and returns) skipped rewrites for this bank
4191    ///
4192    /// Refer to `rebuild_skipped_rewrites()` for more documentation.
4193    /// This implementation is purposely separate to facilitate testing.
4194    ///
4195    /// The key observation is that accounts in Bank::skipped_rewrites are only used IFF the
4196    /// specific account is *not* already in the accounts delta hash.  If an account is not in
4197    /// the accounts delta hash, then it means the account was not modified.  Since (basically)
4198    /// all accounts are rent exempt, this means (basically) all accounts are unmodified by rent
4199    /// collection.  So we just need to load the accounts that would've been checked for rent
4200    /// collection, hash them, and add them to Bank::skipped_rewrites.
4201    ///
4202    /// As of this writing, there are ~350 million acounts on mainnet-beta.
4203    /// Rent collection almost always collects a single slot at a time.
4204    /// So 1 slot of 432,000, of 350 million accounts, is ~800 accounts per slot.
4205    /// Since we haven't started processing anything yet, it should be fast enough to simply
4206    /// load the accounts directly.
4207    /// Empirically, this takes about 3-4 milliseconds.
4208    fn calculate_skipped_rewrites(&self) -> HashMap<Pubkey, AccountHash> {
4209        // The returned skipped rewrites may include accounts that were actually *not* skipped!
4210        // (This is safe, as per the fn's documentation above.)
4211        self.get_accounts_for_skipped_rewrites()
4212            .map(|(pubkey, account_hash, _account)| (pubkey, account_hash))
4213            .collect()
4214    }
4215
4216    /// Loads accounts that were selected for rent collection this slot.
4217    /// After loading the accounts, also calculate and return the account hashes.
4218    /// This is used when dealing with skipped rewrites.
4219    fn get_accounts_for_skipped_rewrites(
4220        &self,
4221    ) -> impl Iterator<Item = (Pubkey, AccountHash, AccountSharedData)> + '_ {
4222        self.rent_collection_partitions()
4223            .into_iter()
4224            .map(accounts_partition::pubkey_range_from_partition)
4225            .flat_map(|pubkey_range| {
4226                self.rc
4227                    .accounts
4228                    .load_to_collect_rent_eagerly(&self.ancestors, pubkey_range)
4229            })
4230            .map(|(pubkey, account, _slot)| {
4231                let account_hash = AccountsDb::hash_account(&account, &pubkey);
4232                (pubkey, account_hash, account)
4233            })
4234    }
4235
4236    /// Returns the accounts, sorted by pubkey, that were part of accounts delta hash calculation
4237    /// This is used when writing a bank hash details file.
4238    pub(crate) fn get_accounts_for_bank_hash_details(&self) -> Vec<PubkeyHashAccount> {
4239        let accounts_db = &self.rc.accounts.accounts_db;
4240
4241        let mut accounts_written_this_slot =
4242            accounts_db.get_pubkey_hash_account_for_slot(self.slot());
4243
4244        // If we are skipping rewrites but also include them in the accounts delta hash, then we
4245        // need to go load those accounts and add them to the list of accounts written this slot.
4246        if !self.bank_hash_skips_rent_rewrites()
4247            && accounts_db.test_skip_rewrites_but_include_in_bank_hash
4248        {
4249            let pubkeys_written_this_slot: HashSet<_> = accounts_written_this_slot
4250                .iter()
4251                .map(|pubkey_hash_account| pubkey_hash_account.pubkey)
4252                .collect();
4253
4254            let rent_collection_accounts = self.get_accounts_for_skipped_rewrites();
4255            for (pubkey, hash, account) in rent_collection_accounts {
4256                if !pubkeys_written_this_slot.contains(&pubkey) {
4257                    accounts_written_this_slot.push(PubkeyHashAccount {
4258                        pubkey,
4259                        hash,
4260                        account,
4261                    });
4262                }
4263            }
4264        }
4265
4266        // Sort the accounts by pubkey to match the order of the accounts delta hash.
4267        // This also makes comparison of files from different nodes deterministic.
4268        accounts_written_this_slot.sort_unstable_by_key(|account| account.pubkey);
4269        accounts_written_this_slot
4270    }
4271
    /// Run eager rent collection for this bank's slot, unless lazy rent
    /// collection is enabled.
    ///
    /// The slot's rent partitions are collected in parallel when there are
    /// multiple partitions and their pubkey ranges do not overlap; otherwise
    /// they are collected serially. Metrics are reported via datapoint at the
    /// end.
    fn collect_rent_eagerly(&self) {
        // Lazy mode skips eager collection entirely.
        if self.lazy_rent_collection.load(Relaxed) {
            return;
        }

        let mut measure = Measure::start("collect_rent_eagerly-ms");
        let partitions = self.rent_collection_partitions();
        let count = partitions.len();
        let rent_metrics = RentMetrics::default();
        // partitions will usually be 1, but could be more if we skip slots
        let mut parallel = count > 1;
        if parallel {
            let ranges = partitions
                .iter()
                .map(|partition| {
                    (
                        *partition,
                        accounts_partition::pubkey_range_from_partition(*partition),
                    )
                })
                .collect::<Vec<_>>();
            // test every range to make sure ranges are not overlapping
            // some tests collect rent from overlapping ranges
            // example: [(0, 31, 32), (0, 0, 128), (0, 27, 128)]
            // read-modify-write of an account for rent collection cannot be done in parallel
            'outer: for i in 0..ranges.len() {
                for j in 0..ranges.len() {
                    if i == j {
                        continue;
                    }

                    let i = &ranges[i].1;
                    let j = &ranges[j].1;
                    // make sure i doesn't contain j
                    if i.contains(j.start()) || i.contains(j.end()) {
                        parallel = false;
                        break 'outer;
                    }
                }
            }

            if parallel {
                let thread_pool = &self.rc.accounts.accounts_db.thread_pool;
                thread_pool.install(|| {
                    ranges.into_par_iter().for_each(|range| {
                        self.collect_rent_in_range(range.0, range.1, &rent_metrics)
                    });
                });
            }
        }
        if !parallel {
            // collect serially
            partitions
                .into_iter()
                .for_each(|partition| self.collect_rent_in_partition(partition, &rent_metrics));
        }
        measure.stop();
        datapoint_info!(
            "collect_rent_eagerly",
            ("accounts", rent_metrics.count.load(Relaxed), i64),
            ("partitions", count, i64),
            ("total_time_us", measure.as_us(), i64),
            (
                "hold_range_us",
                rent_metrics.hold_range_us.load(Relaxed),
                i64
            ),
            ("load_us", rent_metrics.load_us.load(Relaxed), i64),
            ("collect_us", rent_metrics.collect_us.load(Relaxed), i64),
            ("hash_us", rent_metrics.hash_us.load(Relaxed), i64),
            ("store_us", rent_metrics.store_us.load(Relaxed), i64),
        );
    }
4345
4346    fn rent_collection_partitions(&self) -> Vec<Partition> {
4347        if !self.use_fixed_collection_cycle() {
4348            // This mode is for production/development/testing.
4349            // In this mode, we iterate over the whole pubkey value range for each epochs
4350            // including warm-up epochs.
4351            // The only exception is the situation where normal epochs are relatively short
4352            // (currently less than 2 day). In that case, we arrange a single collection
4353            // cycle to be multiple of epochs so that a cycle could be greater than the 2 day.
4354            self.variable_cycle_partitions()
4355        } else {
4356            // This mode is mainly for benchmarking only.
4357            // In this mode, we always iterate over the whole pubkey value range with
4358            // <slot_count_in_two_day> slots as a collection cycle, regardless warm-up or
4359            // alignment between collection cycles and epochs.
4360            // Thus, we can simulate stable processing load of eager rent collection,
4361            // strictly proportional to the number of pubkeys since genesis.
4362            self.fixed_cycle_partitions()
4363        }
4364    }
4365
4366    /// true if rent collection does NOT rewrite accounts whose pubkey indicates
4367    ///  it is time for rent collection, but the account is rent exempt.
4368    /// false if rent collection DOES rewrite accounts if the account is rent exempt
4369    /// This is the default behavior historically.
4370    fn bank_hash_skips_rent_rewrites(&self) -> bool {
4371        self.feature_set
4372            .is_active(&feature_set::skip_rent_rewrites::id())
4373    }
4374
4375    /// true if rent fees should be collected (i.e. disable_rent_fees_collection is NOT enabled)
4376    fn should_collect_rent(&self) -> bool {
4377        !self
4378            .feature_set
4379            .is_active(&feature_set::disable_rent_fees_collection::id())
4380    }
4381
    /// Collect rent from `accounts`
    ///
    /// This fn is called inside a parallel loop from `collect_rent_in_partition()`.  Avoid adding
    /// any code that causes contention on shared memory/data (i.e. do not update atomic metrics).
    ///
    /// The return value is a struct of computed values that `collect_rent_in_partition()` will
    /// reduce at the end of its parallel loop.  If possible, place data/computation that cause
    /// contention/take locks in the return struct and process them in
    /// `collect_rent_in_partition()` after reducing the parallel loop.
    fn collect_rent_from_accounts(
        &self,
        mut accounts: Vec<(Pubkey, AccountSharedData, Slot)>,
        rent_paying_pubkeys: Option<&HashSet<Pubkey>>,
        partition_index: PartitionIndex,
    ) -> CollectRentFromAccountsInfo {
        let mut rent_debits = RentDebits::default();
        let mut total_rent_collected_info = CollectedInfo::default();
        let mut accounts_to_store =
            Vec::<(&Pubkey, &AccountSharedData)>::with_capacity(accounts.len());
        let mut time_collecting_rent_us = 0;
        let mut time_storing_accounts_us = 0;
        // feature-gated: when active, unchanged accounts are not rewritten (see
        // `bank_hash_skips_rent_rewrites()` above)
        let can_skip_rewrites = self.bank_hash_skips_rent_rewrites();
        // test-only knob on AccountsDb: skip the rewrite but still include the
        // untouched account in the accounts delta hash (see the `else if` below)
        let test_skip_rewrites_but_include_in_bank_hash = self
            .rc
            .accounts
            .accounts_db
            .test_skip_rewrites_but_include_in_bank_hash;
        let mut skipped_rewrites = Vec::default();
        for (pubkey, account, _loaded_slot) in accounts.iter_mut() {
            let rent_epoch_pre = account.rent_epoch();
            // `collect_rent_from_account` may mutate `account`; the
            // rent_epoch pre/post comparison below detects that
            let (rent_collected_info, collect_rent_us) = measure_us!(collect_rent_from_account(
                &self.feature_set,
                &self.rent_collector,
                pubkey,
                account
            ));
            time_collecting_rent_us += collect_rent_us;
            let rent_epoch_post = account.rent_epoch();

            // did the account change in any way due to rent collection?
            let rent_epoch_changed = rent_epoch_post != rent_epoch_pre;
            let account_changed = rent_collected_info.rent_amount != 0 || rent_epoch_changed;

            // always store the account, regardless if it changed or not
            let always_store_accounts =
                !can_skip_rewrites && !test_skip_rewrites_but_include_in_bank_hash;

            // only store accounts where we collected rent
            // but get the hash for all these accounts even if collected rent is 0 (= not updated).
            // Also, there's another subtle side-effect from rewrites: this
            // ensures we verify the whole on-chain state (= all accounts)
            // via the bank delta hash slowly once per an epoch.
            if account_changed || always_store_accounts {
                if rent_collected_info.rent_amount > 0 {
                    // cross-check against the rent-paying index built at load time
                    if let Some(rent_paying_pubkeys) = rent_paying_pubkeys {
                        if !rent_paying_pubkeys.contains(pubkey) {
                            let partition_from_pubkey = accounts_partition::partition_from_pubkey(
                                pubkey,
                                self.epoch_schedule.slots_per_epoch,
                            );
                            // Submit datapoint instead of assert while we verify this is correct
                            datapoint_warn!(
                                "bank-unexpected_rent_paying_pubkey",
                                ("slot", self.slot(), i64),
                                ("pubkey", pubkey.to_string(), String),
                                ("partition_index", partition_index, i64),
                                ("partition_from_pubkey", partition_from_pubkey, i64)
                            );
                            warn!(
                                "Collecting rent from unexpected pubkey: {}, slot: {}, parent_slot: {:?}, \
                                partition_index: {}, partition_from_pubkey: {}",
                                pubkey,
                                self.slot(),
                                self.parent().map(|bank| bank.slot()),
                                partition_index,
                                partition_from_pubkey,
                            );
                        }
                    }
                } else {
                    debug_assert_eq!(rent_collected_info.rent_amount, 0);
                    if rent_epoch_changed {
                        datapoint_info!(
                            "bank-rent_collection_updated_only_rent_epoch",
                            ("slot", self.slot(), i64),
                            ("pubkey", pubkey.to_string(), String),
                            ("rent_epoch_pre", rent_epoch_pre, i64),
                            ("rent_epoch_post", rent_epoch_post, i64),
                        );
                    }
                }
                total_rent_collected_info += rent_collected_info;
                accounts_to_store.push((pubkey, account));
            } else if !account_changed
                && !can_skip_rewrites
                && test_skip_rewrites_but_include_in_bank_hash
            {
                // include rewrites that we skipped in the accounts delta hash.
                // This is what consensus requires prior to activation of bank_hash_skips_rent_rewrites.
                // This code path exists to allow us to test the long term effects on validators when the skipped rewrites
                // feature is enabled.
                let hash = AccountsDb::hash_account(account, pubkey);
                skipped_rewrites.push((*pubkey, hash));
            }
            // record the (possibly zero) rent debit for every visited account
            rent_debits.insert(pubkey, rent_collected_info.rent_amount, account.lamports());
        }

        if !accounts_to_store.is_empty() {
            // TODO: Maybe do not call `store_accounts()` here.  Instead return `accounts_to_store`
            // and have `collect_rent_in_partition()` perform all the stores.
            let (_, store_accounts_us) =
                measure_us!(self.store_accounts((self.slot(), &accounts_to_store[..])));
            time_storing_accounts_us += store_accounts_us;
        }

        CollectRentFromAccountsInfo {
            skipped_rewrites,
            rent_collected_info: total_rent_collected_info,
            rent_rewards: rent_debits.into_unordered_rewards_iter().collect(),
            time_collecting_rent_us,
            time_storing_accounts_us,
            num_accounts: accounts.len(),
        }
    }
4506
4507    /// convert 'partition' to a pubkey range and 'collect_rent_in_range'
4508    fn collect_rent_in_partition(&self, partition: Partition, metrics: &RentMetrics) {
4509        let subrange_full = accounts_partition::pubkey_range_from_partition(partition);
4510        self.collect_rent_in_range(partition, subrange_full, metrics)
4511    }
4512
4513    /// get all pubkeys that we expect to be rent-paying or None, if this was not initialized at load time (that should only exist in test cases)
4514    fn get_rent_paying_pubkeys(&self, partition: &Partition) -> Option<HashSet<Pubkey>> {
4515        self.rc
4516            .accounts
4517            .accounts_db
4518            .accounts_index
4519            .rent_paying_accounts_by_partition
4520            .get()
4521            .and_then(|rent_paying_accounts| {
4522                rent_paying_accounts.is_initialized().then(|| {
4523                    accounts_partition::get_partition_end_indexes(partition)
4524                        .into_iter()
4525                        .flat_map(|end_index| {
4526                            rent_paying_accounts.get_pubkeys_in_partition_index(end_index)
4527                        })
4528                        .cloned()
4529                        .collect::<HashSet<_>>()
4530                })
4531            })
4532    }
4533
    /// load accounts with pubkeys in 'subrange_full'
    /// collect rent and update 'account.rent_epoch' as necessary
    /// store accounts, whether rent was collected or not (depending on whether we skipping rewrites is enabled)
    /// update bank's rewrites set for all rewrites that were skipped
    fn collect_rent_in_range(
        &self,
        partition: Partition,
        subrange_full: RangeInclusive<Pubkey>,
        metrics: &RentMetrics,
    ) {
        let mut hold_range = Measure::start("hold_range");
        let thread_pool = &self.rc.accounts.accounts_db.thread_pool;
        thread_pool.install(|| {
            // hold the full range (flag=true); the matching flag=false call
            // below releases it once all chunks are processed
            self.rc
                .accounts
                .hold_range_in_memory(&subrange_full, true, thread_pool);
            hold_range.stop();
            metrics.hold_range_us.fetch_add(hold_range.as_us(), Relaxed);

            let rent_paying_pubkeys_ = self.get_rent_paying_pubkeys(&partition);
            let rent_paying_pubkeys = rent_paying_pubkeys_.as_ref();

            // divide the range into num_threads smaller ranges and process in parallel
            // Note that 'pubkey_range_from_partition' cannot easily be re-used here to break the range smaller.
            // It has special handling of 0..0 and partition_count changes affect all ranges unevenly.
            let num_threads = solana_accounts_db::accounts_db::quarter_thread_count() as u64;
            // chunk boundaries are computed on the pubkey's leading u64 prefix
            let sz = std::mem::size_of::<u64>();
            let start_prefix = accounts_partition::prefix_from_pubkey(subrange_full.start());
            let end_prefix_inclusive = accounts_partition::prefix_from_pubkey(subrange_full.end());
            let range = end_prefix_inclusive - start_prefix;
            let increment = range / num_threads;
            let mut results = (0..num_threads)
                .into_par_iter()
                .map(|chunk| {
                    let offset = |chunk| start_prefix + chunk * increment;
                    let start = offset(chunk);
                    let last = chunk == num_threads - 1;
                    // overwrite the first 8 bytes of `bound` with `prefix` (big-endian)
                    let merge_prefix = |prefix: u64, mut bound: Pubkey| {
                        bound.as_mut()[0..sz].copy_from_slice(&prefix.to_be_bytes());
                        bound
                    };
                    let start = merge_prefix(start, *subrange_full.start());
                    let (accounts, measure_load_accounts) = measure_time!(if last {
                        // final chunk absorbs the rounding remainder and ends
                        // exactly at the partition's inclusive end bound
                        let end = *subrange_full.end();
                        let subrange = start..=end; // IN-clusive
                        self.rc
                            .accounts
                            .load_to_collect_rent_eagerly(&self.ancestors, subrange)
                    } else {
                        let end = merge_prefix(offset(chunk + 1), *subrange_full.start());
                        let subrange = start..end; // EX-clusive, the next 'start' will be this same value
                        self.rc
                            .accounts
                            .load_to_collect_rent_eagerly(&self.ancestors, subrange)
                    });
                    CollectRentInPartitionInfo::new(
                        self.collect_rent_from_accounts(accounts, rent_paying_pubkeys, partition.1),
                        Duration::from_nanos(measure_load_accounts.as_ns()),
                    )
                })
                .reduce(
                    CollectRentInPartitionInfo::default,
                    CollectRentInPartitionInfo::reduce,
                );

            self.skipped_rewrites
                .lock()
                .unwrap()
                .extend(results.skipped_rewrites);

            // We cannot assert here that we collected from all expected keys.
            // Some accounts may have been topped off or may have had all funds removed and gone to 0 lamports.

            // release the range held above (flag=false)
            self.rc
                .accounts
                .hold_range_in_memory(&subrange_full, false, thread_pool);

            // fold the reduced per-chunk results into bank-level state and metrics
            self.collected_rent
                .fetch_add(results.rent_collected, Relaxed);
            self.update_accounts_data_size_delta_off_chain(
                -(results.accounts_data_size_reclaimed as i64),
            );
            self.rewards
                .write()
                .unwrap()
                .append(&mut results.rent_rewards);

            metrics
                .load_us
                .fetch_add(results.time_loading_accounts_us, Relaxed);
            metrics
                .collect_us
                .fetch_add(results.time_collecting_rent_us, Relaxed);
            metrics
                .store_us
                .fetch_add(results.time_storing_accounts_us, Relaxed);
            metrics.count.fetch_add(results.num_accounts, Relaxed);
        });
    }
4633
4634    pub(crate) fn fixed_cycle_partitions_between_slots(
4635        &self,
4636        starting_slot: Slot,
4637        ending_slot: Slot,
4638    ) -> Vec<Partition> {
4639        let slot_count_in_two_day = self.slot_count_in_two_day();
4640        accounts_partition::get_partitions(ending_slot, starting_slot, slot_count_in_two_day)
4641    }
4642
4643    fn fixed_cycle_partitions(&self) -> Vec<Partition> {
4644        self.fixed_cycle_partitions_between_slots(self.parent_slot(), self.slot())
4645    }
4646
4647    pub(crate) fn variable_cycle_partitions_between_slots(
4648        &self,
4649        starting_slot: Slot,
4650        ending_slot: Slot,
4651    ) -> Vec<Partition> {
4652        let (starting_epoch, mut starting_slot_index) =
4653            self.get_epoch_and_slot_index(starting_slot);
4654        let (ending_epoch, ending_slot_index) = self.get_epoch_and_slot_index(ending_slot);
4655
4656        let mut partitions = vec![];
4657        if starting_epoch < ending_epoch {
4658            let slot_skipped = (ending_slot - starting_slot) > 1;
4659            if slot_skipped {
4660                // Generate special partitions because there are skipped slots
4661                // exactly at the epoch transition.
4662
4663                let parent_last_slot_index = self.get_slots_in_epoch(starting_epoch) - 1;
4664
4665                // ... for parent epoch
4666                partitions.push(self.partition_from_slot_indexes_with_gapped_epochs(
4667                    starting_slot_index,
4668                    parent_last_slot_index,
4669                    starting_epoch,
4670                ));
4671
4672                if ending_slot_index > 0 {
4673                    // ... for current epoch
4674                    partitions.push(self.partition_from_slot_indexes_with_gapped_epochs(
4675                        0,
4676                        0,
4677                        ending_epoch,
4678                    ));
4679                }
4680            }
4681            starting_slot_index = 0;
4682        }
4683
4684        partitions.push(self.partition_from_normal_slot_indexes(
4685            starting_slot_index,
4686            ending_slot_index,
4687            ending_epoch,
4688        ));
4689
4690        partitions
4691    }
4692
4693    fn variable_cycle_partitions(&self) -> Vec<Partition> {
4694        self.variable_cycle_partitions_between_slots(self.parent_slot(), self.slot())
4695    }
4696
4697    fn do_partition_from_slot_indexes(
4698        &self,
4699        start_slot_index: SlotIndex,
4700        end_slot_index: SlotIndex,
4701        epoch: Epoch,
4702        generated_for_gapped_epochs: bool,
4703    ) -> Partition {
4704        let slot_count_per_epoch = self.get_slots_in_epoch(epoch);
4705
4706        let cycle_params = if !self.use_multi_epoch_collection_cycle(epoch) {
4707            // mnb should always go through this code path
4708            accounts_partition::rent_single_epoch_collection_cycle_params(
4709                epoch,
4710                slot_count_per_epoch,
4711            )
4712        } else {
4713            accounts_partition::rent_multi_epoch_collection_cycle_params(
4714                epoch,
4715                slot_count_per_epoch,
4716                self.first_normal_epoch(),
4717                self.slot_count_in_two_day() / slot_count_per_epoch,
4718            )
4719        };
4720        accounts_partition::get_partition_from_slot_indexes(
4721            cycle_params,
4722            start_slot_index,
4723            end_slot_index,
4724            generated_for_gapped_epochs,
4725        )
4726    }
4727
4728    fn partition_from_normal_slot_indexes(
4729        &self,
4730        start_slot_index: SlotIndex,
4731        end_slot_index: SlotIndex,
4732        epoch: Epoch,
4733    ) -> Partition {
4734        self.do_partition_from_slot_indexes(start_slot_index, end_slot_index, epoch, false)
4735    }
4736
4737    fn partition_from_slot_indexes_with_gapped_epochs(
4738        &self,
4739        start_slot_index: SlotIndex,
4740        end_slot_index: SlotIndex,
4741        epoch: Epoch,
4742    ) -> Partition {
4743        self.do_partition_from_slot_indexes(start_slot_index, end_slot_index, epoch, true)
4744    }
4745
4746    // Given short epochs, it's too costly to collect rent eagerly
4747    // within an epoch, so lower the frequency of it.
4748    // These logic isn't strictly eager anymore and should only be used
4749    // for development/performance purpose.
4750    // Absolutely not under ClusterType::MainnetBeta!!!!
4751    fn use_multi_epoch_collection_cycle(&self, epoch: Epoch) -> bool {
4752        // Force normal behavior, disabling multi epoch collection cycle for manual local testing
4753        #[cfg(not(test))]
4754        if self.slot_count_per_normal_epoch() == solana_sdk::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH
4755        {
4756            return false;
4757        }
4758
4759        epoch >= self.first_normal_epoch()
4760            && self.slot_count_per_normal_epoch() < self.slot_count_in_two_day()
4761    }
4762
4763    pub(crate) fn use_fixed_collection_cycle(&self) -> bool {
4764        // Force normal behavior, disabling fixed collection cycle for manual local testing
4765        #[cfg(not(test))]
4766        if self.slot_count_per_normal_epoch() == solana_sdk::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH
4767        {
4768            return false;
4769        }
4770
4771        self.cluster_type() != ClusterType::MainnetBeta
4772            && self.slot_count_per_normal_epoch() < self.slot_count_in_two_day()
4773    }
4774
4775    fn slot_count_in_two_day(&self) -> SlotCount {
4776        Self::slot_count_in_two_day_helper(self.ticks_per_slot)
4777    }
4778
4779    // This value is specially chosen to align with slots per epoch in mainnet-beta and testnet
4780    // Also, assume 500GB account data set as the extreme, then for 2 day (=48 hours) to collect
4781    // rent eagerly, we'll consume 5.7 MB/s IO bandwidth, bidirectionally.
4782    pub fn slot_count_in_two_day_helper(ticks_per_slot: SlotCount) -> SlotCount {
4783        2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / ticks_per_slot
4784    }
4785
4786    fn slot_count_per_normal_epoch(&self) -> SlotCount {
4787        self.get_slots_in_epoch(self.first_normal_epoch())
4788    }
4789
    /// Returns the cluster type (e.g. mainnet-beta, testnet) this bank belongs to.
    pub fn cluster_type(&self) -> ClusterType {
        // unwrap is safe: self.cluster_type is always Some() after initialization;
        // it is only an Option for ABI compatibility
        self.cluster_type.unwrap()
    }
4795
4796    /// Process a batch of transactions.
4797    #[must_use]
4798    pub fn load_execute_and_commit_transactions(
4799        &self,
4800        batch: &TransactionBatch<SanitizedTransaction>,
4801        max_age: usize,
4802        collect_balances: bool,
4803        recording_config: ExecutionRecordingConfig,
4804        timings: &mut ExecuteTimings,
4805        log_messages_bytes_limit: Option<usize>,
4806    ) -> (Vec<TransactionCommitResult>, TransactionBalancesSet) {
4807        let pre_balances = if collect_balances {
4808            self.collect_balances(batch)
4809        } else {
4810            vec![]
4811        };
4812
4813        let LoadAndExecuteTransactionsOutput {
4814            processing_results,
4815            processed_counts,
4816        } = self.load_and_execute_transactions(
4817            batch,
4818            max_age,
4819            timings,
4820            &mut TransactionErrorMetrics::default(),
4821            TransactionProcessingConfig {
4822                account_overrides: None,
4823                check_program_modification_slot: self.check_program_modification_slot,
4824                compute_budget: self.compute_budget(),
4825                log_messages_bytes_limit,
4826                limit_to_load_programs: false,
4827                recording_config,
4828                transaction_account_lock_limit: Some(self.get_transaction_account_lock_limit()),
4829            },
4830        );
4831
4832        let commit_results = self.commit_transactions(
4833            batch.sanitized_transactions(),
4834            processing_results,
4835            &processed_counts,
4836            timings,
4837        );
4838        let post_balances = if collect_balances {
4839            self.collect_balances(batch)
4840        } else {
4841            vec![]
4842        };
4843        (
4844            commit_results,
4845            TransactionBalancesSet::new(pre_balances, post_balances),
4846        )
4847    }
4848
4849    /// Process a Transaction. This is used for unit tests and simply calls the vector
4850    /// Bank::process_transactions method.
4851    pub fn process_transaction(&self, tx: &Transaction) -> Result<()> {
4852        self.try_process_transactions(std::iter::once(tx))?[0].clone()?;
4853        tx.signatures
4854            .first()
4855            .map_or(Ok(()), |sig| self.get_signature_status(sig).unwrap())
4856    }
4857
4858    /// Process a Transaction and store metadata. This is used for tests and the banks services. It
4859    /// replicates the vector Bank::process_transaction method with metadata recording enabled.
4860    pub fn process_transaction_with_metadata(
4861        &self,
4862        tx: impl Into<VersionedTransaction>,
4863    ) -> Result<CommittedTransaction> {
4864        let txs = vec![tx.into()];
4865        let batch = self.prepare_entry_batch(txs)?;
4866
4867        let (mut commit_results, ..) = self.load_execute_and_commit_transactions(
4868            &batch,
4869            MAX_PROCESSING_AGE,
4870            false, // collect_balances
4871            ExecutionRecordingConfig {
4872                enable_cpi_recording: false,
4873                enable_log_recording: true,
4874                enable_return_data_recording: true,
4875            },
4876            &mut ExecuteTimings::default(),
4877            Some(1000 * 1000),
4878        );
4879
4880        commit_results.remove(0)
4881    }
4882
4883    /// Process multiple transaction in a single batch. This is used for benches and unit tests.
4884    /// Short circuits if any of the transactions do not pass sanitization checks.
4885    pub fn try_process_transactions<'a>(
4886        &self,
4887        txs: impl Iterator<Item = &'a Transaction>,
4888    ) -> Result<Vec<Result<()>>> {
4889        let txs = txs
4890            .map(|tx| VersionedTransaction::from(tx.clone()))
4891            .collect();
4892        self.try_process_entry_transactions(txs)
4893    }
4894
4895    /// Process multiple transaction in a single batch. This is used for benches and unit tests.
4896    /// Short circuits if any of the transactions do not pass sanitization checks.
4897    pub fn try_process_entry_transactions(
4898        &self,
4899        txs: Vec<VersionedTransaction>,
4900    ) -> Result<Vec<Result<()>>> {
4901        let batch = self.prepare_entry_batch(txs)?;
4902        Ok(self.process_transaction_batch(&batch))
4903    }
4904
4905    #[must_use]
4906    fn process_transaction_batch(
4907        &self,
4908        batch: &TransactionBatch<SanitizedTransaction>,
4909    ) -> Vec<Result<()>> {
4910        self.load_execute_and_commit_transactions(
4911            batch,
4912            MAX_PROCESSING_AGE,
4913            false,
4914            ExecutionRecordingConfig::new_single_setting(false),
4915            &mut ExecuteTimings::default(),
4916            None,
4917        )
4918        .0
4919        .into_iter()
4920        .map(|commit_result| commit_result.map(|_| ()))
4921        .collect()
4922    }
4923
4924    /// Create, sign, and process a Transaction from `keypair` to `to` of
4925    /// `n` lamports where `blockhash` is the last Entry ID observed by the client.
4926    pub fn transfer(&self, n: u64, keypair: &Keypair, to: &Pubkey) -> Result<Signature> {
4927        let blockhash = self.last_blockhash();
4928        let tx = system_transaction::transfer(keypair, to, n, blockhash);
4929        let signature = tx.signatures[0];
4930        self.process_transaction(&tx).map(|_| signature)
4931    }
4932
    /// Returns the lamport balance of the given account.
    pub fn read_balance(account: &AccountSharedData) -> u64 {
        account.lamports()
    }
4936    /// Each program would need to be able to introspect its own state
4937    /// this is hard-coded to the Budget language
4938    pub fn get_balance(&self, pubkey: &Pubkey) -> u64 {
4939        self.get_account(pubkey)
4940            .map(|x| Self::read_balance(&x))
4941            .unwrap_or(0)
4942    }
4943
4944    /// Compute all the parents of the bank in order
4945    pub fn parents(&self) -> Vec<Arc<Bank>> {
4946        let mut parents = vec![];
4947        let mut bank = self.parent();
4948        while let Some(parent) = bank {
4949            parents.push(parent.clone());
4950            bank = parent.parent();
4951        }
4952        parents
4953    }
4954
4955    /// Compute all the parents of the bank including this bank itself
4956    pub fn parents_inclusive(self: Arc<Self>) -> Vec<Arc<Bank>> {
4957        let mut parents = self.parents();
4958        parents.insert(0, self);
4959        parents
4960    }
4961
4962    /// fn store the single `account` with `pubkey`.
4963    /// Uses `store_accounts`, which works on a vector of accounts.
4964    pub fn store_account(&self, pubkey: &Pubkey, account: &AccountSharedData) {
4965        self.store_accounts((self.slot(), &[(pubkey, account)][..]))
4966    }
4967
4968    pub fn store_accounts<'a>(&self, accounts: impl StorableAccounts<'a>) {
4969        assert!(!self.freeze_started());
4970        let mut m = Measure::start("stakes_cache.check_and_store");
4971        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();
4972        (0..accounts.len()).for_each(|i| {
4973            accounts.account(i, |account| {
4974                self.stakes_cache.check_and_store(
4975                    account.pubkey(),
4976                    &account,
4977                    new_warmup_cooldown_rate_epoch,
4978                )
4979            })
4980        });
4981        self.rc.accounts.store_accounts_cached(accounts);
4982        m.stop();
4983        self.rc
4984            .accounts
4985            .accounts_db
4986            .stats
4987            .stakes_cache_check_and_store_us
4988            .fetch_add(m.as_us(), Relaxed);
4989    }
4990
4991    pub fn force_flush_accounts_cache(&self) {
4992        self.rc
4993            .accounts
4994            .accounts_db
4995            .flush_accounts_cache(true, Some(self.slot()))
4996    }
4997
4998    pub fn flush_accounts_cache_if_needed(&self) {
4999        self.rc
5000            .accounts
5001            .accounts_db
5002            .flush_accounts_cache(false, Some(self.slot()))
5003    }
5004
5005    /// Technically this issues (or even burns!) new lamports,
5006    /// so be extra careful for its usage
5007    fn store_account_and_update_capitalization(
5008        &self,
5009        pubkey: &Pubkey,
5010        new_account: &AccountSharedData,
5011    ) {
5012        let old_account_data_size =
5013            if let Some(old_account) = self.get_account_with_fixed_root_no_cache(pubkey) {
5014                match new_account.lamports().cmp(&old_account.lamports()) {
5015                    std::cmp::Ordering::Greater => {
5016                        let increased = new_account.lamports() - old_account.lamports();
5017                        trace!(
5018                            "store_account_and_update_capitalization: increased: {} {}",
5019                            pubkey,
5020                            increased
5021                        );
5022                        self.capitalization.fetch_add(increased, Relaxed);
5023                    }
5024                    std::cmp::Ordering::Less => {
5025                        let decreased = old_account.lamports() - new_account.lamports();
5026                        trace!(
5027                            "store_account_and_update_capitalization: decreased: {} {}",
5028                            pubkey,
5029                            decreased
5030                        );
5031                        self.capitalization.fetch_sub(decreased, Relaxed);
5032                    }
5033                    std::cmp::Ordering::Equal => {}
5034                }
5035                old_account.data().len()
5036            } else {
5037                trace!(
5038                    "store_account_and_update_capitalization: created: {} {}",
5039                    pubkey,
5040                    new_account.lamports()
5041                );
5042                self.capitalization
5043                    .fetch_add(new_account.lamports(), Relaxed);
5044                0
5045            };
5046
5047        self.store_account(pubkey, new_account);
5048        self.calculate_and_update_accounts_data_size_delta_off_chain(
5049            old_account_data_size,
5050            new_account.data().len(),
5051        );
5052    }
5053
5054    pub fn accounts(&self) -> Arc<Accounts> {
5055        self.rc.accounts.clone()
5056    }
5057
    /// Finish bank initialization after construction (from genesis or from a
    /// snapshot): record rewards-pool pubkeys, apply already-activated
    /// features, install builtin programs and precompiles, and prime the
    /// program cache's root slot/epoch and runtime environments.
    ///
    /// `additional_builtins` are considered alongside the standard `BUILTINS`
    /// list. `debug_do_not_add_builtins` skips builtin/precompile installation
    /// and is also forwarded to `apply_feature_activations` (debug/test aid).
    fn finish_init(
        &mut self,
        genesis_config: &GenesisConfig,
        additional_builtins: Option<&[BuiltinPrototype]>,
        debug_do_not_add_builtins: bool,
    ) {
        self.rewards_pool_pubkeys =
            Arc::new(genesis_config.rewards_pools.keys().cloned().collect());

        self.apply_feature_activations(
            ApplyFeatureActivationsCaller::FinishInit,
            debug_do_not_add_builtins,
        );

        // Cost-Tracker is not serialized in snapshot or any configs.
        // We must apply previously activated features related to limits here
        // so that the initial bank state is consistent with the feature set.
        // Cost-tracker limits are propagated through children banks.
        if self
            .feature_set
            .is_active(&feature_set::raise_block_limits_to_50m::id())
        {
            let (account_cost_limit, block_cost_limit, vote_cost_limit) = simd_0207_block_limits();
            self.write_cost_tracker().unwrap().set_limits(
                account_cost_limit,
                block_cost_limit,
                vote_cost_limit,
            );
        }

        if !debug_do_not_add_builtins {
            for builtin in BUILTINS
                .iter()
                .chain(additional_builtins.unwrap_or(&[]).iter())
            {
                // The builtin should be added if it has no enable feature ID
                // and it has not been migrated to Core BPF.
                //
                // If a program was previously migrated to Core BPF, accountsDB
                // from snapshot should contain the BPF program accounts.
                let builtin_is_bpf = |program_id: &Pubkey| {
                    self.get_account(program_id)
                        .map(|a| a.owner() == &bpf_loader_upgradeable::id())
                        .unwrap_or(false)
                };
                if builtin.enable_feature_id.is_none() && !builtin_is_bpf(&builtin.program_id) {
                    self.transaction_processor.add_builtin(
                        self,
                        builtin.program_id,
                        builtin.name,
                        ProgramCacheEntry::new_builtin(0, builtin.name.len(), builtin.entrypoint),
                    );
                }
            }
            // Feature-gated precompiles are not added here; only those with no
            // gating feature are installed unconditionally.
            for precompile in get_precompiles() {
                if precompile.feature.is_none() {
                    self.add_precompile(&precompile.program_id);
                }
            }
        }

        // Root the program cache at this bank's slot/epoch and rebuild both
        // runtime environments from the current feature set / compute budget.
        let mut program_cache = self.transaction_processor.program_cache.write().unwrap();
        program_cache.latest_root_slot = self.slot();
        program_cache.latest_root_epoch = self.epoch();
        program_cache.environments.program_runtime_v1 = Arc::new(
            create_program_runtime_environment_v1(
                &self.feature_set,
                &self.compute_budget().unwrap_or_default(),
                false, /* deployment */
                false, /* debugging_features */
            )
            .unwrap(),
        );
        program_cache.environments.program_runtime_v2 =
            Arc::new(create_program_runtime_environment_v2(
                &self.compute_budget().unwrap_or_default(),
                false, /* debugging_features */
            ));
    }
5137
5138    pub fn set_inflation(&self, inflation: Inflation) {
5139        *self.inflation.write().unwrap() = inflation;
5140    }
5141
5142    /// Get a snapshot of the current set of hard forks
5143    pub fn hard_forks(&self) -> HardForks {
5144        self.hard_forks.read().unwrap().clone()
5145    }
5146
    /// Register a hard fork at `new_hard_fork_slot`; registered hard forks are
    /// mixed into the bank hash (see `hash_internal_state`).
    ///
    /// The registration is ignored (with a warning) when the requested slot is
    /// older than this bank, or equal to this bank's slot while the bank is
    /// already frozen (its hash can no longer change).
    pub fn register_hard_fork(&self, new_hard_fork_slot: Slot) {
        let bank_slot = self.slot();

        // Hold the freeze lock across the whole check-then-register sequence
        // so the bank cannot transition to frozen between the `bank_frozen`
        // test below and the actual registration.
        let lock = self.freeze_lock();
        // A default hash means this bank has not been frozen yet.
        let bank_frozen = *lock != Hash::default();
        if new_hard_fork_slot < bank_slot {
            warn!(
                "Hard fork at slot {new_hard_fork_slot} ignored, the hard fork is older \
                than the bank at slot {bank_slot} that attempted to register it."
            );
        } else if (new_hard_fork_slot == bank_slot) && bank_frozen {
            warn!(
                "Hard fork at slot {new_hard_fork_slot} ignored, the hard fork is the same \
                slot as the bank at slot {bank_slot} that attempted to register it, but that \
                bank is already frozen."
            );
        } else {
            self.hard_forks
                .write()
                .unwrap()
                .register(new_hard_fork_slot);
        }
    }
5170
5171    pub fn get_account_with_fixed_root_no_cache(
5172        &self,
5173        pubkey: &Pubkey,
5174    ) -> Option<AccountSharedData> {
5175        self.load_account_with(pubkey, |_| false)
5176            .map(|(acc, _slot)| acc)
5177    }
5178
5179    fn load_account_with(
5180        &self,
5181        pubkey: &Pubkey,
5182        callback: impl for<'local> Fn(&'local AccountSharedData) -> bool,
5183    ) -> Option<(AccountSharedData, Slot)> {
5184        self.rc
5185            .accounts
5186            .accounts_db
5187            .load_account_with(&self.ancestors, pubkey, callback)
5188    }
5189
5190    // Hi! leaky abstraction here....
5191    // try to use get_account_with_fixed_root() if it's called ONLY from on-chain runtime account
5192    // processing. That alternative fn provides more safety.
5193    pub fn get_account(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
5194        self.get_account_modified_slot(pubkey)
5195            .map(|(acc, _slot)| acc)
5196    }
5197
5198    // Hi! leaky abstraction here....
5199    // use this over get_account() if it's called ONLY from on-chain runtime account
5200    // processing (i.e. from in-band replay/banking stage; that ensures root is *fixed* while
5201    // running).
5202    // pro: safer assertion can be enabled inside AccountsDb
5203    // con: panics!() if called from off-chain processing
5204    pub fn get_account_with_fixed_root(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
5205        self.get_account_modified_slot_with_fixed_root(pubkey)
5206            .map(|(acc, _slot)| acc)
5207    }
5208
5209    // See note above get_account_with_fixed_root() about when to prefer this function
5210    pub fn get_account_modified_slot_with_fixed_root(
5211        &self,
5212        pubkey: &Pubkey,
5213    ) -> Option<(AccountSharedData, Slot)> {
5214        self.load_slow_with_fixed_root(&self.ancestors, pubkey)
5215    }
5216
5217    pub fn get_account_modified_slot(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> {
5218        self.load_slow(&self.ancestors, pubkey)
5219    }
5220
5221    fn load_slow(
5222        &self,
5223        ancestors: &Ancestors,
5224        pubkey: &Pubkey,
5225    ) -> Option<(AccountSharedData, Slot)> {
5226        // get_account (= primary this fn caller) may be called from on-chain Bank code even if we
5227        // try hard to use get_account_with_fixed_root for that purpose...
5228        // so pass safer LoadHint:Unspecified here as a fallback
5229        self.rc.accounts.load_without_fixed_root(ancestors, pubkey)
5230    }
5231
5232    fn load_slow_with_fixed_root(
5233        &self,
5234        ancestors: &Ancestors,
5235        pubkey: &Pubkey,
5236    ) -> Option<(AccountSharedData, Slot)> {
5237        self.rc.accounts.load_with_fixed_root(ancestors, pubkey)
5238    }
5239
5240    pub fn get_program_accounts(
5241        &self,
5242        program_id: &Pubkey,
5243        config: &ScanConfig,
5244    ) -> ScanResult<Vec<TransactionAccount>> {
5245        self.rc
5246            .accounts
5247            .load_by_program(&self.ancestors, self.bank_id, program_id, config)
5248    }
5249
5250    pub fn get_filtered_program_accounts<F: Fn(&AccountSharedData) -> bool>(
5251        &self,
5252        program_id: &Pubkey,
5253        filter: F,
5254        config: &ScanConfig,
5255    ) -> ScanResult<Vec<TransactionAccount>> {
5256        self.rc.accounts.load_by_program_with_filter(
5257            &self.ancestors,
5258            self.bank_id,
5259            program_id,
5260            filter,
5261            config,
5262        )
5263    }
5264
5265    pub fn get_filtered_indexed_accounts<F: Fn(&AccountSharedData) -> bool>(
5266        &self,
5267        index_key: &IndexKey,
5268        filter: F,
5269        config: &ScanConfig,
5270        byte_limit_for_scan: Option<usize>,
5271    ) -> ScanResult<Vec<TransactionAccount>> {
5272        self.rc.accounts.load_by_index_key_with_filter(
5273            &self.ancestors,
5274            self.bank_id,
5275            index_key,
5276            filter,
5277            config,
5278            byte_limit_for_scan,
5279        )
5280    }
5281
5282    pub fn account_indexes_include_key(&self, key: &Pubkey) -> bool {
5283        self.rc.accounts.account_indexes_include_key(key)
5284    }
5285
5286    /// Returns all the accounts this bank can load
5287    pub fn get_all_accounts(&self, sort_results: bool) -> ScanResult<Vec<PubkeyAccountSlot>> {
5288        self.rc
5289            .accounts
5290            .load_all(&self.ancestors, self.bank_id, sort_results)
5291    }
5292
5293    // Scans all the accounts this bank can load, applying `scan_func`
5294    pub fn scan_all_accounts<F>(&self, scan_func: F, sort_results: bool) -> ScanResult<()>
5295    where
5296        F: FnMut(Option<(&Pubkey, AccountSharedData, Slot)>),
5297    {
5298        self.rc
5299            .accounts
5300            .scan_all(&self.ancestors, self.bank_id, scan_func, sort_results)
5301    }
5302
5303    pub fn get_program_accounts_modified_since_parent(
5304        &self,
5305        program_id: &Pubkey,
5306    ) -> Vec<TransactionAccount> {
5307        self.rc
5308            .accounts
5309            .load_by_program_slot(self.slot(), Some(program_id))
5310    }
5311
5312    pub fn get_transaction_logs(
5313        &self,
5314        address: Option<&Pubkey>,
5315    ) -> Option<Vec<TransactionLogInfo>> {
5316        self.transaction_log_collector
5317            .read()
5318            .unwrap()
5319            .get_logs_for_address(address)
5320    }
5321
5322    /// Returns all the accounts stored in this slot
5323    pub fn get_all_accounts_modified_since_parent(&self) -> Vec<TransactionAccount> {
5324        self.rc.accounts.load_by_program_slot(self.slot(), None)
5325    }
5326
5327    // if you want get_account_modified_since_parent without fixed_root, please define so...
5328    fn get_account_modified_since_parent_with_fixed_root(
5329        &self,
5330        pubkey: &Pubkey,
5331    ) -> Option<(AccountSharedData, Slot)> {
5332        let just_self: Ancestors = Ancestors::from(vec![self.slot()]);
5333        if let Some((account, slot)) = self.load_slow_with_fixed_root(&just_self, pubkey) {
5334            if slot == self.slot() {
5335                return Some((account, slot));
5336            }
5337        }
5338        None
5339    }
5340
5341    pub fn get_largest_accounts(
5342        &self,
5343        num: usize,
5344        filter_by_address: &HashSet<Pubkey>,
5345        filter: AccountAddressFilter,
5346        sort_results: bool,
5347    ) -> ScanResult<Vec<(Pubkey, u64)>> {
5348        self.rc.accounts.load_largest_accounts(
5349            &self.ancestors,
5350            self.bank_id,
5351            num,
5352            filter_by_address,
5353            filter,
5354            sort_results,
5355        )
5356    }
5357
5358    /// Return the accumulated executed transaction count
5359    pub fn transaction_count(&self) -> u64 {
5360        self.transaction_count.load(Relaxed)
5361    }
5362
5363    /// Returns the number of non-vote transactions processed without error
5364    /// since the most recent boot from snapshot or genesis.
5365    /// This value is not shared though the network, nor retained
5366    /// within snapshots, but is preserved in `Bank::new_from_parent`.
5367    pub fn non_vote_transaction_count_since_restart(&self) -> u64 {
5368        self.non_vote_transaction_count_since_restart.load(Relaxed)
5369    }
5370
5371    /// Return the transaction count executed only in this bank
5372    pub fn executed_transaction_count(&self) -> u64 {
5373        self.transaction_count()
5374            .saturating_sub(self.parent().map_or(0, |parent| parent.transaction_count()))
5375    }
5376
5377    pub fn transaction_error_count(&self) -> u64 {
5378        self.transaction_error_count.load(Relaxed)
5379    }
5380
5381    pub fn transaction_entries_count(&self) -> u64 {
5382        self.transaction_entries_count.load(Relaxed)
5383    }
5384
5385    pub fn transactions_per_entry_max(&self) -> u64 {
5386        self.transactions_per_entry_max.load(Relaxed)
5387    }
5388
5389    fn increment_transaction_count(&self, tx_count: u64) {
5390        self.transaction_count.fetch_add(tx_count, Relaxed);
5391    }
5392
5393    fn increment_non_vote_transaction_count_since_restart(&self, tx_count: u64) {
5394        self.non_vote_transaction_count_since_restart
5395            .fetch_add(tx_count, Relaxed);
5396    }
5397
5398    pub fn signature_count(&self) -> u64 {
5399        self.signature_count.load(Relaxed)
5400    }
5401
5402    fn increment_signature_count(&self, signature_count: u64) {
5403        self.signature_count.fetch_add(signature_count, Relaxed);
5404    }
5405
5406    pub fn get_signature_status_processed_since_parent(
5407        &self,
5408        signature: &Signature,
5409    ) -> Option<Result<()>> {
5410        if let Some((slot, status)) = self.get_signature_status_slot(signature) {
5411            if slot <= self.slot() {
5412                return Some(status);
5413            }
5414        }
5415        None
5416    }
5417
5418    pub fn get_signature_status_with_blockhash(
5419        &self,
5420        signature: &Signature,
5421        blockhash: &Hash,
5422    ) -> Option<Result<()>> {
5423        let rcache = self.status_cache.read().unwrap();
5424        rcache
5425            .get_status(signature, blockhash, &self.ancestors)
5426            .map(|v| v.1)
5427    }
5428
5429    pub fn get_signature_status_slot(&self, signature: &Signature) -> Option<(Slot, Result<()>)> {
5430        let rcache = self.status_cache.read().unwrap();
5431        rcache.get_status_any_blockhash(signature, &self.ancestors)
5432    }
5433
5434    pub fn get_signature_status(&self, signature: &Signature) -> Option<Result<()>> {
5435        self.get_signature_status_slot(signature).map(|v| v.1)
5436    }
5437
5438    pub fn has_signature(&self, signature: &Signature) -> bool {
5439        self.get_signature_status_slot(signature).is_some()
5440    }
5441
    /// Hash the `accounts` HashMap. This represents a validator's interpretation
    ///  of the delta of the ledger since the last vote and up to now
    fn hash_internal_state(&self) -> Hash {
        let measure_total = Measure::start("");

        let slot = self.slot();
        // Exclude the epoch-rewards sysvar account from the delta-hash
        // calculation only when partitioned rewards are force-enabled without
        // the feature actually being active.
        let ignore = (!self.is_partitioned_rewards_feature_enabled()
            && self.force_partition_rewards_in_first_block_of_epoch())
        .then_some(sysvar::epoch_rewards::id());
        let (accounts_delta_hash, accounts_delta_hash_us) = measure_us!({
            self.rc
                .accounts
                .accounts_db
                .calculate_accounts_delta_hash_internal(
                    slot,
                    ignore,
                    self.skipped_rewrites.lock().unwrap().clone(),
                )
        });

        let mut signature_count_buf = [0u8; 8];
        LittleEndian::write_u64(&mut signature_count_buf[..], self.signature_count());

        // Bank hash = H(parent hash, accounts delta hash, signature count as
        // little-endian u64, last blockhash); possibly extended below.
        let mut hash = hashv(&[
            self.parent_hash.as_ref(),
            accounts_delta_hash.0.as_ref(),
            &signature_count_buf,
            self.last_blockhash().as_ref(),
        ]);

        // Mix in the epoch accounts hash if it is due in this bank; this call
        // blocks until the EAH calculation has completed.
        let epoch_accounts_hash = self.wait_get_epoch_accounts_hash();
        if let Some(epoch_accounts_hash) = epoch_accounts_hash {
            hash = hashv(&[hash.as_ref(), epoch_accounts_hash.as_ref().as_ref()]);
        };

        // Mix registered hard-fork data into the hash (see `register_hard_fork`).
        let buf = self
            .hard_forks
            .read()
            .unwrap()
            .get_hash_data(slot, self.parent_slot());
        if let Some(buf) = buf {
            let hard_forked_hash = extend_and_hash(&hash, &buf);
            warn!("hard fork at slot {slot} by hashing {buf:?}: {hash} => {hard_forked_hash}");
            hash = hard_forked_hash;
        }

        // Dev-only: allow the computed hash to be overridden per-slot.
        #[cfg(feature = "dev-context-only-utils")]
        let hash_override = self
            .hash_overrides
            .lock()
            .unwrap()
            .get_bank_hash_override(slot)
            .copied()
            .inspect(|&hash_override| {
                if hash_override != hash {
                    info!(
                        "bank: slot: {}: overrode bank hash: {} with {}",
                        self.slot(),
                        hash,
                        hash_override
                    );
                }
            });
        // Avoid to optimize out `hash` along with the whole computation by super smart rustc.
        // hash_override is used by ledger-tool's simulate-block-production, which prefers
        // the actual bank freezing processing for accurate simulation.
        #[cfg(feature = "dev-context-only-utils")]
        let hash = hash_override.unwrap_or(std::hint::black_box(hash));

        let bank_hash_stats = self
            .rc
            .accounts
            .accounts_db
            .get_bank_hash_stats(slot)
            .expect("No bank hash stats were found for this bank, that should not be possible");

        let total_us = measure_total.end_as_us();
        datapoint_info!(
            "bank-hash_internal_state",
            ("slot", slot, i64),
            ("total_us", total_us, i64),
            ("accounts_delta_hash_us", accounts_delta_hash_us, i64),
        );
        info!(
            "bank frozen: {slot} hash: {hash} accounts_delta: {} signature_count: {} last_blockhash: {} capitalization: {}{}, stats: {bank_hash_stats:?}{}",
            accounts_delta_hash.0,
            self.signature_count(),
            self.last_blockhash(),
            self.capitalization(),
            if let Some(epoch_accounts_hash) = epoch_accounts_hash {
                format!(", epoch_accounts_hash: {:?}", epoch_accounts_hash.as_ref())
            } else {
                "".to_string()
            },
            if self.is_accounts_lt_hash_enabled() {
                let checksum = self.accounts_lt_hash.lock().unwrap().0.checksum();
                format!(", accounts_lt_hash checksum: {checksum}")
            } else {
                String::new()
            },
        );
        hash
    }
5545
5546    /// The epoch accounts hash is hashed into the bank's hash once per epoch at a predefined slot.
5547    /// Should it be included in *this* bank?
5548    fn should_include_epoch_accounts_hash(&self) -> bool {
5549        if !epoch_accounts_hash_utils::is_enabled_this_epoch(self) {
5550            return false;
5551        }
5552
5553        let stop_slot = epoch_accounts_hash_utils::calculation_stop(self);
5554        self.parent_slot() < stop_slot && self.slot() >= stop_slot
5555    }
5556
    /// If the epoch accounts hash should be included in this Bank, then fetch it. If the EAH
    /// calculation has not completed yet, this fn will block until it does complete.
    fn wait_get_epoch_accounts_hash(&self) -> Option<EpochAccountsHash> {
        if !self.should_include_epoch_accounts_hash() {
            return None;
        }

        // Blocks until the EAH manager has a result; the wait is timed and
        // reported in the datapoint below.
        let (epoch_accounts_hash, waiting_time_us) = measure_us!(self
            .rc
            .accounts
            .accounts_db
            .epoch_accounts_hash_manager
            .wait_get_epoch_accounts_hash());

        datapoint_info!(
            "bank-wait_get_epoch_accounts_hash",
            ("slot", self.slot(), i64),
            ("waiting-time-us", waiting_time_us, i64),
        );
        Some(epoch_accounts_hash)
    }
5578
5579    /// Used by ledger tool to run a final hash calculation once all ledger replay has completed.
5580    /// This should not be called by validator code.
5581    pub fn run_final_hash_calc(&self, on_halt_store_hash_raw_data_for_debug: bool) {
5582        self.force_flush_accounts_cache();
5583        // note that this slot may not be a root
5584        _ = self.verify_accounts_hash(
5585            None,
5586            VerifyAccountsHashConfig {
5587                test_hash_calculation: false,
5588                ignore_mismatch: true,
5589                require_rooted_bank: false,
5590                run_in_background: false,
5591                store_hash_raw_data_for_debug: on_halt_store_hash_raw_data_for_debug,
5592            },
5593            None,
5594        );
5595    }
5596
    /// Recalculate the accounts hash from the account stores. Used to verify a snapshot.
    /// return true if all is good
    /// Only called from startup or test code.
    ///
    /// `base` is an optional (slot, capitalization) pair forwarded to the
    /// merkle-based verification. `duplicates_lt_hash` is required to compute
    /// the lattice hash from storages; without it the index is used, which
    /// forces foreground execution.
    #[must_use]
    fn verify_accounts_hash(
        &self,
        base: Option<(Slot, /*capitalization*/ u64)>,
        mut config: VerifyAccountsHashConfig,
        duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
    ) -> bool {
        // Which verification scheme to run: merkle-based accounts hash or
        // lattice-based accounts lt hash.
        #[derive(Debug, Eq, PartialEq)]
        enum VerifyKind {
            Merkle,
            Lattice,
        }

        let accounts = &self.rc.accounts;
        // Wait until initial hash calc is complete before starting a new hash calc.
        // This should only occur when we halt at a slot in ledger-tool.
        accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .wait_for_complete();

        let slot = self.slot();
        let verify_kind = if self
            .rc
            .accounts
            .accounts_db
            .is_experimental_accumulator_hash_enabled()
        {
            VerifyKind::Lattice
        } else {
            VerifyKind::Merkle
        };

        if verify_kind == VerifyKind::Lattice {
            // Calculating the accounts lt hash from storages *requires* a duplicates_lt_hash.
            // If it is None here, then we must use the index instead, which also means we
            // cannot run in the background.
            if duplicates_lt_hash.is_none() {
                config.run_in_background = false;
            }
        }

        if config.require_rooted_bank && !accounts.accounts_db.accounts_index.is_alive_root(slot) {
            if let Some(parent) = self.parent() {
                info!(
                    "slot {slot} is not a root, so verify accounts hash on parent bank at slot {}",
                    parent.slot(),
                );
                if verify_kind == VerifyKind::Lattice {
                    // The duplicates_lt_hash is only valid for the current slot, so we must fall
                    // back to verifying the accounts lt hash with the index (which also means we
                    // cannot run in the background).
                    config.run_in_background = false;
                }
                return parent.verify_accounts_hash(base, config, None);
            } else {
                // this will result in mismatch errors
                // accounts hash calc doesn't include unrooted slots
                panic!("cannot verify accounts hash because slot {slot} is not a root");
            }
        }

        // The snapshot storages must be captured *before* starting the background verification.
        // Otherwise, it is possible that a delayed call to `get_snapshot_storages()` will *not*
        // get the correct storages required to calculate and verify the accounts hashes.
        let snapshot_storages = self
            .rc
            .accounts
            .accounts_db
            .get_snapshot_storages(RangeFull);
        let capitalization = self.capitalization();
        let verify_config = VerifyAccountsHashAndLamportsConfig {
            ancestors: &self.ancestors,
            epoch_schedule: self.epoch_schedule(),
            rent_collector: self.rent_collector(),
            test_hash_calculation: config.test_hash_calculation,
            ignore_mismatch: config.ignore_mismatch,
            store_detailed_debug_info: config.store_hash_raw_data_for_debug,
            use_bg_thread_pool: config.run_in_background,
        };

        if config.run_in_background {
            // Clone everything the spawned thread captures, since the closure
            // moves owned data into a separate thread.
            let accounts = Arc::clone(accounts);
            let accounts_ = Arc::clone(&accounts);
            let ancestors = self.ancestors.clone();
            let epoch_schedule = self.epoch_schedule().clone();
            let rent_collector = self.rent_collector().clone();
            let expected_accounts_lt_hash = self.accounts_lt_hash.lock().unwrap().clone();
            accounts.accounts_db.verify_accounts_hash_in_bg.start(|| {
                Builder::new()
                    .name("solBgHashVerify".into())
                    .spawn(move || {
                        info!("Initial background accounts hash verification has started");
                        let start = Instant::now();
                        let mut lattice_verify_time = None;
                        let mut merkle_verify_time = None;
                        match verify_kind {
                            VerifyKind::Lattice => {
                                // accounts lt hash is *enabled* so use lattice-based verification
                                let accounts_db = &accounts_.accounts_db;
                                let (calculated_accounts_lt_hash, duration) =
                                    meas_dur!(accounts_db.thread_pool_hash.install(|| {
                                        accounts_db
                                            .calculate_accounts_lt_hash_at_startup_from_storages(
                                                snapshot_storages.0.as_slice(),
                                                &duplicates_lt_hash.unwrap(),
                                            )
                                    }));
                                if calculated_accounts_lt_hash != expected_accounts_lt_hash {
                                    let expected = expected_accounts_lt_hash.0.checksum();
                                    let calculated = calculated_accounts_lt_hash.0.checksum();
                                    error!(
                                        "Verifying accounts failed: accounts lattice hashes do not \
                                         match, expected: {expected}, calculated: {calculated}",
                                    );
                                    return false;
                                }
                                lattice_verify_time = Some(duration);
                            }
                            VerifyKind::Merkle => {
                                // accounts lt hash is *disabled* so use merkle-based verification
                                let snapshot_storages_and_slots = (
                                    snapshot_storages.0.as_slice(),
                                    snapshot_storages.1.as_slice(),
                                );
                                let (result, duration) = meas_dur!(accounts_
                                    .verify_accounts_hash_and_lamports(
                                        snapshot_storages_and_slots,
                                        slot,
                                        capitalization,
                                        base,
                                        VerifyAccountsHashAndLamportsConfig {
                                            ancestors: &ancestors,
                                            epoch_schedule: &epoch_schedule,
                                            rent_collector: &rent_collector,
                                            ..verify_config
                                        },
                                    ));
                                if !result {
                                    return false;
                                }
                                merkle_verify_time = Some(duration);
                            }
                        }
                        // Mark the background verification as finished so other
                        // waiters (see `wait_for_complete` above) can proceed.
                        accounts_
                            .accounts_db
                            .verify_accounts_hash_in_bg
                            .background_finished();
                        let total_time = start.elapsed();
                        datapoint_info!(
                            "startup_verify_accounts",
                            ("total_us", total_time.as_micros(), i64),
                            (
                                "verify_accounts_lt_hash_us",
                                lattice_verify_time.as_ref().map(Duration::as_micros),
                                Option<i64>
                            ),
                            ("verify_accounts_hash_us",
                                merkle_verify_time.as_ref().map(Duration::as_micros),
                                Option<i64>
                            ),
                        );
                        info!("Initial background accounts hash verification has stopped");
                        true
                    })
                    .unwrap()
            });
            true // initial result is true. We haven't failed yet. If verification fails, we'll panic from bg thread.
        } else {
            // Foreground verification: compute and compare inline, then return
            // the comparison result directly.
            match verify_kind {
                VerifyKind::Lattice => {
                    let expected_accounts_lt_hash = self.accounts_lt_hash.lock().unwrap().clone();
                    let calculated_accounts_lt_hash = if let Some(duplicates_lt_hash) =
                        duplicates_lt_hash
                    {
                        accounts
                            .accounts_db
                            .calculate_accounts_lt_hash_at_startup_from_storages(
                                snapshot_storages.0.as_slice(),
                                &duplicates_lt_hash,
                            )
                    } else {
                        accounts
                            .accounts_db
                            .calculate_accounts_lt_hash_at_startup_from_index(&self.ancestors, slot)
                    };
                    let is_ok = calculated_accounts_lt_hash == expected_accounts_lt_hash;
                    if !is_ok {
                        let expected = expected_accounts_lt_hash.0.checksum();
                        let calculated = calculated_accounts_lt_hash.0.checksum();
                        error!(
                            "Verifying accounts failed: accounts lattice hashes do not \
                             match, expected: {expected}, calculated: {calculated}",
                        );
                    }
                    is_ok
                }
                VerifyKind::Merkle => {
                    let snapshot_storages_and_slots = (
                        snapshot_storages.0.as_slice(),
                        snapshot_storages.1.as_slice(),
                    );
                    let result = accounts.verify_accounts_hash_and_lamports(
                        snapshot_storages_and_slots,
                        slot,
                        capitalization,
                        base,
                        verify_config,
                    );
                    self.set_initial_accounts_hash_verification_completed();
                    result
                }
            }
        }
    }
5815
5816    /// Specify that initial verification has completed.
5817    /// Called internally when verification runs in the foreground thread.
5818    /// Also has to be called by some tests which don't do verification on startup.
5819    pub fn set_initial_accounts_hash_verification_completed(&self) {
5820        self.rc
5821            .accounts
5822            .accounts_db
5823            .verify_accounts_hash_in_bg
5824            .verification_complete();
5825    }
5826
    /// Returns whether the background startup accounts-hash verification has
    /// finished.
    ///
    /// * `true` - bg hash verification is complete
    /// * `false` - bg hash verification has not completed yet
    ///
    /// Note: if hash verification failed, a panic will occur.
    pub fn has_initial_accounts_hash_verification_completed(&self) -> bool {
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .check_complete()
    }
5837
5838    /// Get this bank's storages to use for snapshots.
5839    ///
5840    /// If a base slot is provided, return only the storages that are *higher* than this slot.
5841    pub fn get_snapshot_storages(&self, base_slot: Option<Slot>) -> Vec<Arc<AccountStorageEntry>> {
5842        // if a base slot is provided, request storages starting at the slot *after*
5843        let start_slot = base_slot.map_or(0, |slot| slot.saturating_add(1));
5844        // we want to *include* the storage at our slot
5845        let requested_slots = start_slot..=self.slot();
5846
5847        self.rc
5848            .accounts
5849            .accounts_db
5850            .get_snapshot_storages(requested_slots)
5851            .0
5852    }
5853
5854    #[must_use]
5855    fn verify_hash(&self) -> bool {
5856        assert!(self.is_frozen());
5857        let calculated_hash = self.hash_internal_state();
5858        let expected_hash = self.hash();
5859
5860        if calculated_hash == expected_hash {
5861            true
5862        } else {
5863            warn!(
5864                "verify failed: slot: {}, {} (calculated) != {} (expected)",
5865                self.slot(),
5866                calculated_hash,
5867                expected_hash
5868            );
5869            false
5870        }
5871    }
5872
5873    pub fn verify_transaction(
5874        &self,
5875        tx: VersionedTransaction,
5876        verification_mode: TransactionVerificationMode,
5877    ) -> Result<SanitizedTransaction> {
5878        let sanitized_tx = {
5879            let size =
5880                bincode::serialized_size(&tx).map_err(|_| TransactionError::SanitizeFailure)?;
5881            if size > PACKET_DATA_SIZE as u64 {
5882                return Err(TransactionError::SanitizeFailure);
5883            }
5884            let message_hash = if verification_mode == TransactionVerificationMode::FullVerification
5885            {
5886                tx.verify_and_hash_message()?
5887            } else {
5888                tx.message.hash()
5889            };
5890
5891            SanitizedTransaction::try_create(
5892                tx,
5893                message_hash,
5894                None,
5895                self,
5896                self.get_reserved_account_keys(),
5897            )
5898        }?;
5899
5900        let move_precompile_verification_to_svm = self
5901            .feature_set
5902            .is_active(&feature_set::move_precompile_verification_to_svm::id());
5903        if !move_precompile_verification_to_svm && {
5904            verification_mode == TransactionVerificationMode::HashAndVerifyPrecompiles
5905                || verification_mode == TransactionVerificationMode::FullVerification
5906        } {
5907            verify_precompiles(&sanitized_tx, &self.feature_set)?;
5908        }
5909
5910        Ok(sanitized_tx)
5911    }
5912
    /// Convenience wrapper around [`Bank::verify_transaction`] that always
    /// uses `TransactionVerificationMode::FullVerification` (signature
    /// verification included).
    pub fn fully_verify_transaction(
        &self,
        tx: VersionedTransaction,
    ) -> Result<SanitizedTransaction> {
        self.verify_transaction(tx, TransactionVerificationMode::FullVerification)
    }
5919
5920    /// Checks if the transaction violates the bank's reserved keys.
5921    /// This needs to be checked upon epoch boundary crosses because the
5922    /// reserved key set may have changed since the initial sanitization.
5923    pub fn check_reserved_keys(&self, tx: &impl SVMMessage) -> Result<()> {
5924        // Check keys against the reserved set - these failures simply require us
5925        // to re-sanitize the transaction. We do not need to drop the transaction.
5926        let reserved_keys = self.get_reserved_account_keys();
5927        for (index, key) in tx.account_keys().iter().enumerate() {
5928            if tx.is_writable(index) && reserved_keys.contains(key) {
5929                return Err(TransactionError::ResanitizationNeeded);
5930            }
5931        }
5932
5933        Ok(())
5934    }
5935
5936    /// only called from ledger-tool or tests
5937    fn calculate_capitalization(&self, debug_verify: bool) -> u64 {
5938        let is_startup = true;
5939        self.rc
5940            .accounts
5941            .accounts_db
5942            .verify_accounts_hash_in_bg
5943            .wait_for_complete();
5944        self.rc
5945            .accounts
5946            .accounts_db
5947            .update_accounts_hash_with_verify_from(
5948                // we have to use the index since the slot could be in the write cache still
5949                CalcAccountsHashDataSource::IndexForTests,
5950                debug_verify,
5951                self.slot(),
5952                &self.ancestors,
5953                None,
5954                self.epoch_schedule(),
5955                &self.rent_collector,
5956                is_startup,
5957            )
5958            .1
5959    }
5960
5961    /// only called from tests or ledger tool
5962    pub fn calculate_and_verify_capitalization(&self, debug_verify: bool) -> bool {
5963        let calculated = self.calculate_capitalization(debug_verify);
5964        let expected = self.capitalization();
5965        if calculated == expected {
5966            true
5967        } else {
5968            warn!(
5969                "Capitalization mismatch: calculated: {} != expected: {}",
5970                calculated, expected
5971            );
5972            false
5973        }
5974    }
5975
5976    /// Forcibly overwrites current capitalization by actually recalculating accounts' balances.
5977    /// This should only be used for developing purposes.
5978    pub fn set_capitalization(&self) -> u64 {
5979        let old = self.capitalization();
5980        // We cannot debug verify the hash calculation here because calculate_capitalization will use the index calculation due to callers using the write cache.
5981        // debug_verify only exists as an extra debugging step under the assumption that this code path is only used for tests. But, this is used by ledger-tool create-snapshot
5982        // for example.
5983        let debug_verify = false;
5984        self.capitalization
5985            .store(self.calculate_capitalization(debug_verify), Relaxed);
5986        old
5987    }
5988
5989    /// Returns the `AccountsHash` that was calculated for this bank's slot
5990    ///
5991    /// This fn is used when creating a snapshot with ledger-tool, or when
5992    /// packaging a snapshot into an archive (used to get the `SnapshotHash`).
5993    pub fn get_accounts_hash(&self) -> Option<AccountsHash> {
5994        self.rc
5995            .accounts
5996            .accounts_db
5997            .get_accounts_hash(self.slot())
5998            .map(|(accounts_hash, _)| accounts_hash)
5999    }
6000
6001    /// Returns the `IncrementalAccountsHash` that was calculated for this bank's slot
6002    ///
6003    /// This fn is used when creating an incremental snapshot with ledger-tool, or when
6004    /// packaging a snapshot into an archive (used to get the `SnapshotHash`).
6005    pub fn get_incremental_accounts_hash(&self) -> Option<IncrementalAccountsHash> {
6006        self.rc
6007            .accounts
6008            .accounts_db
6009            .get_incremental_accounts_hash(self.slot())
6010            .map(|(incremental_accounts_hash, _)| incremental_accounts_hash)
6011    }
6012
6013    /// Returns the `SnapshotHash` for this bank's slot
6014    ///
6015    /// This fn is used at startup to verify the bank was rebuilt correctly.
6016    ///
6017    /// # Panics
6018    ///
6019    /// Panics if there is both-or-neither of an `AccountsHash` and an `IncrementalAccountsHash`
6020    /// for this bank's slot.  There may only be one or the other.
6021    pub fn get_snapshot_hash(&self) -> SnapshotHash {
6022        let accounts_hash = self.get_accounts_hash();
6023        let incremental_accounts_hash = self.get_incremental_accounts_hash();
6024
6025        let accounts_hash = match (accounts_hash, incremental_accounts_hash) {
6026            (Some(_), Some(_)) => panic!("Both full and incremental accounts hashes are present for slot {}; it is ambiguous which one to use for the snapshot hash!", self.slot()),
6027            (Some(accounts_hash), None) => accounts_hash.into(),
6028            (None, Some(incremental_accounts_hash)) => incremental_accounts_hash.into(),
6029            (None, None) => panic!("accounts hash is required to get snapshot hash"),
6030        };
6031        let epoch_accounts_hash = self.get_epoch_accounts_hash_to_serialize();
6032        SnapshotHash::new(&accounts_hash, epoch_accounts_hash.as_ref())
6033    }
6034
    /// Warms the accounts-db read cache with the account for `key`, as seen
    /// from this bank's ancestors.
    pub fn load_account_into_read_cache(&self, key: &Pubkey) {
        self.rc
            .accounts
            .accounts_db
            .load_account_into_read_cache(&self.ancestors, key);
    }
6041
6042    pub fn update_accounts_hash(
6043        &self,
6044        data_source: CalcAccountsHashDataSource,
6045        mut debug_verify: bool,
6046        is_startup: bool,
6047    ) -> AccountsHash {
6048        let (accounts_hash, total_lamports) = self
6049            .rc
6050            .accounts
6051            .accounts_db
6052            .update_accounts_hash_with_verify_from(
6053                data_source,
6054                debug_verify,
6055                self.slot(),
6056                &self.ancestors,
6057                Some(self.capitalization()),
6058                self.epoch_schedule(),
6059                &self.rent_collector,
6060                is_startup,
6061            );
6062        if total_lamports != self.capitalization() {
6063            datapoint_info!(
6064                "capitalization_mismatch",
6065                ("slot", self.slot(), i64),
6066                ("calculated_lamports", total_lamports, i64),
6067                ("capitalization", self.capitalization(), i64),
6068            );
6069
6070            if !debug_verify {
6071                // cap mismatch detected. It has been logged to metrics above.
6072                // Run both versions of the calculation to attempt to get more info.
6073                debug_verify = true;
6074                self.rc
6075                    .accounts
6076                    .accounts_db
6077                    .update_accounts_hash_with_verify_from(
6078                        data_source,
6079                        debug_verify,
6080                        self.slot(),
6081                        &self.ancestors,
6082                        Some(self.capitalization()),
6083                        self.epoch_schedule(),
6084                        &self.rent_collector,
6085                        is_startup,
6086                    );
6087            }
6088
6089            panic!(
6090                "capitalization_mismatch. slot: {}, calculated_lamports: {}, capitalization: {}",
6091                self.slot(),
6092                total_lamports,
6093                self.capitalization()
6094            );
6095        }
6096        accounts_hash
6097    }
6098
6099    /// Calculate the incremental accounts hash from `base_slot` to `self`
6100    pub fn update_incremental_accounts_hash(&self, base_slot: Slot) -> IncrementalAccountsHash {
6101        let config = CalcAccountsHashConfig {
6102            use_bg_thread_pool: true,
6103            ancestors: None, // does not matter, will not be used
6104            epoch_schedule: &self.epoch_schedule,
6105            rent_collector: &self.rent_collector,
6106            store_detailed_debug_info_on_failure: false,
6107        };
6108        let storages = self.get_snapshot_storages(Some(base_slot));
6109        let sorted_storages = SortedStorages::new(&storages);
6110        self.rc
6111            .accounts
6112            .accounts_db
6113            .update_incremental_accounts_hash(
6114                &config,
6115                &sorted_storages,
6116                self.slot(),
6117                HashStats::default(),
6118            )
6119            .0
6120    }
6121
    /// A snapshot bank should be purged of 0 lamport accounts which are not part of the hash
    /// calculation and could shield other real accounts.
    ///
    /// Runs (optionally) clean and shrink, then kicks off accounts-hash
    /// verification (possibly in the background) and verifies the bank hash.
    /// Returns `true` only if both the accounts verification (as started) and
    /// the bank hash verification succeed.
    pub fn verify_snapshot_bank(
        &self,
        test_hash_calculation: bool,
        skip_shrink: bool,
        force_clean: bool,
        latest_full_snapshot_slot: Slot,
        base: Option<(Slot, /*capitalization*/ u64)>,
        duplicates_lt_hash: Option<Box<DuplicatesLtHash>>,
    ) -> bool {
        // Phase 1: clean obsolete accounts (unless skipped).
        let (_, clean_time_us) = measure_us!({
            let should_clean = force_clean || (!skip_shrink && self.slot() > 0);
            if should_clean {
                info!("Cleaning...");
                // We cannot clean past the latest full snapshot's slot because we are about to
                // perform an accounts hash calculation *up to that slot*.  If we cleaned *past*
                // that slot, then accounts could be removed from older storages, which would
                // change the accounts hash.
                self.rc.accounts.accounts_db.clean_accounts(
                    Some(latest_full_snapshot_slot),
                    true,
                    self.epoch_schedule(),
                    self.clean_accounts_old_storages_policy(),
                );
                info!("Cleaning... Done.");
            } else {
                info!("Cleaning... Skipped.");
            }
        });

        // Phase 2: shrink storages (unless skipped).
        let (_, shrink_time_us) = measure_us!({
            let should_shrink = !skip_shrink && self.slot() > 0;
            if should_shrink {
                info!("Shrinking...");
                self.rc.accounts.accounts_db.shrink_all_slots(
                    true,
                    self.epoch_schedule(),
                    // we cannot allow the snapshot slot to be shrunk
                    Some(self.slot()),
                );
                info!("Shrinking... Done.");
            } else {
                info!("Shrinking... Skipped.");
            }
        });

        // Phase 3: start accounts-hash verification (runs in the background);
        // if skipped, mark the background verification as already complete.
        let (verified_accounts, verify_accounts_time_us) = measure_us!({
            let should_verify_accounts = !self.rc.accounts.accounts_db.skip_initial_hash_calc;
            if should_verify_accounts {
                info!("Verifying accounts...");
                let verified = self.verify_accounts_hash(
                    base,
                    VerifyAccountsHashConfig {
                        test_hash_calculation,
                        ignore_mismatch: false,
                        require_rooted_bank: false,
                        run_in_background: true,
                        store_hash_raw_data_for_debug: false,
                    },
                    duplicates_lt_hash,
                );
                info!("Verifying accounts... In background.");
                verified
            } else {
                info!("Verifying accounts... Skipped.");
                self.rc
                    .accounts
                    .accounts_db
                    .verify_accounts_hash_in_bg
                    .verification_complete();
                true
            }
        });

        // Phase 4: verify the bank hash itself (always done, in-foreground).
        info!("Verifying bank...");
        let (verified_bank, verify_bank_time_us) = measure_us!(self.verify_hash());
        info!("Verifying bank... Done.");

        datapoint_info!(
            "verify_snapshot_bank",
            ("clean_us", clean_time_us, i64),
            ("shrink_us", shrink_time_us, i64),
            ("verify_accounts_us", verify_accounts_time_us, i64),
            ("verify_bank_us", verify_bank_time_us, i64),
        );

        verified_accounts && verified_bank
    }
6211
    /// Return the number of hashes per tick (`None` when no hashing is
    /// configured for ticks).
    pub fn hashes_per_tick(&self) -> &Option<u64> {
        &self.hashes_per_tick
    }
6216
    /// Return the number of ticks per slot (fixed at bank creation).
    pub fn ticks_per_slot(&self) -> u64 {
        self.ticks_per_slot
    }
6221
    /// Return the (possibly fractional) number of slots per year.
    pub fn slots_per_year(&self) -> f64 {
        self.slots_per_year
    }
6226
    /// Return the number of ticks since genesis.
    // Relaxed ordering: a plain monotonically-updated counter read.
    pub fn tick_height(&self) -> u64 {
        self.tick_height.load(Relaxed)
    }
6231
    /// Return a copy of the inflation parameters of the Bank.
    pub fn inflation(&self) -> Inflation {
        *self.inflation.read().unwrap()
    }
6236
    /// Return a reference to the rent collector for this Bank.
    pub fn rent_collector(&self) -> &RentCollector {
        &self.rent_collector
    }
6241
    /// Return the total capitalization (in lamports) of the Bank.
    pub fn capitalization(&self) -> u64 {
        self.capitalization.load(Relaxed)
    }
6246
    /// Return this bank's max_tick_height (the tick height at which this
    /// bank's slot is complete).
    pub fn max_tick_height(&self) -> u64 {
        self.max_tick_height
    }
6251
    /// Return the block_height of this bank (number of blocks beneath it).
    pub fn block_height(&self) -> u64 {
        self.block_height
    }
6256
    /// Return the number of slots per epoch for the given epoch, per this
    /// bank's epoch schedule.
    pub fn get_slots_in_epoch(&self, epoch: Epoch) -> u64 {
        self.epoch_schedule().get_slots_in_epoch(epoch)
    }
6261
    /// returns the epoch for which this bank's leader_schedule_slot_offset and slot would
    ///  need to cache leader_schedule
    pub fn get_leader_schedule_epoch(&self, slot: Slot) -> Epoch {
        self.epoch_schedule().get_leader_schedule_epoch(slot)
    }
6267
6268    /// a bank-level cache of vote accounts and stake delegation info
6269    fn update_stakes_cache(
6270        &self,
6271        txs: &[SanitizedTransaction],
6272        processing_results: &[TransactionProcessingResult],
6273    ) {
6274        debug_assert_eq!(txs.len(), processing_results.len());
6275        let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch();
6276        txs.iter()
6277            .zip(processing_results)
6278            .filter_map(|(tx, processing_result)| {
6279                processing_result
6280                    .processed_transaction()
6281                    .map(|processed_tx| (tx, processed_tx))
6282            })
6283            .filter_map(|(tx, processed_tx)| {
6284                processed_tx
6285                    .executed_transaction()
6286                    .map(|executed_tx| (tx, executed_tx))
6287            })
6288            .filter(|(_, executed_tx)| executed_tx.was_successful())
6289            .flat_map(|(tx, executed_tx)| {
6290                let num_account_keys = tx.message().account_keys().len();
6291                let loaded_tx = &executed_tx.loaded_transaction;
6292                loaded_tx.accounts.iter().take(num_account_keys)
6293            })
6294            .for_each(|(pubkey, account)| {
6295                // note that this could get timed to: self.rc.accounts.accounts_db.stats.stakes_cache_check_and_store_us,
6296                //  but this code path is captured separately in ExecuteTimingType::UpdateStakesCacheUs
6297                self.stakes_cache
6298                    .check_and_store(pubkey, account, new_warmup_cooldown_rate_epoch);
6299            });
6300    }
6301
    /// current vote accounts for this bank along with the stake
    ///   attributed to each account
    pub fn vote_accounts(&self) -> Arc<VoteAccountsHashMap> {
        let stakes = self.stakes_cache.stakes();
        Arc::from(stakes.vote_accounts())
    }
6308
6309    /// Vote account for the given vote account pubkey.
6310    pub fn get_vote_account(&self, vote_account: &Pubkey) -> Option<VoteAccount> {
6311        let stakes = self.stakes_cache.stakes();
6312        let vote_account = stakes.vote_accounts().get(vote_account)?;
6313        Some(vote_account.clone())
6314    }
6315
6316    /// Get the EpochStakes for the current Bank::epoch
6317    pub fn current_epoch_stakes(&self) -> &EpochStakes {
6318        // The stakes for a given epoch (E) in self.epoch_stakes are keyed by leader schedule epoch
6319        // (E + 1) so the stakes for the current epoch are stored at self.epoch_stakes[E + 1]
6320        self.epoch_stakes
6321            .get(&self.epoch.saturating_add(1))
6322            .expect("Current epoch stakes must exist")
6323    }
6324
    /// Get the EpochStakes for a given epoch, if this bank tracks them.
    pub fn epoch_stakes(&self, epoch: Epoch) -> Option<&EpochStakes> {
        self.epoch_stakes.get(&epoch)
    }
6329
    /// Full map of epoch -> EpochStakes tracked by this bank.
    pub fn epoch_stakes_map(&self) -> &HashMap<Epoch, EpochStakes> {
        &self.epoch_stakes
    }
6333
    /// Get the staked nodes map (node pubkey -> stake) for the current Bank::epoch
    pub fn current_epoch_staked_nodes(&self) -> Arc<HashMap<Pubkey, u64>> {
        self.current_epoch_stakes().stakes().staked_nodes()
    }
6338
6339    pub fn epoch_staked_nodes(&self, epoch: Epoch) -> Option<Arc<HashMap<Pubkey, u64>>> {
6340        Some(self.epoch_stakes.get(&epoch)?.stakes().staked_nodes())
6341    }
6342
6343    /// Get the total epoch stake for the given epoch.
6344    pub fn epoch_total_stake(&self, epoch: Epoch) -> Option<u64> {
6345        self.epoch_stakes
6346            .get(&epoch)
6347            .map(|epoch_stakes| epoch_stakes.total_stake())
6348    }
6349
    /// Get the total epoch stake for the current Bank::epoch
    pub fn get_current_epoch_total_stake(&self) -> u64 {
        self.current_epoch_stakes().total_stake()
    }
6354
6355    /// vote accounts for the specific epoch along with the stake
6356    ///   attributed to each account
6357    pub fn epoch_vote_accounts(&self, epoch: Epoch) -> Option<&VoteAccountsHashMap> {
6358        let epoch_stakes = self.epoch_stakes.get(&epoch)?.stakes();
6359        Some(epoch_stakes.vote_accounts().as_ref())
6360    }
6361
6362    /// Get the vote accounts along with the stake attributed to each account
6363    /// for the current Bank::epoch
6364    pub fn get_current_epoch_vote_accounts(&self) -> &VoteAccountsHashMap {
6365        self.current_epoch_stakes()
6366            .stakes()
6367            .vote_accounts()
6368            .as_ref()
6369    }
6370
6371    /// Get the fixed authorized voter for the given vote account for the
6372    /// current epoch
6373    pub fn epoch_authorized_voter(&self, vote_account: &Pubkey) -> Option<&Pubkey> {
6374        self.epoch_stakes
6375            .get(&self.epoch)
6376            .expect("Epoch stakes for bank's own epoch must exist")
6377            .epoch_authorized_voters()
6378            .get(vote_account)
6379    }
6380
6381    /// Get the fixed set of vote accounts for the given node id for the
6382    /// current epoch
6383    pub fn epoch_vote_accounts_for_node_id(&self, node_id: &Pubkey) -> Option<&NodeVoteAccounts> {
6384        self.epoch_stakes
6385            .get(&self.epoch)
6386            .expect("Epoch stakes for bank's own epoch must exist")
6387            .node_id_to_vote_accounts()
6388            .get(node_id)
6389    }
6390
6391    /// Get the total stake belonging to vote accounts associated with the given node id for the
6392    /// given epoch.
6393    pub fn epoch_node_id_to_stake(&self, epoch: Epoch, node_id: &Pubkey) -> Option<u64> {
6394        self.epoch_stakes(epoch)
6395            .and_then(|epoch_stakes| epoch_stakes.node_id_to_stake(node_id))
6396    }
6397
6398    /// Get the fixed total stake of all vote accounts for current epoch
6399    pub fn total_epoch_stake(&self) -> u64 {
6400        self.epoch_stakes
6401            .get(&self.epoch)
6402            .expect("Epoch stakes for bank's own epoch must exist")
6403            .total_stake()
6404    }
6405
6406    /// Get the fixed stake of the given vote account for the current epoch
6407    pub fn epoch_vote_account_stake(&self, vote_account: &Pubkey) -> u64 {
6408        *self
6409            .epoch_vote_accounts(self.epoch())
6410            .expect("Bank epoch vote accounts must contain entry for the bank's own epoch")
6411            .get(vote_account)
6412            .map(|(stake, _)| stake)
6413            .unwrap_or(&0)
6414    }
6415
    /// given a slot, return the epoch and offset into the epoch this slot falls
    /// e.g. with a fixed number for slots_per_epoch, the calculation is simply:
    ///
    ///  ( slot/slots_per_epoch, slot % slots_per_epoch )
    ///
    pub fn get_epoch_and_slot_index(&self, slot: Slot) -> (Epoch, SlotIndex) {
        self.epoch_schedule().get_epoch_and_slot_index(slot)
    }
6424
6425    pub fn get_epoch_info(&self) -> EpochInfo {
6426        let absolute_slot = self.slot();
6427        let block_height = self.block_height();
6428        let (epoch, slot_index) = self.get_epoch_and_slot_index(absolute_slot);
6429        let slots_in_epoch = self.get_slots_in_epoch(epoch);
6430        let transaction_count = Some(self.transaction_count());
6431        EpochInfo {
6432            epoch,
6433            slot_index,
6434            slots_in_epoch,
6435            absolute_slot,
6436            block_height,
6437            transaction_count,
6438        }
6439    }
6440
    /// Returns true if no transactions have been recorded in this bank
    /// (i.e. the `is_delta` flag has never been set).
    pub fn is_empty(&self) -> bool {
        !self.is_delta.load(Relaxed)
    }
6444
6445    pub fn add_mockup_builtin(
6446        &mut self,
6447        program_id: Pubkey,
6448        builtin_function: BuiltinFunctionWithContext,
6449    ) {
6450        self.transaction_processor.add_builtin(
6451            self,
6452            program_id,
6453            "mockup",
6454            ProgramCacheEntry::new_builtin(self.slot, 0, builtin_function),
6455        );
6456    }
6457
6458    pub fn add_precompile(&mut self, program_id: &Pubkey) {
6459        debug!("Adding precompiled program {}", program_id);
6460        self.add_precompiled_account(program_id);
6461        debug!("Added precompiled program {:?}", program_id);
6462    }
6463
6464    // Call AccountsDb::clean_accounts()
6465    //
6466    // This fn is meant to be called by the snapshot handler in Accounts Background Service.  If
6467    // calling from elsewhere, ensure the same invariants hold/expectations are met.
6468    pub(crate) fn clean_accounts(&self) {
6469        // Don't clean the slot we're snapshotting because it may have zero-lamport
6470        // accounts that were included in the bank delta hash when the bank was frozen,
6471        // and if we clean them here, any newly created snapshot's hash for this bank
6472        // may not match the frozen hash.
6473        //
6474        // So when we're snapshotting, the highest slot to clean is lowered by one.
6475        let highest_slot_to_clean = self.slot().saturating_sub(1);
6476
6477        self.rc.accounts.accounts_db.clean_accounts(
6478            Some(highest_slot_to_clean),
6479            false,
6480            self.epoch_schedule(),
6481            self.clean_accounts_old_storages_policy(),
6482        );
6483    }
6484
    /// Prints accounts-db statistics (debugging aid).
    pub fn print_accounts_stats(&self) {
        self.rc.accounts.accounts_db.print_accounts_stats("");
    }
6488
    /// Shrinks the accounts-db's current candidate slots; returns the number
    /// reported by `AccountsDb::shrink_candidate_slots`.
    pub fn shrink_candidate_slots(&self) -> usize {
        self.rc
            .accounts
            .accounts_db
            .shrink_candidate_slots(self.epoch_schedule())
    }
6495
6496    pub(crate) fn shrink_ancient_slots(&self) {
6497        // Invoke ancient slot shrinking only when the validator is
6498        // explicitly configured to do so. This condition may be
6499        // removed when the skip rewrites feature is enabled.
6500        if self.are_ancient_storages_enabled() {
6501            self.rc
6502                .accounts
6503                .accounts_db
6504                .shrink_ancient_slots(self.epoch_schedule())
6505        }
6506    }
6507
6508    /// Returns if ancient storages are enabled or not
6509    pub fn are_ancient_storages_enabled(&self) -> bool {
6510        let can_skip_rewrites = self.bank_hash_skips_rent_rewrites();
6511        let test_skip_rewrites_but_include_in_bank_hash = self
6512            .rc
6513            .accounts
6514            .accounts_db
6515            .test_skip_rewrites_but_include_in_bank_hash;
6516        can_skip_rewrites || test_skip_rewrites_but_include_in_bank_hash
6517    }
6518
6519    /// Returns how clean_accounts() should handle old storages
6520    fn clean_accounts_old_storages_policy(&self) -> OldStoragesPolicy {
6521        if self.are_ancient_storages_enabled() {
6522            OldStoragesPolicy::Leave
6523        } else {
6524            OldStoragesPolicy::Clean
6525        }
6526    }
6527
    /// Acquires a read lock on this bank's cost tracker.
    pub fn read_cost_tracker(&self) -> LockResult<RwLockReadGuard<CostTracker>> {
        self.cost_tracker.read()
    }
6531
    /// Acquires a write lock on this bank's cost tracker.
    pub fn write_cost_tracker(&self) -> LockResult<RwLockWriteGuard<CostTracker>> {
        self.cost_tracker.write()
    }
6535
6536    // Check if the wallclock time from bank creation to now has exceeded the allotted
6537    // time for transaction processing
6538    pub fn should_bank_still_be_processing_txs(
6539        bank_creation_time: &Instant,
6540        max_tx_ingestion_nanos: u128,
6541    ) -> bool {
6542        // Do this check outside of the PoH lock, hence not a method on PohRecorder
6543        bank_creation_time.elapsed().as_nanos() <= max_tx_ingestion_nanos
6544    }
6545
6546    pub fn deactivate_feature(&mut self, id: &Pubkey) {
6547        let mut feature_set = Arc::make_mut(&mut self.feature_set).clone();
6548        feature_set.active.remove(id);
6549        feature_set.inactive.insert(*id);
6550        self.feature_set = Arc::new(feature_set);
6551    }
6552
6553    pub fn activate_feature(&mut self, id: &Pubkey) {
6554        let mut feature_set = Arc::make_mut(&mut self.feature_set).clone();
6555        feature_set.inactive.remove(id);
6556        feature_set.active.insert(*id, 0);
6557        self.feature_set = Arc::new(feature_set);
6558    }
6559
    /// Test helper: registers ticks (with no scheduler) until this bank's
    /// blockhash advances. See `do_fill_bank_with_ticks_for_tests`.
    pub fn fill_bank_with_ticks_for_tests(&self) {
        self.do_fill_bank_with_ticks_for_tests(&BankWithScheduler::no_scheduler_available())
    }
6563
6564    pub(crate) fn do_fill_bank_with_ticks_for_tests(&self, scheduler: &InstalledSchedulerRwLock) {
6565        if self.tick_height.load(Relaxed) < self.max_tick_height {
6566            let last_blockhash = self.last_blockhash();
6567            while self.last_blockhash() == last_blockhash {
6568                self.register_tick(&Hash::new_unique(), scheduler)
6569            }
6570        } else {
6571            warn!("Bank already reached max tick height, cannot fill it with more ticks");
6572        }
6573    }
6574
    /// Get a set of all actively reserved account keys that are not allowed to
    /// be write-locked during transaction processing.
    pub fn get_reserved_account_keys(&self) -> &HashSet<Pubkey> {
        &self.reserved_account_keys.active
    }
6580
    // This is called from snapshot restore AND for each epoch boundary
    // The entire code path herein must be idempotent
    fn apply_feature_activations(
        &mut self,
        caller: ApplyFeatureActivationsCaller,
        debug_do_not_add_builtins: bool,
    ) {
        use ApplyFeatureActivationsCaller as Caller;
        // Only the normal child-bank path may newly activate pending
        // features; snapshot restore and warping only re-apply features that
        // are already active on-chain.
        let allow_new_activations = match caller {
            Caller::FinishInit => false,
            Caller::NewFromParent => true,
            Caller::WarpFromParent => false,
        };
        let (feature_set, new_feature_activations) =
            self.compute_active_feature_set(allow_new_activations);
        self.feature_set = Arc::new(feature_set);

        // Update activation slot of features in `new_feature_activations`
        for feature_id in new_feature_activations.iter() {
            if let Some(mut account) = self.get_account_with_fixed_root(feature_id) {
                if let Some(mut feature) = feature::from_account(&account) {
                    // Record this slot as the activation slot in the on-chain
                    // feature account.
                    feature.activated_at = Some(self.slot());
                    if feature::to_account(&feature, &mut account).is_some() {
                        self.store_account(feature_id, &account);
                    }
                    info!("Feature {} activated at slot {}", feature_id, self.slot());
                }
            }
        }

        // Update active set of reserved account keys which are not allowed to be write locked
        self.reserved_account_keys = {
            let mut reserved_keys = ReservedAccountKeys::clone(&self.reserved_account_keys);
            reserved_keys.update_active_set(&self.feature_set);
            Arc::new(reserved_keys)
        };

        if new_feature_activations.contains(&feature_set::pico_inflation::id()) {
            *self.inflation.write().unwrap() = Inflation::pico();
            self.fee_rate_governor.burn_percent = 50; // 50% fee burn
            self.rent_collector.rent.burn_percent = 50; // 50% rent burn
        }

        if !new_feature_activations.is_disjoint(&self.feature_set.full_inflation_features_enabled())
        {
            *self.inflation.write().unwrap() = Inflation::full();
            self.fee_rate_governor.burn_percent = 50; // 50% fee burn
            self.rent_collector.rent.burn_percent = 50; // 50% rent burn
        }

        if !debug_do_not_add_builtins {
            self.apply_builtin_program_feature_transitions(
                allow_new_activations,
                &new_feature_activations,
            );
        }

        // Each `update_hashes_per_tick*` feature bumps the PoH hashes-per-tick
        // to its associated constant. The checks are independent and only fire
        // for newly activated features, keeping this path idempotent.
        if new_feature_activations.contains(&feature_set::update_hashes_per_tick::id()) {
            self.apply_updated_hashes_per_tick(DEFAULT_HASHES_PER_TICK);
        }

        if new_feature_activations.contains(&feature_set::update_hashes_per_tick2::id()) {
            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK2);
        }

        if new_feature_activations.contains(&feature_set::update_hashes_per_tick3::id()) {
            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK3);
        }

        if new_feature_activations.contains(&feature_set::update_hashes_per_tick4::id()) {
            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK4);
        }

        if new_feature_activations.contains(&feature_set::update_hashes_per_tick5::id()) {
            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK5);
        }

        if new_feature_activations.contains(&feature_set::update_hashes_per_tick6::id()) {
            self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK6);
        }

        // SIMD-0207: raise block cost limits upon activation.
        if new_feature_activations.contains(&feature_set::raise_block_limits_to_50m::id()) {
            let (account_cost_limit, block_cost_limit, vote_cost_limit) = simd_0207_block_limits();
            self.write_cost_tracker().unwrap().set_limits(
                account_cost_limit,
                block_cost_limit,
                vote_cost_limit,
            );
        }
    }
6671
6672    fn apply_updated_hashes_per_tick(&mut self, hashes_per_tick: u64) {
6673        info!(
6674            "Activating update_hashes_per_tick {} at slot {}",
6675            hashes_per_tick,
6676            self.slot(),
6677        );
6678        self.hashes_per_tick = Some(hashes_per_tick);
6679    }
6680
6681    fn adjust_sysvar_balance_for_rent(&self, account: &mut AccountSharedData) {
6682        account.set_lamports(
6683            self.get_minimum_balance_for_rent_exemption(account.data().len())
6684                .max(account.lamports()),
6685        );
6686    }
6687
6688    /// Compute the active feature set based on the current bank state,
6689    /// and return it together with the set of newly activated features.
6690    fn compute_active_feature_set(&self, include_pending: bool) -> (FeatureSet, HashSet<Pubkey>) {
6691        let mut active = self.feature_set.active.clone();
6692        let mut inactive = HashSet::new();
6693        let mut pending = HashSet::new();
6694        let slot = self.slot();
6695
6696        for feature_id in &self.feature_set.inactive {
6697            let mut activated = None;
6698            if let Some(account) = self.get_account_with_fixed_root(feature_id) {
6699                if let Some(feature) = feature::from_account(&account) {
6700                    match feature.activated_at {
6701                        None if include_pending => {
6702                            // Feature activation is pending
6703                            pending.insert(*feature_id);
6704                            activated = Some(slot);
6705                        }
6706                        Some(activation_slot) if slot >= activation_slot => {
6707                            // Feature has been activated already
6708                            activated = Some(activation_slot);
6709                        }
6710                        _ => {}
6711                    }
6712                }
6713            }
6714            if let Some(slot) = activated {
6715                active.insert(*feature_id, slot);
6716            } else {
6717                inactive.insert(*feature_id);
6718            }
6719        }
6720
6721        (FeatureSet { active, inactive }, pending)
6722    }
6723
    /// Enable builtins and/or migrate them to Core BPF according to feature
    /// activations.
    ///
    /// For each builtin: if its Core BPF migration feature was just activated,
    /// attempt the migration; otherwise, if its enable-feature applies
    /// (newly activated when `only_apply_transitions_for_new_features`, or
    /// simply active otherwise), register it with the transaction processor.
    /// Stateless builtins are only ever migrated, and precompiles are
    /// (re-)added for any active precompile feature.
    fn apply_builtin_program_feature_transitions(
        &mut self,
        only_apply_transitions_for_new_features: bool,
        new_feature_activations: &HashSet<Pubkey>,
    ) {
        for builtin in BUILTINS.iter() {
            // The `builtin_is_bpf` flag is used to handle the case where a
            // builtin is scheduled to be enabled by one feature gate and
            // later migrated to Core BPF by another.
            //
            // There should never be a case where a builtin is set to be
            // migrated to Core BPF and is also set to be enabled on feature
            // activation on the same feature gate. However, the
            // `builtin_is_bpf` flag will handle this case as well, electing
            // to first attempt the migration to Core BPF.
            //
            // The migration to Core BPF will fail gracefully because the
            // program account will not exist. The builtin will subsequently
            // be enabled, but it will never be migrated to Core BPF.
            //
            // Using the same feature gate for both enabling and migrating a
            // builtin to Core BPF should be strictly avoided.
            let mut builtin_is_bpf = false;
            if let Some(core_bpf_migration_config) = &builtin.core_bpf_migration_config {
                // If the builtin is set to be migrated to Core BPF on feature
                // activation, perform the migration and do not add the program
                // to the bank's builtins. The migration will remove it from
                // the builtins list and the cache.
                if new_feature_activations.contains(&core_bpf_migration_config.feature_id) {
                    if let Err(e) = self
                        .migrate_builtin_to_core_bpf(&builtin.program_id, core_bpf_migration_config)
                    {
                        // A failed migration is logged, not fatal: the builtin
                        // remains a builtin.
                        warn!(
                            "Failed to migrate builtin {} to Core BPF: {}",
                            builtin.name, e
                        );
                    } else {
                        builtin_is_bpf = true;
                    }
                } else {
                    // If the builtin has already been migrated to Core BPF, do not
                    // add it to the bank's builtins.
                    builtin_is_bpf = self
                        .get_account(&builtin.program_id)
                        .map(|a| a.owner() == &bpf_loader_upgradeable::id())
                        .unwrap_or(false);
                }
            };

            if let Some(feature_id) = builtin.enable_feature_id {
                let should_enable_builtin_on_feature_transition = !builtin_is_bpf
                    && if only_apply_transitions_for_new_features {
                        new_feature_activations.contains(&feature_id)
                    } else {
                        self.feature_set.is_active(&feature_id)
                    };

                if should_enable_builtin_on_feature_transition {
                    self.transaction_processor.add_builtin(
                        self,
                        builtin.program_id,
                        builtin.name,
                        ProgramCacheEntry::new_builtin(
                            self.feature_set.activated_slot(&feature_id).unwrap_or(0),
                            builtin.name.len(),
                            builtin.entrypoint,
                        ),
                    );
                }
            }
        }

        // Migrate any necessary stateless builtins to core BPF.
        // Stateless builtins do not have an `enable_feature_id` since they
        // do not exist on-chain.
        for stateless_builtin in STATELESS_BUILTINS.iter() {
            if let Some(core_bpf_migration_config) = &stateless_builtin.core_bpf_migration_config {
                if new_feature_activations.contains(&core_bpf_migration_config.feature_id) {
                    if let Err(e) = self.migrate_builtin_to_core_bpf(
                        &stateless_builtin.program_id,
                        core_bpf_migration_config,
                    ) {
                        warn!(
                            "Failed to migrate stateless builtin {} to Core BPF: {}",
                            stateless_builtin.name, e
                        );
                    }
                }
            }
        }

        // Add any precompile whose gating feature is active (or which has no
        // gating feature... note: `unwrap_or(false)` means an ungated
        // precompile is NOT added here).
        for precompile in get_precompiles() {
            let should_add_precompile = precompile
                .feature
                .as_ref()
                .map(|feature_id| self.feature_set.is_active(feature_id))
                .unwrap_or(false);
            if should_add_precompile {
                self.add_precompile(&precompile.program_id);
            }
        }
    }
6826
    /// Use to replace programs by feature activation
    ///
    /// Replaces the account at `old_address` with the account currently at
    /// `new_address`, then clears `new_address`. The old account's lamports
    /// are burned (subtracted from capitalization), the program cache entry
    /// for `old_address` is evicted, and the accounts-data-size delta is
    /// updated for the size change. No-op unless both accounts exist.
    #[allow(dead_code)]
    fn replace_program_account(
        &mut self,
        old_address: &Pubkey,
        new_address: &Pubkey,
        datapoint_name: &'static str,
    ) {
        if let Some(old_account) = self.get_account_with_fixed_root(old_address) {
            if let Some(new_account) = self.get_account_with_fixed_root(new_address) {
                datapoint_info!(datapoint_name, ("slot", self.slot, i64));

                // Burn lamports in the old account
                self.capitalization
                    .fetch_sub(old_account.lamports(), Relaxed);

                // Transfer new account to old account
                self.store_account(old_address, &new_account);

                // Clear new account
                self.store_account(new_address, &AccountSharedData::default());

                // Unload a program from the bank's cache
                self.transaction_processor
                    .program_cache
                    .write()
                    .unwrap()
                    .remove_programs([*old_address].into_iter());

                self.calculate_and_update_accounts_data_size_delta_off_chain(
                    old_account.data().len(),
                    new_account.data().len(),
                );
            }
        }
    }
6863
6864    /// Get all the accounts for this bank and calculate stats
6865    pub fn get_total_accounts_stats(&self) -> ScanResult<TotalAccountsStats> {
6866        let accounts = self.get_all_accounts(false)?;
6867        Ok(self.calculate_total_accounts_stats(
6868            accounts
6869                .iter()
6870                .map(|(pubkey, account, _slot)| (pubkey, account)),
6871        ))
6872    }
6873
6874    /// Given all the accounts for a bank, calculate stats
6875    pub fn calculate_total_accounts_stats<'a>(
6876        &self,
6877        accounts: impl Iterator<Item = (&'a Pubkey, &'a AccountSharedData)>,
6878    ) -> TotalAccountsStats {
6879        let rent_collector = self.rent_collector();
6880        let mut total_accounts_stats = TotalAccountsStats::default();
6881        accounts.for_each(|(pubkey, account)| {
6882            total_accounts_stats.accumulate_account(pubkey, account, rent_collector);
6883        });
6884
6885        total_accounts_stats
6886    }
6887
6888    /// Get the EAH that will be used by snapshots
6889    ///
6890    /// Since snapshots are taken on roots, if the bank is in the EAH calculation window then an
6891    /// EAH *must* be included.  This means if an EAH calculation is currently in-flight we will
6892    /// wait for it to complete.
6893    pub fn get_epoch_accounts_hash_to_serialize(&self) -> Option<EpochAccountsHash> {
6894        let should_get_epoch_accounts_hash = epoch_accounts_hash_utils::is_enabled_this_epoch(self)
6895            && epoch_accounts_hash_utils::is_in_calculation_window(self);
6896        if !should_get_epoch_accounts_hash {
6897            return None;
6898        }
6899
6900        let (epoch_accounts_hash, waiting_time_us) = measure_us!(self
6901            .rc
6902            .accounts
6903            .accounts_db
6904            .epoch_accounts_hash_manager
6905            .wait_get_epoch_accounts_hash());
6906
6907        datapoint_info!(
6908            "bank-get_epoch_accounts_hash_to_serialize",
6909            ("slot", self.slot(), i64),
6910            ("waiting-time-us", waiting_time_us, i64),
6911        );
6912        Some(epoch_accounts_hash)
6913    }
6914
    /// Convenience fn to get the Epoch Accounts Hash
    ///
    /// Uses the manager's `try_get` accessor, so—unlike
    /// `get_epoch_accounts_hash_to_serialize`—this does not wait for an
    /// in-flight calculation.
    pub fn epoch_accounts_hash(&self) -> Option<EpochAccountsHash> {
        self.rc
            .accounts
            .accounts_db
            .epoch_accounts_hash_manager
            .try_get_epoch_accounts_hash()
    }
6923
6924    pub fn is_in_slot_hashes_history(&self, slot: &Slot) -> bool {
6925        if slot < &self.slot {
6926            if let Ok(slot_hashes) = self.transaction_processor.sysvar_cache().get_slot_hashes() {
6927                return slot_hashes.get(slot).is_some();
6928            }
6929        }
6930        false
6931    }
6932
    /// Return the `check_program_modification_slot` flag.
    pub fn check_program_modification_slot(&self) -> bool {
        self.check_program_modification_slot
    }

    /// Set the `check_program_modification_slot` flag.
    pub fn set_check_program_modification_slot(&mut self, check: bool) {
        self.check_program_modification_slot = check;
    }

    /// Return this bank's fee structure.
    pub fn fee_structure(&self) -> &FeeStructure {
        &self.fee_structure
    }

    /// Return the block id, if one has been set.
    pub fn block_id(&self) -> Option<Hash> {
        *self.block_id.read().unwrap()
    }

    /// Set (or clear, with `None`) the block id.
    pub fn set_block_id(&self, block_id: Option<Hash>) {
        *self.block_id.write().unwrap() = block_id;
    }

    /// Return the configured compute budget, if any.
    pub fn compute_budget(&self) -> Option<ComputeBudget> {
        self.compute_budget
    }

    /// Register `builtin` under `program_id` with the transaction processor.
    pub fn add_builtin(&self, program_id: Pubkey, name: &str, builtin: ProgramCacheEntry) {
        self.transaction_processor
            .add_builtin(self, program_id, name, builtin)
    }
6961}
6962
impl TransactionProcessingCallback for Bank {
    /// Delegate owner matching to the accounts db (relative to this bank's
    /// ancestors); lookup errors are mapped to `None`.
    fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option<usize> {
        self.rc
            .accounts
            .accounts_db
            .account_matches_owners(&self.ancestors, account, owners)
            .ok()
    }

    /// Load `pubkey`'s account with a fixed root, dropping the slot the
    /// account was found in.
    fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
        self.rc
            .accounts
            .accounts_db
            .load_with_fixed_root(&self.ancestors, pubkey)
            .map(|(acc, _)| acc)
    }

    // NOTE: must hold idempotent for the same set of arguments
    /// Add a builtin program account
    fn add_builtin_account(&self, name: &str, program_id: &Pubkey) {
        // A pre-existing account at `program_id` only counts as the genuine
        // builtin if it is owned by the native loader; anything else is
        // treated as a squatter and burned.
        let existing_genuine_program =
            self.get_account_with_fixed_root(program_id)
                .and_then(|account| {
                    // it's very unlikely to be squatted at program_id as non-system account because of burden to
                    // find victim's pubkey/hash. So, when account.owner is indeed native_loader's, it's
                    // safe to assume it's a genuine program.
                    if native_loader::check_id(account.owner()) {
                        Some(account)
                    } else {
                        // malicious account is pre-occupying at program_id
                        self.burn_and_purge_account(program_id, account);
                        None
                    }
                });

        // introducing builtin program
        if existing_genuine_program.is_some() {
            // The existing account is sufficient
            return;
        }

        // Creating a builtin account on a frozen bank would change its state;
        // this should only ever happen on an inconsistent snapshot restore.
        assert!(
            !self.freeze_started(),
            "Can't change frozen bank by adding not-existing new builtin program ({name}, {program_id}). \
            Maybe, inconsistent program activation is detected on snapshot restore?"
        );

        // Add a bogus executable builtin account, which will be loaded and ignored.
        let account = native_loader::create_loadable_account_with_fields(
            name,
            self.inherit_specially_retained_account_fields(&existing_genuine_program),
        );
        self.store_account_and_update_capitalization(program_id, &account);
    }

    /// Forward account inspection to the accounts-lt-hash bookkeeping, but
    /// only when that feature is enabled.
    fn inspect_account(&self, address: &Pubkey, account_state: AccountState, is_writable: bool) {
        if self.is_accounts_lt_hash_enabled() {
            self.inspect_account_for_accounts_lt_hash(address, &account_state, is_writable);
        }
    }
}
7024
#[cfg(feature = "dev-context-only-utils")]
impl Bank {
    /// Wrap this bank in a new `BankForks` and return the root bank handle
    /// together with the forks.
    pub fn wrap_with_bank_forks_for_tests(self) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
        let bank_forks = BankForks::new_rw_arc(self);
        let bank = bank_forks.read().unwrap().root_bank();
        (bank, bank_forks)
    }

    /// Build a default bank backed by a default (test) accounts db.
    pub fn default_for_tests() -> Self {
        let accounts_db = AccountsDb::default_for_tests();
        let accounts = Accounts::new(Arc::new(accounts_db));
        Self::default_with_accounts(accounts)
    }

    /// Create a test bank from `genesis_config`, already wrapped in `BankForks`.
    pub fn new_with_bank_forks_for_tests(
        genesis_config: &GenesisConfig,
    ) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
        let bank = Self::new_for_tests(genesis_config);
        bank.wrap_with_bank_forks_for_tests()
    }

    /// Create a test bank from `genesis_config` with the default test config.
    pub fn new_for_tests(genesis_config: &GenesisConfig) -> Self {
        Self::new_with_config_for_tests(genesis_config, BankTestConfig::default())
    }

    /// Create a test bank (wrapped in `BankForks`) with `builtin_function`
    /// installed as a mockup builtin under `program_id`.
    pub fn new_with_mockup_builtin_for_tests(
        genesis_config: &GenesisConfig,
        program_id: Pubkey,
        builtin_function: BuiltinFunctionWithContext,
    ) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
        let mut bank = Self::new_for_tests(genesis_config);
        bank.add_mockup_builtin(program_id, builtin_function);
        bank.wrap_with_bank_forks_for_tests()
    }

    /// Create a test bank whose wallclock throttle is effectively disabled by
    /// setting `ns_per_slot` to `u128::MAX`.
    pub fn new_no_wallclock_throttle_for_tests(
        genesis_config: &GenesisConfig,
    ) -> (Arc<Self>, Arc<RwLock<BankForks>>) {
        let mut bank = Self::new_for_tests(genesis_config);

        bank.ns_per_slot = u128::MAX;
        bank.wrap_with_bank_forks_for_tests()
    }

    /// Create a test bank from `genesis_config` using `test_config`'s
    /// accounts-db settings and no explicit account paths.
    pub fn new_with_config_for_tests(
        genesis_config: &GenesisConfig,
        test_config: BankTestConfig,
    ) -> Self {
        Self::new_with_paths_for_tests(
            genesis_config,
            Arc::new(RuntimeConfig::default()),
            test_config,
            Vec::new(),
        )
    }

    /// Create a test bank with an explicit runtime config and account paths.
    pub fn new_with_paths_for_tests(
        genesis_config: &GenesisConfig,
        runtime_config: Arc<RuntimeConfig>,
        test_config: BankTestConfig,
        paths: Vec<PathBuf>,
    ) -> Self {
        Self::new_with_paths(
            genesis_config,
            runtime_config,
            paths,
            None,
            None,
            false,
            Some(test_config.accounts_db_config),
            None,
            Some(Pubkey::new_unique()),
            Arc::default(),
            None,
            None,
        )
    }

    /// Create a bench bank with no explicit account paths.
    pub fn new_for_benches(genesis_config: &GenesisConfig) -> Self {
        Self::new_with_paths_for_benches(genesis_config, Vec::new())
    }

    /// Intended for use by benches only.
    /// create new bank with the given config and paths.
    pub fn new_with_paths_for_benches(genesis_config: &GenesisConfig, paths: Vec<PathBuf>) -> Self {
        Self::new_with_paths(
            genesis_config,
            Arc::<RuntimeConfig>::default(),
            paths,
            None,
            None,
            false,
            Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS),
            None,
            Some(Pubkey::new_unique()),
            Arc::default(),
            None,
            None,
        )
    }

    /// Prepare a transaction batch from a list of legacy transactions. Used for tests only.
    pub fn prepare_batch_for_tests(
        &self,
        txs: Vec<Transaction>,
    ) -> TransactionBatch<SanitizedTransaction> {
        let transaction_account_lock_limit = self.get_transaction_account_lock_limit();
        let sanitized_txs = txs
            .into_iter()
            .map(SanitizedTransaction::from_transaction_for_tests)
            .collect::<Vec<_>>();
        // Lock the accounts up front so the returned batch holds them for its
        // lifetime.
        let lock_results = self
            .rc
            .accounts
            .lock_accounts(sanitized_txs.iter(), transaction_account_lock_limit);
        TransactionBatch::new(lock_results, self, OwnedOrBorrowed::Owned(sanitized_txs))
    }

    /// Set the initial accounts data size
    /// NOTE: This fn is *ONLY FOR TESTS*
    pub fn set_accounts_data_size_initial_for_tests(&mut self, amount: u64) {
        self.accounts_data_size_initial = amount;
    }

    /// Update the accounts data size off-chain delta
    /// NOTE: This fn is *ONLY FOR TESTS*
    pub fn update_accounts_data_size_delta_off_chain_for_tests(&self, amount: i64) {
        self.update_accounts_data_size_delta_off_chain(amount)
    }

    // Re-enable lazy rent collection for tests that depend on the old behavior.
    #[cfg(test)]
    fn restore_old_behavior_for_fragile_tests(&self) {
        self.lazy_rent_collection.store(true, Relaxed);
    }

    /// Process multiple transaction in a single batch. This is used for benches and unit tests.
    ///
    /// # Panics
    ///
    /// Panics if any of the transactions do not pass sanitization checks.
    #[must_use]
    pub fn process_transactions<'a>(
        &self,
        txs: impl Iterator<Item = &'a Transaction>,
    ) -> Vec<Result<()>> {
        self.try_process_transactions(txs).unwrap()
    }

    /// Process entry transactions in a single batch. This is used for benches and unit tests.
    ///
    /// # Panics
    ///
    /// Panics if any of the transactions do not pass sanitization checks.
    #[must_use]
    pub fn process_entry_transactions(&self, txs: Vec<VersionedTransaction>) -> Vec<Result<()>> {
        self.try_process_entry_transactions(txs).unwrap()
    }

    // Flush this bank's slot from the accounts write cache (tests only).
    #[cfg(test)]
    pub fn flush_accounts_cache_slot_for_tests(&self) {
        self.rc
            .accounts
            .accounts_db
            .flush_accounts_cache_slot_for_tests(self.slot())
    }

    /// This is only valid to call from tests.
    /// block until initial accounts hash verification has completed
    pub fn wait_for_initial_accounts_hash_verification_completed_for_tests(&self) {
        self.rc
            .accounts
            .accounts_db
            .verify_accounts_hash_in_bg
            .wait_for_complete()
    }

    /// Return a copy of the transaction processor's sysvar cache (tests only).
    pub fn get_sysvar_cache_for_tests(&self) -> SysvarCache {
        self.transaction_processor.get_sysvar_cache_for_tests()
    }

    /// Recalculate the accounts hash from the index (tests only).
    pub fn update_accounts_hash_for_tests(&self) -> AccountsHash {
        self.update_accounts_hash(CalcAccountsHashDataSource::IndexForTests, false, false)
    }

    /// Build a per-tx-batch program cache view for `slot` from the bank's
    /// program cache.
    pub fn new_program_cache_for_tx_batch_for_slot(&self, slot: Slot) -> ProgramCacheForTxBatch {
        ProgramCacheForTxBatch::new_from_cache(
            slot,
            self.epoch_schedule.get_epoch(slot),
            &self.transaction_processor.program_cache.read().unwrap(),
        )
    }

    /// Borrow the bank's transaction batch processor.
    pub fn get_transaction_processor(&self) -> &TransactionBatchProcessor<BankForks> {
        &self.transaction_processor
    }

    /// Replace the bank's fee structure (tests only).
    pub fn set_fee_structure(&mut self, fee_structure: &FeeStructure) {
        self.fee_structure = fee_structure.clone();
    }

    /// Load (or reload) the program at `pubkey` for `effective_epoch` using
    /// that epoch's runtime environments. Returns `None` if no environments
    /// exist for the epoch or the program cannot be loaded.
    pub fn load_program(
        &self,
        pubkey: &Pubkey,
        reload: bool,
        effective_epoch: Epoch,
    ) -> Option<Arc<ProgramCacheEntry>> {
        let environments = self
            .transaction_processor
            .get_environments_for_epoch(effective_epoch)?;
        load_program_with_pubkey(
            self,
            &environments,
            pubkey,
            self.slot(),
            &mut ExecuteTimings::default(), // Called by ledger-tool, metrics not accumulated.
            reload,
        )
    }

    /// Withdraw `lamports` from `pubkey`'s account.
    ///
    /// # Errors
    ///
    /// - `TransactionError::AccountNotFound` if the account does not exist.
    /// - `TransactionError::InsufficientFundsForFee` if the remaining balance
    ///   would drop below the required minimum (the rent-exempt reserve for
    ///   nonce accounts) or the lamport math overflows.
    pub fn withdraw(&self, pubkey: &Pubkey, lamports: u64) -> Result<()> {
        match self.get_account_with_fixed_root(pubkey) {
            Some(mut account) => {
                // Nonce accounts must retain their rent-exempt reserve.
                let min_balance = match get_system_account_kind(&account) {
                    Some(SystemAccountKind::Nonce) => self
                        .rent_collector
                        .rent
                        .minimum_balance(nonce::State::size()),
                    _ => 0,
                };

                lamports
                    .checked_add(min_balance)
                    .filter(|required_balance| *required_balance <= account.lamports())
                    .ok_or(TransactionError::InsufficientFundsForFee)?;
                account
                    .checked_sub_lamports(lamports)
                    .map_err(|_| TransactionError::InsufficientFundsForFee)?;
                self.store_account(pubkey, &account);

                Ok(())
            }
            None => Err(TransactionError::AccountNotFound),
        }
    }

    /// Replace the bank's hash overrides (dev/test utility).
    pub fn set_hash_overrides(&self, hash_overrides: HashOverrides) {
        *self.hash_overrides.lock().unwrap() = hash_overrides;
    }
}
7274
/// Compute how much an account has changed size.  This function is useful when the data size delta
/// needs to be computed and passed to an `update_accounts_data_size_delta` function.
///
/// # Panics
///
/// Panics if either size exceeds `i64::MAX`.
fn calculate_data_size_delta(old_data_size: usize, new_data_size: usize) -> i64 {
    // Both sizes must be representable as i64 so the subtraction below is
    // well-defined.
    assert!(old_data_size <= i64::MAX as usize);
    assert!(new_data_size <= i64::MAX as usize);
    (new_data_size as i64).saturating_sub(old_data_size as i64)
}
7285
/// Since `apply_feature_activations()` has different behavior depending on its caller, enumerate
/// those callers explicitly.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum ApplyFeatureActivationsCaller {
    /// Bank finishing initialization (e.g. snapshot restore); no new activations
    FinishInit,
    /// Normal child-bank creation; new feature activations are allowed
    NewFromParent,
    /// Warping to a future slot; no new activations
    WarpFromParent,
}
7294
/// Return the computed values from `collect_rent_from_accounts()`
///
/// Since `collect_rent_from_accounts()` is running in parallel, instead of updating the
/// atomics/shared data inside this function, return those values in this struct for the caller to
/// process later.
#[derive(Debug, Default)]
struct CollectRentFromAccountsInfo {
    // Hashes for accounts whose rewrite was skipped
    skipped_rewrites: Vec<(Pubkey, AccountHash)>,
    // Aggregate rent amounts/data reclaimed by the collection pass
    rent_collected_info: CollectedInfo,
    // Per-account rent reward entries
    rent_rewards: Vec<(Pubkey, RewardInfo)>,
    // Timings (microseconds) for the collect and store phases
    time_collecting_rent_us: u64,
    time_storing_accounts_us: u64,
    // Number of accounts processed
    num_accounts: usize,
}
7309
/// Return the computed values—of each iteration in the parallel loop inside
/// `collect_rent_in_partition()`—and then perform a reduce on all of them.
#[derive(Debug, Default)]
struct CollectRentInPartitionInfo {
    // Hashes for accounts whose rewrite was skipped
    skipped_rewrites: Vec<(Pubkey, AccountHash)>,
    // Total rent collected (lamports)
    rent_collected: u64,
    // Total account data bytes reclaimed by rent collection
    accounts_data_size_reclaimed: u64,
    // Per-account rent reward entries
    rent_rewards: Vec<(Pubkey, RewardInfo)>,
    // Timings (microseconds) for the load, collect, and store phases
    time_loading_accounts_us: u64,
    time_collecting_rent_us: u64,
    time_storing_accounts_us: u64,
    // Number of accounts processed
    num_accounts: usize,
}
7323
7324impl CollectRentInPartitionInfo {
7325    /// Create a new `CollectRentInPartitionInfo` from the results of loading accounts and
7326    /// collecting rent on them.
7327    #[must_use]
7328    fn new(info: CollectRentFromAccountsInfo, time_loading_accounts: Duration) -> Self {
7329        Self {
7330            skipped_rewrites: info.skipped_rewrites,
7331            rent_collected: info.rent_collected_info.rent_amount,
7332            accounts_data_size_reclaimed: info.rent_collected_info.account_data_len_reclaimed,
7333            rent_rewards: info.rent_rewards,
7334            time_loading_accounts_us: time_loading_accounts.as_micros() as u64,
7335            time_collecting_rent_us: info.time_collecting_rent_us,
7336            time_storing_accounts_us: info.time_storing_accounts_us,
7337            num_accounts: info.num_accounts,
7338        }
7339    }
7340
7341    /// Reduce (i.e. 'combine') two `CollectRentInPartitionInfo`s into one.
7342    ///
7343    /// This fn is used by `collect_rent_in_partition()` as the reduce step (of map-reduce) in its
7344    /// parallel loop of rent collection.
7345    #[must_use]
7346    fn reduce(lhs: Self, rhs: Self) -> Self {
7347        Self {
7348            skipped_rewrites: [lhs.skipped_rewrites, rhs.skipped_rewrites].concat(),
7349            rent_collected: lhs.rent_collected.saturating_add(rhs.rent_collected),
7350            accounts_data_size_reclaimed: lhs
7351                .accounts_data_size_reclaimed
7352                .saturating_add(rhs.accounts_data_size_reclaimed),
7353            rent_rewards: [lhs.rent_rewards, rhs.rent_rewards].concat(),
7354            time_loading_accounts_us: lhs
7355                .time_loading_accounts_us
7356                .saturating_add(rhs.time_loading_accounts_us),
7357            time_collecting_rent_us: lhs
7358                .time_collecting_rent_us
7359                .saturating_add(rhs.time_collecting_rent_us),
7360            time_storing_accounts_us: lhs
7361                .time_storing_accounts_us
7362                .saturating_add(rhs.time_storing_accounts_us),
7363            num_accounts: lhs.num_accounts.saturating_add(rhs.num_accounts),
7364        }
7365    }
7366}
7367
/// Struct to collect stats when scanning all accounts in `get_total_accounts_stats()`
///
/// Accumulated via `accumulate_account()`; `Serialize` allows emitting the stats directly.
#[derive(Debug, Default, Copy, Clone, Serialize)]
pub struct TotalAccountsStats {
    /// Total number of accounts
    pub num_accounts: usize,
    /// Total data size of all accounts
    pub data_len: usize,

    /// Total number of executable accounts
    pub num_executable_accounts: usize,
    /// Total data size of executable accounts
    pub executable_data_len: usize,

    /// Total number of rent exempt accounts
    pub num_rent_exempt_accounts: usize,
    /// Total number of rent paying accounts
    pub num_rent_paying_accounts: usize,
    /// Total number of rent paying accounts without data
    pub num_rent_paying_accounts_without_data: usize,
    /// Total amount of lamports in rent paying accounts
    pub lamports_in_rent_paying_accounts: u64,
}
7390
7391impl TotalAccountsStats {
7392    pub fn accumulate_account(
7393        &mut self,
7394        address: &Pubkey,
7395        account: &AccountSharedData,
7396        rent_collector: &RentCollector,
7397    ) {
7398        let data_len = account.data().len();
7399        self.num_accounts += 1;
7400        self.data_len += data_len;
7401
7402        if account.executable() {
7403            self.num_executable_accounts += 1;
7404            self.executable_data_len += data_len;
7405        }
7406
7407        if !rent_collector.should_collect_rent(address, account.executable())
7408            || rent_collector
7409                .get_rent_due(
7410                    account.lamports(),
7411                    account.data().len(),
7412                    account.rent_epoch(),
7413                )
7414                .is_exempt()
7415        {
7416            self.num_rent_exempt_accounts += 1;
7417        } else {
7418            self.num_rent_paying_accounts += 1;
7419            self.lamports_in_rent_paying_accounts += account.lamports();
7420            if data_len == 0 {
7421                self.num_rent_paying_accounts_without_data += 1;
7422            }
7423        }
7424    }
7425}
7426
impl Drop for Bank {
    fn drop(&mut self) {
        // If a drop callback has been installed, delegate cleanup to it; what it
        // does is determined by whoever installed it (NOTE(review): presumably the
        // validator's accounts-background service — confirm at the install site).
        // The read-lock guard on `drop_callback` is held for the callback call.
        if let Some(drop_callback) = self.drop_callback.read().unwrap().0.as_ref() {
            drop_callback.callback(self);
        } else {
            // Default case for tests: no callback installed, so synchronously purge
            // this bank's slot from the accounts db.
            // (NOTE(review): meaning of the `false` flag isn't visible here — see
            // `purge_slot`'s signature for what it controls.)
            self.rc
                .accounts
                .accounts_db
                .purge_slot(self.slot(), self.bank_id(), false);
        }
    }
}
7440
7441/// utility function used for testing and benchmarking.
7442pub mod test_utils {
7443    use {
7444        super::Bank,
7445        crate::installed_scheduler_pool::BankWithScheduler,
7446        solana_sdk::{
7447            account::{ReadableAccount, WritableAccount},
7448            hash::hashv,
7449            lamports::LamportsError,
7450            pubkey::Pubkey,
7451        },
7452        solana_vote_program::vote_state::{self, BlockTimestamp, VoteStateVersions},
7453        std::sync::Arc,
7454    };
7455    pub fn goto_end_of_slot(bank: Arc<Bank>) {
7456        goto_end_of_slot_with_scheduler(&BankWithScheduler::new_without_scheduler(bank))
7457    }
7458
7459    pub fn goto_end_of_slot_with_scheduler(bank: &BankWithScheduler) {
7460        let mut tick_hash = bank.last_blockhash();
7461        loop {
7462            tick_hash = hashv(&[tick_hash.as_ref(), &[42]]);
7463            bank.register_tick(&tick_hash);
7464            if tick_hash == bank.last_blockhash() {
7465                bank.freeze();
7466                return;
7467            }
7468        }
7469    }
7470
7471    pub fn update_vote_account_timestamp(
7472        timestamp: BlockTimestamp,
7473        bank: &Bank,
7474        vote_pubkey: &Pubkey,
7475    ) {
7476        let mut vote_account = bank.get_account(vote_pubkey).unwrap_or_default();
7477        let mut vote_state = vote_state::from(&vote_account).unwrap_or_default();
7478        vote_state.last_timestamp = timestamp;
7479        let versioned = VoteStateVersions::new_current(vote_state);
7480        vote_state::to(&versioned, &mut vote_account).unwrap();
7481        bank.store_account(vote_pubkey, &vote_account);
7482    }
7483
7484    pub fn deposit(
7485        bank: &Bank,
7486        pubkey: &Pubkey,
7487        lamports: u64,
7488    ) -> std::result::Result<u64, LamportsError> {
7489        // This doesn't collect rents intentionally.
7490        // Rents should only be applied to actual TXes
7491        let mut account = bank
7492            .get_account_with_fixed_root_no_cache(pubkey)
7493            .unwrap_or_default();
7494        account.checked_add_lamports(lamports)?;
7495        bank.store_account(pubkey, &account);
7496        Ok(account.lamports())
7497    }
7498}