solana_program_runtime/loaded_programs.rs

1use {
2    crate::invoke_context::{BuiltinFunctionWithContext, InvokeContext},
3    log::{debug, error, log_enabled, trace},
4    percentage::PercentageInteger,
5    solana_measure::measure::Measure,
6    solana_rbpf::{
7        elf::Executable,
8        program::{BuiltinProgram, FunctionRegistry},
9        verifier::RequisiteVerifier,
10        vm::Config,
11    },
12    solana_sdk::{
13        bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable,
14        clock::{Epoch, Slot},
15        loader_v4, native_loader,
16        pubkey::Pubkey,
17        saturating_add_assign,
18    },
19    solana_timings::ExecuteDetailsTimings,
20    solana_type_overrides::{
21        rand::{thread_rng, Rng},
22        sync::{
23            atomic::{AtomicU64, Ordering},
24            Arc, Condvar, Mutex, RwLock,
25        },
26        thread,
27    },
28    std::{
29        collections::{hash_map::Entry, HashMap},
30        fmt::{Debug, Formatter},
31        sync::Weak,
32    },
33};
34
35pub type ProgramRuntimeEnvironment = Arc<BuiltinProgram<InvokeContext<'static>>>;
36pub const MAX_LOADED_ENTRY_COUNT: usize = 512;
37pub const DELAY_VISIBILITY_SLOT_OFFSET: Slot = 1;
38
39/// Relationship between two fork IDs
40#[derive(Copy, Clone, Debug, PartialEq)]
41pub enum BlockRelation {
42    /// The slot is on the same fork and is an ancestor of the other slot
43    Ancestor,
44    /// The two slots are equal and are on the same fork
45    Equal,
46    /// The slot is on the same fork and is a descendant of the other slot
47    Descendant,
48    /// The slots are on two different forks and may have had a common ancestor at some point
49    Unrelated,
50    /// Either one or both of the slots are either older than the latest root, or are in the future
51    Unknown,
52}
53
54/// Maps relationship between two slots.
55pub trait ForkGraph {
56    /// Returns the BlockRelation of A to B
57    fn relationship(&self, a: Slot, b: Slot) -> BlockRelation;
58}
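
For illustration, a minimal `ForkGraph` implementation for a single linear chain might look like the sketch below; the type name `LinearForkGraph` is hypothetical, and real implementations live alongside the validator's bank forks:

```rust
// Hypothetical fork graph for a chain with no competing forks:
// every slot at or above the root lies on the same fork.
struct LinearForkGraph {
    root: Slot,
}

impl ForkGraph for LinearForkGraph {
    fn relationship(&self, a: Slot, b: Slot) -> BlockRelation {
        if a < self.root || b < self.root {
            // At least one slot is older than the latest root.
            BlockRelation::Unknown
        } else {
            match a.cmp(&b) {
                std::cmp::Ordering::Less => BlockRelation::Ancestor,
                std::cmp::Ordering::Equal => BlockRelation::Equal,
                std::cmp::Ordering::Greater => BlockRelation::Descendant,
            }
        }
    }
}
```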
59
60/// The owner of a program's accounts, and thus the loader of a program
61#[derive(Default, Clone, Copy, PartialEq, Eq, Debug)]
62pub enum ProgramCacheEntryOwner {
63    #[default]
64    NativeLoader,
65    LoaderV1,
66    LoaderV2,
67    LoaderV3,
68    LoaderV4,
69}
70
71impl TryFrom<&Pubkey> for ProgramCacheEntryOwner {
72    type Error = ();
73    fn try_from(loader_key: &Pubkey) -> Result<Self, ()> {
74        if native_loader::check_id(loader_key) {
75            Ok(ProgramCacheEntryOwner::NativeLoader)
76        } else if bpf_loader_deprecated::check_id(loader_key) {
77            Ok(ProgramCacheEntryOwner::LoaderV1)
78        } else if bpf_loader::check_id(loader_key) {
79            Ok(ProgramCacheEntryOwner::LoaderV2)
80        } else if bpf_loader_upgradeable::check_id(loader_key) {
81            Ok(ProgramCacheEntryOwner::LoaderV3)
82        } else if loader_v4::check_id(loader_key) {
83            Ok(ProgramCacheEntryOwner::LoaderV4)
84        } else {
85            Err(())
86        }
87    }
88}
89
90impl From<ProgramCacheEntryOwner> for Pubkey {
91    fn from(program_cache_entry_owner: ProgramCacheEntryOwner) -> Self {
92        match program_cache_entry_owner {
93            ProgramCacheEntryOwner::NativeLoader => native_loader::id(),
94            ProgramCacheEntryOwner::LoaderV1 => bpf_loader_deprecated::id(),
95            ProgramCacheEntryOwner::LoaderV2 => bpf_loader::id(),
96            ProgramCacheEntryOwner::LoaderV3 => bpf_loader_upgradeable::id(),
97            ProgramCacheEntryOwner::LoaderV4 => loader_v4::id(),
98        }
99    }
100}
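
A short sketch of how these conversions round-trip, assuming the items of this module and `solana_sdk` are in scope:

```rust
// A known loader key maps to its ProgramCacheEntryOwner variant and back.
let owner = ProgramCacheEntryOwner::try_from(&bpf_loader_upgradeable::id()).unwrap();
assert_eq!(owner, ProgramCacheEntryOwner::LoaderV3);
assert_eq!(Pubkey::from(owner), bpf_loader_upgradeable::id());

// Arbitrary keys that are not loaders are rejected.
assert!(ProgramCacheEntryOwner::try_from(&Pubkey::new_unique()).is_err());
```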
101
102/*
103    The possible ProgramCacheEntryType transitions:
104
105    DelayVisibility is special in that it is never stored in the cache.
106    It is only returned by ProgramCacheForTxBatch::find() when a Loaded entry
107    is encountered which is not effective yet.
108
109    Builtin re/deployment:
110    - Empty => Builtin in TransactionBatchProcessor::add_builtin
111    - Builtin => Builtin in TransactionBatchProcessor::add_builtin
112
113    Un/re/deployment (with delay and cooldown):
114    - Empty / Closed => Loaded in UpgradeableLoaderInstruction::DeployWithMaxDataLen
115    - Loaded / FailedVerification => Loaded in UpgradeableLoaderInstruction::Upgrade
116    - Loaded / FailedVerification => Closed in UpgradeableLoaderInstruction::Close
117
118    Eviction and unloading (in the same slot):
119    - Unloaded => Loaded in ProgramCache::assign_program
120    - Loaded => Unloaded in ProgramCache::unload_program_entry
121
122    At epoch boundary (when feature set and environment changes):
123    - Loaded => FailedVerification in Bank::_new_from_parent
124    - FailedVerification => Loaded in Bank::_new_from_parent
125
126    Through pruning (when on orphan fork or overshadowed on the rooted fork):
127    - Closed / Unloaded / Loaded / Builtin => Empty in ProgramCache::prune
128*/
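
To make the classification above concrete, a hypothetical helper could separate the variants that still carry something executable from the tombstone and unloaded variants (compare `is_tombstone()` further below):

```rust
// Hypothetical helper: only Loaded and Builtin entries can actually be invoked.
fn has_executable_payload(entry: &ProgramCacheEntryType) -> bool {
    matches!(
        entry,
        ProgramCacheEntryType::Loaded(_) | ProgramCacheEntryType::Builtin(_)
    )
}

assert!(!has_executable_payload(&ProgramCacheEntryType::Closed));
assert!(!has_executable_payload(&ProgramCacheEntryType::DelayVisibility));
```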
129
130/// Actual payload of [ProgramCacheEntry].
131#[derive(Default)]
132pub enum ProgramCacheEntryType {
133    /// Tombstone for programs which currently do not pass the verifier but could if the feature set changed.
134    FailedVerification(ProgramRuntimeEnvironment),
135    /// Tombstone for programs that were either explicitly closed or never deployed.
136    ///
137    /// It's also used for accounts belonging to program loaders, that don't actually contain program code (e.g. buffer accounts for LoaderV3 programs).
138    #[default]
139    Closed,
140    /// Tombstone for programs which have recently been modified but the new version is not visible yet.
141    DelayVisibility,
142    /// Successfully verified but not currently compiled.
143    ///
144    /// It continues to track usage statistics even when the compiled executable of the program is evicted from memory.
145    Unloaded(ProgramRuntimeEnvironment),
146    /// Verified and compiled program
147    Loaded(Executable<InvokeContext<'static>>),
148    /// A built-in program which is not stored on-chain but baked into and distributed with the validator
149    Builtin(BuiltinProgram<InvokeContext<'static>>),
150}
151
152impl Debug for ProgramCacheEntryType {
153    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
154        match self {
155            ProgramCacheEntryType::FailedVerification(_) => {
156                write!(f, "ProgramCacheEntryType::FailedVerification")
157            }
158            ProgramCacheEntryType::Closed => write!(f, "ProgramCacheEntryType::Closed"),
159            ProgramCacheEntryType::DelayVisibility => {
160                write!(f, "ProgramCacheEntryType::DelayVisibility")
161            }
162            ProgramCacheEntryType::Unloaded(_) => write!(f, "ProgramCacheEntryType::Unloaded"),
163            ProgramCacheEntryType::Loaded(_) => write!(f, "ProgramCacheEntryType::Loaded"),
164            ProgramCacheEntryType::Builtin(_) => write!(f, "ProgramCacheEntryType::Builtin"),
165        }
166    }
167}
168
169impl ProgramCacheEntryType {
170    /// Returns a reference to its environment if it has one
171    pub fn get_environment(&self) -> Option<&ProgramRuntimeEnvironment> {
172        match self {
173            ProgramCacheEntryType::Loaded(program) => Some(program.get_loader()),
174            ProgramCacheEntryType::FailedVerification(env)
175            | ProgramCacheEntryType::Unloaded(env) => Some(env),
176            _ => None,
177        }
178    }
179}
180
181/// Holds a program version at a specific address and on a specific slot / fork.
182///
183/// It contains the actual program in [ProgramCacheEntryType] and a bunch of meta-data.
184#[derive(Debug, Default)]
185pub struct ProgramCacheEntry {
186    /// The program of this entry
187    pub program: ProgramCacheEntryType,
188    /// The loader of this entry
189    pub account_owner: ProgramCacheEntryOwner,
190    /// Size of account that stores the program and program data
191    pub account_size: usize,
192    /// Slot in which the program was (re)deployed
193    pub deployment_slot: Slot,
194    /// Slot in which this entry will become active (can be in the future)
195    pub effective_slot: Slot,
196    /// How often this entry was used by a transaction
197    pub tx_usage_counter: AtomicU64,
198    /// How often this entry was used by an instruction
199    pub ix_usage_counter: AtomicU64,
200    /// Latest slot in which the entry was used
201    pub latest_access_slot: AtomicU64,
202}
203
204/// Global cache statistics for [ProgramCache].
205#[derive(Debug, Default)]
206pub struct ProgramCacheStats {
207    /// a program was already in the cache
208    pub hits: AtomicU64,
209    /// a program was not found and loaded instead
210    pub misses: AtomicU64,
211    /// a compiled executable was unloaded
212    pub evictions: HashMap<Pubkey, u64>,
213    /// an unloaded program was loaded again (opposite of eviction)
214    pub reloads: AtomicU64,
215    /// a program was loaded or un/re/deployed
216    pub insertions: AtomicU64,
217    /// a program was loaded but can no longer be extracted on its own fork
218    pub lost_insertions: AtomicU64,
219    /// a program which was already in the cache was reloaded by mistake
220    pub replacements: AtomicU64,
221    /// a program was only used once before being unloaded
222    pub one_hit_wonders: AtomicU64,
223    /// a program became unreachable in the fork graph because of rerooting
224    pub prunes_orphan: AtomicU64,
225    /// a program got pruned because it was not recompiled for the next epoch
226    pub prunes_environment: AtomicU64,
227    /// a program had no entries because all slot versions got pruned
228    pub empty_entries: AtomicU64,
229    /// water level of loaded entries currently cached
230    pub water_level: AtomicU64,
231}
232
233impl ProgramCacheStats {
234    pub fn reset(&mut self) {
235        *self = ProgramCacheStats::default();
236    }
237    pub fn log(&self) {
238        let hits = self.hits.load(Ordering::Relaxed);
239        let misses = self.misses.load(Ordering::Relaxed);
240        let evictions: u64 = self.evictions.values().sum();
241        let reloads = self.reloads.load(Ordering::Relaxed);
242        let insertions = self.insertions.load(Ordering::Relaxed);
243        let lost_insertions = self.lost_insertions.load(Ordering::Relaxed);
244        let replacements = self.replacements.load(Ordering::Relaxed);
245        let one_hit_wonders = self.one_hit_wonders.load(Ordering::Relaxed);
246        let prunes_orphan = self.prunes_orphan.load(Ordering::Relaxed);
247        let prunes_environment = self.prunes_environment.load(Ordering::Relaxed);
248        let empty_entries = self.empty_entries.load(Ordering::Relaxed);
249        let water_level = self.water_level.load(Ordering::Relaxed);
250        debug!(
251            "Loaded Programs Cache Stats -- Hits: {}, Misses: {}, Evictions: {}, Reloads: {}, Insertions: {}, Lost-Insertions: {}, Replacements: {}, One-Hit-Wonders: {}, Prunes-Orphan: {}, Prunes-Environment: {}, Empty: {}, Water-Level: {}",
252            hits, misses, evictions, reloads, insertions, lost_insertions, replacements, one_hit_wonders, prunes_orphan, prunes_environment, empty_entries, water_level
253        );
254        if log_enabled!(log::Level::Trace) && !self.evictions.is_empty() {
255            let mut evictions = self.evictions.iter().collect::<Vec<_>>();
256            evictions.sort_by_key(|e| e.1);
257            let evictions = evictions
258                .into_iter()
259                .rev()
260                .map(|(program_id, evictions)| {
261                    format!("  {:<44}  {}", program_id.to_string(), evictions)
262                })
263                .collect::<Vec<_>>();
264            let evictions = evictions.join("\n");
265            trace!(
266                "Eviction Details:\n  {:<44}  {}\n{}",
267                "Program",
268                "Count",
269                evictions
270            );
271        }
272    }
273}
274
275/// Time measurements for loading a single [ProgramCacheEntry].
276#[derive(Debug, Default)]
277pub struct LoadProgramMetrics {
278    /// Program address, but as text
279    pub program_id: String,
280    /// Microseconds it took to `create_program_runtime_environment`
281    pub register_syscalls_us: u64,
282    /// Microseconds it took to `Executable::<InvokeContext>::load`
283    pub load_elf_us: u64,
284    /// Microseconds it took to `executable.verify::<RequisiteVerifier>`
285    pub verify_code_us: u64,
286    /// Microseconds it took to `executable.jit_compile`
287    pub jit_compile_us: u64,
288}
289
290impl LoadProgramMetrics {
291    pub fn submit_datapoint(&self, timings: &mut ExecuteDetailsTimings) {
292        saturating_add_assign!(
293            timings.create_executor_register_syscalls_us,
294            self.register_syscalls_us
295        );
296        saturating_add_assign!(timings.create_executor_load_elf_us, self.load_elf_us);
297        saturating_add_assign!(timings.create_executor_verify_code_us, self.verify_code_us);
298        saturating_add_assign!(timings.create_executor_jit_compile_us, self.jit_compile_us);
299        datapoint_trace!(
300            "create_executor_trace",
301            ("program_id", self.program_id, String),
302            ("register_syscalls_us", self.register_syscalls_us, i64),
303            ("load_elf_us", self.load_elf_us, i64),
304            ("verify_code_us", self.verify_code_us, i64),
305            ("jit_compile_us", self.jit_compile_us, i64),
306        );
307    }
308}
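
A sketch of how a loader might fill these metrics, following the `Measure` pattern used in `new_internal()` below; `ExecuteDetailsTimings::default()` and `Pubkey::new_unique()` are assumed to be available:

```rust
let program_id = Pubkey::new_unique();
let mut metrics = LoadProgramMetrics {
    program_id: program_id.to_string(),
    ..LoadProgramMetrics::default()
};

// Time one of the load steps, mirroring new_internal().
let load_elf_time = Measure::start("load_elf_time");
// ... Executable::load(elf_bytes, environment) would run here ...
metrics.load_elf_us = load_elf_time.end_as_us();

// Accumulate into the per-transaction timings and emit the trace datapoint.
let mut timings = ExecuteDetailsTimings::default();
metrics.submit_datapoint(&mut timings);
```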
309
310impl PartialEq for ProgramCacheEntry {
311    fn eq(&self, other: &Self) -> bool {
312        self.effective_slot == other.effective_slot
313            && self.deployment_slot == other.deployment_slot
314            && self.is_tombstone() == other.is_tombstone()
315    }
316}
317
318impl ProgramCacheEntry {
319    /// Creates a new user program
320    pub fn new(
321        loader_key: &Pubkey,
322        program_runtime_environment: ProgramRuntimeEnvironment,
323        deployment_slot: Slot,
324        effective_slot: Slot,
325        elf_bytes: &[u8],
326        account_size: usize,
327        metrics: &mut LoadProgramMetrics,
328    ) -> Result<Self, Box<dyn std::error::Error>> {
329        Self::new_internal(
330            loader_key,
331            program_runtime_environment,
332            deployment_slot,
333            effective_slot,
334            elf_bytes,
335            account_size,
336            metrics,
337            false, /* reloading */
338        )
339    }
340
341    /// Reloads a user program, *without* running the verifier.
342    ///
343    /// # Safety
344    ///
345    /// This method is unsafe since it assumes that the program has already been verified. Should
346    /// only be called when the program was previously verified and loaded in the cache, but was
347    /// unloaded due to inactivity. It should also be checked that the `program_runtime_environment`
348    /// hasn't changed since it was unloaded.
349    pub unsafe fn reload(
350        loader_key: &Pubkey,
351        program_runtime_environment: Arc<BuiltinProgram<InvokeContext<'static>>>,
352        deployment_slot: Slot,
353        effective_slot: Slot,
354        elf_bytes: &[u8],
355        account_size: usize,
356        metrics: &mut LoadProgramMetrics,
357    ) -> Result<Self, Box<dyn std::error::Error>> {
358        Self::new_internal(
359            loader_key,
360            program_runtime_environment,
361            deployment_slot,
362            effective_slot,
363            elf_bytes,
364            account_size,
365            metrics,
366            true, /* reloading */
367        )
368    }
369
370    fn new_internal(
371        loader_key: &Pubkey,
372        program_runtime_environment: Arc<BuiltinProgram<InvokeContext<'static>>>,
373        deployment_slot: Slot,
374        effective_slot: Slot,
375        elf_bytes: &[u8],
376        account_size: usize,
377        metrics: &mut LoadProgramMetrics,
378        reloading: bool,
379    ) -> Result<Self, Box<dyn std::error::Error>> {
380        let load_elf_time = Measure::start("load_elf_time");
381        // The following unused_mut exception is needed for architectures that do not
382        // support JIT compilation.
383        #[allow(unused_mut)]
384        let mut executable = Executable::load(elf_bytes, program_runtime_environment.clone())?;
385        metrics.load_elf_us = load_elf_time.end_as_us();
386
387        if !reloading {
388            let verify_code_time = Measure::start("verify_code_time");
389            executable.verify::<RequisiteVerifier>()?;
390            metrics.verify_code_us = verify_code_time.end_as_us();
391        }
392
393        #[cfg(all(not(target_os = "windows"), target_arch = "x86_64"))]
394        {
395            let jit_compile_time = Measure::start("jit_compile_time");
396            executable.jit_compile()?;
397            metrics.jit_compile_us = jit_compile_time.end_as_us();
398        }
399
400        Ok(Self {
401            deployment_slot,
402            account_owner: ProgramCacheEntryOwner::try_from(loader_key).unwrap(),
403            account_size,
404            effective_slot,
405            tx_usage_counter: AtomicU64::new(0),
406            program: ProgramCacheEntryType::Loaded(executable),
407            ix_usage_counter: AtomicU64::new(0),
408            latest_access_slot: AtomicU64::new(0),
409        })
410    }
411
412    pub fn to_unloaded(&self) -> Option<Self> {
413        match &self.program {
414            ProgramCacheEntryType::Loaded(_) => {}
415            ProgramCacheEntryType::FailedVerification(_)
416            | ProgramCacheEntryType::Closed
417            | ProgramCacheEntryType::DelayVisibility
418            | ProgramCacheEntryType::Unloaded(_)
419            | ProgramCacheEntryType::Builtin(_) => {
420                return None;
421            }
422        }
423        Some(Self {
424            program: ProgramCacheEntryType::Unloaded(self.program.get_environment()?.clone()),
425            account_owner: self.account_owner,
426            account_size: self.account_size,
427            deployment_slot: self.deployment_slot,
428            effective_slot: self.effective_slot,
429            tx_usage_counter: AtomicU64::new(self.tx_usage_counter.load(Ordering::Relaxed)),
430            ix_usage_counter: AtomicU64::new(self.ix_usage_counter.load(Ordering::Relaxed)),
431            latest_access_slot: AtomicU64::new(self.latest_access_slot.load(Ordering::Relaxed)),
432        })
433    }
434
435    /// Creates a new built-in program
436    pub fn new_builtin(
437        deployment_slot: Slot,
438        account_size: usize,
439        builtin_function: BuiltinFunctionWithContext,
440    ) -> Self {
441        let mut function_registry = FunctionRegistry::default();
442        function_registry
443            .register_function_hashed(*b"entrypoint", builtin_function)
444            .unwrap();
445        Self {
446            deployment_slot,
447            account_owner: ProgramCacheEntryOwner::NativeLoader,
448            account_size,
449            effective_slot: deployment_slot,
450            tx_usage_counter: AtomicU64::new(0),
451            program: ProgramCacheEntryType::Builtin(BuiltinProgram::new_builtin(function_registry)),
452            ix_usage_counter: AtomicU64::new(0),
453            latest_access_slot: AtomicU64::new(0),
454        }
455    }
456
457    pub fn new_tombstone(
458        slot: Slot,
459        account_owner: ProgramCacheEntryOwner,
460        reason: ProgramCacheEntryType,
461    ) -> Self {
462        let tombstone = Self {
463            program: reason,
464            account_owner,
465            account_size: 0,
466            deployment_slot: slot,
467            effective_slot: slot,
468            tx_usage_counter: AtomicU64::default(),
469            ix_usage_counter: AtomicU64::default(),
470            latest_access_slot: AtomicU64::new(0),
471        };
472        debug_assert!(tombstone.is_tombstone());
473        tombstone
474    }
475
476    pub fn is_tombstone(&self) -> bool {
477        matches!(
478            self.program,
479            ProgramCacheEntryType::FailedVerification(_)
480                | ProgramCacheEntryType::Closed
481                | ProgramCacheEntryType::DelayVisibility
482        )
483    }
484
485    fn is_implicit_delay_visibility_tombstone(&self, slot: Slot) -> bool {
486        !matches!(self.program, ProgramCacheEntryType::Builtin(_))
487            && self.effective_slot.saturating_sub(self.deployment_slot)
488                == DELAY_VISIBILITY_SLOT_OFFSET
489            && slot >= self.deployment_slot
490            && slot < self.effective_slot
491    }
492
493    pub fn update_access_slot(&self, slot: Slot) {
494        let _ = self.latest_access_slot.fetch_max(slot, Ordering::Relaxed);
495    }
496
497    pub fn decayed_usage_counter(&self, now: Slot) -> u64 {
498        let last_access = self.latest_access_slot.load(Ordering::Relaxed);
499        // Shifting the u64 value by more than 63 bits would cause an overflow.
500        let decaying_for = std::cmp::min(63, now.saturating_sub(last_access));
501        self.tx_usage_counter.load(Ordering::Relaxed) >> decaying_for
502    }
503
504    pub fn account_owner(&self) -> Pubkey {
505        self.account_owner.into()
506    }
507}
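
The usage-counter decay halves the counter once per slot since the last access (implemented as a right shift in `decayed_usage_counter()` above). A small worked example, assuming the items of this module are in scope:

```rust
use std::sync::atomic::Ordering;

// The default entry is a Closed tombstone; only the counters matter here.
let entry = ProgramCacheEntry::default();
entry.tx_usage_counter.store(1024, Ordering::Relaxed);
entry.update_access_slot(100);

// No decay within the slot of the last access.
assert_eq!(entry.decayed_usage_counter(100), 1024);
// Three idle slots halve the counter three times: 1024 >> 3 == 128.
assert_eq!(entry.decayed_usage_counter(103), 128);
```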
508
509/// Globally shared RBPF config and syscall registry
510///
511/// This is only valid in an epoch range as long as no feature affecting RBPF is activated.
512#[derive(Clone, Debug)]
513pub struct ProgramRuntimeEnvironments {
514    /// For program runtime V1
515    pub program_runtime_v1: ProgramRuntimeEnvironment,
516    /// For program runtime V2
517    pub program_runtime_v2: ProgramRuntimeEnvironment,
518}
519
520impl Default for ProgramRuntimeEnvironments {
521    fn default() -> Self {
522        let empty_loader = Arc::new(BuiltinProgram::new_loader(
523            Config::default(),
524            FunctionRegistry::default(),
525        ));
526        Self {
527            program_runtime_v1: empty_loader.clone(),
528            program_runtime_v2: empty_loader,
529        }
530    }
531}
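
Environments are compared by `Arc` pointer identity throughout the cache (see `matches_environment()` below), so the default set, which shares one empty loader, behaves as a single environment:

```rust
let environments = ProgramRuntimeEnvironments::default();

// Both runtime versions point at the same empty loader.
assert!(Arc::ptr_eq(
    &environments.program_runtime_v1,
    &environments.program_runtime_v2
));

// Cloning the struct clones the Arcs, so pointer identity is preserved.
let cloned = environments.clone();
assert!(Arc::ptr_eq(
    &environments.program_runtime_v1,
    &cloned.program_runtime_v1
));
```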
532
533#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
534pub struct LoadingTaskCookie(u64);
535
536impl LoadingTaskCookie {
537    fn new() -> Self {
538        Self(0)
539    }
540
541    fn update(&mut self) {
542        let LoadingTaskCookie(cookie) = self;
543        *cookie = cookie.wrapping_add(1);
544    }
545}
546
547/// Suspends the thread in case no cooperative loading task was assigned
548#[derive(Debug, Default)]
549pub struct LoadingTaskWaiter {
550    cookie: Mutex<LoadingTaskCookie>,
551    cond: Condvar,
552}
553
554impl LoadingTaskWaiter {
555    pub fn new() -> Self {
556        Self {
557            cookie: Mutex::new(LoadingTaskCookie::new()),
558            cond: Condvar::new(),
559        }
560    }
561
562    pub fn cookie(&self) -> LoadingTaskCookie {
563        *self.cookie.lock().unwrap()
564    }
565
566    pub fn notify(&self) {
567        let mut cookie = self.cookie.lock().unwrap();
568        cookie.update();
569        self.cond.notify_all();
570    }
571
572    pub fn wait(&self, cookie: LoadingTaskCookie) -> LoadingTaskCookie {
573        let cookie_guard = self.cookie.lock().unwrap();
574        *self
575            .cond
576            .wait_while(cookie_guard, |current_cookie| *current_cookie == cookie)
577            .unwrap()
578    }
579}
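
A minimal two-thread sketch of the waiter protocol: a waiter samples the cookie, a loader finishes its task and calls `notify()`, and the waiter returns once the cookie has changed. Standard `std::thread` is used here for brevity:

```rust
let waiter = Arc::new(LoadingTaskWaiter::new());
let observed_cookie = waiter.cookie();

let loader_thread = {
    let waiter = Arc::clone(&waiter);
    std::thread::spawn(move || {
        // ... cooperative loading work would happen here ...
        waiter.notify(); // bumps the cookie and wakes all waiters
    })
};

// Blocks until the cookie differs from the one we sampled.
let new_cookie = waiter.wait(observed_cookie);
assert_ne!(new_cookie, observed_cookie);
loader_thread.join().unwrap();
```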
580
581#[derive(Debug)]
582enum IndexImplementation {
583    /// Fork-graph aware index implementation
584    V1 {
585        /// A two level index:
586        ///
587        /// - the first level is for the address at which programs are deployed
588        /// - the second level for the slot (and thus also fork), sorted by slot number.
589        entries: HashMap<Pubkey, Vec<Arc<ProgramCacheEntry>>>,
590        /// The entries that are getting loaded and have not yet finished loading.
591        ///
592        /// The key is the program address, the value is a tuple of the slot in which the program is
593        /// being loaded and the thread ID doing the load.
594        ///
595        /// It is possible that multiple TX batches from different slots need different versions of a
596        /// program. The deployment slot of a program is only known after load though,
597        /// so all loads for a given program key are serialized.
598        loading_entries: Mutex<HashMap<Pubkey, (Slot, thread::ThreadId)>>,
599    },
600}
601
602/// This structure is the global cache of loaded, verified and compiled programs.
603///
604/// It ...
605/// - is validator global and fork graph aware, so it can optimize the commonalities across banks.
606/// - handles the visibility rules of un/re/deployments.
607/// - stores the usage statistics and verification status of each program.
608/// - is elastic and uses a probabilistic eviction strategy based on the usage statistics.
609/// - also keeps the compiled executables around, but only for the most used programs.
610/// - supports various kinds of tombstones to avoid loading programs which cannot be loaded.
611/// - cleans up entries on orphan branches when the block store is rerooted.
612/// - supports the cache preparation phase before feature activations which can change cached programs.
613/// - manages the environments of the programs and upcoming environments for the next epoch.
614/// - allows for cooperative loading of TX batches which hit the same missing programs simultaneously.
615/// - enforces that all programs used in a batch are eagerly loaded ahead of execution.
616/// - is not persisted to disk or a snapshot, so it needs to cold start and warm up first.
617pub struct ProgramCache<FG: ForkGraph> {
618    /// Index of the cached entries and cooperative loading tasks
619    index: IndexImplementation,
620    /// The slot of the last rerooting
621    pub latest_root_slot: Slot,
622    /// The epoch of the last rerooting
623    pub latest_root_epoch: Epoch,
624    /// Environments of the current epoch
625    pub environments: ProgramRuntimeEnvironments,
626    /// Anticipated replacement for `environments` at the next epoch
627    ///
628    /// This is `None` during most of an epoch, and only `Some` around the boundaries (at the end and beginning of an epoch).
629    /// More precisely, it starts with the cache preparation phase a few hundred slots before the epoch boundary,
630    /// and it ends with the first rerooting after the epoch boundary.
631    pub upcoming_environments: Option<ProgramRuntimeEnvironments>,
632    /// List of loaded programs which should be recompiled before the next epoch (but don't have to).
633    pub programs_to_recompile: Vec<(Pubkey, Arc<ProgramCacheEntry>)>,
634    /// Statistics counters
635    pub stats: ProgramCacheStats,
636    /// Reference to the block store
637    pub fork_graph: Option<Weak<RwLock<FG>>>,
638    /// Coordinates TX batches waiting for others to complete their task during cooperative loading
639    pub loading_task_waiter: Arc<LoadingTaskWaiter>,
640}
641
642impl<FG: ForkGraph> Debug for ProgramCache<FG> {
643    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
644        f.debug_struct("ProgramCache")
645            .field("root slot", &self.latest_root_slot)
646            .field("root epoch", &self.latest_root_epoch)
647            .field("stats", &self.stats)
648            .field("index", &self.index)
649            .finish()
650    }
651}
652
653/// Local view into [ProgramCache] which was extracted for a specific TX batch.
654///
655/// This isolation enables the global [ProgramCache] to continue to evolve (e.g. evictions),
656/// while the TX batch is guaranteed it will continue to find all the programs it requires.
657/// For program management instructions this also buffers them before they are merged back into the global [ProgramCache].
658#[derive(Clone, Debug, Default)]
659pub struct ProgramCacheForTxBatch {
660    /// Pubkey is the address of a program.
661    /// ProgramCacheEntry is the corresponding program entry valid for the slot in which a transaction is being executed.
662    entries: HashMap<Pubkey, Arc<ProgramCacheEntry>>,
663    /// Program entries modified during the transaction batch.
664    modified_entries: HashMap<Pubkey, Arc<ProgramCacheEntry>>,
665    slot: Slot,
666    pub environments: ProgramRuntimeEnvironments,
667    /// Anticipated replacement for `environments` at the next epoch.
668    ///
669    /// This is `None` during most of an epoch, and only `Some` around the boundaries (at the end and beginning of an epoch).
670    /// More precisely, it starts with the cache preparation phase a few hundred slots before the epoch boundary,
671    /// and it ends with the first rerooting after the epoch boundary.
672    /// Needed when a program is deployed in the last slot of an epoch and becomes effective in the
673    /// next epoch, so it needs to be compiled with the environment for the next epoch.
674    pub upcoming_environments: Option<ProgramRuntimeEnvironments>,
675    /// The epoch of the last rerooting
676    pub latest_root_epoch: Epoch,
677    pub hit_max_limit: bool,
678    pub loaded_missing: bool,
679    pub merged_modified: bool,
680}
681
682impl ProgramCacheForTxBatch {
683    pub fn new(
684        slot: Slot,
685        environments: ProgramRuntimeEnvironments,
686        upcoming_environments: Option<ProgramRuntimeEnvironments>,
687        latest_root_epoch: Epoch,
688    ) -> Self {
689        Self {
690            entries: HashMap::new(),
691            modified_entries: HashMap::new(),
692            slot,
693            environments,
694            upcoming_environments,
695            latest_root_epoch,
696            hit_max_limit: false,
697            loaded_missing: false,
698            merged_modified: false,
699        }
700    }
701
702    pub fn new_from_cache<FG: ForkGraph>(
703        slot: Slot,
704        epoch: Epoch,
705        cache: &ProgramCache<FG>,
706    ) -> Self {
707        Self {
708            entries: HashMap::new(),
709            modified_entries: HashMap::new(),
710            slot,
711            environments: cache.get_environments_for_epoch(epoch),
712            upcoming_environments: cache.get_upcoming_environments_for_epoch(epoch),
713            latest_root_epoch: cache.latest_root_epoch,
714            hit_max_limit: false,
715            loaded_missing: false,
716            merged_modified: false,
717        }
718    }
719
720    /// Returns the current environments depending on the given epoch
721    pub fn get_environments_for_epoch(&self, epoch: Epoch) -> &ProgramRuntimeEnvironments {
722        if epoch != self.latest_root_epoch {
723            if let Some(upcoming_environments) = self.upcoming_environments.as_ref() {
724                return upcoming_environments;
725            }
726        }
727        &self.environments
728    }
729
730    /// Refill the cache with a single entry. It's typically called during transaction loading, and
731    /// transaction processing (for program management instructions).
732    /// It replaces the existing entry (if any) with the provided entry. The return value contains
733    /// `true` if an entry existed.
734    /// The function also returns the newly inserted value.
735    pub fn replenish(
736        &mut self,
737        key: Pubkey,
738        entry: Arc<ProgramCacheEntry>,
739    ) -> (bool, Arc<ProgramCacheEntry>) {
740        (self.entries.insert(key, entry.clone()).is_some(), entry)
741    }
742
743    /// Store an entry in `modified_entries` for a program modified during the
744    /// transaction batch.
745    pub fn store_modified_entry(&mut self, key: Pubkey, entry: Arc<ProgramCacheEntry>) {
746        self.modified_entries.insert(key, entry);
747    }
748
749    /// Drain the program cache's modified entries, returning the owned
750    /// collection.
751    pub fn drain_modified_entries(&mut self) -> HashMap<Pubkey, Arc<ProgramCacheEntry>> {
752        std::mem::take(&mut self.modified_entries)
753    }
754
755    pub fn find(&self, key: &Pubkey) -> Option<Arc<ProgramCacheEntry>> {
756        // First look up the cache of the programs modified by the current
757        // transaction. If not found, look up the cache of the programs that
758        // are loaded for the transaction batch.
759        self.modified_entries
760            .get(key)
761            .or(self.entries.get(key))
762            .map(|entry| {
763                if entry.is_implicit_delay_visibility_tombstone(self.slot) {
764                    // Found a program entry on the current fork, but it's not effective
765                    // yet. It indicates that the program has delayed visibility. Return
766                    // the tombstone to reflect that.
767                    Arc::new(ProgramCacheEntry::new_tombstone(
768                        entry.deployment_slot,
769                        entry.account_owner,
770                        ProgramCacheEntryType::DelayVisibility,
771                    ))
772                } else {
773                    entry.clone()
774                }
775            })
776    }
777
778    pub fn slot(&self) -> Slot {
779        self.slot
780    }
781
782    pub fn set_slot_for_tests(&mut self, slot: Slot) {
783        self.slot = slot;
784    }
785
786    pub fn merge(&mut self, modified_entries: &HashMap<Pubkey, Arc<ProgramCacheEntry>>) {
787        modified_entries.iter().for_each(|(key, entry)| {
788            self.merged_modified = true;
789            self.replenish(*key, entry.clone());
790        })
791    }
792
793    pub fn is_empty(&self) -> bool {
794        self.entries.is_empty()
795    }
796}
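
A small usage sketch for a per-batch cache: insert an entry with `replenish()` and look it up with `find()`. A `Closed` tombstone is used because it can be constructed without compiling an executable; the slot and epoch values are arbitrary:

```rust
let mut tx_batch_cache = ProgramCacheForTxBatch::new(
    20,                                    // slot the batch executes in
    ProgramRuntimeEnvironments::default(), // current environments
    None,                                  // no upcoming environments
    0,                                     // latest root epoch
);

let program_id = Pubkey::new_unique();
let entry = Arc::new(ProgramCacheEntry::new_tombstone(
    20,
    ProgramCacheEntryOwner::LoaderV3,
    ProgramCacheEntryType::Closed,
));

let (was_occupied, _) = tx_batch_cache.replenish(program_id, entry);
assert!(!was_occupied);
assert!(tx_batch_cache.find(&program_id).unwrap().is_tombstone());
```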
797
798pub enum ProgramCacheMatchCriteria {
799    DeployedOnOrAfterSlot(Slot),
800    Tombstone,
801    NoCriteria,
802}
803
804impl<FG: ForkGraph> ProgramCache<FG> {
805    pub fn new(root_slot: Slot, root_epoch: Epoch) -> Self {
806        Self {
807            index: IndexImplementation::V1 {
808                entries: HashMap::new(),
809                loading_entries: Mutex::new(HashMap::new()),
810            },
811            latest_root_slot: root_slot,
812            latest_root_epoch: root_epoch,
813            environments: ProgramRuntimeEnvironments::default(),
814            upcoming_environments: None,
815            programs_to_recompile: Vec::default(),
816            stats: ProgramCacheStats::default(),
817            fork_graph: None,
818            loading_task_waiter: Arc::new(LoadingTaskWaiter::default()),
819        }
820    }
821
822    pub fn set_fork_graph(&mut self, fork_graph: Weak<RwLock<FG>>) {
823        self.fork_graph = Some(fork_graph);
824    }
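
Wiring a cache to its fork graph, as a minimal sketch using the std-backed sync types this module re-exports: the cache is created with the current root, and only a `Weak` reference to the graph is handed over, so the caller keeps ownership. `SingleForkGraph` is a hypothetical stand-in:

```rust
// Hypothetical fork graph where all slots lie on one fork.
struct SingleForkGraph;
impl ForkGraph for SingleForkGraph {
    fn relationship(&self, a: Slot, b: Slot) -> BlockRelation {
        match a.cmp(&b) {
            std::cmp::Ordering::Less => BlockRelation::Ancestor,
            std::cmp::Ordering::Equal => BlockRelation::Equal,
            std::cmp::Ordering::Greater => BlockRelation::Descendant,
        }
    }
}

let fork_graph = Arc::new(RwLock::new(SingleForkGraph));
let mut cache = ProgramCache::<SingleForkGraph>::new(/*root_slot*/ 0, /*root_epoch*/ 0);
cache.set_fork_graph(Arc::downgrade(&fork_graph));
```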
825
826    /// Returns the current environments depending on the given epoch
827    pub fn get_environments_for_epoch(&self, epoch: Epoch) -> ProgramRuntimeEnvironments {
828        if epoch != self.latest_root_epoch {
829            if let Some(upcoming_environments) = self.upcoming_environments.as_ref() {
830                return upcoming_environments.clone();
831            }
832        }
833        self.environments.clone()
834    }
835
836    /// Returns the upcoming environments depending on the given epoch
837    pub fn get_upcoming_environments_for_epoch(
838        &self,
839        epoch: Epoch,
840    ) -> Option<ProgramRuntimeEnvironments> {
841        if epoch == self.latest_root_epoch {
842            return self.upcoming_environments.clone();
843        }
844        None
845    }
846
847    /// Insert a single entry. It's typically called during transaction loading,
848    /// when the cache doesn't contain the entry corresponding to program `key`.
849    pub fn assign_program(&mut self, key: Pubkey, entry: Arc<ProgramCacheEntry>) -> bool {
850        debug_assert!(!matches!(
851            &entry.program,
852            ProgramCacheEntryType::DelayVisibility
853        ));
854        // This function always returns `true` during normal operation.
855        // Only during the cache preparation phase can this return `false`
856        // for entries with `upcoming_environments`.
857        fn is_current_env(
858            environments: &ProgramRuntimeEnvironments,
859            env_opt: Option<&ProgramRuntimeEnvironment>,
860        ) -> bool {
861            env_opt
862                .map(|env| {
863                    Arc::ptr_eq(env, &environments.program_runtime_v1)
864                        || Arc::ptr_eq(env, &environments.program_runtime_v2)
865                })
866                .unwrap_or(true)
867        }
868        match &mut self.index {
869            IndexImplementation::V1 { entries, .. } => {
870                let slot_versions = &mut entries.entry(key).or_default();
871                match slot_versions.binary_search_by(|at| {
872                    at.effective_slot
873                        .cmp(&entry.effective_slot)
874                        .then(at.deployment_slot.cmp(&entry.deployment_slot))
875                        .then(
876                            // This `.then()` has no effect during normal operation.
877                            // Only during the cache preparation phase does this allow entries
878                            // which only differ in their environment to be interleaved in `slot_versions`.
879                            is_current_env(&self.environments, at.program.get_environment()).cmp(
880                                &is_current_env(
881                                    &self.environments,
882                                    entry.program.get_environment(),
883                                ),
884                            ),
885                        )
886                }) {
887                    Ok(index) => {
888                        let existing = slot_versions.get_mut(index).unwrap();
889                        match (&existing.program, &entry.program) {
890                            (
891                                ProgramCacheEntryType::Builtin(_),
892                                ProgramCacheEntryType::Builtin(_),
893                            )
894                            | (
895                                ProgramCacheEntryType::Unloaded(_),
896                                ProgramCacheEntryType::Loaded(_),
897                            ) => {}
898                            _ => {
899                                // Something is wrong, I can feel it ...
900                                error!("ProgramCache::assign_program() failed key={:?} existing={:?} entry={:?}", key, slot_versions, entry);
901                                debug_assert!(false, "Unexpected replacement of an entry");
902                                self.stats.replacements.fetch_add(1, Ordering::Relaxed);
903                                return true;
904                            }
905                        }
906                        // Copy over the usage counter to the new entry
907                        entry.tx_usage_counter.fetch_add(
908                            existing.tx_usage_counter.load(Ordering::Relaxed),
909                            Ordering::Relaxed,
910                        );
911                        entry.ix_usage_counter.fetch_add(
912                            existing.ix_usage_counter.load(Ordering::Relaxed),
913                            Ordering::Relaxed,
914                        );
915                        *existing = Arc::clone(&entry);
916                        self.stats.reloads.fetch_add(1, Ordering::Relaxed);
917                    }
918                    Err(index) => {
919                        self.stats.insertions.fetch_add(1, Ordering::Relaxed);
920                        slot_versions.insert(index, Arc::clone(&entry));
921                    }
922                }
923            }
924        }
925        false
926    }
927
928    pub fn prune_by_deployment_slot(&mut self, slot: Slot) {
929        match &mut self.index {
930            IndexImplementation::V1 { entries, .. } => {
931                for second_level in entries.values_mut() {
932                    second_level.retain(|entry| entry.deployment_slot != slot);
933                }
934                self.remove_programs_with_no_entries();
935            }
936        }
937    }
938
939    /// Before rerooting the blockstore this removes all superfluous entries
940    pub fn prune(&mut self, new_root_slot: Slot, new_root_epoch: Epoch) {
941        let Some(fork_graph) = self.fork_graph.clone() else {
942            error!("Program cache doesn't have fork graph.");
943            return;
944        };
945        let fork_graph = fork_graph.upgrade().unwrap();
946        let Ok(fork_graph) = fork_graph.read() else {
947            error!("Failed to lock fork graph for reading.");
948            return;
949        };
950        let mut preparation_phase_ends = false;
951        if self.latest_root_epoch != new_root_epoch {
952            self.latest_root_epoch = new_root_epoch;
953            if let Some(upcoming_environments) = self.upcoming_environments.take() {
954                preparation_phase_ends = true;
955                self.environments = upcoming_environments;
956                self.programs_to_recompile.clear();
957            }
958        }
959        match &mut self.index {
960            IndexImplementation::V1 { entries, .. } => {
961                for second_level in entries.values_mut() {
962                    // Remove entries un/re/deployed on orphan forks
963                    let mut first_ancestor_found = false;
964                    let mut first_ancestor_env = None;
965                    *second_level = second_level
966                        .iter()
967                        .rev()
968                        .filter(|entry| {
969                            let relation =
970                                fork_graph.relationship(entry.deployment_slot, new_root_slot);
971                            if entry.deployment_slot >= new_root_slot {
972                                matches!(relation, BlockRelation::Equal | BlockRelation::Descendant)
973                            } else if matches!(relation, BlockRelation::Ancestor)
974                                || entry.deployment_slot <= self.latest_root_slot
975                            {
976                                if !first_ancestor_found {
977                                    first_ancestor_found = true;
978                                    first_ancestor_env = entry.program.get_environment();
979                                    return true;
980                                }
981                                // Do not prune the entry if the runtime environment of the entry is different
982                                // than the entry that was previously found (stored in first_ancestor_env).
983                                // Different environment indicates that this entry might belong to an older
984                                // epoch that had a different environment (e.g. different feature set).
985                                // Once the root moves to the new/current epoch, the entry will get pruned.
986                                // But, until then the entry might still be getting used by an older slot.
987                                if let Some(entry_env) = entry.program.get_environment() {
988                                    if let Some(env) = first_ancestor_env {
989                                        if !Arc::ptr_eq(entry_env, env) {
990                                            return true;
991                                        }
992                                    }
993                                }
994                                self.stats.prunes_orphan.fetch_add(1, Ordering::Relaxed);
995                                false
996                            } else {
997                                self.stats.prunes_orphan.fetch_add(1, Ordering::Relaxed);
998                                false
999                            }
1000                        })
1001                        .filter(|entry| {
1002                            // Remove outdated environment of previous feature set
1003                            if preparation_phase_ends
1004                                && !Self::matches_environment(entry, &self.environments)
1005                            {
1006                                self.stats
1007                                    .prunes_environment
1008                                    .fetch_add(1, Ordering::Relaxed);
1009                                return false;
1010                            }
1011                            true
1012                        })
1013                        .cloned()
1014                        .collect();
1015                    second_level.reverse();
1016                }
1017            }
1018        }
1019        self.remove_programs_with_no_entries();
1020        debug_assert!(self.latest_root_slot <= new_root_slot);
1021        self.latest_root_slot = new_root_slot;
1022    }
1023
1024    fn matches_environment(
1025        entry: &Arc<ProgramCacheEntry>,
1026        environments: &ProgramRuntimeEnvironments,
1027    ) -> bool {
1028        let Some(environment) = entry.program.get_environment() else {
1029            return true;
1030        };
1031        Arc::ptr_eq(environment, &environments.program_runtime_v1)
1032            || Arc::ptr_eq(environment, &environments.program_runtime_v2)
1033    }
1034
1035    fn matches_criteria(
1036        program: &Arc<ProgramCacheEntry>,
1037        criteria: &ProgramCacheMatchCriteria,
1038    ) -> bool {
1039        match criteria {
1040            ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(slot) => {
1041                program.deployment_slot >= *slot
1042            }
1043            ProgramCacheMatchCriteria::Tombstone => program.is_tombstone(),
1044            ProgramCacheMatchCriteria::NoCriteria => true,
1045        }
1046    }
1047
1048    /// Extracts a subset of the programs relevant to a transaction batch
1049    /// and returns which program accounts the accounts DB needs to load.
1050    pub fn extract(
1051        &self,
1052        search_for: &mut Vec<(Pubkey, (ProgramCacheMatchCriteria, u64))>,
1053        loaded_programs_for_tx_batch: &mut ProgramCacheForTxBatch,
1054        is_first_round: bool,
1055    ) -> Option<(Pubkey, u64)> {
1056        debug_assert!(self.fork_graph.is_some());
1057        let fork_graph = self.fork_graph.as_ref().unwrap().upgrade().unwrap();
1058        let locked_fork_graph = fork_graph.read().unwrap();
1059        let mut cooperative_loading_task = None;
1060        match &self.index {
1061            IndexImplementation::V1 {
1062                entries,
1063                loading_entries,
1064            } => {
1065                search_for.retain(|(key, (match_criteria, usage_count))| {
1066                    if let Some(second_level) = entries.get(key) {
1067                        for entry in second_level.iter().rev() {
1068                            if entry.deployment_slot <= self.latest_root_slot
1069                                || matches!(
1070                                    locked_fork_graph.relationship(
1071                                        entry.deployment_slot,
1072                                        loaded_programs_for_tx_batch.slot
1073                                    ),
1074                                    BlockRelation::Equal | BlockRelation::Ancestor
1075                                )
1076                            {
1077                                let entry_to_return = if loaded_programs_for_tx_batch.slot
1078                                    >= entry.effective_slot
1079                                    && Self::matches_environment(
1080                                        entry,
1081                                        &loaded_programs_for_tx_batch.environments,
1082                                    ) {
1083                                    if !Self::matches_criteria(entry, match_criteria) {
1084                                        break;
1085                                    }
1086                                    if let ProgramCacheEntryType::Unloaded(_environment) =
1087                                        &entry.program
1088                                    {
1089                                        break;
1090                                    }
1091                                    entry.clone()
1092                                } else if entry.is_implicit_delay_visibility_tombstone(
1093                                    loaded_programs_for_tx_batch.slot,
1094                                ) {
1095                                    // Found a program entry on the current fork, but it's not effective
1096                                    // yet. It indicates that the program has delayed visibility. Return
1097                                    // the tombstone to reflect that.
1098                                    Arc::new(ProgramCacheEntry::new_tombstone(
1099                                        entry.deployment_slot,
1100                                        entry.account_owner,
1101                                        ProgramCacheEntryType::DelayVisibility,
1102                                    ))
1103                                } else {
1104                                    continue;
1105                                };
1106                                entry_to_return
1107                                    .update_access_slot(loaded_programs_for_tx_batch.slot);
1108                                entry_to_return
1109                                    .tx_usage_counter
1110                                    .fetch_add(*usage_count, Ordering::Relaxed);
1111                                loaded_programs_for_tx_batch
1112                                    .entries
1113                                    .insert(*key, entry_to_return);
1114                                return false;
1115                            }
1116                        }
1117                    }
1118                    if cooperative_loading_task.is_none() {
1119                        let mut loading_entries = loading_entries.lock().unwrap();
1120                        let entry = loading_entries.entry(*key);
1121                        if let Entry::Vacant(entry) = entry {
1122                            entry.insert((
1123                                loaded_programs_for_tx_batch.slot,
1124                                thread::current().id(),
1125                            ));
1126                            cooperative_loading_task = Some((*key, *usage_count));
1127                        }
1128                    }
1129                    true
1130                });
1131            }
1132        }
1133        drop(locked_fork_graph);
1134        if is_first_round {
1135            self.stats
1136                .misses
1137                .fetch_add(search_for.len() as u64, Ordering::Relaxed);
1138            self.stats.hits.fetch_add(
1139                loaded_programs_for_tx_batch.entries.len() as u64,
1140                Ordering::Relaxed,
1141            );
1142        }
1143        cooperative_loading_task
1144    }
1145
1146    /// Called by Bank::replenish_program_cache() for each program that is done loading.
1147    pub fn finish_cooperative_loading_task(
1148        &mut self,
1149        slot: Slot,
1150        key: Pubkey,
1151        loaded_program: Arc<ProgramCacheEntry>,
1152    ) -> bool {
1153        match &mut self.index {
1154            IndexImplementation::V1 {
1155                loading_entries, ..
1156            } => {
1157                let loading_thread = loading_entries.get_mut().unwrap().remove(&key);
1158                debug_assert_eq!(loading_thread, Some((slot, thread::current().id())));
1159                // Check that it will be visible to our own fork once inserted
1160                if loaded_program.deployment_slot > self.latest_root_slot
1161                    && !matches!(
1162                        self.fork_graph
1163                            .as_ref()
1164                            .unwrap()
1165                            .upgrade()
1166                            .unwrap()
1167                            .read()
1168                            .unwrap()
1169                            .relationship(loaded_program.deployment_slot, slot),
1170                        BlockRelation::Equal | BlockRelation::Ancestor
1171                    )
1172                {
1173                    self.stats.lost_insertions.fetch_add(1, Ordering::Relaxed);
1174                }
1175                let was_occupied = self.assign_program(key, loaded_program);
1176                self.loading_task_waiter.notify();
1177                was_occupied
1178            }
1179        }
1180    }
1181
1182    pub fn merge(&mut self, modified_entries: &HashMap<Pubkey, Arc<ProgramCacheEntry>>) {
1183        modified_entries.iter().for_each(|(key, entry)| {
1184            self.assign_program(*key, entry.clone());
1185        })
1186    }
1187
1188    /// Returns the list of entries which are verified and compiled.
1189    pub fn get_flattened_entries(
1190        &self,
1191        include_program_runtime_v1: bool,
1192        _include_program_runtime_v2: bool,
1193    ) -> Vec<(Pubkey, Arc<ProgramCacheEntry>)> {
1194        match &self.index {
1195            IndexImplementation::V1 { entries, .. } => entries
1196                .iter()
1197                .flat_map(|(id, second_level)| {
1198                    second_level
1199                        .iter()
1200                        .filter_map(move |program| match program.program {
1201                            ProgramCacheEntryType::Loaded(_) => {
1202                                if include_program_runtime_v1 {
1203                                    Some((*id, program.clone()))
1204                                } else {
1205                                    None
1206                                }
1207                            }
1208                            _ => None,
1209                        })
1210                })
1211                .collect(),
1212        }
1213    }
1214
1215    /// Returns the list of all entries in the cache.
1216    pub fn get_flattened_entries_for_tests(&self) -> Vec<(Pubkey, Arc<ProgramCacheEntry>)> {
1217        match &self.index {
1218            IndexImplementation::V1 { entries, .. } => entries
1219                .iter()
1220                .flat_map(|(id, second_level)| {
1221                    second_level.iter().map(|program| (*id, program.clone()))
1222                })
1223                .collect(),
1224        }
1225    }
1226
1227    /// Returns the slot versions for the given program id.
1228    pub fn get_slot_versions_for_tests(&self, key: &Pubkey) -> &[Arc<ProgramCacheEntry>] {
1229        match &self.index {
1230            IndexImplementation::V1 { entries, .. } => entries
1231                .get(key)
1232                .map(|second_level| second_level.as_ref())
1233                .unwrap_or(&[]),
1234        }
1235    }
1236
1237    /// Unloads programs which were used infrequently
1238    pub fn sort_and_unload(&mut self, shrink_to: PercentageInteger) {
1239        let mut sorted_candidates = self.get_flattened_entries(true, true);
1240        sorted_candidates
1241            .sort_by_cached_key(|(_id, program)| program.tx_usage_counter.load(Ordering::Relaxed));
1242        let num_to_unload = sorted_candidates
1243            .len()
1244            .saturating_sub(shrink_to.apply_to(MAX_LOADED_ENTRY_COUNT));
1245        self.unload_program_entries(sorted_candidates.iter().take(num_to_unload));
1246    }
1247
1248    /// Evicts programs using 2's random selection, choosing the least used program out of the two entries.
1249    /// The eviction is performed enough times to reduce the cache usage to the given percentage.
1250    pub fn evict_using_2s_random_selection(&mut self, shrink_to: PercentageInteger, now: Slot) {
1251        let mut candidates = self.get_flattened_entries(true, true);
1252        self.stats
1253            .water_level
1254            .store(candidates.len() as u64, Ordering::Relaxed);
1255        let num_to_unload = candidates
1256            .len()
1257            .saturating_sub(shrink_to.apply_to(MAX_LOADED_ENTRY_COUNT));
1258        fn random_index_and_usage_counter(
1259            candidates: &[(Pubkey, Arc<ProgramCacheEntry>)],
1260            now: Slot,
1261        ) -> (usize, u64) {
1262            let mut rng = thread_rng();
1263            let index = rng.gen_range(0..candidates.len());
1264            let usage_counter = candidates
1265                .get(index)
1266                .expect("Failed to get cached entry")
1267                .1
1268                .decayed_usage_counter(now);
1269            (index, usage_counter)
1270        }
1271
1272        for _ in 0..num_to_unload {
1273            let (index1, usage_counter1) = random_index_and_usage_counter(&candidates, now);
1274            let (index2, usage_counter2) = random_index_and_usage_counter(&candidates, now);
1275
1276            let (program, entry) = if usage_counter1 < usage_counter2 {
1277                candidates.swap_remove(index1)
1278            } else {
1279                candidates.swap_remove(index2)
1280            };
1281            self.unload_program_entry(&program, &entry);
1282        }
1283    }
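
    // Usage sketch (illustrative only): a holder of a `&mut ProgramCache` can shrink the cache
    // so that at most 90% of `MAX_LOADED_ENTRY_COUNT` loaded entries remain with either strategy:
    //     cache.sort_and_unload(Percentage::from(90));
    //     cache.evict_using_2s_random_selection(Percentage::from(90), current_slot);
    // where `current_slot` stands for whatever slot the caller treats as "now" for usage decay.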
1284
1285    /// Removes all the entries at the given keys, if they exist
1286    pub fn remove_programs(&mut self, keys: impl Iterator<Item = Pubkey>) {
1287        match &mut self.index {
1288            IndexImplementation::V1 { entries, .. } => {
1289                for k in keys {
1290                    entries.remove(&k);
1291                }
1292            }
1293        }
1294    }
1295
1296    /// Removes the given entry for the given program from the cache.
1297    /// The function expects that both the program and the entry exist in the cache; otherwise it panics.
1298    fn unload_program_entry(&mut self, program: &Pubkey, remove_entry: &Arc<ProgramCacheEntry>) {
1299        match &mut self.index {
1300            IndexImplementation::V1 { entries, .. } => {
1301                let second_level = entries.get_mut(program).expect("Cache lookup failed");
1302                let candidate = second_level
1303                    .iter_mut()
1304                    .find(|entry| entry == &remove_entry)
1305                    .expect("Program entry not found");
1306
1307                // Certain entry types cannot be unloaded, such as tombstones, or already unloaded entries.
1308                // For such entries, `to_unloaded()` will return None.
1309                // These entry types do not occupy much memory.
1310                if let Some(unloaded) = candidate.to_unloaded() {
1311                    if candidate.tx_usage_counter.load(Ordering::Relaxed) == 1 {
1312                        self.stats.one_hit_wonders.fetch_add(1, Ordering::Relaxed);
1313                    }
1314                    self.stats
1315                        .evictions
1316                        .entry(*program)
1317                        .and_modify(|c| saturating_add_assign!(*c, 1))
1318                        .or_insert(1);
1319                    *candidate = Arc::new(unloaded);
1320                }
1321            }
1322        }
1323    }
1324
1325    fn unload_program_entries<'a>(
1326        &mut self,
1327        remove: impl Iterator<Item = &'a (Pubkey, Arc<ProgramCacheEntry>)>,
1328    ) {
1329        for (program, entry) in remove {
1330            self.unload_program_entry(program, entry);
1331        }
1332    }
1333
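    /// Removes top-level entries whose list of slot versions has become empty and
    /// adds the number of removed programs to `stats.empty_entries`.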
1334    fn remove_programs_with_no_entries(&mut self) {
1335        match &mut self.index {
1336            IndexImplementation::V1 { entries, .. } => {
1337                let num_programs_before_removal = entries.len();
1338                entries.retain(|_key, second_level| !second_level.is_empty());
1339                if entries.len() < num_programs_before_removal {
1340                    self.stats.empty_entries.fetch_add(
1341                        num_programs_before_removal.saturating_sub(entries.len()) as u64,
1342                        Ordering::Relaxed,
1343                    );
1344                }
1345            }
1346        }
1347    }
1348}
1349
1350#[cfg(feature = "frozen-abi")]
1351impl solana_frozen_abi::abi_example::AbiExample for ProgramCacheEntry {
1352    fn example() -> Self {
1353        // ProgramCacheEntry isn't serializable by definition.
1354        Self::default()
1355    }
1356}
1357
1358#[cfg(feature = "frozen-abi")]
1359impl<FG: ForkGraph> solana_frozen_abi::abi_example::AbiExample for ProgramCache<FG> {
1360    fn example() -> Self {
1361        // ProgramCache isn't serializable by definition.
1362        Self::new(Slot::default(), Epoch::default())
1363    }
1364}
1365
1366#[cfg(test)]
1367mod tests {
1368    use {
1369        crate::loaded_programs::{
1370            BlockRelation, ForkGraph, ProgramCache, ProgramCacheEntry, ProgramCacheEntryOwner,
1371            ProgramCacheEntryType, ProgramCacheForTxBatch, ProgramCacheMatchCriteria,
1372            ProgramRuntimeEnvironment, ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET,
1373        },
1374        assert_matches::assert_matches,
1375        percentage::Percentage,
1376        solana_rbpf::{elf::Executable, program::BuiltinProgram},
1377        solana_sdk::{clock::Slot, pubkey::Pubkey},
1378        std::{
1379            fs::File,
1380            io::Read,
1381            ops::ControlFlow,
1382            sync::{
1383                atomic::{AtomicU64, Ordering},
1384                Arc, RwLock,
1385            },
1386        },
1387        test_case::{test_case, test_matrix},
1388    };
1389
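    // A single shared mock runtime environment, so that entries created by the test helpers
    // use the same environment that `new_mock_cache` installs in `cache.environments`.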
1390    static MOCK_ENVIRONMENT: std::sync::OnceLock<ProgramRuntimeEnvironment> =
1391        std::sync::OnceLock::<ProgramRuntimeEnvironment>::new();
1392
1393    fn get_mock_env() -> ProgramRuntimeEnvironment {
1394        MOCK_ENVIRONMENT
1395            .get_or_init(|| Arc::new(BuiltinProgram::new_mock()))
1396            .clone()
1397    }
1398
1399    fn new_mock_cache<FG: ForkGraph>() -> ProgramCache<FG> {
1400        let mut cache = ProgramCache::new(0, 0);
1401        cache.environments.program_runtime_v1 = get_mock_env();
1402        cache
1403    }
1404
1405    fn new_test_entry(deployment_slot: Slot, effective_slot: Slot) -> Arc<ProgramCacheEntry> {
1406        new_test_entry_with_usage(deployment_slot, effective_slot, AtomicU64::default())
1407    }
1408
1409    fn new_loaded_entry(env: ProgramRuntimeEnvironment) -> ProgramCacheEntryType {
1410        let mut elf = Vec::new();
1411        File::open("../programs/bpf_loader/test_elfs/out/noop_aligned.so")
1412            .unwrap()
1413            .read_to_end(&mut elf)
1414            .unwrap();
1415        let executable = Executable::load(&elf, env).unwrap();
1416        ProgramCacheEntryType::Loaded(executable)
1417    }
1418
1419    fn new_test_entry_with_usage(
1420        deployment_slot: Slot,
1421        effective_slot: Slot,
1422        usage_counter: AtomicU64,
1423    ) -> Arc<ProgramCacheEntry> {
1424        Arc::new(ProgramCacheEntry {
1425            program: new_loaded_entry(get_mock_env()),
1426            account_owner: ProgramCacheEntryOwner::LoaderV2,
1427            account_size: 0,
1428            deployment_slot,
1429            effective_slot,
1430            tx_usage_counter: usage_counter,
1431            ix_usage_counter: AtomicU64::default(),
1432            latest_access_slot: AtomicU64::new(deployment_slot),
1433        })
1434    }
1435
1436    fn new_test_builtin_entry(
1437        deployment_slot: Slot,
1438        effective_slot: Slot,
1439    ) -> Arc<ProgramCacheEntry> {
1440        Arc::new(ProgramCacheEntry {
1441            program: ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock()),
1442            account_owner: ProgramCacheEntryOwner::NativeLoader,
1443            account_size: 0,
1444            deployment_slot,
1445            effective_slot,
1446            tx_usage_counter: AtomicU64::default(),
1447            ix_usage_counter: AtomicU64::default(),
1448            latest_access_slot: AtomicU64::default(),
1449        })
1450    }
1451
1452    fn set_tombstone<FG: ForkGraph>(
1453        cache: &mut ProgramCache<FG>,
1454        key: Pubkey,
1455        slot: Slot,
1456        reason: ProgramCacheEntryType,
1457    ) -> Arc<ProgramCacheEntry> {
1458        let program = Arc::new(ProgramCacheEntry::new_tombstone(
1459            slot,
1460            ProgramCacheEntryOwner::LoaderV2,
1461            reason,
1462        ));
1463        cache.assign_program(key, program.clone());
1464        program
1465    }
1466
1467    fn insert_unloaded_entry<FG: ForkGraph>(
1468        cache: &mut ProgramCache<FG>,
1469        key: Pubkey,
1470        slot: Slot,
1471    ) -> Arc<ProgramCacheEntry> {
1472        let loaded = new_test_entry_with_usage(slot, slot.saturating_add(1), AtomicU64::default());
1473        let unloaded = Arc::new(loaded.to_unloaded().expect("Failed to unload the program"));
1474        cache.assign_program(key, unloaded.clone());
1475        unloaded
1476    }
1477
1478    fn num_matching_entries<P, FG>(cache: &ProgramCache<FG>, predicate: P) -> usize
1479    where
1480        P: Fn(&ProgramCacheEntryType) -> bool,
1481        FG: ForkGraph,
1482    {
1483        cache
1484            .get_flattened_entries_for_tests()
1485            .iter()
1486            .filter(|(_key, program)| predicate(&program.program))
1487            .count()
1488    }
1489
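    // The usage counter decays by one bit (i.e. it halves) for every slot elapsed since the
    // entry's latest access slot; recording a later access slot resets that baseline.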
1490    #[test]
1491    fn test_usage_counter_decay() {
1492        let _cache = new_mock_cache::<TestForkGraph>();
1493        let program = new_test_entry_with_usage(10, 11, AtomicU64::new(32));
1494        program.update_access_slot(15);
1495        assert_eq!(program.decayed_usage_counter(15), 32);
1496        assert_eq!(program.decayed_usage_counter(16), 16);
1497        assert_eq!(program.decayed_usage_counter(17), 8);
1498        assert_eq!(program.decayed_usage_counter(18), 4);
1499        assert_eq!(program.decayed_usage_counter(19), 2);
1500        assert_eq!(program.decayed_usage_counter(20), 1);
1501        assert_eq!(program.decayed_usage_counter(21), 0);
1502        assert_eq!(program.decayed_usage_counter(15), 32);
1503        assert_eq!(program.decayed_usage_counter(14), 32);
1504
1505        program.update_access_slot(18);
1506        assert_eq!(program.decayed_usage_counter(15), 32);
1507        assert_eq!(program.decayed_usage_counter(16), 32);
1508        assert_eq!(program.decayed_usage_counter(17), 32);
1509        assert_eq!(program.decayed_usage_counter(18), 32);
1510        assert_eq!(program.decayed_usage_counter(19), 16);
1511        assert_eq!(program.decayed_usage_counter(20), 8);
1512        assert_eq!(program.decayed_usage_counter(21), 4);
1513
1514        // Decay for 63 or more slots
1515        assert_eq!(program.decayed_usage_counter(18 + 63), 0);
1516        assert_eq!(program.decayed_usage_counter(100), 0);
1517    }
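
    // Minimal illustrative sketch: `get_flattened_entries` only reports verified and compiled
    // (`Loaded`) entries, which is why tombstones are never candidates for eviction.
    #[test]
    fn test_flattened_entries_skip_tombstones_sketch() {
        let mut cache = new_mock_cache::<TestForkGraph>();
        let program_id = Pubkey::new_unique();
        // A closed tombstone is a valid cache entry...
        let _ = set_tombstone(&mut cache, program_id, 10, ProgramCacheEntryType::Closed);
        // ...but it is invisible to the eviction candidate list,
        assert!(cache.get_flattened_entries(true, true).is_empty());
        // while the unfiltered test accessor still sees it.
        assert_eq!(cache.get_flattened_entries_for_tests().len(), 1);
    }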
1518
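    /// Populates `cache` with loaded entries for `program` at the given deployment slots and
    /// usage counters, then adds ten failed-verification tombstones (slots 21..31) and ten
    /// unloaded entries (slots 31..41). Every loaded entry is also recorded in `programs`.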
1519    fn program_deploy_test_helper(
1520        cache: &mut ProgramCache<TestForkGraph>,
1521        program: Pubkey,
1522        deployment_slots: Vec<Slot>,
1523        usage_counters: Vec<u64>,
1524        programs: &mut Vec<(Pubkey, Slot, u64)>,
1525    ) {
1526        // Add multiple entries for program
1527        deployment_slots
1528            .iter()
1529            .enumerate()
1530            .for_each(|(i, deployment_slot)| {
1531                let usage_counter = *usage_counters.get(i).unwrap_or(&0);
1532                cache.assign_program(
1533                    program,
1534                    new_test_entry_with_usage(
1535                        *deployment_slot,
1536                        (*deployment_slot).saturating_add(2),
1537                        AtomicU64::new(usage_counter),
1538                    ),
1539                );
1540                programs.push((program, *deployment_slot, usage_counter));
1541            });
1542
1543        // Add tombstone entries for the program
1544        let env = Arc::new(BuiltinProgram::new_mock());
1545        for slot in 21..31 {
1546            set_tombstone(
1547                cache,
1548                program,
1549                slot,
1550                ProgramCacheEntryType::FailedVerification(env.clone()),
1551            );
1552        }
1553
1554        // Add unloaded entries for program
1555        for slot in 31..41 {
1556            insert_unloaded_entry(cache, program, slot);
1557        }
1558    }
1559
1560    #[test]
1561    fn test_random_eviction() {
1562        let mut programs = vec![];
1563
1564        let mut cache = new_mock_cache::<TestForkGraph>();
1565
1566        // This test adds different kinds of entries to the cache.
1567        // Tombstones and unloaded entries are expected not to be evicted.
1568        // It also adds multiple entries for three programs to approximate a typical cache instance.
1569
1570        // Program 1
1571        program_deploy_test_helper(
1572            &mut cache,
1573            Pubkey::new_unique(),
1574            vec![0, 10, 20],
1575            vec![4, 5, 25],
1576            &mut programs,
1577        );
1578
1579        // Program 2
1580        program_deploy_test_helper(
1581            &mut cache,
1582            Pubkey::new_unique(),
1583            vec![5, 11],
1584            vec![0, 2],
1585            &mut programs,
1586        );
1587
1588        // Program 3
1589        program_deploy_test_helper(
1590            &mut cache,
1591            Pubkey::new_unique(),
1592            vec![0, 5, 15],
1593            vec![100, 3, 20],
1594            &mut programs,
1595        );
1596
1597        // 1 for each deployment slot
1598        let num_loaded_expected = 8;
1599        // 10 for each program
1600        let num_unloaded_expected = 30;
1601        // 10 for each program
1602        let num_tombstones_expected = 30;
1603
1604        // Count the number of loaded, unloaded and tombstone entries.
1605        programs.sort_by_key(|(_id, _slot, usage_count)| *usage_count);
1606        let num_loaded = num_matching_entries(&cache, |program_type| {
1607            matches!(program_type, ProgramCacheEntryType::Loaded(_))
1608        });
1609        let num_unloaded = num_matching_entries(&cache, |program_type| {
1610            matches!(program_type, ProgramCacheEntryType::Unloaded(_))
1611        });
1612        let num_tombstones = num_matching_entries(&cache, |program_type| {
1613            matches!(
1614                program_type,
1615                ProgramCacheEntryType::DelayVisibility
1616                    | ProgramCacheEntryType::FailedVerification(_)
1617                    | ProgramCacheEntryType::Closed
1618            )
1619        });
1620
1621        // Test that the cache is constructed with the expected number of entries.
1622        assert_eq!(num_loaded, num_loaded_expected);
1623        assert_eq!(num_unloaded, num_unloaded_expected);
1624        assert_eq!(num_tombstones, num_tombstones_expected);
1625
1626        // Evict entries from the cache
1627        let eviction_pct = 1;
1628
1629        let num_loaded_expected =
1630            Percentage::from(eviction_pct).apply_to(crate::loaded_programs::MAX_LOADED_ENTRY_COUNT);
1631        let num_unloaded_expected = num_unloaded_expected + num_loaded - num_loaded_expected;
1632        cache.evict_using_2s_random_selection(Percentage::from(eviction_pct), 21);
1633
1634        // Count the number of loaded, unloaded and tombstone entries.
1635        let num_loaded = num_matching_entries(&cache, |program_type| {
1636            matches!(program_type, ProgramCacheEntryType::Loaded(_))
1637        });
1638        let num_unloaded = num_matching_entries(&cache, |program_type| {
1639            matches!(program_type, ProgramCacheEntryType::Unloaded(_))
1640        });
1641        let num_tombstones = num_matching_entries(&cache, |program_type| {
1642            matches!(program_type, ProgramCacheEntryType::FailedVerification(_))
1643        });
1644
1645        // However many entries are left after the shrink
1646        assert_eq!(num_loaded, num_loaded_expected);
1647        // The original unloaded entries + the evicted loaded entries
1648        assert_eq!(num_unloaded, num_unloaded_expected);
1649        // The original tombstones are not evicted
1650        assert_eq!(num_tombstones, num_tombstones_expected);
1651    }
1652
1653    #[test]
1654    fn test_eviction() {
1655        let mut programs = vec![];
1656        let mut cache = new_mock_cache::<TestForkGraph>();
1657
1658        // Program 1
1659        program_deploy_test_helper(
1660            &mut cache,
1661            Pubkey::new_unique(),
1662            vec![0, 10, 20],
1663            vec![4, 5, 25],
1664            &mut programs,
1665        );
1666
1667        // Program 2
1668        program_deploy_test_helper(
1669            &mut cache,
1670            Pubkey::new_unique(),
1671            vec![5, 11],
1672            vec![0, 2],
1673            &mut programs,
1674        );
1675
1676        // Program 3
1677        program_deploy_test_helper(
1678            &mut cache,
1679            Pubkey::new_unique(),
1680            vec![0, 5, 15],
1681            vec![100, 3, 20],
1682            &mut programs,
1683        );
1684
1685        // 1 for each deployment slot
1686        let num_loaded_expected = 8;
1687        // 10 for each program
1688        let num_unloaded_expected = 30;
1689        // 10 for each program
1690        let num_tombstones_expected = 30;
1691
1692        // Count the number of loaded, unloaded and tombstone entries.
1693        programs.sort_by_key(|(_id, _slot, usage_count)| *usage_count);
1694        let num_loaded = num_matching_entries(&cache, |program_type| {
1695            matches!(program_type, ProgramCacheEntryType::Loaded(_))
1696        });
1697        let num_unloaded = num_matching_entries(&cache, |program_type| {
1698            matches!(program_type, ProgramCacheEntryType::Unloaded(_))
1699        });
1700        let num_tombstones = num_matching_entries(&cache, |program_type| {
1701            matches!(program_type, ProgramCacheEntryType::FailedVerification(_))
1702        });
1703
1704        // Test that the cache is constructed with the expected number of entries.
1705        assert_eq!(num_loaded, num_loaded_expected);
1706        assert_eq!(num_unloaded, num_unloaded_expected);
1707        assert_eq!(num_tombstones, num_tombstones_expected);
1708
1709        // Evict entries from the cache
1710        let eviction_pct = 1;
1711
1712        let num_loaded_expected =
1713            Percentage::from(eviction_pct).apply_to(crate::loaded_programs::MAX_LOADED_ENTRY_COUNT);
1714        let num_unloaded_expected = num_unloaded_expected + num_loaded - num_loaded_expected;
1715
1716        cache.sort_and_unload(Percentage::from(eviction_pct));
1717
1718        // Check that every program is still in the cache.
1719        let entries = cache.get_flattened_entries_for_tests();
1720        programs.iter().for_each(|entry| {
1721            assert!(entries.iter().any(|(key, _entry)| key == &entry.0));
1722        });
1723
1724        let unloaded = entries
1725            .iter()
1726            .filter_map(|(key, program)| {
1727                matches!(program.program, ProgramCacheEntryType::Unloaded(_))
1728                    .then_some((*key, program.tx_usage_counter.load(Ordering::Relaxed)))
1729            })
1730            .collect::<Vec<(Pubkey, u64)>>();
1731
1732        for index in 0..3 {
1733            let expected = programs.get(index).expect("Missing program");
1734            assert!(unloaded.contains(&(expected.0, expected.2)));
1735        }
1736
1737        // Count the number of loaded, unloaded and tombstone entries.
1738        let num_loaded = num_matching_entries(&cache, |program_type| {
1739            matches!(program_type, ProgramCacheEntryType::Loaded(_))
1740        });
1741        let num_unloaded = num_matching_entries(&cache, |program_type| {
1742            matches!(program_type, ProgramCacheEntryType::Unloaded(_))
1743        });
1744        let num_tombstones = num_matching_entries(&cache, |program_type| {
1745            matches!(
1746                program_type,
1747                ProgramCacheEntryType::DelayVisibility
1748                    | ProgramCacheEntryType::FailedVerification(_)
1749                    | ProgramCacheEntryType::Closed
1750            )
1751        });
1752
1753        // However many entries are left after the shrink
1754        assert_eq!(num_loaded, num_loaded_expected);
1755        // The original unloaded entries + the evicted loaded entries
1756        assert_eq!(num_unloaded, num_unloaded_expected);
1757        // The original tombstones are not evicted
1758        assert_eq!(num_tombstones, num_tombstones_expected);
1759    }
1760
1761    #[test]
1762    fn test_usage_count_of_unloaded_program() {
1763        let mut cache = new_mock_cache::<TestForkGraph>();
1764
1765        let program = Pubkey::new_unique();
1766        let evict_to_pct = 2;
1767        let cache_capacity_after_shrink =
1768            Percentage::from(evict_to_pct).apply_to(crate::loaded_programs::MAX_LOADED_ENTRY_COUNT);
1769        // Add enough programs to the cache to trigger 1 eviction after shrinking.
1770        let num_total_programs = (cache_capacity_after_shrink + 1) as u64;
1771        (0..num_total_programs).for_each(|i| {
1772            cache.assign_program(
1773                program,
1774                new_test_entry_with_usage(i, i + 2, AtomicU64::new(i + 10)),
1775            );
1776        });
1777
1778        cache.sort_and_unload(Percentage::from(evict_to_pct));
1779
1780        let num_unloaded = num_matching_entries(&cache, |program_type| {
1781            matches!(program_type, ProgramCacheEntryType::Unloaded(_))
1782        });
1783        assert_eq!(num_unloaded, 1);
1784
1785        cache
1786            .get_flattened_entries_for_tests()
1787            .iter()
1788            .for_each(|(_key, program)| {
1789                if matches!(program.program, ProgramCacheEntryType::Unloaded(_)) {
1790                    // Test that the usage counter is retained for the unloaded program
1791                    assert_eq!(program.tx_usage_counter.load(Ordering::Relaxed), 10);
1792                    assert_eq!(program.deployment_slot, 0);
1793                    assert_eq!(program.effective_slot, 2);
1794                }
1795            });
1796
1797        // Replenish the program that was just unloaded. Use 0 as the usage counter. This should be
1798        // updated with the usage counter from the unloaded program.
1799        cache.assign_program(program, new_test_entry_with_usage(0, 2, AtomicU64::new(0)));
1800
1801        cache
1802            .get_flattened_entries_for_tests()
1803            .iter()
1804            .for_each(|(_key, program)| {
1805                if matches!(program.program, ProgramCacheEntryType::Unloaded(_))
1806                    && program.deployment_slot == 0
1807                    && program.effective_slot == 2
1808                {
1809                    // Test that the usage counter was correctly updated.
1810                    assert_eq!(program.tx_usage_counter.load(Ordering::Relaxed), 10);
1811                }
1812            });
1813    }
1814
1815    #[test]
1816    fn test_fuzz_assign_program_order() {
1817        use rand::prelude::SliceRandom;
1818        const EXPECTED_ENTRIES: [(u64, u64); 7] =
1819            [(1, 2), (5, 5), (5, 6), (5, 10), (9, 10), (10, 10), (3, 12)];
1820        let mut rng = rand::thread_rng();
1821        let program_id = Pubkey::new_unique();
1822        for _ in 0..1000 {
1823            let mut entries = EXPECTED_ENTRIES.to_vec();
1824            entries.shuffle(&mut rng);
1825            let mut cache = new_mock_cache::<TestForkGraph>();
1826            for (deployment_slot, effective_slot) in entries {
1827                assert!(!cache
1828                    .assign_program(program_id, new_test_entry(deployment_slot, effective_slot)));
1829            }
1830            for ((deployment_slot, effective_slot), entry) in EXPECTED_ENTRIES
1831                .iter()
1832                .zip(cache.get_slot_versions_for_tests(&program_id).iter())
1833            {
1834                assert_eq!(entry.deployment_slot, *deployment_slot);
1835                assert_eq!(entry.effective_slot, *effective_slot);
1836            }
1837        }
1838    }
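
    // Minimal illustrative sketch: `remove_programs` drops every slot version that the cache
    // holds for the given keys.
    #[test]
    fn test_remove_programs_sketch() {
        let mut cache = new_mock_cache::<TestForkGraph>();
        let program_id = Pubkey::new_unique();
        cache.assign_program(program_id, new_test_entry(0, 1));
        assert_eq!(cache.get_slot_versions_for_tests(&program_id).len(), 1);
        cache.remove_programs([program_id].into_iter());
        assert!(cache.get_slot_versions_for_tests(&program_id).is_empty());
    }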
1839
1840    #[test_matrix(
1841        (
1842            ProgramCacheEntryType::Closed,
1843            ProgramCacheEntryType::FailedVerification(get_mock_env()),
1844            new_loaded_entry(get_mock_env()),
1845        ),
1846        (
1847            ProgramCacheEntryType::FailedVerification(get_mock_env()),
1848            ProgramCacheEntryType::Closed,
1849            ProgramCacheEntryType::Unloaded(get_mock_env()),
1850            new_loaded_entry(get_mock_env()),
1851            ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock()),
1852        )
1853    )]
1854    #[test_matrix(
1855        (
1856            ProgramCacheEntryType::Unloaded(get_mock_env()),
1857        ),
1858        (
1859            ProgramCacheEntryType::FailedVerification(get_mock_env()),
1860            ProgramCacheEntryType::Closed,
1861            ProgramCacheEntryType::Unloaded(get_mock_env()),
1862            ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock()),
1863        )
1864    )]
1865    #[test_matrix(
1866        (ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock()),),
1867        (
1868            ProgramCacheEntryType::FailedVerification(get_mock_env()),
1869            ProgramCacheEntryType::Closed,
1870            ProgramCacheEntryType::Unloaded(get_mock_env()),
1871            new_loaded_entry(get_mock_env()),
1872        )
1873    )]
1874    #[should_panic(expected = "Unexpected replacement of an entry")]
1875    fn test_assign_program_failure(old: ProgramCacheEntryType, new: ProgramCacheEntryType) {
1876        let mut cache = new_mock_cache::<TestForkGraph>();
1877        let program_id = Pubkey::new_unique();
1878        assert!(!cache.assign_program(
1879            program_id,
1880            Arc::new(ProgramCacheEntry {
1881                program: old,
1882                account_owner: ProgramCacheEntryOwner::LoaderV2,
1883                account_size: 0,
1884                deployment_slot: 10,
1885                effective_slot: 11,
1886                tx_usage_counter: AtomicU64::default(),
1887                ix_usage_counter: AtomicU64::default(),
1888                latest_access_slot: AtomicU64::default(),
1889            }),
1890        ));
1891        cache.assign_program(
1892            program_id,
1893            Arc::new(ProgramCacheEntry {
1894                program: new,
1895                account_owner: ProgramCacheEntryOwner::LoaderV2,
1896                account_size: 0,
1897                deployment_slot: 10,
1898                effective_slot: 11,
1899                tx_usage_counter: AtomicU64::default(),
1900                ix_usage_counter: AtomicU64::default(),
1901                latest_access_slot: AtomicU64::default(),
1902            }),
1903        );
1904    }
1905
1906    #[test_case(
1907        ProgramCacheEntryType::Unloaded(Arc::new(BuiltinProgram::new_mock())),
1908        new_loaded_entry(get_mock_env())
1909    )]
1910    #[test_case(
1911        ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock()),
1912        ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock())
1913    )]
1914    fn test_assign_program_success(old: ProgramCacheEntryType, new: ProgramCacheEntryType) {
1915        let mut cache = new_mock_cache::<TestForkGraph>();
1916        let program_id = Pubkey::new_unique();
1917        assert!(!cache.assign_program(
1918            program_id,
1919            Arc::new(ProgramCacheEntry {
1920                program: old,
1921                account_owner: ProgramCacheEntryOwner::LoaderV2,
1922                account_size: 0,
1923                deployment_slot: 10,
1924                effective_slot: 11,
1925                tx_usage_counter: AtomicU64::default(),
1926                ix_usage_counter: AtomicU64::default(),
1927                latest_access_slot: AtomicU64::default(),
1928            }),
1929        ));
1930        assert!(!cache.assign_program(
1931            program_id,
1932            Arc::new(ProgramCacheEntry {
1933                program: new,
1934                account_owner: ProgramCacheEntryOwner::LoaderV2,
1935                account_size: 0,
1936                deployment_slot: 10,
1937                effective_slot: 11,
1938                tx_usage_counter: AtomicU64::default(),
1939                ix_usage_counter: AtomicU64::default(),
1940                latest_access_slot: AtomicU64::default(),
1941            }),
1942        ));
1943    }
1944
1945    #[test]
1946    fn test_tombstone() {
1947        let env = Arc::new(BuiltinProgram::new_mock());
1948        let tombstone = ProgramCacheEntry::new_tombstone(
1949            0,
1950            ProgramCacheEntryOwner::LoaderV2,
1951            ProgramCacheEntryType::FailedVerification(env.clone()),
1952        );
1953        assert_matches!(
1954            tombstone.program,
1955            ProgramCacheEntryType::FailedVerification(_)
1956        );
1957        assert!(tombstone.is_tombstone());
1958        assert_eq!(tombstone.deployment_slot, 0);
1959        assert_eq!(tombstone.effective_slot, 0);
1960
1961        let tombstone = ProgramCacheEntry::new_tombstone(
1962            100,
1963            ProgramCacheEntryOwner::LoaderV2,
1964            ProgramCacheEntryType::Closed,
1965        );
1966        assert_matches!(tombstone.program, ProgramCacheEntryType::Closed);
1967        assert!(tombstone.is_tombstone());
1968        assert_eq!(tombstone.deployment_slot, 100);
1969        assert_eq!(tombstone.effective_slot, 100);
1970
1971        let mut cache = new_mock_cache::<TestForkGraph>();
1972        let program1 = Pubkey::new_unique();
1973        let tombstone = set_tombstone(
1974            &mut cache,
1975            program1,
1976            10,
1977            ProgramCacheEntryType::FailedVerification(env.clone()),
1978        );
1979        let slot_versions = cache.get_slot_versions_for_tests(&program1);
1980        assert_eq!(slot_versions.len(), 1);
1981        assert!(slot_versions.first().unwrap().is_tombstone());
1982        assert_eq!(tombstone.deployment_slot, 10);
1983        assert_eq!(tombstone.effective_slot, 10);
1984
1985        // Add a program at slot 50, and a tombstone for the program at slot 60
1986        let program2 = Pubkey::new_unique();
1987        cache.assign_program(program2, new_test_builtin_entry(50, 51));
1988        let slot_versions = cache.get_slot_versions_for_tests(&program2);
1989        assert_eq!(slot_versions.len(), 1);
1990        assert!(!slot_versions.first().unwrap().is_tombstone());
1991
1992        let tombstone = set_tombstone(
1993            &mut cache,
1994            program2,
1995            60,
1996            ProgramCacheEntryType::FailedVerification(env),
1997        );
1998        let slot_versions = cache.get_slot_versions_for_tests(&program2);
1999        assert_eq!(slot_versions.len(), 2);
2000        assert!(!slot_versions.first().unwrap().is_tombstone());
2001        assert!(slot_versions.get(1).unwrap().is_tombstone());
2002        assert!(tombstone.is_tombstone());
2003        assert_eq!(tombstone.deployment_slot, 60);
2004        assert_eq!(tombstone.effective_slot, 60);
2005    }
2006
2007    struct TestForkGraph {
2008        relation: BlockRelation,
2009    }
2010    impl ForkGraph for TestForkGraph {
2011        fn relationship(&self, _a: Slot, _b: Slot) -> BlockRelation {
2012            self.relation
2013        }
2014    }
2015
2016    #[test]
2017    fn test_prune_empty() {
2018        let mut cache = new_mock_cache::<TestForkGraph>();
2019        let fork_graph = Arc::new(RwLock::new(TestForkGraph {
2020            relation: BlockRelation::Unrelated,
2021        }));
2022
2023        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2024
2025        cache.prune(0, 0);
2026        assert!(cache.get_flattened_entries_for_tests().is_empty());
2027
2028        cache.prune(10, 0);
2029        assert!(cache.get_flattened_entries_for_tests().is_empty());
2030
2031        let mut cache = new_mock_cache::<TestForkGraph>();
2032        let fork_graph = Arc::new(RwLock::new(TestForkGraph {
2033            relation: BlockRelation::Ancestor,
2034        }));
2035
2036        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2037
2038        cache.prune(0, 0);
2039        assert!(cache.get_flattened_entries_for_tests().is_empty());
2040
2041        cache.prune(10, 0);
2042        assert!(cache.get_flattened_entries_for_tests().is_empty());
2043
2044        let mut cache = new_mock_cache::<TestForkGraph>();
2045        let fork_graph = Arc::new(RwLock::new(TestForkGraph {
2046            relation: BlockRelation::Descendant,
2047        }));
2048
2049        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2050
2051        cache.prune(0, 0);
2052        assert!(cache.get_flattened_entries_for_tests().is_empty());
2053
2054        cache.prune(10, 0);
2055        assert!(cache.get_flattened_entries_for_tests().is_empty());
2056
2057        let mut cache = new_mock_cache::<TestForkGraph>();
2058        let fork_graph = Arc::new(RwLock::new(TestForkGraph {
2059            relation: BlockRelation::Unknown,
2060        }));
2061        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2062
2063        cache.prune(0, 0);
2064        assert!(cache.get_flattened_entries_for_tests().is_empty());
2065
2066        cache.prune(10, 0);
2067        assert!(cache.get_flattened_entries_for_tests().is_empty());
2068    }
2069
2070    #[test]
2071    fn test_prune_different_env() {
2072        let mut cache = new_mock_cache::<TestForkGraph>();
2073
2074        let fork_graph = Arc::new(RwLock::new(TestForkGraph {
2075            relation: BlockRelation::Ancestor,
2076        }));
2077
2078        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2079
2080        let program1 = Pubkey::new_unique();
2081        cache.assign_program(program1, new_test_entry(10, 10));
2082
2083        let new_env = Arc::new(BuiltinProgram::new_mock());
2084        cache.upcoming_environments = Some(ProgramRuntimeEnvironments {
2085            program_runtime_v1: new_env.clone(),
2086            program_runtime_v2: new_env.clone(),
2087        });
2088        let updated_program = Arc::new(ProgramCacheEntry {
2089            program: new_loaded_entry(new_env.clone()),
2090            account_owner: ProgramCacheEntryOwner::LoaderV2,
2091            account_size: 0,
2092            deployment_slot: 20,
2093            effective_slot: 20,
2094            tx_usage_counter: AtomicU64::default(),
2095            ix_usage_counter: AtomicU64::default(),
2096            latest_access_slot: AtomicU64::default(),
2097        });
2098        cache.assign_program(program1, updated_program.clone());
2099
2100        // Test that there are 2 entries for the program
2101        assert_eq!(cache.get_slot_versions_for_tests(&program1).len(), 2);
2102
2103        cache.prune(21, cache.latest_root_epoch);
2104
2105        // Test that prune didn't remove the entry, since environments are different.
2106        assert_eq!(cache.get_slot_versions_for_tests(&program1).len(), 2);
2107
2108        cache.prune(22, cache.latest_root_epoch.saturating_add(1));
2109
2110        // Test that prune removed 1 entry, since epoch changed
2111        assert_eq!(cache.get_slot_versions_for_tests(&program1).len(), 1);
2112
2113        let entry = cache
2114            .get_slot_versions_for_tests(&program1)
2115            .first()
2116            .expect("Failed to get the program")
2117            .clone();
2118        // Test that the correct entry remains in the cache
2119        assert_eq!(entry, updated_program);
2120    }
2121
2122    #[derive(Default)]
2123    struct TestForkGraphSpecific {
2124        forks: Vec<Vec<Slot>>,
2125    }
2126
2127    impl TestForkGraphSpecific {
2128        fn insert_fork(&mut self, fork: &[Slot]) {
2129            let mut fork = fork.to_vec();
2130            fork.sort();
2131            self.forks.push(fork)
2132        }
2133    }
2134
2135    impl ForkGraph for TestForkGraphSpecific {
2136        fn relationship(&self, a: Slot, b: Slot) -> BlockRelation {
2137            match self.forks.iter().try_for_each(|fork| {
2138                let relation = fork
2139                    .iter()
2140                    .position(|x| *x == a)
2141                    .and_then(|a_pos| {
2142                        fork.iter().position(|x| *x == b).and_then(|b_pos| {
2143                            (a_pos == b_pos)
2144                                .then_some(BlockRelation::Equal)
2145                                .or_else(|| (a_pos < b_pos).then_some(BlockRelation::Ancestor))
2146                                .or(Some(BlockRelation::Descendant))
2147                        })
2148                    })
2149                    .unwrap_or(BlockRelation::Unrelated);
2150
2151                if relation != BlockRelation::Unrelated {
2152                    return ControlFlow::Break(relation);
2153                }
2154
2155                ControlFlow::Continue(())
2156            }) {
2157                ControlFlow::Break(relation) => relation,
2158                _ => BlockRelation::Unrelated,
2159            }
2160        }
2161    }
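
    // For example, with a single inserted fork [0, 10, 20] the implementation above yields:
    // relationship(10, 20) == Ancestor, relationship(20, 10) == Descendant,
    // relationship(10, 10) == Equal, and any slot missing from the fork (e.g. 5) == Unrelated.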
2162
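    /// Builds the `missing` list that `ProgramCache::extract` consumes: each requested key is
    /// included with `ProgramCacheMatchCriteria::NoCriteria` and a count of one if it has a slot
    /// version whose deployment slot lies on the fork of `loading_slot` (relationship `Equal` or
    /// `Ancestor`); keys without such a version are omitted.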
2163    fn get_entries_to_load(
2164        cache: &ProgramCache<TestForkGraphSpecific>,
2165        loading_slot: Slot,
2166        keys: &[Pubkey],
2167    ) -> Vec<(Pubkey, (ProgramCacheMatchCriteria, u64))> {
2168        let fork_graph = cache.fork_graph.as_ref().unwrap().upgrade().unwrap();
2169        let locked_fork_graph = fork_graph.read().unwrap();
2170        let entries = cache.get_flattened_entries_for_tests();
2171        keys.iter()
2172            .filter_map(|key| {
2173                entries
2174                    .iter()
2175                    .rev()
2176                    .find(|(program_id, entry)| {
2177                        program_id == key
2178                            && matches!(
2179                                locked_fork_graph.relationship(entry.deployment_slot, loading_slot),
2180                                BlockRelation::Equal | BlockRelation::Ancestor,
2181                            )
2182                    })
2183                    .map(|(program_id, _entry)| {
2184                        (*program_id, (ProgramCacheMatchCriteria::NoCriteria, 1))
2185                    })
2186            })
2187            .collect()
2188    }
2189
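    /// Returns true when `extracted` (which must have been built for `working_slot`) contains
    /// `program` with the expected `deployment_slot`.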
2190    fn match_slot(
2191        extracted: &ProgramCacheForTxBatch,
2192        program: &Pubkey,
2193        deployment_slot: Slot,
2194        working_slot: Slot,
2195    ) -> bool {
2196        assert_eq!(extracted.slot, working_slot);
2197        extracted
2198            .entries
2199            .get(program)
2200            .map(|entry| entry.deployment_slot == deployment_slot)
2201            .unwrap_or(false)
2202    }
2203
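    /// Returns true when the presence of `program` in the `missing` list matches
    /// `expected_result`, i.e. passing `false` asserts that the program is not in the list.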
2204    fn match_missing(
2205        missing: &[(Pubkey, (ProgramCacheMatchCriteria, u64))],
2206        program: &Pubkey,
2207        expected_result: bool,
2208    ) -> bool {
2209        missing.iter().any(|(key, _)| key == program) == expected_result
2210    }
2211
2212    #[test]
2213    fn test_fork_extract_and_prune() {
2214        let mut cache = new_mock_cache::<TestForkGraphSpecific>();
2215
2216        // Fork graph created for the test
2217        //                   0
2218        //                 /   \
2219        //                10    5
2220        //                |     |
2221        //                20    11
2222        //                |     | \
2223        //                22   15  25
2224        //                      |   |
2225        //                     16  27
2226        //                      |
2227        //                     19
2228        //                      |
2229        //                     23
2230
2231        let mut fork_graph = TestForkGraphSpecific::default();
2232        fork_graph.insert_fork(&[0, 10, 20, 22]);
2233        fork_graph.insert_fork(&[0, 5, 11, 15, 16, 18, 19, 21, 23]);
2234        fork_graph.insert_fork(&[0, 5, 11, 25, 27]);
2235
2236        let fork_graph = Arc::new(RwLock::new(fork_graph));
2237        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2238
2239        let program1 = Pubkey::new_unique();
2240        cache.assign_program(program1, new_test_entry(0, 1));
2241        cache.assign_program(program1, new_test_entry(10, 11));
2242        cache.assign_program(program1, new_test_entry(20, 21));
2243
2244        let program2 = Pubkey::new_unique();
2245        cache.assign_program(program2, new_test_entry(5, 6));
2246        cache.assign_program(
2247            program2,
2248            new_test_entry(11, 11 + DELAY_VISIBILITY_SLOT_OFFSET),
2249        );
2250
2251        let program3 = Pubkey::new_unique();
2252        cache.assign_program(program3, new_test_entry(25, 26));
2253
2254        let program4 = Pubkey::new_unique();
2255        cache.assign_program(program4, new_test_entry(0, 1));
2256        cache.assign_program(program4, new_test_entry(5, 6));
2257        // The following is a special case, where the effective slot is DELAY_VISIBILITY_SLOT_OFFSET slots in the future
2258        cache.assign_program(
2259            program4,
2260            new_test_entry(15, 15 + DELAY_VISIBILITY_SLOT_OFFSET),
2261        );
2262
2263        // Current fork graph
2264        //                   0
2265        //                 /   \
2266        //                10    5
2267        //                |     |
2268        //                20    11
2269        //                |     | \
2270        //                22   15  25
2271        //                      |   |
2272        //                     16  27
2273        //                      |
2274        //                     19
2275        //                      |
2276        //                     23
2277
2278        // Testing fork 0 - 10 - 20 - 22 with current slot at 22
2279        let mut missing =
2280            get_entries_to_load(&cache, 22, &[program1, program2, program3, program4]);
2281        assert!(match_missing(&missing, &program2, false));
2282        assert!(match_missing(&missing, &program3, false));
2283        let mut extracted = ProgramCacheForTxBatch::new(22, cache.environments.clone(), None, 0);
2284        cache.extract(&mut missing, &mut extracted, true);
2285        assert!(match_slot(&extracted, &program1, 20, 22));
2286        assert!(match_slot(&extracted, &program4, 0, 22));
2287
2288        // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 15
2289        let mut missing =
2290            get_entries_to_load(&cache, 15, &[program1, program2, program3, program4]);
2291        assert!(match_missing(&missing, &program3, false));
2292        let mut extracted = ProgramCacheForTxBatch::new(15, cache.environments.clone(), None, 0);
2293        cache.extract(&mut missing, &mut extracted, true);
2294        assert!(match_slot(&extracted, &program1, 0, 15));
2295        assert!(match_slot(&extracted, &program2, 11, 15));
2296        // The effective slot of program4 deployed in slot 15 is 16. So it should not be usable in slot 15.
2297        // A delay visibility tombstone should be returned here.
2298        let tombstone = extracted
2299            .find(&program4)
2300            .expect("Failed to find the tombstone");
2301        assert_matches!(tombstone.program, ProgramCacheEntryType::DelayVisibility);
2302        assert_eq!(tombstone.deployment_slot, 15);
2303
2304        // Testing the same fork above, but current slot is now 18 (past the effective slot of program4).
2305        let mut missing =
2306            get_entries_to_load(&cache, 18, &[program1, program2, program3, program4]);
2307        assert!(match_missing(&missing, &program3, false));
2308        let mut extracted = ProgramCacheForTxBatch::new(18, cache.environments.clone(), None, 0);
2309        cache.extract(&mut missing, &mut extracted, true);
2310        assert!(match_slot(&extracted, &program1, 0, 18));
2311        assert!(match_slot(&extracted, &program2, 11, 18));
2312        // The effective slot of program4 deployed in slot 15 is 16. So it should be usable in slot 18.
2313        assert!(match_slot(&extracted, &program4, 15, 18));
2314
2315        // Testing the same fork above, but current slot is now 23 (well past the effective slot of program4).
2316        let mut missing =
2317            get_entries_to_load(&cache, 23, &[program1, program2, program3, program4]);
2318        assert!(match_missing(&missing, &program3, false));
2319        let mut extracted = ProgramCacheForTxBatch::new(23, cache.environments.clone(), None, 0);
2320        cache.extract(&mut missing, &mut extracted, true);
2321        assert!(match_slot(&extracted, &program1, 0, 23));
2322        assert!(match_slot(&extracted, &program2, 11, 23));
2323        // The effective slot of program4 deployed in slot 15 is 16. So it should be usable in slot 23.
2324        assert!(match_slot(&extracted, &program4, 15, 23));
2325
2326        // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 11
2327        let mut missing =
2328            get_entries_to_load(&cache, 11, &[program1, program2, program3, program4]);
2329        assert!(match_missing(&missing, &program3, false));
2330        let mut extracted = ProgramCacheForTxBatch::new(11, cache.environments.clone(), None, 0);
2331        cache.extract(&mut missing, &mut extracted, true);
2332        assert!(match_slot(&extracted, &program1, 0, 11));
2333        // program2 was updated at slot 11, but is not effective till slot 12. The result should contain a tombstone.
2334        let tombstone = extracted
2335            .find(&program2)
2336            .expect("Failed to find the tombstone");
2337        assert_matches!(tombstone.program, ProgramCacheEntryType::DelayVisibility);
2338        assert_eq!(tombstone.deployment_slot, 11);
2339        assert!(match_slot(&extracted, &program4, 5, 11));
2340
2341        cache.prune(5, 0);
2342
2343        // Fork graph after pruning
2344        //                   0
2345        //                   |
2346        //                   5
2347        //                   |
2348        //                   11
2349        //                   | \
2350        //                  15  25
2351        //                   |   |
2352        //                  16  27
2353        //                   |
2354        //                  19
2355        //                   |
2356        //                  23
2357
2358        // Testing fork 11 - 15 - 16 - 19 - 21 with root at 5 and current slot at 21
2359        let mut missing =
2360            get_entries_to_load(&cache, 21, &[program1, program2, program3, program4]);
2361        assert!(match_missing(&missing, &program3, false));
2362        let mut extracted = ProgramCacheForTxBatch::new(21, cache.environments.clone(), None, 0);
2363        cache.extract(&mut missing, &mut extracted, true);
2364        // Since the fork was pruned, we should not find the entry deployed at slot 20.
2365        assert!(match_slot(&extracted, &program1, 0, 21));
2366        assert!(match_slot(&extracted, &program2, 11, 21));
2367        assert!(match_slot(&extracted, &program4, 15, 21));
2368
2369        // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27
2370        let mut missing =
2371            get_entries_to_load(&cache, 27, &[program1, program2, program3, program4]);
2372        let mut extracted = ProgramCacheForTxBatch::new(27, cache.environments.clone(), None, 0);
2373        cache.extract(&mut missing, &mut extracted, true);
2374        assert!(match_slot(&extracted, &program1, 0, 27));
2375        assert!(match_slot(&extracted, &program2, 11, 27));
2376        assert!(match_slot(&extracted, &program3, 25, 27));
2377        assert!(match_slot(&extracted, &program4, 5, 27));
2378
2379        cache.prune(15, 0);
2380
2381        // Fork graph after pruning
2382        //                  0
2383        //                  |
2384        //                  5
2385        //                  |
2386        //                  11
2387        //                  |
2388        //                  15
2389        //                  |
2390        //                  16
2391        //                  |
2392        //                  19
2393        //                  |
2394        //                  23
2395
2396        // Testing fork 16, 19, 23, with root at 15, current slot at 23
2397        let mut missing =
2398            get_entries_to_load(&cache, 23, &[program1, program2, program3, program4]);
2399        assert!(match_missing(&missing, &program3, false));
2400        let mut extracted = ProgramCacheForTxBatch::new(23, cache.environments.clone(), None, 0);
2401        cache.extract(&mut missing, &mut extracted, true);
2402        assert!(match_slot(&extracted, &program1, 0, 23));
2403        assert!(match_slot(&extracted, &program2, 11, 23));
2404        assert!(match_slot(&extracted, &program4, 15, 23));
2405    }
2406
2407    #[test]
2408    fn test_extract_using_deployment_slot() {
2409        let mut cache = new_mock_cache::<TestForkGraphSpecific>();
2410
2411        // Fork graph created for the test
2412        //                   0
2413        //                 /   \
2414        //                10    5
2415        //                |     |
2416        //                20    11
2417        //                |     | \
2418        //                22   15  25
2419        //                      |   |
2420        //                     16  27
2421        //                      |
2422        //                     19
2423        //                      |
2424        //                     23
2425
2426        let mut fork_graph = TestForkGraphSpecific::default();
2427        fork_graph.insert_fork(&[0, 10, 20, 22]);
2428        fork_graph.insert_fork(&[0, 5, 11, 12, 15, 16, 18, 19, 21, 23]);
2429        fork_graph.insert_fork(&[0, 5, 11, 25, 27]);
2430
2431        let fork_graph = Arc::new(RwLock::new(fork_graph));
2432        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2433
2434        let program1 = Pubkey::new_unique();
2435        cache.assign_program(program1, new_test_entry(0, 1));
2436        cache.assign_program(program1, new_test_entry(20, 21));
2437
2438        let program2 = Pubkey::new_unique();
2439        cache.assign_program(program2, new_test_entry(5, 6));
2440        cache.assign_program(program2, new_test_entry(11, 12));
2441
2442        let program3 = Pubkey::new_unique();
2443        cache.assign_program(program3, new_test_entry(25, 26));
2444
2445        // Testing fork 0 - 5 - 11 - 12 - 15 - 16 - 18 - 19 - 21 - 23 with current slot at 12
2446        let mut missing = get_entries_to_load(&cache, 12, &[program1, program2, program3]);
2447        assert!(match_missing(&missing, &program3, false));
2448        let mut extracted = ProgramCacheForTxBatch::new(12, cache.environments.clone(), None, 0);
2449        cache.extract(&mut missing, &mut extracted, true);
2450        assert!(match_slot(&extracted, &program1, 0, 12));
2451        assert!(match_slot(&extracted, &program2, 11, 12));
2452
2453        // Test the same fork, but request the program modified at a later slot than what's in the cache.
2454        let mut missing = get_entries_to_load(&cache, 12, &[program1, program2, program3]);
2455        missing.get_mut(0).unwrap().1 .0 = ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(5);
2456        missing.get_mut(1).unwrap().1 .0 = ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(5);
2457        assert!(match_missing(&missing, &program3, false));
2458        let mut extracted = ProgramCacheForTxBatch::new(12, cache.environments.clone(), None, 0);
2459        cache.extract(&mut missing, &mut extracted, true);
2460        assert!(match_missing(&missing, &program1, true));
2461        assert!(match_slot(&extracted, &program2, 11, 12));
2462    }
2463
2464    #[test]
2465    fn test_extract_unloaded() {
2466        let mut cache = new_mock_cache::<TestForkGraphSpecific>();
2467
2468        // Fork graph created for the test
2469        //                   0
2470        //                 /   \
2471        //                10    5
2472        //                |     |
2473        //                20    11
2474        //                |     | \
2475        //                22   15  25
2476        //                      |   |
2477        //                     16  27
2478        //                      |
2479        //                     19
2480        //                      |
2481        //                     23
2482
2483        let mut fork_graph = TestForkGraphSpecific::default();
2484        fork_graph.insert_fork(&[0, 10, 20, 22]);
2485        fork_graph.insert_fork(&[0, 5, 11, 15, 16, 19, 21, 23]);
2486        fork_graph.insert_fork(&[0, 5, 11, 25, 27]);
2487
2488        let fork_graph = Arc::new(RwLock::new(fork_graph));
2489        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2490
2491        let program1 = Pubkey::new_unique();
2492        cache.assign_program(program1, new_test_entry(0, 1));
2493        cache.assign_program(program1, new_test_entry(20, 21));
2494
2495        let program2 = Pubkey::new_unique();
2496        cache.assign_program(program2, new_test_entry(5, 6));
2497        cache.assign_program(program2, new_test_entry(11, 12));
2498
2499        let program3 = Pubkey::new_unique();
2500        // Insert an unloaded program with correct/cache's environment at slot 25
2501        let _ = insert_unloaded_entry(&mut cache, program3, 25);
2502
2503        // Insert another unloaded program with a different environment at slot 20
2504        // Since this entry's environment won't match cache's environment, looking up this
2505        // entry should return missing instead of unloaded entry.
2506        cache.assign_program(
2507            program3,
2508            Arc::new(
2509                new_test_entry(20, 21)
2510                    .to_unloaded()
2511                    .expect("Failed to create unloaded program"),
2512            ),
2513        );
2514
2515        // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19
2516        let mut missing = get_entries_to_load(&cache, 19, &[program1, program2, program3]);
2517        assert!(match_missing(&missing, &program3, false));
2518        let mut extracted = ProgramCacheForTxBatch::new(19, cache.environments.clone(), None, 0);
2519        cache.extract(&mut missing, &mut extracted, true);
2520        assert!(match_slot(&extracted, &program1, 0, 19));
2521        assert!(match_slot(&extracted, &program2, 11, 19));
2522
2523        // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27
2524        let mut missing = get_entries_to_load(&cache, 27, &[program1, program2, program3]);
2525        let mut extracted = ProgramCacheForTxBatch::new(27, cache.environments.clone(), None, 0);
2526        cache.extract(&mut missing, &mut extracted, true);
2527        assert!(match_slot(&extracted, &program1, 0, 27));
2528        assert!(match_slot(&extracted, &program2, 11, 27));
2529        assert!(match_missing(&missing, &program3, true));
2530
2531        // Testing fork 0 - 10 - 20 - 22 with current slot at 22
2532        let mut missing = get_entries_to_load(&cache, 22, &[program1, program2, program3]);
2533        assert!(match_missing(&missing, &program2, false));
2534        let mut extracted = ProgramCacheForTxBatch::new(22, cache.environments.clone(), None, 0);
2535        cache.extract(&mut missing, &mut extracted, true);
2536        assert!(match_slot(&extracted, &program1, 20, 22));
2537        assert!(match_missing(&missing, &program3, true));
2538    }
2539
2540    #[test]
2541    fn test_extract_nonexistent() {
2542        let mut cache = new_mock_cache::<TestForkGraphSpecific>();
2543        let fork_graph = TestForkGraphSpecific::default();
2544        let fork_graph = Arc::new(RwLock::new(fork_graph));
2545        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2546
2547        let program1 = Pubkey::new_unique();
2548        let mut missing = vec![(program1, (ProgramCacheMatchCriteria::NoCriteria, 1))];
2549        let mut extracted = ProgramCacheForTxBatch::new(0, cache.environments.clone(), None, 0);
2550        cache.extract(&mut missing, &mut extracted, true);
2551        assert!(match_missing(&missing, &program1, true));
2552    }
2553
    #[test]
    fn test_unloaded() {
        let mut cache = new_mock_cache::<TestForkGraph>();
        for program_cache_entry_type in [
            ProgramCacheEntryType::FailedVerification(
                cache.environments.program_runtime_v1.clone(),
            ),
            ProgramCacheEntryType::Closed,
            ProgramCacheEntryType::Unloaded(cache.environments.program_runtime_v1.clone()),
            ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock()),
        ] {
            let entry = Arc::new(ProgramCacheEntry {
                program: program_cache_entry_type,
                account_owner: ProgramCacheEntryOwner::LoaderV2,
                account_size: 0,
                deployment_slot: 0,
                effective_slot: 0,
                tx_usage_counter: AtomicU64::default(),
                ix_usage_counter: AtomicU64::default(),
                latest_access_slot: AtomicU64::default(),
            });
            assert!(entry.to_unloaded().is_none());

            // Check that unload_program_entry() does nothing for this entry
            let program_id = Pubkey::new_unique();
            cache.assign_program(program_id, entry.clone());
            cache.unload_program_entry(&program_id, &entry);
            assert_eq!(cache.get_slot_versions_for_tests(&program_id).len(), 1);
            assert!(cache.stats.evictions.is_empty());
        }

        let entry = new_test_entry_with_usage(1, 2, AtomicU64::new(3));
        let unloaded_entry = entry.to_unloaded().unwrap();
        assert_eq!(unloaded_entry.deployment_slot, 1);
        assert_eq!(unloaded_entry.effective_slot, 2);
        assert_eq!(unloaded_entry.latest_access_slot.load(Ordering::Relaxed), 1);
        assert_eq!(unloaded_entry.tx_usage_counter.load(Ordering::Relaxed), 3);

        // Check that unload_program_entry() unloads a loaded entry and records the eviction
        let program_id = Pubkey::new_unique();
        cache.assign_program(program_id, entry.clone());
        cache.unload_program_entry(&program_id, &entry);
        assert!(cache.stats.evictions.contains_key(&program_id));
    }

    #[test]
    fn test_fork_prune_find_first_ancestor() {
        let mut cache = new_mock_cache::<TestForkGraphSpecific>();

        // Fork graph created for the test
        //                   0
        //                 /   \
        //                10    5
        //                |
        //                20

        // Deploy the program at slots 0 and 5.
        // Prune the fork that contains slot 5. The cache should still have the program
        // deployed at slot 0.
        let mut fork_graph = TestForkGraphSpecific::default();
        fork_graph.insert_fork(&[0, 10, 20]);
        fork_graph.insert_fork(&[0, 5]);
        let fork_graph = Arc::new(RwLock::new(fork_graph));
        cache.set_fork_graph(Arc::downgrade(&fork_graph));

        let program1 = Pubkey::new_unique();
        cache.assign_program(program1, new_test_entry(0, 1));
        cache.assign_program(program1, new_test_entry(5, 6));

        cache.prune(10, 0);

        let mut missing = get_entries_to_load(&cache, 20, &[program1]);
        let mut extracted = ProgramCacheForTxBatch::new(20, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);

        // The cache should have the program deployed at slot 0
        assert_eq!(
            extracted
                .find(&program1)
                .expect("Did not find the program")
                .deployment_slot,
            0
        );
    }

    #[test]
    fn test_prune_by_deployment_slot() {
        let mut cache = new_mock_cache::<TestForkGraphSpecific>();

        // Fork graph created for the test
        //                   0
        //                 /   \
        //                10    5
        //                |     |
        //                20    6

        // Deploy program1 at slots 0 and 5, and program2 at slot 10.
        // Pruning by deployment slot removes the entries deployed in that slot,
        // while entries deployed in other slots remain visible on both forks.
        let mut fork_graph = TestForkGraphSpecific::default();
        fork_graph.insert_fork(&[0, 10, 20]);
        fork_graph.insert_fork(&[0, 5, 6]);
        let fork_graph = Arc::new(RwLock::new(fork_graph));
        cache.set_fork_graph(Arc::downgrade(&fork_graph));

        let program1 = Pubkey::new_unique();
        cache.assign_program(program1, new_test_entry(0, 1));
        cache.assign_program(program1, new_test_entry(5, 6));

        let program2 = Pubkey::new_unique();
        cache.assign_program(program2, new_test_entry(10, 11));

        let mut missing = get_entries_to_load(&cache, 20, &[program1, program2]);
        let mut extracted = ProgramCacheForTxBatch::new(20, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 20));
        assert!(match_slot(&extracted, &program2, 10, 20));

        let mut missing = get_entries_to_load(&cache, 6, &[program1, program2]);
        assert!(match_missing(&missing, &program2, false));
        let mut extracted = ProgramCacheForTxBatch::new(6, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 5, 6));

        // Pruning slot 5 will remove the program1 entry deployed at slot 5.
        // On the fork that contains slot 5, the entry deployed at slot 0 becomes visible again.
        cache.prune_by_deployment_slot(5);

        let mut missing = get_entries_to_load(&cache, 20, &[program1, program2]);
        let mut extracted = ProgramCacheForTxBatch::new(20, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 20));
        assert!(match_slot(&extracted, &program2, 10, 20));

        let mut missing = get_entries_to_load(&cache, 6, &[program1, program2]);
        assert!(match_missing(&missing, &program2, false));
        let mut extracted = ProgramCacheForTxBatch::new(6, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 6));

        // Pruning slot 10 will remove the program2 entry deployed at slot 10.
        // As there is no other entry for program2, extract() will report it as missing.
        cache.prune_by_deployment_slot(10);

        let mut missing = get_entries_to_load(&cache, 20, &[program1, program2]);
        assert!(match_missing(&missing, &program2, false));
        let mut extracted = ProgramCacheForTxBatch::new(20, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 20));
    }

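    // Exercises ProgramCache::matches_criteria(): both tombstones and loaded entries
    // are accepted under NoCriteria, only tombstones match the Tombstone criteria,
    // and DeployedOnOrAfterSlot compares against the entry's deployment slot.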
    #[test]
    fn test_usable_entries_for_slot() {
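        // The constructed mock cache is not used directly below;
        // matches_criteria() is an associated function that takes no cache instance.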
        new_mock_cache::<TestForkGraph>();
        let tombstone = Arc::new(ProgramCacheEntry::new_tombstone(
            0,
            ProgramCacheEntryOwner::LoaderV2,
            ProgramCacheEntryType::Closed,
        ));

        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
            &tombstone,
            &ProgramCacheMatchCriteria::NoCriteria
        ));

        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
            &tombstone,
            &ProgramCacheMatchCriteria::Tombstone
        ));

        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
            &tombstone,
            &ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(0)
        ));

        assert!(!ProgramCache::<TestForkGraph>::matches_criteria(
            &tombstone,
            &ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(1)
        ));

        let program = new_test_entry(0, 1);

        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
            &program,
            &ProgramCacheMatchCriteria::NoCriteria
        ));

        assert!(!ProgramCache::<TestForkGraph>::matches_criteria(
            &program,
            &ProgramCacheMatchCriteria::Tombstone
        ));

        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
            &program,
            &ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(0)
        ));

        assert!(!ProgramCache::<TestForkGraph>::matches_criteria(
            &program,
            &ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(1)
        ));

        let program = Arc::new(new_test_entry_with_usage(0, 1, AtomicU64::default()));

        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
            &program,
            &ProgramCacheMatchCriteria::NoCriteria
        ));

        assert!(!ProgramCache::<TestForkGraph>::matches_criteria(
            &program,
            &ProgramCacheMatchCriteria::Tombstone
        ));

        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
            &program,
            &ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(0)
        ));

        assert!(!ProgramCache::<TestForkGraph>::matches_criteria(
            &program,
            &ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(1)
        ));
    }
}