rialo_s_program_runtime/
loaded_programs.rs

1use std::{
2    collections::{hash_map::Entry, HashMap},
3    fmt::{Debug, Formatter},
4};
5
6use log::{debug, error, log_enabled, trace};
7use percentage::PercentageInteger;
8use rialo_s_clock::{Epoch, Slot};
9use rialo_s_pubkey::Pubkey;
10use rialo_s_sdk_ids::{
11    bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, loader_v4, native_loader,
12};
13use rialo_s_type_overrides::{
14    rand::{thread_rng, Rng},
15    sync::{
16        atomic::{AtomicU64, Ordering},
17        Arc, Condvar, Mutex,
18    },
19    thread,
20};
21use solana_sbpf::{
22    elf::Executable, program::BuiltinProgram, verifier::RequisiteVerifier, vm::Config,
23};
24#[cfg(feature = "metrics")]
25use {rialo_s_measure::measure::Measure, rialo_s_timings::ExecuteDetailsTimings};
26
27use crate::invoke_context::{BuiltinFunctionWithContext, InvokeContext};
28
29pub type ProgramRuntimeEnvironment = Arc<BuiltinProgram<InvokeContext<'static>>>;
30pub const MAX_LOADED_ENTRY_COUNT: usize = 512;
31pub const DELAY_VISIBILITY_SLOT_OFFSET: Slot = 1;
32
/// Relationship between two fork IDs
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum BlockRelation {
    /// The slot is on the same fork and is an ancestor of the other slot
    Ancestor,
    /// The two slots are equal and are on the same fork
    Equal,
    /// The slot is on the same fork and is a descendant of the other slot
    Descendant,
    /// The slots are on two different forks and may have had a common ancestor at some point
    Unrelated,
    /// Either one or both of the slots are either older than the latest root, or are in the future
    Unknown,
}

/// Maps the relationship between two slots.
pub trait ForkGraph: Copy + Clone + Send + Sync {
    /// Returns the [BlockRelation] of slot `a` relative to slot `b`
    fn relationship(&self, a: Slot, b: Slot) -> BlockRelation;
}

/// The owner of a programs accounts, thus the loader of a program
#[derive(Default, Clone, Copy, PartialEq, Eq, Debug)]
pub enum ProgramCacheEntryOwner {
    /// The native loader, used for builtin programs
    #[default]
    NativeLoader,
    /// The deprecated BPF loader (`bpf_loader_deprecated`)
    LoaderV1,
    /// The BPF loader (`bpf_loader`)
    LoaderV2,
    /// The upgradeable BPF loader (`bpf_loader_upgradeable`)
    LoaderV3,
    /// Loader v4 (`loader_v4`)
    LoaderV4,
    /// The RISC-V program loader
    RiscVLoader,
}

66impl TryFrom<&Pubkey> for ProgramCacheEntryOwner {
67    type Error = ();
68    fn try_from(loader_key: &Pubkey) -> Result<Self, ()> {
69        if native_loader::check_id(loader_key) {
70            Ok(ProgramCacheEntryOwner::NativeLoader)
71        } else if bpf_loader_deprecated::check_id(loader_key) {
72            Ok(ProgramCacheEntryOwner::LoaderV1)
73        } else if bpf_loader::check_id(loader_key) {
74            Ok(ProgramCacheEntryOwner::LoaderV2)
75        } else if bpf_loader_upgradeable::check_id(loader_key) {
76            Ok(ProgramCacheEntryOwner::LoaderV3)
77        } else if loader_v4::check_id(loader_key) {
78            Ok(ProgramCacheEntryOwner::LoaderV4)
79        } else if rialo_risc_v_loader_interface::check_id(loader_key) {
80            Ok(ProgramCacheEntryOwner::RiscVLoader)
81        } else {
82            Err(())
83        }
84    }
85}
86
87impl From<ProgramCacheEntryOwner> for Pubkey {
88    fn from(program_cache_entry_owner: ProgramCacheEntryOwner) -> Self {
89        match program_cache_entry_owner {
90            ProgramCacheEntryOwner::NativeLoader => native_loader::id(),
91            ProgramCacheEntryOwner::LoaderV1 => bpf_loader_deprecated::id(),
92            ProgramCacheEntryOwner::LoaderV2 => bpf_loader::id(),
93            ProgramCacheEntryOwner::LoaderV3 => bpf_loader_upgradeable::id(),
94            ProgramCacheEntryOwner::LoaderV4 => loader_v4::id(),
95            ProgramCacheEntryOwner::RiscVLoader => rialo_risc_v_loader_interface::id(),
96        }
97    }
98}
99
/*
    The possible ProgramCacheEntryType transitions:

    DelayVisibility is special in that it is never stored in the cache.
    It is only returned by ProgramCacheForTxBatch::find() when a Loaded entry
    is encountered which is not effective yet.

    Builtin re/deployment:
    - Empty => Builtin in TransactionBatchProcessor::add_builtin
    - Builtin => Builtin in TransactionBatchProcessor::add_builtin

    Un/re/deployment (with delay and cooldown):
    - Empty / Closed => Loaded in UpgradeableLoaderInstruction::DeployWithMaxDataLen
    - Loaded / FailedVerification => Loaded in UpgradeableLoaderInstruction::Upgrade
    - Loaded / FailedVerification => Closed in UpgradeableLoaderInstruction::Close

    Eviction and unloading (in the same slot):
    - Unloaded => Loaded in ProgramCache::assign_program
    - Loaded => Unloaded in ProgramCache::unload_program_entry

    At epoch boundary (when feature set and environment changes):
    - Loaded => FailedVerification in Bank::_new_from_parent
    - FailedVerification => Loaded in Bank::_new_from_parent

    Through pruning (when on orphan fork or overshadowed on the rooted fork):
    - Closed / Unloaded / Loaded / Builtin => Empty in ProgramCache::prune
*/

/// Executable kind.
///
/// Distinguishes which virtual machine a cached program targets.
pub enum ExecutableKind {
    /// eBPF: a loaded (and, on supported targets, JIT-compiled) SBPF executable.
    Ebpf(Executable<InvokeContext<'static>>),

    /// RISC-V: the raw ELF bytes of the program.
    RiscV(Vec<u8>),
}

/// Actual payload of [ProgramCacheEntry].
#[derive(Default)]
pub enum ProgramCacheEntryType {
    /// Tombstone for programs which currently do not pass the verifier but could if the feature set changed.
    FailedVerification(ProgramRuntimeEnvironment),
    /// Tombstone for programs that were either explicitly closed or never deployed.
    ///
    /// It's also used for accounts belonging to program loaders, that don't actually contain program code (e.g. buffer accounts for LoaderV3 programs).
    #[default]
    Closed,
    /// Tombstone for programs which have recently been modified but the new version is not visible yet.
    DelayVisibility,
    /// Successfully verified but not currently compiled.
    ///
    /// It continues to track usage statistics even when the compiled executable of the program is evicted from memory.
    Unloaded(ProgramRuntimeEnvironment),
    /// Verified and compiled program
    Loaded(ExecutableKind),
    /// A built-in program which is not stored on-chain but baked into and distributed with the validator
    Builtin(BuiltinProgram<InvokeContext<'static>>),
}

159impl Debug for ProgramCacheEntryType {
160    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
161        match self {
162            ProgramCacheEntryType::FailedVerification(_) => {
163                write!(f, "ProgramCacheEntryType::FailedVerification")
164            }
165            ProgramCacheEntryType::Closed => write!(f, "ProgramCacheEntryType::Closed"),
166            ProgramCacheEntryType::DelayVisibility => {
167                write!(f, "ProgramCacheEntryType::DelayVisibility")
168            }
169            ProgramCacheEntryType::Unloaded(_) => write!(f, "ProgramCacheEntryType::Unloaded"),
170            ProgramCacheEntryType::Loaded(_) => write!(f, "ProgramCacheEntryType::Loaded"),
171            ProgramCacheEntryType::Builtin(_) => write!(f, "ProgramCacheEntryType::Builtin"),
172        }
173    }
174}
175
176impl ProgramCacheEntryType {
177    /// Returns a reference to its environment if it has one
178    pub fn get_environment(&self) -> Option<&ProgramRuntimeEnvironment> {
179        match self {
180            ProgramCacheEntryType::Loaded(ExecutableKind::Ebpf(program)) => {
181                Some(program.get_loader())
182            }
183            ProgramCacheEntryType::FailedVerification(env)
184            | ProgramCacheEntryType::Unloaded(env) => Some(env),
185            _ => None,
186        }
187    }
188}
189
/// Holds a program version at a specific address and on a specific slot / fork.
///
/// It contains the actual program in [ProgramCacheEntryType] and a bunch of meta-data.
/// The usage counters are atomics so that shared (`Arc`-ed) entries can be
/// updated without exclusive access.
#[derive(Debug, Default)]
pub struct ProgramCacheEntry {
    /// The program of this entry
    pub program: ProgramCacheEntryType,
    /// The loader of this entry
    pub account_owner: ProgramCacheEntryOwner,
    /// Size of account that stores the program and program data
    pub account_size: usize,
    /// Slot in which the program was (re)deployed
    pub deployment_slot: Slot,
    /// Slot in which this entry will become active (can be in the future)
    pub effective_slot: Slot,
    /// How often this entry was used by a transaction
    pub tx_usage_counter: AtomicU64,
    /// How often this entry was used by an instruction
    pub ix_usage_counter: AtomicU64,
    /// Latest slot in which the entry was used
    pub latest_access_slot: AtomicU64,
}

/// Global cache statistics for [ProgramCache].
#[derive(Debug, Default)]
pub struct ProgramCacheStats {
    /// a program was already in the cache
    pub hits: AtomicU64,
    /// a program was not found and loaded instead
    pub misses: AtomicU64,
    /// a compiled executable was unloaded, keyed by program address
    pub evictions: HashMap<Pubkey, u64>,
    /// an unloaded program was loaded again (opposite of eviction)
    pub reloads: AtomicU64,
    /// a program was loaded or un/re/deployed
    pub insertions: AtomicU64,
    /// a program was loaded but can not be extracted on its own fork anymore
    pub lost_insertions: AtomicU64,
    /// a program which was already in the cache was reloaded by mistake
    pub replacements: AtomicU64,
    /// a program was only used once before being unloaded
    pub one_hit_wonders: AtomicU64,
    /// a program became unreachable in the fork graph because of rerooting
    pub prunes_orphan: AtomicU64,
    /// a program got pruned because it was not recompiled for the next epoch
    pub prunes_environment: AtomicU64,
    /// a program had no entries because all slot versions got pruned
    pub empty_entries: AtomicU64,
    /// water level of loaded entries currently cached
    pub water_level: AtomicU64,
}

242impl ProgramCacheStats {
243    pub fn reset(&mut self) {
244        *self = ProgramCacheStats::default();
245    }
246    pub fn log(&self) {
247        let hits = self.hits.load(Ordering::Relaxed);
248        let misses = self.misses.load(Ordering::Relaxed);
249        let evictions: u64 = self.evictions.values().sum();
250        let reloads = self.reloads.load(Ordering::Relaxed);
251        let insertions = self.insertions.load(Ordering::Relaxed);
252        let lost_insertions = self.lost_insertions.load(Ordering::Relaxed);
253        let replacements = self.replacements.load(Ordering::Relaxed);
254        let one_hit_wonders = self.one_hit_wonders.load(Ordering::Relaxed);
255        let prunes_orphan = self.prunes_orphan.load(Ordering::Relaxed);
256        let prunes_environment = self.prunes_environment.load(Ordering::Relaxed);
257        let empty_entries = self.empty_entries.load(Ordering::Relaxed);
258        let water_level = self.water_level.load(Ordering::Relaxed);
259        debug!(
260            "Loaded Programs Cache Stats -- Hits: {}, Misses: {}, Evictions: {}, Reloads: {}, Insertions: {}, Lost-Insertions: {}, Replacements: {}, One-Hit-Wonders: {}, Prunes-Orphan: {}, Prunes-Environment: {}, Empty: {}, Water-Level: {}",
261            hits, misses, evictions, reloads, insertions, lost_insertions, replacements, one_hit_wonders, prunes_orphan, prunes_environment, empty_entries, water_level
262        );
263        if log_enabled!(log::Level::Trace) && !self.evictions.is_empty() {
264            let mut evictions = self.evictions.iter().collect::<Vec<_>>();
265            evictions.sort_by_key(|e| e.1);
266            let evictions = evictions
267                .into_iter()
268                .rev()
269                .map(|(program_id, evictions)| {
270                    format!("  {:<44}  {}", program_id.to_string(), evictions)
271                })
272                .collect::<Vec<_>>();
273            let evictions = evictions.join("\n");
274            trace!(
275                "Eviction Details:\n  {:<44}  {}\n{}",
276                "Program",
277                "Count",
278                evictions
279            );
280        }
281    }
282}
283
#[cfg(feature = "metrics")]
/// Time measurements for loading a single [ProgramCacheEntry].
#[derive(Debug, Default)]
pub struct LoadProgramMetrics {
    /// Program address, but as text
    pub program_id: String,
    /// Microseconds it took to `create_program_runtime_environment`
    pub register_syscalls_us: u64,
    /// Microseconds it took to `Executable::<InvokeContext>::load`
    pub load_elf_us: u64,
    /// Microseconds it took to `executable.verify::<RequisiteVerifier>`
    pub verify_code_us: u64,
    /// Microseconds it took to `executable.jit_compile`
    pub jit_compile_us: u64,
}

#[cfg(feature = "metrics")]
impl LoadProgramMetrics {
    /// Accumulates these per-program load timings into the batch-wide execute
    /// timings and emits a trace-level datapoint for this single program.
    pub fn submit_datapoint(&self, timings: &mut ExecuteDetailsTimings) {
        timings.create_executor_register_syscalls_us += self.register_syscalls_us;
        timings.create_executor_load_elf_us += self.load_elf_us;
        timings.create_executor_verify_code_us += self.verify_code_us;
        timings.create_executor_jit_compile_us += self.jit_compile_us;
        datapoint_trace!(
            "create_executor_trace",
            ("program_id", self.program_id, String),
            ("register_syscalls_us", self.register_syscalls_us, i64),
            ("load_elf_us", self.load_elf_us, i64),
            ("verify_code_us", self.verify_code_us, i64),
            ("jit_compile_us", self.jit_compile_us, i64),
        );
    }
}

318impl PartialEq for ProgramCacheEntry {
319    fn eq(&self, other: &Self) -> bool {
320        self.effective_slot == other.effective_slot
321            && self.deployment_slot == other.deployment_slot
322            && self.is_tombstone() == other.is_tombstone()
323    }
324}
325
impl ProgramCacheEntry {
    /// Creates a new eBPF user program.
    ///
    /// Runs the full pipeline: ELF loading, bytecode verification and
    /// (on supported targets) JIT compilation.
    pub fn new(
        loader_key: &Pubkey,
        program_runtime_environment: ProgramRuntimeEnvironment,
        deployment_slot: Slot,
        effective_slot: Slot,
        elf_bytes: &[u8],
        account_size: usize,
        #[cfg(feature = "metrics")] metrics: &mut LoadProgramMetrics,
    ) -> Result<Self, Box<dyn std::error::Error>> {
        Self::new_internal(
            loader_key,
            program_runtime_environment,
            deployment_slot,
            effective_slot,
            elf_bytes,
            account_size,
            #[cfg(feature = "metrics")]
            metrics,
            false, /* reloading */
        )
    }

    /// Creates a new RISC-V user program.
    ///
    /// The raw ELF bytes are stored as-is; no verification is performed here.
    pub fn new_risc_v(
        loader_key: &Pubkey,
        deployment_slot: Slot,
        effective_slot: Slot,
        elf_bytes: &[u8],
        account_size: usize,
    ) -> Result<Self, Box<dyn std::error::Error>> {
        Self::new_internal_risc_v(
            loader_key,
            deployment_slot,
            effective_slot,
            elf_bytes,
            account_size,
            false, /* reloading */
        )
    }

    /// Reloads an eBPF user program, *without* running the verifier.
    ///
    /// # Safety
    ///
    /// This method is unsafe since it assumes that the program has already been verified. Should
    /// only be called when the program was previously verified and loaded in the cache, but was
    /// unloaded due to inactivity. It should also be checked that the `program_runtime_environment`
    /// hasn't changed since it was unloaded.
    pub unsafe fn reload(
        loader_key: &Pubkey,
        program_runtime_environment: Arc<BuiltinProgram<InvokeContext<'static>>>,
        deployment_slot: Slot,
        effective_slot: Slot,
        elf_bytes: &[u8],
        account_size: usize,
        #[cfg(feature = "metrics")] metrics: &mut LoadProgramMetrics,
    ) -> Result<Self, Box<dyn std::error::Error>> {
        Self::new_internal(
            loader_key,
            program_runtime_environment,
            deployment_slot,
            effective_slot,
            elf_bytes,
            account_size,
            #[cfg(feature = "metrics")]
            metrics,
            true, /* reloading */
        )
    }

    /// Reloads a RISC-V user program, *without* running the verifier.
    ///
    /// # Safety
    ///
    /// This method is unsafe since it assumes that the program has already been verified. Should
    /// only be called when the program was previously verified and loaded in the cache, but was
    /// unloaded due to inactivity. It should also be checked that the `program_runtime_environment`
    /// hasn't changed since it was unloaded.
    pub unsafe fn reload_risc_v(
        loader_key: &Pubkey,
        deployment_slot: Slot,
        effective_slot: Slot,
        elf_bytes: &[u8],
        account_size: usize,
    ) -> Result<Self, Box<dyn std::error::Error>> {
        Self::new_internal_risc_v(
            loader_key,
            deployment_slot,
            effective_slot,
            elf_bytes,
            account_size,
            true, /* reloading */
        )
    }

    /// Shared implementation of [Self::new] and [Self::reload];
    /// `reloading == true` skips the verifier pass.
    fn new_internal(
        loader_key: &Pubkey,
        program_runtime_environment: Arc<BuiltinProgram<InvokeContext<'static>>>,
        deployment_slot: Slot,
        effective_slot: Slot,
        elf_bytes: &[u8],
        account_size: usize,
        #[cfg(feature = "metrics")] metrics: &mut LoadProgramMetrics,
        reloading: bool,
    ) -> Result<Self, Box<dyn std::error::Error>> {
        #[cfg(feature = "metrics")]
        let load_elf_time = Measure::start("load_elf_time");
        // The following unused_mut exception is needed for architectures that do not
        // support JIT compilation.
        #[allow(unused_mut)]
        let mut executable = Executable::load(elf_bytes, program_runtime_environment.clone())?;
        #[cfg(feature = "metrics")]
        {
            metrics.load_elf_us = load_elf_time.end_as_us();
        }

        if !reloading {
            #[cfg(feature = "metrics")]
            let verify_code_time = Measure::start("verify_code_time");
            executable.verify::<RequisiteVerifier>()?;
            #[cfg(feature = "metrics")]
            {
                metrics.verify_code_us = verify_code_time.end_as_us();
            }
        }

        // JIT compilation is only available on non-Windows x86_64 targets;
        // elsewhere the executable stays interpreted.
        #[cfg(all(not(target_os = "windows"), target_arch = "x86_64"))]
        {
            #[cfg(feature = "metrics")]
            let jit_compile_time = Measure::start("jit_compile_time");
            executable.jit_compile()?;
            #[cfg(feature = "metrics")]
            {
                metrics.jit_compile_us = jit_compile_time.end_as_us();
            }
        }

        Ok(Self {
            deployment_slot,
            account_owner: ProgramCacheEntryOwner::try_from(loader_key).unwrap(),
            account_size,
            effective_slot,
            tx_usage_counter: AtomicU64::new(0),
            program: ProgramCacheEntryType::Loaded(ExecutableKind::Ebpf(executable)),
            ix_usage_counter: AtomicU64::new(0),
            latest_access_slot: AtomicU64::new(0),
        })
    }

    /// Shared implementation of [Self::new_risc_v] and [Self::reload_risc_v].
    ///
    /// RISC-V programs are cached as their raw ELF bytes, so loading and
    /// reloading are currently identical (`_reloading` is unused).
    fn new_internal_risc_v(
        loader_key: &Pubkey,
        deployment_slot: Slot,
        effective_slot: Slot,
        elf_bytes: &[u8],
        account_size: usize,
        _reloading: bool,
    ) -> Result<Self, Box<dyn std::error::Error>> {
        Ok(Self {
            deployment_slot,
            account_owner: ProgramCacheEntryOwner::try_from(loader_key).unwrap(),
            account_size,
            effective_slot,
            tx_usage_counter: AtomicU64::new(0),
            program: ProgramCacheEntryType::Loaded(ExecutableKind::RiscV(elf_bytes.to_vec())),
            ix_usage_counter: AtomicU64::new(0),
            latest_access_slot: AtomicU64::new(0),
        })
    }

    /// Returns an [ProgramCacheEntryType::Unloaded] copy of this entry,
    /// dropping the executable but preserving the usage counters.
    /// Returns `None` if this entry is not currently `Loaded`
    /// (tombstones, builtins and already unloaded entries).
    pub fn to_unloaded(&self) -> Option<Self> {
        match &self.program {
            ProgramCacheEntryType::Loaded(_) => {}
            ProgramCacheEntryType::FailedVerification(_)
            | ProgramCacheEntryType::Closed
            | ProgramCacheEntryType::DelayVisibility
            | ProgramCacheEntryType::Unloaded(_)
            | ProgramCacheEntryType::Builtin(_) => {
                return None;
            }
        }
        Some(Self {
            // NOTE(review): for RISC-V `Loaded` entries `get_environment()` is
            // `None`, so this also returns `None` for them — confirm intended.
            program: ProgramCacheEntryType::Unloaded(self.program.get_environment()?.clone()),
            account_owner: self.account_owner,
            account_size: self.account_size,
            deployment_slot: self.deployment_slot,
            effective_slot: self.effective_slot,
            tx_usage_counter: AtomicU64::new(self.tx_usage_counter.load(Ordering::Relaxed)),
            ix_usage_counter: AtomicU64::new(self.ix_usage_counter.load(Ordering::Relaxed)),
            latest_access_slot: AtomicU64::new(self.latest_access_slot.load(Ordering::Relaxed)),
        })
    }

    /// Creates a new built-in program.
    ///
    /// The builtin becomes effective immediately (no delay visibility),
    /// as `effective_slot == deployment_slot`.
    pub fn new_builtin(
        deployment_slot: Slot,
        account_size: usize,
        builtin_function: BuiltinFunctionWithContext,
    ) -> Self {
        let mut program = BuiltinProgram::new_builtin();
        program
            .register_function("entrypoint", builtin_function)
            .unwrap();
        Self {
            deployment_slot,
            account_owner: ProgramCacheEntryOwner::NativeLoader,
            account_size,
            effective_slot: deployment_slot,
            tx_usage_counter: AtomicU64::new(0),
            program: ProgramCacheEntryType::Builtin(program),
            ix_usage_counter: AtomicU64::new(0),
            latest_access_slot: AtomicU64::new(0),
        }
    }

    /// Creates a tombstone entry at the given slot; `reason` must be one of
    /// the tombstone variants of [ProgramCacheEntryType] (checked in debug builds).
    pub fn new_tombstone(
        slot: Slot,
        account_owner: ProgramCacheEntryOwner,
        reason: ProgramCacheEntryType,
    ) -> Self {
        let tombstone = Self {
            program: reason,
            account_owner,
            account_size: 0,
            deployment_slot: slot,
            effective_slot: slot,
            tx_usage_counter: AtomicU64::default(),
            ix_usage_counter: AtomicU64::default(),
            latest_access_slot: AtomicU64::new(0),
        };
        debug_assert!(tombstone.is_tombstone());
        tombstone
    }

    /// Returns whether this entry is a tombstone
    /// (failed verification, closed, or delay visibility).
    pub fn is_tombstone(&self) -> bool {
        matches!(
            self.program,
            ProgramCacheEntryType::FailedVerification(_)
                | ProgramCacheEntryType::Closed
                | ProgramCacheEntryType::DelayVisibility
        )
    }

    /// Returns whether, at `slot`, this non-builtin entry has been deployed
    /// but is not effective yet (i.e. `slot` lies inside the delay visibility
    /// window `[deployment_slot, effective_slot)`).
    fn is_implicit_delay_visibility_tombstone(&self, slot: Slot) -> bool {
        !matches!(self.program, ProgramCacheEntryType::Builtin(_))
            && self.effective_slot.saturating_sub(self.deployment_slot)
                == DELAY_VISIBILITY_SLOT_OFFSET
            && slot >= self.deployment_slot
            && slot < self.effective_slot
    }

    /// Records that this entry was accessed at `slot`
    /// (monotonically advances `latest_access_slot`).
    pub fn update_access_slot(&self, slot: Slot) {
        let _ = self.latest_access_slot.fetch_max(slot, Ordering::Relaxed);
    }

    /// Returns the transaction usage counter, halved once for every slot that
    /// has passed since the entry was last accessed.
    pub fn decayed_usage_counter(&self, now: Slot) -> u64 {
        let last_access = self.latest_access_slot.load(Ordering::Relaxed);
        // Shifting the u64 value for more than 63 will cause an overflow.
        let decaying_for = std::cmp::min(63, now.saturating_sub(last_access));
        self.tx_usage_counter.load(Ordering::Relaxed) >> decaying_for
    }

    /// Returns the address of the loader which owns this program's accounts.
    pub fn account_owner(&self) -> Pubkey {
        self.account_owner.into()
    }
}

/// Globally shared RBPF config and syscall registry
///
/// This is only valid in an epoch range as long as no feature affecting RBPF is activated.
#[derive(Clone, Debug)]
pub struct ProgramRuntimeEnvironments {
    /// For program runtime V1
    pub program_runtime_v1: ProgramRuntimeEnvironment,
    /// For program runtime V2
    pub program_runtime_v2: ProgramRuntimeEnvironment,
}

605impl Default for ProgramRuntimeEnvironments {
606    fn default() -> Self {
607        let empty_loader = Arc::new(BuiltinProgram::new_loader(Config::default()));
608        Self {
609            program_runtime_v1: empty_loader.clone(),
610            program_runtime_v2: empty_loader,
611        }
612    }
613}
614
/// Wrapping generation counter used to detect progress of cooperative
/// loading tasks (see [LoadingTaskWaiter]).
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
pub struct LoadingTaskCookie(u64);

618impl LoadingTaskCookie {
619    fn new() -> Self {
620        Self(0)
621    }
622
623    fn update(&mut self) {
624        let LoadingTaskCookie(cookie) = self;
625        *cookie = cookie.wrapping_add(1);
626    }
627}
628
/// Suspends the thread in case no cooperative loading task was assigned
#[derive(Debug, Default)]
pub struct LoadingTaskWaiter {
    // Current generation, bumped on every notify()
    cookie: Mutex<LoadingTaskCookie>,
    // Wakes waiters whenever the cookie changes
    cond: Condvar,
}

636impl LoadingTaskWaiter {
637    pub fn new() -> Self {
638        Self {
639            cookie: Mutex::new(LoadingTaskCookie::new()),
640            cond: Condvar::new(),
641        }
642    }
643
644    pub fn cookie(&self) -> LoadingTaskCookie {
645        *self.cookie.lock().unwrap()
646    }
647
648    pub fn notify(&self) {
649        let mut cookie = self.cookie.lock().unwrap();
650        cookie.update();
651        self.cond.notify_all();
652    }
653
654    pub fn wait(&self, cookie: LoadingTaskCookie) -> LoadingTaskCookie {
655        let cookie_guard = self.cookie.lock().unwrap();
656        *self
657            .cond
658            .wait_while(cookie_guard, |current_cookie| *current_cookie == cookie)
659            .unwrap()
660    }
661}
662
/// The index strategy used by [ProgramCache] to store its entries.
#[derive(Debug)]
enum IndexImplementation {
    /// Fork-graph aware index implementation
    V1 {
        /// A two level index:
        ///
        /// - the first level is for the address at which programs are deployed
        /// - the second level for the slot (and thus also fork), sorted by slot number.
        entries: HashMap<Pubkey, Vec<Arc<ProgramCacheEntry>>>,
        /// The entries that are getting loaded and have not yet finished loading.
        ///
        /// The key is the program address, the value is a tuple of the slot in which the program is
        /// being loaded and the thread ID doing the load.
        ///
        /// It is possible that multiple TX batches from different slots need different versions of a
        /// program. The deployment slot of a program is only known after load though,
        /// so all loads for a given program key are serialized.
        loading_entries: Mutex<HashMap<Pubkey, (Slot, thread::ThreadId)>>,
    },
}

/// This structure is the global cache of loaded, verified and compiled programs.
///
/// It ...
/// - is validator global and fork graph aware, so it can optimize the commonalities across banks.
/// - handles the visibility rules of un/re/deployments.
/// - stores the usage statistics and verification status of each program.
/// - is elastic and uses a probabilistic eviction strategy based on the usage statistics.
/// - also keeps the compiled executables around, but only for the most used programs.
/// - supports various kinds of tombstones to avoid loading programs which can not be loaded.
/// - cleans up entries on orphan branches when the block store is rerooted.
/// - supports the cache preparation phase before feature activations which can change cached programs.
/// - manages the environments of the programs and upcoming environments for the next epoch.
/// - allows for cooperative loading of TX batches which hit the same missing programs simultaneously.
/// - enforces that all programs used in a batch are eagerly loaded ahead of execution.
/// - is not persisted to disk or a snapshot, so it needs to cold start and warm up first.
pub struct ProgramCache {
    /// Index of the cached entries and cooperative loading tasks
    index: IndexImplementation,
    /// The slot of the last rerooting
    pub latest_root_slot: Slot,
    /// The epoch of the last rerooting
    pub latest_root_epoch: Epoch,
    /// Environments of the current epoch
    pub environments: ProgramRuntimeEnvironments,
    /// Anticipated replacement for `environments` at the next epoch
    ///
    /// This is `None` during most of an epoch, and only `Some` around the boundaries (at the end and beginning of an epoch).
    /// More precisely, it starts with the cache preparation phase a few hundred slots before the epoch boundary,
    /// and it ends with the first rerooting after the epoch boundary.
    pub upcoming_environments: Option<ProgramRuntimeEnvironments>,
    /// List of loaded programs which should be recompiled before the next epoch (but don't have to).
    pub programs_to_recompile: Vec<(Pubkey, Arc<ProgramCacheEntry>)>,
    /// Statistics counters
    pub stats: ProgramCacheStats,
    /// Coordinates TX batches waiting for others to complete their task during cooperative loading
    pub loading_task_waiter: Arc<LoadingTaskWaiter>,
}

722impl Debug for ProgramCache {
723    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
724        f.debug_struct("ProgramCache")
725            .field("root slot", &self.latest_root_slot)
726            .field("root epoch", &self.latest_root_epoch)
727            .field("stats", &self.stats)
728            .field("index", &self.index)
729            .finish()
730    }
731}
732
/// Local view into [ProgramCache] which was extracted for a specific TX batch.
///
/// This isolation enables the global [ProgramCache] to continue to evolve (e.g. evictions),
/// while the TX batch is guaranteed it will continue to find all the programs it requires.
/// For program management instructions this also buffers them before they are merged back into the global [ProgramCache].
#[derive(Clone, Debug, Default)]
pub struct ProgramCacheForTxBatch {
    /// Pubkey is the address of a program.
    /// ProgramCacheEntry is the corresponding program entry valid for the slot in which a transaction is being executed.
    entries: HashMap<Pubkey, Arc<ProgramCacheEntry>>,
    /// Program entries modified during the transaction batch.
    modified_entries: HashMap<Pubkey, Arc<ProgramCacheEntry>>,
    /// The slot for which this batch view was extracted
    slot: Slot,
    /// Environments of the current epoch
    pub environments: ProgramRuntimeEnvironments,
    /// Anticipated replacement for `environments` at the next epoch.
    ///
    /// This is `None` during most of an epoch, and only `Some` around the boundaries (at the end and beginning of an epoch).
    /// More precisely, it starts with the cache preparation phase a few hundred slots before the epoch boundary,
    /// and it ends with the first rerooting after the epoch boundary.
    /// Needed when a program is deployed at the last slot of an epoch, becomes effective in the next epoch.
    /// So needs to be compiled with the environment for the next epoch.
    pub upcoming_environments: Option<ProgramRuntimeEnvironments>,
    /// The epoch of the last rerooting
    pub latest_root_epoch: Epoch,
    /// Whether the global cache hit its entry limit while extracting this batch
    pub hit_max_limit: bool,
    /// Whether any program missing from the cache was loaded for this batch
    pub loaded_missing: bool,
    /// Whether `modified_entries` have been merged back into the global cache
    pub merged_modified: bool,
}

762impl ProgramCacheForTxBatch {
763    pub fn new(
764        slot: Slot,
765        environments: ProgramRuntimeEnvironments,
766        upcoming_environments: Option<ProgramRuntimeEnvironments>,
767        latest_root_epoch: Epoch,
768    ) -> Self {
769        Self {
770            entries: HashMap::new(),
771            modified_entries: HashMap::new(),
772            slot,
773            environments,
774            upcoming_environments,
775            latest_root_epoch,
776            hit_max_limit: false,
777            loaded_missing: false,
778            merged_modified: false,
779        }
780    }
781
782    pub fn new_from_cache(slot: Slot, epoch: Epoch, cache: &ProgramCache) -> Self {
783        Self {
784            entries: HashMap::new(),
785            modified_entries: HashMap::new(),
786            slot,
787            environments: cache.get_environments_for_epoch(epoch),
788            upcoming_environments: cache.get_upcoming_environments_for_epoch(epoch),
789            latest_root_epoch: cache.latest_root_epoch,
790            hit_max_limit: false,
791            loaded_missing: false,
792            merged_modified: false,
793        }
794    }
795
796    /// Returns the current environments depending on the given epoch
797    pub fn get_environments_for_epoch(&self, epoch: Epoch) -> &ProgramRuntimeEnvironments {
798        if epoch != self.latest_root_epoch {
799            if let Some(upcoming_environments) = self.upcoming_environments.as_ref() {
800                return upcoming_environments;
801            }
802        }
803        &self.environments
804    }
805
806    /// Refill the cache with a single entry. It's typically called during transaction loading, and
807    /// transaction processing (for program management instructions).
808    /// It replaces the existing entry (if any) with the provided entry. The return value contains
809    /// `true` if an entry existed.
810    /// The function also returns the newly inserted value.
811    pub fn replenish(
812        &mut self,
813        key: Pubkey,
814        entry: Arc<ProgramCacheEntry>,
815    ) -> (bool, Arc<ProgramCacheEntry>) {
816        (self.entries.insert(key, entry.clone()).is_some(), entry)
817    }
818
819    /// Store an entry in `modified_entries` for a program modified during the
820    /// transaction batch.
821    pub fn store_modified_entry(&mut self, key: Pubkey, entry: Arc<ProgramCacheEntry>) {
822        self.modified_entries.insert(key, entry);
823    }
824
825    /// Drain the program cache's modified entries, returning the owned
826    /// collection.
827    pub fn drain_modified_entries(&mut self) -> HashMap<Pubkey, Arc<ProgramCacheEntry>> {
828        std::mem::take(&mut self.modified_entries)
829    }
830
831    pub fn find(&self, key: &Pubkey) -> Option<Arc<ProgramCacheEntry>> {
832        // First lookup the cache of the programs modified by the current
833        // transaction. If not found, lookup the cache of the cache of the
834        // programs that are loaded for the transaction batch.
835        self.modified_entries
836            .get(key)
837            .or(self.entries.get(key))
838            .map(|entry| {
839                if entry.is_implicit_delay_visibility_tombstone(self.slot) {
840                    // Found a program entry on the current fork, but it's not effective
841                    // yet. It indicates that the program has delayed visibility. Return
842                    // the tombstone to reflect that.
843                    Arc::new(ProgramCacheEntry::new_tombstone(
844                        entry.deployment_slot,
845                        entry.account_owner,
846                        ProgramCacheEntryType::DelayVisibility,
847                    ))
848                } else {
849                    entry.clone()
850                }
851            })
852    }
853
854    pub fn slot(&self) -> Slot {
855        self.slot
856    }
857
858    pub fn set_slot_for_tests(&mut self, slot: Slot) {
859        self.slot = slot;
860    }
861
862    pub fn merge(&mut self, modified_entries: &HashMap<Pubkey, Arc<ProgramCacheEntry>>) {
863        modified_entries.iter().for_each(|(key, entry)| {
864            self.merged_modified = true;
865            self.replenish(*key, entry.clone());
866        })
867    }
868
869    pub fn is_empty(&self) -> bool {
870        self.entries.is_empty()
871    }
872}
873
/// Criteria by which [ProgramCache::extract] decides whether a cached entry
/// is acceptable for a transaction batch.
pub enum ProgramCacheMatchCriteria {
    /// Only accept entries deployed at or after the given slot.
    DeployedOnOrAfterSlot(Slot),
    /// Only accept tombstone entries.
    Tombstone,
    /// Accept any entry.
    NoCriteria,
}
879
impl ProgramCache {
    /// Creates an empty cache rooted at the given slot and epoch, with
    /// default environments and statistics.
    pub fn new(root_slot: Slot, root_epoch: Epoch) -> Self {
        Self {
            index: IndexImplementation::V1 {
                entries: HashMap::new(),
                loading_entries: Mutex::new(HashMap::new()),
            },
            latest_root_slot: root_slot,
            latest_root_epoch: root_epoch,
            environments: ProgramRuntimeEnvironments::default(),
            upcoming_environments: None,
            programs_to_recompile: Vec::default(),
            stats: ProgramCacheStats::default(),
            loading_task_waiter: Arc::new(LoadingTaskWaiter::default()),
        }
    }

    /// Returns the current environments depending on the given epoch:
    /// for an epoch other than the latest root epoch, the upcoming
    /// environments are returned when they are already known.
    pub fn get_environments_for_epoch(&self, epoch: Epoch) -> ProgramRuntimeEnvironments {
        if epoch != self.latest_root_epoch {
            if let Some(upcoming_environments) = self.upcoming_environments.as_ref() {
                return upcoming_environments.clone();
            }
        }
        self.environments.clone()
    }

    /// Returns the upcoming environments depending on the given epoch
    /// (only for the latest root epoch; `None` otherwise).
    pub fn get_upcoming_environments_for_epoch(
        &self,
        epoch: Epoch,
    ) -> Option<ProgramRuntimeEnvironments> {
        if epoch == self.latest_root_epoch {
            return self.upcoming_environments.clone();
        }
        None
    }

    /// Insert a single entry. It's typically called during transaction loading,
    /// when the cache doesn't contain the entry corresponding to program `key`.
    ///
    /// Returns `true` only when an illegal in-place replacement of an existing
    /// entry was attempted (reported via `stats.replacements` and a debug
    /// assertion); all regular insert/update paths return `false`.
    pub fn assign_program(&mut self, key: Pubkey, entry: Arc<ProgramCacheEntry>) -> bool {
        // Delay-visibility tombstones are synthesized on lookup and must never
        // be stored in the global index.
        debug_assert!(!matches!(
            &entry.program,
            ProgramCacheEntryType::DelayVisibility
        ));
        // This function always returns `true` during normal operation.
        // Only during the cache preparation phase this can return `false`
        // for entries with `upcoming_environments`.
        fn is_current_env(
            environments: &ProgramRuntimeEnvironments,
            env_opt: Option<&ProgramRuntimeEnvironment>,
        ) -> bool {
            env_opt
                .map(|env| {
                    Arc::ptr_eq(env, &environments.program_runtime_v1)
                        || Arc::ptr_eq(env, &environments.program_runtime_v2)
                })
                .unwrap_or(true)
        }
        match &mut self.index {
            IndexImplementation::V1 { entries, .. } => {
                // NOTE(review): `entry().or_default()` already yields a
                // `&mut Vec<_>`; the extra `&mut` creates a `&mut &mut Vec<_>`
                // that only works via auto-deref — the borrow is redundant.
                let slot_versions = &mut entries.entry(key).or_default();
                // `slot_versions` is kept sorted by (effective_slot,
                // deployment_slot); an `Ok` hit means an entry for the same
                // slot pair already exists.
                match slot_versions.binary_search_by(|at| {
                    at.effective_slot
                        .cmp(&entry.effective_slot)
                        .then(at.deployment_slot.cmp(&entry.deployment_slot))
                        .then(
                            // This `.then()` has no effect during normal operation.
                            // Only during the cache preparation phase this does allow entries
                            // which only differ in their environment to be interleaved in `slot_versions`.
                            is_current_env(&self.environments, at.program.get_environment()).cmp(
                                &is_current_env(
                                    &self.environments,
                                    entry.program.get_environment(),
                                ),
                            ),
                        )
                }) {
                    Ok(index) => {
                        let existing = slot_versions.get_mut(index).unwrap();
                        // Only two in-place replacements are legal:
                        // re-registering a builtin, and re-loading a
                        // previously unloaded program.
                        match (&existing.program, &entry.program) {
                            (
                                ProgramCacheEntryType::Builtin(_),
                                ProgramCacheEntryType::Builtin(_),
                            )
                            | (
                                ProgramCacheEntryType::Unloaded(_),
                                ProgramCacheEntryType::Loaded(_),
                            ) => {}
                            _ => {
                                // Something is wrong, I can feel it ...
                                error!("ProgramCache::assign_program() failed key={:?} existing={:?} entry={:?}", key, slot_versions, entry);
                                debug_assert!(false, "Unexpected replacement of an entry");
                                self.stats.replacements.fetch_add(1, Ordering::Relaxed);
                                return true;
                            }
                        }
                        // Copy over the usage counter to the new entry
                        entry.tx_usage_counter.fetch_add(
                            existing.tx_usage_counter.load(Ordering::Relaxed),
                            Ordering::Relaxed,
                        );
                        entry.ix_usage_counter.fetch_add(
                            existing.ix_usage_counter.load(Ordering::Relaxed),
                            Ordering::Relaxed,
                        );
                        *existing = Arc::clone(&entry);
                        self.stats.reloads.fetch_add(1, Ordering::Relaxed);
                    }
                    Err(index) => {
                        // No entry for this slot pair yet: insert at the sorted position.
                        self.stats.insertions.fetch_add(1, Ordering::Relaxed);
                        slot_versions.insert(index, Arc::clone(&entry));
                    }
                }
            }
        }
        false
    }

    /// Removes every entry that was deployed exactly at `slot`, then drops
    /// programs which are left with no entries at all.
    pub fn prune_by_deployment_slot(&mut self, slot: Slot) {
        match &mut self.index {
            IndexImplementation::V1 { entries, .. } => {
                for second_level in entries.values_mut() {
                    second_level.retain(|entry| entry.deployment_slot != slot);
                }
                self.remove_programs_with_no_entries();
            }
        }
    }

    /// Before rerooting the blockstore this removes all superfluous entries
    pub fn prune(&mut self, new_root_slot: Slot, new_root_epoch: Epoch) {
        let mut preparation_phase_ends = false;
        if self.latest_root_epoch != new_root_epoch {
            self.latest_root_epoch = new_root_epoch;
            // Crossing an epoch boundary promotes the upcoming environments to
            // current and ends the cache preparation phase.
            if let Some(upcoming_environments) = self.upcoming_environments.take() {
                preparation_phase_ends = true;
                self.environments = upcoming_environments;
                self.programs_to_recompile.clear();
            }
        }
        match &mut self.index {
            IndexImplementation::V1 { entries, .. } => {
                for second_level in entries.values_mut() {
                    // Remove entries un/re/deployed on orphan forks
                    let mut first_ancestor_found = false;
                    let mut first_ancestor_env = None;
                    // Iterate newest-first so the first entry at or below the
                    // new root is the live ancestor version.
                    *second_level = second_level
                        .iter()
                        .rev()
                        .filter(|entry| {
                            if entry.deployment_slot >= new_root_slot {
                                true
                            } else if entry.deployment_slot < new_root_slot
                                || entry.deployment_slot <= self.latest_root_slot
                            {
                                // NOTE(review): once the branch above failed,
                                // `deployment_slot < new_root_slot` is always true,
                                // so this arm always runs and the trailing `else`
                                // below is unreachable — confirm intent.
                                if !first_ancestor_found {
                                    first_ancestor_found = true;
                                    first_ancestor_env = entry.program.get_environment();
                                    return true;
                                }
                                // Do not prune the entry if the runtime environment of the entry is different
                                // than the entry that was previously found (stored in first_ancestor_env).
                                // Different environment indicates that this entry might belong to an older
                                // epoch that had a different environment (e.g. different feature set).
                                // Once the root moves to the new/current epoch, the entry will get pruned.
                                // But, until then the entry might still be getting used by an older slot.
                                if let Some(entry_env) = entry.program.get_environment() {
                                    if let Some(env) = first_ancestor_env {
                                        if !Arc::ptr_eq(entry_env, env) {
                                            return true;
                                        }
                                    }
                                }
                                self.stats.prunes_orphan.fetch_add(1, Ordering::Relaxed);
                                false
                            } else {
                                self.stats.prunes_orphan.fetch_add(1, Ordering::Relaxed);
                                false
                            }
                        })
                        .filter(|entry| {
                            // Remove outdated environment of previous feature set
                            if preparation_phase_ends
                                && !Self::matches_environment(entry, &self.environments)
                            {
                                self.stats
                                    .prunes_environment
                                    .fetch_add(1, Ordering::Relaxed);
                                return false;
                            }
                            true
                        })
                        .cloned()
                        .collect();
                    // Restore ascending order after the reversed scan.
                    second_level.reverse();
                }
            }
        }
        self.remove_programs_with_no_entries();
        debug_assert!(self.latest_root_slot <= new_root_slot);
        self.latest_root_slot = new_root_slot;
    }

    /// Returns `true` when `entry` was compiled for one of the given
    /// `environments`; entries without an environment always match.
    fn matches_environment(
        entry: &Arc<ProgramCacheEntry>,
        environments: &ProgramRuntimeEnvironments,
    ) -> bool {
        let Some(environment) = entry.program.get_environment() else {
            return true;
        };
        Arc::ptr_eq(environment, &environments.program_runtime_v1)
            || Arc::ptr_eq(environment, &environments.program_runtime_v2)
    }

    /// Returns `true` when `program` satisfies the extraction `criteria`.
    fn matches_criteria(
        program: &Arc<ProgramCacheEntry>,
        criteria: &ProgramCacheMatchCriteria,
    ) -> bool {
        match criteria {
            ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(slot) => {
                program.deployment_slot >= *slot
            }
            ProgramCacheMatchCriteria::Tombstone => program.is_tombstone(),
            ProgramCacheMatchCriteria::NoCriteria => true,
        }
    }

    /// Extracts a subset of the programs relevant to a transaction batch
    /// and returns which program accounts the accounts DB needs to load.
    ///
    /// Keys that are found are moved from `search_for` into
    /// `loaded_programs_for_tx_batch`; the return value names at most one
    /// missing program which the calling thread should load cooperatively.
    pub fn extract(
        &self,
        search_for: &mut Vec<(Pubkey, (ProgramCacheMatchCriteria, u64))>,
        loaded_programs_for_tx_batch: &mut ProgramCacheForTxBatch,
        is_first_round: bool,
    ) -> Option<(Pubkey, u64)> {
        let mut cooperative_loading_task = None;
        match &self.index {
            IndexImplementation::V1 {
                entries,
                loading_entries,
            } => {
                search_for.retain(|(key, (match_criteria, usage_count))| {
                    if let Some(second_level) = entries.get(key) {
                        // Scan newest-to-oldest so the most recent visible
                        // version wins.
                        for entry in second_level.iter().rev() {
                            if entry.deployment_slot <= self.latest_root_slot
                                || entry.deployment_slot <= loaded_programs_for_tx_batch.slot
                            {
                                let entry_to_return = if loaded_programs_for_tx_batch.slot
                                    >= entry.effective_slot
                                    && Self::matches_environment(
                                        entry,
                                        &loaded_programs_for_tx_batch.environments,
                                    ) {
                                    if !Self::matches_criteria(entry, match_criteria) {
                                        break;
                                    }
                                    // Unloaded entries must be reloaded from
                                    // accounts DB, so treat them as a miss.
                                    if let ProgramCacheEntryType::Unloaded(_environment) =
                                        &entry.program
                                    {
                                        break;
                                    }
                                    entry.clone()
                                } else if entry.is_implicit_delay_visibility_tombstone(
                                    loaded_programs_for_tx_batch.slot,
                                ) {
                                    // Found a program entry on the current fork, but it's not effective
                                    // yet. It indicates that the program has delayed visibility. Return
                                    // the tombstone to reflect that.
                                    Arc::new(ProgramCacheEntry::new_tombstone(
                                        entry.deployment_slot,
                                        entry.account_owner,
                                        ProgramCacheEntryType::DelayVisibility,
                                    ))
                                } else {
                                    continue;
                                };
                                entry_to_return
                                    .update_access_slot(loaded_programs_for_tx_batch.slot);
                                entry_to_return
                                    .tx_usage_counter
                                    .fetch_add(*usage_count, Ordering::Relaxed);
                                loaded_programs_for_tx_batch
                                    .entries
                                    .insert(*key, entry_to_return);
                                return false;
                            }
                        }
                    }
                    // Not found: try to claim the cooperative-loading slot for
                    // this key so only one thread loads it.
                    if cooperative_loading_task.is_none() {
                        let mut loading_entries = loading_entries.lock().unwrap();
                        let entry = loading_entries.entry(*key);
                        if let Entry::Vacant(entry) = entry {
                            entry.insert((
                                loaded_programs_for_tx_batch.slot,
                                thread::current().id(),
                            ));
                            cooperative_loading_task = Some((*key, *usage_count));
                        }
                    }
                    true
                });
            }
        }
        if is_first_round {
            // Everything still in `search_for` is a miss; everything moved
            // into the batch-local cache is a hit.
            self.stats
                .misses
                .fetch_add(search_for.len() as u64, Ordering::Relaxed);
            self.stats.hits.fetch_add(
                loaded_programs_for_tx_batch.entries.len() as u64,
                Ordering::Relaxed,
            );
        }
        cooperative_loading_task
    }

    /// Called by Bank::replenish_program_cache() for each program that is done loading.
    ///
    /// Returns the result of [Self::assign_program] for the loaded entry.
    pub fn finish_cooperative_loading_task(
        &mut self,
        slot: Slot,
        key: Pubkey,
        loaded_program: Arc<ProgramCacheEntry>,
    ) -> bool {
        match &mut self.index {
            IndexImplementation::V1 {
                loading_entries, ..
            } => {
                // Release the claim this thread took in `extract()`.
                let loading_thread = loading_entries.get_mut().unwrap().remove(&key);
                debug_assert_eq!(loading_thread, Some((slot, thread::current().id())));
                // Check that it will be visible to our own fork once inserted
                if loaded_program.deployment_slot > self.latest_root_slot
                    && loaded_program.deployment_slot > slot
                {
                    self.stats.lost_insertions.fetch_add(1, Ordering::Relaxed);
                }
                let was_occupied = self.assign_program(key, loaded_program);
                self.loading_task_waiter.notify();
                was_occupied
            }
        }
    }

    /// Inserts all `modified_entries` into the global cache.
    pub fn merge(&mut self, modified_entries: &HashMap<Pubkey, Arc<ProgramCacheEntry>>) {
        modified_entries.iter().for_each(|(key, entry)| {
            self.assign_program(*key, entry.clone());
        })
    }

    /// Returns the list of entries which are verified and compiled.
    // NOTE(review): `_include_program_runtime_v2` is ignored; only `Loaded`
    // entries gated by the v1 flag are returned.
    pub fn get_flattened_entries(
        &self,
        include_program_runtime_v1: bool,
        _include_program_runtime_v2: bool,
    ) -> Vec<(Pubkey, Arc<ProgramCacheEntry>)> {
        match &self.index {
            IndexImplementation::V1 { entries, .. } => entries
                .iter()
                .flat_map(|(id, second_level)| {
                    second_level
                        .iter()
                        .filter_map(move |program| match program.program {
                            ProgramCacheEntryType::Loaded(_) => {
                                if include_program_runtime_v1 {
                                    Some((*id, program.clone()))
                                } else {
                                    None
                                }
                            }
                            _ => None,
                        })
                })
                .collect(),
        }
    }

    /// Returns the list of all entries in the cache.
    pub fn get_flattened_entries_for_tests(&self) -> Vec<(Pubkey, Arc<ProgramCacheEntry>)> {
        match &self.index {
            IndexImplementation::V1 { entries, .. } => entries
                .iter()
                .flat_map(|(id, second_level)| {
                    second_level.iter().map(|program| (*id, program.clone()))
                })
                .collect(),
        }
    }

    /// Returns the slot versions for the given program id.
    pub fn get_slot_versions_for_tests(&self, key: &Pubkey) -> &[Arc<ProgramCacheEntry>] {
        match &self.index {
            IndexImplementation::V1 { entries, .. } => entries
                .get(key)
                .map(|second_level| second_level.as_ref())
                .unwrap_or(&[]),
        }
    }

    /// Unloads programs which were used infrequently
    pub fn sort_and_unload(&mut self, shrink_to: PercentageInteger) {
        let mut sorted_candidates = self.get_flattened_entries(true, true);
        // Least-used first so `take(num_to_unload)` evicts the cold entries.
        sorted_candidates
            .sort_by_cached_key(|(_id, program)| program.tx_usage_counter.load(Ordering::Relaxed));
        let num_to_unload = sorted_candidates
            .len()
            .saturating_sub(shrink_to.apply_to(MAX_LOADED_ENTRY_COUNT));
        self.unload_program_entries(sorted_candidates.iter().take(num_to_unload));
    }

    /// Evicts programs using 2's random selection, choosing the least used program out of the two entries.
    /// The eviction is performed enough number of times to reduce the cache usage to the given percentage.
    pub fn evict_using_2s_random_selection(&mut self, shrink_to: PercentageInteger, now: Slot) {
        let mut candidates = self.get_flattened_entries(true, true);
        self.stats
            .water_level
            .store(candidates.len() as u64, Ordering::Relaxed);
        let num_to_unload = candidates
            .len()
            .saturating_sub(shrink_to.apply_to(MAX_LOADED_ENTRY_COUNT));
        // Picks a uniformly random candidate and its decayed usage counter.
        fn random_index_and_usage_counter(
            candidates: &[(Pubkey, Arc<ProgramCacheEntry>)],
            now: Slot,
        ) -> (usize, u64) {
            let mut rng = thread_rng();
            let index = rng.gen_range(0..candidates.len());
            let usage_counter = candidates
                .get(index)
                .expect("Failed to get cached entry")
                .1
                .decayed_usage_counter(now);
            (index, usage_counter)
        }

        for _ in 0..num_to_unload {
            // Power-of-two-choices: sample two entries, evict the colder one.
            let (index1, usage_counter1) = random_index_and_usage_counter(&candidates, now);
            let (index2, usage_counter2) = random_index_and_usage_counter(&candidates, now);

            let (program, entry) = if usage_counter1 < usage_counter2 {
                candidates.swap_remove(index1)
            } else {
                candidates.swap_remove(index2)
            };
            self.unload_program_entry(&program, &entry);
        }
    }

    /// Removes all the entries at the given keys, if they exist
    pub fn remove_programs(&mut self, keys: impl Iterator<Item = Pubkey>) {
        match &mut self.index {
            IndexImplementation::V1 { entries, .. } => {
                for k in keys {
                    entries.remove(&k);
                }
            }
        }
    }

    /// This function removes the given entry for the given program from the cache.
    /// The function expects that the program and entry exists in the cache. Otherwise it'll panic.
    fn unload_program_entry(&mut self, program: &Pubkey, remove_entry: &Arc<ProgramCacheEntry>) {
        match &mut self.index {
            IndexImplementation::V1 { entries, .. } => {
                let second_level = entries.get_mut(program).expect("Cache lookup failed");
                let candidate = second_level
                    .iter_mut()
                    .find(|entry| entry == &remove_entry)
                    .expect("Program entry not found");

                // Certain entry types cannot be unloaded, such as tombstones, or already unloaded entries.
                // For such entries, `to_unloaded()` will return None.
                // These entry types do not occupy much memory.
                if let Some(unloaded) = candidate.to_unloaded() {
                    if candidate.tx_usage_counter.load(Ordering::Relaxed) == 1 {
                        self.stats.one_hit_wonders.fetch_add(1, Ordering::Relaxed);
                    }
                    self.stats
                        .evictions
                        .entry(*program)
                        .and_modify(|c| *c = c.saturating_add(1))
                        .or_insert(1);
                    *candidate = Arc::new(unloaded);
                }
            }
        }
    }

    /// Unloads each (program, entry) pair yielded by `remove`.
    fn unload_program_entries<'a>(
        &mut self,
        remove: impl Iterator<Item = &'a (Pubkey, Arc<ProgramCacheEntry>)>,
    ) {
        for (program, entry) in remove {
            self.unload_program_entry(program, entry);
        }
    }

    /// Drops programs whose slot-version list became empty, counting them in
    /// `stats.empty_entries`.
    fn remove_programs_with_no_entries(&mut self) {
        match &mut self.index {
            IndexImplementation::V1 { entries, .. } => {
                let num_programs_before_removal = entries.len();
                entries.retain(|_key, second_level| !second_level.is_empty());
                if entries.len() < num_programs_before_removal {
                    self.stats.empty_entries.fetch_add(
                        num_programs_before_removal.saturating_sub(entries.len()) as u64,
                        Ordering::Relaxed,
                    );
                }
            }
        }
    }
}
1389
#[cfg(feature = "frozen-abi")]
impl rialo_frozen_abi::abi_example::AbiExample for ProgramCacheEntry {
    /// Provides a placeholder value for ABI digestion.
    fn example() -> Self {
        // ProgramCacheEntry isn't serializable by definition.
        Self::default()
    }
}
1397
#[cfg(feature = "frozen-abi")]
impl rialo_frozen_abi::abi_example::AbiExample for ProgramCache {
    /// Provides a placeholder value for ABI digestion.
    fn example() -> Self {
        // ProgramCache isn't serializable by definition.
        Self::new(Slot::default(), Epoch::default())
    }
}
1405
1406#[cfg(test)]
1407mod tests {
1408    use std::{
1409        fs::File,
1410        io::Read,
1411        ops::ControlFlow,
1412        sync::{
1413            atomic::{AtomicU64, Ordering},
1414            Arc, RwLock,
1415        },
1416    };
1417
1418    use assert_matches::assert_matches;
1419    use percentage::Percentage;
1420    use rialo_s_clock::Slot;
1421    use rialo_s_pubkey::Pubkey;
1422    use solana_sbpf::{elf::Executable, program::BuiltinProgram};
1423    use test_case::{test_case, test_matrix};
1424
1425    use crate::loaded_programs::{
1426        BlockRelation, ForkGraph, ProgramCache, ProgramCacheEntry, ProgramCacheEntryOwner,
1427        ProgramCacheEntryType, ProgramCacheForTxBatch, ProgramCacheMatchCriteria,
1428        ProgramRuntimeEnvironment, ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET,
1429    };
1430
    // Lazily-initialized, process-wide mock runtime environment shared by all tests.
    static MOCK_ENVIRONMENT: std::sync::OnceLock<ProgramRuntimeEnvironment> =
        std::sync::OnceLock::<ProgramRuntimeEnvironment>::new();
1433
1434    fn get_mock_env() -> ProgramRuntimeEnvironment {
1435        MOCK_ENVIRONMENT
1436            .get_or_init(|| Arc::new(BuiltinProgram::new_mock()))
1437            .clone()
1438    }
1439
    /// Builds an empty cache rooted at slot 0 / epoch 0 whose v1 runtime
    /// environment is the shared mock environment.
    fn new_mock_cache() -> ProgramCache {
        let mut cache = ProgramCache::new(0, 0);
        cache.environments.program_runtime_v1 = get_mock_env();
        cache
    }
1445
    /// Convenience wrapper around [new_test_entry_with_usage] with a zeroed usage counter.
    fn new_test_entry(deployment_slot: Slot, effective_slot: Slot) -> Arc<ProgramCacheEntry> {
        new_test_entry_with_usage(deployment_slot, effective_slot, AtomicU64::default())
    }
1449
1450    fn new_loaded_entry(env: ProgramRuntimeEnvironment) -> ProgramCacheEntryType {
1451        let mut elf = Vec::new();
1452        File::open("../programs/bpf_loader/test_elfs/out/noop_aligned.so")
1453            .unwrap()
1454            .read_to_end(&mut elf)
1455            .unwrap();
1456        let executable = Executable::load(&elf, env).unwrap();
1457        ProgramCacheEntryType::Loaded(executable)
1458    }
1459
    /// Builds a loaded (LoaderV2-owned) cache entry for the given slots with
    /// the supplied transaction usage counter; the instruction usage counter
    /// starts at zero.
    fn new_test_entry_with_usage(
        deployment_slot: Slot,
        effective_slot: Slot,
        usage_counter: AtomicU64,
    ) -> Arc<ProgramCacheEntry> {
        Arc::new(ProgramCacheEntry {
            program: new_loaded_entry(get_mock_env()),
            account_owner: ProgramCacheEntryOwner::LoaderV2,
            account_size: 0,
            deployment_slot,
            effective_slot,
            tx_usage_counter: usage_counter,
            ix_usage_counter: AtomicU64::default(),
            // Access tracking starts at the deployment slot.
            latest_access_slot: AtomicU64::new(deployment_slot),
        })
    }
1476
    /// Builds a builtin cache entry (owned by the native loader) for the
    /// given slots; all counters start at zero.
    fn new_test_builtin_entry(
        deployment_slot: Slot,
        effective_slot: Slot,
    ) -> Arc<ProgramCacheEntry> {
        Arc::new(ProgramCacheEntry {
            program: ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock()),
            account_owner: ProgramCacheEntryOwner::NativeLoader,
            account_size: 0,
            deployment_slot,
            effective_slot,
            tx_usage_counter: AtomicU64::default(),
            ix_usage_counter: AtomicU64::default(),
            latest_access_slot: AtomicU64::default(),
        })
    }
1492
1493    fn set_tombstone(
1494        cache: &mut ProgramCache,
1495        key: Pubkey,
1496        slot: Slot,
1497        reason: ProgramCacheEntryType,
1498    ) -> Arc<ProgramCacheEntry> {
1499        let program = Arc::new(ProgramCacheEntry::new_tombstone(
1500            slot,
1501            ProgramCacheEntryOwner::LoaderV2,
1502            reason,
1503        ));
1504        cache.assign_program(key, program.clone());
1505        program
1506    }
1507
1508    fn insert_unloaded_entry(
1509        cache: &mut ProgramCache,
1510        key: Pubkey,
1511        slot: Slot,
1512    ) -> Arc<ProgramCacheEntry> {
1513        let loaded = new_test_entry_with_usage(slot, slot.saturating_add(1), AtomicU64::default());
1514        let unloaded = Arc::new(loaded.to_unloaded().expect("Failed to unload the program"));
1515        cache.assign_program(key, unloaded.clone());
1516        unloaded
1517    }
1518
1519    fn num_matching_entries<P>(cache: &ProgramCache, predicate: P) -> usize
1520    where
1521        P: Fn(&ProgramCacheEntryType) -> bool,
1522    {
1523        cache
1524            .get_flattened_entries_for_tests()
1525            .iter()
1526            .filter(|(_key, program)| predicate(&program.program))
1527            .count()
1528    }
1529
    #[test]
    fn test_usage_counter_decay() {
        // Creating the cache initializes the shared mock environment used by
        // `new_test_entry_with_usage` below.
        let _cache = new_mock_cache::<TestForkGraph>();
        let program = new_test_entry_with_usage(10, 11, AtomicU64::new(32));
        program.update_access_slot(15);
        // The counter halves for each slot elapsed since the last access (15);
        // slots at or before the last access see no decay.
        assert_eq!(program.decayed_usage_counter(15), 32);
        assert_eq!(program.decayed_usage_counter(16), 16);
        assert_eq!(program.decayed_usage_counter(17), 8);
        assert_eq!(program.decayed_usage_counter(18), 4);
        assert_eq!(program.decayed_usage_counter(19), 2);
        assert_eq!(program.decayed_usage_counter(20), 1);
        assert_eq!(program.decayed_usage_counter(21), 0);
        assert_eq!(program.decayed_usage_counter(15), 32);
        assert_eq!(program.decayed_usage_counter(14), 32);

        // Moving the access slot forward resets the decay reference point.
        program.update_access_slot(18);
        assert_eq!(program.decayed_usage_counter(15), 32);
        assert_eq!(program.decayed_usage_counter(16), 32);
        assert_eq!(program.decayed_usage_counter(17), 32);
        assert_eq!(program.decayed_usage_counter(18), 32);
        assert_eq!(program.decayed_usage_counter(19), 16);
        assert_eq!(program.decayed_usage_counter(20), 8);
        assert_eq!(program.decayed_usage_counter(21), 4);

        // Decay for 63 or more slots
        assert_eq!(program.decayed_usage_counter(18 + 63), 0);
        assert_eq!(program.decayed_usage_counter(100), 0);
    }
1558
1559    fn program_deploy_test_helper(
1560        cache: &mut ProgramCache<TestForkGraph>,
1561        program: Pubkey,
1562        deployment_slots: Vec<Slot>,
1563        usage_counters: Vec<u64>,
1564        programs: &mut Vec<(Pubkey, Slot, u64)>,
1565    ) {
1566        // Add multiple entries for program
1567        deployment_slots
1568            .iter()
1569            .enumerate()
1570            .for_each(|(i, deployment_slot)| {
1571                let usage_counter = *usage_counters.get(i).unwrap_or(&0);
1572                cache.assign_program(
1573                    program,
1574                    new_test_entry_with_usage(
1575                        *deployment_slot,
1576                        (*deployment_slot).saturating_add(2),
1577                        AtomicU64::new(usage_counter),
1578                    ),
1579                );
1580                programs.push((program, *deployment_slot, usage_counter));
1581            });
1582
1583        // Add tombstones entries for program
1584        let env = Arc::new(BuiltinProgram::new_mock());
1585        for slot in 21..31 {
1586            set_tombstone(
1587                cache,
1588                program,
1589                slot,
1590                ProgramCacheEntryType::FailedVerification(env.clone()),
1591            );
1592        }
1593
1594        // Add unloaded entries for program
1595        for slot in 31..41 {
1596            insert_unloaded_entry(cache, program, slot);
1597        }
1598    }
1599
    #[test]
    fn test_random_eviction() {
        let mut programs = vec![];

        let mut cache = new_mock_cache::<TestForkGraph>();

        // This test adds different kind of entries to the cache.
        // Tombstones and unloaded entries are expected to not be evicted.
        // It also adds multiple entries for three programs as it tries to create a typical cache instance.

        // Program 1
        program_deploy_test_helper(
            &mut cache,
            Pubkey::new_unique(),
            vec![0, 10, 20],
            vec![4, 5, 25],
            &mut programs,
        );

        // Program 2
        program_deploy_test_helper(
            &mut cache,
            Pubkey::new_unique(),
            vec![5, 11],
            vec![0, 2],
            &mut programs,
        );

        // Program 3
        program_deploy_test_helper(
            &mut cache,
            Pubkey::new_unique(),
            vec![0, 5, 15],
            vec![100, 3, 20],
            &mut programs,
        );

        // 1 for each deployment slot
        let num_loaded_expected = 8;
        // 10 for each program
        let num_unloaded_expected = 30;
        // 10 for each program
        let num_tombstones_expected = 30;

        // Count the number of loaded, unloaded and tombstone entries.
        programs.sort_by_key(|(_id, _slot, usage_count)| *usage_count);
        let num_loaded = num_matching_entries(&cache, |program_type| {
            matches!(program_type, ProgramCacheEntryType::Loaded(_))
        });
        let num_unloaded = num_matching_entries(&cache, |program_type| {
            matches!(program_type, ProgramCacheEntryType::Unloaded(_))
        });
        let num_tombstones = num_matching_entries(&cache, |program_type| {
            matches!(
                program_type,
                ProgramCacheEntryType::DelayVisibility
                    | ProgramCacheEntryType::FailedVerification(_)
                    | ProgramCacheEntryType::Closed
            )
        });

        // Test that the cache is constructed with the expected number of entries.
        assert_eq!(num_loaded, num_loaded_expected);
        assert_eq!(num_unloaded, num_unloaded_expected);
        assert_eq!(num_tombstones, num_tombstones_expected);

        // Evict entries from the cache
        let eviction_pct = 1;

        // Random eviction shrinks the loaded entries down to `eviction_pct`
        // of the maximum cache capacity; the evicted ones become unloaded.
        let num_loaded_expected =
            Percentage::from(eviction_pct).apply_to(crate::loaded_programs::MAX_LOADED_ENTRY_COUNT);
        let num_unloaded_expected = num_unloaded_expected + num_loaded - num_loaded_expected;
        cache.evict_using_2s_random_selection(Percentage::from(eviction_pct), 21);

        // Count the number of loaded, unloaded and tombstone entries.
        let num_loaded = num_matching_entries(&cache, |program_type| {
            matches!(program_type, ProgramCacheEntryType::Loaded(_))
        });
        let num_unloaded = num_matching_entries(&cache, |program_type| {
            matches!(program_type, ProgramCacheEntryType::Unloaded(_))
        });
        let num_tombstones = num_matching_entries(&cache, |program_type| {
            matches!(program_type, ProgramCacheEntryType::FailedVerification(_))
        });

        // However many entries are left after the shrink
        assert_eq!(num_loaded, num_loaded_expected);
        // The original unloaded entries + the evicted loaded entries
        assert_eq!(num_unloaded, num_unloaded_expected);
        // The original tombstones are not evicted
        assert_eq!(num_tombstones, num_tombstones_expected);
    }
1692
    #[test]
    fn test_eviction() {
        let mut programs = vec![];
        let mut cache = new_mock_cache::<TestForkGraph>();

        // Program 1
        program_deploy_test_helper(
            &mut cache,
            Pubkey::new_unique(),
            vec![0, 10, 20],
            vec![4, 5, 25],
            &mut programs,
        );

        // Program 2
        program_deploy_test_helper(
            &mut cache,
            Pubkey::new_unique(),
            vec![5, 11],
            vec![0, 2],
            &mut programs,
        );

        // Program 3
        program_deploy_test_helper(
            &mut cache,
            Pubkey::new_unique(),
            vec![0, 5, 15],
            vec![100, 3, 20],
            &mut programs,
        );

        // 1 for each deployment slot
        let num_loaded_expected = 8;
        // 10 for each program
        let num_unloaded_expected = 30;
        // 10 for each program
        let num_tombstones_expected = 30;

        // Count the number of loaded, unloaded and tombstone entries.
        programs.sort_by_key(|(_id, _slot, usage_count)| *usage_count);
        let num_loaded = num_matching_entries(&cache, |program_type| {
            matches!(program_type, ProgramCacheEntryType::Loaded(_))
        });
        let num_unloaded = num_matching_entries(&cache, |program_type| {
            matches!(program_type, ProgramCacheEntryType::Unloaded(_))
        });
        let num_tombstones = num_matching_entries(&cache, |program_type| {
            matches!(program_type, ProgramCacheEntryType::FailedVerification(_))
        });

        // Test that the cache is constructed with the expected number of entries.
        assert_eq!(num_loaded, num_loaded_expected);
        assert_eq!(num_unloaded, num_unloaded_expected);
        assert_eq!(num_tombstones, num_tombstones_expected);

        // Evict entries from the cache
        let eviction_pct = 1;

        // Sorted eviction shrinks the loaded entries down to `eviction_pct`
        // of the maximum cache capacity; the evicted ones become unloaded.
        let num_loaded_expected =
            Percentage::from(eviction_pct).apply_to(crate::loaded_programs::MAX_LOADED_ENTRY_COUNT);
        let num_unloaded_expected = num_unloaded_expected + num_loaded - num_loaded_expected;

        cache.sort_and_unload(Percentage::from(eviction_pct));

        // Check that every program is still in the cache.
        let entries = cache.get_flattened_entries_for_tests();
        programs.iter().for_each(|entry| {
            assert!(entries.iter().any(|(key, _entry)| key == &entry.0));
        });

        let unloaded = entries
            .iter()
            .filter_map(|(key, program)| {
                matches!(program.program, ProgramCacheEntryType::Unloaded(_))
                    .then_some((*key, program.tx_usage_counter.load(Ordering::Relaxed)))
            })
            .collect::<Vec<(Pubkey, u64)>>();

        // `programs` was sorted by usage counter above, so the three
        // least-used loaded entries must be among the unloaded ones.
        for index in 0..3 {
            let expected = programs.get(index).expect("Missing program");
            assert!(unloaded.contains(&(expected.0, expected.2)));
        }

        // Count the number of loaded, unloaded and tombstone entries.
        let num_loaded = num_matching_entries(&cache, |program_type| {
            matches!(program_type, ProgramCacheEntryType::Loaded(_))
        });
        let num_unloaded = num_matching_entries(&cache, |program_type| {
            matches!(program_type, ProgramCacheEntryType::Unloaded(_))
        });
        let num_tombstones = num_matching_entries(&cache, |program_type| {
            matches!(
                program_type,
                ProgramCacheEntryType::DelayVisibility
                    | ProgramCacheEntryType::FailedVerification(_)
                    | ProgramCacheEntryType::Closed
            )
        });

        // However many entries are left after the shrink
        assert_eq!(num_loaded, num_loaded_expected);
        // The original unloaded entries + the evicted loaded entries
        assert_eq!(num_unloaded, num_unloaded_expected);
        // The original tombstones are not evicted
        assert_eq!(num_tombstones, num_tombstones_expected);
    }
1800
    #[test]
    fn test_usage_count_of_unloaded_program() {
        let mut cache = new_mock_cache::<TestForkGraph>();

        let program = Pubkey::new_unique();
        let evict_to_pct = 2;
        let cache_capacity_after_shrink =
            Percentage::from(evict_to_pct).apply_to(crate::loaded_programs::MAX_LOADED_ENTRY_COUNT);
        // Add enough programs to the cache to trigger 1 eviction after shrinking.
        let num_total_programs = (cache_capacity_after_shrink + 1) as u64;
        (0..num_total_programs).for_each(|i| {
            cache.assign_program(
                program,
                new_test_entry_with_usage(i, i + 2, AtomicU64::new(i + 10)),
            );
        });

        cache.sort_and_unload(Percentage::from(evict_to_pct));

        // Exactly one entry should have been unloaded — the one asserted
        // below: deployed at slot 0 with the lowest usage counter (10).
        let num_unloaded = num_matching_entries(&cache, |program_type| {
            matches!(program_type, ProgramCacheEntryType::Unloaded(_))
        });
        assert_eq!(num_unloaded, 1);

        cache
            .get_flattened_entries_for_tests()
            .iter()
            .for_each(|(_key, program)| {
                if matches!(program.program, ProgramCacheEntryType::Unloaded(_)) {
                    // Test that the usage counter is retained for the unloaded program
                    assert_eq!(program.tx_usage_counter.load(Ordering::Relaxed), 10);
                    assert_eq!(program.deployment_slot, 0);
                    assert_eq!(program.effective_slot, 2);
                }
            });

        // Replenish the program that was just unloaded. Use 0 as the usage counter. This should be
        // updated with the usage counter from the unloaded program.
        cache.assign_program(program, new_test_entry_with_usage(0, 2, AtomicU64::new(0)));

        cache
            .get_flattened_entries_for_tests()
            .iter()
            .for_each(|(_key, program)| {
                if matches!(program.program, ProgramCacheEntryType::Unloaded(_))
                    && program.deployment_slot == 0
                    && program.effective_slot == 2
                {
                    // Test that the usage counter was correctly updated.
                    assert_eq!(program.tx_usage_counter.load(Ordering::Relaxed), 10);
                }
            });
    }
1854
    #[test]
    fn test_fuzz_assign_program_order() {
        use rand::prelude::SliceRandom;
        // The canonical ordering the cache must maintain: ascending by
        // (effective_slot, deployment_slot), as asserted by the zip below.
        const EXPECTED_ENTRIES: [(u64, u64); 7] =
            [(1, 2), (5, 5), (5, 6), (5, 10), (9, 10), (10, 10), (3, 12)];
        let mut rng = rand::thread_rng();
        let program_id = Pubkey::new_unique();
        for _ in 0..1000 {
            // Insert the entries in a random order; the stored slot versions
            // must end up in the canonical order regardless.
            let mut entries = EXPECTED_ENTRIES.to_vec();
            entries.shuffle(&mut rng);
            let mut cache = new_mock_cache::<TestForkGraph>();
            for (deployment_slot, effective_slot) in entries {
                assert!(!cache
                    .assign_program(program_id, new_test_entry(deployment_slot, effective_slot)));
            }
            for ((deployment_slot, effective_slot), entry) in EXPECTED_ENTRIES
                .iter()
                .zip(cache.get_slot_versions_for_tests(&program_id).iter())
            {
                assert_eq!(entry.deployment_slot, *deployment_slot);
                assert_eq!(entry.effective_slot, *effective_slot);
            }
        }
    }
1879
    // Every (old, new) pair generated by the matrices below is an illegal
    // same-slot replacement: the second `assign_program` call must panic.
    #[test_matrix(
        (
            ProgramCacheEntryType::Closed,
            ProgramCacheEntryType::FailedVerification(get_mock_env()),
            new_loaded_entry(get_mock_env()),
        ),
        (
            ProgramCacheEntryType::FailedVerification(get_mock_env()),
            ProgramCacheEntryType::Closed,
            ProgramCacheEntryType::Unloaded(get_mock_env()),
            new_loaded_entry(get_mock_env()),
            ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock()),
        )
    )]
    #[test_matrix(
        (
            ProgramCacheEntryType::Unloaded(get_mock_env()),
        ),
        (
            ProgramCacheEntryType::FailedVerification(get_mock_env()),
            ProgramCacheEntryType::Closed,
            ProgramCacheEntryType::Unloaded(get_mock_env()),
            ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock()),
        )
    )]
    #[test_matrix(
        (ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock()),),
        (
            ProgramCacheEntryType::FailedVerification(get_mock_env()),
            ProgramCacheEntryType::Closed,
            ProgramCacheEntryType::Unloaded(get_mock_env()),
            new_loaded_entry(get_mock_env()),
        )
    )]
    #[should_panic(expected = "Unexpected replacement of an entry")]
    fn test_assign_program_failure(old: ProgramCacheEntryType, new: ProgramCacheEntryType) {
        let mut cache = new_mock_cache::<TestForkGraph>();
        let program_id = Pubkey::new_unique();
        // The initial insertion must succeed (no replacement occurs yet).
        assert!(!cache.assign_program(
            program_id,
            Arc::new(ProgramCacheEntry {
                program: old,
                account_owner: ProgramCacheEntryOwner::LoaderV2,
                account_size: 0,
                deployment_slot: 10,
                effective_slot: 11,
                tx_usage_counter: AtomicU64::default(),
                ix_usage_counter: AtomicU64::default(),
                latest_access_slot: AtomicU64::default(),
            }),
        ));
        // Assigning `new` over `old` at identical slots must panic.
        cache.assign_program(
            program_id,
            Arc::new(ProgramCacheEntry {
                program: new,
                account_owner: ProgramCacheEntryOwner::LoaderV2,
                account_size: 0,
                deployment_slot: 10,
                effective_slot: 11,
                tx_usage_counter: AtomicU64::default(),
                ix_usage_counter: AtomicU64::default(),
                latest_access_slot: AtomicU64::default(),
            }),
        );
    }
1945
    // Legal same-slot replacements: an unloaded entry replaced by a loaded
    // one, and a builtin replaced by another builtin. Neither call may
    // report a replacement.
    #[test_case(
        ProgramCacheEntryType::Unloaded(Arc::new(BuiltinProgram::new_mock())),
        new_loaded_entry(get_mock_env())
    )]
    #[test_case(
        ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock()),
        ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock())
    )]
    fn test_assign_program_success(old: ProgramCacheEntryType, new: ProgramCacheEntryType) {
        let mut cache = new_mock_cache::<TestForkGraph>();
        let program_id = Pubkey::new_unique();
        assert!(!cache.assign_program(
            program_id,
            Arc::new(ProgramCacheEntry {
                program: old,
                account_owner: ProgramCacheEntryOwner::LoaderV2,
                account_size: 0,
                deployment_slot: 10,
                effective_slot: 11,
                tx_usage_counter: AtomicU64::default(),
                ix_usage_counter: AtomicU64::default(),
                latest_access_slot: AtomicU64::default(),
            }),
        ));
        assert!(!cache.assign_program(
            program_id,
            Arc::new(ProgramCacheEntry {
                program: new,
                account_owner: ProgramCacheEntryOwner::LoaderV2,
                account_size: 0,
                deployment_slot: 10,
                effective_slot: 11,
                tx_usage_counter: AtomicU64::default(),
                ix_usage_counter: AtomicU64::default(),
                latest_access_slot: AtomicU64::default(),
            }),
        ));
    }
1984
    #[test]
    fn test_tombstone() {
        let env = Arc::new(BuiltinProgram::new_mock());
        // A tombstone's effective slot equals its deployment slot.
        let tombstone = ProgramCacheEntry::new_tombstone(
            0,
            ProgramCacheEntryOwner::LoaderV2,
            ProgramCacheEntryType::FailedVerification(env.clone()),
        );
        assert_matches!(
            tombstone.program,
            ProgramCacheEntryType::FailedVerification(_)
        );
        assert!(tombstone.is_tombstone());
        assert_eq!(tombstone.deployment_slot, 0);
        assert_eq!(tombstone.effective_slot, 0);

        let tombstone = ProgramCacheEntry::new_tombstone(
            100,
            ProgramCacheEntryOwner::LoaderV2,
            ProgramCacheEntryType::Closed,
        );
        assert_matches!(tombstone.program, ProgramCacheEntryType::Closed);
        assert!(tombstone.is_tombstone());
        assert_eq!(tombstone.deployment_slot, 100);
        assert_eq!(tombstone.effective_slot, 100);

        // A tombstone inserted into the cache becomes a slot version.
        let mut cache = new_mock_cache::<TestForkGraph>();
        let program1 = Pubkey::new_unique();
        let tombstone = set_tombstone(
            &mut cache,
            program1,
            10,
            ProgramCacheEntryType::FailedVerification(env.clone()),
        );
        let slot_versions = cache.get_slot_versions_for_tests(&program1);
        assert_eq!(slot_versions.len(), 1);
        assert!(slot_versions.first().unwrap().is_tombstone());
        assert_eq!(tombstone.deployment_slot, 10);
        assert_eq!(tombstone.effective_slot, 10);

        // Add a program at slot 50, and a tombstone for the program at slot 60
        let program2 = Pubkey::new_unique();
        cache.assign_program(program2, new_test_builtin_entry(50, 51));
        let slot_versions = cache.get_slot_versions_for_tests(&program2);
        assert_eq!(slot_versions.len(), 1);
        assert!(!slot_versions.first().unwrap().is_tombstone());

        // The later tombstone coexists with the earlier live version.
        let tombstone = set_tombstone(
            &mut cache,
            program2,
            60,
            ProgramCacheEntryType::FailedVerification(env),
        );
        let slot_versions = cache.get_slot_versions_for_tests(&program2);
        assert_eq!(slot_versions.len(), 2);
        assert!(!slot_versions.first().unwrap().is_tombstone());
        assert!(slot_versions.get(1).unwrap().is_tombstone());
        assert!(tombstone.is_tombstone());
        assert_eq!(tombstone.deployment_slot, 60);
        assert_eq!(tombstone.effective_slot, 60);
    }
2046
    /// Minimal `ForkGraph` stub that reports the same relation for every
    /// pair of slots.
    struct TestForkGraph {
        relation: BlockRelation,
    }
    impl ForkGraph for TestForkGraph {
        fn relationship(&self, _a: Slot, _b: Slot) -> BlockRelation {
            self.relation
        }
    }
2055
2056    #[test]
2057    fn test_prune_empty() {
2058        let mut cache = new_mock_cache::<TestForkGraph>();
2059        let fork_graph = Arc::new(RwLock::new(TestForkGraph {
2060            relation: BlockRelation::Unrelated,
2061        }));
2062
2063        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2064
2065        cache.prune(0, 0);
2066        assert!(cache.get_flattened_entries_for_tests().is_empty());
2067
2068        cache.prune(10, 0);
2069        assert!(cache.get_flattened_entries_for_tests().is_empty());
2070
2071        let mut cache = new_mock_cache::<TestForkGraph>();
2072        let fork_graph = Arc::new(RwLock::new(TestForkGraph {
2073            relation: BlockRelation::Ancestor,
2074        }));
2075
2076        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2077
2078        cache.prune(0, 0);
2079        assert!(cache.get_flattened_entries_for_tests().is_empty());
2080
2081        cache.prune(10, 0);
2082        assert!(cache.get_flattened_entries_for_tests().is_empty());
2083
2084        let mut cache = new_mock_cache::<TestForkGraph>();
2085        let fork_graph = Arc::new(RwLock::new(TestForkGraph {
2086            relation: BlockRelation::Descendant,
2087        }));
2088
2089        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2090
2091        cache.prune(0, 0);
2092        assert!(cache.get_flattened_entries_for_tests().is_empty());
2093
2094        cache.prune(10, 0);
2095        assert!(cache.get_flattened_entries_for_tests().is_empty());
2096
2097        let mut cache = new_mock_cache::<TestForkGraph>();
2098        let fork_graph = Arc::new(RwLock::new(TestForkGraph {
2099            relation: BlockRelation::Unknown,
2100        }));
2101        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2102
2103        cache.prune(0, 0);
2104        assert!(cache.get_flattened_entries_for_tests().is_empty());
2105
2106        cache.prune(10, 0);
2107        assert!(cache.get_flattened_entries_for_tests().is_empty());
2108    }
2109
    #[test]
    fn test_prune_different_env() {
        let mut cache = new_mock_cache::<TestForkGraph>();

        let fork_graph = Arc::new(RwLock::new(TestForkGraph {
            relation: BlockRelation::Ancestor,
        }));

        cache.set_fork_graph(Arc::downgrade(&fork_graph));

        let program1 = Pubkey::new_unique();
        cache.assign_program(program1, new_test_entry(10, 10));

        // Stage a new environment for the upcoming epoch and insert a second
        // version of the program compiled against it.
        let new_env = Arc::new(BuiltinProgram::new_mock());
        cache.upcoming_environments = Some(ProgramRuntimeEnvironments {
            program_runtime_v1: new_env.clone(),
            program_runtime_v2: new_env.clone(),
        });
        let updated_program = Arc::new(ProgramCacheEntry {
            program: new_loaded_entry(new_env.clone()),
            account_owner: ProgramCacheEntryOwner::LoaderV2,
            account_size: 0,
            deployment_slot: 20,
            effective_slot: 20,
            tx_usage_counter: AtomicU64::default(),
            ix_usage_counter: AtomicU64::default(),
            latest_access_slot: AtomicU64::default(),
        });
        cache.assign_program(program1, updated_program.clone());

        // Test that there are 2 entries for the program
        assert_eq!(cache.get_slot_versions_for_tests(&program1).len(), 2);

        cache.prune(21, cache.latest_root_epoch);

        // Test that prune didn't remove the entry, since environments are different.
        assert_eq!(cache.get_slot_versions_for_tests(&program1).len(), 2);

        cache.prune(22, cache.latest_root_epoch.saturating_add(1));

        // Test that prune removed 1 entry, since epoch changed
        assert_eq!(cache.get_slot_versions_for_tests(&program1).len(), 1);

        let entry = cache
            .get_slot_versions_for_tests(&program1)
            .first()
            .expect("Failed to get the program")
            .clone();
        // Test that the correct entry remains in the cache
        assert_eq!(entry, updated_program);
    }
2161
2162    #[derive(Default)]
2163    struct TestForkGraphSpecific {
2164        forks: Vec<Vec<Slot>>,
2165    }
2166
2167    impl TestForkGraphSpecific {
2168        fn insert_fork(&mut self, fork: &[Slot]) {
2169            let mut fork = fork.to_vec();
2170            fork.sort();
2171            self.forks.push(fork)
2172        }
2173    }
2174
2175    impl ForkGraph for TestForkGraphSpecific {
2176        fn relationship(&self, a: Slot, b: Slot) -> BlockRelation {
2177            match self.forks.iter().try_for_each(|fork| {
2178                let relation = fork
2179                    .iter()
2180                    .position(|x| *x == a)
2181                    .and_then(|a_pos| {
2182                        fork.iter().position(|x| *x == b).and_then(|b_pos| {
2183                            (a_pos == b_pos)
2184                                .then_some(BlockRelation::Equal)
2185                                .or_else(|| (a_pos < b_pos).then_some(BlockRelation::Ancestor))
2186                                .or(Some(BlockRelation::Descendant))
2187                        })
2188                    })
2189                    .unwrap_or(BlockRelation::Unrelated);
2190
2191                if relation != BlockRelation::Unrelated {
2192                    return ControlFlow::Break(relation);
2193                }
2194
2195                ControlFlow::Continue(())
2196            }) {
2197                ControlFlow::Break(relation) => relation,
2198                _ => BlockRelation::Unrelated,
2199            }
2200        }
2201    }
2202
    /// For each key in `keys`, finds the most recently inserted cache entry
    /// deployed at or before `loading_slot` and pairs the key with
    /// `(NoCriteria, 1)`, producing the (key, (criteria, count)) list shape
    /// used as the "entries to load" input elsewhere.
    fn get_entries_to_load(
        cache: &ProgramCache<TestForkGraphSpecific>,
        loading_slot: Slot,
        keys: &[Pubkey],
    ) -> Vec<(Pubkey, (ProgramCacheMatchCriteria, u64))> {
        let fork_graph = cache.fork_graph.as_ref().unwrap().upgrade().unwrap();
        // NOTE(review): this read guard is held for the whole scan but never
        // consulted — presumably it mirrors the locking of the real lookup
        // path; confirm whether a fork-relation check was intended here.
        let locked_fork_graph = fork_graph.read().unwrap();
        let entries = cache.get_flattened_entries_for_tests();
        keys.iter()
            .filter_map(|key| {
                entries
                    .iter()
                    // Reverse scan so a later entry in the flattened list wins
                    // (assumes the list is ordered oldest-first — TODO confirm).
                    .rev()
                    .find(|(program_id, entry)| {
                        program_id == key && entry.deployment_slot <= loading_slot
                    })
                    .map(|(program_id, _entry)| {
                        (*program_id, (ProgramCacheMatchCriteria::NoCriteria, 1))
                    })
            })
            .collect()
    }
2225
2226    fn match_slot(
2227        extracted: &ProgramCacheForTxBatch,
2228        program: &Pubkey,
2229        deployment_slot: Slot,
2230        working_slot: Slot,
2231    ) -> bool {
2232        assert_eq!(extracted.slot, working_slot);
2233        extracted
2234            .entries
2235            .get(program)
2236            .map(|entry| entry.deployment_slot == deployment_slot)
2237            .unwrap_or(false)
2238    }
2239
2240    fn match_missing(
2241        missing: &[(Pubkey, (ProgramCacheMatchCriteria, u64))],
2242        program: &Pubkey,
2243        expected_result: bool,
2244    ) -> bool {
2245        missing.iter().any(|(key, _)| key == program) == expected_result
2246    }
2247
    /// Exercises `extract()` on a multi-fork graph — including delay
    /// visibility tombstones — and verifies that `prune()` re-roots the cache,
    /// dropping entries that only lived on abandoned forks.
    #[test]
    fn test_fork_extract_and_prune() {
        let mut cache = new_mock_cache::<TestForkGraphSpecific>();

        // Fork graph created for the test
        //                   0
        //                 /   \
        //                10    5
        //                |     |
        //                20    11
        //                |     | \
        //                22   15  25
        //                      |   |
        //                     16  27
        //                      |
        //                     18
        //                      |
        //                     19
        //                      |
        //                     21
        //                      |
        //                     23

        let mut fork_graph = TestForkGraphSpecific::default();
        fork_graph.insert_fork(&[0, 10, 20, 22]);
        fork_graph.insert_fork(&[0, 5, 11, 15, 16, 18, 19, 21, 23]);
        fork_graph.insert_fork(&[0, 5, 11, 25, 27]);

        let fork_graph = Arc::new(RwLock::new(fork_graph));
        cache.set_fork_graph(Arc::downgrade(&fork_graph));

        // program1 has deployments on both sides of the first fork split.
        let program1 = Pubkey::new_unique();
        cache.assign_program(program1, new_test_entry(0, 1));
        cache.assign_program(program1, new_test_entry(10, 11));
        cache.assign_program(program1, new_test_entry(20, 21));

        // program2 only exists on the forks going through slot 5. Its
        // redeployment at slot 11 is delay-visible (effective at slot 12).
        let program2 = Pubkey::new_unique();
        cache.assign_program(program2, new_test_entry(5, 6));
        cache.assign_program(
            program2,
            new_test_entry(11, 11 + DELAY_VISIBILITY_SLOT_OFFSET),
        );

        // program3 only exists on the fork going through slot 25.
        let program3 = Pubkey::new_unique();
        cache.assign_program(program3, new_test_entry(25, 26));

        let program4 = Pubkey::new_unique();
        cache.assign_program(program4, new_test_entry(0, 1));
        cache.assign_program(program4, new_test_entry(5, 6));
        // The following entry is delay-visible: deployed in slot 15, it only
        // becomes effective at slot 15 + DELAY_VISIBILITY_SLOT_OFFSET (= 16).
        cache.assign_program(
            program4,
            new_test_entry(15, 15 + DELAY_VISIBILITY_SLOT_OFFSET),
        );

        // Current fork graph
        //                   0
        //                 /   \
        //                10    5
        //                |     |
        //                20    11
        //                |     | \
        //                22   15  25
        //                      |   |
        //                     16  27
        //                      |
        //                     18
        //                      |
        //                     19
        //                      |
        //                     21
        //                      |
        //                     23

        // Testing fork 0 - 10 - 20 - 22 with current slot at 22
        let mut missing =
            get_entries_to_load(&cache, 22, &[program1, program2, program3, program4]);
        // program2 and program3 are only deployed on unrelated forks.
        assert!(match_missing(&missing, &program2, false));
        assert!(match_missing(&missing, &program3, false));
        let mut extracted = ProgramCacheForTxBatch::new(22, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 20, 22));
        assert!(match_slot(&extracted, &program4, 0, 22));

        // Testing fork 0 - 5 - 11 - 15 with current slot at 15
        let mut missing =
            get_entries_to_load(&cache, 15, &[program1, program2, program3, program4]);
        assert!(match_missing(&missing, &program3, false));
        let mut extracted = ProgramCacheForTxBatch::new(15, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 15));
        assert!(match_slot(&extracted, &program2, 11, 15));
        // The effective slot of program4 deployed in slot 15 is 16. So it should not be usable in slot 15.
        // A delay visibility tombstone should be returned here.
        let tombstone = extracted
            .find(&program4)
            .expect("Failed to find the tombstone");
        assert_matches!(tombstone.program, ProgramCacheEntryType::DelayVisibility);
        assert_eq!(tombstone.deployment_slot, 15);

        // Testing the same fork above, but current slot is now 18 (past the effective slot 16 of program4).
        let mut missing =
            get_entries_to_load(&cache, 18, &[program1, program2, program3, program4]);
        assert!(match_missing(&missing, &program3, false));
        let mut extracted = ProgramCacheForTxBatch::new(18, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 18));
        assert!(match_slot(&extracted, &program2, 11, 18));
        // The effective slot of program4 deployed in slot 15 is 16. So it should be usable in slot 18.
        assert!(match_slot(&extracted, &program4, 15, 18));

        // Testing the same fork above, but current slot is now 23 (further past the effective slot of program4).
        let mut missing =
            get_entries_to_load(&cache, 23, &[program1, program2, program3, program4]);
        assert!(match_missing(&missing, &program3, false));
        let mut extracted = ProgramCacheForTxBatch::new(23, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 23));
        assert!(match_slot(&extracted, &program2, 11, 23));
        // The effective slot of program4 deployed in slot 15 is 16. So it should be usable in slot 23.
        assert!(match_slot(&extracted, &program4, 15, 23));

        // Testing fork 0 - 5 - 11 with current slot at 11
        let mut missing =
            get_entries_to_load(&cache, 11, &[program1, program2, program3, program4]);
        assert!(match_missing(&missing, &program3, false));
        let mut extracted = ProgramCacheForTxBatch::new(11, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 11));
        // program2 was updated at slot 11, but is not effective till slot 12. The result should contain a tombstone.
        let tombstone = extracted
            .find(&program2)
            .expect("Failed to find the tombstone");
        assert_matches!(tombstone.program, ProgramCacheEntryType::DelayVisibility);
        assert_eq!(tombstone.deployment_slot, 11);
        assert!(match_slot(&extracted, &program4, 5, 11));

        // Re-root the cache at slot 5; the fork through 10/20/22 is abandoned.
        cache.prune(5, 0);

        // Fork graph after pruning
        //                   0
        //                   |
        //                   5
        //                   |
        //                   11
        //                   | \
        //                  15  25
        //                   |   |
        //                  16  27
        //                   |
        //                  18
        //                   |
        //                  19
        //                   |
        //                  21
        //                   |
        //                  23

        // Testing fork 0 - 5 - 11 - 15 - 16 - 18 - 19 - 21 with root at 5 and current slot at 21
        let mut missing =
            get_entries_to_load(&cache, 21, &[program1, program2, program3, program4]);
        assert!(match_missing(&missing, &program3, false));
        let mut extracted = ProgramCacheForTxBatch::new(21, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        // Since the fork was pruned, we should not find the entry deployed at slot 20.
        assert!(match_slot(&extracted, &program1, 0, 21));
        assert!(match_slot(&extracted, &program2, 11, 21));
        assert!(match_slot(&extracted, &program4, 15, 21));

        // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27
        let mut missing =
            get_entries_to_load(&cache, 27, &[program1, program2, program3, program4]);
        let mut extracted = ProgramCacheForTxBatch::new(27, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 27));
        assert!(match_slot(&extracted, &program2, 11, 27));
        assert!(match_slot(&extracted, &program3, 25, 27));
        assert!(match_slot(&extracted, &program4, 5, 27));

        // Re-root at slot 15; the fork through 25/27 is abandoned as well.
        cache.prune(15, 0);

        // Fork graph after pruning
        //                  0
        //                  |
        //                  5
        //                  |
        //                  11
        //                  |
        //                  15
        //                  |
        //                  16
        //                  |
        //                  18
        //                  |
        //                  19
        //                  |
        //                  21
        //                  |
        //                  23

        // Testing fork 16 - 18 - 19 - 21 - 23 with root at 15 and current slot at 23
        let mut missing =
            get_entries_to_load(&cache, 23, &[program1, program2, program3, program4]);
        assert!(match_missing(&missing, &program3, false));
        let mut extracted = ProgramCacheForTxBatch::new(23, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 23));
        assert!(match_slot(&extracted, &program2, 11, 23));
        assert!(match_slot(&extracted, &program4, 15, 23));
    }
2442
    /// Verifies that `ProgramCacheMatchCriteria::DeployedOnOrAfterSlot` turns
    /// otherwise-matching entries into misses during extraction.
    #[test]
    fn test_extract_using_deployment_slot() {
        let mut cache = new_mock_cache::<TestForkGraphSpecific>();

        // Fork graph created for the test
        //                   0
        //                 /   \
        //                10    5
        //                |     |
        //                20    11
        //                |     | \
        //                22   12  25
        //                      |   |
        //                     15  27
        //                      |
        //                     16
        //                      |
        //                     18
        //                      |
        //                     19
        //                      |
        //                     21
        //                      |
        //                     23

        let mut fork_graph = TestForkGraphSpecific::default();
        fork_graph.insert_fork(&[0, 10, 20, 22]);
        fork_graph.insert_fork(&[0, 5, 11, 12, 15, 16, 18, 19, 21, 23]);
        fork_graph.insert_fork(&[0, 5, 11, 25, 27]);

        let fork_graph = Arc::new(RwLock::new(fork_graph));
        cache.set_fork_graph(Arc::downgrade(&fork_graph));

        let program1 = Pubkey::new_unique();
        cache.assign_program(program1, new_test_entry(0, 1));
        cache.assign_program(program1, new_test_entry(20, 21));

        let program2 = Pubkey::new_unique();
        cache.assign_program(program2, new_test_entry(5, 6));
        cache.assign_program(program2, new_test_entry(11, 12));

        // program3 only exists on the fork going through slot 25.
        let program3 = Pubkey::new_unique();
        cache.assign_program(program3, new_test_entry(25, 26));

        // Testing fork 0 - 5 - 11 - 12 with current slot at 12
        let mut missing = get_entries_to_load(&cache, 12, &[program1, program2, program3]);
        assert!(match_missing(&missing, &program3, false));
        let mut extracted = ProgramCacheForTxBatch::new(12, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 12));
        assert!(match_slot(&extracted, &program2, 11, 12));

        // Same fork, but only accept entries deployed on or after slot 5:
        // program1's visible deployment (slot 0) no longer satisfies the
        // criteria and stays missing, while program2's (slot 11) still does.
        let mut missing = get_entries_to_load(&cache, 12, &[program1, program2, program3]);
        missing.get_mut(0).unwrap().1 .0 = ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(5);
        missing.get_mut(1).unwrap().1 .0 = ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(5);
        assert!(match_missing(&missing, &program3, false));
        let mut extracted = ProgramCacheForTxBatch::new(12, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_missing(&missing, &program1, true));
        assert!(match_slot(&extracted, &program2, 11, 12));
    }
2499
    /// Verifies extraction of unloaded entries: an unloaded entry is reported
    /// as missing (so it can be reloaded), both when its environment matches
    /// the cache's and when it was built for a different environment.
    #[test]
    fn test_extract_unloaded() {
        let mut cache = new_mock_cache::<TestForkGraphSpecific>();

        // Fork graph created for the test
        //                   0
        //                 /   \
        //                10    5
        //                |     |
        //                20    11
        //                |     | \
        //                22   15  25
        //                      |   |
        //                     16  27
        //                      |
        //                     19
        //                      |
        //                     21
        //                      |
        //                     23

        let mut fork_graph = TestForkGraphSpecific::default();
        fork_graph.insert_fork(&[0, 10, 20, 22]);
        fork_graph.insert_fork(&[0, 5, 11, 15, 16, 19, 21, 23]);
        fork_graph.insert_fork(&[0, 5, 11, 25, 27]);

        let fork_graph = Arc::new(RwLock::new(fork_graph));
        cache.set_fork_graph(Arc::downgrade(&fork_graph));

        let program1 = Pubkey::new_unique();
        cache.assign_program(program1, new_test_entry(0, 1));
        cache.assign_program(program1, new_test_entry(20, 21));

        let program2 = Pubkey::new_unique();
        cache.assign_program(program2, new_test_entry(5, 6));
        cache.assign_program(program2, new_test_entry(11, 12));

        let program3 = Pubkey::new_unique();
        // Insert an unloaded program with correct/cache's environment at slot 25
        let _ = insert_unloaded_entry(&mut cache, program3, 25);

        // Insert another unloaded program with a different environment at slot 20
        // Since this entry's environment won't match cache's environment, looking up this
        // entry should return missing instead of unloaded entry.
        cache.assign_program(
            program3,
            Arc::new(
                new_test_entry(20, 21)
                    .to_unloaded()
                    .expect("Failed to create unloaded program"),
            ),
        );

        // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19
        let mut missing = get_entries_to_load(&cache, 19, &[program1, program2, program3]);
        // program3's deployments (slots 20 and 25) are not on this fork.
        assert!(match_missing(&missing, &program3, false));
        let mut extracted = ProgramCacheForTxBatch::new(19, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 19));
        assert!(match_slot(&extracted, &program2, 11, 19));

        // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27
        let mut missing = get_entries_to_load(&cache, 27, &[program1, program2, program3]);
        let mut extracted = ProgramCacheForTxBatch::new(27, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 27));
        assert!(match_slot(&extracted, &program2, 11, 27));
        // The unloaded entry deployed at slot 25 stays missing so it gets reloaded.
        assert!(match_missing(&missing, &program3, true));

        // Testing fork 0 - 10 - 20 - 22 with current slot at 22
        let mut missing = get_entries_to_load(&cache, 22, &[program1, program2, program3]);
        assert!(match_missing(&missing, &program2, false));
        let mut extracted = ProgramCacheForTxBatch::new(22, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 20, 22));
        // The environment-mismatched unloaded entry (slot 20) is also reported
        // as missing rather than being returned.
        assert!(match_missing(&missing, &program3, true));
    }
2575
2576    #[test]
2577    fn test_extract_nonexistent() {
2578        let mut cache = new_mock_cache::<TestForkGraphSpecific>();
2579        let fork_graph = TestForkGraphSpecific::default();
2580        let fork_graph = Arc::new(RwLock::new(fork_graph));
2581        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2582
2583        let program1 = Pubkey::new_unique();
2584        let mut missing = vec![(program1, (ProgramCacheMatchCriteria::NoCriteria, 1))];
2585        let mut extracted = ProgramCacheForTxBatch::new(0, cache.environments.clone(), None, 0);
2586        cache.extract(&mut missing, &mut extracted, true);
2587        assert!(match_missing(&missing, &program1, true));
2588    }
2589
    /// Checks `to_unloaded()` / `unload_program_entry()`: tombstones,
    /// already-unloaded entries, and builtins cannot be unloaded (no-op, no
    /// eviction recorded), while a loaded entry is unloaded and shows up in
    /// the eviction stats.
    #[test]
    fn test_unloaded() {
        let mut cache = new_mock_cache::<TestForkGraph>();
        // None of these entry types hold a loaded program, so converting them
        // to an unloaded entry must fail and unloading must be a no-op.
        for program_cache_entry_type in [
            ProgramCacheEntryType::FailedVerification(
                cache.environments.program_runtime_v1.clone(),
            ),
            ProgramCacheEntryType::Closed,
            ProgramCacheEntryType::Unloaded(cache.environments.program_runtime_v1.clone()),
            ProgramCacheEntryType::Builtin(BuiltinProgram::new_mock()),
        ] {
            let entry = Arc::new(ProgramCacheEntry {
                program: program_cache_entry_type,
                account_owner: ProgramCacheEntryOwner::LoaderV2,
                account_size: 0,
                deployment_slot: 0,
                effective_slot: 0,
                tx_usage_counter: AtomicU64::default(),
                ix_usage_counter: AtomicU64::default(),
                latest_access_slot: AtomicU64::default(),
            });
            assert!(entry.to_unloaded().is_none());

            // Check that unload_program_entry() does nothing for this entry
            let program_id = Pubkey::new_unique();
            cache.assign_program(program_id, entry.clone());
            cache.unload_program_entry(&program_id, &entry);
            // The entry is still present and no eviction was recorded.
            assert_eq!(cache.get_slot_versions_for_tests(&program_id).len(), 1);
            assert!(cache.stats.evictions.is_empty());
        }

        // A loaded entry can be unloaded; the unloaded form preserves the
        // deployment/effective slots and the tx usage counter.
        let entry = new_test_entry_with_usage(1, 2, AtomicU64::new(3));
        let unloaded_entry = entry.to_unloaded().unwrap();
        assert_eq!(unloaded_entry.deployment_slot, 1);
        assert_eq!(unloaded_entry.effective_slot, 2);
        assert_eq!(unloaded_entry.latest_access_slot.load(Ordering::Relaxed), 1);
        assert_eq!(unloaded_entry.tx_usage_counter.load(Ordering::Relaxed), 3);

        // Check that unload_program_entry() does its work
        let program_id = Pubkey::new_unique();
        cache.assign_program(program_id, entry.clone());
        cache.unload_program_entry(&program_id, &entry);
        assert!(cache.stats.evictions.contains_key(&program_id));
    }
2634
2635    #[test]
2636    fn test_fork_prune_find_first_ancestor() {
2637        let mut cache = new_mock_cache::<TestForkGraphSpecific>();
2638
2639        // Fork graph created for the test
2640        //                   0
2641        //                 /   \
2642        //                10    5
2643        //                |
2644        //                20
2645
2646        // Deploy program on slot 0, and slot 5.
2647        // Prune the fork that has slot 5. The cache should still have the program
2648        // deployed at slot 0.
2649        let mut fork_graph = TestForkGraphSpecific::default();
2650        fork_graph.insert_fork(&[0, 10, 20]);
2651        fork_graph.insert_fork(&[0, 5]);
2652        let fork_graph = Arc::new(RwLock::new(fork_graph));
2653        cache.set_fork_graph(Arc::downgrade(&fork_graph));
2654
2655        let program1 = Pubkey::new_unique();
2656        cache.assign_program(program1, new_test_entry(0, 1));
2657        cache.assign_program(program1, new_test_entry(5, 6));
2658
2659        cache.prune(10, 0);
2660
2661        let mut missing = get_entries_to_load(&cache, 20, &[program1]);
2662        let mut extracted = ProgramCacheForTxBatch::new(20, cache.environments.clone(), None, 0);
2663        cache.extract(&mut missing, &mut extracted, true);
2664
2665        // The cache should have the program deployed at slot 0
2666        assert_eq!(
2667            extracted
2668                .find(&program1)
2669                .expect("Did not find the program")
2670                .deployment_slot,
2671            0
2672        );
2673    }
2674
    /// Verifies that `prune_by_deployment_slot()` removes exactly the entries
    /// deployed in the given slot, across all forks, while deployments of the
    /// same programs in other slots remain visible.
    #[test]
    fn test_prune_by_deployment_slot() {
        let mut cache = new_mock_cache::<TestForkGraphSpecific>();

        // Fork graph created for the test
        //                   0
        //                 /   \
        //                10    5
        //                |     |
        //                20    6

        // program1 is deployed in slot 0 and slot 5, program2 in slot 10.
        // Pruning by deployment slot should drop only the entries deployed in
        // the pruned slot; older deployments become visible again.
        let mut fork_graph = TestForkGraphSpecific::default();
        fork_graph.insert_fork(&[0, 10, 20]);
        fork_graph.insert_fork(&[0, 5, 6]);
        let fork_graph = Arc::new(RwLock::new(fork_graph));
        cache.set_fork_graph(Arc::downgrade(&fork_graph));

        let program1 = Pubkey::new_unique();
        cache.assign_program(program1, new_test_entry(0, 1));
        cache.assign_program(program1, new_test_entry(5, 6));

        let program2 = Pubkey::new_unique();
        cache.assign_program(program2, new_test_entry(10, 11));

        // Fork 0 - 10 - 20: both programs visible with their latest deployments.
        let mut missing = get_entries_to_load(&cache, 20, &[program1, program2]);
        let mut extracted = ProgramCacheForTxBatch::new(20, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 20));
        assert!(match_slot(&extracted, &program2, 10, 20));

        // Fork 0 - 5 - 6: program2 (deployed at slot 10) is not on this fork.
        let mut missing = get_entries_to_load(&cache, 6, &[program1, program2]);
        assert!(match_missing(&missing, &program2, false));
        let mut extracted = ProgramCacheForTxBatch::new(6, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 5, 6));

        // Pruning slot 5 will remove program1 entry deployed at slot 5.
        // On fork chaining from slot 5, the entry deployed at slot 0 will become visible.
        cache.prune_by_deployment_slot(5);

        let mut missing = get_entries_to_load(&cache, 20, &[program1, program2]);
        let mut extracted = ProgramCacheForTxBatch::new(20, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 20));
        assert!(match_slot(&extracted, &program2, 10, 20));

        // The slot 0 deployment of program1 is now visible at slot 6 too.
        let mut missing = get_entries_to_load(&cache, 6, &[program1, program2]);
        assert!(match_missing(&missing, &program2, false));
        let mut extracted = ProgramCacheForTxBatch::new(6, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 6));

        // Pruning slot 10 will remove program2's entry deployed at slot 10.
        // As program2 has no remaining deployments, it is no longer requested
        // for loading at all.
        cache.prune_by_deployment_slot(10);

        let mut missing = get_entries_to_load(&cache, 20, &[program1, program2]);
        assert!(match_missing(&missing, &program2, false));
        let mut extracted = ProgramCacheForTxBatch::new(20, cache.environments.clone(), None, 0);
        cache.extract(&mut missing, &mut extracted, true);
        assert!(match_slot(&extracted, &program1, 0, 20));
    }
2740
2741    #[test]
2742    fn test_usable_entries_for_slot() {
2743        new_mock_cache::<TestForkGraph>();
2744        let tombstone = Arc::new(ProgramCacheEntry::new_tombstone(
2745            0,
2746            ProgramCacheEntryOwner::LoaderV2,
2747            ProgramCacheEntryType::Closed,
2748        ));
2749
2750        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
2751            &tombstone,
2752            &ProgramCacheMatchCriteria::NoCriteria
2753        ));
2754
2755        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
2756            &tombstone,
2757            &ProgramCacheMatchCriteria::Tombstone
2758        ));
2759
2760        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
2761            &tombstone,
2762            &ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(0)
2763        ));
2764
2765        assert!(!ProgramCache::<TestForkGraph>::matches_criteria(
2766            &tombstone,
2767            &ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(1)
2768        ));
2769
2770        let program = new_test_entry(0, 1);
2771
2772        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
2773            &program,
2774            &ProgramCacheMatchCriteria::NoCriteria
2775        ));
2776
2777        assert!(!ProgramCache::<TestForkGraph>::matches_criteria(
2778            &program,
2779            &ProgramCacheMatchCriteria::Tombstone
2780        ));
2781
2782        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
2783            &program,
2784            &ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(0)
2785        ));
2786
2787        assert!(!ProgramCache::<TestForkGraph>::matches_criteria(
2788            &program,
2789            &ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(1)
2790        ));
2791
2792        let program = Arc::new(new_test_entry_with_usage(0, 1, AtomicU64::default()));
2793
2794        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
2795            &program,
2796            &ProgramCacheMatchCriteria::NoCriteria
2797        ));
2798
2799        assert!(!ProgramCache::<TestForkGraph>::matches_criteria(
2800            &program,
2801            &ProgramCacheMatchCriteria::Tombstone
2802        ));
2803
2804        assert!(ProgramCache::<TestForkGraph>::matches_criteria(
2805            &program,
2806            &ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(0)
2807        ));
2808
2809        assert!(!ProgramCache::<TestForkGraph>::matches_criteria(
2810            &program,
2811            &ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(1)
2812        ));
2813    }
2814}