solana_sbpf/vm.rs

#![allow(clippy::arithmetic_side_effects)]
// Derived from uBPF <https://github.com/iovisor/ubpf>
// Copyright 2015 Big Switch Networks, Inc
//      (uBPF: VM architecture, parts of the interpreter, originally in C)
// Copyright 2016 6WIND S.A. <quentin.monnet@6wind.com>
//      (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for syscalls)
// Copyright 2020 Solana Maintainers <maintainers@solana.com>
//
// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

//! Virtual machine for eBPF programs.

use crate::{
    ebpf,
    elf::Executable,
    error::{EbpfError, ProgramResult},
    interpreter::Interpreter,
    memory_region::MemoryMapping,
    program::{BuiltinFunction, BuiltinProgram, FunctionRegistry, SBPFVersion},
    static_analysis::Analysis,
};
use std::{collections::BTreeMap, fmt::Debug};

#[cfg(not(feature = "shuttle-test"))]
use {
    rand::{thread_rng, Rng},
    std::sync::Arc,
};

#[cfg(feature = "shuttle-test")]
use shuttle::{
    rand::{thread_rng, Rng},
    sync::Arc,
};

/// Number of bits the RUNTIME_ENVIRONMENT_KEY is shifted towards the LSB
///
/// Three bits for 8-byte alignment, and one bit of encoding space for the RuntimeEnvironment.
const PROGRAM_ENVIRONMENT_KEY_SHIFT: u32 = 4;
static RUNTIME_ENVIRONMENT_KEY: std::sync::OnceLock<i32> = std::sync::OnceLock::<i32>::new();

/// Returns the encryption key for the VM pointer, generating it on first use
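///
/// # Examples
///
/// The key is generated once per process (via a `OnceLock`) and stays stable on
/// later calls:
///
/// ```
/// use solana_sbpf::vm::get_runtime_environment_key;
///
/// assert_eq!(get_runtime_environment_key(), get_runtime_environment_key());
/// ```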
pub fn get_runtime_environment_key() -> i32 {
    *RUNTIME_ENVIRONMENT_KEY
        .get_or_init(|| thread_rng().gen::<i32>() >> PROGRAM_ENVIRONMENT_KEY_SHIFT)
}

/// VM configuration settings
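///
/// # Examples
///
/// A minimal sketch of overriding individual settings on top of the defaults
/// (see [Config::default] below for the baseline values):
///
/// ```
/// use solana_sbpf::vm::Config;
///
/// let config = Config {
///     enable_instruction_tracing: true,
///     ..Config::default()
/// };
/// // stack_size() is stack_frame_size * max_call_depth
/// assert_eq!(config.stack_size(), 4_096 * 64);
/// ```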
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Config {
    /// Maximum call depth
    pub max_call_depth: usize,
    /// Size of a stack frame in bytes, must match the size specified in the LLVM BPF backend
    pub stack_frame_size: usize,
    /// Enables the use of MemoryMapping and MemoryRegion for address translation
    pub enable_address_translation: bool,
    /// Enables gaps in VM address space between the stack frames
    pub enable_stack_frame_gaps: bool,
    /// Maximal pc distance after which a new instruction meter validation is emitted by the JIT
    pub instruction_meter_checkpoint_distance: usize,
    /// Enable instruction meter and limiting
    pub enable_instruction_meter: bool,
    /// Enable instruction tracing
    pub enable_instruction_tracing: bool,
    /// Enable dynamic string allocation for labels
    pub enable_symbol_and_section_labels: bool,
    /// Reject ELF files containing issues that the verifier did not catch before (up to v0.2.21)
    pub reject_broken_elfs: bool,
    /// Ratio of native host instructions per random no-op in JIT (0 = OFF)
    pub noop_instruction_rate: u32,
    /// Enable sanitization of immediate values and offsets provided by the user in JIT
    pub sanitize_user_provided_values: bool,
    /// Avoid copying read-only sections when possible
    pub optimize_rodata: bool,
    /// Use aligned memory mapping
    pub aligned_memory_mapping: bool,
    /// Allowed [SBPFVersion]s
    pub enabled_sbpf_versions: std::ops::RangeInclusive<SBPFVersion>,
}

impl Config {
    /// Returns the size of the stack memory region
    pub fn stack_size(&self) -> usize {
        self.stack_frame_size * self.max_call_depth
    }
}

impl Default for Config {
    fn default() -> Self {
        Self {
            max_call_depth: 64,
            stack_frame_size: 4_096,
            enable_address_translation: true,
            enable_stack_frame_gaps: true,
            instruction_meter_checkpoint_distance: 10000,
            enable_instruction_meter: true,
            enable_instruction_tracing: false,
            enable_symbol_and_section_labels: false,
            reject_broken_elfs: false,
            noop_instruction_rate: 256,
            sanitize_user_provided_values: true,
            optimize_rodata: true,
            aligned_memory_mapping: true,
            enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V3,
        }
    }
}

/// Static constructors for Executable
impl<C: ContextObject> Executable<C> {
    /// Creates an executable from an ELF file
    pub fn from_elf(elf_bytes: &[u8], loader: Arc<BuiltinProgram<C>>) -> Result<Self, EbpfError> {
        let executable = Executable::load(elf_bytes, loader)?;
        Ok(executable)
    }
    /// Creates an executable from machine code
    pub fn from_text_bytes(
        text_bytes: &[u8],
        loader: Arc<BuiltinProgram<C>>,
        sbpf_version: SBPFVersion,
        function_registry: FunctionRegistry<usize>,
    ) -> Result<Self, EbpfError> {
        Executable::new_from_text_bytes(text_bytes, loader, sbpf_version, function_registry)
            .map_err(EbpfError::ElfError)
    }
}

/// Runtime context
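///
/// # Examples
///
/// A minimal sketch of an implementation; `SimpleContext` is illustrative and not
/// part of this crate (compare `TestContextObject` from the test utilities):
///
/// ```
/// use solana_sbpf::vm::ContextObject;
///
/// struct SimpleContext {
///     remaining: u64,
///     trace_log: Vec<[u64; 12]>,
/// }
///
/// impl ContextObject for SimpleContext {
///     fn trace(&mut self, state: [u64; 12]) {
///         self.trace_log.push(state);
///     }
///     fn consume(&mut self, amount: u64) {
///         self.remaining = self.remaining.saturating_sub(amount);
///     }
///     fn get_remaining(&self) -> u64 {
///         self.remaining
///     }
/// }
///
/// let mut context = SimpleContext { remaining: 3, trace_log: Vec::new() };
/// context.consume(1);
/// assert_eq!(context.get_remaining(), 2);
/// ```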
pub trait ContextObject {
    /// Called for every instruction executed when tracing is enabled
    fn trace(&mut self, state: [u64; 12]);
    /// Consume instructions from meter
    fn consume(&mut self, amount: u64);
    /// Get the number of remaining instructions allowed
    fn get_remaining(&self) -> u64;
}

/// Statistics of taken branches (from a recorded trace)
pub struct DynamicAnalysis {
    /// Maximal edge counter value
    pub edge_counter_max: usize,
    /// Mapping of src_node to dst_node to edge_counter
    pub edges: BTreeMap<usize, BTreeMap<usize, usize>>,
}

impl DynamicAnalysis {
    /// Accumulates a trace
    pub fn new(trace_log: &[[u64; 12]], analysis: &Analysis) -> Self {
        let mut result = Self {
            edge_counter_max: 0,
            edges: BTreeMap::new(),
        };
        let mut last_basic_block = usize::MAX;
        for traced_instruction in trace_log.iter() {
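            // Index 11 of the traced state holds the program counter of the instruction.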
            let pc = traced_instruction[11] as usize;
            if analysis.cfg_nodes.contains_key(&pc) {
                let counter = result
                    .edges
                    .entry(last_basic_block)
                    .or_default()
                    .entry(pc)
                    .or_insert(0);
                *counter += 1;
                result.edge_counter_max = result.edge_counter_max.max(*counter);
                last_basic_block = pc;
            }
        }
        result
    }
}

/// A call frame used for function calls inside the Interpreter
#[derive(Clone, Default)]
pub struct CallFrame {
    /// The caller-saved registers
    pub caller_saved_registers: [u64; ebpf::SCRATCH_REGS],
    /// The caller's frame pointer
    pub frame_pointer: u64,
    /// The target_pc of the exit instruction which returns to the caller
    pub target_pc: u64,
}

/// Indices of slots inside [EbpfVm]
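///
/// Each slot is 8 bytes wide. The discriminants are not contiguous because
/// multi-slot fields such as [EbpfVm::registers] (12 slots, 7..=18) and
/// [EbpfVm::program_result] (slots 19..=26) span several consecutive slots.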
pub enum RuntimeEnvironmentSlot {
    /// [EbpfVm::host_stack_pointer]
    HostStackPointer = 0,
    /// [EbpfVm::call_depth]
    CallDepth = 1,
    /// [EbpfVm::context_object_pointer]
    ContextObjectPointer = 2,
    /// [EbpfVm::previous_instruction_meter]
    PreviousInstructionMeter = 3,
    /// [EbpfVm::due_insn_count]
    DueInsnCount = 4,
    /// [EbpfVm::stopwatch_numerator]
    StopwatchNumerator = 5,
    /// [EbpfVm::stopwatch_denominator]
    StopwatchDenominator = 6,
    /// [EbpfVm::registers]
    Registers = 7,
    /// [EbpfVm::program_result]
    ProgramResult = 19,
    /// [EbpfVm::memory_mapping]
    MemoryMapping = 27,
}

/// A virtual machine to run eBPF programs.
///
/// # Examples
///
/// ```
/// use solana_sbpf::{
///     aligned_memory::AlignedMemory,
///     ebpf,
///     elf::Executable,
///     memory_region::{MemoryMapping, MemoryRegion},
///     program::{BuiltinProgram, FunctionRegistry, SBPFVersion},
///     verifier::RequisiteVerifier,
///     vm::{Config, EbpfVm},
/// };
/// use test_utils::TestContextObject;
///
/// let prog = &[
///     0x9d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
/// ];
/// let mem = &mut [
///     0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
/// ];
///
/// let loader = std::sync::Arc::new(BuiltinProgram::new_mock());
/// let function_registry = FunctionRegistry::default();
/// let mut executable = Executable::<TestContextObject>::from_text_bytes(prog, loader.clone(), SBPFVersion::V3, function_registry).unwrap();
/// executable.verify::<RequisiteVerifier>().unwrap();
/// let mut context_object = TestContextObject::new(1);
/// let sbpf_version = executable.get_sbpf_version();
///
/// let mut stack = AlignedMemory::<{ebpf::HOST_ALIGN}>::zero_filled(executable.get_config().stack_size());
/// let stack_len = stack.len();
/// let mut heap = AlignedMemory::<{ebpf::HOST_ALIGN}>::with_capacity(0);
///
/// let regions: Vec<MemoryRegion> = vec![
///     executable.get_ro_region(),
///     MemoryRegion::new_writable(
///         stack.as_slice_mut(),
///         ebpf::MM_STACK_START,
///     ),
///     MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START),
///     MemoryRegion::new_writable(mem, ebpf::MM_INPUT_START),
/// ];
///
/// let memory_mapping = MemoryMapping::new(regions, executable.get_config(), sbpf_version).unwrap();
///
/// let mut vm = EbpfVm::new(loader, sbpf_version, &mut context_object, memory_mapping, stack_len);
///
/// let (instruction_count, result) = vm.execute_program(&executable, true);
/// assert_eq!(instruction_count, 1);
/// assert_eq!(result.unwrap(), 0);
/// ```
#[repr(C)]
pub struct EbpfVm<'a, C: ContextObject> {
    /// Needed to exit from the guest back into the host
    pub host_stack_pointer: *mut u64,
    /// The current call depth.
    ///
    /// Incremented on calls and decremented on exits. It's used to enforce
    /// config.max_call_depth and to know when to terminate execution.
    pub call_depth: u64,
    /// Pointer to ContextObject
    pub context_object_pointer: &'a mut C,
    /// Last return value of instruction_meter.get_remaining()
    pub previous_instruction_meter: u64,
    /// Outstanding value to instruction_meter.consume()
    pub due_insn_count: u64,
    /// CPU cycles accumulated by the stop watch
    pub stopwatch_numerator: u64,
    /// Number of times the stop watch was used
    pub stopwatch_denominator: u64,
    /// Registers inlined
    pub registers: [u64; 12],
    /// ProgramResult inlined
    pub program_result: ProgramResult,
    /// MemoryMapping inlined
    pub memory_mapping: MemoryMapping<'a>,
    /// Stack of CallFrames used by the Interpreter
    pub call_frames: Vec<CallFrame>,
    /// Loader built-in program
    pub loader: Arc<BuiltinProgram<C>>,
    /// TCP port for the debugger interface
    #[cfg(feature = "debugger")]
    pub debug_port: Option<u16>,
}

impl<'a, C: ContextObject> EbpfVm<'a, C> {
    /// Creates a new virtual machine instance.
    pub fn new(
        loader: Arc<BuiltinProgram<C>>,
        sbpf_version: SBPFVersion,
        context_object: &'a mut C,
        mut memory_mapping: MemoryMapping<'a>,
        stack_len: usize,
    ) -> Self {
        let config = loader.get_config();
        let mut registers = [0u64; 12];
        registers[ebpf::FRAME_PTR_REG] =
            ebpf::MM_STACK_START.saturating_add(if sbpf_version.dynamic_stack_frames() {
                // the stack is fully descending, frames start as empty and change size anytime r11 is modified
                stack_len
            } else {
                // within a frame the stack grows down, but frames are ascending
                config.stack_frame_size
            } as u64);
        if !config.enable_address_translation {
            memory_mapping = MemoryMapping::new_identity();
        }
        EbpfVm {
            host_stack_pointer: std::ptr::null_mut(),
            call_depth: 0,
            context_object_pointer: context_object,
            previous_instruction_meter: 0,
            due_insn_count: 0,
            stopwatch_numerator: 0,
            stopwatch_denominator: 0,
            registers,
            program_result: ProgramResult::Ok(0),
            memory_mapping,
            call_frames: vec![CallFrame::default(); config.max_call_depth],
            loader,
            #[cfg(feature = "debugger")]
            debug_port: None,
        }
    }

    /// Execute the program
    ///
    /// If interpreted = `false` then the JIT-compiled executable is used.
    pub fn execute_program(
        &mut self,
        executable: &Executable<C>,
        interpreted: bool,
    ) -> (u64, ProgramResult) {
        debug_assert!(Arc::ptr_eq(&self.loader, executable.get_loader()));
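        // R1 points at the start of the input region; registers[11] acts as the
        // program counter and is set to the entrypoint instruction offset.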
        self.registers[1] = ebpf::MM_INPUT_START;
        self.registers[11] = executable.get_entrypoint_instruction_offset() as u64;
        let config = executable.get_config();
        let initial_insn_count = self.context_object_pointer.get_remaining();
        self.previous_instruction_meter = initial_insn_count;
        self.due_insn_count = 0;
        self.program_result = ProgramResult::Ok(0);
        if interpreted {
            #[cfg(feature = "debugger")]
            let debug_port = self.debug_port.clone();
            let mut interpreter = Interpreter::new(self, executable, self.registers);
            #[cfg(feature = "debugger")]
            if let Some(debug_port) = debug_port {
                crate::debugger::execute(&mut interpreter, debug_port);
            } else {
                while interpreter.step() {}
            }
            #[cfg(not(feature = "debugger"))]
            while interpreter.step() {}
        } else {
            #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
            {
                let compiled_program = match executable
                    .get_compiled_program()
                    .ok_or_else(|| EbpfError::JitNotCompiled)
                {
                    Ok(compiled_program) => compiled_program,
                    Err(error) => return (0, ProgramResult::Err(error)),
                };
                compiled_program.invoke(config, self, self.registers);
            }
            #[cfg(not(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64")))]
            {
                return (0, ProgramResult::Err(EbpfError::JitNotCompiled));
            }
        };
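        // Charge the outstanding instructions to the meter, then derive the executed
        // instruction count from the drop in the remaining budget.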
        let instruction_count = if config.enable_instruction_meter {
            self.context_object_pointer.consume(self.due_insn_count);
            initial_insn_count.saturating_sub(self.context_object_pointer.get_remaining())
        } else {
            0
        };
        let mut result = ProgramResult::Ok(0);
        std::mem::swap(&mut result, &mut self.program_result);
        (instruction_count, result)
    }

    /// Invokes a built-in function
    pub fn invoke_function(&mut self, function: BuiltinFunction<C>) {
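        // The VM pointer handed to the builtin is offset (in 8-byte units) by the
        // per-process runtime environment key, see get_runtime_environment_key().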
        function(
            unsafe {
                std::ptr::addr_of_mut!(*self)
                    .cast::<u64>()
                    .offset(get_runtime_environment_key() as isize)
                    .cast::<Self>()
            },
            self.registers[1],
            self.registers[2],
            self.registers[3],
            self.registers[4],
            self.registers[5],
        );
    }
}