#![allow(clippy::arithmetic_side_effects)]
use crate::{
    ebpf,
    elf::Executable,
    error::{EbpfError, ProgramResult},
    program::BuiltinFunction,
    vm::{Config, ContextObject, EbpfVm},
};

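// Resolves a guest address through the VM's MemoryMapping: yields the loaded value
// (or performs the store) on success, and on failure records the error and returns
// false from the enclosing `step`.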
macro_rules! translate_memory_access {
    (_impl, $self:ident, $op:ident, $vm_addr:ident, $T:ty, $($rest:expr),*) => {
        match $self.vm.memory_mapping.$op::<$T>(
            $($rest,)*
            $vm_addr,
        ) {
            ProgramResult::Ok(v) => v,
            ProgramResult::Err(err) => {
                throw_error!($self, err);
            },
        }
    };

    ($self:ident, load, $vm_addr:ident, $T:ty) => {
        translate_memory_access!(_impl, $self, load, $vm_addr, $T,)
    };

    ($self:ident, store, $value:expr, $vm_addr:ident, $T:ty) => {
        translate_memory_access!(_impl, $self, store, $vm_addr, $T, ($value) as $T);
    };
}

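// Aborts the current `step`: writes the program counter back to the VM, records the
// error as the program result and returns false. The `DivideByZero` / `DivideOverflow`
// forms only throw when the corresponding operand condition holds.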
macro_rules! throw_error {
    ($self:expr, $err:expr) => {{
        $self.vm.registers[11] = $self.reg[11];
        $self.vm.program_result = ProgramResult::Err($err);
        return false;
    }};
    (DivideByZero; $self:expr, $src:expr, $ty:ty) => {
        if $src as $ty == 0 {
            throw_error!($self, EbpfError::DivideByZero);
        }
    };
    (DivideOverflow; $self:expr, $src:expr, $dst:expr, $ty:ty) => {
        if $dst as $ty == <$ty>::MIN && $src as $ty == -1 {
            throw_error!($self, EbpfError::DivideOverflow);
        }
    };
}

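// Bounds-checks a jump or call target against the text segment before committing it
// to the next program counter; otherwise throws `CallOutsideTextSegment`.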
macro_rules! check_pc {
    ($self:expr, $next_pc:ident, $target_pc:expr) => {
        if ($target_pc as usize)
            .checked_mul(ebpf::INSN_SIZE)
            .and_then(|offset| {
                $self
                    .program
                    .get(offset..offset.saturating_add(ebpf::INSN_SIZE))
            })
            .is_some()
        {
            $next_pc = $target_pc;
        } else {
            throw_error!($self, EbpfError::CallOutsideTextSegment);
        }
    };
}

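/// State of the interpreter during a debugging session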
#[cfg(feature = "debugger")]
pub enum DebugState {
    /// Single step the interpreter
    Step,
    /// Continue execution till the end or till a breakpoint is hit
    Continue,
}

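/// Interpreter state
///
/// A minimal sketch of the driving loop, assuming an `EbpfVm`, an `Executable` and the
/// initial register values have already been set up elsewhere:
///
/// ```ignore
/// let mut interpreter = Interpreter::new(&mut vm, &executable, registers);
/// // Each call executes one instruction; the loop ends on exit or on an error.
/// while interpreter.step() {}
/// // The outcome is now recorded in the VM's program result.
/// ```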
pub struct Interpreter<'a, 'b, C: ContextObject> {
    pub(crate) vm: &'a mut EbpfVm<'b, C>,
    pub(crate) executable: &'a Executable<C>,
    pub(crate) program: &'a [u8],
    pub(crate) program_vm_addr: u64,

    /// General purpose registers, with the program counter in reg[11]
    pub reg: [u64; 12],

    #[cfg(feature = "debugger")]
    pub(crate) debug_state: DebugState,
    #[cfg(feature = "debugger")]
    pub(crate) breakpoints: Vec<u64>,
}

impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
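    /// Creates a new interpreter state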
    pub fn new(
        vm: &'a mut EbpfVm<'b, C>,
        executable: &'a Executable<C>,
        registers: [u64; 12],
    ) -> Self {
        let (program_vm_addr, program) = executable.get_text_bytes();
        Self {
            vm,
            executable,
            program,
            program_vm_addr,
            reg: registers,
            #[cfg(feature = "debugger")]
            debug_state: DebugState::Continue,
            #[cfg(feature = "debugger")]
            breakpoints: Vec::new(),
        }
    }

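    /// Returns the program counter as a byte offset, i.e. the text section offset
    /// plus the offset of the current instruction.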
    #[cfg(feature = "debugger")]
    pub fn get_dbg_pc(&self) -> u64 {
        (self.reg[11] * ebpf::INSN_SIZE as u64) + self.executable.get_text_section_offset()
    }

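    /// Pushes a call frame: saves the caller's scratch registers, frame pointer and
    /// return address, then bumps the call depth (and the frame pointer when stack
    /// frames are not dynamically sized). Throws `CallDepthExceeded` when the limit is hit.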
    fn push_frame(&mut self, config: &Config) -> bool {
        let frame = &mut self.vm.call_frames[self.vm.call_depth as usize];
        frame.caller_saved_registers.copy_from_slice(
            &self.reg[ebpf::FIRST_SCRATCH_REG..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS],
        );
        frame.frame_pointer = self.reg[ebpf::FRAME_PTR_REG];
        frame.target_pc = self.reg[11] + 1;

        self.vm.call_depth += 1;
        if self.vm.call_depth as usize == config.max_call_depth {
            throw_error!(self, EbpfError::CallDepthExceeded);
        }

        if !self.executable.get_sbpf_version().dynamic_stack_frames() {
            let stack_frame_size =
                config.stack_frame_size * if config.enable_stack_frame_gaps { 2 } else { 1 };
            self.reg[ebpf::FRAME_PTR_REG] += stack_frame_size as u64;
        }

        true
    }

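    /// Widens a 32-bit ALU result to 64 bits: zero-extended when the SBPF version uses
    /// explicit sign extension of results, sign-extended otherwise.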
    fn sign_extension(&self, value: i32) -> u64 {
        if self
            .executable
            .get_sbpf_version()
            .explicit_sign_extension_of_results()
        {
            value as u32 as u64
        } else {
            value as i64 as u64
        }
    }

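    /// Advances the interpreter state by one instruction
    ///
    /// Returns false if the program terminated or threw an error.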
    #[rustfmt::skip]
    pub fn step(&mut self) -> bool {
        let config = &self.executable.get_config();

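        // Check the instruction meter and that the program counter still points into the text segment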
        if config.enable_instruction_meter && self.vm.due_insn_count >= self.vm.previous_instruction_meter {
            throw_error!(self, EbpfError::ExceededMaxInstructions);
        }
        self.vm.due_insn_count += 1;
        if self.reg[11] as usize * ebpf::INSN_SIZE >= self.program.len() {
            throw_error!(self, EbpfError::ExecutionOverrun);
        }
        let mut next_pc = self.reg[11] + 1;
        let mut insn = ebpf::get_insn_unchecked(self.program, self.reg[11] as usize);
        let dst = insn.dst as usize;
        let src = insn.src as usize;

        if config.enable_instruction_tracing {
            self.vm.context_object_pointer.trace(self.reg);
        }

        match insn.opc {
            ebpf::LD_DW_IMM if !self.executable.get_sbpf_version().disable_lddw() => {
                ebpf::augment_lddw_unchecked(self.program, &mut insn);
                self.reg[dst] = insn.imm as u64;
                self.reg[11] += 1;
                next_pc += 1;
            },

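            // Memory loads via the legacy load instruction class (only valid while the
            // memory instructions have not been moved into the ALU opcode space)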
            ebpf::LD_B_REG if !self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                self.reg[dst] = translate_memory_access!(self, load, vm_addr, u8);
            },
            ebpf::LD_H_REG if !self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                self.reg[dst] = translate_memory_access!(self, load, vm_addr, u16);
            },
            ebpf::LD_W_REG if !self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                self.reg[dst] = translate_memory_access!(self, load, vm_addr, u32);
            },
            ebpf::LD_DW_REG if !self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                self.reg[dst] = translate_memory_access!(self, load, vm_addr, u64);
            },

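            // Memory stores of an immediate, legacy store instruction class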
            ebpf::ST_B_IMM if !self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, insn.imm, vm_addr, u8);
            },
            ebpf::ST_H_IMM if !self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, insn.imm, vm_addr, u16);
            },
            ebpf::ST_W_IMM if !self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, insn.imm, vm_addr, u32);
            },
            ebpf::ST_DW_IMM if !self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, insn.imm, vm_addr, u64);
            },

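            // Memory stores of a register, legacy store instruction class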
            ebpf::ST_B_REG if !self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, self.reg[src], vm_addr, u8);
            },
            ebpf::ST_H_REG if !self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, self.reg[src], vm_addr, u16);
            },
            ebpf::ST_W_REG if !self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, self.reg[src], vm_addr, u32);
            },
            ebpf::ST_DW_REG if !self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, self.reg[src], vm_addr, u64);
            },

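            // 32-bit ALU instructions, interleaved with the 1/2/4/8-byte register loads
            // that occupy this opcode space once memory instructions move classes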
            ebpf::ADD32_IMM => self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_add(insn.imm as i32)),
            ebpf::ADD32_REG => self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_add(self.reg[src] as i32)),
            ebpf::SUB32_IMM => if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() {
                self.reg[dst] = self.sign_extension((insn.imm as i32).wrapping_sub(self.reg[dst] as i32))
            } else {
                self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_sub(insn.imm as i32))
            },
            ebpf::SUB32_REG => self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_sub(self.reg[src] as i32)),
            ebpf::MUL32_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32) as u64,
            ebpf::MUL32_REG if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u64,
            ebpf::LD_1B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                self.reg[dst] = translate_memory_access!(self, load, vm_addr, u8);
            },
            ebpf::DIV32_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32 / insn.imm as u32) as u64,
            ebpf::DIV32_REG if !self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u32);
                self.reg[dst] = (self.reg[dst] as u32 / self.reg[src] as u32) as u64;
            },
            ebpf::LD_2B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                self.reg[dst] = translate_memory_access!(self, load, vm_addr, u16);
            },
            ebpf::OR32_IMM => self.reg[dst] = (self.reg[dst] as u32 | insn.imm as u32) as u64,
            ebpf::OR32_REG => self.reg[dst] = (self.reg[dst] as u32 | self.reg[src] as u32) as u64,
            ebpf::AND32_IMM => self.reg[dst] = (self.reg[dst] as u32 & insn.imm as u32) as u64,
            ebpf::AND32_REG => self.reg[dst] = (self.reg[dst] as u32 & self.reg[src] as u32) as u64,
            ebpf::LSH32_IMM => self.reg[dst] = (self.reg[dst] as u32).wrapping_shl(insn.imm as u32) as u64,
            ebpf::LSH32_REG => self.reg[dst] = (self.reg[dst] as u32).wrapping_shl(self.reg[src] as u32) as u64,
            ebpf::RSH32_IMM => self.reg[dst] = (self.reg[dst] as u32).wrapping_shr(insn.imm as u32) as u64,
            ebpf::RSH32_REG => self.reg[dst] = (self.reg[dst] as u32).wrapping_shr(self.reg[src] as u32) as u64,
            ebpf::NEG32 if !self.executable.get_sbpf_version().disable_neg() => self.reg[dst] = (self.reg[dst] as i32).wrapping_neg() as u64 & (u32::MAX as u64),
            ebpf::LD_4B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                self.reg[dst] = translate_memory_access!(self, load, vm_addr, u32);
            },
            ebpf::MOD32_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32 % insn.imm as u32) as u64,
            ebpf::MOD32_REG if !self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u32);
                self.reg[dst] = (self.reg[dst] as u32 % self.reg[src] as u32) as u64;
            },
            ebpf::LD_8B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                self.reg[dst] = translate_memory_access!(self, load, vm_addr, u64);
            },
            ebpf::XOR32_IMM => self.reg[dst] = (self.reg[dst] as u32 ^ insn.imm as u32) as u64,
            ebpf::XOR32_REG => self.reg[dst] = (self.reg[dst] as u32 ^ self.reg[src] as u32) as u64,
            ebpf::MOV32_IMM => self.reg[dst] = insn.imm as u32 as u64,
            ebpf::MOV32_REG => self.reg[dst] = if self.executable.get_sbpf_version().explicit_sign_extension_of_results() {
                self.reg[src] as i32 as i64 as u64
            } else {
                self.reg[src] as u32 as u64
            },
            ebpf::ARSH32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(insn.imm as u32) as u32 as u64,
            ebpf::ARSH32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(self.reg[src] as u32) as u32 as u64,
            ebpf::LE if !self.executable.get_sbpf_version().disable_le() => {
                self.reg[dst] = match insn.imm {
                    16 => (self.reg[dst] as u16).to_le() as u64,
                    32 => (self.reg[dst] as u32).to_le() as u64,
                    64 => self.reg[dst].to_le(),
                    _ => {
                        throw_error!(self, EbpfError::InvalidInstruction);
                    }
                };
            },
            ebpf::BE => {
                self.reg[dst] = match insn.imm {
                    16 => (self.reg[dst] as u16).to_be() as u64,
                    32 => (self.reg[dst] as u32).to_be() as u64,
                    64 => self.reg[dst].to_be(),
                    _ => {
                        throw_error!(self, EbpfError::InvalidInstruction);
                    }
                };
            },

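            // 64-bit ALU instructions, interleaved with the corresponding store opcodes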
            ebpf::ADD64_IMM => self.reg[dst] = self.reg[dst].wrapping_add(insn.imm as u64),
            ebpf::ADD64_REG => self.reg[dst] = self.reg[dst].wrapping_add(self.reg[src]),
            ebpf::SUB64_IMM => if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() {
                self.reg[dst] = (insn.imm as u64).wrapping_sub(self.reg[dst])
            } else {
                self.reg[dst] = self.reg[dst].wrapping_sub(insn.imm as u64)
            },
            ebpf::SUB64_REG => self.reg[dst] = self.reg[dst].wrapping_sub(self.reg[src]),
            ebpf::MUL64_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(insn.imm as u64),
            ebpf::ST_1B_IMM if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, insn.imm, vm_addr, u8);
            },
            ebpf::MUL64_REG if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(self.reg[src]),
            ebpf::ST_1B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, self.reg[src], vm_addr, u8);
            },
            ebpf::DIV64_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] /= insn.imm as u64,
            ebpf::ST_2B_IMM if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, insn.imm, vm_addr, u16);
            },
            ebpf::DIV64_REG if !self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u64);
                self.reg[dst] /= self.reg[src];
            },
            ebpf::ST_2B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, self.reg[src], vm_addr, u16);
            },
            ebpf::OR64_IMM => self.reg[dst] |= insn.imm as u64,
            ebpf::OR64_REG => self.reg[dst] |= self.reg[src],
            ebpf::AND64_IMM => self.reg[dst] &= insn.imm as u64,
            ebpf::AND64_REG => self.reg[dst] &= self.reg[src],
            ebpf::LSH64_IMM => self.reg[dst] = self.reg[dst].wrapping_shl(insn.imm as u32),
            ebpf::LSH64_REG => self.reg[dst] = self.reg[dst].wrapping_shl(self.reg[src] as u32),
            ebpf::RSH64_IMM => self.reg[dst] = self.reg[dst].wrapping_shr(insn.imm as u32),
            ebpf::RSH64_REG => self.reg[dst] = self.reg[dst].wrapping_shr(self.reg[src] as u32),
            ebpf::ST_4B_IMM if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, insn.imm, vm_addr, u32);
            },
            ebpf::NEG64 if !self.executable.get_sbpf_version().disable_neg() => self.reg[dst] = (self.reg[dst] as i64).wrapping_neg() as u64,
            ebpf::ST_4B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, self.reg[src], vm_addr, u32);
            },
            ebpf::MOD64_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] %= insn.imm as u64,
            ebpf::ST_8B_IMM if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, insn.imm, vm_addr, u64);
            },
            ebpf::MOD64_REG if !self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u64);
                self.reg[dst] %= self.reg[src];
            },
            ebpf::ST_8B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, self.reg[src], vm_addr, u64);
            },
            ebpf::XOR64_IMM => self.reg[dst] ^= insn.imm as u64,
            ebpf::XOR64_REG => self.reg[dst] ^= self.reg[src],
            ebpf::MOV64_IMM => self.reg[dst] = insn.imm as u64,
            ebpf::MOV64_REG => self.reg[dst] = self.reg[src],
            ebpf::ARSH64_IMM => self.reg[dst] = (self.reg[dst] as i64).wrapping_shr(insn.imm as u32) as u64,
            ebpf::ARSH64_REG => self.reg[dst] = (self.reg[dst] as i64).wrapping_shr(self.reg[src] as u32) as u64,
            ebpf::HOR64_IMM if self.executable.get_sbpf_version().disable_lddw() => {
                self.reg[dst] |= (insn.imm as u64).wrapping_shl(32);
            }

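            // Product / quotient / remainder (PQR) instructions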
            ebpf::LMUL32_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32).wrapping_mul(insn.imm as u32) as u64,
            ebpf::LMUL32_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32).wrapping_mul(self.reg[src] as u32) as u64,
            ebpf::LMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(insn.imm as u64),
            ebpf::LMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(self.reg[src]),
            ebpf::UHMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u128).wrapping_mul(insn.imm as u32 as u128).wrapping_shr(64) as u64,
            ebpf::UHMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u128).wrapping_mul(self.reg[src] as u128).wrapping_shr(64) as u64,
            ebpf::SHMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i64 as i128).wrapping_mul(insn.imm as i128).wrapping_shr(64) as u64,
            ebpf::SHMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i64 as i128).wrapping_mul(self.reg[src] as i64 as i128).wrapping_shr(64) as u64,
            ebpf::UDIV32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                self.reg[dst] = (self.reg[dst] as u32 / insn.imm as u32) as u64;
            }
            ebpf::UDIV32_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u32);
                self.reg[dst] = (self.reg[dst] as u32 / self.reg[src] as u32) as u64;
            },
            ebpf::UDIV64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                self.reg[dst] /= insn.imm as u32 as u64;
            }
            ebpf::UDIV64_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u64);
                self.reg[dst] /= self.reg[src];
            },
            ebpf::UREM32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                self.reg[dst] = (self.reg[dst] as u32 % insn.imm as u32) as u64;
            }
            ebpf::UREM32_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u32);
                self.reg[dst] = (self.reg[dst] as u32 % self.reg[src] as u32) as u64;
            },
            ebpf::UREM64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                self.reg[dst] %= insn.imm as u32 as u64;
            }
            ebpf::UREM64_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u64);
                self.reg[dst] %= self.reg[src];
            },
            ebpf::SDIV32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i32);
                self.reg[dst] = (self.reg[dst] as i32 / insn.imm as i32) as u32 as u64;
            }
            ebpf::SDIV32_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], i32);
                throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i32);
                self.reg[dst] = (self.reg[dst] as i32 / self.reg[src] as i32) as u32 as u64;
            },
            ebpf::SDIV64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i64);
                self.reg[dst] = (self.reg[dst] as i64 / insn.imm) as u64;
            }
            ebpf::SDIV64_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], i64);
                throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i64);
                self.reg[dst] = (self.reg[dst] as i64 / self.reg[src] as i64) as u64;
            },
            ebpf::SREM32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i32);
                self.reg[dst] = (self.reg[dst] as i32 % insn.imm as i32) as u32 as u64;
            }
            ebpf::SREM32_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], i32);
                throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i32);
                self.reg[dst] = (self.reg[dst] as i32 % self.reg[src] as i32) as u32 as u64;
            },
            ebpf::SREM64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i64);
                self.reg[dst] = (self.reg[dst] as i64 % insn.imm) as u64;
            }
            ebpf::SREM64_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], i64);
                throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i64);
                self.reg[dst] = (self.reg[dst] as i64 % self.reg[src] as i64) as u64;
            },

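            // Jump instructions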
            ebpf::JA => { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JEQ_IMM => if self.reg[dst] == insn.imm as u64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JEQ_REG => if self.reg[dst] == self.reg[src] { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JGT_IMM => if self.reg[dst] > insn.imm as u64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JGT_REG => if self.reg[dst] > self.reg[src] { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JGE_IMM => if self.reg[dst] >= insn.imm as u64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JGE_REG => if self.reg[dst] >= self.reg[src] { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JLT_IMM => if self.reg[dst] < insn.imm as u64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JLT_REG => if self.reg[dst] < self.reg[src] { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JLE_IMM => if self.reg[dst] <= insn.imm as u64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JLE_REG => if self.reg[dst] <= self.reg[src] { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSET_IMM => if self.reg[dst] & insn.imm as u64 != 0 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSET_REG => if self.reg[dst] & self.reg[src] != 0 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JNE_IMM => if self.reg[dst] != insn.imm as u64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JNE_REG => if self.reg[dst] != self.reg[src] { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSGT_IMM => if (self.reg[dst] as i64) > insn.imm { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSGT_REG => if (self.reg[dst] as i64) > self.reg[src] as i64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSGE_IMM => if (self.reg[dst] as i64) >= insn.imm { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSGE_REG => if (self.reg[dst] as i64) >= self.reg[src] as i64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSLT_IMM => if (self.reg[dst] as i64) < insn.imm { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSLT_REG => if (self.reg[dst] as i64) < self.reg[src] as i64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSLE_IMM => if (self.reg[dst] as i64) <= insn.imm { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSLE_REG => if (self.reg[dst] as i64) <= self.reg[src] as i64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },

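            // CALL_REG (callx): the call target is taken from a register and must land
            // inside the text segment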
            ebpf::CALL_REG => {
                let target_pc = if self.executable.get_sbpf_version().callx_uses_src_reg() {
                    self.reg[src]
                } else {
                    self.reg[insn.imm as usize]
                };
                if !self.push_frame(config) {
                    return false;
                }
                check_pc!(self, next_pc, target_pc.wrapping_sub(self.program_vm_addr) / ebpf::INSN_SIZE as u64);
                if self.executable.get_sbpf_version().static_syscalls() && self.executable.get_function_registry().lookup_by_key(next_pc as u32).is_none() {
                    throw_error!(self, EbpfError::UnsupportedInstruction);
                }
            },

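            // CALL_IMM either dispatches a syscall (before static syscalls) or performs
            // an internal BPF-to-BPF call through the function registry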
            ebpf::CALL_IMM => {
                if let (false, Some((_, function))) =
                        (self.executable.get_sbpf_version().static_syscalls(),
                         self.executable.get_loader().get_function_registry().lookup_by_key(insn.imm as u32)) {
                    self.reg[0] = match self.dispatch_syscall(function) {
                        ProgramResult::Ok(value) => *value,
                        ProgramResult::Err(_err) => return false,
                    };
                } else if let Some((_, target_pc)) =
                    self.executable
                        .get_function_registry()
                        .lookup_by_key(
                            self
                                .executable
                                .get_sbpf_version()
                                .calculate_call_imm_target_pc(self.reg[11] as usize, insn.imm)
                        ) {
                    if !self.push_frame(config) {
                        return false;
                    }
                    check_pc!(self, next_pc, target_pc as u64);
                } else {
                    throw_error!(self, EbpfError::UnsupportedInstruction);
                }
            }
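            // Dedicated syscall opcode (static syscalls); unknown keys are caught by the verifier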
            ebpf::SYSCALL if self.executable.get_sbpf_version().static_syscalls() => {
                if let Some((_, function)) = self.executable.get_loader().get_function_registry().lookup_by_key(insn.imm as u32) {
                    self.reg[0] = match self.dispatch_syscall(function) {
                        ProgramResult::Ok(value) => *value,
                        ProgramResult::Err(_err) => return false,
                    };
                } else {
                    debug_assert!(false, "Invalid syscall should have been detected in the verifier.");
                }
            },
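            // EXIT (before static syscalls) and RETURN (with static syscalls) pop a call
            // frame, or end the program with r0 as the result at call depth zero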
            ebpf::RETURN
            | ebpf::EXIT => {
                if (insn.opc == ebpf::EXIT && self.executable.get_sbpf_version().static_syscalls())
                    || (insn.opc == ebpf::RETURN && !self.executable.get_sbpf_version().static_syscalls()) {
                    throw_error!(self, EbpfError::UnsupportedInstruction);
                }

                if self.vm.call_depth == 0 {
                    if config.enable_instruction_meter && self.vm.due_insn_count > self.vm.previous_instruction_meter {
                        throw_error!(self, EbpfError::ExceededMaxInstructions);
                    }
                    self.vm.program_result = ProgramResult::Ok(self.reg[0]);
                    return false;
                }
                self.vm.call_depth -= 1;
                let frame = &self.vm.call_frames[self.vm.call_depth as usize];
                self.reg[ebpf::FRAME_PTR_REG] = frame.frame_pointer;
                self.reg[ebpf::FIRST_SCRATCH_REG
                    ..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS]
                    .copy_from_slice(&frame.caller_saved_registers);
                check_pc!(self, next_pc, frame.target_pc);
            }
            _ => throw_error!(self, EbpfError::UnsupportedInstruction),
        }

        self.reg[11] = next_pc;
        true
    }

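    /// Invokes a builtin (syscall) function: syncs the instruction meter and registers
    /// r0..r5 into the VM, calls the function and returns the VM's program result.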
    fn dispatch_syscall(&mut self, function: BuiltinFunction<C>) -> &ProgramResult {
        self.vm.due_insn_count = self.vm.previous_instruction_meter - self.vm.due_insn_count;
        self.vm.registers[0..6].copy_from_slice(&self.reg[0..6]);
        self.vm.invoke_function(function);
        self.vm.due_insn_count = 0;
        &self.vm.program_result
    }
}