1#![allow(clippy::arithmetic_side_effects)]
2use crate::{
16 ebpf::{self, STACK_PTR_REG},
17 elf::Executable,
18 error::{EbpfError, ProgramResult},
19 vm::{Config, ContextObject, EbpfVm},
20};
21
/// Performs a memory access through `self.vm.memory_mapping`, yielding the
/// loaded value (for `load`) or unit (for `store`). On a failed translation
/// the expansion invokes `throw_error!`, which records the error and does an
/// early `return false` from the enclosing function — so this macro may only
/// be used inside functions returning `bool` (e.g. `Interpreter::step`).
macro_rules! translate_memory_access {
    // Shared expansion: call `memory_mapping.$op::<$T>(..)` and unwrap the
    // `ProgramResult`, converting an `Err` into an early return.
    (_impl, $self:ident, $op:ident, $vm_addr:ident, $T:ty, $($rest:expr),*) => {
        match $self.vm.memory_mapping.$op::<$T>(
            $($rest,)*
            $vm_addr,
        ) {
            ProgramResult::Ok(v) => v,
            ProgramResult::Err(err) => {
                throw_error!($self, err);
            },
        }
    };

    // Load a `$T` from virtual address `$vm_addr`.
    ($self:ident, load, $vm_addr:ident, $T:ty) => {
        translate_memory_access!(_impl, $self, load, $vm_addr, $T,)
    };

    // Store `$value`, cast (truncated) to `$T`, at virtual address `$vm_addr`.
    ($self:ident, store, $value:expr, $vm_addr:ident, $T:ty) => {
        translate_memory_access!(_impl, $self, store, $vm_addr, $T, ($value) as $T);
    };
}
46
/// Aborts the current step: records the error in `vm.program_result`, syncs
/// the pc back into the VM register file, and does `return false` from the
/// enclosing function. The `DivideByZero` / `DivideOverflow` forms are guards
/// that only abort when the hazardous operand values are present.
macro_rules! throw_error {
    ($self:expr, $err:expr) => {{
        // Persist the current pc (register 11) so VM state reflects where
        // execution stopped.
        $self.vm.registers[11] = $self.reg[11];
        $self.vm.program_result = ProgramResult::Err($err);
        return false;
    }};
    // Guard: abort with `DivideByZero` when the divisor is zero in `$ty`.
    (DivideByZero; $self:expr, $src:expr, $ty:ty) => {
        if $src as $ty == 0 {
            throw_error!($self, EbpfError::DivideByZero);
        }
    };
    // Guard: abort on the single overflowing signed division, `MIN / -1`.
    (DivideOverflow; $self:expr, $src:expr, $dst:expr, $ty:ty) => {
        if $dst as $ty == <$ty>::MIN && $src as $ty == -1 {
            throw_error!($self, EbpfError::DivideOverflow);
        }
    };
}
64
/// Validates a call/return target before assigning it to `$next_pc`: the
/// target (in instruction units) must map to a full `INSN_SIZE`-byte slot
/// inside `self.program`. `checked_mul` guards against overflow of the byte
/// offset; out-of-range targets abort with `CallOutsideTextSegment`.
macro_rules! check_pc {
    ($self:expr, $next_pc:ident, $target_pc:expr) => {
        if ($target_pc as usize)
            .checked_mul(ebpf::INSN_SIZE)
            .and_then(|offset| $self.program.get(offset..offset + ebpf::INSN_SIZE))
            .is_some()
        {
            $next_pc = $target_pc;
        } else {
            throw_error!($self, EbpfError::CallOutsideTextSegment);
        }
    };
}
78
#[cfg(feature = "debugger")]
/// Run mode of the interpreter when driven by the debugger.
/// NOTE(review): variant semantics inferred from their names — the debugger
/// loop consuming this enum is not in view; confirm against its user.
pub enum DebugState {
    /// Execute one instruction, then pause again.
    Step,
    /// Run freely (until a breakpoint, presumably — see `breakpoints`).
    Continue,
}
87
/// Interpreter state for executing an sBPF program one instruction at a time.
pub struct Interpreter<'a, 'b, C: ContextObject> {
    /// The VM holding shared state: register file, memory mapping, call
    /// frames, stack pointer, instruction meter and the program result.
    pub(crate) vm: &'a mut EbpfVm<'b, C>,
    /// The executable being interpreted (config, SBPF version, registries).
    pub(crate) executable: &'a Executable<C>,
    /// Raw bytes of the text section (the instruction stream).
    pub(crate) program: &'a [u8],
    /// Virtual address at which `program` is mapped (used to convert
    /// `callx` target addresses back into instruction indices).
    pub(crate) program_vm_addr: u64,

    /// General purpose registers r0..r10; index 11 holds the program counter
    /// in instruction units.
    pub reg: [u64; 12],

    #[cfg(feature = "debugger")]
    /// Current debugger run mode.
    pub(crate) debug_state: DebugState,
    #[cfg(feature = "debugger")]
    /// Instruction addresses at which the debugger should stop.
    pub(crate) breakpoints: Vec<u64>,
}
103
104impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
105 pub fn new(
107 vm: &'a mut EbpfVm<'b, C>,
108 executable: &'a Executable<C>,
109 registers: [u64; 12],
110 ) -> Self {
111 let (program_vm_addr, program) = executable.get_text_bytes();
112 Self {
113 vm,
114 executable,
115 program,
116 program_vm_addr,
117 reg: registers,
118 #[cfg(feature = "debugger")]
119 debug_state: DebugState::Continue,
120 #[cfg(feature = "debugger")]
121 breakpoints: Vec::new(),
122 }
123 }
124
    #[cfg(feature = "debugger")]
    /// Returns the pc scaled to bytes plus the executable's text section
    /// offset, for presentation to the debugger.
    /// NOTE(review): presumably this yields a file/section-relative byte
    /// address — confirm against the debugger front-end.
    pub fn get_dbg_pc(&self) -> u64 {
        (self.reg[11] * ebpf::INSN_SIZE as u64) + self.executable.get_text_section_offset()
    }
130
    /// Saves the caller's state into the next call frame and bumps the call
    /// depth, in preparation for a call. Returns `false` (with
    /// `CallDepthExceeded` recorded in the VM) when the configured maximum
    /// call depth is reached; `true` otherwise.
    fn push_frame(&mut self, config: &Config) -> bool {
        // Snapshot the scratch registers, the frame pointer, and the return
        // address into the frame slot for the current depth.
        let frame = &mut self.vm.call_frames[self.vm.call_depth as usize];
        frame.caller_saved_registers.copy_from_slice(
            &self.reg[ebpf::FIRST_SCRATCH_REG..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS],
        );
        frame.frame_pointer = self.reg[ebpf::FRAME_PTR_REG];
        // Return target is the instruction after the call (reg[11] is the pc).
        frame.target_pc = self.reg[11] + 1;

        self.vm.call_depth += 1;
        if self.vm.call_depth as usize == config.max_call_depth {
            throw_error!(self, EbpfError::CallDepthExceeded);
        }

        // Fixed-size stack frames: advance the stack pointer by one frame
        // (doubled when stack frame gaps are enabled). With dynamic stack
        // frames the program adjusts the stack pointer itself via ADD64_IMM
        // on the stack pointer register (see `step`).
        if !self.executable.get_sbpf_version().dynamic_stack_frames() {
            let stack_frame_size =
                config.stack_frame_size * if config.enable_stack_frame_gaps { 2 } else { 1 };
            self.vm.stack_pointer += stack_frame_size as u64;
        }
        self.reg[ebpf::FRAME_PTR_REG] = self.vm.stack_pointer;

        true
    }
154
    #[rustfmt::skip]
    /// Executes the single instruction at the current pc (`reg[11]`).
    ///
    /// Returns `true` to continue stepping. Returns `false` when execution has
    /// terminated — either successfully (`EXIT` at call depth 0, result in
    /// `vm.program_result`) or with an error recorded by `throw_error!`.
    pub fn step(&mut self) -> bool {
        let config = &self.executable.get_config();

        self.vm.due_insn_count += 1;
        // Fall-through pc; jump/call/exit arms overwrite it. Abort if the pc
        // has run past the end of the text section.
        let mut next_pc = self.reg[11] + 1;
        if next_pc as usize * ebpf::INSN_SIZE > self.program.len() {
            throw_error!(self, EbpfError::ExecutionOverrun);
        }
        let mut insn = ebpf::get_insn_unchecked(self.program, self.reg[11] as usize);
        let dst = insn.dst as usize;
        let src = insn.src as usize;

        if config.enable_instruction_tracing {
            self.vm.context_object_pointer.trace(self.reg);
        }

        match insn.opc {
            // With dynamic stack frames, ADD64_IMM on the stack pointer
            // register adjusts the VM's stack pointer directly instead of a
            // general purpose register.
            ebpf::ADD64_IMM if dst == STACK_PTR_REG && self.executable.get_sbpf_version().dynamic_stack_frames() => {
                self.vm.stack_pointer = self.vm.stack_pointer.overflowing_add(insn.imm as u64).0;
            }

            // LD_DW_IMM occupies two instruction slots: merge the second
            // half of the 64-bit immediate, then skip the extra slot.
            ebpf::LD_DW_IMM if self.executable.get_sbpf_version().enable_lddw() => {
                ebpf::augment_lddw_unchecked(self.program, &mut insn);
                self.reg[dst] = insn.imm as u64;
                self.reg[11] += 1;
                next_pc += 1;
            },

            // Loads: reg[dst] = *(T*)(reg[src] + off), via the memory mapping.
            ebpf::LD_B_REG => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                self.reg[dst] = translate_memory_access!(self, load, vm_addr, u8);
            },
            ebpf::LD_H_REG => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                self.reg[dst] = translate_memory_access!(self, load, vm_addr, u16);
            },
            ebpf::LD_W_REG => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                self.reg[dst] = translate_memory_access!(self, load, vm_addr, u32);
            },
            ebpf::LD_DW_REG => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                self.reg[dst] = translate_memory_access!(self, load, vm_addr, u64);
            },

            // Immediate stores: *(T*)(reg[dst] + off) = imm (truncated to T).
            ebpf::ST_B_IMM => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add( insn.off as i64) as u64;
                translate_memory_access!(self, store, insn.imm, vm_addr, u8);
            },
            ebpf::ST_H_IMM => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, insn.imm, vm_addr, u16);
            },
            ebpf::ST_W_IMM => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, insn.imm, vm_addr, u32);
            },
            ebpf::ST_DW_IMM => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, insn.imm, vm_addr, u64);
            },

            // Register stores: *(T*)(reg[dst] + off) = reg[src] (truncated).
            ebpf::ST_B_REG => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, self.reg[src], vm_addr, u8);
            },
            ebpf::ST_H_REG => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, self.reg[src], vm_addr, u16);
            },
            ebpf::ST_W_REG => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, self.reg[src], vm_addr, u32);
            },
            ebpf::ST_DW_REG => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                translate_memory_access!(self, store, self.reg[src], vm_addr, u64);
            },

            // 32-bit ALU: operate on the low 32 bits; the i32->u64 casts
            // sign-extend results into the full register where applicable.
            // Several opcodes are version-gated (e.g. MUL/DIV/MOD exist only
            // when the PQR instruction family is disabled).
            ebpf::ADD32_IMM  => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(insn.imm as i32)      as u64,
            ebpf::ADD32_REG  => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(self.reg[src] as i32) as u64,
            // Operand order of SUB with an immediate is swapped in newer SBPF versions.
            ebpf::SUB32_IMM  => if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() {
                self.reg[dst] = (insn.imm as i32).wrapping_sub(self.reg[dst] as i32)      as u64
            } else {
                self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(insn.imm as i32)      as u64
            },
            ebpf::SUB32_REG  => self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(self.reg[src] as i32) as u64,
            ebpf::MUL32_IMM  if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32)      as u64,
            ebpf::MUL32_REG  if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u64,
            // DIV32_IMM: a zero immediate is assumed rejected by verification
            // upstream — TODO confirm; no runtime zero check here.
            ebpf::DIV32_IMM  if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32 / insn.imm as u32)      as u64,
            ebpf::DIV32_REG  if !self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u32);
                self.reg[dst] = (self.reg[dst] as u32 / self.reg[src] as u32) as u64;
            },
            ebpf::OR32_IMM   => self.reg[dst] = (self.reg[dst] as u32             | insn.imm as u32)      as u64,
            ebpf::OR32_REG   => self.reg[dst] = (self.reg[dst] as u32             | self.reg[src] as u32) as u64,
            ebpf::AND32_IMM  => self.reg[dst] = (self.reg[dst] as u32             & insn.imm as u32)      as u64,
            ebpf::AND32_REG  => self.reg[dst] = (self.reg[dst] as u32             & self.reg[src] as u32) as u64,
            ebpf::LSH32_IMM  => self.reg[dst] = (self.reg[dst] as u32).wrapping_shl(insn.imm as u32)      as u64,
            ebpf::LSH32_REG  => self.reg[dst] = (self.reg[dst] as u32).wrapping_shl(self.reg[src] as u32) as u64,
            ebpf::RSH32_IMM  => self.reg[dst] = (self.reg[dst] as u32).wrapping_shr(insn.imm as u32)      as u64,
            ebpf::RSH32_REG  => self.reg[dst] = (self.reg[dst] as u32).wrapping_shr(self.reg[src] as u32) as u64,
            ebpf::NEG32      if self.executable.get_sbpf_version().enable_neg() => self.reg[dst] = (self.reg[dst] as i32).wrapping_neg()           as u64 & (u32::MAX as u64),
            ebpf::MOD32_IMM  if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32 % insn.imm as u32)      as u64,
            ebpf::MOD32_REG  if !self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u32);
                self.reg[dst] = (self.reg[dst] as u32 % self.reg[src] as u32) as u64;
            },
            ebpf::XOR32_IMM  => self.reg[dst] = (self.reg[dst] as u32             ^ insn.imm as u32)      as u64,
            ebpf::XOR32_REG  => self.reg[dst] = (self.reg[dst] as u32             ^ self.reg[src] as u32) as u64,
            ebpf::MOV32_IMM  => self.reg[dst] = insn.imm as u32 as u64,
            ebpf::MOV32_REG  => self.reg[dst] = (self.reg[src] as u32) as u64,
            // Arithmetic right shift on the low 32 bits; mask keeps the
            // upper half of the destination register zeroed.
            ebpf::ARSH32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(insn.imm as u32)      as u64 & (u32::MAX as u64),
            ebpf::ARSH32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(self.reg[src] as u32) as u64 & (u32::MAX as u64),

            // Byte-order conversions; imm selects the operand width.
            ebpf::LE if self.executable.get_sbpf_version().enable_le() => {
                self.reg[dst] = match insn.imm {
                    16 => (self.reg[dst] as u16).to_le() as u64,
                    32 => (self.reg[dst] as u32).to_le() as u64,
                    64 =>  self.reg[dst].to_le(),
                    _  => {
                        throw_error!(self, EbpfError::InvalidInstruction);
                    }
                };
            },
            ebpf::BE         => {
                self.reg[dst] = match insn.imm {
                    16 => (self.reg[dst] as u16).to_be() as u64,
                    32 => (self.reg[dst] as u32).to_be() as u64,
                    64 =>  self.reg[dst].to_be(),
                    _  => {
                        throw_error!(self, EbpfError::InvalidInstruction);
                    }
                };
            },

            // 64-bit ALU, wrapping semantics throughout.
            ebpf::ADD64_IMM  => self.reg[dst] = self.reg[dst].wrapping_add(insn.imm as u64),
            ebpf::ADD64_REG  => self.reg[dst] = self.reg[dst].wrapping_add(self.reg[src]),
            ebpf::SUB64_IMM  => if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() {
                self.reg[dst] = (insn.imm as u64).wrapping_sub(self.reg[dst])
            } else {
                self.reg[dst] = self.reg[dst].wrapping_sub(insn.imm as u64)
            },
            ebpf::SUB64_REG  => self.reg[dst] = self.reg[dst].wrapping_sub(self.reg[src]),
            ebpf::MUL64_IMM  if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(insn.imm as u64),
            ebpf::MUL64_REG  if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(self.reg[src]),
            ebpf::DIV64_IMM  if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] /= insn.imm as u64,
            ebpf::DIV64_REG  if !self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u64);
                self.reg[dst] /= self.reg[src];
            },
            ebpf::OR64_IMM   => self.reg[dst] |= insn.imm as u64,
            ebpf::OR64_REG   => self.reg[dst] |= self.reg[src],
            ebpf::AND64_IMM  => self.reg[dst] &= insn.imm as u64,
            ebpf::AND64_REG  => self.reg[dst] &= self.reg[src],
            ebpf::LSH64_IMM  => self.reg[dst] = self.reg[dst].wrapping_shl(insn.imm as u32),
            ebpf::LSH64_REG  => self.reg[dst] = self.reg[dst].wrapping_shl(self.reg[src] as u32),
            ebpf::RSH64_IMM  => self.reg[dst] = self.reg[dst].wrapping_shr(insn.imm as u32),
            ebpf::RSH64_REG  => self.reg[dst] = self.reg[dst].wrapping_shr(self.reg[src] as u32),
            ebpf::NEG64      if self.executable.get_sbpf_version().enable_neg() => self.reg[dst] = (self.reg[dst] as i64).wrapping_neg() as u64,
            ebpf::MOD64_IMM  if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] %= insn.imm as u64,
            ebpf::MOD64_REG  if !self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u64);
                self.reg[dst] %= self.reg[src];
            },
            ebpf::XOR64_IMM  => self.reg[dst] ^= insn.imm as u64,
            ebpf::XOR64_REG  => self.reg[dst] ^= self.reg[src],
            ebpf::MOV64_IMM  => self.reg[dst] = insn.imm as u64,
            ebpf::MOV64_REG  => self.reg[dst] = self.reg[src],
            ebpf::ARSH64_IMM => self.reg[dst] = (self.reg[dst] as i64).wrapping_shr(insn.imm as u32)      as u64,
            ebpf::ARSH64_REG => self.reg[dst] = (self.reg[dst] as i64).wrapping_shr(self.reg[src] as u32) as u64,
            // HOR64: OR the immediate into the high 32 bits (replacement for
            // lddw's second half when lddw is disabled).
            ebpf::HOR64_IMM if !self.executable.get_sbpf_version().enable_lddw() => {
                self.reg[dst] |= (insn.imm as u64).wrapping_shl(32);
            }

            // PQR family (product/quotient/remainder), available only when
            // enabled by the SBPF version. UHMUL/SHMUL widen to 128 bits and
            // keep the high half of the product.
            ebpf::LMUL32_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32)      as u64,
            ebpf::LMUL32_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u64,
            ebpf::LMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(insn.imm as u64),
            ebpf::LMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(self.reg[src]),
            ebpf::UHMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u128).wrapping_mul(insn.imm as u64 as u128).wrapping_shr(64) as u64,
            ebpf::UHMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u128).wrapping_mul(self.reg[src] as u128).wrapping_shr(64) as u64,
            ebpf::SHMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i64 as i128).wrapping_mul(insn.imm as i128).wrapping_shr(64) as u64,
            ebpf::SHMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i64 as i128).wrapping_mul(self.reg[src] as i64 as i128).wrapping_shr(64) as u64,
            ebpf::UDIV32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                self.reg[dst] = (self.reg[dst] as u32 / insn.imm as u32) as u64;
            }
            ebpf::UDIV32_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u32);
                self.reg[dst] = (self.reg[dst] as u32 / self.reg[src] as u32) as u64;
            },
            ebpf::UDIV64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                self.reg[dst] /= insn.imm as u64;
            }
            ebpf::UDIV64_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u64);
                self.reg[dst] /= self.reg[src];
            },
            ebpf::UREM32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                self.reg[dst] = (self.reg[dst] as u32 % insn.imm as u32) as u64;
            }
            ebpf::UREM32_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u32);
                self.reg[dst] = (self.reg[dst] as u32 % self.reg[src] as u32) as u64;
            },
            ebpf::UREM64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                self.reg[dst] %= insn.imm as u64;
            }
            ebpf::UREM64_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], u64);
                self.reg[dst] %= self.reg[src];
            },
            // Signed division/remainder additionally guard MIN / -1 overflow.
            ebpf::SDIV32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i32);
                self.reg[dst] = (self.reg[dst] as i32 / insn.imm as i32) as u64;
            }
            ebpf::SDIV32_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], i32);
                throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i32);
                self.reg[dst] = (self.reg[dst] as i32 / self.reg[src] as i32) as u64;
            },
            ebpf::SDIV64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i64);
                self.reg[dst] = (self.reg[dst] as i64 / insn.imm) as u64;
            }
            ebpf::SDIV64_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], i64);
                throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i64);
                self.reg[dst] = (self.reg[dst] as i64 / self.reg[src] as i64) as u64;
            },
            ebpf::SREM32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i32);
                self.reg[dst] = (self.reg[dst] as i32 % insn.imm as i32) as u64;
            }
            ebpf::SREM32_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], i32);
                throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i32);
                self.reg[dst] = (self.reg[dst] as i32 % self.reg[src] as i32) as u64;
            },
            ebpf::SREM64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i64);
                self.reg[dst] = (self.reg[dst] as i64 % insn.imm) as u64;
            }
            ebpf::SREM64_REG if self.executable.get_sbpf_version().enable_pqr() => {
                throw_error!(DivideByZero; self, self.reg[src], i64);
                throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i64);
                self.reg[dst] = (self.reg[dst] as i64 % self.reg[src] as i64) as u64;
            },

            // Jumps: pc-relative, offset in instruction units applied to the
            // fall-through pc.
            ebpf::JA         =>                                                   { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JEQ_IMM    => if  self.reg[dst] == insn.imm as u64              { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JEQ_REG    => if  self.reg[dst] == self.reg[src]                { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JGT_IMM    => if  self.reg[dst] >  insn.imm as u64              { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JGT_REG    => if  self.reg[dst] >  self.reg[src]                { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JGE_IMM    => if  self.reg[dst] >= insn.imm as u64              { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JGE_REG    => if  self.reg[dst] >= self.reg[src]                { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JLT_IMM    => if  self.reg[dst] <  insn.imm as u64              { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JLT_REG    => if  self.reg[dst] <  self.reg[src]                { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JLE_IMM    => if  self.reg[dst] <= insn.imm as u64              { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JLE_REG    => if  self.reg[dst] <= self.reg[src]                { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSET_IMM   => if  self.reg[dst] &  insn.imm as u64 != 0         { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSET_REG   => if  self.reg[dst] &  self.reg[src]   != 0         { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JNE_IMM    => if  self.reg[dst] != insn.imm as u64              { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JNE_REG    => if  self.reg[dst] != self.reg[src]                { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSGT_IMM   => if (self.reg[dst] as i64) >  insn.imm             { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSGT_REG   => if (self.reg[dst] as i64) >  self.reg[src] as i64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSGE_IMM   => if (self.reg[dst] as i64) >= insn.imm             { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSGE_REG   => if (self.reg[dst] as i64) >= self.reg[src] as i64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSLT_IMM   => if (self.reg[dst] as i64) <  insn.imm             { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSLT_REG   => if (self.reg[dst] as i64) <  self.reg[src] as i64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSLE_IMM   => if (self.reg[dst] as i64) <= insn.imm             { next_pc = (next_pc as i64 + insn.off as i64) as u64; },
            ebpf::JSLE_REG   => if (self.reg[dst] as i64) <= self.reg[src] as i64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; },

            // Indirect call: the target virtual address comes from a register
            // (which one depends on the SBPF version), and is converted back
            // to an instruction index relative to the text section base.
            ebpf::CALL_REG   => {
                let target_pc = if self.executable.get_sbpf_version().callx_uses_src_reg() {
                    self.reg[src]
                } else {
                    self.reg[insn.imm as usize]
                };
                if !self.push_frame(config) {
                    return false;
                }
                check_pc!(self, next_pc, target_pc.wrapping_sub(self.program_vm_addr) / ebpf::INSN_SIZE as u64);
                // With static syscalls, indirect calls must land on a
                // registered function entry point.
                if self.executable.get_sbpf_version().static_syscalls() && self.executable.get_function_registry().lookup_by_key(next_pc as u32).is_none() {
                    self.vm.due_insn_count += 1;
                    self.reg[11] = next_pc;
                    throw_error!(self, EbpfError::UnsupportedInstruction);
                }
            },

            // Direct call: resolve either an external function from the
            // loader's registry (invoked on the host) or an internal function
            // from the executable's registry (a jump with a new frame).
            // Before static syscalls, both registries are consulted.
            ebpf::CALL_IMM   => {
                let mut resolved = false;
                let (external, internal) = if self.executable.get_sbpf_version().static_syscalls() {
                    (insn.src == 0, insn.src != 0)
                } else {
                    (true, true)
                };

                if external {
                    if let Some((_function_name, function)) = self.executable.get_loader().get_function_registry().lookup_by_key(insn.imm as u32) {
                        resolved = true;

                        // Sync the meter and argument registers into the VM,
                        // invoke the host function, then pull its return
                        // value (or error) back out.
                        self.vm.due_insn_count = self.vm.previous_instruction_meter - self.vm.due_insn_count;
                        self.vm.registers[0..6].copy_from_slice(&self.reg[0..6]);
                        self.vm.invoke_function(function);
                        self.vm.due_insn_count = 0;
                        self.reg[0] = match &self.vm.program_result {
                            ProgramResult::Ok(value) => *value,
                            ProgramResult::Err(_err) => return false,
                        };
                    }
                }

                if internal && !resolved {
                    if let Some((_function_name, target_pc)) = self.executable.get_function_registry().lookup_by_key(insn.imm as u32) {
                        resolved = true;

                        // make BPF to BPF call
                        if !self.push_frame(config) {
                            return false;
                        }
                        check_pc!(self, next_pc, target_pc as u64);
                    }
                }

                if !resolved {
                    throw_error!(self, EbpfError::UnsupportedInstruction);
                }
            }

            ebpf::EXIT       => {
                // At the outermost frame, EXIT terminates the program with
                // r0 as the result (after a final meter check).
                if self.vm.call_depth == 0 {
                    if config.enable_instruction_meter && self.vm.due_insn_count > self.vm.previous_instruction_meter {
                        throw_error!(self, EbpfError::ExceededMaxInstructions);
                    }
                    self.vm.program_result = ProgramResult::Ok(self.reg[0]);
                    return false;
                }
                // Otherwise pop a frame: restore scratch registers and the
                // frame pointer, rewind the stack pointer (fixed frames
                // only), and jump back to the saved return pc.
                self.vm.call_depth -= 1;
                let frame = &self.vm.call_frames[self.vm.call_depth as usize];
                self.reg[ebpf::FRAME_PTR_REG] = frame.frame_pointer;
                self.reg[ebpf::FIRST_SCRATCH_REG
                    ..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS]
                    .copy_from_slice(&frame.caller_saved_registers);
                if !self.executable.get_sbpf_version().dynamic_stack_frames() {
                    let stack_frame_size =
                        config.stack_frame_size * if config.enable_stack_frame_gaps { 2 } else { 1 };
                    self.vm.stack_pointer -= stack_frame_size as u64;
                }
                check_pc!(self, next_pc, frame.target_pc);
            }
            _ => throw_error!(self, EbpfError::UnsupportedInstruction),
        }

        // Instruction meter: abort once the consumed count reaches the
        // budget carried in `previous_instruction_meter`.
        if config.enable_instruction_meter && self.vm.due_insn_count >= self.vm.previous_instruction_meter {
            self.reg[11] += 1;
            throw_error!(self, EbpfError::ExceededMaxInstructions);
        }

        self.reg[11] = next_pc;
        true
    }
535}