#![allow(clippy::integer_arithmetic)]

use crate::{
    ebpf,
    ebpf::STACK_PTR_REG,
    error::EbpfError,
    memory_region::AccessType,
    verifier::Verifier,
    vm::{Config, ContextObject, EbpfVm, ProgramResult},
};

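/// Translates a guest virtual address into a host pointer of the requested type,
/// or records the access error and returns `false` from the calling function.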
macro_rules! translate_memory_access {
    ($self:ident, $vm_addr:ident, $access_type:expr, $pc:ident, $T:ty) => {
        match $self.vm.env.memory_mapping.map(
            $access_type,
            $vm_addr,
            std::mem::size_of::<$T>() as u64,
            $pc + ebpf::ELF_INSN_DUMP_OFFSET,
        ) {
            ProgramResult::Ok(host_addr) => host_addr as *mut $T,
            ProgramResult::Err(err) => throw_error!($self, err),
        }
    };
}

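/// Records `$err` as the program result and returns `false` from the calling function.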
macro_rules! throw_error {
    ($self:expr, $err:expr) => {{
        $self.vm.env.program_result = ProgramResult::Err($err);
        return false;
    }};
}

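/// State of the interpreter during a debugging session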
#[cfg(feature = "debugger")]
pub enum DebugState {
    Step,
    Continue,
}

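/// State of the eBPF interpreter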
pub struct Interpreter<'a, 'b, V: Verifier, C: ContextObject> {
    pub(crate) vm: &'a mut EbpfVm<'b, V, C>,
    pub(crate) program: &'a [u8],
    pub(crate) program_vm_addr: u64,
    pub(crate) due_insn_count: u64,

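    /// General purpose registers r0..=r10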
    pub reg: [u64; 11],
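    /// Program counter, in units of instructions (not bytes)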
    pub pc: usize,

    #[cfg(feature = "debugger")]
    pub(crate) debug_state: DebugState,
    #[cfg(feature = "debugger")]
    pub(crate) breakpoints: Vec<u64>,
}

impl<'a, 'b, V: Verifier, C: ContextObject> Interpreter<'a, 'b, V, C> {
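    /// Creates a new interpreter state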
    pub fn new(
        vm: &'a mut EbpfVm<'b, V, C>,
        registers: [u64; 11],
        target_pc: usize,
    ) -> Result<Self, EbpfError> {
        let executable = vm.verified_executable.get_executable();
        let (program_vm_addr, program) = executable.get_text_bytes();
        Ok(Self {
            vm,
            program,
            program_vm_addr,
            due_insn_count: 0,
            reg: registers,
            pc: target_pc,
            #[cfg(feature = "debugger")]
            debug_state: DebugState::Continue,
            #[cfg(feature = "debugger")]
            breakpoints: Vec::new(),
        })
    }

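    /// Returns true if `self.pc` still addresses a complete instruction inside the text
    /// section, otherwise records a `CallOutsideTextSegment` error and returns false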
    fn check_pc(&mut self, current_pc: usize) -> bool {
        if self
            .pc
            .checked_mul(ebpf::INSN_SIZE)
            .and_then(|offset| self.program.get(offset..offset + ebpf::INSN_SIZE))
            .is_some()
        {
            true
        } else {
            throw_error!(
                self,
                EbpfError::CallOutsideTextSegment(
                    current_pc + ebpf::ELF_INSN_DUMP_OFFSET,
                    self.program_vm_addr + (self.pc * ebpf::INSN_SIZE) as u64,
                )
            );
        }
    }

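    /// Returns the current pc as a byte offset (text section offset plus pc * INSN_SIZE),
    /// as used by the debugger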
    #[cfg(feature = "debugger")]
    pub fn get_dbg_pc(&self) -> u64 {
        ((self.pc * ebpf::INSN_SIZE) as u64)
            + self
                .vm
                .verified_executable
                .get_executable()
                .get_text_section_offset()
    }

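    /// Saves the caller-saved registers, frame pointer and return pc into a new call frame;
    /// returns false if the maximum call depth is exceeded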
    fn push_frame(&mut self, config: &Config) -> bool {
        let frame = &mut self.vm.env.call_frames[self.vm.env.call_depth as usize];
        frame.caller_saved_registers.copy_from_slice(
            &self.reg[ebpf::FIRST_SCRATCH_REG..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS],
        );
        frame.frame_pointer = self.reg[ebpf::FRAME_PTR_REG];
        frame.target_pc = self.pc;

        self.vm.env.call_depth += 1;
        if self.vm.env.call_depth as usize == config.max_call_depth {
            throw_error!(
                self,
                EbpfError::CallDepthExceeded(
                    self.pc + ebpf::ELF_INSN_DUMP_OFFSET - 1,
                    config.max_call_depth,
                )
            );
        }

        if !config.dynamic_stack_frames {
            let stack_frame_size =
                config.stack_frame_size * if config.enable_stack_frame_gaps { 2 } else { 1 };
            self.vm.env.stack_pointer += stack_frame_size as u64;
        }
        self.reg[ebpf::FRAME_PTR_REG] = self.vm.env.stack_pointer;

        true
    }

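    /// Advances the interpreter state by one instruction
    ///
    /// Returns false if the program terminated or threw an error.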
    #[rustfmt::skip]
    pub fn step(&mut self) -> bool {
        let executable = self.vm.verified_executable.get_executable();
        let config = &executable.get_config();

        let mut instruction_width = 1;
        self.due_insn_count += 1;
        let pc = self.pc;
        self.pc += instruction_width;
        if self.pc * ebpf::INSN_SIZE > self.program.len() {
            throw_error!(self, EbpfError::ExecutionOverrun(pc + ebpf::ELF_INSN_DUMP_OFFSET));
        }
        let mut insn = ebpf::get_insn_unchecked(self.program, pc);
        let dst = insn.dst as usize;
        let src = insn.src as usize;

        if config.enable_instruction_tracing {
            let mut state = [0u64; 12];
            state[0..11].copy_from_slice(&self.reg);
            state[11] = pc as u64;
            self.vm.env.context_object_pointer.trace(state);
        }

        match insn.opc {
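            // With dynamic stack frames, writes to r11 adjust the stack pointer itself.
            // The debug-only `unreachable!` below documents that only ADD64_IMM and
            // SUB64_IMM are expected to target r11.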
            _ if dst == STACK_PTR_REG && config.dynamic_stack_frames => {
                match insn.opc {
                    ebpf::SUB64_IMM => { self.vm.env.stack_pointer = self.vm.env.stack_pointer.overflowing_add(-insn.imm as u64).0; }
                    ebpf::ADD64_IMM => { self.vm.env.stack_pointer = self.vm.env.stack_pointer.overflowing_add(insn.imm as u64).0; }
                    _ => {
                        #[cfg(debug_assertions)]
                        unreachable!("unexpected insn on r11")
                    }
                }
            }

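            // BPF_LD class: 64-bit immediate load (occupies two instruction slots)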
            ebpf::LD_DW_IMM => {
                ebpf::augment_lddw_unchecked(self.program, &mut insn);
                instruction_width = 2;
                self.pc += 1;
                self.reg[dst] = insn.imm as u64;
            },

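            // BPF_LDX class: loads from memory at reg[src] + offset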
            ebpf::LD_B_REG => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u8);
                self.reg[dst] = unsafe { *host_ptr as u64 };
            },
            ebpf::LD_H_REG => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u16);
                self.reg[dst] = unsafe { *host_ptr as u64 };
            },
            ebpf::LD_W_REG => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u32);
                self.reg[dst] = unsafe { *host_ptr as u64 };
            },
            ebpf::LD_DW_REG => {
                let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64;
                let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u64);
                self.reg[dst] = unsafe { *host_ptr };
            },

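            // BPF_ST class: stores an immediate to memory at reg[dst] + offset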
            ebpf::ST_B_IMM => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u8);
                unsafe { *host_ptr = insn.imm as u8 };
            },
            ebpf::ST_H_IMM => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u16);
                unsafe { *host_ptr = insn.imm as u16 };
            },
            ebpf::ST_W_IMM => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u32);
                unsafe { *host_ptr = insn.imm as u32 };
            },
            ebpf::ST_DW_IMM => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u64);
                unsafe { *host_ptr = insn.imm as u64 };
            },

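            // BPF_STX class: stores a register to memory at reg[dst] + offset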
            ebpf::ST_B_REG => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u8);
                unsafe { *host_ptr = self.reg[src] as u8 };
            },
            ebpf::ST_H_REG => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u16);
                unsafe { *host_ptr = self.reg[src] as u16 };
            },
            ebpf::ST_W_REG => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u32);
                unsafe { *host_ptr = self.reg[src] as u32 };
            },
            ebpf::ST_DW_REG => {
                let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
                let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u64);
                unsafe { *host_ptr = self.reg[src] };
            },

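            // BPF_ALU class: 32-bit arithmetic and logic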
            ebpf::ADD32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(insn.imm as i32) as u64,
            ebpf::ADD32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(self.reg[src] as i32) as u64,
            ebpf::SUB32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(insn.imm as i32) as u64,
            ebpf::SUB32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(self.reg[src] as i32) as u64,
            ebpf::MUL32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32) as u64,
            ebpf::MUL32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u64,
            ebpf::DIV32_IMM => self.reg[dst] = (self.reg[dst] as u32 / insn.imm as u32) as u64,
            ebpf::DIV32_REG => {
                if self.reg[src] as u32 == 0 {
                    throw_error!(self, EbpfError::DivideByZero(pc + ebpf::ELF_INSN_DUMP_OFFSET));
                }
                self.reg[dst] = (self.reg[dst] as u32 / self.reg[src] as u32) as u64;
            },
            ebpf::SDIV32_IMM => {
                if self.reg[dst] as i32 == i32::MIN && insn.imm == -1 {
                    throw_error!(self, EbpfError::DivideOverflow(pc + ebpf::ELF_INSN_DUMP_OFFSET));
                }
                self.reg[dst] = (self.reg[dst] as i32 / insn.imm as i32) as u64;
            }
            ebpf::SDIV32_REG => {
                if self.reg[src] as i32 == 0 {
                    throw_error!(self, EbpfError::DivideByZero(pc + ebpf::ELF_INSN_DUMP_OFFSET));
                }
                if self.reg[dst] as i32 == i32::MIN && self.reg[src] as i32 == -1 {
                    throw_error!(self, EbpfError::DivideOverflow(pc + ebpf::ELF_INSN_DUMP_OFFSET));
                }
                self.reg[dst] = (self.reg[dst] as i32 / self.reg[src] as i32) as u64;
            },
            ebpf::OR32_IMM => self.reg[dst] = (self.reg[dst] as u32 | insn.imm as u32) as u64,
            ebpf::OR32_REG => self.reg[dst] = (self.reg[dst] as u32 | self.reg[src] as u32) as u64,
            ebpf::AND32_IMM => self.reg[dst] = (self.reg[dst] as u32 & insn.imm as u32) as u64,
            ebpf::AND32_REG => self.reg[dst] = (self.reg[dst] as u32 & self.reg[src] as u32) as u64,
            ebpf::LSH32_IMM => self.reg[dst] = (self.reg[dst] as u32).wrapping_shl(insn.imm as u32) as u64,
            ebpf::LSH32_REG => self.reg[dst] = (self.reg[dst] as u32).wrapping_shl(self.reg[src] as u32) as u64,
            ebpf::RSH32_IMM => self.reg[dst] = (self.reg[dst] as u32).wrapping_shr(insn.imm as u32) as u64,
            ebpf::RSH32_REG => self.reg[dst] = (self.reg[dst] as u32).wrapping_shr(self.reg[src] as u32) as u64,
            ebpf::NEG32 => self.reg[dst] = (self.reg[dst] as i32).wrapping_neg() as u64 & (u32::MAX as u64),
            ebpf::MOD32_IMM => self.reg[dst] = (self.reg[dst] as u32 % insn.imm as u32) as u64,
            ebpf::MOD32_REG => {
                if self.reg[src] as u32 == 0 {
                    throw_error!(self, EbpfError::DivideByZero(pc + ebpf::ELF_INSN_DUMP_OFFSET));
                }
                self.reg[dst] = (self.reg[dst] as u32 % self.reg[src] as u32) as u64;
            },
            ebpf::XOR32_IMM => self.reg[dst] = (self.reg[dst] as u32 ^ insn.imm as u32) as u64,
            ebpf::XOR32_REG => self.reg[dst] = (self.reg[dst] as u32 ^ self.reg[src] as u32) as u64,
            ebpf::MOV32_IMM => self.reg[dst] = insn.imm as u32 as u64,
            ebpf::MOV32_REG => self.reg[dst] = (self.reg[src] as u32) as u64,
            ebpf::ARSH32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(insn.imm as u32) as u64 & (u32::MAX as u64),
            ebpf::ARSH32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(self.reg[src] as u32) as u64 & (u32::MAX as u64),
            ebpf::LE => {
                self.reg[dst] = match insn.imm {
                    16 => (self.reg[dst] as u16).to_le() as u64,
                    32 => (self.reg[dst] as u32).to_le() as u64,
                    64 => self.reg[dst].to_le(),
                    _ => {
                        throw_error!(self, EbpfError::InvalidInstruction(pc + ebpf::ELF_INSN_DUMP_OFFSET));
                    }
                };
            },
            ebpf::BE => {
                self.reg[dst] = match insn.imm {
                    16 => (self.reg[dst] as u16).to_be() as u64,
                    32 => (self.reg[dst] as u32).to_be() as u64,
                    64 => self.reg[dst].to_be(),
                    _ => {
                        throw_error!(self, EbpfError::InvalidInstruction(pc + ebpf::ELF_INSN_DUMP_OFFSET));
                    }
                };
            },

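            // BPF_ALU64 class: 64-bit arithmetic and logic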
            ebpf::ADD64_IMM => self.reg[dst] = self.reg[dst].wrapping_add(insn.imm as u64),
            ebpf::ADD64_REG => self.reg[dst] = self.reg[dst].wrapping_add(self.reg[src]),
            ebpf::SUB64_IMM => self.reg[dst] = self.reg[dst].wrapping_sub(insn.imm as u64),
            ebpf::SUB64_REG => self.reg[dst] = self.reg[dst].wrapping_sub(self.reg[src]),
            ebpf::MUL64_IMM => self.reg[dst] = self.reg[dst].wrapping_mul(insn.imm as u64),
            ebpf::MUL64_REG => self.reg[dst] = self.reg[dst].wrapping_mul(self.reg[src]),
            ebpf::DIV64_IMM => self.reg[dst] /= insn.imm as u64,
            ebpf::DIV64_REG => {
                if self.reg[src] == 0 {
                    throw_error!(self, EbpfError::DivideByZero(pc + ebpf::ELF_INSN_DUMP_OFFSET));
                }
                self.reg[dst] /= self.reg[src];
            },
            ebpf::SDIV64_IMM => {
                if self.reg[dst] as i64 == i64::MIN && insn.imm == -1 {
                    throw_error!(self, EbpfError::DivideOverflow(pc + ebpf::ELF_INSN_DUMP_OFFSET));
                }
                self.reg[dst] = (self.reg[dst] as i64 / insn.imm) as u64
            }
            ebpf::SDIV64_REG => {
                if self.reg[src] == 0 {
                    throw_error!(self, EbpfError::DivideByZero(pc + ebpf::ELF_INSN_DUMP_OFFSET));
                }
                if self.reg[dst] as i64 == i64::MIN && self.reg[src] as i64 == -1 {
                    throw_error!(self, EbpfError::DivideOverflow(pc + ebpf::ELF_INSN_DUMP_OFFSET));
                }
                self.reg[dst] = (self.reg[dst] as i64 / self.reg[src] as i64) as u64;
            },
            ebpf::OR64_IMM => self.reg[dst] |= insn.imm as u64,
            ebpf::OR64_REG => self.reg[dst] |= self.reg[src],
            ebpf::AND64_IMM => self.reg[dst] &= insn.imm as u64,
            ebpf::AND64_REG => self.reg[dst] &= self.reg[src],
            ebpf::LSH64_IMM => self.reg[dst] = self.reg[dst].wrapping_shl(insn.imm as u32),
            ebpf::LSH64_REG => self.reg[dst] = self.reg[dst].wrapping_shl(self.reg[src] as u32),
            ebpf::RSH64_IMM => self.reg[dst] = self.reg[dst].wrapping_shr(insn.imm as u32),
            ebpf::RSH64_REG => self.reg[dst] = self.reg[dst].wrapping_shr(self.reg[src] as u32),
            ebpf::NEG64 => self.reg[dst] = (self.reg[dst] as i64).wrapping_neg() as u64,
            ebpf::MOD64_IMM => self.reg[dst] %= insn.imm as u64,
            ebpf::MOD64_REG => {
                if self.reg[src] == 0 {
                    throw_error!(self, EbpfError::DivideByZero(pc + ebpf::ELF_INSN_DUMP_OFFSET));
                }
                self.reg[dst] %= self.reg[src];
            },
            ebpf::XOR64_IMM => self.reg[dst] ^= insn.imm as u64,
            ebpf::XOR64_REG => self.reg[dst] ^= self.reg[src],
            ebpf::MOV64_IMM => self.reg[dst] = insn.imm as u64,
            ebpf::MOV64_REG => self.reg[dst] = self.reg[src],
            ebpf::ARSH64_IMM => self.reg[dst] = (self.reg[dst] as i64).wrapping_shr(insn.imm as u32) as u64,
            ebpf::ARSH64_REG => self.reg[dst] = (self.reg[dst] as i64).wrapping_shr(self.reg[src] as u32) as u64,

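            // BPF_JMP class: offsets are relative to the instruction following the jump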
            ebpf::JA => { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JEQ_IMM => if self.reg[dst] == insn.imm as u64 { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JEQ_REG => if self.reg[dst] == self.reg[src] { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JGT_IMM => if self.reg[dst] > insn.imm as u64 { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JGT_REG => if self.reg[dst] > self.reg[src] { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JGE_IMM => if self.reg[dst] >= insn.imm as u64 { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JGE_REG => if self.reg[dst] >= self.reg[src] { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JLT_IMM => if self.reg[dst] < insn.imm as u64 { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JLT_REG => if self.reg[dst] < self.reg[src] { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JLE_IMM => if self.reg[dst] <= insn.imm as u64 { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JLE_REG => if self.reg[dst] <= self.reg[src] { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JSET_IMM => if self.reg[dst] & insn.imm as u64 != 0 { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JSET_REG => if self.reg[dst] & self.reg[src] != 0 { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JNE_IMM => if self.reg[dst] != insn.imm as u64 { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JNE_REG => if self.reg[dst] != self.reg[src] { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JSGT_IMM => if (self.reg[dst] as i64) > insn.imm { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JSGT_REG => if (self.reg[dst] as i64) > self.reg[src] as i64 { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JSGE_IMM => if (self.reg[dst] as i64) >= insn.imm { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JSGE_REG => if (self.reg[dst] as i64) >= self.reg[src] as i64 { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JSLT_IMM => if (self.reg[dst] as i64) < insn.imm { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JSLT_REG => if (self.reg[dst] as i64) < self.reg[src] as i64 { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JSLE_IMM => if (self.reg[dst] as i64) <= insn.imm { self.pc = (self.pc as isize + insn.off as isize) as usize; },
            ebpf::JSLE_REG => if (self.reg[dst] as i64) <= self.reg[src] as i64 { self.pc = (self.pc as isize + insn.off as isize) as usize; },

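            // Call the address held in the register selected by insn.imm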
            ebpf::CALL_REG => {
                let target_address = self.reg[insn.imm as usize];
                if !self.push_frame(config) {
                    return false;
                }
                if target_address < self.program_vm_addr {
                    throw_error!(self, EbpfError::CallOutsideTextSegment(pc + ebpf::ELF_INSN_DUMP_OFFSET, target_address / ebpf::INSN_SIZE as u64 * ebpf::INSN_SIZE as u64));
                }
                self.pc = (target_address - self.program_vm_addr) as usize / ebpf::INSN_SIZE;
                if !self.check_pc(pc) {
                    return false;
                }
                if config.static_syscalls && executable.lookup_internal_function(self.pc as u32).is_none() {
                    self.due_insn_count += 1;
                    throw_error!(self, EbpfError::UnsupportedInstruction(self.pc + ebpf::ELF_INSN_DUMP_OFFSET));
                }
            },

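            // Either an external syscall resolved through the loader, or a call to an
            // internal function; with static_syscalls, insn.src selects which one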
            ebpf::CALL_IMM => {
                let mut resolved = false;
                let (external, internal) = if config.static_syscalls {
                    (insn.src == 0, insn.src != 0)
                } else {
                    (true, true)
                };

                if external {
                    if let Some((_function_name, function)) = executable.get_loader().lookup_function(insn.imm as u32) {
                        resolved = true;

                        if config.enable_instruction_meter {
                            self.vm.env.context_object_pointer.consume(self.due_insn_count);
                        }
                        self.due_insn_count = 0;
                        function(
                            self.vm.env.context_object_pointer,
                            self.reg[1],
                            self.reg[2],
                            self.reg[3],
                            self.reg[4],
                            self.reg[5],
                            &mut self.vm.env.memory_mapping,
                            &mut self.vm.env.program_result,
                        );
                        self.reg[0] = match &self.vm.env.program_result {
                            ProgramResult::Ok(value) => *value,
                            ProgramResult::Err(_err) => return false,
                        };
                        if config.enable_instruction_meter {
                            self.vm.env.previous_instruction_meter = self.vm.env.context_object_pointer.get_remaining();
                        }
                    }
                }

                if internal && !resolved {
                    if let Some(target_pc) = executable.lookup_internal_function(insn.imm as u32) {
                        resolved = true;

                        if !self.push_frame(config) {
                            return false;
                        }
                        self.pc = target_pc;
                        if !self.check_pc(pc) {
                            return false;
                        }
                    }
                }

                if !resolved {
                    throw_error!(self, EbpfError::UnsupportedInstruction(pc + ebpf::ELF_INSN_DUMP_OFFSET));
                }
            }

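            // Return from the current call frame, or terminate the program with the
            // value in r0 once the outermost frame exits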
            ebpf::EXIT => {
                if self.vm.env.call_depth == 0 {
                    self.vm.env.program_result = ProgramResult::Ok(self.reg[0]);
                    return false;
                }
                self.vm.env.call_depth -= 1;
                let frame = &self.vm.env.call_frames[self.vm.env.call_depth as usize];
                self.pc = frame.target_pc;
                self.reg[ebpf::FRAME_PTR_REG] = frame.frame_pointer;
                self.reg[ebpf::FIRST_SCRATCH_REG
                    ..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS]
                    .copy_from_slice(&frame.caller_saved_registers);
                if !config.dynamic_stack_frames {
                    let stack_frame_size =
                        config.stack_frame_size * if config.enable_stack_frame_gaps { 2 } else { 1 };
                    self.vm.env.stack_pointer -= stack_frame_size as u64;
                }
                if !self.check_pc(pc) {
                    return false;
                }
            }
            _ => throw_error!(self, EbpfError::UnsupportedInstruction(pc + ebpf::ELF_INSN_DUMP_OFFSET)),
        }

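        // Check the instruction meter: due_insn_count tracks instructions executed since
        // the meter was last charged, previous_instruction_meter the remaining budget.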
        if config.enable_instruction_meter && self.due_insn_count >= self.vm.env.previous_instruction_meter {
            throw_error!(self, EbpfError::ExceededMaxInstructions(pc + instruction_width + ebpf::ELF_INSN_DUMP_OFFSET, 0));
        }

        true
    }
}