1use super::{PyCode, PyDictRef, PyIntRef, PyStrRef};
6use crate::{
7 Context, Py, PyObjectRef, PyRef, PyResult, VirtualMachine,
8 class::PyClassImpl,
9 frame::{Frame, FrameOwner, FrameRef},
10 function::PySetterValue,
11 types::Representable,
12};
13use num_traits::Zero;
14use rustpython_compiler_core::bytecode::{
15 self, Constant, Instruction, InstructionMetadata, StackEffect,
16};
17use stack_analysis::*;
18
/// Conservative abstract interpretation of a code object's bytecode, used by
/// the `frame.f_lineno` setter to decide whether jumping to another line is
/// safe. NOTE(review): this appears to mirror CPython's `mark_stacks`
/// analysis in `frameobject.c` — confirm against upstream when changing it.
///
/// Each evaluation-stack snapshot is packed into a single `i64`: three bits
/// per entry (a `Kind` tag), with the top of the stack in the least
/// significant bits. Negative values are the `UNINITIALIZED`/`OVERFLOWED`
/// sentinels, which conveniently stay negative under `pop_value`'s
/// arithmetic right shift.
pub(crate) mod stack_analysis {
    use super::*;

    // Encoding parameters: 3 bits per entry, so at most 63 / 3 = 21 entries
    // fit in the non-sign bits of an i64.
    const BITS_PER_BLOCK: u32 = 3;
    const MASK: i64 = (1 << BITS_PER_BLOCK) - 1;
    const MAX_STACK_ENTRIES: u32 = 63 / BITS_PER_BLOCK;
    // Once any bit at or above this threshold is set, one more push would
    // spill into the sign bit, so `push_value` saturates to OVERFLOWED.
    const WILL_OVERFLOW: u64 = 1u64 << ((MAX_STACK_ENTRIES - 1) * BITS_PER_BLOCK);

    /// Encoded empty stack (no entries).
    pub const EMPTY_STACK: i64 = 0;
    /// Instruction not (yet) reached by the forward analysis.
    pub const UNINITIALIZED: i64 = -2;
    /// Stack grew too deep to encode; incompatible with every target.
    pub const OVERFLOWED: i64 = -1;

    /// 3-bit tag describing what one abstract stack slot holds.
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    #[repr(i64)]
    pub enum Kind {
        Iterator = 1,
        Except = 2,
        Object = 3,
        Null = 4,
        Lasti = 5,
    }

    impl Kind {
        /// Decode a 3-bit tag; `None` for 0 (empty slot) or out-of-range values.
        fn from_i64(v: i64) -> Option<Self> {
            match v {
                1 => Some(Kind::Iterator),
                2 => Some(Kind::Except),
                3 => Some(Kind::Object),
                4 => Some(Kind::Null),
                5 => Some(Kind::Lasti),
                _ => None,
            }
        }
    }

    /// Push one entry (a `Kind` tag as i64) onto an encoded stack,
    /// saturating to `OVERFLOWED` when the encoding is out of room.
    pub fn push_value(stack: i64, kind: i64) -> i64 {
        if (stack as u64) >= WILL_OVERFLOW {
            OVERFLOWED
        } else {
            (stack << BITS_PER_BLOCK) | kind
        }
    }

    /// Drop the top entry. Arithmetic shift keeps sentinels negative.
    pub fn pop_value(stack: i64) -> i64 {
        stack >> BITS_PER_BLOCK
    }

    /// Tag of the topmost entry (0 if the stack is empty).
    pub fn top_of_stack(stack: i64) -> i64 {
        stack & MASK
    }

    /// Tag of the n-th entry from the top; 1-based (`peek(s, 1)` is the top).
    fn peek(stack: i64, n: u32) -> i64 {
        debug_assert!(n >= 1);
        (stack >> (BITS_PER_BLOCK * (n - 1))) & MASK
    }

    /// Swap the top entry with the n-th entry from the top (models `Swap`).
    fn stack_swap(stack: i64, n: u32) -> i64 {
        debug_assert!(n >= 1);
        let to_swap = peek(stack, n);
        let top = top_of_stack(stack);
        let shift = BITS_PER_BLOCK * (n - 1);
        // Write the old top into slot n, then the old slot-n tag into the top.
        let replaced_low = (stack & !(MASK << shift)) | (top << shift);
        (replaced_low & !MASK) | to_swap
    }

    /// Pop entries until at most `level` remain — the depth an exception
    /// handler unwinds to before its handler-specific entries are pushed.
    fn pop_to_level(mut stack: i64, level: u32) -> i64 {
        if level == 0 {
            return EMPTY_STACK;
        }
        // Largest encoded value a stack of exactly `level` entries can have;
        // anything numerically greater must therefore be deeper than `level`.
        let max_item: i64 = (1 << BITS_PER_BLOCK) - 1;
        let level_max_stack = max_item << ((level - 1) * BITS_PER_BLOCK);
        while stack > level_max_stack {
            stack = pop_value(stack);
        }
        stack
    }

    /// May a slot of kind `from` stand in for a slot of kind `to` at the
    /// jump target? `Object` accepts anything except `Null`; `Null` accepts
    /// everything; other kinds must match exactly. `to == 0` means the
    /// target expects fewer entries than we have left — incompatible.
    fn compatible_kind(from: i64, to: i64) -> bool {
        if to == 0 {
            return false;
        }
        if to == Kind::Object as i64 {
            return from != Kind::Null as i64;
        }
        if to == Kind::Null as i64 {
            return true;
        }
        from == to
    }

    /// Is a jump from a point with stack `from_stack` to a point expecting
    /// `to_stack` safe? The source may be deeper (extra entries get popped
    /// before the jump); every remaining entry must be kind-compatible and
    /// both stacks must bottom out together.
    pub fn compatible_stack(from_stack: i64, to_stack: i64) -> bool {
        // Sentinels (OVERFLOWED / UNINITIALIZED) are never compatible.
        if from_stack < 0 || to_stack < 0 {
            return false;
        }
        let mut from = from_stack;
        let mut to = to_stack;
        // Numeric comparison of encodings orders by depth here; shrink the
        // source until it is no deeper than the target.
        while from > to {
            from = pop_value(from);
        }
        while from != 0 {
            let from_top = top_of_stack(from);
            let to_top = top_of_stack(to);
            if !compatible_kind(from_top, to_top) {
                return false;
            }
            from = pop_value(from);
            to = pop_value(to);
        }
        // If `to` still has entries the target expects values we don't have.
        to == 0
    }

    /// Human-readable reason why a jump to a point expecting `to_stack`
    /// was rejected, keyed off the target's top-of-stack kind.
    pub fn explain_incompatible_stack(to_stack: i64) -> &'static str {
        debug_assert!(to_stack != 0);
        if to_stack == OVERFLOWED {
            return "stack is too deep to analyze";
        }
        if to_stack == UNINITIALIZED {
            return "can't jump into an exception handler, or code may be unreachable";
        }
        match Kind::from_i64(top_of_stack(to_stack)) {
            Some(Kind::Except) => "can't jump into an 'except' block as there's no exception",
            Some(Kind::Lasti) => "can't jump into a re-raising block as there's no location",
            Some(Kind::Iterator) => "can't jump into the body of a for loop",
            _ => "incompatible stacks",
        }
    }

    /// Compute the abstract stack state at every instruction offset
    /// (`len + 1` entries; unreachable offsets stay `UNINITIALIZED`).
    ///
    /// Forward dataflow pass, iterated to a fixpoint (`todo`) because
    /// backward jumps and exception-table edges can reveal new reachable
    /// code after the linear sweep.
    pub fn mark_stacks<C: Constant>(code: &bytecode::CodeObject<C>) -> Vec<i64> {
        let instructions = &*code.instructions;
        let len = instructions.len();

        let mut stacks = vec![UNINITIALIZED; len + 1];
        stacks[0] = EMPTY_STACK;

        let mut todo = true;
        while todo {
            todo = false;

            let mut i = 0;
            while i < len {
                let mut next_stack = stacks[i];
                let mut opcode = instructions[i].op;
                let mut oparg: u32 = 0;

                // Fold any ExtendedArg prefixes into `oparg`, propagating the
                // current stack through the prefix instructions.
                while matches!(opcode, Instruction::ExtendedArg) {
                    oparg = (oparg << 8) | u32::from(u8::from(instructions[i].arg));
                    i += 1;
                    if i >= len {
                        break;
                    }
                    stacks[i] = next_stack;
                    opcode = instructions[i].op;
                }
                if i >= len {
                    break;
                }
                oparg = (oparg << 8) | u32::from(u8::from(instructions[i].arg));

                // Analyze the unspecialized form of the opcode.
                let opcode = opcode.to_base().unwrap_or(opcode).deoptimize();

                // Inline-cache slots sit between this instruction and the next.
                let caches = opcode.cache_entries();
                let next_i = i + 1 + caches;

                // Skip instructions the analysis hasn't proven reachable yet.
                if next_stack == UNINITIALIZED {
                    i = next_i;
                    continue;
                }

                match opcode {
                    Instruction::PopJumpIfFalse { .. }
                    | Instruction::PopJumpIfTrue { .. }
                    | Instruction::PopJumpIfNone { .. }
                    | Instruction::PopJumpIfNotNone { .. } => {
                        // Condition is popped on both the taken and fallthrough paths.
                        let j = next_i + oparg as usize;
                        next_stack = pop_value(next_stack);
                        let target_stack = next_stack;
                        if j < stacks.len() && stacks[j] == UNINITIALIZED {
                            stacks[j] = target_stack;
                        }
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::Send { .. } => {
                        // Both paths keep the same stack shape.
                        let j = next_i + oparg as usize;
                        if j < stacks.len() && stacks[j] == UNINITIALIZED {
                            stacks[j] = next_stack;
                        }
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::JumpForward { .. } => {
                        // Unconditional: only the jump target gets a state.
                        let j = next_i + oparg as usize;
                        if j < stacks.len() && stacks[j] == UNINITIALIZED {
                            stacks[j] = next_stack;
                        }
                    }
                    Instruction::JumpBackward { .. }
                    | Instruction::JumpBackwardNoInterrupt { .. } => {
                        let j = next_i - oparg as usize;
                        if j < stacks.len() && stacks[j] == UNINITIALIZED {
                            stacks[j] = next_stack;
                            // Newly-marked earlier offset: rerun the sweep.
                            if j < i {
                                todo = true;
                            }
                        }
                    }
                    Instruction::GetIter | Instruction::GetAIter => {
                        // Replace the iterable with an Iterator-kind entry.
                        next_stack = push_value(pop_value(next_stack), Kind::Iterator as i64);
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::ForIter { .. } => {
                        // Fallthrough (loop body) has the yielded value pushed.
                        let body_stack = push_value(next_stack, Kind::Object as i64);
                        if next_i < stacks.len() {
                            stacks[next_i] = body_stack;
                        }
                        // Exhaustion path: jump target keeps the pre-iteration
                        // stack; step past a trailing EndFor if present.
                        let mut j = next_i + oparg as usize;
                        if j < instructions.len() {
                            let target_op =
                                instructions[j].op.to_base().unwrap_or(instructions[j].op);
                            if matches!(target_op, Instruction::EndFor) {
                                j += 1;
                            }
                        }
                        if j < stacks.len() && stacks[j] == UNINITIALIZED {
                            stacks[j] = next_stack;
                        }
                    }
                    Instruction::EndAsyncFor => {
                        next_stack = pop_value(pop_value(next_stack));
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::PushExcInfo => {
                        next_stack = push_value(next_stack, Kind::Except as i64);
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::PopExcept => {
                        next_stack = pop_value(next_stack);
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::ReturnValue => {
                        // Terminator: no fallthrough, successor stays UNINITIALIZED.
                    }
                    Instruction::RaiseVarargs { .. } => {
                        // Terminator: control transfers to an exception handler.
                    }
                    Instruction::Reraise { .. } => {
                        // Terminator: control transfers to an exception handler.
                    }
                    Instruction::PushNull => {
                        next_stack = push_value(next_stack, Kind::Null as i64);
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::LoadGlobal { .. } => {
                        next_stack = push_value(next_stack, Kind::Object as i64);
                        // Low oparg bit requests an extra NULL push.
                        if oparg & 1 != 0 {
                            next_stack = push_value(next_stack, Kind::Null as i64);
                        }
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::LoadAttr { .. } => {
                        let attr_oparg = oparg;
                        // Low bit set: method-load form — replaces the object
                        // with (attr, NULL) rather than just the attribute.
                        if attr_oparg & 1 != 0 {
                            next_stack = pop_value(next_stack);
                            next_stack = push_value(next_stack, Kind::Object as i64);
                            next_stack = push_value(next_stack, Kind::Null as i64);
                        }
                        else {
                            // Plain attribute load: fall back to the generic
                            // declared stack effect.
                            let effect: StackEffect = opcode.stack_effect_info(oparg);
                            let popped = effect.popped() as i64;
                            let pushed = effect.pushed() as i64;
                            for _ in 0..popped {
                                next_stack = pop_value(next_stack);
                            }
                            for _ in 0..pushed {
                                next_stack = push_value(next_stack, Kind::Object as i64);
                            }
                        }
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::Swap { .. } => {
                        let n = oparg;
                        next_stack = stack_swap(next_stack, n);
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::Copy { .. } => {
                        // Duplicate the n-th entry's kind onto the top.
                        let n = oparg;
                        next_stack = push_value(next_stack, peek(next_stack, n));
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    _ => {
                        // Generic case: apply the declared pop/push counts,
                        // pushing plain Object entries.
                        let effect: StackEffect = opcode.stack_effect_info(oparg);
                        let popped = effect.popped() as i64;
                        let pushed = effect.pushed() as i64;
                        let mut ns = next_stack;
                        for _ in 0..popped {
                            ns = pop_value(ns);
                        }
                        for _ in 0..pushed {
                            ns = push_value(ns, Kind::Object as i64);
                        }
                        next_stack = ns;
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                }
                i = next_i;
            }

            // Exception edges: a handler becomes reachable once its protected
            // range is. Its entry stack is the range's stack unwound to the
            // recorded depth, plus optional lasti and the exception itself.
            let exception_table = bytecode::decode_exception_table(&code.exceptiontable);
            for entry in &exception_table {
                let start_offset = entry.start as usize;
                let handler = entry.target as usize;
                let level = entry.depth as u32;
                let has_lasti = entry.push_lasti;

                if start_offset < stacks.len()
                    && stacks[start_offset] != UNINITIALIZED
                    && handler < stacks.len()
                    && stacks[handler] == UNINITIALIZED
                {
                    // Newly-reachable handler: iterate again.
                    todo = true;
                    let mut target_stack = pop_to_level(stacks[start_offset], level);
                    if has_lasti {
                        target_stack = push_value(target_stack, Kind::Lasti as i64);
                    }
                    target_stack = push_value(target_stack, Kind::Except as i64);
                    stacks[handler] = target_stack;
                }
            }
        }

        stacks
    }

    /// For each instruction offset, the line number that *starts* there, or
    /// -1 when the offset continues the previous line.
    pub fn mark_lines<C: Constant>(code: &bytecode::CodeObject<C>) -> Vec<i32> {
        let len = code.instructions.len();
        let mut line_starts = vec![-1i32; len];
        let mut last_line: i32 = -1;

        for (i, (loc, _)) in code.locations.iter().enumerate() {
            if i >= len {
                break;
            }
            let line = loc.line.get() as i32;
            // Only record transitions to a new, valid line.
            if line != last_line && line > 0 {
                line_starts[i] = line;
                last_line = line;
            }
        }
        line_starts
    }

    /// Smallest recorded line >= `line`, or -1 when no such line exists
    /// (ignores the -1 "no line start" entries since `line` is positive
    /// in practice — it has already been checked against the first line).
    pub fn first_line_not_before(lines: &[i32], line: i32) -> i32 {
        let mut result = i32::MAX;
        for &l in lines {
            if l >= line && l < result {
                result = l;
            }
        }
        if result == i32::MAX { -1 } else { result }
    }
}
425
426pub fn init(context: &'static Context) {
427 Frame::extend_class(context, context.types.frame_type);
428}
429
430impl Representable for Frame {
431 #[inline]
432 fn repr(_zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<PyStrRef> {
433 const REPR: &str = "<frame object at .. >";
434 Ok(vm.ctx.intern_str(REPR).to_owned())
435 }
436
437 #[cold]
438 fn repr_str(_zelf: &Py<Self>, _vm: &VirtualMachine) -> PyResult<String> {
439 unreachable!("use repr instead")
440 }
441}
442
#[pyclass(flags(DISALLOW_INSTANTIATION), with(Py))]
impl Frame {
    /// `frame.f_globals`: the frame's global namespace dict.
    #[pygetset]
    fn f_globals(&self) -> PyDictRef {
        self.globals.clone()
    }

    /// `frame.f_builtins`: the frame's builtins namespace object.
    #[pygetset]
    fn f_builtins(&self) -> PyObjectRef {
        self.builtins.clone()
    }

    /// `frame.f_locals`: materialize the local variables, then flag the
    /// locals snapshot as dirty so later reads re-sync with fast locals.
    #[pygetset]
    fn f_locals(&self, vm: &VirtualMachine) -> PyResult {
        let result = self.locals(vm).map(Into::into);
        // Release ordering: publish the dirty flag after the snapshot is built.
        self.locals_dirty
            .store(true, core::sync::atomic::Ordering::Release);
        result
    }

    /// `frame.f_code`: the code object being executed.
    #[pygetset]
    pub fn f_code(&self) -> PyRef<PyCode> {
        self.code.clone()
    }

    /// `frame.f_lasti`: last instruction offset, in bytes.
    /// NOTE(review): the *2 presumably converts an instruction index to a
    /// CPython-style byte offset (2 bytes per unit) — confirm.
    #[pygetset]
    fn f_lasti(&self) -> u32 {
        self.lasti() * 2
    }

    /// `frame.f_lineno`: the current source line.
    #[pygetset]
    pub fn f_lineno(&self) -> usize {
        // Before execution starts (lasti == 0) report the code's first line.
        if self.lasti() == 0 {
            self.code.first_line_number.map(|n| n.get()).unwrap_or(1)
        } else {
            self.current_location().line.get()
        }
    }

    /// `frame.f_lineno = n` (used by debuggers to jump): validate that the
    /// requested line exists and that the evaluation stack at some
    /// instruction of that line is compatible with the current stack
    /// (via `stack_analysis`), then schedule the pops/unwind and move lasti.
    #[pygetset(setter)]
    fn set_f_lineno(&self, value: PySetterValue, vm: &VirtualMachine) -> PyResult<()> {
        let l_new_lineno = match value {
            PySetterValue::Assign(val) => {
                let line_ref: PyIntRef = val
                    .downcast()
                    .map_err(|_| vm.new_value_error("lineno must be an integer"))?;
                line_ref
                    .try_to_primitive::<i32>(vm)
                    .map_err(|_| vm.new_value_error("lineno must be an integer"))?
            }
            PySetterValue::Delete => {
                return Err(vm.new_type_error("can't delete f_lineno attribute"));
            }
        };

        let first_line = self
            .code
            .first_line_number
            .map(|n| n.get() as i32)
            .unwrap_or(1);

        if l_new_lineno < first_line {
            return Err(vm.new_value_error(format!(
                "line {l_new_lineno} comes before the current code block"
            )));
        }

        let py_code: &PyCode = &self.code;
        let code = &py_code.code;
        let lines = mark_lines(code);

        // Jumping "to line N" actually targets the first line >= N that has code.
        let new_lineno = first_line_not_before(&lines, l_new_lineno);
        if new_lineno < 0 {
            return Err(vm.new_value_error(format!(
                "line {l_new_lineno} comes after the current code block"
            )));
        }

        let stacks = mark_stacks(code);
        let len = self.code.instructions.len();

        // lasti points past the current instruction; back up one to get the
        // stack state *at* it. Out-of-range defaults to OVERFLOWED (reject).
        let current_lasti = self.lasti() as usize;
        let start_idx = current_lasti.saturating_sub(1);
        let start_stack = if start_idx < stacks.len() {
            stacks[start_idx]
        } else {
            OVERFLOWED
        };
        let mut best_stack = OVERFLOWED;
        let mut best_addr: i32 = -1;
        let mut err: i32 = -1;
        let mut msg = "cannot find bytecode for specified line";

        // Among all instructions starting the target line, pick the one with
        // the deepest compatible stack (fewest pops needed).
        for i in 0..len {
            if lines[i] == new_lineno {
                let target_stack = stacks[i];
                if compatible_stack(start_stack, target_stack) {
                    err = 0;
                    if target_stack > best_stack {
                        best_stack = target_stack;
                        best_addr = i as i32;
                    }
                } else if err < 0 {
                    if start_stack == OVERFLOWED {
                        // NOTE(review): "to deep" typo kept as-is; presumably
                        // mirrors CPython's error string — confirm before fixing.
                        msg = "stack to deep to analyze";
                    } else if start_stack == UNINITIALIZED {
                        msg = "can't jump from unreachable code";
                    } else {
                        msg = explain_incompatible_stack(target_stack);
                        err = 1;
                    }
                }
            }
        }

        // err stays nonzero when no compatible target was found.
        if err != 0 {
            return Err(vm.new_value_error(msg.to_owned()));
        }

        // Count how many entries must be popped to shrink the current stack
        // down to the chosen target's depth.
        let mut pop_count = 0usize;
        {
            let mut s = start_stack;
            while s > best_stack {
                pop_count += 1;
                s = pop_value(s);
            }
        }

        // Defer the actual stack adjustment to the interpreter loop.
        self.set_pending_stack_pops(pop_count as u32);
        self.set_pending_unwind_from_stack(start_stack);

        self.set_lasti(best_addr as u32);
        Ok(())
    }

    /// `frame.f_trace`: the per-frame trace function (or None).
    #[pygetset]
    fn f_trace(&self) -> PyObjectRef {
        let boxed = self.trace.lock();
        boxed.clone()
    }

    /// Setter for `frame.f_trace`; deletion stores None.
    #[pygetset(setter)]
    fn set_f_trace(&self, value: PySetterValue, vm: &VirtualMachine) {
        let mut storage = self.trace.lock();
        *storage = value.unwrap_or_none(vm);
    }

    /// `frame.f_trace_lines`: whether line-event tracing is enabled.
    #[pymember(type = "bool")]
    fn f_trace_lines(vm: &VirtualMachine, zelf: PyObjectRef) -> PyResult {
        // Member functions receive the frame as a generic object; the
        // downcast cannot fail since this is only installed on frames.
        let zelf: FrameRef = zelf.downcast().unwrap_or_else(|_| unreachable!());

        let boxed = zelf.trace_lines.lock();
        Ok(vm.ctx.new_bool(*boxed).into())
    }

    /// Setter for `frame.f_trace_lines`; only bool assignment is allowed.
    #[pymember(type = "bool", setter)]
    fn set_f_trace_lines(
        vm: &VirtualMachine,
        zelf: PyObjectRef,
        value: PySetterValue,
    ) -> PyResult<()> {
        match value {
            PySetterValue::Assign(value) => {
                let zelf: FrameRef = zelf.downcast().unwrap_or_else(|_| unreachable!());

                let value: PyIntRef = value
                    .downcast()
                    .map_err(|_| vm.new_type_error("attribute value type must be bool"))?;

                // Any nonzero int counts as true (bool is an int subclass).
                let mut trace_lines = zelf.trace_lines.lock();
                *trace_lines = !value.as_bigint().is_zero();

                Ok(())
            }
            PySetterValue::Delete => Err(vm.new_type_error("can't delete numeric/char attribute")),
        }
    }

    /// `frame.f_trace_opcodes`: whether per-opcode tracing is enabled.
    #[pymember(type = "bool")]
    fn f_trace_opcodes(vm: &VirtualMachine, zelf: PyObjectRef) -> PyResult {
        let zelf: FrameRef = zelf.downcast().unwrap_or_else(|_| unreachable!());
        let trace_opcodes = zelf.trace_opcodes.lock();
        Ok(vm.ctx.new_bool(*trace_opcodes).into())
    }

    /// Setter for `frame.f_trace_opcodes`; only bool assignment is allowed.
    #[pymember(type = "bool", setter)]
    fn set_f_trace_opcodes(
        vm: &VirtualMachine,
        zelf: PyObjectRef,
        value: PySetterValue,
    ) -> PyResult<()> {
        match value {
            PySetterValue::Assign(value) => {
                let zelf: FrameRef = zelf.downcast().unwrap_or_else(|_| unreachable!());

                let value: PyIntRef = value
                    .downcast()
                    .map_err(|_| vm.new_type_error("attribute value type must be bool"))?;

                let mut trace_opcodes = zelf.trace_opcodes.lock();
                *trace_opcodes = !value.as_bigint().is_zero();

                Ok(())
            }
            PySetterValue::Delete => Err(vm.new_type_error("can't delete numeric/char attribute")),
        }
    }
}
667
#[pyclass]
impl Py<Frame> {
    /// `frame.clear()`: drop the frame's local state (fast locals, value
    /// stack, cells, temporary refs). Refuses for suspended generator
    /// frames and frames currently executing on a thread.
    #[pymethod]
    fn clear(&self, vm: &VirtualMachine) -> PyResult<()> {
        let owner = FrameOwner::from_i8(self.owner.load(core::sync::atomic::Ordering::Acquire));
        match owner {
            FrameOwner::Generator => {
                // lasti != 0 means the generator has started and is suspended
                // mid-execution; clearing would corrupt its resume state.
                if self.lasti() != 0 {
                    return Err(vm.new_runtime_error("cannot clear a suspended frame"));
                }
            }
            FrameOwner::Thread => {
                return Err(vm.new_runtime_error("cannot clear an executing frame"));
            }
            FrameOwner::FrameObject => {
                // Detached frame object: safe to clear.
            }
        }

        {
            // SAFETY (review): mutable access without synchronization —
            // presumably sound because the checks above guarantee the frame
            // is not executing; confirm `fastlocals_mut`'s contract.
            let fastlocals = unsafe { self.fastlocals_mut() };
            for slot in fastlocals.iter_mut() {
                *slot = None;
            }
        }

        self.clear_stack_and_cells();

        self.temporary_refs.lock().clear();

        Ok(())
    }

    /// `frame.f_generator` (extension): the generator owning this frame, if any.
    #[pygetset]
    fn f_generator(&self) -> Option<PyObjectRef> {
        self.generator.to_owned()
    }

    /// `frame.f_back`: the caller's frame, or None at the bottom of the stack.
    /// Resolves the raw previous-frame pointer back to a counted reference by
    /// pointer-identity search over the live frame registries.
    #[pygetset]
    pub fn f_back(&self, vm: &VirtualMachine) -> Option<PyRef<Frame>> {
        let previous = self.previous_frame();
        if previous.is_null() {
            return None;
        }

        // First try the current thread's frame stack.
        if let Some(frame) = vm
            .frames
            .borrow()
            .iter()
            .find(|fp| {
                // SAFETY (review): `fp` is assumed to be a valid pointer for
                // the lifetime of the borrow of `vm.frames` — confirm.
                let py: &crate::Py<Frame> = unsafe { fp.as_ref() };
                let ptr: *const Frame = &**py;
                core::ptr::eq(ptr, previous)
            })
            .map(|fp| unsafe { fp.as_ref() }.to_owned())
        {
            return Some(frame);
        }

        // Otherwise the previous frame may belong to another thread.
        #[cfg(feature = "threading")]
        {
            let registry = vm.state.thread_frames.lock();
            for slot in registry.values() {
                let frames = slot.frames.lock();
                if let Some(frame) = frames.iter().find_map(|fp| {
                    // SAFETY (review): same validity assumption as above,
                    // under the per-thread frames lock — confirm.
                    let f = unsafe { fp.as_ref() };
                    let ptr: *const Frame = &**f;
                    core::ptr::eq(ptr, previous).then(|| f.to_owned())
                }) {
                    return Some(frame);
                }
            }
        }

        None
    }
}