// rustpython_vm/builtins/frame.rs

1/*! The python `frame` type.
2
3*/
4
5use super::{PyCode, PyDictRef, PyIntRef, PyStrRef};
6use crate::{
7    Context, Py, PyObjectRef, PyRef, PyResult, VirtualMachine,
8    class::PyClassImpl,
9    frame::{Frame, FrameOwner, FrameRef},
10    function::PySetterValue,
11    types::Representable,
12};
13use num_traits::Zero;
14use rustpython_compiler_core::bytecode::{
15    self, Constant, Instruction, InstructionMetadata, StackEffect,
16};
17use stack_analysis::*;
18
19/// Stack state analysis for safe line-number jumps.
20///
21/// Models the evaluation stack as a 64-bit integer, encoding the kind of each
22/// stack entry in 3-bit blocks. Used by `set_f_lineno` to verify that a jump
23/// is safe and to determine how many values need to be popped.
24pub(crate) mod stack_analysis {
25    use super::*;
26
    // Each stack entry is encoded as a 3-bit `Kind` tag packed into an i64.
    const BITS_PER_BLOCK: u32 = 3;
    const MASK: i64 = (1 << BITS_PER_BLOCK) - 1; // 0b111
    const MAX_STACK_ENTRIES: u32 = 63 / BITS_PER_BLOCK; // 21
    // Pushing onto a stack at or above this value would shift a block into
    // the sign bit, so such stacks are collapsed to `OVERFLOWED` instead.
    const WILL_OVERFLOW: u64 = 1u64 << ((MAX_STACK_ENTRIES - 1) * BITS_PER_BLOCK);

    // Sentinels: non-negative values are valid encodings, negatives are states.
    pub const EMPTY_STACK: i64 = 0;
    pub const UNINITIALIZED: i64 = -2;
    pub const OVERFLOWED: i64 = -1;
35
    /// Kind of a stack entry, stored as a 3-bit tag in the encoded stack.
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    #[repr(i64)]
    pub enum Kind {
        /// Iterator pushed by `GetIter`/`GetAIter`.
        Iterator = 1,
        /// Exception value pushed by `PushExcInfo` or on handler entry.
        Except = 2,
        /// Any ordinary object value.
        Object = 3,
        /// A pushed NULL slot (`PushNull`, method-load second slot).
        Null = 4,
        /// Saved instruction offset pushed for re-raising handlers.
        Lasti = 5,
    }
46
47    impl Kind {
48        fn from_i64(v: i64) -> Option<Self> {
49            match v {
50                1 => Some(Kind::Iterator),
51                2 => Some(Kind::Except),
52                3 => Some(Kind::Object),
53                4 => Some(Kind::Null),
54                5 => Some(Kind::Lasti),
55                _ => None,
56            }
57        }
58    }
59
60    pub fn push_value(stack: i64, kind: i64) -> i64 {
61        if (stack as u64) >= WILL_OVERFLOW {
62            OVERFLOWED
63        } else {
64            (stack << BITS_PER_BLOCK) | kind
65        }
66    }
67
    /// Pop one entry by dropping the low 3-bit block.
    /// Arithmetic shift on i64, so negative sentinel values stay negative.
    pub fn pop_value(stack: i64) -> i64 {
        stack >> BITS_PER_BLOCK
    }
71
    /// Kind tag of the topmost entry (the low 3 bits).
    pub fn top_of_stack(stack: i64) -> i64 {
        stack & MASK
    }
75
    /// Kind tag of the `n`-th entry from the top (1-based: `peek(s, 1)` is the top).
    fn peek(stack: i64, n: u32) -> i64 {
        debug_assert!(n >= 1);
        (stack >> (BITS_PER_BLOCK * (n - 1))) & MASK
    }
80
    /// Swap the top entry with the `n`-th entry from the top (1-based),
    /// mirroring the `Swap` instruction's effect on the real stack.
    fn stack_swap(stack: i64, n: u32) -> i64 {
        debug_assert!(n >= 1);
        let to_swap = peek(stack, n);
        let top = top_of_stack(stack);
        let shift = BITS_PER_BLOCK * (n - 1);
        // Write the old top into slot n, then the old slot-n tag into the top.
        let replaced_low = (stack & !(MASK << shift)) | (top << shift);
        (replaced_low & !MASK) | to_swap
    }
89
    /// Pop entries until at most `level` remain — used when entering an
    /// exception handler, whose recorded stack depth is `level`.
    fn pop_to_level(mut stack: i64, level: u32) -> i64 {
        if level == 0 {
            return EMPTY_STACK;
        }
        // Largest encoding that still fits within `level` blocks; any value
        // above it represents a stack with more than `level` entries.
        let max_item: i64 = (1 << BITS_PER_BLOCK) - 1;
        let level_max_stack = max_item << ((level - 1) * BITS_PER_BLOCK);
        while stack > level_max_stack {
            stack = pop_value(stack);
        }
        stack
    }
101
102    fn compatible_kind(from: i64, to: i64) -> bool {
103        if to == 0 {
104            return false;
105        }
106        if to == Kind::Object as i64 {
107            return from != Kind::Null as i64;
108        }
109        if to == Kind::Null as i64 {
110            return true;
111        }
112        from == to
113    }
114
    /// Whether a frame whose current stack is `from_stack` may jump to a
    /// point whose stack is `to_stack`: the target stack must be no deeper
    /// than the source and every remaining slot must be kind-compatible, so
    /// the jump only ever has to pop values, never invent them.
    pub fn compatible_stack(from_stack: i64, to_stack: i64) -> bool {
        // Negative values are the OVERFLOWED/UNINITIALIZED sentinels.
        if from_stack < 0 || to_stack < 0 {
            return false;
        }
        let mut from = from_stack;
        let mut to = to_stack;
        // Pop the source down to (at most) the target's depth.
        while from > to {
            from = pop_value(from);
        }
        // Entry-by-entry kind check of what remains.
        while from != 0 {
            let from_top = top_of_stack(from);
            let to_top = top_of_stack(to);
            if !compatible_kind(from_top, to_top) {
                return false;
            }
            from = pop_value(from);
            to = pop_value(to);
        }
        // Only compatible if the target was consumed completely (i.e. the
        // target is not deeper than the source).
        to == 0
    }
135
136    pub fn explain_incompatible_stack(to_stack: i64) -> &'static str {
137        debug_assert!(to_stack != 0);
138        if to_stack == OVERFLOWED {
139            return "stack is too deep to analyze";
140        }
141        if to_stack == UNINITIALIZED {
142            return "can't jump into an exception handler, or code may be unreachable";
143        }
144        match Kind::from_i64(top_of_stack(to_stack)) {
145            Some(Kind::Except) => "can't jump into an 'except' block as there's no exception",
146            Some(Kind::Lasti) => "can't jump into a re-raising block as there's no location",
147            Some(Kind::Iterator) => "can't jump into the body of a for loop",
148            _ => "incompatible stacks",
149        }
150    }
151
    /// Analyze bytecode and compute the stack state at each instruction index.
    ///
    /// Returns `len + 1` encoded stack states: `stacks[i]` is the evaluation
    /// stack on entry to instruction `i`, or `UNINITIALIZED` where the
    /// analysis never reached. Iterates to a fixed point because backward
    /// jumps and exception-handler entries can make more code reachable.
    pub fn mark_stacks<C: Constant>(code: &bytecode::CodeObject<C>) -> Vec<i64> {
        let instructions = &*code.instructions;
        let len = instructions.len();

        // One extra slot so the index just past the last instruction is valid.
        let mut stacks = vec![UNINITIALIZED; len + 1];
        stacks[0] = EMPTY_STACK;

        let mut todo = true;
        while todo {
            todo = false;

            let mut i = 0;
            while i < len {
                let mut next_stack = stacks[i];
                let mut opcode = instructions[i].op;
                let mut oparg: u32 = 0;

                // Accumulate EXTENDED_ARG prefixes; each prefixed slot shares
                // the same incoming stack state as the real instruction.
                while matches!(opcode, Instruction::ExtendedArg) {
                    oparg = (oparg << 8) | u32::from(u8::from(instructions[i].arg));
                    i += 1;
                    if i >= len {
                        break;
                    }
                    stacks[i] = next_stack;
                    opcode = instructions[i].op;
                }
                if i >= len {
                    break;
                }
                // Fold in the final (non-EXTENDED_ARG) instruction's arg byte.
                oparg = (oparg << 8) | u32::from(u8::from(instructions[i].arg));

                // De-instrument and de-specialize: get the underlying base instruction
                let opcode = opcode.to_base().unwrap_or(opcode).deoptimize();

                // Skip inline cache entries to reach the next real instruction.
                let caches = opcode.cache_entries();
                let next_i = i + 1 + caches;

                // Not (yet) reachable: nothing to propagate from here.
                if next_stack == UNINITIALIZED {
                    i = next_i;
                    continue;
                }

                match opcode {
                    Instruction::PopJumpIfFalse { .. }
                    | Instruction::PopJumpIfTrue { .. }
                    | Instruction::PopJumpIfNone { .. }
                    | Instruction::PopJumpIfNotNone { .. } => {
                        // Relative forward: target = after_caches + delta.
                        // The tested value is popped on both paths.
                        let j = next_i + oparg as usize;
                        next_stack = pop_value(next_stack);
                        let target_stack = next_stack;
                        if j < stacks.len() && stacks[j] == UNINITIALIZED {
                            stacks[j] = target_stack;
                        }
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::Send { .. } => {
                        // Relative forward: target = after_caches + delta;
                        // both the jump target and fall-through keep the stack.
                        let j = next_i + oparg as usize;
                        if j < stacks.len() && stacks[j] == UNINITIALIZED {
                            stacks[j] = next_stack;
                        }
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::JumpForward { .. } => {
                        // Relative forward: target = after_caches + delta;
                        // unconditional, so there is no fall-through state.
                        let j = next_i + oparg as usize;
                        if j < stacks.len() && stacks[j] == UNINITIALIZED {
                            stacks[j] = next_stack;
                        }
                    }
                    Instruction::JumpBackward { .. }
                    | Instruction::JumpBackwardNoInterrupt { .. } => {
                        // Relative backward: target = after_caches - delta
                        let j = next_i - oparg as usize;
                        if j < stacks.len() && stacks[j] == UNINITIALIZED {
                            stacks[j] = next_stack;
                            if j < i {
                                // Initialized a slot behind the cursor: another
                                // pass is needed to propagate from it.
                                todo = true;
                            }
                        }
                    }
                    Instruction::GetIter | Instruction::GetAIter => {
                        // Replace TOS with an Iterator-tagged entry.
                        next_stack = push_value(pop_value(next_stack), Kind::Iterator as i64);
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::ForIter { .. } => {
                        // Fall-through (iteration continues): pushes the next value
                        let body_stack = push_value(next_stack, Kind::Object as i64);
                        if next_i < stacks.len() {
                            stacks[next_i] = body_stack;
                        }
                        // Exhaustion path: relative forward from after_caches
                        let mut j = next_i + oparg as usize;
                        if j < instructions.len() {
                            // Step over a trailing EndFor so the exhaustion
                            // state lands on the instruction after it.
                            let target_op =
                                instructions[j].op.to_base().unwrap_or(instructions[j].op);
                            if matches!(target_op, Instruction::EndFor) {
                                j += 1;
                            }
                        }
                        if j < stacks.len() && stacks[j] == UNINITIALIZED {
                            stacks[j] = next_stack;
                        }
                    }
                    Instruction::EndAsyncFor => {
                        // Pops the exception and the async iterator.
                        next_stack = pop_value(pop_value(next_stack));
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::PushExcInfo => {
                        next_stack = push_value(next_stack, Kind::Except as i64);
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::PopExcept => {
                        next_stack = pop_value(next_stack);
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::ReturnValue => {
                        // End of block, no fall-through
                    }
                    Instruction::RaiseVarargs { .. } => {
                        // End of block, no fall-through
                    }
                    Instruction::Reraise { .. } => {
                        // End of block, no fall-through
                    }
                    Instruction::PushNull => {
                        next_stack = push_value(next_stack, Kind::Null as i64);
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::LoadGlobal { .. } => {
                        // Pushes the global; low oparg bit also pushes a Null
                        // (call-preparation form).
                        next_stack = push_value(next_stack, Kind::Object as i64);
                        if oparg & 1 != 0 {
                            next_stack = push_value(next_stack, Kind::Null as i64);
                        }
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::LoadAttr { .. } => {
                        // LoadAttr: pops object, pushes result
                        // If oparg & 1, it also pushes Null (method load)
                        let attr_oparg = oparg;
                        if attr_oparg & 1 != 0 {
                            next_stack = pop_value(next_stack);
                            next_stack = push_value(next_stack, Kind::Object as i64);
                            next_stack = push_value(next_stack, Kind::Null as i64);
                        }
                        // else: default stack_effect handles it
                        else {
                            let effect: StackEffect = opcode.stack_effect_info(oparg);
                            let popped = effect.popped() as i64;
                            let pushed = effect.pushed() as i64;
                            for _ in 0..popped {
                                next_stack = pop_value(next_stack);
                            }
                            for _ in 0..pushed {
                                next_stack = push_value(next_stack, Kind::Object as i64);
                            }
                        }
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::Swap { .. } => {
                        let n = oparg;
                        next_stack = stack_swap(next_stack, n);
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    Instruction::Copy { .. } => {
                        // Duplicate the n-th entry (including its kind tag).
                        let n = oparg;
                        next_stack = push_value(next_stack, peek(next_stack, n));
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                    _ => {
                        // Default: use stack_effect — pop `popped` entries and
                        // push `pushed` Object-tagged entries.
                        let effect: StackEffect = opcode.stack_effect_info(oparg);
                        let popped = effect.popped() as i64;
                        let pushed = effect.pushed() as i64;
                        let mut ns = next_stack;
                        for _ in 0..popped {
                            ns = pop_value(ns);
                        }
                        for _ in 0..pushed {
                            ns = push_value(ns, Kind::Object as i64);
                        }
                        next_stack = ns;
                        if next_i < stacks.len() {
                            stacks[next_i] = next_stack;
                        }
                    }
                }
                i = next_i;
            }

            // Propagate into exception handlers: a handler entry starts from
            // the protected region's stack popped to the recorded depth, plus
            // an optional lasti slot and the exception value itself.
            let exception_table = bytecode::decode_exception_table(&code.exceptiontable);
            for entry in &exception_table {
                let start_offset = entry.start as usize;
                let handler = entry.target as usize;
                let level = entry.depth as u32;
                let has_lasti = entry.push_lasti;

                if start_offset < stacks.len()
                    && stacks[start_offset] != UNINITIALIZED
                    && handler < stacks.len()
                    && stacks[handler] == UNINITIALIZED
                {
                    // Newly reachable handler — run another pass.
                    todo = true;
                    let mut target_stack = pop_to_level(stacks[start_offset], level);
                    if has_lasti {
                        target_stack = push_value(target_stack, Kind::Lasti as i64);
                    }
                    target_stack = push_value(target_stack, Kind::Except as i64);
                    stacks[handler] = target_stack;
                }
            }
        }

        stacks
    }
393
394    /// Build a mapping from instruction index to line number.
395    /// Returns -1 for indices with no line start.
396    pub fn mark_lines<C: Constant>(code: &bytecode::CodeObject<C>) -> Vec<i32> {
397        let len = code.instructions.len();
398        let mut line_starts = vec![-1i32; len];
399        let mut last_line: i32 = -1;
400
401        for (i, (loc, _)) in code.locations.iter().enumerate() {
402            if i >= len {
403                break;
404            }
405            let line = loc.line.get() as i32;
406            if line != last_line && line > 0 {
407                line_starts[i] = line;
408                last_line = line;
409            }
410        }
411        line_starts
412    }
413
414    /// Find the first line number >= `line` that has code.
415    pub fn first_line_not_before(lines: &[i32], line: i32) -> i32 {
416        let mut result = i32::MAX;
417        for &l in lines {
418            if l >= line && l < result {
419                result = l;
420            }
421        }
422        if result == i32::MAX { -1 } else { result }
423    }
424}
425
/// Register the `frame` type's methods and properties on its type object.
pub fn init(context: &'static Context) {
    Frame::extend_class(context, context.types.frame_type);
}
429
impl Representable for Frame {
    /// Frames use a fixed placeholder repr (no address formatting).
    #[inline]
    fn repr(_zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<PyStrRef> {
        const REPR: &str = "<frame object at .. >";
        Ok(vm.ctx.intern_str(REPR).to_owned())
    }

    #[cold]
    fn repr_str(_zelf: &Py<Self>, _vm: &VirtualMachine) -> PyResult<String> {
        // `repr` above always answers, so this fallback is never invoked.
        unreachable!("use repr instead")
    }
}
442
443#[pyclass(flags(DISALLOW_INSTANTIATION), with(Py))]
444impl Frame {
    /// The frame's global namespace (`f_globals`).
    #[pygetset]
    fn f_globals(&self) -> PyDictRef {
        self.globals.clone()
    }
449
    /// The builtins namespace visible from this frame (`f_builtins`).
    #[pygetset]
    fn f_builtins(&self) -> PyObjectRef {
        self.builtins.clone()
    }
454
    /// Materialize and return the frame's local namespace (`f_locals`).
    #[pygetset]
    fn f_locals(&self, vm: &VirtualMachine) -> PyResult {
        let result = self.locals(vm).map(Into::into);
        // Mark locals dirty after handing the mapping out — presumably so the
        // executor re-syncs fast locals with external mutations; confirm
        // against the execution loop.
        self.locals_dirty
            .store(true, core::sync::atomic::Ordering::Release);
        result
    }
462
    /// The code object executed by this frame (`f_code`).
    #[pygetset]
    pub fn f_code(&self) -> PyRef<PyCode> {
        self.code.clone()
    }
467
    /// Index of the last attempted instruction (`f_lasti`).
    #[pygetset]
    fn f_lasti(&self) -> u32 {
        // Return byte offset (each instruction is 2 bytes) for compatibility
        // with CPython, which reports f_lasti in bytecode bytes.
        self.lasti() * 2
    }
473
474    #[pygetset]
475    pub fn f_lineno(&self) -> usize {
476        // If lasti is 0, execution hasn't started yet - use first line number
477        // Similar to PyCode_Addr2Line which returns co_firstlineno for addr_q < 0
478        if self.lasti() == 0 {
479            self.code.first_line_number.map(|n| n.get()).unwrap_or(1)
480        } else {
481            self.current_location().line.get()
482        }
483    }
484
485    #[pygetset(setter)]
486    fn set_f_lineno(&self, value: PySetterValue, vm: &VirtualMachine) -> PyResult<()> {
487        let l_new_lineno = match value {
488            PySetterValue::Assign(val) => {
489                let line_ref: PyIntRef = val
490                    .downcast()
491                    .map_err(|_| vm.new_value_error("lineno must be an integer"))?;
492                line_ref
493                    .try_to_primitive::<i32>(vm)
494                    .map_err(|_| vm.new_value_error("lineno must be an integer"))?
495            }
496            PySetterValue::Delete => {
497                return Err(vm.new_type_error("can't delete f_lineno attribute"));
498            }
499        };
500
501        let first_line = self
502            .code
503            .first_line_number
504            .map(|n| n.get() as i32)
505            .unwrap_or(1);
506
507        if l_new_lineno < first_line {
508            return Err(vm.new_value_error(format!(
509                "line {l_new_lineno} comes before the current code block"
510            )));
511        }
512
513        let py_code: &PyCode = &self.code;
514        let code = &py_code.code;
515        let lines = mark_lines(code);
516
517        // Find the first line >= target that has actual code
518        let new_lineno = first_line_not_before(&lines, l_new_lineno);
519        if new_lineno < 0 {
520            return Err(vm.new_value_error(format!(
521                "line {l_new_lineno} comes after the current code block"
522            )));
523        }
524
525        let stacks = mark_stacks(code);
526        let len = self.code.instructions.len();
527
528        // lasti points past the current instruction (already incremented).
529        // stacks[lasti - 1] gives the stack state before executing the
530        // instruction that triggered this trace event, which is the current
531        // evaluation stack.
532        let current_lasti = self.lasti() as usize;
533        let start_idx = current_lasti.saturating_sub(1);
534        let start_stack = if start_idx < stacks.len() {
535            stacks[start_idx]
536        } else {
537            OVERFLOWED
538        };
539        let mut best_stack = OVERFLOWED;
540        let mut best_addr: i32 = -1;
541        let mut err: i32 = -1;
542        let mut msg = "cannot find bytecode for specified line";
543
544        for i in 0..len {
545            if lines[i] == new_lineno {
546                let target_stack = stacks[i];
547                if compatible_stack(start_stack, target_stack) {
548                    err = 0;
549                    if target_stack > best_stack {
550                        best_stack = target_stack;
551                        best_addr = i as i32;
552                    }
553                } else if err < 0 {
554                    if start_stack == OVERFLOWED {
555                        msg = "stack to deep to analyze";
556                    } else if start_stack == UNINITIALIZED {
557                        msg = "can't jump from unreachable code";
558                    } else {
559                        msg = explain_incompatible_stack(target_stack);
560                        err = 1;
561                    }
562                }
563            }
564        }
565
566        if err != 0 {
567            return Err(vm.new_value_error(msg.to_owned()));
568        }
569
570        // Count how many entries to pop
571        let mut pop_count = 0usize;
572        {
573            let mut s = start_stack;
574            while s > best_stack {
575                pop_count += 1;
576                s = pop_value(s);
577            }
578        }
579
580        // Store the pending unwind for the execution loop to perform.
581        // We cannot pop stack entries here because the execution loop
582        // holds the state mutex, and trying to lock it again would deadlock.
583        self.set_pending_stack_pops(pop_count as u32);
584        self.set_pending_unwind_from_stack(start_stack);
585
586        // Set lasti to best_addr. The executor will read lasti and execute
587        // the instruction at that index next.
588        self.set_lasti(best_addr as u32);
589        Ok(())
590    }
591
    /// The per-frame trace function (`f_trace`).
    #[pygetset]
    fn f_trace(&self) -> PyObjectRef {
        let boxed = self.trace.lock();
        boxed.clone()
    }
597
    /// Setter for `f_trace`; deletion stores `None`.
    #[pygetset(setter)]
    fn set_f_trace(&self, value: PySetterValue, vm: &VirtualMachine) {
        let mut storage = self.trace.lock();
        *storage = value.unwrap_or_none(vm);
    }
603
    /// Whether line-trace events fire for this frame (`f_trace_lines`).
    #[pymember(type = "bool")]
    fn f_trace_lines(vm: &VirtualMachine, zelf: PyObjectRef) -> PyResult {
        // The member is only reachable on a frame object, so this cannot fail.
        let zelf: FrameRef = zelf.downcast().unwrap_or_else(|_| unreachable!());

        let boxed = zelf.trace_lines.lock();
        Ok(vm.ctx.new_bool(*boxed).into())
    }
611
612    #[pymember(type = "bool", setter)]
613    fn set_f_trace_lines(
614        vm: &VirtualMachine,
615        zelf: PyObjectRef,
616        value: PySetterValue,
617    ) -> PyResult<()> {
618        match value {
619            PySetterValue::Assign(value) => {
620                let zelf: FrameRef = zelf.downcast().unwrap_or_else(|_| unreachable!());
621
622                let value: PyIntRef = value
623                    .downcast()
624                    .map_err(|_| vm.new_type_error("attribute value type must be bool"))?;
625
626                let mut trace_lines = zelf.trace_lines.lock();
627                *trace_lines = !value.as_bigint().is_zero();
628
629                Ok(())
630            }
631            PySetterValue::Delete => Err(vm.new_type_error("can't delete numeric/char attribute")),
632        }
633    }
634
    /// Whether per-opcode trace events fire for this frame (`f_trace_opcodes`).
    #[pymember(type = "bool")]
    fn f_trace_opcodes(vm: &VirtualMachine, zelf: PyObjectRef) -> PyResult {
        // The member is only reachable on a frame object, so this cannot fail.
        let zelf: FrameRef = zelf.downcast().unwrap_or_else(|_| unreachable!());
        let trace_opcodes = zelf.trace_opcodes.lock();
        Ok(vm.ctx.new_bool(*trace_opcodes).into())
    }
641
642    #[pymember(type = "bool", setter)]
643    fn set_f_trace_opcodes(
644        vm: &VirtualMachine,
645        zelf: PyObjectRef,
646        value: PySetterValue,
647    ) -> PyResult<()> {
648        match value {
649            PySetterValue::Assign(value) => {
650                let zelf: FrameRef = zelf.downcast().unwrap_or_else(|_| unreachable!());
651
652                let value: PyIntRef = value
653                    .downcast()
654                    .map_err(|_| vm.new_type_error("attribute value type must be bool"))?;
655
656                let mut trace_opcodes = zelf.trace_opcodes.lock();
657                *trace_opcodes = !value.as_bigint().is_zero();
658
659                // TODO: Implement the equivalent of _PyEval_SetOpcodeTrace()
660
661                Ok(())
662            }
663            PySetterValue::Delete => Err(vm.new_type_error("can't delete numeric/char attribute")),
664        }
665    }
666}
667
668#[pyclass]
669impl Py<Frame> {
    /// Clear all references held by the frame (`frame.clear()`).
    /// Port of CPython's frame_clear_impl.
    #[pymethod]
    fn clear(&self, vm: &VirtualMachine) -> PyResult<()> {
        // A frame may only be cleared once it can no longer execute.
        let owner = FrameOwner::from_i8(self.owner.load(core::sync::atomic::Ordering::Acquire));
        match owner {
            FrameOwner::Generator => {
                // Generator frame: check if suspended (lasti > 0 means
                // FRAME_SUSPENDED). lasti == 0 means FRAME_CREATED and
                // can be cleared.
                if self.lasti() != 0 {
                    return Err(vm.new_runtime_error("cannot clear a suspended frame"));
                }
            }
            FrameOwner::Thread => {
                // Thread-owned frame: always executing, cannot clear.
                return Err(vm.new_runtime_error("cannot clear an executing frame"));
            }
            FrameOwner::FrameObject => {
                // Detached frame: safe to clear.
            }
        }

        // Drop every fast-local reference.
        // SAFETY: Frame is not executing (detached or stopped).
        {
            let fastlocals = unsafe { self.fastlocals_mut() };
            for slot in fastlocals.iter_mut() {
                *slot = None;
            }
        }

        // Clear the evaluation stack and cell references
        self.clear_stack_and_cells();

        // Clear temporary refs
        self.temporary_refs.lock().clear();

        Ok(())
    }
709
    /// The generator/coroutine object owning this frame, if any.
    #[pygetset]
    fn f_generator(&self) -> Option<PyObjectRef> {
        self.generator.to_owned()
    }
714
    /// The caller's frame (`f_back`), or `None` at the outermost frame.
    ///
    /// `previous_frame()` is a raw pointer; it is resolved back to an owned
    /// `PyRef` by locating the matching frame in a live frame stack rather
    /// than trusting the pointer on its own.
    #[pygetset]
    pub fn f_back(&self, vm: &VirtualMachine) -> Option<PyRef<Frame>> {
        let previous = self.previous_frame();
        if previous.is_null() {
            return None;
        }

        // First, search the current thread's frame stack.
        if let Some(frame) = vm
            .frames
            .borrow()
            .iter()
            .find(|fp| {
                // SAFETY: the caller keeps the FrameRef alive while it's in the Vec
                let py: &crate::Py<Frame> = unsafe { fp.as_ref() };
                let ptr: *const Frame = &**py;
                core::ptr::eq(ptr, previous)
            })
            .map(|fp| unsafe { fp.as_ref() }.to_owned())
        {
            return Some(frame);
        }

        // Otherwise the previous frame may belong to another thread.
        #[cfg(feature = "threading")]
        {
            let registry = vm.state.thread_frames.lock();
            for slot in registry.values() {
                let frames = slot.frames.lock();
                // SAFETY: the owning thread can't pop while we hold the Mutex,
                // so FramePtr is valid for the duration of the lock.
                if let Some(frame) = frames.iter().find_map(|fp| {
                    let f = unsafe { fp.as_ref() };
                    let ptr: *const Frame = &**f;
                    core::ptr::eq(ptr, previous).then(|| f.to_owned())
                }) {
                    return Some(frame);
                }
            }
        }

        // No live frame matched the pointer: report no back-frame.
        None
    }
756}