// rustpython_vm/frame.rs
1// spell-checker: ignore compactlong compactlongs
2
3use crate::anystr::AnyStr;
4#[cfg(feature = "flame")]
5use crate::bytecode::InstructionMetadata;
6use crate::{
7    AsObject, Py, PyExact, PyObject, PyObjectRef, PyPayload, PyRef, PyResult, PyStackRef,
8    TryFromObject, VirtualMachine,
9    builtins::{
10        PyBaseException, PyBaseExceptionRef, PyBaseObject, PyCode, PyCoroutine, PyDict, PyDictRef,
11        PyFloat, PyFrozenSet, PyGenerator, PyInt, PyInterpolation, PyList, PyModule, PyProperty,
12        PySet, PySlice, PyStr, PyStrInterned, PyTemplate, PyTraceback, PyType, PyUtf8Str,
13        builtin_func::PyNativeFunction,
14        descriptor::{MemberGetter, PyMemberDescriptor, PyMethodDescriptor},
15        frame::stack_analysis,
16        function::{
17            PyBoundMethod, PyCell, PyCellRef, PyFunction, datastack_frame_size_bytes_for_code,
18            vectorcall_function,
19        },
20        list::PyListIterator,
21        range::PyRangeIterator,
22        tuple::{PyTuple, PyTupleIterator, PyTupleRef},
23    },
24    bytecode::{
25        self, ADAPTIVE_COOLDOWN_VALUE, Arg, Instruction, LoadAttr, LoadSuperAttr, SpecialMethod,
26    },
27    convert::{ToPyObject, ToPyResult},
28    coroutine::Coro,
29    exceptions::ExceptionCtor,
30    function::{ArgMapping, Either, FuncArgs, PyMethodFlags},
31    object::PyAtomicBorrow,
32    object::{Traverse, TraverseFn},
33    protocol::{PyIter, PyIterReturn},
34    scope::Scope,
35    sliceable::SliceableSequenceOp,
36    stdlib::{_typing, builtins, sys::monitoring},
37    types::{PyComparisonOp, PyTypeFlags},
38    vm::{Context, PyMethod},
39};
40use alloc::fmt;
41use bstr::ByteSlice;
42use core::cell::UnsafeCell;
43use core::sync::atomic;
44use core::sync::atomic::AtomicPtr;
45use core::sync::atomic::Ordering::{Acquire, Relaxed};
46use indexmap::IndexMap;
47use itertools::Itertools;
48use malachite_bigint::BigInt;
49use num_traits::Zero;
50use rustpython_common::atomic::{PyAtomic, Radium};
51use rustpython_common::{
52    lock::{OnceCell, PyMutex},
53    wtf8::{Wtf8, Wtf8Buf, wtf8_concat},
54};
55use rustpython_compiler_core::SourceLocation;
56
/// Reference-counted handle to a [`Frame`] PyObject.
pub type FrameRef = PyRef<Frame>;
58
/// The reason why we might be unwinding a block.
/// This could be return of function, exception being
/// raised, a break or continue being hit, etc..
///
/// NOTE(review): only `Returning` and `Raising` variants exist here even
/// though the doc above mentions break/continue — the doc may be stale, or
/// those cases are handled elsewhere; confirm before relying on it.
#[derive(Clone, Debug)]
enum UnwindReason {
    /// We are returning a value from a return statement.
    Returning { value: PyObjectRef },

    /// We hit an exception, so unwind any try-except and finally blocks. The exception should be
    /// on top of the vm exception stack.
    Raising { exception: PyBaseExceptionRef },
}
71
/// Tracks who owns a frame.
///
/// `repr(i8)` so the value can be stored in the frame's `owner: AtomicI8`
/// field and round-tripped through `from_i8`.
// = `_PyFrameOwner`
#[repr(i8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum FrameOwner {
    /// Being executed by a thread (FRAME_OWNED_BY_THREAD).
    Thread = 0,
    /// Owned by a generator/coroutine (FRAME_OWNED_BY_GENERATOR).
    Generator = 1,
    /// Not executing; held only by a frame object or traceback
    /// (FRAME_OWNED_BY_FRAME_OBJECT).
    FrameObject = 2,
}
85
86impl FrameOwner {
87    pub(crate) fn from_i8(v: i8) -> Self {
88        match v {
89            0 => Self::Thread,
90            1 => Self::Generator,
91            _ => Self::FrameObject,
92        }
93    }
94}
95
/// Lock-free mutable storage for frame-internal data.
///
/// Thin newtype over [`UnsafeCell`] so the `Send`/`Sync` impls (and the
/// safety argument for them) are scoped to frame usage only.
///
/// # Safety
/// Frame execution is single-threaded: only one thread at a time executes
/// a given frame (enforced by the owner field and generator running flag).
/// External readers (e.g. `f_locals`) are on the same thread as execution
/// (trace callback) or the frame is not executing.
pub(crate) struct FrameUnsafeCell<T>(UnsafeCell<T>);
104
105impl<T> FrameUnsafeCell<T> {
106    fn new(value: T) -> Self {
107        Self(UnsafeCell::new(value))
108    }
109
110    /// # Safety
111    /// Caller must ensure no concurrent mutable access.
112    #[inline(always)]
113    unsafe fn get(&self) -> *mut T {
114        self.0.get()
115    }
116}
117
// SAFETY: Frame execution is single-threaded. See FrameUnsafeCell doc.
// Note that `Sync` is provided with only a `T: Send` bound: cross-thread
// access is assumed to be serialized by the frame owner field rather than
// by `T` itself — NOTE(review): confirm all external readers uphold this.
#[cfg(feature = "threading")]
unsafe impl<T: Send> Send for FrameUnsafeCell<T> {}
#[cfg(feature = "threading")]
unsafe impl<T: Send> Sync for FrameUnsafeCell<T> {}
123
/// Unified storage for local variables and evaluation stack.
///
/// Memory layout (each slot is `usize`-sized):
///   `[0..nlocalsplus)` — fastlocals (`Option<PyObjectRef>`)
///   `[nlocalsplus..nlocalsplus+stack_top)` — active evaluation stack (`Option<PyStackRef>`)
///   `[nlocalsplus+stack_top..capacity)` — unused stack capacity
///
/// Both `Option<PyObjectRef>` and `Option<PyStackRef>` are `usize`-sized
/// (niche optimization on NonNull / NonZeroUsize). The raw storage is
/// `usize` to unify them; typed access is provided through methods.
///
/// Invariant: `stack_top <= capacity - nlocalsplus` (checked by
/// debug_asserts in the stack methods).
pub struct LocalsPlus {
    /// Backing storage.
    data: LocalsPlusData,
    /// Number of fastlocals slots (nlocals + ncells + nfrees).
    nlocalsplus: u32,
    /// Current evaluation stack depth.
    stack_top: u32,
}
142
enum LocalsPlusData {
    /// Heap-allocated storage (generators, coroutines, exec/eval frames).
    /// The boxed slice length is the total capacity.
    Heap(Box<[usize]>),
    /// Data stack allocated storage (normal function calls).
    /// The pointer is valid while the enclosing data stack frame is alive.
    DataStack { ptr: *mut usize, capacity: usize },
}
150
// SAFETY: DataStack variant points to thread-local DataStack memory.
// Frame execution is single-threaded (enforced by owner field), so the
// raw pointer is never dereferenced from two threads concurrently.
#[cfg(feature = "threading")]
unsafe impl Send for LocalsPlusData {}
#[cfg(feature = "threading")]
unsafe impl Sync for LocalsPlusData {}
157
// Compile-time proof of the size half of the layout assumption:
// `Option<PyObjectRef>` must be exactly `usize`-sized so it can live in a
// raw `usize` slot. (The companion assumption — that a zeroed slot encodes
// `None` — is relied on throughout `LocalsPlus`.)
const _: () = {
    assert!(core::mem::size_of::<Option<PyObjectRef>>() == core::mem::size_of::<usize>());
    // PyStackRef size is checked in object/core.rs
};
162
impl LocalsPlus {
    /// Create a new heap-backed LocalsPlus.  All slots start as None (0).
    fn new(nlocalsplus: usize, stacksize: usize) -> Self {
        let capacity = nlocalsplus
            .checked_add(stacksize)
            .expect("LocalsPlus capacity overflow");
        let nlocalsplus_u32 = u32::try_from(nlocalsplus).expect("nlocalsplus exceeds u32");
        Self {
            // 0 is the niche encoding of None for both slot types, so a
            // zero-filled buffer is a fully-initialized "all empty" state.
            data: LocalsPlusData::Heap(vec![0usize; capacity].into_boxed_slice()),
            nlocalsplus: nlocalsplus_u32,
            stack_top: 0,
        }
    }

    /// Create a new LocalsPlus backed by the thread data stack.
    /// All slots are zero-initialized.
    ///
    /// The caller must call `materialize_localsplus()` when the frame finishes
    /// to migrate data to the heap, then `datastack_pop()` to free the memory.
    fn new_on_datastack(nlocalsplus: usize, stacksize: usize, vm: &VirtualMachine) -> Self {
        let capacity = nlocalsplus
            .checked_add(stacksize)
            .expect("LocalsPlus capacity overflow");
        let byte_size = capacity
            .checked_mul(core::mem::size_of::<usize>())
            .expect("LocalsPlus byte size overflow");
        let nlocalsplus_u32 = u32::try_from(nlocalsplus).expect("nlocalsplus exceeds u32");
        let ptr = vm.datastack_push(byte_size) as *mut usize;
        // Zero-initialize all slots (0 = None for both PyObjectRef and PyStackRef).
        unsafe { core::ptr::write_bytes(ptr, 0, capacity) };
        Self {
            data: LocalsPlusData::DataStack { ptr, capacity },
            nlocalsplus: nlocalsplus_u32,
            stack_top: 0,
        }
    }

    /// Migrate data-stack-backed storage to the heap, preserving all values.
    /// Returns the data stack base pointer for `DataStack::pop()`.
    /// Returns `None` if already heap-backed.
    ///
    /// Note: this bit-copies the raw slots; ownership of the contained
    /// references moves to the heap copy. The caller must release (not drop)
    /// the data-stack region via `datastack_pop`, otherwise the refs would
    /// be duplicated.
    fn materialize_to_heap(&mut self) -> Option<*mut u8> {
        if let LocalsPlusData::DataStack { ptr, capacity } = &self.data {
            let base = *ptr as *mut u8;
            let heap_data = unsafe { core::slice::from_raw_parts(*ptr, *capacity) }
                .to_vec()
                .into_boxed_slice();
            self.data = LocalsPlusData::Heap(heap_data);
            Some(base)
        } else {
            None
        }
    }

    /// Drop all contained values without freeing the backing storage.
    fn drop_values(&mut self) {
        self.stack_clear();
        let fastlocals = self.fastlocals_mut();
        for slot in fastlocals.iter_mut() {
            let _ = slot.take();
        }
    }

    // -- Data access helpers --

    #[inline(always)]
    fn data_as_slice(&self) -> &[usize] {
        match &self.data {
            LocalsPlusData::Heap(b) => b,
            LocalsPlusData::DataStack { ptr, capacity } => unsafe {
                core::slice::from_raw_parts(*ptr, *capacity)
            },
        }
    }

    #[inline(always)]
    fn data_as_mut_slice(&mut self) -> &mut [usize] {
        match &mut self.data {
            LocalsPlusData::Heap(b) => b,
            LocalsPlusData::DataStack { ptr, capacity } => unsafe {
                core::slice::from_raw_parts_mut(*ptr, *capacity)
            },
        }
    }

    /// Total capacity (fastlocals + stack).
    #[inline(always)]
    fn capacity(&self) -> usize {
        match &self.data {
            LocalsPlusData::Heap(b) => b.len(),
            LocalsPlusData::DataStack { capacity, .. } => *capacity,
        }
    }

    /// Stack capacity (max stack depth).
    #[inline(always)]
    fn stack_capacity(&self) -> usize {
        self.capacity() - self.nlocalsplus as usize
    }

    // -- Fastlocals access --

    /// Immutable access to fastlocals as `Option<PyObjectRef>` slice.
    #[inline(always)]
    fn fastlocals(&self) -> &[Option<PyObjectRef>] {
        let data = self.data_as_slice();
        // Reinterpret the raw usize slots; sound because the types are the
        // same size (const assert above) and 0 encodes None.
        let ptr = data.as_ptr() as *const Option<PyObjectRef>;
        unsafe { core::slice::from_raw_parts(ptr, self.nlocalsplus as usize) }
    }

    /// Mutable access to fastlocals as `Option<PyObjectRef>` slice.
    #[inline(always)]
    fn fastlocals_mut(&mut self) -> &mut [Option<PyObjectRef>] {
        let nlocalsplus = self.nlocalsplus as usize;
        let data = self.data_as_mut_slice();
        let ptr = data.as_mut_ptr() as *mut Option<PyObjectRef>;
        unsafe { core::slice::from_raw_parts_mut(ptr, nlocalsplus) }
    }

    // -- Stack access --

    /// Current stack depth.
    #[inline(always)]
    fn stack_len(&self) -> usize {
        self.stack_top as usize
    }

    /// Whether the stack is empty.
    #[inline(always)]
    fn stack_is_empty(&self) -> bool {
        self.stack_top == 0
    }

    /// Push a value onto the evaluation stack.
    #[inline(always)]
    fn stack_push(&mut self, val: Option<PyStackRef>) {
        let idx = self.nlocalsplus as usize + self.stack_top as usize;
        debug_assert!(
            idx < self.capacity(),
            "stack overflow: stack_top={}, capacity={}",
            self.stack_top,
            self.stack_capacity()
        );
        let data = self.data_as_mut_slice();
        // Transfer ownership into the raw slot; transmute is sound per the
        // size/niche assumptions documented on LocalsPlus.
        data[idx] = unsafe { core::mem::transmute::<Option<PyStackRef>, usize>(val) };
        self.stack_top += 1;
    }

    /// Try to push; returns Err if stack is full.
    #[inline(always)]
    fn stack_try_push(&mut self, val: Option<PyStackRef>) -> Result<(), Option<PyStackRef>> {
        let idx = self.nlocalsplus as usize + self.stack_top as usize;
        if idx >= self.capacity() {
            // Hand the value back to the caller instead of dropping it.
            return Err(val);
        }
        let data = self.data_as_mut_slice();
        data[idx] = unsafe { core::mem::transmute::<Option<PyStackRef>, usize>(val) };
        self.stack_top += 1;
        Ok(())
    }

    /// Pop a value from the evaluation stack.
    #[inline(always)]
    fn stack_pop(&mut self) -> Option<PyStackRef> {
        debug_assert!(self.stack_top > 0, "stack underflow");
        self.stack_top -= 1;
        let idx = self.nlocalsplus as usize + self.stack_top as usize;
        let data = self.data_as_mut_slice();
        // Zero the slot (None) so the value is not seen twice, then take
        // ownership of the old bits.
        let raw = core::mem::replace(&mut data[idx], 0);
        unsafe { core::mem::transmute::<usize, Option<PyStackRef>>(raw) }
    }

    /// Immutable view of the active stack as `Option<PyStackRef>` slice.
    #[inline(always)]
    fn stack_as_slice(&self) -> &[Option<PyStackRef>] {
        let data = self.data_as_slice();
        let base = self.nlocalsplus as usize;
        let ptr = unsafe { (data.as_ptr().add(base)) as *const Option<PyStackRef> };
        unsafe { core::slice::from_raw_parts(ptr, self.stack_top as usize) }
    }

    /// Get a reference to a stack slot by index from the bottom.
    #[inline(always)]
    fn stack_index(&self, idx: usize) -> &Option<PyStackRef> {
        debug_assert!(idx < self.stack_top as usize);
        let data = self.data_as_slice();
        let raw_idx = self.nlocalsplus as usize + idx;
        unsafe { &*(data.as_ptr().add(raw_idx) as *const Option<PyStackRef>) }
    }

    /// Get a mutable reference to a stack slot by index from the bottom.
    #[inline(always)]
    fn stack_index_mut(&mut self, idx: usize) -> &mut Option<PyStackRef> {
        debug_assert!(idx < self.stack_top as usize);
        let raw_idx = self.nlocalsplus as usize + idx;
        let data = self.data_as_mut_slice();
        unsafe { &mut *(data.as_mut_ptr().add(raw_idx) as *mut Option<PyStackRef>) }
    }

    /// Get the last stack element (top of stack).
    #[inline(always)]
    fn stack_last(&self) -> Option<&Option<PyStackRef>> {
        if self.stack_top == 0 {
            None
        } else {
            Some(self.stack_index(self.stack_top as usize - 1))
        }
    }

    /// Get mutable reference to the last stack element.
    #[inline(always)]
    fn stack_last_mut(&mut self) -> Option<&mut Option<PyStackRef>> {
        if self.stack_top == 0 {
            None
        } else {
            let idx = self.stack_top as usize - 1;
            Some(self.stack_index_mut(idx))
        }
    }

    /// Swap two stack elements.
    #[inline(always)]
    fn stack_swap(&mut self, a: usize, b: usize) {
        let base = self.nlocalsplus as usize;
        let data = self.data_as_mut_slice();
        // Swapping the raw usize bits is equivalent to swapping the typed
        // values — no ownership changes hands.
        data.swap(base + a, base + b);
    }

    /// Truncate the stack to `new_len` elements, dropping excess values.
    fn stack_truncate(&mut self, new_len: usize) {
        debug_assert!(new_len <= self.stack_top as usize);
        while self.stack_top as usize > new_len {
            let _ = self.stack_pop();
        }
    }

    /// Clear the stack, dropping all values.
    fn stack_clear(&mut self) {
        while self.stack_top > 0 {
            let _ = self.stack_pop();
        }
    }

    /// Drain stack elements from `from` to the end, returning an iterator
    /// that yields `Option<PyStackRef>` in forward order and shrinks the stack.
    ///
    /// Dropping the iterator releases any elements not yet yielded (see
    /// `LocalsPlusStackDrain`'s `Drop` impl).
    fn stack_drain(
        &mut self,
        from: usize,
    ) -> impl ExactSizeIterator<Item = Option<PyStackRef>> + '_ {
        let end = self.stack_top as usize;
        debug_assert!(from <= end);
        // Reduce stack_top now; the drain iterator owns the elements.
        self.stack_top = from as u32;
        LocalsPlusStackDrain {
            localsplus: self,
            current: from,
            end,
        }
    }

    /// Extend the stack with values from an iterator.
    fn stack_extend(&mut self, iter: impl Iterator<Item = Option<PyStackRef>>) {
        for val in iter {
            self.stack_push(val);
        }
    }
}
429
430/// Iterator for draining stack elements in forward order.
/// Iterator for draining stack elements in forward order.
///
/// Owns the slots in `[current, end)`; its `Drop` impl releases any
/// elements that were never yielded.
struct LocalsPlusStackDrain<'a> {
    localsplus: &'a mut LocalsPlus,
    /// Current read position (stack-relative index).
    current: usize,
    /// End position (exclusive, stack-relative index).
    end: usize,
}
438
impl Iterator for LocalsPlusStackDrain<'_> {
    type Item = Option<PyStackRef>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.current >= self.end {
            return None;
        }
        // Convert the stack-relative index to a raw slot index.
        let idx = self.localsplus.nlocalsplus as usize + self.current;
        let data = self.localsplus.data_as_mut_slice();
        // Zero the slot and take ownership of the old bits; transmute is
        // sound per the size/niche assumptions documented on LocalsPlus.
        let raw = core::mem::replace(&mut data[idx], 0);
        self.current += 1;
        Some(unsafe { core::mem::transmute::<usize, Option<PyStackRef>>(raw) })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.end - self.current;
        (remaining, Some(remaining))
    }
}
458
// `size_hint` above returns the exact remaining count, so this is sound.
impl ExactSizeIterator for LocalsPlusStackDrain<'_> {}
460
impl Drop for LocalsPlusStackDrain<'_> {
    /// Release any elements the iterator never yielded, so a partially
    /// consumed drain does not leak stack references.
    fn drop(&mut self) {
        while self.current < self.end {
            let idx = self.localsplus.nlocalsplus as usize + self.current;
            let data = self.localsplus.data_as_mut_slice();
            let raw = core::mem::replace(&mut data[idx], 0);
            // Reconstitute the value solely so its Drop impl runs.
            let _ = unsafe { core::mem::transmute::<usize, Option<PyStackRef>>(raw) };
            self.current += 1;
        }
    }
}
472
impl Drop for LocalsPlus {
    fn drop(&mut self) {
        // drop_values handles both stack and fastlocals.
        // For DataStack-backed storage, the caller should have called
        // materialize_localsplus() + datastack_pop() before drop.
        // If not (e.g. panic), the DataStack memory is leaked but
        // values are still dropped safely (leak over use-after-free).
        self.drop_values();
    }
}
483
unsafe impl Traverse for LocalsPlus {
    /// GC traversal: visit every live local and every active stack slot.
    /// Unused stack capacity holds only zeroed (None) slots, so the two
    /// typed views below cover all owned references.
    fn traverse(&self, tracer_fn: &mut TraverseFn<'_>) {
        self.fastlocals().traverse(tracer_fn);
        self.stack_as_slice().traverse(tracer_fn);
    }
}
490
/// Lazy locals dict for frames. For NEWLOCALS frames, the dict is
/// only allocated on first access (most function frames never need it).
pub struct FrameLocals {
    /// The mapping; empty until first access for NEWLOCALS frames,
    /// pre-populated via `with_locals` otherwise.
    inner: OnceCell<ArgMapping>,
}
496
497impl FrameLocals {
498    /// Create with an already-initialized locals mapping (non-NEWLOCALS frames).
499    fn with_locals(locals: ArgMapping) -> Self {
500        let cell = OnceCell::new();
501        let _ = cell.set(locals);
502        Self { inner: cell }
503    }
504
505    /// Create an empty lazy locals (for NEWLOCALS frames).
506    /// The dict will be created on first access.
507    fn lazy() -> Self {
508        Self {
509            inner: OnceCell::new(),
510        }
511    }
512
513    /// Get the locals mapping, creating it lazily if needed.
514    #[inline]
515    pub fn get_or_create(&self, vm: &VirtualMachine) -> &ArgMapping {
516        self.inner
517            .get_or_init(|| ArgMapping::from_dict_exact(vm.ctx.new_dict()))
518    }
519
520    /// Get the locals mapping if already created.
521    #[inline]
522    pub fn get(&self) -> Option<&ArgMapping> {
523        self.inner.get()
524    }
525
526    #[inline]
527    pub fn mapping(&self, vm: &VirtualMachine) -> crate::protocol::PyMapping<'_> {
528        self.get_or_create(vm).mapping()
529    }
530
531    #[inline]
532    pub fn clone_mapping(&self, vm: &VirtualMachine) -> ArgMapping {
533        self.get_or_create(vm).clone()
534    }
535
536    pub fn into_object(&self, vm: &VirtualMachine) -> PyObjectRef {
537        self.clone_mapping(vm).into()
538    }
539
540    pub fn as_object(&self, vm: &VirtualMachine) -> &PyObject {
541        self.get_or_create(vm).obj()
542    }
543}
544
545impl fmt::Debug for FrameLocals {
546    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
547        f.debug_struct("FrameLocals")
548            .field("initialized", &self.inner.get().is_some())
549            .finish()
550    }
551}
552
553impl Clone for FrameLocals {
554    fn clone(&self) -> Self {
555        let cell = OnceCell::new();
556        if let Some(locals) = self.inner.get() {
557            let _ = cell.set(locals.clone());
558        }
559        Self { inner: cell }
560    }
561}
562
563unsafe impl Traverse for FrameLocals {
564    fn traverse(&self, tracer_fn: &mut TraverseFn<'_>) {
565        if let Some(locals) = self.inner.get() {
566            locals.traverse(tracer_fn);
567        }
568    }
569}
570
/// Lightweight execution frame. Not a PyObject.
/// Analogous to CPython's `_PyInterpreterFrame`.
///
/// Currently always embedded inside a `Frame` PyObject via `FrameUnsafeCell`.
/// In future PRs this will be usable independently for normal function calls
/// (allocated on the Rust stack + DataStack), eliminating PyObject overhead.
pub struct InterpreterFrame {
    /// Code object being executed by this frame.
    pub code: PyRef<PyCode>,
    /// Function object this frame was created from, if any.
    pub func_obj: Option<PyObjectRef>,

    /// Unified storage for local variables and evaluation stack.
    pub(crate) localsplus: LocalsPlus,
    /// Lazily materialized `f_locals` mapping.
    pub locals: FrameLocals,
    /// The frame's `f_globals` dict.
    pub globals: PyDictRef,
    /// The frame's `f_builtins` namespace object.
    pub builtins: PyObjectRef,

    /// index of last instruction ran
    pub lasti: PyAtomic<u32>,
    /// tracer function for this frame (usually is None)
    pub trace: PyMutex<PyObjectRef>,

    /// Previous line number for LINE event suppression.
    pub(crate) prev_line: u32,

    // member
    pub trace_lines: PyMutex<bool>,
    pub trace_opcodes: PyMutex<bool>,
    pub temporary_refs: PyMutex<Vec<PyObjectRef>>,
    /// Back-reference to owning generator/coroutine/async generator.
    /// Borrowed reference (not ref-counted) to avoid Generator↔Frame cycle.
    /// Cleared by the generator's Drop impl.
    pub generator: PyAtomicBorrow,
    /// Previous frame in the call chain for signal-safe traceback walking.
    /// Mirrors `_PyInterpreterFrame.previous`.
    pub(crate) previous: AtomicPtr<Frame>,
    /// Who owns this frame. Mirrors `_PyInterpreterFrame.owner`.
    /// Used by `frame.clear()` to reject clearing an executing frame,
    /// even when called from a different thread.
    pub(crate) owner: atomic::AtomicI8,
    /// Set when f_locals is accessed. Cleared after locals_to_fast() sync.
    pub(crate) locals_dirty: atomic::AtomicBool,
    /// Number of stack entries to pop after set_f_lineno returns to the
    /// execution loop.  set_f_lineno cannot pop directly because the
    /// execution loop holds the state mutex.
    pub(crate) pending_stack_pops: PyAtomic<u32>,
    /// The encoded stack state that set_f_lineno wants to unwind *from*.
    /// Used together with `pending_stack_pops` to identify Except entries
    /// that need special exception-state handling.
    pub(crate) pending_unwind_from_stack: PyAtomic<i64>,
}
621
/// Python-visible frame object. Currently always wraps an `InterpreterFrame`.
/// Analogous to CPython's `PyFrameObject`.
#[pyclass(module = false, name = "frame", traverse = "manual")]
pub struct Frame {
    /// Embedded interpreter frame. Interior mutability via `FrameUnsafeCell`
    /// because the VM mutates frame state while callers hold `&Frame`
    /// (see the `Deref` impl's safety argument below).
    pub(crate) iframe: FrameUnsafeCell<InterpreterFrame>,
}
628
impl core::ops::Deref for Frame {
    type Target = InterpreterFrame;
    /// Transparent access to InterpreterFrame fields.
    ///
    /// # Safety argument
    /// Immutable fields (code, globals, builtins, func_obj, locals) are safe
    /// to access at any time. Atomic/mutex fields (lasti, trace, owner, etc.)
    /// provide their own synchronization. Mutable fields (localsplus, prev_line)
    /// are only mutated during single-threaded execution via `with_exec`.
    #[inline(always)]
    fn deref(&self) -> &InterpreterFrame {
        // SAFETY: shared access only; see the safety argument above.
        unsafe { &*self.iframe.get() }
    }
}
643
impl PyPayload for Frame {
    /// Python-level type of this payload: the interpreter's `frame` type.
    #[inline]
    fn class(ctx: &Context) -> &'static Py<PyType> {
        ctx.types.frame_type
    }
}
650
unsafe impl Traverse for Frame {
    fn traverse(&self, tracer_fn: &mut TraverseFn<'_>) {
        // SAFETY: GC traversal does not run concurrently with frame execution.
        let iframe = unsafe { &*self.iframe.get() };
        iframe.code.traverse(tracer_fn);
        iframe.func_obj.traverse(tracer_fn);
        iframe.localsplus.traverse(tracer_fn);
        iframe.locals.traverse(tracer_fn);
        iframe.globals.traverse(tracer_fn);
        iframe.builtins.traverse(tracer_fn);
        iframe.trace.traverse(tracer_fn);
        iframe.temporary_refs.traverse(tracer_fn);
        // Note: `generator` is a borrowed (non ref-counted) back-reference
        // per its field docs, so it is deliberately not traversed here.
    }
}
665
// Running a frame can result in one of the below:
pub enum ExecutionResult {
    /// The frame completed and produced a return value.
    Return(PyObjectRef),
    /// The frame suspended at a yield, producing this value.
    Yield(PyObjectRef),
}

/// A valid execution result (`None` = keep executing), or an exception
type FrameResult = PyResult<Option<ExecutionResult>>;
674
675impl Frame {
    /// Construct a frame for `code` in `scope`.
    ///
    /// `closure` supplies the free-variable cells; `use_datastack` selects
    /// data-stack-backed localsplus (normal calls) vs heap-backed
    /// (generators/exec). The new frame starts with `lasti == 0` and is
    /// owned by a `FrameObject` until `set_generator`/execution changes that.
    pub(crate) fn new(
        code: PyRef<PyCode>,
        scope: Scope,
        builtins: PyObjectRef,
        closure: &[PyCellRef],
        func_obj: Option<PyObjectRef>,
        use_datastack: bool,
        vm: &VirtualMachine,
    ) -> Self {
        let nlocalsplus = code.localspluskinds.len();
        let max_stackdepth = code.max_stackdepth as usize;
        let mut localsplus = if use_datastack {
            LocalsPlus::new_on_datastack(nlocalsplus, max_stackdepth, vm)
        } else {
            LocalsPlus::new(nlocalsplus, max_stackdepth)
        };

        // Pre-copy closure cells into free var slots so that locals() works
        // even before COPY_FREE_VARS runs (e.g. coroutine before first send).
        // COPY_FREE_VARS will overwrite these on first execution.
        {
            let nfrees = code.freevars.len();
            if nfrees > 0 {
                // Free vars occupy the trailing slots of fastlocals.
                let freevar_start = nlocalsplus - nfrees;
                let fastlocals = localsplus.fastlocals_mut();
                for (i, cell) in closure.iter().enumerate() {
                    fastlocals[freevar_start + i] = Some(cell.clone().into());
                }
            }
        }

        // For generators/coroutines, initialize prev_line to the def line
        // so that preamble instructions (RETURN_GENERATOR, POP_TOP) don't
        // fire spurious LINE events.
        let prev_line = if code
            .flags
            .intersects(bytecode::CodeFlags::GENERATOR | bytecode::CodeFlags::COROUTINE)
        {
            code.first_line_number.map_or(0, |line| line.get() as u32)
        } else {
            0
        };

        let iframe = InterpreterFrame {
            localsplus,
            // Explicit scope locals win; otherwise NEWLOCALS frames defer
            // dict creation, and module-level frames fall back to globals.
            locals: match scope.locals {
                Some(locals) => FrameLocals::with_locals(locals),
                None if code.flags.contains(bytecode::CodeFlags::NEWLOCALS) => FrameLocals::lazy(),
                None => {
                    FrameLocals::with_locals(ArgMapping::from_dict_exact(scope.globals.clone()))
                }
            },
            globals: scope.globals,
            builtins,
            code,
            func_obj,
            lasti: Radium::new(0),
            prev_line,
            trace: PyMutex::new(vm.ctx.none()),
            trace_lines: PyMutex::new(true),
            trace_opcodes: PyMutex::new(false),
            temporary_refs: PyMutex::new(vec![]),
            generator: PyAtomicBorrow::new(),
            previous: AtomicPtr::new(core::ptr::null_mut()),
            owner: atomic::AtomicI8::new(FrameOwner::FrameObject as i8),
            locals_dirty: atomic::AtomicBool::new(false),
            pending_stack_pops: Default::default(),
            pending_unwind_from_stack: Default::default(),
        };
        Self {
            iframe: FrameUnsafeCell::new(iframe),
        }
    }
749
    /// Access fastlocals immutably.
    ///
    /// # Safety
    /// Caller must ensure no concurrent mutable access (frame not executing,
    /// or called from the same thread during trace callback).
    #[inline(always)]
    pub unsafe fn fastlocals(&self) -> &[Option<PyObjectRef>] {
        // SAFETY: forwarded to the caller's contract above.
        unsafe { (*self.iframe.get()).localsplus.fastlocals() }
    }
759
    /// Access fastlocals mutably.
    ///
    /// # Safety
    /// Caller must ensure exclusive access (frame not executing).
    #[inline(always)]
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn fastlocals_mut(&self) -> &mut [Option<PyObjectRef>] {
        // SAFETY: forwarded to the caller's contract above.
        unsafe { (*self.iframe.get()).localsplus.fastlocals_mut() }
    }
769
    /// Migrate data-stack-backed storage to the heap, preserving all values,
    /// and return the data stack base pointer for `DataStack::pop()`.
    /// Returns `None` if already heap-backed.
    ///
    /// # Safety
    /// Caller must ensure the frame is not executing and the returned
    /// pointer is passed to `VirtualMachine::datastack_pop()` (otherwise the
    /// data stack region leaks; see `LocalsPlus::materialize_to_heap`).
    pub(crate) unsafe fn materialize_localsplus(&self) -> Option<*mut u8> {
        unsafe { (*self.iframe.get()).localsplus.materialize_to_heap() }
    }
780
    /// Clear evaluation stack and state-owned cell/free references.
    /// For full local/cell cleanup, call `clear_locals_and_stack()`.
    pub(crate) fn clear_stack_and_cells(&self) {
        // SAFETY: Called when frame is not executing (generator closed).
        // Cell refs in fastlocals[nlocals..] are cleared by clear_locals_and_stack().
        unsafe {
            (*self.iframe.get()).localsplus.stack_clear();
        }
    }
790
791    /// Clear locals and stack after generator/coroutine close.
792    /// Releases references held by the frame, matching _PyFrame_ClearLocals.
793    pub(crate) fn clear_locals_and_stack(&self) {
794        self.clear_stack_and_cells();
795        // SAFETY: Frame is not executing (generator closed).
796        let fastlocals = unsafe { (*self.iframe.get()).localsplus.fastlocals_mut() };
797        for slot in fastlocals.iter_mut() {
798            *slot = None;
799        }
800    }
801
802    /// Get cell contents by localsplus index.
803    pub(crate) fn get_cell_contents(&self, localsplus_idx: usize) -> Option<PyObjectRef> {
804        // SAFETY: Frame not executing; no concurrent mutation.
805        let fastlocals = unsafe { (*self.iframe.get()).localsplus.fastlocals() };
806        fastlocals
807            .get(localsplus_idx)
808            .and_then(|slot| slot.as_ref())
809            .and_then(|obj| obj.downcast_ref::<PyCell>())
810            .and_then(|cell| cell.get())
811    }
812
    /// Store a borrowed back-reference to the owning generator/coroutine.
    /// The caller must ensure the generator outlives the frame.
    pub fn set_generator(&self, generator: &PyObject) {
        self.generator.store(generator);
        // Mark the frame generator-owned so frame.clear() refuses to clear
        // it while suspended (see the `owner` field docs).
        self.owner
            .store(FrameOwner::Generator as i8, atomic::Ordering::Release);
    }
820
    /// Clear the generator back-reference. Called when the generator is finalized.
    /// Ownership reverts to `FrameObject` ("not executing").
    pub fn clear_generator(&self) {
        self.generator.clear();
        self.owner
            .store(FrameOwner::FrameObject as i8, atomic::Ordering::Release);
    }
827
    /// Source location of the most recently executed instruction.
    ///
    /// NOTE(review): indexes `locations[lasti - 1]`, so this assumes at
    /// least one instruction has run (`lasti >= 1`); it would underflow
    /// otherwise — confirm callers uphold this invariant.
    pub fn current_location(&self) -> SourceLocation {
        self.code.locations[self.lasti() as usize - 1].0
    }
831
    /// Get the previous frame pointer for signal-safe traceback walking.
    ///
    /// Returns a raw, borrowed pointer (may be null); callers must not
    /// dereference it unless they know the previous frame is still alive.
    pub fn previous_frame(&self) -> *const Frame {
        self.previous.load(atomic::Ordering::Relaxed)
    }
836
    /// Current instruction index (f_lasti).
    pub fn lasti(&self) -> u32 {
        self.lasti.load(Relaxed)
    }
840
    /// Set the instruction index (used e.g. by f_lineno assignment).
    pub fn set_lasti(&self, val: u32) {
        self.lasti.store(val, Relaxed);
    }
844
    /// Number of value-stack pops recorded by set_f_lineno for deferred
    /// unwinding inside the execution loop.
    pub(crate) fn pending_stack_pops(&self) -> u32 {
        self.pending_stack_pops.load(Relaxed)
    }
848
    /// Record how many value-stack pops the execution loop must perform
    /// on behalf of set_f_lineno (which cannot pop directly).
    pub(crate) fn set_pending_stack_pops(&self, val: u32) {
        self.pending_stack_pops.store(val, Relaxed);
    }
852
    /// Encoded stack-analysis state (see `stack_analysis`) captured by
    /// set_f_lineno, describing the kinds of entries to be popped.
    pub(crate) fn pending_unwind_from_stack(&self) -> i64 {
        self.pending_unwind_from_stack.load(Relaxed)
    }
856
    /// Store the encoded stack-analysis state accompanying
    /// `set_pending_stack_pops` for deferred unwinding.
    pub(crate) fn set_pending_unwind_from_stack(&self, val: i64) {
        self.pending_unwind_from_stack.store(val, Relaxed);
    }
860
    /// Sync locals dict back to fastlocals. Called before generator/coroutine resume
    /// to apply any modifications made via f_locals.
    ///
    /// No-op unless `locals_dirty` was set (Acquire pairs with the Release
    /// store made by whatever marked the dict dirty).
    ///
    /// NOTE(review): names missing from the dict (KeyError) leave the old
    /// fastlocal value in place rather than clearing the slot, and only
    /// plain varnames are synced — cell/free slots are untouched. Confirm
    /// both are intended.
    pub fn locals_to_fast(&self, vm: &VirtualMachine) -> PyResult<()> {
        if !self.locals_dirty.load(atomic::Ordering::Acquire) {
            return Ok(());
        }
        let code = &**self.code;
        // SAFETY: Called before generator resume; no concurrent access.
        let fastlocals = unsafe { (*self.iframe.get()).localsplus.fastlocals_mut() };
        let locals_map = self.locals.mapping(vm);
        for (i, &varname) in code.varnames.iter().enumerate() {
            // Defensive bound: varnames may outnumber fastlocal slots.
            if i >= fastlocals.len() {
                break;
            }
            match locals_map.subscript(varname, vm) {
                Ok(value) => fastlocals[i] = Some(value),
                // Name absent from f_locals: keep the existing slot value.
                Err(e) if e.fast_isinstance(vm.ctx.exceptions.key_error) => {}
                Err(e) => return Err(e),
            }
        }
        self.locals_dirty.store(false, atomic::Ordering::Release);
        Ok(())
    }
884
    /// Snapshot the fast locals into the frame's locals mapping and return
    /// that mapping (the FastToLocals direction: fastlocals -> dict).
    ///
    /// Walks every localsplus slot, resolving its name from varnames /
    /// cellvars / freevars according to the slot's kind flags, and writes
    /// the current value into the mapping. Empty slots for hidden
    /// (comprehension) variables are skipped.
    pub fn locals(&self, vm: &VirtualMachine) -> PyResult<ArgMapping> {
        use rustpython_compiler_core::bytecode::{
            CO_FAST_CELL, CO_FAST_FREE, CO_FAST_HIDDEN, CO_FAST_LOCAL,
        };
        // SAFETY: Either the frame is not executing (caller checked owner),
        // or we're in a trace callback on the same thread that's executing.
        let locals = &self.locals;
        let code = &**self.code;
        let locals_map = locals.mapping(vm);
        let fastlocals = unsafe { (*self.iframe.get()).localsplus.fastlocals() };

        // Iterate through all localsplus slots using localspluskinds
        let nlocalsplus = code.localspluskinds.len();
        let nfrees = code.freevars.len();
        // Free-variable slots occupy the tail of localsplus.
        let free_start = nlocalsplus - nfrees;
        let is_optimized = code.flags.contains(bytecode::CodeFlags::OPTIMIZED);

        // Track which non-merged cellvar index we're at
        let mut nonmerged_cell_idx = 0;

        for (i, &kind) in code.localspluskinds.iter().enumerate() {
            if kind & CO_FAST_HIDDEN != 0 {
                // Hidden variables are only skipped when their slot is empty.
                // After a comprehension restores values, they should appear in locals().
                let slot_empty = match fastlocals[i].as_ref() {
                    None => true,
                    Some(obj) => {
                        if kind & (CO_FAST_CELL | CO_FAST_FREE) != 0 {
                            // If it's a PyCell, check if the cell is empty.
                            // If it's a raw value (merged cell during inlined comp), not empty.
                            obj.downcast_ref::<PyCell>()
                                .is_some_and(|cell| cell.get().is_none())
                        } else {
                            false
                        }
                    }
                };
                if slot_empty {
                    continue;
                }
            }

            // Free variables only included for optimized (function-like) scopes.
            // Class/module scopes should not expose free vars in locals().
            if kind == CO_FAST_FREE && !is_optimized {
                continue;
            }

            // Get the name for this slot
            let name = if kind & CO_FAST_LOCAL != 0 {
                code.varnames[i]
            } else if kind & CO_FAST_FREE != 0 {
                code.freevars[i - free_start]
            } else if kind & CO_FAST_CELL != 0 {
                // Non-merged cell: find the name by skipping merged cellvars
                // (cellvars that share a name with a varname have no slot of
                // their own here).
                let mut found_name = None;
                let mut skip = nonmerged_cell_idx;
                for cv in code.cellvars.iter() {
                    let is_merged = code.varnames.contains(cv);
                    if !is_merged {
                        if skip == 0 {
                            found_name = Some(*cv);
                            break;
                        }
                        skip -= 1;
                    }
                }
                nonmerged_cell_idx += 1;
                match found_name {
                    Some(n) => n,
                    None => continue,
                }
            } else {
                continue;
            };

            // Get the value
            let value = if kind & (CO_FAST_CELL | CO_FAST_FREE) != 0 {
                // Cell or free var: extract value from PyCell.
                // During inlined comprehensions, a merged cell slot may hold a raw
                // value (not a PyCell) after LOAD_FAST_AND_CLEAR + STORE_FAST.
                fastlocals[i].as_ref().and_then(|obj| {
                    if let Some(cell) = obj.downcast_ref::<PyCell>() {
                        cell.get()
                    } else {
                        Some(obj.clone())
                    }
                })
            } else {
                // Regular local
                fastlocals[i].clone()
            };

            // A `None` value deletes the name; tolerate KeyError from that.
            match locals_map.ass_subscript(name, value, vm) {
                Ok(()) => {}
                Err(e) if e.fast_isinstance(vm.ctx.exceptions.key_error) => {}
                Err(e) => return Err(e),
            }
        }
        Ok(locals.clone_mapping(vm))
    }
986}
987
impl Py<Frame> {
    /// Build a short-lived `ExecutingFrame` view over the frame's interior
    /// state and run `f` with it. All bytecode execution funnels through
    /// here so the UnsafeCell access has a single choke point.
    #[inline(always)]
    fn with_exec<R>(&self, vm: &VirtualMachine, f: impl FnOnce(ExecutingFrame<'_>) -> R) -> R {
        // SAFETY: Frame execution is single-threaded. Only one thread at a time
        // executes a given frame (enforced by the owner field and generator
        // running flag). Same safety argument as FastLocals (UnsafeCell).
        let iframe = unsafe { &mut *self.iframe.get() };
        let exec = ExecutingFrame {
            code: &iframe.code,
            localsplus: &mut iframe.localsplus,
            locals: &iframe.locals,
            globals: &iframe.globals,
            builtins: &iframe.builtins,
            // Only cache the builtins dict when globals is an exact dict;
            // dict subclasses must go through the generic lookup path.
            builtins_dict: if iframe.globals.class().is(vm.ctx.types.dict_type) {
                iframe
                    .builtins
                    .downcast_ref_if_exact::<PyDict>(vm)
                    // SAFETY: downcast_ref_if_exact already verified exact type
                    .map(|d| unsafe { PyExact::ref_unchecked(d) })
            } else {
                None
            },
            lasti: &iframe.lasti,
            object: self,
            prev_line: &mut iframe.prev_line,
            monitoring_mask: 0,
        };
        f(exec)
    }

    // #[cfg_attr(feature = "flame-it", flame("Frame"))]
    /// Execute the frame's bytecode until it returns or yields.
    pub fn run(&self, vm: &VirtualMachine) -> PyResult<ExecutionResult> {
        self.with_exec(vm, |mut exec| exec.run(vm))
    }

    /// Resume a suspended (generator/coroutine) frame, pushing the sent
    /// value onto the value stack first when one is provided.
    pub(crate) fn resume(
        &self,
        value: Option<PyObjectRef>,
        vm: &VirtualMachine,
    ) -> PyResult<ExecutionResult> {
        self.with_exec(vm, |mut exec| {
            if let Some(value) = value {
                exec.push_value(value)
            }
            exec.run(vm)
        })
    }

    /// Throw an exception into a suspended generator frame
    /// (generator.throw support).
    pub(crate) fn gen_throw(
        &self,
        vm: &VirtualMachine,
        exc_type: PyObjectRef,
        exc_val: PyObjectRef,
        exc_tb: PyObjectRef,
    ) -> PyResult<ExecutionResult> {
        self.with_exec(vm, |mut exec| exec.gen_throw(vm, exc_type, exc_val, exc_tb))
    }

    /// The object this frame is currently delegating to via `yield from` /
    /// `await`, if any. Returns `None` for a frame being executed right now.
    pub fn yield_from_target(&self) -> Option<PyObjectRef> {
        // If the frame is currently executing (owned by thread), it has no
        // yield-from target to report.
        let owner = FrameOwner::from_i8(self.owner.load(atomic::Ordering::Acquire));
        if owner == FrameOwner::Thread {
            return None;
        }
        // SAFETY: Frame is not executing, so UnsafeCell access is safe.
        let iframe = unsafe { &mut *self.iframe.get() };
        // builtins_dict is left None: yield_from_target never does lookups.
        let exec = ExecutingFrame {
            code: &iframe.code,
            localsplus: &mut iframe.localsplus,
            locals: &iframe.locals,
            globals: &iframe.globals,
            builtins: &iframe.builtins,
            builtins_dict: None,
            lasti: &iframe.lasti,
            object: self,
            prev_line: &mut iframe.prev_line,
            monitoring_mask: 0,
        };
        exec.yield_from_target().map(PyObject::to_owned)
    }

    /// Heuristic: does this frame belong to importlib's bootstrap machinery?
    /// (Its code filename contains both "importlib" and "_bootstrap".)
    pub fn is_internal_frame(&self) -> bool {
        let code = self.f_code();
        let filename = code.co_filename();
        let filename = filename.as_bytes();
        filename.find(b"importlib").is_some() && filename.find(b"_bootstrap").is_some()
    }

    /// Walk f_back past importlib-internal frames to the first user-visible
    /// caller frame, or `None` if there is none.
    pub fn next_external_frame(&self, vm: &VirtualMachine) -> Option<FrameRef> {
        let mut frame = self.f_back(vm);
        while let Some(ref f) = frame {
            if !f.is_internal_frame() {
                break;
            }
            frame = f.f_back(vm);
        }
        frame
    }
}
1088
/// An executing frame; borrows mutable frame-internal data for the duration
/// of bytecode execution.
struct ExecutingFrame<'a> {
    /// The code object being executed.
    code: &'a PyRef<PyCode>,
    /// Fast locals, cell/free slots, and the value stack.
    localsplus: &'a mut LocalsPlus,
    /// Backing storage for f_locals.
    locals: &'a FrameLocals,
    /// Module globals dict.
    globals: &'a PyDictRef,
    /// Builtins namespace object.
    builtins: &'a PyObjectRef,
    /// Cached downcast of builtins to PyDict for fast LOAD_GLOBAL.
    /// Only set when both globals and builtins are exact dict types (not
    /// subclasses), so that `__missing__` / `__getitem__` overrides are
    /// not bypassed.
    builtins_dict: Option<&'a PyExact<PyDict>>,
    /// Back-reference to the owning `Frame` (tracebacks, tracing state).
    object: &'a Py<Frame>,
    /// Shared instruction index; also readable from the Frame as f_lasti.
    lasti: &'a PyAtomic<u32>,
    /// Last line number reported to the line-tracing machinery.
    prev_line: &'a mut u32,
    /// Cached monitoring events mask. Reloaded at Resume instruction only.
    monitoring_mask: u32,
}
1108
1109#[inline]
1110fn specialization_compact_int_value(i: &PyInt, vm: &VirtualMachine) -> Option<isize> {
1111    // _PyLong_IsCompact(): a one-digit PyLong (base 2^30),
1112    // i.e. abs(value) <= 2^30 - 1.
1113    const CPYTHON_COMPACT_LONG_ABS_MAX: i64 = (1i64 << 30) - 1;
1114    let v = i.try_to_primitive::<i64>(vm).ok()?;
1115    if (-CPYTHON_COMPACT_LONG_ABS_MAX..=CPYTHON_COMPACT_LONG_ABS_MAX).contains(&v) {
1116        Some(v as isize)
1117    } else {
1118        None
1119    }
1120}
1121
1122#[inline]
1123fn compact_int_from_obj(obj: &PyObject, vm: &VirtualMachine) -> Option<isize> {
1124    obj.downcast_ref_if_exact::<PyInt>(vm)
1125        .and_then(|i| specialization_compact_int_value(i, vm))
1126}
1127
1128#[inline]
1129fn exact_float_from_obj(obj: &PyObject, vm: &VirtualMachine) -> Option<f64> {
1130    obj.downcast_ref_if_exact::<PyFloat>(vm).map(|f| f.to_f64())
1131}
1132
1133#[inline]
1134fn specialization_nonnegative_compact_index(i: &PyInt, vm: &VirtualMachine) -> Option<usize> {
1135    // _PyLong_IsNonNegativeCompact(): a single base-2^30 digit.
1136    const CPYTHON_COMPACT_LONG_MAX: u64 = (1u64 << 30) - 1;
1137    let v = i.try_to_primitive::<u64>(vm).ok()?;
1138    if v <= CPYTHON_COMPACT_LONG_MAX {
1139        Some(v as usize)
1140    } else {
1141        None
1142    }
1143}
1144
/// Return the frame's data-stack allocation to the VM, if one was handed out.
///
/// NOTE(review): soundness of the unsafe block depends on
/// `materialize_localsplus` returning exactly the base pointer that
/// `vm.datastack_pop` expects (its allocation counterpart is defined
/// elsewhere) — confirm the pairing there.
fn release_datastack_frame(frame: &Py<Frame>, vm: &VirtualMachine) {
    unsafe {
        if let Some(base) = frame.materialize_localsplus() {
            vm.datastack_pop(base);
        }
    }
}
1152
/// Predicate deciding whether a BINARY_OP_EXTEND fast path applies to the
/// given (lhs, rhs) operand pair.
type BinaryOpExtendGuard = fn(&PyObject, &PyObject, &VirtualMachine) -> bool;
/// The fast-path computation; returning `None` falls back to the generic op.
type BinaryOpExtendAction = fn(&PyObject, &PyObject, &VirtualMachine) -> Option<PyObjectRef>;

/// One entry of the BINARY_OP_EXTEND table: an operator plus the guard /
/// action pair implementing its specialized fast path.
struct BinaryOpExtendSpecializationDescr {
    // Which bytecode binary operator this entry specializes.
    oparg: bytecode::BinaryOperator,
    // Operand-type check that must hold for `action` to be valid.
    guard: BinaryOpExtendGuard,
    // The specialized computation itself.
    action: BinaryOpExtendAction,
}

// Offset (in inline-cache entries) of the external cache slot used by
// BINARY_OP_EXTEND. NOTE(review): the exact cache layout is defined by the
// bytecode module — confirm this offset against it.
const BINARY_OP_EXTEND_EXTERNAL_CACHE_OFFSET: usize = 1;
1163
1164#[inline]
1165fn compactlongs_guard(lhs: &PyObject, rhs: &PyObject, vm: &VirtualMachine) -> bool {
1166    compact_int_from_obj(lhs, vm).is_some() && compact_int_from_obj(rhs, vm).is_some()
1167}
1168
1169macro_rules! bitwise_longs_action {
1170    ($name:ident, $op:tt) => {
1171        #[inline]
1172        fn $name(lhs: &PyObject, rhs: &PyObject, vm: &VirtualMachine) -> Option<PyObjectRef> {
1173            let lhs_val = compact_int_from_obj(lhs, vm)?;
1174            let rhs_val = compact_int_from_obj(rhs, vm)?;
1175            Some(vm.ctx.new_int(lhs_val $op rhs_val).into())
1176        }
1177    };
1178}
1179bitwise_longs_action!(compactlongs_or, |);
1180bitwise_longs_action!(compactlongs_and, &);
1181bitwise_longs_action!(compactlongs_xor, ^);
1182
1183#[inline]
1184fn float_compactlong_guard(lhs: &PyObject, rhs: &PyObject, vm: &VirtualMachine) -> bool {
1185    exact_float_from_obj(lhs, vm).is_some_and(|f| !f.is_nan())
1186        && compact_int_from_obj(rhs, vm).is_some()
1187}
1188
1189#[inline]
1190fn nonzero_float_compactlong_guard(lhs: &PyObject, rhs: &PyObject, vm: &VirtualMachine) -> bool {
1191    float_compactlong_guard(lhs, rhs, vm) && compact_int_from_obj(rhs, vm).is_some_and(|v| v != 0)
1192}
1193
1194macro_rules! float_long_action {
1195    ($name:ident, $op:tt) => {
1196        #[inline]
1197        fn $name(lhs: &PyObject, rhs: &PyObject, vm: &VirtualMachine) -> Option<PyObjectRef> {
1198            let lhs_val = exact_float_from_obj(lhs, vm)?;
1199            let rhs_val = compact_int_from_obj(rhs, vm)?;
1200            Some(vm.ctx.new_float(lhs_val $op rhs_val as f64).into())
1201        }
1202    };
1203}
1204float_long_action!(float_compactlong_add, +);
1205float_long_action!(float_compactlong_subtract, -);
1206float_long_action!(float_compactlong_multiply, *);
1207float_long_action!(float_compactlong_true_div, /);
1208
1209#[inline]
1210fn compactlong_float_guard(lhs: &PyObject, rhs: &PyObject, vm: &VirtualMachine) -> bool {
1211    compact_int_from_obj(lhs, vm).is_some()
1212        && exact_float_from_obj(rhs, vm).is_some_and(|f| !f.is_nan())
1213}
1214
1215#[inline]
1216fn nonzero_compactlong_float_guard(lhs: &PyObject, rhs: &PyObject, vm: &VirtualMachine) -> bool {
1217    compactlong_float_guard(lhs, rhs, vm) && exact_float_from_obj(rhs, vm).is_some_and(|f| f != 0.0)
1218}
1219
1220macro_rules! long_float_action {
1221    ($name:ident, $op:tt) => {
1222        #[inline]
1223        fn $name(lhs: &PyObject, rhs: &PyObject, vm: &VirtualMachine) -> Option<PyObjectRef> {
1224            let lhs_val = compact_int_from_obj(lhs, vm)?;
1225            let rhs_val = exact_float_from_obj(rhs, vm)?;
1226            Some(vm.ctx.new_float(lhs_val as f64 $op rhs_val).into())
1227        }
1228    };
1229}
1230long_float_action!(compactlong_float_add, +);
1231long_float_action!(compactlong_float_subtract, -);
1232long_float_action!(compactlong_float_multiply, *);
1233long_float_action!(compactlong_float_true_div, /);
1234
/// Specialization table for BINARY_OP_EXTEND: each entry pairs a binary
/// operator with a guard validating the operand types and an action
/// computing the fast-path result. Entries are matched against the
/// instruction's operator by the specializer (defined elsewhere); note that
/// true-division entries use the `nonzero_*` guards so a zero divisor
/// deoptimizes to the generic path (which raises ZeroDivisionError).
static BINARY_OP_EXTEND_DESCRIPTORS: &[BinaryOpExtendSpecializationDescr] = &[
    // long-long arithmetic
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::Or,
        guard: compactlongs_guard,
        action: compactlongs_or,
    },
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::And,
        guard: compactlongs_guard,
        action: compactlongs_and,
    },
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::Xor,
        guard: compactlongs_guard,
        action: compactlongs_xor,
    },
    // In-place bitwise ops on ints have no observable in-place behavior,
    // so they share the plain actions.
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::InplaceOr,
        guard: compactlongs_guard,
        action: compactlongs_or,
    },
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::InplaceAnd,
        guard: compactlongs_guard,
        action: compactlongs_and,
    },
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::InplaceXor,
        guard: compactlongs_guard,
        action: compactlongs_xor,
    },
    // float-long arithmetic
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::Add,
        guard: float_compactlong_guard,
        action: float_compactlong_add,
    },
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::Subtract,
        guard: float_compactlong_guard,
        action: float_compactlong_subtract,
    },
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::TrueDivide,
        guard: nonzero_float_compactlong_guard,
        action: float_compactlong_true_div,
    },
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::Multiply,
        guard: float_compactlong_guard,
        action: float_compactlong_multiply,
    },
    // long-float arithmetic
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::Add,
        guard: compactlong_float_guard,
        action: compactlong_float_add,
    },
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::Subtract,
        guard: compactlong_float_guard,
        action: compactlong_float_subtract,
    },
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::TrueDivide,
        guard: nonzero_compactlong_float_guard,
        action: compactlong_float_true_div,
    },
    BinaryOpExtendSpecializationDescr {
        oparg: bytecode::BinaryOperator::Multiply,
        guard: compactlong_float_guard,
        action: compactlong_float_multiply,
    },
];
1310
1311impl fmt::Debug for ExecutingFrame<'_> {
1312    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1313        f.debug_struct("ExecutingFrame")
1314            .field("code", self.code)
1315            .field("stack_len", &self.localsplus.stack_len())
1316            .finish()
1317    }
1318}
1319
1320impl ExecutingFrame<'_> {
    /// Whether sys.monitoring events are suppressed for this frame's code.
    ///
    /// Only the synthetic `_Py_InitCleanup` shim code object qualifies; it
    /// is an implementation detail that must stay invisible to tools.
    #[inline]
    fn monitoring_disabled_for_code(&self, vm: &VirtualMachine) -> bool {
        self.code.is(&vm.ctx.init_cleanup_code)
    }
1325
    /// Create a fresh frame running the `_Py_InitCleanup` shim code object,
    /// reusing this frame's globals and builtins with an empty locals dict.
    ///
    /// NOTE(review): the trailing `None` / `true` arguments follow
    /// `Frame::new`'s signature defined elsewhere — confirm their meaning
    /// there.
    fn specialization_new_init_cleanup_frame(&self, vm: &VirtualMachine) -> FrameRef {
        Frame::new(
            vm.ctx.init_cleanup_code.clone(),
            Scope::new(
                Some(ArgMapping::from_dict_exact(vm.ctx.new_dict())),
                self.globals.clone(),
            ),
            self.builtins.clone(),
            &[],
            None,
            true,
            vm,
        )
        .into_ref(&vm.ctx)
    }
1341
    /// Run `__init__` under the `_Py_InitCleanup` shim frame and return the
    /// newly constructed object.
    ///
    /// The shim frame is pushed (untraced) between the caller and
    /// `__init__`; the new object is seeded onto the shim's value stack,
    /// `__init__` runs in its own frame, and its result is then fed back to
    /// the shim, whose bytecode produces the final return value
    /// (presumably validating that `__init__` returned None — the shim's
    /// code object is defined elsewhere; confirm there).
    fn specialization_run_init_cleanup_shim(
        &self,
        new_obj: PyObjectRef,
        init_func: &Py<PyFunction>,
        pos_args: Vec<PyObjectRef>,
        vm: &VirtualMachine,
    ) -> PyResult<PyObjectRef> {
        let shim = self.specialization_new_init_cleanup_frame(vm);
        let shim_result = vm.with_frame_untraced(shim.clone(), |shim| {
            // Seed the shim's stack with the freshly allocated object.
            shim.with_exec(vm, |mut exec| exec.push_value(new_obj.clone()));

            // __init__ receives the new object as `self` plus the call args.
            let mut all_args = Vec::with_capacity(pos_args.len() + 1);
            all_args.push(new_obj.clone());
            all_args.extend(pos_args);

            let init_frame = init_func.prepare_exact_args_frame(all_args, vm);
            let init_result = vm.run_frame(init_frame.clone());
            // Release the data-stack slice even when __init__ raised, then
            // propagate the error.
            release_datastack_frame(&init_frame, vm);
            let init_result = init_result?;

            // Hand __init__'s result to the shim and let it finish.
            shim.with_exec(vm, |mut exec| exec.push_value(init_result));
            match shim.run(vm)? {
                ExecutionResult::Return(value) => Ok(value),
                ExecutionResult::Yield(_) => unreachable!("_Py_InitCleanup shim cannot yield"),
            }
        });
        // The shim frame's data-stack slice is released unconditionally.
        release_datastack_frame(&shim, vm);
        shim_result
    }
1371
    /// Read-modify-write `lasti`.
    ///
    /// Not an atomic RMW: a frame is only ever executed by one thread at a
    /// time (the `with_exec` safety argument), so a plain load/store pair
    /// with Relaxed ordering is sufficient.
    #[inline(always)]
    fn update_lasti(&mut self, f: impl FnOnce(&mut u32)) {
        let mut val = self.lasti.load(Relaxed);
        f(&mut val);
        self.lasti.store(val, Relaxed);
    }
1378
    /// Current instruction index (f_lasti).
    #[inline(always)]
    fn lasti(&self) -> u32 {
        self.lasti.load(Relaxed)
    }
1383
1384    /// Access the PyCellRef at the given localsplus index.
1385    #[inline(always)]
1386    fn cell_ref(&self, localsplus_idx: usize) -> &PyCell {
1387        let fastlocals = self.localsplus.fastlocals();
1388        let slot = &fastlocals[localsplus_idx];
1389        slot.as_ref()
1390            .expect("cell slot empty")
1391            .downcast_ref::<PyCell>()
1392            .expect("cell slot is not a PyCell")
1393    }
1394
    /// Perform deferred stack unwinding after set_f_lineno.
    ///
    /// set_f_lineno cannot pop the value stack directly because the execution
    /// loop holds the state mutex.  Instead it records the work in
    /// `pending_stack_pops` / `pending_unwind_from_stack` and we execute it
    /// here, inside the execution loop where we already own the state.
    ///
    /// `from_stack` is the encoded stack-kind state (see `stack_analysis`)
    /// describing what each popped entry was.
    fn unwind_stack_for_lineno(&mut self, pop_count: usize, from_stack: i64, vm: &VirtualMachine) {
        let mut cur_stack = from_stack;
        for _ in 0..pop_count {
            let val = self.pop_value_opt();
            // When the popped entry was a saved exception-handler value,
            // restore the thread's exception state from it.
            if stack_analysis::top_of_stack(cur_stack) == stack_analysis::Kind::Except as i64
                && let Some(exc_obj) = val
            {
                if vm.is_none(&exc_obj) {
                    // A saved None means "no exception was active".
                    vm.set_exception(None);
                } else {
                    let exc = exc_obj.downcast::<PyBaseException>().ok();
                    vm.set_exception(exc);
                }
            }
            // Advance the encoded state to describe the next entry down.
            cur_stack = stack_analysis::pop_value(cur_stack);
        }
    }
1418
1419    /// Fire 'exception' trace event (sys.settrace) with (type, value, traceback) tuple.
1420    /// Matches `_PyEval_MonitorRaise` → `PY_MONITORING_EVENT_RAISE` →
1421    /// `sys_trace_exception_func` in legacy_tracing.c.
1422    fn fire_exception_trace(&self, exc: &PyBaseExceptionRef, vm: &VirtualMachine) -> PyResult<()> {
1423        if vm.use_tracing.get() && !vm.is_none(&self.object.trace.lock()) {
1424            let exc_type: PyObjectRef = exc.class().to_owned().into();
1425            let exc_value: PyObjectRef = exc.clone().into();
1426            let exc_tb: PyObjectRef = exc
1427                .__traceback__()
1428                .map(|tb| -> PyObjectRef { tb.into() })
1429                .unwrap_or_else(|| vm.ctx.none());
1430            let tuple = vm.ctx.new_tuple(vec![exc_type, exc_value, exc_tb]).into();
1431            vm.trace_event(crate::protocol::TraceEvent::Exception, Some(tuple))?;
1432        }
1433        Ok(())
1434    }
1435
1436    fn run(&mut self, vm: &VirtualMachine) -> PyResult<ExecutionResult> {
1437        flame_guard!(format!(
1438            "Frame::run({obj_name})",
1439            obj_name = self.code.obj_name
1440        ));
1441        // Execute until return or exception:
1442        let mut arg_state = bytecode::OpArgState::default();
1443        loop {
1444            let idx = self.lasti() as usize;
1445            // Advance lasti past the current instruction BEFORE firing the
1446            // line event.  This ensures that f_lineno (which reads
1447            // locations[lasti - 1]) returns the line of the instruction
1448            // being traced, not the previous one.
1449            self.update_lasti(|i| *i += 1);
1450
1451            // Fire 'line' trace event when line number changes.
1452            // Only fire if this frame has a per-frame trace function set
1453            // (frames entered before sys.settrace() have trace=None).
1454            // Skip RESUME – it should not generate user-visible line events.
1455            if vm.use_tracing.get()
1456                && !vm.is_none(&self.object.trace.lock())
1457                && !matches!(
1458                    self.code.instructions.read_op(idx),
1459                    Instruction::Resume { .. } | Instruction::InstrumentedResume
1460                )
1461                && let Some((loc, _)) = self.code.locations.get(idx)
1462                && loc.line.get() as u32 != *self.prev_line
1463            {
1464                *self.prev_line = loc.line.get() as u32;
1465                vm.trace_event(crate::protocol::TraceEvent::Line, None)?;
1466                // Trace callback may have changed lasti via set_f_lineno.
1467                // Re-read and restart the loop from the new position.
1468                if self.lasti() != (idx as u32 + 1) {
1469                    // set_f_lineno defers stack unwinding because we hold
1470                    // the state mutex.  Perform it now.
1471                    let pops = self.object.pending_stack_pops();
1472                    if pops > 0 {
1473                        let from_stack = self.object.pending_unwind_from_stack();
1474                        self.unwind_stack_for_lineno(pops as usize, from_stack, vm);
1475                        self.object.set_pending_stack_pops(0);
1476                    }
1477                    arg_state.reset();
1478                    continue;
1479                }
1480            }
1481            let op = self.code.instructions.read_op(idx);
1482            let arg = arg_state.extend(self.code.instructions.read_arg(idx));
1483            let mut do_extend_arg = false;
1484            let caches = op.cache_entries();
1485
1486            // Update prev_line only when tracing or monitoring is active.
1487            // When neither is enabled, prev_line is stale but unused.
1488            if vm.use_tracing.get() {
1489                if !matches!(
1490                    op,
1491                    Instruction::Resume { .. }
1492                        | Instruction::ExtendedArg
1493                        | Instruction::InstrumentedLine
1494                ) && let Some((loc, _)) = self.code.locations.get(idx)
1495                {
1496                    *self.prev_line = loc.line.get() as u32;
1497                }
1498
1499                // Fire 'opcode' trace event for sys.settrace when f_trace_opcodes
1500                // is set. Skip RESUME and ExtendedArg
1501                // (_Py_call_instrumentation_instruction).
1502                if !vm.is_none(&self.object.trace.lock())
1503                    && *self.object.trace_opcodes.lock()
1504                    && !matches!(
1505                        op,
1506                        Instruction::Resume { .. }
1507                            | Instruction::InstrumentedResume
1508                            | Instruction::ExtendedArg
1509                    )
1510                {
1511                    vm.trace_event(crate::protocol::TraceEvent::Opcode, None)?;
1512                }
1513            }
1514
1515            if vm.eval_breaker_tripped()
1516                && let Err(exception) = vm.check_signals()
1517            {
1518                #[cold]
1519                fn handle_signal_exception(
1520                    frame: &mut ExecutingFrame<'_>,
1521                    exception: PyBaseExceptionRef,
1522                    idx: usize,
1523                    vm: &VirtualMachine,
1524                ) -> FrameResult {
1525                    let (loc, _end_loc) = frame.code.locations[idx];
1526                    let next = exception.__traceback__();
1527                    let new_traceback =
1528                        PyTraceback::new(next, frame.object.to_owned(), idx as u32 * 2, loc.line);
1529                    exception.set_traceback_typed(Some(new_traceback.into_ref(&vm.ctx)));
1530                    vm.contextualize_exception(&exception);
1531                    frame.unwind_blocks(vm, UnwindReason::Raising { exception })
1532                }
1533                match handle_signal_exception(self, exception, idx, vm) {
1534                    Ok(None) => {}
1535                    Ok(Some(value)) => {
1536                        break Ok(value);
1537                    }
1538                    Err(exception) => {
1539                        break Err(exception);
1540                    }
1541                }
1542                continue;
1543            }
1544            let lasti_before = self.lasti();
1545            let result = self.execute_instruction(op, arg, &mut do_extend_arg, vm);
1546            // Skip inline cache entries if instruction fell through (no jump).
1547            if caches > 0 && self.lasti() == lasti_before {
1548                self.update_lasti(|i| *i += caches as u32);
1549            }
1550            match result {
1551                Ok(None) => {}
1552                Ok(Some(value)) => {
1553                    break Ok(value);
1554                }
1555                // Instruction raised an exception
1556                Err(exception) => {
1557                    #[cold]
1558                    fn handle_exception(
1559                        frame: &mut ExecutingFrame<'_>,
1560                        exception: PyBaseExceptionRef,
1561                        idx: usize,
1562                        is_reraise: bool,
1563                        is_new_raise: bool,
1564                        vm: &VirtualMachine,
1565                    ) -> FrameResult {
1566                        // 1. Extract traceback from exception's '__traceback__' attr.
1567                        // 2. Add new entry with current execution position (filename, lineno, code_object) to traceback.
1568                        // 3. First, try to find handler in exception table
1569
1570                        // RERAISE instructions should not add traceback entries - they're just
1571                        // re-raising an already-processed exception
1572                        if !is_reraise {
1573                            // Check if the exception already has traceback entries before
1574                            // we add ours. If it does, it was propagated from a callee
1575                            // function and we should not re-contextualize it.
1576                            let had_prior_traceback = exception.__traceback__().is_some();
1577
1578                            // PyTraceBack_Here always adds a new entry without
1579                            // checking for duplicates. Each time an exception passes through
1580                            // a frame (e.g., in a loop with repeated raise statements),
1581                            // a new traceback entry is added.
1582                            let (loc, _end_loc) = frame.code.locations[idx];
1583                            let next = exception.__traceback__();
1584
1585                            let new_traceback = PyTraceback::new(
1586                                next,
1587                                frame.object.to_owned(),
1588                                idx as u32 * 2,
1589                                loc.line,
1590                            );
1591                            vm_trace!("Adding to traceback: {:?} {:?}", new_traceback, loc.line);
1592                            exception.set_traceback_typed(Some(new_traceback.into_ref(&vm.ctx)));
1593
1594                            // _PyErr_SetObject sets __context__ only when the exception
1595                            // is first raised. When an exception propagates through frames,
1596                            // __context__ must not be overwritten. We contextualize when:
1597                            // - It's an explicit raise (raise/raise from)
1598                            // - The exception had no prior traceback (originated here)
1599                            if is_new_raise || !had_prior_traceback {
1600                                vm.contextualize_exception(&exception);
1601                            }
1602                        }
1603
1604                        // Use exception table for zero-cost exception handling
1605                        frame.unwind_blocks(vm, UnwindReason::Raising { exception })
1606                    }
1607
1608                    // Check if this is a RERAISE instruction
1609                    // Both AnyInstruction::Raise { kind: Reraise/ReraiseFromStack } and
1610                    // AnyInstruction::Reraise are reraise operations that should not add
1611                    // new traceback entries.
1612                    // EndAsyncFor and CleanupThrow also re-raise non-matching exceptions.
1613                    let is_reraise = match op {
1614                        Instruction::RaiseVarargs { argc: kind } => matches!(
1615                            kind.get(arg),
1616                            bytecode::RaiseKind::BareRaise | bytecode::RaiseKind::ReraiseFromStack
1617                        ),
1618                        Instruction::Reraise { .. }
1619                        | Instruction::EndAsyncFor
1620                        | Instruction::CleanupThrow => true,
1621                        _ => false,
1622                    };
1623
1624                    // Explicit raise instructions (raise/raise from) - these always
1625                    // need contextualization even if the exception has prior traceback
1626                    let is_new_raise = matches!(
1627                        op,
1628                        Instruction::RaiseVarargs { argc: kind }
1629                            if matches!(
1630                                kind.get(arg),
1631                                bytecode::RaiseKind::Raise | bytecode::RaiseKind::RaiseCause
1632                            )
1633                    );
1634
1635                    // Fire RAISE or RERAISE monitoring event.
1636                    // If the callback raises, replace the original exception.
1637                    let exception = {
1638                        let mon_events = vm.state.monitoring_events.load();
1639                        if is_reraise {
1640                            if mon_events & monitoring::EVENT_RERAISE != 0 {
1641                                let offset = idx as u32 * 2;
1642                                let exc_obj: PyObjectRef = exception.clone().into();
1643                                match monitoring::fire_reraise(vm, self.code, offset, &exc_obj) {
1644                                    Ok(()) => exception,
1645                                    Err(monitor_exc) => monitor_exc,
1646                                }
1647                            } else {
1648                                exception
1649                            }
1650                        } else if mon_events & monitoring::EVENT_RAISE != 0 {
1651                            let offset = idx as u32 * 2;
1652                            let exc_obj: PyObjectRef = exception.clone().into();
1653                            match monitoring::fire_raise(vm, self.code, offset, &exc_obj) {
1654                                Ok(()) => exception,
1655                                Err(monitor_exc) => monitor_exc,
1656                            }
1657                        } else {
1658                            exception
1659                        }
1660                    };
1661
1662                    // Fire 'exception' trace event for sys.settrace.
1663                    // Only for new raises, not re-raises (matching the
1664                    // `error` label that calls _PyEval_MonitorRaise).
1665                    if !is_reraise {
1666                        self.fire_exception_trace(&exception, vm)?;
1667                    }
1668
1669                    match handle_exception(self, exception, idx, is_reraise, is_new_raise, vm) {
1670                        Ok(None) => {}
1671                        Ok(Some(result)) => break Ok(result),
1672                        Err(exception) => {
1673                            // Fire PY_UNWIND: exception escapes this frame
1674                            let exception = if vm.state.monitoring_events.load()
1675                                & monitoring::EVENT_PY_UNWIND
1676                                != 0
1677                            {
1678                                let offset = idx as u32 * 2;
1679                                let exc_obj: PyObjectRef = exception.clone().into();
1680                                match monitoring::fire_py_unwind(vm, self.code, offset, &exc_obj) {
1681                                    Ok(()) => exception,
1682                                    Err(monitor_exc) => monitor_exc,
1683                                }
1684                            } else {
1685                                exception
1686                            };
1687
1688                            // Restore lasti from traceback so frame.f_lineno matches tb_lineno
1689                            // The traceback was created with the correct lasti when exception
1690                            // was first raised, but frame.lasti may have changed during cleanup
1691                            if let Some(tb) = exception.__traceback__()
1692                                && core::ptr::eq::<Py<Frame>>(&*tb.frame, self.object)
1693                            {
1694                                // This traceback entry is for this frame - restore its lasti
1695                                // tb.lasti is in bytes (idx * 2), convert back to instruction index
1696                                self.update_lasti(|i| *i = tb.lasti / 2);
1697                            }
1698                            break Err(exception);
1699                        }
1700                    }
1701                }
1702            }
1703            if !do_extend_arg {
1704                arg_state.reset()
1705            }
1706        }
1707    }
1708
1709    fn yield_from_target(&self) -> Option<&PyObject> {
1710        // checks gi_frame_state == FRAME_SUSPENDED_YIELD_FROM
1711        // which is set when YIELD_VALUE with oparg >= 1 is executed.
1712        // In RustPython, we check:
1713        // 1. lasti points to RESUME (after YIELD_VALUE)
1714        // 2. The previous instruction was YIELD_VALUE with arg >= 1
1715        // 3. Stack top is the delegate (receiver)
1716        //
1717        // First check if stack is empty - if so, we can't be in yield-from
1718        if self.localsplus.stack_is_empty() {
1719            return None;
1720        }
1721        let lasti = self.lasti() as usize;
1722        if let Some(unit) = self.code.instructions.get(lasti) {
1723            match &unit.op {
1724                Instruction::Send { .. } => return Some(self.top_value()),
1725                Instruction::Resume { .. } | Instruction::InstrumentedResume => {
1726                    // Check if previous instruction was YIELD_VALUE with arg >= 1
1727                    // This indicates yield-from/await context
1728                    if lasti > 0
1729                        && let Some(prev_unit) = self.code.instructions.get(lasti - 1)
1730                        && matches!(
1731                            &prev_unit.op,
1732                            Instruction::YieldValue { .. } | Instruction::InstrumentedYieldValue
1733                        )
1734                    {
1735                        // YIELD_VALUE arg: 0 = direct yield, >= 1 = yield-from/await
1736                        // OpArgByte.0 is the raw byte value
1737                        if u8::from(prev_unit.arg) >= 1 {
1738                            // In yield-from/await context, delegate is on top of stack
1739                            return Some(self.top_value());
1740                        }
1741                    }
1742                }
1743                _ => {}
1744            }
1745        }
1746        None
1747    }
1748
    /// Handle `throw()` on a generator/coroutine frame (CPython: `gen_throw`).
    ///
    /// Two paths, matching CPython's semantics:
    /// * If the frame is suspended at a `yield from`/`await` delegate
    ///   (see `yield_from_target`), the exception is routed to the
    ///   sub-iterator first: `GeneratorExit` closes it, any other exception
    ///   is forwarded via the delegate's `throw()`.
    /// * Otherwise (`throw_here`), the exception is instantiated, given a
    ///   traceback entry for this frame at the yield site, contextualized,
    ///   and raised inside the frame by unwinding blocks and resuming
    ///   execution.
    ///
    /// Monitoring events (PY_THROW, RAISE, PY_UNWIND) are fired along the
    /// way; if a monitoring callback itself raises, its exception replaces
    /// the one being thrown.
    fn gen_throw(
        &mut self,
        vm: &VirtualMachine,
        exc_type: PyObjectRef,
        exc_val: PyObjectRef,
        exc_tb: PyObjectRef,
    ) -> PyResult<ExecutionResult> {
        // Refresh the cached monitoring-event mask before re-entering the frame.
        self.monitoring_mask = vm.state.monitoring_events.load();
        if let Some(jen) = self.yield_from_target() {
            // Check if the exception is GeneratorExit (type or instance).
            // For GeneratorExit, close the sub-iterator instead of throwing.
            let is_gen_exit = if let Some(typ) = exc_type.downcast_ref::<PyType>() {
                typ.fast_issubclass(vm.ctx.exceptions.generator_exit)
            } else {
                exc_type.fast_isinstance(vm.ctx.exceptions.generator_exit)
            };

            if is_gen_exit {
                // gen_close_iter: close the sub-iterator. Built-in coroutines
                // get the fast path; otherwise fall back to a `close` attribute
                // lookup, and do nothing if the delegate has no `close`.
                let close_result = if let Some(coro) = self.builtin_coro(jen) {
                    coro.close(jen, vm).map(|_| ())
                } else if let Some(close_meth) = vm.get_attribute_opt(jen.to_owned(), "close")? {
                    close_meth.call((), vm).map(|_| ())
                } else {
                    Ok(())
                };
                if let Err(err) = close_result {
                    // The close itself failed: record this frame (at the yield
                    // site, hence lasti - 1) in the error's traceback, then
                    // raise the error inside the generator.
                    let idx = self.lasti().saturating_sub(1) as usize;
                    if idx < self.code.locations.len() {
                        let (loc, _end_loc) = self.code.locations[idx];
                        let next = err.__traceback__();
                        let new_traceback = PyTraceback::new(
                            next,
                            self.object.to_owned(),
                            idx as u32 * 2,
                            loc.line,
                        );
                        err.set_traceback_typed(Some(new_traceback.into_ref(&vm.ctx)));
                    }

                    // Push None so the exception handler sees the expected
                    // stack state (mirrors gen_send_ex with exc=1).
                    self.push_value(vm.ctx.none());
                    vm.contextualize_exception(&err);
                    return match self.unwind_blocks(vm, UnwindReason::Raising { exception: err }) {
                        Ok(None) => {
                            // Reset line tracking so the handler's first
                            // instruction fires a LINE event on resume.
                            *self.prev_line = 0;
                            self.run(vm)
                        }
                        Ok(Some(result)) => Ok(result),
                        Err(exception) => Err(exception),
                    };
                }
                // Fall through to throw_here to raise GeneratorExit in the generator
            } else {
                // For non-GeneratorExit, delegate throw to sub-iterator
                let thrower = if let Some(coro) = self.builtin_coro(jen) {
                    Some(Either::A(coro))
                } else {
                    vm.get_attribute_opt(jen.to_owned(), "throw")?
                        .map(Either::B)
                };
                if let Some(thrower) = thrower {
                    let ret = match thrower {
                        Either::A(coro) => coro
                            .throw(jen, exc_type, exc_val, exc_tb, vm)
                            .to_pyresult(vm),
                        Either::B(meth) => meth.call((exc_type, exc_val, exc_tb), vm),
                    };
                    return ret.map(ExecutionResult::Yield).or_else(|err| {
                        // Add traceback entry for the yield-from/await point.
                        // gen_send_ex2 resumes the frame with a pending exception,
                        // which goes through error: → PyTraceBack_Here. We add the
                        // entry here before calling unwind_blocks.
                        let idx = self.lasti().saturating_sub(1) as usize;
                        if idx < self.code.locations.len() {
                            let (loc, _end_loc) = self.code.locations[idx];
                            let next = err.__traceback__();
                            let new_traceback = PyTraceback::new(
                                next,
                                self.object.to_owned(),
                                idx as u32 * 2,
                                loc.line,
                            );
                            err.set_traceback_typed(Some(new_traceback.into_ref(&vm.ctx)));
                        }

                        // Same stack fix-up + unwind sequence as the close-error
                        // path above: push None, set __context__, then unwind.
                        self.push_value(vm.ctx.none());
                        vm.contextualize_exception(&err);
                        match self.unwind_blocks(vm, UnwindReason::Raising { exception: err }) {
                            Ok(None) => {
                                *self.prev_line = 0;
                                self.run(vm)
                            }
                            Ok(Some(result)) => Ok(result),
                            Err(exception) => Err(exception),
                        }
                    });
                }
            }
        }
        // throw_here: no delegate has throw method, or not in yield-from
        // Validate the exception type first. Invalid types propagate directly to
        // the caller. Valid types with failed instantiation (e.g. __new__ returns
        // wrong type) get thrown into the generator via PyErr_SetObject path.
        let ctor = ExceptionCtor::try_from_object(vm, exc_type)?;
        let exception = match ctor.instantiate_value(exc_val, vm) {
            Ok(exc) => {
                // A caller-supplied traceback (if any) is attached to the
                // freshly instantiated exception.
                if let Some(tb) = Option::<PyRef<PyTraceback>>::try_from_object(vm, exc_tb)? {
                    exc.set_traceback_typed(Some(tb));
                }
                exc
            }
            // Instantiation failed: throw the resulting error instead.
            Err(err) => err,
        };

        // Add traceback entry for the generator frame at the yield site
        let idx = self.lasti().saturating_sub(1) as usize;
        if idx < self.code.locations.len() {
            let (loc, _end_loc) = self.code.locations[idx];
            let next = exception.__traceback__();
            let new_traceback =
                PyTraceback::new(next, self.object.to_owned(), idx as u32 * 2, loc.line);
            exception.set_traceback_typed(Some(new_traceback.into_ref(&vm.ctx)));
        }

        // Fire PY_THROW and RAISE events before raising the exception.
        // If a monitoring callback fails, its exception replaces the original.
        let exception = {
            let mon_events = vm.state.monitoring_events.load();
            let exception = if mon_events & monitoring::EVENT_PY_THROW != 0 {
                let offset = idx as u32 * 2;
                let exc_obj: PyObjectRef = exception.clone().into();
                match monitoring::fire_py_throw(vm, self.code, offset, &exc_obj) {
                    Ok(()) => exception,
                    Err(monitor_exc) => monitor_exc,
                }
            } else {
                exception
            };
            if mon_events & monitoring::EVENT_RAISE != 0 {
                let offset = idx as u32 * 2;
                let exc_obj: PyObjectRef = exception.clone().into();
                match monitoring::fire_raise(vm, self.code, offset, &exc_obj) {
                    Ok(()) => exception,
                    Err(monitor_exc) => monitor_exc,
                }
            } else {
                exception
            }
        };

        // when raising an exception, set __context__ to the current exception
        // This is done in _PyErr_SetObject
        vm.contextualize_exception(&exception);

        // always pushes Py_None before calling gen_send_ex with exc=1
        // This is needed for exception handler to have correct stack state
        self.push_value(vm.ctx.none());

        match self.unwind_blocks(vm, UnwindReason::Raising { exception }) {
            Ok(None) => {
                // Reset prev_line so that the first instruction in the handler
                // fires a LINE event. In CPython, gen_send_ex re-enters the
                // eval loop which reinitializes its local prev_instr tracker.
                *self.prev_line = 0;
                self.run(vm)
            }
            Ok(Some(result)) => Ok(result),
            Err(exception) => {
                // Fire PY_UNWIND: exception escapes the generator frame.
                let exception =
                    if vm.state.monitoring_events.load() & monitoring::EVENT_PY_UNWIND != 0 {
                        let offset = idx as u32 * 2;
                        let exc_obj: PyObjectRef = exception.clone().into();
                        match monitoring::fire_py_unwind(vm, self.code, offset, &exc_obj) {
                            Ok(()) => exception,
                            Err(monitor_exc) => monitor_exc,
                        }
                    } else {
                        exception
                    };
                Err(exception)
            }
        }
    }
1934
1935    fn unbound_cell_exception(
1936        &self,
1937        localsplus_idx: usize,
1938        vm: &VirtualMachine,
1939    ) -> PyBaseExceptionRef {
1940        use rustpython_compiler_core::bytecode::CO_FAST_FREE;
1941        let kind = self
1942            .code
1943            .localspluskinds
1944            .get(localsplus_idx)
1945            .copied()
1946            .unwrap_or(0);
1947        if kind & CO_FAST_FREE != 0 {
1948            let name = self.localsplus_name(localsplus_idx);
1949            vm.new_name_error(
1950                format!("cannot access free variable '{name}' where it is not associated with a value in enclosing scope"),
1951                name.to_owned(),
1952            )
1953        } else {
1954            // Both merged cells (LOCAL|CELL) and non-merged cells get unbound local error
1955            let name = self.localsplus_name(localsplus_idx);
1956            vm.new_exception_msg(
1957                vm.ctx.exceptions.unbound_local_error.to_owned(),
1958                format!("local variable '{name}' referenced before assignment").into(),
1959            )
1960        }
1961    }
1962
1963    /// Get the variable name for a localsplus index.
1964    fn localsplus_name(&self, idx: usize) -> &'static PyStrInterned {
1965        use rustpython_compiler_core::bytecode::{CO_FAST_CELL, CO_FAST_FREE, CO_FAST_LOCAL};
1966        let nlocals = self.code.varnames.len();
1967        let kind = self.code.localspluskinds.get(idx).copied().unwrap_or(0);
1968        if kind & CO_FAST_LOCAL != 0 {
1969            // Merged cell or regular local: name is in varnames
1970            self.code.varnames[idx]
1971        } else if kind & CO_FAST_FREE != 0 {
1972            // Free var: slots are at the end of localsplus
1973            let nlocalsplus = self.code.localspluskinds.len();
1974            let nfrees = self.code.freevars.len();
1975            let free_start = nlocalsplus - nfrees;
1976            self.code.freevars[idx - free_start]
1977        } else if kind & CO_FAST_CELL != 0 {
1978            // Non-merged cell: count how many non-merged cell slots are before
1979            // this index to find the corresponding cellvars entry.
1980            // Non-merged cellvars appear in their original order (skipping merged ones).
1981            let nonmerged_pos = self.code.localspluskinds[nlocals..idx]
1982                .iter()
1983                .filter(|&&k| k == CO_FAST_CELL)
1984                .count();
1985            // Skip merged cellvars to find the right one
1986            let mut cv_idx = 0;
1987            let mut nonmerged_count = 0;
1988            for (i, name) in self.code.cellvars.iter().enumerate() {
1989                let is_merged = self.code.varnames.contains(name);
1990                if !is_merged {
1991                    if nonmerged_count == nonmerged_pos {
1992                        cv_idx = i;
1993                        break;
1994                    }
1995                    nonmerged_count += 1;
1996                }
1997            }
1998            self.code.cellvars[cv_idx]
1999        } else {
2000            self.code.varnames[idx]
2001        }
2002    }
2003
2004    /// Execute a single instruction.
2005    #[inline(always)]
2006    fn execute_instruction(
2007        &mut self,
2008        instruction: Instruction,
2009        arg: bytecode::OpArg,
2010        extend_arg: &mut bool,
2011        vm: &VirtualMachine,
2012    ) -> FrameResult {
2013        flame_guard!(format!(
2014            "Frame::execute_instruction({})",
2015            instruction.display(arg, &self.code.code).to_string()
2016        ));
2017
2018        #[cfg(feature = "vm-tracing-logging")]
2019        {
2020            trace!("=======");
2021            /* TODO:
2022            for frame in self.frames.iter() {
2023                trace!("  {:?}", frame);
2024            }
2025            */
2026            trace!("  {:#?}", self);
2027            trace!(
2028                "  Executing op code: {}",
2029                instruction.display(arg, &self.code.code)
2030            );
2031            trace!("=======");
2032        }
2033
2034        #[cold]
2035        fn name_error(name: &'static PyStrInterned, vm: &VirtualMachine) -> PyBaseExceptionRef {
2036            vm.new_name_error(format!("name '{name}' is not defined"), name.to_owned())
2037        }
2038
2039        match instruction {
2040            Instruction::BinaryOp { op } => {
2041                let op_val = op.get(arg);
2042                self.adaptive(|s, ii, cb| s.specialize_binary_op(vm, op_val, ii, cb));
2043                self.execute_bin_op(vm, op_val)
2044            }
2045            // Super-instruction for BINARY_OP_ADD_UNICODE + STORE_FAST targeting
2046            // the left local, matching BINARY_OP_INPLACE_ADD_UNICODE shape.
2047            Instruction::BinaryOpInplaceAddUnicode => {
2048                let b = self.top_value();
2049                let a = self.nth_value(1);
2050                let instr_idx = self.lasti() as usize - 1;
2051                let cache_base = instr_idx + 1;
2052                let target_local = self.binary_op_inplace_unicode_target_local(cache_base, a);
2053                if let (Some(_a_str), Some(_b_str), Some(target_local)) = (
2054                    a.downcast_ref_if_exact::<PyStr>(vm),
2055                    b.downcast_ref_if_exact::<PyStr>(vm),
2056                    target_local,
2057                ) {
2058                    let right = self.pop_value();
2059                    let left = self.pop_value();
2060
2061                    let local_obj = self.localsplus.fastlocals_mut()[target_local]
2062                        .take()
2063                        .expect("BINARY_OP_INPLACE_ADD_UNICODE target local missing");
2064                    debug_assert!(local_obj.is(&left));
2065                    let mut local_str = local_obj
2066                        .downcast_exact::<PyStr>(vm)
2067                        .expect("BINARY_OP_INPLACE_ADD_UNICODE target local not exact str")
2068                        .into_pyref();
2069                    drop(left);
2070                    let right_str = right
2071                        .downcast_ref_if_exact::<PyStr>(vm)
2072                        .expect("BINARY_OP_INPLACE_ADD_UNICODE right operand not exact str");
2073                    local_str.concat_in_place(right_str.as_wtf8(), vm);
2074
2075                    self.localsplus.fastlocals_mut()[target_local] = Some(local_str.into());
2076                    self.jump_relative_forward(
2077                        1,
2078                        Instruction::BinaryOpInplaceAddUnicode.cache_entries() as u32,
2079                    );
2080                    Ok(None)
2081                } else {
2082                    self.execute_bin_op(vm, self.binary_op_from_arg(arg))
2083                }
2084            }
2085            Instruction::BinarySlice => {
2086                // Stack: [container, start, stop] -> [result]
2087                let stop = self.pop_value();
2088                let start = self.pop_value();
2089                let container = self.pop_value();
2090                let slice: PyObjectRef = PySlice {
2091                    start: Some(start),
2092                    stop,
2093                    step: None,
2094                }
2095                .into_ref(&vm.ctx)
2096                .into();
2097                let result = container.get_item(&*slice, vm)?;
2098                self.push_value(result);
2099                Ok(None)
2100            }
2101            Instruction::BuildList { count: size } => {
2102                let sz = size.get(arg) as usize;
2103                let elements = self.pop_multiple(sz).collect();
2104                let list_obj = vm.ctx.new_list(elements);
2105                self.push_value(list_obj.into());
2106                Ok(None)
2107            }
2108            Instruction::BuildMap { count: size } => self.execute_build_map(vm, size.get(arg)),
2109            Instruction::BuildSet { count: size } => {
2110                let set = PySet::default().into_ref(&vm.ctx);
2111                for element in self.pop_multiple(size.get(arg) as usize) {
2112                    set.add(element, vm)?;
2113                }
2114                self.push_value(set.into());
2115                Ok(None)
2116            }
2117            Instruction::BuildSlice { argc } => self.execute_build_slice(vm, argc.get(arg)),
2118            /*
2119             Instruction::ToBool => {
2120                 dbg!("Shouldn't be called outside of match statements for now")
2121                 let value = self.pop_value();
2122                 // call __bool__
2123                 let result = value.try_to_bool(vm)?;
2124                 self.push_value(vm.ctx.new_bool(result).into());
2125                 Ok(None)
2126            }
2127            */
2128            Instruction::BuildString { count: size } => {
2129                let s: Wtf8Buf = self
2130                    .pop_multiple(size.get(arg) as usize)
2131                    .map(|pyobj| pyobj.downcast::<PyStr>().unwrap())
2132                    .collect();
2133                self.push_value(vm.ctx.new_str(s).into());
2134                Ok(None)
2135            }
2136            Instruction::BuildTuple { count: size } => {
2137                let elements = self.pop_multiple(size.get(arg) as usize).collect();
2138                let list_obj = vm.ctx.new_tuple(elements);
2139                self.push_value(list_obj.into());
2140                Ok(None)
2141            }
2142            Instruction::BuildTemplate => {
2143                // Stack: [strings_tuple, interpolations_tuple] -> [template]
2144                let interpolations = self.pop_value();
2145                let strings = self.pop_value();
2146
2147                let strings = strings
2148                    .downcast::<PyTuple>()
2149                    .map_err(|_| vm.new_type_error("BUILD_TEMPLATE expected tuple for strings"))?;
2150                let interpolations = interpolations.downcast::<PyTuple>().map_err(|_| {
2151                    vm.new_type_error("BUILD_TEMPLATE expected tuple for interpolations")
2152                })?;
2153
2154                let template = PyTemplate::new(strings, interpolations);
2155                self.push_value(template.into_pyobject(vm));
2156                Ok(None)
2157            }
2158            Instruction::BuildInterpolation { format: oparg } => {
2159                // oparg encoding: (conversion << 2) | has_format_spec
2160                // Stack: [value, expression_str, (format_spec)?] -> [interpolation]
2161                let oparg_val = oparg.get(arg);
2162                let has_format_spec = (oparg_val & 1) != 0;
2163                let conversion_code = oparg_val >> 2;
2164
2165                let format_spec = if has_format_spec {
2166                    self.pop_value().downcast::<PyStr>().map_err(|_| {
2167                        vm.new_type_error("BUILD_INTERPOLATION expected str for format_spec")
2168                    })?
2169                } else {
2170                    vm.ctx.empty_str.to_owned()
2171                };
2172
2173                let expression = self.pop_value().downcast::<PyStr>().map_err(|_| {
2174                    vm.new_type_error("BUILD_INTERPOLATION expected str for expression")
2175                })?;
2176                let value = self.pop_value();
2177
2178                // conversion: 0=None, 1=Str, 2=Repr, 3=Ascii
2179                let conversion: PyObjectRef = match conversion_code {
2180                    0 => vm.ctx.none(),
2181                    1 => vm.ctx.new_str("s").into(),
2182                    2 => vm.ctx.new_str("r").into(),
2183                    3 => vm.ctx.new_str("a").into(),
2184                    _ => vm.ctx.none(), // should not happen
2185                };
2186
2187                let interpolation =
2188                    PyInterpolation::new(value, expression, conversion, format_spec, vm)?;
2189                self.push_value(interpolation.into_pyobject(vm));
2190                Ok(None)
2191            }
            Instruction::Call { argc: nargs } => {
                // Stack: [callable, self_or_null, arg1, ..., argN]
                let nargs_val = nargs.get(arg);
                // Adaptive interpreter hook: may rewrite this call site to a
                // specialized instruction (see `specialize_call`).
                self.adaptive(|s, ii, cb| s.specialize_call(vm, nargs_val, ii, cb));
                self.execute_call_vectorcall(nargs_val, vm)
            }
            Instruction::CallKw { argc: nargs } => {
                let nargs = nargs.get(arg);
                self.adaptive(|s, ii, cb| s.specialize_call_kw(vm, nargs, ii, cb));
                // Stack: [callable, self_or_null, arg1, ..., argN, kwarg_names]
                self.execute_call_kw_vectorcall(nargs, vm)
            }
            Instruction::CallFunctionEx => {
                // Call with unpacked *args/**kwargs.
                // Stack: [callable, self_or_null, args_tuple, kwargs_or_null]
                let args = self.collect_ex_args(vm)?;
                self.execute_call(args, vm)
            }
            Instruction::CallIntrinsic1 { func } => {
                // Unary intrinsic: pops one operand, pushes the result.
                let value = self.pop_value();
                let result = self.call_intrinsic_1(func.get(arg), value, vm)?;
                self.push_value(result);
                Ok(None)
            }
            Instruction::CallIntrinsic2 { func } => {
                // Binary intrinsic: operands popped in reverse of push order,
                // so value1 is the deeper stack entry.
                let value2 = self.pop_value();
                let value1 = self.pop_value();
                let result = self.call_intrinsic_2(func.get(arg), value1, value2, vm)?;
                self.push_value(result);
                Ok(None)
            }
            Instruction::CheckEgMatch => {
                // except* matching: split the exception group into
                // (rest, matched) against the popped match type.
                let match_type = self.pop_value();
                let exc_value = self.pop_value();
                let (rest, matched) =
                    crate::exceptions::exception_group_match(&exc_value, &match_type, vm)?;

                // Set matched exception as current exception (if not None)
                // This mirrors CPython's PyErr_SetHandledException(match_o) in CHECK_EG_MATCH
                if !vm.is_none(&matched)
                    && let Some(exc) = matched.downcast_ref::<PyBaseException>()
                {
                    vm.set_exception(Some(exc.to_owned()));
                }

                self.push_value(rest);
                self.push_value(matched);
                Ok(None)
            }
            Instruction::CompareOp { opname: op } => {
                let op_val = op.get(arg);
                self.adaptive(|s, ii, cb| s.specialize_compare_op(vm, op_val, ii, cb));
                self.execute_compare(vm, op_val)
            }
            Instruction::ContainsOp { invert } => {
                // `a in b` / `a not in b`: container b is TOS, a is beneath it.
                self.adaptive(|s, ii, cb| s.specialize_contains_op(vm, ii, cb));
                let b = self.pop_value();
                let a = self.pop_value();

                let value = match invert.get(arg) {
                    bytecode::Invert::No => self._in(vm, &a, &b)?,
                    bytecode::Invert::Yes => self._not_in(vm, &a, &b)?,
                };
                self.push_value(vm.ctx.new_bool(value).into());
                Ok(None)
            }
            Instruction::ConvertValue { oparg: conversion } => {
                self.convert_value(conversion.get(arg), vm)
            }
            Instruction::Copy { i: index } => {
                // CopyItem { index: 1 } copies TOS
                // CopyItem { index: 2 } copies second from top
                // This is 1-indexed to match CPython
                let idx = index.get(arg) as usize;
                let stack_len = self.localsplus.stack_len();
                debug_assert!(stack_len >= idx, "CopyItem: stack underflow");
                let value = self.localsplus.stack_index(stack_len - idx).clone();
                self.push_stackref_opt(value);
                Ok(None)
            }
            Instruction::CopyFreeVars { n } => {
                // Copy the function's closure cells into the trailing n
                // free-variable slots of localsplus.
                let n = n.get(arg) as usize;
                if n > 0 {
                    let closure = self
                        .object
                        .func_obj
                        .as_ref()
                        .and_then(|f| f.downcast_ref::<PyFunction>())
                        .and_then(|f| f.closure.as_ref());
                    let nlocalsplus = self.code.localspluskinds.len();
                    let freevar_start = nlocalsplus - n;
                    let fastlocals = self.localsplus.fastlocals_mut();
                    if let Some(closure) = closure {
                        for i in 0..n {
                            fastlocals[freevar_start + i] = Some(closure[i].clone().into());
                        }
                    }
                }
                Ok(None)
            }
            Instruction::DeleteAttr { namei: idx } => self.delete_attr(vm, idx.get(arg)),
            Instruction::DeleteDeref { i } => {
                // Clear the referenced cell's contents.
                self.cell_ref(i.get(arg).as_usize()).set(None);
                Ok(None)
            }
            Instruction::DeleteFast { var_num } => {
                // Deleting an unbound local raises UnboundLocalError.
                let fastlocals = self.localsplus.fastlocals_mut();
                let idx = var_num.get(arg);
                if fastlocals[idx].is_none() {
                    return Err(vm.new_exception_msg(
                        vm.ctx.exceptions.unbound_local_error.to_owned(),
                        format!(
                            "local variable '{}' referenced before assignment",
                            self.code.varnames[idx]
                        )
                        .into(),
                    ));
                }
                fastlocals[idx] = None;
                Ok(None)
            }
            Instruction::DeleteGlobal { namei: idx } => {
                let name = self.code.names[idx.get(arg) as usize];
                // A KeyError from the globals mapping is reported as NameError.
                match self.globals.del_item(name, vm) {
                    Ok(()) => {}
                    Err(e) if e.fast_isinstance(vm.ctx.exceptions.key_error) => {
                        return Err(name_error(name, vm));
                    }
                    Err(e) => return Err(e),
                }
                Ok(None)
            }
            Instruction::DeleteName { namei: idx } => {
                let name = self.code.names[idx.get(arg) as usize];
                // Delete via the mapping protocol (locals may be any mapping);
                // KeyError is translated into NameError.
                let res = self.locals.mapping(vm).ass_subscript(name, None, vm);

                match res {
                    Ok(()) => {}
                    Err(e) if e.fast_isinstance(vm.ctx.exceptions.key_error) => {
                        return Err(name_error(name, vm));
                    }
                    Err(e) => return Err(e),
                }
                Ok(None)
            }
            Instruction::DeleteSubscr => self.execute_delete_subscript(vm),
            Instruction::DictUpdate { i: index } => {
                // Stack before: [..., dict, ..., source]  (source at TOS)
                // Stack after:  [..., dict, ...]  (source consumed)
                // The dict to update is at position TOS-i (before popping source)

                let idx = index.get(arg);

                // Pop the source from TOS
                let source = self.pop_value();

                // Get the dict to update (it's now at TOS-(i-1) after popping source)
                let dict = if idx <= 1 {
                    // DICT_UPDATE 0 or 1: dict is at TOS (after popping source)
                    self.top_value()
                } else {
                    // DICT_UPDATE n: dict is at TOS-(n-1)
                    self.nth_value(idx - 1)
                };

                let dict = dict.downcast_ref::<PyDict>().expect("exact dict expected");

                // For dictionary unpacking {**x}, x must be a mapping
                // Check if the object has the mapping protocol (keys method)
                if vm
                    .get_method(source.clone(), vm.ctx.intern_str("keys"))
                    .is_none()
                {
                    return Err(vm.new_type_error(format!(
                        "'{}' object is not a mapping",
                        source.class().name()
                    )));
                }

                dict.merge_object(source, vm)?;
                Ok(None)
            }
            Instruction::DictMerge { i: index } => {
                // Like DICT_UPDATE, but used for **kwargs in calls: duplicate
                // keys are an error and messages mention the callable.
                let source = self.pop_value();
                let idx = index.get(arg);

                // Get the dict to merge into (same logic as DICT_UPDATE)
                let dict_ref = if idx <= 1 {
                    self.top_value()
                } else {
                    self.nth_value(idx - 1)
                };

                // SAFETY: presumably the compiler only emits DICT_MERGE with an
                // exact dict in this slot (DICT_UPDATE above checks the same
                // invariant with `expect`) — TODO confirm against the compiler.
                let dict: &Py<PyDict> = unsafe { dict_ref.downcast_unchecked_ref() };

                // Get callable for error messages
                // Stack: [callable, self_or_null, args_tuple, kwargs_dict]
                let callable = self.nth_value(idx + 2);
                let func_str = Self::object_function_str(callable, vm);

                // Check if source is a mapping
                if vm
                    .get_method(source.clone(), vm.ctx.intern_str("keys"))
                    .is_none()
                {
                    return Err(vm.new_type_error(format!(
                        "{} argument after ** must be a mapping, not {}",
                        func_str,
                        source.class().name()
                    )));
                }

                // Merge keys, checking for duplicates
                let keys_iter = vm.call_method(&source, "keys", ())?;
                for key in keys_iter.try_to_value::<Vec<PyObjectRef>>(vm)? {
                    if dict.contains_key(&*key, vm) {
                        let key_str = key.str(vm)?;
                        return Err(vm.new_type_error(format!(
                            "{} got multiple values for keyword argument '{}'",
                            func_str,
                            key_str.as_wtf8()
                        )));
                    }
                    let value = vm.call_method(&source, "__getitem__", (key.clone(),))?;
                    dict.set_item(&*key, value, vm)?;
                }
                Ok(None)
            }
            Instruction::EndAsyncFor => {
                // Pops (awaitable, exc) from stack.
                // If exc is StopAsyncIteration, clears it (normal loop end).
                // Otherwise re-raises.
                let exc = self.pop_value();
                let _awaitable = self.pop_value();

                let exc = exc
                    .downcast::<PyBaseException>()
                    .expect("EndAsyncFor expects exception on stack");

                if exc.fast_isinstance(vm.ctx.exceptions.stop_async_iteration) {
                    // StopAsyncIteration - normal end of async for loop
                    vm.set_exception(None);
                    Ok(None)
                } else {
                    // Other exception - re-raise
                    Err(exc)
                }
            }
            Instruction::ExtendedArg => {
                // Flag the dispatcher to widen the next instruction's oparg.
                *extend_arg = true;
                Ok(None)
            }
            Instruction::ForIter { .. } => {
                // Relative forward jump: target = lasti + caches + delta
                // (the `+ 1` accounts for the instruction's cache entry —
                // TODO confirm cache-count encoding).
                let target = bytecode::Label::from_u32(self.lasti() + 1 + u32::from(arg));
                self.adaptive(|s, ii, cb| s.specialize_for_iter(vm, u32::from(arg), ii, cb));
                self.execute_for_iter(vm, target)?;
                Ok(None)
            }
2450            Instruction::FormatSimple => {
2451                let value = self.pop_value();
2452                let formatted = vm.format(&value, vm.ctx.new_str(""))?;
2453                self.push_value(formatted.into());
2454
2455                Ok(None)
2456            }
            Instruction::FormatWithSpec => {
                // format(value, spec): spec is TOS, value beneath it.
                let spec = self.pop_value();
                let value = self.pop_value();
                // NOTE(review): unwrap assumes the compiler guarantees a str
                // spec on the stack — confirm against bytecode emission.
                let formatted = vm.format(&value, spec.downcast::<PyStr>().unwrap())?;
                self.push_value(formatted.into());

                Ok(None)
            }
            Instruction::GetAIter => {
                // Pop an async iterable and push obj.__aiter__().
                let aiterable = self.pop_value();
                let aiter = vm.call_special_method(&aiterable, identifier!(vm, __aiter__), ())?;
                self.push_value(aiter);
                Ok(None)
            }
            Instruction::GetANext => {
                #[cfg(debug_assertions)] // remove when GetANext is fully implemented
                let orig_stack_len = self.localsplus.stack_len();

                // Leave the async iterator on the stack; push the awaitable
                // produced by its __anext__.
                let aiter = self.top_value();
                let awaitable = if aiter.class().is(vm.ctx.types.async_generator) {
                    vm.call_special_method(aiter, identifier!(vm, __anext__), ())?
                } else {
                    if !aiter.has_attr("__anext__", vm).unwrap_or(false) {
                        // TODO: __anext__ must be protocol
                        let msg = format!(
                            "'async for' requires an iterator with __anext__ method, got {:.100}",
                            aiter.class().name()
                        );
                        return Err(vm.new_type_error(msg));
                    }
                    let next_iter =
                        vm.call_special_method(aiter, identifier!(vm, __anext__), ())?;

                    // _PyCoro_GetAwaitableIter in CPython
                    fn get_awaitable_iter(next_iter: &PyObject, vm: &VirtualMachine) -> PyResult {
                        let gen_is_coroutine = |_| {
                            // TODO: cpython gen_is_coroutine
                            true
                        };
                        if next_iter.class().is(vm.ctx.types.coroutine_type)
                            || gen_is_coroutine(next_iter)
                        {
                            return Ok(next_iter.to_owned());
                        }
                        // TODO: error handling
                        vm.call_special_method(next_iter, identifier!(vm, __await__), ())
                    }
                    get_awaitable_iter(&next_iter, vm).map_err(|_| {
                        vm.new_type_error(format!(
                            "'async for' received an invalid object from __anext__: {:.200}",
                            next_iter.class().name()
                        ))
                    })?
                };
                self.push_value(awaitable);
                #[cfg(debug_assertions)]
                debug_assert_eq!(orig_stack_len + 1, self.localsplus.stack_len());
                Ok(None)
            }
            Instruction::GetAwaitable { r#where: oparg } => {
                // Resolve an object into an awaitable iterator. `oparg` encodes
                // where the value came from (1 = __aenter__, 2 = __aexit__) so
                // the error message can name the 'async with' source.
                let iterable = self.pop_value();

                let iter = match crate::coroutine::get_awaitable_iter(iterable.clone(), vm) {
                    Ok(iter) => iter,
                    Err(e) => {
                        // _PyEval_FormatAwaitableError: override error for async with
                        // when the type doesn't have __await__
                        let oparg_val = oparg.get(arg);
                        if vm
                            .get_method(iterable.clone(), identifier!(vm, __await__))
                            .is_none()
                        {
                            if oparg_val == 1 {
                                return Err(vm.new_type_error(format!(
                                    "'async with' received an object from __aenter__ \
                                     that does not implement __await__: {}",
                                    iterable.class().name()
                                )));
                            } else if oparg_val == 2 {
                                return Err(vm.new_type_error(format!(
                                    "'async with' received an object from __aexit__ \
                                     that does not implement __await__: {}",
                                    iterable.class().name()
                                )));
                            }
                        }
                        return Err(e);
                    }
                };

                // Check if coroutine is already being awaited
                if let Some(coro) = iter.downcast_ref::<PyCoroutine>()
                    && coro.as_coro().frame().yield_from_target().is_some()
                {
                    return Err(vm.new_runtime_error("coroutine is being awaited already"));
                }

                self.push_value(iter);
                Ok(None)
            }
            Instruction::GetIter => {
                // Replace TOS with iter(TOS).
                let iterated_obj = self.pop_value();
                let iter_obj = iterated_obj.get_iter(vm)?;
                self.push_value(iter_obj.into());
                Ok(None)
            }
            Instruction::GetYieldFromIter => {
                // GET_YIELD_FROM_ITER: prepare iterator for yield from
                // If iterable is a coroutine, ensure we're in a coroutine context
                // If iterable is a generator, use it directly
                // Otherwise, call iter() on it
                let iterable = self.pop_value();
                let iter = if iterable.class().is(vm.ctx.types.coroutine_type) {
                    // Coroutine requires CO_COROUTINE or CO_ITERABLE_COROUTINE flag
                    if !self.code.flags.intersects(
                        bytecode::CodeFlags::COROUTINE | bytecode::CodeFlags::ITERABLE_COROUTINE,
                    ) {
                        return Err(vm.new_type_error(
                            "cannot 'yield from' a coroutine object in a non-coroutine generator",
                        ));
                    }
                    iterable
                } else if iterable.class().is(vm.ctx.types.generator_type) {
                    // Generator can be used directly
                    iterable
                } else {
                    // Otherwise, get iterator
                    iterable.get_iter(vm)?.into()
                };
                self.push_value(iter);
                Ok(None)
            }
            Instruction::GetLen => {
                // STACK.append(len(STACK[-1]))
                let obj = self.top_value();
                let len = obj.length(vm)?;
                self.push_value(vm.ctx.new_int(len).into());
                Ok(None)
            }
            Instruction::ImportFrom { namei: idx } => {
                // `from mod import name`: push the attribute from the module on TOS.
                let obj = self.import_from(vm, idx.get(arg))?;
                self.push_value(obj);
                Ok(None)
            }
            Instruction::ImportName { namei: idx } => {
                self.import(vm, Some(self.code.names[idx.get(arg) as usize]))?;
                Ok(None)
            }
            Instruction::IsOp { invert } => {
                // Identity test: `a is b` / `a is not b`.
                let b = self.pop_value();
                let a = self.pop_value();
                let res = a.is(&b);

                let value = match invert.get(arg) {
                    bytecode::Invert::No => res,
                    bytecode::Invert::Yes => !res,
                };
                self.push_value(vm.ctx.new_bool(value).into());
                Ok(None)
            }
            Instruction::JumpForward { .. } => {
                self.jump_relative_forward(u32::from(arg), 0);
                Ok(None)
            }
            Instruction::JumpBackward { .. } => {
                // CPython rewrites JUMP_BACKWARD to JUMP_BACKWARD_NO_JIT
                // when JIT is unavailable.
                let instr_idx = self.lasti() as usize - 1;
                // SAFETY: presumably replacing an opcode in-place at the
                // current instruction index is sound here (same arg/caches
                // layout for the two variants) — TODO confirm replace_op's
                // contract.
                unsafe {
                    self.code
                        .instructions
                        .replace_op(instr_idx, Instruction::JumpBackwardNoJit);
                }
                self.jump_relative_backward(u32::from(arg), 1);
                Ok(None)
            }
            Instruction::JumpBackwardJit | Instruction::JumpBackwardNoJit => {
                self.jump_relative_backward(u32::from(arg), 1);
                Ok(None)
            }
            Instruction::JumpBackwardNoInterrupt { .. } => {
                // Like JumpBackward but without the extra offset adjustment.
                self.jump_relative_backward(u32::from(arg), 0);
                Ok(None)
            }
            Instruction::ListAppend { i } => {
                // Append TOS to the list `i` entries below it (used by
                // list comprehensions).
                let item = self.pop_value();
                let obj = self.nth_value(i.get(arg) - 1);
                let list: &Py<PyList> = unsafe {
                    // SAFETY: trust compiler
                    obj.downcast_unchecked_ref()
                };
                list.append(item);
                Ok(None)
            }
            Instruction::ListExtend { i } => {
                let iterable = self.pop_value();
                let obj = self.nth_value(i.get(arg) - 1);
                let list: &Py<PyList> = unsafe {
                    // SAFETY: compiler guarantees correct type
                    obj.downcast_unchecked_ref()
                };
                let type_name = iterable.class().name().to_owned();
                // Only rewrite the error if the type is truly not iterable
                // (no __iter__ and no __getitem__). Preserve original TypeError
                // from custom iterables that raise during iteration.
                let not_iterable = iterable.class().slots.iter.load().is_none()
                    && iterable
                        .get_class_attr(vm.ctx.intern_str("__getitem__"))
                        .is_none();
                list.extend(iterable, vm).map_err(|e| {
                    if not_iterable && e.class().is(vm.ctx.exceptions.type_error) {
                        vm.new_type_error(format!(
                            "Value after * must be an iterable, not {type_name}"
                        ))
                    } else {
                        e
                    }
                })?;
                Ok(None)
            }
            Instruction::LoadAttr { namei: idx } => self.load_attr(vm, idx.get(arg)),
            Instruction::LoadSuperAttr { namei: idx } => {
                let idx_val = idx.get(arg);
                self.adaptive(|s, ii, cb| s.specialize_load_super_attr(vm, idx_val, ii, cb));
                self.load_super_attr(vm, idx_val)
            }
            Instruction::LoadBuildClass => {
                // Look up __build_class__ in builtins; missing entries become
                // NameError rather than KeyError.
                let build_class = if let Some(builtins_dict) = self.builtins_dict {
                    builtins_dict
                        .get_item_opt(identifier!(vm, __build_class__), vm)?
                        .ok_or_else(|| {
                            vm.new_name_error(
                                "__build_class__ not found",
                                identifier!(vm, __build_class__).to_owned(),
                            )
                        })?
                } else {
                    self.builtins
                        .get_item(identifier!(vm, __build_class__), vm)
                        .map_err(|e| {
                            if e.fast_isinstance(vm.ctx.exceptions.key_error) {
                                vm.new_name_error(
                                    "__build_class__ not found",
                                    identifier!(vm, __build_class__).to_owned(),
                                )
                            } else {
                                e
                            }
                        })?
                };
                self.push_value(build_class);
                Ok(None)
            }
            Instruction::LoadLocals => {
                // Push the locals dict onto the stack
                let locals = self.locals.into_object(vm);
                self.push_value(locals);
                Ok(None)
            }
            Instruction::LoadFromDictOrDeref { i } => {
                // Pop dict from stack (locals or classdict depending on context)
                let class_dict = self.pop_value();
                let idx = i.get(arg).as_usize();
                let name = self.localsplus_name(idx);
                // Only treat KeyError as "not found", propagate other exceptions
                let value = if let Some(dict_obj) = class_dict.downcast_ref::<PyDict>() {
                    dict_obj.get_item_opt(name, vm)?
                } else {
                    match class_dict.get_item(name, vm) {
                        Ok(v) => Some(v),
                        Err(e) if e.fast_isinstance(vm.ctx.exceptions.key_error) => None,
                        Err(e) => return Err(e),
                    }
                };
                // Fall back to the closure cell when the dict misses.
                self.push_value(match value {
                    Some(v) => v,
                    None => self
                        .cell_ref(idx)
                        .get()
                        .ok_or_else(|| self.unbound_cell_exception(idx, vm))?,
                });
                Ok(None)
            }
            Instruction::LoadFromDictOrGlobals { i: idx } => {
                // PEP 649: Pop dict from stack (classdict), check there first, then globals
                let dict = self.pop_value();
                let name = self.code.names[idx.get(arg) as usize];

                // Only treat KeyError as "not found", propagate other exceptions
                let value = if let Some(dict_obj) = dict.downcast_ref::<PyDict>() {
                    dict_obj.get_item_opt(name, vm)?
                } else {
                    // Not an exact dict, use mapping protocol
                    match dict.get_item(name, vm) {
                        Ok(v) => Some(v),
                        Err(e) if e.fast_isinstance(vm.ctx.exceptions.key_error) => None,
                        Err(e) => return Err(e),
                    }
                };

                self.push_value(match value {
                    Some(v) => v,
                    None => self.load_global_or_builtin(name, vm)?,
                });
                Ok(None)
            }
            Instruction::LoadConst { consti } => {
                self.push_value(self.code.constants[consti.get(arg)].clone().into());
                // Mirror CPython's LOAD_CONST family transition. RustPython does
                // not currently distinguish immortal constants at runtime.
                let instr_idx = self.lasti() as usize - 1;
                // SAFETY: presumably swapping LOAD_CONST for its specialized
                // variant in-place is sound (identical oparg meaning) — TODO
                // confirm replace_op's contract.
                unsafe {
                    self.code
                        .instructions
                        .replace_op(instr_idx, Instruction::LoadConstMortal);
                }
                Ok(None)
            }
            Instruction::LoadConstMortal | Instruction::LoadConstImmortal => {
                // Specialized LOAD_CONST: no self-modification, just push.
                self.push_value(self.code.constants[u32::from(arg).into()].clone().into());
                Ok(None)
            }
            Instruction::LoadCommonConstant { idx } => {
                // Push a well-known constant (exception types, builtin
                // callables) without a constant-table entry.
                use bytecode::CommonConstant;
                let value = match idx.get(arg) {
                    CommonConstant::AssertionError => {
                        vm.ctx.exceptions.assertion_error.to_owned().into()
                    }
                    CommonConstant::NotImplementedError => {
                        vm.ctx.exceptions.not_implemented_error.to_owned().into()
                    }
                    CommonConstant::BuiltinTuple => vm.ctx.types.tuple_type.to_owned().into(),
                    CommonConstant::BuiltinAll => vm
                        .callable_cache
                        .builtin_all
                        .clone()
                        .expect("builtin_all not initialized"),
                    CommonConstant::BuiltinAny => vm
                        .callable_cache
                        .builtin_any
                        .clone()
                        .expect("builtin_any not initialized"),
                    CommonConstant::BuiltinList => vm.ctx.types.list_type.to_owned().into(),
                    CommonConstant::BuiltinSet => vm.ctx.types.set_type.to_owned().into(),
                };
                self.push_value(value);
                Ok(None)
            }
            Instruction::LoadSmallInt { i: idx } => {
                // Push small integer (-5..=256) directly without constant table lookup
                let value = vm.ctx.new_int(idx.get(arg) as i32);
                self.push_value(value.into());
                Ok(None)
            }
            Instruction::LoadDeref { i } => {
                // Load from a closure cell; empty cell raises (see
                // unbound_cell_exception).
                let idx = i.get(arg).as_usize();
                let x = self
                    .cell_ref(idx)
                    .get()
                    .ok_or_else(|| self.unbound_cell_exception(idx, vm))?;
                self.push_value(x);
                Ok(None)
            }
            Instruction::LoadFast { var_num } => {
                #[cold]
                fn reference_error(
                    varname: &'static PyStrInterned,
                    vm: &VirtualMachine,
                ) -> PyBaseExceptionRef {
                    vm.new_exception_msg(
                        vm.ctx.exceptions.unbound_local_error.to_owned(),
                        format!("local variable '{varname}' referenced before assignment").into(),
                    )
                }
                let idx = var_num.get(arg);
                let x = self.localsplus.fastlocals()[idx]
                    .clone()
                    .ok_or_else(|| reference_error(self.code.varnames[idx], vm))?;
                self.push_value(x);
                Ok(None)
            }
            Instruction::LoadFastAndClear { var_num } => {
                // Save current slot value and clear it (for inlined comprehensions).
                // Pushes NULL (None at Option level) if slot was empty, so that
                // StoreFast can restore the empty state after the comprehension.
                let idx = var_num.get(arg);
                let x = self.localsplus.fastlocals_mut()[idx].take();
                self.push_value_opt(x);
                Ok(None)
            }
            Instruction::LoadFastCheck { var_num } => {
                // Same as LoadFast but explicitly checks for unbound locals
                // (LoadFast in RustPython already does this check)
                let idx = var_num.get(arg);
                let x = self.localsplus.fastlocals()[idx].clone().ok_or_else(|| {
                    vm.new_exception_msg(
                        vm.ctx.exceptions.unbound_local_error.to_owned(),
                        format!(
                            "local variable '{}' referenced before assignment",
                            self.code.varnames[idx]
                        )
                        .into(),
                    )
                })?;
                self.push_value(x);
                Ok(None)
            }
            Instruction::LoadFastLoadFast { var_nums } => {
                // Load two local variables at once
                // oparg encoding: (idx1 << 4) | idx2
                let oparg = var_nums.get(arg);
                let (idx1, idx2) = oparg.indexes();
                let fastlocals = self.localsplus.fastlocals();
                // Each slot is checked independently; either may be unbound.
                let x1 = fastlocals[idx1].clone().ok_or_else(|| {
                    vm.new_exception_msg(
                        vm.ctx.exceptions.unbound_local_error.to_owned(),
                        format!(
                            "local variable '{}' referenced before assignment",
                            self.code.varnames[idx1]
                        )
                        .into(),
                    )
                })?;
                let x2 = fastlocals[idx2].clone().ok_or_else(|| {
                    vm.new_exception_msg(
                        vm.ctx.exceptions.unbound_local_error.to_owned(),
                        format!(
                            "local variable '{}' referenced before assignment",
                            self.code.varnames[idx2]
                        )
                        .into(),
                    )
                })?;
                // Push in encoding order: x1 deeper, x2 on top.
                self.push_value(x1);
                self.push_value(x2);
                Ok(None)
            }
            // Borrow optimization not yet active; falls back to clone.
            // push_borrowed() is available but disabled until stack
            // lifetime issues at yield/exception points are resolved.
            Instruction::LoadFastBorrow { var_num } => {
                // Currently identical to LoadFast (clone semantics); see note above.
                let idx = var_num.get(arg);
                let x = self.localsplus.fastlocals()[idx].clone().ok_or_else(|| {
                    vm.new_exception_msg(
                        vm.ctx.exceptions.unbound_local_error.to_owned(),
                        format!(
                            "local variable '{}' referenced before assignment",
                            self.code.varnames[idx]
                        )
                        .into(),
                    )
                })?;
                self.push_value(x);
                Ok(None)
            }
            Instruction::LoadFastBorrowLoadFastBorrow { var_nums } => {
                // Fused two-load form; same clone fallback as LoadFastBorrow.
                // oparg encoding: (idx1 << 4) | idx2 (see LoadFastLoadFast).
                let oparg = var_nums.get(arg);
                let (idx1, idx2) = oparg.indexes();
                let fastlocals = self.localsplus.fastlocals();
                let x1 = fastlocals[idx1].clone().ok_or_else(|| {
                    vm.new_exception_msg(
                        vm.ctx.exceptions.unbound_local_error.to_owned(),
                        format!(
                            "local variable '{}' referenced before assignment",
                            self.code.varnames[idx1]
                        )
                        .into(),
                    )
                })?;
                let x2 = fastlocals[idx2].clone().ok_or_else(|| {
                    vm.new_exception_msg(
                        vm.ctx.exceptions.unbound_local_error.to_owned(),
                        format!(
                            "local variable '{}' referenced before assignment",
                            self.code.varnames[idx2]
                        )
                        .into(),
                    )
                })?;
                self.push_value(x1);
                self.push_value(x2);
                Ok(None)
            }
            Instruction::LoadGlobal { namei: idx } => {
                // oparg layout: bit 0 = "push extra NULL" flag (call convention),
                // remaining bits = index into co_names.
                let oparg = idx.get(arg);
                // Give the adaptive interpreter a chance to specialize this site.
                self.adaptive(|s, ii, cb| s.specialize_load_global(vm, oparg, ii, cb));
                let name = &self.code.names[(oparg >> 1) as usize];
                // Globals first, then builtins.
                let x = self.load_global_or_builtin(name, vm)?;
                self.push_value(x);
                if (oparg & 1) != 0 {
                    self.push_value_opt(None);
                }
                Ok(None)
            }
            Instruction::LoadName { namei: idx } => {
                // Name lookup through the locals mapping; a KeyError falls
                // through to globals/builtins. Other errors propagate as-is.
                let name = self.code.names[idx.get(arg) as usize];
                let result = self.locals.mapping(vm).subscript(name, vm);
                match result {
                    Ok(x) => self.push_value(x),
                    Err(e) if e.fast_isinstance(vm.ctx.exceptions.key_error) => {
                        self.push_value(self.load_global_or_builtin(name, vm)?);
                    }
                    Err(e) => return Err(e),
                }
                Ok(None)
            }
            Instruction::LoadSpecial { method } => {
                // Pops obj, pushes (callable, self_or_null) for CALL convention.
                // Push order: callable first (deeper), self_or_null on top.
                use crate::vm::PyMethod;

                let obj = self.pop_value();
                let oparg = method.get(arg);
                let method_name = get_special_method_name(oparg, vm);

                match vm.get_special_method(&obj, method_name)? {
                    // Unbound function found on the type: push func + target self.
                    Some(PyMethod::Function { target, func }) => {
                        self.push_value(func); // callable (deeper)
                        self.push_value(target); // self (TOS)
                    }
                    // Already-bound callable: no separate self slot needed.
                    Some(PyMethod::Attribute(bound)) => {
                        self.push_value(bound); // callable (deeper)
                        self.push_null(); // NULL (TOS)
                    }
                    // Method missing: TypeError, optionally with a suggestion.
                    None => {
                        return Err(vm.new_type_error(get_special_method_error_msg(
                            oparg,
                            &obj.class().name(),
                            special_method_can_suggest(&obj, oparg, vm)?,
                        )));
                    }
                };
                Ok(None)
            }
            Instruction::MakeFunction => self.execute_make_function(vm),
            Instruction::MakeCell { i } => {
                // Wrap the current slot value (if any) in a new PyCell.
                // For merged cells (LOCAL|CELL), this wraps the argument value.
                // For non-merged cells, this creates an empty cell.
                let idx = i.get(arg).as_usize();
                let fastlocals = self.localsplus.fastlocals_mut();
                // take() leaves None in the slot; the cell replaces the raw value.
                let initial = fastlocals[idx].take();
                let cell = PyCell::new(initial).into_ref(&vm.ctx).into();
                fastlocals[idx] = Some(cell);
                Ok(None)
            }
            Instruction::MapAdd { i } => {
                // Used by dict comprehensions: insert key/value into the dict
                // sitting `i` slots below the popped pair.
                let value = self.pop_value();
                let key = self.pop_value();
                let obj = self.nth_value(i.get(arg) - 1);
                let dict: &Py<PyDict> = unsafe {
                    // SAFETY: trust compiler
                    obj.downcast_unchecked_ref()
                };
                dict.set_item(&*key, value, vm)?;
                Ok(None)
            }
            Instruction::MatchClass { count: nargs } => {
                // STACK[-1] is a tuple of keyword attribute names, STACK[-2] is the class being matched against, and STACK[-3] is the match subject.
                // nargs is the number of positional sub-patterns.
                // On success pushes a tuple of extracted attribute values;
                // on any non-match pushes None.
                let kwd_attrs = self.pop_value();
                let kwd_attrs = kwd_attrs.downcast_ref::<PyTuple>().unwrap();
                let cls = self.pop_value();
                let subject = self.pop_value();
                let nargs_val = nargs.get(arg) as usize;

                // Check if subject is an instance of cls
                if subject.is_instance(cls.as_ref(), vm)? {
                    let mut extracted = vec![];

                    // Get __match_args__ for positional arguments if nargs > 0
                    if nargs_val > 0 {
                        // Get __match_args__ from the class
                        let match_args =
                            vm.get_attribute_opt(cls.clone(), identifier!(vm, __match_args__))?;

                        if let Some(match_args) = match_args {
                            // Convert to tuple
                            let match_args = match match_args.downcast_exact::<PyTuple>(vm) {
                                Ok(tuple) => tuple,
                                Err(match_args) => {
                                    // __match_args__ must be a tuple
                                    // Get type names for error message
                                    let type_name = cls
                                        .downcast::<crate::builtins::PyType>()
                                        .ok()
                                        .and_then(|t| t.__name__(vm).to_str().map(str::to_owned))
                                        .unwrap_or_else(|| String::from("?"));
                                    let match_args_type_name = match_args.class().__name__(vm);
                                    return Err(vm.new_type_error(format!(
                                        "{}.__match_args__ must be a tuple (got {})",
                                        type_name, match_args_type_name
                                    )));
                                }
                            };

                            // Check if we have enough match args
                            if match_args.len() < nargs_val {
                                return Err(vm.new_type_error(format!(
                                    "class pattern accepts at most {} positional sub-patterns ({} given)",
                                    match_args.len(),
                                    nargs_val
                                )));
                            }

                            // Extract positional attributes
                            for i in 0..nargs_val {
                                let attr_name = &match_args[i];
                                let attr_name_str = match attr_name.downcast_ref::<PyStr>() {
                                    Some(s) => s,
                                    None => {
                                        return Err(vm.new_type_error(
                                            "__match_args__ elements must be strings",
                                        ));
                                    }
                                };
                                match subject.get_attr(attr_name_str, vm) {
                                    Ok(value) => extracted.push(value),
                                    Err(e)
                                        if e.fast_isinstance(vm.ctx.exceptions.attribute_error) =>
                                    {
                                        // Missing attribute → non-match
                                        self.push_value(vm.ctx.none());
                                        return Ok(None);
                                    }
                                    Err(e) => return Err(e),
                                }
                            }
                        } else {
                            // No __match_args__, check if this is a type with MATCH_SELF behavior
                            // For built-in types like bool, int, str, list, tuple, dict, etc.
                            // they match the subject itself as the single positional argument
                            let is_match_self_type = cls
                                .downcast::<PyType>()
                                .is_ok_and(|t| t.slots.flags.contains(PyTypeFlags::_MATCH_SELF));

                            if is_match_self_type {
                                if nargs_val == 1 {
                                    // Match the subject itself as the single positional argument
                                    extracted.push(subject.clone());
                                } else if nargs_val > 1 {
                                    // Too many positional arguments for MATCH_SELF
                                    return Err(vm.new_type_error(
                                        "class pattern accepts at most 1 positional sub-pattern for MATCH_SELF types",
                                    ));
                                }
                            } else {
                                // No __match_args__ and not a MATCH_SELF type
                                // NOTE: this check is always true here (we are inside
                                // the `nargs_val > 0` branch); kept for clarity.
                                if nargs_val > 0 {
                                    return Err(vm.new_type_error(
                                        "class pattern defines no positional sub-patterns (__match_args__ missing)",
                                    ));
                                }
                            }
                        }
                    }

                    // Extract keyword attributes
                    for name in kwd_attrs {
                        let name_str = name.downcast_ref::<PyStr>().unwrap();
                        match subject.get_attr(name_str, vm) {
                            Ok(value) => extracted.push(value),
                            // Missing keyword attribute → non-match, push None.
                            Err(e) if e.fast_isinstance(vm.ctx.exceptions.attribute_error) => {
                                self.push_value(vm.ctx.none());
                                return Ok(None);
                            }
                            Err(e) => return Err(e),
                        }
                    }

                    self.push_value(vm.ctx.new_tuple(extracted).into());
                } else {
                    // Not an instance, push None
                    self.push_value(vm.ctx.none());
                }
                Ok(None)
            }
            Instruction::MatchKeys => {
                // MATCH_KEYS doesn't pop subject and keys, only reads them
                let keys_tuple = self.top_value(); // stack[-1]
                let subject = self.nth_value(1); // stack[-2]

                // Check if subject is a mapping and extract values for keys
                if subject.class().slots.flags.contains(PyTypeFlags::MAPPING) {
                    let keys = keys_tuple.downcast_ref::<PyTuple>().unwrap();
                    let mut values = Vec::new();
                    let mut all_match = true;

                    // We use the two argument form of map.get(key, default) for two reasons:
                    // - Atomically check for a key and get its value without error handling.
                    // - Don't cause key creation or resizing in dict subclasses like
                    //   collections.defaultdict that define __missing__ (or similar).
                    // See CPython's _PyEval_MatchKeys

                    if let Some(get_method) = vm
                        .get_method(subject.to_owned(), vm.ctx.intern_str("get"))
                        .transpose()?
                    {
                        // Fresh object used as a sentinel: identity comparison
                        // against it cannot collide with any real value.
                        let dummy = vm
                            .ctx
                            .new_base_object(vm.ctx.types.object_type.to_owned(), None);

                        for key in keys {
                            // value = map.get(key, dummy)
                            match get_method.call((key.as_object(), dummy.clone()), vm) {
                                Ok(value) => {
                                    // if value == dummy: key not in map!
                                    if value.is(&dummy) {
                                        all_match = false;
                                        break;
                                    }
                                    values.push(value);
                                }
                                Err(e) => return Err(e),
                            }
                        }
                    } else {
                        // Fallback if .get() method is not available (shouldn't happen for mappings)
                        for key in keys {
                            match subject.get_item(key.as_object(), vm) {
                                Ok(value) => values.push(value),
                                Err(e) if e.fast_isinstance(vm.ctx.exceptions.key_error) => {
                                    all_match = false;
                                    break;
                                }
                                Err(e) => return Err(e),
                            }
                        }
                    }

                    if all_match {
                        // Push values tuple on successful match
                        self.push_value(vm.ctx.new_tuple(values).into());
                    } else {
                        // No match - push None
                        self.push_value(vm.ctx.none());
                    }
                } else {
                    // Not a mapping - push None
                    self.push_value(vm.ctx.none());
                }
                Ok(None)
            }
            Instruction::MatchMapping => {
                // Pop and push back the subject to keep it on stack
                let subject = self.pop_value();

                // Check if the type has the MAPPING flag
                let is_mapping = subject.class().slots.flags.contains(PyTypeFlags::MAPPING);

                self.push_value(subject);
                self.push_value(vm.ctx.new_bool(is_mapping).into());
                Ok(None)
            }
            Instruction::MatchSequence => {
                // Pop and push back the subject to keep it on stack
                let subject = self.pop_value();

                // Check if the type has the SEQUENCE flag
                let is_sequence = subject.class().slots.flags.contains(PyTypeFlags::SEQUENCE);

                self.push_value(subject);
                self.push_value(vm.ctx.new_bool(is_sequence).into());
                Ok(None)
            }
            Instruction::Nop => Ok(None),
            // NOT_TAKEN is a branch prediction hint - functionally a NOP
            Instruction::NotTaken => Ok(None),
            // CACHE is used by adaptive interpreter for inline caching - NOP for us
            Instruction::Cache => Ok(None),
            Instruction::ReturnGenerator => {
                // In RustPython, generators/coroutines are created in function.rs
                // before the frame starts executing. The RETURN_GENERATOR instruction
                // pushes None so that the following POP_TOP has something to consume.
                // This matches CPython's semantics where the sent value (None for first call)
                // is on the stack when the generator resumes.
                self.push_value(vm.ctx.none());
                Ok(None)
            }
            Instruction::PopExcept => {
                // Pop prev_exc from value stack and restore it
                let prev_exc = self.pop_value();
                if vm.is_none(&prev_exc) {
                    vm.set_exception(None);
                } else if let Ok(exc) = prev_exc.downcast::<PyBaseException>() {
                    vm.set_exception(Some(exc));
                }
                // (Any other value type is silently ignored here.)

                // NOTE: We do NOT clear the traceback of the exception that was just handled.
                // Python preserves exception tracebacks even after the exception is no longer
                // the "current exception". This is important for code that catches an exception,
                // stores it, and later inspects its traceback.
                // Reference cycles (Exception → Traceback → Frame → locals) are handled by
                // Python's garbage collector which can detect and break cycles.

                Ok(None)
            }
            Instruction::PopJumpIfFalse { .. } => self.pop_jump_if_relative(vm, arg, 1, false),
            Instruction::PopJumpIfTrue { .. } => self.pop_jump_if_relative(vm, arg, 1, true),
            Instruction::PopJumpIfNone { .. } => {
                // Pop TOS; jump forward by `arg` if it is None.
                let value = self.pop_value();
                if vm.is_none(&value) {
                    self.jump_relative_forward(u32::from(arg), 1);
                }
                Ok(None)
            }
            Instruction::PopJumpIfNotNone { .. } => {
                // Pop TOS; jump forward by `arg` if it is anything but None.
                let value = self.pop_value();
                if !vm.is_none(&value) {
                    self.jump_relative_forward(u32::from(arg), 1);
                }
                Ok(None)
            }
            Instruction::PopTop => {
                // Pop value from stack and ignore.
                self.pop_value();
                Ok(None)
            }
            Instruction::EndFor => {
                // Pop the next value from stack (cleanup after loop body)
                self.pop_value();
                Ok(None)
            }
            Instruction::PopIter => {
                // Pop the iterator from stack (end of for loop)
                self.pop_value();
                Ok(None)
            }
            Instruction::PushNull => {
                // Push NULL for self_or_null slot in call protocol
                self.push_null();
                Ok(None)
            }
            Instruction::RaiseVarargs { argc: kind } => self.execute_raise(vm, kind.get(arg)),
            Instruction::Resume { .. } | Instruction::ResumeCheck => {
                // Lazy quickening: initialize adaptive counters on first execution
                if !self.code.quickened.swap(true, atomic::Ordering::Relaxed) {
                    self.code.instructions.quicken();
                    // Publish the quickened instructions before other threads
                    // can observe `quickened == true`.
                    atomic::fence(atomic::Ordering::Release);
                }
                if self.monitoring_disabled_for_code(vm) {
                    // Monitoring off for this code object: strip instrumentation
                    // (events = 0) and record the current global version.
                    let global_ver = vm
                        .state
                        .instrumentation_version
                        .load(atomic::Ordering::Acquire);
                    monitoring::instrument_code(self.code, 0);
                    self.code
                        .instrumentation_version
                        .store(global_ver, atomic::Ordering::Release);
                    return Ok(None);
                }
                // Check if bytecode needs re-instrumentation
                let global_ver = vm
                    .state
                    .instrumentation_version
                    .load(atomic::Ordering::Acquire);
                let code_ver = self
                    .code
                    .instrumentation_version
                    .load(atomic::Ordering::Acquire);
                if code_ver != global_ver {
                    let events = {
                        let state = vm.state.monitoring.lock();
                        state.events_for_code(self.code.get_id())
                    };
                    monitoring::instrument_code(self.code, events);
                    self.code
                        .instrumentation_version
                        .store(global_ver, atomic::Ordering::Release);
                    // Re-execute this instruction (it may now be INSTRUMENTED_RESUME)
                    self.update_lasti(|i| *i -= 1);
                }
                Ok(None)
            }
            Instruction::ReturnValue => {
                // Pop the return value and unwind any active blocks
                // (finally handlers, with-exits) on the way out.
                let value = self.pop_value();
                self.unwind_blocks(vm, UnwindReason::Returning { value })
            }
            Instruction::SetAdd { i } => {
                // Used by set comprehensions: add the popped item to the set
                // sitting `i` slots below it.
                let item = self.pop_value();
                let obj = self.nth_value(i.get(arg) - 1);
                let set: &Py<PySet> = unsafe {
                    // SAFETY: trust compiler
                    obj.downcast_unchecked_ref()
                };
                set.add(item, vm)?;
                Ok(None)
            }
            Instruction::SetUpdate { i } => {
                // Pop an iterable and add each of its items to the target set.
                let iterable = self.pop_value();
                let obj = self.nth_value(i.get(arg) - 1);
                let set: &Py<PySet> = unsafe {
                    // SAFETY: compiler guarantees correct type
                    obj.downcast_unchecked_ref()
                };
                let iter = PyIter::try_from_object(vm, iterable)?;
                while let PyIterReturn::Return(item) = iter.next(vm)? {
                    set.add(item, vm)?;
                }
                Ok(None)
            }
            Instruction::PushExcInfo => {
                // Stack: [exc] -> [prev_exc, exc]
                let exc = self.pop_value();
                // Snapshot the previously-current exception (or None).
                let prev_exc = vm
                    .current_exception()
                    .map(|e| e.into())
                    .unwrap_or_else(|| vm.ctx.none());

                // Set exc as the current exception
                if let Some(exc_ref) = exc.downcast_ref::<PyBaseException>() {
                    vm.set_exception(Some(exc_ref.to_owned()));
                }

                self.push_value(prev_exc);
                self.push_value(exc);
                Ok(None)
            }
            Instruction::CheckExcMatch => {
                // Stack: [exc, type] -> [exc, bool]
                let exc_type = self.pop_value();
                let exc = self.top_value();

                // Validate that exc_type inherits from BaseException
                if let Some(tuple_of_exceptions) = exc_type.downcast_ref::<PyTuple>() {
                    // Each member of a tuple of types must be validated.
                    for exception in tuple_of_exceptions {
                        if !exception
                            .is_subclass(vm.ctx.exceptions.base_exception_type.into(), vm)?
                        {
                            return Err(vm.new_type_error(
                                "catching classes that do not inherit from BaseException is not allowed",
                            ));
                        }
                    }
                } else if !exc_type.is_subclass(vm.ctx.exceptions.base_exception_type.into(), vm)? {
                    return Err(vm.new_type_error(
                        "catching classes that do not inherit from BaseException is not allowed",
                    ));
                }

                let result = exc.is_instance(&exc_type, vm)?;
                self.push_value(vm.ctx.new_bool(result).into());
                Ok(None)
            }
            Instruction::Reraise { depth } => {
                // inst(RERAISE, (values[oparg], exc -- values[oparg]))
                //
                // Stack layout: [values..., exc] where len(values) == oparg
                // RERAISE pops exc and oparg additional values from the stack.
                // values[0] is lasti used to set frame->instr_ptr for traceback.
                // We skip the lasti update since RustPython's traceback is already correct.
                let depth_val = depth.get(arg) as usize;

                // Pop exception from TOS
                let exc = self.pop_value();

                // Pop the depth values (lasti and possibly other items like prev_exc)
                for _ in 0..depth_val {
                    self.pop_value();
                }

                if let Some(exc_ref) = exc.downcast_ref::<PyBaseException>() {
                    Err(exc_ref.to_owned())
                } else {
                    // Fallback: use current exception if TOS is not an exception
                    let exc = vm
                        .topmost_exception()
                        .ok_or_else(|| vm.new_runtime_error("No active exception to re-raise"))?;
                    Err(exc)
                }
            }
            Instruction::SetFunctionAttribute { flag: attr } => {
                self.execute_set_function_attribute(vm, attr.get(arg))
            }
            Instruction::SetupAnnotations => self.setup_annotations(vm),
            Instruction::StoreAttr { namei: idx } => {
                let idx_val = idx.get(arg);
                // Let the adaptive interpreter specialize this attribute store.
                self.adaptive(|s, ii, cb| s.specialize_store_attr(vm, idx_val, ii, cb));
                self.store_attr(vm, idx_val)
            }
            Instruction::StoreDeref { i } => {
                // Store the popped value into a closure cell.
                let value = self.pop_value();
                self.cell_ref(i.get(arg).as_usize()).set(Some(value));
                Ok(None)
            }
            Instruction::StoreFast { var_num } => {
                // pop_value_opt: allows NULL from LoadFastAndClear restore path
                let value = self.pop_value_opt();
                let fastlocals = self.localsplus.fastlocals_mut();
                fastlocals[var_num.get(arg)] = value;
                Ok(None)
            }
            Instruction::StoreFastLoadFast { var_nums } => {
                // Fused store-then-load; oparg encodes (store_idx << 4) | load_idx.
                let value = self.pop_value();
                let locals = self.localsplus.fastlocals_mut();
                let oparg = var_nums.get(arg);
                let (store_idx, load_idx) = oparg.indexes();
                locals[store_idx] = Some(value);
                let load_value = locals[load_idx]
                    .clone()
                    .expect("StoreFastLoadFast: load slot should have value after store");
                self.push_value(load_value);
                Ok(None)
            }
            Instruction::StoreFastStoreFast { var_nums } => {
                let oparg = var_nums.get(arg);
                let (idx1, idx2) = oparg.indexes();
                // pop_value_opt: allows NULL from LoadFastAndClear restore path
                let value1 = self.pop_value_opt();
                let value2 = self.pop_value_opt();
                let fastlocals = self.localsplus.fastlocals_mut();
                fastlocals[idx1] = value1;
                fastlocals[idx2] = value2;
                Ok(None)
            }
3472            Instruction::StoreGlobal { namei: idx } => {
3473                let value = self.pop_value();
3474                self.globals
3475                    .set_item(self.code.names[idx.get(arg) as usize], value, vm)?;
3476                Ok(None)
3477            }
3478            Instruction::StoreName { namei: idx } => {
3479                let name = self.code.names[idx.get(arg) as usize];
3480                let value = self.pop_value();
3481                self.locals
3482                    .mapping(vm)
3483                    .ass_subscript(name, Some(value), vm)?;
3484                Ok(None)
3485            }
3486            Instruction::StoreSlice => {
3487                // Stack: [value, container, start, stop] -> []
3488                let stop = self.pop_value();
3489                let start = self.pop_value();
3490                let container = self.pop_value();
3491                let value = self.pop_value();
3492                let slice: PyObjectRef = PySlice {
3493                    start: Some(start),
3494                    stop,
3495                    step: None,
3496                }
3497                .into_ref(&vm.ctx)
3498                .into();
3499                container.set_item(&*slice, value, vm)?;
3500                Ok(None)
3501            }
3502            Instruction::StoreSubscr => {
3503                self.adaptive(|s, ii, cb| s.specialize_store_subscr(vm, ii, cb));
3504                self.execute_store_subscript(vm)
3505            }
3506            Instruction::Swap { i: index } => {
3507                let len = self.localsplus.stack_len();
3508                debug_assert!(len > 0, "stack underflow in SWAP");
3509                let i = len - 1; // TOS index
3510                let index_val = index.get(arg) as usize;
3511                // CPython: SWAP(n) swaps TOS with PEEK(n) where PEEK(n) = stack_pointer[-n]
3512                // This means swap TOS with the element at index (len - n)
3513                debug_assert!(
3514                    index_val <= len,
3515                    "SWAP index {} exceeds stack size {}",
3516                    index_val,
3517                    len
3518                );
3519                let j = len - index_val;
3520                self.localsplus.stack_swap(i, j);
3521                Ok(None)
3522            }
3523            Instruction::ToBool => {
3524                self.adaptive(|s, ii, cb| s.specialize_to_bool(vm, ii, cb));
3525                let obj = self.pop_value();
3526                let bool_val = obj.try_to_bool(vm)?;
3527                self.push_value(vm.ctx.new_bool(bool_val).into());
3528                Ok(None)
3529            }
3530            Instruction::UnpackEx { counts: args } => {
3531                let args = args.get(arg);
3532                self.execute_unpack_ex(vm, args.before, args.after)
3533            }
3534            Instruction::UnpackSequence { count: size } => {
3535                let expected = size.get(arg);
3536                self.adaptive(|s, ii, cb| s.specialize_unpack_sequence(vm, expected, ii, cb));
3537                self.unpack_sequence(expected, vm)
3538            }
            // WITH_EXCEPT_START: call the stashed __exit__ with the current
            // exception split into (type, value, traceback); push its result so
            // the following instructions can test for exception suppression.
            Instruction::WithExceptStart => {
                // Stack: [..., exit_func, self_or_null, lasti, prev_exc, exc]
                // exit_func at TOS-4, self_or_null at TOS-3
                let exc = vm.current_exception();

                let stack_len = self.localsplus.stack_len();
                let exit_func = expect_unchecked(
                    self.localsplus.stack_index(stack_len - 5).clone(),
                    "WithExceptStart: exit_func is NULL",
                );
                let self_or_null = self.localsplus.stack_index(stack_len - 4).clone();

                // No active exception -> pass (None, None, None), mirroring the
                // normal-exit call convention for __exit__.
                let (tp, val, tb) = if let Some(ref exc) = exc {
                    vm.split_exception(exc.clone())
                } else {
                    (vm.ctx.none(), vm.ctx.none(), vm.ctx.none())
                };

                // self_or_null carries the receiver when exit_func is an unbound
                // callable; prepend it as the first argument in that case.
                let exit_res = if let Some(self_exit) = self_or_null {
                    exit_func.call((self_exit.to_pyobj(), tp, val, tb), vm)?
                } else {
                    exit_func.call((tp, val, tb), vm)?
                };
                self.push_value(exit_res);

                Ok(None)
            }
            // YIELD_VALUE: suspend the frame, handing TOS to the caller.
            Instruction::YieldValue { .. } => {
                // Debug invariant: no borrowed stack refs may be live at a yield
                // point (the whole value stack must hold owned refs here).
                debug_assert!(
                    self.localsplus
                        .stack_as_slice()
                        .iter()
                        .flatten()
                        .all(|sr| !sr.is_borrowed()),
                    "borrowed refs on stack at yield point"
                );
                Ok(Some(ExecutionResult::Yield(self.pop_value())))
            }
            // SEND: drive a sub-generator/coroutine. On StopIteration the result
            // value is pushed and control jumps past the send loop (exit_label).
            Instruction::Send { .. } => {
                // (receiver, v -- receiver, retval)
                self.adaptive(|s, ii, cb| s.specialize_send(vm, ii, cb));
                // Jump target on StopIteration: the instruction after this one,
                // offset by the oparg.
                let exit_label = bytecode::Label::from_u32(self.lasti() + 1 + u32::from(arg));
                let receiver = self.nth_value(1);
                // Fast path: receiver is exactly a builtin generator/coroutine
                // that is neither running nor closed, and no specialization eval
                // frame is active — send into it directly, skipping the generic
                // `send` method dispatch.
                let can_fast_send = !self.specialization_eval_frame_active(vm)
                    && (receiver.downcast_ref_if_exact::<PyGenerator>(vm).is_some()
                        || receiver.downcast_ref_if_exact::<PyCoroutine>(vm).is_some())
                    && self
                        .builtin_coro(receiver)
                        .is_some_and(|coro| !coro.running() && !coro.closed());
                let val = self.pop_value();
                let receiver = self.top_value();
                let ret = if can_fast_send {
                    let coro = self.builtin_coro(receiver).unwrap();
                    if vm.is_none(&val) {
                        coro.send_none(receiver, vm)?
                    } else {
                        coro.send(receiver, val, vm)?
                    }
                } else {
                    self._send(receiver, val, vm)?
                };
                match ret {
                    PyIterReturn::Return(value) => {
                        self.push_value(value);
                        Ok(None)
                    }
                    PyIterReturn::StopIteration(value) => {
                        // With tracing active, report the StopIteration to the
                        // trace function before converting it into a jump.
                        if vm.use_tracing.get() && !vm.is_none(&self.object.trace.lock()) {
                            let stop_exc = vm.new_stop_iteration(value.clone());
                            self.fire_exception_trace(&stop_exc, vm)?;
                        }
                        let value = vm.unwrap_or_none(value);
                        self.push_value(value);
                        self.jump(exit_label);
                        Ok(None)
                    }
                }
            }
            // SEND_GEN: specialized SEND. NOTE(review): body mirrors Send above;
            // the only visible difference is that Send also re-runs the adaptive
            // specializer. Keep the two in sync when editing.
            Instruction::SendGen => {
                let exit_label = bytecode::Label::from_u32(self.lasti() + 1 + u32::from(arg));
                // Stack: [receiver, val] — peek receiver before popping
                let receiver = self.nth_value(1);
                let can_fast_send = !self.specialization_eval_frame_active(vm)
                    && (receiver.downcast_ref_if_exact::<PyGenerator>(vm).is_some()
                        || receiver.downcast_ref_if_exact::<PyCoroutine>(vm).is_some())
                    && self
                        .builtin_coro(receiver)
                        .is_some_and(|coro| !coro.running() && !coro.closed());
                let val = self.pop_value();

                if can_fast_send {
                    let receiver = self.top_value();
                    let coro = self.builtin_coro(receiver).unwrap();
                    let ret = if vm.is_none(&val) {
                        coro.send_none(receiver, vm)?
                    } else {
                        coro.send(receiver, val, vm)?
                    };
                    match ret {
                        PyIterReturn::Return(value) => {
                            self.push_value(value);
                            return Ok(None);
                        }
                        PyIterReturn::StopIteration(value) => {
                            if vm.use_tracing.get() && !vm.is_none(&self.object.trace.lock()) {
                                let stop_exc = vm.new_stop_iteration(value.clone());
                                self.fire_exception_trace(&stop_exc, vm)?;
                            }
                            let value = vm.unwrap_or_none(value);
                            self.push_value(value);
                            self.jump(exit_label);
                            return Ok(None);
                        }
                    }
                }
                // Guard failed: fall back to the generic send protocol.
                let receiver = self.top_value();
                match self._send(receiver, val, vm)? {
                    PyIterReturn::Return(value) => {
                        self.push_value(value);
                        Ok(None)
                    }
                    PyIterReturn::StopIteration(value) => {
                        if vm.use_tracing.get() && !vm.is_none(&self.object.trace.lock()) {
                            let stop_exc = vm.new_stop_iteration(value.clone());
                            self.fire_exception_trace(&stop_exc, vm)?;
                        }
                        let value = vm.unwrap_or_none(value);
                        self.push_value(value);
                        self.jump(exit_label);
                        Ok(None)
                    }
                }
            }
            // END_SEND: drop the receiver beneath TOS, keeping only the value.
            Instruction::EndSend => {
                // Stack: (receiver, value) -> (value)
                // Pops receiver, leaves value
                let value = self.pop_value();
                self.pop_value(); // discard receiver
                self.push_value(value);
                Ok(None)
            }
            // EXIT_INIT_CHECK: enforce that __init__ returned None, raising
            // TypeError otherwise.
            Instruction::ExitInitCheck => {
                // Check that __init__ returned None
                let should_be_none = self.pop_value();
                if !vm.is_none(&should_be_none) {
                    return Err(vm.new_type_error(format!(
                        "__init__() should return None, not '{}'",
                        should_be_none.class().name()
                    )));
                }
                Ok(None)
            }
            Instruction::CleanupThrow => {
                // CLEANUP_THROW: (sub_iter, last_sent_val, exc) -> (None, value) OR re-raise
                // If StopIteration: pop all 3, extract value, push (None, value)
                // Otherwise: pop all 3, return Err(exc) for unwind_blocks to handle
                //
                // Unlike CPython where exception_unwind pops the triple as part of
                // stack cleanup to handler depth, RustPython pops here explicitly
                // and lets unwind_blocks find outer handlers.
                // Compiler sets handler_depth = base + 2 (before exc is pushed).

                // First peek at exc_value (top of stack) without popping
                let exc = self.top_value();

                // Check if it's a StopIteration
                if let Some(exc_ref) = exc.downcast_ref::<PyBaseException>()
                    && exc_ref.fast_isinstance(vm.ctx.exceptions.stop_iteration)
                {
                    // Extract value from StopIteration
                    let value = exc_ref.get_arg(0).unwrap_or_else(|| vm.ctx.none());
                    // Now pop all three
                    self.pop_value(); // exc
                    self.pop_value(); // last_sent_val
                    self.pop_value(); // sub_iter
                    self.push_value(vm.ctx.none());
                    self.push_value(value);
                    return Ok(None);
                }

                // Re-raise other exceptions: pop all three and return Err(exc)
                let exc = self.pop_value(); // exc
                self.pop_value(); // last_sent_val
                self.pop_value(); // sub_iter

                // TOS should always be an exception here; a non-exception object
                // is reported as a TypeError rather than a panic.
                let exc = exc
                    .downcast::<PyBaseException>()
                    .map_err(|_| vm.new_type_error("exception expected"))?;
                Err(exc)
            }
            // UNARY_INVERT: TOS = ~TOS via the VM's invert protocol.
            Instruction::UnaryInvert => {
                let a = self.pop_value();
                let value = vm._invert(&a)?;
                self.push_value(value);
                Ok(None)
            }
            // UNARY_NEGATIVE: TOS = -TOS via the VM's negation protocol.
            Instruction::UnaryNegative => {
                let a = self.pop_value();
                let value = vm._neg(&a)?;
                self.push_value(value);
                Ok(None)
            }
            // UNARY_NOT: TOS = not bool(TOS).
            Instruction::UnaryNot => {
                let obj = self.pop_value();
                let value = obj.try_to_bool(vm)?;
                self.push_value(vm.ctx.new_bool(!value).into());
                Ok(None)
            }
            // Specialized LOAD_ATTR opcodes
            //
            // Common shape: each variant reads a cached type-version tag from the
            // inline cache (slot cache_base + 1); if the cache is populated
            // (non-zero) and the owner's class still has the same tp_version_tag,
            // the cached result is used. Any failed guard deopts to
            // load_attr_slow, the generic lookup.

            // Method form: pushes (method, owner) so a following CALL can use the
            // owner as the receiver.
            // NOTE(review): no instance-dict shadowing check in this variant —
            // presumably the specializer only installs it for types whose
            // instances carry no __dict__; confirm against the specializer.
            Instruction::LoadAttrMethodNoDict => {
                let oparg = LoadAttr::from_u32(u32::from(arg));
                let cache_base = self.lasti() as usize;

                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);

                if type_version != 0
                    && owner.class().tp_version_tag.load(Acquire) == type_version
                    && let Some(func) = self.try_read_cached_descriptor(cache_base, type_version)
                {
                    let owner = self.pop_value();
                    self.push_value(func);
                    self.push_value(owner);
                    Ok(None)
                } else {
                    self.load_attr_slow(vm, oparg)
                }
            }
            // Method form for objects whose __dict__ is created lazily: the fast
            // path is valid only while the instance dict is still absent
            // (dict().is_none()), since a materialized dict could shadow the
            // cached method.
            Instruction::LoadAttrMethodLazyDict => {
                let oparg = LoadAttr::from_u32(u32::from(arg));
                let cache_base = self.lasti() as usize;

                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);

                if type_version != 0
                    && owner.class().tp_version_tag.load(Acquire) == type_version
                    && owner.dict().is_none()
                    && let Some(func) = self.try_read_cached_descriptor(cache_base, type_version)
                {
                    let owner = self.pop_value();
                    self.push_value(func);
                    self.push_value(owner);
                    Ok(None)
                } else {
                    self.load_attr_slow(vm, oparg)
                }
            }
            // Method form for instances that do have a dict: explicitly verify
            // the attribute is not shadowed by an instance-dict entry before
            // using the cached class method.
            Instruction::LoadAttrMethodWithValues => {
                let oparg = LoadAttr::from_u32(u32::from(arg));
                let cache_base = self.lasti() as usize;
                let attr_name = self.code.names[oparg.name_idx() as usize];

                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);

                if type_version != 0 && owner.class().tp_version_tag.load(Acquire) == type_version {
                    // Check instance dict doesn't shadow the method
                    let shadowed = if let Some(dict) = owner.dict() {
                        match dict.get_item_opt(attr_name, vm) {
                            Ok(Some(_)) => true,
                            Ok(None) => false,
                            Err(_) => {
                                // Dict lookup error -> use safe path.
                                return self.load_attr_slow(vm, oparg);
                            }
                        }
                    } else {
                        false
                    };

                    if !shadowed
                        && let Some(func) =
                            self.try_read_cached_descriptor(cache_base, type_version)
                    {
                        let owner = self.pop_value();
                        self.push_value(func);
                        self.push_value(owner);
                        return Ok(None);
                    }
                }
                self.load_attr_slow(vm, oparg)
            }
            // LOAD_ATTR_INSTANCE_VALUE: a matching type version means no data
            // descriptor intercepts this name, so a direct instance-dict hit can
            // bypass the full descriptor protocol.
            Instruction::LoadAttrInstanceValue => {
                let oparg = LoadAttr::from_u32(u32::from(arg));
                let cache_base = self.lasti() as usize;
                let attr_name = self.code.names[oparg.name_idx() as usize];

                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);

                if type_version != 0 && owner.class().tp_version_tag.load(Acquire) == type_version {
                    // Type version matches — no data descriptor for this attr.
                    // Try direct dict lookup, skipping full descriptor protocol.
                    if let Some(dict) = owner.dict()
                        && let Some(value) = dict.get_item_opt(attr_name, vm)?
                    {
                        self.pop_value();
                        self.push_value(value);
                        return Ok(None);
                    }
                    // Not in instance dict — fall through to class lookup via slow path
                }
                self.load_attr_slow(vm, oparg)
            }
            // LOAD_ATTR_WITH_HINT: instance-dict lookup fast path. In method
            // form it pushes (value, NULL) — NULL marking "no bound receiver"
            // for the following call sequence.
            Instruction::LoadAttrWithHint => {
                let oparg = LoadAttr::from_u32(u32::from(arg));
                let cache_base = self.lasti() as usize;
                let attr_name = self.code.names[oparg.name_idx() as usize];

                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);

                if type_version != 0
                    && owner.class().tp_version_tag.load(Acquire) == type_version
                    && let Some(dict) = owner.dict()
                    && let Some(value) = dict.get_item_opt(attr_name, vm)?
                {
                    self.pop_value();
                    if oparg.is_method() {
                        self.push_value(value);
                        self.push_value_opt(None);
                    } else {
                        self.push_value(value);
                    }
                    return Ok(None);
                }

                self.load_attr_slow(vm, oparg)
            }
            // LOAD_ATTR_MODULE: owner is exactly a module object; fetch the
            // attribute via the module's own get_attr. Any get_attr error deopts
            // to the slow path (which will raise properly).
            Instruction::LoadAttrModule => {
                let oparg = LoadAttr::from_u32(u32::from(arg));
                let cache_base = self.lasti() as usize;
                let attr_name = self.code.names[oparg.name_idx() as usize];

                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);

                if type_version != 0
                    && owner.class().tp_version_tag.load(Acquire) == type_version
                    && let Some(module) = owner.downcast_ref_if_exact::<PyModule>(vm)
                    && let Ok(value) = module.get_attr(attr_name, vm)
                {
                    self.pop_value();
                    if oparg.is_method() {
                        self.push_value(value);
                        self.push_value_opt(None);
                    } else {
                        self.push_value(value);
                    }
                    return Ok(None);
                }
                self.load_attr_slow(vm, oparg)
            }
            // LOAD_ATTR_NONDESCRIPTOR_NO_DICT: the cached class attribute is a
            // plain (non-descriptor) value and instances cannot shadow it, so it
            // can be pushed directly from the cache.
            Instruction::LoadAttrNondescriptorNoDict => {
                let oparg = LoadAttr::from_u32(u32::from(arg));
                let cache_base = self.lasti() as usize;

                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);

                if type_version != 0
                    && owner.class().tp_version_tag.load(Acquire) == type_version
                    && let Some(attr) = self.try_read_cached_descriptor(cache_base, type_version)
                {
                    self.pop_value();
                    if oparg.is_method() {
                        self.push_value(attr);
                        self.push_value_opt(None);
                    } else {
                        self.push_value(attr);
                    }
                    return Ok(None);
                }
                self.load_attr_slow(vm, oparg)
            }
            // LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES: like the NO_DICT variant, but
            // the instance dict exists and takes priority — check it first, then
            // fall back to the cached class attribute.
            Instruction::LoadAttrNondescriptorWithValues => {
                let oparg = LoadAttr::from_u32(u32::from(arg));
                let cache_base = self.lasti() as usize;
                let attr_name = self.code.names[oparg.name_idx() as usize];

                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);

                if type_version != 0 && owner.class().tp_version_tag.load(Acquire) == type_version {
                    // Instance dict has priority — check if attr is shadowed
                    if let Some(dict) = owner.dict()
                        && let Some(value) = dict.get_item_opt(attr_name, vm)?
                    {
                        self.pop_value();
                        if oparg.is_method() {
                            self.push_value(value);
                            self.push_value_opt(None);
                        } else {
                            self.push_value(value);
                        }
                        return Ok(None);
                    }
                    // Not in instance dict — use cached class attr
                    let Some(attr) = self.try_read_cached_descriptor(cache_base, type_version)
                    else {
                        return self.load_attr_slow(vm, oparg);
                    };
                    self.pop_value();
                    if oparg.is_method() {
                        self.push_value(attr);
                        self.push_value_opt(None);
                    } else {
                        self.push_value(attr);
                    }
                    return Ok(None);
                }
                self.load_attr_slow(vm, oparg)
            }
            // LOAD_ATTR_CLASS: owner is itself a type; the guard checks the
            // owner type's own tp_version_tag (not its metaclass's) before using
            // the cached attribute.
            Instruction::LoadAttrClass => {
                let oparg = LoadAttr::from_u32(u32::from(arg));
                let cache_base = self.lasti() as usize;

                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);

                if type_version != 0
                    && let Some(owner_type) = owner.downcast_ref::<PyType>()
                    && owner_type.tp_version_tag.load(Acquire) == type_version
                    && let Some(attr) = self.try_read_cached_descriptor(cache_base, type_version)
                {
                    self.pop_value();
                    if oparg.is_method() {
                        self.push_value(attr);
                        self.push_value_opt(None);
                    } else {
                        self.push_value(attr);
                    }
                    return Ok(None);
                }
                self.load_attr_slow(vm, oparg)
            }
            // LOAD_ATTR_CLASS_WITH_METACLASS_CHECK: as LoadAttrClass, plus a
            // second cached version (cache_base + 3) guarding the metaclass —
            // owner.class() here is the metaclass of the owner type.
            Instruction::LoadAttrClassWithMetaclassCheck => {
                let oparg = LoadAttr::from_u32(u32::from(arg));
                let cache_base = self.lasti() as usize;

                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);
                let metaclass_version = self.code.instructions.read_cache_u32(cache_base + 3);

                if type_version != 0
                    && metaclass_version != 0
                    && let Some(owner_type) = owner.downcast_ref::<PyType>()
                    && owner_type.tp_version_tag.load(Acquire) == type_version
                    && owner.class().tp_version_tag.load(Acquire) == metaclass_version
                    && let Some(attr) = self.try_read_cached_descriptor(cache_base, type_version)
                {
                    self.pop_value();
                    if oparg.is_method() {
                        self.push_value(attr);
                        self.push_value_opt(None);
                    } else {
                        self.push_value(attr);
                    }
                    return Ok(None);
                }
                self.load_attr_slow(vm, oparg)
            }
            // LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN: the class defines a custom
            // __getattribute__-style function; if the cached function (guarded by
            // its own func_version at cache_base + 3) is still current and a
            // frame can be allocated for it, call it directly with
            // (owner, attr_name).
            Instruction::LoadAttrGetattributeOverridden => {
                let oparg = LoadAttr::from_u32(u32::from(arg));
                let cache_base = self.lasti() as usize;
                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);
                let func_version = self.code.instructions.read_cache_u32(cache_base + 3);

                if !oparg.is_method()
                    && !self.specialization_eval_frame_active(vm)
                    && type_version != 0
                    && func_version != 0
                    && owner.class().tp_version_tag.load(Acquire) == type_version
                    && let Some(func_obj) =
                        self.try_read_cached_descriptor(cache_base, type_version)
                    && let Some(func) = func_obj.downcast_ref_if_exact::<PyFunction>(vm)
                    && func.func_version() == func_version
                    && self.specialization_has_datastack_space_for_func(vm, func)
                {
                    // The cached function must take exactly (self, name).
                    debug_assert!(func.has_exact_argcount(2));
                    let owner = self.pop_value();
                    let attr_name = self.code.names[oparg.name_idx() as usize].to_owned().into();
                    let result = func.invoke_exact_args(vec![owner, attr_name], vm)?;
                    self.push_value(result);
                    return Ok(None);
                }
                self.load_attr_slow(vm, oparg)
            }
            // LOAD_ATTR_SLOT: read the value straight out of the instance slot
            // whose offset is cached at cache_base + 3.
            // NOTE(review): this reads the offset as u32, while StoreAttrSlot
            // below reads its offset as u16 — confirm the intended cache width.
            Instruction::LoadAttrSlot => {
                let oparg = LoadAttr::from_u32(u32::from(arg));
                let cache_base = self.lasti() as usize;

                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);

                if type_version != 0 && owner.class().tp_version_tag.load(Acquire) == type_version {
                    let slot_offset =
                        self.code.instructions.read_cache_u32(cache_base + 3) as usize;
                    if let Some(value) = owner.get_slot(slot_offset) {
                        self.pop_value();
                        if oparg.is_method() {
                            self.push_value(value);
                            self.push_value_opt(None);
                        } else {
                            self.push_value(value);
                        }
                        return Ok(None);
                    }
                    // Slot is None → AttributeError (fall through to slow path)
                }
                self.load_attr_slow(vm, oparg)
            }
            // LOAD_ATTR_PROPERTY: the cached descriptor is a property's fget
            // implemented as a Python function — invoke it directly with the
            // owner, bypassing the descriptor protocol.
            Instruction::LoadAttrProperty => {
                let oparg = LoadAttr::from_u32(u32::from(arg));
                let cache_base = self.lasti() as usize;

                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);

                if type_version != 0
                    && !self.specialization_eval_frame_active(vm)
                    && owner.class().tp_version_tag.load(Acquire) == type_version
                    && let Some(fget_obj) =
                        self.try_read_cached_descriptor(cache_base, type_version)
                    && let Some(func) = fget_obj.downcast_ref_if_exact::<PyFunction>(vm)
                    && func.can_specialize_call(1)
                    && self.specialization_has_datastack_space_for_func(vm, func)
                {
                    let owner = self.pop_value();
                    let result = func.invoke_exact_args(vec![owner], vm)?;
                    self.push_value(result);
                    return Ok(None);
                }
                self.load_attr_slow(vm, oparg)
            }
            // Specialized STORE_ATTR opcodes. Here cache_base is spelled as
            // instr_idx + 1 with instr_idx = lasti() - 1, which equals lasti()
            // — the same base the LOAD_ATTR variants use directly.

            // STORE_ATTR_INSTANCE_VALUE: with a matching type version and an
            // existing instance dict, write straight into the dict.
            Instruction::StoreAttrInstanceValue => {
                let attr_idx = u32::from(arg);
                let instr_idx = self.lasti() as usize - 1;
                let cache_base = instr_idx + 1;
                let attr_name = self.code.names[attr_idx as usize];
                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);

                if type_version != 0
                    && owner.class().tp_version_tag.load(Acquire) == type_version
                    && let Some(dict) = owner.dict()
                {
                    self.pop_value(); // owner
                    let value = self.pop_value();
                    dict.set_item(attr_name, value, vm)?;
                    return Ok(None);
                }
                self.store_attr(vm, attr_idx)
            }
            // STORE_ATTR_WITH_HINT.
            // NOTE(review): currently byte-identical to StoreAttrInstanceValue —
            // no hint is read from the cache here; confirm whether a hinted dict
            // write was intended.
            Instruction::StoreAttrWithHint => {
                let attr_idx = u32::from(arg);
                let instr_idx = self.lasti() as usize - 1;
                let cache_base = instr_idx + 1;
                let attr_name = self.code.names[attr_idx as usize];
                let owner = self.top_value();
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);

                if type_version != 0
                    && owner.class().tp_version_tag.load(Acquire) == type_version
                    && let Some(dict) = owner.dict()
                {
                    self.pop_value(); // owner
                    let value = self.pop_value();
                    dict.set_item(attr_name, value, vm)?;
                    return Ok(None);
                }
                self.store_attr(vm, attr_idx)
            }
            // STORE_ATTR_SLOT: write the value into the instance slot whose
            // offset is cached at cache_base + 3 (read as u16 here; see the
            // width note on LoadAttrSlot).
            Instruction::StoreAttrSlot => {
                let instr_idx = self.lasti() as usize - 1;
                let cache_base = instr_idx + 1;
                let type_version = self.code.instructions.read_cache_u32(cache_base + 1);
                // Scope the owner borrow so it is released before popping.
                let version_match = type_version != 0 && {
                    let owner = self.top_value();
                    owner.class().tp_version_tag.load(Acquire) == type_version
                };

                if version_match {
                    let slot_offset =
                        self.code.instructions.read_cache_u16(cache_base + 3) as usize;
                    let owner = self.pop_value();
                    let value = self.pop_value();
                    owner.set_slot(slot_offset, Some(value));
                    return Ok(None);
                }
                let attr_idx = u32::from(arg);
                self.store_attr(vm, attr_idx)
            }
4134            Instruction::StoreSubscrListInt => {
4135                // Stack: [value, obj, idx] (TOS=idx, TOS1=obj, TOS2=value)
4136                let idx = self.pop_value();
4137                let obj = self.pop_value();
4138                let value = self.pop_value();
4139                if let Some(list) = obj.downcast_ref_if_exact::<PyList>(vm)
4140                    && let Some(int_idx) = idx.downcast_ref_if_exact::<PyInt>(vm)
4141                    && let Some(i) = specialization_nonnegative_compact_index(int_idx, vm)
4142                {
4143                    let mut vec = list.borrow_vec_mut();
4144                    if i < vec.len() {
4145                        vec[i] = value;
4146                        return Ok(None);
4147                    }
4148                }
4149                obj.set_item(&*idx, value, vm)?;
4150                Ok(None)
4151            }
4152            Instruction::StoreSubscrDict => {
4153                // Stack: [value, obj, idx] (TOS=idx, TOS1=obj, TOS2=value)
4154                let idx = self.pop_value();
4155                let obj = self.pop_value();
4156                let value = self.pop_value();
4157                if let Some(dict) = obj.downcast_ref_if_exact::<PyDict>(vm) {
4158                    dict.set_item(&*idx, value, vm)?;
4159                    Ok(None)
4160                } else {
4161                    obj.set_item(&*idx, value, vm)?;
4162                    Ok(None)
4163                }
4164            }
4165            // Specialized BINARY_OP opcodes
4166            Instruction::BinaryOpAddInt => {
4167                self.execute_binary_op_int(vm, |a, b| a + b, bytecode::BinaryOperator::Add)
4168            }
4169            Instruction::BinaryOpSubtractInt => {
4170                self.execute_binary_op_int(vm, |a, b| a - b, bytecode::BinaryOperator::Subtract)
4171            }
4172            Instruction::BinaryOpMultiplyInt => {
4173                self.execute_binary_op_int(vm, |a, b| a * b, bytecode::BinaryOperator::Multiply)
4174            }
4175            Instruction::BinaryOpAddFloat => {
4176                self.execute_binary_op_float(vm, |a, b| a + b, bytecode::BinaryOperator::Add)
4177            }
4178            Instruction::BinaryOpSubtractFloat => {
4179                self.execute_binary_op_float(vm, |a, b| a - b, bytecode::BinaryOperator::Subtract)
4180            }
4181            Instruction::BinaryOpMultiplyFloat => {
4182                self.execute_binary_op_float(vm, |a, b| a * b, bytecode::BinaryOperator::Multiply)
4183            }
4184            Instruction::BinaryOpAddUnicode => {
4185                let b = self.top_value();
4186                let a = self.nth_value(1);
4187                if let (Some(a_str), Some(b_str)) = (
4188                    a.downcast_ref_if_exact::<PyStr>(vm),
4189                    b.downcast_ref_if_exact::<PyStr>(vm),
4190                ) {
4191                    let result = a_str.as_wtf8().py_add(b_str.as_wtf8());
4192                    self.pop_value();
4193                    self.pop_value();
4194                    self.push_value(result.to_pyobject(vm));
4195                    Ok(None)
4196                } else {
4197                    self.execute_bin_op(vm, bytecode::BinaryOperator::Add)
4198                }
4199            }
4200            Instruction::BinaryOpSubscrGetitem => {
4201                let owner = self.nth_value(1);
4202                if !self.specialization_eval_frame_active(vm)
4203                    && let Some((func, func_version)) =
4204                        owner.class().get_cached_getitem_for_specialization()
4205                    && func.func_version() == func_version
4206                    && self.specialization_has_datastack_space_for_func(vm, &func)
4207                {
4208                    debug_assert!(func.has_exact_argcount(2));
4209                    let sub = self.pop_value();
4210                    let owner = self.pop_value();
4211                    let result = func.invoke_exact_args(vec![owner, sub], vm)?;
4212                    self.push_value(result);
4213                    return Ok(None);
4214                }
4215                self.execute_bin_op(vm, bytecode::BinaryOperator::Subscr)
4216            }
4217            Instruction::BinaryOpExtend => {
4218                let op = self.binary_op_from_arg(arg);
4219                let b = self.top_value();
4220                let a = self.nth_value(1);
4221                let cache_base = self.lasti() as usize;
4222                if let Some(descr) = self.read_cached_binary_op_extend_descr(cache_base)
4223                    && descr.oparg == op
4224                    && (descr.guard)(a, b, vm)
4225                    && let Some(result) = (descr.action)(a, b, vm)
4226                {
4227                    self.pop_value();
4228                    self.pop_value();
4229                    self.push_value(result);
4230                    Ok(None)
4231                } else {
4232                    self.execute_bin_op(vm, op)
4233                }
4234            }
4235            Instruction::BinaryOpSubscrListInt => {
4236                let b = self.top_value();
4237                let a = self.nth_value(1);
4238                if let (Some(list), Some(idx)) = (
4239                    a.downcast_ref_if_exact::<PyList>(vm),
4240                    b.downcast_ref_if_exact::<PyInt>(vm),
4241                ) && let Some(i) = specialization_nonnegative_compact_index(idx, vm)
4242                {
4243                    let vec = list.borrow_vec();
4244                    if i < vec.len() {
4245                        let value = vec.do_get(i);
4246                        drop(vec);
4247                        self.pop_value();
4248                        self.pop_value();
4249                        self.push_value(value);
4250                        return Ok(None);
4251                    }
4252                }
4253                self.execute_bin_op(vm, bytecode::BinaryOperator::Subscr)
4254            }
4255            Instruction::BinaryOpSubscrTupleInt => {
4256                let b = self.top_value();
4257                let a = self.nth_value(1);
4258                if let (Some(tuple), Some(idx)) = (
4259                    a.downcast_ref_if_exact::<PyTuple>(vm),
4260                    b.downcast_ref_if_exact::<PyInt>(vm),
4261                ) && let Some(i) = specialization_nonnegative_compact_index(idx, vm)
4262                {
4263                    let elements = tuple.as_slice();
4264                    if i < elements.len() {
4265                        let value = elements[i].clone();
4266                        self.pop_value();
4267                        self.pop_value();
4268                        self.push_value(value);
4269                        return Ok(None);
4270                    }
4271                }
4272                self.execute_bin_op(vm, bytecode::BinaryOperator::Subscr)
4273            }
4274            Instruction::BinaryOpSubscrDict => {
4275                let b = self.top_value();
4276                let a = self.nth_value(1);
4277                if let Some(dict) = a.downcast_ref_if_exact::<PyDict>(vm) {
4278                    match dict.get_item_opt(b, vm) {
4279                        Ok(Some(value)) => {
4280                            self.pop_value();
4281                            self.pop_value();
4282                            self.push_value(value);
4283                            return Ok(None);
4284                        }
4285                        Ok(None) => {
4286                            let key = self.pop_value();
4287                            self.pop_value();
4288                            return Err(vm.new_key_error(key));
4289                        }
4290                        Err(e) => {
4291                            return Err(e);
4292                        }
4293                    }
4294                }
4295                self.execute_bin_op(vm, bytecode::BinaryOperator::Subscr)
4296            }
4297            Instruction::BinaryOpSubscrStrInt => {
4298                let b = self.top_value();
4299                let a = self.nth_value(1);
4300                if let (Some(a_str), Some(b_int)) = (
4301                    a.downcast_ref_if_exact::<PyStr>(vm),
4302                    b.downcast_ref_if_exact::<PyInt>(vm),
4303                ) && let Some(i) = specialization_nonnegative_compact_index(b_int, vm)
4304                    && let Ok(ch) = a_str.getitem_by_index(vm, i as isize)
4305                    && ch.is_ascii()
4306                {
4307                    let ascii_idx = ch.to_u32() as usize;
4308                    self.pop_value();
4309                    self.pop_value();
4310                    self.push_value(vm.ctx.ascii_char_cache[ascii_idx].clone().into());
4311                    return Ok(None);
4312                }
4313                self.execute_bin_op(vm, bytecode::BinaryOperator::Subscr)
4314            }
4315            Instruction::BinaryOpSubscrListSlice => {
4316                let b = self.top_value();
4317                let a = self.nth_value(1);
4318                if a.downcast_ref_if_exact::<PyList>(vm).is_some()
4319                    && b.downcast_ref::<PySlice>().is_some()
4320                {
4321                    let b_owned = self.pop_value();
4322                    let a_owned = self.pop_value();
4323                    let result = a_owned.get_item(b_owned.as_object(), vm)?;
4324                    self.push_value(result);
4325                    return Ok(None);
4326                }
4327                self.execute_bin_op(vm, bytecode::BinaryOperator::Subscr)
4328            }
4329            Instruction::CallPyExactArgs => {
4330                let instr_idx = self.lasti() as usize - 1;
4331                let cache_base = instr_idx + 1;
4332                let cached_version = self.code.instructions.read_cache_u32(cache_base + 1);
4333                let nargs: u32 = arg.into();
4334                if self.specialization_eval_frame_active(vm) {
4335                    return self.execute_call_vectorcall(nargs, vm);
4336                }
4337                // Stack: [callable, self_or_null, arg1, ..., argN]
4338                let stack_len = self.localsplus.stack_len();
4339                let self_or_null_is_some = self
4340                    .localsplus
4341                    .stack_index(stack_len - nargs as usize - 1)
4342                    .is_some();
4343                let callable = self.nth_value(nargs + 1);
4344                if let Some(func) = callable.downcast_ref_if_exact::<PyFunction>(vm)
4345                    && func.func_version() == cached_version
4346                    && cached_version != 0
4347                {
4348                    let effective_nargs = nargs + u32::from(self_or_null_is_some);
4349                    if !func.has_exact_argcount(effective_nargs) {
4350                        return self.execute_call_vectorcall(nargs, vm);
4351                    }
4352                    if !self.specialization_has_datastack_space_for_func(vm, func) {
4353                        return self.execute_call_vectorcall(nargs, vm);
4354                    }
4355                    if self.specialization_call_recursion_guard(vm) {
4356                        return self.execute_call_vectorcall(nargs, vm);
4357                    }
4358                    let pos_args: Vec<PyObjectRef> = self.pop_multiple(nargs as usize).collect();
4359                    let self_or_null = self.pop_value_opt();
4360                    let callable = self.pop_value();
4361                    let func = callable.downcast_ref_if_exact::<PyFunction>(vm).unwrap();
4362                    let args = if let Some(self_val) = self_or_null {
4363                        let mut all_args = Vec::with_capacity(pos_args.len() + 1);
4364                        all_args.push(self_val);
4365                        all_args.extend(pos_args);
4366                        all_args
4367                    } else {
4368                        pos_args
4369                    };
4370                    let result = func.invoke_exact_args(args, vm)?;
4371                    self.push_value(result);
4372                    Ok(None)
4373                } else {
4374                    self.execute_call_vectorcall(nargs, vm)
4375                }
4376            }
4377            Instruction::CallBoundMethodExactArgs => {
4378                let instr_idx = self.lasti() as usize - 1;
4379                let cache_base = instr_idx + 1;
4380                let cached_version = self.code.instructions.read_cache_u32(cache_base + 1);
4381                let nargs: u32 = arg.into();
4382                if self.specialization_eval_frame_active(vm) {
4383                    return self.execute_call_vectorcall(nargs, vm);
4384                }
4385                // Stack: [callable, self_or_null(NULL), arg1, ..., argN]
4386                let stack_len = self.localsplus.stack_len();
4387                let self_or_null_is_some = self
4388                    .localsplus
4389                    .stack_index(stack_len - nargs as usize - 1)
4390                    .is_some();
4391                let callable = self.nth_value(nargs + 1);
4392                if !self_or_null_is_some
4393                    && let Some(bound_method) = callable.downcast_ref_if_exact::<PyBoundMethod>(vm)
4394                {
4395                    let bound_function = bound_method.function_obj().clone();
4396                    let bound_self = bound_method.self_obj().clone();
4397                    if let Some(func) = bound_function.downcast_ref_if_exact::<PyFunction>(vm)
4398                        && func.func_version() == cached_version
4399                        && cached_version != 0
4400                    {
4401                        if !func.has_exact_argcount(nargs + 1) {
4402                            return self.execute_call_vectorcall(nargs, vm);
4403                        }
4404                        if !self.specialization_has_datastack_space_for_func(vm, func) {
4405                            return self.execute_call_vectorcall(nargs, vm);
4406                        }
4407                        if self.specialization_call_recursion_guard(vm) {
4408                            return self.execute_call_vectorcall(nargs, vm);
4409                        }
4410                        let pos_args: Vec<PyObjectRef> =
4411                            self.pop_multiple(nargs as usize).collect();
4412                        self.pop_value_opt(); // null (self_or_null)
4413                        self.pop_value(); // callable (bound method)
4414                        let mut all_args = Vec::with_capacity(pos_args.len() + 1);
4415                        all_args.push(bound_self);
4416                        all_args.extend(pos_args);
4417                        let result = func.invoke_exact_args(all_args, vm)?;
4418                        self.push_value(result);
4419                        return Ok(None);
4420                    }
4421                }
4422                self.execute_call_vectorcall(nargs, vm)
4423            }
4424            Instruction::CallLen => {
4425                let nargs: u32 = arg.into();
4426                if nargs == 1 {
4427                    // Stack: [callable, null, arg]
4428                    let obj = self.pop_value(); // arg
4429                    let null = self.pop_value_opt();
4430                    let callable = self.pop_value();
4431                    if null.is_none()
4432                        && vm
4433                            .callable_cache
4434                            .len
4435                            .as_ref()
4436                            .is_some_and(|len_callable| callable.is(len_callable))
4437                    {
4438                        let len = obj.length(vm)?;
4439                        self.push_value(vm.ctx.new_int(len).into());
4440                        return Ok(None);
4441                    }
4442                    // Guard failed — re-push and fallback
4443                    self.push_value(callable);
4444                    self.push_value_opt(null);
4445                    self.push_value(obj);
4446                }
4447                self.execute_call_vectorcall(nargs, vm)
4448            }
4449            Instruction::CallIsinstance => {
4450                let nargs: u32 = arg.into();
4451                let stack_len = self.localsplus.stack_len();
4452                let self_or_null_is_some = self
4453                    .localsplus
4454                    .stack_index(stack_len - nargs as usize - 1)
4455                    .is_some();
4456                let effective_nargs = nargs + u32::from(self_or_null_is_some);
4457                if effective_nargs == 2 {
4458                    let callable = self.nth_value(nargs + 1);
4459                    if vm
4460                        .callable_cache
4461                        .isinstance
4462                        .as_ref()
4463                        .is_some_and(|isinstance_callable| callable.is(isinstance_callable))
4464                    {
4465                        let nargs_usize = nargs as usize;
4466                        let pos_args: Vec<PyObjectRef> = self.pop_multiple(nargs_usize).collect();
4467                        let self_or_null = self.pop_value_opt();
4468                        self.pop_value(); // callable
4469                        let mut all_args = Vec::with_capacity(2);
4470                        if let Some(self_val) = self_or_null {
4471                            all_args.push(self_val);
4472                        }
4473                        all_args.extend(pos_args);
4474                        let result = all_args[0].is_instance(&all_args[1], vm)?;
4475                        self.push_value(vm.ctx.new_bool(result).into());
4476                        return Ok(None);
4477                    }
4478                }
4479                self.execute_call_vectorcall(nargs, vm)
4480            }
4481            Instruction::CallType1 => {
4482                let nargs: u32 = arg.into();
4483                if nargs == 1 {
4484                    // Stack: [callable, null, arg]
4485                    let obj = self.pop_value();
4486                    let null = self.pop_value_opt();
4487                    let callable = self.pop_value();
4488                    if null.is_none() && callable.is(vm.ctx.types.type_type.as_object()) {
4489                        let tp = obj.class().to_owned().into();
4490                        self.push_value(tp);
4491                        return Ok(None);
4492                    }
4493                    // Guard failed — re-push and fallback
4494                    self.push_value(callable);
4495                    self.push_value_opt(null);
4496                    self.push_value(obj);
4497                }
4498                self.execute_call_vectorcall(nargs, vm)
4499            }
4500            Instruction::CallStr1 => {
4501                let nargs: u32 = arg.into();
4502                if nargs == 1 {
4503                    let obj = self.pop_value();
4504                    let null = self.pop_value_opt();
4505                    let callable = self.pop_value();
4506                    if null.is_none() && callable.is(vm.ctx.types.str_type.as_object()) {
4507                        let result = obj.str(vm)?;
4508                        self.push_value(result.into());
4509                        return Ok(None);
4510                    }
4511                    self.push_value(callable);
4512                    self.push_value_opt(null);
4513                    self.push_value(obj);
4514                }
4515                self.execute_call_vectorcall(nargs, vm)
4516            }
4517            Instruction::CallTuple1 => {
4518                let nargs: u32 = arg.into();
4519                if nargs == 1 {
4520                    let obj = self.pop_value();
4521                    let null = self.pop_value_opt();
4522                    let callable = self.pop_value();
4523                    if null.is_none() && callable.is(vm.ctx.types.tuple_type.as_object()) {
4524                        // tuple(x) returns x as-is when x is already an exact tuple
4525                        if let Ok(tuple) = obj.clone().downcast_exact::<PyTuple>(vm) {
4526                            self.push_value(tuple.into_pyref().into());
4527                        } else {
4528                            let elements: Vec<PyObjectRef> = vm.extract_elements_with(&obj, Ok)?;
4529                            self.push_value(vm.ctx.new_tuple(elements).into());
4530                        }
4531                        return Ok(None);
4532                    }
4533                    self.push_value(callable);
4534                    self.push_value_opt(null);
4535                    self.push_value(obj);
4536                }
4537                self.execute_call_vectorcall(nargs, vm)
4538            }
4539            Instruction::CallBuiltinO => {
4540                let nargs: u32 = arg.into();
4541                let stack_len = self.localsplus.stack_len();
4542                let self_or_null_is_some = self
4543                    .localsplus
4544                    .stack_index(stack_len - nargs as usize - 1)
4545                    .is_some();
4546                let effective_nargs = nargs + u32::from(self_or_null_is_some);
4547                let callable = self.nth_value(nargs + 1);
4548                if let Some(native) = callable.downcast_ref_if_exact::<PyNativeFunction>(vm) {
4549                    let call_conv = native.value.flags
4550                        & (PyMethodFlags::VARARGS
4551                            | PyMethodFlags::FASTCALL
4552                            | PyMethodFlags::NOARGS
4553                            | PyMethodFlags::O
4554                            | PyMethodFlags::KEYWORDS);
4555                    if call_conv == PyMethodFlags::O && effective_nargs == 1 {
4556                        let nargs_usize = nargs as usize;
4557                        let pos_args: Vec<PyObjectRef> = self.pop_multiple(nargs_usize).collect();
4558                        let self_or_null = self.pop_value_opt();
4559                        let callable = self.pop_value();
4560                        let mut args_vec = Vec::with_capacity(effective_nargs as usize);
4561                        if let Some(self_val) = self_or_null {
4562                            args_vec.push(self_val);
4563                        }
4564                        args_vec.extend(pos_args);
4565                        let result =
4566                            callable.vectorcall(args_vec, effective_nargs as usize, None, vm)?;
4567                        self.push_value(result);
4568                        return Ok(None);
4569                    }
4570                }
4571                self.execute_call_vectorcall(nargs, vm)
4572            }
4573            Instruction::CallBuiltinFast => {
4574                let nargs: u32 = arg.into();
4575                let stack_len = self.localsplus.stack_len();
4576                let self_or_null_is_some = self
4577                    .localsplus
4578                    .stack_index(stack_len - nargs as usize - 1)
4579                    .is_some();
4580                let effective_nargs = nargs + u32::from(self_or_null_is_some);
4581                let callable = self.nth_value(nargs + 1);
4582                if let Some(native) = callable.downcast_ref_if_exact::<PyNativeFunction>(vm) {
4583                    let call_conv = native.value.flags
4584                        & (PyMethodFlags::VARARGS
4585                            | PyMethodFlags::FASTCALL
4586                            | PyMethodFlags::NOARGS
4587                            | PyMethodFlags::O
4588                            | PyMethodFlags::KEYWORDS);
4589                    if call_conv == PyMethodFlags::FASTCALL {
4590                        let nargs_usize = nargs as usize;
4591                        let pos_args: Vec<PyObjectRef> = self.pop_multiple(nargs_usize).collect();
4592                        let self_or_null = self.pop_value_opt();
4593                        let callable = self.pop_value();
4594                        let mut args_vec = Vec::with_capacity(effective_nargs as usize);
4595                        if let Some(self_val) = self_or_null {
4596                            args_vec.push(self_val);
4597                        }
4598                        args_vec.extend(pos_args);
4599                        let result =
4600                            callable.vectorcall(args_vec, effective_nargs as usize, None, vm)?;
4601                        self.push_value(result);
4602                        return Ok(None);
4603                    }
4604                }
4605                self.execute_call_vectorcall(nargs, vm)
4606            }
4607            Instruction::CallPyGeneral => {
4608                let instr_idx = self.lasti() as usize - 1;
4609                let cache_base = instr_idx + 1;
4610                let cached_version = self.code.instructions.read_cache_u32(cache_base + 1);
4611                let nargs: u32 = arg.into();
4612                if self.specialization_eval_frame_active(vm) {
4613                    return self.execute_call_vectorcall(nargs, vm);
4614                }
4615                let callable = self.nth_value(nargs + 1);
4616                if let Some(func) = callable.downcast_ref_if_exact::<PyFunction>(vm)
4617                    && func.func_version() == cached_version
4618                    && cached_version != 0
4619                {
4620                    if self.specialization_call_recursion_guard(vm) {
4621                        return self.execute_call_vectorcall(nargs, vm);
4622                    }
4623                    let nargs_usize = nargs as usize;
4624                    let pos_args: Vec<PyObjectRef> = self.pop_multiple(nargs_usize).collect();
4625                    let self_or_null = self.pop_value_opt();
4626                    let callable = self.pop_value();
4627                    let (args_vec, effective_nargs) = if let Some(self_val) = self_or_null {
4628                        let mut v = Vec::with_capacity(nargs_usize + 1);
4629                        v.push(self_val);
4630                        v.extend(pos_args);
4631                        (v, nargs_usize + 1)
4632                    } else {
4633                        (pos_args, nargs_usize)
4634                    };
4635                    let result =
4636                        vectorcall_function(&callable, args_vec, effective_nargs, None, vm)?;
4637                    self.push_value(result);
4638                    Ok(None)
4639                } else {
4640                    self.execute_call_vectorcall(nargs, vm)
4641                }
4642            }
4643            Instruction::CallBoundMethodGeneral => {
4644                let instr_idx = self.lasti() as usize - 1;
4645                let cache_base = instr_idx + 1;
4646                let cached_version = self.code.instructions.read_cache_u32(cache_base + 1);
4647                let nargs: u32 = arg.into();
4648                if self.specialization_eval_frame_active(vm) {
4649                    return self.execute_call_vectorcall(nargs, vm);
4650                }
4651                let stack_len = self.localsplus.stack_len();
4652                let self_or_null_is_some = self
4653                    .localsplus
4654                    .stack_index(stack_len - nargs as usize - 1)
4655                    .is_some();
4656                let callable = self.nth_value(nargs + 1);
4657                if !self_or_null_is_some
4658                    && let Some(bound_method) = callable.downcast_ref_if_exact::<PyBoundMethod>(vm)
4659                {
4660                    let bound_function = bound_method.function_obj().clone();
4661                    let bound_self = bound_method.self_obj().clone();
4662                    if let Some(func) = bound_function.downcast_ref_if_exact::<PyFunction>(vm)
4663                        && func.func_version() == cached_version
4664                        && cached_version != 0
4665                    {
4666                        if self.specialization_call_recursion_guard(vm) {
4667                            return self.execute_call_vectorcall(nargs, vm);
4668                        }
4669                        let nargs_usize = nargs as usize;
4670                        let pos_args: Vec<PyObjectRef> = self.pop_multiple(nargs_usize).collect();
4671                        self.pop_value_opt(); // null (self_or_null)
4672                        self.pop_value(); // callable (bound method)
4673                        let mut args_vec = Vec::with_capacity(nargs_usize + 1);
4674                        args_vec.push(bound_self);
4675                        args_vec.extend(pos_args);
4676                        let result = vectorcall_function(
4677                            &bound_function,
4678                            args_vec,
4679                            nargs_usize + 1,
4680                            None,
4681                            vm,
4682                        )?;
4683                        self.push_value(result);
4684                        return Ok(None);
4685                    }
4686                }
4687                self.execute_call_vectorcall(nargs, vm)
4688            }
            Instruction::CallListAppend => {
                // Specialized CALL for `list.append(x)` with exactly one
                // positional argument. On success the append happens in place
                // and the fused POP_TOP that follows is skipped; on any guard
                // failure we fall back to the generic vectorcall path.
                let nargs: u32 = arg.into();
                if nargs == 1 {
                    // Stack: [callable, self_or_null, item]
                    let stack_len = self.localsplus.stack_len();
                    let self_or_null_is_some = self.localsplus.stack_index(stack_len - 2).is_some();
                    let callable = self.nth_value(2);
                    // Guard: the receiver slot (self_or_null) actually holds a PyList.
                    let self_is_list = self
                        .localsplus
                        .stack_index(stack_len - 2)
                        .as_ref()
                        .is_some_and(|obj| obj.downcast_ref::<PyList>().is_some());
                    // Guard: callable is identical to the VM's cached `list.append`.
                    if vm
                        .callable_cache
                        .list_append
                        .as_ref()
                        .is_some_and(|list_append| callable.is(list_append))
                        && self_or_null_is_some
                        && self_is_list
                    {
                        // Guards passed: pop the operands and re-verify the
                        // receiver downcast before mutating.
                        let item = self.pop_value();
                        let self_or_null = self.pop_value_opt();
                        let callable = self.pop_value();
                        if let Some(list_obj) = self_or_null.as_ref()
                            && let Some(list) = list_obj.downcast_ref::<PyList>()
                        {
                            list.append(item);
                            // CALL_LIST_APPEND fuses the following POP_TOP.
                            self.jump_relative_forward(
                                1,
                                Instruction::CallListAppend.cache_entries() as u32,
                            );
                            return Ok(None);
                        }
                        // Deopt: restore the operand stack exactly as it was
                        // (callable, self_or_null, item) and fall through.
                        self.push_value(callable);
                        self.push_value_opt(self_or_null);
                        self.push_value(item);
                    }
                }
                // Generic (unspecialized) call path.
                self.execute_call_vectorcall(nargs, vm)
            }
            Instruction::CallMethodDescriptorNoargs => {
                // Specialized CALL for an exact PyMethodDescriptor whose
                // calling convention is exactly NOARGS: invoke the native
                // function directly with `self` as the single argument,
                // bypassing descriptor binding and vectorcall dispatch.
                let nargs: u32 = arg.into();
                let stack_len = self.localsplus.stack_len();
                let self_or_null_is_some = self
                    .localsplus
                    .stack_index(stack_len - nargs as usize - 1)
                    .is_some();
                let total_nargs = nargs + u32::from(self_or_null_is_some);
                // NOARGS means the only argument is the receiver itself.
                if total_nargs == 1 {
                    let callable = self.nth_value(nargs + 1);
                    // When self_or_null is null, the first positional argument
                    // occupies the `self` slot instead.
                    let self_index =
                        stack_len - nargs as usize - 1 + usize::from(!self_or_null_is_some);
                    // Guards: exact descriptor type, convention flags == NOARGS
                    // only, and the receiver's class is exactly the defining
                    // class of the descriptor.
                    if let Some(descr) = callable.downcast_ref_if_exact::<PyMethodDescriptor>(vm)
                        && (descr.method.flags
                            & (PyMethodFlags::VARARGS
                                | PyMethodFlags::FASTCALL
                                | PyMethodFlags::NOARGS
                                | PyMethodFlags::O
                                | PyMethodFlags::KEYWORDS))
                            == PyMethodFlags::NOARGS
                        && self
                            .localsplus
                            .stack_index(self_index)
                            .as_ref()
                            .is_some_and(|self_obj| self_obj.class().is(descr.objclass))
                    {
                        let func = descr.method.func;
                        let positional_args: Vec<PyObjectRef> =
                            self.pop_multiple(nargs as usize).collect();
                        let self_or_null = self.pop_value_opt();
                        self.pop_value(); // callable
                        // Prepend `self` (when present) to the positional args.
                        let mut all_args = Vec::with_capacity(total_nargs as usize);
                        if let Some(self_val) = self_or_null {
                            all_args.push(self_val);
                        }
                        all_args.extend(positional_args);
                        let args = FuncArgs {
                            args: all_args,
                            kwargs: Default::default(),
                        };
                        let result = func(vm, args)?;
                        self.push_value(result);
                        return Ok(None);
                    }
                }
                // Deopt: generic vectorcall path.
                self.execute_call_vectorcall(nargs, vm)
            }
            Instruction::CallMethodDescriptorO => {
                // Specialized CALL for an exact PyMethodDescriptor whose
                // calling convention is exactly METH_O: one receiver plus one
                // argument (total_nargs == 2), invoked directly on the native
                // function pointer.
                let nargs: u32 = arg.into();
                let stack_len = self.localsplus.stack_len();
                let self_or_null_is_some = self
                    .localsplus
                    .stack_index(stack_len - nargs as usize - 1)
                    .is_some();
                let total_nargs = nargs + u32::from(self_or_null_is_some);
                if total_nargs == 2 {
                    let callable = self.nth_value(nargs + 1);
                    // When self_or_null is null, the first positional argument
                    // occupies the `self` slot instead.
                    let self_index =
                        stack_len - nargs as usize - 1 + usize::from(!self_or_null_is_some);
                    // Guards: exact descriptor type, convention flags == O only,
                    // and the receiver's class is exactly the defining class.
                    if let Some(descr) = callable.downcast_ref_if_exact::<PyMethodDescriptor>(vm)
                        && (descr.method.flags
                            & (PyMethodFlags::VARARGS
                                | PyMethodFlags::FASTCALL
                                | PyMethodFlags::NOARGS
                                | PyMethodFlags::O
                                | PyMethodFlags::KEYWORDS))
                            == PyMethodFlags::O
                        && self
                            .localsplus
                            .stack_index(self_index)
                            .as_ref()
                            .is_some_and(|self_obj| self_obj.class().is(descr.objclass))
                    {
                        let func = descr.method.func;
                        let positional_args: Vec<PyObjectRef> =
                            self.pop_multiple(nargs as usize).collect();
                        let self_or_null = self.pop_value_opt();
                        self.pop_value(); // callable
                        // Prepend `self` (when present) to the positional args.
                        let mut all_args = Vec::with_capacity(total_nargs as usize);
                        if let Some(self_val) = self_or_null {
                            all_args.push(self_val);
                        }
                        all_args.extend(positional_args);
                        let args = FuncArgs {
                            args: all_args,
                            kwargs: Default::default(),
                        };
                        let result = func(vm, args)?;
                        self.push_value(result);
                        return Ok(None);
                    }
                }
                // Deopt: generic vectorcall path.
                self.execute_call_vectorcall(nargs, vm)
            }
            Instruction::CallMethodDescriptorFast => {
                // Specialized CALL for an exact PyMethodDescriptor whose
                // calling convention is exactly FASTCALL (positional-only,
                // no keywords): invoke the native function pointer directly.
                let nargs: u32 = arg.into();
                let stack_len = self.localsplus.stack_len();
                let self_or_null_is_some = self
                    .localsplus
                    .stack_index(stack_len - nargs as usize - 1)
                    .is_some();
                let total_nargs = nargs + u32::from(self_or_null_is_some);
                let callable = self.nth_value(nargs + 1);
                // When self_or_null is null, the first positional argument
                // occupies the `self` slot instead.
                let self_index =
                    stack_len - nargs as usize - 1 + usize::from(!self_or_null_is_some);
                // Guards: at least a receiver, exact descriptor type,
                // convention flags == FASTCALL only, and the receiver's class
                // is exactly the descriptor's defining class.
                if total_nargs > 0
                    && let Some(descr) = callable.downcast_ref_if_exact::<PyMethodDescriptor>(vm)
                    && (descr.method.flags
                        & (PyMethodFlags::VARARGS
                            | PyMethodFlags::FASTCALL
                            | PyMethodFlags::NOARGS
                            | PyMethodFlags::O
                            | PyMethodFlags::KEYWORDS))
                        == PyMethodFlags::FASTCALL
                    && self
                        .localsplus
                        .stack_index(self_index)
                        .as_ref()
                        .is_some_and(|self_obj| self_obj.class().is(descr.objclass))
                {
                    let func = descr.method.func;
                    let positional_args: Vec<PyObjectRef> =
                        self.pop_multiple(nargs as usize).collect();
                    let self_or_null = self.pop_value_opt();
                    self.pop_value(); // callable
                    // Prepend `self` (when present) to the positional args.
                    let mut all_args = Vec::with_capacity(total_nargs as usize);
                    if let Some(self_val) = self_or_null {
                        all_args.push(self_val);
                    }
                    all_args.extend(positional_args);
                    let args = FuncArgs {
                        args: all_args,
                        kwargs: Default::default(),
                    };
                    let result = func(vm, args)?;
                    self.push_value(result);
                    return Ok(None);
                }
                // Deopt: generic vectorcall path.
                self.execute_call_vectorcall(nargs, vm)
            }
            Instruction::CallBuiltinClass => {
                // Specialized CALL for a type object that exposes a vectorcall
                // slot: call it through `vectorcall` directly instead of going
                // through the generic call machinery.
                let nargs: u32 = arg.into();
                let callable = self.nth_value(nargs + 1);
                if let Some(cls) = callable.downcast_ref::<PyType>()
                    && cls.slots.vectorcall.load().is_some()
                {
                    let nargs_usize = nargs as usize;
                    let pos_args: Vec<PyObjectRef> = self.pop_multiple(nargs_usize).collect();
                    let self_or_null = self.pop_value_opt();
                    let callable = self.pop_value();
                    let self_is_some = self_or_null.is_some();
                    // Prepend self (when present) to the positional args.
                    let mut args_vec = Vec::with_capacity(nargs_usize + usize::from(self_is_some));
                    if let Some(self_val) = self_or_null {
                        args_vec.push(self_val);
                    }
                    args_vec.extend(pos_args);
                    let result = callable.vectorcall(
                        args_vec,
                        nargs_usize + usize::from(self_is_some),
                        None,
                        vm,
                    )?;
                    self.push_value(result);
                    return Ok(None);
                }
                // Deopt: generic vectorcall path.
                self.execute_call_vectorcall(nargs, vm)
            }
            Instruction::CallAllocAndEnterInit => {
                // Specialized CALL for instantiating a class whose type
                // version matches the inline cache: allocate the instance via
                // the type's alloc slot and run the cached `__init__` through
                // the _Py_InitCleanup-style shim, instead of a full type call.
                // The cache layout puts the cached tp_version_tag at
                // cache_base + 1, right after this instruction.
                let instr_idx = self.lasti() as usize - 1;
                let cache_base = instr_idx + 1;
                let cached_version = self.code.instructions.read_cache_u32(cache_base + 1);
                let nargs: u32 = arg.into();
                let callable = self.nth_value(nargs + 1);
                let stack_len = self.localsplus.stack_len();
                let self_or_null_is_some = self
                    .localsplus
                    .stack_index(stack_len - nargs as usize - 1)
                    .is_some();
                // Guards: no custom eval frame hook, a null self slot, a valid
                // cached version that still matches the type's tp_version_tag,
                // plus a cached __init__ and an alloc slot on the type.
                if !self.specialization_eval_frame_active(vm)
                    && !self_or_null_is_some
                    && cached_version != 0
                    && let Some(cls) = callable.downcast_ref::<PyType>()
                    && cls.tp_version_tag.load(Acquire) == cached_version
                    && let Some(init_func) = cls.get_cached_init_for_specialization(cached_version)
                    && let Some(cls_alloc) = cls.slots.alloc.load()
                {
                    // Match CPython's `code->co_framesize + _Py_InitCleanup.co_framesize`
                    // shape, using RustPython's datastack-backed frame size
                    // equivalent for the extra shim frame.
                    let init_cleanup_stack_bytes =
                        datastack_frame_size_bytes_for_code(&vm.ctx.init_cleanup_code)
                            .expect("_Py_InitCleanup shim is not a generator/coroutine");
                    if !self.specialization_has_datastack_space_for_func_with_extra(
                        vm,
                        &init_func,
                        init_cleanup_stack_bytes,
                    ) {
                        // Deopt: not enough datastack room for both frames.
                        return self.execute_call_vectorcall(nargs, vm);
                    }
                    // CPython creates `_Py_InitCleanup` + `__init__` frames here.
                    // Keep the guard conservative and deopt when the effective
                    // recursion budget for those two frames is not available.
                    if self.specialization_call_recursion_guard_with_extra_frames(vm, 1) {
                        return self.execute_call_vectorcall(nargs, vm);
                    }
                    // Allocate object directly (tp_new == object.__new__, tp_alloc == generic).
                    let cls_ref = cls.to_owned();
                    let new_obj = cls_alloc(cls_ref, 0, vm)?;

                    // Build args: [new_obj, arg1, ..., argN]
                    let pos_args: Vec<PyObjectRef> = self.pop_multiple(nargs as usize).collect();
                    let _null = self.pop_value_opt(); // self_or_null (None)
                    let _callable = self.pop_value(); // callable (type)
                    let result = self
                        .specialization_run_init_cleanup_shim(new_obj, &init_func, pos_args, vm)?;
                    self.push_value(result);
                    return Ok(None);
                }
                // Deopt: generic vectorcall path.
                self.execute_call_vectorcall(nargs, vm)
            }
            Instruction::CallMethodDescriptorFastWithKeywords => {
                // Specialized CALL for an exact PyMethodDescriptor with the
                // FASTCALL|KEYWORDS convention. This opcode handles the
                // positional-only CALL form, so kwargs stay empty here.
                // Native function interface is uniform regardless of keyword support
                let nargs: u32 = arg.into();
                let stack_len = self.localsplus.stack_len();
                let self_or_null_is_some = self
                    .localsplus
                    .stack_index(stack_len - nargs as usize - 1)
                    .is_some();
                let total_nargs = nargs + u32::from(self_or_null_is_some);
                let callable = self.nth_value(nargs + 1);
                // When self_or_null is null, the first positional argument
                // occupies the `self` slot instead.
                let self_index =
                    stack_len - nargs as usize - 1 + usize::from(!self_or_null_is_some);
                // Guards: at least a receiver, exact descriptor type,
                // convention flags == FASTCALL|KEYWORDS exactly, and the
                // receiver's class is exactly the descriptor's defining class.
                if total_nargs > 0
                    && let Some(descr) = callable.downcast_ref_if_exact::<PyMethodDescriptor>(vm)
                    && (descr.method.flags
                        & (PyMethodFlags::VARARGS
                            | PyMethodFlags::FASTCALL
                            | PyMethodFlags::NOARGS
                            | PyMethodFlags::O
                            | PyMethodFlags::KEYWORDS))
                        == (PyMethodFlags::FASTCALL | PyMethodFlags::KEYWORDS)
                    && self
                        .localsplus
                        .stack_index(self_index)
                        .as_ref()
                        .is_some_and(|self_obj| self_obj.class().is(descr.objclass))
                {
                    let func = descr.method.func;
                    let positional_args: Vec<PyObjectRef> =
                        self.pop_multiple(nargs as usize).collect();
                    let self_or_null = self.pop_value_opt();
                    self.pop_value(); // callable
                    // Prepend `self` (when present) to the positional args.
                    let mut all_args = Vec::with_capacity(total_nargs as usize);
                    if let Some(self_val) = self_or_null {
                        all_args.push(self_val);
                    }
                    all_args.extend(positional_args);
                    let args = FuncArgs {
                        args: all_args,
                        kwargs: Default::default(),
                    };
                    let result = func(vm, args)?;
                    self.push_value(result);
                    return Ok(None);
                }
                // Deopt: generic vectorcall path.
                self.execute_call_vectorcall(nargs, vm)
            }
            Instruction::CallBuiltinFastWithKeywords => {
                // Specialized CALL for an exact PyNativeFunction with the
                // FASTCALL|KEYWORDS convention: dispatch through `vectorcall`
                // with positional args only (this opcode carries no kwargs).
                // Native function interface is uniform regardless of keyword support
                let nargs: u32 = arg.into();
                let stack_len = self.localsplus.stack_len();
                let self_or_null_is_some = self
                    .localsplus
                    .stack_index(stack_len - nargs as usize - 1)
                    .is_some();
                let effective_nargs = nargs + u32::from(self_or_null_is_some);
                let callable = self.nth_value(nargs + 1);
                if let Some(native) = callable.downcast_ref_if_exact::<PyNativeFunction>(vm) {
                    // Isolate the calling-convention bits of the flags.
                    let call_conv = native.value.flags
                        & (PyMethodFlags::VARARGS
                            | PyMethodFlags::FASTCALL
                            | PyMethodFlags::NOARGS
                            | PyMethodFlags::O
                            | PyMethodFlags::KEYWORDS);
                    if call_conv == (PyMethodFlags::FASTCALL | PyMethodFlags::KEYWORDS) {
                        let nargs_usize = nargs as usize;
                        let pos_args: Vec<PyObjectRef> = self.pop_multiple(nargs_usize).collect();
                        let self_or_null = self.pop_value_opt();
                        let callable = self.pop_value();
                        // Prepend self (when present) to the positional args.
                        let mut args_vec = Vec::with_capacity(effective_nargs as usize);
                        if let Some(self_val) = self_or_null {
                            args_vec.push(self_val);
                        }
                        args_vec.extend(pos_args);
                        let result =
                            callable.vectorcall(args_vec, effective_nargs as usize, None, vm)?;
                        self.push_value(result);
                        return Ok(None);
                    }
                }
                // Deopt: generic vectorcall path.
                self.execute_call_vectorcall(nargs, vm)
            }
            Instruction::CallNonPyGeneral => {
                // Specialized CALL for any callable that is NOT a Python
                // function or bound method: those two exact types deopt back
                // to the generic path; everything else goes straight through
                // `vectorcall`.
                let nargs: u32 = arg.into();
                let stack_len = self.localsplus.stack_len();
                let self_or_null_is_some = self
                    .localsplus
                    .stack_index(stack_len - nargs as usize - 1)
                    .is_some();
                let callable = self.nth_value(nargs + 1);
                if callable.downcast_ref_if_exact::<PyFunction>(vm).is_some()
                    || callable
                        .downcast_ref_if_exact::<PyBoundMethod>(vm)
                        .is_some()
                {
                    // Deopt: Python-level callables take the generic path.
                    return self.execute_call_vectorcall(nargs, vm);
                }
                let nargs_usize = nargs as usize;
                let pos_args: Vec<PyObjectRef> = self.pop_multiple(nargs_usize).collect();
                let self_or_null = self.pop_value_opt();
                let callable = self.pop_value();
                // Prepend self (when present) to the positional args.
                let mut args_vec =
                    Vec::with_capacity(nargs_usize + usize::from(self_or_null_is_some));
                if let Some(self_val) = self_or_null {
                    args_vec.push(self_val);
                }
                args_vec.extend(pos_args);
                let result = callable.vectorcall(
                    args_vec,
                    nargs_usize + usize::from(self_or_null_is_some),
                    None,
                    vm,
                )?;
                self.push_value(result);
                Ok(None)
            }
            Instruction::CallKwPy => {
                // Specialized CALL_KW for an exact Python function whose
                // version matches the inline cache: build the vectorcall-style
                // argument vector (positionals followed by keyword values,
                // names passed separately as `kwnames`) and call directly.
                let instr_idx = self.lasti() as usize - 1;
                let cache_base = instr_idx + 1;
                let cached_version = self.code.instructions.read_cache_u32(cache_base + 1);
                let nargs: u32 = arg.into();
                if self.specialization_eval_frame_active(vm) {
                    // Deopt: a custom eval frame hook must see the call.
                    return self.execute_call_kw_vectorcall(nargs, vm);
                }
                // Stack: [callable, self_or_null, arg1, ..., argN, kwarg_names]
                let callable = self.nth_value(nargs + 2);
                if let Some(func) = callable.downcast_ref_if_exact::<PyFunction>(vm)
                    && func.func_version() == cached_version
                    && cached_version != 0
                {
                    if self.specialization_call_recursion_guard(vm) {
                        // Deopt: recursion budget exhausted.
                        return self.execute_call_kw_vectorcall(nargs, vm);
                    }
                    let nargs_usize = nargs as usize;
                    let kwarg_names_obj = self.pop_value();
                    let kwarg_names_tuple = kwarg_names_obj
                        .downcast_ref::<PyTuple>()
                        .expect("kwarg names should be tuple");
                    let kw_count = kwarg_names_tuple.len();
                    // `nargs` counts positional AND keyword values together.
                    let all_args: Vec<PyObjectRef> = self.pop_multiple(nargs_usize).collect();
                    let self_or_null = self.pop_value_opt();
                    let callable = self.pop_value();
                    let pos_count = nargs_usize - kw_count;
                    // `effective_nargs` is the positional count only; keyword
                    // values stay in the vector after it, named by `kwnames`.
                    let (args_vec, effective_nargs) = if let Some(self_val) = self_or_null {
                        let mut v = Vec::with_capacity(nargs_usize + 1);
                        v.push(self_val);
                        v.extend(all_args);
                        (v, pos_count + 1)
                    } else {
                        (all_args, pos_count)
                    };
                    let kwnames = kwarg_names_tuple.as_slice();
                    let result = vectorcall_function(
                        &callable,
                        args_vec,
                        effective_nargs,
                        Some(kwnames),
                        vm,
                    )?;
                    self.push_value(result);
                    return Ok(None);
                }
                // Deopt: generic CALL_KW path.
                self.execute_call_kw_vectorcall(nargs, vm)
            }
            Instruction::CallKwBoundMethod => {
                // Specialized CALL_KW for an exact bound method wrapping a
                // Python function whose version matches the inline cache:
                // unwrap the bound method, prepend its receiver as the first
                // positional argument, and vectorcall the underlying function.
                let instr_idx = self.lasti() as usize - 1;
                let cache_base = instr_idx + 1;
                let cached_version = self.code.instructions.read_cache_u32(cache_base + 1);
                let nargs: u32 = arg.into();
                if self.specialization_eval_frame_active(vm) {
                    // Deopt: a custom eval frame hook must see the call.
                    return self.execute_call_kw_vectorcall(nargs, vm);
                }
                // Stack: [callable, self_or_null, arg1, ..., argN, kwarg_names]
                let stack_len = self.localsplus.stack_len();
                let self_or_null_is_some = self
                    .localsplus
                    .stack_index(stack_len - nargs as usize - 2)
                    .is_some();
                let callable = self.nth_value(nargs + 2);
                // A bound method requires the self slot to be null; its
                // receiver is supplied by the method object itself.
                if !self_or_null_is_some
                    && let Some(bound_method) = callable.downcast_ref_if_exact::<PyBoundMethod>(vm)
                {
                    let bound_function = bound_method.function_obj().clone();
                    let bound_self = bound_method.self_obj().clone();
                    if let Some(func) = bound_function.downcast_ref_if_exact::<PyFunction>(vm)
                        && func.func_version() == cached_version
                        && cached_version != 0
                    {
                        let nargs_usize = nargs as usize;
                        let kwarg_names_obj = self.pop_value();
                        let kwarg_names_tuple = kwarg_names_obj
                            .downcast_ref::<PyTuple>()
                            .expect("kwarg names should be tuple");
                        let kw_count = kwarg_names_tuple.len();
                        // `nargs` counts positional AND keyword values together.
                        let all_args: Vec<PyObjectRef> = self.pop_multiple(nargs_usize).collect();
                        self.pop_value_opt(); // null (self_or_null)
                        self.pop_value(); // callable (bound method)
                        let pos_count = nargs_usize - kw_count;
                        let mut args_vec = Vec::with_capacity(nargs_usize + 1);
                        args_vec.push(bound_self);
                        args_vec.extend(all_args);
                        let kwnames = kwarg_names_tuple.as_slice();
                        // pos_count + 1: the bound receiver counts as a
                        // positional argument.
                        let result = vectorcall_function(
                            &bound_function,
                            args_vec,
                            pos_count + 1,
                            Some(kwnames),
                            vm,
                        )?;
                        self.push_value(result);
                        return Ok(None);
                    }
                }
                // Deopt: generic CALL_KW path.
                self.execute_call_kw_vectorcall(nargs, vm)
            }
            Instruction::CallKwNonPy => {
                // Specialized CALL_KW for callables that are NOT Python
                // functions or bound methods: those deopt; everything else is
                // vectorcalled directly with `kwnames` carrying the keyword
                // argument names.
                let nargs: u32 = arg.into();
                let stack_len = self.localsplus.stack_len();
                let self_or_null_is_some = self
                    .localsplus
                    .stack_index(stack_len - nargs as usize - 2)
                    .is_some();
                let callable = self.nth_value(nargs + 2);
                if callable.downcast_ref_if_exact::<PyFunction>(vm).is_some()
                    || callable
                        .downcast_ref_if_exact::<PyBoundMethod>(vm)
                        .is_some()
                {
                    // Deopt: Python-level callables take the generic path.
                    return self.execute_call_kw_vectorcall(nargs, vm);
                }
                let nargs_usize = nargs as usize;
                let kwarg_names_obj = self.pop_value();
                let kwarg_names_tuple = kwarg_names_obj
                    .downcast_ref::<PyTuple>()
                    .expect("kwarg names should be tuple");
                let kw_count = kwarg_names_tuple.len();
                // `nargs` counts positional AND keyword values together.
                let all_args: Vec<PyObjectRef> = self.pop_multiple(nargs_usize).collect();
                let self_or_null = self.pop_value_opt();
                let callable = self.pop_value();
                let pos_count = nargs_usize - kw_count;
                // Prepend self (when present) to the positional args.
                let mut args_vec =
                    Vec::with_capacity(nargs_usize + usize::from(self_or_null_is_some));
                if let Some(self_val) = self_or_null {
                    args_vec.push(self_val);
                }
                args_vec.extend(all_args);
                let result = callable.vectorcall(
                    args_vec,
                    pos_count + usize::from(self_or_null_is_some),
                    Some(kwarg_names_tuple.as_slice()),
                    vm,
                )?;
                self.push_value(result);
                Ok(None)
            }
            Instruction::LoadSuperAttrAttr => {
                // Specialized LOAD_SUPER_ATTR (plain attribute form): resolve
                // the attribute by walking self's MRO past `class` inline,
                // bypassing construction of a `super` object. Falls back to
                // the generic `load_super_attr` path when any guard fails or
                // the attribute is not found.
                let oparg = u32::from(arg);
                // The name index lives in the high bits; the two low bits
                // presumably carry LoadSuperAttr flags (see from_u32 below)
                // — TODO confirm against the bytecode definition.
                let attr_name = self.code.names[(oparg >> 2) as usize];
                // Stack: [global_super, class, self]
                let self_obj = self.top_value();
                let class_obj = self.nth_value(1);
                let global_super = self.nth_value(2);
                // Guard: global_super is builtin super and class is a type
                if global_super.is(&vm.ctx.types.super_type.as_object())
                    && class_obj.downcast_ref::<PyType>().is_some()
                {
                    let class = class_obj.downcast_ref::<PyType>().unwrap();
                    let start_type = self_obj.class();
                    // MRO lookup: skip classes up to and including `class`, then search
                    let mro: Vec<PyRef<PyType>> = start_type.mro_map_collect(|x| x.to_owned());
                    let mut found = None;
                    let mut past_class = false;
                    for cls in &mro {
                        if !past_class {
                            // `continue` also skips `class` itself once found.
                            if cls.is(class) {
                                past_class = true;
                            }
                            continue;
                        }
                        if let Some(descr) = cls.get_direct_attr(attr_name) {
                            // Call descriptor __get__ if available
                            // Pass None for obj when self IS its own type (classmethod)
                            let obj_arg = if self_obj.is(start_type.as_object()) {
                                None
                            } else {
                                Some(self_obj.to_owned())
                            };
                            // Non-descriptors fall through as the raw value.
                            let result = vm
                                .call_get_descriptor_specific(
                                    &descr,
                                    obj_arg,
                                    Some(start_type.as_object().to_owned()),
                                )
                                .unwrap_or(Ok(descr))?;
                            found = Some(result);
                            break;
                        }
                    }
                    if let Some(attr) = found {
                        // Replace [super, class, self] with the resolved attribute.
                        self.pop_value(); // self
                        self.pop_value(); // class
                        self.pop_value(); // super
                        self.push_value(attr);
                        return Ok(None);
                    }
                }
                // Deopt: generic LOAD_SUPER_ATTR path.
                let oparg = LoadSuperAttr::from_u32(oparg);
                self.load_super_attr(vm, oparg)
            }
5259            Instruction::LoadSuperAttrMethod => {
5260                let oparg = u32::from(arg);
5261                let attr_name = self.code.names[(oparg >> 2) as usize];
5262                // Stack: [global_super, class, self]
5263                let self_obj = self.top_value();
5264                let class_obj = self.nth_value(1);
5265                let global_super = self.nth_value(2);
5266                // Guard: global_super is builtin super and class is a type
5267                if global_super.is(&vm.ctx.types.super_type.as_object())
5268                    && class_obj.downcast_ref::<PyType>().is_some()
5269                {
5270                    let class = class_obj.downcast_ref::<PyType>().unwrap();
5271                    let self_val = self_obj.to_owned();
5272                    let start_type = self_obj.class();
5273                    // MRO lookup
5274                    let mro: Vec<PyRef<PyType>> = start_type.mro_map_collect(|x| x.to_owned());
5275                    let mut found = None;
5276                    let mut past_class = false;
5277                    for cls in &mro {
5278                        if !past_class {
5279                            if cls.is(class) {
5280                                past_class = true;
5281                            }
5282                            continue;
5283                        }
5284                        if let Some(descr) = cls.get_direct_attr(attr_name) {
5285                            let descr_cls = descr.class();
5286                            if descr_cls
5287                                .slots
5288                                .flags
5289                                .has_feature(PyTypeFlags::METHOD_DESCRIPTOR)
5290                            {
5291                                // Method descriptor: push unbound func + self
5292                                // CALL will prepend self as first positional arg
5293                                found = Some((descr, true));
5294                            } else if let Some(descr_get) = descr_cls.slots.descr_get.load() {
5295                                // Has __get__ but not METHOD_DESCRIPTOR: bind it
5296                                let bound = descr_get(
5297                                    descr,
5298                                    Some(self_val.clone()),
5299                                    Some(start_type.as_object().to_owned()),
5300                                    vm,
5301                                )?;
5302                                found = Some((bound, false));
5303                            } else {
5304                                // Plain attribute
5305                                found = Some((descr, false));
5306                            }
5307                            break;
5308                        }
5309                    }
5310                    if let Some((attr, is_method)) = found {
5311                        self.pop_value(); // self
5312                        self.pop_value(); // class
5313                        self.pop_value(); // super
5314                        self.push_value(attr);
5315                        if is_method {
5316                            self.push_value(self_val);
5317                        } else {
5318                            self.push_null();
5319                        }
5320                        return Ok(None);
5321                    }
5322                }
5323                let oparg = LoadSuperAttr::from_u32(oparg);
5324                self.load_super_attr(vm, oparg)
5325            }
5326            Instruction::CompareOpInt => {
5327                let b = self.top_value();
5328                let a = self.nth_value(1);
5329                if let (Some(a_int), Some(b_int)) = (
5330                    a.downcast_ref_if_exact::<PyInt>(vm),
5331                    b.downcast_ref_if_exact::<PyInt>(vm),
5332                ) && let (Some(a_val), Some(b_val)) = (
5333                    specialization_compact_int_value(a_int, vm),
5334                    specialization_compact_int_value(b_int, vm),
5335                ) {
5336                    let op = self.compare_op_from_arg(arg);
5337                    let result = op.eval_ord(a_val.cmp(&b_val));
5338                    self.pop_value();
5339                    self.pop_value();
5340                    self.push_value(vm.ctx.new_bool(result).into());
5341                    Ok(None)
5342                } else {
5343                    let op = bytecode::ComparisonOperator::try_from(u32::from(arg))
5344                        .unwrap_or(bytecode::ComparisonOperator::Equal);
5345                    self.execute_compare(vm, op)
5346                }
5347            }
5348            Instruction::CompareOpFloat => {
5349                let b = self.top_value();
5350                let a = self.nth_value(1);
5351                if let (Some(a_f), Some(b_f)) = (
5352                    a.downcast_ref_if_exact::<PyFloat>(vm),
5353                    b.downcast_ref_if_exact::<PyFloat>(vm),
5354                ) {
5355                    let op = self.compare_op_from_arg(arg);
5356                    let (a, b) = (a_f.to_f64(), b_f.to_f64());
5357                    // Use Rust's IEEE 754 float comparison which handles NaN correctly
5358                    let result = match a.partial_cmp(&b) {
5359                        Some(ord) => op.eval_ord(ord),
5360                        None => op == PyComparisonOp::Ne, // NaN != anything is true
5361                    };
5362                    self.pop_value();
5363                    self.pop_value();
5364                    self.push_value(vm.ctx.new_bool(result).into());
5365                    Ok(None)
5366                } else {
5367                    let op = bytecode::ComparisonOperator::try_from(u32::from(arg))
5368                        .unwrap_or(bytecode::ComparisonOperator::Equal);
5369                    self.execute_compare(vm, op)
5370                }
5371            }
5372            Instruction::CompareOpStr => {
5373                let b = self.top_value();
5374                let a = self.nth_value(1);
5375                if let (Some(a_str), Some(b_str)) = (
5376                    a.downcast_ref_if_exact::<PyStr>(vm),
5377                    b.downcast_ref_if_exact::<PyStr>(vm),
5378                ) {
5379                    let op = self.compare_op_from_arg(arg);
5380                    if op != PyComparisonOp::Eq && op != PyComparisonOp::Ne {
5381                        let op = bytecode::ComparisonOperator::try_from(u32::from(arg))
5382                            .unwrap_or(bytecode::ComparisonOperator::Equal);
5383                        return self.execute_compare(vm, op);
5384                    }
5385                    let result = op.eval_ord(a_str.as_wtf8().cmp(b_str.as_wtf8()));
5386                    self.pop_value();
5387                    self.pop_value();
5388                    self.push_value(vm.ctx.new_bool(result).into());
5389                    Ok(None)
5390                } else {
5391                    let op = bytecode::ComparisonOperator::try_from(u32::from(arg))
5392                        .unwrap_or(bytecode::ComparisonOperator::Equal);
5393                    self.execute_compare(vm, op)
5394                }
5395            }
5396            Instruction::ToBoolBool => {
5397                let obj = self.top_value();
5398                if obj.class().is(vm.ctx.types.bool_type) {
5399                    // Already a bool, no-op
5400                    Ok(None)
5401                } else {
5402                    let obj = self.pop_value();
5403                    let result = obj.try_to_bool(vm)?;
5404                    self.push_value(vm.ctx.new_bool(result).into());
5405                    Ok(None)
5406                }
5407            }
5408            Instruction::ToBoolInt => {
5409                let obj = self.top_value();
5410                if let Some(int_val) = obj.downcast_ref_if_exact::<PyInt>(vm) {
5411                    let result = !int_val.as_bigint().is_zero();
5412                    self.pop_value();
5413                    self.push_value(vm.ctx.new_bool(result).into());
5414                    Ok(None)
5415                } else {
5416                    let obj = self.pop_value();
5417                    let result = obj.try_to_bool(vm)?;
5418                    self.push_value(vm.ctx.new_bool(result).into());
5419                    Ok(None)
5420                }
5421            }
5422            Instruction::ToBoolNone => {
5423                let obj = self.top_value();
5424                if obj.class().is(vm.ctx.types.none_type) {
5425                    self.pop_value();
5426                    self.push_value(vm.ctx.new_bool(false).into());
5427                    Ok(None)
5428                } else {
5429                    let obj = self.pop_value();
5430                    let result = obj.try_to_bool(vm)?;
5431                    self.push_value(vm.ctx.new_bool(result).into());
5432                    Ok(None)
5433                }
5434            }
5435            Instruction::ToBoolList => {
5436                let obj = self.top_value();
5437                if let Some(list) = obj.downcast_ref_if_exact::<PyList>(vm) {
5438                    let result = !list.borrow_vec().is_empty();
5439                    self.pop_value();
5440                    self.push_value(vm.ctx.new_bool(result).into());
5441                    Ok(None)
5442                } else {
5443                    let obj = self.pop_value();
5444                    let result = obj.try_to_bool(vm)?;
5445                    self.push_value(vm.ctx.new_bool(result).into());
5446                    Ok(None)
5447                }
5448            }
5449            Instruction::ToBoolStr => {
5450                let obj = self.top_value();
5451                if let Some(s) = obj.downcast_ref_if_exact::<PyStr>(vm) {
5452                    let result = !s.is_empty();
5453                    self.pop_value();
5454                    self.push_value(vm.ctx.new_bool(result).into());
5455                    Ok(None)
5456                } else {
5457                    let obj = self.pop_value();
5458                    let result = obj.try_to_bool(vm)?;
5459                    self.push_value(vm.ctx.new_bool(result).into());
5460                    Ok(None)
5461                }
5462            }
5463            Instruction::ToBoolAlwaysTrue => {
5464                // Objects without __bool__ or __len__ are always True.
5465                // Guard: check type version hasn't changed.
5466                let instr_idx = self.lasti() as usize - 1;
5467                let cache_base = instr_idx + 1;
5468                let obj = self.top_value();
5469                let cached_version = self.code.instructions.read_cache_u32(cache_base + 1);
5470                if cached_version != 0 && obj.class().tp_version_tag.load(Acquire) == cached_version
5471                {
5472                    self.pop_value();
5473                    self.push_value(vm.ctx.new_bool(true).into());
5474                    Ok(None)
5475                } else {
5476                    let obj = self.pop_value();
5477                    let result = obj.try_to_bool(vm)?;
5478                    self.push_value(vm.ctx.new_bool(result).into());
5479                    Ok(None)
5480                }
5481            }
5482            Instruction::ContainsOpDict => {
5483                let b = self.top_value(); // haystack
5484                if let Some(dict) = b.downcast_ref_if_exact::<PyDict>(vm) {
5485                    let a = self.nth_value(1); // needle
5486                    let found = dict.get_item_opt(a, vm)?.is_some();
5487                    self.pop_value();
5488                    self.pop_value();
5489                    let invert = bytecode::Invert::try_from(u32::from(arg) as u8)
5490                        .unwrap_or(bytecode::Invert::No);
5491                    let value = match invert {
5492                        bytecode::Invert::No => found,
5493                        bytecode::Invert::Yes => !found,
5494                    };
5495                    self.push_value(vm.ctx.new_bool(value).into());
5496                    Ok(None)
5497                } else {
5498                    let b = self.pop_value();
5499                    let a = self.pop_value();
5500                    let invert = bytecode::Invert::try_from(u32::from(arg) as u8)
5501                        .unwrap_or(bytecode::Invert::No);
5502                    let value = match invert {
5503                        bytecode::Invert::No => self._in(vm, &a, &b)?,
5504                        bytecode::Invert::Yes => self._not_in(vm, &a, &b)?,
5505                    };
5506                    self.push_value(vm.ctx.new_bool(value).into());
5507                    Ok(None)
5508                }
5509            }
5510            Instruction::ContainsOpSet => {
5511                let b = self.top_value(); // haystack
5512                if b.downcast_ref_if_exact::<PySet>(vm).is_some()
5513                    || b.downcast_ref_if_exact::<PyFrozenSet>(vm).is_some()
5514                {
5515                    let a = self.nth_value(1); // needle
5516                    let found = vm._contains(b, a)?;
5517                    self.pop_value();
5518                    self.pop_value();
5519                    let invert = bytecode::Invert::try_from(u32::from(arg) as u8)
5520                        .unwrap_or(bytecode::Invert::No);
5521                    let value = match invert {
5522                        bytecode::Invert::No => found,
5523                        bytecode::Invert::Yes => !found,
5524                    };
5525                    self.push_value(vm.ctx.new_bool(value).into());
5526                    Ok(None)
5527                } else {
5528                    let b = self.pop_value();
5529                    let a = self.pop_value();
5530                    let invert = bytecode::Invert::try_from(u32::from(arg) as u8)
5531                        .unwrap_or(bytecode::Invert::No);
5532                    let value = match invert {
5533                        bytecode::Invert::No => self._in(vm, &a, &b)?,
5534                        bytecode::Invert::Yes => self._not_in(vm, &a, &b)?,
5535                    };
5536                    self.push_value(vm.ctx.new_bool(value).into());
5537                    Ok(None)
5538                }
5539            }
5540            Instruction::UnpackSequenceTwoTuple => {
5541                let obj = self.top_value();
5542                if let Some(tuple) = obj.downcast_ref_if_exact::<PyTuple>(vm) {
5543                    let elements = tuple.as_slice();
5544                    if elements.len() == 2 {
5545                        let e0 = elements[0].clone();
5546                        let e1 = elements[1].clone();
5547                        self.pop_value();
5548                        self.push_value(e1);
5549                        self.push_value(e0);
5550                        return Ok(None);
5551                    }
5552                }
5553                let size = u32::from(arg);
5554                self.unpack_sequence(size, vm)
5555            }
5556            Instruction::UnpackSequenceTuple => {
5557                let size = u32::from(arg) as usize;
5558                let obj = self.top_value();
5559                if let Some(tuple) = obj.downcast_ref_if_exact::<PyTuple>(vm) {
5560                    let elements = tuple.as_slice();
5561                    if elements.len() == size {
5562                        let elems: Vec<_> = elements.to_vec();
5563                        self.pop_value();
5564                        for elem in elems.into_iter().rev() {
5565                            self.push_value(elem);
5566                        }
5567                        return Ok(None);
5568                    }
5569                }
5570                self.unpack_sequence(size as u32, vm)
5571            }
5572            Instruction::UnpackSequenceList => {
5573                let size = u32::from(arg) as usize;
5574                let obj = self.top_value();
5575                if let Some(list) = obj.downcast_ref_if_exact::<PyList>(vm) {
5576                    let vec = list.borrow_vec();
5577                    if vec.len() == size {
5578                        let elems: Vec<_> = vec.to_vec();
5579                        drop(vec);
5580                        self.pop_value();
5581                        for elem in elems.into_iter().rev() {
5582                            self.push_value(elem);
5583                        }
5584                        return Ok(None);
5585                    }
5586                }
5587                self.unpack_sequence(size as u32, vm)
5588            }
5589            Instruction::ForIterRange => {
5590                let target = bytecode::Label::from_u32(self.lasti() + 1 + u32::from(arg));
5591                let iter = self.top_value();
5592                if let Some(range_iter) = iter.downcast_ref_if_exact::<PyRangeIterator>(vm) {
5593                    if let Some(value) = range_iter.fast_next() {
5594                        self.push_value(vm.ctx.new_int(value).into());
5595                    } else {
5596                        self.for_iter_jump_on_exhausted(target);
5597                    }
5598                    Ok(None)
5599                } else {
5600                    self.execute_for_iter(vm, target)?;
5601                    Ok(None)
5602                }
5603            }
5604            Instruction::ForIterList => {
5605                let target = bytecode::Label::from_u32(self.lasti() + 1 + u32::from(arg));
5606                let iter = self.top_value();
5607                if let Some(list_iter) = iter.downcast_ref_if_exact::<PyListIterator>(vm) {
5608                    if let Some(value) = list_iter.fast_next() {
5609                        self.push_value(value);
5610                    } else {
5611                        self.for_iter_jump_on_exhausted(target);
5612                    }
5613                    Ok(None)
5614                } else {
5615                    self.execute_for_iter(vm, target)?;
5616                    Ok(None)
5617                }
5618            }
5619            Instruction::ForIterTuple => {
5620                let target = bytecode::Label::from_u32(self.lasti() + 1 + u32::from(arg));
5621                let iter = self.top_value();
5622                if let Some(tuple_iter) = iter.downcast_ref_if_exact::<PyTupleIterator>(vm) {
5623                    if let Some(value) = tuple_iter.fast_next() {
5624                        self.push_value(value);
5625                    } else {
5626                        self.for_iter_jump_on_exhausted(target);
5627                    }
5628                    Ok(None)
5629                } else {
5630                    self.execute_for_iter(vm, target)?;
5631                    Ok(None)
5632                }
5633            }
5634            Instruction::ForIterGen => {
5635                let target = bytecode::Label::from_u32(self.lasti() + 1 + u32::from(arg));
5636                let iter = self.top_value();
5637                if self.specialization_eval_frame_active(vm) {
5638                    self.execute_for_iter(vm, target)?;
5639                    return Ok(None);
5640                }
5641                if let Some(generator) = iter.downcast_ref_if_exact::<PyGenerator>(vm) {
5642                    if generator.as_coro().running() || generator.as_coro().closed() {
5643                        self.execute_for_iter(vm, target)?;
5644                        return Ok(None);
5645                    }
5646                    match generator.as_coro().send_none(iter, vm) {
5647                        Ok(PyIterReturn::Return(value)) => {
5648                            self.push_value(value);
5649                        }
5650                        Ok(PyIterReturn::StopIteration(value)) => {
5651                            if vm.use_tracing.get() && !vm.is_none(&self.object.trace.lock()) {
5652                                let stop_exc = vm.new_stop_iteration(value);
5653                                self.fire_exception_trace(&stop_exc, vm)?;
5654                            }
5655                            self.for_iter_jump_on_exhausted(target);
5656                        }
5657                        Err(e) => return Err(e),
5658                    }
5659                    Ok(None)
5660                } else {
5661                    self.execute_for_iter(vm, target)?;
5662                    Ok(None)
5663                }
5664            }
5665            Instruction::LoadGlobalModule => {
5666                let oparg = u32::from(arg);
5667                let cache_base = self.lasti() as usize;
5668                // Keep specialized opcode on guard miss (JUMP_TO_PREDICTED behavior).
5669                let cached_version = self.code.instructions.read_cache_u16(cache_base + 1);
5670                let cached_index = self.code.instructions.read_cache_u16(cache_base + 3);
5671                if let Ok(current_version) = u16::try_from(self.globals.version())
5672                    && cached_version == current_version
5673                {
5674                    let name = self.code.names[(oparg >> 1) as usize];
5675                    if let Some(x) = self.globals.get_item_opt_hint(name, cached_index, vm)? {
5676                        self.push_value(x);
5677                        if (oparg & 1) != 0 {
5678                            self.push_value_opt(None);
5679                        }
5680                        return Ok(None);
5681                    }
5682                }
5683                let name = self.code.names[(oparg >> 1) as usize];
5684                let x = self.load_global_or_builtin(name, vm)?;
5685                self.push_value(x);
5686                if (oparg & 1) != 0 {
5687                    self.push_value_opt(None);
5688                }
5689                Ok(None)
5690            }
5691            Instruction::LoadGlobalBuiltin => {
5692                let oparg = u32::from(arg);
5693                let cache_base = self.lasti() as usize;
5694                let cached_globals_ver = self.code.instructions.read_cache_u16(cache_base + 1);
5695                let cached_builtins_ver = self.code.instructions.read_cache_u16(cache_base + 2);
5696                let cached_index = self.code.instructions.read_cache_u16(cache_base + 3);
5697                if let Ok(current_globals_ver) = u16::try_from(self.globals.version())
5698                    && cached_globals_ver == current_globals_ver
5699                    && let Some(builtins_dict) = self.builtins.downcast_ref_if_exact::<PyDict>(vm)
5700                    && let Ok(current_builtins_ver) = u16::try_from(builtins_dict.version())
5701                    && cached_builtins_ver == current_builtins_ver
5702                {
5703                    let name = self.code.names[(oparg >> 1) as usize];
5704                    if let Some(x) = builtins_dict.get_item_opt_hint(name, cached_index, vm)? {
5705                        self.push_value(x);
5706                        if (oparg & 1) != 0 {
5707                            self.push_value_opt(None);
5708                        }
5709                        return Ok(None);
5710                    }
5711                }
5712                let name = self.code.names[(oparg >> 1) as usize];
5713                let x = self.load_global_or_builtin(name, vm)?;
5714                self.push_value(x);
5715                if (oparg & 1) != 0 {
5716                    self.push_value_opt(None);
5717                }
5718                Ok(None)
5719            }
5720            // All INSTRUMENTED_* opcodes delegate to a cold function to keep
5721            // the hot instruction loop free of monitoring overhead.
5722            _ => self.execute_instrumented(instruction, arg, vm),
5723        }
5724    }
5725
5726    /// Handle all INSTRUMENTED_* opcodes. This function is cold — it only
5727    /// runs when sys.monitoring has rewritten the bytecode.
5728    #[cold]
5729    fn execute_instrumented(
5730        &mut self,
5731        instruction: Instruction,
5732        arg: bytecode::OpArg,
5733        vm: &VirtualMachine,
5734    ) -> FrameResult {
5735        debug_assert!(
5736            instruction.is_instrumented(),
5737            "execute_instrumented called with non-instrumented opcode {instruction:?}"
5738        );
5739        if self.monitoring_disabled_for_code(vm) {
5740            let global_ver = vm
5741                .state
5742                .instrumentation_version
5743                .load(atomic::Ordering::Acquire);
5744            monitoring::instrument_code(self.code, 0);
5745            self.code
5746                .instrumentation_version
5747                .store(global_ver, atomic::Ordering::Release);
5748            self.update_lasti(|i| *i -= 1);
5749            return Ok(None);
5750        }
5751        self.monitoring_mask = vm.state.monitoring_events.load();
5752        match instruction {
5753            Instruction::InstrumentedResume => {
5754                // Version check: re-instrument if stale
5755                let global_ver = vm
5756                    .state
5757                    .instrumentation_version
5758                    .load(atomic::Ordering::Acquire);
5759                let code_ver = self
5760                    .code
5761                    .instrumentation_version
5762                    .load(atomic::Ordering::Acquire);
5763                if code_ver != global_ver {
5764                    let events = {
5765                        let state = vm.state.monitoring.lock();
5766                        state.events_for_code(self.code.get_id())
5767                    };
5768                    monitoring::instrument_code(self.code, events);
5769                    self.code
5770                        .instrumentation_version
5771                        .store(global_ver, atomic::Ordering::Release);
5772                    // Re-execute (may have been de-instrumented to base Resume)
5773                    self.update_lasti(|i| *i -= 1);
5774                    return Ok(None);
5775                }
5776                let resume_type = u32::from(arg);
5777                let offset = (self.lasti() - 1) * 2;
5778                if resume_type == 0 {
5779                    if self.monitoring_mask & monitoring::EVENT_PY_START != 0 {
5780                        monitoring::fire_py_start(vm, self.code, offset)?;
5781                    }
5782                } else if self.monitoring_mask & monitoring::EVENT_PY_RESUME != 0 {
5783                    monitoring::fire_py_resume(vm, self.code, offset)?;
5784                }
5785                Ok(None)
5786            }
5787            Instruction::InstrumentedReturnValue => {
5788                let value = self.pop_value();
5789                if self.monitoring_mask & monitoring::EVENT_PY_RETURN != 0 {
5790                    let offset = (self.lasti() - 1) * 2;
5791                    monitoring::fire_py_return(vm, self.code, offset, &value)?;
5792                }
5793                self.unwind_blocks(vm, UnwindReason::Returning { value })
5794            }
5795            Instruction::InstrumentedYieldValue => {
5796                debug_assert!(
5797                    self.localsplus
5798                        .stack_as_slice()
5799                        .iter()
5800                        .flatten()
5801                        .all(|sr| !sr.is_borrowed()),
5802                    "borrowed refs on stack at yield point"
5803                );
5804                let value = self.pop_value();
5805                if self.monitoring_mask & monitoring::EVENT_PY_YIELD != 0 {
5806                    let offset = (self.lasti() - 1) * 2;
5807                    monitoring::fire_py_yield(vm, self.code, offset, &value)?;
5808                }
5809                Ok(Some(ExecutionResult::Yield(value)))
5810            }
5811            Instruction::InstrumentedCall => {
5812                let args = self.collect_positional_args(u32::from(arg));
5813                self.execute_call_instrumented(args, vm)
5814            }
5815            Instruction::InstrumentedCallKw => {
5816                let args = self.collect_keyword_args(u32::from(arg));
5817                self.execute_call_instrumented(args, vm)
5818            }
5819            Instruction::InstrumentedCallFunctionEx => {
5820                let args = self.collect_ex_args(vm)?;
5821                self.execute_call_instrumented(args, vm)
5822            }
5823            Instruction::InstrumentedLoadSuperAttr => {
5824                let oparg = bytecode::LoadSuperAttr::from(u32::from(arg));
5825                let offset = (self.lasti() - 1) * 2;
5826                // Fire CALL event before super() call
5827                let call_args = if self.monitoring_mask & monitoring::EVENT_CALL != 0 {
5828                    let global_super: PyObjectRef = self.nth_value(2).to_owned();
5829                    let arg0 = if oparg.has_class() {
5830                        self.nth_value(1).to_owned()
5831                    } else {
5832                        monitoring::get_missing(vm)
5833                    };
5834                    monitoring::fire_call(vm, self.code, offset, &global_super, arg0.clone())?;
5835                    Some((global_super, arg0))
5836                } else {
5837                    None
5838                };
5839                match self.load_super_attr(vm, oparg) {
5840                    Ok(result) => {
5841                        // Fire C_RETURN on success
5842                        if let Some((global_super, arg0)) = call_args {
5843                            monitoring::fire_c_return(vm, self.code, offset, &global_super, arg0)?;
5844                        }
5845                        Ok(result)
5846                    }
5847                    Err(exc) => {
5848                        // Fire C_RAISE on failure
5849                        let exc = if let Some((global_super, arg0)) = call_args {
5850                            match monitoring::fire_c_raise(
5851                                vm,
5852                                self.code,
5853                                offset,
5854                                &global_super,
5855                                arg0,
5856                            ) {
5857                                Ok(()) => exc,
5858                                Err(monitor_exc) => monitor_exc,
5859                            }
5860                        } else {
5861                            exc
5862                        };
5863                        Err(exc)
5864                    }
5865                }
5866            }
5867            Instruction::InstrumentedJumpForward => {
5868                let src_offset = (self.lasti() - 1) * 2;
5869                let target_idx = self.lasti() + u32::from(arg);
5870                let target = bytecode::Label::from_u32(target_idx);
5871                self.jump(target);
5872                if self.monitoring_mask & monitoring::EVENT_JUMP != 0 {
5873                    monitoring::fire_jump(vm, self.code, src_offset, target.as_u32() * 2)?;
5874                }
5875                Ok(None)
5876            }
5877            Instruction::InstrumentedJumpBackward => {
5878                let src_offset = (self.lasti() - 1) * 2;
5879                let target_idx = self.lasti() + 1 - u32::from(arg);
5880                let target = bytecode::Label::from_u32(target_idx);
5881                self.jump(target);
5882                if self.monitoring_mask & monitoring::EVENT_JUMP != 0 {
5883                    monitoring::fire_jump(vm, self.code, src_offset, target.as_u32() * 2)?;
5884                }
5885                Ok(None)
5886            }
5887            Instruction::InstrumentedForIter => {
5888                let src_offset = (self.lasti() - 1) * 2;
5889                let target = bytecode::Label::from_u32(self.lasti() + 1 + u32::from(arg));
5890                let continued = self.execute_for_iter(vm, target)?;
5891                if continued {
5892                    if self.monitoring_mask & monitoring::EVENT_BRANCH_LEFT != 0 {
5893                        let dest_offset = (self.lasti() + 1) * 2; // after caches
5894                        monitoring::fire_branch_left(vm, self.code, src_offset, dest_offset)?;
5895                    }
5896                } else if self.monitoring_mask & monitoring::EVENT_BRANCH_RIGHT != 0 {
5897                    let dest_offset = self.lasti() * 2;
5898                    monitoring::fire_branch_right(vm, self.code, src_offset, dest_offset)?;
5899                }
5900                Ok(None)
5901            }
5902            Instruction::InstrumentedEndFor => {
5903                // Stack: [value, receiver(iter), ...]
5904                // PyGen_Check: only fire STOP_ITERATION for generators
5905                let is_gen = self
5906                    .nth_value(1)
5907                    .downcast_ref::<crate::builtins::PyGenerator>()
5908                    .is_some();
5909                let value = self.pop_value();
5910                if is_gen && self.monitoring_mask & monitoring::EVENT_STOP_ITERATION != 0 {
5911                    let offset = (self.lasti() - 1) * 2;
5912                    monitoring::fire_stop_iteration(vm, self.code, offset, &value)?;
5913                }
5914                Ok(None)
5915            }
5916            Instruction::InstrumentedEndSend => {
5917                let value = self.pop_value();
5918                let receiver = self.pop_value();
5919                // PyGen_Check || PyCoro_CheckExact
5920                let is_gen_or_coro = receiver
5921                    .downcast_ref::<crate::builtins::PyGenerator>()
5922                    .is_some()
5923                    || receiver
5924                        .downcast_ref::<crate::builtins::PyCoroutine>()
5925                        .is_some();
5926                if is_gen_or_coro && self.monitoring_mask & monitoring::EVENT_STOP_ITERATION != 0 {
5927                    let offset = (self.lasti() - 1) * 2;
5928                    monitoring::fire_stop_iteration(vm, self.code, offset, &value)?;
5929                }
5930                self.push_value(value);
5931                Ok(None)
5932            }
5933            Instruction::InstrumentedPopJumpIfTrue => {
5934                let src_offset = (self.lasti() - 1) * 2;
5935                let target_idx = self.lasti() + 1 + u32::from(arg);
5936                let obj = self.pop_value();
5937                let value = obj.try_to_bool(vm)?;
5938                if value {
5939                    self.jump(bytecode::Label::from_u32(target_idx));
5940                    if self.monitoring_mask & monitoring::EVENT_BRANCH_RIGHT != 0 {
5941                        monitoring::fire_branch_right(vm, self.code, src_offset, target_idx * 2)?;
5942                    }
5943                }
5944                Ok(None)
5945            }
5946            Instruction::InstrumentedPopJumpIfFalse => {
5947                let src_offset = (self.lasti() - 1) * 2;
5948                let target_idx = self.lasti() + 1 + u32::from(arg);
5949                let obj = self.pop_value();
5950                let value = obj.try_to_bool(vm)?;
5951                if !value {
5952                    self.jump(bytecode::Label::from_u32(target_idx));
5953                    if self.monitoring_mask & monitoring::EVENT_BRANCH_RIGHT != 0 {
5954                        monitoring::fire_branch_right(vm, self.code, src_offset, target_idx * 2)?;
5955                    }
5956                }
5957                Ok(None)
5958            }
5959            Instruction::InstrumentedPopJumpIfNone => {
5960                let src_offset = (self.lasti() - 1) * 2;
5961                let target_idx = self.lasti() + 1 + u32::from(arg);
5962                let value = self.pop_value();
5963                if vm.is_none(&value) {
5964                    self.jump(bytecode::Label::from_u32(target_idx));
5965                    if self.monitoring_mask & monitoring::EVENT_BRANCH_RIGHT != 0 {
5966                        monitoring::fire_branch_right(vm, self.code, src_offset, target_idx * 2)?;
5967                    }
5968                }
5969                Ok(None)
5970            }
5971            Instruction::InstrumentedPopJumpIfNotNone => {
5972                let src_offset = (self.lasti() - 1) * 2;
5973                let target_idx = self.lasti() + 1 + u32::from(arg);
5974                let value = self.pop_value();
5975                if !vm.is_none(&value) {
5976                    self.jump(bytecode::Label::from_u32(target_idx));
5977                    if self.monitoring_mask & monitoring::EVENT_BRANCH_RIGHT != 0 {
5978                        monitoring::fire_branch_right(vm, self.code, src_offset, target_idx * 2)?;
5979                    }
5980                }
5981                Ok(None)
5982            }
5983            Instruction::InstrumentedNotTaken => {
5984                if self.monitoring_mask & monitoring::EVENT_BRANCH_LEFT != 0 {
5985                    let not_taken_idx = self.lasti() as usize - 1;
5986                    // Scan backwards past CACHE entries to find the branch instruction
5987                    let mut branch_idx = not_taken_idx.saturating_sub(1);
5988                    while branch_idx > 0
5989                        && matches!(
5990                            self.code.instructions.read_op(branch_idx),
5991                            Instruction::Cache
5992                        )
5993                    {
5994                        branch_idx -= 1;
5995                    }
5996                    let src_offset = (branch_idx as u32) * 2;
5997                    let dest_offset = self.lasti() * 2;
5998                    monitoring::fire_branch_left(vm, self.code, src_offset, dest_offset)?;
5999                }
6000                Ok(None)
6001            }
6002            Instruction::InstrumentedPopIter => {
6003                // BRANCH_RIGHT is fired by InstrumentedForIter, not here.
6004                self.pop_value();
6005                Ok(None)
6006            }
6007            Instruction::InstrumentedEndAsyncFor => {
6008                if self.monitoring_mask & monitoring::EVENT_BRANCH_RIGHT != 0 {
6009                    let oparg_val = u32::from(arg);
6010                    // src = next_instr - oparg (END_SEND position)
6011                    let src_offset = (self.lasti() - oparg_val) * 2;
6012                    // dest = this_instr + 1
6013                    let dest_offset = self.lasti() * 2;
6014                    monitoring::fire_branch_right(vm, self.code, src_offset, dest_offset)?;
6015                }
6016                let exc = self.pop_value();
6017                let _awaitable = self.pop_value();
6018                let exc = exc
6019                    .downcast::<PyBaseException>()
6020                    .expect("EndAsyncFor expects exception on stack");
6021                if exc.fast_isinstance(vm.ctx.exceptions.stop_async_iteration) {
6022                    vm.set_exception(None);
6023                    Ok(None)
6024                } else {
6025                    Err(exc)
6026                }
6027            }
6028            Instruction::InstrumentedLine => {
6029                let idx = self.lasti() as usize - 1;
6030                let offset = idx as u32 * 2;
6031
6032                // Read the full side-table chain before firing any events,
6033                // because a callback may de-instrument and clear the tables.
6034                let (real_op_byte, also_instruction) = {
6035                    let data = self.code.monitoring_data.lock();
6036                    let line_op = data.as_ref().map(|d| d.line_opcodes[idx]).unwrap_or(0);
6037                    if line_op == u8::from(Instruction::InstrumentedInstruction) {
6038                        // LINE wraps INSTRUCTION: resolve the INSTRUCTION side-table too
6039                        let inst_op = data
6040                            .as_ref()
6041                            .map(|d| d.per_instruction_opcodes[idx])
6042                            .unwrap_or(0);
6043                        (inst_op, true)
6044                    } else {
6045                        (line_op, false)
6046                    }
6047                };
6048                debug_assert!(
6049                    real_op_byte != 0,
6050                    "INSTRUMENTED_LINE at {idx} without stored opcode"
6051                );
6052
6053                // Fire LINE event only if line changed
6054                if let Some((loc, _)) = self.code.locations.get(idx) {
6055                    let line = loc.line.get() as u32;
6056                    if line != *self.prev_line && line > 0 {
6057                        *self.prev_line = line;
6058                        monitoring::fire_line(vm, self.code, offset, line)?;
6059                    }
6060                }
6061
6062                // If the LINE position also had INSTRUCTION, fire that event too
6063                if also_instruction {
6064                    monitoring::fire_instruction(vm, self.code, offset)?;
6065                }
6066
6067                // Re-dispatch to the real original opcode
6068                let original_op = Instruction::try_from(real_op_byte)
6069                    .expect("invalid opcode in side-table chain");
6070                let lasti_before_dispatch = self.lasti();
6071                let result = if original_op.to_base().is_some() {
6072                    self.execute_instrumented(original_op, arg, vm)
6073                } else {
6074                    let mut do_extend_arg = false;
6075                    self.execute_instruction(original_op, arg, &mut do_extend_arg, vm)
6076                };
6077                let orig_caches = original_op.to_base().unwrap_or(original_op).cache_entries();
6078                if orig_caches > 0 && self.lasti() == lasti_before_dispatch {
6079                    self.update_lasti(|i| *i += orig_caches as u32);
6080                }
6081                result
6082            }
6083            Instruction::InstrumentedInstruction => {
6084                let idx = self.lasti() as usize - 1;
6085                let offset = idx as u32 * 2;
6086
6087                // Get original opcode from side-table
6088                let original_op_byte = {
6089                    let data = self.code.monitoring_data.lock();
6090                    data.as_ref()
6091                        .map(|d| d.per_instruction_opcodes[idx])
6092                        .unwrap_or(0)
6093                };
6094                debug_assert!(
6095                    original_op_byte != 0,
6096                    "INSTRUMENTED_INSTRUCTION at {idx} without stored opcode"
6097                );
6098
6099                // Fire INSTRUCTION event
6100                monitoring::fire_instruction(vm, self.code, offset)?;
6101
6102                // Re-dispatch to original opcode
6103                let original_op = Instruction::try_from(original_op_byte)
6104                    .expect("invalid opcode in instruction side-table");
6105                let lasti_before_dispatch = self.lasti();
6106                let result = if original_op.to_base().is_some() {
6107                    self.execute_instrumented(original_op, arg, vm)
6108                } else {
6109                    let mut do_extend_arg = false;
6110                    self.execute_instruction(original_op, arg, &mut do_extend_arg, vm)
6111                };
6112                let orig_caches = original_op.to_base().unwrap_or(original_op).cache_entries();
6113                if orig_caches > 0 && self.lasti() == lasti_before_dispatch {
6114                    self.update_lasti(|i| *i += orig_caches as u32);
6115                }
6116                result
6117            }
6118            _ => {
6119                unreachable!("{instruction:?} instruction should not be executed")
6120            }
6121        }
6122    }
6123
6124    #[inline]
6125    fn load_global_or_builtin(&self, name: &Py<PyStr>, vm: &VirtualMachine) -> PyResult {
6126        if let Some(builtins_dict) = self.builtins_dict {
6127            // Fast path: both globals and builtins are exact dicts
6128            // SAFETY: builtins_dict is only set when globals is also exact dict
6129            let globals_exact = unsafe { PyExact::ref_unchecked(self.globals.as_ref()) };
6130            globals_exact
6131                .get_chain_exact(builtins_dict, name, vm)?
6132                .ok_or_else(|| {
6133                    vm.new_name_error(format!("name '{name}' is not defined"), name.to_owned())
6134                })
6135        } else {
6136            // Slow path: builtins is not a dict, use generic __getitem__
6137            if let Some(value) = self.globals.get_item_opt(name, vm)? {
6138                return Ok(value);
6139            }
6140            self.builtins.get_item(name, vm).map_err(|e| {
6141                if e.fast_isinstance(vm.ctx.exceptions.key_error) {
6142                    vm.new_name_error(format!("name '{name}' is not defined"), name.to_owned())
6143                } else {
6144                    e
6145                }
6146            })
6147        }
6148    }
6149
6150    #[cfg_attr(feature = "flame-it", flame("Frame"))]
6151    fn import(&mut self, vm: &VirtualMachine, module_name: Option<&Py<PyStr>>) -> PyResult<()> {
6152        let module_name = module_name.unwrap_or(vm.ctx.empty_str);
6153        let top = self.pop_value();
6154        let from_list = match <Option<PyTupleRef>>::try_from_object(vm, top)? {
6155            Some(from_list) => from_list.try_into_typed::<PyStr>(vm)?,
6156            None => vm.ctx.empty_tuple_typed().to_owned(),
6157        };
6158        let level = usize::try_from_object(vm, self.pop_value())?;
6159
6160        let module = vm.import_from(module_name, &from_list, level)?;
6161
6162        self.push_value(module);
6163        Ok(())
6164    }
6165
    /// Implements IMPORT_FROM: fetch attribute `names[idx]` from the module on
    /// top of the stack (the module is peeked, not popped).
    ///
    /// Resolution order:
    /// 1. plain attribute lookup on the module,
    /// 2. `sys.modules["{module.__name__}.{name}"]` — a submodule that was
    ///    imported but not yet bound as an attribute of its parent,
    /// 3. otherwise an ImportError whose message tries to diagnose stdlib
    ///    shadowing and circular imports.
    #[cfg_attr(feature = "flame-it", flame("Frame"))]
    fn import_from(&mut self, vm: &VirtualMachine, idx: bytecode::NameIdx) -> PyResult {
        let module = self.top_value();
        let name = self.code.names[idx as usize];

        // Load attribute, and transform any error into import error.
        if let Some(obj) = vm.get_attribute_opt(module.to_owned(), name)? {
            return Ok(obj);
        }
        // fallback to importing '{module.__name__}.{name}' from sys.modules
        // (any failure along the way simply means "no fallback available")
        let fallback_module = (|| {
            let mod_name = module.get_attr(identifier!(vm, __name__), vm).ok()?;
            let mod_name = mod_name.downcast_ref::<PyStr>()?;
            let full_mod_name = format!("{mod_name}.{name}");
            let sys_modules = vm.sys_module.get_attr("modules", vm).ok()?;
            sys_modules.get_item(&full_mod_name, vm).ok()
        })();

        if let Some(sub_module) = fallback_module {
            return Ok(sub_module);
        }

        use crate::import::{
            get_spec_file_origin, is_possibly_shadowing_path, is_stdlib_module_name,
        };

        // Get module name for the error message
        let mod_name_obj = module.get_attr(identifier!(vm, __name__), vm).ok();
        let mod_name_str = mod_name_obj
            .as_ref()
            .and_then(|n| n.downcast_ref::<PyUtf8Str>().map(|s| s.as_str().to_owned()));
        let module_name = mod_name_str.as_deref().unwrap_or("<unknown module name>");

        // __spec__ (if present and not None) lets us point at the file that
        // defined the module in the diagnostics below.
        let spec = module
            .get_attr("__spec__", vm)
            .ok()
            .filter(|s| !vm.is_none(s));

        let origin = get_spec_file_origin(&spec, vm);

        // A module whose file lives on a shadowing-prone path (e.g. the
        // script directory) may be hiding the library the user meant.
        let is_possibly_shadowing = origin
            .as_ref()
            .map(|o| is_possibly_shadowing_path(o, vm))
            .unwrap_or(false);
        let is_possibly_shadowing_stdlib = if is_possibly_shadowing {
            if let Some(ref mod_name) = mod_name_obj {
                is_stdlib_module_name(mod_name, vm)?
            } else {
                false
            }
        } else {
            false
        };

        // Pick the most specific diagnostic: stdlib shadowing > generic
        // shadowing > circular import (partially initialized) > plain miss.
        let msg = if is_possibly_shadowing_stdlib {
            let origin = origin.as_ref().unwrap();
            format!(
                "cannot import name '{name}' from '{module_name}' \
                 (consider renaming '{origin}' since it has the same \
                 name as the standard library module named '{module_name}' \
                 and prevents importing that standard library module)"
            )
        } else {
            let is_init = is_module_initializing(module, vm);
            if is_init {
                if is_possibly_shadowing {
                    let origin = origin.as_ref().unwrap();
                    format!(
                        "cannot import name '{name}' from '{module_name}' \
                         (consider renaming '{origin}' if it has the same name \
                         as a library you intended to import)"
                    )
                } else if let Some(ref path) = origin {
                    format!(
                        "cannot import name '{name}' from partially initialized module \
                         '{module_name}' (most likely due to a circular import) ({path})"
                    )
                } else {
                    format!(
                        "cannot import name '{name}' from partially initialized module \
                         '{module_name}' (most likely due to a circular import)"
                    )
                }
            } else if let Some(ref path) = origin {
                format!("cannot import name '{name}' from '{module_name}' ({path})")
            } else {
                format!("cannot import name '{name}' from '{module_name}' (unknown location)")
            }
        };
        let err = vm.new_import_error(msg, vm.ctx.new_utf8_str(module_name));

        // Attach best-effort metadata; failures to set attributes are ignored.
        if let Some(ref path) = origin {
            let _ignore = err
                .as_object()
                .set_attr("path", vm.ctx.new_str(path.as_str()), vm);
        }

        // name_from = the attribute name that failed to import (best-effort metadata)
        let _ignore = err.as_object().set_attr("name_from", name.to_owned(), vm);

        Err(err)
    }
6268
6269    #[cfg_attr(feature = "flame-it", flame("Frame"))]
6270    fn import_star(&mut self, vm: &VirtualMachine) -> PyResult<()> {
6271        let module = self.pop_value();
6272
6273        let Some(dict) = module.dict() else {
6274            return Ok(());
6275        };
6276
6277        let mod_name = module
6278            .get_attr(identifier!(vm, __name__), vm)
6279            .ok()
6280            .and_then(|n| n.downcast::<PyStr>().ok());
6281
6282        let require_str = |obj: PyObjectRef, attr: &str| -> PyResult<PyRef<PyStr>> {
6283            obj.downcast().map_err(|obj: PyObjectRef| {
6284                let source = if let Some(ref mod_name) = mod_name {
6285                    format!("{}.{attr}", mod_name.as_wtf8())
6286                } else {
6287                    attr.to_owned()
6288                };
6289                let repr = obj.repr(vm).unwrap_or_else(|_| vm.ctx.new_str("?"));
6290                vm.new_type_error(format!(
6291                    "{} in {} must be str, not {}",
6292                    repr.as_wtf8(),
6293                    source,
6294                    obj.class().name()
6295                ))
6296            })
6297        };
6298
6299        let locals_map = self.locals.mapping(vm);
6300        if let Ok(all) = dict.get_item(identifier!(vm, __all__), vm) {
6301            let items: Vec<PyObjectRef> = all.try_to_value(vm)?;
6302            for item in items {
6303                let name = require_str(item, "__all__")?;
6304                let value = module.get_attr(&*name, vm)?;
6305                locals_map.ass_subscript(&name, Some(value), vm)?;
6306            }
6307        } else {
6308            for (k, v) in dict {
6309                let k = require_str(k, "__dict__")?;
6310                if !k.as_bytes().starts_with(b"_") {
6311                    locals_map.ass_subscript(&k, Some(v), vm)?;
6312                }
6313            }
6314        }
6315        Ok(())
6316    }
6317
    /// Unwinds the frame for the given reason.
    ///
    /// For `Raising`, consults the code object's exception table: if a handler
    /// covers the faulting instruction, the value stack is trimmed to the
    /// handler's recorded depth, the exception is pushed, and control jumps to
    /// the handler (returning `Ok(None)` to continue execution). With no
    /// handler, the exception propagates as `Err`.
    ///
    /// For `Returning`, simply yields the return value to the run loop.
    #[cfg_attr(feature = "flame-it", flame("Frame"))]
    fn unwind_blocks(&mut self, vm: &VirtualMachine, reason: UnwindReason) -> FrameResult {
        // use exception table for exception handling
        match reason {
            UnwindReason::Raising { exception } => {
                // Look up handler in exception table
                // lasti points to NEXT instruction (already incremented in run loop)
                // The exception occurred at the previous instruction
                // Python uses signed int where INSTR_OFFSET() - 1 = -1 before first instruction.
                // We use u32, so check for 0 explicitly.
                if self.lasti() == 0 {
                    // No instruction executed yet, no handler can match
                    return Err(exception);
                }
                let offset = self.lasti() - 1;
                if let Some(entry) =
                    bytecode::find_exception_handler(&self.code.exceptiontable, offset)
                {
                    // Fire EXCEPTION_HANDLED before setting up handler.
                    // If the callback raises, the handler is NOT set up and the
                    // new exception propagates instead.
                    if vm.state.monitoring_events.load() & monitoring::EVENT_EXCEPTION_HANDLED != 0
                    {
                        // Monitoring offsets are byte offsets (2 bytes per code unit).
                        let byte_offset = offset * 2;
                        let exc_obj: PyObjectRef = exception.clone().into();
                        monitoring::fire_exception_handled(vm, self.code, byte_offset, &exc_obj)?;
                    }

                    // 1. Pop stack to entry.depth
                    while self.localsplus.stack_len() > entry.depth as usize {
                        let _ = self.localsplus.stack_pop();
                    }

                    // 2. If push_lasti=true (SETUP_CLEANUP), push lasti before exception
                    // pushes lasti as PyLong
                    if entry.push_lasti {
                        self.push_value(vm.ctx.new_int(offset as i32).into());
                    }

                    // 3. Push exception onto stack
                    // always push exception, PUSH_EXC_INFO transforms [exc] -> [prev_exc, exc]
                    // Do NOT call vm.set_exception here! PUSH_EXC_INFO will do it.
                    // PUSH_EXC_INFO needs to get prev_exc from vm.current_exception() BEFORE setting the new one.
                    self.push_value(exception.into());

                    // 4. Jump to handler
                    self.jump(bytecode::Label::from_u32(entry.target));

                    Ok(None)
                } else {
                    // No handler found, propagate exception
                    Err(exception)
                }
            }
            UnwindReason::Returning { value } => Ok(Some(ExecutionResult::Return(value))),
        }
    }
6379
6380    fn execute_store_subscript(&mut self, vm: &VirtualMachine) -> FrameResult {
6381        let idx = self.pop_value();
6382        let obj = self.pop_value();
6383        let value = self.pop_value();
6384        obj.set_item(&*idx, value, vm)?;
6385        Ok(None)
6386    }
6387
6388    fn execute_delete_subscript(&mut self, vm: &VirtualMachine) -> FrameResult {
6389        let idx = self.pop_value();
6390        let obj = self.pop_value();
6391        obj.del_item(&*idx, vm)?;
6392        Ok(None)
6393    }
6394
6395    fn execute_build_map(&mut self, vm: &VirtualMachine, size: u32) -> FrameResult {
6396        let size = size as usize;
6397        let map_obj = vm.ctx.new_dict();
6398        for (key, value) in self.pop_multiple(2 * size).tuples() {
6399            map_obj.set_item(&*key, value, vm)?;
6400        }
6401
6402        self.push_value(map_obj.into());
6403        Ok(None)
6404    }
6405
6406    fn execute_build_slice(
6407        &mut self,
6408        vm: &VirtualMachine,
6409        argc: bytecode::BuildSliceArgCount,
6410    ) -> FrameResult {
6411        let step = match argc {
6412            bytecode::BuildSliceArgCount::Two => None,
6413            bytecode::BuildSliceArgCount::Three => Some(self.pop_value()),
6414        };
6415        let stop = self.pop_value();
6416        let start = self.pop_value();
6417
6418        let obj = PySlice {
6419            start: Some(start),
6420            stop,
6421            step,
6422        }
6423        .into_ref(&vm.ctx);
6424        self.push_value(obj.into());
6425        Ok(None)
6426    }
6427
6428    fn collect_positional_args(&mut self, nargs: u32) -> FuncArgs {
6429        FuncArgs {
6430            args: self.pop_multiple(nargs as usize).collect(),
6431            kwargs: IndexMap::new(),
6432        }
6433    }
6434
6435    fn collect_keyword_args(&mut self, nargs: u32) -> FuncArgs {
6436        let kwarg_names = self
6437            .pop_value()
6438            .downcast::<PyTuple>()
6439            .expect("kwarg names should be tuple of strings");
6440        let args = self.pop_multiple(nargs as usize);
6441
6442        let kwarg_names = kwarg_names.as_slice().iter().map(|pyobj| {
6443            pyobj
6444                .downcast_ref::<PyUtf8Str>()
6445                .unwrap()
6446                .as_str()
6447                .to_owned()
6448        });
6449        FuncArgs::with_kwargs_names(args, kwarg_names)
6450    }
6451
    /// Collects arguments for CALL_FUNCTION_EX-style calls (`f(*args, **kwargs)`).
    ///
    /// Pops an optional kwargs mapping and then the args object. The callable
    /// is still on the stack below and is only peeked (via `nth_value`) to
    /// build error messages — note the offset differs between the two peeks
    /// because the kwargs slot has already been popped by the second one.
    fn collect_ex_args(&mut self, vm: &VirtualMachine) -> PyResult<FuncArgs> {
        // None here means the call site had no ** mapping at all.
        let kwargs_or_null = self.pop_value_opt();
        let kwargs = if let Some(kw_obj) = kwargs_or_null {
            let mut kwargs = IndexMap::new();

            // Stack: [callable, self_or_null, args_tuple]
            let callable = self.nth_value(2);
            let func_str = Self::object_function_str(callable, vm);

            // Iterate via keys() so custom mappings keep their own ordering.
            Self::iterate_mapping_keys(vm, &kw_obj, &func_str, |key| {
                let key_str = key
                    .downcast_ref::<PyUtf8Str>()
                    .ok_or_else(|| vm.new_type_error("keywords must be strings"))?;
                let value = kw_obj.get_item(&*key, vm)?;
                kwargs.insert(key_str.as_str().to_owned(), value);
                Ok(())
            })?;
            kwargs
        } else {
            IndexMap::new()
        };
        let args_obj = self.pop_value();
        let args = if let Some(tuple) = args_obj.downcast_ref::<PyTuple>() {
            tuple.as_slice().to_vec()
        } else {
            // Single *arg passed directly; convert to sequence at runtime.
            // Stack: [callable, self_or_null]
            let callable = self.nth_value(1);
            let func_str = Self::object_function_str(callable, vm);
            // "Not iterable" = neither tp_iter nor a __getitem__ class attr;
            // used to distinguish a genuine non-iterable from a TypeError
            // raised while iterating.
            let not_iterable = args_obj.class().slots.iter.load().is_none()
                && args_obj
                    .get_class_attr(vm.ctx.intern_str("__getitem__"))
                    .is_none();
            args_obj.try_to_value::<Vec<PyObjectRef>>(vm).map_err(|e| {
                if not_iterable && e.class().is(vm.ctx.exceptions.type_error) {
                    vm.new_type_error(format!(
                        "{} argument after * must be an iterable, not {}",
                        func_str,
                        args_obj.class().name()
                    ))
                } else {
                    e
                }
            })?
        };
        Ok(FuncArgs { args, kwargs })
    }
6499
6500    /// Returns a display string for a callable object for use in error messages.
6501    /// For objects with `__qualname__`, returns "module.qualname()" or "qualname()".
6502    /// For other objects, returns repr(obj).
6503    fn object_function_str(obj: &PyObject, vm: &VirtualMachine) -> Wtf8Buf {
6504        let repr_fallback = || {
6505            obj.repr(vm)
6506                .as_ref()
6507                .map_or("?".as_ref(), |s| s.as_wtf8())
6508                .to_owned()
6509        };
6510        let Ok(qualname) = obj.get_attr(vm.ctx.intern_str("__qualname__"), vm) else {
6511            return repr_fallback();
6512        };
6513        let Some(qualname_str) = qualname.downcast_ref::<PyStr>() else {
6514            return repr_fallback();
6515        };
6516        if let Ok(module) = obj.get_attr(vm.ctx.intern_str("__module__"), vm)
6517            && let Some(module_str) = module.downcast_ref::<PyStr>()
6518            && module_str.as_bytes() != b"builtins"
6519        {
6520            return wtf8_concat!(module_str.as_wtf8(), ".", qualname_str.as_wtf8(), "()");
6521        }
6522        wtf8_concat!(qualname_str.as_wtf8(), "()")
6523    }
6524
6525    /// Helper function to iterate over mapping keys using the keys() method.
6526    /// This ensures proper order preservation for OrderedDict and other custom mappings.
6527    fn iterate_mapping_keys<F>(
6528        vm: &VirtualMachine,
6529        mapping: &PyObject,
6530        func_str: &Wtf8,
6531        mut key_handler: F,
6532    ) -> PyResult<()>
6533    where
6534        F: FnMut(PyObjectRef) -> PyResult<()>,
6535    {
6536        let Some(keys_method) = vm.get_method(mapping.to_owned(), vm.ctx.intern_str("keys")) else {
6537            return Err(vm.new_type_error(format!(
6538                "{} argument after ** must be a mapping, not {}",
6539                func_str,
6540                mapping.class().name()
6541            )));
6542        };
6543
6544        let keys = keys_method?.call((), vm)?.get_iter(vm)?;
6545        while let PyIterReturn::Return(key) = keys.next(vm)? {
6546            key_handler(key)?;
6547        }
6548        Ok(())
6549    }
6550
    /// Vectorcall dispatch for Instruction::Call (positional args only).
    /// Uses vectorcall slot if available, otherwise falls back to FuncArgs.
    ///
    /// Stack layout (bottom to top): callable, self_or_null, arg1..argN.
    /// The slots are take()n in place and the stack truncated, then the call
    /// result is pushed in the callable's position.
    #[inline]
    fn execute_call_vectorcall(&mut self, nargs: u32, vm: &VirtualMachine) -> FrameResult {
        let nargs_usize = nargs as usize;
        let stack_len = self.localsplus.stack_len();
        debug_assert!(
            stack_len >= nargs_usize + 2,
            "CALL stack underflow: need callable + self_or_null + {nargs_usize} args, have {stack_len}"
        );
        // Fixed offsets of the three regions within the stack span.
        let callable_idx = stack_len - nargs_usize - 2;
        let self_or_null_idx = stack_len - nargs_usize - 1;
        let args_start = stack_len - nargs_usize;

        // Build args: [self?, arg1, ..., argN]
        // A present self (bound-method call) becomes the leading argument.
        let self_or_null = self
            .localsplus
            .stack_index_mut(self_or_null_idx)
            .take()
            .map(|sr| sr.to_pyobj());
        let has_self = self_or_null.is_some();

        let effective_nargs = if has_self {
            nargs_usize + 1
        } else {
            nargs_usize
        };
        let mut args_vec = Vec::with_capacity(effective_nargs);
        if let Some(self_val) = self_or_null {
            args_vec.push(self_val);
        }
        // Move the positional arguments out of their stack slots in order.
        for stack_idx in args_start..stack_len {
            let val = self
                .localsplus
                .stack_index_mut(stack_idx)
                .take()
                .unwrap()
                .to_pyobj();
            args_vec.push(val);
        }

        // Take the callable last, then shrink the stack past all moved slots.
        let callable_obj = self
            .localsplus
            .stack_index_mut(callable_idx)
            .take()
            .unwrap()
            .to_pyobj();
        self.localsplus.stack_truncate(callable_idx);

        // invoke_vectorcall falls back to FuncArgs if no vectorcall slot
        let result = callable_obj.vectorcall(args_vec, effective_nargs, None, vm)?;
        self.push_value(result);
        Ok(None)
    }
6605
    /// Vectorcall dispatch for Instruction::CallKw (positional + keyword args).
    ///
    /// Stack layout on entry (top is last):
    ///   [..., callable, self_or_null, arg1, ..., argN, kwarg_names]
    /// The trailing `kw_count` of the N args are the keyword values, named in
    /// order by the `kwarg_names` tuple. All slots are consumed and the call
    /// result is pushed in their place.
    #[inline]
    fn execute_call_kw_vectorcall(&mut self, nargs: u32, vm: &VirtualMachine) -> FrameResult {
        let nargs_usize = nargs as usize;

        // Pop kwarg_names tuple from top of stack
        let kwarg_names_obj = self.pop_value();
        let kwarg_names_tuple = kwarg_names_obj
            .downcast_ref::<PyTuple>()
            .expect("kwarg names should be tuple");
        let kw_count = kwarg_names_tuple.len();
        debug_assert!(kw_count <= nargs_usize, "CALL_KW kw_count exceeds nargs");

        let stack_len = self.localsplus.stack_len();
        debug_assert!(
            stack_len >= nargs_usize + 2,
            "CALL_KW stack underflow: need callable + self_or_null + {nargs_usize} args, have {stack_len}"
        );
        // Absolute stack indices of the consumed regions.
        let callable_idx = stack_len - nargs_usize - 2;
        let self_or_null_idx = stack_len - nargs_usize - 1;
        let args_start = stack_len - nargs_usize;

        // Build args: [self?, pos_arg1, ..., pos_argM, kw_val1, ..., kw_valK]
        let self_or_null = self
            .localsplus
            .stack_index_mut(self_or_null_idx)
            .take()
            .map(|sr| sr.to_pyobj());
        let has_self = self_or_null.is_some();

        // `nargs` counts positional and keyword values together; in release
        // builds the `expect` stands in for the debug_assert above.
        let pos_count = nargs_usize
            .checked_sub(kw_count)
            .expect("CALL_KW: kw_count exceeds nargs");
        // A bound `self` becomes an extra leading positional argument.
        let effective_nargs = if has_self { pos_count + 1 } else { pos_count };

        // Build the full args slice: positional (including self) + kwarg values
        let total_args = effective_nargs + kw_count;
        let mut args_vec = Vec::with_capacity(total_args);
        if let Some(self_val) = self_or_null {
            args_vec.push(self_val);
        }
        for stack_idx in args_start..stack_len {
            let val = self
                .localsplus
                .stack_index_mut(stack_idx)
                .take()
                .unwrap()
                .to_pyobj();
            args_vec.push(val);
        }

        let callable_obj = self
            .localsplus
            .stack_index_mut(callable_idx)
            .take()
            .unwrap()
            .to_pyobj();
        // Remove the (now emptied) consumed slots.
        self.localsplus.stack_truncate(callable_idx);

        // invoke_vectorcall falls back to FuncArgs if no vectorcall slot
        let kwnames = kwarg_names_tuple.as_slice();
        let result = callable_obj.vectorcall(args_vec, effective_nargs, Some(kwnames), vm)?;
        self.push_value(result);
        Ok(None)
    }
6671
6672    #[inline]
6673    fn execute_call(&mut self, args: FuncArgs, vm: &VirtualMachine) -> FrameResult {
6674        // Stack: [callable, self_or_null, ...]
6675        let self_or_null = self.pop_value_opt(); // Option<PyObjectRef>
6676        let callable = self.pop_value();
6677
6678        let final_args = if let Some(self_val) = self_or_null {
6679            let mut args = args;
6680            args.prepend_arg(self_val);
6681            args
6682        } else {
6683            args
6684        };
6685
6686        let value = callable.call(final_args, vm)?;
6687        self.push_value(value);
6688        Ok(None)
6689    }
6690
    /// Instrumented version of execute_call: fires CALL, C_RETURN, and C_RAISE events.
    ///
    /// Mirrors `execute_call` (pop self_or_null + callable, prepend bound
    /// self), but additionally drives `sys.monitoring`: a CALL event before
    /// the call, and C_RETURN / C_RAISE afterwards — the latter two only for
    /// non-Python callables (matching CPython, where Python callees report
    /// their own events from their own frames).
    fn execute_call_instrumented(&mut self, args: FuncArgs, vm: &VirtualMachine) -> FrameResult {
        let self_or_null = self.pop_value_opt();
        let callable = self.pop_value();

        let final_args = if let Some(self_val) = self_or_null {
            let mut args = args;
            args.prepend_arg(self_val);
            args
        } else {
            args
        };

        // Exact PyFunction => Python-level call; suppresses C_RETURN/C_RAISE.
        let is_python_call = callable.downcast_ref_if_exact::<PyFunction>(vm).is_some();

        // Fire CALL event
        let call_arg0 = if self.monitoring_mask & monitoring::EVENT_CALL != 0 {
            // arg0 is the first positional argument, or the MISSING sentinel
            // when the call has no arguments.
            let arg0 = final_args
                .args
                .first()
                .cloned()
                .unwrap_or_else(|| monitoring::get_missing(vm));
            // Byte offset of the current instruction: lasti points one past
            // it — 2 bytes per code unit.
            let offset = (self.lasti() - 1) * 2;
            monitoring::fire_call(vm, self.code, offset, &callable, arg0.clone())?;
            Some(arg0)
        } else {
            None
        };

        match callable.call(final_args, vm) {
            Ok(value) => {
                // C_RETURN only when a CALL event was emitted and the callee
                // was not a Python function.
                if let Some(arg0) = call_arg0
                    && !is_python_call
                {
                    let offset = (self.lasti() - 1) * 2;
                    monitoring::fire_c_return(vm, self.code, offset, &callable, arg0)?;
                }
                self.push_value(value);
                Ok(None)
            }
            Err(exc) => {
                // C_RAISE under the same conditions; if the monitoring
                // callback itself raises, that exception replaces the
                // original one.
                let exc = if let Some(arg0) = call_arg0
                    && !is_python_call
                {
                    let offset = (self.lasti() - 1) * 2;
                    match monitoring::fire_c_raise(vm, self.code, offset, &callable, arg0) {
                        Ok(()) => exc,
                        Err(monitor_exc) => monitor_exc,
                    }
                } else {
                    exc
                };
                Err(exc)
            }
        }
    }
6747
6748    fn execute_raise(&mut self, vm: &VirtualMachine, kind: bytecode::RaiseKind) -> FrameResult {
6749        let cause = match kind {
6750            bytecode::RaiseKind::RaiseCause => {
6751                let val = self.pop_value();
6752                Some(if vm.is_none(&val) {
6753                    // if the cause arg is none, we clear the cause
6754                    None
6755                } else {
6756                    // if the cause arg is an exception, we overwrite it
6757                    let ctor = ExceptionCtor::try_from_object(vm, val).map_err(|_| {
6758                        vm.new_type_error("exception causes must derive from BaseException")
6759                    })?;
6760                    Some(ctor.instantiate(vm)?)
6761                })
6762            }
6763            // if there's no cause arg, we keep the cause as is
6764            _ => None,
6765        };
6766        let exception = match kind {
6767            bytecode::RaiseKind::RaiseCause | bytecode::RaiseKind::Raise => {
6768                ExceptionCtor::try_from_object(vm, self.pop_value())?.instantiate(vm)?
6769            }
6770            bytecode::RaiseKind::BareRaise => {
6771                // RAISE_VARARGS 0: bare `raise` gets exception from VM state
6772                // This is the current exception set by PUSH_EXC_INFO
6773                vm.topmost_exception()
6774                    .ok_or_else(|| vm.new_runtime_error("No active exception to reraise"))?
6775            }
6776            bytecode::RaiseKind::ReraiseFromStack => {
6777                // RERAISE: gets exception from stack top
6778                // Used in cleanup blocks where exception is on stack after COPY 3
6779                let exc = self.pop_value();
6780                exc.downcast::<PyBaseException>().map_err(|obj| {
6781                    vm.new_type_error(format!(
6782                        "exceptions must derive from BaseException, not {}",
6783                        obj.class().name()
6784                    ))
6785                })?
6786            }
6787        };
6788        #[cfg(debug_assertions)]
6789        debug!("Exception raised: {exception:?} with cause: {cause:?}");
6790        if let Some(cause) = cause {
6791            exception.set___cause__(cause);
6792        }
6793        Err(exception)
6794    }
6795
6796    fn builtin_coro<'a>(&self, coro: &'a PyObject) -> Option<&'a Coro> {
6797        match_class!(match coro {
6798            ref g @ PyGenerator => Some(g.as_coro()),
6799            ref c @ PyCoroutine => Some(c.as_coro()),
6800            _ => None,
6801        })
6802    }
6803
6804    fn _send(
6805        &self,
6806        jen: &PyObject,
6807        val: PyObjectRef,
6808        vm: &VirtualMachine,
6809    ) -> PyResult<PyIterReturn> {
6810        match self.builtin_coro(jen) {
6811            Some(coro) => coro.send(jen, val, vm),
6812            // TODO: turn return type to PyResult<PyIterReturn> then ExecutionResult will be simplified
6813            None if vm.is_none(&val) => PyIter::new(jen).next(vm),
6814            None => {
6815                let meth = jen.get_attr("send", vm)?;
6816                PyIterReturn::from_pyresult(meth.call((val,), vm), vm)
6817            }
6818        }
6819    }
6820
    /// UNPACK_EX: unpack an iterable into `before` leading values, a list of
    /// whatever remains in the middle (the starred target), and `after`
    /// trailing values — e.g. `a, *b, c = value`.
    fn execute_unpack_ex(&mut self, vm: &VirtualMachine, before: u8, after: u8) -> FrameResult {
        let (before, after) = (before as usize, after as usize);
        let value = self.pop_value();
        // A type with neither the iter slot nor __getitem__ cannot be
        // iterated; detect this up front so the TypeError below can use the
        // dedicated "cannot unpack non-iterable" wording.
        let not_iterable = value.class().slots.iter.load().is_none()
            && value
                .get_class_attr(vm.ctx.intern_str("__getitem__"))
                .is_none();
        // Collect the whole iterable (the starred target needs its length).
        let elements: Vec<_> = value.try_to_value(vm).map_err(|e| {
            if not_iterable && e.class().is(vm.ctx.exceptions.type_error) {
                vm.new_type_error(format!(
                    "cannot unpack non-iterable {} object",
                    value.class().name()
                ))
            } else {
                e
            }
        })?;
        let min_expected = before + after;

        // Everything beyond the fixed prefix/suffix goes to the middle list;
        // checked_sub doubles as the "not enough values" test.
        let middle = elements.len().checked_sub(min_expected).ok_or_else(|| {
            vm.new_value_error(format!(
                "not enough values to unpack (expected at least {}, got {})",
                min_expected,
                elements.len()
            ))
        })?;

        let mut elements = elements;
        // Elements on stack from right-to-left: push the `after` suffix
        // first, reversed.
        self.localsplus.stack_extend(
            elements
                .drain(before + middle..)
                .rev()
                .map(|e| Some(PyStackRef::new_owned(e))),
        );

        // The middle run becomes the starred list target.
        let middle_elements = elements.drain(before..).collect();
        let t = vm.ctx.new_list(middle_elements);
        self.push_value(t.into());

        // Lastly the first reversed values:
        self.localsplus.stack_extend(
            elements
                .into_iter()
                .rev()
                .map(|e| Some(PyStackRef::new_owned(e))),
        );

        Ok(None)
    }
6871
6872    #[inline]
6873    fn jump(&mut self, label: bytecode::Label) {
6874        let target_pc = label.as_u32();
6875        vm_trace!("jump from {:?} to {:?}", self.lasti(), target_pc);
6876        self.update_lasti(|i| *i = target_pc);
6877    }
6878
6879    /// Jump forward by `delta` code units from after instruction + caches.
6880    /// lasti is already at instruction_index + 1, so after = lasti + caches.
6881    ///
6882    /// Unchecked arithmetic is intentional: the compiler guarantees valid
6883    /// targets, and debug builds will catch overflow via Rust's default checks.
6884    #[inline]
6885    fn jump_relative_forward(&mut self, delta: u32, caches: u32) {
6886        let target = self.lasti() + caches + delta;
6887        self.update_lasti(|i| *i = target);
6888    }
6889
6890    /// Jump backward by `delta` code units from after instruction + caches.
6891    ///
6892    /// Unchecked arithmetic is intentional: the compiler guarantees valid
6893    /// targets, and debug builds will catch underflow via Rust's default checks.
6894    #[inline]
6895    fn jump_relative_backward(&mut self, delta: u32, caches: u32) {
6896        let target = self.lasti() + caches - delta;
6897        self.update_lasti(|i| *i = target);
6898    }
6899
6900    #[inline]
6901    fn pop_jump_if_relative(
6902        &mut self,
6903        vm: &VirtualMachine,
6904        arg: bytecode::OpArg,
6905        caches: u32,
6906        flag: bool,
6907    ) -> FrameResult {
6908        let obj = self.pop_value();
6909        let value = obj.try_to_bool(vm)?;
6910        if value == flag {
6911            self.jump_relative_forward(u32::from(arg), caches);
6912        }
6913        Ok(None)
6914    }
6915
6916    /// Advance the iterator on top of stack.
6917    /// Returns `true` if iteration continued (item pushed), `false` if exhausted (jumped).
6918    fn execute_for_iter(
6919        &mut self,
6920        vm: &VirtualMachine,
6921        target: bytecode::Label,
6922    ) -> Result<bool, PyBaseExceptionRef> {
6923        let top = self.top_value();
6924
6925        // FOR_ITER_RANGE: bypass generic iterator protocol for range iterators
6926        if let Some(range_iter) = top.downcast_ref_if_exact::<PyRangeIterator>(vm) {
6927            if let Some(value) = range_iter.fast_next() {
6928                self.push_value(vm.ctx.new_int(value).into());
6929                return Ok(true);
6930            }
6931            if vm.use_tracing.get() && !vm.is_none(&self.object.trace.lock()) {
6932                let stop_exc = vm.new_stop_iteration(None);
6933                self.fire_exception_trace(&stop_exc, vm)?;
6934            }
6935            self.jump(self.for_iter_jump_target(target));
6936            return Ok(false);
6937        }
6938
6939        let top_of_stack = PyIter::new(top);
6940        let next_obj = top_of_stack.next(vm);
6941
6942        match next_obj {
6943            Ok(PyIterReturn::Return(value)) => {
6944                self.push_value(value);
6945                Ok(true)
6946            }
6947            Ok(PyIterReturn::StopIteration(value)) => {
6948                // Fire 'exception' trace event for StopIteration, matching
6949                // FOR_ITER's inline call to _PyEval_MonitorRaise.
6950                if vm.use_tracing.get() && !vm.is_none(&self.object.trace.lock()) {
6951                    let stop_exc = vm.new_stop_iteration(value);
6952                    self.fire_exception_trace(&stop_exc, vm)?;
6953                }
6954                self.jump(self.for_iter_jump_target(target));
6955                Ok(false)
6956            }
6957            Err(next_error) => {
6958                self.pop_value();
6959                Err(next_error)
6960            }
6961        }
6962    }
6963
6964    /// Compute the jump target for FOR_ITER exhaustion: skip END_FOR and jump to POP_ITER.
6965    fn for_iter_jump_target(&self, target: bytecode::Label) -> bytecode::Label {
6966        let target_idx = target.as_usize();
6967        if let Some(unit) = self.code.instructions.get(target_idx)
6968            && matches!(
6969                unit.op,
6970                bytecode::Instruction::EndFor | bytecode::Instruction::InstrumentedEndFor
6971            )
6972        {
6973            return bytecode::Label::from_u32(target.as_u32() + 1);
6974        }
6975        target
6976    }
6977    fn execute_make_function(&mut self, vm: &VirtualMachine) -> FrameResult {
6978        // MakeFunction only takes code object, no flags
6979        let code_obj: PyRef<PyCode> = self
6980            .pop_value()
6981            .downcast()
6982            .expect("Stack value should be code object");
6983
6984        // Create function with minimal attributes
6985        let func_obj = PyFunction::new(code_obj, self.globals.clone(), vm)?.into_pyobject(vm);
6986
6987        self.push_value(func_obj);
6988        Ok(None)
6989    }
6990
    /// SET_FUNCTION_ATTRIBUTE: attach the attribute selected by `attr` to
    /// the function object on top of the stack, consuming the attribute
    /// value beneath it.
    fn execute_set_function_attribute(
        &mut self,
        vm: &VirtualMachine,
        attr: bytecode::MakeFunctionFlag,
    ) -> FrameResult {
        // SET_FUNCTION_ATTRIBUTE sets attributes on a function
        // Stack: [..., attr_value, func] -> [..., func]
        // Stack order: func is at -1, attr_value is at -2

        // Swap: pop func, overwrite the attr_value slot with it, and keep
        // the displaced attr_value.
        let func = self.pop_value_opt();
        let attr_value = expect_unchecked(self.replace_top(func), "attr_value must not be null");

        let func = self.top_value();
        // Get the function reference and call the new method
        let func_ref = func
            .downcast_ref_if_exact::<PyFunction>(vm)
            .expect("SET_FUNCTION_ATTRIBUTE expects function on stack");

        let payload: &PyFunction = func_ref.payload();
        // SetFunctionAttribute always follows MakeFunction, so at this point
        // there are no other references to func. It is therefore safe to treat it as mutable.
        // NOTE(review): this const->mut cast is only sound if that freshness
        // invariant holds for every bytecode sequence the compiler can emit —
        // worth confirming against the code generator.
        unsafe {
            let payload_ptr = payload as *const PyFunction as *mut PyFunction;
            (*payload_ptr).set_function_attribute(attr, attr_value, vm)?;
        };

        Ok(None)
    }
7019
    /// Dispatch a BINARY_OP instruction: pop rhs then lhs, apply `op`, and
    /// push the result. Includes exact-int fast paths for (in-place)
    /// add/subtract that avoid the full protocol dispatch.
    #[cfg_attr(feature = "flame-it", flame("Frame"))]
    fn execute_bin_op(&mut self, vm: &VirtualMachine, op: bytecode::BinaryOperator) -> FrameResult {
        // Right operand is on top of the stack.
        let b_ref = &self.pop_value();
        let a_ref = &self.pop_value();
        let value = match op {
            // BINARY_OP_ADD_INT / BINARY_OP_SUBTRACT_INT fast paths:
            // bypass binary_op1 dispatch for exact int types, use i64 arithmetic
            // when possible to avoid BigInt heap allocation.
            bytecode::BinaryOperator::Add | bytecode::BinaryOperator::InplaceAdd => {
                if let (Some(a), Some(b)) = (
                    a_ref.downcast_ref_if_exact::<PyInt>(vm),
                    b_ref.downcast_ref_if_exact::<PyInt>(vm),
                ) {
                    // Ints are immutable, so `+=` and `+` coincide here.
                    Ok(self.int_add(a.as_bigint(), b.as_bigint(), vm))
                } else if matches!(op, bytecode::BinaryOperator::Add) {
                    vm._add(a_ref, b_ref)
                } else {
                    vm._iadd(a_ref, b_ref)
                }
            }
            bytecode::BinaryOperator::Subtract | bytecode::BinaryOperator::InplaceSubtract => {
                if let (Some(a), Some(b)) = (
                    a_ref.downcast_ref_if_exact::<PyInt>(vm),
                    b_ref.downcast_ref_if_exact::<PyInt>(vm),
                ) {
                    Ok(self.int_sub(a.as_bigint(), b.as_bigint(), vm))
                } else if matches!(op, bytecode::BinaryOperator::Subtract) {
                    vm._sub(a_ref, b_ref)
                } else {
                    vm._isub(a_ref, b_ref)
                }
            }
            bytecode::BinaryOperator::Multiply => vm._mul(a_ref, b_ref),
            bytecode::BinaryOperator::MatrixMultiply => vm._matmul(a_ref, b_ref),
            // Two-argument pow: the modulus argument is fixed to None.
            bytecode::BinaryOperator::Power => vm._pow(a_ref, b_ref, vm.ctx.none.as_object()),
            bytecode::BinaryOperator::TrueDivide => vm._truediv(a_ref, b_ref),
            bytecode::BinaryOperator::FloorDivide => vm._floordiv(a_ref, b_ref),
            bytecode::BinaryOperator::Remainder => vm._mod(a_ref, b_ref),
            bytecode::BinaryOperator::Lshift => vm._lshift(a_ref, b_ref),
            bytecode::BinaryOperator::Rshift => vm._rshift(a_ref, b_ref),
            bytecode::BinaryOperator::Xor => vm._xor(a_ref, b_ref),
            bytecode::BinaryOperator::Or => vm._or(a_ref, b_ref),
            bytecode::BinaryOperator::And => vm._and(a_ref, b_ref),
            bytecode::BinaryOperator::InplaceMultiply => vm._imul(a_ref, b_ref),
            bytecode::BinaryOperator::InplaceMatrixMultiply => vm._imatmul(a_ref, b_ref),
            bytecode::BinaryOperator::InplacePower => {
                vm._ipow(a_ref, b_ref, vm.ctx.none.as_object())
            }
            bytecode::BinaryOperator::InplaceTrueDivide => vm._itruediv(a_ref, b_ref),
            bytecode::BinaryOperator::InplaceFloorDivide => vm._ifloordiv(a_ref, b_ref),
            bytecode::BinaryOperator::InplaceRemainder => vm._imod(a_ref, b_ref),
            bytecode::BinaryOperator::InplaceLshift => vm._ilshift(a_ref, b_ref),
            bytecode::BinaryOperator::InplaceRshift => vm._irshift(a_ref, b_ref),
            bytecode::BinaryOperator::InplaceXor => vm._ixor(a_ref, b_ref),
            bytecode::BinaryOperator::InplaceOr => vm._ior(a_ref, b_ref),
            bytecode::BinaryOperator::InplaceAnd => vm._iand(a_ref, b_ref),
            // BINARY_OP also covers subscripting: a[b].
            bytecode::BinaryOperator::Subscr => a_ref.get_item(b_ref.as_object(), vm),
        }?;

        self.push_value(value);
        Ok(None)
    }
7082
7083    /// Int addition with i64 fast path to avoid BigInt heap allocation.
7084    #[inline]
7085    fn int_add(&self, a: &BigInt, b: &BigInt, vm: &VirtualMachine) -> PyObjectRef {
7086        use num_traits::ToPrimitive;
7087        if let (Some(av), Some(bv)) = (a.to_i64(), b.to_i64())
7088            && let Some(result) = av.checked_add(bv)
7089        {
7090            return vm.ctx.new_int(result).into();
7091        }
7092        vm.ctx.new_int(a + b).into()
7093    }
7094
7095    /// Int subtraction with i64 fast path to avoid BigInt heap allocation.
7096    #[inline]
7097    fn int_sub(&self, a: &BigInt, b: &BigInt, vm: &VirtualMachine) -> PyObjectRef {
7098        use num_traits::ToPrimitive;
7099        if let (Some(av), Some(bv)) = (a.to_i64(), b.to_i64())
7100            && let Some(result) = av.checked_sub(bv)
7101        {
7102            return vm.ctx.new_int(result).into();
7103        }
7104        vm.ctx.new_int(a - b).into()
7105    }
7106
7107    #[cold]
7108    fn setup_annotations(&mut self, vm: &VirtualMachine) -> FrameResult {
7109        let __annotations__ = identifier!(vm, __annotations__);
7110        let locals_obj = self.locals.as_object(vm);
7111        // Try using locals as dict first, if not, fallback to generic method.
7112        let has_annotations = if let Some(d) = locals_obj.downcast_ref_if_exact::<PyDict>(vm) {
7113            d.contains_key(__annotations__, vm)
7114        } else {
7115            self._in(vm, __annotations__.as_object(), locals_obj)?
7116        };
7117        if !has_annotations {
7118            locals_obj.set_item(__annotations__, vm.ctx.new_dict().into(), vm)?;
7119        }
7120        Ok(None)
7121    }
7122
    /// _PyEval_UnpackIterableStackRef
    ///
    /// UNPACK_SEQUENCE: unpack exactly `size` values from the popped
    /// iterable onto the stack (first value ends up on top).
    fn unpack_sequence(&mut self, size: u32, vm: &VirtualMachine) -> FrameResult {
        let value = self.pop_value();
        let size = size as usize;

        // Fast path for exact tuple/list types (not subclasses) — push
        // elements directly from the slice without intermediate Vec allocation,
        // matching UNPACK_SEQUENCE_TUPLE / UNPACK_SEQUENCE_LIST specializations.
        let cls = value.class();
        if cls.is(vm.ctx.types.tuple_type) {
            let tuple = value.downcast_ref::<PyTuple>().unwrap();
            return self.unpack_fast(tuple.as_slice(), size, vm);
        }
        if cls.is(vm.ctx.types.list_type) {
            let list = value.downcast_ref::<PyList>().unwrap();
            let borrowed = list.borrow_vec();
            return self.unpack_fast(&borrowed, size, vm);
        }

        // General path — iterate up to `size + 1` elements to avoid
        // consuming the entire iterator (fixes hang on infinite sequences).
        // A type with neither the iter slot nor __getitem__ cannot be
        // iterated; detect this to produce the dedicated TypeError wording.
        let not_iterable = value.class().slots.iter.load().is_none()
            && value
                .get_class_attr(vm.ctx.intern_str("__getitem__"))
                .is_none();
        let iter = PyIter::try_from_object(vm, value.clone()).map_err(|e| {
            if not_iterable && e.class().is(vm.ctx.exceptions.type_error) {
                vm.new_type_error(format!(
                    "cannot unpack non-iterable {} object",
                    value.class().name()
                ))
            } else {
                e
            }
        })?;

        // Pull exactly `size` items; early exhaustion is a ValueError.
        let mut elements = Vec::with_capacity(size);
        for _ in 0..size {
            match iter.next(vm)? {
                PyIterReturn::Return(item) => elements.push(item),
                PyIterReturn::StopIteration(_) => {
                    return Err(vm.new_value_error(format!(
                        "not enough values to unpack (expected {size}, got {})",
                        elements.len()
                    )));
                }
            }
        }

        // Check that the iterator is exhausted.
        match iter.next(vm)? {
            PyIterReturn::Return(_) => {
                // For exact dict types, show "got N" using the container's
                // size (PyDict_Size). Exact tuple/list are handled by the
                // fast path above and never reach here.
                let msg = if value.class().is(vm.ctx.types.dict_type) {
                    if let Ok(got) = value.length(vm) {
                        if got > size {
                            format!("too many values to unpack (expected {size}, got {got})")
                        } else {
                            format!("too many values to unpack (expected {size})")
                        }
                    } else {
                        format!("too many values to unpack (expected {size})")
                    }
                } else {
                    format!("too many values to unpack (expected {size})")
                };
                Err(vm.new_value_error(msg))
            }
            PyIterReturn::StopIteration(_) => {
                // Push reversed so the first element lands on top.
                self.localsplus.stack_extend(
                    elements
                        .into_iter()
                        .rev()
                        .map(|e| Some(PyStackRef::new_owned(e))),
                );
                Ok(None)
            }
        }
    }
7204
7205    fn unpack_fast(
7206        &mut self,
7207        elements: &[PyObjectRef],
7208        size: usize,
7209        vm: &VirtualMachine,
7210    ) -> FrameResult {
7211        match elements.len().cmp(&size) {
7212            core::cmp::Ordering::Equal => {
7213                for elem in elements.iter().rev() {
7214                    self.push_value(elem.clone());
7215                }
7216                Ok(None)
7217            }
7218            core::cmp::Ordering::Greater => Err(vm.new_value_error(format!(
7219                "too many values to unpack (expected {size}, got {})",
7220                elements.len()
7221            ))),
7222            core::cmp::Ordering::Less => Err(vm.new_value_error(format!(
7223                "not enough values to unpack (expected {size}, got {})",
7224                elements.len()
7225            ))),
7226        }
7227    }
7228
7229    fn convert_value(
7230        &mut self,
7231        conversion: bytecode::ConvertValueOparg,
7232        vm: &VirtualMachine,
7233    ) -> FrameResult {
7234        use bytecode::ConvertValueOparg;
7235        let value = self.pop_value();
7236        let value = match conversion {
7237            ConvertValueOparg::Str => value.str(vm)?.into(),
7238            ConvertValueOparg::Repr => value.repr(vm)?.into(),
7239            ConvertValueOparg::Ascii => builtins::ascii(value, vm)?.into(),
7240            ConvertValueOparg::None => value,
7241        };
7242
7243        self.push_value(value);
7244        Ok(None)
7245    }
7246
7247    fn _in(&self, vm: &VirtualMachine, needle: &PyObject, haystack: &PyObject) -> PyResult<bool> {
7248        let found = vm._contains(haystack, needle)?;
7249        Ok(found)
7250    }
7251
7252    #[inline(always)]
7253    fn _not_in(
7254        &self,
7255        vm: &VirtualMachine,
7256        needle: &PyObject,
7257        haystack: &PyObject,
7258    ) -> PyResult<bool> {
7259        Ok(!self._in(vm, needle, haystack)?)
7260    }
7261
7262    #[cfg_attr(feature = "flame-it", flame("Frame"))]
7263    fn execute_compare(
7264        &mut self,
7265        vm: &VirtualMachine,
7266        op: bytecode::ComparisonOperator,
7267    ) -> FrameResult {
7268        let b = self.pop_value();
7269        let a = self.pop_value();
7270        let cmp_op: PyComparisonOp = op.into();
7271
7272        // COMPARE_OP_INT: leaf type, cannot recurse — skip rich_compare dispatch
7273        if let (Some(a_int), Some(b_int)) = (
7274            a.downcast_ref_if_exact::<PyInt>(vm),
7275            b.downcast_ref_if_exact::<PyInt>(vm),
7276        ) {
7277            let result = cmp_op.eval_ord(a_int.as_bigint().cmp(b_int.as_bigint()));
7278            self.push_value(vm.ctx.new_bool(result).into());
7279            return Ok(None);
7280        }
7281        // COMPARE_OP_FLOAT: leaf type, cannot recurse — skip rich_compare dispatch.
7282        // Falls through on NaN (partial_cmp returns None) for correct != semantics.
7283        if let (Some(a_f), Some(b_f)) = (
7284            a.downcast_ref_if_exact::<PyFloat>(vm),
7285            b.downcast_ref_if_exact::<PyFloat>(vm),
7286        ) && let Some(ord) = a_f.to_f64().partial_cmp(&b_f.to_f64())
7287        {
7288            let result = cmp_op.eval_ord(ord);
7289            self.push_value(vm.ctx.new_bool(result).into());
7290            return Ok(None);
7291        }
7292
7293        let value = a.rich_compare(b, cmp_op, vm)?;
7294        self.push_value(value);
7295        Ok(None)
7296    }
7297
    /// Read a cached descriptor pointer and validate it against the expected
    /// type version, using a lock-free double-check pattern:
    ///   1. read pointer  →  incref (try_to_owned)
    ///   2. re-read version + pointer and confirm they still match
    ///
    /// This matches the read-side pattern used in LOAD_ATTR_METHOD_WITH_VALUES
    /// and friends: no read-side lock, relying on the write side to invalidate
    /// the version tag before swapping the pointer.
    ///
    /// Cache slot layout (relative to `cache_base`): +1 holds the type
    /// version tag (u32), +5 holds the raw descriptor pointer.
    #[inline]
    fn try_read_cached_descriptor(
        &self,
        cache_base: usize,
        expected_type_version: u32,
    ) -> Option<PyObjectRef> {
        let descr_ptr = self.code.instructions.read_cache_ptr(cache_base + 5);
        // Zero means "no descriptor cached".
        if descr_ptr == 0 {
            return None;
        }
        // SAFETY: `descr_ptr` was a valid `*mut PyObject` when the writer
        // stored it, and the writer keeps a strong reference alive in
        // `InlineCacheEntry`.  `try_to_owned_from_ptr` performs a
        // conditional incref that fails if the object is already freed.
        let cloned = unsafe { PyObject::try_to_owned_from_ptr(descr_ptr as *mut PyObject) }?;
        // Double-check: version tag still matches AND pointer unchanged.
        if self.code.instructions.read_cache_u32(cache_base + 1) == expected_type_version
            && self.code.instructions.read_cache_ptr(cache_base + 5) == descr_ptr
        {
            Some(cloned)
        } else {
            // A writer raced us; discard the incref'd object and miss.
            drop(cloned);
            None
        }
    }
7331
    /// Publish a descriptor pointer + type version into an inline cache.
    ///
    /// Write protocol (the reader counterpart is `try_read_cached_descriptor`):
    /// zero the version tag, write the payload pointer, then publish the
    /// version. Readers double-check version+ptr after incref, so no writer
    /// lock is needed.
    ///
    /// # Safety
    /// `cache_base` must satisfy `write_cache_u32`/`write_cache_ptr`'s
    /// requirements for this code object, and `descr_ptr` must point to an
    /// object the caller keeps alive while the entry is published.
    #[inline]
    unsafe fn write_cached_descriptor(
        &self,
        cache_base: usize,
        type_version: u32,
        descr_ptr: usize,
    ) {
        // Publish descriptor cache with version-invalidation protocol:
        // invalidate version first, then write payload, then publish version.
        // Reader double-checks version+ptr after incref, so no writer lock needed.
        unsafe {
            // Step 1: zero the version tag so concurrent readers reject the entry.
            self.code.instructions.write_cache_u32(cache_base + 1, 0);
            // Step 2: swap in the new descriptor pointer.
            self.code
                .instructions
                .write_cache_ptr(cache_base + 5, descr_ptr);
            // Step 3: publish the matching version, re-enabling the entry.
            self.code
                .instructions
                .write_cache_u32(cache_base + 1, type_version);
        }
    }
7352
    /// Like `write_cached_descriptor`, but additionally stores a metaclass
    /// version tag at `cache_base + 3` (used by the class-attribute
    /// specializations that must also guard against metaclass mutation).
    ///
    /// Follows the same invalidate → write payload → publish protocol: the
    /// version at `+1` is zeroed first and restored last, so readers never
    /// observe a half-written entry as valid.
    ///
    /// # Safety
    /// Same contract as `write_cached_descriptor`: the object behind
    /// `descr_ptr` must outlive any possible read of this cache entry.
    #[inline]
    unsafe fn write_cached_descriptor_with_metaclass(
        &self,
        cache_base: usize,
        type_version: u32,
        metaclass_version: u32,
        descr_ptr: usize,
    ) {
        unsafe {
            // Invalidate the entry before touching any payload slots.
            self.code.instructions.write_cache_u32(cache_base + 1, 0);
            self.code
                .instructions
                .write_cache_u32(cache_base + 3, metaclass_version);
            self.code
                .instructions
                .write_cache_ptr(cache_base + 5, descr_ptr);
            // Publish: restoring the type version makes the entry readable.
            self.code
                .instructions
                .write_cache_u32(cache_base + 1, type_version);
        }
    }
7374
7375    #[inline]
7376    unsafe fn write_cached_binary_op_extend_descr(
7377        &self,
7378        cache_base: usize,
7379        descr: Option<&'static BinaryOpExtendSpecializationDescr>,
7380    ) {
7381        let ptr = descr.map_or(0, |d| {
7382            d as *const BinaryOpExtendSpecializationDescr as usize
7383        });
7384        unsafe {
7385            self.code
7386                .instructions
7387                .write_cache_ptr(cache_base + BINARY_OP_EXTEND_EXTERNAL_CACHE_OFFSET, ptr);
7388        }
7389    }
7390
7391    #[inline]
7392    fn read_cached_binary_op_extend_descr(
7393        &self,
7394        cache_base: usize,
7395    ) -> Option<&'static BinaryOpExtendSpecializationDescr> {
7396        let ptr = self
7397            .code
7398            .instructions
7399            .read_cache_ptr(cache_base + BINARY_OP_EXTEND_EXTERNAL_CACHE_OFFSET);
7400        if ptr == 0 {
7401            return None;
7402        }
7403        // SAFETY: We only store pointers to entries in `BINARY_OP_EXTEND_DESCRIPTORS`.
7404        Some(unsafe { &*(ptr as *const BinaryOpExtendSpecializationDescr) })
7405    }
7406
7407    #[inline]
7408    fn binary_op_extended_specialization(
7409        &self,
7410        op: bytecode::BinaryOperator,
7411        lhs: &PyObject,
7412        rhs: &PyObject,
7413        vm: &VirtualMachine,
7414    ) -> Option<&'static BinaryOpExtendSpecializationDescr> {
7415        BINARY_OP_EXTEND_DESCRIPTORS
7416            .iter()
7417            .find(|d| d.oparg == op && (d.guard)(lhs, rhs, vm))
7418    }
7419
7420    fn load_attr(&mut self, vm: &VirtualMachine, oparg: LoadAttr) -> FrameResult {
7421        self.adaptive(|s, ii, cb| s.specialize_load_attr(vm, oparg, ii, cb));
7422        self.load_attr_slow(vm, oparg)
7423    }
7424
    /// Try to rewrite a generic `LoadAttr` at `instr_idx` into a specialized
    /// opcode, based on the receiver currently on top of the stack.
    ///
    /// On success the instruction is replaced via `specialize_at` and the
    /// cache slots starting at `cache_base` are populated. On failure the
    /// adaptive counter is either backed off (miss we may retry with longer
    /// delay) or put into cooldown (`cooldown_adaptive_at`).
    fn specialize_load_attr(
        &mut self,
        _vm: &VirtualMachine,
        oparg: LoadAttr,
        instr_idx: usize,
        cache_base: usize,
    ) {
        // Pre-check: bail if already specialized by another thread
        if !matches!(
            self.code.instructions.read_op(instr_idx),
            Instruction::LoadAttr { .. }
        ) {
            return;
        }
        let obj = self.top_value();
        let cls = obj.class();

        // Check if this is a type object (class attribute access)
        if obj.downcast_ref::<PyType>().is_some() {
            self.specialize_class_load_attr(_vm, oparg, instr_idx, cache_base);
            return;
        }

        // Only specialize if getattro is the default (PyBaseObject::getattro)
        let is_default_getattro = cls
            .slots
            .getattro
            .load()
            .is_some_and(|f| f as usize == PyBaseObject::getattro as *const () as usize);
        if !is_default_getattro {
            // Non-default attribute lookup. The one specializable shape here
            // is a pure-Python `__getattribute__` override with no
            // `__getattr__` fallback, cached together with the function's
            // version so later edits to the function deoptimize it.
            let mut type_version = cls.tp_version_tag.load(Acquire);
            if type_version == 0 {
                type_version = cls.assign_version_tag();
            }
            if type_version != 0
                && !oparg.is_method()
                && !self.specialization_eval_frame_active(_vm)
                && cls.get_attr(identifier!(_vm, __getattr__)).is_none()
                && let Some(getattribute) = cls.get_attr(identifier!(_vm, __getattribute__))
                && let Some(func) = getattribute.downcast_ref_if_exact::<PyFunction>(_vm)
                && func.can_specialize_call(2)
            {
                let func_version = func.get_version_for_current_state();
                if func_version != 0 {
                    let func_ptr = &*getattribute as *const PyObject as usize;
                    unsafe {
                        // Cache layout: +3 = function version, then the
                        // version+pointer pair via the publish protocol.
                        self.code
                            .instructions
                            .write_cache_u32(cache_base + 3, func_version);
                        self.write_cached_descriptor(cache_base, type_version, func_ptr);
                    }
                    self.specialize_at(
                        instr_idx,
                        cache_base,
                        Instruction::LoadAttrGetattributeOverridden,
                    );
                    return;
                }
            }
            // Couldn't specialize the override — back off before retrying.
            unsafe {
                self.code.instructions.write_adaptive_counter(
                    cache_base,
                    bytecode::adaptive_counter_backoff(
                        self.code.instructions.read_adaptive_counter(cache_base),
                    ),
                );
            }
            return;
        }

        // Get or assign type version
        let mut type_version = cls.tp_version_tag.load(Acquire);
        if type_version == 0 {
            type_version = cls.assign_version_tag();
        }
        if type_version == 0 {
            // Version counter overflow — backoff to avoid re-attempting every execution
            unsafe {
                self.code.instructions.write_adaptive_counter(
                    cache_base,
                    bytecode::adaptive_counter_backoff(
                        self.code.instructions.read_adaptive_counter(cache_base),
                    ),
                );
            }
            return;
        }

        let attr_name = self.code.names[oparg.name_idx() as usize];

        // Match CPython: only specialize module attribute loads when the
        // current module dict has no __getattr__ override and the attribute is
        // already present.
        if let Some(module) = obj.downcast_ref_if_exact::<PyModule>(_vm) {
            let module_dict = module.dict();
            match (
                module_dict.get_item_opt(identifier!(_vm, __getattr__), _vm),
                module_dict.get_item_opt(attr_name, _vm),
            ) {
                // No __getattr__ and the attribute exists: specialize.
                (Ok(None), Ok(Some(_))) => {
                    unsafe {
                        self.code
                            .instructions
                            .write_cache_u32(cache_base + 1, type_version);
                    }
                    self.specialize_at(instr_idx, cache_base, Instruction::LoadAttrModule);
                }
                // Lookups succeeded but the shape is wrong — cooldown.
                (Ok(_), Ok(_)) => self.cooldown_adaptive_at(cache_base),
                // A dict lookup raised — back off and let the slow path report.
                _ => unsafe {
                    self.code.instructions.write_adaptive_counter(
                        cache_base,
                        bytecode::adaptive_counter_backoff(
                            self.code.instructions.read_adaptive_counter(cache_base),
                        ),
                    );
                },
            }
            return;
        }

        // Look up attr in class via MRO
        let cls_attr = cls.get_attr(attr_name);
        let class_has_dict = cls.slots.flags.has_feature(PyTypeFlags::HAS_DICT);

        if oparg.is_method() {
            // Method specialization
            if let Some(ref descr) = cls_attr
                && descr
                    .class()
                    .slots
                    .flags
                    .has_feature(PyTypeFlags::METHOD_DESCRIPTOR)
            {
                let descr_ptr = &**descr as *const PyObject as usize;
                unsafe {
                    self.write_cached_descriptor(cache_base, type_version, descr_ptr);
                }

                // Pick the variant by how the instance stores its dict.
                let new_op = if !class_has_dict {
                    Instruction::LoadAttrMethodNoDict
                } else if obj.dict().is_none() {
                    Instruction::LoadAttrMethodLazyDict
                } else {
                    Instruction::LoadAttrMethodWithValues
                };
                self.specialize_at(instr_idx, cache_base, new_op);
                return;
            }
            // Can't specialize this method call
            unsafe {
                self.code.instructions.write_adaptive_counter(
                    cache_base,
                    bytecode::adaptive_counter_backoff(
                        self.code.instructions.read_adaptive_counter(cache_base),
                    ),
                );
            }
        } else {
            // Regular attribute access
            // Data descriptor = has both __get__ and __set__ slots.
            let has_data_descr = cls_attr.as_ref().is_some_and(|descr| {
                let descr_cls = descr.class();
                descr_cls.slots.descr_get.load().is_some()
                    && descr_cls.slots.descr_set.load().is_some()
            });
            let has_descr_get = cls_attr
                .as_ref()
                .is_some_and(|descr| descr.class().slots.descr_get.load().is_some());

            if has_data_descr {
                // Check for member descriptor (slot access)
                if let Some(ref descr) = cls_attr
                    && let Some(member_descr) = descr.downcast_ref::<PyMemberDescriptor>()
                    && let MemberGetter::Offset(offset) = member_descr.member.getter
                {
                    unsafe {
                        // +1 = type version, +3 = slot offset within the object.
                        self.code
                            .instructions
                            .write_cache_u32(cache_base + 1, type_version);
                        self.code
                            .instructions
                            .write_cache_u32(cache_base + 3, offset as u32);
                    }
                    self.specialize_at(instr_idx, cache_base, Instruction::LoadAttrSlot);
                } else if let Some(ref descr) = cls_attr
                    && let Some(prop) = descr.downcast_ref::<PyProperty>()
                    && let Some(fget) = prop.get_fget()
                    && let Some(func) = fget.downcast_ref_if_exact::<PyFunction>(_vm)
                    && func.can_specialize_call(1)
                    && !self.specialization_eval_frame_active(_vm)
                {
                    // Property specialization caches fget directly.
                    let fget_ptr = &*fget as *const PyObject as usize;
                    unsafe {
                        self.write_cached_descriptor(cache_base, type_version, fget_ptr);
                    }
                    self.specialize_at(instr_idx, cache_base, Instruction::LoadAttrProperty);
                } else {
                    // Unsupported data-descriptor kind — back off.
                    unsafe {
                        self.code.instructions.write_adaptive_counter(
                            cache_base,
                            bytecode::adaptive_counter_backoff(
                                self.code.instructions.read_adaptive_counter(cache_base),
                            ),
                        );
                    }
                }
            } else if has_descr_get {
                // Non-data descriptor with __get__ — can't specialize
                unsafe {
                    self.code.instructions.write_adaptive_counter(
                        cache_base,
                        bytecode::adaptive_counter_backoff(
                            self.code.instructions.read_adaptive_counter(cache_base),
                        ),
                    );
                }
            } else if class_has_dict {
                if let Some(ref descr) = cls_attr {
                    // Plain class attr + class supports dict — check dict first, fallback
                    let descr_ptr = &**descr as *const PyObject as usize;
                    unsafe {
                        self.write_cached_descriptor(cache_base, type_version, descr_ptr);
                    }
                    self.specialize_at(
                        instr_idx,
                        cache_base,
                        Instruction::LoadAttrNondescriptorWithValues,
                    );
                } else {
                    // Match CPython ABSENT/no-shadow behavior: if the
                    // attribute is missing on both the class and the current
                    // instance, keep the generic opcode and just enter
                    // cooldown instead of specializing a repeated miss path.
                    let has_instance_attr = if let Some(dict) = obj.dict() {
                        match dict.get_item_opt(attr_name, _vm) {
                            Ok(Some(_)) => true,
                            Ok(None) => false,
                            Err(_) => {
                                // Dict lookup raised — back off and bail;
                                // the slow path will surface the error.
                                unsafe {
                                    self.code.instructions.write_adaptive_counter(
                                        cache_base,
                                        bytecode::adaptive_counter_backoff(
                                            self.code
                                                .instructions
                                                .read_adaptive_counter(cache_base),
                                        ),
                                    );
                                }
                                return;
                            }
                        }
                    } else {
                        false
                    };
                    if has_instance_attr {
                        unsafe {
                            self.code
                                .instructions
                                .write_cache_u32(cache_base + 1, type_version);
                        }
                        self.specialize_at(instr_idx, cache_base, Instruction::LoadAttrWithHint);
                    } else {
                        self.cooldown_adaptive_at(cache_base);
                    }
                }
            } else if let Some(ref descr) = cls_attr {
                // No dict support, plain class attr — cache directly
                let descr_ptr = &**descr as *const PyObject as usize;
                unsafe {
                    self.write_cached_descriptor(cache_base, type_version, descr_ptr);
                }
                self.specialize_at(
                    instr_idx,
                    cache_base,
                    Instruction::LoadAttrNondescriptorNoDict,
                );
            } else {
                // No dict and no class attr: repeated miss path, so cooldown.
                self.cooldown_adaptive_at(cache_base);
            }
        }
    }
7707
    /// `LoadAttr` specialization for the case where the receiver is itself a
    /// type object (a class-attribute access such as `SomeClass.attr`).
    ///
    /// Caches the descriptor found on the type's MRO, guarded by the type's
    /// version tag and — for mutable metaclasses — the metaclass version tag
    /// as well. Backs off the adaptive counter when no specialization applies.
    fn specialize_class_load_attr(
        &mut self,
        _vm: &VirtualMachine,
        oparg: LoadAttr,
        instr_idx: usize,
        cache_base: usize,
    ) {
        let obj = self.top_value();
        // The caller (`specialize_load_attr`) only dispatches here after
        // confirming the receiver downcasts to PyType.
        let owner_type = obj.downcast_ref::<PyType>().unwrap();

        // Get or assign type version for the type object itself
        let mut type_version = owner_type.tp_version_tag.load(Acquire);
        if type_version == 0 {
            type_version = owner_type.assign_version_tag();
        }
        if type_version == 0 {
            // Version tags exhausted — back off rather than retry every time.
            unsafe {
                self.code.instructions.write_adaptive_counter(
                    cache_base,
                    bytecode::adaptive_counter_backoff(
                        self.code.instructions.read_adaptive_counter(cache_base),
                    ),
                );
            }
            return;
        }

        let attr_name = self.code.names[oparg.name_idx() as usize];

        // Check metaclass: ensure no data descriptor on metaclass for this name
        let mcl = obj.class();
        let mcl_attr = mcl.get_attr(attr_name);
        if let Some(ref attr) = mcl_attr {
            let attr_class = attr.class();
            if attr_class.slots.descr_set.load().is_some() {
                // Data descriptor on metaclass — can't specialize
                unsafe {
                    self.code.instructions.write_adaptive_counter(
                        cache_base,
                        bytecode::adaptive_counter_backoff(
                            self.code.instructions.read_adaptive_counter(cache_base),
                        ),
                    );
                }
                return;
            }
        }
        // Immutable metaclasses need no version guard (left as 0); mutable
        // ones must have a valid version tag we can check at run time.
        let mut metaclass_version = 0;
        if !mcl.slots.flags.has_feature(PyTypeFlags::IMMUTABLETYPE) {
            metaclass_version = mcl.tp_version_tag.load(Acquire);
            if metaclass_version == 0 {
                metaclass_version = mcl.assign_version_tag();
            }
            if metaclass_version == 0 {
                unsafe {
                    self.code.instructions.write_adaptive_counter(
                        cache_base,
                        bytecode::adaptive_counter_backoff(
                            self.code.instructions.read_adaptive_counter(cache_base),
                        ),
                    );
                }
                return;
            }
        }

        // Look up attr in the type's own MRO
        let cls_attr = owner_type.get_attr(attr_name);
        if let Some(ref descr) = cls_attr {
            let descr_class = descr.class();
            let has_descr_get = descr_class.slots.descr_get.load().is_some();
            if !has_descr_get {
                // METHOD or NON_DESCRIPTOR — can cache directly
                let descr_ptr = &**descr as *const PyObject as usize;
                let new_op = if metaclass_version == 0 {
                    Instruction::LoadAttrClass
                } else {
                    Instruction::LoadAttrClassWithMetaclassCheck
                };
                unsafe {
                    if metaclass_version == 0 {
                        self.write_cached_descriptor(cache_base, type_version, descr_ptr);
                    } else {
                        self.write_cached_descriptor_with_metaclass(
                            cache_base,
                            type_version,
                            metaclass_version,
                            descr_ptr,
                        );
                    }
                }
                self.specialize_at(instr_idx, cache_base, new_op);
                return;
            }
        }

        // Can't specialize
        unsafe {
            self.code.instructions.write_adaptive_counter(
                cache_base,
                bytecode::adaptive_counter_backoff(
                    self.code.instructions.read_adaptive_counter(cache_base),
                ),
            );
        }
    }
7814
7815    fn load_attr_slow(&mut self, vm: &VirtualMachine, oparg: LoadAttr) -> FrameResult {
7816        let attr_name = self.code.names[oparg.name_idx() as usize];
7817        let parent = self.pop_value();
7818
7819        if oparg.is_method() {
7820            // Method call: push [method, self_or_null]
7821            let method = PyMethod::get(parent.clone(), attr_name, vm)?;
7822            match method {
7823                PyMethod::Function { target: _, func } => {
7824                    self.push_value(func);
7825                    self.push_value(parent);
7826                }
7827                PyMethod::Attribute(val) => {
7828                    self.push_value(val);
7829                    self.push_null();
7830                }
7831            }
7832        } else {
7833            // Regular attribute access
7834            let obj = parent.get_attr(attr_name, vm)?;
7835            self.push_value(obj);
7836        }
7837        Ok(None)
7838    }
7839
    /// Try to replace a generic `BinaryOp` with a type-specialized opcode,
    /// based on the two operands on the stack (`a op b`, with `b` on top).
    ///
    /// Clears the BINARY_OP_EXTEND external-cache slot up front, picks a
    /// specialized opcode (or `None`) from the operand types, writes the
    /// extend descriptor back only when `BinaryOpExtend` was chosen, and
    /// hands the decision to `commit_specialization`.
    fn specialize_binary_op(
        &mut self,
        vm: &VirtualMachine,
        op: bytecode::BinaryOperator,
        instr_idx: usize,
        cache_base: usize,
    ) {
        // Bail if another thread already specialized this site.
        if !matches!(
            self.code.instructions.read_op(instr_idx),
            Instruction::BinaryOp { .. }
        ) {
            return;
        }
        let b = self.top_value();
        let a = self.nth_value(1);
        // `external_cache` in _PyBinaryOpCache is used only by BINARY_OP_EXTEND.
        unsafe {
            self.write_cached_binary_op_extend_descr(cache_base, None);
        }
        let mut cached_extend_descr = None;

        let new_op = match op {
            bytecode::BinaryOperator::Add => {
                if a.downcast_ref_if_exact::<PyInt>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyInt>(vm).is_some()
                {
                    Some(Instruction::BinaryOpAddInt)
                } else if a.downcast_ref_if_exact::<PyFloat>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyFloat>(vm).is_some()
                {
                    Some(Instruction::BinaryOpAddFloat)
                } else if a.downcast_ref_if_exact::<PyStr>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyStr>(vm).is_some()
                {
                    // Prefer the in-place variant when the result is stored
                    // straight back into the same local (s = s + t pattern).
                    if self
                        .binary_op_inplace_unicode_target_local(cache_base, a)
                        .is_some()
                    {
                        Some(Instruction::BinaryOpInplaceAddUnicode)
                    } else {
                        Some(Instruction::BinaryOpAddUnicode)
                    }
                } else if let Some(descr) = self.binary_op_extended_specialization(op, a, b, vm) {
                    cached_extend_descr = Some(descr);
                    Some(Instruction::BinaryOpExtend)
                } else {
                    None
                }
            }
            bytecode::BinaryOperator::Subtract => {
                if a.downcast_ref_if_exact::<PyInt>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyInt>(vm).is_some()
                {
                    Some(Instruction::BinaryOpSubtractInt)
                } else if a.downcast_ref_if_exact::<PyFloat>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyFloat>(vm).is_some()
                {
                    Some(Instruction::BinaryOpSubtractFloat)
                } else if let Some(descr) = self.binary_op_extended_specialization(op, a, b, vm) {
                    cached_extend_descr = Some(descr);
                    Some(Instruction::BinaryOpExtend)
                } else {
                    None
                }
            }
            bytecode::BinaryOperator::Multiply => {
                if a.downcast_ref_if_exact::<PyInt>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyInt>(vm).is_some()
                {
                    Some(Instruction::BinaryOpMultiplyInt)
                } else if a.downcast_ref_if_exact::<PyFloat>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyFloat>(vm).is_some()
                {
                    Some(Instruction::BinaryOpMultiplyFloat)
                } else if let Some(descr) = self.binary_op_extended_specialization(op, a, b, vm) {
                    cached_extend_descr = Some(descr);
                    Some(Instruction::BinaryOpExtend)
                } else {
                    None
                }
            }
            bytecode::BinaryOperator::TrueDivide => {
                if let Some(descr) = self.binary_op_extended_specialization(op, a, b, vm) {
                    cached_extend_descr = Some(descr);
                    Some(Instruction::BinaryOpExtend)
                } else {
                    None
                }
            }
            bytecode::BinaryOperator::Subscr => {
                // Index-based fast paths require a non-negative "compact" int.
                let b_is_nonnegative_int = b
                    .downcast_ref_if_exact::<PyInt>(vm)
                    .is_some_and(|i| specialization_nonnegative_compact_index(i, vm).is_some());
                if a.downcast_ref_if_exact::<PyList>(vm).is_some() && b_is_nonnegative_int {
                    Some(Instruction::BinaryOpSubscrListInt)
                } else if a.downcast_ref_if_exact::<PyTuple>(vm).is_some() && b_is_nonnegative_int {
                    Some(Instruction::BinaryOpSubscrTupleInt)
                } else if a.downcast_ref_if_exact::<PyDict>(vm).is_some() {
                    Some(Instruction::BinaryOpSubscrDict)
                } else if a.downcast_ref_if_exact::<PyStr>(vm).is_some() && b_is_nonnegative_int {
                    Some(Instruction::BinaryOpSubscrStrInt)
                } else if a.downcast_ref_if_exact::<PyList>(vm).is_some()
                    && b.downcast_ref::<PySlice>().is_some()
                {
                    Some(Instruction::BinaryOpSubscrListSlice)
                } else {
                    // Heap type with a pure-Python __getitem__ we can inline.
                    let cls = a.class();
                    if cls.slots.flags.has_feature(PyTypeFlags::HEAPTYPE)
                        && !self.specialization_eval_frame_active(vm)
                        && let Some(_getitem) = cls.get_attr(identifier!(vm, __getitem__))
                        && let Some(func) = _getitem.downcast_ref_if_exact::<PyFunction>(vm)
                        && func.can_specialize_call(2)
                    {
                        let mut type_version = cls.tp_version_tag.load(Acquire);
                        if type_version == 0 {
                            type_version = cls.assign_version_tag();
                        }
                        if type_version != 0 {
                            if cls.cache_getitem_for_specialization(
                                func.to_owned(),
                                type_version,
                                vm,
                            ) {
                                Some(Instruction::BinaryOpSubscrGetitem)
                            } else {
                                None
                            }
                        } else {
                            None
                        }
                    } else {
                        None
                    }
                }
            }
            bytecode::BinaryOperator::InplaceAdd => {
                if a.downcast_ref_if_exact::<PyStr>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyStr>(vm).is_some()
                {
                    if self
                        .binary_op_inplace_unicode_target_local(cache_base, a)
                        .is_some()
                    {
                        Some(Instruction::BinaryOpInplaceAddUnicode)
                    } else {
                        Some(Instruction::BinaryOpAddUnicode)
                    }
                } else if a.downcast_ref_if_exact::<PyInt>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyInt>(vm).is_some()
                {
                    Some(Instruction::BinaryOpAddInt)
                } else if a.downcast_ref_if_exact::<PyFloat>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyFloat>(vm).is_some()
                {
                    Some(Instruction::BinaryOpAddFloat)
                } else {
                    None
                }
            }
            bytecode::BinaryOperator::InplaceSubtract => {
                if a.downcast_ref_if_exact::<PyInt>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyInt>(vm).is_some()
                {
                    Some(Instruction::BinaryOpSubtractInt)
                } else if a.downcast_ref_if_exact::<PyFloat>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyFloat>(vm).is_some()
                {
                    Some(Instruction::BinaryOpSubtractFloat)
                } else {
                    None
                }
            }
            bytecode::BinaryOperator::InplaceMultiply => {
                if a.downcast_ref_if_exact::<PyInt>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyInt>(vm).is_some()
                {
                    Some(Instruction::BinaryOpMultiplyInt)
                } else if a.downcast_ref_if_exact::<PyFloat>(vm).is_some()
                    && b.downcast_ref_if_exact::<PyFloat>(vm).is_some()
                {
                    Some(Instruction::BinaryOpMultiplyFloat)
                } else {
                    None
                }
            }
            // Bitwise ops (and their in-place forms) only have extended
            // specializations, no dedicated opcodes.
            bytecode::BinaryOperator::And
            | bytecode::BinaryOperator::Or
            | bytecode::BinaryOperator::Xor
            | bytecode::BinaryOperator::InplaceAnd
            | bytecode::BinaryOperator::InplaceOr
            | bytecode::BinaryOperator::InplaceXor => {
                if let Some(descr) = self.binary_op_extended_specialization(op, a, b, vm) {
                    cached_extend_descr = Some(descr);
                    Some(Instruction::BinaryOpExtend)
                } else {
                    None
                }
            }
            _ => None,
        };

        // Only BinaryOpExtend consumes the external-cache descriptor slot;
        // it was cleared above, so write it back just for that case.
        if matches!(new_op, Some(Instruction::BinaryOpExtend)) {
            unsafe {
                self.write_cached_binary_op_extend_descr(cache_base, cached_extend_descr);
            }
        }
        self.commit_specialization(instr_idx, cache_base, new_op);
    }
8048
8049    #[inline]
8050    fn binary_op_inplace_unicode_target_local(
8051        &self,
8052        cache_base: usize,
8053        left: &PyObject,
8054    ) -> Option<usize> {
8055        let next_idx = cache_base + Instruction::BinaryOp { op: Arg::marker() }.cache_entries();
8056        let unit = self.code.instructions.get(next_idx)?;
8057        let next_op = unit.op.to_base().unwrap_or(unit.op);
8058        if !matches!(next_op, Instruction::StoreFast { .. }) {
8059            return None;
8060        }
8061        let local_idx = usize::from(u8::from(unit.arg));
8062        self.localsplus
8063            .fastlocals()
8064            .get(local_idx)
8065            .and_then(|slot| slot.as_ref())
8066            .filter(|local| local.is(left))
8067            .map(|_| local_idx)
8068    }
8069
8070    /// Adaptive counter: trigger specialization at zero, otherwise advance countdown.
8071    #[inline]
8072    fn adaptive(&mut self, specialize: impl FnOnce(&mut Self, usize, usize)) {
8073        let instr_idx = self.lasti() as usize - 1;
8074        let cache_base = instr_idx + 1;
8075        let counter = self.code.instructions.read_adaptive_counter(cache_base);
8076        if bytecode::adaptive_counter_triggers(counter) {
8077            specialize(self, instr_idx, cache_base);
8078        } else {
8079            unsafe {
8080                self.code.instructions.write_adaptive_counter(
8081                    cache_base,
8082                    bytecode::advance_adaptive_counter(counter),
8083                );
8084            }
8085        }
8086    }
8087
8088    /// Install a specialized opcode and set adaptive cooldown bits.
8089    #[inline]
8090    fn specialize_at(&mut self, instr_idx: usize, cache_base: usize, new_op: Instruction) {
8091        unsafe {
8092            self.code
8093                .instructions
8094                .write_adaptive_counter(cache_base, ADAPTIVE_COOLDOWN_VALUE);
8095            self.code.instructions.replace_op(instr_idx, new_op);
8096        }
8097    }
8098
8099    #[inline]
8100    fn cooldown_adaptive_at(&mut self, cache_base: usize) {
8101        unsafe {
8102            self.code
8103                .instructions
8104                .write_adaptive_counter(cache_base, ADAPTIVE_COOLDOWN_VALUE);
8105        }
8106    }
8107
8108    /// Commit a specialization result: replace op on success, backoff on failure.
8109    #[inline]
8110    fn commit_specialization(
8111        &mut self,
8112        instr_idx: usize,
8113        cache_base: usize,
8114        new_op: Option<Instruction>,
8115    ) {
8116        if let Some(new_op) = new_op {
8117            self.specialize_at(instr_idx, cache_base, new_op);
8118        } else {
8119            unsafe {
8120                self.code.instructions.write_adaptive_counter(
8121                    cache_base,
8122                    bytecode::adaptive_counter_backoff(
8123                        self.code.instructions.read_adaptive_counter(cache_base),
8124                    ),
8125                );
8126            }
8127        }
8128    }
8129
8130    /// Execute a specialized binary op on two int operands.
8131    /// Fallback to generic binary op if either operand is not an exact int.
8132    #[inline]
8133    fn execute_binary_op_int(
8134        &mut self,
8135        vm: &VirtualMachine,
8136        op: impl FnOnce(&BigInt, &BigInt) -> BigInt,
8137        deopt_op: bytecode::BinaryOperator,
8138    ) -> FrameResult {
8139        let b = self.top_value();
8140        let a = self.nth_value(1);
8141        if let (Some(a_int), Some(b_int)) = (
8142            a.downcast_ref_if_exact::<PyInt>(vm),
8143            b.downcast_ref_if_exact::<PyInt>(vm),
8144        ) {
8145            let result = op(a_int.as_bigint(), b_int.as_bigint());
8146            self.pop_value();
8147            self.pop_value();
8148            self.push_value(vm.ctx.new_bigint(&result).into());
8149            Ok(None)
8150        } else {
8151            self.execute_bin_op(vm, deopt_op)
8152        }
8153    }
8154
8155    /// Execute a specialized binary op on two float operands.
8156    /// Fallback to generic binary op if either operand is not an exact float.
8157    #[inline]
8158    fn execute_binary_op_float(
8159        &mut self,
8160        vm: &VirtualMachine,
8161        op: impl FnOnce(f64, f64) -> f64,
8162        deopt_op: bytecode::BinaryOperator,
8163    ) -> FrameResult {
8164        let b = self.top_value();
8165        let a = self.nth_value(1);
8166        if let (Some(a_f), Some(b_f)) = (
8167            a.downcast_ref_if_exact::<PyFloat>(vm),
8168            b.downcast_ref_if_exact::<PyFloat>(vm),
8169        ) {
8170            let result = op(a_f.to_f64(), b_f.to_f64());
8171            self.pop_value();
8172            self.pop_value();
8173            self.push_value(vm.ctx.new_float(result).into());
8174            Ok(None)
8175        } else {
8176            self.execute_bin_op(vm, deopt_op)
8177        }
8178    }
8179
8180    fn specialize_call(
8181        &mut self,
8182        vm: &VirtualMachine,
8183        nargs: u32,
8184        instr_idx: usize,
8185        cache_base: usize,
8186    ) {
8187        if !matches!(
8188            self.code.instructions.read_op(instr_idx),
8189            Instruction::Call { .. }
8190        ) {
8191            return;
8192        }
8193        // Stack: [callable, self_or_null, arg1, ..., argN]
8194        // callable is at position nargs + 1 from top
8195        // self_or_null is at position nargs from top
8196        let stack_len = self.localsplus.stack_len();
8197        let self_or_null_is_some = self
8198            .localsplus
8199            .stack_index(stack_len - nargs as usize - 1)
8200            .is_some();
8201        let callable = self.nth_value(nargs + 1);
8202
8203        if let Some(func) = callable.downcast_ref_if_exact::<PyFunction>(vm) {
8204            if self.specialization_eval_frame_active(vm) {
8205                unsafe {
8206                    self.code.instructions.write_adaptive_counter(
8207                        cache_base,
8208                        bytecode::adaptive_counter_backoff(
8209                            self.code.instructions.read_adaptive_counter(cache_base),
8210                        ),
8211                    );
8212                }
8213                return;
8214            }
8215            if !func.is_optimized_for_call_specialization() {
8216                unsafe {
8217                    self.code.instructions.write_adaptive_counter(
8218                        cache_base,
8219                        bytecode::adaptive_counter_backoff(
8220                            self.code.instructions.read_adaptive_counter(cache_base),
8221                        ),
8222                    );
8223                }
8224                return;
8225            }
8226            let version = func.get_version_for_current_state();
8227            if version == 0 {
8228                unsafe {
8229                    self.code.instructions.write_adaptive_counter(
8230                        cache_base,
8231                        bytecode::adaptive_counter_backoff(
8232                            self.code.instructions.read_adaptive_counter(cache_base),
8233                        ),
8234                    );
8235                }
8236                return;
8237            }
8238
8239            let effective_nargs = if self_or_null_is_some {
8240                nargs + 1
8241            } else {
8242                nargs
8243            };
8244
8245            let new_op = if func.can_specialize_call(effective_nargs) {
8246                Instruction::CallPyExactArgs
8247            } else {
8248                Instruction::CallPyGeneral
8249            };
8250            unsafe {
8251                self.code
8252                    .instructions
8253                    .write_cache_u32(cache_base + 1, version);
8254            }
8255            self.specialize_at(instr_idx, cache_base, new_op);
8256            return;
8257        }
8258
8259        // Bound Python method object (`method`) specialization.
8260        if !self_or_null_is_some
8261            && let Some(bound_method) = callable.downcast_ref_if_exact::<PyBoundMethod>(vm)
8262        {
8263            if let Some(func) = bound_method
8264                .function_obj()
8265                .downcast_ref_if_exact::<PyFunction>(vm)
8266            {
8267                if self.specialization_eval_frame_active(vm) {
8268                    unsafe {
8269                        self.code.instructions.write_adaptive_counter(
8270                            cache_base,
8271                            bytecode::adaptive_counter_backoff(
8272                                self.code.instructions.read_adaptive_counter(cache_base),
8273                            ),
8274                        );
8275                    }
8276                    return;
8277                }
8278                if !func.is_optimized_for_call_specialization() {
8279                    unsafe {
8280                        self.code.instructions.write_adaptive_counter(
8281                            cache_base,
8282                            bytecode::adaptive_counter_backoff(
8283                                self.code.instructions.read_adaptive_counter(cache_base),
8284                            ),
8285                        );
8286                    }
8287                    return;
8288                }
8289                let version = func.get_version_for_current_state();
8290                if version == 0 {
8291                    unsafe {
8292                        self.code.instructions.write_adaptive_counter(
8293                            cache_base,
8294                            bytecode::adaptive_counter_backoff(
8295                                self.code.instructions.read_adaptive_counter(cache_base),
8296                            ),
8297                        );
8298                    }
8299                    return;
8300                }
8301
8302                let new_op = if func.can_specialize_call(nargs + 1) {
8303                    Instruction::CallBoundMethodExactArgs
8304                } else {
8305                    Instruction::CallBoundMethodGeneral
8306                };
8307                unsafe {
8308                    self.code
8309                        .instructions
8310                        .write_cache_u32(cache_base + 1, version);
8311                }
8312                self.specialize_at(instr_idx, cache_base, new_op);
8313            } else {
8314                // Match CPython: bound methods wrapping non-Python callables
8315                // are not specialized as CALL_NON_PY_GENERAL.
8316                unsafe {
8317                    self.code.instructions.write_adaptive_counter(
8318                        cache_base,
8319                        bytecode::adaptive_counter_backoff(
8320                            self.code.instructions.read_adaptive_counter(cache_base),
8321                        ),
8322                    );
8323                }
8324            }
8325            return;
8326        }
8327
8328        // Try to specialize method descriptor calls
8329        if let Some(descr) = callable.downcast_ref_if_exact::<PyMethodDescriptor>(vm) {
8330            let call_cache_entries = Instruction::CallListAppend.cache_entries();
8331            let next_idx = cache_base + call_cache_entries;
8332            let next_is_pop_top = if next_idx < self.code.instructions.len() {
8333                let next_op = self.code.instructions.read_op(next_idx);
8334                matches!(next_op.to_base().unwrap_or(next_op), Instruction::PopTop)
8335            } else {
8336                false
8337            };
8338
8339            let call_conv = descr.method.flags
8340                & (PyMethodFlags::VARARGS
8341                    | PyMethodFlags::FASTCALL
8342                    | PyMethodFlags::NOARGS
8343                    | PyMethodFlags::O
8344                    | PyMethodFlags::KEYWORDS);
8345            let total_nargs = nargs + u32::from(self_or_null_is_some);
8346
8347            let new_op = if call_conv == PyMethodFlags::NOARGS {
8348                if total_nargs != 1 {
8349                    unsafe {
8350                        self.code.instructions.write_adaptive_counter(
8351                            cache_base,
8352                            bytecode::adaptive_counter_backoff(
8353                                self.code.instructions.read_adaptive_counter(cache_base),
8354                            ),
8355                        );
8356                    }
8357                    return;
8358                }
8359                Instruction::CallMethodDescriptorNoargs
8360            } else if call_conv == PyMethodFlags::O {
8361                if total_nargs != 2 {
8362                    unsafe {
8363                        self.code.instructions.write_adaptive_counter(
8364                            cache_base,
8365                            bytecode::adaptive_counter_backoff(
8366                                self.code.instructions.read_adaptive_counter(cache_base),
8367                            ),
8368                        );
8369                    }
8370                    return;
8371                }
8372                if self_or_null_is_some
8373                    && nargs == 1
8374                    && next_is_pop_top
8375                    && vm
8376                        .callable_cache
8377                        .list_append
8378                        .as_ref()
8379                        .is_some_and(|list_append| callable.is(list_append))
8380                {
8381                    Instruction::CallListAppend
8382                } else {
8383                    Instruction::CallMethodDescriptorO
8384                }
8385            } else if call_conv == PyMethodFlags::FASTCALL {
8386                Instruction::CallMethodDescriptorFast
8387            } else if call_conv == (PyMethodFlags::FASTCALL | PyMethodFlags::KEYWORDS) {
8388                Instruction::CallMethodDescriptorFastWithKeywords
8389            } else {
8390                Instruction::CallNonPyGeneral
8391            };
8392            self.specialize_at(instr_idx, cache_base, new_op);
8393            return;
8394        }
8395
8396        // Try to specialize builtin calls
8397        if let Some(native) = callable.downcast_ref_if_exact::<PyNativeFunction>(vm) {
8398            let effective_nargs = nargs + u32::from(self_or_null_is_some);
8399            let call_conv = native.value.flags
8400                & (PyMethodFlags::VARARGS
8401                    | PyMethodFlags::FASTCALL
8402                    | PyMethodFlags::NOARGS
8403                    | PyMethodFlags::O
8404                    | PyMethodFlags::KEYWORDS);
8405            let new_op = if call_conv == PyMethodFlags::O {
8406                if effective_nargs != 1 {
8407                    unsafe {
8408                        self.code.instructions.write_adaptive_counter(
8409                            cache_base,
8410                            bytecode::adaptive_counter_backoff(
8411                                self.code.instructions.read_adaptive_counter(cache_base),
8412                            ),
8413                        );
8414                    }
8415                    return;
8416                }
8417                if native.zelf.is_none()
8418                    && nargs == 1
8419                    && vm
8420                        .callable_cache
8421                        .len
8422                        .as_ref()
8423                        .is_some_and(|len_callable| callable.is(len_callable))
8424                {
8425                    Instruction::CallLen
8426                } else {
8427                    Instruction::CallBuiltinO
8428                }
8429            } else if call_conv == PyMethodFlags::FASTCALL {
8430                if native.zelf.is_none()
8431                    && effective_nargs == 2
8432                    && vm
8433                        .callable_cache
8434                        .isinstance
8435                        .as_ref()
8436                        .is_some_and(|isinstance_callable| callable.is(isinstance_callable))
8437                {
8438                    Instruction::CallIsinstance
8439                } else {
8440                    Instruction::CallBuiltinFast
8441                }
8442            } else if call_conv == (PyMethodFlags::FASTCALL | PyMethodFlags::KEYWORDS) {
8443                Instruction::CallBuiltinFastWithKeywords
8444            } else {
8445                Instruction::CallNonPyGeneral
8446            };
8447            self.specialize_at(instr_idx, cache_base, new_op);
8448            return;
8449        }
8450
8451        // type/str/tuple(x) and class-call specializations
8452        if let Some(cls) = callable.downcast_ref::<PyType>() {
8453            if cls.slots.flags.has_feature(PyTypeFlags::IMMUTABLETYPE) {
8454                if !self_or_null_is_some && nargs == 1 {
8455                    let new_op = if callable.is(&vm.ctx.types.type_type.as_object()) {
8456                        Some(Instruction::CallType1)
8457                    } else if callable.is(&vm.ctx.types.str_type.as_object()) {
8458                        Some(Instruction::CallStr1)
8459                    } else if callable.is(&vm.ctx.types.tuple_type.as_object()) {
8460                        Some(Instruction::CallTuple1)
8461                    } else {
8462                        None
8463                    };
8464                    if let Some(new_op) = new_op {
8465                        self.specialize_at(instr_idx, cache_base, new_op);
8466                        return;
8467                    }
8468                }
8469                if cls.slots.vectorcall.load().is_some() {
8470                    self.specialize_at(instr_idx, cache_base, Instruction::CallBuiltinClass);
8471                    return;
8472                }
8473                self.specialize_at(instr_idx, cache_base, Instruction::CallNonPyGeneral);
8474                return;
8475            }
8476
8477            // CPython only considers CALL_ALLOC_AND_ENTER_INIT for types whose
8478            // metaclass is exactly `type`.
8479            if !callable.class().is(vm.ctx.types.type_type) {
8480                self.specialize_at(instr_idx, cache_base, Instruction::CallNonPyGeneral);
8481                return;
8482            }
8483
8484            // CallAllocAndEnterInit: heap type with default __new__
8485            if !self_or_null_is_some && cls.slots.flags.has_feature(PyTypeFlags::HEAPTYPE) {
8486                let object_new = vm.ctx.types.object_type.slots.new.load();
8487                let cls_new = cls.slots.new.load();
8488                let object_alloc = vm.ctx.types.object_type.slots.alloc.load();
8489                let cls_alloc = cls.slots.alloc.load();
8490                if let (Some(cls_new_fn), Some(obj_new_fn), Some(cls_alloc_fn), Some(obj_alloc_fn)) =
8491                    (cls_new, object_new, cls_alloc, object_alloc)
8492                    && cls_new_fn as usize == obj_new_fn as usize
8493                    && cls_alloc_fn as usize == obj_alloc_fn as usize
8494                {
8495                    let init = cls.get_attr(identifier!(vm, __init__));
8496                    let mut version = cls.tp_version_tag.load(Acquire);
8497                    if version == 0 {
8498                        version = cls.assign_version_tag();
8499                    }
8500                    if version == 0 {
8501                        unsafe {
8502                            self.code.instructions.write_adaptive_counter(
8503                                cache_base,
8504                                bytecode::adaptive_counter_backoff(
8505                                    self.code.instructions.read_adaptive_counter(cache_base),
8506                                ),
8507                            );
8508                        }
8509                        return;
8510                    }
8511                    if let Some(init) = init
8512                        && let Some(init_func) = init.downcast_ref_if_exact::<PyFunction>(vm)
8513                        && init_func.is_simple_for_call_specialization()
8514                        && cls.cache_init_for_specialization(init_func.to_owned(), version, vm)
8515                    {
8516                        unsafe {
8517                            self.code
8518                                .instructions
8519                                .write_cache_u32(cache_base + 1, version);
8520                        }
8521                        self.specialize_at(
8522                            instr_idx,
8523                            cache_base,
8524                            Instruction::CallAllocAndEnterInit,
8525                        );
8526                        return;
8527                    }
8528                }
8529            }
8530            self.specialize_at(instr_idx, cache_base, Instruction::CallNonPyGeneral);
8531            return;
8532        }
8533
8534        // General fallback: specialized non-Python callable path
8535        self.specialize_at(instr_idx, cache_base, Instruction::CallNonPyGeneral);
8536    }
8537
8538    fn specialize_call_kw(
8539        &mut self,
8540        vm: &VirtualMachine,
8541        nargs: u32,
8542        instr_idx: usize,
8543        cache_base: usize,
8544    ) {
8545        if !matches!(
8546            self.code.instructions.read_op(instr_idx),
8547            Instruction::CallKw { .. }
8548        ) {
8549            return;
8550        }
8551        // Stack: [callable, self_or_null, arg1, ..., argN, kwarg_names]
8552        // callable is at position nargs + 2 from top
8553        let stack_len = self.localsplus.stack_len();
8554        let self_or_null_is_some = self
8555            .localsplus
8556            .stack_index(stack_len - nargs as usize - 2)
8557            .is_some();
8558        let callable = self.nth_value(nargs + 2);
8559
8560        if let Some(func) = callable.downcast_ref_if_exact::<PyFunction>(vm) {
8561            if self.specialization_eval_frame_active(vm) {
8562                unsafe {
8563                    self.code.instructions.write_adaptive_counter(
8564                        cache_base,
8565                        bytecode::adaptive_counter_backoff(
8566                            self.code.instructions.read_adaptive_counter(cache_base),
8567                        ),
8568                    );
8569                }
8570                return;
8571            }
8572            if !func.is_optimized_for_call_specialization() {
8573                unsafe {
8574                    self.code.instructions.write_adaptive_counter(
8575                        cache_base,
8576                        bytecode::adaptive_counter_backoff(
8577                            self.code.instructions.read_adaptive_counter(cache_base),
8578                        ),
8579                    );
8580                }
8581                return;
8582            }
8583            let version = func.get_version_for_current_state();
8584            if version == 0 {
8585                unsafe {
8586                    self.code.instructions.write_adaptive_counter(
8587                        cache_base,
8588                        bytecode::adaptive_counter_backoff(
8589                            self.code.instructions.read_adaptive_counter(cache_base),
8590                        ),
8591                    );
8592                }
8593                return;
8594            }
8595
8596            unsafe {
8597                self.code
8598                    .instructions
8599                    .write_cache_u32(cache_base + 1, version);
8600            }
8601            self.specialize_at(instr_idx, cache_base, Instruction::CallKwPy);
8602            return;
8603        }
8604
8605        if !self_or_null_is_some
8606            && let Some(bound_method) = callable.downcast_ref_if_exact::<PyBoundMethod>(vm)
8607        {
8608            if let Some(func) = bound_method
8609                .function_obj()
8610                .downcast_ref_if_exact::<PyFunction>(vm)
8611            {
8612                if self.specialization_eval_frame_active(vm) {
8613                    unsafe {
8614                        self.code.instructions.write_adaptive_counter(
8615                            cache_base,
8616                            bytecode::adaptive_counter_backoff(
8617                                self.code.instructions.read_adaptive_counter(cache_base),
8618                            ),
8619                        );
8620                    }
8621                    return;
8622                }
8623                if !func.is_optimized_for_call_specialization() {
8624                    unsafe {
8625                        self.code.instructions.write_adaptive_counter(
8626                            cache_base,
8627                            bytecode::adaptive_counter_backoff(
8628                                self.code.instructions.read_adaptive_counter(cache_base),
8629                            ),
8630                        );
8631                    }
8632                    return;
8633                }
8634                let version = func.get_version_for_current_state();
8635                if version == 0 {
8636                    unsafe {
8637                        self.code.instructions.write_adaptive_counter(
8638                            cache_base,
8639                            bytecode::adaptive_counter_backoff(
8640                                self.code.instructions.read_adaptive_counter(cache_base),
8641                            ),
8642                        );
8643                    }
8644                    return;
8645                }
8646                unsafe {
8647                    self.code
8648                        .instructions
8649                        .write_cache_u32(cache_base + 1, version);
8650                }
8651                self.specialize_at(instr_idx, cache_base, Instruction::CallKwBoundMethod);
8652            } else {
8653                // Match CPython: bound methods wrapping non-Python callables
8654                // are not specialized as CALL_KW_NON_PY.
8655                unsafe {
8656                    self.code.instructions.write_adaptive_counter(
8657                        cache_base,
8658                        bytecode::adaptive_counter_backoff(
8659                            self.code.instructions.read_adaptive_counter(cache_base),
8660                        ),
8661                    );
8662                }
8663            }
8664            return;
8665        }
8666
8667        // General fallback: specialized non-Python callable path
8668        self.specialize_at(instr_idx, cache_base, Instruction::CallKwNonPy);
8669    }
8670
8671    fn specialize_send(&mut self, vm: &VirtualMachine, instr_idx: usize, cache_base: usize) {
8672        if !matches!(
8673            self.code.instructions.read_op(instr_idx),
8674            Instruction::Send { .. }
8675        ) {
8676            return;
8677        }
8678        // Stack: [receiver, val] — receiver is at position 1
8679        let receiver = self.nth_value(1);
8680        let is_exact_gen_or_coro = receiver.downcast_ref_if_exact::<PyGenerator>(vm).is_some()
8681            || receiver.downcast_ref_if_exact::<PyCoroutine>(vm).is_some();
8682        if is_exact_gen_or_coro && !self.specialization_eval_frame_active(vm) {
8683            self.specialize_at(instr_idx, cache_base, Instruction::SendGen);
8684        } else {
8685            unsafe {
8686                self.code.instructions.write_adaptive_counter(
8687                    cache_base,
8688                    bytecode::adaptive_counter_backoff(
8689                        self.code.instructions.read_adaptive_counter(cache_base),
8690                    ),
8691                );
8692            }
8693        }
8694    }
8695
8696    fn specialize_load_super_attr(
8697        &mut self,
8698        vm: &VirtualMachine,
8699        oparg: LoadSuperAttr,
8700        instr_idx: usize,
8701        cache_base: usize,
8702    ) {
8703        if !matches!(
8704            self.code.instructions.read_op(instr_idx),
8705            Instruction::LoadSuperAttr { .. }
8706        ) {
8707            return;
8708        }
8709        // Stack: [global_super, class, self]
8710        let global_super = self.nth_value(2);
8711        let class = self.nth_value(1);
8712
8713        if !global_super.is(&vm.ctx.types.super_type.as_object())
8714            || class.downcast_ref::<PyType>().is_none()
8715        {
8716            unsafe {
8717                self.code.instructions.write_adaptive_counter(
8718                    cache_base,
8719                    bytecode::adaptive_counter_backoff(
8720                        self.code.instructions.read_adaptive_counter(cache_base),
8721                    ),
8722                );
8723            }
8724            return;
8725        }
8726
8727        let new_op = if oparg.is_load_method() {
8728            Instruction::LoadSuperAttrMethod
8729        } else {
8730            Instruction::LoadSuperAttrAttr
8731        };
8732        self.specialize_at(instr_idx, cache_base, new_op);
8733    }
8734
8735    fn specialize_compare_op(
8736        &mut self,
8737        vm: &VirtualMachine,
8738        op: bytecode::ComparisonOperator,
8739        instr_idx: usize,
8740        cache_base: usize,
8741    ) {
8742        if !matches!(
8743            self.code.instructions.read_op(instr_idx),
8744            Instruction::CompareOp { .. }
8745        ) {
8746            return;
8747        }
8748        let b = self.top_value();
8749        let a = self.nth_value(1);
8750
8751        let new_op = if let (Some(a_int), Some(b_int)) = (
8752            a.downcast_ref_if_exact::<PyInt>(vm),
8753            b.downcast_ref_if_exact::<PyInt>(vm),
8754        ) {
8755            if specialization_compact_int_value(a_int, vm).is_some()
8756                && specialization_compact_int_value(b_int, vm).is_some()
8757            {
8758                Some(Instruction::CompareOpInt)
8759            } else {
8760                None
8761            }
8762        } else if a.downcast_ref_if_exact::<PyFloat>(vm).is_some()
8763            && b.downcast_ref_if_exact::<PyFloat>(vm).is_some()
8764        {
8765            Some(Instruction::CompareOpFloat)
8766        } else if a.downcast_ref_if_exact::<PyStr>(vm).is_some()
8767            && b.downcast_ref_if_exact::<PyStr>(vm).is_some()
8768            && (op == bytecode::ComparisonOperator::Equal
8769                || op == bytecode::ComparisonOperator::NotEqual)
8770        {
8771            Some(Instruction::CompareOpStr)
8772        } else {
8773            None
8774        };
8775
8776        self.commit_specialization(instr_idx, cache_base, new_op);
8777    }
8778
8779    /// Recover the ComparisonOperator from the instruction arg byte.
8780    /// `replace_op` preserves the arg byte, so the original op remains accessible.
8781    fn compare_op_from_arg(&self, arg: bytecode::OpArg) -> PyComparisonOp {
8782        bytecode::ComparisonOperator::try_from(u32::from(arg))
8783            .unwrap_or(bytecode::ComparisonOperator::Equal)
8784            .into()
8785    }
8786
8787    /// Recover the BinaryOperator from the instruction arg byte.
8788    /// `replace_op` preserves the arg byte, so the original op remains accessible.
8789    fn binary_op_from_arg(&self, arg: bytecode::OpArg) -> bytecode::BinaryOperator {
8790        bytecode::BinaryOperator::try_from(u32::from(arg)).unwrap_or(bytecode::BinaryOperator::Add)
8791    }
8792
    /// Attempt to replace a generic `ToBool` with a type-specialized variant
    /// chosen from the exact class of the value on top of the stack.
    fn specialize_to_bool(&mut self, vm: &VirtualMachine, instr_idx: usize, cache_base: usize) {
        // Bail if the slot no longer holds the generic opcode (it may
        // already have been specialized or instrumented).
        if !matches!(
            self.code.instructions.read_op(instr_idx),
            Instruction::ToBool
        ) {
            return;
        }
        let obj = self.top_value();
        let cls = obj.class();

        // Exact-class checks only: specialized opcodes assume the concrete
        // builtin type, not subclasses.
        let new_op = if cls.is(vm.ctx.types.bool_type) {
            Some(Instruction::ToBoolBool)
        } else if cls.is(PyInt::class(&vm.ctx)) {
            Some(Instruction::ToBoolInt)
        } else if cls.is(vm.ctx.types.none_type) {
            Some(Instruction::ToBoolNone)
        } else if cls.is(PyList::class(&vm.ctx)) {
            Some(Instruction::ToBoolList)
        } else if cls.is(PyStr::class(&vm.ctx)) {
            Some(Instruction::ToBoolStr)
        } else if cls.slots.flags.has_feature(PyTypeFlags::HEAPTYPE)
            && cls.slots.as_number.boolean.load().is_none()
            && cls.slots.as_mapping.length.load().is_none()
            && cls.slots.as_sequence.length.load().is_none()
        {
            // Heap type without nb_bool/mp_length/sq_length slots: truth
            // testing cannot be overridden, so instances are always true.
            // Cache type version for ToBoolAlwaysTrue guard
            let mut type_version = cls.tp_version_tag.load(Acquire);
            if type_version == 0 {
                type_version = cls.assign_version_tag();
            }
            if type_version != 0 {
                unsafe {
                    self.code
                        .instructions
                        .write_cache_u32(cache_base + 1, type_version);
                }
                self.specialize_at(instr_idx, cache_base, Instruction::ToBoolAlwaysTrue);
            } else {
                // No version tag available: back off the adaptive counter so
                // we retry specialization later instead of looping here.
                unsafe {
                    self.code.instructions.write_adaptive_counter(
                        cache_base,
                        bytecode::adaptive_counter_backoff(
                            self.code.instructions.read_adaptive_counter(cache_base),
                        ),
                    );
                }
            }
            return;
        } else {
            None
        };

        self.commit_specialization(instr_idx, cache_base, new_op);
    }
8847
8848    fn specialize_for_iter(
8849        &mut self,
8850        vm: &VirtualMachine,
8851        jump_delta: u32,
8852        instr_idx: usize,
8853        cache_base: usize,
8854    ) {
8855        if !matches!(
8856            self.code.instructions.read_op(instr_idx),
8857            Instruction::ForIter { .. }
8858        ) {
8859            return;
8860        }
8861        let iter = self.top_value();
8862
8863        let new_op = if iter.downcast_ref_if_exact::<PyRangeIterator>(vm).is_some() {
8864            Some(Instruction::ForIterRange)
8865        } else if iter.downcast_ref_if_exact::<PyListIterator>(vm).is_some() {
8866            Some(Instruction::ForIterList)
8867        } else if iter.downcast_ref_if_exact::<PyTupleIterator>(vm).is_some() {
8868            Some(Instruction::ForIterTuple)
8869        } else if iter.downcast_ref_if_exact::<PyGenerator>(vm).is_some()
8870            && jump_delta <= i16::MAX as u32
8871            && self.for_iter_has_end_for_shape(instr_idx, jump_delta)
8872            && !self.specialization_eval_frame_active(vm)
8873        {
8874            Some(Instruction::ForIterGen)
8875        } else {
8876            None
8877        };
8878
8879        self.commit_specialization(instr_idx, cache_base, new_op);
8880    }
8881
    /// Returns the VM's `use_tracing` flag. Callers (e.g. `specialize_for_iter`)
    /// use this to veto specializations while tracing is active.
    #[inline]
    fn specialization_eval_frame_active(&self, vm: &VirtualMachine) -> bool {
        vm.use_tracing.get()
    }
8886
    /// Convenience wrapper: checks datastack space for `func`'s frame alone
    /// (no extra bytes on top).
    #[inline]
    fn specialization_has_datastack_space_for_func(
        &self,
        vm: &VirtualMachine,
        func: &Py<PyFunction>,
    ) -> bool {
        self.specialization_has_datastack_space_for_func_with_extra(vm, func, 0)
    }
8895
8896    #[inline]
8897    fn specialization_has_datastack_space_for_func_with_extra(
8898        &self,
8899        vm: &VirtualMachine,
8900        func: &Py<PyFunction>,
8901        extra_bytes: usize,
8902    ) -> bool {
8903        match func.datastack_frame_size_bytes() {
8904            Some(frame_size) => frame_size
8905                .checked_add(extra_bytes)
8906                .is_some_and(|size| vm.datastack_has_space(size)),
8907            None => extra_bytes == 0 || vm.datastack_has_space(extra_bytes),
8908        }
8909    }
8910
    /// Convenience wrapper: recursion guard for entering exactly one more frame.
    #[inline]
    fn specialization_call_recursion_guard(&self, vm: &VirtualMachine) -> bool {
        self.specialization_call_recursion_guard_with_extra_frames(vm, 0)
    }
8915
8916    #[inline]
8917    fn specialization_call_recursion_guard_with_extra_frames(
8918        &self,
8919        vm: &VirtualMachine,
8920        extra_frames: usize,
8921    ) -> bool {
8922        vm.current_recursion_depth()
8923            .saturating_add(1)
8924            .saturating_add(extra_frames)
8925            >= vm.recursion_limit.get()
8926    }
8927
8928    #[inline]
8929    fn for_iter_has_end_for_shape(&self, instr_idx: usize, jump_delta: u32) -> bool {
8930        let target_idx = instr_idx
8931            + 1
8932            + Instruction::ForIter {
8933                delta: Arg::marker(),
8934            }
8935            .cache_entries()
8936            + jump_delta as usize;
8937        self.code.instructions.get(target_idx).is_some_and(|unit| {
8938            matches!(
8939                unit.op,
8940                Instruction::EndFor | Instruction::InstrumentedEndFor
8941            )
8942        })
8943    }
8944
8945    /// Handle iterator exhaustion in specialized FOR_ITER handlers.
8946    /// Skips END_FOR if present at target and jumps.
8947    fn for_iter_jump_on_exhausted(&mut self, target: bytecode::Label) {
8948        let target_idx = target.as_usize();
8949        let jump_target = if let Some(unit) = self.code.instructions.get(target_idx) {
8950            if matches!(
8951                unit.op,
8952                bytecode::Instruction::EndFor | bytecode::Instruction::InstrumentedEndFor
8953            ) {
8954                bytecode::Label::from_u32(target.as_u32() + 1)
8955            } else {
8956                target
8957            }
8958        } else {
8959            target
8960        };
8961        self.jump(jump_target);
8962    }
8963
    /// Attempt to replace a generic `LoadGlobal` with `LoadGlobalModule` or
    /// `LoadGlobalBuiltin`, caching dict versions and a lookup hint in the
    /// instruction's inline cache.
    fn specialize_load_global(
        &mut self,
        vm: &VirtualMachine,
        oparg: u32,
        instr_idx: usize,
        cache_base: usize,
    ) {
        // Bail if the slot no longer holds the generic opcode.
        if !matches!(
            self.code.instructions.read_op(instr_idx),
            Instruction::LoadGlobal { .. }
        ) {
            return;
        }
        // Name index lives in the high bits of oparg; the low bit is a flag
        // (assumed to mirror CPython's LOAD_GLOBAL encoding — TODO confirm).
        let name = self.code.names[(oparg >> 1) as usize];
        // The globals dict version must fit the u16 cache slot; otherwise
        // back off the adaptive counter and retry later.
        let Ok(globals_version) = u16::try_from(self.globals.version()) else {
            unsafe {
                self.code.instructions.write_adaptive_counter(
                    cache_base,
                    bytecode::adaptive_counter_backoff(
                        self.code.instructions.read_adaptive_counter(cache_base),
                    ),
                );
            }
            return;
        };

        // Fast path: the name is present in the module globals.
        // Cache layout: +1 globals version, +2 builtins version (unused → 0),
        // +3 lookup hint.
        if let Ok(Some(globals_hint)) = self.globals.hint_for_key(name, vm) {
            unsafe {
                self.code
                    .instructions
                    .write_cache_u16(cache_base + 1, globals_version);
                self.code.instructions.write_cache_u16(cache_base + 2, 0);
                self.code
                    .instructions
                    .write_cache_u16(cache_base + 3, globals_hint);
            }
            self.specialize_at(instr_idx, cache_base, Instruction::LoadGlobalModule);
            return;
        }

        // Fallback: the name resolves through the builtins dict. Requires an
        // exact dict and a builtins version that also fits in u16.
        if let Some(builtins_dict) = self.builtins.downcast_ref_if_exact::<PyDict>(vm)
            && let Ok(Some(builtins_hint)) = builtins_dict.hint_for_key(name, vm)
            && let Ok(builtins_version) = u16::try_from(builtins_dict.version())
        {
            unsafe {
                self.code
                    .instructions
                    .write_cache_u16(cache_base + 1, globals_version);
                self.code
                    .instructions
                    .write_cache_u16(cache_base + 2, builtins_version);
                self.code
                    .instructions
                    .write_cache_u16(cache_base + 3, builtins_hint);
            }
            self.specialize_at(instr_idx, cache_base, Instruction::LoadGlobalBuiltin);
            return;
        }

        // Neither form applies: back off so we retry specialization later.
        unsafe {
            self.code.instructions.write_adaptive_counter(
                cache_base,
                bytecode::adaptive_counter_backoff(
                    self.code.instructions.read_adaptive_counter(cache_base),
                ),
            );
        }
    }
9032
    /// Attempt to replace a generic `StoreSubscr` with a list-int or dict
    /// specialized variant based on the container/index types on the stack.
    fn specialize_store_subscr(
        &mut self,
        vm: &VirtualMachine,
        instr_idx: usize,
        cache_base: usize,
    ) {
        // Bail if the slot no longer holds the generic opcode.
        if !matches!(
            self.code.instructions.read_op(instr_idx),
            Instruction::StoreSubscr
        ) {
            return;
        }
        // Stack: [value, obj, idx] — obj is TOS-1
        let obj = self.nth_value(1);
        let idx = self.top_value();

        let new_op = if let (Some(list), Some(int_idx)) = (
            obj.downcast_ref_if_exact::<PyList>(vm),
            idx.downcast_ref_if_exact::<PyInt>(vm),
        ) {
            let list_len = list.borrow_vec().len();
            // Only specialize when the index is a nonnegative compact int
            // already in bounds for the current list length.
            if specialization_nonnegative_compact_index(int_idx, vm).is_some_and(|i| i < list_len) {
                Some(Instruction::StoreSubscrListInt)
            } else {
                None
            }
        } else if obj.downcast_ref_if_exact::<PyDict>(vm).is_some() {
            Some(Instruction::StoreSubscrDict)
        } else {
            None
        };

        self.commit_specialization(instr_idx, cache_base, new_op);
    }
9067
9068    fn specialize_contains_op(&mut self, vm: &VirtualMachine, instr_idx: usize, cache_base: usize) {
9069        if !matches!(
9070            self.code.instructions.read_op(instr_idx),
9071            Instruction::ContainsOp { .. }
9072        ) {
9073            return;
9074        }
9075        let haystack = self.top_value(); // b = TOS = haystack
9076        let new_op = if haystack.downcast_ref_if_exact::<PyDict>(vm).is_some() {
9077            Some(Instruction::ContainsOpDict)
9078        } else if haystack.downcast_ref_if_exact::<PySet>(vm).is_some()
9079            || haystack.downcast_ref_if_exact::<PyFrozenSet>(vm).is_some()
9080        {
9081            Some(Instruction::ContainsOpSet)
9082        } else {
9083            None
9084        };
9085
9086        self.commit_specialization(instr_idx, cache_base, new_op);
9087    }
9088
9089    fn specialize_unpack_sequence(
9090        &mut self,
9091        vm: &VirtualMachine,
9092        expected_count: u32,
9093        instr_idx: usize,
9094        cache_base: usize,
9095    ) {
9096        if !matches!(
9097            self.code.instructions.read_op(instr_idx),
9098            Instruction::UnpackSequence { .. }
9099        ) {
9100            return;
9101        }
9102        let obj = self.top_value();
9103        let new_op = if let Some(tuple) = obj.downcast_ref_if_exact::<PyTuple>(vm) {
9104            if tuple.len() != expected_count as usize {
9105                None
9106            } else if expected_count == 2 {
9107                Some(Instruction::UnpackSequenceTwoTuple)
9108            } else {
9109                Some(Instruction::UnpackSequenceTuple)
9110            }
9111        } else if let Some(list) = obj.downcast_ref_if_exact::<PyList>(vm) {
9112            if list.borrow_vec().len() == expected_count as usize {
9113                Some(Instruction::UnpackSequenceList)
9114            } else {
9115                None
9116            }
9117        } else {
9118            None
9119        };
9120
9121        self.commit_specialization(instr_idx, cache_base, new_op);
9122    }
9123
    /// Attempt to replace a generic `StoreAttr` with a specialized variant
    /// (`StoreAttrSlot`, `StoreAttrWithHint`, or `StoreAttrInstanceValue`),
    /// caching the owner's type version in the inline cache as a guard.
    fn specialize_store_attr(
        &mut self,
        vm: &VirtualMachine,
        attr_idx: bytecode::NameIdx,
        instr_idx: usize,
        cache_base: usize,
    ) {
        // Bail if the slot no longer holds the generic opcode.
        if !matches!(
            self.code.instructions.read_op(instr_idx),
            Instruction::StoreAttr { .. }
        ) {
            return;
        }
        // TOS = owner (the object being assigned to)
        let owner = self.top_value();
        let cls = owner.class();

        // Only specialize if setattr is the default (generic_setattr)
        let is_default_setattr = cls
            .slots
            .setattro
            .load()
            .is_some_and(|f| f as usize == PyBaseObject::slot_setattro as *const () as usize);
        if !is_default_setattr {
            // Custom __setattr__: back off the adaptive counter and retry later.
            unsafe {
                self.code.instructions.write_adaptive_counter(
                    cache_base,
                    bytecode::adaptive_counter_backoff(
                        self.code.instructions.read_adaptive_counter(cache_base),
                    ),
                );
            }
            return;
        }

        // Get or assign type version
        let mut type_version = cls.tp_version_tag.load(Acquire);
        if type_version == 0 {
            type_version = cls.assign_version_tag();
        }
        if type_version == 0 {
            // No version tag available: cannot guard the specialization.
            unsafe {
                self.code.instructions.write_adaptive_counter(
                    cache_base,
                    bytecode::adaptive_counter_backoff(
                        self.code.instructions.read_adaptive_counter(cache_base),
                    ),
                );
            }
            return;
        }

        // Check for data descriptor
        let attr_name = self.code.names[attr_idx as usize];
        let cls_attr = cls.get_attr(attr_name);
        // A data descriptor defines both __get__ and __set__.
        let has_data_descr = cls_attr.as_ref().is_some_and(|descr| {
            let descr_cls = descr.class();
            descr_cls.slots.descr_get.load().is_some() && descr_cls.slots.descr_set.load().is_some()
        });

        if has_data_descr {
            // Check for member descriptor (slot access)
            if let Some(ref descr) = cls_attr
                && let Some(member_descr) = descr.downcast_ref::<PyMemberDescriptor>()
                && let MemberGetter::Offset(offset) = member_descr.member.getter
            {
                // __slots__-style member: cache type version and slot offset.
                unsafe {
                    self.code
                        .instructions
                        .write_cache_u32(cache_base + 1, type_version);
                    self.code
                        .instructions
                        .write_cache_u16(cache_base + 3, offset as u16);
                }
                self.specialize_at(instr_idx, cache_base, Instruction::StoreAttrSlot);
            } else {
                // Other data descriptors (e.g. properties) stay generic.
                unsafe {
                    self.code.instructions.write_adaptive_counter(
                        cache_base,
                        bytecode::adaptive_counter_backoff(
                            self.code.instructions.read_adaptive_counter(cache_base),
                        ),
                    );
                }
            }
        } else if let Some(dict) = owner.dict() {
            // Instance dict store: use a hint when the key already exists,
            // otherwise the instance-value form handles the insert.
            let use_hint = match dict.get_item_opt(attr_name, vm) {
                Ok(Some(_)) => true,
                Ok(None) => false,
                Err(_) => {
                    // Lookup raised (e.g. unhashable custom key semantics):
                    // back off rather than specialize on a failing path.
                    unsafe {
                        self.code.instructions.write_adaptive_counter(
                            cache_base,
                            bytecode::adaptive_counter_backoff(
                                self.code.instructions.read_adaptive_counter(cache_base),
                            ),
                        );
                    }
                    return;
                }
            };
            unsafe {
                self.code
                    .instructions
                    .write_cache_u32(cache_base + 1, type_version);
            }
            self.specialize_at(
                instr_idx,
                cache_base,
                if use_hint {
                    Instruction::StoreAttrWithHint
                } else {
                    Instruction::StoreAttrInstanceValue
                },
            );
        } else {
            // No instance dict: nothing to specialize against.
            unsafe {
                self.code.instructions.write_adaptive_counter(
                    cache_base,
                    bytecode::adaptive_counter_backoff(
                        self.code.instructions.read_adaptive_counter(cache_base),
                    ),
                );
            }
        }
    }
9250
    /// LOAD_SUPER_ATTR: build a `super` object from the three stack operands
    /// and load the named attribute (or method pair) from it.
    fn load_super_attr(&mut self, vm: &VirtualMachine, oparg: LoadSuperAttr) -> FrameResult {
        let attr_name = self.code.names[oparg.name_idx() as usize];

        // Stack layout (bottom to top): [super, class, self]
        // Pop in LIFO order: self, class, super
        let self_obj = self.pop_value();
        let class = self.pop_value();
        let global_super = self.pop_value();

        // Create super object - pass args based on has_class flag
        // When super is shadowed, has_class=false means call with 0 args
        let super_obj = if oparg.has_class() {
            global_super.call((class.clone(), self_obj.clone()), vm)?
        } else {
            global_super.call((), vm)?
        };

        if oparg.is_load_method() {
            // Method load: push [method, self_or_null]
            let method = PyMethod::get(super_obj, attr_name, vm)?;
            match method {
                PyMethod::Function { target: _, func } => {
                    // Unbound function: push it plus self so CALL can bind.
                    self.push_value(func);
                    self.push_value(self_obj);
                }
                PyMethod::Attribute(val) => {
                    // Already-bound or plain attribute: NULL marks "no self".
                    self.push_value(val);
                    self.push_null();
                }
            }
        } else {
            // Regular attribute access
            let obj = super_obj.get_attr(attr_name, vm)?;
            self.push_value(obj);
        }
        Ok(None)
    }
9288
9289    fn store_attr(&mut self, vm: &VirtualMachine, attr: bytecode::NameIdx) -> FrameResult {
9290        let attr_name = self.code.names[attr as usize];
9291        let parent = self.pop_value();
9292        let value = self.pop_value();
9293        parent.set_attr(attr_name, value, vm)?;
9294        Ok(None)
9295    }
9296
9297    fn delete_attr(&mut self, vm: &VirtualMachine, attr: bytecode::NameIdx) -> FrameResult {
9298        let attr_name = self.code.names[attr as usize];
9299        let parent = self.pop_value();
9300        parent.del_attr(attr_name, vm)?;
9301        Ok(None)
9302    }
9303
9304    // Block stack functions removed - exception table handles all exception/cleanup
9305
9306    #[inline]
9307    #[track_caller]
9308    fn push_stackref_opt(&mut self, obj: Option<PyStackRef>) {
9309        match self.localsplus.stack_try_push(obj) {
9310            Ok(()) => {}
9311            Err(_e) => self.fatal("tried to push value onto stack but overflowed max_stackdepth"),
9312        }
9313    }
9314
9315    #[inline]
9316    #[track_caller] // not a real track_caller but push_value is less useful for debugging
9317    fn push_value_opt(&mut self, obj: Option<PyObjectRef>) {
9318        self.push_stackref_opt(obj.map(PyStackRef::new_owned));
9319    }
9320
9321    #[inline]
9322    #[track_caller]
9323    fn push_value(&mut self, obj: PyObjectRef) {
9324        self.push_stackref_opt(Some(PyStackRef::new_owned(obj)));
9325    }
9326
    /// Push a borrowed reference onto the stack (no refcount increment).
    ///
    /// # Safety
    /// The object must remain alive until the borrowed ref is consumed.
    /// The compiler guarantees consumption within the same basic block.
    #[inline]
    #[track_caller]
    #[allow(dead_code)] // kept for future specialized handlers; currently unused
    unsafe fn push_borrowed(&mut self, obj: &PyObject) {
        // SAFETY: caller upholds the lifetime contract documented above.
        self.push_stackref_opt(Some(unsafe { PyStackRef::new_borrowed(obj) }));
    }
9338
    /// Push a NULL slot (used e.g. as the "no self" marker when loading
    /// methods — see `load_super_attr`).
    #[inline]
    fn push_null(&mut self) {
        self.push_stackref_opt(None);
    }
9343
    /// Pop a raw stackref from the stack, returning None if the stack slot is NULL.
    /// An empty stack is an invariant violation and aborts via `fatal`.
    #[inline]
    fn pop_stackref_opt(&mut self) -> Option<PyStackRef> {
        if self.localsplus.stack_is_empty() {
            self.fatal("tried to pop from empty stack");
        }
        self.localsplus.stack_pop()
    }
9352
    /// Pop a raw stackref from the stack. Panics if NULL.
    /// A NULL here means the compiler emitted a pop where it pushed NULL.
    #[inline]
    #[track_caller]
    fn pop_stackref(&mut self) -> PyStackRef {
        expect_unchecked(
            self.pop_stackref_opt(),
            "pop stackref but null found. This is a compiler bug.",
        )
    }
9362
    /// Pop a value from the stack, returning None if the stack slot is NULL.
    /// Automatically promotes borrowed refs to owned.
    #[inline]
    fn pop_value_opt(&mut self) -> Option<PyObjectRef> {
        self.pop_stackref_opt().map(|sr| sr.to_pyobj())
    }
9369
    /// Pop an owned value from the stack; panics (via `pop_stackref`) if the
    /// slot is NULL. Promotes borrowed refs to owned.
    #[inline]
    #[track_caller]
    fn pop_value(&mut self) -> PyObjectRef {
        self.pop_stackref().to_pyobj()
    }
9375
    /// Dispatch a one-argument intrinsic (CALL_INTRINSIC_1 opcode).
    ///
    /// Each arm consumes `arg` and returns the intrinsic's result object;
    /// errors are surfaced as `PyResult` for the caller to handle.
    fn call_intrinsic_1(
        &mut self,
        func: bytecode::IntrinsicFunction1,
        arg: PyObjectRef,
        vm: &VirtualMachine,
    ) -> PyResult {
        match func {
            // Interactive-mode display of expression results via sys.displayhook.
            bytecode::IntrinsicFunction1::Print => {
                let displayhook = vm
                    .sys_module
                    .get_attr("displayhook", vm)
                    .map_err(|_| vm.new_runtime_error("lost sys.displayhook"))?;
                displayhook.call((arg,), vm)
            }
            bytecode::IntrinsicFunction1::ImportStar => {
                // arg is the module object
                self.push_value(arg); // Push module back on stack for import_star
                self.import_star(vm)?;
                Ok(vm.ctx.none())
            }
            // Unary `+` operator.
            bytecode::IntrinsicFunction1::UnaryPositive => vm._pos(&arg),
            bytecode::IntrinsicFunction1::SubscriptGeneric => {
                // Used for PEP 695: Generic[*type_params]
                crate::builtins::genericalias::subscript_generic(arg, vm)
            }
            // PEP 695 type-parameter objects: arg is the parameter name.
            bytecode::IntrinsicFunction1::TypeVar => {
                let type_var: PyObjectRef =
                    _typing::TypeVar::new(vm, arg.clone(), vm.ctx.none(), vm.ctx.none())
                        .into_ref(&vm.ctx)
                        .into();
                Ok(type_var)
            }
            bytecode::IntrinsicFunction1::ParamSpec => {
                let param_spec: PyObjectRef = _typing::ParamSpec::new(arg.clone(), vm)
                    .into_ref(&vm.ctx)
                    .into();
                Ok(param_spec)
            }
            bytecode::IntrinsicFunction1::TypeVarTuple => {
                let type_var_tuple: PyObjectRef = _typing::TypeVarTuple::new(arg.clone(), vm)
                    .into_ref(&vm.ctx)
                    .into();
                Ok(type_var_tuple)
            }
            bytecode::IntrinsicFunction1::TypeAlias => {
                // TypeAlias receives a tuple of (name, type_params, value)
                let tuple: PyTupleRef = arg
                    .downcast()
                    .map_err(|_| vm.new_type_error("TypeAlias expects a tuple argument"))?;

                if tuple.len() != 3 {
                    return Err(vm.new_type_error(format!(
                        "TypeAlias expects exactly 3 arguments, got {}",
                        tuple.len()
                    )));
                }

                let name = tuple.as_slice()[0].clone();
                let type_params_obj = tuple.as_slice()[1].clone();
                let compute_value = tuple.as_slice()[2].clone();

                // None type_params means "no parameters" — use the shared empty tuple.
                let type_params: PyTupleRef = if vm.is_none(&type_params_obj) {
                    vm.ctx.empty_tuple.clone()
                } else {
                    type_params_obj
                        .downcast()
                        .map_err(|_| vm.new_type_error("Type params must be a tuple."))?
                };

                let name = name
                    .downcast::<crate::builtins::PyStr>()
                    .map_err(|_| vm.new_type_error("TypeAliasType name must be a string"))?;
                let type_alias = _typing::TypeAliasType::new(name, type_params, compute_value);
                Ok(type_alias.into_ref(&vm.ctx).into())
            }
            bytecode::IntrinsicFunction1::ListToTuple => {
                // Convert list to tuple
                let list = arg
                    .downcast::<PyList>()
                    .map_err(|_| vm.new_type_error("LIST_TO_TUPLE expects a list"))?;
                Ok(vm.ctx.new_tuple(list.borrow_vec().to_vec()).into())
            }
            bytecode::IntrinsicFunction1::StopIterationError => {
                // Convert StopIteration to RuntimeError (PEP 479)
                // Returns the exception object; RERAISE will re-raise it
                if arg.fast_isinstance(vm.ctx.exceptions.stop_iteration) {
                    let flags = &self.code.flags;
                    // Pick the message by the kind of code object that raised.
                    let msg = if flags
                        .contains(bytecode::CodeFlags::COROUTINE | bytecode::CodeFlags::GENERATOR)
                    {
                        "async generator raised StopIteration"
                    } else if flags.contains(bytecode::CodeFlags::COROUTINE) {
                        "coroutine raised StopIteration"
                    } else {
                        "generator raised StopIteration"
                    };
                    let err = vm.new_runtime_error(msg);
                    // Chain the original StopIteration as __cause__.
                    err.set___cause__(arg.downcast().ok());
                    Ok(err.into())
                } else {
                    // Not StopIteration, pass through for RERAISE
                    Ok(arg)
                }
            }
            bytecode::IntrinsicFunction1::AsyncGenWrap => {
                // Wrap value for async generator
                // Creates an AsyncGenWrappedValue
                Ok(crate::builtins::asyncgenerator::PyAsyncGenWrappedValue(arg)
                    .into_ref(&vm.ctx)
                    .into())
            }
        }
    }
9489
    /// Dispatch a two-argument intrinsic (CALL_INTRINSIC_2 opcode).
    fn call_intrinsic_2(
        &mut self,
        func: bytecode::IntrinsicFunction2,
        arg1: PyObjectRef,
        arg2: PyObjectRef,
        vm: &VirtualMachine,
    ) -> PyResult {
        match func {
            // PEP 695/696: attach a default to a type parameter object.
            bytecode::IntrinsicFunction2::SetTypeparamDefault => {
                crate::stdlib::_typing::set_typeparam_default(arg1, arg2, vm)
            }
            bytecode::IntrinsicFunction2::SetFunctionTypeParams => {
                // arg1 is the function, arg2 is the type params tuple
                // Set __type_params__ attribute on the function
                arg1.set_attr("__type_params__", arg2, vm)?;
                Ok(arg1)
            }
            // TypeVar with a bound: arg1 = name, arg2 = bound.
            bytecode::IntrinsicFunction2::TypeVarWithBound => {
                let type_var: PyObjectRef =
                    _typing::TypeVar::new(vm, arg1.clone(), arg2, vm.ctx.none())
                        .into_ref(&vm.ctx)
                        .into();
                Ok(type_var)
            }
            // TypeVar with constraints: arg1 = name, arg2 = constraint tuple.
            bytecode::IntrinsicFunction2::TypeVarWithConstraint => {
                let type_var: PyObjectRef =
                    _typing::TypeVar::new(vm, arg1.clone(), vm.ctx.none(), arg2)
                        .into_ref(&vm.ctx)
                        .into();
                Ok(type_var)
            }
            bytecode::IntrinsicFunction2::PrepReraiseStar => {
                // arg1 = orig (original exception)
                // arg2 = excs (list of exceptions raised/reraised in except* blocks)
                // Returns: exception to reraise, or None if nothing to reraise
                crate::exceptions::prep_reraise_star(arg1, arg2, vm)
            }
        }
    }
9529
    /// Pop multiple values from the stack. Panics if any slot is NULL.
    /// Returns the popped values bottom-to-top (drain order).
    fn pop_multiple(&mut self, count: usize) -> impl ExactSizeIterator<Item = PyObjectRef> + '_ {
        let stack_len = self.localsplus.stack_len();
        // Underflow is a compiler bug; gather as much context as possible
        // (current opcode, code object, source path) before panicking.
        if count > stack_len {
            let instr = self.code.instructions.get(self.lasti() as usize);
            let op_name = instr
                .map(|i| format!("{:?}", i.op))
                .unwrap_or_else(|| "None".to_string());
            panic!(
                "Stack underflow in pop_multiple: trying to pop {} elements from stack with {} elements. lasti={}, code={}, op={}, source_path={}",
                count,
                stack_len,
                self.lasti(),
                self.code.obj_name,
                op_name,
                self.code.source_path()
            );
        }
        // Drain everything above the new top; each drained slot must be
        // non-NULL (the compiler never pops across a NULL it pushed).
        self.localsplus.stack_drain(stack_len - count).map(|obj| {
            expect_unchecked(obj, "pop_multiple but null found. This is a compiler bug.").to_pyobj()
        })
    }
9552
9553    #[inline]
9554    fn replace_top(&mut self, top: Option<PyObjectRef>) -> Option<PyObjectRef> {
9555        let mut slot = top.map(PyStackRef::new_owned);
9556        let last = self.localsplus.stack_last_mut().unwrap();
9557        core::mem::swap(last, &mut slot);
9558        slot.map(|sr| sr.to_pyobj())
9559    }
9560
    /// Borrow the object on top of the stack without popping it.
    /// Aborts via `fatal` on an empty stack or a NULL top slot.
    #[inline]
    #[track_caller]
    fn top_value(&self) -> &PyObject {
        match self.localsplus.stack_last() {
            Some(Some(last)) => last.as_object(),
            Some(None) => self.fatal("tried to get top of stack but got NULL"),
            None => self.fatal("tried to get top of stack but stack is empty"),
        }
    }
9570
    /// Borrow the value `depth` slots below TOS (depth 0 == TOS) without popping.
    #[inline]
    #[track_caller]
    fn nth_value(&self, depth: u32) -> &PyObject {
        // Underflows (panicking in debug) if depth exceeds the stack length;
        // callers must only ask for depths within the live stack.
        let idx = self.localsplus.stack_len() - depth as usize - 1;
        match self.localsplus.stack_index(idx) {
            Some(obj) => obj.as_object(),
            // SAFETY: idx < stack_len by construction above.
            // NOTE(review): this also assumes the slot is non-NULL — confirm
            // the compiler guarantees that for every nth_value call site.
            None => unsafe { core::hint::unreachable_unchecked() },
        }
    }
9580
    /// Abort with diagnostics: dump this frame's `Debug` representation to
    /// stderr (via `dbg!`), then panic with `msg`.
    ///
    /// `#[cold]` + `#[inline(never)]` keep this path out of the interpreter's
    /// hot code; `#[track_caller]` points the panic at the caller.
    #[cold]
    #[inline(never)]
    #[track_caller]
    fn fatal(&self, msg: &'static str) -> ! {
        dbg!(self);
        panic!("{msg}")
    }
9588}
9589
9590impl fmt::Debug for Frame {
9591    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
9592        // SAFETY: Debug is best-effort; concurrent mutation is unlikely
9593        // and would only affect debug output.
9594        let iframe = unsafe { &*self.iframe.get() };
9595        let stack_str =
9596            iframe
9597                .localsplus
9598                .stack_as_slice()
9599                .iter()
9600                .fold(String::new(), |mut s, slot| {
9601                    match slot {
9602                        Some(elem) if elem.downcastable::<Self>() => {
9603                            s.push_str("\n  > {frame}");
9604                        }
9605                        Some(elem) => {
9606                            core::fmt::write(&mut s, format_args!("\n  > {elem:?}")).unwrap();
9607                        }
9608                        None => {
9609                            s.push_str("\n  > NULL");
9610                        }
9611                    }
9612                    s
9613                });
9614        // TODO: fix this up
9615        write!(
9616            f,
9617            "Frame Object {{ \n Stack:{}\n Locals initialized:{}\n}}",
9618            stack_str,
9619            self.locals.get().is_some()
9620        )
9621    }
9622}
9623
9624/// _PyEval_SpecialMethodCanSuggest
9625fn special_method_can_suggest(
9626    obj: &PyObjectRef,
9627    oparg: SpecialMethod,
9628    vm: &VirtualMachine,
9629) -> PyResult<bool> {
9630    Ok(match oparg {
9631        SpecialMethod::Enter | SpecialMethod::Exit => {
9632            vm.get_special_method(obj, get_special_method_name(SpecialMethod::AEnter, vm))?
9633                .is_some()
9634                && vm
9635                    .get_special_method(obj, get_special_method_name(SpecialMethod::AExit, vm))?
9636                    .is_some()
9637        }
9638        SpecialMethod::AEnter | SpecialMethod::AExit => {
9639            vm.get_special_method(obj, get_special_method_name(SpecialMethod::Enter, vm))?
9640                .is_some()
9641                && vm
9642                    .get_special_method(obj, get_special_method_name(SpecialMethod::Exit, vm))?
9643                    .is_some()
9644        }
9645    })
9646}
9647
/// Map a `SpecialMethod` opcode argument to the interned name of the dunder
/// it denotes (`__enter__` / `__exit__` / `__aenter__` / `__aexit__`).
fn get_special_method_name(oparg: SpecialMethod, vm: &VirtualMachine) -> &'static PyStrInterned {
    match oparg {
        SpecialMethod::Enter => identifier!(vm, __enter__),
        SpecialMethod::Exit => identifier!(vm, __exit__),
        SpecialMethod::AEnter => identifier!(vm, __aenter__),
        SpecialMethod::AExit => identifier!(vm, __aexit__),
    }
}
9656
9657/// _Py_SpecialMethod _Py_SpecialMethods
9658fn get_special_method_error_msg(
9659    oparg: SpecialMethod,
9660    class_name: &str,
9661    can_suggest: bool,
9662) -> String {
9663    if can_suggest {
9664        match oparg {
9665            SpecialMethod::Enter => format!(
9666                "'{class_name}' object does not support the context manager protocol (missed __enter__ method) but it supports the asynchronous context manager protocol. Did you mean to use 'async with'?"
9667            ),
9668            SpecialMethod::Exit => format!(
9669                "'{class_name}' object does not support the context manager protocol (missed __exit__ method) but it supports the asynchronous context manager protocol. Did you mean to use 'async with'?"
9670            ),
9671            SpecialMethod::AEnter => format!(
9672                "'{class_name}' object does not support the asynchronous context manager protocol (missed __aenter__ method) but it supports the context manager protocol. Did you mean to use 'with'?"
9673            ),
9674            SpecialMethod::AExit => format!(
9675                "'{class_name}' object does not support the asynchronous context manager protocol (missed __aexit__ method) but it supports the context manager protocol. Did you mean to use 'with'?"
9676            ),
9677        }
9678    } else {
9679        match oparg {
9680            SpecialMethod::Enter => format!(
9681                "'{class_name}' object does not support the context manager protocol (missed __enter__ method)"
9682            ),
9683            SpecialMethod::Exit => format!(
9684                "'{class_name}' object does not support the context manager protocol (missed __exit__ method)"
9685            ),
9686            SpecialMethod::AEnter => format!(
9687                "'{class_name}' object does not support the asynchronous context manager protocol (missed __aenter__ method)"
9688            ),
9689            SpecialMethod::AExit => format!(
9690                "'{class_name}' object does not support the asynchronous context manager protocol (missed __aexit__ method)"
9691            ),
9692        }
9693    }
9694}
9695
9696fn is_module_initializing(module: &PyObject, vm: &VirtualMachine) -> bool {
9697    let Ok(spec) = module.get_attr(&vm.ctx.new_str("__spec__"), vm) else {
9698        return false;
9699    };
9700    if vm.is_none(&spec) {
9701        return false;
9702    }
9703    let Ok(initializing_attr) = spec.get_attr(&vm.ctx.new_str("_initializing"), vm) else {
9704        return false;
9705    };
9706    initializing_attr.try_to_bool(vm).unwrap_or(false)
9707}
9708
/// Unwrap `optional`, panicking with `err_msg` in debug builds and skipping
/// the check entirely in release builds.
///
/// The previous `T: fmt::Debug` bound was unnecessary — `Option::expect`
/// (unlike `Result::expect`) imposes no bound — so it is dropped, letting
/// this helper work with non-`Debug` payloads as well.
///
/// Release-build contract: the caller must guarantee `optional` is `Some`,
/// otherwise behavior is undefined.
fn expect_unchecked<T>(optional: Option<T>, err_msg: &'static str) -> T {
    if cfg!(debug_assertions) {
        optional.expect(err_msg)
    } else {
        // SAFETY: per this function's contract, the caller guarantees
        // `optional` is `Some`; debug builds verify this via `expect` above.
        unsafe { optional.unwrap_unchecked() }
    }
}