radix_engine/kernel/kernel.rs

use super::heap::Heap;
use super::id_allocator::IdAllocator;
use crate::errors::*;
use crate::internal_prelude::*;
use crate::kernel::call_frame::*;
use crate::kernel::kernel_api::*;
use crate::kernel::kernel_callback_api::*;
use crate::kernel::substate_io::{SubstateDevice, SubstateIO};
use crate::kernel::substate_locks::SubstateLocks;
use crate::track::interface::*;
use crate::track::Track;
use radix_engine_interface::api::field_api::LockFlags;
use radix_engine_profiling_derive::trace_resources;
use radix_substate_store_interface::db_key_mapper::SubstateKeyContent;
use radix_substate_store_interface::interface::SubstateDatabase;
use sbor::rust::mem;

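// Builds a `KernelReadOnly` view over the kernel's current state: the current and
// previous call frames, the heap, and the system-layer callback. Used to hand
// read-only kernel state to callback hooks.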
macro_rules! as_read_only {
    ($kernel:expr) => {{
        let (current_frame, previous_frame) = $kernel.stacks.current_frame_and_previous_frame();
        KernelReadOnly {
            current_frame,
            previous_frame,
            heap: &$kernel.substate_io.heap,
            callback: $kernel.callback,
        }
    }};
}

pub type KernelBootSubstate = KernelBoot;

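/// Versioned kernel boot configuration, persisted as a boot loader substate and
/// loaded by `KernelBoot::load`. V1 corresponds to Babylon behavior; V2 (Cuttlefish)
/// additionally selects the version of the always-visible global node set.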
#[derive(Debug, Clone, PartialEq, Eq, Sbor, ScryptoSborAssertion)]
#[sbor_assert(backwards_compatible(
    cuttlefish = "FILE:kernel_boot_substate_cuttlefish_schema.bin",
))]
pub enum KernelBoot {
    V1,
    V2 {
        global_nodes_version: AlwaysVisibleGlobalNodesVersion,
    },
}

impl KernelBoot {
    /// Loads the kernel boot substate from the database, falling back to the Babylon default if absent.
    pub fn load(substate_db: &impl SubstateDatabase) -> Self {
        substate_db
            .get_substate(
                TRANSACTION_TRACKER,
                BOOT_LOADER_PARTITION,
                BootLoaderField::KernelBoot,
            )
            .unwrap_or_else(KernelBoot::babylon)
    }

    pub fn babylon() -> Self {
        Self::V1
    }

    pub fn cuttlefish() -> Self {
        Self::V2 {
            global_nodes_version: AlwaysVisibleGlobalNodesVersion::V2,
        }
    }

    pub fn always_visible_global_nodes_version(&self) -> AlwaysVisibleGlobalNodesVersion {
        match self {
            KernelBoot::V1 => AlwaysVisibleGlobalNodesVersion::V1,
            KernelBoot::V2 {
                global_nodes_version,
                ..
            } => *global_nodes_version,
        }
    }

    pub fn always_visible_global_nodes(&self) -> &'static IndexSet<NodeId> {
        always_visible_global_nodes(self.always_visible_global_nodes_version())
    }
}

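/// Everything needed to start a kernel run: the substate database, the kernel boot
/// configuration loaded from it, and the initialization parameters for the upper
/// system layer.
///
/// A minimal usage sketch (the concrete `Executable`/`Receipt` types come from the
/// system layer chosen as `I`):
///
/// ```text
/// let kernel_init = KernelInit::load(&substate_db, callback_init);
/// let receipt = kernel_init.execute(&executable);
/// ```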
pub struct KernelInit<
    's,
    S: SubstateDatabase,
    I: InitializationParameters<For: KernelTransactionExecutor<Init = I>>,
> {
    substate_db: &'s S,
    kernel_boot: KernelBoot,
    callback_init: I,
}

impl<
        's,
        S: SubstateDatabase,
        I: InitializationParameters<For: KernelTransactionExecutor<Init = I>>,
    > KernelInit<'s, S, I>
{
    pub fn load(substate_db: &'s S, callback_init: I) -> Self {
        let kernel_boot = KernelBoot::load(substate_db);
        Self {
            substate_db,
            kernel_boot,
            callback_init,
        }
    }

    /// Executes a transaction
    pub fn execute(
        self,
        executable: &<I::For as KernelTransactionExecutor>::Executable,
    ) -> <I::For as KernelTransactionExecutor>::Receipt {
        let boot_loader = BootLoader {
            id_allocator: IdAllocator::new(executable.unique_seed_for_id_allocator()),
            track: Track::new(self.substate_db),
        };

        #[cfg(not(all(target_os = "linux", feature = "std", feature = "cpu_ram_metrics")))]
        {
            boot_loader.execute::<I::For>(self.kernel_boot, self.callback_init, executable)
        }

        #[cfg(all(target_os = "linux", feature = "std", feature = "cpu_ram_metrics"))]
        {
            use crate::kernel::resources_tracker::ResourcesTracker;

            let mut resources_tracker = ResourcesTracker::start_measurement();
            let mut receipt =
                boot_loader.execute::<I::For>(self.kernel_boot, self.callback_init, executable);
            receipt.set_resource_usage(resources_tracker.end_measurement());
            receipt
        }
    }
}

/// Organizes the radix engine stack to make a function entrypoint available for execution
pub struct BootLoader<'h, S: SubstateDatabase> {
    id_allocator: IdAllocator,
    track: Track<'h, S>,
}

impl<'h, S: SubstateDatabase> BootLoader<'h, S> {
    fn execute<E: KernelTransactionExecutor>(
        mut self,
        kernel_boot: KernelBoot,
        callback_init: E::Init,
        executable: &E::Executable,
    ) -> E::Receipt {
        #[cfg(feature = "resource_tracker")]
        radix_engine_profiling::QEMU_PLUGIN_CALIBRATOR.with(|v| {
            v.borrow_mut();
        });

        // Upper Layer Initialization
        let system_init_result = E::init(
            &mut self.track,
            executable,
            callback_init,
            kernel_boot.always_visible_global_nodes(),
        );

        let (mut system, call_frame_inits) = match system_init_result {
            Ok(success) => success,
            Err(receipt) => return receipt,
        };

        // Kernel Initialization
        let mut kernel = Kernel::new(
            &mut self.track,
            &mut self.id_allocator,
            &mut system,
            call_frame_inits,
        );

        // Execution
        let result = || -> Result<E::ExecutionOutput, RuntimeError> {
            // Invoke transaction processor
            let output = E::execute(&mut kernel, executable)?;

            // Sanity check call frames
            for stack in &kernel.stacks.stacks {
                assert!(stack.prev_frames.is_empty());
            }

            // Sanity check heap
            assert!(kernel.substate_io.heap.is_empty());

            // Finalize state updates based on what has occurred
            let commit_info = kernel.substate_io.store.get_commit_info();
            kernel.callback.finalize(executable, commit_info)?;

            Ok(output)
        }()
        .map_err(TransactionExecutionError::RuntimeError);

        // Create receipt representing the result of a transaction
        system.create_receipt(self.track, result)
    }
}

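/// A single call frame stack: the currently executing frame plus the chain of
/// parent frames beneath it.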
pub struct KernelStack<M: KernelCallbackObject> {
    current_frame: CallFrame<M::CallFrameData, M::LockData>,
    prev_frames: Vec<CallFrame<M::CallFrameData, M::LockData>>,
}

impl<M: KernelCallbackObject> KernelStack<M> {
    pub fn new(init: CallFrameInit<M::CallFrameData>) -> Self {
        Self {
            current_frame: CallFrame::new_root(init),
            prev_frames: vec![],
        }
    }
}

/// The kernel manages multiple call frame stacks. There will always be a single
/// "current" stack (and call frame) in context.
pub struct KernelStacks<M: KernelCallbackObject> {
    current_stack_index: usize,
    stacks: Vec<KernelStack<M>>,
}

impl<M: KernelCallbackObject> KernelStacks<M> {
    pub fn new(call_frames: Vec<CallFrameInit<M::CallFrameData>>) -> Self {
        let stacks = call_frames
            .into_iter()
            .map(|call_frame| KernelStack::new(call_frame))
            .collect();
        Self {
            current_stack_index: 0usize,
            stacks,
        }
    }

    fn current_stack_mut(&mut self) -> &mut KernelStack<M> {
        self.stacks.get_mut(self.current_stack_index).unwrap()
    }

    fn current_stack(&self) -> &KernelStack<M> {
        self.stacks.get(self.current_stack_index).unwrap()
    }

    /// Pushes a new call frame onto the current stack
    pub fn push_frame(&mut self, frame: CallFrame<M::CallFrameData, M::LockData>) {
        let stack = self.current_stack_mut();
        let parent = mem::replace(&mut stack.current_frame, frame);
        stack.prev_frames.push(parent);
    }
    /// Pops a call frame off the current stack
    pub fn pop_frame(&mut self) {
        let stack = self.current_stack_mut();
        let parent = stack.prev_frames.pop().unwrap();
        let _ = core::mem::replace(&mut stack.current_frame, parent);
    }

    /// Switches the current stack
    pub fn switch_stack(&mut self, stack_index: usize) -> Result<(), RuntimeError> {
        if stack_index >= self.stacks.len() {
            return Err(RuntimeError::KernelError(KernelError::StackError(
                StackError::InvalidStackId,
            )));
        }
        self.current_stack_index = stack_index;

        Ok(())
    }

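    /// Returns mutable references to the current frames of the active stack and of
    /// `other_stack`. Expects `other_stack` to be a valid index different from the
    /// active stack; otherwise the indexing below panics.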
    #[allow(clippy::type_complexity)]
    pub fn current_frame_mut_in_this_and_other_stack(
        &mut self,
        other_stack: usize,
    ) -> (
        &mut CallFrame<M::CallFrameData, M::LockData>,
        &mut CallFrame<M::CallFrameData, M::LockData>,
    ) {
        let mut mut_stacks: Vec<_> = self
            .stacks
            .iter_mut()
            .enumerate()
            .filter(|(id, _)| (*id).eq(&self.current_stack_index) || (*id).eq(&other_stack))
            .map(Some)
            .collect();

        let (id0, stack0) = mut_stacks[0].take().unwrap();
        let (_id1, stack1) = mut_stacks[1].take().unwrap();
        if id0.eq(&self.current_stack_index) {
            (&mut stack0.current_frame, &mut stack1.current_frame)
        } else {
            (&mut stack1.current_frame, &mut stack0.current_frame)
        }
    }

    #[allow(clippy::type_complexity)]
    pub fn current_frame_and_previous_frame(
        &self,
    ) -> (
        &CallFrame<M::CallFrameData, M::LockData>,
        Option<&CallFrame<M::CallFrameData, M::LockData>>,
    ) {
        let stack = self.current_stack();
        (&stack.current_frame, stack.prev_frames.last())
    }

    #[allow(clippy::type_complexity)]
    pub fn mut_current_frame_and_previous_frame(
        &mut self,
    ) -> (
        &mut CallFrame<M::CallFrameData, M::LockData>,
        Option<&CallFrame<M::CallFrameData, M::LockData>>,
    ) {
        let stack = self.current_stack_mut();
        (&mut stack.current_frame, stack.prev_frames.last())
    }

    #[allow(clippy::type_complexity)]
    pub fn mut_current_frame_and_mut_previous_frame(
        &mut self,
    ) -> (
        &mut CallFrame<M::CallFrameData, M::LockData>,
        Option<&mut CallFrame<M::CallFrameData, M::LockData>>,
    ) {
        let stack = self.current_stack_mut();
        (&mut stack.current_frame, stack.prev_frames.last_mut())
    }

    pub fn current_frame(&self) -> &CallFrame<M::CallFrameData, M::LockData> {
        &self.current_stack().current_frame
    }

    pub fn current_frame_mut(&mut self) -> &mut CallFrame<M::CallFrameData, M::LockData> {
        &mut self.current_stack_mut().current_frame
    }

    #[cfg(feature = "radix_engine_tests")]
    pub fn previous_frames_mut(&mut self) -> &mut Vec<CallFrame<M::CallFrameData, M::LockData>> {
        &mut self.current_stack_mut().prev_frames
    }
}

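/// The kernel for a single transaction, tying together the call frame stacks,
/// substate I/O (heap and store), ID allocation, and the upper system layer.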
pub struct Kernel<
    'g, // Lifetime of values outliving all frames
    M,  // Upstream System layer
    S,  // Substate store
> where
    M: KernelCallbackObject,
    S: CommitableSubstateStore,
{
    stacks: KernelStacks<M>,

    substate_io: SubstateIO<'g, S>,

    /// ID allocator
    id_allocator: &'g mut IdAllocator,

    /// Upper system layer
    callback: &'g mut M,
}

#[cfg(feature = "radix_engine_tests")]
impl<'g, M: KernelCallbackObject<CallFrameData: Default>, S: CommitableSubstateStore>
    Kernel<'g, M, S>
{
    pub fn new_no_refs(
        store: &'g mut S,
        id_allocator: &'g mut IdAllocator,
        callback: &'g mut M,
    ) -> Self {
        Self::new(
            store,
            id_allocator,
            callback,
            vec![CallFrameInit {
                data: M::CallFrameData::default(),
                direct_accesses: Default::default(),
                global_addresses: Default::default(),
                always_visible_global_nodes: always_visible_global_nodes(
                    AlwaysVisibleGlobalNodesVersion::latest(),
                ),
                stack_id: 0,
            }],
        )
    }
}

impl<'g, M: KernelCallbackObject, S: CommitableSubstateStore> Kernel<'g, M, S> {
    pub fn new(
        store: &'g mut S,
        id_allocator: &'g mut IdAllocator,
        callback: &'g mut M,
        call_frame_inits: Vec<CallFrameInit<M::CallFrameData>>,
    ) -> Self {
        Kernel {
            stacks: KernelStacks::new(call_frame_inits),
            substate_io: SubstateIO {
                heap: Heap::new(),
                store,
                non_global_node_refs: NonGlobalNodeRefs::new(),
                substate_locks: SubstateLocks::new(),
                heap_transient_substates: TransientSubstates::new(),
                pinned_to_heap: BTreeSet::new(),
            },
            id_allocator,
            callback,
        }
    }
}

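/// Adapter passed into call frame operations: it wraps the system-layer callback and
/// the previous frame so that I/O-access and substate-read notifications can be
/// forwarded through a `KernelReadOnly` view.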
struct KernelHandler<
    'a,
    M: KernelCallbackObject,
    F: FnMut(&mut KernelReadOnly<M>, IOAccess) -> Result<(), RuntimeError>,
> {
    callback: &'a mut M,
    prev_frame: Option<&'a CallFrame<M::CallFrameData, M::LockData>>,
    on_io_access: F,
}

impl<
        M: KernelCallbackObject,
        F: FnMut(&mut KernelReadOnly<M>, IOAccess) -> Result<(), RuntimeError>,
    > CallFrameIOAccessHandler<M::CallFrameData, M::LockData, RuntimeError>
    for KernelHandler<'_, M, F>
{
    fn on_io_access(
        &mut self,
        current_frame: &CallFrame<M::CallFrameData, M::LockData>,
        heap: &Heap,
        io_access: IOAccess,
    ) -> Result<(), RuntimeError> {
        let mut read_only = KernelReadOnly {
            current_frame,
            previous_frame: self.prev_frame,
            heap,
            callback: self.callback,
        };

        (self.on_io_access)(&mut read_only, io_access)
    }
}

impl<
        M: KernelCallbackObject,
        F: FnMut(&mut KernelReadOnly<M>, IOAccess) -> Result<(), RuntimeError>,
    > CallFrameSubstateReadHandler<M::CallFrameData, M::LockData> for KernelHandler<'_, M, F>
{
    type Error = RuntimeError;
    fn on_read_substate(
        &mut self,
        current_frame: &CallFrame<M::CallFrameData, M::LockData>,
        heap: &Heap,
        handle: SubstateHandle,
        value: &IndexedScryptoValue,
        device: SubstateDevice,
    ) -> Result<(), Self::Error> {
        let mut read_only = KernelReadOnly {
            current_frame,
            previous_frame: self.prev_frame,
            heap,
            callback: self.callback,
        };

        M::on_read_substate(
            ReadSubstateEvent::OnRead {
                handle,
                value,
                device,
            },
            &mut read_only,
        )
    }
}

impl<'g, M, S> KernelNodeApi for Kernel<'g, M, S>
where
    M: KernelCallbackObject,
    S: CommitableSubstateStore,
{
    #[trace_resources]
    fn kernel_pin_node(&mut self, node_id: NodeId) -> Result<(), RuntimeError> {
        M::on_pin_node(&node_id, &mut as_read_only!(self))?;

        self.stacks
            .current_frame_mut()
            .pin_node(&mut self.substate_io, node_id)
            .map_err(|e| {
                RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::PinNodeError(e),
                ))
            })
    }

    #[trace_resources]
    fn kernel_allocate_node_id(&mut self, entity_type: EntityType) -> Result<NodeId, RuntimeError> {
        M::on_allocate_node_id(entity_type, self)?;

        self.id_allocator.allocate_node_id(entity_type)
    }

    #[trace_resources]
    fn kernel_create_node(
        &mut self,
        node_id: NodeId,
        node_substates: NodeSubstates,
    ) -> Result<(), RuntimeError> {
        let mut read_only = as_read_only!(self);
        M::on_create_node(
            CreateNodeEvent::Start(&node_id, &node_substates),
            &mut read_only,
        )?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_create_node(CreateNodeEvent::IOAccess(&io_access), api)
            },
        };

        cur_frame
            .create_node(&mut self.substate_io, &mut handler, node_id, node_substates)
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::CreateNodeError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        let mut read_only = as_read_only!(self);
        M::on_create_node(CreateNodeEvent::End(&node_id), &mut read_only)?;

        Ok(())
    }

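    /// Creates a node with no substates of its own and then populates it by moving
    /// the given partitions out of existing nodes (surfaced to the system layer as
    /// `MoveModuleEvent`s).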
    #[trace_resources]
    fn kernel_create_node_from(
        &mut self,
        node_id: NodeId,
        partitions: BTreeMap<PartitionNumber, (NodeId, PartitionNumber)>,
    ) -> Result<(), RuntimeError> {
        {
            let node_substates = NodeSubstates::new();
            let mut read_only = as_read_only!(self);
            M::on_create_node(
                CreateNodeEvent::Start(&node_id, &node_substates),
                &mut read_only,
            )?;

            let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

            let mut handler = KernelHandler {
                callback: self.callback,
                prev_frame,
                on_io_access: |api, io_access| {
                    M::on_create_node(CreateNodeEvent::IOAccess(&io_access), api)
                },
            };

            cur_frame
                .create_node(
                    &mut self.substate_io,
                    &mut handler,
                    node_id,
                    NodeSubstates::new(),
                )
                .map_err(|e| match e {
                    CallbackError::Error(e) => RuntimeError::KernelError(
                        KernelError::CallFrameError(CallFrameError::CreateNodeError(e)),
                    ),
                    CallbackError::CallbackError(e) => e,
                })?;

            let mut read_only = as_read_only!(self);
            M::on_create_node(CreateNodeEvent::End(&node_id), &mut read_only)?;
        }

        {
            let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

            let mut handler = KernelHandler {
                callback: self.callback,
                prev_frame,
                on_io_access: |api, io_access| {
                    M::on_move_module(MoveModuleEvent::IOAccess(&io_access), api)
                },
            };

            for (dest_partition_number, (src_node_id, src_partition_number)) in partitions {
                cur_frame
                    .move_partition(
                        &mut self.substate_io,
                        &mut handler,
                        &src_node_id,
                        src_partition_number,
                        &node_id,
                        dest_partition_number,
                    )
                    .map_err(|e| match e {
                        CallbackError::Error(e) => RuntimeError::KernelError(
                            KernelError::CallFrameError(CallFrameError::MovePartitionError(e)),
                        ),
                        CallbackError::CallbackError(e) => e,
                    })?;
            }
        }

        Ok(())
    }

    #[trace_resources]
    fn kernel_drop_node(&mut self, node_id: &NodeId) -> Result<DroppedNode, RuntimeError> {
        let mut read_only = as_read_only!(self);
        M::on_drop_node(DropNodeEvent::Start(node_id), &mut read_only)?;

        M::on_drop_node_mut(node_id, self)?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_drop_node(DropNodeEvent::IOAccess(&io_access), api)
            },
        };
        let dropped_node = cur_frame
            .drop_node(&mut self.substate_io, node_id, &mut handler)
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::DropNodeError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        let mut read_only = as_read_only!(self);
        M::on_drop_node(
            DropNodeEvent::End(node_id, &dropped_node.substates),
            &mut read_only,
        )?;

        Ok(dropped_node)
    }
}

// TODO: Remove
impl<'g, M: KernelCallbackObject, S: CommitableSubstateStore> KernelInternalApi
    for Kernel<'g, M, S>
{
    type System = M;

    fn kernel_get_node_visibility_uncosted(&self, node_id: &NodeId) -> NodeVisibility {
        self.stacks.current_frame().get_node_visibility(node_id)
    }

    fn kernel_get_current_stack_depth_uncosted(&self) -> usize {
        self.stacks.current_frame().depth()
    }

    fn kernel_get_current_stack_id_uncosted(&self) -> usize {
        self.stacks.current_stack_index
    }

    fn kernel_get_system_state(&mut self) -> SystemState<'_, M> {
        let (cur, prev) = self.stacks.current_frame_and_previous_frame();
        let caller_actor = match prev {
            Some(call_frame) => call_frame.data(),
            None => {
                // This will only occur on initialization
                cur.data()
            }
        };
        SystemState {
            system: self.callback,
            current_call_frame: cur.data(),
            caller_call_frame: caller_actor,
        }
    }

    fn kernel_read_substate_uncosted(
        &self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        substate_key: &SubstateKey,
    ) -> Option<&IndexedScryptoValue> {
        self.substate_io
            .heap
            .get_substate(node_id, partition_num, substate_key)
    }
}

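/// A read-only view of kernel state (current and previous frame, heap, and the
/// system-layer callback) handed to hooks that must not mutate call frames or
/// substates.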
struct KernelReadOnly<'g, M>
where
    M: KernelCallbackObject,
{
    current_frame: &'g CallFrame<M::CallFrameData, M::LockData>,
    previous_frame: Option<&'g CallFrame<M::CallFrameData, M::LockData>>,
    heap: &'g Heap,
    callback: &'g mut M,
}

impl<'g, M: KernelCallbackObject> KernelInternalApi for KernelReadOnly<'g, M> {
    type System = M;

    fn kernel_get_node_visibility_uncosted(&self, node_id: &NodeId) -> NodeVisibility {
        self.current_frame.get_node_visibility(node_id)
    }

    fn kernel_get_current_stack_depth_uncosted(&self) -> usize {
        self.current_frame.depth()
    }

    fn kernel_get_current_stack_id_uncosted(&self) -> usize {
        self.current_frame.stack_id()
    }

    fn kernel_get_system_state(&mut self) -> SystemState<'_, M> {
        let caller_call_frame = match self.previous_frame {
            Some(call_frame) => call_frame.data(),
            None => {
                // This will only occur on initialization
                self.current_frame.data()
            }
        };
        SystemState {
            system: self.callback,
            current_call_frame: self.current_frame.data(),
            caller_call_frame,
        }
    }

    fn kernel_read_substate_uncosted(
        &self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        substate_key: &SubstateKey,
    ) -> Option<&IndexedScryptoValue> {
        self.heap.get_substate(node_id, partition_num, substate_key)
    }
}

impl<'g, M, S> KernelSubstateApi<M::LockData> for Kernel<'g, M, S>
where
    M: KernelCallbackObject,
    S: CommitableSubstateStore,
{
    #[trace_resources]
    fn kernel_mark_substate_as_transient(
        &mut self,
        node_id: NodeId,
        partition_num: PartitionNumber,
        key: SubstateKey,
    ) -> Result<(), RuntimeError> {
        M::on_mark_substate_as_transient(&node_id, &partition_num, &key, &mut as_read_only!(self))?;

        self.stacks
            .current_frame_mut()
            .mark_substate_as_transient(&mut self.substate_io, node_id, partition_num, key)
            .map_err(|e| {
                RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::MarkTransientSubstateError(e),
                ))
            })
    }

    #[trace_resources]
    fn kernel_open_substate_with_default<F: FnOnce() -> IndexedScryptoValue>(
        &mut self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        substate_key: &SubstateKey,
        flags: LockFlags,
        default: Option<F>,
        data: M::LockData,
    ) -> Result<SubstateHandle, RuntimeError> {
        M::on_open_substate(
            OpenSubstateEvent::Start {
                node_id,
                partition_num: &partition_num,
                substate_key,
                flags: &flags,
            },
            &mut as_read_only!(self),
        )?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_open_substate(OpenSubstateEvent::IOAccess(&io_access), api)
            },
        };

        let maybe_lock_handle = cur_frame.open_substate(
            &mut self.substate_io,
            node_id,
            partition_num,
            substate_key,
            flags,
            default,
            data,
            &mut handler,
        );

        let (lock_handle, value_size): (u32, usize) = match &maybe_lock_handle {
            Ok((lock_handle, value_size)) => (*lock_handle, *value_size),
            Err(CallbackError::CallbackError(e)) => return Err(e.clone()),
            Err(CallbackError::Error(OpenSubstateError::SubstateFault)) => {
                let retry = M::on_substate_lock_fault(*node_id, partition_num, substate_key, self)?;

                if retry {
                    let (cur_frame, prev_frame) =
                        self.stacks.mut_current_frame_and_previous_frame();

                    let mut handler = KernelHandler {
                        callback: self.callback,
                        prev_frame,
                        on_io_access: |api, io_access| {
                            M::on_open_substate(OpenSubstateEvent::IOAccess(&io_access), api)
                        },
                    };

                    cur_frame
                        .open_substate(
                            &mut self.substate_io,
                            node_id,
                            partition_num,
                            substate_key,
                            flags,
                            None::<fn() -> IndexedScryptoValue>,
                            M::LockData::default(),
                            &mut handler,
                        )
                        .map_err(|e| match e {
                            CallbackError::Error(e) => RuntimeError::KernelError(
                                KernelError::CallFrameError(CallFrameError::OpenSubstateError(e)),
                            ),
                            CallbackError::CallbackError(e) => e,
                        })?
                } else {
                    return maybe_lock_handle
                        .map(|(lock_handle, _)| lock_handle)
                        .map_err(|e| match e {
                            CallbackError::Error(e) => RuntimeError::KernelError(
                                KernelError::CallFrameError(CallFrameError::OpenSubstateError(e)),
                            ),
                            CallbackError::CallbackError(e) => e,
                        });
                }
            }
            Err(err) => {
                let runtime_error = match err {
                    CallbackError::Error(e) => RuntimeError::KernelError(
                        KernelError::CallFrameError(CallFrameError::OpenSubstateError(e.clone())),
                    ),
                    CallbackError::CallbackError(e) => e.clone(),
                };
                return Err(runtime_error);
            }
        };

        let mut read_only = as_read_only!(self);
        M::on_open_substate(
            OpenSubstateEvent::End {
                handle: lock_handle,
                node_id,
                size: value_size,
            },
            &mut read_only,
        )?;

        Ok(lock_handle)
    }

    #[trace_resources]
    fn kernel_get_lock_data(
        &mut self,
        lock_handle: SubstateHandle,
    ) -> Result<M::LockData, RuntimeError> {
        self.stacks
            .current_frame()
            .get_handle_info(lock_handle)
            .ok_or(RuntimeError::KernelError(
                KernelError::SubstateHandleDoesNotExist(lock_handle),
            ))
    }

    #[trace_resources]
    fn kernel_read_substate(
        &mut self,
        lock_handle: SubstateHandle,
    ) -> Result<&IndexedScryptoValue, RuntimeError> {
        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();
        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_read_substate(ReadSubstateEvent::IOAccess(&io_access), api)
            },
        };

        let value = cur_frame
            .read_substate(&mut self.substate_io, lock_handle, &mut handler)
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::ReadSubstateError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        Ok(value)
    }

    #[trace_resources]
    fn kernel_write_substate(
        &mut self,
        lock_handle: SubstateHandle,
        value: IndexedScryptoValue,
    ) -> Result<(), RuntimeError> {
        let mut read_only = as_read_only!(self);
        M::on_write_substate(
            WriteSubstateEvent::Start {
                handle: lock_handle,
                value: &value,
            },
            &mut read_only,
        )?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_write_substate(WriteSubstateEvent::IOAccess(&io_access), api)
            },
        };

        cur_frame
            .write_substate(&mut self.substate_io, lock_handle, value, &mut handler)
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::WriteSubstateError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        Ok(())
    }

    #[trace_resources]
    fn kernel_close_substate(&mut self, lock_handle: SubstateHandle) -> Result<(), RuntimeError> {
        // Note: It is very important that this occurs before the actual call to close_substate
        // as we want to check limits/costing before doing the actual action. Otherwise,
        // certain invariants might break such as a costing error occurring after a vault
        // lock_fee has been force committed.
        let mut read_only = as_read_only!(self);
        M::on_close_substate(CloseSubstateEvent::Start(lock_handle), &mut read_only)?;

        self.stacks
            .current_frame_mut()
            .close_substate(&mut self.substate_io, lock_handle)
            .map_err(|e| {
                RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::CloseSubstateError(e),
                ))
            })?;

        Ok(())
    }

    #[trace_resources]
    fn kernel_set_substate(
        &mut self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        substate_key: SubstateKey,
        value: IndexedScryptoValue,
    ) -> Result<(), RuntimeError> {
        M::on_set_substate(
            SetSubstateEvent::Start(node_id, &partition_num, &substate_key, &value),
            &mut as_read_only!(self),
        )?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_set_substate(SetSubstateEvent::IOAccess(&io_access), api)
            },
        };

        cur_frame
            .set_substate(
                &mut self.substate_io,
                node_id,
                partition_num,
                substate_key,
                value,
                &mut handler,
            )
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::SetSubstatesError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        Ok(())
    }

    #[trace_resources]
    fn kernel_remove_substate(
        &mut self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        substate_key: &SubstateKey,
    ) -> Result<Option<IndexedScryptoValue>, RuntimeError> {
        M::on_remove_substate(
            RemoveSubstateEvent::Start(node_id, &partition_num, substate_key),
            &mut as_read_only!(self),
        )?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_remove_substate(RemoveSubstateEvent::IOAccess(&io_access), api)
            },
        };

        let substate = cur_frame
            .remove_substate(
                &mut self.substate_io,
                node_id,
                partition_num,
                substate_key,
                &mut handler,
            )
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::RemoveSubstatesError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        Ok(substate)
    }

    #[trace_resources]
    fn kernel_scan_keys<K: SubstateKeyContent>(
        &mut self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        limit: u32,
    ) -> Result<Vec<SubstateKey>, RuntimeError> {
        M::on_scan_keys(ScanKeysEvent::Start, &mut as_read_only!(self))?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_scan_keys(ScanKeysEvent::IOAccess(&io_access), api)
            },
        };

        let keys = cur_frame
            .scan_keys::<K, _, _>(
                &mut self.substate_io,
                node_id,
                partition_num,
                limit,
                &mut handler,
            )
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::ScanSubstatesError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        Ok(keys)
    }

    #[trace_resources(log=limit)]
    fn kernel_drain_substates<K: SubstateKeyContent>(
        &mut self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        limit: u32,
    ) -> Result<Vec<(SubstateKey, IndexedScryptoValue)>, RuntimeError> {
        M::on_drain_substates(DrainSubstatesEvent::Start(limit), &mut as_read_only!(self))?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_drain_substates(DrainSubstatesEvent::IOAccess(&io_access), api)
            },
        };

        let substates = cur_frame
            .drain_substates::<K, _, _>(
                &mut self.substate_io,
                node_id,
                partition_num,
                limit,
                &mut handler,
            )
            .map_err(|e| match e {
                CallbackError::CallbackError(e) => e,
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::DrainSubstatesError(e),
                )),
            })?;

        Ok(substates)
    }

    #[trace_resources]
    fn kernel_scan_sorted_substates(
        &mut self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        limit: u32,
    ) -> Result<Vec<(SortedKey, IndexedScryptoValue)>, RuntimeError> {
        M::on_scan_sorted_substates(ScanSortedSubstatesEvent::Start, &mut as_read_only!(self))?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_scan_sorted_substates(ScanSortedSubstatesEvent::IOAccess(&io_access), api)
            },
        };

        let substates = cur_frame
            .scan_sorted(
                &mut self.substate_io,
                node_id,
                partition_num,
                limit,
                &mut handler,
            )
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::ScanSortedSubstatesError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        Ok(substates)
    }
}

impl<'g, M, S> KernelInvokeApi<M::CallFrameData> for Kernel<'g, M, S>
where
    M: KernelCallbackObject,
    S: CommitableSubstateStore,
{
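    /// Invokes a callee in a new child call frame: notifies `before_invoke`, pushes
    /// the frame, executes upstream, passes the output message back to the parent
    /// frame, auto-drops any remaining owned nodes, pops the frame, and finally
    /// notifies `after_invoke`.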
    #[trace_resources]
    fn kernel_invoke(
        &mut self,
        invocation: Box<KernelInvocation<M::CallFrameData>>,
    ) -> Result<IndexedScryptoValue, RuntimeError> {
        M::before_invoke(invocation.as_ref(), self)?;

        // Before push call frame
        let callee = invocation.call_frame_data;
        let args = &invocation.args;
        let message = CallFrameMessage::from_input(args, &callee);

        // Push call frame
        {
            let frame = CallFrame::new_child_from_parent(
                &self.substate_io,
                self.stacks.current_frame_mut(),
                callee,
                message,
            )
            .map_err(CallFrameError::CreateFrameError)
            .map_err(KernelError::CallFrameError)?;

            self.stacks.push_frame(frame);
        }

        // Execute
        let (output, message) = {
            // Handle execution start
            M::on_execution_start(self)?;

            // Auto drop locks
            for handle in self.stacks.current_frame().open_substates() {
                M::on_close_substate(CloseSubstateEvent::Start(handle), self)?;
            }
            self.stacks
                .current_frame_mut()
                .close_all_substates(&mut self.substate_io);

            // Run
            let output = M::invoke_upstream(args, self)?;
            let message = CallFrameMessage::from_output(&output);

            // Auto-drop locks again in case module forgot to drop
            for handle in self.stacks.current_frame().open_substates() {
                M::on_close_substate(CloseSubstateEvent::Start(handle), self)?;
            }
            self.stacks
                .current_frame_mut()
                .close_all_substates(&mut self.substate_io);

            // Handle execution finish
            M::on_execution_finish(&message, self)?;

            (output, message)
        };

        // Move
        {
            let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_mut_previous_frame();

            // Move resource
            CallFrame::pass_message(
                &self.substate_io,
                cur_frame,
                prev_frame.unwrap(),
                message.clone(),
            )
            .map_err(CallFrameError::PassMessageError)
            .map_err(KernelError::CallFrameError)?;

            // Auto-drop
            let owned_nodes = cur_frame.owned_nodes();
            M::auto_drop(owned_nodes, self)?;

            // Now, check whether any owned nodes have been left behind
            let owned_nodes = self.stacks.current_frame().owned_nodes();
            if !owned_nodes.is_empty() {
                return Err(RuntimeError::KernelError(KernelError::OrphanedNodes(
                    owned_nodes.into_iter().map(|n| n.into()).collect(),
                )));
            }
        }

        // Pop call frame
        self.stacks.pop_frame();

        M::after_invoke(&output, self)?;

        Ok(output)
    }
}

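// Stack-level API: lets the system layer query the active stack, switch between
// stacks, pass values across stacks, and read or replace the current frame's data.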
impl<'g, M: KernelCallbackObject, S: CommitableSubstateStore> KernelStackApi for Kernel<'g, M, S> {
    type CallFrameData = M::CallFrameData;

    fn kernel_get_stack_id(&mut self) -> Result<usize, RuntimeError> {
        M::on_get_stack_id(&mut as_read_only!(self))?;

        Ok(self.stacks.current_stack_index)
    }

    fn kernel_switch_stack(&mut self, other_stack_index: usize) -> Result<(), RuntimeError> {
        M::on_switch_stack(&mut as_read_only!(self))?;

        self.stacks.switch_stack(other_stack_index)?;
        Ok(())
    }

    fn kernel_send_to_stack(
        &mut self,
        other_stack_index: usize,
        value: &IndexedScryptoValue,
    ) -> Result<(), RuntimeError> {
        M::on_send_to_stack(value, &mut as_read_only!(self))?;

        let message = CallFrameMessage::from_output(value);

        let (cur, other) = self
            .stacks
            .current_frame_mut_in_this_and_other_stack(other_stack_index);

        CallFrame::pass_message(&self.substate_io, cur, other, message)
            .map_err(CallFrameError::PassMessageError)
            .map_err(KernelError::CallFrameError)?;

        Ok(())
    }

    fn kernel_set_call_frame_data(&mut self, data: M::CallFrameData) -> Result<(), RuntimeError> {
        M::on_set_call_frame_data(&data, &mut as_read_only!(self))?;

        *self.stacks.current_frame_mut().data_mut() = data;
        Ok(())
    }

    fn kernel_get_owned_nodes(&mut self) -> Result<Vec<NodeId>, RuntimeError> {
        M::on_get_owned_nodes(&mut as_read_only!(self))?;

        Ok(self.stacks.current_frame().owned_nodes())
    }
}

impl<'g, M: KernelCallbackObject, S: CommitableSubstateStore> KernelApi for Kernel<'g, M, S> {
    type CallbackObject = M;
}

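// Test-only constructors and accessors exposing kernel internals to integration tests.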
#[cfg(feature = "radix_engine_tests")]
impl<'g, M, S> Kernel<'g, M, S>
where
    M: KernelCallbackObject<CallFrameData: Default>,
    S: CommitableSubstateStore,
{
    pub fn kernel_create_kernel_for_testing(
        substate_io: SubstateIO<'g, S>,
        id_allocator: &'g mut IdAllocator,
        callback: &'g mut M,
        always_visible_global_nodes: &'static IndexSet<NodeId>,
    ) -> Kernel<'g, M, S> {
        Self {
            stacks: KernelStacks::new(vec![CallFrameInit {
                data: M::CallFrameData::default(),
                direct_accesses: Default::default(),
                global_addresses: Default::default(),
                always_visible_global_nodes,
                stack_id: 0,
            }]),
            substate_io,
            id_allocator,
            callback,
        }
    }
}

#[cfg(feature = "radix_engine_tests")]
impl<'g, M: KernelCallbackObject, S: CommitableSubstateStore> Kernel<'g, M, S> {
    pub fn kernel_current_frame(
        &self,
    ) -> &CallFrame<<M as KernelCallbackObject>::CallFrameData, <M as KernelCallbackObject>::LockData>
    {
        self.stacks.current_frame()
    }

    pub fn kernel_current_frame_mut(
        &mut self,
    ) -> (
        &SubstateIO<'_, S>,
        &mut CallFrame<
            <M as KernelCallbackObject>::CallFrameData,
            <M as KernelCallbackObject>::LockData,
        >,
    ) {
        (&self.substate_io, self.stacks.current_frame_mut())
    }

    pub fn kernel_prev_frame_stack_mut(
        &mut self,
    ) -> &mut Vec<
        CallFrame<
            <M as KernelCallbackObject>::CallFrameData,
            <M as KernelCallbackObject>::LockData,
        >,
    > {
        self.stacks.previous_frames_mut()
    }

    pub fn kernel_substate_io(&self) -> &SubstateIO<'g, S> {
        &self.substate_io
    }

    pub fn kernel_substate_io_mut(&mut self) -> &mut SubstateIO<'g, S> {
        &mut self.substate_io
    }

    pub fn kernel_id_allocator(&self) -> &IdAllocator {
        self.id_allocator
    }

    pub fn kernel_id_allocator_mut(&mut self) -> &mut &'g mut IdAllocator {
        &mut self.id_allocator
    }

    pub fn kernel_callback(&self) -> &M {
        self.callback
    }

    pub fn kernel_callback_mut(&mut self) -> &mut M {
        self.callback
    }
}
1399}