// radix_engine/kernel/kernel.rs

use super::heap::Heap;
use super::id_allocator::IdAllocator;
use crate::errors::*;
use crate::internal_prelude::*;
use crate::kernel::call_frame::*;
use crate::kernel::kernel_api::*;
use crate::kernel::kernel_callback_api::*;
use crate::kernel::substate_io::{SubstateDevice, SubstateIO};
use crate::kernel::substate_locks::SubstateLocks;
use crate::track::interface::*;
use crate::track::Track;
use radix_engine_interface::api::field_api::LockFlags;
use radix_engine_profiling_derive::trace_resources;
use radix_substate_store_interface::db_key_mapper::SubstateKeyContent;
use radix_substate_store_interface::interface::SubstateDatabase;
use sbor::rust::mem;

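/// Builds a [`KernelReadOnly`] view over the kernel's current and previous
/// call frames, heap, and system callback, without taking ownership of them.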
macro_rules! as_read_only {
    ($kernel:expr) => {{
        let (current_frame, previous_frame) = $kernel.stacks.current_frame_and_previous_frame();
        KernelReadOnly {
            current_frame,
            previous_frame,
            heap: &$kernel.substate_io.heap,
            callback: $kernel.callback,
        }
    }};
}

pub type KernelBootSubstate = KernelBoot;

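/// Versioned kernel boot configuration, stored as a boot-loader substate; its
/// version determines which set of global nodes is always visible to call frames.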
#[derive(Debug, Clone, PartialEq, Eq, Sbor, ScryptoSborAssertion)]
#[sbor_assert(backwards_compatible(
    cuttlefish = "FILE:kernel_boot_substate_cuttlefish_schema.bin",
))]
pub enum KernelBoot {
    V1,
    V2 {
        global_nodes_version: AlwaysVisibleGlobalNodesVersion,
    },
}

impl KernelBoot {
    /// Loads kernel boot from the database, or resolves a fallback.
    pub fn load(substate_db: &impl SubstateDatabase) -> Self {
        substate_db
            .get_substate(
                TRANSACTION_TRACKER,
                BOOT_LOADER_PARTITION,
                BootLoaderField::KernelBoot,
            )
            .unwrap_or_else(|| KernelBoot::babylon())
    }

    pub fn babylon() -> Self {
        Self::V1
    }

    pub fn cuttlefish() -> Self {
        Self::V2 {
            global_nodes_version: AlwaysVisibleGlobalNodesVersion::V2,
        }
    }

    pub fn always_visible_global_nodes_version(&self) -> AlwaysVisibleGlobalNodesVersion {
        match self {
            KernelBoot::V1 => AlwaysVisibleGlobalNodesVersion::V1,
            KernelBoot::V2 {
                global_nodes_version,
                ..
            } => *global_nodes_version,
        }
    }

    pub fn always_visible_global_nodes(&self) -> &'static IndexSet<NodeId> {
        always_visible_global_nodes(self.always_visible_global_nodes_version())
    }
}

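/// Entry point for transaction execution: bundles the substate database, the
/// loaded [`KernelBoot`] configuration, and the system-layer init parameters.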
pub struct KernelInit<
    's,
    S: SubstateDatabase,
    I: InitializationParameters<For: KernelTransactionExecutor<Init = I>>,
> {
    substate_db: &'s S,
    kernel_boot: KernelBoot,
    callback_init: I,
}

impl<
        's,
        S: SubstateDatabase,
        I: InitializationParameters<For: KernelTransactionExecutor<Init = I>>,
    > KernelInit<'s, S, I>
{
    pub fn load(substate_db: &'s S, callback_init: I) -> Self {
        let kernel_boot = KernelBoot::load(substate_db);
        Self {
            substate_db,
            kernel_boot,
            callback_init,
        }
    }

    /// Executes a transaction
    pub fn execute(
        self,
        executable: &<I::For as KernelTransactionExecutor>::Executable,
    ) -> <I::For as KernelTransactionExecutor>::Receipt {
        let boot_loader = BootLoader {
            id_allocator: IdAllocator::new(executable.unique_seed_for_id_allocator()),
            track: Track::new(self.substate_db),
        };

        #[cfg(not(all(target_os = "linux", feature = "std", feature = "cpu_ram_metrics")))]
        {
            boot_loader.execute::<I::For>(self.kernel_boot, self.callback_init, executable)
        }

        #[cfg(all(target_os = "linux", feature = "std", feature = "cpu_ram_metrics"))]
        {
            use crate::kernel::resources_tracker::ResourcesTracker;

            let mut resources_tracker = ResourcesTracker::start_measurement();
            let mut receipt =
                boot_loader.execute::<I::For>(self.kernel_boot, self.callback_init, executable);
            receipt.set_resource_usage(resources_tracker.end_measurement());
            receipt
        }
    }
}

/// Organizes the radix engine stack to make a function entrypoint available for execution
pub struct BootLoader<'h, S: SubstateDatabase> {
    id_allocator: IdAllocator,
    track: Track<'h, S>,
}

impl<'h, S: SubstateDatabase> BootLoader<'h, S> {
    fn execute<E: KernelTransactionExecutor>(
        mut self,
        kernel_boot: KernelBoot,
        callback_init: E::Init,
        executable: &E::Executable,
    ) -> E::Receipt {
        #[cfg(feature = "resource_tracker")]
        radix_engine_profiling::QEMU_PLUGIN_CALIBRATOR.with(|v| {
            v.borrow_mut();
        });

        // Upper Layer Initialization
        let system_init_result = E::init(
            &mut self.track,
            &executable,
            callback_init,
            kernel_boot.always_visible_global_nodes(),
        );

        let (mut system, call_frame_inits) = match system_init_result {
            Ok(success) => success,
            Err(receipt) => return receipt,
        };

        // Kernel Initialization
        let mut kernel = Kernel::new(
            &mut self.track,
            &mut self.id_allocator,
            &mut system,
            call_frame_inits,
        );

        // Execution
        let result = || -> Result<E::ExecutionOutput, RuntimeError> {
            // Invoke transaction processor
            let output = E::execute(&mut kernel, executable)?;

            // Sanity check call frames: every stack should be back to its root frame
            for stack in &kernel.stacks.stacks {
                assert!(stack.prev_frames.is_empty());
            }

            // Sanity check heap
            assert!(kernel.substate_io.heap.is_empty());

            // Finalize state updates based on what has occurred
            let commit_info = kernel.substate_io.store.get_commit_info();
            kernel.callback.finalize(executable, commit_info)?;

            Ok(output)
        }()
        .map_err(|e| TransactionExecutionError::RuntimeError(e));

        // Create receipt representing the result of a transaction
        system.create_receipt(self.track, result)
    }
}

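/// A single call frame stack: the frame currently executing plus the chain of
/// parent frames beneath it.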
pub struct KernelStack<M: KernelCallbackObject> {
    current_frame: CallFrame<M::CallFrameData, M::LockData>,
    prev_frames: Vec<CallFrame<M::CallFrameData, M::LockData>>,
}

impl<M: KernelCallbackObject> KernelStack<M> {
    pub fn new(init: CallFrameInit<M::CallFrameData>) -> Self {
        Self {
            current_frame: CallFrame::new_root(init),
            prev_frames: vec![],
        }
    }
}

/// The kernel manages multiple call frame stacks. There will always be a single
/// "current" stack (and call frame) in context.
pub struct KernelStacks<M: KernelCallbackObject> {
    current_stack_index: usize,
    stacks: Vec<KernelStack<M>>,
}

impl<M: KernelCallbackObject> KernelStacks<M> {
    pub fn new(call_frames: Vec<CallFrameInit<M::CallFrameData>>) -> Self {
        let stacks = call_frames
            .into_iter()
            .map(|call_frame| KernelStack::new(call_frame))
            .collect();
        Self {
            current_stack_index: 0usize,
            stacks,
        }
    }

    fn current_stack_mut(&mut self) -> &mut KernelStack<M> {
        self.stacks.get_mut(self.current_stack_index).unwrap()
    }

    fn current_stack(&self) -> &KernelStack<M> {
        self.stacks.get(self.current_stack_index).unwrap()
    }

    /// Pushes a new call frame on the current stack
    pub fn push_frame(&mut self, frame: CallFrame<M::CallFrameData, M::LockData>) {
        let stack = self.current_stack_mut();
        let parent = mem::replace(&mut stack.current_frame, frame);
        stack.prev_frames.push(parent);
    }

    /// Pops the current call frame off the current stack, restoring its parent
    pub fn pop_frame(&mut self) {
        let stack = self.current_stack_mut();
        let parent = stack.prev_frames.pop().unwrap();
        let _ = core::mem::replace(&mut stack.current_frame, parent);
    }

    /// Switches the current stack
    pub fn switch_stack(&mut self, stack_index: usize) -> Result<(), RuntimeError> {
        if stack_index >= self.stacks.len() {
            return Err(RuntimeError::KernelError(KernelError::StackError(
                StackError::InvalidStackId,
            )));
        }
        self.current_stack_index = stack_index;

        Ok(())
    }

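    /// Returns mutable references to the current frame of the active stack and
    /// the current frame of `other_stack`; the two stack indices must differ.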
    pub fn current_frame_mut_in_this_and_other_stack(
        &mut self,
        other_stack: usize,
    ) -> (
        &mut CallFrame<M::CallFrameData, M::LockData>,
        &mut CallFrame<M::CallFrameData, M::LockData>,
    ) {
        let mut mut_stacks: Vec<_> = self
            .stacks
            .iter_mut()
            .enumerate()
            .filter(|(id, _)| (*id).eq(&self.current_stack_index) || (*id).eq(&other_stack))
            .map(|stack| Some(stack))
            .collect();

        let (id0, stack0) = mut_stacks[0].take().unwrap();
        let (_id1, stack1) = mut_stacks[1].take().unwrap();
        if id0.eq(&self.current_stack_index) {
            (&mut stack0.current_frame, &mut stack1.current_frame)
        } else {
            (&mut stack1.current_frame, &mut stack0.current_frame)
        }
    }

    pub fn current_frame_and_previous_frame(
        &self,
    ) -> (
        &CallFrame<M::CallFrameData, M::LockData>,
        Option<&CallFrame<M::CallFrameData, M::LockData>>,
    ) {
        let stack = self.current_stack();
        (&stack.current_frame, stack.prev_frames.last())
    }

    pub fn mut_current_frame_and_previous_frame(
        &mut self,
    ) -> (
        &mut CallFrame<M::CallFrameData, M::LockData>,
        Option<&CallFrame<M::CallFrameData, M::LockData>>,
    ) {
        let stack = self.current_stack_mut();
        (&mut stack.current_frame, stack.prev_frames.last())
    }

    pub fn mut_current_frame_and_mut_previous_frame(
        &mut self,
    ) -> (
        &mut CallFrame<M::CallFrameData, M::LockData>,
        Option<&mut CallFrame<M::CallFrameData, M::LockData>>,
    ) {
        let stack = self.current_stack_mut();
        (&mut stack.current_frame, stack.prev_frames.last_mut())
    }

    pub fn current_frame(&self) -> &CallFrame<M::CallFrameData, M::LockData> {
        &self.current_stack().current_frame
    }

    pub fn current_frame_mut(&mut self) -> &mut CallFrame<M::CallFrameData, M::LockData> {
        &mut self.current_stack_mut().current_frame
    }

    #[cfg(feature = "radix_engine_tests")]
    pub fn previous_frames_mut(&mut self) -> &mut Vec<CallFrame<M::CallFrameData, M::LockData>> {
        &mut self.current_stack_mut().prev_frames
    }
}

pub struct Kernel<
    'g, // Lifetime of values outliving all frames
    M,  // Upstream System layer
    S,  // Substate store
> where
    M: KernelCallbackObject,
    S: CommitableSubstateStore,
{
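    /// Call frame stacks managed by this kernel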
    stacks: KernelStacks<M>,

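    /// Substate I/O layer over the heap and the commitable substate store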
    substate_io: SubstateIO<'g, S>,

    /// ID allocator
    id_allocator: &'g mut IdAllocator,

    /// Upper system layer
    callback: &'g mut M,
}

#[cfg(feature = "radix_engine_tests")]
impl<'g, M: KernelCallbackObject<CallFrameData: Default>, S: CommitableSubstateStore>
    Kernel<'g, M, S>
{
    pub fn new_no_refs(
        store: &'g mut S,
        id_allocator: &'g mut IdAllocator,
        callback: &'g mut M,
    ) -> Self {
        Self::new(
            store,
            id_allocator,
            callback,
            vec![CallFrameInit {
                data: M::CallFrameData::default(),
                direct_accesses: Default::default(),
                global_addresses: Default::default(),
                always_visible_global_nodes: always_visible_global_nodes(
                    AlwaysVisibleGlobalNodesVersion::latest(),
                ),
                stack_id: 0,
            }],
        )
    }
}

impl<'g, M: KernelCallbackObject, S: CommitableSubstateStore> Kernel<'g, M, S> {
    pub fn new(
        store: &'g mut S,
        id_allocator: &'g mut IdAllocator,
        callback: &'g mut M,
        call_frame_inits: Vec<CallFrameInit<M::CallFrameData>>,
    ) -> Self {
        Kernel {
            stacks: KernelStacks::new(call_frame_inits),
            substate_io: SubstateIO {
                heap: Heap::new(),
                store,
                non_global_node_refs: NonGlobalNodeRefs::new(),
                substate_locks: SubstateLocks::new(),
                heap_transient_substates: TransientSubstates::new(),
                pinned_to_heap: BTreeSet::new(),
            },
            id_allocator,
            callback,
        }
    }
}

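/// Adapter that forwards call-frame I/O access and substate read events to the
/// system callback through a [`KernelReadOnly`] view.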
struct KernelHandler<
    'a,
    M: KernelCallbackObject,
    F: FnMut(&mut KernelReadOnly<M>, IOAccess) -> Result<(), RuntimeError>,
> {
    callback: &'a mut M,
    prev_frame: Option<&'a CallFrame<M::CallFrameData, M::LockData>>,
    on_io_access: F,
}

impl<
        M: KernelCallbackObject,
        F: FnMut(&mut KernelReadOnly<M>, IOAccess) -> Result<(), RuntimeError>,
    > CallFrameIOAccessHandler<M::CallFrameData, M::LockData, RuntimeError>
    for KernelHandler<'_, M, F>
{
    fn on_io_access(
        &mut self,
        current_frame: &CallFrame<M::CallFrameData, M::LockData>,
        heap: &Heap,
        io_access: IOAccess,
    ) -> Result<(), RuntimeError> {
        let mut read_only = KernelReadOnly {
            current_frame,
            previous_frame: self.prev_frame,
            heap,
            callback: self.callback,
        };

        (self.on_io_access)(&mut read_only, io_access)
    }
}

impl<
        M: KernelCallbackObject,
        F: FnMut(&mut KernelReadOnly<M>, IOAccess) -> Result<(), RuntimeError>,
    > CallFrameSubstateReadHandler<M::CallFrameData, M::LockData> for KernelHandler<'_, M, F>
{
    type Error = RuntimeError;
    fn on_read_substate(
        &mut self,
        current_frame: &CallFrame<M::CallFrameData, M::LockData>,
        heap: &Heap,
        handle: SubstateHandle,
        value: &IndexedScryptoValue,
        device: SubstateDevice,
    ) -> Result<(), Self::Error> {
        let mut read_only = KernelReadOnly {
            current_frame,
            previous_frame: self.prev_frame,
            heap,
            callback: self.callback,
        };

        M::on_read_substate(
            ReadSubstateEvent::OnRead {
                handle,
                value,
                device,
            },
            &mut read_only,
        )
    }
}

impl<'g, M, S> KernelNodeApi for Kernel<'g, M, S>
where
    M: KernelCallbackObject,
    S: CommitableSubstateStore,
{
    #[trace_resources]
    fn kernel_pin_node(&mut self, node_id: NodeId) -> Result<(), RuntimeError> {
        M::on_pin_node(&node_id, &mut as_read_only!(self))?;

        self.stacks
            .current_frame_mut()
            .pin_node(&mut self.substate_io, node_id)
            .map_err(|e| {
                RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::PinNodeError(e),
                ))
            })
    }

    #[trace_resources]
    fn kernel_allocate_node_id(&mut self, entity_type: EntityType) -> Result<NodeId, RuntimeError> {
        M::on_allocate_node_id(entity_type, self)?;

        self.id_allocator.allocate_node_id(entity_type)
    }

    #[trace_resources]
    fn kernel_create_node(
        &mut self,
        node_id: NodeId,
        node_substates: NodeSubstates,
    ) -> Result<(), RuntimeError> {
        let mut read_only = as_read_only!(self);
        M::on_create_node(
            CreateNodeEvent::Start(&node_id, &node_substates),
            &mut read_only,
        )?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_create_node(CreateNodeEvent::IOAccess(&io_access), api)
            },
        };

        cur_frame
            .create_node(&mut self.substate_io, &mut handler, node_id, node_substates)
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::CreateNodeError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        let mut read_only = as_read_only!(self);
        M::on_create_node(CreateNodeEvent::End(&node_id), &mut read_only)?;

        Ok(())
    }

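    /// Creates a new node whose contents are assembled by moving partitions out
    /// of existing source nodes.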
    #[trace_resources]
    fn kernel_create_node_from(
        &mut self,
        node_id: NodeId,
        partitions: BTreeMap<PartitionNumber, (NodeId, PartitionNumber)>,
    ) -> Result<(), RuntimeError> {
        {
            let node_substates = NodeSubstates::new();
            let mut read_only = as_read_only!(self);
            M::on_create_node(
                CreateNodeEvent::Start(&node_id, &node_substates),
                &mut read_only,
            )?;

            let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

            let mut handler = KernelHandler {
                callback: self.callback,
                prev_frame,
                on_io_access: |api, io_access| {
                    M::on_create_node(CreateNodeEvent::IOAccess(&io_access), api)
                },
            };

            cur_frame
                .create_node(
                    &mut self.substate_io,
                    &mut handler,
                    node_id,
                    NodeSubstates::new(),
                )
                .map_err(|e| match e {
                    CallbackError::Error(e) => RuntimeError::KernelError(
                        KernelError::CallFrameError(CallFrameError::CreateNodeError(e)),
                    ),
                    CallbackError::CallbackError(e) => e,
                })?;

            let mut read_only = as_read_only!(self);
            M::on_create_node(CreateNodeEvent::End(&node_id), &mut read_only)?;
        }

        {
            let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

            let mut handler = KernelHandler {
                callback: self.callback,
                prev_frame,
                on_io_access: |api, io_access| {
                    M::on_move_module(MoveModuleEvent::IOAccess(&io_access), api)
                },
            };

            for (dest_partition_number, (src_node_id, src_partition_number)) in partitions {
                cur_frame
                    .move_partition(
                        &mut self.substate_io,
                        &mut handler,
                        &src_node_id,
                        src_partition_number,
                        &node_id,
                        dest_partition_number,
                    )
                    .map_err(|e| match e {
                        CallbackError::Error(e) => RuntimeError::KernelError(
                            KernelError::CallFrameError(CallFrameError::MovePartitionError(e)),
                        ),
                        CallbackError::CallbackError(e) => e,
                    })?;
            }
        }

        Ok(())
    }

    #[trace_resources]
    fn kernel_drop_node(&mut self, node_id: &NodeId) -> Result<DroppedNode, RuntimeError> {
        let mut read_only = as_read_only!(self);
        M::on_drop_node(DropNodeEvent::Start(node_id), &mut read_only)?;

        M::on_drop_node_mut(node_id, self)?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_drop_node(DropNodeEvent::IOAccess(&io_access), api)
            },
        };
        let dropped_node = cur_frame
            .drop_node(&mut self.substate_io, node_id, &mut handler)
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::DropNodeError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        let mut read_only = as_read_only!(self);
        M::on_drop_node(
            DropNodeEvent::End(node_id, &dropped_node.substates),
            &mut read_only,
        )?;

        Ok(dropped_node)
    }
}

// TODO: Remove
impl<'g, M: KernelCallbackObject, S: CommitableSubstateStore> KernelInternalApi
    for Kernel<'g, M, S>
{
    type System = M;

    fn kernel_get_node_visibility_uncosted(&self, node_id: &NodeId) -> NodeVisibility {
        self.stacks.current_frame().get_node_visibility(node_id)
    }

    fn kernel_get_current_stack_depth_uncosted(&self) -> usize {
        self.stacks.current_frame().depth()
    }

    fn kernel_get_current_stack_id_uncosted(&self) -> usize {
        self.stacks.current_stack_index
    }

    fn kernel_get_system_state(&mut self) -> SystemState<'_, M> {
        let (cur, prev) = self.stacks.current_frame_and_previous_frame();
        let caller_actor = match prev {
            Some(call_frame) => call_frame.data(),
            None => {
                // This will only occur on initialization
                cur.data()
            }
        };
        SystemState {
            system: &mut self.callback,
            current_call_frame: cur.data(),
            caller_call_frame: caller_actor,
        }
    }

    fn kernel_read_substate_uncosted(
        &self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        substate_key: &SubstateKey,
    ) -> Option<&IndexedScryptoValue> {
        self.substate_io
            .heap
            .get_substate(node_id, partition_num, substate_key)
    }
}

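/// A borrowed, read-only view of kernel state, used when invoking system
/// callbacks that must not mutate call frames or the heap.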
struct KernelReadOnly<'g, M>
where
    M: KernelCallbackObject,
{
    current_frame: &'g CallFrame<M::CallFrameData, M::LockData>,
    previous_frame: Option<&'g CallFrame<M::CallFrameData, M::LockData>>,
    heap: &'g Heap,
    callback: &'g mut M,
}

impl<'g, M: KernelCallbackObject> KernelInternalApi for KernelReadOnly<'g, M> {
    type System = M;

    fn kernel_get_node_visibility_uncosted(&self, node_id: &NodeId) -> NodeVisibility {
        self.current_frame.get_node_visibility(node_id)
    }

    fn kernel_get_current_stack_depth_uncosted(&self) -> usize {
        self.current_frame.depth()
    }

    fn kernel_get_current_stack_id_uncosted(&self) -> usize {
        self.current_frame.stack_id()
    }

    fn kernel_get_system_state(&mut self) -> SystemState<'_, M> {
        let caller_call_frame = match self.previous_frame {
            Some(call_frame) => call_frame.data(),
            None => {
                // This will only occur on initialization
                self.current_frame.data()
            }
        };
        SystemState {
            system: self.callback,
            current_call_frame: self.current_frame.data(),
            caller_call_frame,
        }
    }

    fn kernel_read_substate_uncosted(
        &self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        substate_key: &SubstateKey,
    ) -> Option<&IndexedScryptoValue> {
        self.heap.get_substate(node_id, partition_num, substate_key)
    }
}

impl<'g, M, S> KernelSubstateApi<M::LockData> for Kernel<'g, M, S>
where
    M: KernelCallbackObject,
    S: CommitableSubstateStore,
{
    #[trace_resources]
    fn kernel_mark_substate_as_transient(
        &mut self,
        node_id: NodeId,
        partition_num: PartitionNumber,
        key: SubstateKey,
    ) -> Result<(), RuntimeError> {
        M::on_mark_substate_as_transient(&node_id, &partition_num, &key, &mut as_read_only!(self))?;

        self.stacks
            .current_frame_mut()
            .mark_substate_as_transient(&mut self.substate_io, node_id, partition_num, key)
            .map_err(|e| {
                RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::MarkTransientSubstateError(e),
                ))
            })
    }

    #[trace_resources]
    fn kernel_open_substate_with_default<F: FnOnce() -> IndexedScryptoValue>(
        &mut self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        substate_key: &SubstateKey,
        flags: LockFlags,
        default: Option<F>,
        data: M::LockData,
    ) -> Result<SubstateHandle, RuntimeError> {
        M::on_open_substate(
            OpenSubstateEvent::Start {
                node_id: &node_id,
                partition_num: &partition_num,
                substate_key,
                flags: &flags,
            },
            &mut as_read_only!(self),
        )?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_open_substate(OpenSubstateEvent::IOAccess(&io_access), api)
            },
        };

        let maybe_lock_handle = cur_frame.open_substate(
            &mut self.substate_io,
            node_id,
            partition_num,
            substate_key,
            flags,
            default,
            data,
            &mut handler,
        );

        let (lock_handle, value_size): (u32, usize) = match &maybe_lock_handle {
            Ok((lock_handle, value_size)) => (*lock_handle, *value_size),
            Err(CallbackError::CallbackError(e)) => return Err(e.clone()),
            Err(CallbackError::Error(OpenSubstateError::SubstateFault)) => {
                let retry =
                    M::on_substate_lock_fault(*node_id, partition_num, &substate_key, self)?;

                if retry {
                    let (cur_frame, prev_frame) =
                        self.stacks.mut_current_frame_and_previous_frame();

                    let mut handler = KernelHandler {
                        callback: self.callback,
                        prev_frame,
                        on_io_access: |api, io_access| {
                            M::on_open_substate(OpenSubstateEvent::IOAccess(&io_access), api)
                        },
                    };

                    cur_frame
                        .open_substate(
                            &mut self.substate_io,
                            &node_id,
                            partition_num,
                            &substate_key,
                            flags,
                            None::<fn() -> IndexedScryptoValue>,
                            M::LockData::default(),
                            &mut handler,
                        )
                        .map_err(|e| match e {
                            CallbackError::Error(e) => RuntimeError::KernelError(
                                KernelError::CallFrameError(CallFrameError::OpenSubstateError(e)),
                            ),
                            CallbackError::CallbackError(e) => e,
                        })?
                } else {
                    return maybe_lock_handle
                        .map(|(lock_handle, _)| lock_handle)
                        .map_err(|e| match e {
                            CallbackError::Error(e) => RuntimeError::KernelError(
                                KernelError::CallFrameError(CallFrameError::OpenSubstateError(e)),
                            ),
                            CallbackError::CallbackError(e) => e,
                        });
                }
            }
            Err(err) => {
                let runtime_error = match err {
                    CallbackError::Error(e) => RuntimeError::KernelError(
                        KernelError::CallFrameError(CallFrameError::OpenSubstateError(e.clone())),
                    ),
                    CallbackError::CallbackError(e) => e.clone(),
                };
                return Err(runtime_error);
            }
        };

        let mut read_only = as_read_only!(self);
        M::on_open_substate(
            OpenSubstateEvent::End {
                handle: lock_handle,
                node_id: &node_id,
                size: value_size,
            },
            &mut read_only,
        )?;

        Ok(lock_handle)
    }

    #[trace_resources]
    fn kernel_get_lock_data(
        &mut self,
        lock_handle: SubstateHandle,
    ) -> Result<M::LockData, RuntimeError> {
        self.stacks
            .current_frame()
            .get_handle_info(lock_handle)
            .ok_or(RuntimeError::KernelError(
                KernelError::SubstateHandleDoesNotExist(lock_handle),
            ))
    }

    #[trace_resources]
    fn kernel_read_substate(
        &mut self,
        lock_handle: SubstateHandle,
    ) -> Result<&IndexedScryptoValue, RuntimeError> {
        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();
        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_read_substate(ReadSubstateEvent::IOAccess(&io_access), api)
            },
        };

        let value = cur_frame
            .read_substate(&mut self.substate_io, lock_handle, &mut handler)
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::ReadSubstateError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        Ok(value)
    }

    #[trace_resources]
    fn kernel_write_substate(
        &mut self,
        lock_handle: SubstateHandle,
        value: IndexedScryptoValue,
    ) -> Result<(), RuntimeError> {
        let mut read_only = as_read_only!(self);
        M::on_write_substate(
            WriteSubstateEvent::Start {
                handle: lock_handle,
                value: &value,
            },
            &mut read_only,
        )?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_write_substate(WriteSubstateEvent::IOAccess(&io_access), api)
            },
        };

        cur_frame
            .write_substate(&mut self.substate_io, lock_handle, value, &mut handler)
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::WriteSubstateError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        Ok(())
    }

    #[trace_resources]
    fn kernel_close_substate(&mut self, lock_handle: SubstateHandle) -> Result<(), RuntimeError> {
        // Note: It is very important that this occurs before the actual call to close_substate
        // as we want to check limits/costing before doing the actual action. Otherwise,
        // certain invariants might break such as a costing error occurring after a vault
        // lock_fee has been force committed.
        let mut read_only = as_read_only!(self);
        M::on_close_substate(CloseSubstateEvent::Start(lock_handle), &mut read_only)?;

        self.stacks
            .current_frame_mut()
            .close_substate(&mut self.substate_io, lock_handle)
            .map_err(|e| {
                RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::CloseSubstateError(e),
                ))
            })?;

        Ok(())
    }

    #[trace_resources]
    fn kernel_set_substate(
        &mut self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        substate_key: SubstateKey,
        value: IndexedScryptoValue,
    ) -> Result<(), RuntimeError> {
        M::on_set_substate(
            SetSubstateEvent::Start(node_id, &partition_num, &substate_key, &value),
            &mut as_read_only!(self),
        )?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_set_substate(SetSubstateEvent::IOAccess(&io_access), api)
            },
        };

        cur_frame
            .set_substate(
                &mut self.substate_io,
                node_id,
                partition_num,
                substate_key,
                value,
                &mut handler,
            )
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::SetSubstatesError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        Ok(())
    }

    #[trace_resources]
    fn kernel_remove_substate(
        &mut self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        substate_key: &SubstateKey,
    ) -> Result<Option<IndexedScryptoValue>, RuntimeError> {
        M::on_remove_substate(
            RemoveSubstateEvent::Start(node_id, &partition_num, substate_key),
            &mut as_read_only!(self),
        )?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_remove_substate(RemoveSubstateEvent::IOAccess(&io_access), api)
            },
        };

        let substate = cur_frame
            .remove_substate(
                &mut self.substate_io,
                node_id,
                partition_num,
                &substate_key,
                &mut handler,
            )
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::RemoveSubstatesError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        Ok(substate)
    }

    #[trace_resources]
    fn kernel_scan_keys<K: SubstateKeyContent>(
        &mut self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        limit: u32,
    ) -> Result<Vec<SubstateKey>, RuntimeError> {
        M::on_scan_keys(ScanKeysEvent::Start, &mut as_read_only!(self))?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_scan_keys(ScanKeysEvent::IOAccess(&io_access), api)
            },
        };

        let keys = cur_frame
            .scan_keys::<K, _, _>(
                &mut self.substate_io,
                node_id,
                partition_num,
                limit,
                &mut handler,
            )
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::ScanSubstatesError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        Ok(keys)
    }

    #[trace_resources(log=limit)]
    fn kernel_drain_substates<K: SubstateKeyContent>(
        &mut self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        limit: u32,
    ) -> Result<Vec<(SubstateKey, IndexedScryptoValue)>, RuntimeError> {
        M::on_drain_substates(DrainSubstatesEvent::Start(limit), &mut as_read_only!(self))?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_drain_substates(DrainSubstatesEvent::IOAccess(&io_access), api)
            },
        };

        let substates = cur_frame
            .drain_substates::<K, _, _>(
                &mut self.substate_io,
                node_id,
                partition_num,
                limit,
                &mut handler,
            )
            .map_err(|e| match e {
                CallbackError::CallbackError(e) => e,
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::DrainSubstatesError(e),
                )),
            })?;

        Ok(substates)
    }

    #[trace_resources]
    fn kernel_scan_sorted_substates(
        &mut self,
        node_id: &NodeId,
        partition_num: PartitionNumber,
        limit: u32,
    ) -> Result<Vec<(SortedKey, IndexedScryptoValue)>, RuntimeError> {
        M::on_scan_sorted_substates(ScanSortedSubstatesEvent::Start, &mut as_read_only!(self))?;

        let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_previous_frame();

        let mut handler = KernelHandler {
            callback: self.callback,
            prev_frame,
            on_io_access: |api, io_access| {
                M::on_scan_sorted_substates(ScanSortedSubstatesEvent::IOAccess(&io_access), api)
            },
        };

        let substates = cur_frame
            .scan_sorted(
                &mut self.substate_io,
                node_id,
                partition_num,
                limit,
                &mut handler,
            )
            .map_err(|e| match e {
                CallbackError::Error(e) => RuntimeError::KernelError(KernelError::CallFrameError(
                    CallFrameError::ScanSortedSubstatesError(e),
                )),
                CallbackError::CallbackError(e) => e,
            })?;

        Ok(substates)
    }
}

impl<'g, M, S> KernelInvokeApi<M::CallFrameData> for Kernel<'g, M, S>
where
    M: KernelCallbackObject,
    S: CommitableSubstateStore,
{
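    /// Invokes a callee by pushing a new child call frame, running the upstream
    /// execution, passing the returned message back to the parent, and popping
    /// the frame once ownership checks succeed.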
    #[trace_resources]
    fn kernel_invoke(
        &mut self,
        invocation: Box<KernelInvocation<M::CallFrameData>>,
    ) -> Result<IndexedScryptoValue, RuntimeError> {
        M::before_invoke(invocation.as_ref(), self)?;

        // Before push call frame
        let callee = invocation.call_frame_data;
        let args = &invocation.args;
        let message = CallFrameMessage::from_input(&args, &callee);

        // Push call frame
        {
            let frame = CallFrame::new_child_from_parent(
                &self.substate_io,
                self.stacks.current_frame_mut(),
                callee,
                message,
            )
            .map_err(CallFrameError::CreateFrameError)
            .map_err(KernelError::CallFrameError)?;

            self.stacks.push_frame(frame);
        }

        // Execute
        let (output, message) = {
            // Handle execution start
            M::on_execution_start(self)?;

            // Auto drop locks
            for handle in self.stacks.current_frame().open_substates() {
                M::on_close_substate(CloseSubstateEvent::Start(handle), self)?;
            }
            self.stacks
                .current_frame_mut()
                .close_all_substates(&mut self.substate_io);

            // Run
            let output = M::invoke_upstream(args, self)?;
            let message = CallFrameMessage::from_output(&output);

            // Auto-drop locks again in case the module forgot to drop them
            for handle in self.stacks.current_frame().open_substates() {
                M::on_close_substate(CloseSubstateEvent::Start(handle), self)?;
            }
            self.stacks
                .current_frame_mut()
                .close_all_substates(&mut self.substate_io);

            // Handle execution finish
            M::on_execution_finish(&message, self)?;

            (output, message)
        };

        // Move
        {
            let (cur_frame, prev_frame) = self.stacks.mut_current_frame_and_mut_previous_frame();

            // Move resource
            CallFrame::pass_message(
                &self.substate_io,
                cur_frame,
                prev_frame.unwrap(),
                message.clone(),
            )
            .map_err(CallFrameError::PassMessageError)
            .map_err(KernelError::CallFrameError)?;

            // Auto-drop
            let owned_nodes = cur_frame.owned_nodes();
            M::auto_drop(owned_nodes, self)?;

            // Check that no owned nodes have been left behind
            let owned_nodes = self.stacks.current_frame().owned_nodes();
            if !owned_nodes.is_empty() {
                return Err(RuntimeError::KernelError(KernelError::OrphanedNodes(
                    owned_nodes.into_iter().map(|n| n.into()).collect(),
                )));
            }
        }

        // Pop call frame
        self.stacks.pop_frame();

        M::after_invoke(&output, self)?;

        Ok(output)
    }
}

impl<'g, M: KernelCallbackObject, S: CommitableSubstateStore> KernelStackApi for Kernel<'g, M, S> {
    type CallFrameData = M::CallFrameData;

    fn kernel_get_stack_id(&mut self) -> Result<usize, RuntimeError> {
        M::on_get_stack_id(&mut as_read_only!(self))?;

        Ok(self.stacks.current_stack_index)
    }

    fn kernel_switch_stack(&mut self, other_stack_index: usize) -> Result<(), RuntimeError> {
        M::on_switch_stack(&mut as_read_only!(self))?;

        self.stacks.switch_stack(other_stack_index)?;
        Ok(())
    }

    fn kernel_send_to_stack(
        &mut self,
        other_stack_index: usize,
        value: &IndexedScryptoValue,
    ) -> Result<(), RuntimeError> {
        M::on_send_to_stack(value, &mut as_read_only!(self))?;

        let message = CallFrameMessage::from_output(value);

        let (cur, other) = self
            .stacks
            .current_frame_mut_in_this_and_other_stack(other_stack_index);

        CallFrame::pass_message(&self.substate_io, cur, other, message)
            .map_err(CallFrameError::PassMessageError)
            .map_err(KernelError::CallFrameError)?;

        Ok(())
    }

    fn kernel_set_call_frame_data(&mut self, data: M::CallFrameData) -> Result<(), RuntimeError> {
        M::on_set_call_frame_data(&data, &mut as_read_only!(self))?;

        *self.stacks.current_frame_mut().data_mut() = data;
        Ok(())
    }

    fn kernel_get_owned_nodes(&mut self) -> Result<Vec<NodeId>, RuntimeError> {
        M::on_get_owned_nodes(&mut as_read_only!(self))?;

        Ok(self.stacks.current_frame().owned_nodes())
    }
}

impl<'g, M: KernelCallbackObject, S: CommitableSubstateStore> KernelApi for Kernel<'g, M, S> {
    type CallbackObject = M;
}

#[cfg(feature = "radix_engine_tests")]
impl<'g, M, S> Kernel<'g, M, S>
where
    M: KernelCallbackObject<CallFrameData: Default>,
    S: CommitableSubstateStore,
{
    pub fn kernel_create_kernel_for_testing(
        substate_io: SubstateIO<'g, S>,
        id_allocator: &'g mut IdAllocator,
        callback: &'g mut M,
        always_visible_global_nodes: &'static IndexSet<NodeId>,
    ) -> Kernel<'g, M, S> {
        Self {
            stacks: KernelStacks::new(vec![CallFrameInit {
                data: M::CallFrameData::default(),
                direct_accesses: Default::default(),
                global_addresses: Default::default(),
                always_visible_global_nodes,
                stack_id: 0,
            }]),
            substate_io,
            id_allocator,
            callback,
        }
    }
}

#[cfg(feature = "radix_engine_tests")]
impl<'g, M: KernelCallbackObject, S: CommitableSubstateStore> Kernel<'g, M, S> {
    pub fn kernel_current_frame(
        &self,
    ) -> &CallFrame<<M as KernelCallbackObject>::CallFrameData, <M as KernelCallbackObject>::LockData>
    {
        self.stacks.current_frame()
    }

    pub fn kernel_current_frame_mut(
        &mut self,
    ) -> (
        &SubstateIO<S>,
        &mut CallFrame<
            <M as KernelCallbackObject>::CallFrameData,
            <M as KernelCallbackObject>::LockData,
        >,
    ) {
        (&self.substate_io, self.stacks.current_frame_mut())
    }

    pub fn kernel_prev_frame_stack_mut(
        &mut self,
    ) -> &mut Vec<
        CallFrame<
            <M as KernelCallbackObject>::CallFrameData,
            <M as KernelCallbackObject>::LockData,
        >,
    > {
        self.stacks.previous_frames_mut()
    }

    pub fn kernel_substate_io(&self) -> &SubstateIO<'g, S> {
        &self.substate_io
    }

    pub fn kernel_substate_io_mut(&mut self) -> &mut SubstateIO<'g, S> {
        &mut self.substate_io
    }

    pub fn kernel_id_allocator(&self) -> &IdAllocator {
        &self.id_allocator
    }

    pub fn kernel_id_allocator_mut(&mut self) -> &mut &'g mut IdAllocator {
        &mut self.id_allocator
    }

    pub fn kernel_callback(&self) -> &M {
        &self.callback
    }

    pub fn kernel_callback_mut(&mut self) -> &mut M {
        &mut self.callback
    }
}