// shape_jit/ffi/async_ops.rs

1// Heap allocation audit (PR-9 V8 Gap Closure):
2//   Category A (NaN-boxed returns): 0 sites
3//   Category B (intermediate/consumed): 1 site
4//     Vec::with_capacity in jit_join_init — consumed by ValueWord::from_heap_value
5//   Category C (heap islands): 0 sites
6//     (async ops use trampoline dispatch and ValueWord conversion, no raw JitAlloc)
7//!
8//! FFI Trampolines for Async Operations
9//!
10//! These extern "C" functions are called from JIT-compiled code to
11//! interact with the async execution system (event queue, alerts, etc.)
12//!
13//! Design principles:
14//! - Platform-agnostic: works on Tokio and bare metal
15//! - No Tokio-specific async in these functions
16//! - All async coordination through event queue abstraction
17
18use super::super::context::JITContext;
19
/// Suspension state constants written into `JITContext::suspension_state`
/// by the FFI functions below and inspected by the host after JIT code
/// returns. (A fourth state, `SUSPENSION_ASYNC_WAIT` = 3, is defined later
/// in this file.)
///
/// Normal execution — no yield or suspend requested.
pub const SUSPENSION_RUNNING: u32 = 0;
/// JIT code voluntarily yielded for cooperative scheduling.
pub const SUSPENSION_YIELDED: u32 = 1;
/// Execution is suspended, waiting on an event.
pub const SUSPENSION_SUSPENDED: u32 = 2;
24
25/// Poll the event queue for the next event
26///
27/// Returns a NaN-boxed pointer to the event, or TAG_NULL if empty.
28/// Called from JIT code to check for incoming events.
29#[unsafe(no_mangle)]
30pub extern "C" fn __shape_poll_event(ctx: *mut JITContext) -> u64 {
31    if ctx.is_null() {
32        return crate::nan_boxing::TAG_NULL;
33    }
34
35    let ctx = unsafe { &mut *ctx };
36
37    if ctx.event_queue_ptr.is_null() {
38        return crate::nan_boxing::TAG_NULL;
39    }
40
41    // Cast to SharedEventQueue and poll
42    // For now, return null - full implementation needs the event queue type
43    // to be accessible here
44    crate::nan_boxing::TAG_NULL
45}
46
47/// Check if JIT code should yield for cooperative scheduling
48///
49/// Returns 1 (true) if should yield, 0 (false) otherwise.
50/// Called at loop boundaries in JIT-compiled code.
51#[unsafe(no_mangle)]
52pub extern "C" fn __shape_should_yield(ctx: *mut JITContext) -> i32 {
53    if ctx.is_null() {
54        return 0;
55    }
56
57    let ctx = unsafe { &mut *ctx };
58
59    // Increment iteration counter
60    ctx.iterations_since_yield += 1;
61
62    // Check if we've hit the yield threshold
63    if ctx.yield_threshold > 0 && ctx.iterations_since_yield >= ctx.yield_threshold {
64        ctx.iterations_since_yield = 0;
65        1 // Should yield
66    } else {
67        0 // Continue execution
68    }
69}
70
71/// Mark the context as yielded
72///
73/// Called when JIT code decides to yield control.
74#[unsafe(no_mangle)]
75pub extern "C" fn __shape_yield(ctx: *mut JITContext) {
76    if ctx.is_null() {
77        return;
78    }
79
80    let ctx = unsafe { &mut *ctx };
81    ctx.suspension_state = SUSPENSION_YIELDED;
82    ctx.iterations_since_yield = 0;
83}
84
85/// Suspend execution waiting for a specific event type
86///
87/// `wait_type`: 0 = any event, 1 = next bar, 2 = timer
88/// `wait_data`: type-specific data (e.g., timer ID)
89#[unsafe(no_mangle)]
90pub extern "C" fn __shape_suspend(ctx: *mut JITContext, _wait_type: u32, _wait_data: u64) {
91    if ctx.is_null() {
92        return;
93    }
94
95    let ctx = unsafe { &mut *ctx };
96    ctx.suspension_state = SUSPENSION_SUSPENDED;
97}
98
99/// Resume execution after suspension
100///
101/// Returns 1 if resume succeeded, 0 if context wasn't suspended.
102#[unsafe(no_mangle)]
103pub extern "C" fn __shape_resume(ctx: *mut JITContext) -> i32 {
104    if ctx.is_null() {
105        return 0;
106    }
107
108    let ctx = unsafe { &mut *ctx };
109
110    if ctx.suspension_state != SUSPENSION_RUNNING {
111        ctx.suspension_state = SUSPENSION_RUNNING;
112        1 // Resume succeeded
113    } else {
114        0 // Was already running
115    }
116}
117
118/// Emit an alert to the alert pipeline
119///
120/// `alert_ptr`: Pointer to MessagePack-encoded alert data
121/// `alert_len`: Length of the alert data
122///
123/// Returns 0 on success, non-zero on error.
124#[unsafe(no_mangle)]
125pub extern "C" fn __shape_emit_alert(
126    ctx: *mut JITContext,
127    _alert_ptr: *const u8,
128    _alert_len: usize,
129) -> i32 {
130    if ctx.is_null() {
131        return -1;
132    }
133
134    let ctx = unsafe { &*ctx };
135
136    if ctx.alert_pipeline_ptr.is_null() {
137        // No alert pipeline configured - silently succeed
138        return 0;
139    }
140
141    // Full implementation would:
142    // 1. Deserialize alert from MessagePack
143    // 2. Cast alert_pipeline_ptr to AlertRouter
144    // 3. Call router.emit(alert)
145    //
146    // For now, just acknowledge receipt
147    0
148}
149
150/// Push an event to the event queue
151///
152/// `event_ptr`: Pointer to MessagePack-encoded event data
153/// `event_len`: Length of the event data
154///
155/// Returns 0 on success, non-zero on error.
156#[unsafe(no_mangle)]
157pub extern "C" fn __shape_emit_event(
158    ctx: *mut JITContext,
159    _event_ptr: *const u8,
160    _event_len: usize,
161) -> i32 {
162    if ctx.is_null() {
163        return -1;
164    }
165
166    let ctx = unsafe { &*ctx };
167
168    if ctx.event_queue_ptr.is_null() {
169        // No event queue configured - silently succeed
170        return 0;
171    }
172
173    // Full implementation would:
174    // 1. Deserialize event from MessagePack
175    // 2. Cast event_queue_ptr to SharedEventQueue
176    // 3. Call queue.push(event)
177    //
178    // For now, just acknowledge receipt
179    0
180}
181
182// ============================================================================
183// Simulation Event Scheduling FFI
184// ============================================================================
185
186/// Schedule a future event for simulation.
187///
188/// This is a lightweight FFI for scheduling discrete events in the HybridKernel.
189/// For maximum performance, it bypasses serialization and works directly with
190/// raw values.
191///
192/// # Arguments
193/// * `ctx` - JIT execution context (contains event_queue_ptr)
194/// * `time` - Scheduled time (Unix microseconds)
195/// * `event_type` - User-defined event type ID
196/// * `payload` - NaN-boxed payload value
197///
198/// # Returns
199/// * 0 on success
200/// * -1 if ctx is null
201/// * -2 if event_queue_ptr is null
202///
203/// # Safety
204/// Requires `ctx.event_queue_ptr` to point to a valid `shape_runtime::simulation::EventQueue`.
205#[unsafe(no_mangle)]
206pub extern "C" fn __shape_schedule_event(
207    ctx: *mut JITContext,
208    time: i64,
209    event_type: u32,
210    payload: u64,
211) -> i32 {
212    if ctx.is_null() {
213        return -1;
214    }
215
216    let ctx = unsafe { &*ctx };
217
218    if ctx.event_queue_ptr.is_null() {
219        return -2; // No event queue configured
220    }
221
222    // Cast to EventQueue and schedule
223    // SAFETY: Caller must ensure event_queue_ptr points to valid EventQueue
224    unsafe {
225        let queue = ctx.event_queue_ptr as *mut EventQueueOpaque;
226        schedule_event_raw(queue, time, event_type, payload);
227    }
228
229    0
230}
231
/// Opaque type for event queue scheduling.
/// This allows JIT to schedule events without knowing the full EventQueue type.
///
/// The private zero-sized field makes the type unconstructible outside this
/// module while keeping it FFI-safe (`#[repr(C)]` opaque-struct pattern):
/// only pointers to it are ever passed, never values.
#[repr(C)]
pub struct EventQueueOpaque {
    // Zero-sized and private: instances cannot be created by callers.
    _private: [u8; 0],
}
238
239/// Raw scheduling function that will be resolved at link time.
240/// This is implemented in shape-runtime when the HybridKernel sets up the context.
241#[inline]
242unsafe fn schedule_event_raw(
243    queue: *mut EventQueueOpaque,
244    time: i64,
245    event_type: u32,
246    payload: u64,
247) {
248    // Store in a temporary buffer that the HybridKernel will drain
249    // For now, use a simple trampoline approach
250    if !queue.is_null() {
251        // Cast back to actual EventQueue pointer
252        // The HybridKernel is responsible for setting this up correctly
253        let schedule_fn = SCHEDULE_EVENT_FN.load(std::sync::atomic::Ordering::Relaxed);
254        if !schedule_fn.is_null() {
255            let f: extern "C" fn(*mut EventQueueOpaque, i64, u32, u64) =
256                unsafe { std::mem::transmute(schedule_fn) };
257            f(queue, time, event_type, payload);
258        }
259    }
260}
261
/// ModuleBinding function pointer for event scheduling.
/// Set by HybridKernel when simulation starts.
///
/// Null means "not registered"; scheduling requests are then silently
/// dropped (see `schedule_event_raw`).
pub static SCHEDULE_EVENT_FN: std::sync::atomic::AtomicPtr<()> =
    std::sync::atomic::AtomicPtr::new(std::ptr::null_mut());
266
267/// Register the schedule function for JIT calls.
268///
269/// # Safety
270/// The function pointer must point to a valid scheduling function with signature:
271/// `extern "C" fn(*mut EventQueueOpaque, i64, u32, u64)`
272pub unsafe fn register_schedule_event_fn(f: extern "C" fn(*mut EventQueueOpaque, i64, u32, u64)) {
273    SCHEDULE_EVENT_FN.store(f as *mut (), std::sync::atomic::Ordering::Release);
274}
275
276/// Clear the schedule function registration.
277pub fn unregister_schedule_event_fn() {
278    SCHEDULE_EVENT_FN.store(std::ptr::null_mut(), std::sync::atomic::Ordering::Release);
279}
280
281// ============================================================================
282// Async Task Scheduling FFI (SpawnTask / JoinInit / JoinAwait / CancelTask)
283// ============================================================================
284//
285// Design: These FFI functions use static atomic function pointers (trampoline
286// pattern) to bridge from JIT-compiled code to the interpreter's task
287// scheduler. The runtime registers the trampolines before JIT execution
288// and clears them afterwards. This avoids cross-crate visibility issues
289// (task_scheduler lives in shape-vm, which is a different crate).
290
/// Suspension state for async-wait (JoinAwait returns this to signal the JIT
/// execution loop should hand control back to the interpreter).
///
/// Extends the `SUSPENSION_*` constants at the top of this file
/// (0 = running, 1 = yielded, 2 = suspended).
pub const SUSPENSION_ASYNC_WAIT: u32 = 3;
294
// ---- Static trampoline function pointers ----
//
// Each slot is null when unregistered; the FFI entry points Acquire-load a
// slot and return a failure value when it is null. Registration stores with
// Release, so a pointer published before JIT execution starts is visible
// to the executing thread.

/// Spawn trampoline: `fn(callable_bits: u64) -> u64` (returns Future bits)
pub static SPAWN_TASK_FN: std::sync::atomic::AtomicPtr<()> =
    std::sync::atomic::AtomicPtr::new(std::ptr::null_mut());

/// Cancel trampoline: `fn(future_bits: u64)`
pub static CANCEL_TASK_FN: std::sync::atomic::AtomicPtr<()> =
    std::sync::atomic::AtomicPtr::new(std::ptr::null_mut());

/// Async scope enter trampoline: `fn()`
pub static ASYNC_SCOPE_ENTER_FN: std::sync::atomic::AtomicPtr<()> =
    std::sync::atomic::AtomicPtr::new(std::ptr::null_mut());

/// Async scope exit trampoline: `fn()`
pub static ASYNC_SCOPE_EXIT_FN: std::sync::atomic::AtomicPtr<()> =
    std::sync::atomic::AtomicPtr::new(std::ptr::null_mut());
312
313/// Register all async task trampolines.
314///
315/// # Safety
316/// The function pointers must be valid for the duration of JIT execution.
317pub unsafe fn register_async_task_fns(
318    spawn: *mut (),
319    cancel: *mut (),
320    scope_enter: *mut (),
321    scope_exit: *mut (),
322) {
323    SPAWN_TASK_FN.store(spawn, std::sync::atomic::Ordering::Release);
324    CANCEL_TASK_FN.store(cancel, std::sync::atomic::Ordering::Release);
325    ASYNC_SCOPE_ENTER_FN.store(scope_enter, std::sync::atomic::Ordering::Release);
326    ASYNC_SCOPE_EXIT_FN.store(scope_exit, std::sync::atomic::Ordering::Release);
327}
328
329/// Clear all async task trampoline registrations.
330pub fn unregister_async_task_fns() {
331    SPAWN_TASK_FN.store(std::ptr::null_mut(), std::sync::atomic::Ordering::Release);
332    CANCEL_TASK_FN.store(std::ptr::null_mut(), std::sync::atomic::Ordering::Release);
333    ASYNC_SCOPE_ENTER_FN.store(std::ptr::null_mut(), std::sync::atomic::Ordering::Release);
334    ASYNC_SCOPE_EXIT_FN.store(std::ptr::null_mut(), std::sync::atomic::Ordering::Release);
335}
336
337/// Spawn a new async task.
338///
339/// Delegates to the registered trampoline which has access to the VM's task
340/// scheduler.
341///
342/// # Arguments
343/// * `ctx` - JIT execution context (unused directly, but kept for ABI consistency)
344/// * `callable_bits` - NaN-boxed callable value (function or closure)
345///
346/// # Returns
347/// NaN-boxed Future(task_id) on success, TAG_NULL if no trampoline registered.
348#[unsafe(no_mangle)]
349pub extern "C" fn jit_spawn_task(_ctx: *mut JITContext, callable_bits: u64) -> u64 {
350    let f = SPAWN_TASK_FN.load(std::sync::atomic::Ordering::Acquire);
351    if f.is_null() {
352        return crate::nan_boxing::TAG_NULL;
353    }
354    let spawn: fn(u64) -> u64 = unsafe { std::mem::transmute(f) };
355    spawn(callable_bits)
356}
357
358/// Initialize a join group from task futures.
359///
360/// Collects `arity` Future values from the JIT stack into a TaskGroup.
361///
362/// # Arguments
363/// * `ctx` - JIT execution context
364/// * `packed` - High 2 bits = join kind (all/race/any/settle), low 14 bits = arity
365///
366/// # Returns
367/// NaN-boxed TaskGroup value, or TAG_NULL on failure.
368#[unsafe(no_mangle)]
369pub extern "C" fn jit_join_init(ctx: *mut JITContext, packed: u16) -> u64 {
370    if ctx.is_null() {
371        return crate::nan_boxing::TAG_NULL;
372    }
373
374    let ctx = unsafe { &mut *ctx };
375
376    let kind = ((packed >> 14) & 0x03) as u8;
377    let arity = (packed & 0x3FFF) as usize;
378
379    // Pop `arity` futures from the JIT stack
380    let mut task_ids = Vec::with_capacity(arity);
381    for _ in 0..arity {
382        if ctx.stack_ptr == 0 {
383            return crate::nan_boxing::TAG_NULL;
384        }
385        ctx.stack_ptr -= 1;
386        let bits = ctx.stack[ctx.stack_ptr];
387        let vw = crate::ffi::object::conversion::jit_bits_to_nanboxed(bits);
388        if let Some(id) = vw.as_future() {
389            task_ids.push(id);
390        } else {
391            return crate::nan_boxing::TAG_NULL;
392        }
393    }
394    // Reverse so task_ids[0] corresponds to the first branch
395    task_ids.reverse();
396
397    let tg =
398        shape_value::ValueWord::from_heap_value(shape_value::heap_value::HeapValue::TaskGroup {
399            kind,
400            task_ids,
401        });
402    crate::ffi::object::conversion::nanboxed_to_jit_bits(&tg)
403}
404
405/// Await a task group, suspending JIT execution.
406///
407/// Sets suspension_state = SUSPENSION_ASYNC_WAIT to signal the JIT execution
408/// loop should exit and hand control back to the interpreter. The task group
409/// value is left on the JIT stack for the interpreter to pick up.
410///
411/// # Arguments
412/// * `ctx` - JIT execution context
413/// * `task_group_bits` - NaN-boxed TaskGroup value
414///
415/// # Returns
416/// TAG_NULL (caller checks suspension_state to detect suspension).
417#[unsafe(no_mangle)]
418pub extern "C" fn jit_join_await(ctx: *mut JITContext, task_group_bits: u64) -> u64 {
419    if ctx.is_null() {
420        return crate::nan_boxing::TAG_NULL;
421    }
422
423    let ctx = unsafe { &mut *ctx };
424
425    // Push the task group onto the JIT stack so the interpreter can pick it up
426    // after the JIT function returns with the suspension signal.
427    if ctx.stack_ptr < ctx.stack.len() {
428        ctx.stack[ctx.stack_ptr] = task_group_bits;
429        ctx.stack_ptr += 1;
430    }
431
432    // Signal suspension — the JIT execution loop checks this and exits
433    ctx.suspension_state = SUSPENSION_ASYNC_WAIT;
434
435    crate::nan_boxing::TAG_NULL
436}
437
438/// Cancel a running task by its future ID.
439///
440/// # Arguments
441/// * `ctx` - JIT execution context (unused directly)
442/// * `future_bits` - NaN-boxed Future(task_id) value
443///
444/// # Returns
445/// 0 on success, -1 on failure.
446#[unsafe(no_mangle)]
447pub extern "C" fn jit_cancel_task(_ctx: *mut JITContext, future_bits: u64) -> i32 {
448    let f = CANCEL_TASK_FN.load(std::sync::atomic::Ordering::Acquire);
449    if f.is_null() {
450        return -1;
451    }
452    let cancel: fn(u64) = unsafe { std::mem::transmute(f) };
453
454    let vw = crate::ffi::object::conversion::jit_bits_to_nanboxed(future_bits);
455    if vw.as_future().is_some() {
456        cancel(future_bits);
457        0
458    } else {
459        -1
460    }
461}
462
463/// Enter an async scope (structured concurrency boundary).
464///
465/// Pushes a new empty task list onto the VM's async_scope_stack via trampoline.
466///
467/// # Returns
468/// 0 on success, -1 if no trampoline registered.
469#[unsafe(no_mangle)]
470pub extern "C" fn jit_async_scope_enter(_ctx: *mut JITContext) -> i32 {
471    let f = ASYNC_SCOPE_ENTER_FN.load(std::sync::atomic::Ordering::Acquire);
472    if f.is_null() {
473        return -1;
474    }
475    let enter: fn() = unsafe { std::mem::transmute(f) };
476    enter();
477    0
478}
479
480/// Exit an async scope (structured concurrency boundary).
481///
482/// Pops the current scope from the async_scope_stack and cancels all
483/// tasks spawned within it that are still pending, in LIFO order.
484///
485/// # Returns
486/// 0 on success, -1 if no trampoline registered.
487#[unsafe(no_mangle)]
488pub extern "C" fn jit_async_scope_exit(_ctx: *mut JITContext) -> i32 {
489    let f = ASYNC_SCOPE_EXIT_FN.load(std::sync::atomic::Ordering::Acquire);
490    if f.is_null() {
491        return -1;
492    }
493    let exit: fn() = unsafe { std::mem::transmute(f) };
494    exit();
495    0
496}
497
498/// Get the current suspension state
499///
500/// Returns: 0 = running, 1 = yielded, 2 = suspended
501#[unsafe(no_mangle)]
502pub extern "C" fn __shape_get_suspension_state(ctx: *const JITContext) -> u32 {
503    if ctx.is_null() {
504        return SUSPENSION_RUNNING;
505    }
506
507    let ctx = unsafe { &*ctx };
508    ctx.suspension_state
509}
510
511/// Set the yield threshold for cooperative scheduling
512///
513/// `threshold`: Number of iterations before automatic yield (0 = disable)
514#[unsafe(no_mangle)]
515pub extern "C" fn __shape_set_yield_threshold(ctx: *mut JITContext, threshold: u64) {
516    if ctx.is_null() {
517        return;
518    }
519
520    let ctx = unsafe { &mut *ctx };
521    ctx.yield_threshold = threshold;
522}
523
#[cfg(test)]
mod tests {
    //! Unit tests for the async-operation FFI trampolines.
    //!
    //! Most tests exercise the null-pointer / "no trampoline registered"
    //! failure paths, since the full success paths require the VM's task
    //! scheduler and event queue, which live in other crates.
    use super::*;

    // Cooperative yield: counter increments up to the threshold, yields
    // exactly once at the threshold, then resets.
    #[test]
    fn test_yield_threshold() {
        let mut ctx = JITContext::default();
        ctx.yield_threshold = 100;

        // Should not yield before threshold
        for _ in 0..99 {
            assert_eq!(__shape_should_yield(&mut ctx), 0);
        }

        // Should yield at threshold
        assert_eq!(__shape_should_yield(&mut ctx), 1);

        // Counter should reset
        assert_eq!(ctx.iterations_since_yield, 0);
    }

    // State machine round-trip: running -> yielded -> running.
    #[test]
    fn test_suspension_state() {
        let mut ctx = JITContext::default();

        assert_eq!(__shape_get_suspension_state(&ctx), SUSPENSION_RUNNING);

        __shape_yield(&mut ctx);
        assert_eq!(__shape_get_suspension_state(&ctx), SUSPENSION_YIELDED);

        __shape_resume(&mut ctx);
        assert_eq!(__shape_get_suspension_state(&ctx), SUSPENSION_RUNNING);
    }

    // Null context is reported as -1 before the queue is even consulted.
    #[test]
    fn test_schedule_event_null_ctx() {
        let result = __shape_schedule_event(std::ptr::null_mut(), 1000, 1, 0);
        assert_eq!(result, -1);
    }

    // A valid context with no queue configured is reported as -2.
    #[test]
    fn test_schedule_event_null_queue() {
        let mut ctx = JITContext::default();
        // event_queue_ptr is null by default
        let result = __shape_schedule_event(&mut ctx, 1000, 1, 0);
        assert_eq!(result, -2);
    }

    #[test]
    fn test_spawn_task_null_trampoline() {
        let mut ctx = JITContext::default();
        // No trampoline registered — should return TAG_NULL
        let result = jit_spawn_task(&mut ctx, 0);
        assert_eq!(result, crate::nan_boxing::TAG_NULL);
    }

    // jit_join_init with arity 0 pops nothing and still produces a group.
    #[test]
    fn test_join_init_empty() {
        let mut ctx = JITContext::default();
        // kind=0 (All), arity=0
        let result = jit_join_init(&mut ctx, 0);
        // Should succeed with an empty TaskGroup
        assert_ne!(result, crate::nan_boxing::TAG_NULL);
    }

    // jit_join_await must (a) push the group onto the JIT stack and
    // (b) flip the suspension state to async-wait.
    #[test]
    fn test_join_await_sets_suspension() {
        let mut ctx = JITContext::default();
        assert_eq!(ctx.suspension_state, SUSPENSION_RUNNING);

        let tg = shape_value::ValueWord::from_heap_value(
            shape_value::heap_value::HeapValue::TaskGroup {
                kind: 0,
                task_ids: vec![1, 2],
            },
        );
        let tg_bits = crate::ffi::object::conversion::nanboxed_to_jit_bits(&tg);

        let result = jit_join_await(&mut ctx, tg_bits);
        assert_eq!(result, crate::nan_boxing::TAG_NULL);
        assert_eq!(ctx.suspension_state, SUSPENSION_ASYNC_WAIT);
        // Task group should be on the stack
        assert!(ctx.stack_ptr > 0);
    }

    #[test]
    fn test_cancel_task_null_trampoline() {
        let mut ctx = JITContext::default();
        let result = jit_cancel_task(&mut ctx, 0);
        assert_eq!(result, -1); // No trampoline
    }

    #[test]
    fn test_async_scope_enter_null_trampoline() {
        let mut ctx = JITContext::default();
        let result = jit_async_scope_enter(&mut ctx);
        assert_eq!(result, -1); // No trampoline
    }

    #[test]
    fn test_async_scope_exit_null_trampoline() {
        let mut ctx = JITContext::default();
        let result = jit_async_scope_exit(&mut ctx);
        assert_eq!(result, -1); // No trampoline
    }

    // End-to-end check of the SCHEDULE_EVENT_FN registration mechanism:
    // register a recording callback, schedule through the FFI, verify the
    // callback observed the exact arguments, then unregister.
    // NOTE(review): this mutates a process-global static; it does not
    // conflict with the other tests here because they never reach the
    // trampoline (null ctx / null queue), but new tests that do must not
    // run concurrently with this one.
    #[test]
    fn test_schedule_event_registration() {
        // Test the registration mechanism
        use std::sync::atomic::{AtomicI64, AtomicU32, AtomicU64, Ordering};
        static CALL_COUNT: AtomicU32 = AtomicU32::new(0);
        static LAST_TIME: AtomicI64 = AtomicI64::new(0);
        static LAST_TYPE: AtomicU32 = AtomicU32::new(0);
        static LAST_PAYLOAD: AtomicU64 = AtomicU64::new(0);

        extern "C" fn test_scheduler(
            _queue: *mut EventQueueOpaque,
            time: i64,
            event_type: u32,
            payload: u64,
        ) {
            CALL_COUNT.fetch_add(1, Ordering::SeqCst);
            LAST_TIME.store(time, Ordering::SeqCst);
            LAST_TYPE.store(event_type, Ordering::SeqCst);
            LAST_PAYLOAD.store(payload, Ordering::SeqCst);
        }

        // Register the test scheduler
        unsafe { register_schedule_event_fn(test_scheduler) };

        // Create a context with a non-null queue pointer (just for testing)
        let mut ctx = JITContext::default();
        let dummy_queue: u8 = 0;
        ctx.event_queue_ptr = &dummy_queue as *const u8 as *mut std::ffi::c_void;

        // Schedule an event
        let result = __shape_schedule_event(&mut ctx, 5000, 42, 12345);
        assert_eq!(result, 0);

        // Verify the callback was called
        assert_eq!(CALL_COUNT.load(Ordering::SeqCst), 1);
        assert_eq!(LAST_TIME.load(Ordering::SeqCst), 5000);
        assert_eq!(LAST_TYPE.load(Ordering::SeqCst), 42);
        assert_eq!(LAST_PAYLOAD.load(Ordering::SeqCst), 12345);

        // Cleanup
        unregister_schedule_event_fn();
    }
}