use super::super::context::JITContext;

/// The guest program is executing normally.
pub const SUSPENSION_RUNNING: u32 = 0;
/// The guest program yielded cooperatively and can be resumed immediately.
pub const SUSPENSION_YIELDED: u32 = 1;
/// The guest program suspended itself and is waiting on the host to resume it.
pub const SUSPENSION_SUSPENDED: u32 = 2;
/// Polls the context's event queue for a pending event.
///
/// Returns `TAG_NULL` for a null context or a context without an attached
/// queue. Currently a stub: it also reports "no event" when a queue is
/// attached.
#[unsafe(no_mangle)]
pub extern "C" fn __shape_poll_event(ctx: *mut JITContext) -> u64 {
    if ctx.is_null() {
        return crate::nan_boxing::TAG_NULL;
    }

    let ctx = unsafe { &mut *ctx };

    if ctx.event_queue_ptr.is_null() {
        return crate::nan_boxing::TAG_NULL;
    }

    // Stub: queue draining is not wired up yet.
    crate::nan_boxing::TAG_NULL
}
/// Increments the iteration counter and reports whether the guest should
/// yield: returns 1 when `yield_threshold` is non-zero and the counter has
/// reached it (resetting the counter), 0 otherwise.
#[unsafe(no_mangle)]
pub extern "C" fn __shape_should_yield(ctx: *mut JITContext) -> i32 {
    if ctx.is_null() {
        return 0;
    }

    let ctx = unsafe { &mut *ctx };

    ctx.iterations_since_yield += 1;

    if ctx.yield_threshold > 0 && ctx.iterations_since_yield >= ctx.yield_threshold {
        ctx.iterations_since_yield = 0;
        1
    } else {
        0
    }
}
/// Marks the context as cooperatively yielded and resets the iteration
/// counter.
#[unsafe(no_mangle)]
pub extern "C" fn __shape_yield(ctx: *mut JITContext) {
    if ctx.is_null() {
        return;
    }

    let ctx = unsafe { &mut *ctx };
    ctx.suspension_state = SUSPENSION_YIELDED;
    ctx.iterations_since_yield = 0;
}
/// Marks the context as suspended. The wait type and wait data are accepted
/// for ABI compatibility but not yet recorded.
#[unsafe(no_mangle)]
pub extern "C" fn __shape_suspend(ctx: *mut JITContext, _wait_type: u32, _wait_data: u64) {
    if ctx.is_null() {
        return;
    }

    let ctx = unsafe { &mut *ctx };
    ctx.suspension_state = SUSPENSION_SUSPENDED;
}
/// Transitions the context back to `SUSPENSION_RUNNING`. Returns 1 if a
/// transition actually happened, 0 if the context was already running (or
/// null).
#[unsafe(no_mangle)]
pub extern "C" fn __shape_resume(ctx: *mut JITContext) -> i32 {
    if ctx.is_null() {
        return 0;
    }

    let ctx = unsafe { &mut *ctx };

    if ctx.suspension_state != SUSPENSION_RUNNING {
        ctx.suspension_state = SUSPENSION_RUNNING;
        1
    } else {
        0
    }
}
/// Emits an alert through the context's alert pipeline.
///
/// Returns -1 for a null context and 0 otherwise. Currently a stub: the
/// alert bytes are dropped even when a pipeline is attached.
#[unsafe(no_mangle)]
pub extern "C" fn __shape_emit_alert(
    ctx: *mut JITContext,
    _alert_ptr: *const u8,
    _alert_len: usize,
) -> i32 {
    if ctx.is_null() {
        return -1;
    }

    let ctx = unsafe { &*ctx };

    if ctx.alert_pipeline_ptr.is_null() {
        return 0;
    }

    // Stub: pipeline delivery is not wired up yet.
    0
}
/// Emits an event onto the context's event queue.
///
/// Returns -1 for a null context and 0 otherwise. Currently a stub: the
/// event bytes are dropped even when a queue is attached.
#[unsafe(no_mangle)]
pub extern "C" fn __shape_emit_event(
    ctx: *mut JITContext,
    _event_ptr: *const u8,
    _event_len: usize,
) -> i32 {
    if ctx.is_null() {
        return -1;
    }

    let ctx = unsafe { &*ctx };

    if ctx.event_queue_ptr.is_null() {
        return 0;
    }

    // Stub: queue delivery is not wired up yet.
    0
}
/// Schedules an event on the context's event queue via the registered
/// trampoline. Returns 0 on success, -1 for a null context, and -2 when no
/// queue is attached.
#[unsafe(no_mangle)]
pub extern "C" fn __shape_schedule_event(
    ctx: *mut JITContext,
    time: i64,
    event_type: u32,
    payload: u64,
) -> i32 {
    if ctx.is_null() {
        return -1;
    }

    let ctx = unsafe { &*ctx };

    if ctx.event_queue_ptr.is_null() {
        return -2;
    }

    unsafe {
        let queue = ctx.event_queue_ptr as *mut EventQueueOpaque;
        schedule_event_raw(queue, time, event_type, payload);
    }

    0
}
/// Opaque handle to the host's event queue; only ever passed back through
/// the registered trampoline.
#[repr(C)]
pub struct EventQueueOpaque {
    _private: [u8; 0],
}
/// # Safety
///
/// `queue` must be null or a pointer the registered trampoline knows how to
/// interpret.
#[inline]
unsafe fn schedule_event_raw(
    queue: *mut EventQueueOpaque,
    time: i64,
    event_type: u32,
    payload: u64,
) {
    if !queue.is_null() {
        // Acquire pairs with the Release store in `register_schedule_event_fn`.
        let schedule_fn = SCHEDULE_EVENT_FN.load(std::sync::atomic::Ordering::Acquire);
        if !schedule_fn.is_null() {
            let f: extern "C" fn(*mut EventQueueOpaque, i64, u32, u64) =
                unsafe { std::mem::transmute(schedule_fn) };
            f(queue, time, event_type, payload);
        }
    }
}
/// Trampoline installed by the host to push events onto its queue.
pub static SCHEDULE_EVENT_FN: std::sync::atomic::AtomicPtr<()> =
    std::sync::atomic::AtomicPtr::new(std::ptr::null_mut());
/// Registers the host's event-scheduling trampoline.
///
/// # Safety
///
/// `f` must remain valid for as long as it may be invoked through
/// `__shape_schedule_event`.
pub unsafe fn register_schedule_event_fn(f: extern "C" fn(*mut EventQueueOpaque, i64, u32, u64)) {
    SCHEDULE_EVENT_FN.store(f as *mut (), std::sync::atomic::Ordering::Release);
}
/// Clears the event-scheduling trampoline.
pub fn unregister_schedule_event_fn() {
    SCHEDULE_EVENT_FN.store(std::ptr::null_mut(), std::sync::atomic::Ordering::Release);
}
/// The guest program is blocked waiting on an async task or task group.
pub const SUSPENSION_ASYNC_WAIT: u32 = 3;
/// Trampoline: spawn a task from nan-boxed callable bits, returning nan-boxed
/// future bits (`fn(u64) -> u64`).
pub static SPAWN_TASK_FN: std::sync::atomic::AtomicPtr<()> =
    std::sync::atomic::AtomicPtr::new(std::ptr::null_mut());

/// Trampoline: cancel the task behind nan-boxed future bits (`fn(u64)`).
pub static CANCEL_TASK_FN: std::sync::atomic::AtomicPtr<()> =
    std::sync::atomic::AtomicPtr::new(std::ptr::null_mut());

/// Trampoline: enter a structured-concurrency scope (`fn()`).
pub static ASYNC_SCOPE_ENTER_FN: std::sync::atomic::AtomicPtr<()> =
    std::sync::atomic::AtomicPtr::new(std::ptr::null_mut());

/// Trampoline: exit the current structured-concurrency scope (`fn()`).
pub static ASYNC_SCOPE_EXIT_FN: std::sync::atomic::AtomicPtr<()> =
    std::sync::atomic::AtomicPtr::new(std::ptr::null_mut());
/// Registers the host's async-task trampolines. Each pointer must be either
/// null or a function matching the signature documented on its static.
///
/// # Safety
///
/// The pointers must remain valid for as long as they may be invoked through
/// the `jit_*` entry points below.
pub unsafe fn register_async_task_fns(
    spawn: *mut (),
    cancel: *mut (),
    scope_enter: *mut (),
    scope_exit: *mut (),
) {
    SPAWN_TASK_FN.store(spawn, std::sync::atomic::Ordering::Release);
    CANCEL_TASK_FN.store(cancel, std::sync::atomic::Ordering::Release);
    ASYNC_SCOPE_ENTER_FN.store(scope_enter, std::sync::atomic::Ordering::Release);
    ASYNC_SCOPE_EXIT_FN.store(scope_exit, std::sync::atomic::Ordering::Release);
}
/// Clears all async-task trampolines.
pub fn unregister_async_task_fns() {
    SPAWN_TASK_FN.store(std::ptr::null_mut(), std::sync::atomic::Ordering::Release);
    CANCEL_TASK_FN.store(std::ptr::null_mut(), std::sync::atomic::Ordering::Release);
    ASYNC_SCOPE_ENTER_FN.store(std::ptr::null_mut(), std::sync::atomic::Ordering::Release);
    ASYNC_SCOPE_EXIT_FN.store(std::ptr::null_mut(), std::sync::atomic::Ordering::Release);
}
/// Spawns an async task from nan-boxed callable bits. Returns the nan-boxed
/// future bits, or `TAG_NULL` when no spawn trampoline is registered.
#[unsafe(no_mangle)]
pub extern "C" fn jit_spawn_task(_ctx: *mut JITContext, callable_bits: u64) -> u64 {
    let f = SPAWN_TASK_FN.load(std::sync::atomic::Ordering::Acquire);
    if f.is_null() {
        return crate::nan_boxing::TAG_NULL;
    }
    let spawn: fn(u64) -> u64 = unsafe { std::mem::transmute(f) };
    spawn(callable_bits)
}
/// Pops `arity` future values off the operand stack and wraps them in a
/// `TaskGroup` heap value.
///
/// The operand is packed as `(kind << 14) | arity`: the top 2 bits select the
/// join kind and the low 14 bits give the number of futures to pop. Returns
/// the nan-boxed task group, or `TAG_NULL` on a null context, stack
/// underflow, or a non-future operand.
#[unsafe(no_mangle)]
pub extern "C" fn jit_join_init(ctx: *mut JITContext, packed: u16) -> u64 {
    if ctx.is_null() {
        return crate::nan_boxing::TAG_NULL;
    }

    let ctx = unsafe { &mut *ctx };

    let kind = ((packed >> 14) & 0x03) as u8;
    let arity = (packed & 0x3FFF) as usize;

    let mut task_ids = Vec::with_capacity(arity);
    for _ in 0..arity {
        if ctx.stack_ptr == 0 {
            return crate::nan_boxing::TAG_NULL;
        }
        ctx.stack_ptr -= 1;
        let bits = ctx.stack[ctx.stack_ptr];
        let vw = crate::ffi::object::conversion::jit_bits_to_nanboxed(bits);
        if let Some(id) = vw.as_future() {
            task_ids.push(id);
        } else {
            return crate::nan_boxing::TAG_NULL;
        }
    }
    // The pops arrive in reverse push order; restore source order.
    task_ids.reverse();

    let tg =
        shape_value::ValueWord::from_heap_value(shape_value::heap_value::HeapValue::TaskGroup {
            kind,
            task_ids,
        });
    crate::ffi::object::conversion::nanboxed_to_jit_bits(&tg)
}
/// Parks the task group on the operand stack and marks the context as
/// waiting on async work, so the host can pick the group back up when it
/// resumes the context. Returns `TAG_NULL` as a placeholder result.
#[unsafe(no_mangle)]
pub extern "C" fn jit_join_await(ctx: *mut JITContext, task_group_bits: u64) -> u64 {
    if ctx.is_null() {
        return crate::nan_boxing::TAG_NULL;
    }

    let ctx = unsafe { &mut *ctx };

    // If the stack is full the group is silently dropped; the context still
    // transitions to the async-wait state.
    if ctx.stack_ptr < ctx.stack.len() {
        ctx.stack[ctx.stack_ptr] = task_group_bits;
        ctx.stack_ptr += 1;
    }

    ctx.suspension_state = SUSPENSION_ASYNC_WAIT;

    crate::nan_boxing::TAG_NULL
}
/// Cancels the task behind nan-boxed future bits. Returns 0 on success, -1
/// when no cancel trampoline is registered or the bits are not a future.
#[unsafe(no_mangle)]
pub extern "C" fn jit_cancel_task(_ctx: *mut JITContext, future_bits: u64) -> i32 {
    let f = CANCEL_TASK_FN.load(std::sync::atomic::Ordering::Acquire);
    if f.is_null() {
        return -1;
    }
    let cancel: fn(u64) = unsafe { std::mem::transmute(f) };

    let vw = crate::ffi::object::conversion::jit_bits_to_nanboxed(future_bits);
    if vw.as_future().is_some() {
        cancel(future_bits);
        0
    } else {
        -1
    }
}
/// Enters a structured-concurrency scope via the registered trampoline.
/// Returns 0 on success, -1 when no trampoline is registered.
#[unsafe(no_mangle)]
pub extern "C" fn jit_async_scope_enter(_ctx: *mut JITContext) -> i32 {
    let f = ASYNC_SCOPE_ENTER_FN.load(std::sync::atomic::Ordering::Acquire);
    if f.is_null() {
        return -1;
    }
    let enter: fn() = unsafe { std::mem::transmute(f) };
    enter();
    0
}
/// Exits the current structured-concurrency scope via the registered
/// trampoline. Returns 0 on success, -1 when no trampoline is registered.
#[unsafe(no_mangle)]
pub extern "C" fn jit_async_scope_exit(_ctx: *mut JITContext) -> i32 {
    let f = ASYNC_SCOPE_EXIT_FN.load(std::sync::atomic::Ordering::Acquire);
    if f.is_null() {
        return -1;
    }
    let exit: fn() = unsafe { std::mem::transmute(f) };
    exit();
    0
}
/// Reads the context's suspension state; a null context reads as running.
#[unsafe(no_mangle)]
pub extern "C" fn __shape_get_suspension_state(ctx: *const JITContext) -> u32 {
    if ctx.is_null() {
        return SUSPENSION_RUNNING;
    }

    let ctx = unsafe { &*ctx };
    ctx.suspension_state
}
/// Sets how many iterations may run between cooperative yield checks; 0
/// disables yielding.
#[unsafe(no_mangle)]
pub extern "C" fn __shape_set_yield_threshold(ctx: *mut JITContext, threshold: u64) {
    if ctx.is_null() {
        return;
    }

    let ctx = unsafe { &mut *ctx };
    ctx.yield_threshold = threshold;
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_yield_threshold() {
        let mut ctx = JITContext::default();
        ctx.yield_threshold = 100;

        for _ in 0..99 {
            assert_eq!(__shape_should_yield(&mut ctx), 0);
        }

        assert_eq!(__shape_should_yield(&mut ctx), 1);

        assert_eq!(ctx.iterations_since_yield, 0);
    }
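    // Sketch: drives __shape_set_yield_threshold through the FFI surface and
    // checks the counter reset, assuming the default context starts with a
    // zeroed iteration counter as the test above does.
    #[test]
    fn test_set_yield_threshold_via_ffi() {
        let mut ctx = JITContext::default();
        __shape_set_yield_threshold(&mut ctx, 2);
        assert_eq!(__shape_should_yield(&mut ctx), 0); // 1st iteration
        assert_eq!(__shape_should_yield(&mut ctx), 1); // 2nd hits the threshold
        assert_eq!(__shape_should_yield(&mut ctx), 0); // counter was reset
    }

    // Sketch: a zero threshold disables the yield check entirely, since
    // __shape_should_yield requires yield_threshold > 0 before it fires.
    #[test]
    fn test_zero_threshold_never_yields() {
        let mut ctx = JITContext::default();
        ctx.yield_threshold = 0;
        for _ in 0..1000 {
            assert_eq!(__shape_should_yield(&mut ctx), 0);
        }
    }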
    #[test]
    fn test_suspension_state() {
        let mut ctx = JITContext::default();

        assert_eq!(__shape_get_suspension_state(&ctx), SUSPENSION_RUNNING);

        __shape_yield(&mut ctx);
        assert_eq!(__shape_get_suspension_state(&ctx), SUSPENSION_YIELDED);

        __shape_resume(&mut ctx);
        assert_eq!(__shape_get_suspension_state(&ctx), SUSPENSION_RUNNING);
    }
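    // Sketch of the full suspend -> resume round trip, assuming the default
    // context starts in SUSPENSION_RUNNING as the test above verifies.
    #[test]
    fn test_suspend_resume_cycle() {
        let mut ctx = JITContext::default();
        __shape_suspend(&mut ctx, 0, 0);
        assert_eq!(__shape_get_suspension_state(&ctx), SUSPENSION_SUSPENDED);

        // The first resume transitions back to RUNNING and reports it.
        assert_eq!(__shape_resume(&mut ctx), 1);
        // A second resume is a no-op: the context is already running.
        assert_eq!(__shape_resume(&mut ctx), 0);
    }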
    #[test]
    fn test_schedule_event_null_ctx() {
        let result = __shape_schedule_event(std::ptr::null_mut(), 1000, 1, 0);
        assert_eq!(result, -1);
    }

    #[test]
    fn test_schedule_event_null_queue() {
        let mut ctx = JITContext::default();
        let result = __shape_schedule_event(&mut ctx, 1000, 1, 0);
        assert_eq!(result, -2);
    }
    #[test]
    fn test_spawn_task_null_trampoline() {
        let mut ctx = JITContext::default();
        let result = jit_spawn_task(&mut ctx, 0);
        assert_eq!(result, crate::nan_boxing::TAG_NULL);
    }

    #[test]
    fn test_join_init_empty() {
        let mut ctx = JITContext::default();
        let result = jit_join_init(&mut ctx, 0);
        assert_ne!(result, crate::nan_boxing::TAG_NULL);
    }
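    // Sketch of the packed operand format, (kind << 14) | arity: with a
    // non-zero arity and an (assumed) empty default operand stack, the pops
    // underflow and jit_join_init must return TAG_NULL.
    #[test]
    fn test_join_init_stack_underflow() {
        let mut ctx = JITContext::default();
        let packed: u16 = (1 << 14) | 2; // kind = 1, arity = 2
        let result = jit_join_init(&mut ctx, packed);
        assert_eq!(result, crate::nan_boxing::TAG_NULL);
    }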
    #[test]
    fn test_join_await_sets_suspension() {
        let mut ctx = JITContext::default();
        assert_eq!(ctx.suspension_state, SUSPENSION_RUNNING);

        let tg = shape_value::ValueWord::from_heap_value(
            shape_value::heap_value::HeapValue::TaskGroup {
                kind: 0,
                task_ids: vec![1, 2],
            },
        );
        let tg_bits = crate::ffi::object::conversion::nanboxed_to_jit_bits(&tg);

        let result = jit_join_await(&mut ctx, tg_bits);
        assert_eq!(result, crate::nan_boxing::TAG_NULL);
        assert_eq!(ctx.suspension_state, SUSPENSION_ASYNC_WAIT);
        assert!(ctx.stack_ptr > 0);
    }
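    // Sketch pinning down the current stub behavior: __shape_poll_event
    // reports "no event" (TAG_NULL) whether or not a queue is attached.
    #[test]
    fn test_poll_event_stub_returns_null() {
        assert_eq!(
            __shape_poll_event(std::ptr::null_mut()),
            crate::nan_boxing::TAG_NULL
        );
        let mut ctx = JITContext::default();
        assert_eq!(__shape_poll_event(&mut ctx), crate::nan_boxing::TAG_NULL);
    }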
    #[test]
    fn test_cancel_task_null_trampoline() {
        let mut ctx = JITContext::default();
        let result = jit_cancel_task(&mut ctx, 0);
        assert_eq!(result, -1);
    }

    #[test]
    fn test_async_scope_enter_null_trampoline() {
        let mut ctx = JITContext::default();
        let result = jit_async_scope_enter(&mut ctx);
        assert_eq!(result, -1);
    }

    #[test]
    fn test_async_scope_exit_null_trampoline() {
        let mut ctx = JITContext::default();
        let result = jit_async_scope_exit(&mut ctx);
        assert_eq!(result, -1);
    }
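    // Sketch of the emit stubs' error contract: a null context reports -1,
    // while a context without a sink attached reports 0 and drops the bytes.
    // Assumes the default context leaves both sink pointers null.
    #[test]
    fn test_emit_stubs_null_handling() {
        assert_eq!(__shape_emit_alert(std::ptr::null_mut(), std::ptr::null(), 0), -1);
        assert_eq!(__shape_emit_event(std::ptr::null_mut(), std::ptr::null(), 0), -1);

        let mut ctx = JITContext::default();
        assert_eq!(__shape_emit_alert(&mut ctx, std::ptr::null(), 0), 0);
        assert_eq!(__shape_emit_event(&mut ctx, std::ptr::null(), 0), 0);
    }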
    #[test]
    fn test_schedule_event_registration() {
        use std::sync::atomic::{AtomicI64, AtomicU32, AtomicU64, Ordering};

        static CALL_COUNT: AtomicU32 = AtomicU32::new(0);
        static LAST_TIME: AtomicI64 = AtomicI64::new(0);
        static LAST_TYPE: AtomicU32 = AtomicU32::new(0);
        static LAST_PAYLOAD: AtomicU64 = AtomicU64::new(0);

        extern "C" fn test_scheduler(
            _queue: *mut EventQueueOpaque,
            time: i64,
            event_type: u32,
            payload: u64,
        ) {
            CALL_COUNT.fetch_add(1, Ordering::SeqCst);
            LAST_TIME.store(time, Ordering::SeqCst);
            LAST_TYPE.store(event_type, Ordering::SeqCst);
            LAST_PAYLOAD.store(payload, Ordering::SeqCst);
        }

        unsafe { register_schedule_event_fn(test_scheduler) };

        let mut ctx = JITContext::default();
        let dummy_queue: u8 = 0;
        ctx.event_queue_ptr = &dummy_queue as *const u8 as *mut std::ffi::c_void;

        let result = __shape_schedule_event(&mut ctx, 5000, 42, 12345);
        assert_eq!(result, 0);

        assert_eq!(CALL_COUNT.load(Ordering::SeqCst), 1);
        assert_eq!(LAST_TIME.load(Ordering::SeqCst), 5000);
        assert_eq!(LAST_TYPE.load(Ordering::SeqCst), 42);
        assert_eq!(LAST_PAYLOAD.load(Ordering::SeqCst), 12345);

        unregister_schedule_event_fn();
    }
}