#![deny(
unreachable_code,
improper_ctypes_definitions,
future_incompatible,
nonstandard_style,
rust_2018_idioms,
clippy::perf,
clippy::correctness,
clippy::suspicious,
clippy::unwrap_used,
clippy::expect_used,
clippy::indexing_slicing,
clippy::arithmetic_side_effects,
clippy::missing_safety_doc,
clippy::same_item_push,
clippy::implicit_clone,
clippy::all,
clippy::pedantic,
missing_docs,
clippy::nursery,
clippy::single_call_fn
)]
#![warn(
dead_code,
warnings,
clippy::dbg_macro,
clippy::todo,
clippy::cast_possible_truncation,
clippy::cast_sign_loss,
clippy::cast_possible_wrap,
clippy::unnecessary_safety_comment
)]
#![allow(
unsafe_code,
unused_unsafe,
private_interfaces,
clippy::restriction,
clippy::inline_always,
unused_doc_comments,
clippy::empty_line_after_doc_comments,
clippy::missing_const_for_thread_local
)]
#![crate_name = "dtact"]
extern crate alloc;
pub use crate::api::config::set_deflection_threshold;
pub use crate::api::fiber::spawn_with_stack;
#[cfg(feature = "hw-acceleration")]
pub use crate::api::hw::cldemote;
#[cfg(feature = "hw-acceleration")]
pub use crate::api::hw::uintr_signal;
pub use crate::api::spawn;
pub use crate::api::yield_now;
pub use crate::api::yield_to;
pub use crate::c_ffi::dtact_await;
pub use crate::c_ffi::dtact_handle_t;
pub use crate::errors::DtactError;
pub use crate::future_bridge::wait;
pub use dtact_macros::dtact_init;
pub use dtact_macros::export_async;
pub use dtact_macros::export_fiber;
pub use dtact_macros::task;
#[doc(hidden)]
pub mod api;
#[doc(hidden)]
pub mod c_ffi;
#[doc(hidden)]
pub mod common_types;
#[doc(hidden)]
pub mod context_switch;
#[doc(hidden)]
pub mod dta_scheduler;
#[doc(hidden)]
pub mod errors;
#[doc(hidden)]
pub mod future_bridge;
#[doc(hidden)]
pub mod memory_management;
#[doc(hidden)]
pub mod utils;
pub use api::*;
#[doc(hidden)]
/// Process-wide runtime state: the scheduler, the fiber context pool, and
/// lifecycle flags. Stored in [`GLOBAL_RUNTIME`] and only ever reached
/// through a `'static` reference (see [`Runtime::start`]).
pub struct Runtime {
    /// Task scheduler; owns the per-worker queues iterated by `start()`.
    pub scheduler: dta_scheduler::DtaScheduler,
    /// Context pool handed to every worker thread — presumably reusable
    /// fiber stacks/contexts; confirm in `memory_management`.
    pub pool: memory_management::ContextPool,
    /// Set (via `swap`) by the first call to `start()`; guards against
    /// spawning the worker threads more than once.
    pub started: core::sync::atomic::AtomicBool,
    /// Passed to each worker's run loop — presumably polled as a
    /// cooperative shutdown signal; confirm in `dta_scheduler`.
    pub shutdown: core::sync::atomic::AtomicBool,
}
impl Runtime {
    /// Launches one OS worker thread per scheduler worker slot.
    ///
    /// Idempotent: the atomic `swap` on `started` guarantees the workers
    /// are spawned at most once, even under concurrent callers. Requires a
    /// `'static` receiver so the worker threads can borrow the scheduler,
    /// context pool, and shutdown flag for their entire lifetime without
    /// `Arc` or cloning.
    ///
    /// # Panics
    ///
    /// Panics if the OS refuses to spawn a worker thread — the runtime
    /// cannot operate with missing workers.
    pub fn start(&'static self) {
        // First caller wins; every later (or concurrent) call is a no-op.
        if self
            .started
            .swap(true, core::sync::atomic::Ordering::SeqCst)
        {
            return;
        }
        for worker_id in 0..self.scheduler.workers.len() {
            // `&'static self` lets these borrows outlive this call, so the
            // spawned threads can hold them for their whole run loop.
            let sched: &'static dta_scheduler::DtaScheduler = &self.scheduler;
            let pool: &'static memory_management::ContextPool = &self.pool;
            let shutdown: &'static core::sync::atomic::AtomicBool = &self.shutdown;
            let spawn_result = std::thread::Builder::new()
                .name(format!("dtact-worker-{worker_id}"))
                .spawn(move || {
                    crate::dta_scheduler::DtaScheduler::run_worker_static(
                        sched, worker_id, pool, shutdown,
                    );
                });
            // Explicit handling instead of `.expect()`: this crate denies
            // `clippy::expect_used` (a specific-lint deny outranks the
            // `allow(clippy::restriction)` group), so `.expect` here fails
            // `cargo clippy`. A missing worker thread is unrecoverable, so
            // we still abort, carrying the OS error in the message.
            if let Err(err) = spawn_result {
                panic!("Failed to spawn Dtact worker thread {worker_id}: {err}");
            }
        }
    }
}
#[doc(hidden)]
/// Lazily-initialized global runtime instance. `wake_fiber` panics when
/// invoked before this has been set.
pub static GLOBAL_RUNTIME: std::sync::OnceLock<Runtime> = std::sync::OnceLock::new();
#[doc(hidden)]
/// Counter — presumably the number of spawns whose state escaped to the
/// heap; NOTE(review): semantics inferred from the name, it is never
/// incremented in this file — confirm at call sites.
pub static HEAP_ESCAPED_SPAWNS: core::sync::atomic::AtomicU64 =
    core::sync::atomic::AtomicU64::new(0);
#[inline(always)]
/// Re-enqueues fiber `fiber_index` on `origin_core`'s queue, retrying
/// until the scheduler accepts it.
///
/// If the enqueue fails and the caller is itself running on a fiber (the
/// `CURRENT_FIBER` thread-local is non-null), that fiber is marked
/// `Notified` and control switches back to the executor before retrying;
/// on a plain OS thread the retry just yields first.
///
/// # Panics
///
/// Panics if called before `GLOBAL_RUNTIME` has been initialized.
pub(crate) fn wake_fiber(origin_core: usize, fiber_index: u32) {
    if let Some(runtime) = GLOBAL_RUNTIME.get() {
        loop {
            // NOTE(review): the task payload is the fiber index widened to
            // u64 — confirm against `enqueue_task`'s expected arguments.
            let success =
                runtime
                    .scheduler
                    .enqueue_task(origin_core, u64::from(fiber_index), fiber_index);
            if success {
                break;
            }
            // Queue rejected the task: decide how to back off based on
            // whether this thread is currently executing a fiber.
            let ctx_ptr = crate::future_bridge::CURRENT_FIBER.with(std::cell::Cell::get);
            if ctx_ptr.is_null() {
                // Plain OS thread: yield and retry the enqueue.
                std::thread::yield_now();
            } else {
                // SAFETY: `ctx_ptr` is non-null and comes from the
                // `CURRENT_FIBER` thread-local — assumed to be a valid,
                // exclusive pointer to the currently running fiber's
                // context for the duration of this call; TODO confirm the
                // lifetime guarantee in `future_bridge`.
                unsafe {
                    let ctx = &mut *ctx_ptr;
                    // Publish the `Notified` state *before* switching so
                    // the executor observes it after the context switch
                    // (Release store — presumably pairs with an Acquire
                    // load on the executor side; confirm).
                    ctx.state.store(
                        crate::memory_management::FiberStatus::Notified as u32,
                        core::sync::atomic::Ordering::Release,
                    );
                    // Switch from this fiber's registers back to the
                    // executor's; execution resumes here once the fiber is
                    // rescheduled, then the loop retries the enqueue.
                    (ctx.switch_fn)(&raw mut ctx.regs, &raw const ctx.executor_regs);
                }
            }
        }
    } else {
        panic!("dtact::wake_fiber() invoked before Runtime Initialization");
    }
}
// Compiles the README's code blocks as doctests so the documentation stays
// in sync with the API. `#[ignore]` is only valid on test *functions*, so
// the original `#[cfg_attr(miri, ignore)]` on this module was inert and
// trips the `unused_attributes` warning under Miri (escalated by
// `#![warn(warnings)]`). Compiling the module out under Miri preserves the
// evident intent of skipping README doctests there.
#[allow(clippy::mixed_attributes_style)]
#[cfg(not(miri))]
#[doc(hidden)]
mod readme {
    #![doc = include_str!("../README.md")]
}