// dtact/lib.rs
1//! # Dtact-V3: Distributed Task-Aware Coroutine Toolkit
2//!
3//! Dtact is a high-performance, low-latency asynchronous runtime designed for systems-level
4//! programming across heterogeneous architectures (`x86_64`, `AArch64`, `RISC-V`).
5//!
6//! ## Core Architecture
7//! 1. **Lock-Free Arena**: A page-aligned memory pool for fiber contexts, providing O(1) allocation
8//! and hardware-level guard pages for memory safety.
9//! 2. **P2P Scheduler Mesh**: A distributed work-stealing/deflection scheduler that minimizes L3
10//! cache thrashing and maximizes NUMA-local execution.
11//! 3. **Zero-Copy Migration**: Leveraging self-referential futures and direct stack-top injection
12//! to move running tasks across cores without heap allocation.
13//!
14//! Dtact provides tiered safety levels (0-2) allowing developers to trade off between raw
15//! performance and hardware-enforced isolation (e.g., guard pages and SEH registration).
16
// =========================================================================
// RUST LINT CONFIGURATION: dtact
// =========================================================================

// -------------------------------------------------------------------------
// LEVEL 1: CRITICAL ERRORS (Deny)
// -------------------------------------------------------------------------
// NOTE(review): several restriction-group lints are denied here
// (`unwrap_used`, `expect_used`, `indexing_slicing`,
// `arithmetic_side_effects`, `single_call_fn`) even though
// `clippy::restriction` is allowed below. A lint named explicitly overrides
// its group setting, so these denies remain in force. In particular:
//   * `clippy::expect_used` conflicts with the `.expect(...)` call in
//     `Runtime::start` later in this file — `cargo clippy` should fail on it;
//     verify, or `#[allow]` at that call site.
//   * `clippy::single_call_fn` fires on every function called exactly once,
//     which is extremely aggressive for a library crate — confirm this is
//     intentional.
#![deny(
    unreachable_code,
    improper_ctypes_definitions,
    future_incompatible,
    nonstandard_style,
    rust_2018_idioms,
    clippy::perf,
    clippy::correctness,
    clippy::suspicious,
    clippy::unwrap_used,
    clippy::expect_used,
    clippy::indexing_slicing,
    clippy::arithmetic_side_effects,
    clippy::missing_safety_doc,
    clippy::same_item_push,
    clippy::implicit_clone,
    clippy::all,
    clippy::pedantic,
    missing_docs,
    clippy::nursery,
    clippy::single_call_fn
)]
// -------------------------------------------------------------------------
// LEVEL 2: STYLE WARNINGS (Warn)
// -------------------------------------------------------------------------
// NOTE(review): `warnings` is the umbrella group of all currently-warning
// lints; `warn(warnings)` is effectively a no-op — possibly `deny(warnings)`
// was intended. Verify.
#![warn(
    dead_code,
    warnings,
    clippy::dbg_macro,
    clippy::todo,
    clippy::cast_possible_truncation,
    clippy::cast_sign_loss,
    clippy::cast_possible_wrap,
    clippy::unnecessary_safety_comment
)]
// -------------------------------------------------------------------------
// LEVEL 3: ALLOW/IGNORABLE (Allow)
// -------------------------------------------------------------------------
// `unsafe_code` is allowed by necessity: the runtime performs raw context
// switches and pointer-based fiber state manipulation (see `wake_fiber`).
#![allow(
    unsafe_code,
    unused_unsafe,
    private_interfaces,
    clippy::restriction,
    clippy::inline_always,
    unused_doc_comments,
    clippy::empty_line_after_doc_comments,
    clippy::missing_const_for_thread_local
)]
#![crate_name = "dtact"]
73
extern crate alloc;

/// Set the deflection threshold for the DTA-V3 Scheduler.
pub use crate::api::config::set_deflection_threshold;
/// Spawn a fiber with a custom stack size.
pub use crate::api::fiber::spawn_with_stack;
/// Hardware-level demotion API.
#[cfg(feature = "hw-acceleration")]
pub use crate::api::hw::cldemote;
/// Hardware-level interrupt signaling API.
#[cfg(feature = "hw-acceleration")]
pub use crate::api::hw::uintr_signal;
/// Spawn a fiber.
pub use crate::api::spawn;
/// Yield execution to the scheduler.
pub use crate::api::yield_now;
/// Yield execution to another fiber.
pub use crate::api::yield_to;
/// Wait for a fiber to complete (C-compatible FFI entry point).
pub use crate::c_ffi::dtact_await;
/// Handle for C-compatible FFI.
pub use crate::c_ffi::dtact_handle_t;
/// Runtime error types.
pub use crate::errors::DtactError;
/// Wait for a fiber to complete (Rust-native future-bridge variant).
pub use crate::future_bridge::wait;
/// Attribute macro for initializing the Dtact runtime.
pub use dtact_macros::dtact_init;
/// Attribute macro for exporting an async function to C.
pub use dtact_macros::export_async;
/// Attribute macro for exporting a fiber to C.
pub use dtact_macros::export_fiber;
/// Attribute macro for defining a Dtact task.
pub use dtact_macros::task;
108
/// Public user-facing API for spawning and managing fibers.
#[doc(hidden)]
pub mod api;
/// C-compatible FFI boundary for cross-language integration.
#[doc(hidden)]
pub mod c_ffi;
/// Common types used across the Dtact runtime.
#[doc(hidden)]
pub mod common_types;
/// Low-level assembly-based context switching primitives.
#[doc(hidden)]
pub mod context_switch;
/// Distributed P2P Mesh scheduler implementation.
#[doc(hidden)]
pub mod dta_scheduler;
/// Standard error types for the Dtact runtime.
#[doc(hidden)]
pub mod errors;
/// Bridge for polling futures within a `FiberContext`.
#[doc(hidden)]
pub mod future_bridge;
/// Lock-free arena and OS-level memory management.
#[doc(hidden)]
pub mod memory_management;
/// Timing, topology, and OS-specific primitives.
#[doc(hidden)]
pub mod utils;

// Flatten the `api` surface into the crate root so callers can write
// `dtact::spawn` etc. The explicit `pub use` items above shadow any
// same-named glob re-exports, so both forms can coexist.
pub use api::*;
138
/// DTA-V3 Runtime Environment.
///
/// Consolidates the distributed scheduler and the memory pool into a single
/// unit to ensure architectural consistency across all worker threads.
///
/// Stored in the [`GLOBAL_RUNTIME`] `OnceLock`, which is what gives
/// [`Runtime::start`] the `&'static self` it needs to hand plain `'static`
/// references to worker threads.
#[doc(hidden)]
pub struct Runtime {
    /// The distributed P2P work-deflection scheduler.
    pub scheduler: dta_scheduler::DtaScheduler,
    /// The lock-free arena for managing fiber stacks and contexts.
    pub pool: memory_management::ContextPool,
    /// Flag indicating if the worker threads have been started.
    /// Latched to `true` by the first call to [`Runtime::start`] (SeqCst swap).
    pub started: core::sync::atomic::AtomicBool,
    /// Cooperative shutdown signal for worker threads. A reference is handed
    /// to each worker via `run_worker_static`.
    pub shutdown: core::sync::atomic::AtomicBool,
}
154
155impl Runtime {
156 /// Spawns the OS worker threads for the scheduler.
157 ///
158 /// # Panics
159 ///
160 /// Panics if the system fails to spawn a new thread. This can occur if
161 /// the operating system limits on the number of threads have been reached.
162 pub fn start(&'static self) {
163 if self
164 .started
165 .swap(true, core::sync::atomic::Ordering::SeqCst)
166 {
167 return;
168 }
169
170 let workers_count = self.scheduler.workers.len();
171
172 for i in 0..workers_count {
173 // Each closure must capture its own copy of these values.
174 let sched: &'static dta_scheduler::DtaScheduler = &self.scheduler;
175 let pool: &'static memory_management::ContextPool = &self.pool;
176 let shutdown: &'static core::sync::atomic::AtomicBool = &self.shutdown;
177 let my_id = i;
178
179 std::thread::Builder::new()
180 .name(format!("dtact-worker-{my_id}"))
181 .spawn(move || {
182 crate::dta_scheduler::DtaScheduler::run_worker_static(
183 sched, my_id, pool, shutdown,
184 );
185 })
186 .expect("Failed to spawn Dtact worker thread");
187 }
188 }
189}
190
/// Global Singleton for the Runtime Environment.
///
/// This is initialized exactly once per process via `dtact_init` or
/// implicit autostart triggers in the proc-macro layer.
///
/// `OnceLock` guarantees at-most-once initialization and yields the
/// `&'static Runtime` that [`Runtime::start`] requires.
#[doc(hidden)]
pub static GLOBAL_RUNTIME: std::sync::OnceLock<Runtime> = std::sync::OnceLock::new();

/// Telemetry: Tracks fibers that failed the 8KB zero-copy check and fell back to heap allocation.
///
/// A high value indicates that captured future sizes exceed the pre-allocated
/// stack-top buffer, causing a performance cliff due to heap traffic.
// NOTE(review): no writer is visible in this chunk — presumably incremented on
// the spawn slow path in `api`; verify against the spawn implementation.
#[doc(hidden)]
pub static HEAP_ESCAPED_SPAWNS: core::sync::atomic::AtomicU64 =
    core::sync::atomic::AtomicU64::new(0);
205
/// Awakens a suspended fiber by pushing it onto the DTA-V3 Scheduler mesh.
///
/// This function is the primary signaling mechanism for cross-thread wakeups.
/// It uses the fiber's index as a flow-id for deterministic load distribution
/// across the worker cores.
///
/// Blocks (by spinning/yielding) until the enqueue succeeds; on backpressure
/// it either yields the OS thread or, when called from inside a fiber,
/// context-switches back to the executor so the queue can drain.
///
/// # Arguments
/// * `origin_core` - The core ID where the fiber was originally spawned.
/// * `fiber_index` - The unique identifier of the fiber in the context pool.
///
/// # Panics
/// Panics if called before [`GLOBAL_RUNTIME`] has been initialized.
#[inline(always)]
pub(crate) fn wake_fiber(origin_core: usize, fiber_index: u32) {
    if let Some(runtime) = GLOBAL_RUNTIME.get() {
        // Submit the fiber back to the mesh. Loop with yield on backpressure.
        loop {
            let success =
                runtime
                    .scheduler
                    .enqueue_task(origin_core, u64::from(fiber_index), fiber_index);

            if success {
                break;
            }

            // Queue is full: yield to scheduler to let it drain
            let ctx_ptr = crate::future_bridge::CURRENT_FIBER.with(std::cell::Cell::get);
            if ctx_ptr.is_null() {
                // Null means we are on a plain OS thread (not inside a fiber):
                // fall back to an OS-level yield.
                std::thread::yield_now();
            } else {
                // SAFETY: `ctx_ptr` was checked non-null above. This assumes
                // `CURRENT_FIBER` holds a valid, exclusively-owned pointer to
                // the currently running fiber's context for the duration of
                // that fiber's execution, and that `switch_fn` saves into
                // `regs` and resumes from `executor_regs` — TODO(review):
                // confirm both invariants in `future_bridge` /
                // `context_switch`.
                unsafe {
                    let ctx = &mut *ctx_ptr;
                    // Mark Notified (Release) before switching away so the
                    // executor observes the state change after resuming.
                    ctx.state.store(
                        crate::memory_management::FiberStatus::Notified as u32,
                        core::sync::atomic::Ordering::Release,
                    );
                    (ctx.switch_fn)(&raw mut ctx.regs, &raw const ctx.executor_regs);
                }
            }
        }
    } else {
        panic!("dtact::wake_fiber() invoked before Runtime Initialization");
    }
}
248
// Compile the README's code blocks as doctests so the documentation cannot
// drift out of sync with the crate.
// NOTE(review): `#[ignore]` is a test-function attribute; applied to a `mod`
// via `cfg_attr(miri, ignore)` it is likely an unused attribute and probably
// does NOT skip the README doctests under Miri — verify, and consider gating
// the `#![doc = ...]` include with `#[cfg(not(miri))]` instead.
#[allow(clippy::mixed_attributes_style)]
#[cfg_attr(miri, ignore)]
#[doc(hidden)]
mod readme {
    #![doc = include_str!("../README.md")]
}