// harn_vm/value/handles.rs
1use std::rc::Rc;
2use std::sync::atomic::{AtomicBool, AtomicI64};
3use std::sync::{Arc, Mutex};
4
5use super::{VmError, VmValue};
6
/// The raw join handle type for spawned tasks.
///
/// Awaiting it yields `Ok((value, text))` on success — the task's resulting
/// `VmValue` plus a `String` payload (presumably captured output; verify
/// against the spawn site) — or `Err(VmError)` when the task body fails.
pub type VmJoinHandle = tokio::task::JoinHandle<Result<(VmValue, String), VmError>>;
9
/// A spawned async task handle with cancellation support.
pub struct VmTaskHandle {
    /// Join handle for the spawned tokio task; await it to obtain the
    /// task's result (see [`VmJoinHandle`]).
    pub handle: VmJoinHandle,
    /// Cooperative cancellation token. Set to true to request graceful shutdown.
    /// Setting the flag does not abort the task by itself — the task body is
    /// presumably expected to poll it and exit voluntarily (confirm at the
    /// spawn site).
    pub cancel_token: Arc<AtomicBool>,
}
16
/// A channel handle for the VM (uses tokio mpsc).
///
/// Cloning the handle shares the same underlying channel: all clones see the
/// same sender, receiver, and closed flag.
#[derive(Debug, Clone)]
pub struct VmChannelHandle {
    /// Channel name (shared immutable string; `Rc<str>` keeps clones cheap).
    pub name: Rc<str>,
    /// Sending half. NOTE(review): `mpsc::Sender` is itself `Clone`, so the
    /// `Arc` wrapper looks redundant — but it is part of the public field
    /// type, so confirm no caller relies on it before changing.
    pub sender: Arc<tokio::sync::mpsc::Sender<VmValue>>,
    /// Receiving half, behind an async `Mutex` so `recv()` can be awaited
    /// from any clone without holding a synchronous borrow across an await.
    pub receiver: Arc<tokio::sync::Mutex<tokio::sync::mpsc::Receiver<VmValue>>>,
    /// Closed flag shared by all clones — presumably set when the channel is
    /// closed by VM channel ops; verify against the call sites.
    pub closed: Arc<AtomicBool>,
}
25
/// An atomic integer handle for the VM.
///
/// Clones share the same underlying `AtomicI64`, so updates made through one
/// clone are visible through every other clone.
#[derive(Debug, Clone)]
pub struct VmAtomicHandle {
    /// The shared 64-bit atomic value.
    pub value: Arc<AtomicI64>,
}
31
/// A reproducible random number generator handle.
///
/// Wraps a seedable `StdRng` behind `Arc<Mutex<..>>`, so clones share one
/// generator state: draws through any clone advance the same stream.
#[derive(Clone)]
pub struct VmRngHandle {
    /// Shared generator; lock before drawing. This is a blocking `std`
    /// mutex — do not hold the guard across an `.await`.
    pub rng: Arc<Mutex<rand::rngs::StdRng>>,
}
37
38impl std::fmt::Debug for VmRngHandle {
39 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
40 f.write_str("VmRngHandle { .. }")
41 }
42}
43
/// A held synchronization permit for mutex/semaphore/gate primitives.
///
/// Clones share the same underlying lease (`Arc`), so they all refer to the
/// same held permit.
#[derive(Debug, Clone)]
pub struct VmSyncPermitHandle {
    /// The shared lease backing this permit; all accessor methods delegate
    /// to it.
    pub(crate) lease: Arc<crate::synchronization::VmSyncLease>,
}
49
50impl VmSyncPermitHandle {
51 pub(crate) fn release(&self) -> bool {
52 self.lease.release()
53 }
54
55 pub(crate) fn kind(&self) -> &str {
56 self.lease.kind()
57 }
58
59 pub(crate) fn key(&self) -> &str {
60 self.lease.key()
61 }
62}
63
/// A lazy integer range — Python-style. Stores only `(start, end, inclusive)`
/// so the in-memory footprint is O(1) regardless of the range's length.
/// `len()`, indexing (`r[k]`), `.contains(x)`, `.first()`, `.last()` are all
/// O(1); direct iteration walks step-by-step without materializing a list.
///
/// Empty-range convention (Python-consistent):
/// - Inclusive empty when `start > end`.
/// - Exclusive empty when `start >= end`.
///
/// Negative / reversed ranges are NOT supported in v1: `5 to 1` is simply
/// empty. Authors who want reverse iteration should call `.to_list().reverse()`.
///
/// Equality/hashing are *structural* (field-by-field): `0 to 4` and
/// `0 until 5` yield the same elements but compare unequal.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct VmRange {
    /// Lower bound; also the first element when the range is non-empty.
    pub start: i64,
    /// Upper bound; part of the range only when `inclusive` is set.
    pub end: i64,
    /// Whether `end` itself is yielded (inclusive vs. exclusive range).
    pub inclusive: bool,
}
81
82impl VmRange {
83 /// Number of elements this range yields.
84 ///
85 /// Uses saturating arithmetic so that pathological ranges near
86 /// `i64::MAX`/`i64::MIN` do not panic on overflow. Because a range's
87 /// element count must fit in `i64` the returned length saturates at
88 /// `i64::MAX` for ranges whose width exceeds that (e.g. `i64::MIN to
89 /// i64::MAX` inclusive). Callers that later narrow to `usize` for
90 /// allocation should still guard against huge lengths — see
91 /// `to_vec` / `get` for the indexable-range invariants.
92 pub fn len(&self) -> i64 {
93 if self.inclusive {
94 if self.start > self.end {
95 0
96 } else {
97 self.end.saturating_sub(self.start).saturating_add(1)
98 }
99 } else if self.start >= self.end {
100 0
101 } else {
102 self.end.saturating_sub(self.start)
103 }
104 }
105
106 pub fn is_empty(&self) -> bool {
107 self.len() == 0
108 }
109
110 /// Element at the given 0-based index, bounds-checked.
111 /// Returns `None` when out of bounds or when `start + idx` would
112 /// overflow (which can only happen when `len()` saturated).
113 pub fn get(&self, idx: i64) -> Option<i64> {
114 if idx < 0 || idx >= self.len() {
115 None
116 } else {
117 self.start.checked_add(idx)
118 }
119 }
120
121 /// First element or `None` when empty.
122 pub fn first(&self) -> Option<i64> {
123 if self.is_empty() {
124 None
125 } else {
126 Some(self.start)
127 }
128 }
129
130 /// Last element or `None` when empty.
131 pub fn last(&self) -> Option<i64> {
132 if self.is_empty() {
133 None
134 } else if self.inclusive {
135 Some(self.end)
136 } else {
137 Some(self.end - 1)
138 }
139 }
140
141 /// Whether `v` falls inside the range (O(1)).
142 pub fn contains(&self, v: i64) -> bool {
143 if self.is_empty() {
144 return false;
145 }
146 if self.inclusive {
147 v >= self.start && v <= self.end
148 } else {
149 v >= self.start && v < self.end
150 }
151 }
152
153 /// Materialize to a `Vec<VmValue>` — the explicit escape hatch.
154 ///
155 /// Uses `checked_add` on the per-element index so a range near
156 /// `i64::MAX` stops at the representable bound instead of panicking.
157 /// Callers should still treat a very long range as unwise to
158 /// materialize (the whole point of `VmRange` is to avoid this).
159 pub fn to_vec(&self) -> Vec<VmValue> {
160 let len = self.len();
161 if len <= 0 {
162 return Vec::new();
163 }
164 let cap = len as usize;
165 let mut out = Vec::with_capacity(cap);
166 for i in 0..len {
167 match self.start.checked_add(i) {
168 Some(v) => out.push(VmValue::Int(v)),
169 None => break,
170 }
171 }
172 out
173 }
174}
175
/// A generator object: lazily produces values via yield.
/// The generator body runs as a spawned task that sends values through a channel.
#[derive(Debug, Clone)]
pub struct VmGenerator {
    /// Whether the generator has finished (returned or exhausted).
    /// `Rc<Cell<..>>` so every clone of the handle observes completion.
    pub done: Rc<std::cell::Cell<bool>>,
    /// Receiver end of the yield channel (generator sends values here).
    /// Wrapped in a shared async mutex so recv() can be called without holding
    /// a RefCell borrow across await points. Each item is `Ok(value)` or
    /// `Err(..)` — presumably an error raised inside the generator body;
    /// verify against the spawn site.
    pub receiver: Rc<tokio::sync::Mutex<tokio::sync::mpsc::Receiver<Result<VmValue, VmError>>>>,
}
187
/// A stream object: lazily produces values from a `gen fn`.
///
/// Structurally identical to [`VmGenerator`]: a shared done flag plus the
/// receiving end of the value channel.
#[derive(Debug, Clone)]
pub struct VmStream {
    /// Whether the stream has finished (returned, thrown, or exhausted).
    /// `Rc<Cell<..>>` so every clone of the handle observes completion.
    pub done: Rc<std::cell::Cell<bool>>,
    /// Receiver end of the stream channel; behind a shared async mutex so
    /// recv() can be awaited without a sync borrow held across await points.
    pub receiver: Rc<tokio::sync::Mutex<tokio::sync::mpsc::Receiver<Result<VmValue, VmError>>>>,
}