seq_runtime/value.rs
use crate::seqstring::SeqString;
use may::sync::mpmc;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::Arc;

/// Channel data: holds sender and receiver for direct handle passing
///
/// Both sender and receiver are Clone (MPMC), so duplicating a Channel value
/// just clones the Arc. Send/receive operations use the handles directly
/// with zero mutex overhead.
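///
/// # Example
///
/// A minimal sketch, not compiled as a doctest: it assumes `mpmc::channel()`
/// is the constructor exposed by May's MPMC module.
///
/// ```ignore
/// let (sender, receiver) = mpmc::channel();
/// let chan = Value::Channel(Arc::new(ChannelData { sender, receiver }));
///
/// // Duplicating the value clones the Arc, not the channel: both copies
/// // send into and receive from the same underlying queue.
/// let copy = chan.clone();
/// if let (Value::Channel(a), Value::Channel(b)) = (&chan, &copy) {
///     assert!(Arc::ptr_eq(a, b));
/// }
/// ```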
#[derive(Debug)]
pub struct ChannelData {
    pub sender: mpmc::Sender<Value>,
    pub receiver: mpmc::Receiver<Value>,
}

impl Clone for ChannelData {
    fn clone(&self) -> Self {
        Self {
            sender: self.sender.clone(),
            receiver: self.receiver.clone(),
        }
    }
}

// PartialEq by identity: two ChannelData values compare equal only if they are
// the same allocation (for Value::Channel, that means the same Arc).
impl PartialEq for ChannelData {
    fn eq(&self, other: &Self) -> bool {
        std::ptr::eq(self, other)
    }
}

// Note: Arc is used for both Closure.env and Variant to enable O(1) cloning.
// This is essential for functional programming with recursive data structures.

/// MapKey: Hashable subset of Value for use as map keys
///
/// Only types that can be meaningfully hashed are allowed as map keys:
/// Int, String, Bool. Float is excluded due to NaN equality issues.
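///
/// # Example
///
/// An illustrative sketch of the conversion helpers below (not compiled as a
/// doctest):
///
/// ```ignore
/// let key = MapKey::from_value(&Value::Int(7)).unwrap();
/// assert_eq!(key, MapKey::Int(7));
/// assert_eq!(key.to_value(), Value::Int(7));
///
/// // Floats (and compound values) are rejected rather than hashed unreliably.
/// assert!(MapKey::from_value(&Value::Float(1.5)).is_none());
/// ```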
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MapKey {
    Int(i64),
    String(SeqString),
    Bool(bool),
}

impl Hash for MapKey {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash the discriminant first so keys of different types feed
        // distinct input to the hasher (type safety for mixed-key maps)
        std::mem::discriminant(self).hash(state);
        match self {
            MapKey::Int(n) => n.hash(state),
            MapKey::String(s) => s.as_str().hash(state),
            MapKey::Bool(b) => b.hash(state),
        }
    }
}

impl MapKey {
    /// Try to convert a Value to a MapKey
    /// Returns None for non-hashable types (Float, Variant, Quotation, Closure, Map, Channel)
    pub fn from_value(value: &Value) -> Option<MapKey> {
        match value {
            Value::Int(n) => Some(MapKey::Int(*n)),
            Value::String(s) => Some(MapKey::String(s.clone())),
            Value::Bool(b) => Some(MapKey::Bool(*b)),
            _ => None,
        }
    }

    /// Convert MapKey back to Value
    pub fn to_value(&self) -> Value {
        match self {
            MapKey::Int(n) => Value::Int(*n),
            MapKey::String(s) => Value::String(s.clone()),
            MapKey::Bool(b) => Value::Bool(*b),
        }
    }
}

/// Value: What the language talks about
///
/// This is pure data: a Value owns (or reference-counts) everything it refers to
/// and holds no raw pointers into the stack or other cells.
/// Values can be pushed on the stack, stored in variants, etc.
/// The key insight: Value is independent of Stack structure.
///
/// # Memory Layout
///
/// Using `#[repr(C)]` ensures a predictable C-compatible layout:
/// - Discriminant (tag) at offset 0
/// - Payload data follows at a fixed offset
///
/// This allows compiled code to write Values directly without FFI calls,
/// enabling inline integer/boolean operations for better performance.
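///
/// # Example
///
/// A sketch of what the layout guarantee means in practice (not compiled as a
/// doctest; the unit tests at the bottom of this file check the same invariants
/// for real):
///
/// ```ignore
/// let v = Value::Int(42);
/// let p = &v as *const Value as *const u8;
/// unsafe {
///     // Tag at offset 0 (Int is the first variant), i64 payload at offset 8.
///     assert_eq!(*p, 0);
///     assert_eq!(*(p.add(8) as *const i64), 42);
/// }
/// ```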
#[repr(C)]
#[derive(Debug, Clone, PartialEq)]
pub enum Value {
    /// Integer value
    Int(i64),

    /// Floating-point value (IEEE 754 double precision)
    Float(f64),

    /// Boolean value
    Bool(bool),

    /// String (arena or globally allocated via SeqString)
    String(SeqString),

    /// Variant (sum type with tagged fields)
    /// Uses Arc for O(1) cloning - essential for recursive data structures
    Variant(Arc<VariantData>),

    /// Map (key-value dictionary with O(1) lookup)
    /// Keys must be hashable types (Int, String, Bool)
    Map(Box<HashMap<MapKey, Value>>),

    /// Quotation (stateless function with two entry points for calling convention compatibility)
    /// - wrapper: C-convention entry point for calls from the runtime
    /// - impl_: tailcc entry point for tail calls from compiled code (enables TCO)
    Quotation {
        /// C-convention wrapper function pointer (for runtime calls via patch_seq_call)
        wrapper: usize,
        /// tailcc implementation function pointer (for musttail from compiled code)
        impl_: usize,
    },

    /// Closure (quotation with captured environment)
    /// Contains function pointer and Arc-shared array of captured values.
    /// Arc enables TCO: no cleanup needed after tail call, ref-count handles it.
    Closure {
        /// Function pointer (transmuted to function taking Stack + environment)
        fn_ptr: usize,
        /// Captured values from creation site (Arc for TCO support)
        /// Ordered top-down: env[0] is top of stack at creation
        env: Arc<[Value]>,
    },

    /// Channel (MPMC sender/receiver pair for CSP-style concurrency)
    /// Uses Arc for O(1) cloning - duplicating a channel shares the underlying handles.
    /// Send/receive operations use the handles directly with zero mutex overhead.
    Channel(Arc<ChannelData>),
}

// Safety: Value can be sent and shared between strands (green threads)
//
// Send (safe to transfer ownership between threads):
// - Int, Float, Bool are Copy types (trivially Send)
// - String (SeqString) implements Send (clone to global on transfer)
// - Variant contains Arc<VariantData>, which is Send when VariantData is Send + Sync
// - Quotation stores its function pointers as usize (Send-safe, no owned data)
// - Closure: fn_ptr is usize (Send), env is Arc<[Value]> (Send when Value is Send + Sync)
// - Map contains Box<HashMap>, which is Send because keys and values are Send
// - Channel contains Arc<ChannelData>, which is Send (May's Sender/Receiver are Send)
//
// Sync (safe to share references between threads):
// - Value has no interior mutability (no Cell, RefCell, Mutex, etc.)
// - All operations on Value are read-only or create new values (functional semantics)
// - Arc requires T: Send + Sync for full thread-safety
//
// This is required for:
// - Channel communication between strands
// - Arc-based sharing of Variants, Closure environments, and Channels
//
// A minimal runtime exercise of the Send claim lives in the tests module below
// (test_value_send_between_threads).
unsafe impl Send for Value {}
unsafe impl Sync for Value {}

/// VariantData: Composite values (sum types)
///
/// Fields are stored in a heap-allocated array, NOT linked via next pointers.
/// This is the key difference from cem2, which used StackCell.next for field linking.
///
/// # Arc and Reference Cycles
///
/// Variants use `Arc<VariantData>` for O(1) cloning, which could theoretically
/// create reference cycles. However, cycles are prevented by design:
/// - VariantData.fields is immutable (no mutation after creation)
/// - All variant operations create new variants rather than modifying existing ones
/// - The Seq language has no mutation primitives for variant fields
///
/// This functional/immutable design ensures Arc reference counts always reach zero.
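///
/// # Example
///
/// A sketch of a recursive value (not compiled as a doctest; the tag numbers
/// 0 for Nil and 1 for Cons are made up for illustration):
///
/// ```ignore
/// // Nil, then Cons(1, Nil): fields are plain Values, including nested variants.
/// let nil = Value::Variant(Arc::new(VariantData::new(0, vec![])));
/// let list = Value::Variant(Arc::new(VariantData::new(1, vec![Value::Int(1), nil])));
///
/// // No next pointers: the fields live in one owned array on the heap.
/// if let Value::Variant(cons) = &list {
///     assert_eq!(cons.tag, 1);
///     assert_eq!(cons.fields.len(), 2);
/// }
/// ```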
#[derive(Debug, Clone, PartialEq)]
pub struct VariantData {
    /// Tag identifies which variant constructor was used
    pub tag: u32,

    /// Fields stored as an owned array of values
    /// This is independent of any stack structure
    pub fields: Box<[Value]>,
}

impl VariantData {
    /// Create a new variant with the given tag and fields
    pub fn new(tag: u32, fields: Vec<Value>) -> Self {
        Self {
            tag,
            fields: fields.into_boxed_slice(),
        }
    }
}

// We'll implement proper cleanup in Drop later
// For now, Rust's ownership handles most of it

#[cfg(test)]
mod tests {
    use super::*;
    use std::mem::{align_of, size_of};

    #[test]
    fn test_value_layout() {
        // Print sizes for debugging
        println!("size_of::<Value>() = {}", size_of::<Value>());
        println!("align_of::<Value>() = {}", align_of::<Value>());

        // Verify Value is exactly 40 bytes to match StackValue layout
        // This is critical for FFI correctness between LLVM IR and Rust
        use crate::tagged_stack::StackValue;
        assert_eq!(
            size_of::<Value>(),
            size_of::<StackValue>(),
            "Value ({} bytes) must match StackValue ({} bytes) for FFI compatibility",
            size_of::<Value>(),
            size_of::<StackValue>()
        );
        assert_eq!(
            size_of::<Value>(),
            40,
            "Value must be exactly 40 bytes, got {}",
            size_of::<Value>()
        );

        // Verify alignment is 8 (for 64-bit pointers)
        assert_eq!(align_of::<Value>(), 8);
    }
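
    // Supplementary check (sketch): exercises MapKey's Eq/Hash impls, which
    // Value::Map relies on. Keys of different types coexist as distinct entries
    // in a single HashMap, and non-hashable values never become keys at all.
    #[test]
    fn test_map_key_hash_distinct_types() {
        use std::collections::HashMap;

        let mut m: HashMap<MapKey, Value> = HashMap::new();
        m.insert(MapKey::Int(0), Value::Int(1));
        m.insert(MapKey::Bool(false), Value::Int(2));

        assert_eq!(m.len(), 2, "Int(0) and Bool(false) must be distinct keys");
        assert_eq!(m.get(&MapKey::Int(0)), Some(&Value::Int(1)));
        assert_eq!(m.get(&MapKey::Bool(false)), Some(&Value::Int(2)));

        // Values outside the hashable subset are rejected up front.
        assert!(MapKey::from_value(&Value::Float(0.5)).is_none());
    }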

    #[test]
    fn test_value_int_layout() {
        let val = Value::Int(42);
        let ptr = &val as *const Value as *const u8;

        unsafe {
            // With #[repr(C)], the discriminant is at offset 0
            // For 9 variants, discriminant fits in 1 byte but is padded
            let discriminant_byte = *ptr;
            assert_eq!(
                discriminant_byte, 0,
                "Int discriminant should be 0, got {}",
                discriminant_byte
            );

            // The i64 value should be at a fixed offset after the discriminant
            // With C repr, it's typically at offset 8 (discriminant + padding)
            let value_ptr = ptr.add(8) as *const i64;
            let stored_value = *value_ptr;
            assert_eq!(
                stored_value, 42,
                "Int value should be 42 at offset 8, got {}",
                stored_value
            );
        }
    }
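
    // Supplementary check (sketch): cloning a Value::Variant is O(1); the clone
    // shares the same VariantData allocation via Arc instead of copying fields.
    #[test]
    fn test_variant_clone_shares_allocation() {
        use std::sync::Arc;

        let fields = vec![Value::Int(1), Value::Bool(false)];
        let original = Value::Variant(Arc::new(VariantData::new(7, fields)));
        let copy = original.clone();

        match (&original, &copy) {
            (Value::Variant(a), Value::Variant(b)) => {
                // Same allocation: the clone only bumped the reference count.
                assert!(Arc::ptr_eq(a, b));
                assert_eq!(a.tag, 7);
                assert_eq!(a.fields.len(), 2);
            }
            _ => panic!("expected Value::Variant"),
        }
    }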

    #[test]
    fn test_value_bool_layout() {
        let val_true = Value::Bool(true);
        let val_false = Value::Bool(false);
        let ptr_true = &val_true as *const Value as *const u8;
        let ptr_false = &val_false as *const Value as *const u8;

        unsafe {
            // Bool is variant index 2 (after Int=0, Float=1)
            let discriminant = *ptr_true;
            assert_eq!(
                discriminant, 2,
                "Bool discriminant should be 2, got {}",
                discriminant
            );

            // The bool value should be at offset 8
            let value_ptr_true = ptr_true.add(8);
            let value_ptr_false = ptr_false.add(8);
            assert_eq!(*value_ptr_true, 1, "true should be 1");
            assert_eq!(*value_ptr_false, 0, "false should be 0");
        }
    }
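
    // Supplementary check (sketch): Value claims Send, so moving one into a
    // plain OS thread and getting it back should compile and run. This exercises
    // the unsafe Send impl above without needing strand scheduling.
    #[test]
    fn test_value_send_between_threads() {
        use std::sync::Arc;
        use std::thread;

        let value = Value::Variant(Arc::new(VariantData::new(0, vec![Value::Int(42)])));
        let handle = thread::spawn(move || value);
        let returned = handle.join().expect("thread panicked");

        match returned {
            Value::Variant(data) => assert_eq!(data.fields[0], Value::Int(42)),
            _ => panic!("expected Value::Variant"),
        }
    }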
}