// runmat_builtins — lib.rs
1pub use inventory;
2use runmat_gc_api::GcPtr;
3use runmat_thread_local::runmat_thread_local;
4use std::cell::RefCell;
5use std::collections::HashMap;
6use std::convert::TryFrom;
7use std::fmt;
8use std::future::Future;
9use std::pin::Pin;
10
11use indexmap::IndexMap;
12use std::sync::OnceLock;
13
#[cfg(target_arch = "wasm32")]
pub mod wasm_registry {
    //! Fallback registry for wasm32 targets, where `inventory`'s link-time
    //! collection is unavailable: builtins, constants, and docs are pushed
    //! into these process-wide lists at startup instead.
    use super::{BuiltinDoc, BuiltinFunction, Constant};
    use once_cell::sync::Lazy;
    use std::sync::Mutex;

    static FUNCTIONS: Lazy<Mutex<Vec<&'static BuiltinFunction>>> =
        Lazy::new(|| Mutex::new(Vec::new()));
    static CONSTANTS: Lazy<Mutex<Vec<&'static Constant>>> = Lazy::new(|| Mutex::new(Vec::new()));
    static DOCS: Lazy<Mutex<Vec<&'static BuiltinDoc>>> = Lazy::new(|| Mutex::new(Vec::new()));
    static REGISTERED: Lazy<Mutex<bool>> = Lazy::new(|| Mutex::new(false));

    /// Promote a value to a `'static` reference by leaking it. Registry
    /// entries live for the whole program, so the leak is intentional.
    fn leak<T>(value: T) -> &'static T {
        Box::leak(Box::new(value))
    }

    /// Register a builtin function implementation.
    pub fn submit_builtin_function(func: BuiltinFunction) {
        FUNCTIONS.lock().unwrap().push(leak(func));
    }

    /// Register a named constant.
    pub fn submit_constant(constant: Constant) {
        CONSTANTS.lock().unwrap().push(leak(constant));
    }

    /// Register documentation for a builtin.
    pub fn submit_builtin_doc(doc: BuiltinDoc) {
        DOCS.lock().unwrap().push(leak(doc));
    }

    /// Snapshot of all registered builtin functions.
    pub fn builtin_functions() -> Vec<&'static BuiltinFunction> {
        FUNCTIONS.lock().unwrap().clone()
    }

    /// Snapshot of all registered constants.
    pub fn constants() -> Vec<&'static Constant> {
        CONSTANTS.lock().unwrap().clone()
    }

    /// Snapshot of all registered builtin docs.
    pub fn builtin_docs() -> Vec<&'static BuiltinDoc> {
        DOCS.lock().unwrap().clone()
    }

    /// Mark the one-time registration pass as complete.
    pub fn mark_registered() {
        *REGISTERED.lock().unwrap() = true;
    }

    /// Whether the one-time registration pass has run.
    pub fn is_registered() -> bool {
        *REGISTERED.lock().unwrap()
    }
}
65
/// Runtime value: every datum the interpreter/JIT can hold — scalars,
/// column-major arrays, containers, handles, and callable references.
#[derive(Debug, Clone, PartialEq)]
pub enum Value {
    /// Integer scalar tagged with its class (int8..uint64)
    Int(IntValue),
    /// Double-precision scalar
    Num(f64),
    /// Complex scalar value represented as (re, im)
    Complex(f64, f64),
    /// Logical scalar
    Bool(bool),
    /// Logical array (N-D of booleans). Scalars use Bool.
    LogicalArray(LogicalArray),
    /// String scalar
    String(String),
    /// String array (R2016b+): N-D array of string scalars
    StringArray(StringArray),
    /// Char array (single-quoted): 2-D character array (rows x cols)
    CharArray(CharArray),
    /// Real numeric array (column-major)
    Tensor(Tensor),
    /// Complex numeric array; same column-major shape semantics as `Tensor`
    ComplexTensor(ComplexTensor),
    /// Cell array: heterogeneous container
    Cell(CellArray),
    /// Struct (scalar or nested). Struct arrays are represented in higher layers;
    /// this variant holds a single struct's fields.
    Struct(StructValue),
    /// GPU-resident tensor handle (opaque; buffer managed by backend)
    GpuTensor(runmat_accelerate_api::GpuTensorHandle),
    /// Simple object instance until full class system lands
    Object(ObjectInstance),
    /// Handle-object wrapper providing identity semantics and validity tracking
    HandleObject(HandleRef),
    /// Event listener handle for events
    Listener(Listener),
    /// Multiple outputs captured as a list (internal destructuring helper)
    OutputList(Vec<Value>),
    /// Function handle pointing to a named function (builtin or user)
    FunctionHandle(String),
    /// Closure value
    Closure(Closure),
    /// Reference to a class by name
    ClassRef(String),
    /// Exception value (MException)
    MException(MException),
}
/// Integer scalar tagged with its MATLAB integer class.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum IntValue {
    I8(i8),
    I16(i16),
    I32(i32),
    I64(i64),
    U8(u8),
    U16(u16),
    U32(u32),
    U64(u64),
}

impl IntValue {
    /// Convert to `i64`. `u64` values above `i64::MAX` saturate to
    /// `i64::MAX` rather than wrapping.
    pub fn to_i64(&self) -> i64 {
        match self {
            IntValue::I8(v) => *v as i64,
            IntValue::I16(v) => *v as i64,
            IntValue::I32(v) => *v as i64,
            IntValue::I64(v) => *v,
            IntValue::U8(v) => *v as i64,
            IntValue::U16(v) => *v as i64,
            IntValue::U32(v) => *v as i64,
            IntValue::U64(v) => {
                if *v > i64::MAX as u64 {
                    i64::MAX
                } else {
                    *v as i64
                }
            }
        }
    }

    /// Convert to `f64`.
    ///
    /// `u64` is converted directly instead of going through `to_i64`, so
    /// values above `i64::MAX` keep their (rounded) magnitude rather than
    /// being clamped to `i64::MAX` first. Other variants fit in `i64`
    /// exactly, so converting via `to_i64` is lossless for them.
    pub fn to_f64(&self) -> f64 {
        match self {
            IntValue::U64(v) => *v as f64,
            other => other.to_i64() as f64,
        }
    }

    /// True when the wrapped integer is zero.
    pub fn is_zero(&self) -> bool {
        self.to_i64() == 0
    }

    /// MATLAB class name of the wrapped integer type.
    pub fn class_name(&self) -> &'static str {
        match self {
            IntValue::I8(_) => "int8",
            IntValue::I16(_) => "int16",
            IntValue::I32(_) => "int32",
            IntValue::I64(_) => "int64",
            IntValue::U8(_) => "uint8",
            IntValue::U16(_) => "uint16",
            IntValue::U32(_) => "uint32",
            IntValue::U64(_) => "uint64",
        }
    }
}
153
154#[derive(Debug, Clone, PartialEq)]
155pub struct StructValue {
156    pub fields: IndexMap<String, Value>,
157}
158
159impl StructValue {
160    pub fn new() -> Self {
161        Self {
162            fields: IndexMap::new(),
163        }
164    }
165
166    /// Insert a field, preserving insertion order when the name is new.
167    pub fn insert(&mut self, name: impl Into<String>, value: Value) -> Option<Value> {
168        self.fields.insert(name.into(), value)
169    }
170
171    /// Remove a field while preserving the relative order of remaining fields.
172    pub fn remove(&mut self, name: &str) -> Option<Value> {
173        self.fields.shift_remove(name)
174    }
175
176    /// Returns an iterator over field names in their stored order.
177    pub fn field_names(&self) -> impl Iterator<Item = &String> {
178        self.fields.keys()
179    }
180}
181
182impl Default for StructValue {
183    fn default() -> Self {
184        Self::new()
185    }
186}
187
188#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
189pub enum NumericDType {
190    F64,
191    F32,
192    U8,
193    U16,
194}
195
196impl NumericDType {
197    pub fn class_name(self) -> &'static str {
198        match self {
199            NumericDType::F64 => "double",
200            NumericDType::F32 => "single",
201            NumericDType::U8 => "uint8",
202            NumericDType::U16 => "uint16",
203        }
204    }
205
206    pub fn byte_size(self) -> usize {
207        match self {
208            NumericDType::F64 => 8,
209            NumericDType::F32 => 4,
210            NumericDType::U8 => 1,
211            NumericDType::U16 => 2,
212        }
213    }
214}
215
/// Real numeric array. Elements are stored column-major as f64 regardless
/// of the logical `dtype`; `rows`/`cols` cache the first two dimensions
/// for 2-D call sites.
#[derive(Debug, Clone, PartialEq)]
pub struct Tensor {
    pub data: Vec<f64>,
    pub shape: Vec<usize>, // Column-major layout
    pub rows: usize,       // Compatibility for 2D usage
    pub cols: usize,       // Compatibility for 2D usage
    /// Logical numeric class of this tensor; host storage remains f64.
    pub dtype: NumericDType,
}

/// Complex numeric array; elements are (re, im) pairs, column-major.
#[derive(Debug, Clone, PartialEq)]
pub struct ComplexTensor {
    pub data: Vec<(f64, f64)>,
    pub shape: Vec<usize>,
    pub rows: usize,
    pub cols: usize,
}

/// N-D array of string scalars (column-major); `rows`/`cols` cache the
/// first two dimensions.
#[derive(Debug, Clone, PartialEq)]
pub struct StringArray {
    pub data: Vec<String>,
    pub shape: Vec<usize>,
    pub rows: usize,
    pub cols: usize,
}
241
/// N-D logical (boolean) array.
#[derive(Debug, Clone, PartialEq)]
pub struct LogicalArray {
    /// Element storage; every entry is 0 or 1 (a compact bitset can come later).
    pub data: Vec<u8>,
    pub shape: Vec<usize>,
}

impl LogicalArray {
    /// Build a logical array, validating that `data` matches `shape` and
    /// normalizing every element to 0 or 1.
    pub fn new(data: Vec<u8>, shape: Vec<usize>) -> Result<Self, String> {
        let expected: usize = shape.iter().product();
        if data.len() != expected {
            return Err(format!(
                "LogicalArray data length {} doesn't match shape {:?} ({} elements)",
                data.len(),
                shape,
                expected
            ));
        }
        // Normalize to 0/1
        let normalized: Vec<u8> = data.into_iter().map(|v| u8::from(v != 0)).collect();
        Ok(LogicalArray {
            data: normalized,
            shape,
        })
    }

    /// All-false logical array of the given shape.
    pub fn zeros(shape: Vec<usize>) -> Self {
        let count: usize = shape.iter().product();
        LogicalArray {
            data: vec![0u8; count],
            shape,
        }
    }

    /// Total number of elements.
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// True when the array holds no elements.
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }
}
280
/// 2-D character array (single-quoted char matrix), rows x cols.
#[derive(Debug, Clone, PartialEq)]
pub struct CharArray {
    pub data: Vec<char>,
    pub rows: usize,
    pub cols: usize,
}

impl CharArray {
    /// Build a single-row char array from a string slice.
    pub fn new_row(s: &str) -> Self {
        let data: Vec<char> = s.chars().collect();
        let cols = data.len();
        CharArray {
            data,
            rows: 1,
            cols,
        }
    }

    /// Build a rows x cols char array, validating the element count.
    pub fn new(data: Vec<char>, rows: usize, cols: usize) -> Result<Self, String> {
        if data.len() != rows * cols {
            Err(format!(
                "Char data length {} doesn't match dimensions {}x{}",
                data.len(),
                rows,
                cols
            ))
        } else {
            Ok(CharArray { data, rows, cols })
        }
    }
}
308
309impl StringArray {
310    pub fn new(data: Vec<String>, shape: Vec<usize>) -> Result<Self, String> {
311        let expected: usize = shape.iter().product();
312        if data.len() != expected {
313            return Err(format!(
314                "StringArray data length {} doesn't match shape {:?} ({} elements)",
315                data.len(),
316                shape,
317                expected
318            ));
319        }
320        let (rows, cols) = if shape.len() >= 2 {
321            (shape[0], shape[1])
322        } else if shape.len() == 1 {
323            (1, shape[0])
324        } else {
325            (0, 0)
326        };
327        Ok(StringArray {
328            data,
329            shape,
330            rows,
331            cols,
332        })
333    }
334    pub fn new_2d(data: Vec<String>, rows: usize, cols: usize) -> Result<Self, String> {
335        Self::new(data, vec![rows, cols])
336    }
337    pub fn rows(&self) -> usize {
338        self.shape.first().copied().unwrap_or(1)
339    }
340    pub fn cols(&self) -> usize {
341        self.shape.get(1).copied().unwrap_or(1)
342    }
343}
344
345// GpuTensorHandle now lives in runmat-accel-api
346
347impl Tensor {
348    pub fn new(data: Vec<f64>, shape: Vec<usize>) -> Result<Self, String> {
349        let expected: usize = shape.iter().product();
350        if data.len() != expected {
351            return Err(format!(
352                "Tensor data length {} doesn't match shape {:?} ({} elements)",
353                data.len(),
354                shape,
355                expected
356            ));
357        }
358        let (rows, cols) = if shape.len() >= 2 {
359            (shape[0], shape[1])
360        } else if shape.len() == 1 {
361            (1, shape[0])
362        } else {
363            (0, 0)
364        };
365        Ok(Tensor {
366            data,
367            shape,
368            rows,
369            cols,
370            dtype: NumericDType::F64,
371        })
372    }
373
374    pub fn new_2d(data: Vec<f64>, rows: usize, cols: usize) -> Result<Self, String> {
375        Self::new(data, vec![rows, cols])
376    }
377
378    pub fn from_f32(data: Vec<f32>, shape: Vec<usize>) -> Result<Self, String> {
379        let converted: Vec<f64> = data.into_iter().map(|v| v as f64).collect();
380        Self::new_with_dtype(converted, shape, NumericDType::F32)
381    }
382
383    pub fn from_f32_slice(data: &[f32], shape: &[usize]) -> Result<Self, String> {
384        let converted: Vec<f64> = data.iter().map(|&v| v as f64).collect();
385        Self::new_with_dtype(converted, shape.to_vec(), NumericDType::F32)
386    }
387
388    pub fn new_with_dtype(
389        data: Vec<f64>,
390        shape: Vec<usize>,
391        dtype: NumericDType,
392    ) -> Result<Self, String> {
393        let mut t = Self::new(data, shape)?;
394        t.dtype = dtype;
395        Ok(t)
396    }
397
398    pub fn zeros(shape: Vec<usize>) -> Self {
399        let size: usize = shape.iter().product();
400        let (rows, cols) = if shape.len() >= 2 {
401            (shape[0], shape[1])
402        } else if shape.len() == 1 {
403            (1, shape[0])
404        } else {
405            (0, 0)
406        };
407        Tensor {
408            data: vec![0.0; size],
409            shape,
410            rows,
411            cols,
412            dtype: NumericDType::F64,
413        }
414    }
415
416    pub fn ones(shape: Vec<usize>) -> Self {
417        let size: usize = shape.iter().product();
418        let (rows, cols) = if shape.len() >= 2 {
419            (shape[0], shape[1])
420        } else if shape.len() == 1 {
421            (1, shape[0])
422        } else {
423            (0, 0)
424        };
425        Tensor {
426            data: vec![1.0; size],
427            shape,
428            rows,
429            cols,
430            dtype: NumericDType::F64,
431        }
432    }
433
434    // 2D helpers for transitional call sites
435    pub fn zeros2(rows: usize, cols: usize) -> Self {
436        Self::zeros(vec![rows, cols])
437    }
438    pub fn ones2(rows: usize, cols: usize) -> Self {
439        Self::ones(vec![rows, cols])
440    }
441
442    pub fn rows(&self) -> usize {
443        self.shape.first().copied().unwrap_or(1)
444    }
445    pub fn cols(&self) -> usize {
446        self.shape.get(1).copied().unwrap_or(1)
447    }
448
449    pub fn get2(&self, row: usize, col: usize) -> Result<f64, String> {
450        let rows = self.rows();
451        let cols = self.cols();
452        if row >= rows || col >= cols {
453            return Err(format!(
454                "Index ({row}, {col}) out of bounds for {rows}x{cols} tensor"
455            ));
456        }
457        // Column-major linearization: lin = row + col*rows
458        Ok(self.data[row + col * rows])
459    }
460
461    pub fn set2(&mut self, row: usize, col: usize, value: f64) -> Result<(), String> {
462        let rows = self.rows();
463        let cols = self.cols();
464        if row >= rows || col >= cols {
465            return Err(format!(
466                "Index ({row}, {col}) out of bounds for {rows}x{cols} tensor"
467            ));
468        }
469        // Column-major linearization
470        self.data[row + col * rows] = value;
471        Ok(())
472    }
473
474    pub fn scalar_to_tensor2(scalar: f64, rows: usize, cols: usize) -> Tensor {
475        Tensor {
476            data: vec![scalar; rows * cols],
477            shape: vec![rows, cols],
478            rows,
479            cols,
480            dtype: NumericDType::F64,
481        }
482    }
483    // No-compat constructors: prefer new/new_2d/zeros/zeros2/ones/ones2
484}
485
486impl ComplexTensor {
487    pub fn new(data: Vec<(f64, f64)>, shape: Vec<usize>) -> Result<Self, String> {
488        let expected: usize = shape.iter().product();
489        if data.len() != expected {
490            return Err(format!(
491                "ComplexTensor data length {} doesn't match shape {:?} ({} elements)",
492                data.len(),
493                shape,
494                expected
495            ));
496        }
497        let (rows, cols) = if shape.len() >= 2 {
498            (shape[0], shape[1])
499        } else if shape.len() == 1 {
500            (1, shape[0])
501        } else {
502            (0, 0)
503        };
504        Ok(ComplexTensor {
505            data,
506            shape,
507            rows,
508            cols,
509        })
510    }
511    pub fn new_2d(data: Vec<(f64, f64)>, rows: usize, cols: usize) -> Result<Self, String> {
512        Self::new(data, vec![rows, cols])
513    }
514    pub fn zeros(shape: Vec<usize>) -> Self {
515        let size: usize = shape.iter().product();
516        let (rows, cols) = if shape.len() >= 2 {
517            (shape[0], shape[1])
518        } else if shape.len() == 1 {
519            (1, shape[0])
520        } else {
521            (0, 0)
522        };
523        ComplexTensor {
524            data: vec![(0.0, 0.0); size],
525            shape,
526            rows,
527            cols,
528        }
529    }
530}
531
532const MAX_ND_DISPLAY_ELEMENTS: usize = 4096;
533
534fn should_expand_nd_display(shape: &[usize]) -> bool {
535    shape.len() > 2
536        && matches!(
537            total_len(shape),
538            Some(total) if total > 0 && total <= MAX_ND_DISPLAY_ELEMENTS
539        )
540}
541
/// Column-major strides for `shape`: the first dimension has stride 1 and
/// each subsequent stride is the running product of the preceding dims
/// (saturating on overflow).
fn column_major_strides(shape: &[usize]) -> Vec<usize> {
    shape
        .iter()
        .scan(1usize, |stride, &dim| {
            let current = *stride;
            *stride = stride.saturating_mul(dim);
            Some(current)
        })
        .collect()
}
551
/// Decode a flat page index into per-dimension (0-based) coordinates over
/// `page_shape`, least-significant dimension first. Zero-sized dimensions
/// yield coordinate 0 without consuming any of the index.
fn decode_page_coords(page_index: usize, page_shape: &[usize]) -> Vec<usize> {
    let mut remaining = page_index;
    page_shape
        .iter()
        .map(|&dim| {
            if dim == 0 {
                0
            } else {
                let coord = remaining % dim;
                remaining /= dim;
                coord
            }
        })
        .collect()
}
564
/// Write the 2-D pages of an N-D array in MATLAB style: each page is
/// announced as `(:, :, k, ...) =` (1-based page coordinates) followed by
/// its rows. `write_element` renders the element at a given column-major
/// linear index. No-op for rank <= 2.
fn write_nd_pages(
    f: &mut fmt::Formatter<'_>,
    shape: &[usize],
    mut write_element: impl FnMut(&mut fmt::Formatter<'_>, usize) -> fmt::Result,
) -> fmt::Result {
    if shape.len() <= 2 {
        return Ok(());
    }
    let rows = shape[0];
    let cols = shape[1];
    if rows == 0 || cols == 0 {
        return write!(f, "[]");
    }
    // NOTE(review): `total_len` is defined elsewhere in this file; a None
    // here appears to mean the size could not be computed, so we fall back
    // to a shape summary — confirm.
    let Some(page_count) = total_len(&shape[2..]) else {
        return write!(f, "Tensor(shape={shape:?})");
    };
    if page_count == 0 {
        return write!(f, "[]");
    }
    let strides = column_major_strides(shape);
    for page_index in 0..page_count {
        if page_index > 0 {
            write!(f, "\n\n")?;
        }
        // Page header, e.g. "(:, :, 2, 1) =" — coordinates are 1-based.
        let coords = decode_page_coords(page_index, &shape[2..]);
        write!(f, "(:, :")?;
        for &coord in &coords {
            write!(f, ", {}", coord + 1)?;
        }
        write!(f, ") =")?;

        // Linear (column-major) offset of this page's first element.
        let mut page_base = 0usize;
        for (offset, &coord) in coords.iter().enumerate() {
            page_base += coord * strides[offset + 2];
        }
        for r in 0..rows {
            writeln!(f)?;
            write!(f, "  ")?;
            for c in 0..cols {
                if c > 0 {
                    write!(f, "  ")?;
                }
                let linear = page_base + r + c * rows;
                write_element(f, linear)?;
            }
        }
    }
    Ok(())
}
614
impl fmt::Display for Tensor {
    /// Render MATLAB-style: 0/1-D as a bracketed row vector, 2-D as
    /// indented rows, small N-D arrays page by page, and large N-D arrays
    /// as a shape summary.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.shape.len() {
            0 | 1 => {
                // Treat as row vector for display
                write!(f, "[")?;
                for (i, v) in self.data.iter().enumerate() {
                    if i > 0 {
                        write!(f, " ")?;
                    }
                    write!(f, "{}", format_number(*v))?;
                }
                write!(f, "]")
            }
            2 => {
                let rows = self.rows();
                let cols = self.cols();
                // Display as matrix, one indented line per row
                for r in 0..rows {
                    writeln!(f)?;
                    write!(f, "  ")?; // Indent
                    for c in 0..cols {
                        if c > 0 {
                            write!(f, "  ")?;
                        }
                        // Column-major element access
                        let v = self.data[r + c * rows];
                        write!(f, "{}", format_number(v))?;
                    }
                }
                Ok(())
            }
            _ => {
                // N-D: expand only while the element count stays within
                // MAX_ND_DISPLAY_ELEMENTS; otherwise summarize the shape.
                if should_expand_nd_display(&self.shape) {
                    write_nd_pages(f, &self.shape, |f, idx| {
                        write!(f, "{}", format_number(self.data[idx]))
                    })
                } else {
                    write!(f, "Tensor(shape={:?})", self.shape)
                }
            }
        }
    }
}
658
impl fmt::Display for StringArray {
    /// Render a string array: 1x1 as the quoted scalar (or `<missing>`),
    /// rank > 2 as a dimension summary, otherwise a `RxC string array`
    /// header followed by indented rows of quoted elements.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (rows, cols) = match self.shape.len() {
            0 => (0, 0),
            1 => (1, self.shape[0]),
            _ => (self.shape[0], self.shape[1]),
        };
        let count = self.data.len();
        // 1x1 scalar: print the value itself, with inner quotes escaped.
        if count == 1 && rows == 1 && cols == 1 {
            let v = &self.data[0];
            if v == "<missing>" {
                return write!(f, "<missing>");
            }
            let escaped = v.replace('"', "\\\"");
            return write!(f, "\"{escaped}\"");
        }
        // Rank > 2: summary only, e.g. "2x3x4 string array".
        if self.shape.len() > 2 {
            let dims: Vec<String> = self.shape.iter().map(|d| d.to_string()).collect();
            return write!(f, "{} string array", dims.join("x"));
        }
        write!(f, "{rows}x{cols} string array")?;
        if rows == 0 || cols == 0 {
            return Ok(());
        }
        for r in 0..rows {
            writeln!(f)?;
            write!(f, "  ")?;
            for c in 0..cols {
                if c > 0 {
                    write!(f, "  ")?;
                }
                // Column-major element access
                let v = &self.data[r + c * rows];
                if v == "<missing>" {
                    write!(f, "<missing>")?;
                } else {
                    let escaped = v.replace('"', "\\\"");
                    write!(f, "\"{escaped}\"")?;
                }
            }
        }
        Ok(())
    }
}
702
impl fmt::Display for LogicalArray {
    /// Render logical values as 0/1: single elements bare, vectors as
    /// `[0 1 ...]`, matrices as indented rows, small N-D arrays page by
    /// page, and large N-D arrays as a dimension summary.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // A single element prints as a bare scalar regardless of shape.
        if self.data.len() == 1 {
            return write!(f, "{}", if self.data[0] != 0 { 1 } else { 0 });
        }
        match self.shape.len() {
            0 => write!(f, "[]"),
            1 => {
                write!(f, "[")?;
                for (i, v) in self.data.iter().enumerate() {
                    if i > 0 {
                        write!(f, " ")?;
                    }
                    write!(f, "{}", if *v != 0 { 1 } else { 0 })?;
                }
                write!(f, "]")
            }
            2 => {
                let rows = self.shape[0];
                let cols = self.shape[1];
                // Display as matrix
                for r in 0..rows {
                    writeln!(f)?;
                    write!(f, "  ")?; // Indent
                    for c in 0..cols {
                        if c > 0 {
                            write!(f, "  ")?;
                        }
                        // Column-major element access
                        let idx = r + c * rows;
                        write!(f, "{}", if self.data[idx] != 0 { 1 } else { 0 })?;
                    }
                }
                Ok(())
            }
            _ => {
                if should_expand_nd_display(&self.shape) {
                    write_nd_pages(f, &self.shape, |f, idx| {
                        write!(f, "{}", if self.data[idx] != 0 { 1 } else { 0 })
                    })
                } else {
                    let dims: Vec<String> = self.shape.iter().map(|d| d.to_string()).collect();
                    write!(f, "{} logical array", dims.join("x"))
                }
            }
        }
    }
}
750
impl fmt::Display for CharArray {
    /// Render each row of characters on its own indented line.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for r in 0..self.rows {
            writeln!(f)?;
            write!(f, "  ")?; // Indent
            for c in 0..self.cols {
                // NOTE(review): indexing here is row-major (r * cols + c),
                // unlike the column-major (r + c * rows) used by the Tensor
                // and StringArray displays — confirm CharArray's intended
                // storage layout.
                let ch = self.data[r * self.cols + c];
                write!(f, "{ch}")?;
            }
        }
        Ok(())
    }
}
764
// From implementations for Value
//
// Integer primitives map onto the matching `IntValue` variant; floats,
// bools, strings, and tensors map onto their scalar `Value` variants.
impl From<i32> for Value {
    fn from(i: i32) -> Self {
        Value::Int(IntValue::I32(i))
    }
}
impl From<i64> for Value {
    fn from(i: i64) -> Self {
        Value::Int(IntValue::I64(i))
    }
}
impl From<u32> for Value {
    fn from(i: u32) -> Self {
        Value::Int(IntValue::U32(i))
    }
}
impl From<u64> for Value {
    fn from(i: u64) -> Self {
        Value::Int(IntValue::U64(i))
    }
}
impl From<i16> for Value {
    fn from(i: i16) -> Self {
        Value::Int(IntValue::I16(i))
    }
}
impl From<i8> for Value {
    fn from(i: i8) -> Self {
        Value::Int(IntValue::I8(i))
    }
}
impl From<u16> for Value {
    fn from(i: u16) -> Self {
        Value::Int(IntValue::U16(i))
    }
}
impl From<u8> for Value {
    fn from(i: u8) -> Self {
        Value::Int(IntValue::U8(i))
    }
}

impl From<f64> for Value {
    fn from(f: f64) -> Self {
        Value::Num(f)
    }
}

impl From<bool> for Value {
    fn from(b: bool) -> Self {
        Value::Bool(b)
    }
}

impl From<String> for Value {
    fn from(s: String) -> Self {
        Value::String(s)
    }
}

impl From<&str> for Value {
    fn from(s: &str) -> Self {
        Value::String(s.to_string())
    }
}

impl From<Tensor> for Value {
    fn from(m: Tensor) -> Self {
        Value::Tensor(m)
    }
}
836
837// Remove blanket From<Vec<Value>> to avoid losing shape information
838
// TryFrom implementations for extracting native types
impl TryFrom<&Value> for i32 {
    type Error = String;
    /// Extract an i32 from a numeric scalar.
    ///
    /// NOTE(review): both arms use `as` casts — i64→i32 wraps/truncates and
    /// f64→i32 saturates (NaN becomes 0); out-of-range inputs are NOT
    /// reported as errors. Confirm this lossy behavior is intended.
    fn try_from(v: &Value) -> Result<Self, Self::Error> {
        match v {
            Value::Int(i) => Ok(i.to_i64() as i32),
            Value::Num(n) => Ok(*n as i32),
            _ => Err(format!("cannot convert {v:?} to i32")),
        }
    }
}
850
851impl TryFrom<&Value> for f64 {
852    type Error = String;
853    fn try_from(v: &Value) -> Result<Self, Self::Error> {
854        match v {
855            Value::Num(n) => Ok(*n),
856            Value::Int(i) => Ok(i.to_f64()),
857            _ => Err(format!("cannot convert {v:?} to f64")),
858        }
859    }
860}
861
862impl TryFrom<&Value> for bool {
863    type Error = String;
864    fn try_from(v: &Value) -> Result<Self, Self::Error> {
865        match v {
866            Value::Bool(b) => Ok(*b),
867            Value::Int(i) => Ok(!i.is_zero()),
868            Value::Num(n) => Ok(*n != 0.0),
869            _ => Err(format!("cannot convert {v:?} to bool")),
870        }
871    }
872}
873
874impl TryFrom<&Value> for String {
875    type Error = String;
876    fn try_from(v: &Value) -> Result<Self, Self::Error> {
877        match v {
878            Value::String(s) => Ok(s.clone()),
879            Value::StringArray(sa) => {
880                if sa.data.len() == 1 {
881                    Ok(sa.data[0].clone())
882                } else {
883                    Err("cannot convert string array to scalar string".to_string())
884                }
885            }
886            Value::CharArray(ca) => {
887                // Convert full char array to one string if it is a single row; else error
888                if ca.rows == 1 {
889                    Ok(ca.data.iter().collect())
890                } else {
891                    Err("cannot convert multi-row char array to scalar string".to_string())
892                }
893            }
894            Value::Int(i) => Ok(i.to_i64().to_string()),
895            Value::Num(n) => Ok(n.to_string()),
896            Value::Bool(b) => Ok(b.to_string()),
897            _ => Err(format!("cannot convert {v:?} to String")),
898        }
899    }
900}
901
902impl TryFrom<&Value> for Tensor {
903    type Error = String;
904    fn try_from(v: &Value) -> Result<Self, Self::Error> {
905        match v {
906            Value::Tensor(m) => Ok(m.clone()),
907            _ => Err(format!("cannot convert {v:?} to Tensor")),
908        }
909    }
910}
911
912impl TryFrom<&Value> for Value {
913    type Error = String;
914    fn try_from(v: &Value) -> Result<Self, Self::Error> {
915        Ok(v.clone())
916    }
917}
918
919impl TryFrom<&Value> for Vec<Value> {
920    type Error = String;
921    fn try_from(v: &Value) -> Result<Self, Self::Error> {
922        match v {
923            Value::Cell(c) => Ok(c.data.iter().map(|p| (**p).clone()).collect()),
924            _ => Err(format!("cannot convert {v:?} to Vec<Value>")),
925        }
926    }
927}
928
929use serde::{Deserialize, Serialize};
930
/// Enhanced type system used throughout RunMat for HIR and builtin functions.
/// Designed to mirror `Value` variants for better type inference and LSP
/// support; `None` in any optional metadata field means unknown/dynamic.
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub enum Type {
    /// Integer number type
    Int,
    /// Floating-point number type
    Num,
    /// Boolean type
    Bool,
    /// Logical array type (N-D boolean array) with optional shape information
    Logical {
        /// Optional full shape; None means unknown/dynamic; individual dims can be omitted by using None
        shape: Option<Vec<Option<usize>>>,
    },
    /// String type
    String,
    /// Tensor type with optional shape information (column-major semantics in runtime)
    Tensor {
        /// Optional full shape; None means unknown/dynamic; individual dims can be omitted by using None
        shape: Option<Vec<Option<usize>>>,
    },
    /// Cell array type with optional element type information
    Cell {
        /// Optional element type (None means mixed/unknown)
        element_type: Option<Box<Type>>,
        /// Optional length (None means unknown/dynamic)
        length: Option<usize>,
    },
    /// Function type with parameter and return types
    Function {
        /// Parameter types
        params: Vec<Type>,
        /// Return type
        returns: Box<Type>,
    },
    /// Void type (no value)
    Void,
    /// Unknown type (for type inference)
    Unknown,
    /// Union type (multiple possible types)
    Union(Vec<Type>),
    /// Struct-like type with optional known field set (purely for inference)
    Struct {
        /// Optional set of known field names observed via control-flow (None = unknown fields)
        known_fields: Option<Vec<String>>, // kept sorted unique for deterministic Eq
    },
    /// Multiple return values captured as a list (internal destructuring helper)
    OutputList(Vec<Type>),
    /// Dataset handle with optional compile-time schema information
    /// (array name -> per-array metadata)
    DataDataset {
        arrays: Option<std::collections::BTreeMap<String, DataArrayTypeInfo>>,
    },
    /// Data array handle with optional dtype/shape metadata
    DataArray {
        dtype: Option<String>,
        shape: Option<Vec<Option<usize>>>,
        chunk_shape: Option<Vec<Option<usize>>>,
        codec: Option<String>,
    },
    /// Data transaction handle
    DataTransaction,
}
994
/// Compile-time metadata for a single named array inside a dataset schema;
/// every field is optional (`None` = unknown).
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct DataArrayTypeInfo {
    /// Element dtype name, if known.
    pub dtype: Option<String>,
    /// Full shape; an inner None marks an unknown dimension.
    pub shape: Option<Vec<Option<usize>>>,
    /// Chunking shape, if the array is chunked.
    pub chunk_shape: Option<Vec<Option<usize>>>,
    /// Compression codec name, if any.
    pub codec: Option<String>,
}
1002
1003impl Type {
1004    /// Create a tensor type with unknown shape
1005    pub fn tensor() -> Self {
1006        Type::Tensor { shape: None }
1007    }
1008
1009    /// Create a logical type with unknown shape
1010    pub fn logical() -> Self {
1011        Type::Logical { shape: None }
1012    }
1013
1014    /// Create a logical type with known shape
1015    pub fn logical_with_shape(shape: Vec<usize>) -> Self {
1016        Type::Logical {
1017            shape: Some(shape.into_iter().map(Some).collect()),
1018        }
1019    }
1020
1021    /// Create a tensor type with known shape
1022    pub fn tensor_with_shape(shape: Vec<usize>) -> Self {
1023        Type::Tensor {
1024            shape: Some(shape.into_iter().map(Some).collect()),
1025        }
1026    }
1027
1028    /// Create a cell array type with unknown element type
1029    pub fn cell() -> Self {
1030        Type::Cell {
1031            element_type: None,
1032            length: None,
1033        }
1034    }
1035
1036    /// Create a cell array type with known element type
1037    pub fn cell_of(element_type: Type) -> Self {
1038        Type::Cell {
1039            element_type: Some(Box::new(element_type)),
1040            length: None,
1041        }
1042    }
1043
1044    /// Check if this type is compatible with another type
1045    pub fn is_compatible_with(&self, other: &Type) -> bool {
1046        match (self, other) {
1047            (Type::Unknown, _) | (_, Type::Unknown) => true,
1048            (Type::Int, Type::Num) | (Type::Num, Type::Int) => true, // Number compatibility
1049            (Type::Tensor { .. }, Type::Tensor { .. }) => true, // Tensor compatibility regardless of dims for now
1050            (Type::OutputList(a), Type::OutputList(b)) => a.len() == b.len(),
1051            (Type::DataDataset { .. }, Type::DataDataset { .. }) => true,
1052            (Type::DataArray { .. }, Type::DataArray { .. }) => true,
1053            (Type::DataTransaction, Type::DataTransaction) => true,
1054            (a, b) => a == b,
1055        }
1056    }
1057
1058    /// Get the most specific common type between two types
1059    pub fn unify(&self, other: &Type) -> Type {
1060        match (self, other) {
1061            (Type::Unknown, t) | (t, Type::Unknown) => t.clone(),
1062            (Type::Int, Type::Num) | (Type::Num, Type::Int) => Type::Num,
1063            (Type::Tensor { shape: a }, Type::Tensor { shape: b }) => {
1064                let a_norm = match a {
1065                    Some(dims) if dims.is_empty() => None,
1066                    _ => a.clone(),
1067                };
1068                let b_norm = match b {
1069                    Some(dims) if dims.is_empty() => None,
1070                    _ => b.clone(),
1071                };
1072                let a_unknown = a_norm
1073                    .as_ref()
1074                    .map(|dims| dims.iter().all(|d| d.is_none()))
1075                    .unwrap_or(true);
1076                let b_unknown = b_norm
1077                    .as_ref()
1078                    .map(|dims| dims.iter().all(|d| d.is_none()))
1079                    .unwrap_or(true);
1080                if a_norm == b_norm
1081                    || (!a_unknown && b_unknown)
1082                    || (a_norm.is_some() && b_norm.is_none())
1083                {
1084                    Type::Tensor { shape: a_norm }
1085                } else if (a_unknown && !b_unknown) || (a_norm.is_none() && b_norm.is_some()) {
1086                    Type::Tensor { shape: b_norm }
1087                } else {
1088                    Type::tensor()
1089                }
1090            }
1091            (Type::Logical { shape: a }, Type::Logical { shape: b }) => {
1092                let a_norm = match a {
1093                    Some(dims) if dims.is_empty() => None,
1094                    _ => a.clone(),
1095                };
1096                let b_norm = match b {
1097                    Some(dims) if dims.is_empty() => None,
1098                    _ => b.clone(),
1099                };
1100                let a_unknown = a_norm
1101                    .as_ref()
1102                    .map(|dims| dims.iter().all(|d| d.is_none()))
1103                    .unwrap_or(true);
1104                let b_unknown = b_norm
1105                    .as_ref()
1106                    .map(|dims| dims.iter().all(|d| d.is_none()))
1107                    .unwrap_or(true);
1108                if a_norm == b_norm
1109                    || (!a_unknown && b_unknown)
1110                    || (a_norm.is_some() && b_norm.is_none())
1111                {
1112                    Type::Logical { shape: a_norm }
1113                } else if (a_unknown && !b_unknown) || (a_norm.is_none() && b_norm.is_some()) {
1114                    Type::Logical { shape: b_norm }
1115                } else {
1116                    Type::logical()
1117                }
1118            }
1119            (Type::Struct { known_fields: a }, Type::Struct { known_fields: b }) => match (a, b) {
1120                (None, None) => Type::Struct { known_fields: None },
1121                (Some(ka), None) | (None, Some(ka)) => Type::Struct {
1122                    known_fields: Some(ka.clone()),
1123                },
1124                (Some(ka), Some(kb)) => {
1125                    let mut set: std::collections::BTreeSet<String> = ka.iter().cloned().collect();
1126                    set.extend(kb.iter().cloned());
1127                    Type::Struct {
1128                        known_fields: Some(set.into_iter().collect()),
1129                    }
1130                }
1131            },
1132            (Type::OutputList(a), Type::OutputList(b)) => {
1133                if a.len() == b.len() {
1134                    let items = a
1135                        .iter()
1136                        .zip(b.iter())
1137                        .map(|(lhs, rhs)| lhs.unify(rhs))
1138                        .collect();
1139                    Type::OutputList(items)
1140                } else {
1141                    Type::OutputList(vec![Type::Unknown; a.len().max(b.len())])
1142                }
1143            }
1144            (Type::DataDataset { arrays: a }, Type::DataDataset { arrays: b }) => {
1145                let merged = match (a, b) {
1146                    (None, None) => None,
1147                    (Some(sa), None) | (None, Some(sa)) => Some(sa.clone()),
1148                    (Some(sa), Some(sb)) => {
1149                        let mut out = sa.clone();
1150                        for (name, right) in sb {
1151                            out.entry(name.clone())
1152                                .and_modify(|left| {
1153                                    *left = unify_array_type_info(left, right);
1154                                })
1155                                .or_insert_with(|| right.clone());
1156                        }
1157                        Some(out)
1158                    }
1159                };
1160                Type::DataDataset { arrays: merged }
1161            }
1162            (
1163                Type::DataArray {
1164                    dtype: ad,
1165                    shape: ashp,
1166                    chunk_shape: ach,
1167                    codec: ac,
1168                },
1169                Type::DataArray {
1170                    dtype: bd,
1171                    shape: bshp,
1172                    chunk_shape: bch,
1173                    codec: bc,
1174                },
1175            ) => Type::DataArray {
1176                dtype: ad.clone().or_else(|| bd.clone()),
1177                shape: unify_optional_dims(ashp, bshp),
1178                chunk_shape: unify_optional_dims(ach, bch),
1179                codec: ac.clone().or_else(|| bc.clone()),
1180            },
1181            (Type::DataTransaction, Type::DataTransaction) => Type::DataTransaction,
1182            (a, b) if a == b => a.clone(),
1183            _ => Type::Union(vec![self.clone(), other.clone()]),
1184        }
1185    }
1186
1187    /// Infer type from a Value
1188    pub fn from_value(value: &Value) -> Type {
1189        match value {
1190            Value::Int(_) => Type::Int,
1191            Value::Num(_) => Type::Num,
1192            Value::Complex(_, _) => Type::Num, // treat as numeric double (complex) in type system for now
1193            Value::Bool(_) => Type::Bool,
1194            Value::LogicalArray(arr) => Type::Logical {
1195                shape: Some(arr.shape.iter().map(|&d| Some(d)).collect()),
1196            },
1197            Value::String(_) => Type::String,
1198            Value::StringArray(_sa) => {
1199                // Model as Cell of String for type system for now
1200                Type::cell_of(Type::String)
1201            }
1202            Value::Tensor(t) => Type::Tensor {
1203                shape: Some(t.shape.iter().map(|&d| Some(d)).collect()),
1204            },
1205            Value::ComplexTensor(t) => Type::Tensor {
1206                shape: Some(t.shape.iter().map(|&d| Some(d)).collect()),
1207            },
1208            Value::Cell(cells) => {
1209                if cells.data.is_empty() {
1210                    Type::cell()
1211                } else {
1212                    // Infer element type from first element
1213                    let element_type = Type::from_value(&cells.data[0]);
1214                    Type::Cell {
1215                        element_type: Some(Box::new(element_type)),
1216                        length: Some(cells.data.len()),
1217                    }
1218                }
1219            }
1220            Value::GpuTensor(h) => Type::Tensor {
1221                shape: Some(h.shape.iter().map(|&d| Some(d)).collect()),
1222            },
1223            Value::Object(_) => Type::Unknown,
1224            Value::HandleObject(_) => Type::Unknown,
1225            Value::Listener(_) => Type::Unknown,
1226            Value::Struct(_) => Type::Struct { known_fields: None },
1227            Value::FunctionHandle(_) => Type::Function {
1228                params: vec![Type::Unknown],
1229                returns: Box::new(Type::Unknown),
1230            },
1231            Value::Closure(_) => Type::Function {
1232                params: vec![Type::Unknown],
1233                returns: Box::new(Type::Unknown),
1234            },
1235            Value::ClassRef(_) => Type::Unknown,
1236            Value::MException(_) => Type::Unknown,
1237            Value::CharArray(ca) => {
1238                // Treat as cell of char for type purposes; or a 2-D char matrix conceptually
1239                Type::Cell {
1240                    element_type: Some(Box::new(Type::String)),
1241                    length: Some(ca.rows * ca.cols),
1242                }
1243            }
1244            Value::OutputList(values) => {
1245                Type::OutputList(values.iter().map(Type::from_value).collect())
1246            }
1247        }
1248    }
1249}
1250
/// Merge two optional dimension lists: a missing side yields the other,
/// equal-length lists are merged dimension-by-dimension (mismatched dims
/// become `None`), and lists of differing lengths yield `None`.
fn unify_optional_dims(
    lhs: &Option<Vec<Option<usize>>>,
    rhs: &Option<Vec<Option<usize>>>,
) -> Option<Vec<Option<usize>>> {
    let (a, b) = match (lhs, rhs) {
        (None, None) => return None,
        (Some(only), None) | (None, Some(only)) => return Some(only.clone()),
        (Some(a), Some(b)) => (a, b),
    };
    if a == b {
        return Some(a.clone());
    }
    if a.len() != b.len() {
        return None;
    }
    let merged = a
        .iter()
        .zip(b)
        .map(|(x, y)| if x == y { *x } else { None })
        .collect();
    Some(merged)
}
1268
1269fn unify_array_type_info(lhs: &DataArrayTypeInfo, rhs: &DataArrayTypeInfo) -> DataArrayTypeInfo {
1270    DataArrayTypeInfo {
1271        dtype: lhs.dtype.clone().or_else(|| rhs.dtype.clone()),
1272        shape: unify_optional_dims(&lhs.shape, &rhs.shape),
1273        chunk_shape: unify_optional_dims(&lhs.chunk_shape, &rhs.chunk_shape),
1274        codec: lhs.codec.clone().or_else(|| rhs.codec.clone()),
1275    }
1276}
1277
/// A function value together with its captured environment.
#[derive(Debug, Clone, PartialEq)]
pub struct Closure {
    /// Name of the function this closure dispatches to.
    pub function_name: String,
    /// Captured values.
    pub captures: Vec<Value>,
}
1283
/// Acceleration metadata describing GPU-friendly characteristics of a builtin.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AccelTag {
    /// Single-operand operation.
    Unary,
    /// Elementwise operation.
    Elementwise,
    /// Reduction (e.g. across dimensions).
    Reduction,
    /// Matrix multiplication.
    MatMul,
    /// Transpose.
    Transpose,
    /// Array construction.
    ArrayConstruct,
}
1294
/// Control-flow type for builtins that may suspend or error.
pub type BuiltinControlFlow = runmat_async::RuntimeError;

/// Async result type for builtins: a pinned, boxed future resolving to a
/// `Value` or a `BuiltinControlFlow` error.
pub type BuiltinFuture = Pin<Box<dyn Future<Output = Result<Value, BuiltinControlFlow>> + 'static>>;
1300
/// Literal-argument information handed to context-aware type resolvers.
#[derive(Clone, Debug, Default)]
pub struct ResolveContext {
    /// Literal view of each call argument, as far as statically known.
    pub literal_args: Vec<LiteralValue>,
}

/// A statically-known literal argument value.
#[derive(Clone, Debug, PartialEq)]
pub enum LiteralValue {
    /// Numeric literal.
    Number(f64),
    /// Boolean literal.
    Bool(bool),
    /// String literal.
    String(String),
    /// Vector literal of further literals.
    Vector(Vec<LiteralValue>),
    /// Not statically known.
    Unknown,
}

impl ResolveContext {
    /// Build a context from the literal views of a call's arguments.
    pub fn new(literal_args: Vec<LiteralValue>) -> Self {
        Self { literal_args }
    }

    /// Numeric dimensions read from argument 0 onward.
    pub fn numeric_dims(&self) -> Vec<Option<usize>> {
        self.numeric_dims_from(0)
    }

    /// Numeric dimensions starting at `start`. A leading vector literal is
    /// taken as the whole dimension list; otherwise each argument contributes
    /// one dimension (`None` when non-numeric).
    pub fn numeric_dims_from(&self, start: usize) -> Vec<Option<usize>> {
        let rest: &[LiteralValue] = self.literal_args.get(start..).unwrap_or_default();
        match rest.first() {
            Some(LiteralValue::Vector(values)) => values
                .iter()
                .map(Self::numeric_dimension_from_literal)
                .collect(),
            _ => rest
                .iter()
                .map(Self::numeric_dimension_from_literal)
                .collect(),
        }
    }

    /// Lowercased string literal at `index`, if present.
    pub fn literal_string_at(&self, index: usize) -> Option<String> {
        if let Some(LiteralValue::String(value)) = self.literal_args.get(index) {
            Some(value.to_ascii_lowercase())
        } else {
            None
        }
    }

    /// Boolean literal at `index`, if present.
    pub fn literal_bool_at(&self, index: usize) -> Option<bool> {
        if let Some(LiteralValue::Bool(value)) = self.literal_args.get(index) {
            Some(*value)
        } else {
            None
        }
    }

    /// Cloned vector literal at `index`, if present.
    pub fn literal_vector_at(&self, index: usize) -> Option<Vec<LiteralValue>> {
        if let Some(LiteralValue::Vector(values)) = self.literal_args.get(index) {
            Some(values.clone())
        } else {
            None
        }
    }

    /// Flat vector literal at `index`, mapped to numeric dimensions.
    /// Nested vectors are rejected (`None`).
    pub fn numeric_vector_at(&self, index: usize) -> Option<Vec<Option<usize>>> {
        let LiteralValue::Vector(values) = self.literal_args.get(index)? else {
            return None;
        };
        let has_nested = values
            .iter()
            .any(|value| matches!(value, LiteralValue::Vector(_)));
        if has_nested {
            return None;
        }
        Some(
            values
                .iter()
                .map(Self::numeric_dimension_from_literal)
                .collect(),
        )
    }

    /// A finite, non-negative, (near-)integral number maps to `Some(dim)`;
    /// anything else maps to `None`. The 1e-9 tolerance absorbs float noise.
    fn numeric_dimension_from_literal(value: &LiteralValue) -> Option<usize> {
        let LiteralValue::Number(num) = value else {
            return None;
        };
        if !num.is_finite() {
            return None;
        }
        let rounded = num.round();
        if rounded >= 0.0 && (num - rounded).abs() <= 1e-9 {
            Some(rounded as usize)
        } else {
            None
        }
    }
}
1393
/// Unit tests covering `ResolveContext`'s literal-argument helpers.
#[cfg(test)]
mod resolve_context_tests {
    use super::{LiteralValue, ResolveContext};

    #[test]
    fn numeric_dims_reads_vector_literal() {
        let ctx = ResolveContext::new(vec![LiteralValue::Vector(vec![
            LiteralValue::Number(2.0),
            LiteralValue::Number(3.0),
        ])]);
        assert_eq!(ctx.numeric_dims(), vec![Some(2), Some(3)]);
    }

    #[test]
    fn numeric_dims_skips_non_numeric_entries() {
        let ctx = ResolveContext::new(vec![
            LiteralValue::Number(4.0),
            LiteralValue::String("like".to_string()),
            LiteralValue::Unknown,
        ]);
        assert_eq!(ctx.numeric_dims(), vec![Some(4), None, None]);
    }

    #[test]
    fn numeric_dims_prefers_vector_even_with_trailing_args() {
        let ctx = ResolveContext::new(vec![
            LiteralValue::Vector(vec![LiteralValue::Number(1.0), LiteralValue::Number(5.0)]),
            LiteralValue::String("like".to_string()),
        ]);
        assert_eq!(ctx.numeric_dims(), vec![Some(1), Some(5)]);
    }

    #[test]
    fn literal_string_is_lowercased() {
        let ctx = ResolveContext::new(vec![LiteralValue::String("OmItNaN".to_string())]);
        assert_eq!(ctx.literal_string_at(0), Some("omitnan".to_string()));
    }

    #[test]
    fn literal_bool_is_available() {
        let ctx = ResolveContext::new(vec![LiteralValue::Bool(true)]);
        assert_eq!(ctx.literal_bool_at(0), Some(true));
    }

    #[test]
    fn literal_vector_at_returns_clone() {
        let ctx = ResolveContext::new(vec![LiteralValue::Vector(vec![
            LiteralValue::Number(7.0),
            LiteralValue::Unknown,
        ])]);
        assert_eq!(
            ctx.literal_vector_at(0),
            Some(vec![LiteralValue::Number(7.0), LiteralValue::Unknown])
        );
    }

    #[test]
    fn numeric_vector_at_rejects_nested_vectors() {
        let ctx = ResolveContext::new(vec![LiteralValue::Vector(vec![LiteralValue::Vector(
            vec![LiteralValue::Number(1.0)],
        )])]);
        assert_eq!(ctx.numeric_vector_at(0), None);
    }
}
1458
/// Legacy resolver signature: argument types only.
pub type TypeResolver = fn(args: &[Type]) -> Type;
/// Context-aware resolver signature: argument types plus literal-argument context.
pub type TypeResolverWithContext = fn(args: &[Type], ctx: &ResolveContext) -> Type;

/// Which resolver flavor a builtin registered.
#[derive(Clone, Copy, Debug)]
pub enum TypeResolverKind {
    /// Resolver that only inspects argument types.
    Legacy(TypeResolver),
    /// Resolver that also receives the `ResolveContext`.
    WithContext(TypeResolverWithContext),
}
1467
/// Wrap a legacy (types-only) resolver as a `TypeResolverKind`.
pub fn type_resolver_kind(resolver: TypeResolver) -> TypeResolverKind {
    TypeResolverKind::Legacy(resolver)
}

/// Wrap a context-aware resolver as a `TypeResolverKind`.
pub fn type_resolver_kind_ctx(resolver: TypeResolverWithContext) -> TypeResolverKind {
    TypeResolverKind::WithContext(resolver)
}
1475
/// Simple builtin function definition using the unified type system
#[derive(Debug, Clone)]
pub struct BuiltinFunction {
    /// Builtin name as callable from user code.
    pub name: &'static str,
    /// Short human-readable description.
    pub description: &'static str,
    /// Documentation category.
    pub category: &'static str,
    /// Long-form documentation text.
    pub doc: &'static str,
    /// Usage examples text.
    pub examples: &'static str,
    /// Declared parameter types.
    pub param_types: Vec<Type>,
    /// Declared return type; used as-is when no `type_resolver` is registered.
    pub return_type: Type,
    /// Optional resolver that refines the return type from argument types
    /// (and, for the context-aware variant, from literal arguments).
    pub type_resolver: Option<TypeResolverKind>,
    /// Implementation; returns a boxed async future (`BuiltinFuture`).
    pub implementation: fn(&[Value]) -> BuiltinFuture,
    /// GPU-acceleration characteristics of this builtin.
    pub accel_tags: &'static [AccelTag],
    // NOTE(review): presumably marks side-effecting builtins that must not be
    // optimized away — confirm against the optimizer's use of this flag.
    pub is_sink: bool,
    /// When true, automatic echo of the result is suppressed
    /// (queried via `suppresses_auto_output`).
    pub suppress_auto_output: bool,
}
1492
1493impl BuiltinFunction {
1494    #[allow(clippy::too_many_arguments)]
1495    pub fn new(
1496        name: &'static str,
1497        description: &'static str,
1498        category: &'static str,
1499        doc: &'static str,
1500        examples: &'static str,
1501        param_types: Vec<Type>,
1502        return_type: Type,
1503        type_resolver: Option<TypeResolverKind>,
1504        implementation: fn(&[Value]) -> BuiltinFuture,
1505        accel_tags: &'static [AccelTag],
1506        is_sink: bool,
1507        suppress_auto_output: bool,
1508    ) -> Self {
1509        Self {
1510            name,
1511            description,
1512            category,
1513            doc,
1514            examples,
1515            param_types,
1516            return_type,
1517            type_resolver,
1518            implementation,
1519            accel_tags,
1520            is_sink,
1521            suppress_auto_output,
1522        }
1523    }
1524
1525    pub fn infer_return_type(&self, args: &[Type]) -> Type {
1526        self.infer_return_type_with_context(args, &ResolveContext::default())
1527    }
1528
1529    pub fn infer_return_type_with_context(&self, args: &[Type], ctx: &ResolveContext) -> Type {
1530        if let Some(resolver) = self.type_resolver {
1531            return match resolver {
1532                TypeResolverKind::Legacy(resolver) => resolver(args),
1533                TypeResolverKind::WithContext(resolver) => resolver(args, ctx),
1534            };
1535        }
1536        self.return_type.clone()
1537    }
1538}
1539
/// A constant value that can be accessed as a variable
#[derive(Clone)]
pub struct Constant {
    /// Variable name under which the constant is exposed.
    pub name: &'static str,
    /// The constant's runtime value.
    pub value: Value,
}
1546
/// Shape-rule helpers; implementation lives in `shape_rules.rs`.
pub mod shape_rules;
1548
1549impl std::fmt::Debug for Constant {
1550    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1551        write!(
1552            f,
1553            "Constant {{ name: {:?}, value: {:?} }}",
1554            self.name, self.value
1555        )
1556    }
1557}
1558
// Native targets gather compile-time registrations via `inventory`;
// wasm targets use the explicit `wasm_registry` module instead.
#[cfg(not(target_arch = "wasm32"))]
inventory::collect!(BuiltinFunction);
#[cfg(not(target_arch = "wasm32"))]
inventory::collect!(Constant);
1563
/// All registered builtin functions (native: collected via `inventory`).
#[cfg(not(target_arch = "wasm32"))]
pub fn builtin_functions() -> Vec<&'static BuiltinFunction> {
    inventory::iter::<BuiltinFunction>().collect()
}

/// All registered builtin functions (wasm: from the explicit registry).
#[cfg(target_arch = "wasm32")]
pub fn builtin_functions() -> Vec<&'static BuiltinFunction> {
    wasm_registry::builtin_functions()
}
1573
/// Lazily-initialized lookup table keyed by lowercased builtin name.
#[cfg(not(target_arch = "wasm32"))]
static BUILTIN_LOOKUP: OnceLock<HashMap<String, &'static BuiltinFunction>> = OnceLock::new();

/// Build (once) and return the lowercased-name lookup map. If two builtins
/// share a name, the later `insert` wins.
#[cfg(not(target_arch = "wasm32"))]
fn builtin_lookup_map() -> &'static HashMap<String, &'static BuiltinFunction> {
    BUILTIN_LOOKUP.get_or_init(|| {
        let mut map = HashMap::new();
        for func in builtin_functions() {
            map.insert(func.name.to_ascii_lowercase(), func);
        }
        map
    })
}

/// Case-insensitive builtin lookup (native: via the cached map).
#[cfg(not(target_arch = "wasm32"))]
pub fn builtin_function_by_name(name: &str) -> Option<&'static BuiltinFunction> {
    builtin_lookup_map()
        .get(&name.to_ascii_lowercase())
        .copied()
}
1594
/// Case-insensitive builtin lookup (wasm: linear scan over the registry).
#[cfg(target_arch = "wasm32")]
pub fn builtin_function_by_name(name: &str) -> Option<&'static BuiltinFunction> {
    wasm_registry::builtin_functions()
        .into_iter()
        .find(|f| f.name.eq_ignore_ascii_case(name))
}
1601
1602pub fn suppresses_auto_output(name: &str) -> bool {
1603    builtin_function_by_name(name)
1604        .map(|f| f.suppress_auto_output)
1605        .unwrap_or(false)
1606}
1607
/// All registered constants (native: collected via `inventory`).
#[cfg(not(target_arch = "wasm32"))]
pub fn constants() -> Vec<&'static Constant> {
    inventory::iter::<Constant>().collect()
}

/// All registered constants (wasm: from the explicit registry).
#[cfg(target_arch = "wasm32")]
pub fn constants() -> Vec<&'static Constant> {
    wasm_registry::constants()
}
1617
1618// ----------------------
1619// Builtin documentation metadata (optional, registered by macros)
1620// ----------------------
1621
/// Optional extended documentation metadata for a builtin (registered by macros).
#[derive(Debug)]
pub struct BuiltinDoc {
    /// Name of the builtin this documentation belongs to.
    pub name: &'static str,
    /// Documentation category.
    pub category: Option<&'static str>,
    /// One-line summary.
    pub summary: Option<&'static str>,
    /// Search keywords.
    pub keywords: Option<&'static str>,
    /// Description of error conditions.
    pub errors: Option<&'static str>,
    /// Related builtins.
    pub related: Option<&'static str>,
    /// Version in which the builtin was introduced.
    pub introduced: Option<&'static str>,
    /// Implementation/stability status.
    pub status: Option<&'static str>,
    /// Usage examples.
    pub examples: Option<&'static str>,
}
1634
#[cfg(not(target_arch = "wasm32"))]
inventory::collect!(BuiltinDoc);

/// All registered documentation entries (native: collected via `inventory`).
#[cfg(not(target_arch = "wasm32"))]
pub fn builtin_docs() -> Vec<&'static BuiltinDoc> {
    inventory::iter::<BuiltinDoc>().collect()
}

/// All registered documentation entries (wasm: from the explicit registry).
#[cfg(target_arch = "wasm32")]
pub fn builtin_docs() -> Vec<&'static BuiltinDoc> {
    wasm_registry::builtin_docs()
}
1647
1648// ----------------------
1649// Display implementations
1650// ----------------------
1651
/// Controls how numeric values are displayed in the console, mirroring MATLAB's `format` command.
///
/// The active mode is thread-local; see `set_display_format`/`get_display_format`.
#[derive(Debug, Clone, Copy, PartialEq, Default)]
pub enum FormatMode {
    /// 4 decimal places, fixed or scientific (MATLAB default).
    #[default]
    Short,
    /// 15 decimal places, fixed or scientific.
    Long,
    /// Always scientific notation, 4 decimal places.
    ShortE,
    /// Always scientific notation, 14 decimal places.
    LongE,
    /// Compact: shorter of fixed/scientific, 5 significant digits.
    ShortG,
    /// Compact: shorter of fixed/scientific, 15 significant digits.
    LongG,
    /// Rational approximation (p/q).
    Rational,
    /// IEEE 754 hexadecimal representation.
    Hex,
}
1673
// Per-thread display format (mirrors MATLAB `format`); defaults to `short`.
runmat_thread_local! {
    static DISPLAY_FORMAT: RefCell<FormatMode> = const { RefCell::new(FormatMode::Short) };
}

/// Set the thread-local numeric display format.
pub fn set_display_format(mode: FormatMode) {
    DISPLAY_FORMAT.with(|c| *c.borrow_mut() = mode);
}

/// Get the current thread-local numeric display format.
pub fn get_display_format() -> FormatMode {
    DISPLAY_FORMAT.with(|c| *c.borrow())
}
1685
1686/// Format a number using the current thread-local display format.
1687pub fn format_number(value: f64) -> String {
1688    if value.is_nan() {
1689        return "NaN".to_string();
1690    }
1691    if value.is_infinite() {
1692        return if value.is_sign_negative() {
1693            "-Inf"
1694        } else {
1695            "Inf"
1696        }
1697        .to_string();
1698    }
1699    let mode = get_display_format();
1700    if mode == FormatMode::Hex {
1701        return fmt_hex(value);
1702    }
1703    let v = if value == 0.0 { 0.0 } else { value };
1704    match mode {
1705        FormatMode::Short => fmt_short(v),
1706        FormatMode::Long => fmt_long(v),
1707        FormatMode::ShortE => fmt_sci(v, 4),
1708        FormatMode::LongE => fmt_sci(v, 14),
1709        FormatMode::ShortG => fmt_compact(v, 5),
1710        FormatMode::LongG => fmt_compact(v, 15),
1711        FormatMode::Rational => fmt_rational(v),
1712        FormatMode::Hex => unreachable!("hex mode handled before zero normalization"),
1713    }
1714}
1715
/// Reformat Rust's `e`-notation exponent into MATLAB style (`e+02`, `e-03`).
/// Strings without an exponent are returned unchanged.
fn matlab_exp(s: &str) -> String {
    match s.split_once('e') {
        Some((mantissa, exp_str)) => {
            let exp: i32 = exp_str.parse().unwrap_or(0);
            let sign = if exp < 0 { '-' } else { '+' };
            format!("{mantissa}e{sign}{:02}", exp.unsigned_abs())
        }
        None => s.to_string(),
    }
}
1727
1728fn fmt_sci(v: f64, dec: usize) -> String {
1729    if v == 0.0 {
1730        return format!("0.{:0>dec$}e+00", 0, dec = dec);
1731    }
1732    let s = format!("{v:.dec$e}");
1733    matlab_exp(&s)
1734}
1735
1736fn fmt_short(v: f64) -> String {
1737    let abs = v.abs();
1738    if abs == 0.0 {
1739        return "0".to_string();
1740    }
1741    if v.fract() == 0.0 && abs < 1e15 {
1742        return format!("{}", v as i64);
1743    }
1744    if (0.001..10000.0).contains(&abs) {
1745        format!("{:.4}", v)
1746    } else {
1747        fmt_sci(v, 4)
1748    }
1749}
1750
1751fn fmt_long(v: f64) -> String {
1752    let abs = v.abs();
1753    if abs == 0.0 {
1754        return "0".to_string();
1755    }
1756    if v.fract() == 0.0 && abs < 1e15 {
1757        return format!("{}", v as i64);
1758    }
1759    if (0.001..10000.0).contains(&abs) {
1760        format!("{:.15}", v)
1761    } else {
1762        fmt_sci(v, 14)
1763    }
1764}
1765
/// `format short g`/`long g`: keep `sig_digits` significant digits, choosing
/// fixed-point for magnitudes in [1e-4, 1e6) and scientific otherwise;
/// trailing zeros (and a dangling decimal point) are trimmed in both cases.
fn fmt_compact(v: f64, sig_digits: usize) -> String {
    let abs = v.abs();
    if abs == 0.0 {
        return "0".to_string();
    }
    let use_scientific = !(1e-4..1e6).contains(&abs);
    if use_scientific {
        // One leading digit before the point, so `sig_digits - 1` decimals.
        let dec = sig_digits - 1;
        let s = format!("{v:.dec$e}");
        // trim trailing zeros in mantissa then reformat exponent
        if let Some(e_pos) = s.find('e') {
            let exp_part = &s[e_pos..];
            let mut mantissa = s[..e_pos].to_string();
            if let Some(dot) = mantissa.find('.') {
                let mut end = mantissa.len();
                while end > dot + 1 && mantissa.as_bytes()[end - 1] == b'0' {
                    end -= 1;
                }
                // Drop the decimal point too when nothing remains after it.
                if mantissa.as_bytes()[end - 1] == b'.' {
                    end -= 1;
                }
                mantissa.truncate(end);
            }
            return matlab_exp(&format!("{mantissa}{exp_part}"));
        }
        return matlab_exp(&s);
    }
    // Fixed-point path: compute how many decimals preserve `sig_digits`
    // significant digits at this magnitude, then round at that precision.
    let exp10 = abs.log10().floor() as i32;
    let decimals = ((sig_digits as i32 - 1 - exp10).max(0)) as usize;
    let pow = 10f64.powi(decimals as i32);
    let rounded = (v * pow).round() / pow;
    let mut s = format!("{rounded:.decimals$}");
    if let Some(dot) = s.find('.') {
        let mut end = s.len();
        while end > dot + 1 && s.as_bytes()[end - 1] == b'0' {
            end -= 1;
        }
        if s.as_bytes()[end - 1] == b'.' {
            end -= 1;
        }
        s.truncate(end);
    }
    // Rounding can produce "-0"; normalize it to "0".
    if s.is_empty() || s == "-0" {
        s = "0".to_string();
    }
    s
}
1813
/// `format rational`: best small-denominator fraction `p/q` approximating `v`
/// via continued-fraction convergents (denominator capped at 1e6).
fn fmt_rational(v: f64) -> String {
    if v == 0.0 {
        return "0".to_string();
    }
    let negative = v < 0.0;
    let abs = v.abs();
    // Integral values need no denominator.
    if v.fract() == 0.0 && abs < 1e15 {
        return format!("{}", v as i64);
    }
    // Continued fraction convergents; stop at the first one within MATLAB's
    // 5e-7 relative tolerance (matches `format rational` behaviour for pi → 355/113).
    let tol = 5e-7 * abs;
    let max_d = 1_000_000i64;
    // (n0/d0, n1/d1) are consecutive convergents; n1/d1 is the current best.
    let mut n0: i64 = 1;
    let mut n1: i64 = abs.floor() as i64;
    let mut d0: i64 = 0;
    let mut d1: i64 = 1;
    let mut a = abs;
    let mut best_n = n1;
    let mut best_d = d1;
    for _ in 0..50 {
        if (abs - best_n as f64 / best_d as f64).abs() <= tol {
            break;
        }
        let f = a.fract();
        // A (near-)zero fractional part means the expansion has terminated.
        if f < 1e-10 {
            break;
        }
        a = 1.0 / f;
        let q = a.floor() as i64;
        // Standard convergent recurrence; bail out on i64 overflow.
        let Some(n2) = q.checked_mul(n1).and_then(|v| v.checked_add(n0)) else {
            break;
        };
        let Some(d2) = q.checked_mul(d1).and_then(|v| v.checked_add(d0)) else {
            break;
        };
        if d2 > max_d {
            break;
        }
        best_n = n2;
        best_d = d2;
        n0 = n1;
        n1 = n2;
        d0 = d1;
        d1 = d2;
    }
    let sign = if negative { "-" } else { "" };
    if best_d == 1 {
        format!("{sign}{best_n}")
    } else {
        format!("{sign}{best_n}/{best_d}")
    }
}
1867
/// `format hex`: the IEEE-754 bit pattern of `v` as 16 lowercase hex digits.
fn fmt_hex(v: f64) -> String {
    let bits = v.to_bits();
    format!("{bits:016x}")
}
1871
1872// -------- Exception type --------
/// MATLAB-style exception value carrying an identifier, a message, and a
/// (possibly empty) stack of frame descriptions.
#[derive(Debug, Clone, PartialEq)]
pub struct MException {
    /// Error identifier (MATLAB `component:mnemonic` style — TODO confirm format).
    pub identifier: String,
    /// Human-readable error message.
    pub message: String,
    /// Stack trace entries as strings.
    pub stack: Vec<String>,
}
1879
impl MException {
    /// Construct an exception with an empty stack trace.
    pub fn new(identifier: String, message: String) -> Self {
        Self {
            identifier,
            message,
            stack: Vec::new(),
        }
    }
}
1889
/// Reference to a GC-allocated object providing language handle semantics
#[derive(Debug, Clone)]
pub struct HandleRef {
    /// Class name of the referenced object.
    pub class_name: String,
    /// GC pointer to the underlying value; identity (not contents) defines equality.
    pub target: GcPtr<Value>,
    // Liveness flag. NOTE(review): toggled by runtime code outside this file —
    // presumably cleared when the handle is deleted; confirm at call sites.
    pub valid: bool,
}
1897
1898impl PartialEq for HandleRef {
1899    fn eq(&self, other: &Self) -> bool {
1900        let a = unsafe { self.target.as_raw() } as usize;
1901        let b = unsafe { other.target.as_raw() } as usize;
1902        a == b
1903    }
1904}
1905
/// Event listener handle for events
#[derive(Debug, Clone, PartialEq)]
pub struct Listener {
    /// Unique listener id (assigned by runtime code elsewhere).
    pub id: u64,
    /// Object whose events are observed.
    pub target: GcPtr<Value>,
    /// Name of the event this listener subscribes to.
    pub event_name: String,
    // Value invoked when the event fires — presumably a function handle or
    // closure; confirm against the event dispatch code.
    pub callback: GcPtr<Value>,
    /// Whether the listener currently fires (toggled externally).
    pub enabled: bool,
    /// Whether the listener is still valid (maintained externally).
    pub valid: bool,
}
1916
impl Listener {
    /// Returns the class name of the listener's target object, or an empty
    /// string when the target is neither an `Object` nor a `HandleObject`.
    pub fn class_name(&self) -> String {
        // SAFETY: relies on the GC keeping `target` alive while this listener
        // exists — that invariant is upheld outside this file.
        match unsafe { &*self.target.as_raw() } {
            Value::Object(o) => o.class_name.clone(),
            Value::HandleObject(h) => h.class_name.clone(),
            _ => String::new(),
        }
    }
}
1926
impl fmt::Display for Value {
    /// Human-readable, single-line rendering of a runtime value. Numeric
    /// scalars defer to `format_number` so output follows the active display
    /// format mode; array-like values delegate to their own `Display` impls.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Value::Int(i) => write!(f, "{}", i.to_i64()),
            Value::Num(n) => write!(f, "{}", format_number(*n)),
            // Complex scalar: drop a zero component entirely, and fold a
            // negative imaginary part's sign into the separator ("a-bi").
            Value::Complex(re, im) => {
                if *im == 0.0 {
                    write!(f, "{}", format_number(*re))
                } else if *re == 0.0 {
                    write!(f, "{}i", format_number(*im))
                } else if *im < 0.0 {
                    write!(f, "{}-{}i", format_number(*re), format_number(im.abs()))
                } else {
                    write!(f, "{}+{}i", format_number(*re), format_number(*im))
                }
            }
            // Logicals print as 1/0.
            Value::Bool(b) => write!(f, "{}", if *b { 1 } else { 0 }),
            Value::LogicalArray(la) => write!(f, "{la}"),
            Value::String(s) => write!(f, "'{s}'"),
            Value::StringArray(sa) => write!(f, "{sa}"),
            Value::CharArray(ca) => write!(f, "{ca}"),
            Value::Tensor(m) => write!(f, "{m}"),
            Value::ComplexTensor(m) => write!(f, "{m}"),
            Value::Cell(ca) => ca.fmt(f),

            // GPU tensors render a metadata summary only; device memory is not read.
            Value::GpuTensor(h) => write!(
                f,
                "GpuTensor(shape={:?}, device={}, buffer={})",
                h.shape, h.device_id, h.buffer_id
            ),
            Value::Object(obj) => write!(f, "{}(props={})", obj.class_name, obj.properties.len()),
            // Handle values include the raw target address for identity debugging.
            Value::HandleObject(h) => {
                let ptr = unsafe { h.target.as_raw() } as usize;
                write!(
                    f,
                    "<handle {} @0x{:x} valid={}>",
                    h.class_name, ptr, h.valid
                )
            }
            Value::Listener(l) => {
                let ptr = unsafe { l.target.as_raw() } as usize;
                write!(
                    f,
                    "<listener id={} {}@0x{:x} '{}' enabled={} valid={}>",
                    l.id,
                    l.class_name(),
                    ptr,
                    l.event_name,
                    l.enabled,
                    l.valid
                )
            }
            // Structs render as comma-separated key: value pairs; `{{`/`}}`
            // escape literal braces in the format string.
            Value::Struct(st) => {
                write!(f, "struct {{")?;
                for (i, (key, val)) in st.fields.iter().enumerate() {
                    if i > 0 {
                        write!(f, ", ")?;
                    }
                    write!(f, "{}: {}", key, val)?;
                }
                write!(f, "}}")
            }
            // Multiple-return bundles render as a bracketed list.
            Value::OutputList(values) => {
                write!(f, "[")?;
                for (i, value) in values.iter().enumerate() {
                    if i > 0 {
                        write!(f, ", ")?;
                    }
                    write!(f, "{}", value)?;
                }
                write!(f, "]")
            }
            Value::FunctionHandle(name) => write!(f, "@{name}"),
            Value::Closure(c) => write!(
                f,
                "<closure {} captures={}>",
                c.function_name,
                c.captures.len()
            ),
            Value::ClassRef(name) => write!(f, "<class {name}>"),
            Value::MException(e) => write!(
                f,
                "MException(identifier='{}', message='{}')",
                e.identifier, e.message
            ),
        }
    }
}
2015
impl fmt::Display for ComplexTensor {
    /// Renders vectors as `[a b c]`, matrices as `[row; row]`, and N-D tensors
    /// either page-by-page (small arrays) or as a shape summary. Storage is
    /// column-major: element (r, c) lives at `data[r + c * rows]`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.shape.len() {
            // 0-D / 1-D: a single space-separated row in brackets.
            0 | 1 => {
                write!(f, "[")?;
                for (i, (re, im)) in self.data.iter().enumerate() {
                    if i > 0 {
                        write!(f, " ")?;
                    }
                    // Reuse the complex-scalar formatting from Value's Display.
                    let s = Value::Complex(*re, *im).to_string();
                    write!(f, "{s}")?;
                }
                write!(f, "]")
            }
            // 2-D: rows separated by "; ", columns by a space.
            2 => {
                let rows = self.rows;
                let cols = self.cols;
                write!(f, "[")?;
                for r in 0..rows {
                    for c in 0..cols {
                        if c > 0 {
                            write!(f, " ")?;
                        }
                        // Column-major linear index.
                        let (re, im) = self.data[r + c * rows];
                        let s = Value::Complex(re, im).to_string();
                        write!(f, "{s}")?;
                    }
                    if r + 1 < rows {
                        write!(f, "; ")?;
                    }
                }
                write!(f, "]")
            }
            // N-D: page-by-page display when small enough, otherwise a summary.
            _ => {
                if should_expand_nd_display(&self.shape) {
                    write_nd_pages(f, &self.shape, |f, idx| {
                        let (re, im) = self.data[idx];
                        write!(f, "{}", Value::Complex(re, im))
                    })
                } else {
                    write!(f, "ComplexTensor(shape={:?})", self.shape)
                }
            }
        }
    }
}
2062
// Unit tests for the display/formatting helpers defined in this module.
#[cfg(test)]
mod display_tests {
    use super::{
        fmt_rational, format_number, set_display_format, ComplexTensor, FormatMode, LogicalArray,
        Tensor,
    };

    #[test]
    fn fmt_rational_large_value_with_tiny_fract_does_not_overflow() {
        // abs ~1e15 with a small fractional part: q*n1 would overflow i64 without
        // checked arithmetic.
        let result = std::panic::catch_unwind(|| fmt_rational(1_000_000_000_000_000.000_1));
        assert!(
            result.is_ok(),
            "fmt_rational panicked on large value with tiny fract"
        );

        // Negative counterpart.
        let result = std::panic::catch_unwind(|| fmt_rational(-1_000_000_000_000_000.000_1));
        assert!(
            result.is_ok(),
            "fmt_rational panicked on negative large value with tiny fract"
        );
    }

    #[test]
    fn tensor_nd_display_uses_page_headers() {
        // A 2x3x2 tensor should print one "(:, :, k) =" header per page.
        let tensor = Tensor::new(
            vec![1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
            vec![2, 3, 2],
        )
        .expect("tensor");
        let rendered = tensor.to_string();
        assert!(rendered.contains("(:, :, 1) ="));
        assert!(rendered.contains("(:, :, 2) ="));
        assert!(rendered.contains("  1  0  0"));
    }

    #[test]
    fn tensor_nd_display_falls_back_for_large_arrays() {
        // 4097 elements exceeds the expansion threshold, so only a summary prints.
        let tensor = Tensor::new(vec![0.0; 4097], vec![1, 1, 4097]).expect("tensor");
        assert_eq!(tensor.to_string(), "Tensor(shape=[1, 1, 4097])");
    }

    #[test]
    fn logical_nd_display_uses_headers_and_fallback_summary() {
        let logical =
            LogicalArray::new(vec![1, 0, 0, 1, 1, 0, 0, 1], vec![2, 2, 2]).expect("logical");
        let rendered = logical.to_string();
        assert!(rendered.contains("(:, :, 1) ="));
        assert!(rendered.contains("(:, :, 2) ="));

        let large = LogicalArray::new(vec![1; 4097], vec![1, 1, 4097]).expect("large logical");
        assert_eq!(large.to_string(), "1x1x4097 logical array");
    }

    #[test]
    fn complex_nd_display_uses_page_headers() {
        let complex = ComplexTensor::new(
            vec![(1.0, 0.0), (0.0, 1.0), (0.0, 0.0), (1.0, 0.0)],
            vec![2, 1, 2],
        )
        .expect("complex");
        let rendered = complex.to_string();
        assert!(rendered.contains("(:, :, 1) ="));
        assert!(rendered.contains("(:, :, 2) ="));
    }

    #[test]
    fn format_hex_preserves_negative_zero_sign_bit() {
        // Hex mode shows raw IEEE-754 bits, so -0.0 differs from 0.0.
        set_display_format(FormatMode::Hex);
        assert_eq!(format_number(-0.0), "8000000000000000");
        assert_eq!(format_number(0.0), "0000000000000000");
        // Restore the default so later tests are unaffected.
        set_display_format(FormatMode::Short);
    }
}
2139
/// Heterogeneous MATLAB cell array; elements are GC handles to arbitrary values.
#[derive(Debug, Clone, PartialEq)]
pub struct CellArray {
    /// Element handles in linear order (see `get` for the 2-D index mapping).
    pub data: Vec<GcPtr<Value>>,
    /// Full MATLAB-visible shape vector (column-major semantics).
    pub shape: Vec<usize>,
    /// Cached row count for 2-D interop: `shape[0]` for 2-D+ shapes, 1 for a
    /// 1-D shape, 0 for an empty shape (see `shape_rows_cols`).
    pub rows: usize,
    /// Cached column count for 2-D interop: `shape[1]` for 2-D+ shapes,
    /// `shape[0]` for a 1-D shape, 0 for an empty shape.
    pub cols: usize,
}
2150
impl CellArray {
    /// Builds a 2-D cell array from pre-allocated GC handles.
    pub fn new_handles(
        handles: Vec<GcPtr<Value>>,
        rows: usize,
        cols: usize,
    ) -> Result<Self, String> {
        Self::new_handles_with_shape(handles, vec![rows, cols])
    }

    /// Builds a cell array from GC handles and an arbitrary shape vector,
    /// validating that the handle count matches the shape's element count.
    pub fn new_handles_with_shape(
        handles: Vec<GcPtr<Value>>,
        shape: Vec<usize>,
    ) -> Result<Self, String> {
        // `total_len` returns None only when the dimension product overflows usize.
        let expected = total_len(&shape)
            .ok_or_else(|| "Cell data shape exceeds platform limits".to_string())?;
        if expected != handles.len() {
            return Err(format!(
                "Cell data length {} doesn't match shape {:?} ({} elements)",
                handles.len(),
                shape,
                expected
            ));
        }
        let (rows, cols) = shape_rows_cols(&shape);
        Ok(CellArray {
            data: handles,
            shape,
            rows,
            cols,
        })
    }

    /// Builds a 2-D cell array from owned values (see `new_with_shape`).
    pub fn new(data: Vec<Value>, rows: usize, cols: usize) -> Result<Self, String> {
        Self::new_with_shape(data, vec![rows, cols])
    }

    /// Builds a cell array from owned values, boxing each into a raw handle.
    pub fn new_with_shape(data: Vec<Value>, shape: Vec<usize>) -> Result<Self, String> {
        let expected = total_len(&shape)
            .ok_or_else(|| "Cell data shape exceeds platform limits".to_string())?;
        if expected != data.len() {
            return Err(format!(
                "Cell data length {} doesn't match shape {:?} ({} elements)",
                data.len(),
                shape,
                expected
            ));
        }
        // Note: data will be allocated into GC handles by callers (runtime/vm) to avoid builtins↔gc cycles
        let handles: Vec<GcPtr<Value>> = data
            .into_iter()
            .map(|v| unsafe { GcPtr::from_raw(Box::into_raw(Box::new(v))) })
            .collect();
        Self::new_handles_with_shape(handles, shape)
    }

    /// Fetches a clone of the element at (`row`, `col`), 0-based, or an error
    /// when either index is out of bounds.
    ///
    /// NOTE(review): indexing here is row-major (`row * cols + col`) while
    /// `shape` is documented as column-major and `ComplexTensor` indexes
    /// `r + c * rows` — confirm which layout cell data actually uses.
    pub fn get(&self, row: usize, col: usize) -> Result<Value, String> {
        if row >= self.rows || col >= self.cols {
            return Err(format!(
                "Cell index ({row}, {col}) out of bounds for {}x{} cell array",
                self.rows, self.cols
            ));
        }
        Ok((*self.data[row * self.cols + col]).clone())
    }
}
2216
/// Total element count implied by `shape` (product of all dimensions), or
/// `None` when the product overflows `usize`. An empty shape counts as zero
/// elements, mirroring `shape_rows_cols`'s (0, 0) convention.
fn total_len(shape: &[usize]) -> Option<usize> {
    match shape {
        [] => Some(0),
        dims => dims.iter().copied().try_fold(1usize, usize::checked_mul),
    }
}
2225
/// Collapses a shape vector to a 2-D (rows, cols) view: an empty shape maps to
/// (0, 0), a 1-D shape becomes a single row, and higher-rank shapes keep their
/// first two dimensions.
fn shape_rows_cols(shape: &[usize]) -> (usize, usize) {
    match shape {
        [] => (0, 0),
        [len] => (1, *len),
        [rows, cols, ..] => (*rows, *cols),
    }
}
2235
2236impl fmt::Display for CellArray {
2237    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2238        let dims: Vec<String> = self.shape.iter().map(|d| d.to_string()).collect();
2239        if self.shape.len() > 2 {
2240            return write!(f, "{} cell array", dims.join("x"));
2241        }
2242        write!(f, "{}x{} cell array", self.rows, self.cols)?;
2243        if self.rows == 0 || self.cols == 0 {
2244            return Ok(());
2245        }
2246        for r in 0..self.rows {
2247            writeln!(f)?;
2248            write!(f, "  ")?;
2249            for c in 0..self.cols {
2250                if c > 0 {
2251                    write!(f, "  ")?;
2252                }
2253                let value = self.get(r, c).unwrap_or_else(|_| Value::Num(f64::NAN));
2254                write!(f, "{{{value}}}")?;
2255            }
2256        }
2257        Ok(())
2258    }
2259}
2260
/// Instance of a user-defined class: a class name plus a bag of property values.
#[derive(Debug, Clone, PartialEq)]
pub struct ObjectInstance {
    pub class_name: String,
    pub properties: HashMap<String, Value>,
}
2266
2267impl ObjectInstance {
2268    pub fn new(class_name: String) -> Self {
2269        Self {
2270            class_name,
2271            properties: HashMap::new(),
2272        }
2273    }
2274
2275    pub fn is_class(&self, name: &str) -> bool {
2276        self.class_name == name
2277    }
2278}
2279
2280// -------- Class registry (scaffolding) --------
/// Access level for class members (MATLAB-style visibility).
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Access {
    /// Accessible from any code.
    Public,
    /// Restricted access; enforcement happens in runtime code elsewhere.
    Private,
}
2286
/// Property declaration belonging to a registered class.
#[derive(Debug, Clone)]
pub struct PropertyDef {
    pub name: String,
    /// Static (class-level) rather than per-instance property.
    pub is_static: bool,
    // MATLAB `Dependent` flag — presumably computed on access; evaluation is
    // handled by runtime code elsewhere.
    pub is_dependent: bool,
    pub get_access: Access,
    pub set_access: Access,
    /// Default value to use when none has been assigned, if any.
    pub default_value: Option<Value>,
}
2296
/// Method declaration belonging to a registered class.
#[derive(Debug, Clone)]
pub struct MethodDef {
    pub name: String,
    /// Static method (callable without an instance).
    pub is_static: bool,
    pub access: Access,
    pub function_name: String, // bound runtime builtin/user func name
}
2304
/// Registered class definition; looked up by name in `CLASS_REGISTRY`.
#[derive(Debug, Clone)]
pub struct ClassDef {
    pub name: String, // namespaced e.g. pkg.Point
    /// Fully-qualified parent class name for single inheritance, if any.
    pub parent: Option<String>,
    pub properties: HashMap<String, PropertyDef>,
    pub methods: HashMap<String, MethodDef>,
}
2312
2313use std::sync::Mutex;
2314
/// Global class registry, keyed by fully-qualified class name (e.g. `pkg.Point`).
static CLASS_REGISTRY: OnceLock<Mutex<HashMap<String, ClassDef>>> = OnceLock::new();
/// Storage for static property values, keyed by (owning class name, property name).
static STATIC_VALUES: OnceLock<Mutex<HashMap<(String, String), Value>>> = OnceLock::new();
2317
2318fn registry() -> &'static Mutex<HashMap<String, ClassDef>> {
2319    CLASS_REGISTRY.get_or_init(|| Mutex::new(HashMap::new()))
2320}
2321
2322pub fn register_class(def: ClassDef) {
2323    let mut m = registry().lock().unwrap();
2324    m.insert(def.name.clone(), def);
2325}
2326
2327pub fn get_class(name: &str) -> Option<ClassDef> {
2328    registry().lock().unwrap().get(name).cloned()
2329}
2330
2331/// Resolve a property through the inheritance chain, returning the property definition and
2332/// the name of the class where it was defined.
2333pub fn lookup_property(class_name: &str, prop: &str) -> Option<(PropertyDef, String)> {
2334    let reg = registry().lock().unwrap();
2335    let mut current = Some(class_name.to_string());
2336    let guard: Option<std::sync::MutexGuard<'_, std::collections::HashMap<String, ClassDef>>> =
2337        None;
2338    drop(guard);
2339    while let Some(name) = current {
2340        if let Some(cls) = reg.get(&name) {
2341            if let Some(p) = cls.properties.get(prop) {
2342                return Some((p.clone(), name));
2343            }
2344            current = cls.parent.clone();
2345        } else {
2346            break;
2347        }
2348    }
2349    None
2350}
2351
2352/// Resolve a method through the inheritance chain, returning the method definition and
2353/// the name of the class where it was defined.
2354pub fn lookup_method(class_name: &str, method: &str) -> Option<(MethodDef, String)> {
2355    let reg = registry().lock().unwrap();
2356    let mut current = Some(class_name.to_string());
2357    while let Some(name) = current {
2358        if let Some(cls) = reg.get(&name) {
2359            if let Some(m) = cls.methods.get(method) {
2360                return Some((m.clone(), name));
2361            }
2362            current = cls.parent.clone();
2363        } else {
2364            break;
2365        }
2366    }
2367    None
2368}
2369
2370fn static_values() -> &'static Mutex<HashMap<(String, String), Value>> {
2371    STATIC_VALUES.get_or_init(|| Mutex::new(HashMap::new()))
2372}
2373
2374pub fn get_static_property_value(class_name: &str, prop: &str) -> Option<Value> {
2375    static_values()
2376        .lock()
2377        .unwrap()
2378        .get(&(class_name.to_string(), prop.to_string()))
2379        .cloned()
2380}
2381
2382pub fn set_static_property_value(class_name: &str, prop: &str, value: Value) {
2383    static_values()
2384        .lock()
2385        .unwrap()
2386        .insert((class_name.to_string(), prop.to_string()), value);
2387}
2388
2389/// Set a static property, resolving the defining ancestor class for storage.
2390pub fn set_static_property_value_in_owner(
2391    class_name: &str,
2392    prop: &str,
2393    value: Value,
2394) -> Result<(), String> {
2395    if let Some((_p, owner)) = lookup_property(class_name, prop) {
2396        set_static_property_value(&owner, prop, value);
2397        Ok(())
2398    } else {
2399        Err(format!("Unknown static property '{class_name}.{prop}'"))
2400    }
2401}