runmat_runtime/arrays.rs

//! Array generation functions
//!
//! This module provides array generation functions such as linspace, logspace,
//! zeros, ones, and eye. These functions preallocate their output buffers for
//! performance and memory efficiency.

use runmat_builtins::{Tensor, Value};
use runmat_macros::runtime_builtin;
/// Generate linearly spaced vector
/// linspace(x1, x2, n) generates n points between x1 and x2 (inclusive)
#[runtime_builtin(
    name = "linspace",
    category = "array/creation",
    summary = "Linearly spaced vector.",
    examples = "x = linspace(0, 1, 5)  % [0 0.25 0.5 0.75 1]",
    keywords = "linspace,range,vector"
)]
fn linspace_builtin(x1: f64, x2: f64, n: i32) -> Result<Tensor, String> {
    if n < 1 {
        return Err("Number of points must be positive".to_string());
    }

    if n == 1 {
        // MATLAB convention: a single requested point is the endpoint x2.
        return Tensor::new_2d(vec![x2], 1, 1);
    }

    let n_usize = n as usize;
    let mut data = Vec::with_capacity(n_usize);

    if n == 2 {
        data.push(x1);
        data.push(x2);
    } else {
        let step = (x2 - x1) / ((n - 1) as f64);
        for i in 0..n_usize {
            data.push(x1 + (i as f64) * step);
        }
        // Ensure the last point is exactly x2 to avoid floating-point error
        data[n_usize - 1] = x2;
    }

    Tensor::new_2d(data, 1, n_usize)
}
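
// Worked example of the spacing above: linspace(0, 1, 5) computes
// step = (1 - 0) / (5 - 1) = 0.25 and yields [0, 0.25, 0.5, 0.75, 1],
// with the last element pinned to exactly x2.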

/// Generate logarithmically spaced vector
/// logspace(a, b, n) generates n points between 10^a and 10^b
#[runtime_builtin(
    name = "logspace",
    category = "array/creation",
    summary = "Logarithmically spaced vector.",
    examples = "x = logspace(1, 3, 3)  % [10 100 1000]"
)]
fn logspace_builtin(a: f64, b: f64, n: i32) -> Result<Tensor, String> {
    if n < 1 {
        return Err("Number of points must be positive".to_string());
    }

    let n_usize = n as usize;
    let mut data = Vec::with_capacity(n_usize);

    if n == 1 {
        data.push(10.0_f64.powf(b));
    } else {
        let log_step = (b - a) / ((n - 1) as f64);
        for i in 0..n_usize {
            let log_val = a + (i as f64) * log_step;
            data.push(10.0_f64.powf(log_val));
        }
    }

    Tensor::new_2d(data, 1, n_usize)
}
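
// Worked example: logspace(1, 3, 3) steps the exponent by (3 - 1) / 2 = 1,
// producing 10^1, 10^2, 10^3 = [10 100 1000].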

/// Generate zeros tensor with arbitrary dimensions
/// zeros(d1, d2, ..., dk) creates a k-D tensor of zeros
#[runtime_builtin(
    name = "zeros",
    category = "array/creation",
    summary = "Array of all zeros.",
    examples = "A = zeros(2,3)",
    keywords = "zeros,allocate,create",
    related = "ones,eye"
)]
fn zeros_var_builtin(rest: Vec<Value>) -> Result<Tensor, String> {
    if rest.is_empty() {
        return Err("zeros: expected at least 1 dimension".to_string());
    }
    let mut dims: Vec<usize> = Vec::with_capacity(rest.len());
    for v in &rest {
        let n: f64 = v.try_into()?;
        if n < 0.0 {
            return Err("Matrix dimensions must be non-negative".to_string());
        }
        dims.push(n as usize);
    }
    // Semantics: zeros(n) -> n x n (2-D). For >1 args, use them as provided.
    if dims.len() == 1 {
        dims = vec![dims[0], dims[0]];
    }
    Ok(Tensor::zeros(dims))
}
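
// The square-matrix shorthand above mirrors MATLAB: zeros(3) allocates a 3x3
// tensor (9 elements), while zeros(2, 3, 4) allocates a 2x3x4 tensor
// (24 elements) using the dimensions exactly as given.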

#[cfg(test)]
fn zeros_builtin(m: i32, n: i32) -> Result<Tensor, String> {
    if m < 0 || n < 0 {
        return Err("Matrix dimensions must be non-negative".to_string());
    }
    Ok(Tensor::zeros(vec![m as usize, n as usize]))
}

/// Generate ones tensor with arbitrary dimensions
#[runtime_builtin(
    name = "ones",
    category = "array/creation",
    summary = "Array of all ones.",
    examples = "B = ones(3)",
    keywords = "ones,allocate,create",
    related = "zeros,eye"
)]
fn ones_var_builtin(rest: Vec<Value>) -> Result<Tensor, String> {
    if rest.is_empty() {
        return Err("ones: expected at least 1 dimension".to_string());
    }
    let mut dims: Vec<usize> = Vec::with_capacity(rest.len());
    for v in &rest {
        let n: f64 = v.try_into()?;
        if n < 0.0 {
            return Err("Matrix dimensions must be non-negative".to_string());
        }
        dims.push(n as usize);
    }
    if dims.len() == 1 {
        dims = vec![dims[0], dims[0]];
    }
    Ok(Tensor::ones(dims))
}

#[cfg(test)]
fn ones_builtin(m: i32, n: i32) -> Result<Tensor, String> {
    if m < 0 || n < 0 {
        return Err("Matrix dimensions must be non-negative".to_string());
    }
    Ok(Tensor::ones(vec![m as usize, n as usize]))
}

/// Generate identity matrix
/// eye(n) creates an n×n identity matrix
#[runtime_builtin(
    name = "eye",
    category = "array/creation",
    summary = "Identity matrix.",
    examples = "I = eye(3)",
    related = "zeros,ones"
)]
fn eye_builtin(n: i32) -> Result<Tensor, String> {
    if n < 0 {
        return Err("Matrix size must be non-negative".to_string());
    }
    let n_usize = n as usize;
    let mut data = vec![0.0; n_usize * n_usize];

    // Set diagonal elements to 1
    for i in 0..n_usize {
        data[i * n_usize + i] = 1.0;
    }

    Tensor::new_2d(data, n_usize, n_usize)
}
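
// In the flat buffer the (i, i) diagonal entry sits at index i * n + i, so
// eye(3) writes ones at indices 0, 4, and 8, the same positions checked in
// `test_eye` below.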

/// Generate random matrix with uniform distribution [0,1)
/// rand(m, n) creates an m×n matrix of random numbers
#[runtime_builtin(name = "rand")]
fn rand_builtin(m: i32, n: i32) -> Result<Tensor, String> {
    if m < 0 || n < 0 {
        return Err("Matrix dimensions must be non-negative".to_string());
    }

    let rows = m as usize;
    let cols = n as usize;
    let total_elements = rows * cols;

    // Use a simple linear congruential generator for reproducible results.
    // This is not cryptographically secure but suitable for basic mathematical
    // operations. The seed lives behind a Mutex (the same pattern `randn` uses
    // below) so concurrent callers cannot race on the shared state.
    use std::sync::Mutex;
    use std::sync::OnceLock;

    static SEED: OnceLock<Mutex<u64>> = OnceLock::new();
    let seed_mutex = SEED.get_or_init(|| Mutex::new(1));
    let mut seed_guard = seed_mutex
        .lock()
        .map_err(|_| "Failed to acquire RNG lock")?;

    let mut data = Vec::with_capacity(total_elements);
    for _ in 0..total_elements {
        *seed_guard = seed_guard.wrapping_mul(1103515245).wrapping_add(12345);
        let random_val = ((*seed_guard >> 16) & 0x7fff) as f64 / 32768.0;
        data.push(random_val);
    }

    Tensor::new_2d(data, rows, cols)
}
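
// The multiplier 1103515245 and increment 12345 are the classic constants from
// the ANSI C reference rand() implementation; bits 16..30 of the state give an
// integer in [0, 32768), so dividing by 32768.0 yields a value in [0, 1).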

/// Generate matrix filled with specific value
/// fill(value, m, n) creates an m×n matrix filled with value
#[runtime_builtin(name = "fill")]
fn fill_builtin(value: f64, m: i32, n: i32) -> Result<Tensor, String> {
    if m < 0 || n < 0 {
        return Err("Matrix dimensions must be non-negative".to_string());
    }

    let rows = m as usize;
    let cols = n as usize;
    let data = vec![value; rows * cols];

    Tensor::new_2d(data, rows, cols)
}

/// Generate random matrix with normal distribution (mean=0, std=1)
/// randn(m, n) creates an m×n matrix of normally distributed random numbers
#[runtime_builtin(name = "randn")]
fn randn_builtin(m: i32, n: i32) -> Result<Tensor, String> {
    if m < 0 || n < 0 {
        return Err("Matrix dimensions must be non-negative".to_string());
    }

    let rows = m as usize;
    let cols = n as usize;
    let total_elements = rows * cols;

    // Simple approximation of a normal distribution via the central limit
    // theorem: sum 12 uniform samples and subtract 6. Each U(0,1) draw has
    // mean 1/2 and variance 1/12, so the sum has mean 6 and variance 1,
    // and the shifted sum approximates N(0, 1).
    use std::sync::Mutex;
    use std::sync::OnceLock;

    static SEED: OnceLock<Mutex<u64>> = OnceLock::new();
    let seed_mutex = SEED.get_or_init(|| Mutex::new(1));

    let mut data = Vec::with_capacity(total_elements);

    for _ in 0..total_elements {
        let mut seed_guard = seed_mutex
            .lock()
            .map_err(|_| "Failed to acquire RNG lock")?;
        let mut sum = 0.0;

        // Generate 12 uniform random numbers using the linear congruential generator
        for _ in 0..12 {
            *seed_guard = seed_guard.wrapping_mul(1103515245).wrapping_add(12345);
            let uniform = ((*seed_guard >> 16) & 0x7fff) as f64 / 32768.0;
            sum += uniform;
        }

        // Apply central limit theorem transformation: N(0,1) ≈ sum(12 uniforms) - 6
        let normal_val = sum - 6.0;
        data.push(normal_val);
    }

    Tensor::new_2d(data, rows, cols)
}
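
// Hedged sketch (not used by `randn` above): the Box-Muller transform turns two
// independent uniforms, with u1 in (0, 1], into one exact N(0, 1) sample and
// could replace the 12-sum approximation if accurate tails ever matter.
#[allow(dead_code)]
fn box_muller_sample(u1: f64, u2: f64) -> f64 {
    // sqrt(-2 ln u1) is the radius and 2*pi*u2 the angle of a standard
    // bivariate normal point; its x-coordinate is a standard normal sample.
    (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos()
}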

/// Get the length of the largest dimension of a matrix
/// length(X) returns the size of the largest dimension of matrix X
#[runtime_builtin(name = "length")]
fn length_builtin(matrix: Tensor) -> Result<f64, String> {
    Ok(matrix.rows().max(matrix.cols()) as f64)
}

/// Generate range vector
/// range(start, step, stop) creates a vector from start to stop with given step
#[runtime_builtin(name = "range")]
fn range_builtin(start: f64, step: f64, stop: f64) -> Result<Tensor, String> {
    if step == 0.0 {
        return Err("Step size cannot be zero".to_string());
    }

    if (step > 0.0 && start > stop) || (step < 0.0 && start < stop) {
        // Empty range
        return Tensor::new_2d(vec![], 1, 0);
    }

    let mut data = Vec::new();
    let mut current = start;

    if step > 0.0 {
        while current <= stop + f64::EPSILON {
            data.push(current);
            current += step;
        }
    } else {
        while current >= stop - f64::EPSILON {
            data.push(current);
            current += step;
        }
    }

    let len = data.len();
    Tensor::new_2d(data, 1, len)
}

/// Generate meshgrid for 2D plotting
/// [X, Y] = meshgrid(x, y) creates 2D coordinate arrays
#[runtime_builtin(name = "meshgrid")]
fn meshgrid_builtin(x: Tensor, y: Tensor) -> Result<Tensor, String> {
    // Simplified implementation: only the X coordinate matrix is built and
    // returned. A full implementation would return the Y matrix as well.
    if x.rows() != 1 && x.cols() != 1 {
        return Err("Input x must be a vector".to_string());
    }
    if y.rows() != 1 && y.cols() != 1 {
        return Err("Input y must be a vector".to_string());
    }

    let x_vec = &x.data;
    let y_vec = &y.data;
    let nx = x_vec.len();
    let ny = y_vec.len();

    // Create X matrix (the x vector repeated as a row for each y value)
    let mut x_data = Vec::with_capacity(nx * ny);
    for _ in 0..ny {
        x_data.extend_from_slice(x_vec);
    }

    Tensor::new_2d(x_data, ny, nx)
}
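
// A hedged sketch of the missing Y grid (not wired into the builtin above):
// each row of Y repeats one element of y, i.e. Y[i][j] = y[i], mirroring how
// the X matrix above repeats the whole x vector once per row.
#[allow(dead_code)]
fn meshgrid_y_sketch(y_vec: &[f64], nx: usize) -> Result<Tensor, String> {
    let ny = y_vec.len();
    let mut y_data = Vec::with_capacity(nx * ny);
    for &yv in y_vec {
        // Repeat the current y value across an entire row of the grid.
        for _ in 0..nx {
            y_data.push(yv);
        }
    }
    Tensor::new_2d(y_data, ny, nx)
}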

/// Create a range vector (equivalent to start:end or start:step:end)
pub fn create_range(start: f64, step: Option<f64>, end: f64) -> Result<Value, String> {
    let step = step.unwrap_or(1.0);

    if step == 0.0 {
        return Err("Range step cannot be zero".to_string());
    }

    let mut values = Vec::new();

    if step > 0.0 {
        let mut current = start;
        while current <= end + f64::EPSILON {
            values.push(current);
            current += step;
        }
    } else {
        let mut current = start;
        while current >= end - f64::EPSILON {
            values.push(current);
            current += step;
        }
    }

    if values.is_empty() {
        // Return an empty matrix for ranges that produce no values
        return Ok(Value::Tensor(Tensor::new(vec![], vec![0, 0])?));
    }

    // Create a row vector (1 x n)
    let cols = values.len();
    let matrix = Tensor::new_2d(values, 1, cols)?;
    Ok(Value::Tensor(matrix))
}
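
// Example of the colon-operator semantics above: create_range(1.0, None, 5.0)
// reproduces 1:5 as the row vector [1 2 3 4 5], and create_range(5.0, Some(-1.0), 1.0)
// reproduces 5:-1:1; see the sketch test at the end of the tests module below.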

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_linspace() {
        let result = linspace_builtin(0.0, 10.0, 11).unwrap();
        assert_eq!(result.cols, 11);
        assert_eq!(result.rows, 1);
        assert!((result.data[0] - 0.0).abs() < f64::EPSILON);
        assert!((result.data[10] - 10.0).abs() < f64::EPSILON);
        assert!((result.data[5] - 5.0).abs() < f64::EPSILON);
    }

    #[test]
    fn test_linspace_single_point() {
        let result = linspace_builtin(5.0, 10.0, 1).unwrap();
        assert_eq!(result.cols, 1);
        assert_eq!(result.data[0], 10.0);
    }

    #[test]
    fn test_logspace() {
        let result = logspace_builtin(1.0, 3.0, 3).unwrap();
        assert_eq!(result.cols, 3);
        assert!((result.data[0] - 10.0).abs() < f64::EPSILON);
        assert!((result.data[1] - 100.0).abs() < f64::EPSILON);
        assert!((result.data[2] - 1000.0).abs() < f64::EPSILON);
    }

    #[test]
    fn test_zeros() {
        let result = zeros_builtin(2, 3).unwrap();
        assert_eq!(result.rows, 2);
        assert_eq!(result.cols, 3);
        assert!(result.data.iter().all(|&x| x == 0.0));
    }

    #[test]
    fn test_ones() {
        let result = ones_builtin(2, 2).unwrap();
        assert_eq!(result.rows, 2);
        assert_eq!(result.cols, 2);
        assert!(result.data.iter().all(|&x| x == 1.0));
    }

    #[test]
    fn test_eye() {
        let result = eye_builtin(3).unwrap();
        assert_eq!(result.rows, 3);
        assert_eq!(result.cols, 3);
        // Check diagonal elements
        assert_eq!(result.data[0], 1.0); // (0,0)
        assert_eq!(result.data[4], 1.0); // (1,1)
        assert_eq!(result.data[8], 1.0); // (2,2)
        // Check off-diagonal elements
        assert_eq!(result.data[1], 0.0); // (0,1)
        assert_eq!(result.data[3], 0.0); // (1,0)
    }

    #[test]
    fn test_fill() {
        let result = fill_builtin(std::f64::consts::PI, 2, 2).unwrap();
        assert_eq!(result.rows, 2);
        assert_eq!(result.cols, 2);
        assert!(result
            .data
            .iter()
            .all(|&x| (x - std::f64::consts::PI).abs() < f64::EPSILON));
    }

    #[test]
    fn test_range() {
        let result = range_builtin(1.0, 1.0, 5.0).unwrap();
        assert_eq!(result.cols, 5);
        assert_eq!(result.data, vec![1.0, 2.0, 3.0, 4.0, 5.0]);
    }

    #[test]
    fn test_range_negative_step() {
        let result = range_builtin(5.0, -1.0, 1.0).unwrap();
        assert_eq!(result.cols, 5);
        assert_eq!(result.data, vec![5.0, 4.0, 3.0, 2.0, 1.0]);
    }

    #[test]
    fn test_rand_dimensions() {
        let result = rand_builtin(3, 4).unwrap();
        assert_eq!(result.rows, 3);
        assert_eq!(result.cols, 4);
        assert_eq!(result.data.len(), 12);
        // Check that values are in [0, 1)
        assert!(result.data.iter().all(|&x| (0.0..1.0).contains(&x)));
    }

    #[test]
    fn test_error_cases() {
        assert!(linspace_builtin(0.0, 1.0, -1).is_err());
        assert!(zeros_builtin(-1, 5).is_err());
        assert!(eye_builtin(-1).is_err());
        assert!(range_builtin(1.0, 0.0, 5.0).is_err());
    }
}