runmat_runtime/mathematics.rs

//! Mathematical functions
//!
//! This module provides mathematical functions covering trigonometric,
//! logarithmic, exponential, hyperbolic, and other operations. The `Value`-based
//! builtins (`sin`, `cos`, `tan`, `abs`) accept scalars, tensors, complex values,
//! logical arrays, and char arrays; the remaining builtins operate on `f64` scalars.

use runmat_builtins::{Tensor, Value};
use runmat_macros::runtime_builtin;

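// Note (assumption, not spelled out in this file): the `#[runtime_builtin]`
// attribute from `runmat_macros` is what exposes each function to MATLAB code
// under the `name` given in its attribute list; the extra metadata (`summary`,
// `examples`, `keywords`, `related`) is presumably used for documentation and
// discovery rather than dispatch.
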
// Trigonometric functions - full MATLAB-compatible variants over Value.
//
// Complex identities used by the helpers below:
//   sin(a + bi) = sin(a)cosh(b) + i*cos(a)sinh(b)
//   cos(a + bi) = cos(a)cosh(b) - i*sin(a)sinh(b)
fn sin_complex(re: f64, im: f64) -> (f64, f64) {
    (re.sin() * im.cosh(), re.cos() * im.sinh())
}

fn cos_complex(re: f64, im: f64) -> (f64, f64) {
    (re.cos() * im.cosh(), -(re.sin() * im.sinh()))
}

// Complex division (a_re + i*a_im) / (b_re + i*b_im); used for tan(z) = sin(z) / cos(z).
fn div_complex(a_re: f64, a_im: f64, b_re: f64, b_im: f64) -> (f64, f64) {
    let denom = b_re * b_re + b_im * b_im;
    (
        (a_re * b_re + a_im * b_im) / denom,
        (a_im * b_re - a_re * b_im) / denom,
    )
}

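// Quick sanity check of the identities (illustrative only): sin(i) = i*sinh(1),
// so sin_complex(0.0, 1.0) yields (0.0, 1.0f64.sinh()) ~= (0.0, 1.1752).
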
#[runtime_builtin(
    name = "sin",
    category = "math/trigonometry",
    summary = "Sine of input in radians (element-wise).",
    examples = "y = sin(pi/2)",
    keywords = "sine,trig,angle",
    related = "cos,tan"
)]
fn sin_builtin(x: Value) -> Result<Value, String> {
    match x {
        Value::Num(n) => Ok(Value::Num(n.sin())),
        Value::Int(i) => Ok(Value::Num(i.to_f64().sin())),
        Value::Tensor(t) => {
            let data: Vec<f64> = t.data.iter().map(|&v| v.sin()).collect();
            Ok(Value::Tensor(Tensor::new(data, t.shape.clone())?))
        }
        Value::Complex(re, im) => {
            let (r, i) = sin_complex(re, im);
            Ok(Value::Complex(r, i))
        }
        Value::ComplexTensor(ct) => {
            let out: Vec<(f64, f64)> = ct
                .data
                .iter()
                .map(|&(re, im)| sin_complex(re, im))
                .collect();
            Ok(Value::ComplexTensor(runmat_builtins::ComplexTensor::new(
                out,
                ct.shape.clone(),
            )?))
        }
        Value::LogicalArray(la) => {
            let data: Vec<f64> = la
                .data
                .iter()
                .map(|&b| if b != 0 { 1.0f64.sin() } else { 0.0f64.sin() })
                .collect();
            Ok(Value::Tensor(Tensor::new(data, la.shape.clone())?))
        }
        Value::CharArray(ca) => {
            let data: Vec<f64> = ca.data.iter().map(|&ch| (ch as u32 as f64).sin()).collect();
            Ok(Value::Tensor(Tensor::new(data, vec![ca.rows, ca.cols])?))
        }
        Value::String(_) | Value::StringArray(_) => Err("sin: expected numeric input".to_string()),
        Value::GpuTensor(_) => Err("sin: unsupported for gpuArray".to_string()),
        other => Err(format!("sin: unsupported input {other:?}")),
    }
}

#[runtime_builtin(
    name = "cos",
    category = "math/trigonometry",
    summary = "Cosine of input in radians (element-wise).",
    examples = "y = cos(0)",
    keywords = "cosine,trig,angle",
    related = "sin,tan"
)]
fn cos_builtin(x: Value) -> Result<Value, String> {
    match x {
        Value::Num(n) => Ok(Value::Num(n.cos())),
        Value::Int(i) => Ok(Value::Num(i.to_f64().cos())),
        Value::Tensor(t) => {
            let data: Vec<f64> = t.data.iter().map(|&v| v.cos()).collect();
            Ok(Value::Tensor(Tensor::new(data, t.shape.clone())?))
        }
        Value::Complex(re, im) => {
            let (r, i) = cos_complex(re, im);
            Ok(Value::Complex(r, i))
        }
        Value::ComplexTensor(ct) => {
            let out: Vec<(f64, f64)> = ct
                .data
                .iter()
                .map(|&(re, im)| cos_complex(re, im))
                .collect();
            Ok(Value::ComplexTensor(runmat_builtins::ComplexTensor::new(
                out,
                ct.shape.clone(),
            )?))
        }
        Value::LogicalArray(la) => {
            let data: Vec<f64> = la
                .data
                .iter()
                .map(|&b| if b != 0 { 1.0f64.cos() } else { 0.0f64.cos() })
                .collect();
            Ok(Value::Tensor(Tensor::new(data, la.shape.clone())?))
        }
        Value::CharArray(ca) => {
            let data: Vec<f64> = ca.data.iter().map(|&ch| (ch as u32 as f64).cos()).collect();
            Ok(Value::Tensor(Tensor::new(data, vec![ca.rows, ca.cols])?))
        }
        Value::String(_) | Value::StringArray(_) => Err("cos: expected numeric input".to_string()),
        Value::GpuTensor(_) => Err("cos: unsupported for gpuArray".to_string()),
        other => Err(format!("cos: unsupported input {other:?}")),
    }
}

#[runtime_builtin(
    name = "tan",
    category = "math/trigonometry",
    summary = "Tangent of input in radians (element-wise).",
    examples = "y = tan(pi/4)",
    keywords = "tangent,trig,angle",
    related = "sin,cos"
)]
fn tan_builtin(x: Value) -> Result<Value, String> {
    match x {
        Value::Num(n) => Ok(Value::Num(n.tan())),
        Value::Int(i) => Ok(Value::Num(i.to_f64().tan())),
        Value::Tensor(t) => {
            let data: Vec<f64> = t.data.iter().map(|&v| v.tan()).collect();
            Ok(Value::Tensor(Tensor::new(data, t.shape.clone())?))
        }
        Value::Complex(re, im) => {
            let (sr, si) = sin_complex(re, im);
            let (cr, ci) = cos_complex(re, im);
            let (r, i) = div_complex(sr, si, cr, ci);
            Ok(Value::Complex(r, i))
        }
        Value::ComplexTensor(ct) => {
            let out: Vec<(f64, f64)> = ct
                .data
                .iter()
                .map(|&(re, im)| {
                    let (sr, si) = sin_complex(re, im);
                    let (cr, ci) = cos_complex(re, im);
                    div_complex(sr, si, cr, ci)
                })
                .collect();
            Ok(Value::ComplexTensor(runmat_builtins::ComplexTensor::new(
                out,
                ct.shape.clone(),
            )?))
        }
        Value::LogicalArray(la) => {
            let data: Vec<f64> = la
                .data
                .iter()
                .map(|&b| if b != 0 { 1.0f64.tan() } else { 0.0f64.tan() })
                .collect();
            Ok(Value::Tensor(Tensor::new(data, la.shape.clone())?))
        }
        Value::CharArray(ca) => {
            let data: Vec<f64> = ca.data.iter().map(|&ch| (ch as u32 as f64).tan()).collect();
            Ok(Value::Tensor(Tensor::new(data, vec![ca.rows, ca.cols])?))
        }
        Value::String(_) | Value::StringArray(_) => Err("tan: expected numeric input".to_string()),
        Value::GpuTensor(_) => Err("tan: unsupported for gpuArray".to_string()),
        other => Err(format!("tan: unsupported input {other:?}")),
    }
}

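// Inverse trigonometric functions (scalar f64 variants)
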
#[runtime_builtin(name = "asin")]
fn asin_builtin(x: f64) -> Result<f64, String> {
    if !(-1.0..=1.0).contains(&x) {
        Err("Input must be in range [-1, 1] for asin".to_string())
    } else {
        Ok(x.asin())
    }
}

#[runtime_builtin(name = "acos")]
fn acos_builtin(x: f64) -> Result<f64, String> {
    if !(-1.0..=1.0).contains(&x) {
        Err("Input must be in range [-1, 1] for acos".to_string())
    } else {
        Ok(x.acos())
    }
}

#[runtime_builtin(name = "atan")]
fn atan_builtin(x: f64) -> Result<f64, String> {
    Ok(x.atan())
}

#[runtime_builtin(name = "atan2")]
fn atan2_builtin(y: f64, x: f64) -> Result<f64, String> {
    Ok(y.atan2(x))
}

// Hyperbolic functions

#[runtime_builtin(name = "sinh")]
fn sinh_builtin(x: f64) -> Result<f64, String> {
    Ok(x.sinh())
}

#[runtime_builtin(name = "cosh")]
fn cosh_builtin(x: f64) -> Result<f64, String> {
    Ok(x.cosh())
}

#[runtime_builtin(name = "tanh")]
fn tanh_builtin(x: f64) -> Result<f64, String> {
    Ok(x.tanh())
}

#[runtime_builtin(name = "asinh")]
fn asinh_builtin(x: f64) -> Result<f64, String> {
    Ok(x.asinh())
}

#[runtime_builtin(name = "acosh")]
fn acosh_builtin(x: f64) -> Result<f64, String> {
    if x < 1.0 {
        Err("Input must be >= 1 for acosh".to_string())
    } else {
        Ok(x.acosh())
    }
}

#[runtime_builtin(name = "atanh")]
fn atanh_builtin(x: f64) -> Result<f64, String> {
    if x <= -1.0 || x >= 1.0 {
        Err("Input must be in range (-1, 1) for atanh".to_string())
    } else {
        Ok(x.atanh())
    }
}

// Logarithmic and exponential functions

#[runtime_builtin(name = "ln")]
fn ln_builtin(x: f64) -> Result<f64, String> {
    if x <= 0.0 {
        Err("Input must be positive for ln".to_string())
    } else {
        Ok(x.ln())
    }
}

#[runtime_builtin(name = "log2")]
fn log2_builtin(x: f64) -> Result<f64, String> {
    if x <= 0.0 {
        Err("Input must be positive for log2".to_string())
    } else {
        Ok(x.log2())
    }
}

#[runtime_builtin(name = "log10")]
fn log10_builtin(x: f64) -> Result<f64, String> {
    if x <= 0.0 {
        Err("Input must be positive for log10".to_string())
    } else {
        Ok(x.log10())
    }
}

#[runtime_builtin(name = "exp2")]
fn exp2_builtin(x: f64) -> Result<f64, String> {
    Ok(2.0_f64.powf(x))
}

#[runtime_builtin(name = "exp10")]
fn exp10_builtin(x: f64) -> Result<f64, String> {
    Ok(10.0_f64.powf(x))
}

#[runtime_builtin(name = "pow")]
fn pow_builtin(base: f64, exponent: f64) -> Result<f64, String> {
    Ok(base.powf(exponent))
}

// Basic math functions
#[runtime_builtin(name = "abs")]
fn abs_runtime_builtin(x: Value) -> Result<Value, String> {
    match x {
        Value::Num(n) => Ok(Value::Num(n.abs())),
        Value::Int(i) => Ok(Value::Num(i.to_f64().abs())),
        Value::Tensor(t) => {
            let data: Vec<f64> = t.data.iter().map(|&v| v.abs()).collect();
            // Preserve the input shape, as the other element-wise builtins do.
            Ok(Value::Tensor(Tensor::new(data, t.shape.clone())?))
        }
        Value::Complex(re, im) => {
            // Complex magnitude; `hypot` avoids overflow for large components.
            Ok(Value::Num(re.hypot(im)))
        }
        Value::ComplexTensor(ct) => {
            let data: Vec<f64> = ct.data.iter().map(|&(re, im)| re.hypot(im)).collect();
            Ok(Value::Tensor(Tensor::new(data, ct.shape.clone())?))
        }
        Value::LogicalArray(la) => {
            let data: Vec<f64> = la
                .data
                .iter()
                .map(|&b| if b != 0 { 1.0 } else { 0.0 })
                .collect();
            let (rows, cols) = match la.shape.len() {
                0 => (0, 0),
                1 => (1, la.shape[0]),
                _ => (la.shape[0], la.shape[1]),
            };
            Ok(Value::Tensor(Tensor::new_2d(data, rows, cols)?))
        }
        other => Err(format!("abs: unsupported input {other:?}")),
    }
}

// Rounding and related functions

#[runtime_builtin(name = "round")]
fn round_builtin(x: f64) -> Result<f64, String> {
    Ok(x.round())
}

#[runtime_builtin(name = "floor")]
fn floor_builtin(x: f64) -> Result<f64, String> {
    Ok(x.floor())
}

#[runtime_builtin(name = "ceil")]
fn ceil_builtin(x: f64) -> Result<f64, String> {
    Ok(x.ceil())
}

#[runtime_builtin(name = "trunc")]
fn trunc_builtin(x: f64) -> Result<f64, String> {
    Ok(x.trunc())
}

#[runtime_builtin(name = "fract")]
fn fract_builtin(x: f64) -> Result<f64, String> {
    Ok(x.fract())
}

#[runtime_builtin(name = "sign")]
fn sign_builtin(x: f64) -> Result<f64, String> {
    if x > 0.0 {
        Ok(1.0)
    } else if x < 0.0 {
        Ok(-1.0)
    } else {
        Ok(0.0)
    }
}

// Complex number support (basic)

#[runtime_builtin(name = "real")]
fn real_builtin(x: f64) -> Result<f64, String> {
    Ok(x) // For real numbers, the real part is the number itself
}

#[runtime_builtin(name = "imag")]
fn imag_builtin(_x: f64) -> Result<f64, String> {
    Ok(0.0) // For real numbers, the imaginary part is always 0
}

#[runtime_builtin(name = "angle")]
fn angle_builtin(x: f64) -> Result<f64, String> {
    if x >= 0.0 {
        Ok(0.0)
    } else {
        Ok(std::f64::consts::PI)
    }
}

// Special functions

#[runtime_builtin(name = "gamma")]
fn gamma_builtin(x: f64) -> Result<f64, String> {
    // This implementation only handles positive real inputs.
    if x <= 0.0 {
        return Err("Input must be positive for gamma".to_string());
    }

    // Stirling's approximation for large x: gamma(x) ~= sqrt(2*pi/x) * (x/e)^x
    if x > 10.0 {
        let term1 = (2.0 * std::f64::consts::PI / x).sqrt();
        let term2 = (x / std::f64::consts::E).powf(x);
        return Ok(term1 * term2);
    }

    // For x < 1, use the recurrence gamma(x + 1) = x * gamma(x), i.e. gamma(x) = gamma(x + 1) / x
    if x < 1.0 {
        return Ok(gamma_builtin(x + 1.0)? / x);
    }

    // Crude approximation for 1 <= x <= 10: computes (x - 1)! by repeated
    // multiplication, which is exact for integer x but only approximate otherwise.
    let mut result = 1.0;
    let mut n = x;
    while n > 2.0 {
        n -= 1.0;
        result *= n;
    }

    Ok(result)
}

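// If more accuracy is ever needed across the positive axis, a common replacement
// is the Lanczos approximation. The sketch below is illustrative only -- it is not
// registered as a builtin -- and uses the widely published g = 7, n = 9
// coefficients; treat the exact constants as something to verify before relying on them.
#[allow(dead_code)]
fn gamma_lanczos(x: f64) -> f64 {
    const G: f64 = 7.0;
    const COEF: [f64; 9] = [
        0.999_999_999_999_809_93,
        676.520_368_121_885_1,
        -1_259.139_216_722_402_8,
        771.323_428_777_653_13,
        -176.615_029_162_140_59,
        12.507_343_278_686_905,
        -0.138_571_095_265_720_12,
        9.984_369_578_019_571_6e-6,
        1.505_632_735_149_311_6e-7,
    ];
    if x < 0.5 {
        // Reflection formula: gamma(x) * gamma(1 - x) = pi / sin(pi * x)
        std::f64::consts::PI / ((std::f64::consts::PI * x).sin() * gamma_lanczos(1.0 - x))
    } else {
        let z = x - 1.0;
        let mut acc = COEF[0];
        for (i, &c) in COEF.iter().enumerate().skip(1) {
            acc += c / (z + i as f64);
        }
        let t = z + G + 0.5;
        // gamma(z + 1) = sqrt(2*pi) * t^(z + 0.5) * e^(-t) * acc
        (2.0 * std::f64::consts::PI).sqrt() * t.powf(z + 0.5) * (-t).exp() * acc
    }
}
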
#[runtime_builtin(name = "factorial")]
fn factorial_builtin(n: i32) -> Result<f64, String> {
    if n < 0 {
        return Err("Factorial undefined for negative numbers".to_string());
    }

    // 170! is about 7.26e306, the largest factorial representable as an f64;
    // 171! exceeds f64::MAX.
    if n > 170 {
        return Err("Factorial overflow for n > 170".to_string());
    }

    let mut result = 1.0;
    for i in 2..=n {
        result *= i as f64;
    }

    Ok(result)
}

// Statistical functions

#[cfg(test)]
#[allow(dead_code)]
fn sum_builtin(matrix: Tensor) -> Result<f64, String> {
    Ok(matrix.data.iter().sum())
}

#[cfg(test)]
#[allow(dead_code)]
fn mean_builtin(matrix: Tensor) -> Result<f64, String> {
    if matrix.data.is_empty() {
        return Err("Cannot compute mean of empty matrix".to_string());
    }
    Ok(matrix.data.iter().sum::<f64>() / matrix.data.len() as f64)
}

#[runtime_builtin(name = "std")]
fn std_builtin(matrix: Tensor) -> Result<f64, String> {
    if matrix.data.len() < 2 {
        return Err("Need at least 2 elements to compute standard deviation".to_string());
    }

    // Sample standard deviation (N - 1 normalization), matching MATLAB's default.
    let mean = matrix.data.iter().sum::<f64>() / matrix.data.len() as f64;
    let variance = matrix.data.iter().map(|x| (x - mean).powi(2)).sum::<f64>()
        / (matrix.data.len() - 1) as f64;

    Ok(variance.sqrt())
}

#[runtime_builtin(name = "var")]
fn var_builtin(matrix: Tensor) -> Result<f64, String> {
    if matrix.data.len() < 2 {
        return Err("Need at least 2 elements to compute variance".to_string());
    }

    // Sample variance (N - 1 normalization), matching MATLAB's default.
    let mean = matrix.data.iter().sum::<f64>() / matrix.data.len() as f64;
    let variance = matrix.data.iter().map(|x| (x - mean).powi(2)).sum::<f64>()
        / (matrix.data.len() - 1) as f64;

    Ok(variance)
}

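// Worked example (illustrative): for [1 2 3 4] the mean is 2.5, the sum of squared
// deviations is 5.0, so var = 5.0 / 3 ~= 1.6667 and std = sqrt(5.0 / 3) ~= 1.2910.
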
// Unit tests for mathematics live under crates/runmat-runtime/tests/mathematics.rs