1#![allow(dead_code)]
2use core_mumu::parser::types::Value;
3
4pub fn cpu_matrix_multiply(a: &Vec<Vec<f64>>, b: &Vec<Vec<f64>>) -> Result<Value, String> {
6 if a.is_empty() || b.is_empty() {
7 return Err("gpu:multiply => empty matrix".to_string());
8 }
9 let m = a.len();
10 let k1 = a[0].len();
11 if a.iter().any(|r| r.len() != k1) {
12 return Err("gpu:multiply => left matrix has ragged rows".to_string());
13 }
14 let k2 = b.len();
15 let n = b[0].len();
16 if b.iter().any(|r| r.len() != n) {
17 return Err("gpu:multiply => right matrix has ragged rows".to_string());
18 }
19 if k1 != k2 {
20 return Err(format!(
21 "Dimension mismatch: left is {}×{}, right is {}×{}",
22 m, k1, k2, n
23 ));
24 }
25
26 let mut out = vec![vec![0.0f64; n]; m];
27 for i in 0..m {
28 for j in 0..n {
29 let mut acc = 0.0;
30 for x in 0..k1 {
31 acc += a[i][x] * b[x][j];
32 }
33 out[i][j] = acc;
34 }
35 }
36 Ok(Value::Float2DArray(out))
37}
38
39pub fn cpu_inverse_2x2(t: &Vec<Vec<f64>>) -> Result<Value, String> {
42 if t.len() != 2 || t[0].len() != 2 || t[1].len() != 2 {
43 return Err("gpu:inverse => only 2x2 supported".to_string());
44 }
45 let a = t[0][0];
46 let b = t[0][1];
47 let c = t[1][0];
48 let d = t[1][1];
49 let det = a * d - b * c;
50 if det.abs() < 1e-12 {
51 return Err("Det is near zero => cannot invert".to_string());
52 }
53 let invd = 1.0 / det;
54 let ra = d * invd;
55 let rb = -b * invd;
56 let rc = -c * invd;
57 let rd = a * invd;
58 Ok(Value::Float2DArray(vec![vec![ra, rb], vec![rc, rd]]))
59}
60
61pub fn cpu_transpose_2d(t: &Vec<Vec<f64>>) -> Result<Value, String> {
63 if t.is_empty() {
64 return Ok(Value::Float2DArray(vec![]));
65 }
66 let rows = t.len();
67 let cols = t[0].len();
68 if t.iter().any(|r| r.len() != cols) {
69 return Err("Rank must be 2 (ragged rows not allowed)".to_string());
70 }
71
72 let mut out = vec![vec![0.0f64; rows]; cols];
73 for r in 0..rows {
74 for c in 0..cols {
75 out[c][r] = t[r][c];
76 }
77 }
78 Ok(Value::Float2DArray(out))
79}
80
81pub fn cpu_reduce_sum(t: &Vec<Vec<f64>>) -> Result<Value, String> {
83 let mut accum = 0f64;
84 for row in t {
85 for &x in row {
86 accum += x;
87 }
88 }
89 Ok(Value::Float(accum))
90}
91
92pub fn cpu_scale_tensor(t: &Vec<Vec<f64>>, scalar: f64) -> Result<Value, String> {
94 let mut out = Vec::with_capacity(t.len());
95 for row in t {
96 let mut r2 = Vec::with_capacity(row.len());
97 for &x in row {
98 r2.push(x * scalar);
99 }
100 out.push(r2);
101 }
102 Ok(Value::Float2DArray(out))
103}
104
/// GPU entry point for matrix multiplication.
/// NOTE(review): currently a pure CPU fallback — `_ctx` is unused and the
/// work is delegated to `cpu_matrix_multiply`; presumably a Vulkan compute
/// dispatch will replace this body. TODO confirm.
pub fn perform_compute_multiply(
    _ctx: &crate::vulkan::AshVulkanContext,
    a: &Vec<Vec<f64>>,
    b: &Vec<Vec<f64>>,
) -> Result<Value, String> {
    cpu_matrix_multiply(a, b)
}
114
/// GPU entry point for elementwise addition.
/// NOTE(review): CPU fallback — `_ctx` is unused; delegates to the shared
/// elementwise helper with `+`. Shape validation (if any) happens inside
/// `elementwise_op`, which is not visible from this file.
pub fn perform_compute_add(
    _ctx: &crate::vulkan::AshVulkanContext,
    a: &Vec<Vec<f64>>,
    b: &Vec<Vec<f64>>,
) -> Result<Value, String> {
    super::operators::elementwise::elementwise_op(a, b, |x, y| x + y)
}
122
/// GPU entry point for elementwise subtraction (`a - b`).
/// NOTE(review): CPU fallback — `_ctx` is unused; delegates to the shared
/// elementwise helper with `-`.
pub fn perform_compute_subtract(
    _ctx: &crate::vulkan::AshVulkanContext,
    a: &Vec<Vec<f64>>,
    b: &Vec<Vec<f64>>,
) -> Result<Value, String> {
    super::operators::elementwise::elementwise_op(a, b, |x, y| x - y)
}
130
/// GPU entry point for the Hadamard (elementwise) product.
/// NOTE(review): CPU fallback — `_ctx` is unused; delegates to the shared
/// elementwise helper with `*`.
pub fn perform_compute_hadamard(
    _ctx: &crate::vulkan::AshVulkanContext,
    a: &Vec<Vec<f64>>,
    b: &Vec<Vec<f64>>,
) -> Result<Value, String> {
    super::operators::elementwise::elementwise_op(a, b, |x, y| x * y)
}
138
/// GPU entry point for 2D transpose.
/// NOTE(review): CPU fallback — `_ctx` is unused; delegates to
/// `cpu_transpose_2d`.
pub fn perform_compute_transpose(
    _ctx: &crate::vulkan::AshVulkanContext,
    t: &Vec<Vec<f64>>,
) -> Result<Value, String> {
    cpu_transpose_2d(t)
}