1use runmat_accelerate_api::{CovNormalization, CovRows, CovarianceOptions};
4use runmat_builtins::{Tensor, Value};
5use runmat_macros::runtime_builtin;
6
7use crate::builtins::common::gpu_helpers;
8use crate::builtins::common::spec::{
9 BroadcastSemantics, BuiltinFusionSpec, BuiltinGpuSpec, ConstantStrategy, GpuOpKind,
10 ProviderHook, ReductionNaN, ResidencyPolicy, ScalarType, ShapeRequirements,
11};
12use crate::builtins::common::tensor;
13use crate::builtins::stats::type_resolvers::cov_type;
14use crate::{build_runtime_error, BuiltinResult, RuntimeError};
15
/// Builtin name used for error tagging and spec registration.
const NAME: &str = "cov";

/// Build a `RuntimeError` attributed to the `cov` builtin.
fn builtin_error(message: impl Into<String>) -> RuntimeError {
    build_runtime_error(message).with_builtin(NAME).build()
}
21
/// GPU execution spec for `cov`. The provider hook is only exercised for the
/// unweighted, rows='all' case; everything else falls back to the CPU path
/// (see `cov_try_gpu`).
#[runmat_macros::register_gpu_spec(builtin_path = "crate::builtins::stats::summary::cov")]
pub const GPU_SPEC: BuiltinGpuSpec = BuiltinGpuSpec {
    name: "cov",
    op_kind: GpuOpKind::Custom("summary-stats"),
    supported_precisions: &[ScalarType::F32, ScalarType::F64],
    broadcast: BroadcastSemantics::None,
    provider_hooks: &[ProviderHook::Custom("covariance")],
    constant_strategy: ConstantStrategy::InlineLiteral,
    residency: ResidencyPolicy::NewHandle,
    nan_mode: ReductionNaN::Include,
    two_pass_threshold: None,
    workgroup_size: None,
    accepts_nan_mode: false,
    notes: "GPU execution is available when rows='all' and no weight vector is supplied; other cases fall back to the CPU path.",
};
37
/// Fusion spec for `cov`: not elementwise and not a simple reduction, so it is
/// declared as a fusion boundary.
#[runmat_macros::register_fusion_spec(builtin_path = "crate::builtins::stats::summary::cov")]
pub const FUSION_SPEC: BuiltinFusionSpec = BuiltinFusionSpec {
    name: "cov",
    shape: ShapeRequirements::Any,
    constant_strategy: ConstantStrategy::InlineLiteral,
    elementwise: None,
    reduction: None,
    emits_nan: true,
    notes: "The covariance builtin is treated as a fusion boundary and executes via dedicated kernels or the host reference.",
};
48
// Entry point registered by `runtime_builtin`: parse the overloaded argument
// list, try the GPU fast path, then fall back to the host implementation.
#[runtime_builtin(
    name = "cov",
    category = "stats/summary",
    summary = "Compute covariance matrices for vectors, matrices, or paired data sets.",
    keywords = "cov,covariance,statistics,weights,gpu",
    accel = "reduction",
    type_resolver(cov_type),
    builtin_path = "crate::builtins::stats::summary::cov"
)]
async fn cov_builtin(value: Value, rest: Vec<Value>) -> BuiltinResult<Value> {
    // Disambiguate the trailing arguments (second data set vs. weight vector,
    // normalization flag, rows option).
    let args = CovArgs::parse(value, rest)?;
    // `cov_try_gpu` yields Ok(None) whenever the GPU path does not apply.
    if let Some(result) = cov_try_gpu(&args).await? {
        return Ok(result);
    }
    cov_host(args).await
}
65
66pub fn cov_from_tensors(
68 left: Tensor,
69 right: Option<Tensor>,
70 rows: CovRows,
71 weight: CovWeightSpec,
72) -> BuiltinResult<Tensor> {
73 let matrix = combine_tensors(left, right)?;
74 if let CovWeightSpec::Vector(ref vec) = weight {
75 if matrix.rows != vec.len() {
76 return Err(builtin_error(format!(
77 "cov: weight vector must contain {} elements",
78 matrix.rows
79 )));
80 }
81 }
82 match rows {
83 CovRows::All => covariance_dense(&matrix, &weight),
84 CovRows::OmitRows => {
85 let (filtered, filtered_weight) = filter_complete_rows(&matrix, weight);
86 covariance_dense(&filtered, &filtered_weight)
87 }
88 CovRows::PartialRows => covariance_pairwise(&matrix, &weight),
89 }
90}
91
/// Parsed positional arguments for `cov`.
#[derive(Debug)]
struct CovArgs {
    // First data set (matrix or vector); may be host- or device-resident.
    first: Value,
    // Optional second data set (the `cov(x, y)` form).
    second: Option<Value>,
    // Normalization flag: unbiased (n-1) or biased (n).
    normalization: CovNormalization,
    // Row handling policy: 'all', 'omitrows', or 'partialrows'.
    rows: CovRows,
    // Optional per-row observation weights (the `cov(x, w)` form).
    weight_vector: Option<Value>,
}
100
impl CovArgs {
    /// Disambiguate the trailing positional arguments of `cov`.
    ///
    /// The arguments are heavily overloaded: an array may be a second data set
    /// or a weight vector, a scalar is the 0/1 normalization flag, and strings
    /// select the rows handling. Strategy:
    /// - string-ish values are parsed as a rows option;
    /// - the first array becomes `second_candidate`, and a further array is
    ///   unambiguously the weight vector;
    /// - scalars set the normalization flag (at most once, and never after a
    ///   weight vector has appeared).
    /// When only one extra array was supplied, `should_treat_as_weight`
    /// decides between the `cov(x, y)` and `cov(x, w)` readings.
    fn parse(first: Value, rest: Vec<Value>) -> BuiltinResult<Self> {
        let mut second_candidate: Option<Value> = None;
        let mut weight_candidate: Option<Value> = None;
        let mut normalization = CovNormalization::Unbiased;
        let mut normalization_explicit = false;
        let mut rows = CovRows::All;

        let iter = rest.into_iter();
        for arg in iter {
            match arg {
                // String-ish arguments select the rows handling policy.
                Value::String(_) | Value::StringArray(_) | Value::CharArray(_) => {
                    let key = tensor::value_to_string(&arg)
                        .ok_or_else(|| builtin_error("cov: expected string option"))?;
                    let lowered = key.trim().to_ascii_lowercase();
                    rows = parse_rows_option(&lowered)?;
                }
                // Arrays: first one is the second-data-set/weight candidate; a
                // second one is unambiguously the weight vector.
                Value::Tensor(_) | Value::LogicalArray(_) | Value::GpuTensor(_) => {
                    if second_candidate.is_none() {
                        second_candidate = Some(arg);
                    } else if weight_candidate.is_none() {
                        weight_candidate = Some(arg);
                    } else {
                        return Err(builtin_error("cov: too many array arguments"));
                    }
                }
                // Scalars set the normalization flag; it may not repeat, and a
                // weight vector implies the weighted normalization already.
                Value::Num(_) | Value::Int(_) | Value::Bool(_) => {
                    if normalization_explicit || weight_candidate.is_some() {
                        return Err(builtin_error(
                            "cov: normalization flag specified more than once",
                        ));
                    }
                    normalization = parse_normalization(arg)?;
                    normalization_explicit = true;
                }
                Value::ComplexTensor(_) => {
                    return Err(builtin_error("cov: complex inputs are not supported yet"));
                }
                other => {
                    return Err(builtin_error(format!(
                        "cov: unsupported argument type {:?}",
                        other
                    )));
                }
            }
        }

        // Two extra arrays: the roles are fixed (second data set + weights).
        if let Some(weight_array) = weight_candidate {
            return Ok(Self {
                first,
                second: second_candidate,
                normalization,
                rows,
                weight_vector: Some(weight_array),
            });
        }

        let mut second = second_candidate;
        let mut weight_vector: Option<Value> = None;

        // One extra array: decide between `cov(x, y)` and `cov(x, w)`.
        if let Some(candidate) = second.take() {
            if should_treat_as_weight(&first, &candidate, normalization_explicit, rows)? {
                weight_vector = Some(candidate);
            } else {
                second = Some(candidate);
            }
        }

        Ok(Self {
            first,
            second,
            normalization,
            rows,
            weight_vector,
        })
    }
}
179
/// How observations are weighted when forming the covariance.
#[derive(Debug, Clone)]
pub enum CovWeightSpec {
    /// Uniform weights; the flag selects the n-1 (unbiased) or n (biased)
    /// denominator.
    Scalar(CovNormalization),
    /// Explicit per-row weights (the MATLAB `cov(x, w)` form).
    Vector(Vec<f64>),
}
185
/// Attempt to run `cov` on the registered acceleration provider.
///
/// Returns `Ok(None)` — i.e. "fall back to the host path" — when rows != 'all'
/// or a weight vector is present (unsupported on GPU per `GPU_SPEC`), when no
/// provider is registered, when either input is not already device-resident,
/// or when the provider's covariance hook fails (error deliberately swallowed
/// so the host reference can take over).
async fn cov_try_gpu(args: &CovArgs) -> BuiltinResult<Option<Value>> {
    if args.rows != CovRows::All || args.weight_vector.is_some() {
        return Ok(None);
    }

    let provider = match runmat_accelerate_api::provider() {
        Some(p) => p,
        None => return Ok(None),
    };

    let first_handle = match &args.first {
        Value::GpuTensor(handle) => handle,
        _ => return Ok(None),
    };

    // A host-resident second argument forces the CPU path rather than an
    // implicit upload.
    let maybe_second_handle = match &args.second {
        Some(Value::GpuTensor(handle)) => Some(handle),
        Some(_) => return Ok(None),
        None => None,
    };

    let options = CovarianceOptions {
        normalization: args.normalization,
        rows: args.rows,
        has_weight_vector: false,
    };

    match provider
        .covariance(first_handle, maybe_second_handle, None, &options)
        .await
    {
        Ok(result) => Ok(Some(Value::GpuTensor(result))),
        Err(_) => Ok(None),
    }
}
221
222async fn cov_host(args: CovArgs) -> BuiltinResult<Value> {
223 let CovArgs {
224 first,
225 second,
226 normalization,
227 rows,
228 weight_vector,
229 } = args;
230
231 let left = value_to_tensor_gather(first).await?;
232 let right = match second {
233 Some(value) => Some(value_to_tensor_gather(value).await?),
234 None => None,
235 };
236
237 let weight_spec = if let Some(weight_value) = weight_vector {
238 let vector = value_to_weight_vector(weight_value, left.rows()).await?;
239 CovWeightSpec::Vector(vector)
240 } else {
241 CovWeightSpec::Scalar(normalization)
242 };
243
244 let tensor = cov_from_tensors(left, right, rows, weight_spec)?;
245 Ok(Value::Tensor(tensor))
246}
247
/// Materialize a data argument as a host tensor: device tensors are gathered,
/// logical arrays are widened to f64, everything else goes through the generic
/// value-to-tensor conversion.
async fn value_to_tensor_gather(value: Value) -> BuiltinResult<Tensor> {
    match value {
        Value::GpuTensor(handle) => gpu_helpers::gather_tensor_async(&handle).await,
        Value::LogicalArray(logical) => tensor::logical_to_tensor(&logical).map_err(builtin_error),
        other => tensor::value_into_tensor_for("cov", other).map_err(builtin_error),
    }
}
255
/// Gather and validate a weight-vector argument, returning its raw data.
///
/// Device tensors are gathered to the host and logical arrays widened to f64.
/// The value must be at most 2-D with one extent equal to `expected_rows`,
/// and every weight must be a finite, non-negative number.
async fn value_to_weight_vector(value: Value, expected_rows: usize) -> BuiltinResult<Vec<f64>> {
    let tensor = match value {
        Value::GpuTensor(handle) => gpu_helpers::gather_tensor_async(&handle).await?,
        Value::LogicalArray(logical) => {
            tensor::logical_to_tensor(&logical).map_err(builtin_error)?
        }
        other => tensor::value_into_tensor_for("cov", other).map_err(builtin_error)?,
    };

    if tensor.shape.len() > 2 {
        return Err(builtin_error("cov: weight vector must be one-dimensional"));
    }
    // Either orientation (row or column vector) is accepted as long as one
    // extent matches the number of observation rows.
    if tensor.rows() != expected_rows && tensor.cols() != expected_rows {
        return Err(builtin_error(format!(
            "cov: weight vector must contain {} elements",
            expected_rows
        )));
    }
    for (idx, weight) in tensor.data.iter().enumerate() {
        if !weight.is_finite() || *weight < 0.0 {
            return Err(builtin_error(format!(
                "cov: weights must be non-negative finite values (index {idx})"
            )));
        }
    }
    // NOTE(review): this emptiness check runs after the extent check, so it
    // only fires when `expected_rows == 0` — confirm the ordering is intended.
    if tensor.data.is_empty() {
        return Err(builtin_error("cov: weight vector cannot be empty"));
    }
    Ok(tensor.data)
}
286
287fn parse_rows_option(value: &str) -> BuiltinResult<CovRows> {
288 match value {
289 "all" => Ok(CovRows::All),
290 "omitrows" | "omit" => Ok(CovRows::OmitRows),
291 "partialrows" | "partial" | "pairwise" => Ok(CovRows::PartialRows),
292 other => Err(builtin_error(format!("cov: unknown rows option '{other}'"))),
293 }
294}
295
/// Parse the numeric normalization flag: 0 => unbiased (n-1), 1 => biased (n).
///
/// Integers must be exactly 0 or 1. Floats are accepted when they are finite
/// and within 1e-12 of an integer. Booleans map false/true onto
/// unbiased/biased respectively.
fn parse_normalization(value: Value) -> BuiltinResult<CovNormalization> {
    match value {
        Value::Int(i) => match i.to_i64() {
            0 => Ok(CovNormalization::Unbiased),
            1 => Ok(CovNormalization::Biased),
            other => Err(builtin_error(format!(
                "cov: normalization flag must be 0 or 1, received {other}"
            ))),
        },
        Value::Num(n) => {
            if !n.is_finite() {
                return Err(builtin_error("cov: normalization flag must be finite"));
            }
            // Tolerate tiny floating-point error around integral values.
            let rounded = n.round();
            if (rounded - n).abs() > 1.0e-12 {
                return Err(builtin_error("cov: normalization flag must be an integer"));
            }
            match rounded as i64 {
                0 => Ok(CovNormalization::Unbiased),
                1 => Ok(CovNormalization::Biased),
                other => Err(builtin_error(format!(
                    "cov: normalization flag must be 0 or 1, received {other}"
                ))),
            }
        }
        Value::Bool(flag) => Ok(if flag {
            CovNormalization::Biased
        } else {
            CovNormalization::Unbiased
        }),
        other => Err(builtin_error(format!(
            "cov: normalization flag must be numeric, received {:?}",
            other
        ))),
    }
}
332
333fn should_treat_as_weight(
334 first: &Value,
335 candidate: &Value,
336 normalization_explicit: bool,
337 rows_option: CovRows,
338) -> BuiltinResult<bool> {
339 let (rows_first, cols_first) = value_rows_cols(first)?;
340 let (rows_candidate, cols_candidate) = value_rows_cols(candidate)?;
341
342 let is_vector = rows_candidate == 1
343 || cols_candidate == 1
344 || rows_candidate * cols_candidate == rows_candidate
345 && (rows_candidate == rows_first || cols_candidate == rows_first);
346
347 if !is_vector {
348 return Ok(false);
349 }
350
351 if rows_candidate != rows_first && cols_candidate != rows_first {
352 return Ok(false);
354 }
355
356 if cols_first == 1 && !normalization_explicit && matches!(rows_option, CovRows::All) {
357 return Ok(false);
359 }
360
361 Ok(true)
362}
363
364fn value_rows_cols(value: &Value) -> BuiltinResult<(usize, usize)> {
365 match value {
366 Value::Tensor(tensor) => Ok((tensor.rows(), tensor.cols())),
367 Value::LogicalArray(array) => {
368 if array.shape.len() > 2 {
369 return Err(builtin_error("cov: inputs must be 2-D matrices or vectors"));
370 }
371 let rows = if array.shape.is_empty() {
372 1
373 } else {
374 array.shape[0]
375 };
376 let cols = if array.shape.len() >= 2 {
377 array.shape[1]
378 } else {
379 1
380 };
381 Ok((rows, cols))
382 }
383 Value::GpuTensor(handle) => {
384 if handle.shape.len() > 2 {
385 return Err(builtin_error("cov: inputs must be 2-D matrices or vectors"));
386 }
387 let rows = if handle.shape.is_empty() {
388 1
389 } else {
390 handle.shape[0]
391 };
392 let cols = if handle.shape.len() >= 2 {
393 handle.shape[1]
394 } else {
395 1
396 };
397 Ok((rows, cols))
398 }
399 Value::Num(_) | Value::Int(_) | Value::Bool(_) => Ok((1, 1)),
400 other => Err(builtin_error(format!(
401 "cov: unsupported input type for shape inspection: {:?}",
402 other
403 ))),
404 }
405}
406
/// Dense, column-major 2-D buffer used by the covariance kernels
/// (element (r, c) lives at `data[r + c * rows]`).
#[derive(Debug, Clone)]
struct Matrix {
    // Column-major element storage; length == rows * cols.
    data: Vec<f64>,
    rows: usize,
    cols: usize,
}
413
impl Matrix {
    /// Wrap a tensor's column-major data; rejects arrays above 2-D.
    fn from_tensor(name: &str, tensor: Tensor) -> BuiltinResult<Self> {
        if tensor.shape.len() > 2 {
            return Err(builtin_error(format!(
                "{name}: inputs must be 2-D matrices or vectors"
            )));
        }
        Ok(Self {
            rows: tensor.rows(),
            cols: tensor.cols(),
            data: tensor.data,
        })
    }

    /// Element at (row, col); column-major indexing.
    #[inline]
    fn get(&self, row: usize, col: usize) -> f64 {
        self.data[row + col * self.rows]
    }

    /// Contiguous slice holding column `col`.
    #[inline]
    fn column(&self, col: usize) -> &[f64] {
        let start = col * self.rows;
        let end = start + self.rows;
        &self.data[start..end]
    }
}
440
441fn combine_tensors(left: Tensor, right: Option<Tensor>) -> BuiltinResult<Matrix> {
442 let mut matrix = Matrix::from_tensor("cov", left)?;
443 if let Some(second) = right {
444 let right_matrix = Matrix::from_tensor("cov", second)?;
445 if matrix.rows != right_matrix.rows {
446 return Err(builtin_error(
447 "cov: inputs must have the same number of rows",
448 ));
449 }
450 matrix.cols += right_matrix.cols;
451 matrix
452 .data
453 .extend_from_slice(&right_matrix.data[..right_matrix.rows * right_matrix.cols]);
454 }
455 Ok(matrix)
456}
457
/// Dense covariance over all rows ('all', and 'omitrows' after filtering).
///
/// Produces the full `cols x cols` symmetric matrix in column-major order.
/// Columns containing non-finite values get NaN means, which makes every pair
/// involving them NaN downstream. Degenerate denominators (too few rows for
/// unbiased normalization, non-positive weight sums) short-circuit to an
/// all-NaN result rather than an error.
fn covariance_dense(matrix: &Matrix, weight: &CovWeightSpec) -> BuiltinResult<Tensor> {
    let cols = matrix.cols;
    let rows = matrix.rows;

    if cols == 0 {
        return Tensor::new(Vec::new(), vec![0, 0]).map_err(|e| builtin_error(format!("cov: {e}")));
    }

    // Entries default to NaN; only computable pairs overwrite them.
    let mut result = vec![f64::NAN; cols * cols];

    match weight {
        CovWeightSpec::Scalar(normalization) => {
            let denom = match normalization {
                CovNormalization::Unbiased => (rows as f64) - 1.0,
                CovNormalization::Biased => rows as f64,
            };
            // Too few rows for the requested normalization: all-NaN matrix.
            if denom <= 0.0 {
                return Tensor::new(result, vec![cols, cols])
                    .map_err(|e| builtin_error(format!("cov: {e}")));
            }

            // Per-column means; a column with any non-finite entry yields NaN.
            let mut means = vec![0.0; cols];
            for (col, mean_slot) in means.iter_mut().enumerate() {
                let column = matrix.column(col);
                let mut sum = 0.0;
                let mut valid = true;
                for &value in column {
                    if !value.is_finite() {
                        valid = false;
                        break;
                    }
                    sum += value;
                }
                *mean_slot = if valid { sum / (rows as f64) } else { f64::NAN };
            }

            // Fill the upper triangle; `set_entry` mirrors the lower half.
            for i in 0..cols {
                for j in i..cols {
                    let value = covariance_unweighted_pair(matrix, i, j, &means, denom);
                    set_entry(&mut result, cols, i, j, sanitize_covariance(i == j, value));
                }
            }
        }
        CovWeightSpec::Vector(weights) => {
            if weights.len() != rows {
                return Err(builtin_error(format!(
                    "cov: weight vector must contain {} elements",
                    rows
                )));
            }
            let sum_w: f64 = weights.iter().sum();
            if sum_w <= 0.0 {
                return Tensor::new(result, vec![cols, cols])
                    .map_err(|e| builtin_error(format!("cov: {e}")));
            }
            // Weighted denominator: sum(w) - 1.
            let denom = sum_w - 1.0;
            if denom <= 0.0 {
                return Tensor::new(result, vec![cols, cols])
                    .map_err(|e| builtin_error(format!("cov: {e}")));
            }

            // Weighted per-column means, again NaN-poisoned per column.
            let mut means = vec![0.0; cols];
            for (col, mean_slot) in means.iter_mut().enumerate() {
                let column = matrix.column(col);
                let mut weighted_sum = 0.0;
                let mut valid = true;
                for (row, &value) in column.iter().enumerate() {
                    if !value.is_finite() {
                        valid = false;
                        break;
                    }
                    weighted_sum += weights[row] * value;
                }
                *mean_slot = if valid {
                    weighted_sum / sum_w
                } else {
                    f64::NAN
                };
            }

            for i in 0..cols {
                for j in i..cols {
                    let value = covariance_weighted_pair(matrix, i, j, weights, &means, denom);
                    set_entry(&mut result, cols, i, j, sanitize_covariance(i == j, value));
                }
            }
        }
    }

    Tensor::new(result, vec![cols, cols]).map_err(|e| builtin_error(format!("cov: {e}")))
}
549
550fn filter_complete_rows(matrix: &Matrix, weight: CovWeightSpec) -> (Matrix, CovWeightSpec) {
551 if matrix.rows == 0 {
552 return (
553 Matrix {
554 data: Vec::new(),
555 rows: 0,
556 cols: matrix.cols,
557 },
558 weight,
559 );
560 }
561
562 let mut valid_rows = Vec::new();
563 for row in 0..matrix.rows {
564 let mut is_valid = true;
565 for col in 0..matrix.cols {
566 if !matrix.get(row, col).is_finite() {
567 is_valid = false;
568 break;
569 }
570 }
571 if is_valid {
572 valid_rows.push(row);
573 }
574 }
575
576 if valid_rows.len() == matrix.rows {
577 return (matrix.clone(), weight);
579 }
580
581 let mut data = Vec::with_capacity(valid_rows.len() * matrix.cols);
582 for col in 0..matrix.cols {
583 for &row in &valid_rows {
584 data.push(matrix.get(row, col));
585 }
586 }
587
588 let filtered_matrix = Matrix {
589 data,
590 rows: valid_rows.len(),
591 cols: matrix.cols,
592 };
593
594 let filtered_weight = match weight {
595 CovWeightSpec::Scalar(norm) => CovWeightSpec::Scalar(norm),
596 CovWeightSpec::Vector(vec) => {
597 let mut filtered = Vec::with_capacity(valid_rows.len());
598 for &row in &valid_rows {
599 filtered.push(vec[row]);
600 }
601 CovWeightSpec::Vector(filtered)
602 }
603 };
604
605 (filtered_matrix, filtered_weight)
606}
607
608fn covariance_pairwise(matrix: &Matrix, weight: &CovWeightSpec) -> BuiltinResult<Tensor> {
609 let cols = matrix.cols;
610 if cols == 0 {
611 return Tensor::new(Vec::new(), vec![0, 0]).map_err(|e| builtin_error(format!("cov: {e}")));
612 }
613 let mut result = vec![f64::NAN; cols * cols];
614 for i in 0..cols {
615 let variance = covariance_pair(matrix, i, i, weight);
616 set_entry(&mut result, cols, i, i, sanitize_covariance(true, variance));
617 for j in (i + 1)..cols {
618 let value = covariance_pair(matrix, i, j, weight);
619 set_entry(&mut result, cols, i, j, sanitize_covariance(false, value));
620 }
621 }
622 Tensor::new(result, vec![cols, cols]).map_err(|e| builtin_error(format!("cov: {e}")))
623}
624
625fn covariance_unweighted_pair(
626 matrix: &Matrix,
627 lhs: usize,
628 rhs: usize,
629 means: &[f64],
630 denom: f64,
631) -> f64 {
632 if !means[lhs].is_finite() || !means[rhs].is_finite() {
633 return f64::NAN;
634 }
635 let mut accumulator = 0.0;
636 for row in 0..matrix.rows {
637 let x = matrix.get(row, lhs);
638 let y = matrix.get(row, rhs);
639 if !x.is_finite() || !y.is_finite() {
640 return f64::NAN;
641 }
642 accumulator += (x - means[lhs]) * (y - means[rhs]);
643 }
644 accumulator / denom
645}
646
647fn covariance_weighted_pair(
648 matrix: &Matrix,
649 lhs: usize,
650 rhs: usize,
651 weights: &[f64],
652 means: &[f64],
653 denom: f64,
654) -> f64 {
655 if !means[lhs].is_finite() || !means[rhs].is_finite() {
656 return f64::NAN;
657 }
658 let mut accumulator = 0.0;
659 for (row, &weight) in weights.iter().enumerate().take(matrix.rows) {
660 if weight == 0.0 {
661 continue;
662 }
663 let x = matrix.get(row, lhs);
664 let y = matrix.get(row, rhs);
665 if !x.is_finite() || !y.is_finite() {
666 return f64::NAN;
667 }
668 accumulator += weight * (x - means[lhs]) * (y - means[rhs]);
669 }
670 accumulator / denom
671}
672
673fn covariance_pair(matrix: &Matrix, lhs: usize, rhs: usize, weight: &CovWeightSpec) -> f64 {
674 match weight {
675 CovWeightSpec::Scalar(normalization) => {
676 let mut xs = Vec::new();
677 let mut ys = Vec::new();
678 for row in 0..matrix.rows {
679 let x = matrix.get(row, lhs);
680 let y = matrix.get(row, rhs);
681 if x.is_finite() && y.is_finite() {
682 xs.push(x);
683 ys.push(y);
684 }
685 }
686 covariance_unweighted_slice(&xs, &ys, *normalization)
687 }
688 CovWeightSpec::Vector(weights) => {
689 let mut xs = Vec::new();
690 let mut ys = Vec::new();
691 let mut ws = Vec::new();
692 for (row, &weight) in weights.iter().enumerate().take(matrix.rows) {
693 let x = matrix.get(row, lhs);
694 let y = matrix.get(row, rhs);
695 if x.is_finite() && y.is_finite() {
696 xs.push(x);
697 ys.push(y);
698 ws.push(weight);
699 }
700 }
701 covariance_weighted_slice(&xs, &ys, &ws)
702 }
703 }
704}
705
706fn covariance_unweighted_slice(xs: &[f64], ys: &[f64], normalization: CovNormalization) -> f64 {
707 if xs.is_empty() || ys.is_empty() {
708 return f64::NAN;
709 }
710 let n = xs.len().min(ys.len());
711 if n == 0 {
712 return f64::NAN;
713 }
714 let denom = match normalization {
715 CovNormalization::Unbiased => (n as f64) - 1.0,
716 CovNormalization::Biased => n as f64,
717 };
718 if denom <= 0.0 {
719 return f64::NAN;
720 }
721 let sum_x: f64 = xs.iter().take(n).sum();
722 let sum_y: f64 = ys.iter().take(n).sum();
723 let mean_x = sum_x / (n as f64);
724 let mean_y = sum_y / (n as f64);
725 let mut accumulator = 0.0;
726 for idx in 0..n {
727 accumulator += (xs[idx] - mean_x) * (ys[idx] - mean_y);
728 }
729 accumulator / denom
730}
731
/// Weighted covariance of two equal-length samples (already filtered to
/// finite pairs), using the first `min(len)` triples and the MATLAB-style
/// `sum(w) - 1` denominator.
///
/// Returns NaN when there are no pairs, the weights do not sum to a positive
/// value, or the `sum(w) - 1` denominator is non-positive.
fn covariance_weighted_slice(xs: &[f64], ys: &[f64], weights: &[f64]) -> f64 {
    let n = xs.len().min(ys.len()).min(weights.len());
    // `n == 0` already covers the "any slice empty" case, so the previous
    // separate emptiness check was redundant.
    if n == 0 {
        return f64::NAN;
    }
    let sum_w: f64 = weights[..n].iter().sum();
    if sum_w <= 0.0 {
        return f64::NAN;
    }
    let denom = sum_w - 1.0;
    if denom <= 0.0 {
        return f64::NAN;
    }
    // Weighted means, accumulated in the same order as the original loop.
    let mut mean_x = 0.0;
    let mut mean_y = 0.0;
    for ((&w, &x), &y) in weights[..n].iter().zip(&xs[..n]).zip(&ys[..n]) {
        mean_x += w * x;
        mean_y += w * y;
    }
    mean_x /= sum_w;
    mean_y /= sum_w;
    let accumulator: f64 = weights[..n]
        .iter()
        .zip(&xs[..n])
        .zip(&ys[..n])
        .map(|((&w, &x), &y)| w * (x - mean_x) * (y - mean_y))
        .sum();
    accumulator / denom
}
762
/// Clamp tiny negative diagonal (variance) entries produced by floating-point
/// round-off to exactly zero; every other value passes through untouched.
fn sanitize_covariance(is_diag: bool, value: f64) -> f64 {
    let rounding_noise = is_diag && value < 0.0 && value > -1.0e-12;
    if value.is_finite() && rounding_noise {
        0.0
    } else {
        value
    }
}
773
/// Write `value` into the column-major `dim x dim` buffer at (row, col),
/// mirroring it to (col, row) so the result stays symmetric.
fn set_entry(buffer: &mut [f64], dim: usize, row: usize, col: usize, value: f64) {
    buffer[row + col * dim] = value;
    if row == col {
        return;
    }
    buffer[col + row * dim] = value;
}
782
#[cfg(test)]
pub(crate) mod tests {
    use super::*;
    use crate::builtins::common::test_support;
    use futures::executor::block_on;
    use runmat_builtins::{ResolveContext, Tensor, Type};

    /// Compare a square result tensor against a flat column-major expectation;
    /// NaN entries must match NaN, everything else within `tol`.
    fn assert_tensor_close(actual: &Tensor, expected: &[f64], tol: f64) {
        let dim = (expected.len() as f64).sqrt() as usize;
        assert_eq!(actual.shape, vec![dim, dim], "unexpected tensor shape");
        for (idx, (&got, &want)) in actual.data.iter().zip(expected.iter()).enumerate() {
            if want.is_nan() {
                assert!(
                    got.is_nan(),
                    "expected NaN at linear index {idx}, found {got}"
                );
            } else {
                assert!(
                    (got - want).abs() <= tol,
                    "mismatch at linear index {idx}: got {got}, expected {want}"
                );
            }
        }
    }

    // Static type inference: an n-by-p input produces a p-by-p covariance.
    #[test]
    fn cov_type_preserves_column_count() {
        let out = cov_type(
            &[Type::Tensor {
                shape: Some(vec![Some(5), Some(3)]),
            }],
            &ResolveContext::new(Vec::new()),
        );
        assert_eq!(
            out,
            Type::Tensor {
                shape: Some(vec![Some(3), Some(3)])
            }
        );
    }

    // Static type inference: a vector input degenerates to a scalar variance.
    #[test]
    fn cov_type_vector_returns_scalar() {
        let out = cov_type(
            &[Type::Tensor {
                shape: Some(vec![Some(1), Some(4)]),
            }],
            &ResolveContext::new(Vec::new()),
        );
        assert_eq!(out, Type::Num);
    }

    // Synchronous wrapper used by the wgpu comparison test below.
    #[cfg(feature = "wgpu")]
    fn cov_builtin_sync(value: Value, rest: Vec<Value>) -> BuiltinResult<Value> {
        block_on(super::cov_builtin(value, rest))
    }

    // 5x3 matrix, default (unbiased) normalization.
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn cov_matrix_basic() {
        let tensor = Tensor::new(
            vec![
                4.0, 4.2, 3.9, 4.3, 4.1, 2.0, 2.1, 2.0, 2.1, 2.2, 0.60, 0.59, 0.58, 0.62, 0.63,
            ],
            vec![5, 3],
        )
        .unwrap();
        let result = block_on(cov_builtin(Value::Tensor(tensor), Vec::new())).expect("cov");
        let tensor = match result {
            Value::Tensor(t) => t,
            other => panic!("expected tensor result, got {other:?}"),
        };
        let expected = [
            0.0250, 0.0075, 0.00175, 0.0075, 0.0070, 0.00135, 0.00175, 0.00135, 0.00043,
        ];
        assert_tensor_close(&tensor, &expected, 1.0e-6);
    }

    // cov(x, y): two column vectors yield their joint 2x2 covariance.
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn cov_two_vectors() {
        let x = Tensor::new(vec![1.0, 2.0, 3.0, 4.0], vec![4, 1]).unwrap();
        let y = Tensor::new(vec![10.0, 11.0, 9.0, 12.0], vec![4, 1]).unwrap();
        let result = block_on(cov_builtin(Value::Tensor(x), vec![Value::Tensor(y)])).expect("cov");
        let tensor = match result {
            Value::Tensor(t) => t,
            other => panic!("expected tensor result, got {other:?}"),
        };
        let expected = [
            1.6666666666666667,
            0.6666666666666666,
            0.6666666666666666,
            1.6666666666666667,
        ];
        assert_tensor_close(&tensor, &expected, 1.0e-6);
    }

    // cov(X, w): the second array is disambiguated as a weight vector because
    // its length matches X's row count and X has more than one column.
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn cov_weighted_vector() {
        let tensor = Tensor::new(
            vec![
                4.0, 4.2, 3.9, 4.3, 4.1, 2.0, 2.1, 2.0, 2.1, 2.2,
            ],
            vec![5, 2],
        )
        .unwrap();
        let weights = Tensor::new(vec![1.0, 1.0, 1.0, 2.0, 2.0], vec![5, 1]).unwrap();
        let result = block_on(cov_builtin(
            Value::Tensor(tensor),
            vec![Value::Tensor(weights)],
        ))
        .expect("cov");
        let tensor = match result {
            Value::Tensor(t) => t,
            other => panic!("expected tensor result, got {other:?}"),
        };
        let expected = [
            0.022380952380952376,
            0.004999999999999994,
            0.004999999999999994,
            0.006666666666666678,
        ];
        assert_tensor_close(&tensor, &expected, 1.0e-6);
    }

    // 'omitrows': any row containing a NaN is dropped before the dense pass.
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn cov_omitrows() {
        let tensor = Tensor::new(
            vec![
                1.0,
                3.0,
                f64::NAN,
                8.0,
                f64::NAN,
                4.0,
                6.0,
                9.0,
                2.0,
                5.0,
                7.0,
                10.0,
            ],
            vec![4, 3],
        )
        .unwrap();
        let result = block_on(cov_builtin(
            Value::Tensor(tensor),
            vec![Value::from("omitrows")],
        ))
        .expect("cov");
        let tensor = match result {
            Value::Tensor(t) => t,
            other => panic!("expected tensor result, got {other:?}"),
        };
        let expected = [
            12.5, 12.5, 12.5, 12.5, 12.5, 12.5, 12.5, 12.5, 12.5,
        ];
        assert_tensor_close(&tensor, &expected, 1.0e-6);
    }

    // 'partialrows': entries involving a NaN-tainted pair stay NaN while the
    // remaining entries use the pairwise-complete subsets.
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn cov_partialrows() {
        let tensor = Tensor::new(
            vec![
                1.0,
                4.0,
                7.0,
                2.0,
                f64::NAN,
                8.0,
                f64::NAN,
                6.0,
                9.0,
            ],
            vec![3, 3],
        )
        .unwrap();
        let result = block_on(cov_builtin(
            Value::Tensor(tensor),
            vec![Value::from("partialrows")],
        ))
        .expect("cov");
        let tensor = match result {
            Value::Tensor(t) => t,
            other => panic!("expected tensor result, got {other:?}"),
        };
        let expected = [
            9.0,
            18.0,
            4.5,
            18.0,
            18.0,
            f64::NAN,
            4.5,
            f64::NAN,
            4.5,
        ];
        assert_tensor_close(&tensor, &expected, 1.0e-6);
    }

    // Device-resident input goes through the provider hook and gathers back
    // to the same values the CPU path produces.
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn cov_gpu_roundtrip() {
        test_support::with_test_provider(|provider| {
            let tensor = Tensor::new(
                vec![
                    4.0, 4.2, 3.9, 4.3, 4.1, 2.0, 2.1, 2.0, 2.1, 2.2,
                ],
                vec![5, 2],
            )
            .unwrap();
            let view = runmat_accelerate_api::HostTensorView {
                data: &tensor.data,
                shape: &tensor.shape,
            };
            let handle = provider.upload(&view).expect("upload");
            let result = block_on(cov_builtin(Value::GpuTensor(handle), Vec::new())).expect("cov");
            let gathered = test_support::gather(result).expect("gather");
            let expected = [
                0.0250, 0.0075, 0.0075, 0.0070,
            ];
            assert_tensor_close(&gathered, &expected, 1.0e-6);
        });
    }

    // Full wgpu provider (feature-gated): GPU result must match the CPU path.
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    #[cfg(feature = "wgpu")]
    fn cov_wgpu_matches_cpu() {
        let _ = runmat_accelerate::backend::wgpu::provider::register_wgpu_provider(
            runmat_accelerate::backend::wgpu::provider::WgpuProviderOptions::default(),
        );

        let tensor = Tensor::new(
            vec![
                4.0, 4.2, 3.9, 4.3, 4.1, 2.0, 2.1, 2.0, 2.1, 2.2,
            ],
            vec![5, 2],
        )
        .unwrap();

        let cpu_result =
            block_on(cov_builtin(Value::Tensor(tensor.clone()), Vec::new())).expect("cov");
        let cpu_tensor = match cpu_result {
            Value::Tensor(t) => t,
            other => panic!("expected tensor result, got {other:?}"),
        };

        let provider = runmat_accelerate_api::provider().expect("wgpu provider");
        let view = runmat_accelerate_api::HostTensorView {
            data: &tensor.data,
            shape: &tensor.shape,
        };
        let handle = provider.upload(&view).expect("upload");

        let gpu_value = cov_builtin_sync(Value::GpuTensor(handle), Vec::new()).expect("cov");
        let gathered = test_support::gather(gpu_value).expect("gather");

        assert_tensor_close(&gathered, &cpu_tensor.data, 1.0e-6);
    }
}