use anyhow::Result;
use scirs2_core::ndarray_ext::{Array1, Array2};
use scirs2_core::numeric::{Float, FloatConst, NumAssign, NumCast};
use scirs2_core::random::{thread_rng, Distribution, RandNormal as Normal, Rng};
use scirs2_linalg::{lstsq, LinalgError};
use std::iter::Sum;
use tenrso_core::DenseND;
use tenrso_kernels::mttkrp;
use thiserror::Error;

/// Errors produced by the CP decomposition routines in this module.
#[derive(Error, Debug)]
pub enum CpError {
    #[error("Invalid rank: {0}")]
    InvalidRank(usize),

    #[error("Invalid tolerance: {0}")]
    InvalidTolerance(f64),

    #[error("Linear algebra error: {0}")]
    LinalgError(#[from] LinalgError),

    #[error("Shape mismatch: {0}")]
    ShapeMismatch(String),

    #[error("Convergence failed after {0} iterations")]
    ConvergenceFailed(usize),
}

/// Strategy for initializing the CP factor matrices.
#[derive(Debug, Clone, Copy)]
pub enum InitStrategy {
    /// Uniform random entries in `[0, 1)`.
    Random,
    /// Standard-normal random entries.
    RandomNormal,
    /// Leading left singular vectors of each mode unfolding.
    Svd,
    /// Non-negative SVD-based initialization (NNDSVD-style), keeping the
    /// dominant sign pattern of each singular pair.
    Nnsvd,
    /// SVD-based initialization weighted by statistical leverage scores.
    LeverageScore,
}

/// Optional constraints applied during constrained CP-ALS.
#[derive(Debug, Clone, Copy)]
pub struct CpConstraints {
    /// Project each factor onto the non-negative orthant after its update.
    pub nonnegative: bool,

    /// L2 (ridge) regularization strength added to the Gram diagonal.
    pub l2_reg: f64,

    /// Re-orthonormalize each factor's columns (via QR) after its update.
    pub orthogonal: bool,
}

impl Default for CpConstraints {
    fn default() -> Self {
        Self {
            nonnegative: false,
            l2_reg: 0.0,
            orthogonal: false,
        }
    }
}

impl CpConstraints {
    /// Non-negativity constraint only.
    pub fn nonnegative() -> Self {
        Self {
            nonnegative: true,
            ..Default::default()
        }
    }

    /// L2 regularization with strength `lambda`.
    pub fn l2_regularized(lambda: f64) -> Self {
        Self {
            l2_reg: lambda,
            ..Default::default()
        }
    }

    /// Column-orthonormality constraint only.
    pub fn orthogonal() -> Self {
        Self {
            orthogonal: true,
            ..Default::default()
        }
    }
}

/// Why the ALS iteration stopped.
#[derive(Debug, Clone, PartialEq)]
pub enum ConvergenceReason {
    /// The fit change dropped below the tolerance.
    FitTolerance,
    /// The iteration budget was exhausted.
    MaxIterations,
    /// Repeated fit decreases were detected.
    Oscillation,
    /// The optional wall-clock limit was reached.
    TimeLimit,
}

/// Diagnostics collected while the ALS loop runs.
#[derive(Debug, Clone)]
pub struct ConvergenceInfo<T> {
    /// Fit value recorded at every iteration.
    pub fit_history: Vec<T>,

    /// Why the iteration stopped.
    pub reason: ConvergenceReason,

    /// Whether the fit ever decreased between iterations.
    pub oscillated: bool,

    /// Number of iterations on which the fit decreased.
    pub oscillation_count: usize,

    /// Absolute fit change at the final iteration.
    pub final_fit_change: T,
}

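/// Result of a CP decomposition: factor matrices, optional per-component
/// weights, and convergence metadata.
///
/// A small usage sketch (not compiled as a doc-test): after fitting,
/// `extract_weights` normalizes the factor columns and stores the absorbed
/// norms, and `reconstruct` rebuilds a dense tensor:
///
/// ```ignore
/// let mut cp = cp_als(&tensor, 2, 10, 1e-4, InitStrategy::Svd, None)?;
/// cp.extract_weights(); // factor columns now have unit norm
/// let full = cp.reconstruct(tensor.shape())?;
/// ```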
#[derive(Debug, Clone)]
pub struct CpDecomp<T> {
    /// One `(mode_size, rank)` factor matrix per tensor mode.
    pub factors: Vec<Array2<T>>,

    /// Optional per-component weights (populated by [`CpDecomp::extract_weights`]).
    pub weights: Option<Array1<T>>,

    /// Final fit value in `[0, 1]`.
    pub fit: T,

    /// Number of ALS iterations performed.
    pub iters: usize,

    /// Convergence diagnostics, when the algorithm records them.
    pub convergence: Option<ConvergenceInfo<T>>,
}

impl<T> CpDecomp<T>
where
    T: Float + FloatConst + NumCast,
{
    /// Reconstruct the dense tensor of the given `shape` from the factors
    /// (and weights, if present).
    pub fn reconstruct(&self, shape: &[usize]) -> Result<DenseND<T>> {
        let n_modes = self.factors.len();

        if n_modes != shape.len() {
            anyhow::bail!(
                "Shape rank mismatch: expected {} modes, got {}",
                n_modes,
                shape.len()
            );
        }

        for (i, factor) in self.factors.iter().enumerate() {
            if factor.shape()[0] != shape[i] {
                anyhow::bail!(
                    "Mode-{} size mismatch: expected {}, got {}",
                    i,
                    shape[i],
                    factor.shape()[0]
                );
            }
        }

        let factor_views: Vec<_> = self.factors.iter().map(|f| f.view()).collect();
        let weights_view = self.weights.as_ref().map(|w| w.view());

        let reconstructed = tenrso_kernels::cp_reconstruct(&factor_views, weights_view.as_ref())?;

        Ok(DenseND::from_array(reconstructed))
    }

    /// Normalize each factor column to unit norm and absorb the norms into
    /// `self.weights`.
    pub fn extract_weights(&mut self) {
        let rank = self.factors[0].shape()[1];
        let mut weights = Array1::<T>::ones(rank);

        for factor in &mut self.factors {
            for r in 0..rank {
                let mut norm_sq = T::zero();
                for i in 0..factor.shape()[0] {
                    let val = factor[[i, r]];
                    norm_sq = norm_sq + val * val;
                }

                let norm = norm_sq.sqrt();
                if norm > T::epsilon() {
                    weights[r] = weights[r] * norm;

                    for i in 0..factor.shape()[0] {
                        factor[[i, r]] = factor[[i, r]] / norm;
                    }
                }
            }
        }

        self.weights = Some(weights);
    }
}

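/// CP decomposition via alternating least squares (ALS).
///
/// Each iteration sweeps over the modes; for mode `n` it solves the normal
/// equations `A_n * (Hadamard_{m != n} A_m^T A_m) = MTTKRP(X, {A}, n)`,
/// then records the fit `1 - ||X - X_hat||_F / ||X||_F`. Iteration stops on
/// the absolute fit-change tolerance `tol`, repeated oscillation, the
/// iteration budget, or the optional wall-clock `time_limit`.
///
/// A minimal usage sketch (not compiled as a doc-test), using the
/// `DenseND::random_uniform` constructor that the tests below also use:
///
/// ```ignore
/// // Rank-2 decomposition of a random 3-way tensor.
/// let tensor = DenseND::<f64>::random_uniform(&[3, 4, 5], 0.0, 1.0);
/// let cp = cp_als(&tensor, 2, 50, 1e-4, InitStrategy::Svd, None)?;
/// assert_eq!(cp.factors.len(), 3);
/// println!("fit = {:.4} after {} iterations", cp.fit, cp.iters);
/// ```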
pub fn cp_als<T>(
    tensor: &DenseND<T>,
    rank: usize,
    max_iters: usize,
    tol: f64,
    init: InitStrategy,
    time_limit: Option<std::time::Duration>,
) -> Result<CpDecomp<T>, CpError>
where
    T: Float
        + FloatConst
        + NumCast
        + NumAssign
        + Sum
        + scirs2_core::ndarray_ext::ScalarOperand
        + Send
        + Sync
        + std::fmt::Display
        + 'static,
{
    let shape = tensor.shape();
    let n_modes = tensor.rank();

    if rank == 0 {
        return Err(CpError::InvalidRank(rank));
    }

    for &mode_size in shape.iter() {
        if rank > mode_size {
            return Err(CpError::InvalidRank(rank));
        }
    }

    if !(0.0..1.0).contains(&tol) {
        return Err(CpError::InvalidTolerance(tol));
    }

    let mut factors = initialize_factors(tensor, rank, init)?;

    let tensor_norm_sq = compute_norm_squared(tensor);

    let mut prev_fit = T::zero();
    let mut fit = T::zero();
    let mut iters = 0;

    let mut fit_history = Vec::with_capacity(max_iters);
    let mut oscillation_count = 0;
    let mut convergence_reason = ConvergenceReason::MaxIterations;
    let mut final_fit_change = T::zero();

    let start_time = std::time::Instant::now();

    for iter in 0..max_iters {
        if let Some(limit) = time_limit {
            if start_time.elapsed() > limit {
                convergence_reason = ConvergenceReason::TimeLimit;
                break;
            }
        }

        iters = iter + 1;

        // One ALS sweep: update each factor in turn.
        for mode in 0..n_modes {
            let factor_views: Vec<_> = factors.iter().map(|f| f.view()).collect();
            let mttkrp_result = mttkrp(&tensor.view(), &factor_views, mode)
                .map_err(|e| CpError::ShapeMismatch(e.to_string()))?;

            // Hadamard product of the Gram matrices of all other factors.
            let gram = compute_gram_hadamard(&factors, mode);

            factors[mode] = solve_least_squares(&mttkrp_result, &gram)?;
        }

        fit = compute_fit(tensor, &factors, tensor_norm_sq)?;
        fit_history.push(fit);

        // A fit decrease counts as an oscillation.
        if iter > 0 && fit < prev_fit {
            oscillation_count += 1;
        }

        let fit_change = (fit - prev_fit).abs();
        final_fit_change = fit_change;

        if iter > 0 && fit_change < NumCast::from(tol).unwrap() {
            convergence_reason = ConvergenceReason::FitTolerance;
            break;
        }

        if oscillation_count > 5 && iter > 10 {
            convergence_reason = ConvergenceReason::Oscillation;
            break;
        }

        prev_fit = fit;
    }

    Ok(CpDecomp {
        factors,
        weights: None,
        fit,
        iters,
        convergence: Some(ConvergenceInfo {
            fit_history,
            reason: convergence_reason,
            oscillated: oscillation_count > 0,
            oscillation_count,
            final_fit_change,
        }),
    })
}

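/// CP-ALS with optional constraints applied inside each mode update:
/// L2 regularization adds `l2_reg` to the Gram diagonal before solving,
/// non-negativity projects the factor onto `x >= 0`, and orthogonality
/// re-orthonormalizes its columns via QR.
///
/// A minimal sketch (not compiled as a doc-test):
///
/// ```ignore
/// let tensor = DenseND::<f64>::random_uniform(&[5, 5, 5], 0.0, 1.0);
/// let cp = cp_als_constrained(
///     &tensor, 3, 20, 1e-4,
///     InitStrategy::Random,
///     CpConstraints::nonnegative(),
///     None,
/// )?;
/// assert!(cp.factors.iter().all(|f| f.iter().all(|&v| v >= 0.0)));
/// ```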
pub fn cp_als_constrained<T>(
    tensor: &DenseND<T>,
    rank: usize,
    max_iters: usize,
    tol: f64,
    init: InitStrategy,
    constraints: CpConstraints,
    time_limit: Option<std::time::Duration>,
) -> Result<CpDecomp<T>, CpError>
where
    T: Float
        + FloatConst
        + NumCast
        + NumAssign
        + Sum
        + scirs2_core::ndarray_ext::ScalarOperand
        + Send
        + Sync
        + std::fmt::Display
        + 'static,
{
    let shape = tensor.shape();
    let n_modes = tensor.rank();

    if rank == 0 {
        return Err(CpError::InvalidRank(rank));
    }

    for &mode_size in shape.iter() {
        if rank > mode_size {
            return Err(CpError::InvalidRank(rank));
        }
    }

    if !(0.0..1.0).contains(&tol) {
        return Err(CpError::InvalidTolerance(tol));
    }

    if constraints.l2_reg < 0.0 {
        return Err(CpError::InvalidTolerance(constraints.l2_reg));
    }

    let mut factors = initialize_factors(tensor, rank, init)?;

    // Start from a feasible point when non-negativity is requested.
    if constraints.nonnegative {
        for factor in &mut factors {
            factor.mapv_inplace(|x| x.max(T::zero()));
        }
    }

    let tensor_norm_sq = compute_norm_squared(tensor);

    let mut prev_fit = T::zero();
    let mut fit = T::zero();
    let mut iters = 0;

    let mut fit_history = Vec::with_capacity(max_iters);
    let mut oscillation_count = 0;
    let mut convergence_reason = ConvergenceReason::MaxIterations;
    let mut final_fit_change = T::zero();

    let start_time = std::time::Instant::now();

    for iter in 0..max_iters {
        if let Some(limit) = time_limit {
            if start_time.elapsed() > limit {
                convergence_reason = ConvergenceReason::TimeLimit;
                break;
            }
        }

        iters = iter + 1;

        for mode in 0..n_modes {
            let factor_views: Vec<_> = factors.iter().map(|f| f.view()).collect();
            let mttkrp_result = mttkrp(&tensor.view(), &factor_views, mode)
                .map_err(|e| CpError::ShapeMismatch(e.to_string()))?;

            let mut gram = compute_gram_hadamard(&factors, mode);

            // L2 regularization: add lambda to the Gram diagonal.
            if constraints.l2_reg > 0.0 {
                let reg = NumCast::from(constraints.l2_reg).unwrap();
                for i in 0..gram.nrows() {
                    gram[[i, i]] += reg;
                }
            }

            factors[mode] = solve_least_squares(&mttkrp_result, &gram)?;

            // Non-negativity: project onto the non-negative orthant.
            if constraints.nonnegative {
                factors[mode].mapv_inplace(|x| x.max(T::zero()));
            }

            // Orthogonality: re-orthonormalize the columns via QR.
            if constraints.orthogonal {
                factors[mode] = orthonormalize_factor(&factors[mode])?;
            }
        }

        fit = compute_fit(tensor, &factors, tensor_norm_sq)?;
        fit_history.push(fit);

        if iter > 0 && fit < prev_fit {
            oscillation_count += 1;
        }

        let fit_change = (fit - prev_fit).abs();
        final_fit_change = fit_change;

        if iter > 0 && fit_change < NumCast::from(tol).unwrap() {
            convergence_reason = ConvergenceReason::FitTolerance;
            break;
        }

        if oscillation_count > 5 && iter > 10 {
            convergence_reason = ConvergenceReason::Oscillation;
            break;
        }

        prev_fit = fit;
    }

    Ok(CpDecomp {
        factors,
        weights: None,
        fit,
        iters,
        convergence: Some(ConvergenceInfo {
            fit_history,
            reason: convergence_reason,
            oscillated: oscillation_count > 0,
            oscillation_count,
            final_fit_change,
        }),
    })
}

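/// CP-ALS with adaptive momentum (extrapolation) on each factor update.
///
/// After solving the standard ALS subproblem, the new factor is pushed
/// further along the direction from the previous iterate, scaled by an
/// adaptive coefficient `alpha` (grown on improvement, shrunk on regression)
/// times a grid-searched line-search step. Convergence is judged on the
/// *relative* fit change, unlike [`cp_als`], which uses the absolute change.
///
/// A minimal sketch (not compiled as a doc-test):
///
/// ```ignore
/// let tensor = DenseND::<f64>::random_uniform(&[8, 8, 8], 0.0, 1.0);
/// let cp = cp_als_accelerated(&tensor, 4, 30, 1e-4, InitStrategy::Random, None)?;
/// assert!(cp.fit > 0.0 && cp.fit <= 1.0);
/// ```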
pub fn cp_als_accelerated<T>(
    tensor: &DenseND<T>,
    rank: usize,
    max_iters: usize,
    tol: f64,
    init: InitStrategy,
    time_limit: Option<std::time::Duration>,
) -> Result<CpDecomp<T>, CpError>
where
    T: Float
        + FloatConst
        + NumCast
        + NumAssign
        + Sum
        + Send
        + Sync
        + scirs2_core::ndarray_ext::ScalarOperand
        + scirs2_core::numeric::FromPrimitive
        + 'static,
{
    let start_time = std::time::Instant::now();

    if rank == 0 {
        return Err(CpError::InvalidRank(rank));
    }
    if tol <= 0.0 || tol >= 1.0 {
        return Err(CpError::InvalidTolerance(tol));
    }

    let mut factors = initialize_factors(tensor, rank, init)?;
    let n_modes = factors.len();

    // Previous iterate of every factor, used for the momentum step.
    let mut prev_factors: Vec<Array2<T>> = factors.to_vec();

    // Adaptive momentum coefficient, kept within [alpha_min, alpha_max].
    let mut alpha = T::from(0.5).unwrap();
    let alpha_max = T::from(0.9).unwrap();
    let alpha_min = T::from(0.1).unwrap();

    let tol_t = T::from(tol).unwrap();
    let tensor_norm = tensor.frobenius_norm();
    let tensor_norm_sq = tensor_norm * tensor_norm;

    let mut prev_fit = T::zero();
    let mut fit = T::zero();

    let mut fit_history = Vec::with_capacity(max_iters);
    let mut oscillation_count = 0;
    let mut convergence_reason = ConvergenceReason::MaxIterations;
    let mut final_fit_change = T::zero();
    let mut iters = 0;

    for iter in 0..max_iters {
        iters = iter + 1;

        if let Some(limit) = time_limit {
            if start_time.elapsed() > limit {
                convergence_reason = ConvergenceReason::TimeLimit;
                break;
            }
        }

        for mode in 0..n_modes {
            let tensor_view = tensor.view();
            let factor_views: Vec<_> = factors.iter().map(|f| f.view()).collect();

            let mttkrp_result = mttkrp(&tensor_view, &factor_views, mode)
                .map_err(|e| CpError::ShapeMismatch(format!("MTTKRP failed: {}", e)))?;

            let gram = compute_gram_hadamard(&factors, mode);

            let mut factor_new = solve_least_squares(&mttkrp_result, &gram)?;

            // Pick a step length via a small grid search.
            let alpha_ls = line_search_cp(
                tensor,
                &factors,
                &prev_factors,
                mode,
                &factor_new,
                T::from(0.5).unwrap(),
                5,
            );

            // Momentum/extrapolation step along the previous update direction.
            if iter > 0 {
                let factor_prev = &prev_factors[mode];
                for i in 0..factor_new.shape()[0] {
                    for j in 0..factor_new.shape()[1] {
                        let diff = factor_new[[i, j]] - factor_prev[[i, j]];
                        factor_new[[i, j]] += alpha_ls * alpha * diff;
                    }
                }
            }

            prev_factors[mode] = factors[mode].clone();

            factors[mode] = factor_new;
        }

        fit = compute_fit(tensor, &factors, tensor_norm_sq)?;
        fit_history.push(fit);

        if iter > 0 {
            if fit > prev_fit {
                // Improvement: grow the momentum coefficient.
                alpha = (alpha * T::from(1.05).unwrap()).min(alpha_max);
            } else {
                // Regression: shrink the momentum and count an oscillation.
                alpha = (alpha * T::from(0.7).unwrap()).max(alpha_min);
                oscillation_count += 1;

                if oscillation_count > 5 && iter > 10 {
                    convergence_reason = ConvergenceReason::Oscillation;
                    break;
                }
            }
        }

        if iter > 0 {
            final_fit_change = (fit - prev_fit).abs();
            let relative_change = final_fit_change / (prev_fit.abs() + T::from(1e-10).unwrap());

            if relative_change < tol_t {
                convergence_reason = ConvergenceReason::FitTolerance;
                break;
            }
        }

        prev_fit = fit;
    }

    Ok(CpDecomp {
        factors,
        weights: None,
        fit,
        iters,
        convergence: Some(ConvergenceInfo {
            fit_history,
            reason: convergence_reason,
            oscillated: oscillation_count > 0,
            oscillation_count,
            final_fit_change,
        }),
    })
}

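/// CP decomposition with missing entries (tensor completion).
///
/// `mask` must have the same shape as `tensor`; entries greater than zero
/// are treated as observed. Both the least-squares updates and the reported
/// fit use only the observed entries, so `fit` measures
/// `1 - sqrt(sum_obs (x - x_hat)^2 / sum_obs x^2)`.
///
/// A minimal sketch (not compiled as a doc-test); `Array` here is the
/// re-exported ndarray type used in the tests below:
///
/// ```ignore
/// let mut mask = Array::zeros(vec![4, 4, 4]);
/// mask[[0, 1, 2]] = 1.0; // mark entries as observed...
/// let mask = DenseND::from_array(mask.into_dyn());
/// let cp = cp_completion(&tensor, &mask, 3, 50, 1e-4, InitStrategy::Random)?;
/// ```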
pub fn cp_completion<T>(
    tensor: &DenseND<T>,
    mask: &DenseND<T>,
    rank: usize,
    max_iters: usize,
    tol: f64,
    init: InitStrategy,
) -> Result<CpDecomp<T>, CpError>
where
    T: Float
        + FloatConst
        + NumCast
        + NumAssign
        + Sum
        + scirs2_core::ndarray_ext::ScalarOperand
        + scirs2_core::numeric::FromPrimitive
        + Send
        + Sync
        + 'static,
{
    let shape = tensor.shape();
    let n_modes = shape.len();

    if mask.shape() != shape {
        return Err(CpError::ShapeMismatch(format!(
            "Mask shape {:?} doesn't match tensor shape {:?}",
            mask.shape(),
            shape
        )));
    }

    if rank == 0 || shape.iter().any(|&s| rank > s) {
        return Err(CpError::InvalidRank(rank));
    }

    if tol <= 0.0 || tol >= 1.0 {
        return Err(CpError::InvalidTolerance(tol));
    }

    let tol_t = T::from(tol).unwrap();

    let mut factors = initialize_factors(tensor, rank, init)?;

    // Count observed entries; an empty mask makes the problem ill-posed.
    let mask_view = mask.view();
    let mut n_observed = T::zero();
    for &m in mask_view.iter() {
        n_observed += m;
    }

    if n_observed == T::zero() {
        return Err(CpError::ShapeMismatch(
            "Mask has no observed entries".to_string(),
        ));
    }

    let tensor_view = tensor.view();
    let mut prev_fit = T::neg_infinity();
    let mut fit = T::zero();
    let mut iters = 0;

    for iter in 0..max_iters {
        iters = iter + 1;

        for mode in 0..n_modes {
            let mode_size = shape[mode];
            let mut mttkrp_result = Array2::<T>::zeros((mode_size, rank));

            let kr = compute_khatri_rao_except(&factors, mode);

            let unfolded = tensor
                .unfold(mode)
                .map_err(|e| CpError::ShapeMismatch(format!("Unfold failed: {}", e)))?;
            let mask_unfolded = mask
                .unfold(mode)
                .map_err(|e| CpError::ShapeMismatch(format!("Mask unfold failed: {}", e)))?;

            // MTTKRP restricted to observed entries.
            for i in 0..mode_size {
                for r in 0..rank {
                    let mut sum = T::zero();
                    for j in 0..kr.nrows() {
                        let observed = mask_unfolded[[i, j]];
                        if observed > T::zero() {
                            sum += unfolded[[i, j]] * kr[[j, r]];
                        }
                    }
                    mttkrp_result[[i, r]] = sum;
                }
            }

            // Gram of the Khatri-Rao rows, restricted to observed entries
            // (aggregated over all rows of this mode).
            let mut gram = Array2::<T>::zeros((rank, rank));

            for r1 in 0..rank {
                for r2 in 0..rank {
                    let mut sum = T::zero();
                    for i in 0..mode_size {
                        for j in 0..kr.nrows() {
                            let observed = mask_unfolded[[i, j]];
                            if observed > T::zero() {
                                sum += kr[[j, r1]] * kr[[j, r2]];
                            }
                        }
                    }
                    gram[[r1, r2]] = sum;
                }
            }

            let factor_new = solve_least_squares(&mttkrp_result, &gram)?;

            factors[mode] = factor_new;
        }

        // Evaluate the fit on observed entries only.
        let reconstructed = compute_reconstruction(&factors)
            .map_err(|e| CpError::ShapeMismatch(format!("Reconstruction failed: {}", e)))?;
        let recon_view = reconstructed.view();

        let mut error_sq = T::zero();
        let mut norm_sq = T::zero();

        for ((&t_val, &m_val), &r_val) in tensor_view
            .iter()
            .zip(mask_view.iter())
            .zip(recon_view.iter())
        {
            if m_val > T::zero() {
                let diff = t_val - r_val;
                error_sq += diff * diff;
                norm_sq += t_val * t_val;
            }
        }

        fit = T::one() - (error_sq / norm_sq).sqrt();
        fit = fit.max(T::zero()).min(T::one());

        if iter > 0 {
            let fit_change = (fit - prev_fit).abs();
            let relative_change = fit_change / (prev_fit.abs() + T::from(1e-10).unwrap());

            if relative_change < tol_t {
                break;
            }
        }

        prev_fit = fit;
    }

    Ok(CpDecomp {
        factors,
        weights: None,
        fit,
        iters,
        convergence: None,
    })
}

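/// Randomized CP-ALS using Gaussian sketching.
///
/// Each mode update draws a Gaussian test matrix `Omega` with `sketch_size`
/// columns, sketches both the Khatri-Rao matrix and the mode unfolding with
/// it, and solves the much smaller sketched normal equations instead of the
/// full least-squares problem. The fit is only evaluated every
/// `fit_check_freq` iterations to save work.
///
/// A minimal sketch (not compiled as a doc-test); an oversampling factor of
/// 3-5x the rank, as in the tests below, is a reasonable starting point:
///
/// ```ignore
/// let tensor = DenseND::<f64>::random_uniform(&[15, 15, 15], 0.0, 1.0);
/// let rank = 5;
/// let cp = cp_randomized(&tensor, rank, 30, 1e-4, InitStrategy::Random, rank * 3, 5)?;
/// ```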
pub fn cp_randomized<T>(
    tensor: &DenseND<T>,
    rank: usize,
    max_iters: usize,
    tol: f64,
    init: InitStrategy,
    sketch_size: usize,
    fit_check_freq: usize,
) -> Result<CpDecomp<T>, CpError>
where
    T: Float
        + FloatConst
        + NumCast
        + NumAssign
        + Sum
        + scirs2_core::ndarray_ext::ScalarOperand
        + scirs2_core::numeric::FromPrimitive
        + Send
        + Sync
        + std::fmt::Display
        + 'static,
{
    let shape = tensor.shape();
    let n_modes = tensor.rank();

    if rank == 0 {
        return Err(CpError::InvalidRank(rank));
    }
    for &mode_size in shape.iter() {
        if rank > mode_size {
            return Err(CpError::InvalidRank(rank));
        }
    }
    if !(0.0..1.0).contains(&tol) {
        return Err(CpError::InvalidTolerance(tol));
    }
    if sketch_size < rank {
        // The sketch must be at least as wide as the rank.
        return Err(CpError::InvalidRank(0));
    }

    let mut factors = initialize_factors(tensor, rank, init)?;

    let tensor_norm_sq = compute_norm_squared(tensor);

    let mut prev_fit = T::zero();
    let mut fit = T::zero();
    let mut iters = 0;

    let mut rng = thread_rng();
    let normal = Normal::new(0.0, 1.0).unwrap();

    for iter in 0..max_iters {
        iters = iter + 1;

        for mode in 0..n_modes {
            let kr = compute_khatri_rao_except(&factors, mode);
            let kr_rows = kr.shape()[0];

            // Gaussian test matrix for the sketch.
            let mut omega = Array2::<T>::zeros((kr_rows, sketch_size));
            for i in 0..kr_rows {
                for j in 0..sketch_size {
                    omega[[i, j]] = T::from(normal.sample(&mut rng)).unwrap();
                }
            }

            // Sketch the Khatri-Rao matrix: (rank x sketch_size).
            let kr_sketch = kr.t().dot(&omega.view());

            let unfolding = tensor
                .unfold(mode)
                .map_err(|e| CpError::ShapeMismatch(e.to_string()))?;

            // Sketch the unfolding: (mode_size x sketch_size).
            let x_sketch = unfolding.dot(&omega.view());

            // Sketched normal equations: gram is (rank x rank),
            // rhs is (rank x mode_size).
            let gram = kr_sketch.dot(&kr_sketch.t().view());
            let rhs = kr_sketch.dot(&x_sketch.t().view());

            let mut new_factor = Array2::<T>::zeros((shape[mode], rank));

            for i in 0..shape[mode] {
                let b = rhs.column(i).to_owned();
                let solution =
                    lstsq(&gram.view(), &b.view(), None).map_err(CpError::LinalgError)?;

                for j in 0..rank {
                    new_factor[[i, j]] = solution.x[j];
                }
            }

            factors[mode] = new_factor;
        }

        // Evaluating the fit is expensive; do it only periodically.
        if iter % fit_check_freq == 0 || iter == max_iters - 1 {
            fit = compute_fit(tensor, &factors, tensor_norm_sq)?;

            let fit_change = (fit - prev_fit).abs();
            if iter > 0 && fit_change < NumCast::from(tol).unwrap() {
                break;
            }

            prev_fit = fit;
        }
    }

    // Make sure the reported fit reflects the final factors.
    if !(max_iters - 1).is_multiple_of(fit_check_freq) {
        fit = compute_fit(tensor, &factors, tensor_norm_sq)?;
    }

    Ok(CpDecomp {
        factors,
        weights: None,
        fit,
        iters,
        convergence: None,
    })
}

/// How new data is folded into an existing CP model.
#[derive(Debug, Clone, Copy)]
pub enum IncrementalMode {
    /// Append the new slices along the update mode, growing that factor.
    Append,

    /// Refit on the new data only, with `lambda` acting as a forgetting
    /// factor for the old model.
    SlidingWindow {
        /// Forgetting factor; must lie in `[0, 1]`.
        lambda: f64,
    },
}

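/// Update an existing CP model with new data instead of refitting from
/// scratch.
///
/// With [`IncrementalMode::Append`], the factor of `update_mode` is grown
/// with rows for the new slices (seeded from the column means plus small
/// noise), the old model's reconstruction is concatenated with `new_data`
/// along that mode, and a few ALS sweeps (at most 10) refine all factors.
/// With [`IncrementalMode::SlidingWindow`], the refinement runs on
/// `new_data` alone, starting from the current factors.
///
/// A minimal sketch (not compiled as a doc-test), mirroring the tests below:
///
/// ```ignore
/// let cp = cp_als(&initial_tensor, 5, 30, 1e-4, InitStrategy::Random, None)?;
/// let new_slice = DenseND::<f64>::random_uniform(&[5, 10, 10], 0.0, 1.0);
/// let cp2 = cp_als_incremental(&cp, &new_slice, 0, IncrementalMode::Append, 10, 1e-4)?;
/// assert_eq!(cp2.factors[0].shape()[0], cp.factors[0].shape()[0] + 5);
/// ```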
pub fn cp_als_incremental<T>(
    current: &CpDecomp<T>,
    new_data: &DenseND<T>,
    update_mode: usize,
    mode: IncrementalMode,
    max_iters: usize,
    tol: f64,
) -> Result<CpDecomp<T>, CpError>
where
    T: Float
        + FloatConst
        + NumCast
        + NumAssign
        + Sum
        + scirs2_core::ndarray_ext::ScalarOperand
        + Send
        + Sync
        + std::fmt::Display
        + 'static,
{
    let rank = current.factors[0].shape()[1];
    let n_modes = current.factors.len();

    if update_mode >= n_modes {
        return Err(CpError::ShapeMismatch(format!(
            "Update mode {} exceeds number of modes {}",
            update_mode, n_modes
        )));
    }

    if new_data.rank() != n_modes {
        return Err(CpError::ShapeMismatch(format!(
            "New data rank {} doesn't match CP rank {}",
            new_data.rank(),
            n_modes
        )));
    }

    // All modes except the update mode must agree with the current factors.
    for i in 0..n_modes {
        if i != update_mode && new_data.shape()[i] != current.factors[i].shape()[0] {
            return Err(CpError::ShapeMismatch(format!(
                "New data mode-{} size {} doesn't match current factor size {}",
                i,
                new_data.shape()[i],
                current.factors[i].shape()[0]
            )));
        }
    }

    let mut factors = current.factors.clone();
    let combined_tensor: DenseND<T>;

    match mode {
        IncrementalMode::Append => {
            let old_rows = current.factors[update_mode].shape()[0];
            let new_rows = new_data.shape()[update_mode];
            let total_rows = old_rows + new_rows;

            let mut extended_factor = Array2::<T>::zeros((total_rows, rank));

            // Copy the existing rows.
            for i in 0..old_rows {
                for j in 0..rank {
                    extended_factor[[i, j]] = current.factors[update_mode][[i, j]];
                }
            }

            // Seed the new rows with the column means plus small noise.
            let mut rng = thread_rng();
            for i in old_rows..total_rows {
                for j in 0..rank {
                    let mut col_mean = T::zero();
                    for k in 0..old_rows {
                        col_mean += current.factors[update_mode][[k, j]];
                    }
                    col_mean /= T::from(old_rows).unwrap();

                    let noise = T::from(rng.random::<f64>() * 0.1 - 0.05).unwrap();
                    extended_factor[[i, j]] = col_mean + noise;
                }
            }

            factors[update_mode] = extended_factor;

            // Refine against the old reconstruction concatenated with the
            // new data along the update mode.
            let old_tensor = tensor_from_factors(&current.factors, None)
                .map_err(|e| CpError::ShapeMismatch(format!("Failed to reconstruct: {}", e)))?;
            combined_tensor = concatenate_tensors(&old_tensor, new_data, update_mode)
                .map_err(|e| CpError::ShapeMismatch(format!("Concatenation failed: {}", e)))?;
        }

        IncrementalMode::SlidingWindow { lambda } => {
            if !(0.0..=1.0).contains(&lambda) {
                return Err(CpError::InvalidTolerance(lambda));
            }

            // Simplified sliding-window update: refit on the new data only,
            // starting from the current factors. `lambda` is validated but
            // not otherwise used by this implementation.
            combined_tensor = new_data.clone();
        }
    }

    // A short ALS refinement pass (at most 10 sweeps).
    let refine_iters = max_iters.min(10);

    let tensor_norm_sq = compute_norm_squared(&combined_tensor);
    let mut fit = T::zero();
    let mut iters = 0;

    for iter in 0..refine_iters {
        iters = iter + 1;
        let prev_fit = fit;

        for mode_idx in 0..n_modes {
            let factor_views: Vec<_> = factors.iter().map(|f| f.view()).collect();
            let mttkrp_result = mttkrp(&combined_tensor.view(), &factor_views, mode_idx)
                .map_err(|e| CpError::ShapeMismatch(e.to_string()))?;

            let gram = compute_gram_hadamard(&factors, mode_idx);

            factors[mode_idx] = solve_least_squares(&mttkrp_result, &gram)?;
        }

        fit = compute_fit(&combined_tensor, &factors, tensor_norm_sq)?;

        if iter > 0 {
            let fit_change = (fit - prev_fit).abs() / (prev_fit + T::epsilon());
            if fit_change < T::from(tol).unwrap() {
                break;
            }
        }
    }

    Ok(CpDecomp {
        factors,
        weights: None,
        fit,
        iters,
        convergence: None,
    })
}

/// Concatenate two tensors along `mode`; all other mode sizes must match.
fn concatenate_tensors<T>(
    tensor1: &DenseND<T>,
    tensor2: &DenseND<T>,
    mode: usize,
) -> Result<DenseND<T>>
where
    T: Float + NumCast + scirs2_core::ndarray_ext::ScalarOperand + 'static,
{
    use scirs2_core::ndarray_ext::{Array, Axis, IxDyn};

    if tensor1.rank() != tensor2.rank() {
        anyhow::bail!(
            "Tensor ranks don't match: {} vs {}",
            tensor1.rank(),
            tensor2.rank()
        );
    }

    let n_modes = tensor1.rank();

    for i in 0..n_modes {
        if i != mode && tensor1.shape()[i] != tensor2.shape()[i] {
            anyhow::bail!(
                "Mode-{} sizes don't match: {} vs {}",
                i,
                tensor1.shape()[i],
                tensor2.shape()[i]
            );
        }
    }

    let mut output_shape = tensor1.shape().to_vec();
    let size1 = tensor1.shape()[mode];
    let size2 = tensor2.shape()[mode];
    output_shape[mode] = size1 + size2;

    let mut output = Array::<T, IxDyn>::zeros(IxDyn(&output_shape));

    let view1_full = tensor1.view();
    let view2_full = tensor2.view();

    // Copy tensor1's slices, then tensor2's, along the concatenation axis.
    for i in 0..size1 {
        let mut slice_out = output.index_axis_mut(Axis(mode), i);
        let slice_in = view1_full.index_axis(Axis(mode), i);
        slice_out.assign(&slice_in);
    }

    for i in 0..size2 {
        let mut slice_out = output.index_axis_mut(Axis(mode), size1 + i);
        let slice_in = view2_full.index_axis(Axis(mode), i);
        slice_out.assign(&slice_in);
    }

    Ok(DenseND::from_array(output))
}

/// Reconstruct a dense tensor from factor matrices and optional weights.
fn tensor_from_factors<T>(factors: &[Array2<T>], weights: Option<&Array1<T>>) -> Result<DenseND<T>>
where
    T: Float + NumCast + scirs2_core::ndarray_ext::ScalarOperand + 'static,
{
    let factor_views: Vec<_> = factors.iter().map(|f| f.view()).collect();
    let weights_view = weights.map(|w| w.view());

    let reconstructed = tenrso_kernels::cp_reconstruct(&factor_views, weights_view.as_ref())?;
    Ok(DenseND::from_array(reconstructed))
}

/// Khatri-Rao product of all factors except the one for `skip_mode`.
fn compute_khatri_rao_except<T>(factors: &[Array2<T>], skip_mode: usize) -> Array2<T>
where
    T: Float + NumCast + NumAssign + scirs2_core::ndarray_ext::ScalarOperand,
{
    use tenrso_kernels::khatri_rao;

    let n_modes = factors.len();

    // Start from the first factor that is not skipped.
    let result_idx = if skip_mode == 0 { 1 } else { 0 };
    let mut result = factors[result_idx].clone();

    for (i, factor) in factors.iter().enumerate().take(n_modes) {
        if i == skip_mode || i == result_idx {
            continue;
        }

        result = khatri_rao(&result.view(), &factor.view());
    }

    result
}

/// Reconstruct a dense tensor from factors without weights.
fn compute_reconstruction<T>(factors: &[Array2<T>]) -> Result<DenseND<T>>
where
    T: Float + NumCast + scirs2_core::ndarray_ext::ScalarOperand + 'static,
{
    let factor_views: Vec<_> = factors.iter().map(|f| f.view()).collect();

    let reconstructed = tenrso_kernels::cp_reconstruct(&factor_views, None)?;
    Ok(DenseND::from_array(reconstructed))
}

/// Grid-search step length for the accelerated ALS extrapolation: try a few
/// candidate steps between the previous and proposed factor and keep the one
/// with the best fit. `_initial_alpha` and `_max_iters` are currently unused.
fn line_search_cp<T>(
    tensor: &DenseND<T>,
    factors: &[Array2<T>],
    prev_factors: &[Array2<T>],
    mode: usize,
    new_factor: &Array2<T>,
    _initial_alpha: T,
    _max_iters: usize,
) -> T
where
    T: Float
        + FloatConst
        + NumCast
        + NumAssign
        + Sum
        + Send
        + Sync
        + scirs2_core::ndarray_ext::ScalarOperand
        + scirs2_core::numeric::FromPrimitive
        + 'static,
{
    let tensor_norm = tensor.frobenius_norm();
    let tensor_norm_sq = tensor_norm * tensor_norm;

    let mut best_alpha = T::one();
    let mut best_fit = T::neg_infinity();

    // Candidate step lengths.
    let alphas = [
        T::from(0.25).unwrap(),
        T::from(0.5).unwrap(),
        T::from(0.75).unwrap(),
        T::one(),
        T::from(1.25).unwrap(),
    ];

    for &alpha in &alphas {
        let mut test_factors = factors.to_vec();
        let factor_prev = &prev_factors[mode];

        let mut test_factor = new_factor.clone();
        for i in 0..test_factor.shape()[0] {
            for j in 0..test_factor.shape()[1] {
                let diff = new_factor[[i, j]] - factor_prev[[i, j]];
                test_factor[[i, j]] = factor_prev[[i, j]] + alpha * diff;
            }
        }
        test_factors[mode] = test_factor;

        if let Ok(fit) = compute_fit(tensor, &test_factors, tensor_norm_sq) {
            if fit > best_fit {
                best_fit = fit;
                best_alpha = alpha;
            }
        }
    }

    best_alpha
}

/// Build the initial factor matrices according to `init`.
fn initialize_factors<T>(
    tensor: &DenseND<T>,
    rank: usize,
    init: InitStrategy,
) -> Result<Vec<Array2<T>>, CpError>
where
    T: Float
        + FloatConst
        + NumCast
        + NumAssign
        + Sum
        + scirs2_core::ndarray_ext::ScalarOperand
        + Send
        + Sync
        + 'static,
{
    let shape = tensor.shape();
    let n_modes = shape.len();

    let mut factors = Vec::with_capacity(n_modes);
    let mut rng = thread_rng();

    match init {
        InitStrategy::Random => {
            for &mode_size in shape.iter() {
                let factor = Array2::from_shape_fn((mode_size, rank), |_| {
                    T::from(rng.random::<f64>()).unwrap()
                });
                factors.push(factor);
            }
        }
        InitStrategy::RandomNormal => {
            for &mode_size in shape.iter() {
                let normal = Normal::new(0.0, 1.0).unwrap();

                let factor = Array2::from_shape_fn((mode_size, rank), |_| {
                    T::from(normal.sample(&mut rng)).unwrap()
                });
                factors.push(factor);
            }
        }
        InitStrategy::Svd => {
            use scirs2_linalg::svd;

            for (mode, &mode_size) in shape.iter().enumerate() {
                let unfolded = tensor
                    .unfold(mode)
                    .map_err(|e| CpError::ShapeMismatch(format!("Unfold failed: {}", e)))?;

                let (u, _s, _vt) =
                    svd(&unfolded.view(), false, None).map_err(CpError::LinalgError)?;

                // Take the leading left singular vectors.
                let actual_rank = rank.min(u.shape()[1]);
                let mut factor = Array2::<T>::zeros((mode_size, rank));

                for i in 0..mode_size {
                    for j in 0..actual_rank {
                        factor[[i, j]] = u[[i, j]];
                    }
                }

                // Pad any remaining columns with small random values.
                if rank > actual_rank {
                    let normal = Normal::new(0.0, 0.01).unwrap();

                    for j in actual_rank..rank {
                        for i in 0..mode_size {
                            factor[[i, j]] = T::from(normal.sample(&mut rng)).unwrap();
                        }
                    }
                }

                factors.push(factor);
            }
        }
        InitStrategy::Nnsvd => {
            use scirs2_linalg::svd;

            for (mode, &mode_size) in shape.iter().enumerate() {
                let unfolded = tensor
                    .unfold(mode)
                    .map_err(|e| CpError::ShapeMismatch(format!("Unfold failed: {}", e)))?;

                let (u, s, vt) =
                    svd(&unfolded.view(), false, None).map_err(CpError::LinalgError)?;

                let actual_rank = rank.min(u.shape()[1]).min(vt.shape()[0]);
                let mut factor = Array2::<T>::zeros((mode_size, rank));

                for r in 0..actual_rank {
                    let u_col = u.column(r);
                    let v_row = vt.row(r);

                    // Split each singular vector into its positive and
                    // negative parts and keep the dominant pair.
                    let (u_pos, u_neg) = split_sign(&u_col);
                    let (v_pos, v_neg) = split_sign(&v_row);

                    let u_pos_norm = compute_vec_norm(&u_pos);
                    let u_neg_norm = compute_vec_norm(&u_neg);
                    let v_pos_norm = compute_vec_norm(&v_pos);
                    let v_neg_norm = compute_vec_norm(&v_neg);

                    let pos_prod = u_pos_norm * v_pos_norm;
                    let neg_prod = u_neg_norm * v_neg_norm;

                    if pos_prod >= neg_prod {
                        let scale = (s[r] * pos_prod).sqrt();
                        for i in 0..mode_size {
                            factor[[i, r]] = u_pos[i] * scale;
                        }
                    } else {
                        let scale = (s[r] * neg_prod).sqrt();
                        for i in 0..mode_size {
                            factor[[i, r]] = u_neg[i] * scale;
                        }
                    }
                }

                // Pad remaining columns with small non-negative noise.
                if rank > actual_rank {
                    let normal = Normal::new(0.0, 0.01).unwrap();
                    for j in actual_rank..rank {
                        for i in 0..mode_size {
                            let val = T::from(normal.sample(&mut rng)).unwrap();
                            factor[[i, j]] = val.abs();
                        }
                    }
                }

                factors.push(factor);
            }
        }
        InitStrategy::LeverageScore => {
            use scirs2_linalg::svd;

            for (mode, &mode_size) in shape.iter().enumerate() {
                let unfolded = tensor
                    .unfold(mode)
                    .map_err(|e| CpError::ShapeMismatch(format!("Unfold failed: {}", e)))?;

                let (u, s, _vt) =
                    svd(&unfolded.view(), false, None).map_err(CpError::LinalgError)?;

                let actual_rank = rank.min(u.shape()[1]).min(s.len());

                // Leverage score of row i: squared norm of its projection
                // onto the leading singular subspace, normalized by rank.
                let mut leverage_scores = vec![T::zero(); mode_size];
                for i in 0..mode_size {
                    let mut score = T::zero();
                    for j in 0..actual_rank {
                        let val = u[[i, j]];
                        score += val * val;
                    }
                    leverage_scores[i] = score / T::from(actual_rank).unwrap();
                }

                // Normalize the scores to a probability distribution.
                let total_score: T = leverage_scores.iter().copied().sum();
                if total_score > T::epsilon() {
                    for score in &mut leverage_scores {
                        *score /= total_score;
                    }
                }

                let mut factor = Array2::<T>::zeros((mode_size, rank));

                for r in 0..actual_rank {
                    let weight = s[r].sqrt();
                    for i in 0..mode_size {
                        let leverage_weight =
                            (leverage_scores[i] * T::from(mode_size).unwrap()).sqrt();
                        factor[[i, r]] = u[[i, r]] * weight * leverage_weight;
                    }
                }

                // Pad remaining columns, weighted by the leverage scores.
                if rank > actual_rank {
                    let normal = Normal::new(0.0, 0.01).unwrap();
                    for j in actual_rank..rank {
                        for i in 0..mode_size {
                            let base_val = T::from(normal.sample(&mut rng)).unwrap();
                            let leverage_weight = leverage_scores[i];
                            factor[[i, j]] = base_val * leverage_weight;
                        }
                    }
                }

                factors.push(factor);
            }
        }
    }

    Ok(factors)
}

/// Split a vector into its positive part and the magnitudes of its negative
/// part (both returned as dense vectors of the same length).
fn split_sign<T>(vec: &scirs2_core::ndarray_ext::ArrayView1<T>) -> (Vec<T>, Vec<T>)
where
    T: Float,
{
    let mut pos = Vec::with_capacity(vec.len());
    let mut neg = Vec::with_capacity(vec.len());

    for &val in vec.iter() {
        if val > T::zero() {
            pos.push(val);
            neg.push(T::zero());
        } else {
            pos.push(T::zero());
            neg.push(-val);
        }
    }

    (pos, neg)
}

/// Euclidean norm of a slice.
fn compute_vec_norm<T>(vec: &[T]) -> T
where
    T: Float + Sum,
{
    vec.iter().map(|&x| x * x).sum::<T>().sqrt()
}

/// Hadamard product of the Gram matrices `A_m^T A_m` of all factors except
/// `skip_mode`; this is the coefficient matrix of the ALS subproblem.
fn compute_gram_hadamard<T>(factors: &[Array2<T>], skip_mode: usize) -> Array2<T>
where
    T: Float,
{
    let rank = factors[0].shape()[1];
    let mut gram = Array2::<T>::ones((rank, rank));

    for (i, factor) in factors.iter().enumerate() {
        if i == skip_mode {
            continue;
        }

        let factor_gram = compute_gram_matrix(factor);

        for r1 in 0..rank {
            for r2 in 0..rank {
                gram[[r1, r2]] = gram[[r1, r2]] * factor_gram[[r1, r2]];
            }
        }
    }

    gram
}

/// Gram matrix `A^T A` of a single factor.
fn compute_gram_matrix<T>(factor: &Array2<T>) -> Array2<T>
where
    T: Float,
{
    let (rows, cols) = (factor.shape()[0], factor.shape()[1]);
    let mut gram = Array2::<T>::zeros((cols, cols));

    for i in 0..cols {
        for j in 0..cols {
            let mut sum = T::zero();
            for k in 0..rows {
                sum = sum + factor[[k, i]] * factor[[k, j]];
            }
            gram[[i, j]] = sum;
        }
    }

    gram
}

/// Solve `X * gram = mttkrp_result` row by row via least squares, falling
/// back to a diagonally regularized system if the plain solve fails.
fn solve_least_squares<T>(mttkrp_result: &Array2<T>, gram: &Array2<T>) -> Result<Array2<T>, CpError>
where
    T: Float + NumAssign + Sum + scirs2_core::ndarray_ext::ScalarOperand + Send + Sync + 'static,
{
    let (rows, rank) = (mttkrp_result.shape()[0], mttkrp_result.shape()[1]);

    // Each row i solves gram^T x = b_i.
    let gram_t = gram.t().to_owned();

    let mut result = Array2::<T>::zeros((rows, rank));

    for i in 0..rows {
        let b = mttkrp_result.row(i).to_owned();

        match lstsq(&gram_t.view(), &b.view(), None) {
            Ok(solution) => {
                for j in 0..rank {
                    result[[i, j]] = solution.x[j];
                }
            }
            Err(_) => {
                // Singular or ill-conditioned system: add a small ridge to
                // the diagonal and retry.
                let eps = T::epsilon() * T::from(rank * 10).unwrap();
                let mut gram_reg = gram_t.clone();
                for k in 0..rank.min(gram_reg.shape()[0]) {
                    gram_reg[[k, k]] += eps;
                }

                let solution =
                    lstsq(&gram_reg.view(), &b.view(), None).map_err(CpError::LinalgError)?;

                for j in 0..rank {
                    result[[i, j]] = solution.x[j];
                }
            }
        }
    }

    Ok(result)
}

/// Squared Frobenius norm of a dense tensor.
fn compute_norm_squared<T>(tensor: &DenseND<T>) -> T
where
    T: Float,
{
    let view = tensor.view();
    let mut norm_sq = T::zero();

    for &val in view.iter() {
        norm_sq = norm_sq + val * val;
    }

    norm_sq
}

/// Fit of the current factors: `1 - ||X - X_hat||_F / ||X||_F`, computed
/// without materializing the reconstruction, then clamped to `[0, 1]`.
fn compute_fit<T>(
    tensor: &DenseND<T>,
    factors: &[Array2<T>],
    tensor_norm_sq: T,
) -> Result<T, CpError>
where
    T: Float + NumCast,
{
    // ||X_hat||^2 from the factor Gram matrices.
    let recon_norm_sq = compute_reconstruction_norm_squared(factors);

    // <X, X_hat> via a single MTTKRP.
    let inner_product = compute_inner_product(tensor, factors)?;

    // ||X - X_hat||^2 = ||X||^2 + ||X_hat||^2 - 2 <X, X_hat>.
    let error_sq = tensor_norm_sq + recon_norm_sq - T::from(2).unwrap() * inner_product;
    let error = error_sq.max(T::zero()).sqrt();

    let fit = T::one() - error / tensor_norm_sq.sqrt();

    Ok(fit.max(T::zero()).min(T::one()))
}

/// Squared Frobenius norm of the implied reconstruction:
/// `sum_{r,s} prod_n <a_r^(n), a_s^(n)>`.
fn compute_reconstruction_norm_squared<T>(factors: &[Array2<T>]) -> T
where
    T: Float,
{
    let rank = factors[0].shape()[1];
    let mut norm_sq = T::zero();

    for r in 0..rank {
        for s in 0..rank {
            let mut cross_term = T::one();
            for factor in factors {
                // Inner product of columns r and s of this factor.
                let mut inner_prod = T::zero();
                for i in 0..factor.shape()[0] {
                    inner_prod = inner_prod + factor[[i, r]] * factor[[i, s]];
                }
                cross_term = cross_term * inner_prod;
            }
            norm_sq = norm_sq + cross_term;
        }
    }

    norm_sq
}

/// Inner product `<X, X_hat>`, computed as the sum over components of the
/// mode-0 MTTKRP columns against the mode-0 factor columns.
fn compute_inner_product<T>(tensor: &DenseND<T>, factors: &[Array2<T>]) -> Result<T, CpError>
where
    T: Float,
{
    let mut inner_prod = T::zero();
    let rank = factors[0].shape()[1];

    // MTTKRP with respect to mode 0 collapses all other modes.
    let factor_views: Vec<_> = factors.iter().map(|f| f.view()).collect();
    let mttkrp_result = mttkrp(&tensor.view(), &factor_views, 0)
        .map_err(|e| CpError::ShapeMismatch(e.to_string()))?;

    for r in 0..rank {
        for i in 0..factors[0].shape()[0] {
            inner_prod = inner_prod + mttkrp_result[[i, r]] * factors[0][[i, r]];
        }
    }

    Ok(inner_prod)
}

/// Orthonormalize the columns of a factor via a thin QR decomposition.
fn orthonormalize_factor<T>(factor: &Array2<T>) -> Result<Array2<T>, CpError>
where
    T: Float
        + FloatConst
        + NumCast
        + NumAssign
        + Sum
        + scirs2_core::ndarray_ext::ScalarOperand
        + Send
        + Sync
        + std::fmt::Display
        + 'static,
{
    use scirs2_core::ndarray_ext::s;
    use scirs2_linalg::qr;

    let (_m, n) = factor.dim();

    let (q_full, _r) = qr(&factor.view(), None).map_err(CpError::LinalgError)?;

    // Keep only the first n columns of Q (thin QR).
    let q = q_full.slice(s![.., ..n]).to_owned();

    Ok(q)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_cp_als_basic() {
        let tensor = DenseND::<f64>::random_uniform(&[3, 4, 5], 0.0, 1.0);
        let result = cp_als(&tensor, 2, 10, 1e-4, InitStrategy::Svd, None);

        assert!(result.is_ok());
        let cp = result.unwrap();

        assert_eq!(cp.factors.len(), 3);
        assert_eq!(cp.factors[0].shape(), &[3, 2]);
        assert_eq!(cp.factors[1].shape(), &[4, 2]);
        assert_eq!(cp.factors[2].shape(), &[5, 2]);
    }

    #[test]
    fn test_gram_matrix() {
        use scirs2_core::ndarray_ext::array;

        let factor = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]];
        let gram = compute_gram_matrix(&factor);

        assert_eq!(gram.shape(), &[2, 2]);
        assert!((gram[[0, 0]] - 35.0).abs() < 1e-10); // 1 + 9 + 25
        assert!((gram[[1, 1]] - 56.0).abs() < 1e-10); // 4 + 16 + 36
    }

    #[test]
    fn test_cp_als_nonnegative() {
        let tensor = DenseND::<f64>::random_uniform(&[5, 5, 5], 0.0, 1.0);
        let constraints = CpConstraints::nonnegative();
        let result = cp_als_constrained(
            &tensor,
            3,
            20,
            1e-4,
            InitStrategy::Random,
            constraints,
            None,
        );

        assert!(result.is_ok());
        let cp = result.unwrap();

        for factor in &cp.factors {
            for &val in factor.iter() {
                assert!(
                    val >= 0.0,
                    "Factor value should be non-negative, got {}",
                    val
                );
            }
        }
    }

    #[test]
    fn test_cp_als_l2_regularized() {
        let tensor = DenseND::<f64>::random_uniform(&[5, 5, 5], 0.0, 1.0);
        let constraints = CpConstraints::l2_regularized(0.01);
        let result = cp_als_constrained(
            &tensor,
            3,
            20,
            1e-4,
            InitStrategy::Random,
            constraints,
            None,
        );

        assert!(result.is_ok());
        let cp = result.unwrap();

        assert!(cp.fit > 0.0 && cp.fit <= 1.0);
    }

    #[test]
    fn test_cp_als_orthogonal() {
        let tensor = DenseND::<f64>::random_uniform(&[8, 8, 8], 0.0, 1.0);
        let constraints = CpConstraints::orthogonal();
        let result = cp_als_constrained(
            &tensor,
            4,
            10,
            1e-4,
            InitStrategy::Random,
            constraints,
            None,
        );

        if let Err(e) = &result {
            eprintln!("Orthogonal CP-ALS failed: {:?}", e);
        }
        assert!(result.is_ok());
        let cp = result.unwrap();

        // Each factor should have (approximately) orthonormal columns.
        for factor in &cp.factors {
            let gram = factor.t().dot(factor);

            for i in 0..gram.nrows() {
                for j in 0..gram.ncols() {
                    let expected = if i == j { 1.0 } else { 0.0 };
                    let actual = gram[[i, j]];
                    let diff = (actual - expected).abs();

                    assert!(
                        diff < 0.1,
                        "Orthogonality check failed: gram[{},{}] = {:.4}, expected {}",
                        i,
                        j,
                        actual,
                        expected
                    );
                }
            }
        }
    }

    #[test]
    fn test_constraint_combinations() {
        let tensor = DenseND::<f64>::random_uniform(&[6, 6, 6], 0.0, 1.0);
        let constraints = CpConstraints {
            nonnegative: true,
            l2_reg: 0.01,
            orthogonal: false,
        };
        let result = cp_als_constrained(
            &tensor,
            3,
            20,
            1e-4,
            InitStrategy::Random,
            constraints,
            None,
        );

        assert!(result.is_ok());
        let cp = result.unwrap();

        for factor in &cp.factors {
            for &val in factor.iter() {
                assert!(val >= 0.0, "Factor value should be non-negative");
            }
        }

        assert!(cp.fit > 0.0);
    }

    #[test]
    fn test_nnsvd_initialization() {
        let tensor = DenseND::<f64>::random_uniform(&[8, 8, 8], 0.0, 1.0);
        let result = cp_als(&tensor, 4, 20, 1e-4, InitStrategy::Nnsvd, None);

        assert!(result.is_ok());
        let cp = result.unwrap();

        assert!(cp.fit > 0.0 && cp.fit <= 1.0);

        assert_eq!(cp.factors.len(), 3);
        assert_eq!(cp.factors[0].shape(), &[8, 4]);
    }

    #[test]
    fn test_leverage_score_initialization() {
        let tensor = DenseND::<f64>::random_uniform(&[10, 10, 10], 0.0, 1.0);
        let result = cp_als(&tensor, 5, 20, 1e-4, InitStrategy::LeverageScore, None);

        assert!(result.is_ok());
        let cp = result.unwrap();

        assert!(cp.fit > 0.0 && cp.fit <= 1.0);
        assert_eq!(cp.factors.len(), 3);

        assert!(cp.convergence.is_some());
    }

    #[test]
    fn test_convergence_diagnostics() {
        let tensor = DenseND::<f64>::random_uniform(&[6, 6, 6], 0.0, 1.0);
        let result = cp_als(&tensor, 3, 10, 1e-4, InitStrategy::Random, None);

        assert!(result.is_ok());
        let cp = result.unwrap();

        assert!(cp.convergence.is_some());

        let conv = cp.convergence.unwrap();

        assert!(!conv.fit_history.is_empty());
        assert!(conv.fit_history.len() <= 10);

        // The reported fit must be the last entry of the history.
        assert!((cp.fit - conv.fit_history[conv.fit_history.len() - 1]).abs() < 1e-10);

        // The reason must be one of the defined variants.
        match conv.reason {
            ConvergenceReason::FitTolerance
            | ConvergenceReason::MaxIterations
            | ConvergenceReason::Oscillation
            | ConvergenceReason::TimeLimit => {}
        }
    }

    #[test]
    fn test_convergence_fit_history() {
        let tensor = DenseND::<f64>::random_uniform(&[5, 5, 5], 0.0, 1.0);
        let result = cp_als(&tensor, 3, 30, 1e-6, InitStrategy::Svd, None);

        assert!(result.is_ok());
        let cp = result.unwrap();
        let conv = cp.convergence.unwrap();

        assert!(!conv.fit_history.is_empty());

        for &fit in &conv.fit_history {
            assert!(
                (0.0..=1.0).contains(&fit),
                "Fit value should be in [0,1], got {}",
                fit
            );
        }

        assert!(cp.fit > 0.0 && cp.fit <= 1.0);
    }

    #[test]
    fn test_oscillation_detection() {
        let tensor = DenseND::<f64>::random_uniform(&[4, 4, 4], 0.0, 1.0);
        let result = cp_als(&tensor, 2, 50, 1e-8, InitStrategy::Random, None);

        assert!(result.is_ok());
        let cp = result.unwrap();
        let conv = cp.convergence.unwrap();

        assert!(conv.oscillation_count <= 50);

        if conv.oscillation_count > 0 {
            assert!(conv.oscillated);
        }
    }

    #[test]
    fn test_cp_als_accelerated_basic() {
        let tensor = DenseND::<f64>::random_uniform(&[8, 8, 8], 0.0, 1.0);
        let result = cp_als_accelerated(&tensor, 4, 30, 1e-4, InitStrategy::Random, None);

        assert!(result.is_ok());
        let cp = result.unwrap();

        assert!(cp.fit > 0.0 && cp.fit <= 1.0);

        assert_eq!(cp.factors.len(), 3);
        assert_eq!(cp.factors[0].shape(), &[8, 4]);
        assert_eq!(cp.factors[1].shape(), &[8, 4]);
        assert_eq!(cp.factors[2].shape(), &[8, 4]);

        assert!(cp.convergence.is_some());
    }

    #[test]
    fn test_cp_als_accelerated_faster_convergence() {
        let tensor = DenseND::<f64>::random_uniform(&[12, 12, 12], 0.0, 1.0);

        let cp_standard = cp_als(&tensor, 5, 50, 1e-5, InitStrategy::Svd, None).unwrap();

        let cp_accel = cp_als_accelerated(&tensor, 5, 50, 1e-5, InitStrategy::Svd, None).unwrap();

        // Both variants should reach a comparable fit.
        let fit_diff = (cp_standard.fit - cp_accel.fit).abs();
        assert!(
            fit_diff < 0.1,
            "Fits should be similar: standard={:.4}, accel={:.4}",
            cp_standard.fit,
            cp_accel.fit
        );

        println!(
            "Standard iters: {}, Accelerated iters: {}",
            cp_standard.iters, cp_accel.iters
        );
    }

    #[test]
    fn test_cp_als_accelerated_with_svd_init() {
        let tensor = DenseND::<f64>::random_uniform(&[10, 10, 10], 0.0, 1.0);
        let result = cp_als_accelerated(&tensor, 6, 25, 1e-4, InitStrategy::Svd, None);

        assert!(result.is_ok());
        let cp = result.unwrap();

        assert!(cp.fit > 0.3, "SVD initialization should provide good fit");

        let conv = cp.convergence.unwrap();
        assert!(!conv.fit_history.is_empty());
    }

    #[test]
    fn test_cp_als_accelerated_reconstruction() {
        let tensor = DenseND::<f64>::random_uniform(&[6, 6, 6], 0.0, 1.0);
        let cp = cp_als_accelerated(&tensor, 4, 30, 1e-4, InitStrategy::Random, None).unwrap();

        let reconstructed = cp.reconstruct(&[6, 6, 6]).unwrap();

        assert_eq!(reconstructed.shape(), &[6, 6, 6]);

        let diff = &tensor - &reconstructed;
        let error_norm = diff.frobenius_norm();
        let tensor_norm = tensor.frobenius_norm();
        let relative_error = error_norm / tensor_norm;
        let computed_fit = 1.0 - relative_error;

        let fit_diff = (cp.fit - computed_fit).abs();
        assert!(
            fit_diff < 0.1,
            "Fit should approximately match reconstruction: fit={:.4}, computed={:.4}",
            cp.fit,
            computed_fit
        );
    }

    #[test]
    fn test_cp_completion_basic() {
        use scirs2_core::ndarray_ext::Array;

        let mut data = Array::zeros(vec![4, 4, 4]);
        let mut mask = Array::zeros(vec![4, 4, 4]);

        // Observe a checkerboard pattern of entries.
        for i in 0..4 {
            for j in 0..4 {
                for k in 0..4 {
                    if (i + j + k) % 2 == 0 {
                        data[[i, j, k]] = (i + j + k) as f64 / 10.0;
                        mask[[i, j, k]] = 1.0;
                    }
                }
            }
        }

        let tensor = DenseND::from_array(data.into_dyn());
        let mask_tensor = DenseND::from_array(mask.into_dyn());

        let result = cp_completion(&tensor, &mask_tensor, 3, 50, 1e-4, InitStrategy::Random);

        assert!(result.is_ok());
        let cp = result.unwrap();

        assert_eq!(cp.factors.len(), 3);
        assert_eq!(cp.factors[0].shape(), &[4, 3]);
        assert_eq!(cp.factors[1].shape(), &[4, 3]);
        assert_eq!(cp.factors[2].shape(), &[4, 3]);

        assert!(cp.fit > 0.0 && cp.fit <= 1.0);
    }

    #[test]
    fn test_cp_completion_reconstruction() {
        use scirs2_core::ndarray_ext::Array;

        // Build an exactly rank-2 tensor from known factors.
        let factor1 = Array::from_shape_fn((6, 2), |(i, r)| (i + r) as f64 / 10.0);
        let factor2 = Array::from_shape_fn((6, 2), |(i, r)| (i + r + 1) as f64 / 10.0);
        let factor3 = Array::from_shape_fn((6, 2), |(i, r)| (i + r + 2) as f64 / 10.0);

        let factors_vec = vec![factor1.clone(), factor2.clone(), factor3.clone()];
        let original = compute_reconstruction(&factors_vec).unwrap();

        // Observe roughly 70% of the entries.
        let mut mask_data = Array::zeros(vec![6, 6, 6]);
        let mut rng = thread_rng();
        for idx in mask_data.iter_mut() {
            if rng.random::<f64>() < 0.7 {
                *idx = 1.0;
            }
        }

        let mask = DenseND::from_array(mask_data.into_dyn());

        let cp = cp_completion(&original, &mask, 2, 100, 1e-5, InitStrategy::Svd).unwrap();

        let _reconstructed = cp.reconstruct(&[6, 6, 6]).unwrap();

        assert!(
            cp.fit > 0.0,
            "Completion should achieve positive fit on observed entries, got {:.4}",
            cp.fit
        );
        println!("Completion fit: {:.4}", cp.fit);
    }

    #[test]
    fn test_cp_completion_mask_validation() {
        use scirs2_core::ndarray_ext::Array;

        let data = Array::<f64, _>::zeros(vec![4, 4, 4]);
        let wrong_mask = Array::<f64, _>::zeros(vec![4, 4, 5]); // wrong size in the last mode

        let tensor = DenseND::from_array(data.into_dyn());
        let mask_tensor = DenseND::from_array(wrong_mask.into_dyn());

        let result = cp_completion(&tensor, &mask_tensor, 2, 50, 1e-4, InitStrategy::Random);

        assert!(result.is_err());
        match result {
            Err(CpError::ShapeMismatch(_)) => {}
            _ => panic!("Expected ShapeMismatch error"),
        }
    }

    #[test]
    fn test_cp_completion_no_observed_entries() {
        use scirs2_core::ndarray_ext::Array;

        let data = Array::<f64, _>::zeros(vec![3, 3, 3]);
        let mask = Array::<f64, _>::zeros(vec![3, 3, 3]); // nothing observed

        let tensor = DenseND::from_array(data.into_dyn());
        let mask_tensor = DenseND::from_array(mask.into_dyn());

        let result = cp_completion(&tensor, &mask_tensor, 2, 50, 1e-4, InitStrategy::Random);

        assert!(result.is_err());
        match result {
            Err(CpError::ShapeMismatch(_)) => {}
            _ => panic!("Expected ShapeMismatch error for empty mask"),
        }
    }

    #[test]
    fn test_cp_completion_convergence() {
        use scirs2_core::ndarray_ext::Array;

        let mut data = Array::zeros(vec![8, 8, 8]);
        let mut mask = Array::zeros(vec![8, 8, 8]);

        // Observe about a third of the entries of a smooth tensor.
        for i in 0..8 {
            for j in 0..8 {
                for k in 0..8 {
                    if (i + j * 2 + k * 3) % 3 == 0 {
                        data[[i, j, k]] = i as f64 * 0.1 + j as f64 * 0.2 + k as f64 * 0.15;
                        mask[[i, j, k]] = 1.0;
                    }
                }
            }
        }

        let tensor = DenseND::from_array(data.into_dyn());
        let mask_tensor = DenseND::from_array(mask.into_dyn());

        let cp = cp_completion(&tensor, &mask_tensor, 3, 200, 1e-6, InitStrategy::Svd).unwrap();

        assert!(
            cp.fit > 0.0,
            "Should achieve positive fit, got {:.4}",
            cp.fit
        );

        println!("Converged in {} iterations", cp.iters);
    }

    #[test]
    fn test_cp_completion_high_missing_rate() {
        use scirs2_core::ndarray_ext::Array;

        let mut data = Array::zeros(vec![10, 10, 10]);
        let mut mask = Array::zeros(vec![10, 10, 10]);
        let mut rng = thread_rng();

        // Only about 10% of the entries are observed.
        for i in 0..10 {
            for j in 0..10 {
                for k in 0..10 {
                    let val = (i + j + k) as f64 / 30.0;
                    data[[i, j, k]] = val;

                    if rng.random::<f64>() < 0.1 {
                        mask[[i, j, k]] = 1.0;
                    }
                }
            }
        }

        let tensor = DenseND::from_array(data.into_dyn());
        let mask_tensor = DenseND::from_array(mask.into_dyn());

        let result = cp_completion(&tensor, &mask_tensor, 4, 100, 1e-4, InitStrategy::Random);

        assert!(result.is_ok());
        let cp = result.unwrap();

        println!("High missing rate fit: {:.4}", cp.fit);
        assert!(cp.fit >= 0.0 && cp.fit <= 1.0);
    }

    #[test]
    fn test_cp_randomized_basic() {
        let tensor = DenseND::<f64>::random_uniform(&[15, 15, 15], 0.0, 1.0);

        let rank = 5;
        let sketch_size = rank * 3; // 3x oversampling
        let result = cp_randomized(
            &tensor,
            rank,
            30,
            1e-4,
            InitStrategy::Random,
            sketch_size,
            5,
        );

        assert!(result.is_ok(), "Randomized CP should succeed");
        let cp = result.unwrap();

        assert_eq!(cp.factors.len(), 3);
        assert_eq!(cp.factors[0].shape(), &[15, 5]);
        assert_eq!(cp.factors[1].shape(), &[15, 5]);
        assert_eq!(cp.factors[2].shape(), &[15, 5]);

        assert!(
            cp.fit > 0.0 && cp.fit <= 1.0,
            "Fit should be in [0, 1], got {:.4}",
            cp.fit
        );
    }

    #[test]
    fn test_cp_randomized_reconstruction() {
        let tensor = DenseND::<f64>::random_uniform(&[12, 12, 12], 0.0, 1.0);

        let rank = 8;
        let sketch_size = rank * 4; // 4x oversampling
        let cp = cp_randomized(
            &tensor,
            rank,
            50,
            1e-4,
            InitStrategy::Random,
            sketch_size,
            3,
        )
        .unwrap();

        let reconstructed = cp.reconstruct(tensor.shape());
        assert!(reconstructed.is_ok(), "Reconstruction should succeed");

        let recon = reconstructed.unwrap();
        assert_eq!(recon.shape(), tensor.shape());

        let diff = &tensor - &recon;
        let error_norm = diff.frobenius_norm();
        let tensor_norm = tensor.frobenius_norm();
        let computed_fit = 1.0 - (error_norm / tensor_norm);

        println!(
            "Randomized CP fit: {:.4}, computed fit: {:.4}",
            cp.fit, computed_fit
        );

        assert!(
            computed_fit > 0.0,
            "Computed fit should be positive, got {:.4}",
            computed_fit
        );
    }

    #[test]
    fn test_cp_randomized_vs_standard() {
        let tensor = DenseND::<f64>::random_uniform(&[20, 20, 20], 0.0, 1.0);
        let rank = 6;

        let cp_std = cp_als(&tensor, rank, 30, 1e-4, InitStrategy::Random, None).unwrap();

        let sketch_size = rank * 5; // 5x oversampling
        let cp_rand = cp_randomized(
            &tensor,
            rank,
            30,
            1e-4,
            InitStrategy::Random,
            sketch_size,
            3,
        )
        .unwrap();

        println!(
            "Standard CP fit: {:.4}, Randomized CP fit: {:.4}",
            cp_std.fit, cp_rand.fit
        );

        assert!(cp_rand.fit > 0.0, "Randomized fit should be positive");

        assert!(cp_rand.reconstruct(tensor.shape()).is_ok());
        assert!(cp_std.reconstruct(tensor.shape()).is_ok());
    }

    #[test]
    fn test_cp_randomized_oversampling() {
        let tensor = DenseND::<f64>::random_uniform(&[10, 10, 10], 0.0, 1.0);
        let rank = 4;

        let sketch_low = rank * 3;
        let cp_low =
            cp_randomized(&tensor, rank, 30, 1e-4, InitStrategy::Random, sketch_low, 5).unwrap();

        let sketch_high = rank * 7;
        let cp_high = cp_randomized(
            &tensor,
            rank,
            30,
            1e-4,
            InitStrategy::Random,
            sketch_high,
            5,
        )
        .unwrap();

        println!(
            "Low oversampling (3x) fit: {:.4}, High oversampling (7x) fit: {:.4}",
            cp_low.fit, cp_high.fit
        );

        assert!(cp_low.fit >= 0.0);
        assert!(cp_high.fit >= 0.0);

        assert!(cp_low.fit <= 1.0);
        assert!(cp_high.fit <= 1.0);
    }

    #[test]
    fn test_cp_randomized_fit_check_frequency() {
        let tensor = DenseND::<f64>::random_uniform(&[12, 12, 12], 0.0, 1.0);
        let rank = 5;
        let sketch_size = rank * 4;

        let cp_freq1 = cp_randomized(
            &tensor,
            rank,
            20,
            1e-4,
            InitStrategy::Random,
            sketch_size,
            1,
        )
        .unwrap();

        let cp_freq10 = cp_randomized(
            &tensor,
            rank,
            20,
            1e-4,
            InitStrategy::Random,
            sketch_size,
            10,
        )
        .unwrap();

        println!("Freq=1 iters: {}, fit: {:.4}", cp_freq1.iters, cp_freq1.fit);
        println!(
            "Freq=10 iters: {}, fit: {:.4}",
            cp_freq10.iters, cp_freq10.fit
        );

        assert!(cp_freq1.iters > 0);
        assert!(cp_freq10.iters > 0);

        assert!(cp_freq1.fit > 0.0 && cp_freq1.fit <= 1.0);
        assert!(cp_freq10.fit > 0.0 && cp_freq10.fit <= 1.0);
    }

    #[test]
    fn test_cp_randomized_invalid_params() {
        let tensor = DenseND::<f64>::random_uniform(&[10, 10, 10], 0.0, 1.0);

        let result1 = cp_randomized(&tensor, 5, 20, 1e-4, InitStrategy::Random, 3, 5);
        assert!(result1.is_err(), "Should fail with sketch_size < rank");

        let result2 = cp_randomized(&tensor, 0, 20, 1e-4, InitStrategy::Random, 10, 5);
        assert!(result2.is_err(), "Should fail with rank = 0");

        let result3 = cp_randomized(&tensor, 15, 20, 1e-4, InitStrategy::Random, 30, 5);
        assert!(result3.is_err(), "Should fail with rank > mode_size");
    }

    #[test]
    fn test_cp_randomized_convergence() {
        let tensor = DenseND::<f64>::random_uniform(&[8, 8, 8], 0.0, 1.0);
        let rank = 4;
        let sketch_size = rank * 4;

        let cp_tight =
            cp_randomized(&tensor, rank, 100, 1e-5, InitStrategy::Svd, sketch_size, 5).unwrap();

        let cp_loose =
            cp_randomized(&tensor, rank, 100, 1e-2, InitStrategy::Svd, sketch_size, 5).unwrap();

        println!(
            "Tight tol: {} iters, fit: {:.4}",
            cp_tight.iters, cp_tight.fit
        );
        println!(
            "Loose tol: {} iters, fit: {:.4}",
            cp_loose.iters, cp_loose.fit
        );

        assert!(cp_loose.iters <= cp_tight.iters);

        assert!(cp_tight.fit > 0.0);
        assert!(cp_loose.fit > 0.0);
    }

    #[test]
    fn test_cp_randomized_init_strategies() {
        let tensor = DenseND::<f64>::random_uniform(&[10, 10, 10], 0.0, 1.0);
        let rank = 5;
        let sketch_size = rank * 3;

        let cp_rand = cp_randomized(
            &tensor,
            rank,
            30,
            1e-4,
            InitStrategy::Random,
            sketch_size,
            5,
        )
        .unwrap();
        assert!(cp_rand.fit > 0.0);

        let cp_svd =
            cp_randomized(&tensor, rank, 30, 1e-4, InitStrategy::Svd, sketch_size, 5).unwrap();
        assert!(cp_svd.fit > 0.0);

        println!(
            "Random init fit: {:.4}, SVD init fit: {:.4}",
            cp_rand.fit, cp_svd.fit
        );
    }

    #[test]
    fn test_cp_incremental_append_mode() {
        let initial_tensor = DenseND::<f64>::random_uniform(&[20, 10, 10], 0.0, 1.0);
        let rank = 5;

        let cp_initial =
            cp_als(&initial_tensor, rank, 30, 1e-4, InitStrategy::Random, None).unwrap();

        let new_slice = DenseND::<f64>::random_uniform(&[5, 10, 10], 0.0, 1.0);

        let cp_updated = cp_als_incremental(
            &cp_initial,
            &new_slice,
            0, // append along mode 0
            IncrementalMode::Append,
            10,
            1e-4,
        )
        .unwrap();

        assert_eq!(cp_updated.factors.len(), 3);
        assert_eq!(cp_updated.factors[0].shape()[0], 25); // 20 + 5
        assert_eq!(cp_updated.factors[0].shape()[1], rank);
        assert_eq!(cp_updated.factors[1].shape()[0], 10);
        assert_eq!(cp_updated.factors[2].shape()[0], 10);

        assert!(cp_updated.fit > 0.0 && cp_updated.fit <= 1.0);

        println!(
            "Initial fit: {:.4}, Updated fit: {:.4}",
            cp_initial.fit, cp_updated.fit
        );
    }

    #[test]
    fn test_cp_incremental_sliding_window() {
        let initial_tensor = DenseND::<f64>::random_uniform(&[20, 10, 10], 0.0, 1.0);
        let rank = 5;

        let cp_initial =
            cp_als(&initial_tensor, rank, 30, 1e-4, InitStrategy::Random, None).unwrap();

        let new_data = DenseND::<f64>::random_uniform(&[20, 10, 10], 0.0, 1.0);

        let cp_updated = cp_als_incremental(
            &cp_initial,
            &new_data,
            0, // update along mode 0
            IncrementalMode::SlidingWindow { lambda: 0.9 },
            10,
            1e-4,
        )
        .unwrap();

        assert_eq!(cp_updated.factors.len(), 3);
        assert_eq!(cp_updated.factors[0].shape()[0], 20);
        assert_eq!(cp_updated.factors[0].shape()[1], rank);

        assert!(cp_updated.fit > 0.0 && cp_updated.fit <= 1.0);

        println!(
            "Initial fit: {:.4}, Updated fit (sliding): {:.4}",
            cp_initial.fit, cp_updated.fit
        );
    }

    #[test]
    fn test_cp_incremental_dimensions() {
        let tensor1 = DenseND::<f64>::random_uniform(&[10, 8, 6], 0.0, 1.0);
        let rank = 3;

        let cp = cp_als(&tensor1, rank, 20, 1e-4, InitStrategy::Random, None).unwrap();

        let new_data = DenseND::<f64>::random_uniform(&[5, 8, 6], 0.0, 1.0);
        let cp_updated =
            cp_als_incremental(&cp, &new_data, 0, IncrementalMode::Append, 5, 1e-4).unwrap();

        assert_eq!(cp_updated.factors[0].shape()[0], 15); // 10 + 5
        assert_eq!(cp_updated.factors[1].shape()[0], 8);
        assert_eq!(cp_updated.factors[2].shape()[0], 6);
    }

    #[test]
    fn test_cp_incremental_invalid_mode() {
        let tensor = DenseND::<f64>::random_uniform(&[10, 10, 10], 0.0, 1.0);
        let rank = 5;

        let cp = cp_als(&tensor, rank, 20, 1e-4, InitStrategy::Random, None).unwrap();
        let new_data = DenseND::<f64>::random_uniform(&[5, 10, 10], 0.0, 1.0);

        // Mode index 3 is out of range for a 3-way tensor.
        let result = cp_als_incremental(&cp, &new_data, 3, IncrementalMode::Append, 5, 1e-4);

        assert!(result.is_err(), "Should fail with invalid mode");
    }

    #[test]
    fn test_cp_incremental_shape_mismatch() {
        let tensor = DenseND::<f64>::random_uniform(&[10, 10, 10], 0.0, 1.0);
        let rank = 5;

        let cp = cp_als(&tensor, rank, 20, 1e-4, InitStrategy::Random, None).unwrap();

        // Mode 1 has size 12 instead of the expected 10.
        let new_data = DenseND::<f64>::random_uniform(&[5, 12, 10], 0.0, 1.0);

        let result = cp_als_incremental(&cp, &new_data, 0, IncrementalMode::Append, 5, 1e-4);

        assert!(result.is_err(), "Should fail with shape mismatch");
    }

    #[test]
    fn test_cp_incremental_convergence() {
        let tensor = DenseND::<f64>::random_uniform(&[15, 8, 8], 0.0, 1.0);
        let rank = 4;

        let cp = cp_als(&tensor, rank, 30, 1e-4, InitStrategy::Svd, None).unwrap();

        let new_data = DenseND::<f64>::random_uniform(&[5, 8, 8], 0.0, 1.0);

        let cp_updated =
            cp_als_incremental(&cp, &new_data, 0, IncrementalMode::Append, 10, 1e-4).unwrap();

        assert!(cp_updated.iters <= 10);
        assert!(cp_updated.iters > 0);

        println!(
            "Incremental update converged in {} iterations",
            cp_updated.iters
        );
    }

    #[test]
    fn test_cp_incremental_reconstruction_quality() {
        let tensor1 = DenseND::<f64>::random_uniform(&[15, 10, 10], 0.0, 1.0);
        let rank = 6;

        let cp = cp_als(&tensor1, rank, 30, 1e-4, InitStrategy::Random, None).unwrap();

        let new_data = DenseND::<f64>::random_uniform(&[5, 10, 10], 0.0, 1.0);

        let cp_updated =
            cp_als_incremental(&cp, &new_data, 0, IncrementalMode::Append, 10, 1e-4).unwrap();

        let reconstructed = cp_updated.reconstruct(&[20, 10, 10]).unwrap();

        assert_eq!(reconstructed.shape(), &[20, 10, 10]);

        assert!(
            cp_updated.fit > 0.5,
            "Fit should be reasonably good: {:.4}",
            cp_updated.fit
        );
    }
}