pub mod additive;
pub mod dual_decomposition;
pub mod interior_point;
pub mod multidimensional;
pub mod projected_gradient;
pub mod quadratic_programming;
pub mod simd_operations;
pub mod sparse;

pub use simd_operations::{
    simd_armijo_line_search, simd_constraint_violations, simd_dot_product,
    simd_gradient_computation, simd_hessian_approximation, simd_isotonic_projection,
    simd_newton_step, simd_qp_matrix_vector_multiply, simd_vector_norm,
};

pub use quadratic_programming::{
    isotonic_regression_active_set, isotonic_regression_qp, ActiveSetIsotonicRegressor,
    QuadraticProgrammingIsotonicRegressor,
};

pub use interior_point::{isotonic_regression_interior_point, InteriorPointIsotonicRegressor};

pub use projected_gradient::{
    isotonic_regression_projected_gradient, ProjectedGradientIsotonicRegressor,
};

pub use dual_decomposition::{
    isotonic_regression_dual_decomposition, parallel_dual_decomposition,
    DualDecompositionIsotonicRegressor,
};

pub use multidimensional::{
    create_partial_order, interpolate_multidimensional, non_separable_isotonic_regression,
    separable_isotonic_regression, NonSeparableMultiDimensionalIsotonicRegression,
    SeparableMultiDimensionalIsotonicRegression,
};

pub use sparse::{sparse_isotonic_regression, SparseIsotonicRegression};

pub use additive::{additive_isotonic_regression, AdditiveIsotonicRegression};

/// Optimization algorithms available for solving isotonic regression problems.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OptimizationAlgorithm {
    /// Pool Adjacent Violators Algorithm (PAVA); O(n) and exact.
    PoolAdjacentViolators,
    /// Active set method for bounded isotonic regression.
    ActiveSet,
    /// General quadratic programming formulation.
    QuadraticProgramming,
    /// Interior point method with barrier functions.
    InteriorPoint,
    /// Projected gradient method with constraint projection.
    ProjectedGradient,
    /// Dual decomposition for large-scale problems.
    DualDecomposition,
}

impl Default for OptimizationAlgorithm {
    fn default() -> Self {
        Self::PoolAdjacentViolators
    }
}

impl OptimizationAlgorithm {
    /// Returns a short human-readable description of the algorithm.
    pub fn description(&self) -> &'static str {
        match self {
            Self::PoolAdjacentViolators => "Pool Adjacent Violators - O(n) optimal algorithm",
            Self::ActiveSet => "Active set method for bounded isotonic regression",
            Self::QuadraticProgramming => "General quadratic programming formulation",
            Self::InteriorPoint => "Interior point method with barrier functions",
            Self::ProjectedGradient => "Projected gradient method with constraint projection",
            Self::DualDecomposition => "Dual decomposition for large-scale problems",
        }
    }

    /// Returns the asymptotic time complexity of the algorithm.
    pub fn complexity(&self) -> &'static str {
        match self {
            Self::PoolAdjacentViolators => "O(n)",
            Self::ActiveSet => "O(n³) worst case, O(n) typical",
            Self::QuadraticProgramming => "O(n³)",
            Self::InteriorPoint => "O(n³) per iteration",
            Self::ProjectedGradient => "O(n log n) per iteration",
            Self::DualDecomposition => "O(k·n) where k is the block size",
        }
    }

    /// Recommends an algorithm based on problem size, presence of bound
    /// constraints, and sparsity.
    pub fn recommend(n_samples: usize, has_bounds: bool, is_sparse: bool) -> Self {
        match (n_samples, has_bounds, is_sparse) {
            (n, _, true) if n > 1000 => Self::DualDecomposition,
            (n, false, false) if n < 10000 => Self::PoolAdjacentViolators,
            (n, true, false) if n < 1000 => Self::ActiveSet,
            (n, true, false) if n >= 1000 => Self::InteriorPoint,
            (n, false, false) if n >= 10000 => Self::DualDecomposition,
            _ => Self::ProjectedGradient,
        }
    }
}

/// Results from benchmarking a single isotonic regression run.
#[derive(Debug, Clone)]
pub struct BenchmarkResults {
    /// Algorithm that was benchmarked.
    pub algorithm: OptimizationAlgorithm,
    /// Number of samples in the problem.
    pub n_samples: usize,
    /// Execution time in milliseconds.
    pub execution_time_ms: f64,
    /// Memory usage in bytes.
    pub memory_usage_bytes: usize,
    /// Final value of the objective function.
    pub objective_value: f64,
    /// Number of iterations performed.
    pub iterations: usize,
    /// Whether the algorithm converged.
    pub converged: bool,
}

impl BenchmarkResults {
    /// Creates a new benchmark record from raw measurements.
    pub fn new(
        algorithm: OptimizationAlgorithm,
        n_samples: usize,
        execution_time_ms: f64,
        memory_usage_bytes: usize,
        objective_value: f64,
        iterations: usize,
        converged: bool,
    ) -> Self {
        Self {
            algorithm,
            n_samples,
            execution_time_ms,
            memory_usage_bytes,
            objective_value,
            iterations,
            converged,
        }
    }

    /// Computes a composite performance score (lower is better): execution
    /// time in milliseconds, plus memory usage in megabytes, plus a flat
    /// penalty of 1000.0 if the run did not converge.
    pub fn performance_score(&self) -> f64 {
        let time_penalty = self.execution_time_ms;
        let memory_penalty = (self.memory_usage_bytes as f64) / 1_000_000.0;
        let convergence_penalty = if self.converged { 0.0 } else { 1000.0 };

        time_penalty + memory_penalty + convergence_penalty
    }

    /// Returns throughput in samples per second.
    pub fn throughput(&self) -> f64 {
        if self.execution_time_ms > 0.0 {
            (self.n_samples as f64) / (self.execution_time_ms / 1000.0)
        } else {
            f64::INFINITY
        }
    }
}

/// Configuration for an isotonic regression optimization run.
#[derive(Debug, Clone)]
pub struct OptimizationConfig {
    /// Algorithm to use.
    pub algorithm: OptimizationAlgorithm,
    /// Maximum number of iterations.
    pub max_iterations: usize,
    /// Convergence tolerance.
    pub tolerance: f64,
    /// Whether to use SIMD-accelerated operations.
    pub enable_simd: bool,
    /// Block size for decomposition-based methods (`None` disables blocking).
    pub block_size: Option<usize>,
    /// Regularization strength (0.0 disables regularization).
    pub regularization: f64,
}

impl Default for OptimizationConfig {
    fn default() -> Self {
        Self {
            algorithm: OptimizationAlgorithm::PoolAdjacentViolators,
            max_iterations: 1000,
            tolerance: 1e-8,
            enable_simd: true,
            block_size: None,
            regularization: 0.0,
        }
    }
}

impl OptimizationConfig {
    /// Builds a configuration tuned for an unbounded, dense problem of the
    /// given size.
    pub fn for_problem_size(n_samples: usize) -> Self {
        let algorithm = OptimizationAlgorithm::recommend(n_samples, false, false);
        let block_size = if n_samples > 1000 {
            Some((n_samples / 10).clamp(100, 1000))
        } else {
            None
        };

        Self {
            algorithm,
            max_iterations: if n_samples > 10000 { 200 } else { 1000 },
            tolerance: if n_samples > 10000 { 1e-6 } else { 1e-8 },
            enable_simd: true,
            block_size,
            regularization: 0.0,
        }
    }

    /// Builds a configuration for a problem with bound constraints.
    pub fn for_bounded_problem(n_samples: usize) -> Self {
        let algorithm = OptimizationAlgorithm::recommend(n_samples, true, false);

        Self {
            algorithm,
            max_iterations: 1000,
            tolerance: 1e-8,
            enable_simd: true,
            block_size: None,
            regularization: 1e-12,
        }
    }

    /// Builds a configuration for a sparse problem; `sparsity_ratio` is the
    /// fraction of zero entries, and ratios above 0.9 are treated as very
    /// sparse.
    pub fn for_sparse_problem(n_samples: usize, sparsity_ratio: f64) -> Self {
        let is_very_sparse = sparsity_ratio > 0.9;
        let algorithm = if is_very_sparse {
            OptimizationAlgorithm::DualDecomposition
        } else {
            OptimizationAlgorithm::recommend(n_samples, false, true)
        };

        Self {
            algorithm,
            max_iterations: if is_very_sparse { 500 } else { 1000 },
            tolerance: 1e-6,
            enable_simd: true,
            block_size: Some((n_samples / 20).clamp(50, 500)),
            regularization: 0.0,
        }
    }
}

#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_optimization_algorithm_recommendation() {
        // Small, unbounded, dense problem: PAVA.
        assert_eq!(
            OptimizationAlgorithm::recommend(100, false, false),
            OptimizationAlgorithm::PoolAdjacentViolators
        );

        // Medium bounded problem: active set or interior point.
        let result = OptimizationAlgorithm::recommend(500, true, false);
        assert!(
            result == OptimizationAlgorithm::ActiveSet
                || result == OptimizationAlgorithm::InteriorPoint
        );

        // Large unbounded problem: dual decomposition.
        assert_eq!(
            OptimizationAlgorithm::recommend(50000, false, false),
            OptimizationAlgorithm::DualDecomposition
        );

        // Large sparse problem: dual decomposition.
        assert_eq!(
            OptimizationAlgorithm::recommend(10000, false, true),
            OptimizationAlgorithm::DualDecomposition
        );
    }

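    // An added sanity check, grounded in the two Default impls above: both
    // the bare algorithm enum and the config fall back to PAVA.
    #[test]
    fn test_default_algorithm_is_pava() {
        assert_eq!(
            OptimizationAlgorithm::default(),
            OptimizationAlgorithm::PoolAdjacentViolators
        );
        assert_eq!(
            OptimizationConfig::default().algorithm,
            OptimizationAlgorithm::PoolAdjacentViolators
        );
    }
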
    #[test]
    fn test_benchmark_results() {
        let results = BenchmarkResults::new(
            OptimizationAlgorithm::ActiveSet,
            1000,
            50.0,
            1_000_000,
            0.5,
            25,
            true,
        );

        assert_eq!(results.algorithm, OptimizationAlgorithm::ActiveSet);
        assert_eq!(results.n_samples, 1000);
        assert!(results.converged);

        let score = results.performance_score();
        assert!(score > 0.0);

        let throughput = results.throughput();
        assert!(throughput > 0.0);
    }

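    // An added sketch of how performance_score behaves, using synthetic
    // benchmark numbers: two runs identical except for convergence should
    // differ by exactly the flat 1000.0 non-convergence penalty, and
    // throughput is reported in samples per second.
    #[test]
    fn test_performance_score_convergence_penalty() {
        let ok = BenchmarkResults::new(
            OptimizationAlgorithm::PoolAdjacentViolators, 1000, 50.0, 1_000_000, 0.5, 25, true,
        );
        let failed = BenchmarkResults::new(
            OptimizationAlgorithm::PoolAdjacentViolators, 1000, 50.0, 1_000_000, 0.5, 25, false,
        );
        assert!((failed.performance_score() - ok.performance_score() - 1000.0).abs() < 1e-12);

        // 1000 samples in 50 ms = 20_000 samples per second.
        assert!((ok.throughput() - 20_000.0).abs() < 1e-9);
    }
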
    #[test]
    fn test_optimization_config_creation() {
        let config = OptimizationConfig::for_problem_size(5000);
        assert_eq!(
            config.algorithm,
            OptimizationAlgorithm::PoolAdjacentViolators
        );
        assert!(config.enable_simd);

        let bounded_config = OptimizationConfig::for_bounded_problem(500);
        assert_eq!(bounded_config.algorithm, OptimizationAlgorithm::ActiveSet);

        let sparse_config = OptimizationConfig::for_sparse_problem(10000, 0.95);
        assert_eq!(
            sparse_config.algorithm,
            OptimizationAlgorithm::DualDecomposition
        );
        assert!(sparse_config.block_size.is_some());
    }

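    // An added check of the large-problem heuristics in for_problem_size:
    // past 10_000 samples the iteration budget drops to 200 with a looser
    // 1e-6 tolerance, and the block size is clamped to the 100..=1000 range.
    #[test]
    fn test_large_problem_config_heuristics() {
        let config = OptimizationConfig::for_problem_size(50_000);
        assert_eq!(config.algorithm, OptimizationAlgorithm::DualDecomposition);
        assert_eq!(config.max_iterations, 200);
        assert_eq!(config.tolerance, 1e-6);
        // 50_000 / 10 = 5_000, clamped down to 1000.
        assert_eq!(config.block_size, Some(1000));
    }
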
    #[test]
    fn test_algorithm_descriptions() {
        for &algorithm in &[
            OptimizationAlgorithm::PoolAdjacentViolators,
            OptimizationAlgorithm::ActiveSet,
            OptimizationAlgorithm::QuadraticProgramming,
            OptimizationAlgorithm::InteriorPoint,
            OptimizationAlgorithm::ProjectedGradient,
            OptimizationAlgorithm::DualDecomposition,
        ] {
            assert!(!algorithm.description().is_empty());
            assert!(!algorithm.complexity().is_empty());
        }
    }

    #[test]
    fn test_basic_qp_integration() {
        let y = array![3.0, 1.0, 2.0, 4.0];
        let result = isotonic_regression_qp(&y, None, true);
        assert!(result.is_ok());

        let solution = result.unwrap();
        // The fitted values must be non-decreasing (up to numerical tolerance).
        for i in 1..solution.len() {
            assert!(solution[i] >= solution[i - 1] - 1e-10);
        }
    }

    #[test]
    fn test_basic_interior_point_integration() {
        let y = array![2.0, 1.0, 3.0];
        let result = isotonic_regression_interior_point(&y, None, true);
        assert!(result.is_ok());

        let solution = result.unwrap();
        // The fitted values must be non-decreasing (up to numerical tolerance).
        for i in 1..solution.len() {
            assert!(solution[i] >= solution[i - 1] - 1e-10);
        }
    }

    #[test]
    fn test_basic_projected_gradient_integration() {
        let y = array![1.0, 3.0, 2.0];
        let result = isotonic_regression_projected_gradient(&y, None, true);
        assert!(result.is_ok());

        let solution = result.unwrap();
        // The fitted values must be non-decreasing (up to numerical tolerance).
        for i in 1..solution.len() {
            assert!(solution[i] >= solution[i - 1] - 1e-10);
        }
    }

    #[test]
    fn test_basic_dual_decomposition_integration() {
        let y = array![4.0, 2.0, 3.0, 1.0, 5.0];
        let result = isotonic_regression_dual_decomposition(&y, None, true);
        assert!(result.is_ok());

        let solution = result.unwrap();
        // The fitted values must be non-decreasing (up to numerical tolerance).
        for i in 1..solution.len() {
            assert!(solution[i] >= solution[i - 1] - 1e-10);
        }
    }

    #[test]
    fn test_basic_sparse_integration() {
        let x = array![0.0, 1.0, 0.0, 2.0];
        let y = array![0.0, 1.0, 0.0, 4.0];
        let result = sparse_isotonic_regression(&x, &y, true, None);
        assert!(result.is_ok());

        let predictions = result.unwrap();
        assert_eq!(predictions.len(), 4);
    }

    #[test]
    fn test_basic_multidimensional_integration() {
        let x = array![[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]];
        let y = array![1.0, 4.0, 9.0];
        let constraints = vec![true, true];

        let result = separable_isotonic_regression(&x, &y, &constraints, None);
        assert!(result.is_ok());

        let predictions = result.unwrap();
        assert_eq!(predictions.len(), 3);
    }

    #[test]
    fn test_basic_additive_integration() {
        let x = array![[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]];
        let y = array![1.0, 4.0, 9.0];
        let constraints = vec![true, true];

        let result = additive_isotonic_regression(&x, &y, &constraints, None, None);
        assert!(result.is_ok());

        let predictions = result.unwrap();
        assert_eq!(predictions.len(), 3);
    }

    #[test]
    fn test_simd_operations_integration() {
        let a = array![1.0, 2.0, 3.0, 4.0];
        let b = array![2.0, 3.0, 4.0, 5.0];

        let dot_product = simd_dot_product(&a, &b);
        let expected = a.dot(&b);
        assert!((dot_product - expected).abs() < 1e-10);

        let norm = simd_vector_norm(&a);
        let expected_norm = a.mapv(|x| x * x).sum().sqrt();
        assert!((norm - expected_norm).abs() < 1e-10);
    }
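
    // An added end-to-end sketch tying the pieces above together: the
    // algorithm picked by recommend() matches the one for_problem_size()
    // selects, and a benchmark record (timing and memory numbers here are
    // synthetic, for illustration only) reports throughput in samples/s.
    #[test]
    fn test_recommendation_config_roundtrip() {
        let n_samples = 50_000;
        let algorithm = OptimizationAlgorithm::recommend(n_samples, false, false);
        let config = OptimizationConfig::for_problem_size(n_samples);
        assert_eq!(config.algorithm, algorithm);

        let results = BenchmarkResults::new(algorithm, n_samples, 10.0, 2_000_000, 0.1, 5, true);
        // 50_000 samples in 10 ms = 5_000_000 samples per second.
        assert!((results.throughput() - 5_000_000.0).abs() < 1e-6);
    }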
}