single_svdlib/legacy.rs
1//! # svdlibrs
2//!
3//! A Rust port of LAS2 from SVDLIBC
4//!
//! A library that computes an SVD of a sparse matrix, typically a large sparse matrix
6//!
7//! This is a functional port (mostly a translation) of the algorithm as implemented in Doug Rohde's SVDLIBC
8//!
9//! This library performs [singular value decomposition](https://en.wikipedia.org/wiki/Singular_value_decomposition) on a sparse input [Matrix](https://docs.rs/nalgebra-sparse/latest/nalgebra_sparse/) using the [Lanczos algorithm](https://en.wikipedia.org/wiki/Lanczos_algorithm) and returns the decomposition as [ndarray](https://docs.rs/ndarray/latest/ndarray/) components.
10//!
11//! # Usage
12//!
13//! Input: [Sparse Matrix (CSR, CSC, or COO)](https://docs.rs/nalgebra-sparse/latest/nalgebra_sparse/)
14//!
15//! Output: decomposition `U`,`S`,`V` where `U`,`V` are [`Array2`](https://docs.rs/ndarray/latest/ndarray/type.Array2.html) and `S` is [`Array1`](https://docs.rs/ndarray/latest/ndarray/type.Array1.html), packaged in a [Result](https://doc.rust-lang.org/stable/core/result/enum.Result.html)\<`SvdRec`, `SvdLibError`\>
16//!
17//! # Quick Start
18//!
19//! ## There are 3 convenience methods to handle common use cases
20//! 1. `svd` -- simply computes an SVD
21//!
//! 2. `svd_dim` -- computes an SVD supplying a desired number of `dimensions`
//!
//! 3. `svd_dim_seed` -- computes an SVD supplying a desired number of `dimensions` and a fixed `seed` to the LAS2 algorithm (the algorithm initializes with a random vector and will generate an internal seed if one isn't supplied)
25//!
26//! ```rust
27//! # extern crate ndarray;
28//! # use ndarray::prelude::*;
29//! use svdlibrs::svd;
30//! # let mut coo = nalgebra_sparse::coo::CooMatrix::<f64>::new(2, 2);
31//! # coo.push(0, 0, 1.0);
32//! # coo.push(1, 0, 3.0);
33//! # coo.push(1, 1, -5.0);
34//!
35//! # let csr = nalgebra_sparse::csr::CsrMatrix::from(&coo);
36//! // SVD on a Compressed Sparse Row matrix
37//! let svd = svd(&csr)?;
38//! # Ok::<(), svdlibrs::error::SvdLibError>(())
39//! ```
40//!
41//! ```rust
42//! # extern crate ndarray;
43//! # use ndarray::prelude::*;
44//! use svdlibrs::svd_dim;
45//! # let mut coo = nalgebra_sparse::coo::CooMatrix::<f64>::new(3, 3);
46//! # coo.push(0, 0, 1.0); coo.push(0, 1, 16.0); coo.push(0, 2, 49.0);
47//! # coo.push(1, 0, 4.0); coo.push(1, 1, 25.0); coo.push(1, 2, 64.0);
48//! # coo.push(2, 0, 9.0); coo.push(2, 1, 36.0); coo.push(2, 2, 81.0);
49//!
50//! # let csc = nalgebra_sparse::csc::CscMatrix::from(&coo);
51//! // SVD on a Compressed Sparse Column matrix specifying the desired dimensions, 3 in this example
52//! let svd = svd_dim(&csc, 3)?;
53//! # Ok::<(), svdlibrs::error::SvdLibError>(())
54//! ```
55//!
56//! ```rust
57//! # extern crate ndarray;
58//! # use ndarray::prelude::*;
59//! use svdlibrs::svd_dim_seed;
60//! # let mut coo = nalgebra_sparse::coo::CooMatrix::<f64>::new(3, 3);
61//! # coo.push(0, 0, 1.0); coo.push(0, 1, 16.0); coo.push(0, 2, 49.0);
62//! # coo.push(1, 0, 4.0); coo.push(1, 1, 25.0); coo.push(1, 2, 64.0);
63//! # coo.push(2, 0, 9.0); coo.push(2, 1, 36.0); coo.push(2, 2, 81.0);
64//! # let dimensions = 3;
65//!
66//! // SVD on a Coordinate-form matrix requesting the
67//! // dimensions and supplying a fixed seed to the LAS2 algorithm
68//! let svd = svd_dim_seed(&coo, dimensions, 12345)?;
69//! # Ok::<(), svdlibrs::error::SvdLibError>(())
70//! ```
71//!
72//! # The SVD Decomposition and informational Diagnostics are returned in `SvdRec`
73//!
74//! ```rust
75//! # extern crate ndarray;
76//! # use ndarray::prelude::*;
//! pub struct SvdRec {
//!     pub d: usize,                 // Dimensionality (rank), the number of rows of both ut, vt and the length of s
//!     pub ut: Array2<f64>,          // Transpose of left singular vectors, the vectors are the rows of ut
//!     pub s: Array1<f64>,           // Singular values (length d)
//!     pub vt: Array2<f64>,          // Transpose of right singular vectors, the vectors are the rows of vt
//!     pub diagnostics: Diagnostics, // Computational diagnostics
//! }
//!
//! pub struct Diagnostics {
//!     pub non_zero: usize,               // Number of non-zeros in the input matrix
//!     pub dimensions: usize,             // Number of dimensions attempted (bounded by matrix shape)
//!     pub iterations: usize,             // Number of iterations attempted (bounded by dimensions and matrix shape)
//!     pub transposed: bool,              // True if the matrix was transposed internally
//!     pub lanczos_steps: usize,          // Number of Lanczos steps performed
//!     pub ritz_values_stabilized: usize, // Number of ritz values
//!     pub significant_values: usize,     // Number of significant values discovered
//!     pub singular_values: usize,        // Number of singular values returned
//!     pub end_interval: [f64; 2],        // Left, Right end of interval containing unwanted eigenvalues
//!     pub kappa: f64,                    // Relative accuracy of ritz values acceptable as eigenvalues
//!     pub random_seed: u32,              // Random seed provided or the seed generated
//! }
98//! ```
99//!
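//! For example, the decomposition and its diagnostics can be inspected like this (using the same matrix as the Quick Start above):
//!
//! ```rust
//! # use svdlibrs::svd;
//! # let mut coo = nalgebra_sparse::coo::CooMatrix::<f64>::new(2, 2);
//! # coo.push(0, 0, 1.0);
//! # coo.push(1, 0, 3.0);
//! # coo.push(1, 1, -5.0);
//! # let csr = nalgebra_sparse::csr::CsrMatrix::from(&coo);
//! let svd = svd(&csr)?;
//! assert_eq!(svd.d, svd.s.len());
//! assert_eq!(svd.diagnostics.non_zero, 3);
//! println!("rank = {}, seed used = {}", svd.d, svd.diagnostics.random_seed);
//! # Ok::<(), svdlibrs::error::SvdLibError>(())
//! ```
//!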
100//! # The method `svdLAS2` provides the following parameter control
101//!
102//! ```rust
103//! # extern crate ndarray;
104//! # use ndarray::prelude::*;
105//! use svdlibrs::{svd, svd_dim, svd_dim_seed, svdLAS2, SvdRec};
106//! # let mut matrix = nalgebra_sparse::coo::CooMatrix::<f64>::new(3, 3);
107//! # matrix.push(0, 0, 1.0); matrix.push(0, 1, 16.0); matrix.push(0, 2, 49.0);
108//! # matrix.push(1, 0, 4.0); matrix.push(1, 1, 25.0); matrix.push(1, 2, 64.0);
109//! # matrix.push(2, 0, 9.0); matrix.push(2, 1, 36.0); matrix.push(2, 2, 81.0);
110//! # let dimensions = 3;
111//! # let iterations = 0;
112//! # let end_interval = &[-1.0e-30, 1.0e-30];
113//! # let kappa = 1.0e-6;
114//! # let random_seed = 0;
115//!
//! let svd: SvdRec = svdLAS2(
//!     &matrix,      // sparse matrix (nalgebra_sparse::{csr,csc,coo})
//!     dimensions,   // upper limit of desired number of dimensions
//!                   // supplying 0 will use the input matrix shape to determine dimensions
//!     iterations,   // number of algorithm iterations
//!                   // supplying 0 will use the input matrix shape to determine iterations
//!     end_interval, // left, right end of interval containing unwanted eigenvalues,
//!                   // typically small values centered around zero
//!                   // set to [-1.0e-30, 1.0e-30] for convenience methods svd(), svd_dim(), svd_dim_seed()
//!     kappa,        // relative accuracy of ritz values acceptable as eigenvalues
//!                   // set to 1.0e-6 for convenience methods svd(), svd_dim(), svd_dim_seed()
//!     random_seed,  // a supplied seed if > 0, otherwise an internal seed will be generated
//! )?;
129//! # Ok::<(), svdlibrs::error::SvdLibError>(())
130//! ```
131//!
132//! # SVD Examples
133//!
134//! ### SVD using [R](https://www.r-project.org/)
135//!
136//! ```text
137//! $ Rscript -e 'options(digits=12);m<-matrix(1:9,nrow=3)^2;print(m);r<-svd(m);print(r);r$u%*%diag(r$d)%*%t(r$v)'
138//!
139//! • The input matrix: M
140//! [,1] [,2] [,3]
141//! [1,] 1 16 49
142//! [2,] 4 25 64
143//! [3,] 9 36 81
144//!
145//! • The diagonal matrix (singular values): S
146//! $d
147//! [1] 123.676578742544 6.084527896514 0.287038004183
148//!
149//! • The left singular vectors: U
150//! $u
151//! [,1] [,2] [,3]
152//! [1,] -0.415206840886 -0.753443585619 -0.509829424976
153//! [2,] -0.556377565194 -0.233080213641 0.797569820742
154//! [3,] -0.719755016815 0.614814099788 -0.322422608499
155//!
156//! • The right singular vectors: V
157//! $v
158//! [,1] [,2] [,3]
159//! [1,] -0.0737286909592 0.632351847728 -0.771164846712
160//! [2,] -0.3756889918995 0.698691000150 0.608842071210
161//! [3,] -0.9238083467338 -0.334607272761 -0.186054055373
162//!
163//! • Recreating the original input matrix: r$u %*% diag(r$d) %*% t(r$v)
164//! [,1] [,2] [,3]
165//! [1,] 1 16 49
166//! [2,] 4 25 64
167//! [3,] 9 36 81
168//! ```
169//!
170//! ### SVD using svdlibrs
171//!
172//! ```rust
173//! # extern crate ndarray;
174//! # use ndarray::prelude::*;
175//! use nalgebra_sparse::{coo::CooMatrix, csc::CscMatrix};
176//! use svdlibrs::svd_dim_seed;
177//!
178//! // create a CscMatrix from a CooMatrix
179//! // use the same matrix values as the R example above
180//! // [,1] [,2] [,3]
181//! // [1,] 1 16 49
182//! // [2,] 4 25 64
183//! // [3,] 9 36 81
184//! let mut coo = CooMatrix::<f64>::new(3, 3);
185//! coo.push(0, 0, 1.0); coo.push(0, 1, 16.0); coo.push(0, 2, 49.0);
186//! coo.push(1, 0, 4.0); coo.push(1, 1, 25.0); coo.push(1, 2, 64.0);
187//! coo.push(2, 0, 9.0); coo.push(2, 1, 36.0); coo.push(2, 2, 81.0);
188//!
189//! // our input
190//! let csc = CscMatrix::from(&coo);
191//!
192//! // compute the svd
193//! // 1. supply 0 as the dimension (requesting max)
194//! // 2. supply a fixed seed so outputs are repeatable between runs
195//! let svd = svd_dim_seed(&csc, 0, 3141).unwrap();
196//!
197//! // svd.d dimensions were found by the algorithm
198//! // svd.ut is a 2-d array holding the left vectors
199//! // svd.vt is a 2-d array holding the right vectors
200//! // svd.s is a 1-d array holding the singular values
201//! // assert the shape of all results in terms of svd.d
202//! assert_eq!(svd.d, 3);
203//! assert_eq!(svd.d, svd.ut.nrows());
204//! assert_eq!(svd.d, svd.s.dim());
205//! assert_eq!(svd.d, svd.vt.nrows());
206//!
207//! // show transposed output
208//! println!("svd.d = {}\n", svd.d);
209//! println!("U =\n{:#?}\n", svd.ut.t());
210//! println!("S =\n{:#?}\n", svd.s);
211//! println!("V =\n{:#?}\n", svd.vt.t());
212//!
213//! // Note: svd.ut & svd.vt are returned in transposed form
214//! // M = USV*
215//! let m_approx = svd.ut.t().dot(&Array2::from_diag(&svd.s)).dot(&svd.vt);
216//! assert_eq!(svd.recompose(), m_approx);
217//!
218//! // assert computed values are an acceptable approximation
219//! let epsilon = 1.0e-12;
220//! assert!((m_approx[[0, 0]] - 1.0).abs() < epsilon);
221//! assert!((m_approx[[0, 1]] - 16.0).abs() < epsilon);
222//! assert!((m_approx[[0, 2]] - 49.0).abs() < epsilon);
223//! assert!((m_approx[[1, 0]] - 4.0).abs() < epsilon);
224//! assert!((m_approx[[1, 1]] - 25.0).abs() < epsilon);
225//! assert!((m_approx[[1, 2]] - 64.0).abs() < epsilon);
226//! assert!((m_approx[[2, 0]] - 9.0).abs() < epsilon);
227//! assert!((m_approx[[2, 1]] - 36.0).abs() < epsilon);
228//! assert!((m_approx[[2, 2]] - 81.0).abs() < epsilon);
229//!
230//! assert!((svd.s[0] - 123.676578742544).abs() < epsilon);
231//! assert!((svd.s[1] - 6.084527896514).abs() < epsilon);
232//! assert!((svd.s[2] - 0.287038004183).abs() < epsilon);
233//! ```
234//!
235//! # Output
236//!
237//! ```text
238//! svd.d = 3
239//!
240//! U =
241//! [[-0.4152068408862081, -0.7534435856189199, -0.5098294249756481],
242//! [-0.556377565193878, -0.23308021364108839, 0.7975698207417085],
243//! [-0.719755016814907, 0.6148140997884891, -0.3224226084985998]], shape=[3, 3], strides=[1, 3], layout=Ff (0xa), const ndim=2
244//!
245//! S =
246//! [123.67657874254405, 6.084527896513759, 0.2870380041828973], shape=[3], strides=[1], layout=CFcf (0xf), const ndim=1
247//!
248//! V =
249//! [[-0.07372869095916511, 0.6323518477280158, -0.7711648467120451],
250//! [-0.3756889918994792, 0.6986910001499903, 0.6088420712097343],
251//! [-0.9238083467337805, -0.33460727276072516, -0.18605405537270261]], shape=[3, 3], strides=[1, 3], layout=Ff (0xa), const ndim=2
252//! ```
253//!
//! # The full Result\<SvdRec\> for the above example looks like this:
255//! ```text
256//! svd = Ok(
257//! SvdRec {
258//! d: 3,
259//! ut: [[-0.4152068408862081, -0.556377565193878, -0.719755016814907],
260//! [-0.7534435856189199, -0.23308021364108839, 0.6148140997884891],
261//! [-0.5098294249756481, 0.7975698207417085, -0.3224226084985998]], shape=[3, 3], strides=[3, 1], layout=Cc (0x5), const ndim=2,
262//! s: [123.67657874254405, 6.084527896513759, 0.2870380041828973], shape=[3], strides=[1], layout=CFcf (0xf), const ndim=1,
263//! vt: [[-0.07372869095916511, -0.3756889918994792, -0.9238083467337805],
264//! [0.6323518477280158, 0.6986910001499903, -0.33460727276072516],
265//! [-0.7711648467120451, 0.6088420712097343, -0.18605405537270261]], shape=[3, 3], strides=[3, 1], layout=Cc (0x5), const ndim=2,
266//! diagnostics: Diagnostics {
267//! non_zero: 9,
268//! dimensions: 3,
269//! iterations: 3,
270//! transposed: false,
271//! lanczos_steps: 3,
272//! ritz_values_stabilized: 3,
273//! significant_values: 3,
274//! singular_values: 3,
275//! end_interval: [
276//! -1e-30,
277//! 1e-30,
278//! ],
279//! kappa: 1e-6,
280//! random_seed: 3141,
281//! },
282//! },
283//! )
284//! ```
285
286// ==================================================================================
287// This is a functional port (mostly a translation) of "svdLAS2()" from Doug Rohde's SVDLIBC
288// It uses the same conceptual "workspace" storage as the C implementation.
289// Most of the original function & variable names have been preserved.
290// All C-style comments /* ... */ are from the original source, provided for context.
291//
292// dwf -- Wed May 5 16:48:01 MDT 2021
293// ==================================================================================
294
295/*
296SVDLIBC License
297
298The following BSD License applies to all SVDLIBC source code and documentation:
299
300Copyright © 2002, University of Tennessee Research Foundation.
301All rights reserved.
302
303Redistribution and use in source and binary forms, with or without
304modification, are permitted provided that the following conditions are met:
305
306
307 Redistributions of source code must retain the above copyright notice, this
308 list of conditions and the following disclaimer.
309
310 Redistributions in binary form must reproduce the above copyright notice,
311 this list of conditions and the following disclaimer in the documentation
312 and/or other materials provided with the distribution.
313
314 Neither the name of the University of Tennessee nor the names of its
315 contributors may be used to endorse or promote products derived from this
316 software without specific prior written permission.
317
318
319THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
320AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
321IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
322ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
323LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
324CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
325SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
326INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
327CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
328ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
329POSSIBILITY OF SUCH DAMAGE.
330*/
331
332/***********************************************************************
333 * *
334 * main() *
335 * Sparse SVD(A) via Eigensystem of A'A symmetric Matrix *
336 * (double precision) *
337 * *
338 ***********************************************************************/
339/***********************************************************************
340
341 Description
342 -----------
343
344 This sample program uses landr to compute singular triplets of A via
345 the equivalent symmetric eigenvalue problem
346
347 B x = lambda x, where x' = (u',v'), lambda = sigma**2,
348 where sigma is a singular value of A,
349
350 B = A'A , and A is m (nrow) by n (ncol) (nrow >> ncol),
351
352 so that {u,sqrt(lambda),v} is a singular triplet of A.
353 (A' = transpose of A)
354
355 User supplied routines: svd_opa, opb, store, timer
356
357 svd_opa( x,y) takes an n-vector x and returns A*x in y.
358 svd_opb(ncol,x,y) takes an n-vector x and returns B*x in y.
359
360 Based on operation flag isw, store(n,isw,j,s) stores/retrieves
361 to/from storage a vector of length n in s.
362
363 User should edit timer() with an appropriate call to an intrinsic
364 timing routine that returns elapsed user time.
365
366
367 Local parameters
368 ----------------
369
370 (input)
371 endl left end of interval containing unwanted eigenvalues of B
372 endr right end of interval containing unwanted eigenvalues of B
373 kappa relative accuracy of ritz values acceptable as eigenvalues
374 of B
375 vectors is not equal to 1
376 r work array
377 n dimension of the eigenproblem for matrix B (ncol)
378 dimensions upper limit of desired number of singular triplets of A
379 iterations upper limit of desired number of Lanczos steps
380 nnzero number of nonzeros in A
381 vectors 1 indicates both singular values and singular vectors are
382 wanted and they can be found in output file lav2;
383 0 indicates only singular values are wanted
384
385 (output)
386 ritz array of ritz values
387 bnd array of error bounds
388 d array of singular values
389 memory total memory allocated in bytes to solve the B-eigenproblem
390
391
392 Functions used
393 --------------
394
395 BLAS svd_daxpy, svd_dscal, svd_ddot
396 USER svd_opa, svd_opb, timer
397 MISC write_header, check_parameters
398 LAS2 landr
399
400
401 Precision
402 ---------
403
404 All floating-point calculations are done in double precision;
405 variables are declared as long and double.
406
407
408 LAS2 development
409 ----------------
410
411 LAS2 is a C translation of the Fortran-77 LAS2 from the SVDPACK
412 library written by Michael W. Berry, University of Tennessee,
413 Dept. of Computer Science, 107 Ayres Hall, Knoxville, TN, 37996-1301
414
415 31 Jan 1992: Date written
416
417 Theresa H. Do
418 University of Tennessee
419 Dept. of Computer Science
420 107 Ayres Hall
421 Knoxville, TN, 37996-1301
422 internet: tdo@cs.utk.edu
423
424***********************************************************************/
425
426use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng};
427use std::mem;
428extern crate ndarray;
429use ndarray::prelude::*;
430mod error;
431use error::SvdLibError;
432
433// ====================
434// Public
435// ====================
436
437/// Sparse matrix
438pub trait SMat {
439 fn nrows(&self) -> usize;
440 fn ncols(&self) -> usize;
441 fn nnz(&self) -> usize;
442 fn svd_opa(&self, x: &[f64], y: &mut [f64], transposed: bool); // y = A*x
443}
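
// Illustrative only: a minimal triplet-backed matrix implementing `SMat`.
// The type and test below are not part of the original library; they sketch the
// trait contract, assuming `svd_opa` must produce y = A*x when `transposed` is
// false and y = A'*x when it is true (matching how `svd_opb` below composes the two calls).
#[cfg(test)]
mod smat_sketch {
    use super::*;

    struct TripletMat {
        nrows: usize,
        ncols: usize,
        entries: Vec<(usize, usize, f64)>, // (row, col, value)
    }

    impl SMat for TripletMat {
        fn nrows(&self) -> usize {
            self.nrows
        }
        fn ncols(&self) -> usize {
            self.ncols
        }
        fn nnz(&self) -> usize {
            self.entries.len()
        }
        fn svd_opa(&self, x: &[f64], y: &mut [f64], transposed: bool) {
            y.iter_mut().for_each(|yi| *yi = 0.0);
            for &(r, c, v) in &self.entries {
                if transposed {
                    y[c] += v * x[r]; // y = A' * x
                } else {
                    y[r] += v * x[c]; // y = A * x
                }
            }
        }
    }

    #[test]
    fn svd_of_triplet_matrix() {
        // same 2x2 matrix as the Quick Start example: [[1, 0], [3, -5]]
        let m = TripletMat {
            nrows: 2,
            ncols: 2,
            entries: vec![(0, 0, 1.0), (1, 0, 3.0), (1, 1, -5.0)],
        };
        let result = svd(&m).unwrap();
        assert_eq!(result.d, result.s.len());
        assert_eq!(result.diagnostics.non_zero, 3);
    }
}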
444
445/// Singular Value Decomposition Components
446///
447/// # Fields
448/// - d: Dimensionality (rank), the number of rows of both `ut`, `vt` and the length of `s`
449/// - ut: Transpose of left singular vectors, the vectors are the rows of `ut`
450/// - s: Singular values (length `d`)
451/// - vt: Transpose of right singular vectors, the vectors are the rows of `vt`
452/// - diagnostics: Computational diagnostics
453#[derive(Debug, Clone, PartialEq)]
454pub struct SvdRec {
455 pub d: usize,
456 pub ut: Array2<f64>,
457 pub s: Array1<f64>,
458 pub vt: Array2<f64>,
459 pub diagnostics: Diagnostics,
460}
461
462/// Computational Diagnostics
463///
464/// # Fields
465/// - non_zero: Number of non-zeros in the matrix
466/// - dimensions: Number of dimensions attempted (bounded by matrix shape)
467/// - iterations: Number of iterations attempted (bounded by dimensions and matrix shape)
468/// - transposed: True if the matrix was transposed internally
469/// - lanczos_steps: Number of Lanczos steps performed
470/// - ritz_values_stabilized: Number of ritz values
471/// - significant_values: Number of significant values discovered
472/// - singular_values: Number of singular values returned
473/// - end_interval: left, right end of interval containing unwanted eigenvalues
474/// - kappa: relative accuracy of ritz values acceptable as eigenvalues
475/// - random_seed: Random seed provided or the seed generated
476#[derive(Debug, Clone, PartialEq)]
477pub struct Diagnostics {
478 pub non_zero: usize,
479 pub dimensions: usize,
480 pub iterations: usize,
481 pub transposed: bool,
482 pub lanczos_steps: usize,
483 pub ritz_values_stabilized: usize,
484 pub significant_values: usize,
485 pub singular_values: usize,
486 pub end_interval: [f64; 2],
487 pub kappa: f64,
488 pub random_seed: u32,
489}
490
491#[allow(non_snake_case)]
492/// SVD at full dimensionality, calls `svdLAS2` with the highlighted defaults
493///
494/// svdLAS2(A, `0`, `0`, `&[-1.0e-30, 1.0e-30]`, `1.0e-6`, `0`)
495///
496/// # Parameters
497/// - A: Sparse matrix
498pub fn svd(A: &dyn SMat) -> Result<SvdRec, SvdLibError> {
499 svdLAS2(A, 0, 0, &[-1.0e-30, 1.0e-30], 1.0e-6, 0)
500}
501
502#[allow(non_snake_case)]
503/// SVD at desired dimensionality, calls `svdLAS2` with the highlighted defaults
504///
505/// svdLAS2(A, dimensions, `0`, `&[-1.0e-30, 1.0e-30]`, `1.0e-6`, `0`)
506///
507/// # Parameters
508/// - A: Sparse matrix
509/// - dimensions: Upper limit of desired number of dimensions, bounded by the matrix shape
510pub fn svd_dim(A: &dyn SMat, dimensions: usize) -> Result<SvdRec, SvdLibError> {
511 svdLAS2(A, dimensions, 0, &[-1.0e-30, 1.0e-30], 1.0e-6, 0)
512}
513
514#[allow(non_snake_case)]
515/// SVD at desired dimensionality with supplied seed, calls `svdLAS2` with the highlighted defaults
516///
517/// svdLAS2(A, dimensions, `0`, `&[-1.0e-30, 1.0e-30]`, `1.0e-6`, random_seed)
518///
519/// # Parameters
520/// - A: Sparse matrix
521/// - dimensions: Upper limit of desired number of dimensions, bounded by the matrix shape
522/// - random_seed: A supplied seed `if > 0`, otherwise an internal seed will be generated
523pub fn svd_dim_seed(A: &dyn SMat, dimensions: usize, random_seed: u32) -> Result<SvdRec, SvdLibError> {
524 svdLAS2(A, dimensions, 0, &[-1.0e-30, 1.0e-30], 1.0e-6, random_seed)
525}
526
527#[allow(clippy::redundant_field_names)]
528#[allow(non_snake_case)]
529/// Compute a singular value decomposition
530///
531/// # Parameters
532///
533/// - A: Sparse matrix
534/// - dimensions: Upper limit of desired number of dimensions (0 = max),
535/// where "max" is a value bounded by the matrix shape, the smaller of
536/// the matrix rows or columns. e.g. `A.nrows().min(A.ncols())`
/// - iterations: Upper limit of desired number of Lanczos steps (0 = max),
538/// where "max" is a value bounded by the matrix shape, the smaller of
539/// the matrix rows or columns. e.g. `A.nrows().min(A.ncols())`
540/// iterations must also be in range [`dimensions`, `A.nrows().min(A.ncols())`]
541/// - end_interval: Left, right end of interval containing unwanted eigenvalues,
542/// typically small values centered around zero, e.g. `[-1.0e-30, 1.0e-30]`
543/// - kappa: Relative accuracy of ritz values acceptable as eigenvalues, e.g. `1.0e-6`
544/// - random_seed: A supplied seed `if > 0`, otherwise an internal seed will be generated
545///
546/// # More on `dimensions`, `iterations` and `bounding` by the input matrix shape:
547///
548/// let `min_nrows_ncols` = `A.nrows().min(A.ncols())`; // The smaller of `rows`, `columns`
549///
550/// `dimensions` will be adjusted to `min_nrows_ncols` if `dimensions == 0` or `dimensions > min_nrows_ncols`
551///
552/// The algorithm begins with the following assertion on `dimensions`:
553///
554/// #### assert!(dimensions > 1 && dimensions <= min_nrows_ncols);
555///
556/// ---
557///
558/// `iterations` will be adjusted to `min_nrows_ncols` if `iterations == 0` or `iterations > min_nrows_ncols`
559///
560/// `iterations` will be adjusted to `dimensions` if `iterations < dimensions`
561///
562/// The algorithm begins with the following assertion on `iterations`:
563///
564/// #### assert!(iterations >= dimensions && iterations <= min_nrows_ncols);
565///
566/// # Returns
567///
568/// Ok(`SvdRec`) on successful decomposition
569pub fn svdLAS2(
570 A: &dyn SMat,
571 dimensions: usize,
572 iterations: usize,
573 end_interval: &[f64; 2],
574 kappa: f64,
575 random_seed: u32,
576) -> Result<SvdRec, SvdLibError> {
577 let random_seed = match random_seed > 0 {
578 true => random_seed,
579 false => thread_rng().gen::<_>(),
580 };
581
582 let min_nrows_ncols = A.nrows().min(A.ncols());
583
584 let dimensions = match dimensions {
585 n if n == 0 || n > min_nrows_ncols => min_nrows_ncols,
586 _ => dimensions,
587 };
588
589 let iterations = match iterations {
590 n if n == 0 || n > min_nrows_ncols => min_nrows_ncols,
591 n if n < dimensions => dimensions,
592 _ => iterations,
593 };
594
595 if dimensions < 2 {
596 return Err(SvdLibError::Las2Error(format!(
597 "svdLAS2: insufficient dimensions: {dimensions}"
598 )));
599 }
600
601 assert!(dimensions > 1 && dimensions <= min_nrows_ncols);
602 assert!(iterations >= dimensions && iterations <= min_nrows_ncols);
603
604 // If the matrix is wide, the SVD is computed on its transpose for speed
605 let transposed = A.ncols() as f64 >= (A.nrows() as f64 * 1.2);
606 let nrows = if transposed { A.ncols() } else { A.nrows() };
607 let ncols = if transposed { A.nrows() } else { A.ncols() };
608
609 let mut wrk = WorkSpace::new(nrows, ncols, transposed, iterations)?;
610 let mut store = Store::new(ncols)?;
611
612 // Actually run the lanczos thing
613 let mut neig = 0;
614 let steps = lanso(
615 A,
616 dimensions,
617 iterations,
618 end_interval,
619 &mut wrk,
620 &mut neig,
621 &mut store,
622 random_seed,
623 )?;
624
625 // Compute the singular vectors of matrix A
626 let kappa = kappa.abs().max(eps34());
627 let mut R = ritvec(A, dimensions, kappa, &mut wrk, steps, neig, &mut store)?;
628
    // If the SVD was computed on the transposed matrix, U and V exchange roles, so swap Ut and Vt back
630 if transposed {
631 mem::swap(&mut R.Ut, &mut R.Vt);
632 }
633
634 Ok(SvdRec {
635 // Dimensionality (number of Ut,Vt rows & length of S)
636 d: R.d,
637 ut: Array::from_shape_vec((R.d, R.Ut.cols), R.Ut.value)?,
638 s: Array::from_shape_vec(R.d, R.S)?,
639 vt: Array::from_shape_vec((R.d, R.Vt.cols), R.Vt.value)?,
640 diagnostics: Diagnostics {
641 non_zero: A.nnz(),
642 dimensions: dimensions,
643 iterations: iterations,
644 transposed: transposed,
645 lanczos_steps: steps + 1,
646 ritz_values_stabilized: neig,
647 significant_values: R.d,
648 singular_values: R.nsig,
649 end_interval: *end_interval,
650 kappa: kappa,
651 random_seed: random_seed,
652 },
653 })
654}
655
656//================================================================
657// Everything below is the private implementation
658//================================================================
659
660// ====================
661// Private
662// ====================
663
664const MAXLL: usize = 2;
665
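// f64::EPSILON^(3/4), about 1.8e-12 for f64; svdLAS2 uses it as the lower bound for
// kappa and error_bound() uses it to detect nearly coincident ritz values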
666fn eps34() -> f64 {
667 f64::EPSILON.powf(0.75) // f64::EPSILON.sqrt() * f64::EPSILON.sqrt().sqrt();
668}
669
670/***********************************************************************
671 * *
672 * store() *
673 * *
674 ***********************************************************************/
675/***********************************************************************
676
677 Description
678 -----------
679
680 store() is a user-supplied function which, based on the input
681 operation flag, stores to or retrieves from memory a vector.
682
683
684 Arguments
685 ---------
686
687 (input)
688 n length of vector to be stored or retrieved
689 isw operation flag:
690 isw = 1 request to store j-th Lanczos vector q(j)
691 isw = 2 request to retrieve j-th Lanczos vector q(j)
692 isw = 3 request to store q(j) for j = 0 or 1
693 isw = 4 request to retrieve q(j) for j = 0 or 1
694 s contains the vector to be stored for a "store" request
695
696 (output)
697 s contains the vector retrieved for a "retrieve" request
698
699 Functions used
700 --------------
701
702 BLAS svd_dcopy
703
704***********************************************************************/
705#[derive(Debug, Clone, PartialEq)]
706struct Store {
707 n: usize,
708 vecs: Vec<Vec<f64>>,
709}
710impl Store {
711 fn new(n: usize) -> Result<Self, SvdLibError> {
712 Ok(Self { n, vecs: vec![] })
713 }
714 fn storq(&mut self, idx: usize, v: &[f64]) {
715 while idx + MAXLL >= self.vecs.len() {
716 self.vecs.push(vec![0.0; self.n]);
717 }
718 //self.vecs[idx + MAXLL] = v.to_vec();
719 //self.vecs[idx + MAXLL][..self.n].clone_from_slice(&v[..self.n]);
720 self.vecs[idx + MAXLL].copy_from_slice(v);
721 }
722 fn storp(&mut self, idx: usize, v: &[f64]) {
723 while idx >= self.vecs.len() {
724 self.vecs.push(vec![0.0; self.n]);
725 }
726 //self.vecs[idx] = v.to_vec();
727 //self.vecs[idx][..self.n].clone_from_slice(&v[..self.n]);
728 self.vecs[idx].copy_from_slice(v);
729 }
730 fn retrq(&mut self, idx: usize) -> &[f64] {
731 &self.vecs[idx + MAXLL]
732 }
733 fn retrp(&mut self, idx: usize) -> &[f64] {
734 &self.vecs[idx]
735 }
736}
737
738#[derive(Debug, Clone, PartialEq)]
739struct WorkSpace {
740 nrows: usize,
741 ncols: usize,
742 transposed: bool,
743 w0: Vec<f64>, // workspace 0
744 w1: Vec<f64>, // workspace 1
745 w2: Vec<f64>, // workspace 2
746 w3: Vec<f64>, // workspace 3
747 w4: Vec<f64>, // workspace 4
748 w5: Vec<f64>, // workspace 5
749 alf: Vec<f64>, // array to hold diagonal of the tridiagonal matrix T
750 eta: Vec<f64>, // orthogonality estimate of Lanczos vectors at step j
751 oldeta: Vec<f64>, // orthogonality estimate of Lanczos vectors at step j-1
752 bet: Vec<f64>, // array to hold off-diagonal of T
753 bnd: Vec<f64>, // array to hold the error bounds
754 ritz: Vec<f64>, // array to hold the ritz values
755 temp: Vec<f64>, // array to hold the temp values
756}
757impl WorkSpace {
758 fn new(nrows: usize, ncols: usize, transposed: bool, iterations: usize) -> Result<Self, SvdLibError> {
759 Ok(Self {
760 nrows,
761 ncols,
762 transposed,
763 w0: vec![0.0; ncols],
764 w1: vec![0.0; ncols],
765 w2: vec![0.0; ncols],
766 w3: vec![0.0; ncols],
767 w4: vec![0.0; ncols],
768 w5: vec![0.0; ncols],
769 alf: vec![0.0; iterations],
770 eta: vec![0.0; iterations],
771 oldeta: vec![0.0; iterations],
772 bet: vec![0.0; 1 + iterations],
773 ritz: vec![0.0; 1 + iterations],
774 bnd: vec![f64::MAX; 1 + iterations],
775 temp: vec![0.0; nrows],
776 })
777 }
778}
779
780/* Row-major dense matrix. Rows are consecutive vectors. */
781#[derive(Debug, Clone, PartialEq)]
782struct DMat {
783 //long rows;
784 //long cols;
785 //double **value; /* Accessed by [row][col]. Free value[0] and value to free.*/
786 cols: usize,
787 value: Vec<f64>,
788}
789
790#[allow(non_snake_case)]
791#[derive(Debug, Clone, PartialEq)]
792struct SVDRawRec {
793 //int d; /* Dimensionality (rank) */
794 //DMat Ut; /* Transpose of left singular vectors. (d by m)
795 // The vectors are the rows of Ut. */
796 //double *S; /* Array of singular values. (length d) */
797 //DMat Vt; /* Transpose of right singular vectors. (d by n)
798 // The vectors are the rows of Vt. */
799 d: usize,
800 nsig: usize,
801 Ut: DMat,
802 S: Vec<f64>,
803 Vt: DMat,
804}
805
806// =================================================================
807
808// compare two floats within epsilon
809fn compare(computed: f64, expected: f64) -> bool {
810 (expected - computed).abs() < f64::EPSILON
811}
812
813/* Function sorts array1 and array2 into increasing order for array1 */
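// e.g. insert_sort(3, &mut [3.0, 1.0, 2.0], &mut [30.0, 10.0, 20.0]) leaves the arrays
// as [1.0, 2.0, 3.0] and [10.0, 20.0, 30.0] (array2 follows array1's reordering)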
814fn insert_sort<T: PartialOrd>(n: usize, array1: &mut [T], array2: &mut [T]) {
815 for i in 1..n {
816 for j in (1..i + 1).rev() {
817 if array1[j - 1] <= array1[j] {
818 break;
819 }
820 array1.swap(j - 1, j);
821 array2.swap(j - 1, j);
822 }
823 }
824}
825
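// Applies the operator B = A'A to x (temp = A*x, then y = A'*temp); when the problem
// was transposed internally the roles of A and A' are exchanged, giving A*A'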
826#[allow(non_snake_case)]
827#[rustfmt::skip]
828fn svd_opb(A: &dyn SMat, x: &[f64], y: &mut [f64], temp: &mut [f64], transposed: bool) {
829 let nrows = if transposed { A.ncols() } else { A.nrows() };
830 let ncols = if transposed { A.nrows() } else { A.ncols() };
831 assert_eq!(x.len(), ncols, "svd_opb: x must be A.ncols() in length, x = {}, A.ncols = {}", x.len(), ncols);
832 assert_eq!(y.len(), ncols, "svd_opb: y must be A.ncols() in length, y = {}, A.ncols = {}", y.len(), ncols);
    assert_eq!(temp.len(), nrows, "svd_opb: temp must be A.nrows() in length, temp = {}, A.nrows = {}", temp.len(), nrows);
834 A.svd_opa(x, temp, transposed); // temp = (A * x)
835 A.svd_opa(temp, y, !transposed); // y = A' * (A * x) = A' * temp
836}
837
838// constant times a vector plus a vector
839fn svd_daxpy(da: f64, x: &[f64], y: &mut [f64]) {
840 for (xval, yval) in x.iter().zip(y.iter_mut()) {
841 *yval += da * xval
842 }
843}
844
845// finds the index of element having max absolute value
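// e.g. svd_idamax(3, &[1.0, -7.0, 4.0]) == 1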
846fn svd_idamax(n: usize, x: &[f64]) -> usize {
847 assert!(n > 0, "svd_idamax: unexpected inputs!");
848
849 match n {
850 1 => 0,
851 _ => {
852 let mut imax = 0;
853 for (i, xval) in x.iter().enumerate().take(n).skip(1) {
854 if xval.abs() > x[imax].abs() {
855 imax = i;
856 }
857 }
858 imax
859 }
860 }
861}
862
863// returns |a| if b is positive; else fsign returns -|a|
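// e.g. svd_fsign(3.0, -2.0) == -3.0 and svd_fsign(-3.0, 2.0) == 3.0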
864fn svd_fsign(a: f64, b: f64) -> f64 {
865 match a >= 0.0 && b >= 0.0 || a < 0.0 && b < 0.0 {
866 true => a,
867 false => -a,
868 }
869}
870
871// finds sqrt(a^2 + b^2) without overflow or destructive underflow
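// e.g. svd_pythag(3.0, 4.0) -> 5.0, and svd_pythag(3.0e300, 4.0e300) -> 5.0e300,
// even though squaring either argument directly would overflow an f64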
872fn svd_pythag(a: f64, b: f64) -> f64 {
873 match a.abs().max(b.abs()) {
874 n if n > 0.0 => {
875 let mut p = n;
876 let mut r = (a.abs().min(b.abs()) / p).powi(2);
877 let mut t = 4.0 + r;
878 while !compare(t, 4.0) {
879 let s = r / t;
880 let u = 1.0 + 2.0 * s;
881 p *= u;
882 r *= (s / u).powi(2);
883 t = 4.0 + r;
884 }
885 p
886 }
887 _ => 0.0,
888 }
889}
890
891// dot product of two vectors
892fn svd_ddot(x: &[f64], y: &[f64]) -> f64 {
893 x.iter().zip(y).map(|(a, b)| a * b).sum()
894}
895
896// norm (length) of a vector
897fn svd_norm(x: &[f64]) -> f64 {
898 svd_ddot(x, x).sqrt()
899}
900
901// scales an input vector 'x', by a constant, storing in 'y'
902fn svd_datx(d: f64, x: &[f64], y: &mut [f64]) {
903 for (i, xval) in x.iter().enumerate() {
904 y[i] = d * xval;
905 }
906}
907
908// scales an input vector 'x' by a constant, modifying 'x'
909fn svd_dscal(d: f64, x: &mut [f64]) {
910 for elem in x.iter_mut() {
911 *elem *= d;
912 }
913}
914
915// copies a vector x to a vector y (reversed direction)
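// e.g. n = 3, offset = 0, x = [1.0, 2.0, 3.0]  =>  y[0..3] = [3.0, 2.0, 1.0]
// (element x[offset + i] is written to y[offset + n - 1 - i])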
916fn svd_dcopy(n: usize, offset: usize, x: &[f64], y: &mut [f64]) {
917 if n > 0 {
918 let start = n - 1;
919 for i in 0..n {
920 y[offset + start - i] = x[offset + i];
921 }
922 }
923}
924
925/***********************************************************************
926 * *
927 * imtqlb() *
928 * *
929 ***********************************************************************/
930/***********************************************************************
931
932 Description
933 -----------
934
935 imtqlb() is a translation of a Fortran version of the Algol
936 procedure IMTQL1, Num. Math. 12, 377-383(1968) by Martin and
937 Wilkinson, as modified in Num. Math. 15, 450(1970) by Dubrulle.
938 Handbook for Auto. Comp., vol.II-Linear Algebra, 241-248(1971).
939 See also B. T. Smith et al, Eispack Guide, Lecture Notes in
940 Computer Science, Springer-Verlag, (1976).
941
942 The function finds the eigenvalues of a symmetric tridiagonal
943 matrix by the implicit QL method.
944
945
946 Arguments
947 ---------
948
949 (input)
950 n order of the symmetric tridiagonal matrix
951 d contains the diagonal elements of the input matrix
952 e contains the subdiagonal elements of the input matrix in its
953 last n-1 positions. e[0] is arbitrary
954
955 (output)
956 d contains the eigenvalues in ascending order. if an error
957 exit is made, the eigenvalues are correct and ordered for
958 indices 0,1,...ierr, but may not be the smallest eigenvalues.
959 e has been destroyed.
960***********************************************************************/
961fn imtqlb(n: usize, d: &mut [f64], e: &mut [f64], bnd: &mut [f64]) -> Result<(), SvdLibError> {
962 if n == 1 {
963 return Ok(());
964 }
965
966 bnd[0] = 1.0;
967 let last = n - 1;
968 for i in 1..=last {
969 bnd[i] = 0.0;
970 e[i - 1] = e[i];
971 }
972 e[last] = 0.0;
973
974 let mut i = 0;
975
976 for l in 0..=last {
977 let mut iteration = 0;
978 while iteration <= 30 {
979 let mut m = l;
980 while m < n {
981 if m == last {
982 break;
983 }
984 let test = d[m].abs() + d[m + 1].abs();
985 if compare(test, test + e[m].abs()) {
986 break; // convergence = true;
987 }
988 m += 1;
989 }
990 let mut p = d[l];
991 let mut f = bnd[l];
992 if m == l {
993 // order the eigenvalues
994 let mut exchange = true;
995 if l > 0 {
996 i = l;
997 while i >= 1 && exchange {
998 if p < d[i - 1] {
999 d[i] = d[i - 1];
1000 bnd[i] = bnd[i - 1];
1001 i -= 1;
1002 } else {
1003 exchange = false;
1004 }
1005 }
1006 }
1007 if exchange {
1008 i = 0;
1009 }
1010 d[i] = p;
1011 bnd[i] = f;
1012 iteration = 31;
1013 } else {
1014 if iteration == 30 {
1015 return Err(SvdLibError::ImtqlbError(
1016 "imtqlb no convergence to an eigenvalue after 30 iterations".to_string(),
1017 ));
1018 }
1019 iteration += 1;
1020 // ........ form shift ........
1021 let mut g = (d[l + 1] - p) / (2.0 * e[l]);
1022 let mut r = svd_pythag(g, 1.0);
1023 g = d[m] - p + e[l] / (g + svd_fsign(r, g));
1024 let mut s = 1.0;
1025 let mut c = 1.0;
1026 p = 0.0;
1027
1028 assert!(m > 0, "imtqlb: expected 'm' to be non-zero");
1029 i = m - 1;
1030 let mut underflow = false;
1031 while !underflow && i >= l {
1032 f = s * e[i];
1033 let b = c * e[i];
1034 r = svd_pythag(f, g);
1035 e[i + 1] = r;
1036 if compare(r, 0.0) {
1037 underflow = true;
1038 break;
1039 }
1040 s = f / r;
1041 c = g / r;
1042 g = d[i + 1] - p;
1043 r = (d[i] - g) * s + 2.0 * c * b;
1044 p = s * r;
1045 d[i + 1] = g + p;
1046 g = c * r - b;
1047 f = bnd[i + 1];
1048 bnd[i + 1] = s * bnd[i] + c * f;
1049 bnd[i] = c * bnd[i] - s * f;
1050 if i == 0 {
1051 break;
1052 }
1053 i -= 1;
1054 }
1055 // ........ recover from underflow .........
1056 if underflow {
1057 d[i + 1] -= p;
1058 } else {
1059 d[l] -= p;
1060 e[l] = g;
1061 }
1062 e[m] = 0.0;
1063 }
1064 }
1065 }
1066 Ok(())
1067}
1068
1069/***********************************************************************
1070 * *
1071 * startv() *
1072 * *
1073 ***********************************************************************/
1074/***********************************************************************
1075
1076 Description
1077 -----------
1078
1079 Function delivers a starting vector in r and returns |r|; it returns
1080 zero if the range is spanned, and ierr is non-zero if no starting
1081 vector within range of operator can be found.
1082
1083 Parameters
1084 ---------
1085
1086 (input)
1087 n dimension of the eigenproblem matrix B
1088 wptr array of pointers that point to work space
1089 j starting index for a Lanczos run
1090 eps machine epsilon (relative precision)
1091
1092 (output)
1093 wptr array of pointers that point to work space that contains
1094 r[j], q[j], q[j-1], p[j], p[j-1]
1095***********************************************************************/
1096#[allow(non_snake_case)]
1097fn startv(
1098 A: &dyn SMat,
1099 wrk: &mut WorkSpace,
1100 step: usize,
1101 store: &mut Store,
1102 random_seed: u32,
1103) -> Result<f64, SvdLibError> {
1104 // get initial vector; default is random
1105 let mut rnm2 = svd_ddot(&wrk.w0, &wrk.w0);
1106 for id in 0..3 {
1107 if id > 0 || step > 0 || compare(rnm2, 0.0) {
1108 let mut bytes = [0; 32];
1109 for (i, b) in random_seed.to_le_bytes().iter().enumerate() {
1110 bytes[i] = *b;
1111 }
1112 let mut seeded_rng = StdRng::from_seed(bytes);
1113 wrk.w0.fill_with(|| seeded_rng.gen_range(-1.0..1.0));
1114 }
1115 wrk.w3.copy_from_slice(&wrk.w0);
1116
1117 // apply operator to put r in range (essential if m singular)
1118 svd_opb(A, &wrk.w3, &mut wrk.w0, &mut wrk.temp, wrk.transposed);
1119 wrk.w3.copy_from_slice(&wrk.w0);
1120 rnm2 = svd_ddot(&wrk.w3, &wrk.w3);
1121 if rnm2 > 0.0 {
1122 break;
1123 }
1124 }
1125
1126 if rnm2 <= 0.0 {
1127 return Err(SvdLibError::StartvError(format!("rnm2 <= 0.0, rnm2 = {rnm2}")));
1128 }
1129
1130 if step > 0 {
1131 for i in 0..step {
1132 let v = store.retrq(i);
1133 svd_daxpy(-svd_ddot(&wrk.w3, v), v, &mut wrk.w0);
1134 }
1135
1136 // make sure q[step] is orthogonal to q[step-1]
1137 svd_daxpy(-svd_ddot(&wrk.w4, &wrk.w0), &wrk.w2, &mut wrk.w0);
1138 wrk.w3.copy_from_slice(&wrk.w0);
1139
1140 rnm2 = match svd_ddot(&wrk.w3, &wrk.w3) {
1141 dot if dot <= f64::EPSILON * rnm2 => 0.0,
1142 dot => dot,
1143 }
1144 }
1145 Ok(rnm2.sqrt())
1146}
1147
1148/***********************************************************************
1149 * *
1150 * stpone() *
1151 * *
1152 ***********************************************************************/
1153/***********************************************************************
1154
1155 Description
1156 -----------
1157
1158 Function performs the first step of the Lanczos algorithm. It also
1159 does a step of extended local re-orthogonalization.
1160
1161 Arguments
1162 ---------
1163
1164 (input)
1165 n dimension of the eigenproblem for matrix B
1166
1167 (output)
1168 ierr error flag
1169 wptr array of pointers that point to work space that contains
1170 wptr[0] r[j]
1171 wptr[1] q[j]
1172 wptr[2] q[j-1]
1173 wptr[3] p
1174 wptr[4] p[j-1]
1175 wptr[6] diagonal elements of matrix T
1176***********************************************************************/
1177#[allow(non_snake_case)]
1178fn stpone(A: &dyn SMat, wrk: &mut WorkSpace, store: &mut Store, random_seed: u32) -> Result<(f64, f64), SvdLibError> {
1179 // get initial vector; default is random
1180 let mut rnm = startv(A, wrk, 0, store, random_seed)?;
1181 if compare(rnm, 0.0) {
1182 return Err(SvdLibError::StponeError("rnm == 0.0".to_string()));
1183 }
1184
1185 // normalize starting vector
1186 svd_datx(rnm.recip(), &wrk.w0, &mut wrk.w1);
1187 svd_dscal(rnm.recip(), &mut wrk.w3);
1188
1189 // take the first step
1190 svd_opb(A, &wrk.w3, &mut wrk.w0, &mut wrk.temp, wrk.transposed);
1191 wrk.alf[0] = svd_ddot(&wrk.w0, &wrk.w3);
1192 svd_daxpy(-wrk.alf[0], &wrk.w1, &mut wrk.w0);
1193 let t = svd_ddot(&wrk.w0, &wrk.w3);
1194 wrk.alf[0] += t;
1195 svd_daxpy(-t, &wrk.w1, &mut wrk.w0);
1196 wrk.w4.copy_from_slice(&wrk.w0);
1197 rnm = svd_norm(&wrk.w4);
1198 let anorm = rnm + wrk.alf[0].abs();
1199 Ok((rnm, f64::EPSILON.sqrt() * anorm))
1200}
1201
1202/***********************************************************************
1203 * *
1204 * lanczos_step() *
1205 * *
1206 ***********************************************************************/
1207/***********************************************************************
1208
1209 Description
1210 -----------
1211
1212 Function embodies a single Lanczos step
1213
1214 Arguments
1215 ---------
1216
1217 (input)
1218 n dimension of the eigenproblem for matrix B
1219 first start of index through loop
1220 last end of index through loop
1221 wptr array of pointers pointing to work space
1222 alf array to hold diagonal of the tridiagonal matrix T
1223 eta orthogonality estimate of Lanczos vectors at step j
1224 oldeta orthogonality estimate of Lanczos vectors at step j-1
1225 bet array to hold off-diagonal of T
1226 ll number of intitial Lanczos vectors in local orthog.
1227 (has value of 0, 1 or 2)
1228 enough stop flag
1229***********************************************************************/
1230#[allow(non_snake_case)]
1231#[allow(clippy::too_many_arguments)]
1232fn lanczos_step(
1233 A: &dyn SMat,
1234 wrk: &mut WorkSpace,
1235 first: usize,
1236 last: usize,
1237 ll: &mut usize,
1238 enough: &mut bool,
1239 rnm: &mut f64,
1240 tol: &mut f64,
1241 store: &mut Store,
1242) -> Result<usize, SvdLibError> {
1243 let eps1 = f64::EPSILON * (wrk.ncols as f64).sqrt();
1244 let mut j = first;
1245
1246 while j < last {
1247 mem::swap(&mut wrk.w1, &mut wrk.w2);
1248 mem::swap(&mut wrk.w3, &mut wrk.w4);
1249
1250 store.storq(j - 1, &wrk.w2);
1251 if j - 1 < MAXLL {
1252 store.storp(j - 1, &wrk.w4);
1253 }
1254 wrk.bet[j] = *rnm;
1255
1256 // restart if invariant subspace is found
1257 if compare(*rnm, 0.0) {
1258 *rnm = startv(A, wrk, j, store, 0)?;
1259 if compare(*rnm, 0.0) {
1260 *enough = true;
1261 }
1262 }
1263
1264 if *enough {
1265 // added by Doug...
1266 // These lines fix a bug that occurs with low-rank matrices
1267 mem::swap(&mut wrk.w1, &mut wrk.w2);
1268 // ...added by Doug
1269 break;
1270 }
1271
1272 // take a lanczos step
1273 svd_datx(rnm.recip(), &wrk.w0, &mut wrk.w1);
1274 svd_dscal(rnm.recip(), &mut wrk.w3);
1275 svd_opb(A, &wrk.w3, &mut wrk.w0, &mut wrk.temp, wrk.transposed);
1276 svd_daxpy(-*rnm, &wrk.w2, &mut wrk.w0);
1277 wrk.alf[j] = svd_ddot(&wrk.w0, &wrk.w3);
1278 svd_daxpy(-wrk.alf[j], &wrk.w1, &mut wrk.w0);
1279
1280 // orthogonalize against initial lanczos vectors
1281 if j <= MAXLL && wrk.alf[j - 1].abs() > 4.0 * wrk.alf[j].abs() {
1282 *ll = j;
1283 }
1284 for i in 0..(j - 1).min(*ll) {
1285 let v1 = store.retrp(i);
1286 let t = svd_ddot(v1, &wrk.w0);
1287 let v2 = store.retrq(i);
1288 svd_daxpy(-t, v2, &mut wrk.w0);
1289 wrk.eta[i] = eps1;
1290 wrk.oldeta[i] = eps1;
1291 }
1292
1293 // extended local reorthogonalization
1294 let t = svd_ddot(&wrk.w0, &wrk.w4);
1295 svd_daxpy(-t, &wrk.w2, &mut wrk.w0);
1296 if wrk.bet[j] > 0.0 {
1297 wrk.bet[j] += t;
1298 }
1299 let t = svd_ddot(&wrk.w0, &wrk.w3);
1300 svd_daxpy(-t, &wrk.w1, &mut wrk.w0);
1301 wrk.alf[j] += t;
1302 wrk.w4.copy_from_slice(&wrk.w0);
1303 *rnm = svd_norm(&wrk.w4);
1304 let anorm = wrk.bet[j] + wrk.alf[j].abs() + *rnm;
1305 *tol = f64::EPSILON.sqrt() * anorm;
1306
1307 // update the orthogonality bounds
1308 ortbnd(wrk, j, *rnm, eps1);
1309
1310 // restore the orthogonality state when needed
1311 purge(wrk.ncols, *ll, wrk, j, rnm, *tol, store);
1312 if *rnm <= *tol {
1313 *rnm = 0.0;
1314 }
1315 j += 1;
1316 }
1317 Ok(j)
1318}
1319
1320/***********************************************************************
1321 * *
1322 * purge() *
1323 * *
1324 ***********************************************************************/
1325/***********************************************************************
1326
1327 Description
1328 -----------
1329
1330 Function examines the state of orthogonality between the new Lanczos
1331 vector and the previous ones to decide whether re-orthogonalization
1332 should be performed
1333
1334
1335 Arguments
1336 ---------
1337
1338 (input)
1339 n dimension of the eigenproblem for matrix B
1340 ll number of intitial Lanczos vectors in local orthog.
1341 r residual vector to become next Lanczos vector
1342 q current Lanczos vector
1343 ra previous Lanczos vector
1344 qa previous Lanczos vector
1345 wrk temporary vector to hold the previous Lanczos vector
1346 eta state of orthogonality between r and prev. Lanczos vectors
1347 oldeta state of orthogonality between q and prev. Lanczos vectors
1348 j current Lanczos step
1349
1350 (output)
1351 r residual vector orthogonalized against previous Lanczos
1352 vectors
1353 q current Lanczos vector orthogonalized against previous ones
1354***********************************************************************/
1355fn purge(n: usize, ll: usize, wrk: &mut WorkSpace, step: usize, rnm: &mut f64, tol: f64, store: &mut Store) {
1356 if step < ll + 2 {
1357 return;
1358 }
1359
1360 let reps = f64::EPSILON.sqrt();
1361 let eps1 = f64::EPSILON * (n as f64).sqrt();
1362
1363 let k = svd_idamax(step - (ll + 1), &wrk.eta) + ll;
1364 if wrk.eta[k].abs() > reps {
1365 let reps1 = eps1 / reps;
1366 let mut iteration = 0;
1367 let mut flag = true;
1368 while iteration < 2 && flag {
1369 if *rnm > tol {
1370 // bring in a lanczos vector t and orthogonalize both r and q against it
1371 let mut tq = 0.0;
1372 let mut tr = 0.0;
1373 for i in ll..step {
1374 let v = store.retrq(i);
1375 let t = svd_ddot(v, &wrk.w3);
1376 tq += t.abs();
1377 svd_daxpy(-t, v, &mut wrk.w1);
1378 let t = svd_ddot(v, &wrk.w4);
1379 tr += t.abs();
1380 svd_daxpy(-t, v, &mut wrk.w0);
1381 }
1382 wrk.w3.copy_from_slice(&wrk.w1);
1383 let t = svd_ddot(&wrk.w0, &wrk.w3);
1384 tr += t.abs();
1385 svd_daxpy(-t, &wrk.w1, &mut wrk.w0);
1386 wrk.w4.copy_from_slice(&wrk.w0);
1387 *rnm = svd_norm(&wrk.w4);
1388 if tq <= reps1 && tr <= *rnm * reps1 {
1389 flag = false;
1390 }
1391 }
1392 iteration += 1;
1393 }
1394 for i in ll..=step {
1395 wrk.eta[i] = eps1;
1396 wrk.oldeta[i] = eps1;
1397 }
1398 }
1399}
1400
1401/***********************************************************************
1402 * *
1403 * ortbnd() *
1404 * *
1405 ***********************************************************************/
1406/***********************************************************************
1407
1408 Description
1409 -----------
1410
1411 Function updates the eta recurrence
1412
1413 Arguments
1414 ---------
1415
1416 (input)
1417 alf array to hold diagonal of the tridiagonal matrix T
1418 eta orthogonality estimate of Lanczos vectors at step j
1419 oldeta orthogonality estimate of Lanczos vectors at step j-1
1420 bet array to hold off-diagonal of T
1421 n dimension of the eigenproblem for matrix B
1422 j dimension of T
1423 rnm norm of the next residual vector
1424 eps1 roundoff estimate for dot product of two unit vectors
1425
1426 (output)
1427 eta orthogonality estimate of Lanczos vectors at step j+1
1428 oldeta orthogonality estimate of Lanczos vectors at step j
1429***********************************************************************/
1430fn ortbnd(wrk: &mut WorkSpace, step: usize, rnm: f64, eps1: f64) {
1431 if step < 1 {
1432 return;
1433 }
1434 if !compare(rnm, 0.0) && step > 1 {
1435 wrk.oldeta[0] =
1436 (wrk.bet[1] * wrk.eta[1] + (wrk.alf[0] - wrk.alf[step]) * wrk.eta[0] - wrk.bet[step] * wrk.oldeta[0]) / rnm
1437 + eps1;
1438 if step > 2 {
1439 for i in 1..=step - 2 {
1440 wrk.oldeta[i] = (wrk.bet[i + 1] * wrk.eta[i + 1]
1441 + (wrk.alf[i] - wrk.alf[step]) * wrk.eta[i]
1442 + wrk.bet[i] * wrk.eta[i - 1]
1443 - wrk.bet[step] * wrk.oldeta[i])
1444 / rnm
1445 + eps1;
1446 }
1447 }
1448 }
1449 wrk.oldeta[step - 1] = eps1;
1450 mem::swap(&mut wrk.oldeta, &mut wrk.eta);
1451 wrk.eta[step] = eps1;
1452}
1453
1454/***********************************************************************
1455 * *
1456 * error_bound() *
1457 * *
1458 ***********************************************************************/
1459/***********************************************************************
1460
1461 Description
1462 -----------
1463
1464 Function massages error bounds for very close ritz values by placing
1465 a gap between them. The error bounds are then refined to reflect
1466 this.
1467
1468
1469 Arguments
1470 ---------
1471
1472 (input)
1473 endl left end of interval containing unwanted eigenvalues
1474 endr right end of interval containing unwanted eigenvalues
1475 ritz array to store the ritz values
1476 bnd array to store the error bounds
1477 enough stop flag
1478***********************************************************************/
1479fn error_bound(
1480 enough: &mut bool,
1481 endl: f64,
1482 endr: f64,
1483 ritz: &mut [f64],
1484 bnd: &mut [f64],
1485 step: usize,
1486 tol: f64,
1487) -> usize {
1488 assert!(step > 0, "error_bound: expected 'step' to be non-zero");
1489
1490 // massage error bounds for very close ritz values
1491 let mid = svd_idamax(step + 1, bnd);
1492
1493 let mut i = ((step + 1) + (step - 1)) / 2;
1494 while i > mid + 1 {
1495 if (ritz[i - 1] - ritz[i]).abs() < eps34() * ritz[i].abs() && bnd[i] > tol && bnd[i - 1] > tol {
1496 bnd[i - 1] = (bnd[i].powi(2) + bnd[i - 1].powi(2)).sqrt();
1497 bnd[i] = 0.0;
1498 }
1499 i -= 1;
1500 }
1501
1502 let mut i = ((step + 1) - (step - 1)) / 2;
1503 while i + 1 < mid {
1504 if (ritz[i + 1] - ritz[i]).abs() < eps34() * ritz[i].abs() && bnd[i] > tol && bnd[i + 1] > tol {
1505 bnd[i + 1] = (bnd[i].powi(2) + bnd[i + 1].powi(2)).sqrt();
1506 bnd[i] = 0.0;
1507 }
1508 i += 1;
1509 }
1510
1511 // refine the error bounds
1512 let mut neig = 0;
1513 let mut gapl = ritz[step] - ritz[0];
1514 for i in 0..=step {
1515 let mut gap = gapl;
1516 if i < step {
1517 gapl = ritz[i + 1] - ritz[i];
1518 }
1519 gap = gap.min(gapl);
1520 if gap > bnd[i] {
1521 bnd[i] *= bnd[i] / gap;
1522 }
1523 if bnd[i] <= 16.0 * f64::EPSILON * ritz[i].abs() {
1524 neig += 1;
1525 if !*enough {
1526 *enough = endl < ritz[i] && ritz[i] < endr;
1527 }
1528 }
1529 }
1530 neig
1531}
1532
1533/***********************************************************************
1534 * *
1535 * imtql2() *
1536 * *
1537 ***********************************************************************/
1538/***********************************************************************
1539
1540 Description
1541 -----------
1542
1543 imtql2() is a translation of a Fortran version of the Algol
1544 procedure IMTQL2, Num. Math. 12, 377-383(1968) by Martin and
1545 Wilkinson, as modified in Num. Math. 15, 450(1970) by Dubrulle.
1546 Handbook for Auto. Comp., vol.II-Linear Algebra, 241-248(1971).
1547 See also B. T. Smith et al, Eispack Guide, Lecture Notes in
1548 Computer Science, Springer-Verlag, (1976).
1549
1550 This function finds the eigenvalues and eigenvectors of a symmetric
1551 tridiagonal matrix by the implicit QL method.
1552
1553
1554 Arguments
1555 ---------
1556
1557 (input)
1558 nm row dimension of the symmetric tridiagonal matrix
1559 n order of the matrix
1560 d contains the diagonal elements of the input matrix
1561 e contains the subdiagonal elements of the input matrix in its
1562 last n-1 positions. e[0] is arbitrary
1563 z contains the identity matrix
1564
1565 (output)
1566 d contains the eigenvalues in ascending order. if an error
1567 exit is made, the eigenvalues are correct but unordered for
1568 for indices 0,1,...,ierr.
1569 e has been destroyed.
1570 z contains orthonormal eigenvectors of the symmetric
1571 tridiagonal (or full) matrix. if an error exit is made,
1572 z contains the eigenvectors associated with the stored
1573 eigenvalues.
1574***********************************************************************/
1575fn imtql2(nm: usize, n: usize, d: &mut [f64], e: &mut [f64], z: &mut [f64]) -> Result<(), SvdLibError> {
1576 if n == 1 {
1577 return Ok(());
1578 }
1579 assert!(n > 1, "imtql2: expected 'n' to be > 1");
1580
1581 let last = n - 1;
1582
1583 for i in 1..n {
1584 e[i - 1] = e[i];
1585 }
1586 e[last] = 0.0;
1587
1588 let nnm = n * nm;
1589 for l in 0..n {
1590 let mut iteration = 0;
1591
1592 // look for small sub-diagonal element
1593 while iteration <= 30 {
1594 let mut m = l;
1595 while m < n {
1596 if m == last {
1597 break;
1598 }
1599 let test = d[m].abs() + d[m + 1].abs();
1600 if compare(test, test + e[m].abs()) {
1601 break; // convergence = true;
1602 }
1603 m += 1;
1604 }
1605 if m == l {
1606 break;
1607 }
1608
1609 // error -- no convergence to an eigenvalue after 30 iterations.
1610 if iteration == 30 {
1611 return Err(SvdLibError::Imtql2Error(
1612 "imtql2 no convergence to an eigenvalue after 30 iterations".to_string(),
1613 ));
1614 }
1615 iteration += 1;
1616
1617 // form shift
1618 let mut g = (d[l + 1] - d[l]) / (2.0 * e[l]);
1619 let mut r = svd_pythag(g, 1.0);
1620 g = d[m] - d[l] + e[l] / (g + svd_fsign(r, g));
1621
1622 let mut s = 1.0;
1623 let mut c = 1.0;
1624 let mut p = 0.0;
1625
1626 assert!(m > 0, "imtql2: expected 'm' to be non-zero");
1627 let mut i = m - 1;
1628 let mut underflow = false;
1629 while !underflow && i >= l {
1630 let mut f = s * e[i];
1631 let b = c * e[i];
1632 r = svd_pythag(f, g);
1633 e[i + 1] = r;
1634 if compare(r, 0.0) {
1635 underflow = true;
1636 } else {
1637 s = f / r;
1638 c = g / r;
1639 g = d[i + 1] - p;
1640 r = (d[i] - g) * s + 2.0 * c * b;
1641 p = s * r;
1642 d[i + 1] = g + p;
1643 g = c * r - b;
1644
1645 // form vector
1646 for k in (0..nnm).step_by(n) {
1647 let index = k + i;
1648 f = z[index + 1];
1649 z[index + 1] = s * z[index] + c * f;
1650 z[index] = c * z[index] - s * f;
1651 }
1652 if i == 0 {
1653 break;
1654 }
1655 i -= 1;
1656 }
1657 } /* end while (underflow != FALSE && i >= l) */
1658 /*........ recover from underflow .........*/
1659 if underflow {
1660 d[i + 1] -= p;
1661 } else {
1662 d[l] -= p;
1663 e[l] = g;
1664 }
1665 e[m] = 0.0;
1666 }
1667 }
1668
1669 // order the eigenvalues
1670 for l in 1..n {
1671 let i = l - 1;
1672 let mut k = i;
1673 let mut p = d[i];
1674 for (j, item) in d.iter().enumerate().take(n).skip(l) {
1675 if *item < p {
1676 k = j;
1677 p = *item;
1678 }
1679 }
1680
1681 // ...and corresponding eigenvectors
1682 if k != i {
1683 d[k] = d[i];
1684 d[i] = p;
1685 for j in (0..nnm).step_by(n) {
1686 z.swap(j + i, j + k);
1687 }
1688 }
1689 }
1690
1691 Ok(())
1692}
1693
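// Rotates `a` left by `x` positions in place (cycle-following, no scratch buffer).
// e.g. rotate_array(&mut a, 2) turns [1.0, 2.0, 3.0, 4.0, 5.0] into [3.0, 4.0, 5.0, 1.0, 2.0].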
1694fn rotate_array(a: &mut [f64], x: usize) {
1695 let n = a.len();
1696 let mut j = 0;
1697 let mut start = 0;
1698 let mut t1 = a[0];
1699
1700 for _ in 0..n {
1701 j = match j >= x {
1702 true => j - x,
1703 false => j + n - x,
1704 };
1705
1706 let t2 = a[j];
1707 a[j] = t1;
1708
1709 if j == start {
1710 j += 1;
1711 start = j;
1712 t1 = a[j];
1713 } else {
1714 t1 = t2;
1715 }
1716 }
1717}
1718
1719/***********************************************************************
1720 * *
1721 * ritvec() *
1722 * Function computes the singular vectors of matrix A *
1723 * *
1724 ***********************************************************************/
/***********************************************************************

   Description
   -----------

   This function is invoked by landr() only if eigenvectors of the A'A
   eigenproblem are desired. When called, ritvec() computes the
   singular vectors of A and returns them as part of the SVDRawRec result.


   Parameters
   ----------

   (input)
   nrow       number of rows of A
   steps      number of Lanczos iterations performed
   n          dimension of matrix A
   kappa      relative accuracy of ritz values acceptable as
              eigenvalues of A'A
   ritz       array of ritz values
   bnd        array of error bounds
   alf        array of diagonal elements of the tridiagonal matrix T
   bet        array of off-diagonal elements of T
   w1, w2     work space

   (output)
   xv1        array of eigenvectors of A'A (right singular vectors of A)
   ierr       error code
              0 for normal return from imtql2()
              k if convergence did not occur for k-th eigenvalue in
                imtql2()
   nsig       number of accepted ritz values based on kappa

   (local)
   s          work array which is initialized to the identity matrix
              of order (j + 1) upon calling imtql2(). After the call,
              s contains the orthonormal eigenvectors of the symmetric
              tridiagonal matrix T

   (The parameter names above follow the original SVDLIBC routine; in this
   port most of them are carried in the WorkSpace argument.)
***********************************************************************/
#[allow(non_snake_case)]
fn ritvec(
    A: &dyn SMat,
    dimensions: usize,
    kappa: f64,
    wrk: &mut WorkSpace,
    steps: usize,
    neig: usize,
    store: &mut Store,
) -> Result<SVDRawRec, SvdLibError> {
    let js = steps + 1;
    let jsq = js * js;
    let mut s = vec![0.0; jsq];

    // initialize s to an identity matrix
    for i in (0..jsq).step_by(js + 1) {
        s[i] = 1.0;
    }

    let mut Vt = DMat {
        cols: wrk.ncols,
        value: vec![0.0; wrk.ncols * dimensions],
    };

    svd_dcopy(js, 0, &wrk.alf, &mut Vt.value);
    svd_dcopy(steps, 1, &wrk.bet, &mut wrk.w5);

    // on return from imtql2(), `Vt.value` contains eigenvalues in
    // ascending order and `s` contains the corresponding eigenvectors
    imtql2(js, js, &mut Vt.value, &mut wrk.w5, &mut s)?;

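    // A Ritz value is accepted when its error bound is small relative to the
    // value itself (the `kappa` test) and it lies among the last `neig` (largest)
    // Ritz values; for each accepted value, the corresponding right singular
    // vector is accumulated from the stored Lanczos vectors weighted by the
    // matching eigenvector of T held in `s`.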
    let mut nsig = 0;
    let mut x = 0;
    let mut id2 = jsq - js;
    for k in 0..js {
        if wrk.bnd[k] <= kappa * wrk.ritz[k].abs() && k + 1 > js - neig {
            x = match x {
                0 => dimensions - 1,
                _ => x - 1,
            };

            let offset = x * Vt.cols;
            Vt.value[offset..offset + Vt.cols].fill(0.0);
            let mut idx = id2 + js;
            for i in 0..js {
                idx -= js;
                if s[idx] != 0.0 {
                    for (j, item) in store.retrq(i).iter().enumerate().take(Vt.cols) {
                        Vt.value[j + offset] += s[idx] * item;
                    }
                }
            }
            nsig += 1;
        }
        id2 += 1;
    }

    // Rotate the singular vectors and values.
    // `x` is now the location of the highest singular value.
    if x > 0 {
        rotate_array(&mut Vt.value, x * Vt.cols);
    }

    // final dimension size
    let d = dimensions.min(nsig);
    let mut S = vec![0.0; d];
    let mut Ut = DMat {
        cols: wrk.nrows,
        value: vec![0.0; wrk.nrows * d],
    };
    Vt.value.resize(Vt.cols * d, 0.0);

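    // For each right singular vector v: t = v'·B·v with B = A'A, the singular
    // value is sqrt(t), bnd[js] records the residual norm ‖B·v - t·v‖ / sqrt(t),
    // and the (scaled) left singular vector is A·v / sqrt(t).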
    let mut tmp_vec = vec![0.0; Vt.cols];
    for (i, sval) in S.iter_mut().enumerate() {
        let vt_offset = i * Vt.cols;
        let ut_offset = i * Ut.cols;

        let vt_vec = &Vt.value[vt_offset..vt_offset + Vt.cols];
        let ut_vec = &mut Ut.value[ut_offset..ut_offset + Ut.cols];

        // multiply by matrix B first
        svd_opb(A, vt_vec, &mut tmp_vec, &mut wrk.temp, wrk.transposed);
        let t = svd_ddot(vt_vec, &tmp_vec);

        // store the Singular Value at S[i]
        *sval = t.sqrt();

        svd_daxpy(-t, vt_vec, &mut tmp_vec);
        wrk.bnd[js] = svd_norm(&tmp_vec) * sval.recip();

        // multiply by matrix A to get (scaled) left s-vector
        A.svd_opa(vt_vec, ut_vec, wrk.transposed);
        svd_dscal(sval.recip(), ut_vec);
    }

    Ok(SVDRawRec {
        // Dimensionality (rank)
        d,

        // Significant values
        nsig,

        // DMat Ut Transpose of left singular vectors. (d by m)
        // The vectors are the rows of Ut.
        Ut,

        // Array of singular values. (length d)
        S,

        // DMat Vt Transpose of right singular vectors. (d by n)
        // The vectors are the rows of Vt.
        Vt,
    })
}

/***********************************************************************
 *                                                                     *
 *                              lanso()                                *
 *                                                                     *
 ***********************************************************************/
/***********************************************************************

   Description
   -----------

   Function determines when the restart of the Lanczos algorithm should
   occur and when it should terminate.

   Arguments
   ---------

   (input)
   n           dimension of the eigenproblem for matrix B
   iterations  upper limit of desired number of Lanczos steps
   dimensions  upper limit of desired number of eigenpairs
   endl        left end of interval containing unwanted eigenvalues
   endr        right end of interval containing unwanted eigenvalues
   ritz        array to hold the ritz values
   bnd         array to hold the error bounds
   wptr        array of pointers that point to work space:
               wptr[0]-wptr[5] six vectors of length n
               wptr[6] array to hold diagonal of the tridiagonal matrix T
               wptr[9] array to hold off-diagonal of T
               wptr[7] orthogonality estimate of Lanczos vectors at
                 step j
               wptr[8] orthogonality estimate of Lanczos vectors at
                 step j-1

   (output)
   j           number of Lanczos steps actually taken
   neig        number of ritz values stabilized
   ritz        array to hold the ritz values
   bnd         array to hold the error bounds
   ierr        (globally declared) error flag
               ierr = 8192 if stpone() fails to find a starting vector
               ierr = k if convergence did not occur for k-th eigenvalue
                 in imtqlb()

   (The argument names above follow the original SVDLIBC routine; in this
   port the work vectors live in WorkSpace and errors are returned as
   SvdLibError rather than through a global ierr flag.)
***********************************************************************/
#[allow(non_snake_case)]
#[allow(clippy::too_many_arguments)]
fn lanso(
    A: &dyn SMat,
    dim: usize,
    iterations: usize,
    end_interval: &[f64; 2],
    wrk: &mut WorkSpace,
    neig: &mut usize,
    store: &mut Store,
    random_seed: u32,
) -> Result<usize, SvdLibError> {
    let (endl, endr) = (end_interval[0], end_interval[1]);

    /* take the first step */
    let rnm_tol = stpone(A, wrk, store, random_seed)?;
    let mut rnm = rnm_tol.0;
    let mut tol = rnm_tol.1;

    let eps1 = f64::EPSILON * (wrk.ncols as f64).sqrt();
    wrk.eta[0] = eps1;
    wrk.oldeta[0] = eps1;
    let mut ll = 0;
    let mut first = 1;
    let mut last = iterations.min(dim.max(8) + dim);
    let mut enough = false;
    let mut j = 0;
    let mut intro = 0;

    while !enough {
        if rnm <= tol {
            rnm = 0.0;
        }

        // the actual lanczos loop
        let steps = lanczos_step(A, wrk, first, last, &mut ll, &mut enough, &mut rnm, &mut tol, store)?;
        j = match enough {
            true => steps - 1,
            false => last - 1,
        };

        first = j + 1;
        wrk.bet[first] = rnm;

        // analyze T
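        // (split T at negligible off-diagonal entries and run imtqlb() on each
        // unreduced block to obtain its Ritz values and error bounds)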
        let mut l = 0;
        for _ in 0..j {
            if l > j {
                break;
            }

            let mut i = l;
            while i <= j {
                if compare(wrk.bet[i + 1], 0.0) {
                    break;
                }
                i += 1;
            }
            i = i.min(j);

            // now i is at the end of an unreduced submatrix
            let sz = i - l;
            svd_dcopy(sz + 1, l, &wrk.alf, &mut wrk.ritz);
            svd_dcopy(sz, l + 1, &wrk.bet, &mut wrk.w5);

            imtqlb(sz + 1, &mut wrk.ritz[l..], &mut wrk.w5[l..], &mut wrk.bnd[l..])?;

            for m in l..=i {
                wrk.bnd[m] = rnm * wrk.bnd[m].abs();
            }
            l = i + 1;
        }

        // sort eigenvalues into increasing order
        insert_sort(j + 1, &mut wrk.ritz, &mut wrk.bnd);

        *neig = error_bound(&mut enough, endl, endr, &mut wrk.ritz, &mut wrk.bnd, j, tol);

        // should we stop?
        if *neig < dim {
            if *neig == 0 {
                last = first + 9;
                intro = first;
            } else {
                last = first + 3.max(1 + ((j - intro) * (dim - *neig)) / *neig);
            }
            last = last.min(iterations);
        } else {
            enough = true;
        }
        enough = enough || first >= iterations;
    }
    store.storq(j, &wrk.w1);
    Ok(j)
}

//////////////////////////////////////////
// SvdRec implementation
//////////////////////////////////////////

impl SvdRec {
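    /// Reconstructs an approximation of the original matrix from the
    /// decomposition, i.e. `ut.t() * diag(s) * vt`.
    ///
    /// A minimal usage sketch (a small 2x2 sparse matrix, mirroring the
    /// crate-level examples):
    ///
    /// ```rust
    /// # let mut coo = nalgebra_sparse::coo::CooMatrix::<f64>::new(2, 2);
    /// # coo.push(0, 0, 4.0);
    /// # coo.push(1, 0, 3.0);
    /// # coo.push(1, 1, -5.0);
    /// let svd = svdlibrs::svd(&coo)?;
    /// let m_approx = svd.recompose(); // same shape as the input matrix
    /// assert_eq!(m_approx.shape(), &[2, 2]);
    /// # Ok::<(), svdlibrs::error::SvdLibError>(())
    /// ```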
    pub fn recompose(&self) -> Array2<f64> {
        let sdiag = Array2::from_diag(&self.s);
        self.ut.t().dot(&sdiag).dot(&self.vt)
    }
}

//////////////////////////////////////////
// SMat implementation for CscMatrix
//////////////////////////////////////////

#[rustfmt::skip]
impl SMat for nalgebra_sparse::csc::CscMatrix<f64> {
    fn nrows(&self) -> usize { self.nrows() }
    fn ncols(&self) -> usize { self.ncols() }
    fn nnz(&self) -> usize { self.nnz() }

    /// takes an n-vector x and returns A*x in y
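    /// (CSC stores nonzeros by column, so the transposed product `A'*x` is a
    /// gather: each output element is the dot product of one column with `x`;
    /// the plain product instead scatters each column into `y`.)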
    fn svd_opa(&self, x: &[f64], y: &mut [f64], transposed: bool) {
        let nrows = if transposed { self.ncols() } else { self.nrows() };
        let ncols = if transposed { self.nrows() } else { self.ncols() };
        assert_eq!(x.len(), ncols, "svd_opa: x must be A.ncols() in length, x = {}, A.ncols = {}", x.len(), ncols);
        assert_eq!(y.len(), nrows, "svd_opa: y must be A.nrows() in length, y = {}, A.nrows = {}", y.len(), nrows);

        let (major_offsets, minor_indices, values) = self.csc_data();

        y.fill(0.0);
        if transposed {
            for (i, yval) in y.iter_mut().enumerate() {
                for j in major_offsets[i]..major_offsets[i + 1] {
                    *yval += values[j] * x[minor_indices[j]];
                }
            }
        } else {
            for (i, xval) in x.iter().enumerate() {
                for j in major_offsets[i]..major_offsets[i + 1] {
                    y[minor_indices[j]] += values[j] * xval;
                }
            }
        }
    }
}

//////////////////////////////////////////
// SMat implementation for CsrMatrix
//////////////////////////////////////////

#[rustfmt::skip]
impl SMat for nalgebra_sparse::csr::CsrMatrix<f64> {
    fn nrows(&self) -> usize { self.nrows() }
    fn ncols(&self) -> usize { self.ncols() }
    fn nnz(&self) -> usize { self.nnz() }

    /// takes an n-vector x and returns A*x in y
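    /// (CSR stores nonzeros by row, so the plain product `A*x` is a gather:
    /// each output element is the dot product of one row with `x`; the
    /// transposed product instead scatters each row into `y`.)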
    fn svd_opa(&self, x: &[f64], y: &mut [f64], transposed: bool) {
        let nrows = if transposed { self.ncols() } else { self.nrows() };
        let ncols = if transposed { self.nrows() } else { self.ncols() };
        assert_eq!(x.len(), ncols, "svd_opa: x must be A.ncols() in length, x = {}, A.ncols = {}", x.len(), ncols);
        assert_eq!(y.len(), nrows, "svd_opa: y must be A.nrows() in length, y = {}, A.nrows = {}", y.len(), nrows);

        let (major_offsets, minor_indices, values) = self.csr_data();

        y.fill(0.0);
        if !transposed {
            for (i, yval) in y.iter_mut().enumerate() {
                for j in major_offsets[i]..major_offsets[i + 1] {
                    *yval += values[j] * x[minor_indices[j]];
                }
            }
        } else {
            for (i, xval) in x.iter().enumerate() {
                for j in major_offsets[i]..major_offsets[i + 1] {
                    y[minor_indices[j]] += values[j] * xval;
                }
            }
        }
    }
}

//////////////////////////////////////////
// SMat implementation for CooMatrix
//////////////////////////////////////////

#[rustfmt::skip]
impl SMat for nalgebra_sparse::coo::CooMatrix<f64> {
    fn nrows(&self) -> usize { self.nrows() }
    fn ncols(&self) -> usize { self.ncols() }
    fn nnz(&self) -> usize { self.nnz() }

    /// takes an n-vector x and returns A*x in y
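    /// (COO simply walks the stored triplets; entries with duplicate
    /// coordinates accumulate into the same output element.)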
    fn svd_opa(&self, x: &[f64], y: &mut [f64], transposed: bool) {
        let nrows = if transposed { self.ncols() } else { self.nrows() };
        let ncols = if transposed { self.nrows() } else { self.ncols() };
        assert_eq!(x.len(), ncols, "svd_opa: x must be A.ncols() in length, x = {}, A.ncols = {}", x.len(), ncols);
        assert_eq!(y.len(), nrows, "svd_opa: y must be A.nrows() in length, y = {}, A.nrows = {}", y.len(), nrows);

        y.fill(0.0);
        if transposed {
            for (i, j, v) in self.triplet_iter() {
                y[j] += v * x[i];
            }
        } else {
            for (i, j, v) in self.triplet_iter() {
                y[i] += v * x[j];
            }
        }
    }
}

//////////////////////////////////////////
// Tests
//////////////////////////////////////////

#[cfg(test)]
mod tests {
    use super::*;
    use nalgebra_sparse::{coo::CooMatrix, csc::CscMatrix, csr::CsrMatrix};

    fn is_normal<T: Clone + Sized + Send + Sync + Unpin>() {}
    fn is_dynamic_trait<T: ?Sized>() {}

    #[test]
    fn normal_types() {
        is_normal::<SvdRec>();
        is_normal::<Diagnostics>();
        is_normal::<Store>();
        is_normal::<WorkSpace>();
        is_normal::<DMat>();
        is_normal::<SVDRawRec>();
    }

    #[test]
    fn dynamic_types() {
        is_dynamic_trait::<dyn SMat>();
    }

    #[test]
    fn coo_csc_csr() {
        let coo = CooMatrix::try_from_triplets(4, 4, vec![1, 2], vec![0, 1], vec![3.0, 4.0]).unwrap();
        let csc = CscMatrix::from(&coo);
        let csr = CsrMatrix::from(&csc);
        assert_eq!(svd_dim_seed(&coo, 3, 12345), svd_dim_seed(&csc, 3, 12345));
        assert_eq!(svd_dim_seed(&csc, 3, 12345), svd_dim_seed(&csr, 3, 12345));
    }

    #[test]
    #[rustfmt::skip]
    fn recomp() {
        let mut coo = CooMatrix::<f64>::new(3, 3);
        coo.push(0, 0, 1.0); coo.push(0, 1, 16.0); coo.push(0, 2, 49.0);
        coo.push(1, 0, 4.0); coo.push(1, 1, 25.0); coo.push(1, 2, 64.0);
        coo.push(2, 0, 9.0); coo.push(2, 1, 36.0); coo.push(2, 2, 81.0);

        // Note: svd.ut & svd.vt are returned in transposed form
        // M = USV*
        let svd = svd(&coo).unwrap();
        let matrix_approximation = svd.ut.t().dot(&Array2::from_diag(&svd.s)).dot(&svd.vt);
        assert_eq!(svd.recompose(), matrix_approximation);
    }

    #[test]
    #[rustfmt::skip]
    fn basic_2x2() {
        // [
        //   [ 4, 0 ],
        //   [ 3, -5 ]
        // ]
        let mut coo = CooMatrix::<f64>::new(2, 2);
        coo.push(0, 0, 4.0);
        coo.push(1, 0, 3.0);
        coo.push(1, 1, -5.0);

        let svd = svd(&coo).unwrap();
        assert_eq!(svd.d, svd.ut.nrows());
        assert_eq!(svd.d, svd.s.dim());
        assert_eq!(svd.d, svd.vt.nrows());

        // Note: svd.ut & svd.vt are returned in transposed form
        // M = USV*
        let matrix_approximation = svd.ut.t().dot(&Array2::from_diag(&svd.s)).dot(&svd.vt);
        assert_eq!(svd.recompose(), matrix_approximation);

        let epsilon = 1.0e-12;
        assert_eq!(svd.d, 2);
        assert!((matrix_approximation[[0, 0]] - 4.0).abs() < epsilon);
        assert!((matrix_approximation[[0, 1]] - 0.0).abs() < epsilon);
        assert!((matrix_approximation[[1, 0]] - 3.0).abs() < epsilon);
        assert!((matrix_approximation[[1, 1]] - -5.0).abs() < epsilon);

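        // expected singular values: A'A = [[25, -15], [-15, 25]] has eigenvalues
        // 40 and 10, so the singular values are sqrt(40) and sqrt(10)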
        assert!((svd.s[0] - 6.3245553203368).abs() < epsilon);
        assert!((svd.s[1] - 3.1622776601684).abs() < epsilon);
    }

    #[test]
    #[rustfmt::skip]
    fn identity_3x3() {
        // [ [ 1, 0, 0 ],
        //   [ 0, 1, 0 ],
        //   [ 0, 0, 1 ] ]
        let mut coo = CooMatrix::<f64>::new(3, 3);
        coo.push(0, 0, 1.0);
        coo.push(1, 1, 1.0);
        coo.push(2, 2, 1.0);

        let csc = CscMatrix::from(&coo);
        let svd = svd(&csc).unwrap();
        assert_eq!(svd.d, svd.ut.nrows());
        assert_eq!(svd.d, svd.s.dim());
        assert_eq!(svd.d, svd.vt.nrows());

        let epsilon = 1.0e-12;
        assert_eq!(svd.d, 1);
        assert!((svd.s[0] - 1.0).abs() < epsilon);
    }
}