svdlibrs/lib.rs
1//! # svdlibrs
2//!
3//! A Rust port of LAS2 from SVDLIBC
4//!
5//! A library that computes an svd on a sparse matrix, typically a large sparse matrix
6//!
7//! This is a functional port (mostly a translation) of the algorithm as implemented in Doug Rohde's SVDLIBC
8//!
9//! This library performs [singular value decomposition](https://en.wikipedia.org/wiki/Singular_value_decomposition) on a sparse input [Matrix](https://docs.rs/nalgebra-sparse/latest/nalgebra_sparse/) using the [Lanczos algorithm](https://en.wikipedia.org/wiki/Lanczos_algorithm) and returns the decomposition as [ndarray](https://docs.rs/ndarray/latest/ndarray/) components.
10//!
11//! # Usage
12//!
13//! Input: [Sparse Matrix (CSR, CSC, or COO)](https://docs.rs/nalgebra-sparse/latest/nalgebra_sparse/)
14//!
15//! Output: decomposition `U`,`S`,`V` where `U`,`V` are [`Array2`](https://docs.rs/ndarray/latest/ndarray/type.Array2.html) and `S` is [`Array1`](https://docs.rs/ndarray/latest/ndarray/type.Array1.html), packaged in a [Result](https://doc.rust-lang.org/stable/core/result/enum.Result.html)\<`SvdRec`, `SvdLibError`\>
16//!
17//! # Quick Start
18//!
19//! ## There are 3 convenience methods to handle common use cases
20//! 1. `svd` -- simply computes an SVD
21//!
//! 2. `svd_dim` -- computes an SVD supplying a desired number of `dimensions`
23//!
//! 3. `svd_dim_seed` -- computes an SVD supplying a desired number of `dimensions` and a fixed `seed` to the LAS2 algorithm (the algorithm initializes with a random vector and will generate an internal seed if one isn't supplied)
25//!
26//! ```rust
27//! # extern crate ndarray;
28//! # use ndarray::prelude::*;
29//! use svdlibrs::svd;
30//! # let mut coo = nalgebra_sparse::coo::CooMatrix::<f64>::new(2, 2);
31//! # coo.push(0, 0, 1.0);
32//! # coo.push(1, 0, 3.0);
33//! # coo.push(1, 1, -5.0);
34//!
35//! # let csr = nalgebra_sparse::csr::CsrMatrix::from(&coo);
36//! // SVD on a Compressed Sparse Row matrix
37//! let svd = svd(&csr)?;
38//! # Ok::<(), svdlibrs::error::SvdLibError>(())
39//! ```
40//!
41//! ```rust
42//! # extern crate ndarray;
43//! # use ndarray::prelude::*;
44//! use svdlibrs::svd_dim;
45//! # let mut coo = nalgebra_sparse::coo::CooMatrix::<f64>::new(3, 3);
46//! # coo.push(0, 0, 1.0); coo.push(0, 1, 16.0); coo.push(0, 2, 49.0);
47//! # coo.push(1, 0, 4.0); coo.push(1, 1, 25.0); coo.push(1, 2, 64.0);
48//! # coo.push(2, 0, 9.0); coo.push(2, 1, 36.0); coo.push(2, 2, 81.0);
49//!
50//! # let csc = nalgebra_sparse::csc::CscMatrix::from(&coo);
51//! // SVD on a Compressed Sparse Column matrix specifying the desired dimensions, 3 in this example
52//! let svd = svd_dim(&csc, 3)?;
53//! # Ok::<(), svdlibrs::error::SvdLibError>(())
54//! ```
55//!
56//! ```rust
57//! # extern crate ndarray;
58//! # use ndarray::prelude::*;
59//! use svdlibrs::svd_dim_seed;
60//! # let mut coo = nalgebra_sparse::coo::CooMatrix::<f64>::new(3, 3);
61//! # coo.push(0, 0, 1.0); coo.push(0, 1, 16.0); coo.push(0, 2, 49.0);
62//! # coo.push(1, 0, 4.0); coo.push(1, 1, 25.0); coo.push(1, 2, 64.0);
63//! # coo.push(2, 0, 9.0); coo.push(2, 1, 36.0); coo.push(2, 2, 81.0);
64//! # let dimensions = 3;
65//!
66//! // SVD on a Coordinate-form matrix requesting the
67//! // dimensions and supplying a fixed seed to the LAS2 algorithm
68//! let svd = svd_dim_seed(&coo, dimensions, 12345)?;
69//! # Ok::<(), svdlibrs::error::SvdLibError>(())
70//! ```
71//!
72//! # The SVD Decomposition and informational Diagnostics are returned in `SvdRec`
73//!
74//! ```rust
75//! # extern crate ndarray;
76//! # use ndarray::prelude::*;
77//! pub struct SvdRec {
78//! pub d: usize, // Dimensionality (rank), the number of rows of both ut, vt and the length of s
79//! pub ut: Array2<f64>, // Transpose of left singular vectors, the vectors are the rows of ut
80//! pub s: Array1<f64>, // Singular values (length d)
81//! pub vt: Array2<f64>, // Transpose of right singular vectors, the vectors are the rows of vt
82//! pub diagnostics: Diagnostics, // Computational diagnostics
83//! }
84//!
85//! pub struct Diagnostics {
86//! pub non_zero: usize, // Number of non-zeros in the input matrix
87//! pub dimensions: usize, // Number of dimensions attempted (bounded by matrix shape)
88//! pub iterations: usize, // Number of iterations attempted (bounded by dimensions and matrix shape)
89//! pub transposed: bool, // True if the matrix was transposed internally
90//! pub lanczos_steps: usize, // Number of Lanczos steps performed
91//! pub ritz_values_stabilized: usize, // Number of ritz values
92//! pub significant_values: usize, // Number of significant values discovered
93//! pub singular_values: usize, // Number of singular values returned
94//! pub end_interval: [f64; 2], // Left, Right end of interval containing unwanted eigenvalues
95//! pub kappa: f64, // Relative accuracy of ritz values acceptable as eigenvalues
96//! pub random_seed: u32, // Random seed provided or the seed generated
97//! }
98//! ```
99//!
100//! # The method `svdLAS2` provides the following parameter control
101//!
102//! ```rust
103//! # extern crate ndarray;
104//! # use ndarray::prelude::*;
105//! use svdlibrs::{svd, svd_dim, svd_dim_seed, svdLAS2, SvdRec};
106//! # let mut matrix = nalgebra_sparse::coo::CooMatrix::<f64>::new(3, 3);
107//! # matrix.push(0, 0, 1.0); matrix.push(0, 1, 16.0); matrix.push(0, 2, 49.0);
108//! # matrix.push(1, 0, 4.0); matrix.push(1, 1, 25.0); matrix.push(1, 2, 64.0);
109//! # matrix.push(2, 0, 9.0); matrix.push(2, 1, 36.0); matrix.push(2, 2, 81.0);
110//! # let dimensions = 3;
111//! # let iterations = 0;
112//! # let end_interval = &[-1.0e-30, 1.0e-30];
113//! # let kappa = 1.0e-6;
114//! # let random_seed = 0;
115//!
116//! let svd: SvdRec = svdLAS2(
//!     &matrix,      // sparse matrix (nalgebra_sparse::{csr,csc,coo})
118//! dimensions, // upper limit of desired number of dimensions
119//! // supplying 0 will use the input matrix shape to determine dimensions
120//! iterations, // number of algorithm iterations
121//! // supplying 0 will use the input matrix shape to determine iterations
122//! end_interval, // left, right end of interval containing unwanted eigenvalues,
123//! // typically small values centered around zero
124//! // set to [-1.0e-30, 1.0e-30] for convenience methods svd(), svd_dim(), svd_dim_seed()
125//! kappa, // relative accuracy of ritz values acceptable as eigenvalues
126//! // set to 1.0e-6 for convenience methods svd(), svd_dim(), svd_dim_seed()
127//! random_seed, // a supplied seed if > 0, otherwise an internal seed will be generated
128//! )?;
129//! # Ok::<(), svdlibrs::error::SvdLibError>(())
130//! ```
131//!
132//! # SVD Examples
133//!
134//! ### SVD using [R](https://www.r-project.org/)
135//!
136//! ```text
137//! $ Rscript -e 'options(digits=12);m<-matrix(1:9,nrow=3)^2;print(m);r<-svd(m);print(r);r$u%*%diag(r$d)%*%t(r$v)'
138//!
139//! • The input matrix: M
140//! [,1] [,2] [,3]
141//! [1,] 1 16 49
142//! [2,] 4 25 64
143//! [3,] 9 36 81
144//!
145//! • The diagonal matrix (singular values): S
146//! $d
147//! [1] 123.676578742544 6.084527896514 0.287038004183
148//!
149//! • The left singular vectors: U
150//! $u
151//! [,1] [,2] [,3]
152//! [1,] -0.415206840886 -0.753443585619 -0.509829424976
153//! [2,] -0.556377565194 -0.233080213641 0.797569820742
154//! [3,] -0.719755016815 0.614814099788 -0.322422608499
155//!
156//! • The right singular vectors: V
157//! $v
158//! [,1] [,2] [,3]
159//! [1,] -0.0737286909592 0.632351847728 -0.771164846712
160//! [2,] -0.3756889918995 0.698691000150 0.608842071210
161//! [3,] -0.9238083467338 -0.334607272761 -0.186054055373
162//!
163//! • Recreating the original input matrix: r$u %*% diag(r$d) %*% t(r$v)
164//! [,1] [,2] [,3]
165//! [1,] 1 16 49
166//! [2,] 4 25 64
167//! [3,] 9 36 81
168//! ```
169//!
170//! ### SVD using svdlibrs
171//!
172//! ```rust
173//! # extern crate ndarray;
174//! # use ndarray::prelude::*;
175//! use nalgebra_sparse::{coo::CooMatrix, csc::CscMatrix};
176//! use svdlibrs::svd_dim_seed;
177//!
178//! // create a CscMatrix from a CooMatrix
179//! // use the same matrix values as the R example above
180//! // [,1] [,2] [,3]
181//! // [1,] 1 16 49
182//! // [2,] 4 25 64
183//! // [3,] 9 36 81
184//! let mut coo = CooMatrix::<f64>::new(3, 3);
185//! coo.push(0, 0, 1.0); coo.push(0, 1, 16.0); coo.push(0, 2, 49.0);
186//! coo.push(1, 0, 4.0); coo.push(1, 1, 25.0); coo.push(1, 2, 64.0);
187//! coo.push(2, 0, 9.0); coo.push(2, 1, 36.0); coo.push(2, 2, 81.0);
188//!
189//! // our input
190//! let csc = CscMatrix::from(&coo);
191//!
192//! // compute the svd
193//! // 1. supply 0 as the dimension (requesting max)
194//! // 2. supply a fixed seed so outputs are repeatable between runs
195//! let svd = svd_dim_seed(&csc, 0, 3141).unwrap();
196//!
197//! // svd.d dimensions were found by the algorithm
198//! // svd.ut is a 2-d array holding the left vectors
199//! // svd.vt is a 2-d array holding the right vectors
200//! // svd.s is a 1-d array holding the singular values
201//! // assert the shape of all results in terms of svd.d
202//! assert_eq!(svd.d, 3);
203//! assert_eq!(svd.d, svd.ut.nrows());
204//! assert_eq!(svd.d, svd.s.dim());
205//! assert_eq!(svd.d, svd.vt.nrows());
206//!
207//! // show transposed output
208//! println!("svd.d = {}\n", svd.d);
209//! println!("U =\n{:#?}\n", svd.ut.t());
210//! println!("S =\n{:#?}\n", svd.s);
211//! println!("V =\n{:#?}\n", svd.vt.t());
212//!
213//! // Note: svd.ut & svd.vt are returned in transposed form
214//! // M = USV*
215//! let m_approx = svd.ut.t().dot(&Array2::from_diag(&svd.s)).dot(&svd.vt);
216//! assert_eq!(svd.recompose(), m_approx);
217//!
218//! // assert computed values are an acceptable approximation
219//! let epsilon = 1.0e-12;
220//! assert!((m_approx[[0, 0]] - 1.0).abs() < epsilon);
221//! assert!((m_approx[[0, 1]] - 16.0).abs() < epsilon);
222//! assert!((m_approx[[0, 2]] - 49.0).abs() < epsilon);
223//! assert!((m_approx[[1, 0]] - 4.0).abs() < epsilon);
224//! assert!((m_approx[[1, 1]] - 25.0).abs() < epsilon);
225//! assert!((m_approx[[1, 2]] - 64.0).abs() < epsilon);
226//! assert!((m_approx[[2, 0]] - 9.0).abs() < epsilon);
227//! assert!((m_approx[[2, 1]] - 36.0).abs() < epsilon);
228//! assert!((m_approx[[2, 2]] - 81.0).abs() < epsilon);
229//!
230//! assert!((svd.s[0] - 123.676578742544).abs() < epsilon);
231//! assert!((svd.s[1] - 6.084527896514).abs() < epsilon);
232//! assert!((svd.s[2] - 0.287038004183).abs() < epsilon);
233//! ```
234//!
235//! # Output
236//!
237//! ```text
238//! svd.d = 3
239//!
240//! U =
241//! [[-0.4152068408862081, -0.7534435856189199, -0.5098294249756481],
242//! [-0.556377565193878, -0.23308021364108839, 0.7975698207417085],
243//! [-0.719755016814907, 0.6148140997884891, -0.3224226084985998]], shape=[3, 3], strides=[1, 3], layout=Ff (0xa), const ndim=2
244//!
245//! S =
246//! [123.67657874254405, 6.084527896513759, 0.2870380041828973], shape=[3], strides=[1], layout=CFcf (0xf), const ndim=1
247//!
248//! V =
249//! [[-0.07372869095916511, 0.6323518477280158, -0.7711648467120451],
250//! [-0.3756889918994792, 0.6986910001499903, 0.6088420712097343],
251//! [-0.9238083467337805, -0.33460727276072516, -0.18605405537270261]], shape=[3, 3], strides=[1, 3], layout=Ff (0xa), const ndim=2
252//! ```
253//!
254//! # The full Result\<SvdRec\> for above example looks like this:
255//! ```text
256//! svd = Ok(
257//! SvdRec {
258//! d: 3,
259//! ut: [[-0.4152068408862081, -0.556377565193878, -0.719755016814907],
260//! [-0.7534435856189199, -0.23308021364108839, 0.6148140997884891],
261//! [-0.5098294249756481, 0.7975698207417085, -0.3224226084985998]], shape=[3, 3], strides=[3, 1], layout=Cc (0x5), const ndim=2,
262//! s: [123.67657874254405, 6.084527896513759, 0.2870380041828973], shape=[3], strides=[1], layout=CFcf (0xf), const ndim=1,
263//! vt: [[-0.07372869095916511, -0.3756889918994792, -0.9238083467337805],
264//! [0.6323518477280158, 0.6986910001499903, -0.33460727276072516],
265//! [-0.7711648467120451, 0.6088420712097343, -0.18605405537270261]], shape=[3, 3], strides=[3, 1], layout=Cc (0x5), const ndim=2,
266//! diagnostics: Diagnostics {
267//! non_zero: 9,
268//! dimensions: 3,
269//! iterations: 3,
270//! transposed: false,
271//! lanczos_steps: 3,
272//! ritz_values_stabilized: 3,
273//! significant_values: 3,
274//! singular_values: 3,
275//! end_interval: [
276//! -1e-30,
277//! 1e-30,
278//! ],
279//! kappa: 1e-6,
280//! random_seed: 3141,
281//! },
282//! },
283//! )
284//! ```
285
286// ==================================================================================
287// This is a functional port (mostly a translation) of "svdLAS2()" from Doug Rohde's SVDLIBC
288// It uses the same conceptual "workspace" storage as the C implementation.
289// Most of the original function & variable names have been preserved.
290// All C-style comments /* ... */ are from the original source, provided for context.
291//
292// dwf -- Wed May 5 16:48:01 MDT 2021
293// ==================================================================================
294
295/*
296SVDLIBC License
297
298The following BSD License applies to all SVDLIBC source code and documentation:
299
300Copyright © 2002, University of Tennessee Research Foundation.
301All rights reserved.
302
303Redistribution and use in source and binary forms, with or without
304modification, are permitted provided that the following conditions are met:
305
306
307 Redistributions of source code must retain the above copyright notice, this
308 list of conditions and the following disclaimer.
309
310 Redistributions in binary form must reproduce the above copyright notice,
311 this list of conditions and the following disclaimer in the documentation
312 and/or other materials provided with the distribution.
313
314 Neither the name of the University of Tennessee nor the names of its
315 contributors may be used to endorse or promote products derived from this
316 software without specific prior written permission.
317
318
319THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
320AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
321IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
322ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
323LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
324CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
325SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
326INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
327CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
328ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
329POSSIBILITY OF SUCH DAMAGE.
330*/
331
332/***********************************************************************
333 * *
334 * main() *
335 * Sparse SVD(A) via Eigensystem of A'A symmetric Matrix *
336 * (double precision) *
337 * *
338 ***********************************************************************/
339/***********************************************************************
340
341 Description
342 -----------
343
344 This sample program uses landr to compute singular triplets of A via
345 the equivalent symmetric eigenvalue problem
346
347 B x = lambda x, where x' = (u',v'), lambda = sigma**2,
348 where sigma is a singular value of A,
349
350 B = A'A , and A is m (nrow) by n (ncol) (nrow >> ncol),
351
352 so that {u,sqrt(lambda),v} is a singular triplet of A.
353 (A' = transpose of A)
354
355 User supplied routines: svd_opa, opb, store, timer
356
357 svd_opa( x,y) takes an n-vector x and returns A*x in y.
358 svd_opb(ncol,x,y) takes an n-vector x and returns B*x in y.
359
360 Based on operation flag isw, store(n,isw,j,s) stores/retrieves
361 to/from storage a vector of length n in s.
362
363 User should edit timer() with an appropriate call to an intrinsic
364 timing routine that returns elapsed user time.
365
366
367 Local parameters
368 ----------------
369
370 (input)
371 endl left end of interval containing unwanted eigenvalues of B
372 endr right end of interval containing unwanted eigenvalues of B
373 kappa relative accuracy of ritz values acceptable as eigenvalues
374 of B
375 vectors is not equal to 1
376 r work array
377 n dimension of the eigenproblem for matrix B (ncol)
378 dimensions upper limit of desired number of singular triplets of A
379 iterations upper limit of desired number of Lanczos steps
380 nnzero number of nonzeros in A
381 vectors 1 indicates both singular values and singular vectors are
382 wanted and they can be found in output file lav2;
383 0 indicates only singular values are wanted
384
385 (output)
386 ritz array of ritz values
387 bnd array of error bounds
388 d array of singular values
389 memory total memory allocated in bytes to solve the B-eigenproblem
390
391
392 Functions used
393 --------------
394
395 BLAS svd_daxpy, svd_dscal, svd_ddot
396 USER svd_opa, svd_opb, timer
397 MISC write_header, check_parameters
398 LAS2 landr
399
400
401 Precision
402 ---------
403
404 All floating-point calculations are done in double precision;
405 variables are declared as long and double.
406
407
408 LAS2 development
409 ----------------
410
411 LAS2 is a C translation of the Fortran-77 LAS2 from the SVDPACK
412 library written by Michael W. Berry, University of Tennessee,
413 Dept. of Computer Science, 107 Ayres Hall, Knoxville, TN, 37996-1301
414
415 31 Jan 1992: Date written
416
417 Theresa H. Do
418 University of Tennessee
419 Dept. of Computer Science
420 107 Ayres Hall
421 Knoxville, TN, 37996-1301
422 internet: tdo@cs.utk.edu
423
424***********************************************************************/
425
426use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng};
427use std::mem;
428extern crate ndarray;
429use ndarray::prelude::*;
430
431pub mod error;
432use error::SvdLibError;
433
434// ====================
435// Public
436// ====================
437
/// Sparse matrix
///
/// Abstraction over the sparse-matrix inputs accepted by the solver
/// (the crate docs show CSR, CSC and COO matrices from `nalgebra_sparse`).
pub trait SMat {
    /// Number of rows in the matrix.
    fn nrows(&self) -> usize;
    /// Number of columns in the matrix.
    fn ncols(&self) -> usize;
    /// Number of non-zero entries in the matrix.
    fn nnz(&self) -> usize;
    /// Matrix-vector product: writes A*x into `y`; when `transposed` is
    /// true the operator is applied as its transpose (see `svd_opb`,
    /// which chains two calls to form A'Ax).
    fn svd_opa(&self, x: &[f64], y: &mut [f64], transposed: bool); // y = A*x
}
445
/// Singular Value Decomposition Components
///
/// # Fields
/// - d: Dimensionality (rank), the number of rows of both `ut`, `vt` and the length of `s`
/// - ut: Transpose of left singular vectors, the vectors are the rows of `ut`
/// - s: Singular values (length `d`)
/// - vt: Transpose of right singular vectors, the vectors are the rows of `vt`
/// - diagnostics: Computational diagnostics
#[derive(Debug, Clone, PartialEq)]
pub struct SvdRec {
    /// Dimensionality (rank): row count of `ut`/`vt` and the length of `s`.
    pub d: usize,
    /// Transpose of the left singular vectors; the vectors are the rows.
    pub ut: Array2<f64>,
    /// Singular values (length `d`).
    pub s: Array1<f64>,
    /// Transpose of the right singular vectors; the vectors are the rows.
    pub vt: Array2<f64>,
    /// Computational diagnostics gathered during the decomposition.
    pub diagnostics: Diagnostics,
}
462
/// Computational Diagnostics
///
/// # Fields
/// - non_zero: Number of non-zeros in the matrix
/// - dimensions: Number of dimensions attempted (bounded by matrix shape)
/// - iterations: Number of iterations attempted (bounded by dimensions and matrix shape)
/// - transposed: True if the matrix was transposed internally
/// - lanczos_steps: Number of Lanczos steps performed
/// - ritz_values_stabilized: Number of ritz values
/// - significant_values: Number of significant values discovered
/// - singular_values: Number of singular values returned
/// - end_interval: left, right end of interval containing unwanted eigenvalues
/// - kappa: relative accuracy of ritz values acceptable as eigenvalues
/// - random_seed: Random seed provided or the seed generated
#[derive(Debug, Clone, PartialEq)]
pub struct Diagnostics {
    /// Number of non-zeros in the input matrix.
    pub non_zero: usize,
    /// Number of dimensions attempted (bounded by the matrix shape).
    pub dimensions: usize,
    /// Number of iterations attempted (bounded by dimensions and matrix shape).
    pub iterations: usize,
    /// True if the matrix was transposed internally (wide matrices are
    /// decomposed via their transpose for speed — see `svdLAS2`).
    pub transposed: bool,
    /// Number of Lanczos steps performed.
    pub lanczos_steps: usize,
    /// Number of ritz values that stabilized.
    pub ritz_values_stabilized: usize,
    /// Number of significant values discovered.
    pub significant_values: usize,
    /// Number of singular values returned.
    pub singular_values: usize,
    /// Left, right end of the interval containing unwanted eigenvalues.
    pub end_interval: [f64; 2],
    /// Relative accuracy of ritz values accepted as eigenvalues.
    pub kappa: f64,
    /// Random seed provided by the caller, or the seed generated internally.
    pub random_seed: u32,
}
491
#[allow(non_snake_case)]
/// SVD at full dimensionality, calls `svdLAS2` with the highlighted defaults
///
/// svdLAS2(A, `0`, `0`, `&[-1.0e-30, 1.0e-30]`, `1.0e-6`, `0`)
///
/// # Parameters
/// - A: Sparse matrix
///
/// # Errors
/// Returns `SvdLibError` when the decomposition cannot be computed,
/// e.g. when the effective dimension count is < 2 (see `svdLAS2`).
pub fn svd(A: &dyn SMat) -> Result<SvdRec, SvdLibError> {
    svdLAS2(A, 0, 0, &[-1.0e-30, 1.0e-30], 1.0e-6, 0)
}
502
#[allow(non_snake_case)]
/// SVD at desired dimensionality, calls `svdLAS2` with the highlighted defaults
///
/// svdLAS2(A, dimensions, `0`, `&[-1.0e-30, 1.0e-30]`, `1.0e-6`, `0`)
///
/// # Parameters
/// - A: Sparse matrix
/// - dimensions: Upper limit of desired number of dimensions, bounded by the matrix shape
///
/// # Errors
/// Returns `SvdLibError` when the decomposition cannot be computed,
/// e.g. when the effective dimension count is < 2 (see `svdLAS2`).
pub fn svd_dim(A: &dyn SMat, dimensions: usize) -> Result<SvdRec, SvdLibError> {
    svdLAS2(A, dimensions, 0, &[-1.0e-30, 1.0e-30], 1.0e-6, 0)
}
514
#[allow(non_snake_case)]
/// SVD at desired dimensionality with supplied seed, calls `svdLAS2` with the highlighted defaults
///
/// svdLAS2(A, dimensions, `0`, `&[-1.0e-30, 1.0e-30]`, `1.0e-6`, random_seed)
///
/// # Parameters
/// - A: Sparse matrix
/// - dimensions: Upper limit of desired number of dimensions, bounded by the matrix shape
/// - random_seed: A supplied seed `if > 0`, otherwise an internal seed will be generated
///
/// # Errors
/// Returns `SvdLibError` when the decomposition cannot be computed,
/// e.g. when the effective dimension count is < 2 (see `svdLAS2`).
pub fn svd_dim_seed(A: &dyn SMat, dimensions: usize, random_seed: u32) -> Result<SvdRec, SvdLibError> {
    svdLAS2(A, dimensions, 0, &[-1.0e-30, 1.0e-30], 1.0e-6, random_seed)
}
527
528#[allow(clippy::redundant_field_names)]
529#[allow(non_snake_case)]
530/// Compute a singular value decomposition
531///
532/// # Parameters
533///
534/// - A: Sparse matrix
535/// - dimensions: Upper limit of desired number of dimensions (0 = max),
536/// where "max" is a value bounded by the matrix shape, the smaller of
537/// the matrix rows or columns. e.g. `A.nrows().min(A.ncols())`
538/// - iterations: Upper limit of desired number of lanczos steps (0 = max),
539/// where "max" is a value bounded by the matrix shape, the smaller of
540/// the matrix rows or columns. e.g. `A.nrows().min(A.ncols())`
541/// iterations must also be in range [`dimensions`, `A.nrows().min(A.ncols())`]
542/// - end_interval: Left, right end of interval containing unwanted eigenvalues,
543/// typically small values centered around zero, e.g. `[-1.0e-30, 1.0e-30]`
544/// - kappa: Relative accuracy of ritz values acceptable as eigenvalues, e.g. `1.0e-6`
545/// - random_seed: A supplied seed `if > 0`, otherwise an internal seed will be generated
546///
547/// # More on `dimensions`, `iterations` and `bounding` by the input matrix shape:
548///
549/// let `min_nrows_ncols` = `A.nrows().min(A.ncols())`; // The smaller of `rows`, `columns`
550///
551/// `dimensions` will be adjusted to `min_nrows_ncols` if `dimensions == 0` or `dimensions > min_nrows_ncols`
552///
553/// The algorithm begins with the following assertion on `dimensions`:
554///
555/// #### assert!(dimensions > 1 && dimensions <= min_nrows_ncols);
556///
557/// ---
558///
559/// `iterations` will be adjusted to `min_nrows_ncols` if `iterations == 0` or `iterations > min_nrows_ncols`
560///
561/// `iterations` will be adjusted to `dimensions` if `iterations < dimensions`
562///
563/// The algorithm begins with the following assertion on `iterations`:
564///
565/// #### assert!(iterations >= dimensions && iterations <= min_nrows_ncols);
566///
567/// # Returns
568///
569/// Ok(`SvdRec`) on successful decomposition
570pub fn svdLAS2(
571 A: &dyn SMat,
572 dimensions: usize,
573 iterations: usize,
574 end_interval: &[f64; 2],
575 kappa: f64,
576 random_seed: u32,
577) -> Result<SvdRec, SvdLibError> {
578 let random_seed = match random_seed > 0 {
579 true => random_seed,
580 false => thread_rng().gen::<_>(),
581 };
582
583 let min_nrows_ncols = A.nrows().min(A.ncols());
584
585 let dimensions = match dimensions {
586 n if n == 0 || n > min_nrows_ncols => min_nrows_ncols,
587 _ => dimensions,
588 };
589
590 let iterations = match iterations {
591 n if n == 0 || n > min_nrows_ncols => min_nrows_ncols,
592 n if n < dimensions => dimensions,
593 _ => iterations,
594 };
595
596 if dimensions < 2 {
597 return Err(SvdLibError::Las2Error(format!(
598 "svdLAS2: insufficient dimensions: {dimensions}"
599 )));
600 }
601
602 assert!(dimensions > 1 && dimensions <= min_nrows_ncols);
603 assert!(iterations >= dimensions && iterations <= min_nrows_ncols);
604
605 // If the matrix is wide, the SVD is computed on its transpose for speed
606 let transposed = A.ncols() as f64 >= (A.nrows() as f64 * 1.2);
607 let nrows = if transposed { A.ncols() } else { A.nrows() };
608 let ncols = if transposed { A.nrows() } else { A.ncols() };
609
610 let mut wrk = WorkSpace::new(nrows, ncols, transposed, iterations)?;
611 let mut store = Store::new(ncols)?;
612
613 // Actually run the lanczos thing
614 let mut neig = 0;
615 let steps = lanso(
616 A,
617 dimensions,
618 iterations,
619 end_interval,
620 &mut wrk,
621 &mut neig,
622 &mut store,
623 random_seed,
624 )?;
625
626 // Compute the singular vectors of matrix A
627 let kappa = kappa.abs().max(eps34());
628 let mut R = ritvec(A, dimensions, kappa, &mut wrk, steps, neig, &mut store)?;
629
630 // This swaps and transposes the singular matrices if A was transposed.
631 if transposed {
632 mem::swap(&mut R.Ut, &mut R.Vt);
633 }
634
635 Ok(SvdRec {
636 // Dimensionality (number of Ut,Vt rows & length of S)
637 d: R.d,
638 ut: Array::from_shape_vec((R.d, R.Ut.cols), R.Ut.value)?,
639 s: Array::from_shape_vec(R.d, R.S)?,
640 vt: Array::from_shape_vec((R.d, R.Vt.cols), R.Vt.value)?,
641 diagnostics: Diagnostics {
642 non_zero: A.nnz(),
643 dimensions: dimensions,
644 iterations: iterations,
645 transposed: transposed,
646 lanczos_steps: steps + 1,
647 ritz_values_stabilized: neig,
648 significant_values: R.d,
649 singular_values: R.nsig,
650 end_interval: *end_interval,
651 kappa: kappa,
652 random_seed: random_seed,
653 },
654 })
655}
656
657//================================================================
658// Everything below is the private implementation
659//================================================================
660
661// ====================
662// Private
663// ====================
664
// Number of slots reserved at the front of `Store::vecs`: storq/retrq
// address slot `idx + MAXLL`, leaving the first MAXLL slots for the
// vectors handled by storp/retrp (q(j) for j = 0 or 1 in the C original).
const MAXLL: usize = 2;
666
// eps^(3/4): f64 machine epsilon raised to the 3/4 power; used by
// `svdLAS2` as the lower bound for the caller-supplied kappa.
fn eps34() -> f64 {
    let eps = f64::EPSILON;
    eps.powf(0.75) // equivalently: eps.sqrt() * eps.sqrt().sqrt()
}
670
671/***********************************************************************
672 * *
673 * store() *
674 * *
675 ***********************************************************************/
676/***********************************************************************
677
678 Description
679 -----------
680
681 store() is a user-supplied function which, based on the input
682 operation flag, stores to or retrieves from memory a vector.
683
684
685 Arguments
686 ---------
687
688 (input)
689 n length of vector to be stored or retrieved
690 isw operation flag:
691 isw = 1 request to store j-th Lanczos vector q(j)
692 isw = 2 request to retrieve j-th Lanczos vector q(j)
693 isw = 3 request to store q(j) for j = 0 or 1
694 isw = 4 request to retrieve q(j) for j = 0 or 1
695 s contains the vector to be stored for a "store" request
696
697 (output)
698 s contains the vector retrieved for a "retrieve" request
699
700 Functions used
701 --------------
702
703 BLAS svd_dcopy
704
705***********************************************************************/
706#[derive(Debug, Clone, PartialEq)]
707struct Store {
708 n: usize,
709 vecs: Vec<Vec<f64>>,
710}
711impl Store {
712 fn new(n: usize) -> Result<Self, SvdLibError> {
713 Ok(Self { n, vecs: vec![] })
714 }
715 fn storq(&mut self, idx: usize, v: &[f64]) {
716 while idx + MAXLL >= self.vecs.len() {
717 self.vecs.push(vec![0.0; self.n]);
718 }
719 //self.vecs[idx + MAXLL] = v.to_vec();
720 //self.vecs[idx + MAXLL][..self.n].clone_from_slice(&v[..self.n]);
721 self.vecs[idx + MAXLL].copy_from_slice(v);
722 }
723 fn storp(&mut self, idx: usize, v: &[f64]) {
724 while idx >= self.vecs.len() {
725 self.vecs.push(vec![0.0; self.n]);
726 }
727 //self.vecs[idx] = v.to_vec();
728 //self.vecs[idx][..self.n].clone_from_slice(&v[..self.n]);
729 self.vecs[idx].copy_from_slice(v);
730 }
731 fn retrq(&mut self, idx: usize) -> &[f64] {
732 &self.vecs[idx + MAXLL]
733 }
734 fn retrp(&mut self, idx: usize) -> &[f64] {
735 &self.vecs[idx]
736 }
737}
738
/// Scratch buffers for the Lanczos recurrence — the Rust counterpart of
/// the single "workspace" allocation in the C original. The per-step
/// arrays are sized from `iterations` (plus one where noted).
#[derive(Debug, Clone, PartialEq)]
struct WorkSpace {
    nrows: usize,      // rows of the (possibly internally transposed) matrix
    ncols: usize,      // cols of the (possibly internally transposed) matrix
    transposed: bool,  // true if the matrix was transposed internally
    w0: Vec<f64>, // workspace 0 (length ncols)
    w1: Vec<f64>, // workspace 1 (length ncols)
    w2: Vec<f64>, // workspace 2 (length ncols)
    w3: Vec<f64>, // workspace 3 (length ncols)
    w4: Vec<f64>, // workspace 4 (length ncols)
    w5: Vec<f64>, // workspace 5 (length ncols)
    alf: Vec<f64>, // array to hold diagonal of the tridiagonal matrix T (length iterations)
    eta: Vec<f64>, // orthogonality estimate of Lanczos vectors at step j
    oldeta: Vec<f64>, // orthogonality estimate of Lanczos vectors at step j-1
    bet: Vec<f64>, // array to hold off-diagonal of T (length iterations + 1)
    bnd: Vec<f64>, // array to hold the error bounds (length iterations + 1)
    ritz: Vec<f64>, // array to hold the ritz values (length iterations + 1)
    temp: Vec<f64>, // array to hold the temp values (length nrows)
}
impl WorkSpace {
    /// Allocates all buffers up front, zero-filled (`bnd` starts at
    /// f64::MAX, i.e. "no error bound established yet").
    /// `Result`-returning to mirror the original C allocation API.
    fn new(nrows: usize, ncols: usize, transposed: bool, iterations: usize) -> Result<Self, SvdLibError> {
        Ok(Self {
            nrows,
            ncols,
            transposed,
            w0: vec![0.0; ncols],
            w1: vec![0.0; ncols],
            w2: vec![0.0; ncols],
            w3: vec![0.0; ncols],
            w4: vec![0.0; ncols],
            w5: vec![0.0; ncols],
            alf: vec![0.0; iterations],
            eta: vec![0.0; iterations],
            oldeta: vec![0.0; iterations],
            bet: vec![0.0; 1 + iterations],
            ritz: vec![0.0; 1 + iterations],
            bnd: vec![f64::MAX; 1 + iterations],
            temp: vec![0.0; nrows],
        })
    }
}
780
/* Row-major dense matrix. Rows are consecutive vectors. */
#[derive(Debug, Clone, PartialEq)]
struct DMat {
    // Port of the C struct below; `value` is flattened row-major, so the
    // row count is value.len() / cols.
    //long rows;
    //long cols;
    //double **value; /* Accessed by [row][col]. Free value[0] and value to free.*/
    cols: usize,     // number of columns per row
    value: Vec<f64>, // row-major cell values (length = rows * cols)
}
790
#[allow(non_snake_case)]
#[derive(Debug, Clone, PartialEq)]
/// Raw result produced by `ritvec`, before conversion into the public
/// ndarray-backed `SvdRec`. Mirrors SVDLIBC's SVDRec struct (below).
struct SVDRawRec {
    //int d; /* Dimensionality (rank) */
    //DMat Ut; /* Transpose of left singular vectors. (d by m)
    // The vectors are the rows of Ut. */
    //double *S; /* Array of singular values. (length d) */
    //DMat Vt; /* Transpose of right singular vectors. (d by n)
    // The vectors are the rows of Vt. */
    d: usize,    // dimensionality (rank)
    nsig: usize, // number of singular values found (surfaced as `singular_values`)
    Ut: DMat,    // transpose of left singular vectors (d rows)
    S: Vec<f64>, // singular values (length d)
    Vt: DMat,    // transpose of right singular vectors (d rows)
}
806
807// =================================================================
808
// Approximate float equality: true when the two values differ by less
// than f64::EPSILON (absolute difference).
fn compare(computed: f64, expected: f64) -> bool {
    let diff = (expected - computed).abs();
    diff < f64::EPSILON
}
813
/* Insertion sort over the first `n` elements of array1, applying the
 * identical swaps to array2 so the pairing between the two is kept.
 * (The `!(<=)` form matches the original's comparison exactly.) */
fn insert_sort<T: PartialOrd>(n: usize, array1: &mut [T], array2: &mut [T]) {
    for i in 1..n {
        let mut j = i;
        // Bubble element i down until it is not less than its predecessor.
        while j > 0 && !(array1[j - 1] <= array1[j]) {
            array1.swap(j - 1, j);
            array2.swap(j - 1, j);
            j -= 1;
        }
    }
}
826
827#[allow(non_snake_case)]
828#[rustfmt::skip]
829fn svd_opb(A: &dyn SMat, x: &[f64], y: &mut [f64], temp: &mut [f64], transposed: bool) {
830 let nrows = if transposed { A.ncols() } else { A.nrows() };
831 let ncols = if transposed { A.nrows() } else { A.ncols() };
832 assert_eq!(x.len(), ncols, "svd_opb: x must be A.ncols() in length, x = {}, A.ncols = {}", x.len(), ncols);
833 assert_eq!(y.len(), ncols, "svd_opb: y must be A.ncols() in length, y = {}, A.ncols = {}", y.len(), ncols);
834 assert_eq!(temp.len(), nrows, "svd_opa: temp must be A.nrows() in length, temp = {}, A.nrows = {}", temp.len(), nrows);
835 A.svd_opa(x, temp, transposed); // temp = (A * x)
836 A.svd_opa(temp, y, !transposed); // y = A' * (A * x) = A' * temp
837}
838
// BLAS daxpy: y += da * x, element-wise over the common length of x and y.
fn svd_daxpy(da: f64, x: &[f64], y: &mut [f64]) {
    x.iter()
        .zip(y.iter_mut())
        .for_each(|(&xi, yi)| *yi += da * xi);
}
845
// finds the index of the element having max absolute value; on ties the
// earliest index wins (strict `>` comparison)
fn svd_idamax(n: usize, x: &[f64]) -> usize {
    assert!(n > 0, "svd_idamax: unexpected inputs!");
    (1..n).fold(0, |imax, i| if x[i].abs() > x[imax].abs() { i } else { imax })
}
863
// returns |a| if b is positive; else fsign returns -|a|
// (implemented as: keep `a` when the signs agree, negate otherwise, treating
// zero as positive — identical to the original condition)
fn svd_fsign(a: f64, b: f64) -> f64 {
    let same_sign = (a >= 0.0 && b >= 0.0) || (a < 0.0 && b < 0.0);
    if same_sign {
        a
    } else {
        -a
    }
}
871
// finds sqrt(a^2 + b^2) without overflow or destructive underflow
// (iterative rescaling: never forms a*a + b*b directly)
fn svd_pythag(a: f64, b: f64) -> f64 {
    match a.abs().max(b.abs()) {
        n if n > 0.0 => {
            // p starts at max(|a|,|b|); r is the squared ratio min/max (<= 1)
            let mut p = n;
            let mut r = (a.abs().min(b.abs()) / p).powi(2);
            let mut t = 4.0 + r;
            // grow p toward the hypotenuse until the residual r is negligible
            // relative to 4.0 (i.e. t rounds to exactly 4.0)
            while !compare(t, 4.0) {
                let s = r / t;
                let u = 1.0 + 2.0 * s;
                p *= u;
                r *= (s / u).powi(2);
                t = 4.0 + r;
            }
            p
        }
        // both inputs are zero
        _ => 0.0,
    }
}
891
// dot product of two vectors (over the shorter of the two slices)
fn svd_ddot(x: &[f64], y: &[f64]) -> f64 {
    x.iter()
        .zip(y.iter())
        .fold(0.0, |acc, (a, b)| acc + a * b)
}
896
// norm (length) of a vector: sqrt of the sum of squares
fn svd_norm(x: &[f64]) -> f64 {
    x.iter().map(|v| v * v).sum::<f64>().sqrt()
}
901
// scales an input vector 'x', by a constant, storing in 'y'
// (panics if y is shorter than x, matching the original indexed write)
fn svd_datx(d: f64, x: &[f64], y: &mut [f64]) {
    x.iter().enumerate().for_each(|(i, xval)| y[i] = d * xval);
}
908
// scales an input vector 'x' by a constant, modifying 'x' in place
fn svd_dscal(d: f64, x: &mut [f64]) {
    x.iter_mut().for_each(|elem| *elem *= d);
}
915
// copies the n-element window of x starting at `offset` into the same window
// of y, reversing the order of the elements
fn svd_dcopy(n: usize, offset: usize, x: &[f64], y: &mut [f64]) {
    if n > 0 {
        let src = &x[offset..offset + n];
        let dst = &mut y[offset..offset + n];
        for (out, val) in dst.iter_mut().zip(src.iter().rev()) {
            *out = *val;
        }
    }
}
925
926/***********************************************************************
927 * *
928 * imtqlb() *
929 * *
930 ***********************************************************************/
931/***********************************************************************
932
933 Description
934 -----------
935
936 imtqlb() is a translation of a Fortran version of the Algol
937 procedure IMTQL1, Num. Math. 12, 377-383(1968) by Martin and
938 Wilkinson, as modified in Num. Math. 15, 450(1970) by Dubrulle.
939 Handbook for Auto. Comp., vol.II-Linear Algebra, 241-248(1971).
940 See also B. T. Smith et al, Eispack Guide, Lecture Notes in
941 Computer Science, Springer-Verlag, (1976).
942
943 The function finds the eigenvalues of a symmetric tridiagonal
944 matrix by the implicit QL method.
945
946
947 Arguments
948 ---------
949
950 (input)
951 n order of the symmetric tridiagonal matrix
952 d contains the diagonal elements of the input matrix
953 e contains the subdiagonal elements of the input matrix in its
954 last n-1 positions. e[0] is arbitrary
955
956 (output)
957 d contains the eigenvalues in ascending order. if an error
958 exit is made, the eigenvalues are correct and ordered for
959 indices 0,1,...ierr, but may not be the smallest eigenvalues.
960 e has been destroyed.
961***********************************************************************/
fn imtqlb(n: usize, d: &mut [f64], e: &mut [f64], bnd: &mut [f64]) -> Result<(), SvdLibError> {
    // a 1x1 matrix is already diagonal; its single entry is the eigenvalue
    if n == 1 {
        return Ok(());
    }

    // bnd is initialized to e1 = (1, 0, ..., 0) and rotated along with the
    // matrix, so it ends up holding first-component weights used as error
    // bounds by the caller
    bnd[0] = 1.0;
    let last = n - 1;
    for i in 1..=last {
        bnd[i] = 0.0;
        // shift the subdiagonal down so e[0..n-1] holds the off-diagonal
        e[i - 1] = e[i];
    }
    e[last] = 0.0;

    let mut i = 0;

    for l in 0..=last {
        let mut iteration = 0;
        while iteration <= 30 {
            // look for a negligible sub-diagonal element e[m]
            let mut m = l;
            while m < n {
                if m == last {
                    break;
                }
                let test = d[m].abs() + d[m + 1].abs();
                if compare(test, test + e[m].abs()) {
                    break; // convergence = true;
                }
                m += 1;
            }
            let mut p = d[l];
            let mut f = bnd[l];
            if m == l {
                // d[l] has converged: insertion-sort it into d[0..=l]
                // (keeps the already-found eigenvalues in ascending order)
                let mut exchange = true;
                if l > 0 {
                    i = l;
                    while i >= 1 && exchange {
                        if p < d[i - 1] {
                            d[i] = d[i - 1];
                            bnd[i] = bnd[i - 1];
                            i -= 1;
                        } else {
                            exchange = false;
                        }
                    }
                }
                if exchange {
                    i = 0;
                }
                d[i] = p;
                bnd[i] = f;
                iteration = 31; // force exit of the QL loop for this l
            } else {
                if iteration == 30 {
                    return Err(SvdLibError::ImtqlbError(
                        "imtqlb no convergence to an eigenvalue after 30 iterations".to_string(),
                    ));
                }
                iteration += 1;
                // ........ form shift ........
                let mut g = (d[l + 1] - p) / (2.0 * e[l]);
                let mut r = svd_pythag(g, 1.0);
                g = d[m] - p + e[l] / (g + svd_fsign(r, g));
                let mut s = 1.0;
                let mut c = 1.0;
                p = 0.0;

                assert!(m > 0, "imtqlb: expected 'm' to be non-zero");
                i = m - 1;
                let mut underflow = false;
                // implicit QL sweep: apply Givens rotations from m-1 down to l
                while !underflow && i >= l {
                    f = s * e[i];
                    let b = c * e[i];
                    r = svd_pythag(f, g);
                    e[i + 1] = r;
                    if compare(r, 0.0) {
                        underflow = true;
                        break;
                    }
                    s = f / r;
                    c = g / r;
                    g = d[i + 1] - p;
                    r = (d[i] - g) * s + 2.0 * c * b;
                    p = s * r;
                    d[i + 1] = g + p;
                    g = c * r - b;
                    // rotate the error-bound weights in step with the matrix
                    f = bnd[i + 1];
                    bnd[i + 1] = s * bnd[i] + c * f;
                    bnd[i] = c * bnd[i] - s * f;
                    if i == 0 {
                        break;
                    }
                    i -= 1;
                }
                // ........ recover from underflow .........
                if underflow {
                    d[i + 1] -= p;
                } else {
                    d[l] -= p;
                    e[l] = g;
                }
                e[m] = 0.0;
            }
        }
    }
    Ok(())
}
1069
1070/***********************************************************************
1071 * *
1072 * startv() *
1073 * *
1074 ***********************************************************************/
1075/***********************************************************************
1076
1077 Description
1078 -----------
1079
1080 Function delivers a starting vector in r and returns |r|; it returns
1081 zero if the range is spanned, and ierr is non-zero if no starting
1082 vector within range of operator can be found.
1083
1084 Parameters
1085 ---------
1086
1087 (input)
1088 n dimension of the eigenproblem matrix B
1089 wptr array of pointers that point to work space
1090 j starting index for a Lanczos run
1091 eps machine epsilon (relative precision)
1092
1093 (output)
1094 wptr array of pointers that point to work space that contains
1095 r[j], q[j], q[j-1], p[j], p[j-1]
1096***********************************************************************/
#[allow(non_snake_case)]
// Delivers a starting vector for a Lanczos run (in wrk.w0/w3) and returns its
// norm |r|; returns an error if no starting vector within the operator's
// range can be found after 3 attempts.
fn startv(
    A: &dyn SMat,
    wrk: &mut WorkSpace,
    step: usize,
    store: &mut Store,
    random_seed: u32,
) -> Result<f64, SvdLibError> {
    // get initial vector; default is random
    let mut rnm2 = svd_ddot(&wrk.w0, &wrk.w0);
    for id in 0..3 {
        if id > 0 || step > 0 || compare(rnm2, 0.0) {
            // NOTE(review): the RNG is re-seeded from the same `random_seed`
            // on every retry, so all three attempts draw the identical random
            // vector — confirm whether retries were meant to differ.
            let mut bytes = [0; 32];
            for (i, b) in random_seed.to_le_bytes().iter().enumerate() {
                bytes[i] = *b;
            }
            let mut seeded_rng = StdRng::from_seed(bytes);
            wrk.w0.fill_with(|| seeded_rng.gen_range(-1.0..1.0));
        }
        wrk.w3.copy_from_slice(&wrk.w0);

        // apply operator to put r in range (essential if m singular)
        svd_opb(A, &wrk.w3, &mut wrk.w0, &mut wrk.temp, wrk.transposed);
        wrk.w3.copy_from_slice(&wrk.w0);
        rnm2 = svd_ddot(&wrk.w3, &wrk.w3);
        if rnm2 > 0.0 {
            break;
        }
    }

    // the operator annihilated every candidate vector
    if rnm2 <= 0.0 {
        return Err(SvdLibError::StartvError(format!("rnm2 <= 0.0, rnm2 = {rnm2}")));
    }

    if step > 0 {
        // orthogonalize against all previously stored Lanczos vectors
        for i in 0..step {
            let v = store.retrq(i);
            svd_daxpy(-svd_ddot(&wrk.w3, v), v, &mut wrk.w0);
        }

        // make sure q[step] is orthogonal to q[step-1]
        svd_daxpy(-svd_ddot(&wrk.w4, &wrk.w0), &wrk.w2, &mut wrk.w0);
        wrk.w3.copy_from_slice(&wrk.w0);

        // if orthogonalization removed (nearly) everything, the range is
        // spanned: report a zero norm
        rnm2 = match svd_ddot(&wrk.w3, &wrk.w3) {
            dot if dot <= f64::EPSILON * rnm2 => 0.0,
            dot => dot,
        }
    }
    Ok(rnm2.sqrt())
}
1148
1149/***********************************************************************
1150 * *
1151 * stpone() *
1152 * *
1153 ***********************************************************************/
1154/***********************************************************************
1155
1156 Description
1157 -----------
1158
1159 Function performs the first step of the Lanczos algorithm. It also
1160 does a step of extended local re-orthogonalization.
1161
1162 Arguments
1163 ---------
1164
1165 (input)
1166 n dimension of the eigenproblem for matrix B
1167
1168 (output)
1169 ierr error flag
1170 wptr array of pointers that point to work space that contains
1171 wptr[0] r[j]
1172 wptr[1] q[j]
1173 wptr[2] q[j-1]
1174 wptr[3] p
1175 wptr[4] p[j-1]
1176 wptr[6] diagonal elements of matrix T
1177***********************************************************************/
#[allow(non_snake_case)]
// Performs the first Lanczos step plus one step of extended local
// re-orthogonalization; returns (rnm, tol): the residual norm and the
// convergence tolerance derived from the norm estimate of B.
fn stpone(A: &dyn SMat, wrk: &mut WorkSpace, store: &mut Store, random_seed: u32) -> Result<(f64, f64), SvdLibError> {
    // get initial vector; default is random
    let mut rnm = startv(A, wrk, 0, store, random_seed)?;
    if compare(rnm, 0.0) {
        return Err(SvdLibError::StponeError("rnm == 0.0".to_string()));
    }

    // normalize starting vector
    svd_datx(rnm.recip(), &wrk.w0, &mut wrk.w1);
    svd_dscal(rnm.recip(), &mut wrk.w3);

    // take the first step: alf[0] = <B q0, q0>, then subtract the projection
    svd_opb(A, &wrk.w3, &mut wrk.w0, &mut wrk.temp, wrk.transposed);
    wrk.alf[0] = svd_ddot(&wrk.w0, &wrk.w3);
    svd_daxpy(-wrk.alf[0], &wrk.w1, &mut wrk.w0);
    // one extra re-orthogonalization pass; fold the correction into alf[0]
    let t = svd_ddot(&wrk.w0, &wrk.w3);
    wrk.alf[0] += t;
    svd_daxpy(-t, &wrk.w1, &mut wrk.w0);
    wrk.w4.copy_from_slice(&wrk.w0);
    rnm = svd_norm(&wrk.w4);
    // anorm estimates |B|; the tolerance scales with sqrt(eps) * |B|
    let anorm = rnm + wrk.alf[0].abs();
    Ok((rnm, f64::EPSILON.sqrt() * anorm))
}
1202
1203/***********************************************************************
1204 * *
1205 * lanczos_step() *
1206 * *
1207 ***********************************************************************/
1208/***********************************************************************
1209
1210 Description
1211 -----------
1212
1213 Function embodies a single Lanczos step
1214
1215 Arguments
1216 ---------
1217
1218 (input)
1219 n dimension of the eigenproblem for matrix B
1220 first start of index through loop
1221 last end of index through loop
1222 wptr array of pointers pointing to work space
1223 alf array to hold diagonal of the tridiagonal matrix T
1224 eta orthogonality estimate of Lanczos vectors at step j
1225 oldeta orthogonality estimate of Lanczos vectors at step j-1
1226 bet array to hold off-diagonal of T
  ll        number of initial Lanczos vectors in local orthog.
1228 (has value of 0, 1 or 2)
1229 enough stop flag
1230***********************************************************************/
#[allow(non_snake_case)]
#[allow(clippy::too_many_arguments)]
// Runs Lanczos steps `first..last`, maintaining the tridiagonal entries
// (alf/bet), the orthogonality estimates (eta/oldeta), and the residual norm
// `rnm`; returns the index reached.
fn lanczos_step(
    A: &dyn SMat,
    wrk: &mut WorkSpace,
    first: usize,
    last: usize,
    ll: &mut usize,
    enough: &mut bool,
    rnm: &mut f64,
    tol: &mut f64,
    store: &mut Store,
) -> Result<usize, SvdLibError> {
    // roundoff estimate for the dot product of two unit vectors
    let eps1 = f64::EPSILON * (wrk.ncols as f64).sqrt();
    let mut j = first;

    while j < last {
        // advance the two-term recurrence: current (w1,w3) become previous (w2,w4)
        mem::swap(&mut wrk.w1, &mut wrk.w2);
        mem::swap(&mut wrk.w3, &mut wrk.w4);

        store.storq(j - 1, &wrk.w2);
        if j - 1 < MAXLL {
            store.storp(j - 1, &wrk.w4);
        }
        wrk.bet[j] = *rnm;

        // restart if invariant subspace is found
        if compare(*rnm, 0.0) {
            *rnm = startv(A, wrk, j, store, 0)?;
            if compare(*rnm, 0.0) {
                *enough = true;
            }
        }

        if *enough {
            // added by Doug...
            // These lines fix a bug that occurs with low-rank matrices
            mem::swap(&mut wrk.w1, &mut wrk.w2);
            // ...added by Doug
            break;
        }

        // take a lanczos step
        svd_datx(rnm.recip(), &wrk.w0, &mut wrk.w1);
        svd_dscal(rnm.recip(), &mut wrk.w3);
        svd_opb(A, &wrk.w3, &mut wrk.w0, &mut wrk.temp, wrk.transposed);
        svd_daxpy(-*rnm, &wrk.w2, &mut wrk.w0);
        wrk.alf[j] = svd_ddot(&wrk.w0, &wrk.w3);
        svd_daxpy(-wrk.alf[j], &wrk.w1, &mut wrk.w0);

        // orthogonalize against initial lanczos vectors
        if j <= MAXLL && wrk.alf[j - 1].abs() > 4.0 * wrk.alf[j].abs() {
            *ll = j;
        }
        for i in 0..(j - 1).min(*ll) {
            let v1 = store.retrp(i);
            let t = svd_ddot(v1, &wrk.w0);
            let v2 = store.retrq(i);
            svd_daxpy(-t, v2, &mut wrk.w0);
            wrk.eta[i] = eps1;
            wrk.oldeta[i] = eps1;
        }

        // extended local reorthogonalization: correct against the two most
        // recent Lanczos vectors and fold the corrections into bet/alf
        let t = svd_ddot(&wrk.w0, &wrk.w4);
        svd_daxpy(-t, &wrk.w2, &mut wrk.w0);
        if wrk.bet[j] > 0.0 {
            wrk.bet[j] += t;
        }
        let t = svd_ddot(&wrk.w0, &wrk.w3);
        svd_daxpy(-t, &wrk.w1, &mut wrk.w0);
        wrk.alf[j] += t;
        wrk.w4.copy_from_slice(&wrk.w0);
        *rnm = svd_norm(&wrk.w4);
        // refine the tolerance from the running estimate of |B|
        let anorm = wrk.bet[j] + wrk.alf[j].abs() + *rnm;
        *tol = f64::EPSILON.sqrt() * anorm;

        // update the orthogonality bounds
        ortbnd(wrk, j, *rnm, eps1);

        // restore the orthogonality state when needed
        purge(wrk.ncols, *ll, wrk, j, rnm, *tol, store);
        if *rnm <= *tol {
            *rnm = 0.0;
        }
        j += 1;
    }
    Ok(j)
}
1320
1321/***********************************************************************
1322 * *
1323 * purge() *
1324 * *
1325 ***********************************************************************/
1326/***********************************************************************
1327
1328 Description
1329 -----------
1330
1331 Function examines the state of orthogonality between the new Lanczos
1332 vector and the previous ones to decide whether re-orthogonalization
1333 should be performed
1334
1335
1336 Arguments
1337 ---------
1338
1339 (input)
1340 n dimension of the eigenproblem for matrix B
  ll       number of initial Lanczos vectors in local orthog.
1342 r residual vector to become next Lanczos vector
1343 q current Lanczos vector
1344 ra previous Lanczos vector
1345 qa previous Lanczos vector
1346 wrk temporary vector to hold the previous Lanczos vector
1347 eta state of orthogonality between r and prev. Lanczos vectors
1348 oldeta state of orthogonality between q and prev. Lanczos vectors
1349 j current Lanczos step
1350
1351 (output)
1352 r residual vector orthogonalized against previous Lanczos
1353 vectors
1354 q current Lanczos vector orthogonalized against previous ones
1355***********************************************************************/
fn purge(n: usize, ll: usize, wrk: &mut WorkSpace, step: usize, rnm: &mut f64, tol: f64, store: &mut Store) {
    // nothing to purge until at least two vectors beyond the locally
    // orthogonalized set exist
    if step < ll + 2 {
        return;
    }

    let reps = f64::EPSILON.sqrt();
    let eps1 = f64::EPSILON * (n as f64).sqrt();

    // k = index of the worst (largest magnitude) orthogonality estimate
    let k = svd_idamax(step - (ll + 1), &wrk.eta) + ll;
    if wrk.eta[k].abs() > reps {
        let reps1 = eps1 / reps;
        let mut iteration = 0;
        let mut flag = true;
        // at most two full Gram-Schmidt passes
        while iteration < 2 && flag {
            if *rnm > tol {
                // bring in a lanczos vector t and orthogonalize both r and q against it
                let mut tq = 0.0;
                let mut tr = 0.0;
                for i in ll..step {
                    let v = store.retrq(i);
                    let t = svd_ddot(v, &wrk.w3);
                    tq += t.abs();
                    svd_daxpy(-t, v, &mut wrk.w1);
                    let t = svd_ddot(v, &wrk.w4);
                    tr += t.abs();
                    svd_daxpy(-t, v, &mut wrk.w0);
                }
                wrk.w3.copy_from_slice(&wrk.w1);
                // re-orthogonalize the residual against the corrected q
                let t = svd_ddot(&wrk.w0, &wrk.w3);
                tr += t.abs();
                svd_daxpy(-t, &wrk.w1, &mut wrk.w0);
                wrk.w4.copy_from_slice(&wrk.w0);
                *rnm = svd_norm(&wrk.w4);
                // stop early once the accumulated corrections are negligible
                if tq <= reps1 && tr <= *rnm * reps1 {
                    flag = false;
                }
            }
            iteration += 1;
        }
        // orthogonality is restored to roundoff level
        for i in ll..=step {
            wrk.eta[i] = eps1;
            wrk.oldeta[i] = eps1;
        }
    }
}
1401
1402/***********************************************************************
1403 * *
1404 * ortbnd() *
1405 * *
1406 ***********************************************************************/
1407/***********************************************************************
1408
1409 Description
1410 -----------
1411
1412 Function updates the eta recurrence
1413
1414 Arguments
1415 ---------
1416
1417 (input)
1418 alf array to hold diagonal of the tridiagonal matrix T
1419 eta orthogonality estimate of Lanczos vectors at step j
1420 oldeta orthogonality estimate of Lanczos vectors at step j-1
1421 bet array to hold off-diagonal of T
1422 n dimension of the eigenproblem for matrix B
1423 j dimension of T
1424 rnm norm of the next residual vector
1425 eps1 roundoff estimate for dot product of two unit vectors
1426
1427 (output)
1428 eta orthogonality estimate of Lanczos vectors at step j+1
1429 oldeta orthogonality estimate of Lanczos vectors at step j
1430***********************************************************************/
// Updates the eta/oldeta recurrences that estimate the loss of orthogonality
// among the Lanczos vectors after step `step`.
fn ortbnd(wrk: &mut WorkSpace, step: usize, rnm: f64, eps1: f64) {
    if step < 1 {
        return;
    }
    // only meaningful when the residual is non-zero and there is history
    if !compare(rnm, 0.0) && step > 1 {
        wrk.oldeta[0] =
            (wrk.bet[1] * wrk.eta[1] + (wrk.alf[0] - wrk.alf[step]) * wrk.eta[0] - wrk.bet[step] * wrk.oldeta[0]) / rnm
            + eps1;
        if step > 2 {
            // three-term recurrence for the interior estimates
            for i in 1..=step - 2 {
                wrk.oldeta[i] = (wrk.bet[i + 1] * wrk.eta[i + 1]
                    + (wrk.alf[i] - wrk.alf[step]) * wrk.eta[i]
                    + wrk.bet[i] * wrk.eta[i - 1]
                    - wrk.bet[step] * wrk.oldeta[i])
                    / rnm
                    + eps1;
            }
        }
    }
    wrk.oldeta[step - 1] = eps1;
    // advance one step: eta becomes the estimate at step j+1, oldeta at step j
    mem::swap(&mut wrk.oldeta, &mut wrk.eta);
    wrk.eta[step] = eps1;
}
1454
1455/***********************************************************************
1456 * *
1457 * error_bound() *
1458 * *
1459 ***********************************************************************/
1460/***********************************************************************
1461
1462 Description
1463 -----------
1464
1465 Function massages error bounds for very close ritz values by placing
1466 a gap between them. The error bounds are then refined to reflect
1467 this.
1468
1469
1470 Arguments
1471 ---------
1472
1473 (input)
1474 endl left end of interval containing unwanted eigenvalues
1475 endr right end of interval containing unwanted eigenvalues
1476 ritz array to store the ritz values
1477 bnd array to store the error bounds
1478 enough stop flag
1479***********************************************************************/
1480fn error_bound(
1481 enough: &mut bool,
1482 endl: f64,
1483 endr: f64,
1484 ritz: &mut [f64],
1485 bnd: &mut [f64],
1486 step: usize,
1487 tol: f64,
1488) -> usize {
1489 assert!(step > 0, "error_bound: expected 'step' to be non-zero");
1490
1491 // massage error bounds for very close ritz values
1492 let mid = svd_idamax(step + 1, bnd);
1493
1494 let mut i = ((step + 1) + (step - 1)) / 2;
1495 while i > mid + 1 {
1496 if (ritz[i - 1] - ritz[i]).abs() < eps34() * ritz[i].abs() && bnd[i] > tol && bnd[i - 1] > tol {
1497 bnd[i - 1] = (bnd[i].powi(2) + bnd[i - 1].powi(2)).sqrt();
1498 bnd[i] = 0.0;
1499 }
1500 i -= 1;
1501 }
1502
1503 let mut i = ((step + 1) - (step - 1)) / 2;
1504 while i + 1 < mid {
1505 if (ritz[i + 1] - ritz[i]).abs() < eps34() * ritz[i].abs() && bnd[i] > tol && bnd[i + 1] > tol {
1506 bnd[i + 1] = (bnd[i].powi(2) + bnd[i + 1].powi(2)).sqrt();
1507 bnd[i] = 0.0;
1508 }
1509 i += 1;
1510 }
1511
1512 // refine the error bounds
1513 let mut neig = 0;
1514 let mut gapl = ritz[step] - ritz[0];
1515 for i in 0..=step {
1516 let mut gap = gapl;
1517 if i < step {
1518 gapl = ritz[i + 1] - ritz[i];
1519 }
1520 gap = gap.min(gapl);
1521 if gap > bnd[i] {
1522 bnd[i] *= bnd[i] / gap;
1523 }
1524 if bnd[i] <= 16.0 * f64::EPSILON * ritz[i].abs() {
1525 neig += 1;
1526 if !*enough {
1527 *enough = endl < ritz[i] && ritz[i] < endr;
1528 }
1529 }
1530 }
1531 neig
1532}
1533
1534/***********************************************************************
1535 * *
1536 * imtql2() *
1537 * *
1538 ***********************************************************************/
1539/***********************************************************************
1540
1541 Description
1542 -----------
1543
1544 imtql2() is a translation of a Fortran version of the Algol
1545 procedure IMTQL2, Num. Math. 12, 377-383(1968) by Martin and
1546 Wilkinson, as modified in Num. Math. 15, 450(1970) by Dubrulle.
1547 Handbook for Auto. Comp., vol.II-Linear Algebra, 241-248(1971).
1548 See also B. T. Smith et al, Eispack Guide, Lecture Notes in
1549 Computer Science, Springer-Verlag, (1976).
1550
1551 This function finds the eigenvalues and eigenvectors of a symmetric
1552 tridiagonal matrix by the implicit QL method.
1553
1554
1555 Arguments
1556 ---------
1557
1558 (input)
1559 nm row dimension of the symmetric tridiagonal matrix
1560 n order of the matrix
1561 d contains the diagonal elements of the input matrix
1562 e contains the subdiagonal elements of the input matrix in its
1563 last n-1 positions. e[0] is arbitrary
1564 z contains the identity matrix
1565
1566 (output)
1567 d contains the eigenvalues in ascending order. if an error
1568 exit is made, the eigenvalues are correct but unordered for
1569 for indices 0,1,...,ierr.
1570 e has been destroyed.
1571 z contains orthonormal eigenvectors of the symmetric
1572 tridiagonal (or full) matrix. if an error exit is made,
1573 z contains the eigenvectors associated with the stored
1574 eigenvalues.
1575***********************************************************************/
fn imtql2(nm: usize, n: usize, d: &mut [f64], e: &mut [f64], z: &mut [f64]) -> Result<(), SvdLibError> {
    // a 1x1 matrix is already diagonal
    if n == 1 {
        return Ok(());
    }
    assert!(n > 1, "imtql2: expected 'n' to be > 1");

    let last = n - 1;

    // shift the subdiagonal down so e[0..n-1] holds the off-diagonal of T
    for i in 1..n {
        e[i - 1] = e[i];
    }
    e[last] = 0.0;

    let nnm = n * nm;
    for l in 0..n {
        let mut iteration = 0;

        // look for small sub-diagonal element
        while iteration <= 30 {
            let mut m = l;
            while m < n {
                if m == last {
                    break;
                }
                let test = d[m].abs() + d[m + 1].abs();
                if compare(test, test + e[m].abs()) {
                    break; // convergence = true;
                }
                m += 1;
            }
            if m == l {
                break;
            }

            // error -- no convergence to an eigenvalue after 30 iterations.
            if iteration == 30 {
                return Err(SvdLibError::Imtql2Error(
                    "imtql2 no convergence to an eigenvalue after 30 iterations".to_string(),
                ));
            }
            iteration += 1;

            // form shift
            let mut g = (d[l + 1] - d[l]) / (2.0 * e[l]);
            let mut r = svd_pythag(g, 1.0);
            g = d[m] - d[l] + e[l] / (g + svd_fsign(r, g));

            let mut s = 1.0;
            let mut c = 1.0;
            let mut p = 0.0;

            assert!(m > 0, "imtql2: expected 'm' to be non-zero");
            let mut i = m - 1;
            let mut underflow = false;
            // implicit QL sweep: Givens rotations from m-1 down to l,
            // accumulated into the eigenvector matrix z as well
            while !underflow && i >= l {
                let mut f = s * e[i];
                let b = c * e[i];
                r = svd_pythag(f, g);
                e[i + 1] = r;
                if compare(r, 0.0) {
                    underflow = true;
                } else {
                    s = f / r;
                    c = g / r;
                    g = d[i + 1] - p;
                    r = (d[i] - g) * s + 2.0 * c * b;
                    p = s * r;
                    d[i + 1] = g + p;
                    g = c * r - b;

                    // form vector: apply the same rotation to columns i, i+1 of z
                    for k in (0..nnm).step_by(n) {
                        let index = k + i;
                        f = z[index + 1];
                        z[index + 1] = s * z[index] + c * f;
                        z[index] = c * z[index] - s * f;
                    }
                    if i == 0 {
                        break;
                    }
                    i -= 1;
                }
            } /* end while (underflow != FALSE && i >= l) */
            /*........ recover from underflow .........*/
            if underflow {
                d[i + 1] -= p;
            } else {
                d[l] -= p;
                e[l] = g;
            }
            e[m] = 0.0;
        }
    }

    // order the eigenvalues (selection sort, smallest first)
    for l in 1..n {
        let i = l - 1;
        let mut k = i;
        let mut p = d[i];
        for (j, item) in d.iter().enumerate().take(n).skip(l) {
            if *item < p {
                k = j;
                p = *item;
            }
        }

        // ...and corresponding eigenvectors
        if k != i {
            d[k] = d[i];
            d[i] = p;
            for j in (0..nnm).step_by(n) {
                z.swap(j + i, j + k);
            }
        }
    }

    Ok(())
}
1694
/// Rotates `a` left by `x` positions in place, so the element formerly at
/// index `x` ends up at index 0 (i.e. `new[i] = old[(i + x) % len]`).
///
/// Replaces the hand-rolled cycle-following ("juggling") rotation with the
/// standard library's `slice::rotate_left`, which has the same semantics and
/// O(n) cost; it also handles the empty-slice case, on which the original
/// panicked (it unconditionally read `a[0]`).
///
/// # Panics
/// Panics if `x > a.len()` (the original underflowed in that case).
fn rotate_array(a: &mut [f64], x: usize) {
    a.rotate_left(x);
}
1719
1720/***********************************************************************
1721 * *
1722 * ritvec() *
1723 * Function computes the singular vectors of matrix A *
1724 * *
1725 ***********************************************************************/
1726/***********************************************************************
1727
1728 Description
1729 -----------
1730
1731 This function is invoked by landr() only if eigenvectors of the A'A
1732 eigenproblem are desired. When called, ritvec() computes the
1733 singular vectors of A and writes the result to an unformatted file.
1734
1735
1736 Parameters
1737 ----------
1738
1739 (input)
1740 nrow number of rows of A
1741 steps number of Lanczos iterations performed
1742 fp_out2 pointer to unformatted output file
1743 n dimension of matrix A
1744 kappa relative accuracy of ritz values acceptable as
1745 eigenvalues of A'A
1746 ritz array of ritz values
1747 bnd array of error bounds
1748 alf array of diagonal elements of the tridiagonal matrix T
1749 bet array of off-diagonal elements of T
1750 w1, w2 work space
1751
1752 (output)
1753 xv1 array of eigenvectors of A'A (right singular vectors of A)
1754 ierr error code
1755 0 for normal return from imtql2()
1756 k if convergence did not occur for k-th eigenvalue in
1757 imtql2()
1758 nsig number of accepted ritz values based on kappa
1759
1760 (local)
1761 s work array which is initialized to the identity matrix
1762 of order (j + 1) upon calling imtql2(). After the call,
1763 s contains the orthonormal eigenvectors of the symmetric
1764 tridiagonal matrix T
1765***********************************************************************/
#[allow(non_snake_case)]
// Computes the singular values and left/right singular vectors of A from the
// Lanczos results: eigen-decomposes the tridiagonal T (via imtql2), combines
// the accepted eigenvectors with the stored Lanczos vectors to form Vt, then
// derives S and Ut by applying the operator.
fn ritvec(
    A: &dyn SMat,
    dimensions: usize,
    kappa: f64,
    wrk: &mut WorkSpace,
    steps: usize,
    neig: usize,
    store: &mut Store,
) -> Result<SVDRawRec, SvdLibError> {
    let js = steps + 1;
    let jsq = js * js;
    let mut s = vec![0.0; jsq];

    // initialize s to an identity matrix
    for i in (0..jsq).step_by(js + 1) {
        s[i] = 1.0;
    }

    let mut Vt = DMat {
        cols: wrk.ncols,
        value: vec![0.0; wrk.ncols * dimensions],
    };

    // load T's diagonal (alf) and off-diagonal (bet) — svd_dcopy reverses the
    // window — into the buffers imtql2 consumes
    svd_dcopy(js, 0, &wrk.alf, &mut Vt.value);
    svd_dcopy(steps, 1, &wrk.bet, &mut wrk.w5);

    // on return from imtql2(), `R.Vt.value` contains eigenvalues in
    // ascending order and `s` contains the corresponding eigenvectors
    imtql2(js, js, &mut Vt.value, &mut wrk.w5, &mut s)?;

    let mut nsig = 0;
    let mut x = 0;
    let mut id2 = jsq - js;
    for k in 0..js {
        // accept a ritz value when its bound is small relative to it and it
        // belongs to the `neig` stabilized values
        if wrk.bnd[k] <= kappa * wrk.ritz[k].abs() && k + 1 > js - neig {
            // rows of Vt are filled cycling backward from the end
            x = match x {
                0 => dimensions - 1,
                _ => x - 1,
            };

            let offset = x * Vt.cols;
            Vt.value[offset..offset + Vt.cols].fill(0.0);
            // row = sum of stored Lanczos vectors weighted by the k-th
            // eigenvector's entries (walking column k of s bottom-up)
            let mut idx = id2 + js;
            for i in 0..js {
                idx -= js;
                if s[idx] != 0.0 {
                    for (j, item) in store.retrq(i).iter().enumerate().take(Vt.cols) {
                        Vt.value[j + offset] += s[idx] * item;
                    }
                }
            }
            nsig += 1;
        }
        id2 += 1;
    }

    // Rotate the singular vectors and values.
    // `x` is now the location of the highest singular value.
    if x > 0 {
        rotate_array(&mut Vt.value, x * Vt.cols);
    }

    // final dimension size
    let d = dimensions.min(nsig);
    let mut S = vec![0.0; d];
    let mut Ut = DMat {
        cols: wrk.nrows,
        value: vec![0.0; wrk.nrows * d],
    };
    Vt.value.resize(Vt.cols * d, 0.0);

    let mut tmp_vec = vec![0.0; Vt.cols];
    for (i, sval) in S.iter_mut().enumerate() {
        let vt_offset = i * Vt.cols;
        let ut_offset = i * Ut.cols;

        let vt_vec = &Vt.value[vt_offset..vt_offset + Vt.cols];
        let ut_vec = &mut Ut.value[ut_offset..ut_offset + Ut.cols];

        // multiply by matrix B first
        svd_opb(A, vt_vec, &mut tmp_vec, &mut wrk.temp, wrk.transposed);
        let t = svd_ddot(vt_vec, &tmp_vec);

        // store the Singular Value at S[i]
        *sval = t.sqrt();

        svd_daxpy(-t, vt_vec, &mut tmp_vec);
        wrk.bnd[js] = svd_norm(&tmp_vec) * sval.recip();

        // multiply by matrix A to get (scaled) left s-vector
        A.svd_opa(vt_vec, ut_vec, wrk.transposed);
        svd_dscal(sval.recip(), ut_vec);
    }

    Ok(SVDRawRec {
        // Dimensionality (rank)
        d,

        // Significant values
        nsig,

        // DMat Ut Transpose of left singular vectors. (d by m)
        // The vectors are the rows of Ut.
        Ut,

        // Array of singular values. (length d)
        S,

        // DMat Vt Transpose of right singular vectors. (d by n)
        // The vectors are the rows of Vt.
        Vt,
    })
}
1880
1881/***********************************************************************
1882 * *
1883 * lanso() *
1884 * *
1885 ***********************************************************************/
1886/***********************************************************************
1887
1888 Description
1889 -----------
1890
1891 Function determines when the restart of the Lanczos algorithm should
1892 occur and when it should terminate.
1893
1894 Arguments
1895 ---------
1896
1897 (input)
1898 n dimension of the eigenproblem for matrix B
1899 iterations upper limit of desired number of lanczos steps
1900 dimensions upper limit of desired number of eigenpairs
1901 endl left end of interval containing unwanted eigenvalues
1902 endr right end of interval containing unwanted eigenvalues
1903 ritz array to hold the ritz values
1904 bnd array to hold the error bounds
1905 wptr array of pointers that point to work space:
1906 wptr[0]-wptr[5] six vectors of length n
1907 wptr[6] array to hold diagonal of the tridiagonal matrix T
1908 wptr[9] array to hold off-diagonal of T
1909 wptr[7] orthogonality estimate of Lanczos vectors at
1910 step j
1911 wptr[8] orthogonality estimate of Lanczos vectors at
1912 step j-1
1913 (output)
1914 j number of Lanczos steps actually taken
1915 neig number of ritz values stabilized
1916 ritz array to hold the ritz values
1917 bnd array to hold the error bounds
1918 ierr (globally declared) error flag
1919 ierr = 8192 if stpone() fails to find a starting vector
1920 ierr = k if convergence did not occur for k-th eigenvalue
1921 in imtqlb()
1922***********************************************************************/
#[allow(non_snake_case)]
#[allow(clippy::too_many_arguments)]
// Drives the Lanczos iteration: decides when to restart and when to stop,
// analyzes the tridiagonal T after each batch of steps, and returns the
// number of Lanczos steps actually taken (`neig` receives the number of
// stabilized ritz values).
fn lanso(
    A: &dyn SMat,
    dim: usize,
    iterations: usize,
    end_interval: &[f64; 2],
    wrk: &mut WorkSpace,
    neig: &mut usize,
    store: &mut Store,
    random_seed: u32,
) -> Result<usize, SvdLibError> {
    let (endl, endr) = (end_interval[0], end_interval[1]);

    /* take the first step */
    let rnm_tol = stpone(A, wrk, store, random_seed)?;
    let mut rnm = rnm_tol.0;
    let mut tol = rnm_tol.1;

    // roundoff estimate for the dot product of two unit vectors
    let eps1 = f64::EPSILON * (wrk.ncols as f64).sqrt();
    wrk.eta[0] = eps1;
    wrk.oldeta[0] = eps1;
    let mut ll = 0;
    let mut first = 1;
    let mut last = iterations.min(dim.max(8) + dim);
    let mut enough = false;
    let mut j = 0;
    let mut intro = 0;

    while !enough {
        if rnm <= tol {
            rnm = 0.0;
        }

        // the actual lanczos loop
        let steps = lanczos_step(A, wrk, first, last, &mut ll, &mut enough, &mut rnm, &mut tol, store)?;
        j = match enough {
            true => steps - 1,
            false => last - 1,
        };

        first = j + 1;
        wrk.bet[first] = rnm;

        // analyze T: process each unreduced submatrix of the tridiagonal
        let mut l = 0;
        for _ in 0..j {
            if l > j {
                break;
            }

            // advance i to the end of the current unreduced block
            // (a zero bet[i+1] splits T into independent submatrices)
            let mut i = l;
            while i <= j {
                if compare(wrk.bet[i + 1], 0.0) {
                    break;
                }
                i += 1;
            }
            i = i.min(j);

            // now i is at the end of an unreduced submatrix
            let sz = i - l;
            svd_dcopy(sz + 1, l, &wrk.alf, &mut wrk.ritz);
            svd_dcopy(sz, l + 1, &wrk.bet, &mut wrk.w5);

            imtqlb(sz + 1, &mut wrk.ritz[l..], &mut wrk.w5[l..], &mut wrk.bnd[l..])?;

            // scale the eigenvector weights by the residual norm to get bounds
            for m in l..=i {
                wrk.bnd[m] = rnm * wrk.bnd[m].abs();
            }
            l = i + 1;
        }

        // sort eigenvalues into increasing order
        insert_sort(j + 1, &mut wrk.ritz, &mut wrk.bnd);

        *neig = error_bound(&mut enough, endl, endr, &mut wrk.ritz, &mut wrk.bnd, j, tol);

        // should we stop?
        if *neig < dim {
            if *neig == 0 {
                // nothing stabilized yet: take a few more steps
                last = first + 9;
                intro = first;
            } else {
                // extrapolate how many more steps should yield `dim` values
                last = first + 3.max(1 + ((j - intro) * (dim - *neig)) / *neig);
            }
            last = last.min(iterations);
        } else {
            enough = true
        }
        enough = enough || first >= iterations;
    }
    store.storq(j, &wrk.w1);
    Ok(j)
}
2018
2019//////////////////////////////////////////
2020// SvdRec implementation
2021//////////////////////////////////////////
2022
2023impl SvdRec {
2024 pub fn recompose(&self) -> Array2<f64> {
2025 let sdiag = Array2::from_diag(&self.s);
2026 self.ut.t().dot(&sdiag).dot(&self.vt)
2027 }
2028}
2029
2030//////////////////////////////////////////
2031// SMat implementation for CscMatrix
2032//////////////////////////////////////////
2033
2034#[rustfmt::skip]
2035impl SMat for nalgebra_sparse::csc::CscMatrix<f64> {
2036 fn nrows(&self) -> usize { self.nrows() }
2037 fn ncols(&self) -> usize { self.ncols() }
2038 fn nnz(&self) -> usize { self.nnz() }
2039
2040 /// takes an n-vector x and returns A*x in y
2041 fn svd_opa(&self, x: &[f64], y: &mut [f64], transposed: bool) {
2042 let nrows = if transposed { self.ncols() } else { self.nrows() };
2043 let ncols = if transposed { self.nrows() } else { self.ncols() };
2044 assert_eq!(x.len(), ncols, "svd_opa: x must be A.ncols() in length, x = {}, A.ncols = {}", x.len(), ncols);
2045 assert_eq!(y.len(), nrows, "svd_opa: y must be A.nrows() in length, y = {}, A.nrows = {}", y.len(), nrows);
2046
2047 let (major_offsets, minor_indices, values) = self.csc_data();
2048
2049 y.fill(0.0);
2050 if transposed {
2051 for (i, yval) in y.iter_mut().enumerate() {
2052 for j in major_offsets[i]..major_offsets[i + 1] {
2053 *yval += values[j] * x[minor_indices[j]];
2054 }
2055 }
2056 } else {
2057 for (i, xval) in x.iter().enumerate() {
2058 for j in major_offsets[i]..major_offsets[i + 1] {
2059 y[minor_indices[j]] += values[j] * xval;
2060 }
2061 }
2062 }
2063 }
2064}
2065
2066//////////////////////////////////////////
2067// SMat implementation for CsrMatrix
2068//////////////////////////////////////////
2069
2070#[rustfmt::skip]
2071impl SMat for nalgebra_sparse::csr::CsrMatrix<f64> {
2072 fn nrows(&self) -> usize { self.nrows() }
2073 fn ncols(&self) -> usize { self.ncols() }
2074 fn nnz(&self) -> usize { self.nnz() }
2075
2076 /// takes an n-vector x and returns A*x in y
2077 fn svd_opa(&self, x: &[f64], y: &mut [f64], transposed: bool) {
2078 let nrows = if transposed { self.ncols() } else { self.nrows() };
2079 let ncols = if transposed { self.nrows() } else { self.ncols() };
2080 assert_eq!(x.len(), ncols, "svd_opa: x must be A.ncols() in length, x = {}, A.ncols = {}", x.len(), ncols);
2081 assert_eq!(y.len(), nrows, "svd_opa: y must be A.nrows() in length, y = {}, A.nrows = {}", y.len(), nrows);
2082
2083 let (major_offsets, minor_indices, values) = self.csr_data();
2084
2085 y.fill(0.0);
2086 if !transposed {
2087 for (i, yval) in y.iter_mut().enumerate() {
2088 for j in major_offsets[i]..major_offsets[i + 1] {
2089 *yval += values[j] * x[minor_indices[j]];
2090 }
2091 }
2092 } else {
2093 for (i, xval) in x.iter().enumerate() {
2094 for j in major_offsets[i]..major_offsets[i + 1] {
2095 y[minor_indices[j]] += values[j] * xval;
2096 }
2097 }
2098 }
2099 }
2100}
2101
2102//////////////////////////////////////////
2103// SMat implementation for CooMatrix
2104//////////////////////////////////////////
2105
2106#[rustfmt::skip]
2107impl SMat for nalgebra_sparse::coo::CooMatrix<f64> {
2108 fn nrows(&self) -> usize { self.nrows() }
2109 fn ncols(&self) -> usize { self.ncols() }
2110 fn nnz(&self) -> usize { self.nnz() }
2111
2112 /// takes an n-vector x and returns A*x in y
2113 fn svd_opa(&self, x: &[f64], y: &mut [f64], transposed: bool) {
2114 let nrows = if transposed { self.ncols() } else { self.nrows() };
2115 let ncols = if transposed { self.nrows() } else { self.ncols() };
2116 assert_eq!(x.len(), ncols, "svd_opa: x must be A.ncols() in length, x = {}, A.ncols = {}", x.len(), ncols);
2117 assert_eq!(y.len(), nrows, "svd_opa: y must be A.nrows() in length, y = {}, A.nrows = {}", y.len(), nrows);
2118
2119 y.fill(0.0);
2120 if transposed {
2121 for (i, j, v) in self.triplet_iter() {
2122 y[j] += v * x[i];
2123 }
2124 } else {
2125 for (i, j, v) in self.triplet_iter() {
2126 y[i] += v * x[j];
2127 }
2128 }
2129 }
2130}
2131
2132//////////////////////////////////////////
2133// Tests
2134//////////////////////////////////////////
2135
#[cfg(test)]
mod tests {
    use super::*;
    use nalgebra_sparse::{coo::CooMatrix, csc::CscMatrix, csr::CsrMatrix};

    // Compile-time probes: `is_normal` only accepts well-behaved owned types
    // (Clone + Sized + Send + Sync + Unpin), while `is_dynamic_trait` also
    // accepts unsized types such as trait objects.
    fn is_normal<T: Clone + Sized + Send + Sync + Unpin>() {}
    fn is_dynamic_trait<T: ?Sized>() {}

    #[test]
    fn normal_types() {
        is_normal::<SvdRec>();
        is_normal::<Diagnostics>();
        is_normal::<Store>();
        is_normal::<WorkSpace>();
        is_normal::<DMat>();
        is_normal::<SVDRawRec>();
    }

    #[test]
    fn dynamic_types() {
        is_dynamic_trait::<dyn SMat>();
    }

    #[test]
    fn coo_csc_csr() {
        // All three sparse formats must produce identical decompositions
        // when the LAS2 seed is pinned.
        let triplets = CooMatrix::try_from_triplets(4, 4, vec![1, 2], vec![0, 1], vec![3.0, 4.0]).unwrap();
        let by_cols = CscMatrix::from(&triplets);
        let by_rows = CsrMatrix::from(&by_cols);
        assert_eq!(svd_dim_seed(&triplets, 3, 12345), svd_dim_seed(&by_cols, 3, 12345));
        assert_eq!(svd_dim_seed(&by_cols, 3, 12345), svd_dim_seed(&by_rows, 3, 12345));
    }

    #[test]
    #[rustfmt::skip]
    fn recomp() {
        let mut m = CooMatrix::<f64>::new(3, 3);
        m.push(0, 0, 1.0); m.push(0, 1, 16.0); m.push(0, 2, 49.0);
        m.push(1, 0, 4.0); m.push(1, 1, 25.0); m.push(1, 2, 64.0);
        m.push(2, 0, 9.0); m.push(2, 1, 36.0); m.push(2, 2, 81.0);

        // `recompose` must agree with the manual reconstruction M = USV*
        // (ut and vt are returned in transposed form).
        let result = svd(&m).unwrap();
        let rebuilt = result.ut.t().dot(&Array2::from_diag(&result.s)).dot(&result.vt);
        assert_eq!(result.recompose(), rebuilt);
    }

    #[test]
    #[rustfmt::skip]
    fn basic_2x2() {
        // M = [ [ 4,  0 ],
        //       [ 3, -5 ] ]
        let mut m = CooMatrix::<f64>::new(2, 2);
        m.push(0, 0, 4.0);
        m.push(1, 0, 3.0);
        m.push(1, 1, -5.0);

        let result = svd(&m).unwrap();
        assert_eq!(result.d, result.ut.nrows());
        assert_eq!(result.d, result.s.dim());
        assert_eq!(result.d, result.vt.nrows());

        // ut and vt come back transposed, so M = ut' * S * vt.
        let rebuilt = result.ut.t().dot(&Array2::from_diag(&result.s)).dot(&result.vt);
        assert_eq!(result.recompose(), rebuilt);

        let epsilon = 1.0e-12;
        assert_eq!(result.d, 2);
        assert!((rebuilt[[0, 0]] - 4.0).abs() < epsilon);
        assert!((rebuilt[[0, 1]] - 0.0).abs() < epsilon);
        assert!((rebuilt[[1, 0]] - 3.0).abs() < epsilon);
        assert!((rebuilt[[1, 1]] - -5.0).abs() < epsilon);

        // Exact singular values are sqrt(40) and sqrt(10).
        assert!((result.s[0] - 6.3245553203368).abs() < epsilon);
        assert!((result.s[1] - 3.1622776601684).abs() < epsilon);
    }

    #[test]
    #[rustfmt::skip]
    fn identity_3x3() {
        // The 3x3 identity has all singular values equal to 1; the
        // decomposition reports a single value here (d == 1).
        let mut m = CooMatrix::<f64>::new(3, 3);
        m.push(0, 0, 1.0);
        m.push(1, 1, 1.0);
        m.push(2, 2, 1.0);

        let result = svd(&CscMatrix::from(&m)).unwrap();
        assert_eq!(result.d, result.ut.nrows());
        assert_eq!(result.d, result.s.dim());
        assert_eq!(result.d, result.vt.nrows());

        let epsilon = 1.0e-12;
        assert_eq!(result.d, 1);
        assert!((result.s[0] - 1.0).abs() < epsilon);
    }
}