faer/operator/eigen/
mod.rs

1use super::*;
2use crate::assert;
3use crate::linalg::matmul::matmul;
4use linalg::evd::schur;
5
/// floor applied to the projection subspace dimensions: the drivers clamp the
/// user-provided `min_dim`/`max_dim` to at least `MIN_DIM`/`2 * MIN_DIM`
/// (see `partial_eigen` / `partial_eigen_scratch`).
const MIN_DIM: usize = 32;
7
/// partial eigendecomposition tuning parameters.
#[derive(Debug, Copy, Clone)]
pub struct PartialEigenParams {
	/// minimum projection subspace dimension.
	///
	/// a value of `0` (the default) lets the driver pick one: the drivers clamp
	/// this to at least `max(MIN_DIM, n_eigval)` and at most the matrix dimension.
	pub min_dim: usize,
	/// maximum projection subspace dimension.
	///
	/// a value of `0` (the default) lets the driver pick one: the drivers clamp
	/// this to at least `max(2 * MIN_DIM, 2 * n_eigval)` and at most the matrix dimension.
	pub max_dim: usize,
	/// maximum number of algorithm restarts.
	pub max_restarts: usize,

	#[doc(hidden)]
	pub non_exhaustive: NonExhaustive,
}
21
/// partial eigendecomposition result information.
#[derive(Debug, Copy, Clone)]
pub struct PartialEigenInfo {
	/// number of converged eigenvalues and eigenvectors.
	pub n_converged_eigen: usize,

	#[doc(hidden)]
	pub non_exhaustive: NonExhaustive,
}
31
32impl Default for PartialEigenParams {
33	fn default() -> Self {
34		Self {
35			min_dim: 0,
36			max_dim: 0,
37			max_restarts: 1000,
38			non_exhaustive: NonExhaustive(()),
39		}
40	}
41}
42
/// extends an arnoldi decomposition `A V = V H + f e*` from column `start` to
/// column `end` (inclusive, 1-based column indices).
///
/// for each new column `j`, applies `A` to the latest basis vector, projects the
/// result against the existing basis columns `V[.., ..j]` (classical gram-schmidt
/// with iterative refinement), stores the projection coefficients in column
/// `j - 1` of `H`, and normalizes the remainder into `V[.., j]`. the subdiagonal
/// entry `H[(j, j - 1)]` receives the remainder's norm.
#[math]
fn iterate_arnoldi<T: ComplexField>(A: &dyn LinOp<T>, H: MatMut<'_, T>, V: MatMut<'_, T>, start: usize, end: usize, par: Par, stack: &mut MemStack) {
	let mut V = V;
	let mut H = H;

	for j in start..end + 1 {
		// `H` is shadowed by the column being filled; `H[i]` below indexes rows
		// of column `j - 1`.
		let mut H = H.rb_mut().col_mut(j - 1);
		H.fill(zero());

		let (V, Vnext) = V.rb_mut().split_at_col_mut(j);
		let V = V.rb();

		// candidate next basis vector: `A` applied to the latest column.
		let mut Vnext = Vnext.col_mut(0);
		A.apply(Vnext.rb_mut().as_mat_mut(), V.col(j - 1).as_mat(), par, stack);

		// per-column convergence flags for the reorthogonalization passes.
		let (mut converged, _) = stack.collect(core::iter::repeat_n(false, j));

		let mut h = H.rb_mut().get_mut(..j);

		// first orthogonalization pass against all existing basis columns.
		for i in 0..j {
			let r = V.col(i).adjoint() * Vnext.rb();
			zip!(Vnext.rb_mut(), V.col(i)).for_each(|unzip!(y, x): Zip!(&mut T, &T)| *y = *y - r * *x);
			h[i] = r;
		}

		// relative tolerance used to decide when a column no longer needs
		// another refinement pass.
		let f = from_f64::<T::Real>(Ord::max(j, 8) as f64) * eps::<T::Real>();

		// iterative refinement: re-project against columns whose correction was
		// still significant, accumulating the corrections into `h`, until every
		// column's correction is negligible relative to the remainder's norm.
		loop {
			let mut all_true = true;
			for i in 0..j {
				if !converged[i] {
					all_true = false;

					let r = V.col(i).adjoint() * Vnext.rb();
					zip!(Vnext.rb_mut(), V.col(i)).for_each(|unzip!(y, x): Zip!(&mut T, &T)| *y = *y - r * *x);
					h[i] = h[i] + r;

					converged[i] = abs(r) < f * Vnext.norm_l2();
				}
			}
			if all_true {
				break;
			}
		}

		// normalize the remainder into the new basis column; the norm becomes
		// the subdiagonal entry `H[(j, j - 1)]` (index `j` of the shadowed column).
		let norm = Vnext.norm_l2();
		if norm > zero() {
			let norm_inv = recip(norm);
			zip!(&mut Vnext).for_each(|unzip!(v)| *v = mul_real(*v, norm_inv));
		}
		H[j] = from_real(norm);
	}
}
96
/// swaps two adjacent diagonal blocks of a (quasi-)triangular schur matrix,
/// dispatching on whether `T` is real or complex.
///
/// for real `T`, the blocks starting at `j0` have sizes `n1` and `n2`; for
/// complex `T`, only 1x1 blocks exist, so `n1 == n2 == 1` is required. the
/// transformation is accumulated into `q` when provided.
fn schur_swap<T: ComplexField>(a: MatMut<T>, q: Option<MatMut<T>>, j0: usize, n1: usize, n2: usize) -> isize {
	if const { T::IS_REAL } {
		// SAFETY: this relies on `T::IS_REAL` implying that `T` and `T::Real`
		// are the same type, making the `MatMut` transmutes sound
		// (NOTE(review): invariant assumed from the trait contract — confirm).
		unsafe { schur::real_schur::schur_swap::<T::Real>(core::mem::transmute(a), core::mem::transmute(q), j0, n1, n2) }
	} else {
		assert!(all(n1 == 1, n2 == 1));
		schur::complex_schur::schur_swap(a, q, j0)
	}
}
105
/// moves the diagonal block containing row `ifst` of the (quasi-)triangular
/// matrix `A` to position `ilst`, via a sequence of adjacent block swaps,
/// accumulating the transformation into `Q` when provided.
///
/// this follows the structure of LAPACK's `dtrexc`/`ztrexc`: for real `T`, 2x2
/// diagonal blocks (detected through nonzero subdiagonal entries) are moved as
/// a unit; for complex `T`, all blocks are 1x1. `nbf == 3` is used below as a
/// sentinel meaning "the current 2x2 block split into two 1x1 blocks".
#[math]
fn reorder_schur<T: ComplexField>(mut A: MatMut<'_, T>, mut Q: Option<MatMut<'_, T>>, mut ifst: usize, mut ilst: usize) {
	let zero = zero::<T>();
	let n = A.nrows();

	// *
	// * Determine the first row of the specified block and find out
	// * if it is 1-by-1 or 2-by-2.
	// *
	let mut nbf = 1;
	if const { T::IS_REAL } {
		if ifst > 0 {
			if A[(ifst, ifst - 1)] != zero {
				ifst -= 1;
			}
		}

		if ifst < n - 1 {
			if A[(ifst + 1, ifst)] != zero {
				nbf = 2;
			}
		}
	}

	// *
	// * Determine the first row of the final block
	// * and find out if it is 1-by-1 or 2-by-2.
	// *
	let mut nbl = 1;
	if const { T::IS_REAL } {
		if ilst > 0 {
			if A[(ilst, ilst - 1)] != zero {
				ilst = ilst - 1;
			}
		}
		if ilst < n - 1 {
			if A[(ilst + 1, ilst)] != zero {
				nbl = 2
			}
		}
	}

	if ifst == ilst {
		return;
	}

	if ifst < ilst {
		// adjust the target position when the source and destination blocks
		// have different sizes, as in dtrexc.
		if nbf == 2 && nbl == 1 {
			ilst -= 1;
		}
		if nbf == 1 && nbl == 2 {
			ilst += 1;
		}

		let mut here = ifst;
		// * Swap with next one below.
		loop {
			if nbf == 1 || nbf == 2 {
				// * Current block either 1-by-1 or 2-by-2.
				let mut nbnext = 1;
				if const { T::IS_REAL } {
					if here + nbf + 1 <= n - 1 {
						if A[(here + nbf + 1, here + nbf)] != zero {
							nbnext = 2;
						}
					}
				}

				schur_swap(A.rb_mut(), Q.rb_mut(), here, nbf, nbnext);

				here += nbnext;
				// * Test if 2-by-2 block breaks into two 1-by-1 blocks.
				if const { T::IS_REAL } {
					if nbf == 2 {
						if A[(here + 1, here)] == zero {
							// sentinel: block split into two 1x1 blocks.
							nbf = 3;
						}
					}
				}
			} else if const { T::IS_REAL } {
				// * Current block consists of two 1-by-1 blocks, each of which
				// * must be swapped individually.
				let mut nbnext = 1;
				if here + 3 <= n - 1 {
					if A[(here + 3, here + 2)] != zero {
						nbnext = 2;
					}
				}

				schur_swap(A.rb_mut(), Q.rb_mut(), here + 1, 1, nbnext);

				if nbnext == 1 {
					// * Swap two 1-by-1 blocks.
					schur_swap(A.rb_mut(), Q.rb_mut(), here, 1, 1);
					here += 1;
				} else {
					// * Recompute NBNEXT in case of 2-by-2 split.
					if A[(here + 2, here + 1)] == zero {
						nbnext = 1;
					}

					if nbnext == 2 {
						// * 2-by-2 block did not split.
						schur_swap(A.rb_mut(), Q.rb_mut(), here, 1, nbnext);
						here += 2;
					} else {
						// * 2-by-2 block did split.
						schur_swap(A.rb_mut(), Q.rb_mut(), here, 1, 1);
						here += 1;
						schur_swap(A.rb_mut(), Q.rb_mut(), here, 1, 1);
						here += 1;
					}
				}
			}

			if here >= ilst {
				break;
			}
		}
	} else {
		// moving the block upwards (`ifst > ilst`): swap with the block above
		// until the target position is reached.
		let mut here = ifst;

		loop {
			// * Swap with next one below.
			if nbf == 1 || nbf == 2 {
				// * Current block either 1-by-1 or 2-by-2.
				let mut nbnext = 1;
				if const { T::IS_REAL } {
					if here >= 2 {
						if A[(here - 1, here - 2)] != zero {
							nbnext = 2;
						}
					}
				}
				schur_swap(A.rb_mut(), Q.rb_mut(), here - nbnext, nbnext, nbf);
				here -= nbnext;

				// * Test if 2-by-2 block breaks into two 1-by-1 blocks.
				if const { T::IS_REAL } {
					if nbf == 2 {
						if A[(here + 1, here)] == zero {
							// sentinel: block split into two 1x1 blocks.
							nbf = 3;
						}
					}
				}
			} else if const { T::IS_REAL } {
				// * Current block consists of two 1-by-1 blocks, each of which
				// * must be swapped individually.
				let mut nbnext = 1;
				if here >= 2 {
					if A[(here - 1, here - 2)] != zero {
						nbnext = 2;
					}
				}

				schur_swap(A.rb_mut(), Q.rb_mut(), here - nbnext, nbnext, 1);
				if nbnext == 1 {
					// * Swap two 1-by-1 blocks.
					schur_swap(A.rb_mut(), Q.rb_mut(), here, nbnext, 1);
					here -= 1;
				} else {
					// * Recompute NBNEXT in case of 2-by-2 split.
					if A[(here, here - 1)] == zero {
						nbnext = 1;
					}
					if nbnext == 2 {
						// * 2-by-2 block did not split.
						schur_swap(A.rb_mut(), Q.rb_mut(), here - 1, 2, 1);
						here -= 2;
					} else {
						// * 2-by-2 block did split.
						schur_swap(A.rb_mut(), Q.rb_mut(), here, 1, 1);
						here -= 1;
						schur_swap(A.rb_mut(), Q.rb_mut(), here, 1, 1);
						here -= 1;
					}
				}
			}

			if here <= ilst {
				break;
			}
		}
	}
}
291
/// real-arithmetic implementation of the partial eigendecomposition, using an
/// implicitly restarted arnoldi iteration (krylov–schur style restarting).
///
/// writes up to `n_eigval` eigenvalues (ordered by decreasing magnitude) into
/// `eigvals` and the matching eigenvectors into the columns of `eigvecs`,
/// returning the number of entries written. complex-conjugate eigenvalue pairs
/// of the real operator are emitted as `a + bi` followed by `a - bi`, with
/// conjugate eigenvectors.
///
/// `min_dim`/`max_dim` bound the projection subspace, `tol` is the residual
/// threshold for locking converged ritz values, and `restarts` caps the number
/// of restart cycles. when `max_dim >= n`, a dense schur decomposition of the
/// full operator is computed instead of iterating.
#[math]
fn partial_schur_real_imp<T: RealField>(
	eigvecs: MatMut<'_, Complex<T>>,
	eigvals: &mut [Complex<T>],

	A: &dyn LinOp<T>,
	v0: ColRef<'_, T>,
	min_dim: usize,
	max_dim: usize,
	n_eigval: usize,
	tol: T,
	restarts: usize,
	par: Par,
	stack: &mut MemStack,
) -> usize {
	let n = A.nrows();

	// workspace: H is the (max_dim + 1) x max_dim arnoldi hessenberg matrix
	// (the extra row holds the residual coefficients), V the orthonormal basis.
	let (mut H, stack) = temp_mat_zeroed::<T, _, _>(max_dim + 1, max_dim, stack);
	let mut H = H.as_mat_mut();
	let (mut V, stack) = temp_mat_zeroed::<T, _, _>(n, max_dim + 1, stack);
	let mut V = V.as_mat_mut();
	let (mut X, stack) = temp_mat_zeroed::<T, _, _>(max_dim, max_dim, stack);
	let mut X = X.as_mat_mut();
	let (mut vecs, stack) = temp_mat_zeroed::<T, _, _>(max_dim, max_dim, stack);
	let mut vecs = vecs.as_mat_mut();
	let (mut tmp, stack) = temp_mat_zeroed::<T, _, _>(n, max_dim, stack);
	let mut tmp = tmp.as_mat_mut();
	// number of locked (converged) ritz values at the front of the subspace.
	let mut active = 0usize;
	if max_dim < n {
		let (mut Q, stack) = temp_mat_zeroed::<T, _, _>(max_dim, max_dim, stack);
		let mut Q = Q.as_mat_mut();

		let (mut residual, stack) = temp_mat_zeroed::<T, _, _>(max_dim, 1, stack);
		let mut residual = residual.as_mat_mut().col_mut(0);
		// two columns: real and imaginary parts of the ritz values.
		let (mut w, stack) = temp_mat_zeroed::<T, _, _>(max_dim, 2, stack);
		let mut w = w.as_mat_mut();

		let block_size = linalg::qr::no_pivoting::factor::recommended_block_size::<T>(max_dim, max_dim);
		let (mut householder, stack) = temp_mat_zeroed::<T, _, _>(block_size, max_dim, stack);
		let mut householder = householder.as_mat_mut();

		// initialize the first basis column from `v0`, falling back to a
		// constant unit vector when `v0` is (numerically) zero.
		let f = v0.norm_l2();
		if f > min_positive() {
			let f = recip(f);
			zip!(V.rb_mut().col_mut(0), v0).for_each(|unzip!(y, x): Zip!(&mut T, &T)| *y = f * *x);
		} else {
			// split `n` into 32-bit halves so the conversion to `T` stays exact.
			let n0 = n as u32;
			let n1 = (n >> 32) as u32;

			let n = from_f64::<T>(n0 as f64) + from_f64::<T>(n1 as f64);
			let f = recip(sqrt(n));

			zip!(V.rb_mut().col_mut(0)).for_each(|unzip!(y)| *y = copy(f));
		}

		iterate_arnoldi(A, H.as_mut(), V.as_mut(), 1, min_dim, par, stack);
		let mut k = min_dim;

		for iter in 0..restarts {
			_ = iter;

			// expand the krylov subspace from the current size `k` up to `max_dim`.
			iterate_arnoldi(A, H.as_mut(), V.as_mut(), k + 1, max_dim, par, stack);

			// magnitude of the arnoldi residual coefficient, used to scale the
			// per-ritz-value residual estimates below.
			let Hmm = abs(H[(max_dim, max_dim - 1)]);

			let n = max_dim - active;
			let (mut w_re, mut w_im) = w.rb_mut().get_mut(active..max_dim, ..).two_cols_mut(0, 1);

			Q.fill(zero());
			Q.rb_mut().diagonal_mut().fill(one());

			let mut Q_slice = Q.rb_mut().get_mut(active..max_dim, active..max_dim);
			let mut H_slice = H.rb_mut().get_mut(active..max_dim, active..max_dim);

			// reduce the active window back to hessenberg form and accumulate
			// the transformation into `Q_slice`.
			{
				let n = max_dim - active;

				let block_size = linalg::qr::no_pivoting::factor::recommended_block_size::<T>(n, n);
				let mut householder = householder.rb_mut().get_mut(..block_size, ..n - 1);

				linalg::evd::hessenberg::hessenberg_in_place(H_slice.rb_mut(), householder.rb_mut(), par, stack, default());

				linalg::householder::apply_block_householder_sequence_on_the_right_in_place_with_conj(
					H_slice.rb().submatrix(1, 0, n - 1, n - 1),
					householder.rb(),
					Conj::No,
					Q_slice.rb_mut().submatrix_mut(1, 1, n - 1, n - 1),
					par,
					stack,
				);

				// zero out the entries below the first subdiagonal left over
				// from the householder reduction.
				for j in 0..n {
					for i in j + 2..n {
						H_slice[(i, j)] = zero();
					}
				}
			}

			// real schur decomposition of the active window; ritz values land
			// in `w_re`/`w_im`.
			schur::real_schur::multishift_qr(
				true,
				H_slice.rb_mut(),
				Some(Q_slice.rb_mut()),
				w_re.rb_mut(),
				w_im.rb_mut(),
				0,
				n,
				par,
				stack,
				auto!(T),
			);

			// selection sort of the schur blocks by decreasing eigenvalue
			// magnitude, reordering 1x1/2x2 blocks with `reorder_schur` and
			// keeping `w_re`/`w_im` in sync.
			let mut j = 0usize;
			while j < n {
				let mut i = j;
				let mut idx = i;
				let mut max = zero::<T>();
				while i < n {
					// a nonzero subdiagonal marks a 2x2 (complex-pair) block.
					let cplx = i + 1 < n && H_slice[(i + 1, i)] != zero::<T>();
					let (v, bs) = if cplx { (hypot(w_re[i], w_im[i]), 2) } else { (abs(w_re[i]), 1) };

					if v > max {
						max = v;
						idx = i;
					}

					i += bs;
				}

				let i = idx;
				let cplx = i + 1 < n && H_slice[(i + 1, i)] != zero::<T>();
				let bs = if cplx { 2 } else { 1 };
				if i != j {
					reorder_schur(H_slice.rb_mut(), Some(Q_slice.rb_mut()), i, j);

					let x_re = w_re.rb_mut().try_as_col_major_mut().unwrap().as_slice_mut();
					let x_im = w_im.rb_mut().try_as_col_major_mut().unwrap().as_slice_mut();

					for x in [x_re, x_im] {
						x[j..i + bs].rotate_right(bs)
					}
				}

				j += bs;
			}

			// eigenvectors of the (quasi-triangular) schur factor, rotated back
			// through `Q_slice` into the arnoldi basis coordinates.
			let mut X = X.rb_mut().get_mut(..n, ..n);
			linalg::evd::evd_from_real_schur_imp(H_slice.rb(), X.as_mut(), par, auto!(T));
			let mut vecs = vecs.rb_mut().get_mut(..n, ..n);
			matmul(vecs.rb_mut(), Accum::Replace, Q_slice.rb(), X.rb(), one(), par);
			let vecs = vecs.rb();

			// update the locked rows of `H` for the basis rotation.
			let mut H_tmp = tmp.rb_mut().get_mut(..active, ..);
			matmul(H_tmp.rb_mut(), Accum::Replace, H.rb().get(..active, ..), Q.rb(), one(), par);
			H.rb_mut().get_mut(..active, ..).copy_from(&H_tmp);

			// AV = VH
			// x in span(V)
			// Ax = AV y = (VH + f e*) y = k V y + f e* y = kx + f * y[-1]
			// residual estimate per ritz value: |H[(max_dim, max_dim-1)]| times
			// the last component of the corresponding ritz vector (treating a
			// complex pair's two columns as real/imaginary parts).
			let mut j = 0usize;
			while j < n {
				let re = &vecs[(max_dim - active - 1, j)];
				if w_im[j] != zero::<T>() {
					let im = &vecs[(max_dim - active - 1, j + 1)];
					let res = Hmm * hypot(*re, *im);

					residual[active + j] = copy(res);
					residual[active + j + 1] = res;
					j += 2;
				} else {
					residual[active + j] = Hmm * abs(*re);
					j += 1;
				}
			}

			// classification of each ritz value for the restart:
			// lock converged wanted values, retain unconverged ones, purge the rest.
			#[derive(Copy, Clone, PartialEq, Eq, Debug)]
			enum Group {
				Lock,
				Retain,
				Purge,
			}

			let mut groups = alloc::vec![Group::Purge; max_dim];

			let mut nev = n_eigval;
			// don't split a complex-conjugate pair at the wanted/unwanted boundary.
			if w_im[nev - active - 1] > zero::<T>() {
				nev += 1;
			}

			let mut nlock = 0usize;
			for j in 0..nev {
				if residual[j] <= tol {
					groups[j] = Group::Lock;
					nlock += 1;
				} else {
					groups[j] = Group::Retain;
				}
			}

			// target subspace size after truncation for the next cycle.
			let ideal_size = Ord::min(nlock + min_dim, (min_dim + max_dim) / 2);
			k = nev;

			let mut i = nev;
			while i < max_dim {
				let cplx = w[(i, 1)] != zero::<T>();
				let bs = if cplx { 2 } else { 1 };

				let group;
				if k < ideal_size && residual[i] > tol {
					group = Group::Retain;
					k += bs;
				} else {
					group = Group::Purge;
				}

				for k in 0..bs {
					groups[i + k] = group;
				}
				i += bs;
			}

			// leading run of already-locked values that can be left untouched.
			let mut purge = 0usize;
			while purge < active && groups[purge] == Group::Lock {
				purge += 1;
			}

			// partition the schur form: locked blocks first (at `lo`), retained
			// blocks next (at `mi`), purged blocks pushed to the back.
			let mut lo = 0usize;
			let mut mi = 0usize;
			let mut hi = 0usize;

			while hi < max_dim {
				let cplx = hi + 1 < max_dim && H[(hi + 1, hi)] != zero::<T>();
				let bs = if cplx { 2 } else { 1 };

				match groups[hi] {
					Group::Lock => {
						reorder_schur(H.rb_mut().get_mut(..max_dim, ..max_dim), Some(Q.as_mut()), hi, lo);
						for k in 0..bs {
							residual[lo + k] = copy(residual[hi + k]);
						}

						lo += bs;
						mi += bs;
						hi += bs;
					},
					Group::Retain => {
						reorder_schur(H.rb_mut().get_mut(..max_dim, ..max_dim), Some(Q.as_mut()), hi, mi);
						mi += bs;
						hi += bs;
					},
					Group::Purge => {
						hi += bs;
					},
				}
			}

			// rotate the basis columns that changed: V <- V * Q on the
			// non-frozen range.
			let mut V_tmp = tmp.rb_mut().get_mut(.., purge..k);
			matmul(
				V_tmp.rb_mut(),
				Accum::Replace,
				V.rb().get(.., purge..max_dim),
				Q.rb().get(purge..max_dim, purge..k),
				one(),
				par,
			);
			V.rb_mut().get_mut(.., purge..k).copy_from(&V_tmp);

			// rotate the residual-coefficient row of `H` as well.
			let mut b_tmp = tmp.rb_mut().get_mut(0, ..);
			matmul(b_tmp.rb_mut(), Accum::Replace, H.rb().get(max_dim, ..), Q.rb(), one(), par);
			H.rb_mut().get_mut(max_dim, ..).copy_from(b_tmp);

			// truncate: the residual vector becomes basis column `k`, and the
			// residual-coefficient row moves up to row `k`.
			let (mut x, y) = V.rb_mut().two_cols_mut(k, max_dim);
			x.copy_from(&y);

			let (mut x, mut y) = H.rb_mut().two_rows_mut(k, max_dim);
			x.copy_from(&y);
			y.fill(zero());

			active = nlock;
			if nlock >= n_eigval {
				break;
			}
		}
	} else {
		// dense path: the subspace covers the whole operator, so compute a full
		// real schur decomposition directly.
		let mut H = H.rb_mut().get_mut(..n, ..n);
		let mut V = V.rb_mut().get_mut(..n, ..n);
		V.rb_mut().diagonal_mut().fill(one());
		// materialize the operator: H <- A * I.
		A.apply(H.rb_mut(), V.rb(), par, stack);

		let (mut w, stack) = temp_mat_zeroed::<T, _, _>(n, 2, stack);
		let mut w = w.as_mat_mut();
		let (mut w_re, mut w_im) = w.rb_mut().get_mut(active..max_dim, ..).two_cols_mut(0, 1);

		{
			let block_size = linalg::qr::no_pivoting::factor::recommended_block_size::<T>(n, n);
			let (mut householder, stack) = temp_mat_zeroed::<T, _, _>(block_size, n - 1, stack);
			let mut householder = householder.as_mat_mut();

			linalg::evd::hessenberg::hessenberg_in_place(H.rb_mut(), householder.rb_mut(), par, stack, default());

			linalg::householder::apply_block_householder_sequence_on_the_right_in_place_with_conj(
				H.rb().submatrix(1, 0, n - 1, n - 1),
				householder.rb(),
				Conj::No,
				V.rb_mut().submatrix_mut(1, 1, n - 1, n - 1),
				par,
				stack,
			);

			for j in 0..n {
				for i in j + 2..n {
					H[(i, j)] = zero();
				}
			}
		}

		schur::real_schur::multishift_qr(
			true,
			H.rb_mut(),
			Some(V.rb_mut()),
			w_re.rb_mut(),
			w_im.rb_mut(),
			0,
			n,
			par,
			stack,
			auto!(T),
		);
		active = n;
	}

	// extraction: compute eigenvectors from the schur form, map them back to
	// the original space, then emit the `n_eigval` largest-magnitude ones.
	let n = active;
	let H = H.rb().get(..n, ..n);
	let V = V.rb().get(.., ..n);
	let mut X = X.rb_mut().get_mut(..n, ..n);
	linalg::evd::evd_from_real_schur_imp(H, X.as_mut(), par, auto!(T));
	let mut vecs = tmp.rb_mut().get_mut(.., ..n);
	matmul(vecs.rb_mut(), Accum::Replace, V, X.rb(), one(), par);
	let V = vecs.rb();

	let (mut norms, stack) = stack.make_with(n, |_| zero::<T::Real>());
	let (mut perm, stack) = stack.make_with(n, |_| 0usize);
	let _ = stack;

	let perm = &mut *perm;
	let norms = &mut *norms;

	// per-block eigenvalue magnitudes; both halves of a 2x2 block map to the
	// block's first index so the pair stays together after sorting.
	let mut j = 0usize;
	while j < n {
		let cplx = j + 1 < n && H[(j + 1, j)] != zero::<T>();
		let bs = if cplx { 2 } else { 1 };
		let re = &H[(j, j)];

		if cplx {
			// imaginary part of the pair, from the off-diagonal product of the
			// standardized 2x2 block.
			let im = sqrt(abs(H[(j + 1, j)])) * sqrt(abs(H[(j, j + 1)]));
			norms[j] = hypot(*re, im);
			norms[j + 1] = copy(norms[j]);

			perm[j] = j;
			perm[j + 1] = j;
		} else {
			norms[j] = abs(*re);
			perm[j] = j;
		}
		j += bs;
	}
	// sort indices by decreasing magnitude.
	perm.sort_unstable_by(|&i, &j| {
		if norms[i] > norms[j] {
			core::cmp::Ordering::Less
		} else if norms[i] < norms[j] {
			core::cmp::Ordering::Greater
		} else {
			core::cmp::Ordering::Equal
		}
	});

	let mut eigvecs = eigvecs;
	let limit = Ord::min(n, n_eigval);

	let mut idx = 0usize;
	while idx < limit {
		let j = perm[idx];

		let cplx = j + 1 < n && H[(j + 1, j)] != zero::<T>();
		let bs = if cplx { 2 } else { 1 };
		let re = &H[(j, j)];
		let v_re = V.col(j);

		if cplx {
			// complex-conjugate pair: columns j / j + 1 hold the real and
			// imaginary parts of the eigenvector.
			let v_im = V.col(j + 1);
			let im = sqrt(abs(H[(j + 1, j)])) * sqrt(abs(H[(j, j + 1)]));

			if idx + 1 < limit {
				eigvals[idx + 1] = Complex::new(copy(*re), -im);
			}
			eigvals[idx] = Complex::new(copy(*re), im);
			if idx + 1 < limit {
				let (ej, ej1) = eigvecs.rb_mut().two_cols_mut(idx, idx + 1);
				zip!(ej, ej1, v_re, v_im).for_each(|unzip!(y0, y1, re, im): Zip!(&mut Complex<T>, &mut Complex<T>, &T, &T)| {
					*y0 = Complex::new(copy(*re), copy(*im));
					*y1 = Complex::new(copy(*re), -*im);
				});
			} else {
				// the conjugate partner doesn't fit in the output: emit only
				// the first eigenvector of the pair.
				let ej = eigvecs.rb_mut().col_mut(idx);
				zip!(ej, v_re, v_im).for_each(|unzip!(y0, re, im): Zip!(&mut Complex<T>, &T, &T)| {
					*y0 = Complex::new(copy(*re), copy(*im));
				});
			}
		} else {
			eigvals[idx] = Complex::new(copy(*re), zero());
			zip!(eigvecs.rb_mut().col_mut(idx), v_re).for_each(|unzip!(y, x): Zip!(&mut Complex<T>, &T)| *y = Complex::new(copy(*x), zero()));
		}

		idx += bs;
	}
	limit
}
708
/// complex-arithmetic implementation of the partial eigendecomposition, using
/// an implicitly restarted arnoldi iteration (krylov–schur style restarting).
///
/// same structure as `partial_schur_real_imp`, but simpler since the complex
/// schur form is strictly triangular: all diagonal blocks are 1x1, so no 2x2
/// block bookkeeping is needed. writes up to `n_eigval` eigenvalues (ordered
/// by decreasing magnitude) into `eigvals` and the matching eigenvectors into
/// the columns of `eigvecs`, returning the number of entries written.
#[math]
fn partial_schur_cplx_imp<T: ComplexField>(
	eigvecs: MatMut<'_, Complex<T::Real>>,
	eigvals: &mut [Complex<T::Real>],

	A: &dyn LinOp<T>,
	v0: ColRef<'_, T>,
	min_dim: usize,
	max_dim: usize,
	n_eigval: usize,
	tol: T::Real,
	restarts: usize,
	par: Par,
	stack: &mut MemStack,
) -> usize {
	let n = A.nrows();

	// workspace: H is the (max_dim + 1) x max_dim arnoldi hessenberg matrix
	// (the extra row holds the residual coefficients), V the orthonormal basis.
	let (mut H, stack) = temp_mat_zeroed::<T, _, _>(max_dim + 1, max_dim, stack);
	let mut H = H.as_mat_mut();
	let (mut V, stack) = temp_mat_zeroed::<T, _, _>(n, max_dim + 1, stack);
	let mut V = V.as_mat_mut();
	let (mut X, stack) = temp_mat_zeroed::<T, _, _>(max_dim, max_dim, stack);
	let mut X = X.as_mat_mut();
	let (mut vecs, stack) = temp_mat_zeroed::<T, _, _>(max_dim, max_dim, stack);
	let mut vecs = vecs.as_mat_mut();
	let (mut tmp, stack) = temp_mat_zeroed::<T, _, _>(n, max_dim, stack);
	let mut tmp = tmp.as_mat_mut();

	// number of locked (converged) ritz values at the front of the subspace.
	let mut active = 0usize;
	if max_dim < n {
		let (mut w, stack) = temp_mat_zeroed::<T, _, _>(max_dim, 1, stack);
		let mut w = w.as_mat_mut().col_mut(0);
		let (mut residual, stack) = temp_mat_zeroed::<T::Real, _, _>(max_dim, 1, stack);
		let mut residual = residual.as_mat_mut().col_mut(0);
		let (mut Q, stack) = temp_mat_zeroed::<T, _, _>(max_dim, max_dim, stack);
		let mut Q = Q.as_mat_mut();

		let block_size = linalg::qr::no_pivoting::factor::recommended_block_size::<T>(max_dim, max_dim);
		let (mut householder, stack) = temp_mat_zeroed::<T, _, _>(block_size, max_dim, stack);
		let mut householder = householder.as_mat_mut();

		// initialize the first basis column from `v0`, falling back to a
		// constant unit vector when `v0` is (numerically) zero.
		let f = v0.norm_l2();
		if f > min_positive() {
			let f = recip(f);
			zip!(V.rb_mut().col_mut(0), v0).for_each(|unzip!(y, x): Zip!(&mut T, &T)| *y = mul_real(*x, f));
		} else {
			// split `n` into 32-bit halves so the conversion to `T` stays exact.
			let n0 = n as u32;
			let n1 = (n >> 32) as u32;

			let n = from_f64::<T>(n0 as f64) + from_f64::<T>(n1 as f64);
			let f = recip(sqrt(n));

			zip!(V.rb_mut().col_mut(0)).for_each(|unzip!(y)| *y = copy(f));
		}

		iterate_arnoldi(A, H.as_mut(), V.as_mut(), 1, min_dim, par, stack);
		let mut k = min_dim;

		for iter in 0..restarts {
			_ = iter;

			// expand the krylov subspace from the current size `k` up to `max_dim`.
			iterate_arnoldi(A, H.as_mut(), V.as_mut(), k + 1, max_dim, par, stack);

			// arnoldi residual coefficient, used to scale the per-ritz-value
			// residual estimates below.
			let Hmm = copy(H[(max_dim, max_dim - 1)]);

			let n = max_dim - active;
			let mut w = w.rb_mut().get_mut(active..max_dim);

			Q.fill(zero());
			Q.rb_mut().diagonal_mut().fill(one());

			let mut Q_slice = Q.rb_mut().get_mut(active..max_dim, active..max_dim);
			let mut H_slice = H.rb_mut().get_mut(active..max_dim, active..max_dim);

			// reduce the active window back to hessenberg form and accumulate
			// the transformation into `Q_slice`.
			{
				let n = max_dim - active;

				let block_size = linalg::qr::no_pivoting::factor::recommended_block_size::<T>(n, n);
				let mut householder = householder.rb_mut().get_mut(..block_size, ..n - 1);

				linalg::evd::hessenberg::hessenberg_in_place(H_slice.rb_mut(), householder.rb_mut(), par, stack, default());

				linalg::householder::apply_block_householder_sequence_on_the_right_in_place_with_conj(
					H_slice.rb().submatrix(1, 0, n - 1, n - 1),
					householder.rb(),
					Conj::No,
					Q_slice.rb_mut().submatrix_mut(1, 1, n - 1, n - 1),
					par,
					stack,
				);

				// zero out the entries below the first subdiagonal left over
				// from the householder reduction.
				for j in 0..n {
					for i in j + 2..n {
						H_slice[(i, j)] = zero();
					}
				}
			}

			// complex schur decomposition of the active window; ritz values land in `w`.
			schur::complex_schur::multishift_qr(true, H_slice.rb_mut(), Some(Q_slice.rb_mut()), w.rb_mut(), 0, n, par, stack, auto!(T));

			// selection sort of the diagonal entries by decreasing magnitude,
			// reordering the schur form and keeping `w` in sync.
			for j in 0..n {
				let mut idx = j;
				let mut max = zero::<T::Real>();
				for i in j..n {
					let v = abs(w[i]);

					if v > max {
						max = v;
						idx = i;
					}
				}

				let i = idx;
				if i != j {
					reorder_schur(H_slice.rb_mut(), Some(Q_slice.rb_mut()), i, j);
					w.rb_mut().try_as_col_major_mut().unwrap().as_slice_mut()[j..i + 1].rotate_right(1);
				}
			}

			// eigenvectors of the triangular schur factor, rotated back through
			// `Q_slice` into the arnoldi basis coordinates.
			let mut X = X.rb_mut().get_mut(..n, ..n);
			linalg::evd::evd_from_cplx_schur_imp(H_slice.rb(), Conj::No, X.as_mut(), par, auto!(T));
			let mut vecs = vecs.rb_mut().get_mut(..n, ..n);
			matmul(vecs.rb_mut(), Accum::Replace, Q_slice.rb(), X.rb(), one(), par);
			let vecs = vecs.rb();

			// update the locked rows of `H` for the basis rotation.
			let mut H_tmp = tmp.rb_mut().get_mut(..active, ..);
			matmul(H_tmp.rb_mut(), Accum::Replace, H.rb().get(..active, ..), Q.rb(), one(), par);
			H.rb_mut().get_mut(..active, ..).copy_from(&H_tmp);

			// AV = VH
			// x in span(V)
			// Ax = AV y = (VH + f e*) y = k V y + f e* y = kx + f * y[-1]
			// residual estimate per ritz value: |H[(max_dim, max_dim-1)]| times
			// the last component of the corresponding ritz vector.
			let Hmm_abs = abs(Hmm);
			for j in 0..n {
				residual[active + j] = Hmm_abs * abs(vecs[(max_dim - active - 1, j)]);
			}

			// classification of each ritz value for the restart:
			// lock converged wanted values, retain unconverged ones, purge the rest.
			#[derive(Copy, Clone, PartialEq, Eq, Debug)]
			enum Group {
				Lock,
				Retain,
				Purge,
			}

			let mut groups = alloc::vec![Group::Purge; max_dim];

			let nev = n_eigval;

			let mut nlock = 0usize;
			for j in 0..nev {
				if residual[j] <= tol {
					groups[j] = Group::Lock;
					nlock += 1;
				} else {
					groups[j] = Group::Retain;
				}
			}

			// target subspace size after truncation for the next cycle.
			let ideal_size = Ord::min(nlock + min_dim, (min_dim + max_dim) / 2);
			k = nev;

			for i in nev..max_dim {
				let group;
				if k < ideal_size && residual[i] > tol {
					group = Group::Retain;
					k += 1;
				} else {
					group = Group::Purge;
				}

				groups[i] = group;
			}

			// leading run of already-locked values that can be left untouched.
			let mut purge = 0usize;
			while purge < active && groups[purge] == Group::Lock {
				purge += 1;
			}

			// partition the schur form: locked values first (at `lo`), retained
			// values next (at `mi`), purged values pushed to the back.
			let mut lo = 0usize;
			let mut mi = 0usize;
			let mut hi = 0usize;

			while hi < max_dim {
				match groups[hi] {
					Group::Lock => {
						reorder_schur(H.rb_mut().get_mut(..max_dim, ..max_dim), Some(Q.as_mut()), hi, lo);
						residual[lo] = copy(residual[hi]);

						lo += 1;
						mi += 1;
						hi += 1;
					},
					Group::Retain => {
						reorder_schur(H.rb_mut().get_mut(..max_dim, ..max_dim), Some(Q.as_mut()), hi, mi);
						mi += 1;
						hi += 1;
					},
					Group::Purge => {
						hi += 1;
					},
				}
			}

			// rotate the basis columns that changed: V <- V * Q on the
			// non-frozen range.
			let mut V_tmp = tmp.rb_mut().get_mut(.., purge..k);
			matmul(
				V_tmp.rb_mut(),
				Accum::Replace,
				V.rb().get(.., purge..max_dim),
				Q.rb().get(purge..max_dim, purge..k),
				one(),
				par,
			);
			V.rb_mut().get_mut(.., purge..k).copy_from(&V_tmp);

			// rotate the residual-coefficient row of `H` as well.
			let mut b_tmp = tmp.rb_mut().get_mut(0, ..);
			matmul(b_tmp.rb_mut(), Accum::Replace, H.rb().get(max_dim, ..), Q.rb(), one(), par);
			H.rb_mut().get_mut(max_dim, ..).copy_from(b_tmp);

			// truncate: the residual vector becomes basis column `k`, and the
			// residual-coefficient row moves up to row `k`.
			let (mut x, y) = V.rb_mut().two_cols_mut(k, max_dim);
			x.copy_from(&y);

			let (mut x, mut y) = H.rb_mut().two_rows_mut(k, max_dim);
			x.copy_from(&y);
			y.fill(zero());

			active = nlock;
			if nlock >= n_eigval {
				break;
			}
		}
	} else {
		// dense path: the subspace covers the whole operator, so compute a full
		// complex schur decomposition directly.
		let mut H = H.rb_mut().get_mut(..n, ..n);
		let mut V = V.rb_mut().get_mut(..n, ..n);
		V.rb_mut().diagonal_mut().fill(one());
		// materialize the operator: H <- A * I.
		A.apply(H.rb_mut(), V.rb(), par, stack);

		let (mut w, stack) = temp_mat_zeroed::<T, _, _>(n, 1, stack);
		let mut w = w.as_mat_mut();

		{
			let block_size = linalg::qr::no_pivoting::factor::recommended_block_size::<T>(n, n);
			let (mut householder, stack) = temp_mat_zeroed::<T, _, _>(block_size, n - 1, stack);
			let mut householder = householder.as_mat_mut();

			linalg::evd::hessenberg::hessenberg_in_place(H.rb_mut(), householder.rb_mut(), par, stack, default());

			linalg::householder::apply_block_householder_sequence_on_the_right_in_place_with_conj(
				H.rb().submatrix(1, 0, n - 1, n - 1),
				householder.rb(),
				Conj::No,
				V.rb_mut().submatrix_mut(1, 1, n - 1, n - 1),
				par,
				stack,
			);

			for j in 0..n {
				for i in j + 2..n {
					H[(i, j)] = zero();
				}
			}
		}

		schur::complex_schur::multishift_qr(true, H.rb_mut(), Some(V.rb_mut()), w.rb_mut().col_mut(0), 0, n, par, stack, auto!(T));
		active = n;
	}

	// extraction: compute eigenvectors from the schur form, map them back to
	// the original space, then emit the `n_eigval` largest-magnitude ones.
	let n = active;
	let H = H.rb().get(..n, ..n);
	let V = V.rb().get(.., ..n);
	let mut X = X.rb_mut().get_mut(..n, ..n);
	linalg::evd::evd_from_cplx_schur_imp(H, Conj::No, X.as_mut(), par, auto!(T));

	let mut vecs = tmp.rb_mut().get_mut(.., ..n);
	matmul(vecs.rb_mut(), Accum::Replace, V, X.rb(), one(), par);

	let V = vecs.rb();

	// sort the diagonal entries (eigenvalues) by decreasing magnitude.
	let (mut norms, stack) = stack.make_with(n, |j| abs(H[(j, j)]));
	let (mut perm, stack) = stack.make_with(n, |j| j);
	let _ = stack;

	let perm = &mut *perm;
	let norms = &mut *norms;

	perm.sort_unstable_by(|&i, &j| {
		if norms[i] > norms[j] {
			core::cmp::Ordering::Less
		} else if norms[i] < norms[j] {
			core::cmp::Ordering::Greater
		} else {
			core::cmp::Ordering::Equal
		}
	});

	let mut eigvecs = eigvecs;
	let limit = Ord::min(n, n_eigval);

	for idx in 0..limit {
		let j = perm[idx];
		let w = &H[(j, j)];
		let v = V.col(j);

		eigvals[idx] = Complex::new(real(*w), imag(*w));
		zip!(eigvecs.rb_mut().col_mut(idx), v).for_each(|unzip!(y, x): Zip!(&mut Complex<T::Real>, &T)| *y = Complex::new(real(*x), imag(*x)));
	}
	limit
}
1016
/// computes the layout of required workspace for computing the `n_eigval` eigenvalues
/// (and corresponding eigenvectors) of $A$ with the largest magnitude.
///
/// the sizes mirror the allocations performed by `partial_schur_real_imp` /
/// `partial_schur_cplx_imp`, using the same `max_dim` clamping rule as
/// `partial_eigen`.
pub fn partial_eigen_scratch<T: ComplexField>(A: &dyn LinOp<T>, n_eigval: usize, par: Par, params: PartialEigenParams) -> StackReq {
	let n = A.nrows();
	assert!(A.ncols() == n);
	if n == 0 {
		return StackReq::EMPTY;
	}

	let n_eigval = Ord::min(n_eigval, n);

	// same clamping as in `partial_eigen`: at least `2 * MIN_DIM` (or twice the
	// requested count), at most the matrix dimension.
	let max_dim = Ord::min(Ord::max(params.max_dim, Ord::max(2 * MIN_DIM, 2 * n_eigval)), n);

	// ritz values: two real columns (re/im) in the real case, one complex column otherwise.
	let w = temp_mat_scratch::<T>(max_dim, if T::IS_REAL { 2 } else { 1 });
	let residual = temp_mat_scratch::<T::Real>(max_dim, 1);
	let H = temp_mat_scratch::<T>(max_dim + 1, max_dim);
	let V = temp_mat_scratch::<T>(n, max_dim + 1);
	let Q = temp_mat_scratch::<T>(max_dim, max_dim);
	let X = Q;
	let vecs = Q;
	let tmp = temp_mat_scratch::<T>(n, max_dim);

	let block_size = linalg::qr::no_pivoting::factor::recommended_block_size::<T>(max_dim, max_dim);
	let householder = temp_mat_scratch::<T>(block_size, max_dim);
	// `iterate_arnoldi` needs the operator's apply scratch plus the convergence flags.
	let arnoldi = A.apply_scratch(1, par).or(StackReq::new::<bool>(max_dim));

	let hess = linalg::evd::hessenberg::hessenberg_in_place_scratch::<T>(max_dim, block_size, par, default());
	let apply_house = linalg::householder::apply_block_householder_sequence_on_the_right_in_place_scratch::<T>(max_dim - 1, block_size, max_dim - 1);
	let schur = schur::multishift_qr_scratch::<T>(max_dim, max_dim, true, true, par, auto!(T));

	// persistent buffers are live simultaneously (`all_of`); the per-phase
	// scratch requirements are mutually exclusive (`any_of`).
	StackReq::all_of(&[
		w,
		residual,
		H,
		V,
		Q,
		X,
		vecs,
		tmp,
		householder,
		StackReq::any_of(&[hess, apply_house, schur, arnoldi]),
	])
}
1060
/// computes an estimate of the eigenvalues (and corresponding eigenvectors) of $A$ with the largest
/// magnitude until the provided outputs are full or the maximum number of algorithm restarts is
/// reached.
///
/// `v0` is the starting vector for the arnoldi iteration (a zero vector is
/// handled by falling back to a constant vector internally), and `tolerance`
/// is the residual threshold below which a ritz value is considered converged.
/// the number of requested eigenvalues is `eigvals.len()`, clamped to the
/// matrix dimension; the returned info reports how many actually converged.
pub fn partial_eigen<T: ComplexField>(
	eigvecs: MatMut<'_, Complex<T::Real>>,
	eigvals: &mut [Complex<T::Real>],
	A: &dyn LinOp<T>,
	v0: ColRef<'_, T>,
	tolerance: T::Real,
	par: Par,
	stack: &mut MemStack,
	params: PartialEigenParams,
) -> PartialEigenInfo {
	let n = v0.nrows();
	assert!(all(
		eigvals.len() == eigvecs.ncols(),
		A.nrows() == n,
		A.ncols() == n,
		eigvecs.nrows() == n,
	));
	let n_eigval = eigvals.len();
	let n_eigval = Ord::min(n_eigval, n);

	if n == 0 {
		return PartialEigenInfo {
			n_converged_eigen: 0,
			non_exhaustive: NonExhaustive(()),
		};
	}

	// clamp the subspace dimensions: user-provided values (0 means "auto") are
	// raised to sensible floors and capped at the matrix dimension.
	let min_dim = Ord::min(Ord::max(params.min_dim, Ord::max(MIN_DIM, n_eigval)), n);
	let max_dim = Ord::min(Ord::max(params.max_dim, Ord::max(2 * MIN_DIM, 2 * n_eigval)), n);

	let n_eigval = if const { T::IS_REAL } {
		// SAFETY: these transmutes rely on `T::IS_REAL` implying that `T` and
		// `T::Real` are the same type, so `&dyn LinOp<T>` / `ColRef<'_, T>`
		// coincide with their `T::Real` counterparts
		// (NOTE(review): invariant assumed from the trait contract — confirm).
		partial_schur_real_imp::<T::Real>(
			eigvecs,
			eigvals,
			unsafe { core::mem::transmute(A) },
			unsafe { core::mem::transmute(v0) },
			min_dim,
			max_dim,
			n_eigval,
			tolerance,
			params.max_restarts,
			par,
			stack,
		)
	} else {
		partial_schur_cplx_imp(
			eigvecs,
			eigvals,
			A,
			v0,
			min_dim,
			max_dim,
			n_eigval,
			tolerance,
			params.max_restarts,
			par,
			stack,
		)
	};

	PartialEigenInfo {
		n_converged_eigen: n_eigval,
		non_exhaustive: NonExhaustive(()),
	}
}
1129
1130/// computes an estimate of the eigenvalues (and corresponding eigenvectors) of $A$ with the largest
1131/// magnitude, assuming $A$ is self-adjoint, until the provided outputs are full or the maximum
1132/// number of algorithm restarts is reached.
1133pub fn partial_self_adjoint_eigen<T: ComplexField>(
1134	eigvecs: MatMut<'_, T>,
1135	eigvals: &mut [T],
1136	A: &dyn LinOp<T>,
1137	v0: ColRef<'_, T>,
1138	tolerance: T::Real,
1139	par: Par,
1140	stack: &mut MemStack,
1141	params: PartialEigenParams,
1142) -> PartialEigenInfo {
1143	let n = v0.nrows();
1144	assert!(all(
1145		eigvals.len() == eigvecs.ncols(),
1146		A.nrows() == n,
1147		A.ncols() == n,
1148		eigvecs.nrows() == n,
1149	));
1150	let n_eigval = eigvals.len();
1151	let n_eigval = Ord::min(n_eigval, n);
1152
1153	if n == 0 {
1154		return PartialEigenInfo {
1155			n_converged_eigen: 0,
1156			non_exhaustive: NonExhaustive(()),
1157		};
1158	}
1159
1160	let min_dim = Ord::min(Ord::max(params.min_dim, Ord::max(MIN_DIM, n_eigval)), n);
1161	let max_dim = Ord::min(Ord::max(params.max_dim, Ord::max(2 * MIN_DIM, 2 * n_eigval)), n);
1162
1163	let n_eigval = {
1164		super::self_adjoint_eigen::partial_self_adjoint_eigen_imp(
1165			eigvecs,
1166			eigvals,
1167			A,
1168			v0,
1169			min_dim,
1170			max_dim,
1171			n_eigval,
1172			tolerance,
1173			params.max_restarts,
1174			par,
1175			stack,
1176		)
1177	};
1178
1179	PartialEigenInfo {
1180		n_converged_eigen: n_eigval,
1181		non_exhaustive: NonExhaustive(()),
1182	}
1183}
1184
1185/// computes an estimate of the singular values (and corresponding singular vectors) of $A$ with the
1186/// largest magnitude, until the provided outputs are full or the maximum number of algorithm
1187/// restarts is reached.
1188pub fn partial_svd<T: ComplexField>(
1189	left_singular_vecs: MatMut<'_, T>,
1190	right_singular_vecs: MatMut<'_, T>,
1191	singular_vals: &mut [T],
1192	A: &dyn BiLinOp<T>,
1193	v0: ColRef<'_, T>,
1194	tolerance: T::Real,
1195	par: Par,
1196	stack: &mut MemStack,
1197	params: PartialEigenParams,
1198) -> PartialEigenInfo {
1199	let n = v0.nrows();
1200	assert!(all(
1201		singular_vals.len() == left_singular_vecs.ncols(),
1202		singular_vals.len() == right_singular_vecs.ncols(),
1203		A.nrows() == n,
1204		A.ncols() == n,
1205		left_singular_vecs.nrows() == n,
1206		right_singular_vecs.nrows() == n,
1207	));
1208	let n_eigval = singular_vals.len();
1209	let n_eigval = Ord::min(n_eigval, n);
1210
1211	if n == 0 {
1212		return PartialEigenInfo {
1213			n_converged_eigen: 0,
1214			non_exhaustive: NonExhaustive(()),
1215		};
1216	}
1217
1218	let min_dim = Ord::min(Ord::max(params.min_dim, Ord::max(MIN_DIM, n_eigval)), n);
1219	let max_dim = Ord::min(Ord::max(params.max_dim, Ord::max(2 * MIN_DIM, 2 * n_eigval)), n);
1220
1221	let n_eigval = {
1222		super::svd::partial_svd_imp(
1223			left_singular_vecs,
1224			right_singular_vecs,
1225			singular_vals,
1226			A,
1227			v0,
1228			min_dim,
1229			max_dim,
1230			n_eigval,
1231			tolerance,
1232			params.max_restarts,
1233			par,
1234			stack,
1235		)
1236	};
1237
1238	PartialEigenInfo {
1239		n_converged_eigen: n_eigval,
1240		non_exhaustive: NonExhaustive(()),
1241	}
1242}
1243
#[cfg(test)]
mod tests {
	use super::*;
	use crate::stats::prelude::*;
	use crate::{Scale, assert};
	use rand::prelude::*;

	/// checks that the computed eigenvalues are sorted by decreasing magnitude and that every
	/// converged eigenpair `(w[j], V[:, j])` satisfies the residual bound
	/// `||A v - w v||_2 < 1e-10`.
	///
	/// shared by all tests below; previously this check (and the params struct) was copy-pasted
	/// in each test, which risked the scratch-sizing params and the solve params drifting apart.
	fn check_eigen_pairs(A: MatRef<'_, c64>, V: MatRef<'_, c64>, w: &[c64], n_converged: usize) {
		assert!(w.iter().map(|x| x.norm()).is_sorted_by(|x, y| x >= y));
		for j in 0..n_converged {
			assert!((A * V.col(j) - Scale(w[j]) * V.col(j)).norm_l2() < 1e-10);
		}
	}

	// dense random real operator, explicit subspace bounds.
	#[test]
	fn test_arnoldi_real() {
		let rng = &mut StdRng::seed_from_u64(1);
		let n = 100;
		let n_eigval = 20;
		// `PartialEigenParams` is `Copy`, so the same value sizes the scratch and drives the solve.
		let params = PartialEigenParams {
			min_dim: 30,
			max_dim: 60,
			max_restarts: 1000,
			..Default::default()
		};

		// keep the sampling order (matrix first, then start vector) so the seeded RNG stream
		// produces the same problem as before.
		let A: Mat<f64> = CwiseMatDistribution {
			nrows: n,
			ncols: n,
			dist: StandardNormal,
		}
		.sample(rng);
		let mut v0: Col<f64> = CwiseColDistribution {
			nrows: n,
			dist: StandardNormal,
		}
		.sample(rng);
		v0 /= v0.norm_l2();
		let A = A.as_ref();

		let par = Par::Seq;
		let mem = &mut MemBuffer::new(partial_eigen_scratch(&A, n_eigval, par, params));
		let mut V = Mat::zeros(n, n_eigval);
		let mut w = vec![c64::ZERO; n_eigval];

		let info = partial_eigen(
			V.rb_mut(),
			&mut w,
			&A,
			v0.as_ref(),
			f64::EPSILON * 128.0,
			par,
			MemStack::new(mem),
			params,
		);

		// the residual check is done in complex arithmetic since real eigenvalues may be complex.
		let A_cplx = zip!(A).map(|unzip!(x)| Complex::from(*x));
		check_eigen_pairs(A_cplx.as_ref(), V.as_ref(), &w, info.n_converged_eigen);
	}

	// dense random complex operator, explicit subspace bounds.
	#[test]
	fn test_arnoldi_cplx() {
		let rng = &mut StdRng::seed_from_u64(1);
		let n = 100;
		let n_eigval = 20;
		let params = PartialEigenParams {
			min_dim: 30,
			max_dim: 60,
			max_restarts: 1000,
			..Default::default()
		};

		let A: Mat<c64> = CwiseMatDistribution {
			nrows: n,
			ncols: n,
			dist: ComplexDistribution::new(StandardNormal, StandardNormal),
		}
		.sample(rng);
		let mut v0: Col<c64> = CwiseColDistribution {
			nrows: n,
			dist: ComplexDistribution::new(StandardNormal, StandardNormal),
		}
		.sample(rng);
		v0 /= v0.norm_l2();
		let A = A.as_ref();

		let par = Par::Seq;
		let mem = &mut MemBuffer::new(partial_eigen_scratch(&A, n_eigval, par, params));
		let mut V = Mat::zeros(n, n_eigval);
		let mut w = vec![c64::ZERO; n_eigval];

		let info = partial_eigen(
			V.rb_mut(),
			&mut w,
			&A,
			v0.as_ref(),
			f64::EPSILON * 128.0,
			par,
			MemStack::new(mem),
			params,
		);

		check_eigen_pairs(A, V.as_ref(), &w, info.n_converged_eigen);
	}

	// small symmetric tridiagonal Toeplitz matrix with known, clustered spectrum; exercises the
	// looser tolerance / low restart-count path.
	#[test]
	fn test_toeplitz() {
		let n = 10;
		let mut mat = Mat::<f64>::zeros(n, n);

		// 6 on the diagonal, 3 on both off-diagonals.
		for i in 0..n {
			mat[(i, i)] = 6.0;
			if i + 1 < n {
				mat[(i, i + 1)] = 3.0;
				mat[(i + 1, i)] = 3.0;
			}
		}

		let rng = &mut StdRng::seed_from_u64(1);
		let mut v0: Col<f64> = CwiseColDistribution {
			nrows: n,
			dist: StandardNormal,
		}
		.sample(rng);
		v0 /= v0.norm_l2();
		let A = mat.as_ref();

		let par = Par::Seq;
		let n_eigval = 5;
		let params = PartialEigenParams {
			min_dim: 7,
			max_dim: 9,
			max_restarts: 20,
			..Default::default()
		};
		let mem = &mut MemBuffer::new(partial_eigen_scratch(&A, n_eigval, par, params));

		let mut V = Mat::zeros(n, n_eigval);
		let mut w = vec![c64::ZERO; n_eigval];

		let info = partial_eigen(
			V.rb_mut(),
			&mut w,
			&A,
			v0.as_ref(),
			0.000001,
			par,
			MemStack::new(mem),
			params,
		);

		let A_cplx = zip!(A).map(|unzip!(x)| Complex::from(*x));
		check_eigen_pairs(A_cplx.as_ref(), V.as_ref(), &w, info.n_converged_eigen);
	}

	// n smaller than MIN_DIM: the subspace bounds clamp to n (default params).
	#[test]
	fn test_small_real() {
		let rng = &mut StdRng::seed_from_u64(1);
		let n = 5;
		let n_eigval = 3;

		let A: Mat<f64> = CwiseMatDistribution {
			nrows: n,
			ncols: n,
			dist: StandardNormal,
		}
		.sample(rng);
		let mut v0: Col<f64> = CwiseColDistribution {
			nrows: n,
			dist: StandardNormal,
		}
		.sample(rng);
		v0 /= v0.norm_l2();
		let A = A.as_ref();

		let par = Par::Seq;
		let mem = &mut MemBuffer::new(partial_eigen_scratch(&A, n_eigval, par, default()));
		let mut V = Mat::zeros(n, n_eigval);
		let mut w = vec![c64::ZERO; n_eigval];

		let info = partial_eigen(
			V.rb_mut(),
			&mut w,
			&A,
			v0.as_ref(),
			f64::EPSILON * 128.0,
			par,
			MemStack::new(mem),
			default(),
		);

		let A_cplx = zip!(A).map(|unzip!(x)| Complex::from(*x));
		check_eigen_pairs(A_cplx.as_ref(), V.as_ref(), &w, info.n_converged_eigen);
	}

	// same as above for the complex implementation.
	#[test]
	fn test_small_cplx() {
		let rng = &mut StdRng::seed_from_u64(1);
		let n = 5;
		let n_eigval = 3;

		let A: Mat<c64> = CwiseMatDistribution {
			nrows: n,
			ncols: n,
			dist: ComplexDistribution::new(StandardNormal, StandardNormal),
		}
		.sample(rng);
		let mut v0: Col<c64> = CwiseColDistribution {
			nrows: n,
			dist: ComplexDistribution::new(StandardNormal, StandardNormal),
		}
		.sample(rng);
		v0 /= v0.norm_l2();
		let A = A.as_ref();

		let par = Par::Seq;
		let mem = &mut MemBuffer::new(partial_eigen_scratch(&A, n_eigval, par, default()));
		let mut V = Mat::zeros(n, n_eigval);
		let mut w = vec![c64::ZERO; n_eigval];

		let info = partial_eigen(
			V.rb_mut(),
			&mut w,
			&A,
			v0.as_ref(),
			f64::EPSILON * 128.0,
			par,
			MemStack::new(mem),
			default(),
		);

		check_eigen_pairs(A, V.as_ref(), &w, info.n_converged_eigen);
	}
}