1use crate::error::OptimizeError;
9use crate::quantum_classical::QcResult;
10
/// Dense row-major tensor of `f64` values with an arbitrary number of axes.
#[derive(Debug, Clone)]
pub struct Tensor {
    pub data: Vec<f64>,
    pub shape: Vec<usize>,
}

impl Tensor {
    /// Create a tensor of the given shape with every entry set to zero.
    pub fn zeros(shape: &[usize]) -> Self {
        let n_elems: usize = shape.iter().product();
        Tensor {
            data: vec![0.0; n_elems],
            shape: shape.to_vec(),
        }
    }

    /// Total number of stored elements.
    pub fn size(&self) -> usize {
        self.data.len()
    }

    /// Number of axes (rank) of the tensor.
    pub fn ndim(&self) -> usize {
        self.shape.len()
    }

    /// Read the element at the given multi-index (row-major layout).
    #[inline]
    pub fn get(&self, idx: &[usize]) -> f64 {
        self.data[self.flat_index(idx)]
    }

    /// Write the element at the given multi-index (row-major layout).
    #[inline]
    pub fn set(&mut self, idx: &[usize], val: f64) {
        let pos = self.flat_index(idx);
        self.data[pos] = val;
    }

    /// Map a multi-index to its flat row-major offset: the last axis varies
    /// fastest, so strides grow right-to-left.
    fn flat_index(&self, idx: &[usize]) -> usize {
        let mut offset = 0;
        let mut stride = 1;
        for (axis, &extent) in self.shape.iter().enumerate().rev() {
            offset += idx[axis] * stride;
            stride *= extent;
        }
        offset
    }
}
70
/// Matrix-product state: a chain of rank-3 site tensors.
#[derive(Debug, Clone)]
pub struct MPS {
    /// One tensor per site, indexed [left_bond, physical, right_bond].
    pub tensors: Vec<Tensor>,
    /// Number of lattice sites (set from the input length in `product_state`).
    pub n_sites: usize,
    /// Dimension of the local physical index (e.g. 2 for qubits).
    pub phys_dim: usize,
    /// Cap on the virtual (bond) dimension used when compressing.
    pub max_bond_dim: usize,
}
88
89impl MPS {
90 pub fn product_state(values: &[usize], phys_dim: usize) -> QcResult<Self> {
94 if values.is_empty() {
95 return Err(OptimizeError::ValueError(
96 "Product state must have at least one site".to_string(),
97 ));
98 }
99 for &v in values {
100 if v >= phys_dim {
101 return Err(OptimizeError::ValueError(format!(
102 "State value {v} exceeds phys_dim={phys_dim}"
103 )));
104 }
105 }
106
107 let n = values.len();
108 let tensors: Vec<Tensor> = values
109 .iter()
110 .map(|&v| {
111 let mut t = Tensor::zeros(&[1, phys_dim, 1]);
113 t.set(&[0, v, 0], 1.0);
114 t
115 })
116 .collect();
117
118 Ok(Self {
119 tensors,
120 n_sites: n,
121 phys_dim,
122 max_bond_dim: 1,
123 })
124 }
125
126 pub fn bond_dim_left(&self, site: usize) -> usize {
128 self.tensors[site].shape[0]
129 }
130
131 pub fn bond_dim_right(&self, site: usize) -> usize {
133 self.tensors[site].shape[2]
134 }
135
136 pub fn overlap(&self, other: &MPS) -> QcResult<f64> {
140 if self.n_sites != other.n_sites {
141 return Err(OptimizeError::ValueError(
142 "MPS must have the same number of sites for overlap".to_string(),
143 ));
144 }
145 if self.phys_dim != other.phys_dim {
146 return Err(OptimizeError::ValueError(
147 "MPS must have the same physical dimension for overlap".to_string(),
148 ));
149 }
150
151 let n = self.n_sites;
152 let mut transfer: Vec<Vec<f64>> = vec![vec![1.0]];
159
160 for site in 0..n {
161 let ta = &self.tensors[site]; let tb = &other.tensors[site]; let dl_a = ta.shape[0];
165 let d = ta.shape[1];
166 let dr_a = ta.shape[2];
167 let dl_b = tb.shape[0];
168 let dr_b = tb.shape[2];
169
170 let mut new_transfer = vec![vec![0.0; dr_b]; dr_a];
172
173 for alpha_new in 0..dr_a {
174 for beta_new in 0..dr_b {
175 let mut val = 0.0;
176 for alpha in 0..dl_a {
177 for beta in 0..dl_b {
178 let t_ab = transfer[alpha][beta];
179 if t_ab.abs() < f64::EPSILON * 1e-6 {
180 continue;
181 }
182 for sigma in 0..d {
183 let a_val = ta.get(&[alpha, sigma, alpha_new]);
184 let b_val = tb.get(&[beta, sigma, beta_new]);
185 val += t_ab * a_val * b_val;
186 }
187 }
188 }
189 new_transfer[alpha_new][beta_new] = val;
190 }
191 }
192 transfer = new_transfer;
193 }
194
195 if transfer.len() != 1 || transfer[0].len() != 1 {
197 return Err(OptimizeError::ComputationError(
198 "Transfer matrix should be 1x1 at the end".to_string(),
199 ));
200 }
201 Ok(transfer[0][0])
202 }
203
204 pub fn norm(&self) -> QcResult<f64> {
206 let norm2 = self.overlap(self)?;
207 Ok(norm2.abs().sqrt())
208 }
209
210 pub fn normalize(&mut self) -> QcResult<()> {
215 let n = self.norm()?;
216 let threshold = f64::EPSILON * 1e3;
217 if n < threshold {
218 let half = (0.5_f64).sqrt();
220 for t in &mut self.tensors {
221 let d = t.shape[1];
223 for val in &mut t.data {
224 *val = 0.0;
225 }
226 let dl = t.shape[0];
227 let dr = t.shape[2];
228 for al in 0..dl.min(1) {
229 for sig in 0..d {
230 for ar in 0..dr.min(1) {
231 let flat = (al * d + sig) * dr + ar;
232 if flat < t.data.len() {
233 t.data[flat] = if d <= 2 {
234 half
235 } else {
236 1.0 / (d as f64).sqrt()
237 };
238 }
239 }
240 }
241 }
242 }
243 return Ok(());
244 }
245 for val in &mut self.tensors[0].data {
246 *val /= n;
247 }
248 Ok(())
249 }
250}
251
252fn svd_2d(a: &[f64], m: usize, n: usize) -> QcResult<(Vec<f64>, Vec<f64>, Vec<f64>)> {
259 if a.len() != m * n {
260 return Err(OptimizeError::ValueError(format!(
261 "Matrix size mismatch: {} != {}*{}",
262 a.len(),
263 m,
264 n
265 )));
266 }
267
268 let k = m.min(n);
269 jacobi_svd(a, m, n, k)
274}
275
276fn jacobi_svd(a: &[f64], m: usize, n: usize, k: usize) -> QcResult<(Vec<f64>, Vec<f64>, Vec<f64>)> {
279 let mut v_data = vec![0.0_f64; n * k]; for i in 0..k {
293 v_data[i * k + i] = 1.0; }
295 let mut v_data2 = vec![0.0_f64; n * k];
297 for i in 0..k {
298 v_data2[i * k + i] = 1.0;
299 }
300 let _ = v_data; let mut b = a.to_vec(); let mut v_full = vec![0.0_f64; n * n];
308 for i in 0..n {
309 v_full[i * n + i] = 1.0;
310 }
311
312 let max_sweeps = 100;
313 let tol = 1e-12_f64;
314
315 for _sweep in 0..max_sweeps {
316 let mut changed = false;
317 for p in 0..n {
318 for q in (p + 1)..n {
319 let bpp: f64 = (0..m).map(|i| b[i * n + p] * b[i * n + p]).sum();
321 let bqq: f64 = (0..m).map(|i| b[i * n + q] * b[i * n + q]).sum();
322 let bpq: f64 = (0..m).map(|i| b[i * n + p] * b[i * n + q]).sum();
323
324 if bpq.abs() < tol * (bpp * bqq).sqrt().max(1e-300) {
325 continue;
326 }
327 changed = true;
328
329 let tau = (bqq - bpp) / (2.0 * bpq);
331 let t = if tau >= 0.0 {
332 1.0 / (tau + (1.0 + tau * tau).sqrt())
333 } else {
334 1.0 / (tau - (1.0 + tau * tau).sqrt())
335 };
336 let c = 1.0 / (1.0 + t * t).sqrt();
337 let s = t * c;
338
339 for i in 0..m {
341 let bp = b[i * n + p];
342 let bq = b[i * n + q];
343 b[i * n + p] = c * bp - s * bq;
344 b[i * n + q] = s * bp + c * bq;
345 }
346
347 for i in 0..n {
349 let vp = v_full[i * n + p];
350 let vq = v_full[i * n + q];
351 v_full[i * n + p] = c * vp - s * vq;
352 v_full[i * n + q] = s * vp + c * vq;
353 }
354 }
355 }
356 if !changed {
357 break;
358 }
359 }
360
361 let mut sigma = vec![0.0_f64; k];
363 let mut u_data = vec![0.0_f64; m * k]; for j in 0..k {
366 let col_norm: f64 = (0..m)
367 .map(|i| b[i * n + j] * b[i * n + j])
368 .sum::<f64>()
369 .sqrt();
370 sigma[j] = col_norm;
371 if col_norm > tol {
372 for i in 0..m {
373 u_data[i * k + j] = b[i * n + j] / col_norm;
374 }
375 } else {
376 if j < m {
378 u_data[j * k + j] = 1.0;
379 }
380 }
381 }
382
383 let mut vt_data = vec![0.0_f64; k * n];
385 for j in 0..k {
386 for i in 0..n {
387 vt_data[j * n + i] = v_full[i * n + j];
388 }
389 }
390
391 let mut order: Vec<usize> = (0..k).collect();
393 order.sort_by(|&a, &b| {
394 sigma[b]
395 .partial_cmp(&sigma[a])
396 .unwrap_or(std::cmp::Ordering::Equal)
397 });
398
399 let mut u_sorted = vec![0.0_f64; m * k];
400 let mut s_sorted = vec![0.0_f64; k];
401 let mut vt_sorted = vec![0.0_f64; k * n];
402
403 for (new_j, &old_j) in order.iter().enumerate() {
404 s_sorted[new_j] = sigma[old_j];
405 for i in 0..m {
406 u_sorted[i * k + new_j] = u_data[i * k + old_j];
407 }
408 for i in 0..n {
409 vt_sorted[new_j * n + i] = vt_data[old_j * n + i];
410 }
411 }
412
413 Ok((u_sorted, s_sorted, vt_sorted))
414}
415
416pub fn svd_compress_bond(
427 tensor: &Tensor,
428 max_bond_dim: usize,
429) -> QcResult<(Tensor, Vec<f64>, Tensor)> {
430 if tensor.shape.len() != 2 {
431 return Err(OptimizeError::ValueError(
432 "svd_compress_bond expects a 2D tensor".to_string(),
433 ));
434 }
435 let m = tensor.shape[0];
436 let n = tensor.shape[1];
437
438 let (u, s, vt) = svd_2d(&tensor.data, m, n)?;
439 let k_full = m.min(n);
440
441 let tol = s[0] * 1e-12_f64.max(f64::EPSILON);
443 let chi = (0..k_full)
444 .filter(|&i| i < max_bond_dim && s[i] > tol)
445 .count()
446 .max(1); let chi = chi.min(max_bond_dim);
449
450 let mut u_tensor = Tensor::zeros(&[m, chi]);
452 for i in 0..m {
453 for j in 0..chi {
454 u_tensor.data[i * chi + j] = u[i * k_full + j];
455 }
456 }
457
458 let s_trunc: Vec<f64> = s[..chi].to_vec();
460
461 let mut vt_tensor = Tensor::zeros(&[chi, n]);
463 for i in 0..chi {
464 for j in 0..n {
465 vt_tensor.data[i * n + j] = vt[i * n + j];
466 }
467 }
468
469 Ok((u_tensor, s_trunc, vt_tensor))
470}
471
/// Apply an MPO (one [dl, phys_out, phys_in, dr] tensor per site) to an MPS,
/// producing a new MPS whose bond dimensions are the products of the MPS and
/// MPO bond dimensions, then compressing back down to `mps.max_bond_dim`.
///
/// # Errors
/// `ValueError` if the site counts differ or an MPO tensor's input physical
/// dimension does not match the MPS physical dimension.
pub fn mpo_apply(mps: &MPS, mpo_tensors: &[Tensor]) -> QcResult<MPS> {
    if mps.n_sites != mpo_tensors.len() {
        return Err(OptimizeError::ValueError(format!(
            "MPS has {} sites but MPO has {} tensors",
            mps.n_sites,
            mpo_tensors.len()
        )));
    }

    let n = mps.n_sites;
    let d = mps.phys_dim;
    let mut new_tensors: Vec<Tensor> = Vec::with_capacity(n);

    for site in 0..n {
        // a: MPS tensor [dl_mps, d, dr_mps]; w: MPO tensor [dl_mpo, d_out, d_in, dr_mpo].
        let a = &mps.tensors[site]; let w = &mpo_tensors[site]; let dl_mps = a.shape[0];
        let dr_mps = a.shape[2];
        let dl_mpo = w.shape[0];
        let dr_mpo = w.shape[3];
        let d_out = w.shape[1];

        if w.shape[2] != d {
            return Err(OptimizeError::ValueError(format!(
                "MPO physical in-dim {} != MPS phys_dim {}",
                w.shape[2], d
            )));
        }

        // Fused bonds: (mps, mpo) index pairs flattened as mps_idx * d_mpo + mpo_idx.
        let dl_new = dl_mps * dl_mpo;
        let dr_new = dr_mps * dr_mpo;
        let mut new_t = Tensor::zeros(&[dl_new, d_out, dr_new]);

        // Contract the shared physical index sigma_in at each fused entry.
        for alpha_mps in 0..dl_mps {
            for alpha_mpo in 0..dl_mpo {
                for sigma_out in 0..d_out {
                    for beta_mps in 0..dr_mps {
                        for beta_mpo in 0..dr_mpo {
                            let mut val = 0.0;
                            for sigma_in in 0..d {
                                let a_val = a.get(&[alpha_mps, sigma_in, beta_mps]);
                                let w_val = w.get(&[alpha_mpo, sigma_out, sigma_in, beta_mpo]);
                                val += a_val * w_val;
                            }
                            let alpha_new = alpha_mps * dl_mpo + alpha_mpo;
                            let beta_new = beta_mps * dr_mpo + beta_mpo;
                            let cur = new_t.get(&[alpha_new, sigma_out, beta_new]);
                            new_t.set(&[alpha_new, sigma_out, beta_new], cur + val);
                        }
                    }
                }
            }
        }
        new_tensors.push(new_t);
    }

    // Record the grown bond cap, then compress back to the original cap.
    let max_bond = mps.max_bond_dim * mpo_tensors[0].shape[0]; let mut result = MPS {
        tensors: new_tensors,
        n_sites: n,
        phys_dim: d_out_from_mpo(mpo_tensors),
        max_bond_dim: max_bond,
    };

    compress_mps_left_to_right(&mut result, mps.max_bond_dim)?;

    Ok(result)
}
550
551fn d_out_from_mpo(mpo: &[Tensor]) -> usize {
552 if mpo.is_empty() {
553 2
554 } else {
555 mpo[0].shape[1]
556 }
557}
558
559fn compress_mps_left_to_right(mps: &mut MPS, max_bond: usize) -> QcResult<()> {
561 let n = mps.n_sites;
562 for site in 0..(n - 1) {
563 let t = &mps.tensors[site];
564 let dl = t.shape[0];
565 let d = t.shape[1];
566 let dr = t.shape[2];
567
568 let mut mat = Tensor::zeros(&[dl * d, dr]);
570 for al in 0..dl {
571 for sig in 0..d {
572 for ar in 0..dr {
573 let v = t.get(&[al, sig, ar]);
574 mat.data[(al * d + sig) * dr + ar] = v;
575 }
576 }
577 }
578
579 let (u, s, vt) = svd_compress_bond(&mat, max_bond)?;
580 let chi = s.len();
581
582 let mut new_left = Tensor::zeros(&[dl, d, chi]);
584 for al in 0..dl {
585 for sig in 0..d {
586 for j in 0..chi {
587 let v = u.data[(al * d + sig) * chi + j];
588 new_left.set(&[al, sig, j], v);
589 }
590 }
591 }
592 mps.tensors[site] = new_left;
593
594 let right = &mps.tensors[site + 1].clone();
596 let dr_right = right.shape[2];
597 let d_right = right.shape[1];
598 let dl_right = right.shape[0];
599
600 let mut new_right = Tensor::zeros(&[chi, d_right, dr_right]);
602 for j in 0..chi {
603 for k in 0..dl_right.min(vt.shape[1]) {
604 let sv_jk = s[j] * vt.data[j * vt.shape[1] + k];
605 if sv_jk.abs() < f64::EPSILON * 1e-10 {
606 continue;
607 }
608 for sig in 0..d_right {
609 for beta in 0..dr_right {
610 let r = right.get(&[k, sig, beta]);
611 let cur = new_right.get(&[j, sig, beta]);
612 new_right.set(&[j, sig, beta], cur + sv_jk * r);
613 }
614 }
615 }
616 }
617 mps.tensors[site + 1] = new_right;
618 }
619 mps.max_bond_dim = max_bond;
620 Ok(())
621}
622
623pub fn ising_1d_mpo(n_sites: usize, j: f64, h: f64) -> Vec<Tensor> {
637 if n_sites == 0 {
638 return Vec::new();
639 }
640
641 let id: [f64; 4] = [1.0, 0.0, 0.0, 1.0]; let z_mat: [f64; 4] = [1.0, 0.0, 0.0, -1.0]; let x_mat: [f64; 4] = [0.0, 1.0, 1.0, 0.0]; let zero: [f64; 4] = [0.0; 4];
646
647 let d_mpo = 3usize;
649 let d = 2usize; let make_bulk_tensor = |left_boundary: bool, right_boundary: bool| -> Tensor {
660 let dl = if left_boundary { 1 } else { d_mpo };
661 let dr = if right_boundary { 1 } else { d_mpo };
662 let mut t = Tensor::zeros(&[dl, d, d, dr]);
663
664 let a_start = if left_boundary { 0 } else { 0 };
667 let b_end = if right_boundary { 0 } else { 2 };
668 let _ = a_start;
669 let _ = b_end;
670
671 for sigma_out in 0..d {
672 for sigma_in in 0..d {
673 let ops: &[([f64; 4], usize, usize)] = &[
681 (id, 0, 0),
682 (z_mat, 0, 1),
683 (x_mat_scaled(-h), 0, 2),
684 (z_mat_scaled(-j), 1, 2),
685 (id, 2, 2),
686 ];
687
688 for &(ref op, a, b) in ops {
689 let a_idx = if left_boundary {
691 if a == 0 {
692 Some(0)
693 } else {
694 None
695 }
696 } else {
697 Some(a)
698 };
699 let b_idx = if right_boundary {
700 if b == d_mpo - 1 {
701 Some(0)
702 } else {
703 None
704 }
705 } else {
706 Some(b)
707 };
708
709 if let (Some(ai), Some(bi)) = (a_idx, b_idx) {
710 let cur = t.get(&[ai, sigma_out, sigma_in, bi]);
711 t.set(
712 &[ai, sigma_out, sigma_in, bi],
713 cur + op[sigma_out * 2 + sigma_in],
714 );
715 }
716 }
717 }
718 }
719 t
720 };
721
722 let mut mpo = Vec::with_capacity(n_sites);
723 for site in 0..n_sites {
724 let left_boundary = site == 0;
725 let right_boundary = site == n_sites - 1;
726 mpo.push(make_bulk_tensor(left_boundary, right_boundary));
727 }
728 mpo
729}
730
/// Pauli-Z scaled by `scale`, as a row-major 2x2 matrix.
fn z_mat_scaled(scale: f64) -> [f64; 4] {
    let mut m = [0.0_f64; 4];
    m[0] = scale;
    m[3] = -scale;
    m
}
735
/// Pauli-X scaled by `scale`, as a row-major 2x2 matrix.
fn x_mat_scaled(scale: f64) -> [f64; 4] {
    let mut m = [0.0_f64; 4];
    m[1] = scale;
    m[2] = scale;
    m
}
740
/// Energy of `mps` with respect to the Hamiltonian MPO:
/// ⟨ψ|H|ψ⟩ / ⟨ψ|ψ⟩.  Thin alias over `mps_expectation_value`.
fn mps_energy(mps: &MPS, hamiltonian_mpo: &[Tensor]) -> QcResult<f64> {
    mps_expectation_value(mps, hamiltonian_mpo)
}
752
753fn mps_expectation_value(mps: &MPS, mpo: &[Tensor]) -> QcResult<f64> {
758 let n = mps.n_sites;
759 if n != mpo.len() {
760 return Err(OptimizeError::ValueError(
761 "MPS and MPO sizes must match".to_string(),
762 ));
763 }
764
765 let dl_bra_0 = mps.tensors[0].shape[0]; let dl_mpo_0 = mpo[0].shape[0]; let dl_ket_0 = mps.tensors[0].shape[0]; let mut transfer = vec![0.0f64; dl_bra_0 * dl_mpo_0 * dl_ket_0];
774 if dl_bra_0 == 1 && dl_mpo_0 == 1 && dl_ket_0 == 1 {
776 transfer[0] = 1.0;
777 } else {
778 return Err(OptimizeError::ComputationError(
779 "Left boundary bond dims must be 1".to_string(),
780 ));
781 }
782
783 let mut d_bra = dl_bra_0;
784 let mut d_mpo_cur = dl_mpo_0;
785 let mut d_ket = dl_ket_0;
786
787 for site in 0..n {
788 let a = &mps.tensors[site]; let w = &mpo[site]; let dr_bra = a.shape[2];
792 let dr_mpo = w.shape[3];
793 let dr_ket = a.shape[2];
794 let phys_d = a.shape[1];
795
796 let mut new_transfer = vec![0.0f64; dr_bra * dr_mpo * dr_ket];
798
799 for alpha_bra in 0..d_bra {
804 for a_mpo in 0..d_mpo_cur {
805 for alpha_ket in 0..d_ket {
806 let t_val =
807 transfer[alpha_bra * (d_mpo_cur * d_ket) + a_mpo * d_ket + alpha_ket];
808 if t_val.abs() < f64::EPSILON * 1e-12 {
809 continue;
810 }
811 for sigma in 0..phys_d {
812 for sigma_p in 0..phys_d {
813 for b_mpo in 0..dr_mpo {
814 let w_val = w.get(&[a_mpo, sigma, sigma_p, b_mpo]);
815 if w_val.abs() < f64::EPSILON * 1e-12 {
816 continue;
817 }
818 for beta_bra in 0..dr_bra {
819 let a_bra_val = a.get(&[alpha_bra, sigma, beta_bra]);
820 for beta_ket in 0..dr_ket {
821 let a_ket_val = a.get(&[alpha_ket, sigma_p, beta_ket]);
822 let idx = beta_bra * (dr_mpo * dr_ket)
823 + b_mpo * dr_ket
824 + beta_ket;
825 new_transfer[idx] += t_val * a_bra_val * w_val * a_ket_val;
826 }
827 }
828 }
829 }
830 }
831 }
832 }
833 }
834
835 transfer = new_transfer;
836 d_bra = dr_bra;
837 d_mpo_cur = dr_mpo;
838 d_ket = dr_ket;
839 }
840
841 if transfer.len() == 1 {
843 let numerator = transfer[0];
844
845 let norm2 = mps.overlap(mps)?;
847 if norm2.abs() < f64::EPSILON * 1e-6 {
848 return Err(OptimizeError::ComputationError(
849 "MPS norm is zero in energy computation".to_string(),
850 ));
851 }
852 Ok(numerator / norm2)
853 } else {
854 let numerator: f64 = transfer.iter().sum();
856 let norm2 = mps.overlap(mps)?;
857 if norm2.abs() < f64::EPSILON * 1e-6 {
858 return Err(OptimizeError::ComputationError(
859 "MPS norm is zero in energy computation".to_string(),
860 ));
861 }
862 Ok(numerator / norm2)
863 }
864}
865
/// Dense three-leg environment block used in DMRG contractions, indexed
/// [bra_bond, mpo_bond, ket_bond] and stored flat in row-major order.
#[derive(Debug, Clone)]
struct EnvBlock {
    data: Vec<f64>,
    d_bra: usize,
    d_mpo: usize,
    d_ket: usize,
}

impl EnvBlock {
    /// All-zero block with the given leg dimensions.
    fn zeros(d_bra: usize, d_mpo: usize, d_ket: usize) -> Self {
        EnvBlock {
            data: vec![0.0; d_bra * d_mpo * d_ket],
            d_bra,
            d_mpo,
            d_ket,
        }
    }

    /// Row-major flat offset of (alpha_bra, a_mpo, alpha_ket).
    fn offset(&self, alpha_bra: usize, a_mpo: usize, alpha_ket: usize) -> usize {
        (alpha_bra * self.d_mpo + a_mpo) * self.d_ket + alpha_ket
    }

    /// Read one entry.
    fn get(&self, alpha_bra: usize, a_mpo: usize, alpha_ket: usize) -> f64 {
        self.data[self.offset(alpha_bra, a_mpo, alpha_ket)]
    }

    /// Accumulate `val` into one entry.
    fn add(&mut self, alpha_bra: usize, a_mpo: usize, alpha_ket: usize, val: f64) {
        let idx = self.offset(alpha_bra, a_mpo, alpha_ket);
        self.data[idx] += val;
    }
}
898
/// Build left environments L[site] for every cut of the chain: envs[0] is
/// the trivial 1x1x1 block and envs[site] is the contraction of the bra,
/// MPO, and ket layers over sites 0..site.  The same MPS tensor serves as
/// bra and ket (tensors are real).
fn build_left_envs(mps: &MPS, mpo: &[Tensor]) -> QcResult<Vec<EnvBlock>> {
    let n = mps.n_sites;
    let mut envs: Vec<EnvBlock> = Vec::with_capacity(n + 1);

    // Trivial left boundary.
    let mut l0 = EnvBlock::zeros(1, 1, 1);
    l0.data[0] = 1.0;
    envs.push(l0);

    for site in 0..n {
        let a = &mps.tensors[site];
        let w = &mpo[site];
        let prev = &envs[site];

        let dr_bra = a.shape[2];
        let dr_mpo = w.shape[3];
        let dr_ket = a.shape[2]; let phys_d = a.shape[1];
        let dl_mpo = w.shape[0];
        let d_bra_in = prev.d_bra;
        let d_mpo_in = prev.d_mpo;
        let d_ket_in = prev.d_ket;

        let mut new_env = EnvBlock::zeros(dr_bra, dr_mpo, dr_ket);

        // Absorb one site: new_env[beta_bra, b_mpo, beta_ket] =
        //   sum over (alpha_bra, a_idx, alpha_ket, sigma, sigma_p) of
        //   prev * A_bra * W * A_ket.  Negligible factors short-circuit.
        for alpha_bra in 0..d_bra_in {
            for a_idx in 0..d_mpo_in {
                for alpha_ket in 0..d_ket_in {
                    let p = prev.get(alpha_bra, a_idx, alpha_ket);
                    if p.abs() < f64::EPSILON * 1e-12 {
                        continue;
                    }
                    for sigma in 0..phys_d {
                        for sigma_p in 0..phys_d {
                            for b_mpo in 0..dr_mpo {
                                // Defensive: skip if the stored env MPO leg
                                // is wider than this site's MPO tensor.
                                if a_idx >= dl_mpo {
                                    continue;
                                }
                                let w_val = w.get(&[a_idx, sigma, sigma_p, b_mpo]);
                                if w_val.abs() < f64::EPSILON * 1e-12 {
                                    continue;
                                }
                                for beta_bra in 0..dr_bra {
                                    let a_bra = a.get(&[alpha_bra, sigma, beta_bra]);
                                    if a_bra.abs() < f64::EPSILON * 1e-12 {
                                        continue;
                                    }
                                    for beta_ket in 0..dr_ket {
                                        let a_ket = a.get(&[alpha_ket, sigma_p, beta_ket]);
                                        new_env.add(
                                            beta_bra,
                                            b_mpo,
                                            beta_ket,
                                            p * a_bra * w_val * a_ket,
                                        );
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        envs.push(new_env);
    }
    Ok(envs)
}
973
974fn build_right_envs(mps: &MPS, mpo: &[Tensor]) -> QcResult<Vec<EnvBlock>> {
978 let n = mps.n_sites;
979 let mut envs: Vec<EnvBlock> = vec![EnvBlock::zeros(1, 1, 1); n + 1]; let mut r_last = EnvBlock::zeros(1, 1, 1);
983 r_last.data[0] = 1.0;
984 envs[n] = r_last;
985
986 for site in (0..n).rev() {
987 let a = &mps.tensors[site];
988 let w = &mpo[site];
989 let next = &envs[site + 1].clone();
990
991 let dl_bra = a.shape[0];
992 let dl_mpo = w.shape[0];
993 let dl_ket = a.shape[0];
994 let phys_d = a.shape[1];
995 let dr_mpo = w.shape[3];
996 let d_bra_in = next.d_bra;
997 let d_mpo_in = next.d_mpo;
998 let d_ket_in = next.d_ket;
999
1000 let mut new_env = EnvBlock::zeros(dl_bra, dl_mpo, dl_ket);
1001
1002 for beta_bra in 0..d_bra_in {
1005 for b_mpo in 0..d_mpo_in {
1006 for beta_ket in 0..d_ket_in {
1007 let p = next.get(beta_bra, b_mpo, beta_ket);
1008 if p.abs() < f64::EPSILON * 1e-12 {
1009 continue;
1010 }
1011 for sigma in 0..phys_d {
1012 for sigma_p in 0..phys_d {
1013 for a_mpo in 0..dl_mpo {
1014 if b_mpo >= dr_mpo {
1015 continue;
1016 }
1017 let w_val = w.get(&[a_mpo, sigma, sigma_p, b_mpo]);
1018 if w_val.abs() < f64::EPSILON * 1e-12 {
1019 continue;
1020 }
1021 for alpha_bra in 0..dl_bra {
1022 let a_bra = a.get(&[alpha_bra, sigma, beta_bra]);
1023 if a_bra.abs() < f64::EPSILON * 1e-12 {
1024 continue;
1025 }
1026 for alpha_ket in 0..dl_ket {
1027 let a_ket = a.get(&[alpha_ket, sigma_p, beta_ket]);
1028 new_env.add(
1029 alpha_bra,
1030 a_mpo,
1031 alpha_ket,
1032 p * a_bra * w_val * a_ket,
1033 );
1034 }
1035 }
1036 }
1037 }
1038 }
1039 }
1040 }
1041 }
1042 envs[site] = new_env;
1043 }
1044 Ok(envs)
1045}
1046
/// Apply the effective two-site Hamiltonian — left environment, MPO tensors
/// `w1` and `w2`, and right environment — to the two-site wavefunction
/// `theta` [dl, d, d, dr], returning H_eff·θ with the same shape.  All
/// contractions are dense nested loops that skip entries below
/// ~EPSILON·1e-12.
fn apply_heff_two_site(
    theta: &Tensor,
    left_env: &EnvBlock,
    w1: &Tensor,
    w2: &Tensor,
    right_env: &EnvBlock,
) -> Tensor {
    let dl = theta.shape[0];
    let d = theta.shape[1];
    let dr = theta.shape[3];

    let d_left_bra = left_env.d_bra;
    let d_left_mpo = left_env.d_mpo;
    let d_left_ket = left_env.d_ket;
    let d_right_bra = right_env.d_bra;
    let d_right_mpo = right_env.d_mpo;
    let d_right_ket = right_env.d_ket;

    // Middle MPO bond shared between w1 and w2; the bra leg sizes of the
    // environments are unused here (output legs come from theta's shape).
    let dm_mpo = w1.shape[3]; let _ = d_left_bra;
    let _ = d_right_bra;

    let mut result = Tensor::zeros(&[dl, d, d, dr]);

    // result[out] = sum over in-legs and MPO bonds of
    //   L * W1 * W2 * R * theta[in].
    for alpha_l_out in 0..dl {
        for sig1_out in 0..d {
            for sig2_out in 0..d {
                for alpha_r_out in 0..dr {
                    let mut val = 0.0;
                    for alpha_l_in in 0..d_left_ket {
                        for sig1_in in 0..d {
                            for sig2_in in 0..d {
                                for alpha_r_in in 0..d_right_ket {
                                    let theta_val =
                                        theta.get(&[alpha_l_in, sig1_in, sig2_in, alpha_r_in]);
                                    if theta_val.abs() < f64::EPSILON * 1e-12 {
                                        continue;
                                    }
                                    for a_mpo in 0..d_left_mpo {
                                        let l_val = left_env.get(alpha_l_out, a_mpo, alpha_l_in);
                                        if l_val.abs() < f64::EPSILON * 1e-12 {
                                            continue;
                                        }
                                        for b_mpo in 0..dm_mpo {
                                            let w1_val = w1.get(&[a_mpo, sig1_out, sig1_in, b_mpo]);
                                            if w1_val.abs() < f64::EPSILON * 1e-12 {
                                                continue;
                                            }
                                            for c_mpo in 0..d_right_mpo {
                                                let w2_val =
                                                    w2.get(&[b_mpo, sig2_out, sig2_in, c_mpo]);
                                                if w2_val.abs() < f64::EPSILON * 1e-12 {
                                                    continue;
                                                }
                                                let r_val =
                                                    right_env.get(alpha_r_out, c_mpo, alpha_r_in);
                                                val += l_val * w1_val * w2_val * r_val * theta_val;
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                    let cur = result.get(&[alpha_l_out, sig1_out, sig2_out, alpha_r_out]);
                    result.set(&[alpha_l_out, sig1_out, sig2_out, alpha_r_out], cur + val);
                }
            }
        }
    }
    result
}
1130
/// Run `n_sweeps` left-to-right two-site DMRG-style sweeps minimizing the
/// energy of `mps` with respect to `hamiltonian_mpo`, and return the final
/// energy estimate.  In place of an exact local eigensolver, each two-site
/// block is improved by a few normalized gradient-descent steps on the
/// Rayleigh quotient, then split back via truncated SVD.
///
/// # Errors
/// `ValueError` for fewer than 2 sites or a site-count mismatch; propagates
/// numerical errors from normalization, environments, and energy evaluation.
pub fn dmrg_two_site_sweep(
    mps: &mut MPS,
    hamiltonian_mpo: &[Tensor],
    n_sweeps: usize,
) -> QcResult<f64> {
    if mps.n_sites < 2 {
        return Err(OptimizeError::ValueError(
            "DMRG requires at least 2 sites".to_string(),
        ));
    }
    if mps.n_sites != hamiltonian_mpo.len() {
        return Err(OptimizeError::ValueError(
            "MPS and MPO must have the same number of sites".to_string(),
        ));
    }

    mps.normalize()?;
    let mut energy = mps_energy(mps, hamiltonian_mpo)?;

    for _sweep in 0..n_sweeps {
        // Right environments for every cut, computed once per sweep; the
        // left environment is grown incrementally as the sweep moves right.
        let right_envs = build_right_envs(mps, hamiltonian_mpo)?;

        let mut left_env = {
            let mut l0 = EnvBlock::zeros(1, 1, 1);
            l0.data[0] = 1.0;
            l0
        };

        let n = mps.n_sites;
        for site in 0..(n - 1) {
            let right_env = &right_envs[site + 2];
            let w1 = &hamiltonian_mpo[site];
            let w2 = &hamiltonian_mpo[site + 1];

            let (mut theta, dl, d, dr) = build_theta(mps, site)?;

            // A few steepest-descent steps on θ stand in for an exact local
            // eigensolver; θ is re-normalized after each step.
            let n_iter = 3;
            for _pi in 0..n_iter {
                let h_theta = apply_heff_two_site(&theta, &left_env, w1, w2, right_env);
                let norm: f64 = h_theta.data.iter().map(|x| x * x).sum::<f64>().sqrt();
                if norm > f64::EPSILON {
                    let rayleigh = compute_rayleigh(&theta, &h_theta);
                    // Step size shrinks as the local energy scale grows.
                    let alpha = 0.2 / (rayleigh.abs() + 1.0);
                    for i in 0..theta.data.len() {
                        theta.data[i] -= alpha * h_theta.data[i];
                    }
                    let n2: f64 = theta.data.iter().map(|x| x * x).sum::<f64>().sqrt();
                    if n2 > f64::EPSILON {
                        for v in &mut theta.data {
                            *v /= n2;
                        }
                    }
                }
            }

            // Split θ back into two site tensors (left-canonical SVD split).
            update_mps_from_theta(mps, &theta, site, dl, d, dr, true)?;

            // Absorb the freshly updated site into the left environment.
            left_env = contract_left_env(&left_env, &mps.tensors[site], &hamiltonian_mpo[site])?;
        }

        mps.normalize()?;
        let new_energy = mps_energy(mps, hamiltonian_mpo)?;
        energy = new_energy;
    }

    Ok(energy)
}
1215
1216fn compute_rayleigh(theta: &Tensor, h_theta: &Tensor) -> f64 {
1217 let num: f64 = theta
1218 .data
1219 .iter()
1220 .zip(h_theta.data.iter())
1221 .map(|(a, b)| a * b)
1222 .sum();
1223 let denom: f64 = theta.data.iter().map(|x| x * x).sum();
1224 if denom.abs() < f64::EPSILON {
1225 0.0
1226 } else {
1227 num / denom
1228 }
1229}
1230
1231fn build_theta(mps: &MPS, site: usize) -> QcResult<(Tensor, usize, usize, usize)> {
1233 let a1 = &mps.tensors[site];
1234 let a2 = &mps.tensors[site + 1];
1235 let dl = a1.shape[0];
1236 let d = a1.shape[1];
1237 let dm = a1.shape[2];
1238 let dr = a2.shape[2];
1239
1240 let mut theta = Tensor::zeros(&[dl, d, d, dr]);
1241 for al in 0..dl {
1242 for s1 in 0..d {
1243 for gam in 0..dm {
1244 let v1 = a1.get(&[al, s1, gam]);
1245 if v1.abs() < f64::EPSILON * 1e-12 {
1246 continue;
1247 }
1248 for s2 in 0..d {
1249 for ar in 0..dr {
1250 let v2 = a2.get(&[gam, s2, ar]);
1251 let cur = theta.get(&[al, s1, s2, ar]);
1252 theta.set(&[al, s1, s2, ar], cur + v1 * v2);
1253 }
1254 }
1255 }
1256 }
1257 }
1258 Ok((theta, dl, d, dr))
1259}
1260
/// Split the optimized two-site wavefunction `theta` [dl, d, d, dr] back
/// into the site tensors at `site` and `site + 1` via a truncated SVD of
/// the (dl·d) x (d·dr) matricization: U becomes the left tensor and s·V^T
/// is absorbed into the right tensor.  `_left_canonical` is currently
/// ignored — the split is always left-canonical.
fn update_mps_from_theta(
    mps: &mut MPS,
    theta: &Tensor,
    site: usize,
    dl: usize,
    d: usize,
    dr: usize,
    _left_canonical: bool,
) -> QcResult<()> {
    // Matricize theta: row index (al, s1), column index (s2, ar).
    let rows = dl * d;
    let cols = d * dr;
    let mut mat = Tensor::zeros(&[rows, cols]);
    for al in 0..dl {
        for s1 in 0..d {
            for s2 in 0..d {
                for ar in 0..dr {
                    let v = theta.get(&[al, s1, s2, ar]);
                    mat.data[(al * d + s1) * cols + s2 * dr + ar] = v;
                }
            }
        }
    }

    let max_bond = mps.max_bond_dim;
    let (u_mat, s_vals, vt_mat) = svd_compress_bond(&mat, max_bond)?;
    let chi = s_vals.len();

    // Left tensor: reshape U back to [dl, d, chi].
    let mut new_a1 = Tensor::zeros(&[dl, d, chi]);
    for al in 0..dl {
        for s1 in 0..d {
            for j in 0..chi {
                let v = u_mat.data[(al * d + s1) * chi + j];
                new_a1.set(&[al, s1, j], v);
            }
        }
    }

    // Right tensor: absorb the singular values, s * V^T -> [chi, d, dr].
    let mut new_a2 = Tensor::zeros(&[chi, d, dr]);
    for j in 0..chi {
        for s2 in 0..d {
            for ar in 0..dr {
                let v = s_vals[j] * vt_mat.data[j * cols + s2 * dr + ar];
                new_a2.set(&[j, s2, ar], v);
            }
        }
    }

    mps.tensors[site] = new_a1;
    mps.tensors[site + 1] = new_a2;
    Ok(())
}
1313
/// Absorb one site (MPS tensor `a` and MPO tensor `w`) into the left
/// environment `left`, producing the environment for the next site to the
/// right.  Mirrors the per-site update in `build_left_envs`; the same MPS
/// tensor is used for the bra and ket layers (tensors are real).
fn contract_left_env(left: &EnvBlock, a: &Tensor, w: &Tensor) -> QcResult<EnvBlock> {
    let dr_bra = a.shape[2];
    let dr_mpo = w.shape[3];
    let dr_ket = a.shape[2];
    let phys_d = a.shape[1];
    let dl_mpo = w.shape[0];

    let mut new_env = EnvBlock::zeros(dr_bra, dr_mpo, dr_ket);

    // new_env[beta_bra, b_mpo, beta_ket] = sum over the incoming legs and
    // physical indices of left * A_bra * W * A_ket; negligible factors
    // short-circuit each level.
    for alpha_bra in 0..left.d_bra {
        for a_idx in 0..left.d_mpo {
            for alpha_ket in 0..left.d_ket {
                let p = left.get(alpha_bra, a_idx, alpha_ket);
                if p.abs() < f64::EPSILON * 1e-12 {
                    continue;
                }
                for sigma in 0..phys_d {
                    for sigma_p in 0..phys_d {
                        for b_mpo in 0..dr_mpo {
                            // Defensive: skip if the env MPO leg is wider
                            // than this site's MPO tensor.
                            if a_idx >= dl_mpo {
                                continue;
                            }
                            let w_val = w.get(&[a_idx, sigma, sigma_p, b_mpo]);
                            if w_val.abs() < f64::EPSILON * 1e-12 {
                                continue;
                            }
                            for beta_bra in 0..dr_bra {
                                let a_bra = a.get(&[alpha_bra, sigma, beta_bra]);
                                if a_bra.abs() < f64::EPSILON * 1e-12 {
                                    continue;
                                }
                                for beta_ket in 0..dr_ket {
                                    let a_ket = a.get(&[alpha_ket, sigma_p, beta_ket]);
                                    new_env.add(
                                        beta_bra,
                                        b_mpo,
                                        beta_ket,
                                        p * a_bra * w_val * a_ket,
                                    );
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    Ok(new_env)
}
1364
#[cfg(test)]
mod tests {
    use super::*;

    /// A basis product state must have trivial (size-1) bonds everywhere.
    #[test]
    fn test_product_state_bond_dim_one() {
        let state = MPS::product_state(&[0, 1, 0, 1], 2).unwrap();
        assert_eq!(state.n_sites, 4);
        for tensor in &state.tensors {
            assert_eq!(tensor.shape[0], 1, "left bond dim should be 1");
            assert_eq!(tensor.shape[2], 1, "right bond dim should be 1");
        }
    }

    /// ⟨ψ|ψ⟩ must equal ‖ψ‖² up to floating-point round-off.
    #[test]
    fn test_overlap_identical_states() {
        let state = MPS::product_state(&[0, 1, 0], 2).unwrap();
        let self_overlap = state.overlap(&state).unwrap();
        let nrm = state.norm().unwrap();
        assert!((self_overlap - nrm * nrm).abs() < 1e-12, "overlap == norm²");
    }

    /// Basis states come out of the constructor with unit norm.
    #[test]
    fn test_product_state_normalized() {
        let state = MPS::product_state(&[1, 0, 1, 0], 2).unwrap();
        let nrm = state.norm().unwrap();
        assert!(
            (nrm - 1.0).abs() < 1e-12,
            "Product state should be normalized"
        );
    }

    /// Truncated SVD of a full-rank-ish 4x4 matrix keeps at most 2 values
    /// and its factors carry matching shapes.
    #[test]
    fn test_svd_compress_reduces_bond_dim() {
        let mut matrix = Tensor::zeros(&[4, 4]);
        for row in 0..4 {
            for col in 0..4 {
                matrix.data[row * 4 + col] = (row + col + 1) as f64;
            }
        }
        let (u, s, vt) = svd_compress_bond(&matrix, 2).unwrap();
        assert!(s.len() <= 2, "Should have ≤ 2 singular values");
        assert_eq!(u.shape[1], s.len());
        assert_eq!(vt.shape[0], s.len());
    }

    /// The Ising MPO has one tensor per site with 2x2 physical legs.
    #[test]
    fn test_ising_mpo_structure() {
        let sites = 4;
        let mpo = ising_1d_mpo(sites, 1.0, 0.5);
        assert_eq!(mpo.len(), sites);
        for tensor in &mpo {
            assert_eq!(tensor.shape[1], 2, "MPO phys out dim should be 2");
            assert_eq!(tensor.shape[2], 2, "MPO phys in dim should be 2");
        }
    }

    /// Applying an MPO preserves the site count and physical dimension.
    #[test]
    fn test_mpo_apply_valid_mps() {
        let sites = 3;
        let state = MPS::product_state(&[0, 1, 0], 2).unwrap();
        let mpo = ising_1d_mpo(sites, 1.0, 0.5);
        let applied = mpo_apply(&state, &mpo).unwrap();
        assert_eq!(applied.n_sites, sites);
        assert_eq!(applied.phys_dim, 2);
    }

    /// DMRG on a small ferromagnetic Ising chain should reach an energy
    /// well below the trivial threshold.
    #[test]
    fn test_dmrg_ising_ground_state_energy() {
        let sites = 4;
        let coupling = 1.0_f64;
        let field = 0.3_f64;

        let mut state = MPS::product_state(&[0, 0, 0, 0], 2).unwrap();
        state.max_bond_dim = 4;

        let ham = ising_1d_mpo(sites, coupling, field);
        let energy = dmrg_two_site_sweep(&mut state, &ham, 10).unwrap();

        assert!(
            energy < -1.0,
            "DMRG energy {energy:.4} should be < -1.0 (ferromagnetic Ising ground state region)"
        );
    }
}
1459}