1use crate::error::OptimizeError;
8use crate::quantum_classical::statevector::Statevector;
9use crate::quantum_classical::QcResult;
10
/// A weighted Max-Cut problem instance on an undirected graph.
#[derive(Debug, Clone)]
pub struct MaxCutProblem {
    /// Number of graph vertices (one qubit per vertex in the QAOA circuit).
    pub n_vertices: usize,
    /// Edge list as `(vertex_i, vertex_j, weight)` triples.
    pub edges: Vec<(usize, usize, f64)>,
}
23
24impl MaxCutProblem {
25 pub fn new(n_vertices: usize, edges: Vec<(usize, usize, f64)>) -> Self {
27 Self { n_vertices, edges }
28 }
29
30 pub fn cost_function(&self, state: &Statevector) -> QcResult<f64> {
34 let mut cost = 0.0;
35 for &(i, j, w) in &self.edges {
36 let zz = state.expectation_zz(i, j)?;
37 cost += w * (1.0 - zz) / 2.0;
38 }
39 Ok(cost)
40 }
41
42 pub fn cut_value(&self, bits: &[bool]) -> f64 {
45 self.edges
46 .iter()
47 .filter(|&&(i, j, _)| bits[i] != bits[j])
48 .map(|&(_, _, w)| w)
49 .sum()
50 }
51}
52
/// Hyperparameters for the QAOA ansatz and its classical optimizer.
#[derive(Debug, Clone)]
pub struct QaoaConfig {
    /// Number of alternating cost/mixer layers (the QAOA depth `p`).
    pub p_layers: usize,
    /// Initial cost-layer angles; length must equal `p_layers`.
    pub init_gamma: Vec<f64>,
    /// Initial mixer-layer angles; length must equal `p_layers`.
    pub init_beta: Vec<f64>,
    /// Maximum number of Nelder–Mead iterations.
    pub max_iter: usize,
    /// Convergence tolerance on the simplex best/worst value spread.
    pub tol: f64,
}
69
70impl Default for QaoaConfig {
71 fn default() -> Self {
72 let p = 2;
73 Self {
74 p_layers: p,
75 init_gamma: vec![0.1; p],
76 init_beta: vec![0.1; p],
77 max_iter: 200,
78 tol: 1e-6,
79 }
80 }
81}
82
/// A QAOA circuit: pairs a Max-Cut instance with its ansatz/optimizer settings.
#[derive(Debug, Clone)]
pub struct QaoaCircuit {
    /// The Max-Cut instance being solved.
    pub problem: MaxCutProblem,
    /// Ansatz depth and optimizer configuration.
    pub config: QaoaConfig,
}
93
94impl QaoaCircuit {
95 pub fn new(problem: MaxCutProblem, config: QaoaConfig) -> Self {
97 Self { problem, config }
98 }
99
100 pub fn run(&self, gamma: &[f64], beta: &[f64]) -> QcResult<f64> {
107 if gamma.len() != self.config.p_layers {
108 return Err(OptimizeError::ValueError(format!(
109 "Expected {} gamma values, got {}",
110 self.config.p_layers,
111 gamma.len()
112 )));
113 }
114 if beta.len() != self.config.p_layers {
115 return Err(OptimizeError::ValueError(format!(
116 "Expected {} beta values, got {}",
117 self.config.p_layers,
118 beta.len()
119 )));
120 }
121 let n = self.problem.n_vertices;
122 let mut state = Statevector::zero_state(n)?;
123
124 for q in 0..n {
126 state.apply_hadamard(q)?;
127 }
128
129 for l in 0..self.config.p_layers {
131 for &(i, j, w) in &self.problem.edges {
133 state.apply_rzz(i, j, 2.0 * gamma[l] * w)?;
134 }
135
136 for q in 0..n {
138 state.apply_rx(q, 2.0 * beta[l])?;
139 }
140 }
141
142 let cost = self.problem.cost_function(&state)?;
144 Ok(-cost) }
146
147 pub fn optimize(&self) -> QcResult<QaoaResult> {
149 let p = self.config.p_layers;
150 let n_params = 2 * p;
151
152 let mut x: Vec<f64> = self
154 .config
155 .init_gamma
156 .iter()
157 .chain(self.config.init_beta.iter())
158 .cloned()
159 .collect();
160
161 let eval = |params: &[f64]| -> f64 {
162 let gam = ¶ms[..p];
163 let bet = ¶ms[p..];
164 self.run(gam, bet).unwrap_or(0.0)
165 };
166
167 let (best_params, best_val, n_evals) =
168 nelder_mead_minimize(eval, &x, self.config.max_iter, self.config.tol)?;
169
170 x = best_params;
171 let gamma_opt: Vec<f64> = x[..p].to_vec();
172 let beta_opt: Vec<f64> = x[p..].to_vec();
173
174 Ok(QaoaResult {
175 optimal_gamma: gamma_opt,
176 optimal_beta: beta_opt,
177 optimal_value: -best_val, n_evaluations: n_evals,
179 })
180 }
181
182 pub fn best_string(&self, gamma: &[f64], beta: &[f64]) -> QcResult<Vec<bool>> {
184 let n = self.problem.n_vertices;
185 let mut state = Statevector::zero_state(n)?;
186 for q in 0..n {
187 state.apply_hadamard(q)?;
188 }
189 for l in 0..self.config.p_layers {
190 for &(i, j, w) in &self.problem.edges {
191 state.apply_rzz(i, j, 2.0 * gamma[l] * w)?;
192 }
193 for q in 0..n {
194 state.apply_rx(q, 2.0 * beta[l])?;
195 }
196 }
197 let idx = state.most_probable_state();
198 Ok(state.index_to_bits(idx))
199 }
200
201 fn build_state(&self, gamma: &[f64], beta: &[f64]) -> QcResult<Statevector> {
203 let n = self.problem.n_vertices;
204 let mut state = Statevector::zero_state(n)?;
205 for q in 0..n {
206 state.apply_hadamard(q)?;
207 }
208 for l in 0..self.config.p_layers {
209 for &(i, j, w) in &self.problem.edges {
210 state.apply_rzz(i, j, 2.0 * gamma[l] * w)?;
211 }
212 for q in 0..n {
213 state.apply_rx(q, 2.0 * beta[l])?;
214 }
215 }
216 Ok(state)
217 }
218}
219
/// Outcome of a QAOA optimization run.
#[derive(Debug, Clone)]
pub struct QaoaResult {
    /// Optimized cost-layer angles.
    pub optimal_gamma: Vec<f64>,
    /// Optimized mixer-layer angles.
    pub optimal_beta: Vec<f64>,
    /// Best expected cut value found (the negated minimized energy).
    pub optimal_value: f64,
    /// Total number of objective-function evaluations performed.
    pub n_evaluations: usize,
}
234
235pub fn parameter_shift_gradient(
243 circuit: &QaoaCircuit,
244 gamma: &[f64],
245 beta: &[f64],
246) -> QcResult<(Vec<f64>, Vec<f64>)> {
247 let p = circuit.config.p_layers;
248 let shift = std::f64::consts::FRAC_PI_2; let mut d_gamma = vec![0.0; p];
251 let mut d_beta = vec![0.0; p];
252
253 for l in 0..p {
255 let mut g_plus = gamma.to_vec();
256 let mut g_minus = gamma.to_vec();
257 g_plus[l] += shift;
258 g_minus[l] -= shift;
259 let e_plus = circuit.run(&g_plus, beta)?;
260 let e_minus = circuit.run(&g_minus, beta)?;
261 d_gamma[l] = 0.5 * (e_plus - e_minus);
262 }
263
264 for l in 0..p {
266 let mut b_plus = beta.to_vec();
267 let mut b_minus = beta.to_vec();
268 b_plus[l] += shift;
269 b_minus[l] -= shift;
270 let e_plus = circuit.run(gamma, &b_plus)?;
271 let e_minus = circuit.run(gamma, &b_minus)?;
272 d_beta[l] = 0.5 * (e_plus - e_minus);
273 }
274
275 Ok((d_gamma, d_beta))
276}
277
/// Minimizes `f` with the Nelder–Mead downhill-simplex method.
///
/// Starts from `x0`, runs at most `max_iter` iterations, and stops early once
/// the spread between the best and worst simplex values drops below `tol`.
/// Returns `(best_point, best_value, n_function_evaluations)`.
///
/// # Errors
/// Returns `ValueError` when `x0` is empty.
fn nelder_mead_minimize<F>(
    f: F,
    x0: &[f64],
    max_iter: usize,
    tol: f64,
) -> QcResult<(Vec<f64>, f64, usize)>
where
    F: Fn(&[f64]) -> f64,
{
    let n = x0.len();
    if n == 0 {
        return Err(OptimizeError::ValueError(
            "Parameter vector must be non-empty".to_string(),
        ));
    }

    // Initial simplex: x0 plus n vertices, each perturbed along one
    // coordinate (absolute 0.05 step near zero, else a 20% relative step
    // floored at 0.05).
    let step = 0.2_f64;
    let mut simplex: Vec<Vec<f64>> = Vec::with_capacity(n + 1);
    simplex.push(x0.to_vec());
    for i in 0..n {
        let mut v = x0.to_vec();
        v[i] += if v[i].abs() < 1e-10 {
            0.05
        } else {
            step * v[i].abs().max(0.05)
        };
        simplex.push(v);
    }

    let mut fvals: Vec<f64> = simplex.iter().map(|v| f(v)).collect();
    let mut n_evals = simplex.len();

    // Standard Nelder–Mead coefficients: reflection (alpha), expansion
    // (gamma_nm), contraction (rho), shrink (sigma).
    let alpha = 1.0;
    let gamma_nm = 2.0;
    let rho = 0.5;
    let sigma = 0.5;
    for _iter in 0..max_iter {
        // Sort vertex indices by objective value, best first. NaNs compare
        // Equal, which keeps the sort total (if degenerate).
        let mut order: Vec<usize> = (0..=n).collect();
        order.sort_by(|&a, &b| {
            fvals[a]
                .partial_cmp(&fvals[b])
                .unwrap_or(std::cmp::Ordering::Equal)
        });

        // Converged when the simplex values have collapsed within `tol`.
        let f_best = fvals[order[0]];
        let f_worst = fvals[order[n]];
        if (f_worst - f_best).abs() < tol {
            let best_params = simplex[order[0]].clone();
            return Ok((best_params, f_best, n_evals));
        }

        // Centroid of all vertices except the worst.
        let mut centroid = vec![0.0; n];
        for &idx in &order[..n] {
            for (k, c) in centroid.iter_mut().enumerate() {
                *c += simplex[idx][k];
            }
        }
        let n_f = n as f64;
        centroid.iter_mut().for_each(|c| *c /= n_f);

        // Reflect the worst vertex through the centroid.
        let worst_idx = order[n];
        let xr: Vec<f64> = centroid
            .iter()
            .zip(&simplex[worst_idx])
            .map(|(&c, &w)| c + alpha * (c - w))
            .collect();
        let fr = f(&xr);
        n_evals += 1;

        let f_second_worst = fvals[order[n - 1]];

        if fr < fvals[order[0]] {
            // Reflection beat the current best: try expanding further out.
            let xe: Vec<f64> = centroid
                .iter()
                .zip(&xr)
                .map(|(&c, &r)| c + gamma_nm * (r - c))
                .collect();
            let fe = f(&xe);
            n_evals += 1;
            if fe < fr {
                simplex[worst_idx] = xe;
                fvals[worst_idx] = fe;
            } else {
                simplex[worst_idx] = xr;
                fvals[worst_idx] = fr;
            }
        } else if fr < f_second_worst {
            // Reflection is merely decent: accept it as-is.
            simplex[worst_idx] = xr;
            fvals[worst_idx] = fr;
        } else {
            // Reflection failed: contract outside (towards the reflected
            // point) when it improved on the worst, otherwise inside
            // (towards the worst vertex).
            let use_reflection = fr < fvals[worst_idx];
            let xc: Vec<f64> = if use_reflection {
                centroid
                    .iter()
                    .zip(&xr)
                    .map(|(&c, &r)| c + rho * (r - c))
                    .collect()
            } else {
                centroid
                    .iter()
                    .zip(&simplex[worst_idx])
                    .map(|(&c, &w)| c + rho * (w - c))
                    .collect()
            };
            let fc = f(&xc);
            n_evals += 1;

            let contraction_success = if use_reflection {
                fc <= fr
            } else {
                fc < fvals[worst_idx]
            };

            if contraction_success {
                simplex[worst_idx] = xc;
                fvals[worst_idx] = fc;
            } else {
                // Contraction failed too: shrink every non-best vertex
                // towards the best one and re-evaluate.
                let best_idx = order[0];
                let best_vertex = simplex[best_idx].clone();
                for i in 1..=n {
                    let idx = order[i];
                    simplex[idx] = best_vertex
                        .iter()
                        .zip(&simplex[idx])
                        .map(|(&b, &v)| b + sigma * (v - b))
                        .collect();
                    fvals[idx] = f(&simplex[idx]);
                    n_evals += 1;
                }
            }
        }
    }

    // Iteration budget exhausted: return the best vertex found so far.
    let best_idx = (0..=n)
        .min_by(|&a, &b| {
            fvals[a]
                .partial_cmp(&fvals[b])
                .unwrap_or(std::cmp::Ordering::Equal)
        })
        .unwrap_or(0);
    Ok((simplex[best_idx].clone(), fvals[best_idx], n_evals))
}
435
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_maxcut_two_nodes_improves() {
        // A single unit-weight edge has optimal cut 1.0; the optimizer
        // should reach at least half of that.
        let graph = MaxCutProblem::new(2, vec![(0, 1, 1.0)]);
        let cfg = QaoaConfig {
            p_layers: 1,
            init_gamma: vec![0.5],
            init_beta: vec![0.5],
            max_iter: 100,
            tol: 1e-5,
        };
        let qaoa = QaoaCircuit::new(graph, cfg);
        let outcome = qaoa.optimize().unwrap();
        assert!(
            outcome.optimal_value >= 0.5,
            "Expected cut ≥ 0.5, got {}",
            outcome.optimal_value
        );
    }

    #[test]
    fn test_qaoa_deterministic_evaluation() {
        let graph = MaxCutProblem::new(2, vec![(0, 1, 1.0)]);
        let cfg = QaoaConfig {
            p_layers: 1,
            init_gamma: vec![0.3],
            init_beta: vec![0.4],
            max_iter: 10,
            tol: 1e-6,
        };
        let qaoa = QaoaCircuit::new(graph, cfg);
        // Two evaluations at identical angles must agree to machine precision.
        let first = qaoa.run(&[0.3], &[0.4]).unwrap();
        let second = qaoa.run(&[0.3], &[0.4]).unwrap();
        assert!(
            (first - second).abs() < 1e-14,
            "Evaluation must be deterministic"
        );
    }

    #[test]
    fn test_parameter_shift_gradient_sign() {
        // Triangle graph: all three edges unit weight.
        let graph = MaxCutProblem::new(3, vec![(0, 1, 1.0), (1, 2, 1.0), (0, 2, 1.0)]);
        let cfg = QaoaConfig {
            p_layers: 1,
            init_gamma: vec![0.5],
            init_beta: vec![0.3],
            max_iter: 50,
            tol: 1e-6,
        };
        let qaoa = QaoaCircuit::new(graph, cfg);
        let gamma = [0.5];
        let beta = [0.3];
        let (d_gamma, d_beta) = parameter_shift_gradient(&qaoa, &gamma, &beta).unwrap();

        assert!(d_gamma[0].is_finite(), "d_gamma must be finite");
        assert!(d_beta[0].is_finite(), "d_beta must be finite");

        // Cross-check d_beta against a direct application of the shift rule.
        let e_plus = qaoa
            .run(&gamma, &[beta[0] + std::f64::consts::FRAC_PI_2])
            .unwrap();
        let e_minus = qaoa
            .run(&gamma, &[beta[0] - std::f64::consts::FRAC_PI_2])
            .unwrap();
        let ps_check = 0.5 * (e_plus - e_minus);
        assert!(
            (d_beta[0] - ps_check).abs() < 1e-12,
            "Parameter shift gradient {:.6} should equal 0.5*(E+ - E-) = {:.6}",
            d_beta[0],
            ps_check
        );

        // A small gradient-descent step on beta should not raise the energy.
        let e0 = qaoa.run(&gamma, &beta).unwrap();
        let lr = 0.1;
        let stepped_beta = [beta[0] - lr * d_beta[0]];
        let e_new = qaoa.run(&gamma, &stepped_beta).unwrap();
        assert!(
            e_new <= e0 + 0.01,
            "Gradient step should not increase energy significantly: {e0:.4} -> {e_new:.4}"
        );
    }

    #[test]
    fn test_best_string_returns_valid_assignment() {
        // Path graph 0-1-2; optimized angles should yield a 3-bit assignment
        // with a non-negative cut.
        let graph = MaxCutProblem::new(3, vec![(0, 1, 1.0), (1, 2, 1.0)]);
        let cfg = QaoaConfig::default();
        let qaoa = QaoaCircuit::new(graph.clone(), cfg);
        let outcome = qaoa.optimize().unwrap();
        let bits = qaoa
            .best_string(&outcome.optimal_gamma, &outcome.optimal_beta)
            .unwrap();
        assert_eq!(bits.len(), 3);
        let cut = graph.cut_value(&bits);
        assert!(cut >= 0.0);
    }
}