use num_complex::Complex64;
use super::linear_optical::hafnian;
/// Natural logarithm of `n!`.
///
/// Exact for small inputs (direct log-sum for `n <= 20`); for larger `n`
/// uses the Stirling series including the `1/(12n)` correction term.
fn ln_factorial(n: usize) -> f64 {
    if n <= 1 {
        return 0.0;
    }
    if n <= 20 {
        // Exact: ln(n!) = sum_{k=2}^{n} ln k.
        let mut acc = 0.0;
        for k in 2..=n {
            acc += (k as f64).ln();
        }
        return acc;
    }
    // Stirling: ln n! ~ n ln n - n + (1/2) ln(2*pi*n) + 1/(12n).
    let x = n as f64;
    x * x.ln() - x + 0.5 * (2.0 * std::f64::consts::PI * x).ln() + 1.0 / (12.0 * x)
}
/// Number of size-`k` multisets drawn from `n` items: C(n + k - 1, k).
///
/// This is the dimension of the `k`-photon Fock sector over `n` modes.
/// Intermediate products saturate at `u64::MAX` instead of overflowing.
///
/// Fix: the original computed `n + k - 1` unconditionally, which underflows
/// `usize` for `n == 0 && k == 0` (panic in debug builds, wrap in release).
/// An empty multiset always exists, so `k == 0` now returns 1 explicitly.
fn multiset_coeff(n: usize, k: usize) -> u64 {
    // C(n - 1, 0) = 1 for every n, including n == 0; also avoids the
    // usize underflow of `0 + 0 - 1` below.
    if k == 0 {
        return 1;
    }
    let top = n + k - 1; // safe: k >= 1 here, so n + k >= 1
    if k > top {
        // Only reachable when n == 0: no items to choose from.
        return 0;
    }
    // Use the symmetric, smaller index to shorten the loop.
    let small = k.min(top - k);
    let mut result = 1u64;
    for i in 0..small {
        // Multiply-then-divide keeps each intermediate exact:
        // after iteration i, result == C(top, i + 1).
        result = result.saturating_mul((top - i) as u64) / (i as u64 + 1);
    }
    result
}
/// A Gaussian boson sampler: independent single-mode squeezing on each mode
/// followed by a linear interferometer described by `unitary`.
#[derive(Debug, Clone)]
pub struct GaussianBosonSampler {
/// Number of optical modes.
pub n_modes: usize,
/// Squeezing magnitudes r_i, one per mode (clamped to >= 0 in `new`).
pub squeezing_params: Vec<f64>,
/// Interferometer matrix, row-major; assumed unitary — not validated here.
pub unitary: Vec<Vec<Complex64>>,
}
impl GaussianBosonSampler {
    /// Creates a sampler. Negative squeezing magnitudes are clamped to 0
    /// (`f64::max` also maps a NaN input to 0.0).
    pub fn new(n_modes: usize, squeezing: Vec<f64>, unitary: Vec<Vec<Complex64>>) -> Self {
        let squeezing_params: Vec<f64> = squeezing.iter().map(|r| r.max(0.0)).collect();
        Self {
            n_modes,
            squeezing_params,
            unitary,
        }
    }

    /// Mean photon number sinh^2(r) of one squeezed mode; 0 for an
    /// out-of-range mode index.
    pub fn mean_photon_number(&self, mode: usize) -> f64 {
        match self.squeezing_params.get(mode) {
            Some(&r) => r.sinh() * r.sinh(),
            None => 0.0,
        }
    }

    /// Total expected photon count: sum of sinh^2(r_i) over all modes.
    pub fn total_mean_photons(&self) -> f64 {
        let mut total = 0.0;
        for &r in &self.squeezing_params {
            total += r.sinh() * r.sinh();
        }
        total
    }

    /// Adjacency matrix A = U * diag(tanh r) * U^T (plain transpose, not
    /// conjugate transpose), whose hafnians give pattern probabilities.
    pub fn adjacency_matrix(&self) -> Vec<Vec<Complex64>> {
        let m = self.n_modes;
        // Per-mode tanh(r); modes beyond the squeezing vector act as r = 0.
        let tanh_r: Vec<f64> = (0..m)
            .map(|j| self.squeezing_params.get(j).copied().unwrap_or(0.0).tanh())
            .collect();
        // B = U * diag(tanh r): scale column j of U by tanh(r_j).
        let b: Vec<Vec<Complex64>> = (0..m)
            .map(|i| (0..m).map(|j| self.unitary[i][j] * tanh_r[j]).collect())
            .collect();
        // A = B * U^T.
        (0..m)
            .map(|i| {
                (0..m)
                    .map(|j| {
                        let mut acc = Complex64::new(0.0, 0.0);
                        for k in 0..m {
                            acc += b[i][k] * self.unitary[j][k];
                        }
                        acc
                    })
                    .collect()
            })
            .collect()
    }

    /// Probability of detecting no photons at all: product of 1/cosh(r_i).
    pub fn vacuum_probability(&self) -> f64 {
        let mut denom = 1.0;
        for &r in &self.squeezing_params {
            denom *= r.cosh();
        }
        // cosh >= 1, so denom >= 1; this guard is purely defensive.
        if denom < f64::EPSILON {
            0.0
        } else {
            1.0 / denom
        }
    }

    /// Probability of a specific photon-count pattern:
    /// p_vac * |Haf(A_S)|^2 / prod(n_i!), where A_S repeats row/column i of
    /// the adjacency matrix `pattern[i]` times. Returns 0 when the pattern
    /// length does not match the mode count.
    pub fn pattern_probability(&self, pattern: &[usize]) -> f64 {
        if pattern.len() != self.n_modes {
            return 0.0;
        }
        let total_photons: usize = pattern.iter().sum();
        if total_photons == 0 {
            return self.vacuum_probability();
        }
        let a = self.adjacency_matrix();
        // Mode index i appears pattern[i] times in the submatrix selection.
        let rows: Vec<usize> = pattern
            .iter()
            .enumerate()
            .flat_map(|(i, &cnt)| std::iter::repeat(i).take(cnt))
            .collect();
        let a_s: Vec<Vec<Complex64>> = rows
            .iter()
            .map(|&ri| rows.iter().map(|&ci| a[ri][ci]).collect())
            .collect();
        let haf_sq = hafnian(&a_s).norm_sqr();
        // prod(n_i!) computed in log space to avoid overflow.
        let ln_fact: f64 = pattern.iter().map(|&cnt| ln_factorial(cnt)).sum();
        self.vacuum_probability() * haf_sq / ln_fact.exp()
    }

    /// Crude deterministic "sample": each mode reports its mean photon
    /// number rounded to the nearest integer.
    pub fn approximate_sample(&self) -> Vec<usize> {
        let mut sample = Vec::with_capacity(self.n_modes);
        for mode in 0..self.n_modes {
            sample.push((self.mean_photon_number(mode) + 0.5) as usize);
        }
        sample
    }
}
/// Parameters certifying the computational regime of a boson-sampling
/// experiment: photon count, mode count, and the dilute-regime flag.
#[derive(Debug, Clone)]
pub struct BosonSamplingCertificate {
/// Number of injected photons n.
pub n_photons: usize,
/// Number of interferometer modes m.
pub n_modes: usize,
/// True when m >= n^2 (collision-free / dilute regime; set in `new`).
pub collision_free: bool,
}
impl BosonSamplingCertificate {
    /// Certificate for `n` photons in `m` modes; the collision-free
    /// (dilute) regime requires m >= n^2.
    pub fn new(n: usize, m: usize) -> Self {
        Self {
            n_photons: n,
            n_modes: m,
            collision_free: m >= n * n,
        }
    }

    /// Dimension of the symmetric (bosonic) Fock sector: C(m + n - 1, n).
    pub fn hilbert_space_dim(&self) -> u64 {
        multiset_coeff(self.n_modes, self.n_photons)
    }

    /// Rough classical simulation cost, ~2^n * n^2 hafnian-style operations.
    pub fn classical_cost_ops(&self) -> f64 {
        let n = self.n_photons as f64;
        2.0_f64.powf(n) * n * n
    }

    /// Heuristic advantage threshold: at least 50 photons in the dilute
    /// regime.
    pub fn has_quantum_advantage(&self) -> bool {
        self.collision_free && self.n_photons >= 50
    }

    /// Linear cross-entropy benchmark score D * <p_m, p_i> - 1; returns 0
    /// on empty or mismatched-length inputs.
    pub fn xeb_score(measured_probs: &[f64], ideal_probs: &[f64]) -> f64 {
        if ideal_probs.is_empty() || measured_probs.len() != ideal_probs.len() {
            return 0.0;
        }
        let mut inner = 0.0;
        for (&p_m, &p_i) in measured_probs.iter().zip(ideal_probs.iter()) {
            inner += p_m * p_i;
        }
        ideal_probs.len() as f64 * inner - 1.0
    }

    /// XEB score an ideal noiseless device would achieve.
    pub fn ideal_xeb_score(&self) -> f64 {
        1.0
    }

    /// Lower bound 1 - 1/sqrt(D) on the total-variation distance from the
    /// uniform distribution over the Fock sector.
    pub fn total_variation_distance_lower_bound(&self) -> f64 {
        let dim = self.hilbert_space_dim() as f64;
        if dim < 1.0 {
            return 0.0;
        }
        (1.0 - dim.sqrt().recip()).max(0.0)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Absolute-tolerance floating-point comparison.
    fn close(x: f64, y: f64, eps: f64) -> bool {
        (x - y).abs() < eps
    }

    /// n x n complex identity matrix (trivial interferometer).
    fn eye(n: usize) -> Vec<Vec<Complex64>> {
        (0..n)
            .map(|i| {
                (0..n)
                    .map(|j| {
                        if i == j {
                            Complex64::new(1.0, 0.0)
                        } else {
                            Complex64::new(0.0, 0.0)
                        }
                    })
                    .collect()
            })
            .collect()
    }

    #[test]
    fn test_mean_photon_number() {
        let gbs = GaussianBosonSampler::new(2, vec![1.0, 0.5], eye(2));
        assert!(close(gbs.mean_photon_number(0), 1.0_f64.sinh().powi(2), 1e-12));
        assert!(close(gbs.mean_photon_number(1), 0.5_f64.sinh().powi(2), 1e-12));
    }

    #[test]
    fn test_total_mean_photons() {
        let gbs = GaussianBosonSampler::new(2, vec![1.0, 1.0], eye(2));
        assert!(close(gbs.total_mean_photons(), 2.0 * 1.0_f64.sinh().powi(2), 1e-12));
    }

    #[test]
    fn test_vacuum_probability_no_squeezing() {
        let gbs = GaussianBosonSampler::new(3, vec![0.0; 3], eye(3));
        assert!(close(gbs.vacuum_probability(), 1.0, 1e-12));
    }

    #[test]
    fn test_vacuum_probability_with_squeezing() {
        let gbs = GaussianBosonSampler::new(1, vec![1.0], eye(1));
        assert!(close(gbs.vacuum_probability(), 1.0 / 1.0_f64.cosh(), 1e-12));
    }

    #[test]
    fn test_adjacency_matrix_identity_interferometer() {
        // With U = I, A reduces to diag(tanh r).
        let gbs = GaussianBosonSampler::new(2, vec![0.5, 0.8], eye(2));
        let a = gbs.adjacency_matrix();
        assert!(close(a[0][0].re, 0.5_f64.tanh(), 1e-12));
        assert!(close(a[1][1].re, 0.8_f64.tanh(), 1e-12));
        assert!(close(a[0][1].norm(), 0.0, 1e-12));
    }

    #[test]
    fn test_hilbert_space_dim() {
        // C(3 + 2 - 1, 2) = C(4, 2) = 6.
        assert_eq!(BosonSamplingCertificate::new(2, 3).hilbert_space_dim(), 6);
    }

    #[test]
    fn test_quantum_advantage_threshold() {
        assert!(BosonSamplingCertificate::new(50, 2500).has_quantum_advantage());
        assert!(!BosonSamplingCertificate::new(5, 25).has_quantum_advantage());
    }

    #[test]
    fn test_collision_free_regime() {
        assert!(BosonSamplingCertificate::new(3, 9).collision_free);
        assert!(!BosonSamplingCertificate::new(3, 8).collision_free);
    }

    #[test]
    fn test_xeb_score_ideal() {
        // Uniform measured == uniform ideal gives D * (1/D) - 1 = 0.
        let ideal = vec![0.25_f64; 4];
        let xeb = BosonSamplingCertificate::xeb_score(&ideal, &ideal);
        assert!(close(xeb, 0.0, 1e-12));
    }

    #[test]
    fn test_classical_cost_ops_scaling() {
        let small = BosonSamplingCertificate::new(10, 100);
        let large = BosonSamplingCertificate::new(20, 400);
        assert!(large.classical_cost_ops() > small.classical_cost_ops() * 1000.0);
    }

    #[test]
    fn test_pattern_probability_vacuum() {
        // The all-zeros pattern must reproduce the vacuum probability.
        let gbs = GaussianBosonSampler::new(2, vec![0.5, 0.3], eye(2));
        assert!(close(gbs.pattern_probability(&[0, 0]), gbs.vacuum_probability(), 1e-12));
    }
}