non_convex_opt/algorithms/parallel_tempering/statistics.rs

use crate::utils::opt_prob::FloatNumber as FloatNum;
use nalgebra::{allocator::Allocator, DefaultAllocator, Dim, DimSub, OMatrix, OVector, RealField};

pub struct Statistics<T, N, D>
where
    T: FloatNum + RealField + Send + Sync,
    N: Dim + Send + Sync,
    D: Dim + Send + Sync + DimSub<nalgebra::Const<1>>,
    OVector<T, D>: Send + Sync,
    OVector<T, N>: Send + Sync,
    OVector<bool, N>: Send + Sync,
    OMatrix<T, N, D>: Send + Sync,
    OMatrix<T, D, D>: Send + Sync,
    DefaultAllocator: Allocator<D>
        + Allocator<N, D>
        + Allocator<N>
        + Allocator<D, D>
        + Allocator<<D as DimSub<nalgebra::Const<1>>>::Output>,
{
    _phantom: std::marker::PhantomData<(T, N, D)>,
}

impl<T, N, D> Statistics<T, N, D>
where
    T: FloatNum + RealField + Send + Sync,
    N: Dim + Send + Sync,
    D: Dim + Send + Sync + DimSub<nalgebra::Const<1>>,
    OVector<T, D>: Send + Sync,
    OVector<T, N>: Send + Sync,
    OVector<bool, N>: Send + Sync,
    OMatrix<T, N, D>: Send + Sync,
    OMatrix<T, D, D>: Send + Sync,
    DefaultAllocator: Allocator<D>
        + Allocator<N, D>
        + Allocator<N>
        + Allocator<D, D>
        + Allocator<<D as DimSub<nalgebra::Const<1>>>::Output>,
{
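    /// Per-replica effective sample size, estimated from each replica's
    /// fitness history via the integrated autocorrelation time (see
    /// `compute_ess_for_chain`).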
    pub fn compute_effective_sample_size(fitnesses: &[OVector<T, N>]) -> Vec<f64> {
        let mut ess_values = Vec::with_capacity(fitnesses.len());

        for fitness in fitnesses {
            // Values that cannot be represented as f64 fall back to 0.0.
            let fitness_chain: Vec<f64> =
                fitness.iter().map(|f| f.to_f64().unwrap_or(0.0)).collect();

            let ess = Self::compute_ess_for_chain(&fitness_chain);
            ess_values.push(ess);
        }

        ess_values
    }

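    /// Estimates the effective sample size of a scalar chain as
    /// `ESS = n / tau_int`, where `tau_int = 1 + 2 * sum_{k >= 1} rho_k`
    /// is the integrated autocorrelation time accumulated under an
    /// adaptive truncation window.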
    fn compute_ess_for_chain(chain: &[f64]) -> f64 {
        let n = chain.len();
        // Too few samples for a meaningful autocorrelation estimate.
        if n < 10 {
            return n as f64;
        }

        let mean = chain.iter().sum::<f64>() / n as f64;
        let variance = chain.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / (n - 1) as f64;

        // A (near-)constant chain carries no autocorrelation signal; treat
        // every sample as independent.
        if variance < 1e-12 {
            return n as f64;
        }

        let max_lag = (n / 4).min(200);
        let mut autocorr = Vec::with_capacity(max_lag);

        for lag in 0..max_lag {
            let mut sum = 0.0;
            let count = n - lag;

            for i in 0..count {
                sum += (chain[i] - mean) * (chain[i + lag] - mean);
            }

            // Sample autocorrelation at this lag.
            let rho = sum / (count as f64 * variance);
            autocorr.push(rho);

            // Stop once the correlation has effectively decayed to zero.
            if lag > 5 && rho < 0.01 {
                break;
            }
        }

        // Integrated autocorrelation time tau_int = 1 + 2 * sum_{k >= 1} rho_k,
        // accumulated under a Sokal-style adaptive window: stop growing the
        // window once k >= 6 * tau.
        let mut tau_int = 1.0;
        let mut cumsum = 0.0;

        for (k, &rho) in autocorr.iter().enumerate().skip(1) {
            // Truncate at the first non-positive autocorrelation.
            if rho <= 0.0 {
                break;
            }

            cumsum += rho;
            let current_tau = 1.0 + 2.0 * cumsum;

            if k as f64 >= 6.0 * current_tau {
                break;
            }

            tau_int = current_tau;
        }

        // ESS = n / tau_int, clamped to [1, n]; tau_int already includes the
        // full 1 + 2 * sum correction, so no further rescaling is applied.
        let ess = n as f64 / tau_int;

        ess.clamp(1.0, n as f64)
    }

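    /// Variance of the per-replica best feasible fitness values: a scalar
    /// proxy for how spread out the replica population currently is.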
    pub fn compute_population_diversity(
        fitnesses: &[OVector<T, N>],
        constraints: &[OVector<bool, N>],
        num_replicas: usize,
    ) -> f64 {
        if num_replicas < 2 {
            return 0.0;
        }

        // Best feasible fitness per replica; a replica with no feasible
        // points (or a non-convertible value) contributes f64::NEG_INFINITY.
        let replica_best_fitness: Vec<f64> = (0..num_replicas)
            .map(|i| {
                fitnesses[i]
                    .iter()
                    .zip(constraints[i].iter())
                    .filter_map(|(f, c)| {
                        if *c {
                            Some(f.to_f64().unwrap_or(f64::NEG_INFINITY))
                        } else {
                            None
                        }
                    })
                    .fold(f64::NEG_INFINITY, f64::max)
            })
            .collect();

        let mean_fitness = replica_best_fitness.iter().sum::<f64>() / num_replicas as f64;

        let mut total_variance = 0.0;
        for fitness in replica_best_fitness {
            total_variance += (fitness - mean_fitness).powi(2);
        }

        total_variance / num_replicas as f64
    }
}
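
// A minimal usage sketch, not part of the original module: it assumes `f64`
// implements the crate's `FloatNum` trait and that statically sized dims
// satisfy the allocator bounds above. Names and values are illustrative only.
#[cfg(test)]
mod tests {
    use super::*;
    use nalgebra::{Const, OVector};

    #[test]
    fn ess_and_diversity_smoke_test() {
        // Two replicas, each with a chain of ten fitness samples.
        let fitnesses: Vec<OVector<f64, Const<10>>> = vec![
            OVector::<f64, Const<10>>::from_iterator((0..10).map(|i| i as f64)),
            OVector::<f64, Const<10>>::from_iterator((0..10).map(|i| (i as f64).sin())),
        ];
        // Mark every sample as feasible.
        let constraints: Vec<OVector<bool, Const<10>>> = vec![
            OVector::<bool, Const<10>>::from_element(true),
            OVector::<bool, Const<10>>::from_element(true),
        ];

        type Stats = Statistics<f64, Const<10>, Const<2>>;

        // ESS is clamped to [1, n] per chain.
        let ess = Stats::compute_effective_sample_size(&fitnesses);
        assert_eq!(ess.len(), 2);
        assert!(ess.iter().all(|&e| (1.0..=10.0).contains(&e)));

        // Diversity is a variance, hence non-negative.
        let diversity = Stats::compute_population_diversity(&fitnesses, &constraints, 2);
        assert!(diversity >= 0.0);
    }
}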