//! Tsallis relative annealing optimizer.
use std::{cell::RefCell, rc::Rc};
use ordered_float::NotNan;
use super::{AdaptiveScheduler, GenericLocalSearchOptimizer, LocalSearchOptimizer};
use crate::{
Duration, OptModel,
callback::{OptCallbackFn, OptProgress},
};
/// Acceptance probability based on Tsallis statistics.
///
/// A non-worsening trial (`trial_score <= current_score`) is always accepted.
/// Otherwise the probability is `[1 - (1-q) * beta * d]^{1/(1-q)}`, where
/// `d = (trial - current) / (current - offset + xi)` is the relative score
/// difference with respect to the best score seen so far (`offset`).
/// The result is floored at 0.01 to retain a minimum of exploration.
fn tsallis_transition_prob(
    current_score: f64,
    trial_score: f64,
    offset: f64,
    beta: f64,
    q: f64,
    xi: f64,
) -> f64 {
    let diff = trial_score - current_score;
    // Improving (or equal) trials are accepted unconditionally.
    if diff <= 0.0 {
        return 1.0;
    }
    // Relative worsening, regularized by xi so the denominator stays positive.
    let relative = diff / (current_score - offset + xi);
    let one_minus_q = 1.0 - q;
    let base = 1.0 - one_minus_q * beta * relative;
    // For q > 1 the exponent is negative, so a larger gap yields a smaller
    // probability; .max also maps a NaN base^exp to the 0.01 floor.
    base.powf(1.0 / one_minus_q).max(0.01)
}
/// Optimizer that implements Tsallis relative annealing algorithm
/// This is a generalization of relative annealing using Tsallis statistics.
/// The acceptance probability for worse solutions is [1 - (1-q) * beta * ΔE / (E - E_best + ξ)]^{1/(1-q)},
/// where ΔE = trial - current, E = current, E_best = offset.
/// Assumes q > 1.0.
#[derive(Clone, Copy)]
pub struct TsallisRelativeAnnealingOptimizer {
    // Give up if there is no improvement after this many iterations.
    patience: usize,
    // Number of trial solutions generated and evaluated per iteration.
    n_trials: usize,
    // Return to the current best solution after this many non-improving iterations.
    return_iter: usize,
    // Initial inverse temperature (weight on the relative score difference).
    initial_beta: f64,
    // Scheduler used to adapt beta during optimization.
    scheduler: AdaptiveScheduler,
    // Update beta every this many iterations; 0 disables scheduler updates.
    update_frequency: usize,
    // Tsallis entropic index; assumed > 1.0.
    q: f64,
    // Regularization term ξ added to the acceptance-probability denominator.
    xi: f64,
}
impl TsallisRelativeAnnealingOptimizer {
    /// Creates a new `TsallisRelativeAnnealingOptimizer` with a default
    /// constant-target adaptive scheduler.
    ///
    /// - `patience` : the optimizer will give up
    ///   if there is no improvement of the score after this number of iterations
    /// - `n_trials` : number of trial solutions to generate and evaluate at each iteration
    /// - `return_iter` : returns to the current best solution if there is no improvement after this number of iterations.
    /// - `initial_beta` : initial weight to be multiplied with the relative score difference.
    ///   Recommended value is reciprocal of expected relative score difference.
    /// - `update_frequency` : frequency at which certain parameters (like beta) are updated during optimization.
    /// - `q` : Tsallis parameter, assumed to be > 1.0. Recommended value is 2.5.
    /// - `xi` : parameter ξ in the acceptance probability formula.
    ///   Recommended value is 1.0 for integer objective and 0.1% of the objective value for continuous objective.
    pub fn new(
        patience: usize,
        n_trials: usize,
        return_iter: usize,
        initial_beta: f64,
        update_frequency: usize,
        q: f64,
        xi: f64,
    ) -> Self {
        Self {
            patience,
            n_trials,
            return_iter,
            initial_beta,
            // Default: adapt toward a constant target acceptance ratio.
            scheduler: AdaptiveScheduler::new(
                0.3,
                0.3,
                super::TargetAccScheduleMode::Constant,
                0.05,
            ),
            update_frequency,
            q,
            xi,
        }
    }

    /// Replaces the default scheduler, returning the modified optimizer (builder style).
    pub fn with_scheduler(mut self, scheduler: AdaptiveScheduler) -> Self {
        self.scheduler = scheduler;
        self
    }
}
impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M>
    for TsallisRelativeAnnealingOptimizer
{
    /// Start optimization
    ///
    /// Delegates the search loop to [`GenericLocalSearchOptimizer`], supplying a
    /// Tsallis acceptance-probability closure whose offset (best score) and beta
    /// (inverse temperature) are refreshed on every iteration via a wrapped callback.
    ///
    /// - `model` : the model to optimize
    /// - `initial_solution` : the initial solution to start optimization
    /// - `initial_score` : the initial score of the initial solution
    /// - `n_iter`: maximum iterations
    /// - `time_limit`: maximum iteration time
    /// - `callback` : callback function that will be invoked at the end of each iteration
    fn optimize(
        &self,
        model: &M,
        initial_solution: M::SolutionType,
        initial_score: M::ScoreType,
        n_iter: usize,
        time_limit: Duration,
        callback: &mut dyn OptCallbackFn<M::SolutionType, M::ScoreType>,
    ) -> (M::SolutionType, M::ScoreType) {
        // wrap current best score (offset) and beta (inverse temperature) in Rc<RefCell>
        // to allow mutation in closure; both the transition-probability closure and the
        // callback wrapper below hold clones of the same cells (single-threaded sharing).
        let current_offset = Rc::new(RefCell::new(initial_score.into_inner()));
        let current_beta = Rc::new(RefCell::new(self.initial_beta));
        // create transition probability function; it reads the *latest* offset/beta
        // through the shared cells each time it is called.
        let transition_prob = {
            let offset = current_offset.clone();
            let beta = current_beta.clone();
            move |current: NotNan<f64>, trial: NotNan<f64>| {
                tsallis_transition_prob(
                    current.into_inner(),
                    trial.into_inner(),
                    *offset.borrow(),
                    *beta.borrow(),
                    self.q,
                    self.xi,
                )
            }
        };
        // wrap callback to update offset and beta based on update_frequency
        let mut callback_with_updates = |progress: OptProgress<M::SolutionType, M::ScoreType>| {
            // update offset: progress.score becomes E_best in the acceptance formula
            current_offset.replace(progress.score.into_inner());
            // potentially update beta with scheduler;
            // update_frequency == 0 disables updates (and avoids `% 0` panic)
            if self.update_frequency > 0 && progress.iter % self.update_frequency == 0 {
                let new_beta = self.scheduler.update_temperature(
                    *current_beta.borrow(),
                    progress.iter,
                    n_iter,
                    progress.acceptance_ratio,
                );
                current_beta.replace(new_beta);
            }
            // invoke original callback
            callback(progress);
        };
        // create generic optimizer and run optimization
        let optimizer = GenericLocalSearchOptimizer::new(
            self.patience,
            self.n_trials,
            self.return_iter,
            transition_prob,
        );
        optimizer.optimize(
            model,
            initial_solution,
            initial_score,
            n_iter,
            time_limit,
            &mut callback_with_updates,
        )
    }
}
#[cfg(test)]
mod test {
    use super::tsallis_transition_prob;

    #[test]
    fn test_tsallis_transition_prob() {
        let (beta, q, offset, xi) = (1e1, 1.5, 0.0, 1.0);

        // An improving trial must always be accepted (probability 1).
        assert!(tsallis_transition_prob(1.0, 0.9, offset, beta, q, xi) >= 1.0);

        // For worsening trials the acceptance probability decreases
        // monotonically as the score gap grows.
        let p_small_gap = tsallis_transition_prob(1.0, 1.1, offset, beta, q, xi);
        let p_large_gap = tsallis_transition_prob(1.0, 1.2, offset, beta, q, xi);
        assert!(p_small_gap > p_large_gap);
    }
}