gosh_fire/fire.rs
// [[file:../fire.note::*imports][imports:1]]
//! The Fast-Inertial-Relaxation-Engine (FIRE) algorithm
//!
//! This method is stable with respect to random errors in the potential energy
//! and force. FIRE has strict adherence to minimizing forces, which makes
//! constrained minimization easy.

use vecfx::*;

use crate::cache::CachedProblem;
use crate::common::*;
use crate::GradientBasedMinimizer;
use crate::Progress;
use crate::Termination;
use crate::TerminationCriteria;
// imports:1 ends here

// [[file:../fire.note::6cd7c459][6cd7c459]]
/// The Fast-Inertial-Relaxation-Engine (FIRE) algorithm
///
/// # Notes from the paper
///
/// * `n_min` larger than 1 (at least a few smooth steps after freezing);
/// * `f_inc` larger than but near to one (avoid too fast acceleration);
/// * `f_dec` smaller than 1 but much larger than zero (avoid too heavy slowing down);
/// * `alpha_start` larger than, but near to, zero (avoid too heavy damping);
/// * `f_alpha` smaller than, but near to, one (mixing is efficient some time after restart).
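///
/// # Example
///
/// A minimal configuration sketch using the builder-style setters defined
/// below (the values shown are illustrative, not recommendations):
///
/// ```ignore
/// let fire = FIRE::default()
///     .with_max_step(0.2)            // largest allowed displacement per step
///     .with_max_cycles(200)          // termination: maximum number of cycles
///     .with_max_gradient_norm(1e-4); // termination: max force/gradient norm
/// ```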
#[derive(Debug, Clone)]
pub struct FIRE {
    /// The maximum size for an optimization step. According to the paper, this
    /// is the only parameter that needs to be adjusted by the user.
    ///
    /// The default value is 0.10.
    pub max_step: f64,

    /// Factor used to decrease the alpha-parameter when going downhill.
    ///
    /// The default value is 0.99.
    pub f_alpha: f64,

    /// Initial alpha-parameter.
    ///
    /// The default value is 0.1.
    pub alpha_start: f64,

    /// Factor used to increase the time step when going downhill.
    ///
    /// The default value is 1.1.
    pub f_inc: f64,

    /// Factor used to decrease the time step when going uphill.
    ///
    /// The default value is 0.5.
    pub f_dec: f64,

    /// Minimum number of iterations ("latency" time) performed before the time
    /// step may be increased, which is important for the stability of the
    /// algorithm.
    ///
    /// The default value is 5.
    pub n_min: usize,

    /// Adaptive time step for integration of the equations of motion. The
    /// initial value is 0.1.
    dt: f64,

    /// The maximum time step as in the paper. Do not change this; change
    /// `max_step` instead.
    dt_max: f64,

    /// The minimum time step when decreasing `dt`. The default is 0.02.
    dt_min: f64,

    /// Adaptive parameter that controls the velocity used to evolve the system.
    alpha: f64,

    /// Current velocity
    velocity: Option<Velocity>,

    /// Displacement vector
    displacement: Option<Displacement>,

    /// Number of consecutive downhill steps
    nsteps: usize,

    /// Default termination criteria
    termination: Termination,

    /// MD scheme
    scheme: MdScheme,

    /// Apply line search for optimal step size.
    use_line_search: bool,
}

impl Default for FIRE {
    fn default() -> Self {
        FIRE {
            // default parameters taken from the original paper
            dt_max: 1.00,
            alpha_start: 0.10,
            f_alpha: 0.99,
            f_dec: 0.50,
            f_inc: 1.10,
            // pele: 0.5, ase: 0.2
            max_step: 0.10,
            n_min: 5,

            // counters or adaptive parameters
            dt: 0.10,
            dt_min: 0.02,
            alpha: 0.10,
            nsteps: 0,
            velocity: None,
            displacement: None,

            // others
            termination: Termination::default(),
            scheme: MdScheme::default(),
            use_line_search: false,
        }
    }
}
// 6cd7c459 ends here

// [[file:../fire.note::*scheme][scheme:1]]
/// MD integration formulations for the position and velocity updates
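///
/// Using D for the displacement, V for the velocity, F for the force and dt
/// for the time step, the two schemes implemented below update one step as
/// (velocity first, then displacement, matching the call order in `propagate`):
///
/// * Velocity Verlet:     V += 0.5 · dt · [F(n) + F(n+1)];  D = dt · V + 0.5 · dt² · F
/// * Semi-implicit Euler: V += dt · F;  D = dt · V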
#[derive(Clone, Copy, Debug)]
pub enum MdScheme {
    /// Velocity Verlet
    VelocityVerlet,
    /// Semi-implicit Euler algorithm. I think this is the algorithm implemented
    /// in ASE.
    SemiImplicitEuler,
}

impl Default for MdScheme {
    fn default() -> Self {
        MdScheme::VelocityVerlet
        // MdScheme::SemiImplicitEuler
    }
}
// scheme:1 ends here

// [[file:../fire.note::37ff34f7][37ff34f7]]
impl FIRE {
    /// Set the maximum size for an optimization step.
    pub fn with_max_step(mut self, maxstep: f64) -> Self {
        assert!(maxstep.is_sign_positive(), "step size should be a positive number!");

        self.max_step = maxstep;
        self
    }

    /// Set the minimum time step used when decreasing `dt`.
    pub fn with_dt_min(mut self, dt_min: f64) -> Self {
        self.dt_min = dt_min;
        self
    }

    /// Set the MD scheme for the position and velocity updates.
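    ///
    /// Accepted values are `"VV"` (Velocity Verlet) and `"SE"` or `"ASE"`
    /// (semi-implicit Euler); any other value panics via `unimplemented!()`.
    ///
    /// ```ignore
    /// let fire = FIRE::default().with_md_scheme("SE");
    /// ```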
    pub fn with_md_scheme(mut self, scheme: &str) -> Self {
        match scheme {
            "SE" => self.scheme = MdScheme::SemiImplicitEuler,
            "VV" => self.scheme = MdScheme::VelocityVerlet,
            "ASE" => self.scheme = MdScheme::SemiImplicitEuler,
            _ => unimplemented!(),
        }
        self
    }

    /// Set the maximum number of cycles for termination.
    pub fn with_max_cycles(mut self, n: usize) -> Self {
        self.termination.max_cycles = n;
        self
    }

    /// Set the maximum gradient/force norm for termination.
    pub fn with_max_gradient_norm(mut self, gmax: f64) -> Self {
        assert!(gmax.is_sign_positive(), "gmax: bad parameter!");
        self.termination.max_gradient_norm = gmax;

        self
    }

    /// Enable line search for an optimal step size.
    ///
    /// The default is no line search.
    pub fn with_line_search(mut self) -> Self {
        self.use_line_search = true;
        self
    }
}
// 37ff34f7 ends here

// [[file:../fire.note::*core][core:1]]
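// One FIRE step, following the paper's F1-F4 labels used in `propagate` below:
//   MD: integrate one MD step to obtain a trial displacement (capped by `max_step`)
//   F1: compute the power P = F · V
//   F2: mix the velocity towards the force direction with weight `alpha`
//   F3: if going downhill for more than `n_min` steps, increase `dt` and scale down `alpha`
//   F4: if going uphill, decrease `dt`, reset `alpha` and zero the velocity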
impl FIRE {
    /// Propagate the system for one simulation step using the FIRE algorithm.
    fn propagate<E>(mut self, force_prev: &mut [f64], force: &mut [f64], problem: &mut CachedProblem<E>) -> Self
    where
        E: FnMut(&[f64], &mut [f64]) -> f64,
    {
        // F0. prepare data
        let n = force.len();
        let mut velocity = self.velocity.unwrap_or(Velocity(vec![0.0; n]));
        // cache the displacement vector to avoid reallocation
        let mut displacement = self.displacement.unwrap_or(Displacement(vec![0.0; n]));

        // MD. calculate the displacement vector based on a regular MD stepping algorithm
        // update the internal velocity
        velocity.take_md_step(&force_prev, &force, self.dt, self.scheme);
        displacement.take_md_step(&force, &velocity, self.dt, self.scheme);
        displacement.rescale(self.max_step);

        // perform line search
        let nls = if self.use_line_search {
            info!("line search for optimal step size using MoreThuente algorithm.");
            40
        } else {
            1
        };
        let ls = linesearch::linesearch().with_algorithm("MoreThuente");

        let phi = |trial_step, out: &mut linesearch::Output| {
            // restore position
            problem.revert();
            // update value and gradient at position `x`
            problem.take_line_step(&displacement, trial_step);
            out.fx = problem.value();
            out.gx = displacement.vecdot(&problem.gradient());

            Ok(())
        };
        let _ = ls.find(nls, phi);

        // save state
        force.vecncpy(problem.gradient());
        force_prev.vecncpy(problem.gradient_prev());

        // F1. calculate the power P = F·V for the uphill/downhill check
        let downhill = force.vecdot(&velocity).is_sign_positive();

        // F2. adjust velocity
        velocity.adjust(force, self.alpha);

        // F3 & F4: check the direction: going downhill or uphill
        if downhill {
            // F3. going downhill
            // increase the time step if we have gone downhill often enough
            if self.nsteps > self.n_min {
                self.dt = self.dt_max.min(self.dt * self.f_inc);
                self.alpha *= self.f_alpha;
            }
            // increment step counter
            self.nsteps += 1;
        } else {
            // F4. going uphill
            self.dt = self.dt_min.max(self.dt * self.f_dec);
            // reset alpha
            self.alpha = self.alpha_start;
            // reset step counter
            self.nsteps = 0;
            // reset velocity to zero
            velocity.reset();
        }

        self.velocity = Some(velocity);
        self.displacement = Some(displacement);
        self
    }
}
// core:1 ends here

// [[file:../fire.note::*displacement][displacement:2]]
/// Represents the displacement vector
#[derive(Debug, Clone)]
pub(crate) struct Displacement(Vec<f64>);

impl Displacement {
    /// Update particle displacement vector by performing a regular MD step
    ///
    /// Displacement = X(n+1) - X(n)
    ///
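    /// For a single coordinate with the Velocity Verlet scheme and, say,
    /// V = 1.0, F = 2.0, dt = 0.1: Displacement = 0.1·1.0 + 0.5·0.1²·2.0 = 0.11.
    ///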
    pub fn take_md_step(
        &mut self,
        force: &[f64],    // F(n)
        velocity: &[f64], // V(n)
        dt: f64,          // Δt
        scheme: MdScheme,
    ) {
        debug_assert!(
            velocity.len() == force.len(),
            "the sizes of input vectors are different!"
        );

        self.0.veccpy(velocity);
        match scheme {
            // Velocity Verlet (VV) integration formula.
            //
            // X(n+1) - X(n) = dt * V(n) + 0.5 * F(n) * dt^2
            MdScheme::VelocityVerlet => {
                // D = dt * V
                self.0.vecscale(dt);
                // D += 0.5 * dt^2 * F
                self.0.vecadd(force, 0.5 * dt.powi(2));
            }
            // Semi-implicit Euler (SE)
            //
            // D = dt * V
            MdScheme::SemiImplicitEuler => {
                self.0.vecscale(dt);
            }
        }
    }

    /// Scale the displacement vector if its norm exceeds a given cutoff.
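    ///
    /// For example, a displacement of `[3.0, 4.0]` has norm 5.0; with
    /// `max_disp = 1.0` it is scaled by 0.2, giving `[0.6, 0.8]`.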
    pub fn rescale(&mut self, max_disp: f64) {
        // get the 2-norm of the whole displacement vector
        let norm = self.0.vec2norm();

        // scale the displacement vector if it is too large
        if norm > max_disp {
            let s = max_disp / norm;
            self.0.vecscale(s);
        }
    }
}

// Deref coercion: use Displacement as a normal vec
use std::ops::Deref;
impl Deref for Displacement {
    type Target = Vec<f64>;

    fn deref(&self) -> &Vec<f64> {
        &self.0
    }
}
// displacement:2 ends here

// [[file:../fire.note::*velocity][velocity:2]]
/// Represents the velocity vector
///
/// # Example
///
/// ```ignore
/// let mut v = Velocity(vec![0.1, 0.2, 0.3]);
/// v.reset();
/// assert_eq!(0.0, v[1]);
/// ```
///
#[derive(Debug, Clone)]
pub(crate) struct Velocity(Vec<f64>);

impl Deref for Velocity {
    type Target = Vec<f64>;

    fn deref(&self) -> &Vec<f64> {
        &self.0
    }
}

impl Velocity {
    /// Reset velocity to zero
    pub fn reset(&mut self) {
        for i in 0..self.0.len() {
            self.0[i] = 0.0;
        }
    }

    /// Adjust the velocity by mixing in the force direction:
    ///
    /// V = (1 - alpha) · V + alpha · F / |F| · |V|
    ///
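    /// For example, with V = [3, 4] (|V| = 5), F = [1, 0] (|F| = 1) and
    /// alpha = 0.1: V = 0.9·[3, 4] + 0.1·5·[1, 0] = [3.2, 3.6].
    ///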
    pub fn adjust(&mut self, force: &[f64], alpha: f64) {
        let vnorm = self.0.vec2norm();
        let fnorm = force.vec2norm();

        // V = (1-alpha) · V
        self.0.vecscale(1.0 - alpha);

        // V += alpha · F / |F| · |V|
        self.0.vecadd(force, alpha * vnorm / fnorm);
    }

    /// Update the velocity by one MD step using the selected scheme.
    pub fn take_md_step(
        &mut self,
        force_this: &[f64], // F(n)
        force_next: &[f64], // F(n+1)
        dt: f64,            // Δt
        scheme: MdScheme,
    ) {
        match scheme {
            // Update velocity using the Velocity Verlet (VV) formulation.
            //
            // V(n+1) = V(n) + 0.5 · dt · [F(n) + F(n+1)]
            MdScheme::VelocityVerlet => {
                self.0.vecadd(force_this, 0.5 * dt);
                self.0.vecadd(force_next, 0.5 * dt);
            }
            // Semi-implicit Euler (SE)
            //
            // V(n+1) = V(n) + dt · F(n)
            MdScheme::SemiImplicitEuler => {
                self.0.vecadd(force_this, dt);
            }
        }
    }
}
// velocity:2 ends here

// [[file:../fire.note::*entry/old][entry/old:1]]
impl GradientBasedMinimizer for FIRE {
    /// Minimize with user-defined termination criteria / monitor.
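    ///
    /// A minimal sketch minimizing a quadratic function; the closure writes
    /// the gradient into `g` and returns the function value. Passing `None`
    /// for the monitor needs a concrete type annotation; `Termination` is
    /// assumed here to implement `TerminationCriteria`.
    ///
    /// ```ignore
    /// let mut x = vec![1.0, -2.0];
    /// FIRE::default().with_max_cycles(500).minimize_alt(
    ///     &mut x,
    ///     |x, g| {
    ///         for (gi, xi) in g.iter_mut().zip(x) {
    ///             *gi = 2.0 * xi; // gradient of sum(x_i^2)
    ///         }
    ///         x.iter().map(|xi| xi * xi).sum()
    ///     },
    ///     None::<Termination>,
    /// );
    /// ```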
    fn minimize_alt<E, G>(mut self, x: &mut [f64], f: E, mut stopping: Option<G>)
    where
        E: FnMut(&[f64], &mut [f64]) -> f64,
        G: TerminationCriteria,
    {
        let mut problem = CachedProblem::new(x, f);

        // Check convergence.
        // Make sure that the initial variables are not already a minimizer.
        if problem.gradient().vec2norm() <= self.termination.max_gradient_norm {
            info!("already converged.");
            return;
        }

        // The FIRE algorithm uses forces instead of gradients
        let mut force = problem.gradient().to_vec();
        force.vecscale(-1.0);
        let mut force_prev = force.clone();
        for i in 1.. {
            self = self.propagate(&mut force_prev, &mut force, &mut problem);
            if let Some(ref displ) = self.displacement {
                let fx = problem.value();

                let progress = Progress {
                    x,
                    fx,
                    step: 1.0,
                    niter: i,
                    gx: &force,
                    gnorm: force.vec2norm(),
                    displacement: displ,
                    ncall: problem.ncalls(),
                };

                if let Some(ref mut stopping) = stopping {
                    if stopping.met(&progress) {
                        break;
                    }
                }

                if self.termination.met(&progress) {
                    info!("normal termination!");
                    break;
                }
            } else {
                unreachable!()
            }
        }
    }
}
// entry/old:1 ends here

// [[file:../fire.note::*core/iter][core/iter:1]]
use crate::base::Problem;

impl FIRE {
    /// Propagate the system for one simulation step using the FIRE algorithm.
    fn propagate_new<U>(mut self, problem: &mut Problem<U>) -> Self {
        // The FIRE algorithm uses forces instead of gradients
        let mut force = problem.gradient().to_vec();
        force.vecscale(-1.0);
        let mut force_prev = force.clone();

        // F0. prepare data
        let n = force.len();
        let mut velocity = self.velocity.unwrap_or(Velocity(vec![0.0; n]));
        // cache the displacement vector to avoid reallocation
        let mut displacement = self.displacement.unwrap_or(Displacement(vec![0.0; n]));

        // MD. calculate the displacement vector based on a regular MD stepping algorithm
        // update the internal velocity
        velocity.take_md_step(&force_prev, &force, self.dt, self.scheme);
        displacement.take_md_step(&force, &velocity, self.dt, self.scheme);
        displacement.rescale(self.max_step);

        // perform line search
        let nls = if self.use_line_search {
            info!("line search for optimal step size using MoreThuente algorithm.");
            40
        } else {
            1
        };
        let ls = linesearch::linesearch().with_algorithm("MoreThuente");

        let phi = |trial_step, out: &mut linesearch::Output| {
            // restore position
            problem.revert();
            // update value and gradient at position `x`
            problem.take_line_step(&displacement, trial_step);
            out.fx = problem.value();
            out.gx = displacement.vecdot(&problem.gradient());

            Ok(())
        };
        let _ = ls.find(nls, phi);

        // save state
        force.vecncpy(problem.gradient());
        force_prev.vecncpy(problem.gradient_prev());

        // F1. calculate the power P = F·V for the uphill/downhill check
        let downhill = force.vecdot(&velocity).is_sign_positive();

        // F2. adjust velocity
        velocity.adjust(&force, self.alpha);

        // F3 & F4: check the direction: going downhill or uphill
        if downhill {
            // F3. going downhill
            // increase the time step if we have gone downhill often enough
            if self.nsteps > self.n_min {
                self.dt = self.dt_max.min(self.dt * self.f_inc);
                self.alpha *= self.f_alpha;
            }
            // increment step counter
            self.nsteps += 1;
        } else {
            // F4. going uphill
            self.dt = self.dt_min.max(self.dt * self.f_dec);
            // reset alpha
            self.alpha = self.alpha_start;
            // reset step counter
            self.nsteps = 0;
            // reset velocity to zero
            velocity.reset();
        }

        self.velocity = Some(velocity);
        self.displacement = Some(displacement);
        self
    }
}
// core/iter:1 ends here

// [[file:../fire.note::*entry/iter][entry/iter:1]]
use crate::base::{EvaluateFunction, Input, Output};

/// Iterator over optimization iterations.
pub struct FireIter<'a, U> {
    fire: Option<FIRE>,
    problem: Option<Problem<'a, U>>,
}

impl<'a, U> FireIter<'a, U> {
    /// Advance the optimization by one FIRE iteration.
    fn next_iter(&mut self) {
        let mut problem = self.problem.take().expect("fire problem");
        let fire = self.fire.take().expect("fire fire");

        self.fire = fire.propagate_new(&mut problem).into();
        self.problem = problem.into();
    }
}

impl FIRE {
    /// Iterate over the minimization iterations of the FIRE algorithm.
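    ///
    /// A minimal sketch of driving the optimization manually; the exact shape
    /// of `f` depends on the `EvaluateFunction` trait, and `x0`/`f` are
    /// placeholders here. The `Progress` fields follow their use in this module:
    ///
    /// ```ignore
    /// for progress in FIRE::default().minimize_iter(x0, f).take(100) {
    ///     println!("fx = {:.6}, |g| = {:.6}", progress.fx, progress.gnorm);
    ///     if progress.gnorm < 1e-4 {
    ///         break;
    ///     }
    /// }
    /// ```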
    pub fn minimize_iter<'a, E: 'a, U>(self, x0: Vec<f64>, f: E) -> FireIter<'a, U>
    where
        E: EvaluateFunction<U>,
    {
        FireIter {
            fire: self.into(),
            problem: Problem::new(x0, f).into(),
        }
    }
}

/// Get an iterator over the optimization progress. The `Progress` item carries
/// user data, i.e. the return value of the user function that evaluates the
/// function value and gradient.
impl<'a, U> Iterator for FireIter<'a, U> {
    type Item = crate::base::Progress<U>;

    fn next(&mut self) -> Option<Self::Item> {
        // FIXME: is it really necessary?
        // Check convergence.
        // Make sure that the current variables are not already a minimizer.
        let gnorm = self.problem.as_mut().unwrap().gradient().vec2norm();
        if gnorm <= self.fire.as_ref().unwrap().termination.max_gradient_norm {
            info!("already converged.");
            return None;
        }
        self.next_iter();

        let problem = self.problem.as_mut().unwrap();
        let fx = problem.value();
        let ncalls = problem.ncalls();
        let data = problem.user_data.take().expect("user data");
        let progress = crate::base::Progress {
            fx,
            gnorm,
            ncalls,
            extra: data,
        };

        Some(progress)
    }
}
// entry/iter:1 ends here