//! graphmind_optimization/algorithms/sa.rs
//!
//! Simulated annealing (SA) solver.
1use crate::common::{OptimizationResult, Problem, SolverConfig};
2use ndarray::Array1;
3use rand::prelude::*;
4
/// Simulated annealing solver: a single-point Metropolis random walk
/// with a geometric cooling schedule.
pub struct SASolver {
    // Shared solver settings; `solve` reads `max_iterations` as the
    // evaluation budget (see that method for how population_size is treated).
    pub config: SolverConfig,
    // Starting temperature of the annealing schedule (default 1000.0).
    pub initial_temp: f64,
    // Multiplicative cooling factor applied once per iteration
    // (default 0.95; presumably expected in (0, 1) — values >= 1 would never cool).
    pub cooling_rate: f64,
}
10
11impl SASolver {
12    pub fn new(config: SolverConfig) -> Self {
13        Self {
14            config,
15            initial_temp: 1000.0,
16            cooling_rate: 0.95,
17        }
18    }
19
20    pub fn solve<P: Problem>(&self, problem: &P) -> OptimizationResult {
21        let mut rng = thread_rng();
22        let dim = problem.dim();
23        let (lower, upper) = problem.bounds();
24
25        // 1. Initial Solution
26        let mut current_vars = Array1::zeros(dim);
27        for i in 0..dim {
28            current_vars[i] = rng.gen_range(lower[i]..upper[i]);
29        }
30        let mut current_fitness = problem.fitness(&current_vars);
31
32        let mut best_vars = current_vars.clone();
33        let mut best_fitness = current_fitness;
34
35        let mut history = Vec::with_capacity(self.config.max_iterations);
36        let mut temp = self.initial_temp;
37
38        // Since SA is typically a single-point search, we'll interpret 'population_size'
39        // as number of trials per temperature step if we want to utilize the config.
40        // Or we just run for max_iterations total.
41        // To be consistent with other solvers, let's treat population_size * max_iterations
42        // as the total number of evaluations budget.
43
44        let total_steps = self.config.max_iterations;
45
46        for _step in 0..total_steps {
47            history.push(best_fitness);
48
49            // Generate neighbor
50            let mut next_vars = current_vars.clone();
51            for i in 0..dim {
52                // Perturb slightly (Gaussian or random walk)
53                let range = upper[i] - lower[i];
54                let delta = (rng.gen::<f64>() - 0.5) * range * 0.1;
55                next_vars[i] = (next_vars[i] + delta).clamp(lower[i], upper[i]);
56            }
57
58            let next_fitness = problem.fitness(&next_vars);
59
60            // Acceptance probability
61            let delta_e = next_fitness - current_fitness;
62
63            if delta_e < 0.0 || rng.gen::<f64>() < (-delta_e / temp).exp() {
64                current_vars = next_vars;
65                current_fitness = next_fitness;
66
67                if current_fitness < best_fitness {
68                    best_vars = current_vars.clone();
69                    best_fitness = current_fitness;
70                }
71            }
72
73            // Cool down
74            temp *= self.cooling_rate;
75        }
76
77        OptimizationResult {
78            best_variables: best_vars,
79            best_fitness,
80            history,
81        }
82    }
83}