//! Grey Wolf Optimizer (GWO) solver.
//!
//! File: graphmind_optimization/algorithms/gwo.rs

1use crate::common::{Individual, OptimizationResult, Problem, SolverConfig};
2use ndarray::Array1;
3use rand::prelude::*;
4
/// Grey Wolf Optimizer (GWO) solver.
///
/// Thin wrapper around a [`SolverConfig`]; the actual search is performed by
/// [`GWOSolver::solve`].
pub struct GWOSolver {
    /// Run parameters; `solve` reads `population_size` and `max_iterations`.
    pub config: SolverConfig,
}
8
9impl GWOSolver {
10    pub fn new(config: SolverConfig) -> Self {
11        Self { config }
12    }
13
14    pub fn solve<P: Problem>(&self, problem: &P) -> OptimizationResult {
15        let mut rng = thread_rng();
16        let dim = problem.dim();
17        let (lower, upper) = problem.bounds();
18
19        // 1. Initialize Population (Wolves)
20        let mut population: Vec<Individual> = (0..self.config.population_size)
21            .map(|_| {
22                let mut vars = Array1::zeros(dim);
23                for i in 0..dim {
24                    vars[i] = rng.gen_range(lower[i]..upper[i]);
25                }
26                let fitness = problem.fitness(&vars);
27                Individual::new(vars, fitness)
28            })
29            .collect();
30
31        // Initialize Alpha, Beta, Delta
32        let mut alpha = population[0].clone();
33        let mut beta = population[0].clone();
34        let mut delta = population[0].clone();
35
36        // Reset fitness to infinity (minimization)
37        alpha.fitness = f64::INFINITY;
38        beta.fitness = f64::INFINITY;
39        delta.fitness = f64::INFINITY;
40
41        let mut history = Vec::with_capacity(self.config.max_iterations);
42
43        for iter in 0..self.config.max_iterations {
44            // Update Alpha, Beta, Delta
45            for ind in &population {
46                if ind.fitness < alpha.fitness {
47                    // Shift down
48                    delta = beta.clone();
49                    beta = alpha.clone();
50                    alpha = ind.clone();
51                } else if ind.fitness < beta.fitness && ind.fitness > alpha.fitness {
52                    delta = beta.clone();
53                    beta = ind.clone();
54                } else if ind.fitness < delta.fitness && ind.fitness > beta.fitness {
55                    delta = ind.clone();
56                }
57            }
58
59            history.push(alpha.fitness);
60
61            let a = 2.0 - 2.0 * (iter as f64 / self.config.max_iterations as f64); // linearly decreases from 2 to 0
62
63            // Update positions of Omegas
64            for wolf in population.iter_mut() {
65                let mut new_vars = Array1::zeros(dim);
66
67                for j in 0..dim {
68                    // Hunting equations
69                    let r1: f64 = rng.gen();
70                    let r2: f64 = rng.gen();
71                    let a1 = 2.0 * a * r1 - a;
72                    let c1 = 2.0 * r2;
73                    let d_alpha = (c1 * alpha.variables[j] - wolf.variables[j]).abs();
74                    let x1 = alpha.variables[j] - a1 * d_alpha;
75
76                    let r1: f64 = rng.gen();
77                    let r2: f64 = rng.gen();
78                    let a2 = 2.0 * a * r1 - a;
79                    let c2 = 2.0 * r2;
80                    let d_beta = (c2 * beta.variables[j] - wolf.variables[j]).abs();
81                    let x2 = beta.variables[j] - a2 * d_beta;
82
83                    let r1: f64 = rng.gen();
84                    let r2: f64 = rng.gen();
85                    let a3 = 2.0 * a * r1 - a;
86                    let c3 = 2.0 * r2;
87                    let d_delta = (c3 * delta.variables[j] - wolf.variables[j]).abs();
88                    let x3 = delta.variables[j] - a3 * d_delta;
89
90                    new_vars[j] = ((x1 + x2 + x3) / 3.0).clamp(lower[j], upper[j]);
91                }
92
93                wolf.variables = new_vars;
94                wolf.fitness = problem.fitness(&wolf.variables);
95            }
96        }
97
98        OptimizationResult {
99            best_variables: alpha.variables,
100            best_fitness: alpha.fitness,
101            history,
102        }
103    }
104}