//! GOTLBO: Generalized Opposition-based Teaching-Learning-Based Optimization.
//!
//! File: samyama_optimization/algorithms/gotlbo.rs

1use crate::common::{Individual, OptimizationResult, Problem, SolverConfig};
2use ndarray::Array1;
3use rand::prelude::*;
4use rayon::prelude::*;
5
/// Generalized Opposition-based Teaching-Learning-Based Optimization (GOTLBO)
/// solver. Holds only the run configuration; the algorithm itself lives in
/// `solve`.
pub struct GOTLBOSolver {
    /// Run parameters (population size, iteration budget — declared in
    /// `crate::common::SolverConfig`).
    pub config: SolverConfig,
}
9
10impl GOTLBOSolver {
11    pub fn new(config: SolverConfig) -> Self {
12        Self { config }
13    }
14
15    pub fn solve<P: Problem>(&self, problem: &P) -> OptimizationResult {
16        let mut rng = thread_rng();
17        let dim = problem.dim();
18        let (lower, upper) = problem.bounds();
19
20        // Initialize population
21        let mut population: Vec<Individual> = (0..self.config.population_size)
22            .map(|_| {
23                let mut vars = Array1::zeros(dim);
24                for i in 0..dim {
25                    vars[i] = rng.gen_range(lower[i]..upper[i]);
26                }
27                let fitness = problem.fitness(&vars);
28                Individual::new(vars, fitness)
29            })
30            .collect();
31
32        let mut history = Vec::with_capacity(self.config.max_iterations);
33
34        for iter in 0..self.config.max_iterations {
35            if iter % 10 == 0 {
36                // Optional logging
37            }
38            let best_idx = self.find_best(&population);
39            let teacher_vars = population[best_idx].variables.clone();
40            let best_fitness = population[best_idx].fitness;
41            let mean_vars = self.calculate_mean(&population, dim);
42
43            history.push(best_fitness);
44
45            // 1. Teacher Phase with Opposition
46            population = population
47                .into_par_iter()
48                .map(|mut ind| {
49                    let mut local_rng = thread_rng();
50                    let tf: f64 = local_rng.gen_range(1..3) as f64; // Teaching Factor (1 or 2)
51                    let mut new_vars = Array1::zeros(dim);
52
53                    // Teacher update
54                    for j in 0..dim {
55                        let r: f64 = local_rng.gen();
56                        let delta = r * (teacher_vars[j] - tf * mean_vars[j]);
57                        new_vars[j] = (ind.variables[j] + delta).clamp(lower[j], upper[j]);
58                    }
59
60                    let new_fitness = problem.fitness(&new_vars);
61                    
62                    // Accept if better
63                    if new_fitness < ind.fitness {
64                        ind.variables = new_vars;
65                        ind.fitness = new_fitness;
66                    }
67
68                    // Opposition-Based Learning (OBL)
69                    let mut opp_vars = Array1::zeros(dim);
70                    for j in 0..dim {
71                        // O = a + b - X
72                        opp_vars[j] = (lower[j] + upper[j] - ind.variables[j]).clamp(lower[j], upper[j]);
73                    }
74                    let opp_fitness = problem.fitness(&opp_vars);
75
76                    if opp_fitness < ind.fitness {
77                        ind.variables = opp_vars;
78                        ind.fitness = opp_fitness;
79                    }
80
81                    ind
82                })
83                .collect();
84
85            // 2. Learner Phase with Opposition
86            let pop_len = population.len();
87            for i in 0..pop_len {
88                let mut learner_j_idx;
89                loop {
90                    learner_j_idx = rng.gen_range(0..pop_len);
91                    if learner_j_idx != i { break; }
92                }
93
94                let ind_i = &population[i];
95                let ind_j = &population[learner_j_idx];
96                
97                let mut new_vars = Array1::zeros(dim);
98                for k in 0..dim {
99                    let r: f64 = rng.gen();
100                    let delta = if ind_i.fitness < ind_j.fitness {
101                        r * (&ind_i.variables[k] - &ind_j.variables[k])
102                    } else {
103                        r * (&ind_j.variables[k] - &ind_i.variables[k])
104                    };
105                    new_vars[k] = (ind_i.variables[k] + delta).clamp(lower[k], upper[k]);
106                }
107
108                let new_fitness = problem.fitness(&new_vars);
109                if new_fitness < population[i].fitness {
110                    population[i].variables = new_vars;
111                    population[i].fitness = new_fitness;
112                }
113
114                // Opposition-Based Learning (OBL)
115                let ind_i_curr = &population[i];
116                let mut opp_vars = Array1::zeros(dim);
117                for k in 0..dim {
118                    opp_vars[k] = (lower[k] + upper[k] - ind_i_curr.variables[k]).clamp(lower[k], upper[k]);
119                }
120                let opp_fitness = problem.fitness(&opp_vars);
121
122                if opp_fitness < population[i].fitness {
123                    population[i].variables = opp_vars;
124                    population[i].fitness = opp_fitness;
125                }
126            }
127        }
128
129        let final_best_idx = self.find_best(&population);
130        let final_best = &population[final_best_idx];
131
132        OptimizationResult {
133            best_variables: final_best.variables.clone(),
134            best_fitness: final_best.fitness,
135            history,
136        }
137    }
138
139    fn find_best(&self, population: &[Individual]) -> usize {
140        let mut best_idx = 0;
141        for (i, ind) in population.iter().enumerate() {
142            if ind.fitness < population[best_idx].fitness {
143                best_idx = i;
144            }
145        }
146        best_idx
147    }
148
149    fn calculate_mean(&self, population: &[Individual], dim: usize) -> Array1<f64> {
150        let mut mean = Array1::zeros(dim);
151        for ind in population {
152            mean += &ind.variables;
153        }
154        mean / (population.len() as f64)
155    }
156}