use super::super::utilities::{
parameters::{OperationalLearning, Parameters, TemperatureSchedule},
Solution,
};
use crate::utilities::randomness::{multinomial_draw, random_unit_draw};
/// A simulated-annealing agent that iteratively improves a candidate
/// solution of type `S` and can adopt solutions shared by teammates.
#[derive(Clone, Debug)]
pub struct Agent<S: Solution> {
    // Index of this agent within the team; used to find its own entry
    // in the solution list passed to `communicate`.
    id: usize,
    // Number of the current iteration, starting at 1; drives the
    // temperature schedule denominator.
    iteration_number: u64,
    // Identifier of the last move operator applied.
    // NOTE(review): initialized to 0 and never updated in the visible
    // code — confirm intended use before relying on it.
    last_operation: u64,
    // Current annealing temperature; recomputed at the start of each
    // `iterate` call (0.0 until then).
    temperature: f64,
    // Scalar quality of `current_solution` as of construction.
    // NOTE(review): not refreshed when `iterate` swaps the current
    // solution — may go stale; confirm.
    current_solution_quality: f64,
    // The solution the agent is currently working from.
    current_solution: S,
    // Scalar quality of `best_solution_so_far`.
    best_quality_so_far: f64,
    // Best solution observed across all iterations.
    best_solution_so_far: S,
    // Behavioral configuration: temperature schedule, learning mode,
    // and communication biases.
    parameters: Parameters,
}
/// Operations every agent implementation must support. The `Send`
/// supertrait allows agents to be moved across threads.
pub trait AgentMethods<S: Solution>: Send {
    /// Builds a fresh agent with the given team index and configuration.
    fn new(id: usize, parameters: Parameters) -> Self;
    /// Runs one annealing step: propose a candidate, accept or reject
    /// it, and update the best-so-far record.
    fn iterate(&mut self);
    /// Returns a clone of the best solution found so far.
    fn get_best_solution_so_far(&mut self) -> S;
    /// Returns a clone of the current working solution.
    fn get_current_solution(&mut self) -> S;
    /// Replaces the current solution with one drawn from `solutions`,
    /// weighted by (biased) solution quality.
    fn communicate(&mut self, solutions: Vec<S>);
}
impl<S: Solution> AgentMethods<S> for Agent<S> {
    /// Builds an agent seeded with a freshly generated solution; that
    /// solution serves as both the current and the best-so-far state.
    fn new(id: usize, parameters: Parameters) -> Self {
        let solution = S::new();
        // Compute the quality once instead of twice.
        let quality = solution.get_quality_scalar();
        Agent {
            id,
            iteration_number: 1,
            last_operation: 0,
            temperature: 0.0,
            current_solution_quality: quality,
            best_quality_so_far: quality,
            best_solution_so_far: solution.clone(),
            // Move the seed in last so only one clone is needed.
            current_solution: solution,
            parameters,
        }
    }

    /// One simulated-annealing step: refresh the temperature, generate a
    /// candidate, accept it unconditionally if it is better or with
    /// Metropolis probability `exp(delta / T)` otherwise, then update the
    /// learning state and the best-so-far record.
    fn iterate(&mut self) {
        self.update_temperature();
        let candidate = self.generate_candidate_solution();
        if candidate > self.current_solution {
            // Strict improvement: always accept.
            self.current_solution = candidate;
        } else {
            // Candidate is not better, so `delta <= 0` and the
            // acceptance probability lies in (0, 1]. The clones are
            // required because `Sub` consumes its operands.
            let delta = candidate.clone() - self.current_solution.clone();
            let acceptance_probability = (delta / self.temperature).exp();
            if random_unit_draw() < acceptance_probability {
                self.current_solution = candidate;
            }
        }
        // NOTE(review): `current_solution_quality` is not refreshed here
        // even though the current solution may have changed — confirm
        // whether that field is meant to track the live quality.
        self.update_learning();
        if self.current_solution > self.best_solution_so_far {
            self.best_solution_so_far = self.current_solution.clone();
            self.best_quality_so_far = self.best_solution_so_far.get_quality_scalar();
        }
        self.iteration_number += 1;
    }

    /// Returns a clone of the best solution found so far.
    fn get_best_solution_so_far(&mut self) -> S {
        self.best_solution_so_far.clone()
    }

    /// Returns a clone of the current working solution.
    fn get_current_solution(&mut self) -> S {
        self.current_solution.clone()
    }

    /// Adopts one of the shared solutions, drawn at random with
    /// probability proportional to (biased) quality.
    fn communicate(&mut self, mut solutions: Vec<S>) {
        // Borrow instead of cloning the whole Vec just to read scalar
        // qualities (the original did `solutions.clone().into_iter()`).
        let mut qualities: Vec<f64> = solutions
            .iter()
            .map(|x| x.get_quality_scalar() + self.parameters.quality_bias)
            .collect();
        // Extra weight on this agent's own entry.
        // NOTE(review): assumes `solutions[self.id]` is this agent's own
        // solution and that `self.id < solutions.len()` — confirm at the
        // call site; otherwise this indexing panics.
        qualities[self.id] += self.parameters.self_bias;
        let idx = multinomial_draw(qualities);
        self.current_solution = solutions.remove(idx);
    }
}
impl<S: Solution> Agent<S> {
    /// Clones the current solution and perturbs it with a move operator.
    /// NOTE(review): the operator index (0) and weight (1.0) are
    /// hard-coded and `last_operation` is never consulted — confirm
    /// whether operator selection was meant to go through the learning
    /// model in `update_learning`.
    fn generate_candidate_solution(&mut self) -> S {
        let mut candidate = self.current_solution.clone();
        candidate.apply_move_operator(0, 1.0);
        candidate
    }

    /// Updates the operator-selection learning model.
    /// All variants are currently no-ops (placeholder implementation).
    fn update_learning(&mut self) {
        match self.parameters.operational_learning {
            OperationalLearning::Multinomial { .. } => {}
            OperationalLearning::Markov { .. } => {}
            OperationalLearning::HiddenMarkov { .. } => {}
            _ => {}
        }
    }

    /// Recomputes the annealing temperature from the configured schedule.
    ///
    /// Every schedule currently applies the same Cauchy-style decay
    /// `T = T0 / k`, so the arms are merged into one or-pattern; this
    /// also removes the unused `delta` binding the Triki arm carried
    /// (it produced an unused-variable warning).
    /// TODO(review): implement the real Triki (adaptive, uses `delta`)
    /// and Geometric (`T0 * r^k`) updates when their semantics are
    /// pinned down.
    fn update_temperature(&mut self) {
        match self.parameters.temperature_schedule {
            TemperatureSchedule::Triki {
                initial_temperature, ..
            }
            | TemperatureSchedule::Cauchy {
                initial_temperature,
            }
            | TemperatureSchedule::Geometric {
                initial_temperature,
            } => {
                self.temperature = initial_temperature / (self.iteration_number as f64);
            }
            // Any other schedule leaves the temperature unchanged.
            _ => {}
        }
    }
}