use std::{
    cmp::Ordering,
    time::Instant,
};

use maria_linalg::{
    Matrix,
    Vector,
};

pub use super::{
    cli::get_cli,
    error,
    help,
    paradigm::Paradigm,
};

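/// Step size used for the central finite-difference approximations of the
/// gradient and Hessian below.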
const DX: f64 = 1e-6;

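/// Optimizer over `C` continuous variables and `D` discrete variables,
/// maintaining a population of `K` candidate points.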
pub struct Optimizer<const C: usize, const D: usize, const K: usize> {
    /// Print progress every `print_every` iterations (0 disables printing).
    pub print_every: usize,
    /// Optimization paradigm (steepest descent, Newton, genetic, or simulated annealing).
    paradigm: Paradigm,
    /// Objective function over the continuous and discrete variables.
    obj: fn(Vector<C>, Vector<D>) -> f64,
    /// Optional analytic gradient; a finite difference is used when `None`.
    gradient: Option<fn(Vector<C>) -> Vector<C>>,
    /// Optional analytic Hessian; a finite difference is used when `None`.
    hessian: Option<fn(Vector<C>) -> Matrix<C>>,
    /// Convergence tolerance on the gradient norm.
    criterion: f64,
    /// Maximum number of iterations.
    maxiter: usize,
    /// Maximum temperature (used by simulated annealing).
    pub maxtemp: f64,
    /// Standard deviation used when generating child candidates.
    pub stdev: f64,
}

impl<const C: usize, const D: usize, const K: usize> Optimizer<C, D, K> {
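    /// Builds an optimizer from an objective and optional analytic gradient
    /// and Hessian. The remaining settings start from defaults and may be
    /// overridden on the command line via `get_cli`.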
    pub fn new(
        obj: fn(Vector<C>, Vector<D>) -> f64,
        gradient: Option<fn(Vector<C>) -> Vector<C>>,
        hessian: Option<fn(Vector<C>) -> Matrix<C>>,
    ) -> Self {
        // Defaults, overridden below by any command-line arguments.
        let mut print_every = 1;
        let mut paradigm = Paradigm::SteepestDescent;
        let mut criterion = 0.001;
        let mut maxiter = 100;
        let mut maxtemp = 1.0;
        let mut stdev = 1.0;

        get_cli(
            &mut print_every,
            &mut paradigm,
            &mut criterion,
            &mut maxiter,
            &mut maxtemp,
            &mut stdev,
        );

        Self {
            print_every,
            paradigm,
            obj,
            gradient,
            hessian,
            criterion,
            maxiter,
            maxtemp,
            stdev,
        }
    }

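    /// Evaluates the objective at the given continuous and discrete point.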
    pub fn objective(&self, continuous: Vector<C>, discrete: Vector<D>) -> f64 {
        (self.obj)(continuous, discrete)
    }

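    /// Gradient of the objective with respect to the continuous variables.
    /// Uses the analytic gradient if one was supplied, otherwise a central
    /// finite difference with step `DX`.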
    pub fn gradient(&self, continuous: Vector<C>, discrete: Vector<D>) -> Vector<C> {
        match self.gradient {
            Some(g) => g(continuous),
            None => {
                let mut gradient = Vector::zero();

                for i in 0..C {
                    // Perturb the i-th coordinate by +/- DX / 2.
                    let mut high = continuous;
                    let mut low = continuous;

                    high[i] += DX / 2.0;
                    low[i] -= DX / 2.0;

                    let f_high = self.objective(high, discrete);
                    let f_low = self.objective(low, discrete);

                    // Central difference: df/dx_i ~ (f(x + dx/2) - f(x - dx/2)) / dx.
                    gradient[i] = (f_high - f_low) / DX;
                }

                gradient
            }
        }
    }

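    /// Hessian of the objective with respect to the continuous variables.
    /// Uses the analytic Hessian if one was supplied, otherwise a central
    /// finite difference of the gradient with step `DX`.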
    pub fn hessian(&self, continuous: Vector<C>, discrete: Vector<D>) -> Matrix<C> {
        match self.hessian {
            Some(h) => h(continuous),
            None => {
                let mut hessian = Matrix::zero();

                for i in 0..C {
                    // Perturb the i-th coordinate by +/- DX / 2.
                    let mut high = continuous;
                    let mut low = continuous;

                    high[i] += DX / 2.0;
                    low[i] -= DX / 2.0;

                    let f_high = self.gradient(high, discrete);
                    let f_low = self.gradient(low, discrete);

                    // Central difference of the gradient gives row i of the Hessian.
                    let row = (f_high + f_low.scale(-1.0)).scale(1.0 / DX);

                    for j in 0..C {
                        hessian[(i, j)] = row[j];
                    }
                }

                hessian
            }
        }
    }

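    /// Step direction for steepest descent: the negative gradient.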
    pub fn step(&self, continuous: Vector<C>, discrete: Vector<D>) -> Vector<C> {
        self.gradient(continuous, discrete).scale(-1.0)
    }

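    /// Advances the population by one iteration, delegating to the selected
    /// paradigm.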
    pub fn update(
        &self,
        iter: usize,
        continuous: [Vector<C>; K],
        discrete: [Vector<D>; K],
        permitted: &[f64],
    ) -> ([Vector<C>; K], [Vector<D>; K]) {
        self.paradigm.update(
            self,
            iter,
            continuous,
            discrete,
            permitted,
        )
    }

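    /// Sorts the population by objective value in ascending order, placing
    /// candidates with NaN objective values last.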
    pub fn sort(
        &self,
        continuous: [Vector<C>; K],
        discrete: [Vector<D>; K],
    ) -> ([Vector<C>; K], [Vector<D>; K]) {
        let mut sorted = [(Vector::zero(), Vector::zero()); K];

        for k in 0..K {
            sorted[k] = (continuous[k], discrete[k]);
        }

        sorted.sort_by(|one, two| {
            let a = self.objective(one.0, one.1);
            let b = self.objective(two.0, two.1);

            // Treat NaN as greater than every other value so it sorts last.
            match (a.is_nan(), b.is_nan()) {
                (true, true) => Ordering::Equal,
                (true, false) => Ordering::Greater,
                (false, true) => Ordering::Less,
                (false, false) => a.partial_cmp(&b).unwrap(),
            }
        });

        let mut c = [Vector::zero(); K];
        let mut d = [Vector::zero(); K];

        for k in 0..K {
            (c[k], d[k]) = sorted[k];
        }

        (c, d)
    }

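    /// Returns the population member with the lowest objective value.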
    pub fn get_best(
        &self,
        continuous: [Vector<C>; K],
        discrete: [Vector<D>; K],
    ) -> (Vector<C>, Vector<D>) {
        let (c, d) = self.sort(continuous, discrete);
        (c[0], d[0])
    }

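    /// Runs the optimization loop from the given starting point, printing
    /// progress, and returns the best continuous and discrete vectors found.
    /// `permitted` is forwarded when generating discrete candidate values.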
    pub fn optimize(
        &self,
        continuous: Vector<C>,
        discrete: Vector<D>,
        permitted: &[f64],
    ) -> (Vector<C>, Vector<D>) {
        let start = Instant::now();

        let mut i = 0;

        let mut criterion = self.gradient(continuous, discrete).norm();

        // Initialize the population. Gradient-based paradigms start every
        // member at the provided point; population-based paradigms perturb it.
        let mut c = [Vector::zero(); K];
        let mut d = [Vector::zero(); K];
        for p in 0..K {
            c[p] = match self.paradigm {
                Paradigm::SteepestDescent | Paradigm::Newton => continuous,
                Paradigm::Genetic | Paradigm::SimulatedAnnealing => Vector::<C>::child(&continuous, &continuous, self.stdev),
            };

            d[p] = match self.paradigm {
                Paradigm::SteepestDescent | Paradigm::Newton => discrete,
                Paradigm::Genetic | Paradigm::SimulatedAnnealing => Vector::<D>::child_discrete(&discrete, &discrete, permitted),
            };
        }

        let opt_type = if C == 0 {
            "DISCRETE"
        } else if D == 0 {
            "CONTINUOUS"
        } else {
            "MIXED"
        };

        println!("INITIATING {} OPTIMIZATION", opt_type);
        println!("Continuous variables: {}", C);
        println!("Discrete variables: {}", D);
        println!("Paradigm: {}", self.paradigm);
        println!("Maximum iterations: {}", self.maxiter);
        println!();

        while criterion > self.criterion && i < self.maxiter {
            i += 1;
            let (best_continuous, best_discrete) = self.get_best(c, d);

            if self.print_every != 0 && i % self.print_every == 0 {
                println!("Iteration: {}", i);
                println!("Objective: {:.8}", self.objective(best_continuous, best_discrete));
                println!("Vector");
                if C > 0 {
                    println!("Continuous\n{}", best_continuous);
                }
                if D > 0 {
                    println!("Discrete\n{}", best_discrete);
                }
                println!();
                println!();
            }

            // Advance the population, then recompute the convergence criterion
            // (the gradient norm at the current best point).
            (c, d) = self.update(i, c, d, permitted);
            criterion = self.gradient(best_continuous, best_discrete).norm();
        }

        // Elapsed time in milliseconds.
        let time = start.elapsed().as_micros() as f64 / 1000.0;

        if i == self.maxiter {
            println!("Maximum iteration limit reached in {:.3} milliseconds", time);
        } else {
            println!("Convergence achieved in {:.3} milliseconds", time);
        }

        let (best_continuous, best_discrete) = self.get_best(c, d);
        println!("Result\n");
        if C > 0 {
            println!("Continuous\n{}", best_continuous);
        }
        if D > 0 {
            println!("Discrete\n{}", best_discrete);
        }
        println!("Objective: {:.8}", self.objective(best_continuous, best_discrete));

        (best_continuous, best_discrete)
    }
}

#[test]
fn sort() {
    // NaN values should sort to the end; finite values sort ascending.
    let mut sorted = [f64::NAN, 2.0, 1.0, f64::MAX, f64::NAN];

    sorted.sort_by(|a, b| {
        match (a.is_nan(), b.is_nan()) {
            (true, true) => Ordering::Equal,
            (true, false) => Ordering::Greater,
            (false, true) => Ordering::Less,
            (false, false) => a.partial_cmp(&b).unwrap(),
        }
    });

    dbg!(sorted);
}
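
// A usage sketch added for illustration, not part of the original file. It
// assumes `maria_linalg::Vector` supports `usize` indexing (as the finite
// difference code above already relies on) and that `get_cli` tolerates the
// test harness's extra arguments; the test is therefore marked `#[ignore]`
// and only runs when requested explicitly.
#[test]
#[ignore]
fn optimize_quadratic() {
    // Convex objective over two continuous variables and no discrete
    // variables: f(x) = x_0^2 + x_1^2, minimized at the origin.
    fn quadratic(c: Vector<2>, _d: Vector<0>) -> f64 {
        c[0] * c[0] + c[1] * c[1]
    }

    // Start away from the minimum with a population of K = 4 candidates and
    // no permitted discrete values.
    let mut start = Vector::<2>::zero();
    start[0] = 3.0;
    start[1] = -2.0;

    let optimizer = Optimizer::<2, 0, 4>::new(quadratic, None, None);
    let (best, _) = optimizer.optimize(start, Vector::zero(), &[]);

    dbg!(best);
}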