1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
//! This crate implements the multi-dimensional DIRECT function optimization algorithm.
//! The reference followed for this implementation was the original paper [1].
//!
//! [1]: Lipschitzian Optimization Without the Lipschitz Constant, DOI 10.1007/BF00941892

extern crate ordered_float;

/// Describes one axis along which an `Interval` is to be split, together with the center
/// coordinates of the two new outer thirds that the split produces along that axis.
struct NewSubdivision {
    /// Index of the dimension being subdivided.
    idx: u32,
    /// Center coordinate of the new left (lower) third along dimension `idx`.
    left_coordinate: ordered_float::NotNaN<f64>,
    /// Center coordinate of the new right (upper) third along dimension `idx`.
    right_coordinate: ordered_float::NotNaN<f64>,
}

impl NewSubdivision {
    /// Creates a placeholder subdivision for dimension `idx`.
    /// Both coordinates start out at zero; the caller is expected to fill them in.
    fn new(idx: u32) -> Self {
        // 0.0 is trivially not NaN, so unwrapping cannot fail.
        let zero = || ordered_float::NotNaN::new(0.0).unwrap();
        NewSubdivision {
            idx: idx,
            left_coordinate: zero(),
            right_coordinate: zero(),
        }
    }
}

/// Returns the normalized edge length of a hyper-rectangle along an axis that has been
/// subdivided `dc` times, i.e. 1 / 3^dc.
fn width_from_division_count(dc: u32) -> f64 {
    let exponent = -(dc as i32);
    3.0_f64.powi(exponent)
}

/// Maps a slice of per-dimension subdivision counts to a dense bucket index.
///
/// The algorithm guarantees (see `Interval::division_counts`) that all counts are either
/// `m` or `m - 1` for some maximum `m`. Under that invariant the diagonal length of the
/// hyper-rectangle is fully determined by `m` and by how many dimensions are already at
/// `m`, so `(m - 1) * len + count_of_m` yields a dense index ordering rectangles from the
/// largest diagonal (bucket 0) towards ever smaller ones.
///
/// Panics if `division_counts` is empty or violates the invariant. The previous
/// implementation validated sequentially with a single "upgrade" step and therefore
/// accepted some invalid inputs (e.g. `[1, 0, 2]`, whose values span more than one);
/// validating against the maximum closes that hole without changing the result for any
/// valid input.
fn bucket_from_divisions(division_counts: &[u32]) -> usize {
    let highest = *division_counts
        .iter()
        .max()
        .expect("division_counts must be non-empty");
    let mut highest_count = 0;
    for &c in division_counts {
        if c == highest {
            highest_count += 1;
        } else {
            // The only other value the invariant allows is `highest - 1`.
            assert!(highest > 0 && c == highest - 1,
                    "invalid division counts: {:?}",
                    division_counts);
        }
    }
    // `highest as i32 - 1` can be -1 (all counts zero); in that case `highest_count`
    // equals the length and the sum is exactly 0. Keep signed arithmetic and assert to
    // catch any violation of that reasoning.
    let res = (highest as i32 - 1) * division_counts.len() as i32 + highest_count as i32;
    assert!(res >= 0, "invalid result {}", res);
    res as usize
}

/// A hyper-rectangle of the normalized [0, 1]^n search space, together with the function
/// value sampled at its center point.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Interval {
    /// The value of the function evaluated at `center`.
    /// We require our callers to provide NotNaN as their function outputs.
    /// This has to be the first field in the struct so automatic Ord derivation does the right
    /// thing (intervals compare by value first).
    value: ordered_float::NotNaN<f64>,
    /// The center coordinate of the interval.
    /// We assert on non-NaN here, as this is guaranteed by our implementation.
    center: Vec<ordered_float::NotNaN<f64>>,
    /// The number of times this interval has already been subdivided, for each dimension. If this
    /// is 0 in any dimension, then there has not been a subdivision yet. The interval has the
    /// normalized length 1 / (3^division_count[i]) in each dimension.
    /// The algorithm guarantees that there are at most two different numbers in this vector,
    /// the number of subdivisions can be the same in each dimension, or it can differ by one for a
    /// few of them.
    division_counts: Vec<u32>,
}

impl Interval {
    /// Builds an interval from the sampled value at `center` and the per-dimension
    /// subdivision counts. Panics unless both vectors have the same, non-zero length.
    fn new(value: ordered_float::NotNaN<f64>,
           center: Vec<ordered_float::NotNaN<f64>>,
           division_counts: Vec<u32>)
           -> Self {
        assert_eq!(center.len(), division_counts.len());
        assert!(!center.is_empty());
        Interval {
            value: value,
            center: center,
            division_counts: division_counts,
        }
    }

    /// Returns the normalized diagonal length through the hyper-rectangle,
    /// i.e. \sqrt(\sum_{i} (3^{-division_counts[i]})^2).
    // TODO: this should very likely be computed once on interval creation.
    fn diagonal(&self) -> f64 {
        let sum_of_squares: f64 = self.division_counts
            .iter()
            .map(|&c| {
                let w = width_from_division_count(c);
                w * w
            })
            .sum();
        sum_of_squares.sqrt()
    }

    /// Returns the bucket index of this interval.
    /// The bucket index is a proxy for the diagonal length of the hyper-rectangle: because
    /// of the constraints on division_counts, only a limited set of diagonal lengths can
    /// occur, and an ordering between intervals can be computed purely from the division
    /// counts. Assigning each hyper-rectangle to a bucket avoids fuzzy comparisons of
    /// floating-point diagonal lengths and speeds up the convex hull computation a bit.
    fn bucket_index(&self) -> usize {
        bucket_from_divisions(&self.division_counts)
    }

    /// Return the indices and new center coordinates of new subdivisions of this hyper-rectangle.
    /// The rectangle is subdivided along each of its longest axes, i.e. the dimensions with
    /// the smallest subdivision counts.
    fn subdivide(&self) -> Vec<NewSubdivision> {
        // We are much less thorough here in checking assertions than in the bucket index
        // computation. Checking assertions once should be good enough.
        // `new()` guarantees division_counts is non-empty, so min() cannot fail.
        let lowest = *self.division_counts.iter().min().unwrap();
        let w = width_from_division_count(lowest);
        let mut res = Vec::with_capacity(self.center.len());
        for (idx, &count) in self.division_counts.iter().enumerate() {
            if count != lowest {
                continue;
            }
            let mut division = NewSubdivision::new(idx as u32);
            let x = *self.center[idx];
            // We can safely call unwrap here as we know all inputs are NotNaN and the
            // operations involved cannot produce any new NaNs.
            division.left_coordinate = ordered_float::NotNaN::new(x - w / 3.0).unwrap();
            division.right_coordinate = ordered_float::NotNaN::new(x + w / 3.0).unwrap();
            res.push(division);
        }
        res
    }

    /// Return a copy of the center coordinate with the entry at `idx` replaced by `v`.
    fn changed_center(&self,
                      idx: usize,
                      v: ordered_float::NotNaN<f64>)
                      -> Vec<ordered_float::NotNaN<f64>> {
        let mut copy = self.center.clone();
        copy[idx] = v;
        copy
    }
}

/// This value for epsilon should be good enough for many use cases. This is the value that was
/// used in the benchmarks of the original paper.
/// Epsilon trades off exploration against refinement of already found minima; see the
/// `epsilon` field on `State`.
pub const DEFAULT_EPSILON: f64 = 0.0001;

/// This structure represents the current state of an optimization problem.
pub struct State<F> {
    /// Per-dimension lower bounds of the search space, as supplied by the caller.
    lower_bounds: Vec<f64>,
    /// Per-dimension upper bounds of the search space; element-wise strictly greater than
    /// `lower_bounds` (asserted in `new`).
    upper_bounds: Vec<f64>,
    /// The function to be minimized.
    f: F,
    /// This value is used to balance exploration vs. refinement of already found minima. The
    /// higher this value, the more exploration (i.e. looking at unexplored terrain) is done.
    epsilon: f64,
    /// This vectors contains vectors of intervals. The index `i` of the top-level vector is the
    /// bucket index of the Intervals that are contained in the subvector (the bucket index itself
    /// is a proxy for the length of the diagonal of the hyper-rectangle). Smaller indices stand
    /// for larger diagonals. Each subvector is kept sorted so index 0 is the best (smallest
    /// value) interval of that bucket.
    buckets: Vec<Vec<Interval>>,
    /// The number of iterations over the currently best intervals. The first evaluation at the
    /// center counts as one iteration, so this is always >= 1.
    iteration_count: usize,
    /// The current smallest function evaluation result. Corresponds to the best value in all
    /// stored intervals.
    f_min: ordered_float::NotNaN<f64>,
    /// The argument that produced f_min, i.e., the coordinates that were used to evaluate the
    /// function at that point. Stored in the caller's (denormalized) coordinate system.
    arg_min: Vec<f64>,
}

impl<F: Fn(&[f64]) -> f64> State<F> {
    /// Create a new optimization state. The function `f` will be *minimized*.
    /// Each entry in the `lower_bounds` vector has to be smaller than the corresponding entry in
    /// the `upper_bounds` vector. `epsilon` has to be > 0.
    /// The optimizer will panic if the function ever returns NaN.
    /// As part of the initialization, `f` will be evaluated once.
    pub fn new(lower_bounds: Vec<f64>, upper_bounds: Vec<f64>, f: F, epsilon: f64) -> Self {
        assert_eq!(lower_bounds.len(), upper_bounds.len());
        for (i, (&l, &u)) in lower_bounds.iter().zip(upper_bounds.iter()).enumerate() {
            assert!(l < u,
                    "invalid bounds at index {}: lower {}, upper {}",
                    i,
                    l,
                    u);
        }
        assert!(epsilon > 0.0);
        let n = lower_bounds.len();

        let mut s = State {
            lower_bounds: lower_bounds,
            upper_bounds: upper_bounds,
            f: f,
            epsilon: epsilon,
            iteration_count: 1,

            // These fields will be overwritten again, fill in empty values.
            buckets: vec![],
            f_min: ordered_float::NotNaN::new(0.0).unwrap(),
            arg_min: vec![],
        };

        // The search starts with a single interval covering the entire normalized space,
        // sampled at its center (0.5 in every dimension).
        let center = vec![ordered_float::NotNaN::new(0.5).unwrap(); n];
        let arg_min = s.denormalize(&center);
        let v = s.eval(&center);
        let first_interval = Interval::new(v, center, vec![0; n]);
        s.buckets = vec![vec![first_interval]];
        s.f_min = v;
        s.arg_min = arg_min;
        s
    }

    /// The number of iterations that were done. Initialization counts as one iteration.
    pub fn iteration_count(&self) -> usize {
        self.iteration_count
    }

    /// The number of times the function to be optimized has been evaluated.
    /// Every evaluation produces exactly one stored interval, so this is the total number
    /// of intervals across all buckets.
    pub fn evaluation_count(&self) -> usize {
        self.buckets.iter().fold(0, |acc, sub_buckets| acc + sub_buckets.len())
    }

    /// The current best, i.e. smallest, value that was found.
    pub fn f_min(&self) -> ordered_float::NotNaN<f64> {
        self.f_min
    }

    /// The argument that produced f_min(), i.e. the location where the currently best value was
    /// found.
    pub fn arg_min(&self) -> Vec<f64> {
        self.arg_min.clone()
    }

    /// Convert a normalized coordinate in [0, 1]^n back into the caller's coordinate
    /// system by linear interpolation between the lower and upper bounds.
    fn denormalize(&self, norm_x: &[ordered_float::NotNaN<f64>]) -> Vec<f64> {
        assert_eq!(norm_x.len(), self.lower_bounds.len());
        norm_x.iter()
            .zip(self.lower_bounds.iter().zip(self.upper_bounds.iter()))
            .map(|(&x, (&l, &u))| l + x.into_inner() * (u - l))
            .collect()
    }

    /// Evaluate self.f at the normalized coordinate norm_x. This is done by first converting the
    /// normalized coordinate back into the original coordinate system, and then evaluating.
    fn eval(&self, norm_x: &[ordered_float::NotNaN<f64>]) -> ordered_float::NotNaN<f64> {
        let x = self.denormalize(norm_x);
        // We are allowed to unwrap here, as we require the function to never return NaN for this
        // algorithm to work. This is documented.
        ordered_float::NotNaN::new((self.f)(&x)).unwrap()
    }

    /// Run one iteration of the optimization: select the potentially optimal intervals,
    /// subdivide each of them along its longest axes, evaluate the new sample points, and
    /// re-insert everything into the buckets.
    pub fn iterate_once(&mut self) {
        let interval_indices = self.select_intervals();
        // Don't yet insert the intervals into the buckets vector directly, we first have to
        // evaluate all of them as otherwise the bucket indices might get screwed up.
        let mut new_intervals = Vec::new();

        for i in interval_indices {
            // Index 0 is the best (smallest-value) interval of the bucket, which is the
            // representative that select_intervals() chose.
            let mut interval = self.buckets[i].remove(0);

            // Subdivide the current interval along the longest axes, and evaluate the new sample
            // points.
            let subdivisions = interval.subdivide();
            let mut center_pairs = Vec::with_capacity(subdivisions.len());
            let mut value_pairs = Vec::with_capacity(subdivisions.len());
            for division in &subdivisions {
                let left_center =
                    interval.changed_center(division.idx as usize, division.left_coordinate);
                let right_center =
                    interval.changed_center(division.idx as usize, division.right_coordinate);
                let left_val = self.eval(&left_center);
                let right_val = self.eval(&right_center);

                // Update our best answer while we're at it.
                if left_val < self.f_min {
                    self.f_min = left_val;
                    self.arg_min = self.denormalize(&left_center);
                }
                if right_val < self.f_min {
                    self.f_min = right_val;
                    self.arg_min = self.denormalize(&right_center);
                }

                center_pairs.push((left_center, right_center));
                value_pairs.push((left_val, right_val));
            }

            // Now we have new values and new centers. We have to figure out in which order to
            // allocate the new hyperrectangles. Heuristic from the paper: choose the dimension
            // with the "best", i.e. minimal, sampled value to be in the largest new
            // hyperrectangle, as that increases attractiveness of further subdivisions of that
            // rectangle, leading to faster convergence.
            let mut best_values: Vec<_> = value_pairs.iter()
                .enumerate()
                .map(|(idx, &(left, right))| {
                    // Unwrapping here is safe, as both inputs are NotNaN, taking the min doesn't
                    // change that.
                    (ordered_float::NotNaN::new(left.min(right.into_inner())).unwrap(), idx)
                })
                .collect();
            // Minimal values are at the beginning now.
            best_values.sort();

            // And now we can create the new intervals in the correct order, with the correct
            // subdivisions. Incrementing interval.division_counts as we go makes each later
            // (worse) dimension's rectangles one subdivision smaller.
            for (_, idx) in best_values {
                interval.division_counts[subdivisions[idx].idx as usize] += 1;
                // These clone() calls make me sad :(
                let left = Interval::new(value_pairs[idx].0.clone(),
                                         center_pairs[idx].0.clone(),
                                         interval.division_counts.clone());
                let right = Interval::new(value_pairs[idx].1.clone(),
                                          center_pairs[idx].1.clone(),
                                          interval.division_counts.clone());
                new_intervals.push(left);
                new_intervals.push(right);
            }
            // The original interval keeps its center but is now fully subdivided; re-insert
            // it with its updated (smaller) dimensions.
            new_intervals.push(interval);
        }

        for interval in new_intervals.drain(..) {
            self.insert_interval(interval);
        }

        self.iteration_count += 1;
    }

    /// Select the bucket indices of the intervals to subdivide next.
    /// Only the first (i.e. smallest-value) interval of each non-empty bucket is considered;
    /// the selected set is the lower-right convex hull of the (diagonal, value) points,
    /// which corresponds to the "potentially optimal" intervals of the paper.
    fn select_intervals(&self) -> Vec<usize> {
        let mut res = Vec::new();
        // We have to iterate in reverse, the highest bucket has the smallest width.
        for (i, bucket) in self.buckets.iter().enumerate().rev() {
            if bucket.is_empty() {
                continue;
            }
            if res.is_empty() {
                res.push(i);
                continue;
            }

            let this = &bucket[0];

            // Reject all previous points which have a worse value.
            while !res.is_empty() {
                let prev_i = res[res.len() - 1];
                let prev = &self.buckets[prev_i][0];
                if prev.value >= this.value {
                    res.pop();
                } else {
                    break;
                }
            }

            // Reject all previous points which are covered by this point and the one before.
            // This is the classic convex-hull pop step: `prev` is dropped if it lies above
            // the line from `prev2` to `this`.
            while res.len() >= 2 {
                let prev_i = res[res.len() - 1];
                let prev = &self.buckets[prev_i][0];

                let prev2_i = res[res.len() - 2];
                let prev2 = &self.buckets[prev2_i][0];

                assert!(this.value > prev.value);
                assert!(this.value > prev2.value);

                // Slope between prev2 and this; half the diagonal serves as the rectangle's
                // size measure (as in the paper).
                let slope = (this.value.into_inner() - prev2.value.into_inner()) /
                            ((this.diagonal() - prev2.diagonal()) * 2.0);
                let comparison = prev2.value.into_inner() +
                                 (prev.diagonal() - prev2.diagonal()) / 2.0 * slope;
                if comparison < prev.value.into_inner() {
                    res.pop();
                } else {
                    break;
                }
            }

            // And push the new interval.
            res.push(i);
        }

        // Finally cull the intervals that were found by enforcing that at least an epsilon
        // improvement compared to the current f_min is possible.
        // The right-most interval will always be kept in consideration, as theoretically an
        // arbitrary improvement can be made (not constrained in slope by an interval that's more
        // to the right).
        while res.len() >= 2 {
            // Look at the left-most point, it's most heavily constrained by both slope and
            // possible distance.
            let i1 = &self.buckets[res[0]][0];
            let i2 = &self.buckets[res[1]][0];
            let k = (i2.value.into_inner() - i1.value.into_inner()) /
                    ((i2.diagonal() - i1.diagonal()) / 2.0);
            let potential_f_min = i1.value.into_inner() - k * i2.diagonal() / 2.0;
            assert!(potential_f_min <= *self.f_min,
                    "potential: {}, f_min: {}",
                    potential_f_min,
                    *self.f_min);
            // NOTE(review): if f_min is exactly 0 this divides by zero; the comparison then
            // evaluates to false (inf/NaN), so the interval is kept — apparently benign, but
            // worth confirming against the paper's epsilon test.
            if (*self.f_min - potential_f_min) / self.f_min.abs() < self.epsilon {
                // Not good enough, cull it.
                res.remove(0);
            } else {
                // The points to the right are even better than this one, bail out.
                break;
            }
        }

        assert!(res.len() > 0,
                "there should be at least one interval to look at");
        res
    }

    /// Insert `interval` into the bucket matching its diagonal, keeping that bucket sorted
    /// by value so that index 0 is always the bucket's best interval.
    fn insert_interval(&mut self, interval: Interval) {
        let idx = interval.bucket_index();
        // Grow the bucket list on demand. We can do this without checking, as this will be
        // a no-op if the left part of the range is >= the right part.
        for _ in self.buckets.len()..idx as usize + 1 {
            self.buckets.push(Vec::new());
        }
        let bucket = &mut self.buckets[idx as usize];
        let i = match bucket.binary_search(&interval) {
            Ok(_) => {
                // This shouldn't happen, at least the center point always has to be different.
                panic!("duplicate interval {:?}", interval);
            }
            Err(i) => i,
        };
        bucket.insert(i, interval);
    }
}

#[cfg(test)]
mod test {
    extern crate float_cmp;
    extern crate ordered_float;

    use self::float_cmp::ApproxEqUlps;
    use std::f64::consts;

    use super::{bucket_from_divisions, DEFAULT_EPSILON, State};

    #[test]
    fn test_bucket_computations() {
        // First some one-dimensional sanity checks.
        assert_eq!(0, bucket_from_divisions(&[0]));
        assert_eq!(1, bucket_from_divisions(&[1]));
        assert_eq!(2, bucket_from_divisions(&[2]));

        // Let's try two dimensions.
        assert_eq!(0, bucket_from_divisions(&[0, 0]));
        assert_eq!(1, bucket_from_divisions(&[1, 0]));
        assert_eq!(1, bucket_from_divisions(&[0, 1]));
        assert_eq!(2, bucket_from_divisions(&[1, 1]));
        assert_eq!(3, bucket_from_divisions(&[2, 1]));
        assert_eq!(3, bucket_from_divisions(&[1, 2]));
        assert_eq!(4, bucket_from_divisions(&[2, 2]));

        // And some for three dimensions.
        assert_eq!(0, bucket_from_divisions(&[0, 0, 0]));
        assert_eq!(1, bucket_from_divisions(&[0, 1, 0]));
        assert_eq!(2, bucket_from_divisions(&[1, 1, 0]));
        assert_eq!(3, bucket_from_divisions(&[1, 1, 1]));
        assert_eq!(4, bucket_from_divisions(&[2, 1, 1]));
    }

    // Asserts element-wise approximate equality (in ULPs) between two coordinate vectors.
    fn assert_vec_approx_eq_ulps<T: Into<f64> + Copy>(v1: &[T], v2: &[f64], ulps: i64) {
        assert_eq!(v1.len(), v2.len());
        for (i, (x1, x2)) in v1.iter().zip(v2).enumerate() {
            let x: f64 = (*x1).into();
            assert!(x.approx_eq_ulps(x2, ulps),
                    "idx: {}, left: {}, right {}",
                    i,
                    x,
                    x2);
        }
    }

    #[test]
    fn test_one_iteration() {
        fn slanted_sum(v: &[f64]) -> f64 {
            assert_eq!(v.len(), 2);
            2.0 * v[0] + v[1]
        }

        let mut state = State::new(vec![0.0; 2], vec![1.0; 2], slanted_sum, 1.0);
        assert_eq!(1, state.buckets.len());

        // One iteration subdivides the initial interval along both axes: 4 new evaluations.
        state.iterate_once();
        assert_eq!(5, state.evaluation_count());
        assert_eq!(3, state.buckets.len());
        assert_eq!(0, state.buckets[0].len());
        assert_eq!(2, state.buckets[1].len());
        assert_eq!(3, state.buckets[2].len());

        let bs = &state.buckets[1];
        assert_eq!(&[1, 0], &bs[0].division_counts[..]);
        assert_vec_approx_eq_ulps(&bs[0].center, &[1.0 / 6.0, 0.5], 1);
        assert_eq!(&[1, 0], &bs[1].division_counts[..]);
        assert_vec_approx_eq_ulps(&bs[1].center, &[5.0 / 6.0, 0.5], 1);

        let bs = &state.buckets[2];
        assert_eq!(&[1, 1], &bs[0].division_counts[..]);
        assert_vec_approx_eq_ulps(&bs[0].center, &[0.5, 1.0 / 6.0], 1);
        assert_eq!(&[1, 1], &bs[1].division_counts[..]);
        assert_vec_approx_eq_ulps(&bs[1].center, &[0.5, 0.5], 1);
        assert_eq!(&[1, 1], &bs[2].division_counts[..]);
        assert_vec_approx_eq_ulps(&bs[2].center, &[0.5, 5.0 / 6.0], 1);
    }

    #[test]
    fn test_simple_normal_distribution() {
        const MU: &'static [f64] = &[-1.5, 0.25, 1.0];

        // An inverted (so it can be minimized) unnormalized Gaussian centered at MU.
        fn my_normal(v: &[f64]) -> f64 {
            assert_eq!(MU.len(), v.len());
            -v.iter()
                .zip(MU)
                .fold(0.0, |acc, (x1, x2)| {
                    let dx = x1 - x2;
                    acc - (dx * dx)
                })
                .exp()
        }

        // Sanity check.
        assert_eq!(my_normal(MU), -1.0);

        let mut state = State::new(vec![-10.0; 3], vec![10.0; 3], my_normal, DEFAULT_EPSILON);
        assert_eq!(1, state.iteration_count());
        assert_eq!(1, state.evaluation_count());

        let mut prev_evaluations = 1;
        for i in 2..100 {
            state.iterate_once();
            assert_eq!(i, state.iteration_count());
            assert!(state.evaluation_count() > prev_evaluations);
            prev_evaluations = state.evaluation_count();
        }

        assert!(state.f_min().approx_eq_ulps(&-1.0, 1),
                "f_min: {}",
                state.f_min());
        // Less accurate here, the function gets really flat at the top, not enough accuracy.
        assert_vec_approx_eq_ulps(&state.arg_min(), MU, 100000000);
    }

    #[test]
    fn test_boundary() {
        // This will be maximal in a corner.
        fn slanted_sum(v: &[f64]) -> f64 {
            assert_eq!(v.len(), 2);
            2.0 * v[0] + v[1]
        }

        let mut state = State::new(vec![-1.0; 2], vec![1.0; 2], slanted_sum, 1.0);
        assert_eq!(1, state.iteration_count());
        assert_eq!(1, state.evaluation_count());

        let mut prev_evaluations = 1;
        for i in 2..200 {
            state.iterate_once();
            assert_eq!(i, state.iteration_count());
            assert!(state.evaluation_count() > prev_evaluations);
            prev_evaluations = state.evaluation_count();
        }

        assert!(state.f_min().approx_eq_ulps(&-3.0, 1),
                "f_min: {}",
                state.f_min());
        assert_vec_approx_eq_ulps(&state.arg_min(), &[-1.0, -1.0], 10);
    }

    #[test]
    fn test_constant() {
        const C: f64 = 1.0;

        fn constant(_: &[f64]) -> f64 {
            C
        }

        let mut state = State::new(vec![-1.0; 2], vec![1.0; 2], constant, DEFAULT_EPSILON);
        assert_eq!(1, state.iteration_count());
        assert_eq!(1, state.evaluation_count());

        let mut prev_evaluations = 1;
        for i in 2..50 {
            state.iterate_once();
            assert_eq!(i, state.iteration_count());
            assert!(state.evaluation_count() > prev_evaluations);
            prev_evaluations = state.evaluation_count();
        }

        assert_eq!(C, *state.f_min());
    }

    #[test]
    fn test_branin() {
        // Branin's function. Apparently often-used benchmark for optimizations.
        fn branin(v: &[f64]) -> f64 {
            assert_eq!(2, v.len());
            let x1 = v[0];
            let x2 = v[1];
            let b = 5.1 / (4.0 * consts::PI * consts::PI);
            let c = 5.0 / consts::PI;
            let r = 6.0;
            let s = 10.0;
            let t = 1.0 / (8.0 * consts::PI);
            // NOTE(review): Branin's function is normally defined with `b * x1 * x1` (x1
            // squared); `b * x1 * b` looks like a typo. The asserted minimum 0.397887
            // (= s * t) happens to be attainable for either variant within the bounds used
            // below, so the assertion passes regardless — confirm against the paper before
            // changing the formula.
            (x2 - b * x1 * b + c * x1 - r).powi(2) + s * (1.0 - t) * x1.cos() + s
        }

        // NOTE(review): the standard Branin domain is x1 in [-5, 10], x2 in [0, 15]; the
        // bounds below yield x1 in [-5, 0] and x2 in [10, 15], which looks transposed —
        // verify against the paper's benchmark setup.
        let mut state = State::new(vec![-5.0, 10.0], vec![0.0, 15.0], branin, DEFAULT_EPSILON);
        // The paper claims to have converged with 45 iterations.
        for _ in 0..45 {
            state.iterate_once();
        }

        // Super inaccurate, but the authoritative data I have is very inaccurate as well.
        let diff = (*state.f_min() - 0.397887).abs();
        assert!(diff < 0.000001, "f_min: {}", state.f_min());
    }
}