minimax 0.6.0

Generic implementations of Minimax.
use super::common::{move_to_front, unclamp_value};
use crate::interface::*;
use std::cmp::{max, min};
use std::sync::atomic::{AtomicU32, AtomicU8, Ordering};
use std::sync::Arc;

// Common transposition table stuff.

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(super) enum EntryFlag {
    Exact,
    Upperbound,
    Lowerbound,
}

#[derive(Copy, Clone)]
#[repr(align(16))]
pub(super) struct Entry<M> {
    pub(super) high_hash: u32,
    pub(super) value: Evaluation,
    pub(super) depth: u8,
    pub(super) flag: EntryFlag,
    pub(super) generation: u8,
    pub(super) best_move: Option<M>,
}

#[test]
fn test_entry_size() {
    assert!(std::mem::size_of::<Entry<[u16; 2]>>() <= 16);
    assert!(std::mem::size_of::<ConcurrentEntry<[u8; 6]>>() <= 16);
}

pub(super) fn high_bits(hash: u64) -> u32 {
    (hash >> 32) as u32
}
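
// Added illustration (not part of the original source): high_bits() keeps the
// top 32 bits of the Zobrist hash for storage in Entry::high_hash, while the
// low bits select the table slot.
#[test]
fn test_high_bits_example() {
    assert_eq!(high_bits(0xdead_beef_1234_5678), 0xdead_beef);
    // Two hashes that share low bits (same table slot) still differ here.
    assert_ne!(high_bits(0x0000_0001_0000_0042), high_bits(0x0000_0002_0000_0042));
}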

impl<M> Entry<M> {
    pub(super) fn value_string(&self) -> String {
        match unclamp_value(self.value) {
            WORST_EVAL => "-∞".to_owned(),
            BEST_EVAL => "∞".to_owned(),
            value => value.to_string(),
        }
    }

    pub(super) fn bounds(&self) -> String {
        match self.flag {
            EntryFlag::Exact => "=",
            EntryFlag::Upperbound => "≤",
            EntryFlag::Lowerbound => "≥",
        }
        .to_string()
            + &self.value_string()
    }
}

// A trait for a transposition table. The methods take exclusive (mutable)
// access, but the idea is that an implementation can wrap a shared concurrent table.
pub(super) trait Table<M: Copy> {
    fn lookup(&self, hash: u64) -> Option<Entry<M>>;
    fn store(&mut self, hash: u64, value: Evaluation, depth: u8, flag: EntryFlag, best_move: M);
    fn advance_generation(&mut self);

    // Check and update negamax state based on any transposition table hit.
    // Returns Some(value) on an exact match or when a stored bound causes a cutoff.
    // Returns None, updating mutable arguments, if Negamax should continue to explore this node.
    fn check(
        &self, hash: u64, depth: u8, good_move: &mut Option<M>, alpha: &mut Evaluation,
        beta: &mut Evaluation,
    ) -> Option<Evaluation> {
        if let Some(entry) = self.lookup(hash) {
            *good_move = entry.best_move;
            if entry.depth >= depth {
                match entry.flag {
                    EntryFlag::Exact => {
                        return Some(entry.value);
                    }
                    EntryFlag::Lowerbound => {
                        *alpha = max(*alpha, entry.value);
                    }
                    EntryFlag::Upperbound => {
                        *beta = min(*beta, entry.value);
                    }
                }
                if *alpha >= *beta {
                    return Some(entry.value);
                }
            }
        }
        None
    }

    // Update table based on negamax results.
    fn update(
        &mut self, hash: u64, alpha_orig: Evaluation, beta: Evaluation, depth: u8,
        best: Evaluation, best_move: M,
    ) {
        let flag = if best <= alpha_orig {
            EntryFlag::Upperbound
        } else if best >= beta {
            EntryFlag::Lowerbound
        } else {
            EntryFlag::Exact
        };
        self.store(hash, best, depth, flag, best_move);
    }

    // After finishing a search, populate the principal variation as deep as
    // the table remembers it.
    fn populate_pv<G: Game<M = M>>(&self, pv: &mut Vec<M>, state: &G::S)
    where
        G::S: Clone,
    {
        pv.clear();
        let mut hash_history = Vec::new();
        let mut state = state.clone();
        let mut hash = G::zobrist_hash(&state);
        while let Some(entry) = self.lookup(hash) {
            // The principal variation should only have exact nodes, as other
            // node types are from cutoffs where the node is proven to be
            // worse than a previously explored one.
            //
            // Sometimes, it takes multiple rounds of narrowing bounds for the
            // value to be exact, and we can't guarantee that the table entry
            // will remain in the table between the searches that find
            // equivalent upper and lower bounds, so we take whatever entry is
            // still present rather than insisting on an exact flag.
            let m = entry.best_move.unwrap();
            pv.push(m);
            if let Some(new_state) = G::apply(&mut state, m) {
                state = new_state;
            }
            hash = G::zobrist_hash(&state);
            // Prevent cyclical PVs from being infinitely long.
            if hash_history.contains(&hash) {
                break;
            }
            hash_history.push(hash);
        }
    }
}

pub(super) trait ConcurrentTable<M> {
    fn concurrent_store(
        &self, hash: u64, value: Evaluation, depth: u8, flag: EntryFlag, best_move: M,
    );
    fn concurrent_advance_generation(&self);

    // Update table based on negamax results.
    fn concurrent_update(
        &self, hash: u64, alpha_orig: Evaluation, beta: Evaluation, depth: u8, best: Evaluation,
        best_move: M,
    ) {
        let flag = if best <= alpha_orig {
            EntryFlag::Upperbound
        } else if best >= beta {
            EntryFlag::Lowerbound
        } else {
            EntryFlag::Exact
        };
        self.concurrent_store(hash, best, depth, flag, best_move);
    }
}

impl<M: Copy, T: Table<M> + ConcurrentTable<M>> Table<M> for Arc<T> {
    fn lookup(&self, hash: u64) -> Option<Entry<M>> {
        (**self).lookup(hash)
    }
    fn store(&mut self, hash: u64, value: Evaluation, depth: u8, flag: EntryFlag, best_move: M) {
        self.concurrent_store(hash, value, depth, flag, best_move)
    }
    fn advance_generation(&mut self) {
        self.concurrent_advance_generation()
    }
}
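
// Added sketch (not part of the original source): exercising the blanket impl
// above — a cloned Arc handle stores through the Table trait, and any other
// holder of the Arc sees the entry, since mutations forward to the
// ConcurrentTable methods on the shared table.
#[test]
fn test_shared_table_through_arc() {
    let shared = Arc::new(RacyTable::<u16>::new(1 << 10));
    let mut handle = Arc::clone(&shared);
    // `store` on the Arc goes through ConcurrentTable::concurrent_store.
    handle.store(42, 7, 1, EntryFlag::Exact, 9);
    assert_eq!(shared.lookup(42).map(|e| e.value), Some(7));
}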

// A concurrent table that doesn't bother to use atomic operations to access its entries.
// It's crazily unsafe, but somehow Stockfish gets away with this?
pub(super) struct RacyTable<M> {
    table: Vec<Entry<M>>,
    mask: usize,
    // Incremented for each iterative deepening run.
    // Values from old generations are always overwritten.
    generation: AtomicU8,
}

#[allow(dead_code)]
impl<M> RacyTable<M> {
    pub(super) fn new(table_byte_size: usize) -> Self {
        let size = (table_byte_size / std::mem::size_of::<Entry<M>>()).next_power_of_two();
        let mask = size - 1;
        let mut table = Vec::with_capacity(size);
        for _ in 0..size {
            table.push(Entry::<M> {
                high_hash: 0,
                value: 0,
                depth: 0,
                flag: EntryFlag::Exact,
                generation: 0,
                best_move: None,
            });
        }
        Self { table, mask, generation: AtomicU8::new(0) }
    }
}

impl<M: Copy> Table<M> for RacyTable<M> {
    fn lookup(&self, hash: u64) -> Option<Entry<M>> {
        let index = (hash as usize) & self.mask;
        let entry = self.table[index];
        if high_bits(hash) == entry.high_hash {
            return Some(entry);
        }
        None
    }
    fn store(&mut self, hash: u64, value: Evaluation, depth: u8, flag: EntryFlag, best_move: M) {
        self.concurrent_store(hash, value, depth, flag, best_move)
    }
    fn advance_generation(&mut self) {
        self.concurrent_advance_generation()
    }
}
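
// Added illustration (not part of the original source): how update() classifies
// a search result into an entry flag relative to the original alpha/beta window.
// Assumes Evaluation is the crate's plain signed integer score type.
#[test]
fn test_update_flag_classification() {
    let mut table = RacyTable::<u16>::new(1 << 10);
    // best <= alpha_orig: the search failed low, so the value is only an upper bound.
    table.update(1, 10, 20, 2, 5, 0);
    assert_eq!(table.lookup(1).unwrap().flag, EntryFlag::Upperbound);
    // best >= beta: the search failed high, so the value is only a lower bound.
    table.update(2, 10, 20, 2, 25, 0);
    assert_eq!(table.lookup(2).unwrap().flag, EntryFlag::Lowerbound);
    // Inside the window: the value is exact.
    table.update(3, 10, 20, 2, 15, 0);
    assert_eq!(table.lookup(3).unwrap().flag, EntryFlag::Exact);
}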

impl<M: Copy> ConcurrentTable<M> for RacyTable<M> {
    fn concurrent_store(
        &self, hash: u64, value: Evaluation, depth: u8, flag: EntryFlag, best_move: M,
    ) {
        let table_gen = self.generation.load(Ordering::Relaxed);
        let index = (hash as usize) & self.mask;
        let entry = &self.table[index];
        if entry.generation != table_gen || entry.depth <= depth {
            #[allow(mutable_transmutes)]
            let ptr = unsafe { std::mem::transmute::<&Entry<M>, &mut Entry<M>>(entry) };
            *ptr = Entry {
                high_hash: high_bits(hash),
                value,
                depth,
                flag,
                generation: table_gen,
                best_move: Some(best_move),
            };
        }
    }

    fn concurrent_advance_generation(&self) {
        self.generation.fetch_add(1, Ordering::Relaxed);
    }
}
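
// Added illustration (not part of the original source): the store/check cycle
// negamax performs against a table. A stored lower bound that raises alpha
// past beta lets check() return a cutoff value immediately.
#[test]
fn test_racy_table_check_cutoff() {
    let mut table = RacyTable::<u16>::new(1 << 12);
    let hash = 0x1234_5678_9abc_def0;
    // Record a lower bound of 50 found at depth 3, with best move 7.
    table.store(hash, 50, 3, EntryFlag::Lowerbound, 7);
    let mut good_move = None;
    let (mut alpha, mut beta) = (0, 40);
    // The entry's depth is sufficient and the lower bound exceeds beta,
    // so check() reports a cutoff and surfaces the stored best move.
    assert_eq!(table.check(hash, 3, &mut good_move, &mut alpha, &mut beta), Some(50));
    assert_eq!(good_move, Some(7));
}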

#[repr(align(16))]
struct ConcurrentEntry<M> {
    high_hash: AtomicU32,
    value: Evaluation,
    depth: u8,
    flag: EntryFlag,
    generation: u8,
    best_move: Option<M>,
}

pub(super) struct LockfreeTable<M> {
    table: Vec<ConcurrentEntry<M>>,
    mask: usize,
    generation: AtomicU8,
}

// Safe for cross-thread usage because of manual concurrency operations.
unsafe impl<M> Sync for LockfreeTable<M> {}

impl<M: Copy> Table<M> for LockfreeTable<M> {
    fn lookup(&self, hash: u64) -> Option<Entry<M>> {
        let index = (hash as usize) & self.mask;
        let entry = &self.table[index];
        let table_hash = entry.high_hash.load(Ordering::Acquire);
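        // Compare ignoring the lowest bit: store() flips it when overwriting
        // an entry with the same high hash, so a reader racing with that
        // write fails the verification reload below.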
        if high_bits(hash) | 1 == table_hash | 1 {
            // Copy contents
            let ret = Some(Entry {
                // No one reads the hash.
                high_hash: 0,
                value: entry.value,
                depth: entry.depth,
                flag: entry.flag,
                generation: entry.generation,
                best_move: entry.best_move,
            });
            // Verify the hash hasn't changed during the copy.
            if table_hash == entry.high_hash.load(Ordering::SeqCst) {
                return ret;
            }
        }
        None
    }

    fn store(&mut self, hash: u64, value: Evaluation, depth: u8, flag: EntryFlag, best_move: M) {
        self.concurrent_store(hash, value, depth, flag, best_move)
    }
    fn advance_generation(&mut self) {
        self.concurrent_advance_generation()
    }
}

#[allow(dead_code)]
impl<M> LockfreeTable<M> {
    const WRITING_SENTINEL: u32 = 0xffff_ffff;

    pub(super) fn new(table_byte_size: usize) -> Self {
        let size =
            (table_byte_size / std::mem::size_of::<ConcurrentEntry<M>>()).next_power_of_two();
        let mask = size - 1;
        let mut table = Vec::with_capacity(size);
        for _ in 0..size {
            table.push(ConcurrentEntry::<M> {
                high_hash: AtomicU32::new(0x5555_5555),
                value: 0,
                depth: 0,
                flag: EntryFlag::Exact,
                generation: 0,
                best_move: None,
            });
        }
        Self { table, mask, generation: AtomicU8::new(0) }
    }
}

impl<M: Copy> ConcurrentTable<M> for LockfreeTable<M> {
    fn concurrent_store(
        &self, hash: u64, value: Evaluation, depth: u8, flag: EntryFlag, best_move: M,
    ) {
        let table_gen = self.generation.load(Ordering::Relaxed);
        let index = (hash as usize) & self.mask;
        let entry = &self.table[index];
        // TODO: some not-totally racy reads of generation and depth
        if entry.generation != table_gen || entry.depth <= depth {
            // Set hash to sentinel value during write.
            let x = entry.high_hash.load(Ordering::Acquire);
            if x == Self::WRITING_SENTINEL {
                // Someone's already writing, just forget it.
                return;
            }
            // Try to set to sentinel value:
            if entry
                .high_hash
                .compare_exchange_weak(
                    x,
                    Self::WRITING_SENTINEL,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                )
                .is_err()
            {
                // Someone just started writing, just forget it.
                return;
            }

            // concurrent_lookup will throw out any read that occurs across a write.
            #[allow(mutable_transmutes)]
            let entry = unsafe {
                std::mem::transmute::<&ConcurrentEntry<M>, &mut ConcurrentEntry<M>>(entry)
            };
            entry.value = value;
            entry.depth = depth;
            entry.flag = flag;
            entry.generation = table_gen;
            entry.best_move = Some(best_move);

            // Set hash to correct value to indicate done.
            let new_hash = if high_bits(hash) | 1 == x | 1 {
                // If we're overwriting the same hash, flip the lowest bit to
                // catch any readers reading across this change.
                x ^ 1
            } else {
                high_bits(hash)
            };
            entry.high_hash.store(new_hash, Ordering::Release);
        }
    }

    fn concurrent_advance_generation(&self) {
        self.generation.fetch_add(1, Ordering::Relaxed);
    }
}
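
// Added sketch (not part of the original source): a single-threaded round trip
// through the lock-free store/lookup protocol. Note that the weak CAS in
// concurrent_store may in principle fail spuriously and skip the write; that
// is not expected without contention.
#[test]
fn test_lockfree_table_roundtrip() {
    let table = LockfreeTable::<u16>::new(1 << 12);
    let hash = 0xfeed_f00d_0000_0001;
    table.concurrent_store(hash, -25, 4, EntryFlag::Upperbound, 3);
    let entry = table.lookup(hash).expect("entry stored above should be visible");
    assert_eq!(entry.value, -25);
    assert_eq!(entry.depth, 4);
    assert_eq!(entry.flag, EntryFlag::Upperbound);
    assert_eq!(entry.best_move, Some(3));
}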

// A single-threaded utility to find moves that have done well in other branches.
pub(super) struct CounterMoves<G: Game> {
    countermove_enabled: bool,
    history_enabled: bool,
    // For a given move index, which followup most recently led to a beta cutoff?
    countermove_table: Vec<G::M>,
    // For each move index, how many beta cutoffs has it produced?
    history_table: Vec<u32>,
}

impl<G: Game> CounterMoves<G>
where
    G::M: Eq + Copy,
{
    pub(super) fn new(countermove_enabled: bool, history_enabled: bool) -> Self {
        Self {
            countermove_enabled,
            history_enabled,
            countermove_table: Vec::new(),
            history_table: Vec::new(),
        }
    }

    pub(super) fn reorder(&self, prev: Option<G::M>, moves: &mut [G::M]) {
        if !self.history_table.is_empty() {
            // Stable sort to preserve previous orderings.
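            // The bitwise NOT of the cutoff count makes the ascending sort
            // place the most frequently successful moves first.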
            moves.sort_by_key(|&m| !self.history_table[G::table_index(m) as usize]);
        }
        if let Some(prev) = prev {
            if let Some(response) = self.countermove_table.get(G::table_index(prev) as usize) {
                move_to_front(*response, moves);
            }
        }
    }

    pub(super) fn update(&mut self, prev: Option<G::M>, m: G::M) {
        if let Some(prev) = prev {
            if let Some(entry) = self.countermove_table.get_mut(G::table_index(prev) as usize) {
                *entry = m;
            }
        }
        if let Some(entry) = self.history_table.get_mut(G::table_index(m) as usize) {
            *entry = 1u32.saturating_add(*entry);
        }
    }

    pub(super) fn advance_generation(&mut self, null_move: Option<G::M>) {
        // Lazily allocate tables
        if self.countermove_enabled && self.countermove_table.is_empty() {
            if let Some(m) = null_move {
                self.countermove_table = vec![m; G::max_table_index() as usize + 1];
            }
        }
        if self.history_enabled && self.history_table.is_empty() {
            self.history_table = vec![0; G::max_table_index() as usize + 1];
        }

        // Partially degrade old values, to bias towards new data.
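        // (A right shift by 3 divides each cutoff count by 8.)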
        self.history_table.iter_mut().for_each(|n| *n >>= 3);
    }
}