use std::ptr::NonNull;
use std::sync::Arc;
use crate::eval::{evaluate_pass_rights, get_scaled_pass_move_bonus, EvalHash};
use crate::nnue::{evaluate_dispatch, get_network, AccumulatorStackVariant, DirtyPiece};
use crate::position::Position;
use crate::search::PieceToHistory;
use crate::tt::{ProbeResult, TTData, TranspositionTable};
use crate::types::{
Bound, Color, Depth, Move, Piece, PieceType, Square, Value, DEPTH_QS, DEPTH_UNSEARCHED, MAX_PLY,
};
use super::history::{
capture_malus, continuation_history_bonus_with_offset, low_ply_history_bonus,
pawn_history_bonus, quiet_malus, stat_bonus, HistoryTables,
CONTINUATION_HISTORY_NEAR_PLY_OFFSET, CONTINUATION_HISTORY_WEIGHTS, CORRECTION_HISTORY_LIMIT,
CORRECTION_HISTORY_SIZE, LOW_PLY_HISTORY_SIZE, PRIOR_CAPTURE_COUNTERMOVE_BONUS,
TT_MOVE_HISTORY_BONUS, TT_MOVE_HISTORY_MALUS,
};
use super::movepicker::piece_value;
use super::types::{
draw_value, init_stack_array, value_from_tt, value_to_tt, ContHistKey, NodeType,
OrderedMovesBuffer, RootMoves, SearchedMoveList, StackArray, STACK_SIZE,
};
use super::{LimitsType, MovePicker, TimeManagement};
/// Base margin for reverse futility pruning; scaled by depth and adjusted for
/// improving/worsening trends in `try_futility_pruning`.
const FUTILITY_MARGIN_BASE: i32 = 90;
/// Minimum prior reduction (at depths below `IIR_DEPTH_BOUNDARY`) that triggers
/// the IIR-style depth increase in `search_node`.
const IIR_PRIOR_REDUCTION_THRESHOLD_SHALLOW: i32 = 1;
/// Minimum prior reduction at or beyond `IIR_DEPTH_BOUNDARY` for the same depth increase.
const IIR_PRIOR_REDUCTION_THRESHOLD_DEEP: i32 = 3;
/// Depth at which the prior-reduction threshold switches from shallow to deep.
const IIR_DEPTH_BOUNDARY: Depth = 10;
/// If this node's and the parent's static evals sum above this value while the
/// prior reduction was >= 2, `search_node` reduces depth by one.
const IIR_EVAL_SUM_THRESHOLD: i32 = 177;
use std::sync::LazyLock;
/// Mask selecting bit 1 of the node counter.
const DRAW_JITTER_MASK: u64 = 0x2;
/// Offset making the jitter symmetric around zero (result is -1 or +1).
const DRAW_JITTER_OFFSET: i32 = -1;
/// Deterministic +/-1 value derived from bit 1 of the node counter, typically
/// used to jitter draw scores so they are not all scored identically.
#[inline]
fn draw_jitter(nodes: u64) -> i32 {
    let masked_bit = (nodes & DRAW_JITTER_MASK) as i32;
    masked_bit + DRAW_JITTER_OFFSET
}
/// Index of the most significant set bit of `x` (i.e. floor(log2(x))).
/// Returns 0 for zero or negative inputs.
#[inline]
fn msb(x: i32) -> i32 {
    // checked_ilog2 yields None for x <= 0, matching the original guard;
    // for positive x it equals 31 - leading_zeros.
    x.checked_ilog2().map_or(0, |bit| bit as i32)
}
/// Applies the scaled correction-history term to a raw static eval and clamps
/// the result strictly inside the mate-score range, so a corrected eval can
/// never be mistaken for a proven mate.
#[inline]
fn to_corrected_static_eval(unadjusted: Value, correction_value: i32) -> Value {
    let floor = Value::MATED_IN_MAX_PLY.raw() + 1;
    let ceiling = Value::MATE_IN_MAX_PLY.raw() - 1;
    let corrected = unadjusted.raw() + correction_value / 131_072;
    Value::new(corrected.clamp(floor, ceiling))
}
/// Reduction lookup table indexed by clamped depth or move count (0..=63).
type Reductions = [i32; 64];
// Weight of the (delta / root_delta) aspiration-window term in `reduction`.
const REDUCTION_DELTA_SCALE: i32 = 731;
// Extra reduction when not improving, as the fraction
// REDUCTION_NON_IMPROVING_MULT / REDUCTION_NON_IMPROVING_DIV of the base scale.
const REDUCTION_NON_IMPROVING_MULT: i32 = 216;
const REDUCTION_NON_IMPROVING_DIV: i32 = 512;
// Constant offset added to every computed reduction.
const REDUCTION_BASE_OFFSET: i32 = 1089;
/// Log-shaped base reductions: REDUCTIONS[i] = (2782/128) * ln(i).
/// Index 0 stays 0 (skipped here); `reduction` clamps its indices to 1..=63,
/// so the 0 entry is never read in practice.
static REDUCTIONS: LazyLock<Reductions> = LazyLock::new(|| {
    let mut table: Reductions = [0; 64];
    for (i, value) in table.iter_mut().enumerate().skip(1) {
        *value = (2782.0 / 128.0 * (i as f64).ln()) as i32;
    }
    table
});
/// Computes the late-move-reduction amount.
///
/// `imp` is the improving flag; `depth` and `move_count` index the log-shaped
/// `REDUCTIONS` table (clamped to 1..=63); `delta` and `root_delta` are the
/// current and root aspiration-window widths. Non-positive depth or move
/// count short-circuits to 0.
#[inline]
fn reduction(imp: bool, depth: i32, move_count: i32, delta: i32, root_delta: i32) -> i32 {
    if depth <= 0 || move_count <= 0 {
        return 0;
    }
    let depth_idx = depth.clamp(1, 63) as usize;
    let count_idx = move_count.clamp(1, 63) as usize;
    let scale = REDUCTIONS[depth_idx] * REDUCTIONS[count_idx];
    // A wide window relative to the root window shrinks the reduction.
    let window_term = delta.max(0) * REDUCTION_DELTA_SCALE / root_delta.max(1);
    // Reduce more aggressively when the side to move is not improving.
    let non_improving_term = if imp {
        0
    } else {
        scale * REDUCTION_NON_IMPROVING_MULT / REDUCTION_NON_IMPROVING_DIV
    };
    scale - window_term + non_improving_term + REDUCTION_BASE_OFFSET
}
/// Everything produced by a transposition-table probe that later search steps
/// need: the probe handle (for writing back), the decoded entry, and values
/// pre-adjusted for the current ply.
struct TTContext {
    // Position hash key used for the probe and subsequent writes.
    key: u64,
    // Probe handle; `result.write(..)` stores back into the probed slot.
    result: ProbeResult,
    // Decoded entry payload (meaningful only when `hit` is true).
    data: TTData,
    // Whether the probe found a matching entry.
    hit: bool,
    // TT move, or Move::NONE when there was no hit.
    mv: Move,
    // TT value converted to search-ply space via `value_from_tt`, or Value::NONE.
    value: Value,
    // Whether `mv` is a capture in the current position.
    capture: bool,
}
/// Outcome of `probe_transposition`: continue with the gathered context, or
/// cut off immediately with the given value.
enum ProbeOutcome {
    Continue(TTContext),
    Cutoff(Value),
}
/// Static-evaluation context computed once per node by `compute_eval_context`.
struct EvalContext {
    // Eval after correction-history and TT-bound adjustment (NONE when in check).
    static_eval: Value,
    // Raw TT/NNUE eval before any correction.
    unadjusted_static_eval: Value,
    // Summed correction-history term (large fixed-point scale; consumers divide).
    correction_value: i32,
    // True when this node's eval exceeds the eval two plies earlier.
    improving: bool,
    // True when this eval is above the negation of the parent's eval.
    opponent_worsening: bool,
}
/// Result of the step-14 move-loop pruning checks: skip the move (optionally
/// raising `best_value` to a futility estimate) or keep searching it.
enum Step14Outcome {
    Skip { best_value: Option<Value> },
    Continue,
}
/// Inputs to `try_futility_pruning` (reverse futility / static-null check).
#[derive(Clone, Copy)]
struct FutilityParams {
    depth: Depth,
    beta: Value,
    // Corrected static eval of the node (Value::NONE when in check).
    static_eval: Value,
    // Correction-history term; its magnitude slightly widens the margin.
    correction_value: i32,
    improving: bool,
    opponent_worsening: bool,
    cut_node: bool,
    tt_hit: bool,
    pv_node: bool,
    in_check: bool,
}
/// Per-move inputs to the step-14 pruning checks in the main move loop.
struct Step14Context<'a> {
    pos: &'a Position,
    // Candidate move under consideration.
    mv: Move,
    depth: Depth,
    ply: i32,
    improving: bool,
    best_move: Move,
    best_value: Value,
    alpha: Value,
    in_check: bool,
    gives_check: bool,
    is_capture: bool,
    // Depth already adjusted for the expected late-move reduction.
    lmr_depth: i32,
    mover: Color,
    move_count: i32,
    // Continuation-history tables (presumably 1 and 2 plies back —
    // confirm at the call site in search_node).
    cont_history_1: &'a PieceToHistory,
    cont_history_2: &'a PieceToHistory,
}
/// Per-thread search state: shared tables (TT, eval hash), history heuristics,
/// the per-ply search stack, the NNUE accumulator stack, and bookkeeping for
/// iterative deepening and time management.
pub struct SearchWorker {
    /// Shared transposition table.
    pub tt: Arc<TranspositionTable>,
    /// Shared evaluation hash.
    pub eval_hash: Arc<EvalHash>,
    /// Index of this worker thread (thread 0 drives time management in `check_abort`).
    pub thread_id: usize,
    /// All history heuristic tables (boxed: large allocation).
    pub history: Box<HistoryTables>,
    /// Pointer to a fixed "neutral" continuation-history table inside `history`,
    /// used at plies with no usable predecessor move (null/pass, before the root).
    pub cont_history_sentinel: NonNull<PieceToHistory>,
    /// Root move list with scores, PVs, and effort statistics.
    pub root_moves: RootMoves,
    /// Per-ply search stack.
    pub stack: StackArray,
    /// Nodes visited in the current search.
    pub nodes: u64,
    /// Maximum ply reached while searching the current root move (selective depth).
    pub sel_depth: i32,
    /// Current iterative-deepening depth.
    pub root_depth: Depth,
    /// Width of the root aspiration window (scales reductions via `reduction`).
    pub root_delta: i32,
    /// Last fully completed iteration depth.
    pub completed_depth: Depth,
    /// When true, move generation uses the "all legal moves" generator variants.
    pub generate_all_legal_moves: bool,
    /// Best move found so far.
    pub best_move: Move,
    /// Set when the search must stop (stop request, node limit, or time).
    pub abort: bool,
    /// Decayed count of best-move changes (time-management stability signal).
    pub best_move_changes: f64,
    /// Move-count threshold for draw adjudication (not read in this chunk).
    pub max_moves_to_draw: i32,
    /// Minimum ply at which null-move pruning is allowed again while a
    /// verification search is pending.
    pub nmp_min_ply: i32,
    /// NNUE accumulator stack mirroring the do/undo move sequence.
    pub nnue_stack: AccumulatorStackVariant,
    // Countdown until the next full time/limit check in `check_abort`.
    calls_cnt: i32,
}
impl SearchWorker {
/// Creates a boxed worker bound to the shared TT and eval hash.
///
/// The continuation-history sentinel points at one fixed table inside
/// `history`; because the tables live behind a `Box`, the heap allocation
/// does not move when `history` is moved into the worker, so the pointer
/// stays valid for the worker's lifetime.
pub fn new(
    tt: Arc<TranspositionTable>,
    eval_hash: Arc<EvalHash>,
    max_moves_to_draw: i32,
    thread_id: usize,
) -> Box<Self> {
    let history = HistoryTables::new_boxed();
    // Sentinel: the [not-in-check][non-capture] table for Piece::NONE at a
    // fixed square, used wherever no real predecessor move exists.
    let cont_history_sentinel =
        NonNull::from(history.continuation_history[0][0].get_table(Piece::NONE, Square::SQ_11));
    let mut worker = Box::new(Self {
        tt,
        eval_hash,
        thread_id,
        history,
        cont_history_sentinel,
        root_moves: RootMoves::new(),
        stack: init_stack_array(),
        nodes: 0,
        sel_depth: 0,
        root_depth: 0,
        root_delta: 1,
        completed_depth: 0,
        generate_all_legal_moves: false,
        best_move: Move::NONE,
        abort: false,
        best_move_changes: 0.0,
        max_moves_to_draw,
        nmp_min_ply: 0,
        nnue_stack: AccumulatorStackVariant::new_default(), calls_cnt: 0,
    });
    // Point every stack entry's continuation-history pointer at the sentinel.
    worker.reset_cont_history_ptrs();
    worker
}
/// Returns the continuation-history table pointer recorded `back` plies
/// before `ply`, or the sentinel when that would reach before the root.
#[inline]
fn cont_history_ptr(&self, ply: i32, back: i32) -> NonNull<PieceToHistory> {
    debug_assert!(ply >= 0 && (ply as usize) < STACK_SIZE, "ply out of bounds: {ply}");
    debug_assert!(back >= 0, "back must be non-negative: {back}");
    let idx = ply - back;
    if idx < 0 {
        self.cont_history_sentinel
    } else {
        self.stack[idx as usize].cont_history_ptr
    }
}
/// Dereferences the continuation-history pointer `back` plies before `ply`.
#[inline]
fn cont_history_ref(&self, ply: i32, back: i32) -> &PieceToHistory {
    // SAFETY: every stack entry's pointer is kept pointing either at the
    // sentinel or at a table inside `self.history`, both of which live at
    // least as long as `self`.
    unsafe { self.cont_history_ptr(ply, back).as_ref() }
}
/// Gathers the continuation-history tables for the one through six plies
/// preceding `ply`, in that order, for the move picker.
#[inline]
fn cont_history_tables(&self, ply: i32) -> [&PieceToHistory; 6] {
    std::array::from_fn(|offset| self.cont_history_ref(ply, offset as i32 + 1))
}
/// Points every stack entry's continuation-history pointer at the sentinel.
fn reset_cont_history_ptrs(&mut self) {
    let sentinel = self.cont_history_sentinel;
    self.stack.iter_mut().for_each(|entry| entry.cont_history_ptr = sentinel);
}
/// Records, at `ply`, the continuation-history table selected by the move
/// just played (keyed by check status, capture status, moving piece, and
/// destination square), so deeper plies can score follow-up moves against it.
#[inline]
fn set_cont_history_for_move(
    &mut self,
    ply: i32,
    in_check: bool,
    capture: bool,
    piece: Piece,
    to: Square,
) {
    debug_assert!(ply >= 0 && (ply as usize) < STACK_SIZE, "ply out of bounds: {ply}");
    let in_check_idx = in_check as usize;
    let capture_idx = capture as usize;
    let table =
        self.history.continuation_history[in_check_idx][capture_idx].get_table(piece, to);
    self.stack[ply as usize].cont_history_ptr = NonNull::from(table);
    // The key mirrors the pointer so correction history can address the same
    // table without chasing the raw pointer.
    self.stack[ply as usize].cont_hist_key =
        Some(ContHistKey::new(in_check, capture, piece, to));
}
/// Resets ply's continuation-history slot to the sentinel (used after null
/// and pass moves, which have no meaningful piece/destination).
#[inline]
fn clear_cont_history_for_null(&mut self, ply: i32) {
    let sentinel = self.cont_history_sentinel;
    let entry = &mut self.stack[ply as usize];
    entry.cont_history_ptr = sentinel;
    entry.cont_hist_key = None;
}
/// Resets all history heuristic tables.
pub fn clear(&mut self) {
    self.history.clear();
}
/// Resets all per-search state (counters, flags, root moves, low-ply
/// history) and ensures the NNUE accumulator stack matches the currently
/// loaded network, rebuilding it when the network changed.
pub fn prepare_search(&mut self) {
    self.nodes = 0;
    self.sel_depth = 0;
    self.root_depth = 0;
    self.root_delta = 1;
    self.completed_depth = 0;
    self.best_move = Move::NONE;
    self.abort = false;
    self.best_move_changes = 0.0;
    self.nmp_min_ply = 0;
    self.root_moves.clear();
    self.history.low_ply_history.clear();
    if let Some(network) = get_network() {
        if !self.nnue_stack.matches_network(network) {
            // Network changed since the last search: rebuild the stack for it.
            self.nnue_stack = AccumulatorStackVariant::from_network(network);
        } else {
            self.nnue_stack.reset();
        }
    } else {
        self.nnue_stack.reset();
    }
    // Force a full limit/time check on the first call to check_abort.
    self.calls_cnt = 0;
}
/// Halves the best-move-change counter (periodic decay of the
/// time-management instability signal).
pub fn decay_best_move_changes(&mut self) {
    self.best_move_changes /= 2.0;
}
/// Switches move generation between the default pruned generators and the
/// "all legal moves" generator variants.
pub fn set_generate_all_legal_moves(&mut self, flag: bool) {
    self.generate_all_legal_moves = flag;
}
/// Evaluates the position with the NNUE network through this worker's
/// accumulator stack.
#[inline]
fn nnue_evaluate(&mut self, pos: &Position) -> Value {
    evaluate_dispatch(pos, &mut self.nnue_stack)
}
/// Pushes a move's piece delta onto the NNUE accumulator stack
/// (must be paired with `nnue_pop` when the move is undone).
#[inline]
fn nnue_push(&mut self, dirty_piece: DirtyPiece) {
    self.nnue_stack.push(dirty_piece);
}
/// Pops the most recent piece delta from the NNUE accumulator stack.
#[inline]
fn nnue_pop(&mut self) {
    self.nnue_stack.pop();
}
/// Periodic stop check, batched through `calls_cnt` so the comparatively
/// expensive time/limit checks only run roughly every 512 calls.
///
/// Returns true — and latches `self.abort` — on an external stop request,
/// when the node limit is reached, or when the main thread's time budget is
/// exhausted. Only thread 0 consults the clock and ponder state.
#[inline]
fn check_abort(&mut self, limits: &LimitsType, time_manager: &mut TimeManagement) -> bool {
    if self.abort {
        #[cfg(debug_assertions)]
        eprintln!("check_abort: abort flag already set");
        return true;
    }
    // Cheap path: only every N-th call falls through to the full checks.
    self.calls_cnt -= 1;
    if self.calls_cnt > 0 {
        return false;
    }
    // With a node limit, check often enough to avoid overshooting it badly.
    self.calls_cnt = if limits.nodes > 0 {
        std::cmp::min(512, (limits.nodes / 1024) as i32).max(1)
    } else {
        512
    };
    if time_manager.stop_requested() {
        #[cfg(debug_assertions)]
        eprintln!("check_abort: stop requested");
        self.abort = true;
        return true;
    }
    if limits.nodes > 0 && self.nodes >= limits.nodes {
        #[cfg(debug_assertions)]
        eprintln!(
            "check_abort: node limit reached nodes={} limit={}",
            self.nodes, limits.nodes
        );
        self.abort = true;
        return true;
    }
    // Time management is only driven by the main thread.
    if self.thread_id == 0 {
        if time_manager.take_ponderhit() {
            time_manager.on_ponderhit();
        }
        let elapsed = time_manager.elapsed();
        let elapsed_effective = time_manager.elapsed_from_ponderhit();
        if time_manager.search_end() > 0 && elapsed >= time_manager.search_end() {
            #[cfg(debug_assertions)]
            eprintln!(
                "check_abort: search_end reached elapsed={} search_end={}",
                elapsed,
                time_manager.search_end()
            );
            self.abort = true;
            return true;
        }
        // Past the hard maximum (or stop-on-ponderhit): schedule the end of
        // the search rather than aborting immediately.
        if !time_manager.is_pondering()
            && time_manager.search_end() == 0
            && limits.use_time_management()
            && (elapsed_effective > time_manager.maximum() || time_manager.stop_on_ponderhit())
        {
            time_manager.set_search_end(elapsed);
        }
    }
    false
}
/// Sums the correction-history terms for the side to move: pawn-structure,
/// minor-piece, per-color non-pawn, and (when the previous move is a normal
/// move with a recorded grandparent key) continuation correction.
///
/// The result stays at a large fixed-point scale; consumers divide it down
/// (e.g. by 131_072 in `to_corrected_static_eval`).
#[inline]
fn correction_value(&self, pos: &Position, ply: i32) -> i32 {
    let us = pos.side_to_move();
    // Each table is indexed by the low bits of the corresponding hash key.
    let pawn_idx = (pos.pawn_key() as usize) & (CORRECTION_HISTORY_SIZE - 1);
    let minor_idx = (pos.minor_piece_key() as usize) & (CORRECTION_HISTORY_SIZE - 1);
    let non_pawn_idx_w =
        (pos.non_pawn_key(Color::White) as usize) & (CORRECTION_HISTORY_SIZE - 1);
    let non_pawn_idx_b =
        (pos.non_pawn_key(Color::Black) as usize) & (CORRECTION_HISTORY_SIZE - 1);
    let pcv = self.history.correction_history.pawn_value(pawn_idx, us) as i32;
    let micv = self.history.correction_history.minor_value(minor_idx, us) as i32;
    let wnpcv =
        self.history.correction_history.non_pawn_value(non_pawn_idx_w, Color::White, us) as i32;
    let bnpcv =
        self.history.correction_history.non_pawn_value(non_pawn_idx_b, Color::Black, us) as i32;
    let mut cntcv = 0;
    if ply >= 2 {
        let prev_move = self.stack[(ply - 1) as usize].current_move;
        if prev_move.is_normal() {
            // Keyed by the grandparent move's (piece, to) and the piece now
            // sitting on the previous move's destination.
            if let Some(prev2_key) = self.stack[(ply - 2) as usize].cont_hist_key {
                let pc = pos.piece_on(prev_move.to());
                cntcv = self.history.correction_history.continuation_value(
                    prev2_key.piece,
                    prev2_key.to,
                    pc,
                    prev_move.to(),
                ) as i32;
            }
        }
    }
    // Tuned weights per component.
    8867 * pcv + 8136 * micv + 10_757 * (wnpcv + bnpcv) + 7232 * cntcv
}
/// Updates the same correction-history buckets read by `correction_value`
/// with a bonus derived from the observed eval error, using per-component
/// scaling factors (minor and continuation: 153/128; non-pawn: 165/128).
#[inline]
fn update_correction_history(&mut self, pos: &Position, ply: i32, bonus: i32) {
    let us = pos.side_to_move();
    let pawn_idx = (pos.pawn_key() as usize) & (CORRECTION_HISTORY_SIZE - 1);
    let minor_idx = (pos.minor_piece_key() as usize) & (CORRECTION_HISTORY_SIZE - 1);
    let non_pawn_idx_w =
        (pos.non_pawn_key(Color::White) as usize) & (CORRECTION_HISTORY_SIZE - 1);
    let non_pawn_idx_b =
        (pos.non_pawn_key(Color::Black) as usize) & (CORRECTION_HISTORY_SIZE - 1);
    const NON_PAWN_WEIGHT: i32 = 165;
    self.history.correction_history.update_pawn(pawn_idx, us, bonus);
    self.history.correction_history.update_minor(minor_idx, us, bonus * 153 / 128);
    self.history.correction_history.update_non_pawn(
        non_pawn_idx_w,
        Color::White,
        us,
        bonus * NON_PAWN_WEIGHT / 128,
    );
    self.history.correction_history.update_non_pawn(
        non_pawn_idx_b,
        Color::Black,
        us,
        bonus * NON_PAWN_WEIGHT / 128,
    );
    if ply >= 2 {
        let prev_move = self.stack[(ply - 1) as usize].current_move;
        if prev_move.is_normal() {
            // Same keying as the read path in `correction_value`.
            if let Some(prev2_key) = self.stack[(ply - 2) as usize].cont_hist_key {
                let pc = pos.piece_on(prev_move.to());
                self.history.correction_history.update_continuation(
                    prev2_key.piece,
                    prev2_key.to,
                    pc,
                    prev_move.to(),
                    bonus * 153 / 128,
                );
            }
        }
    }
}
/// Takes (reads and zeroes) the reduction the parent node recorded for this
/// node; returns 0 at the root, where no parent exists.
#[inline]
fn take_prior_reduction(&mut self, ply: i32) -> i32 {
    if ply < 1 {
        return 0;
    }
    // mem::take leaves the default (0) behind, matching the explicit reset.
    std::mem::take(&mut self.stack[(ply - 1) as usize].reduction)
}
/// Probes the transposition table and attempts two early exits: a standard
/// TT cutoff at non-PV nodes, and (on a miss, when not in check) a 1-ply
/// mate detection whose result is written back into the TT.
///
/// During a singular search (`excluded_move` set) the stored `tt_pv` flag is
/// preserved and both cutoffs are disabled so the re-search stays independent.
fn probe_transposition<const NT: u8>(
    &mut self,
    pos: &mut Position,
    depth: Depth,
    beta: Value,
    ply: i32,
    pv_node: bool,
    in_check: bool,
    excluded_move: Move,
) -> ProbeOutcome {
    let key = pos.key();
    let tt_result = self.tt.probe(key, pos);
    let tt_hit = tt_result.found;
    let tt_data = tt_result.data;
    self.stack[ply as usize].tt_hit = tt_hit;
    self.stack[ply as usize].tt_pv = if excluded_move.is_some() {
        self.stack[ply as usize].tt_pv
    } else {
        pv_node || (tt_hit && tt_data.is_pv)
    };
    let tt_move = if tt_hit { tt_data.mv } else { Move::NONE };
    // Mate scores stored in the TT are ply-relative; convert to this ply.
    let tt_value = if tt_hit {
        value_from_tt(tt_data.value, ply)
    } else {
        Value::NONE
    };
    let tt_capture = tt_move.is_some() && pos.is_capture(tt_move);
    // TT cutoff: non-PV, no exclusion, sufficient stored depth, and a bound
    // that proves the value relative to beta.
    if !pv_node
        && excluded_move.is_none()
        && tt_hit
        && tt_data.depth >= depth
        && tt_value != Value::NONE
        && tt_data.bound.can_cutoff(tt_value, beta)
    {
        return ProbeOutcome::Cutoff(tt_value);
    }
    // On a miss, try a 1-ply mate; store it with a depth boost so the entry
    // survives and cuts again.
    if NT != NodeType::Root as u8 && !in_check && !tt_hit && excluded_move.is_none() {
        let mate_move = pos.mate_1ply();
        if mate_move.is_some() {
            let value = Value::mate_in(ply + 1);
            let stored_depth = (depth + 6).min(MAX_PLY - 1);
            tt_result.write(
                key,
                value,
                self.stack[ply as usize].tt_pv,
                Bound::Exact,
                stored_depth,
                mate_move,
                Value::NONE,
                self.tt.generation(),
            );
            return ProbeOutcome::Cutoff(value);
        }
    }
    ProbeOutcome::Continue(TTContext {
        key,
        result: tt_result,
        data: tt_data,
        hit: tt_hit,
        mv: tt_move,
        value: tt_value,
        capture: tt_capture,
    })
}
/// Computes the node's static-eval context: raw eval (TT entry when
/// available, otherwise NNUE), correction-history adjustment, pass-rights
/// bonus, TT-bound refinement, and the improving / opponent-worsening flags.
///
/// During a singular search (`excluded_move` set) the `static_eval` already
/// stored on the stack is reused untouched so the re-search matches the
/// original evaluation.
fn compute_eval_context(
    &mut self,
    pos: &mut Position,
    ply: i32,
    in_check: bool,
    tt_ctx: &TTContext,
    excluded_move: Move,
) -> EvalContext {
    let correction_value = self.correction_value(pos, ply);
    if excluded_move.is_some() {
        // Reuse the eval computed before exclusion; only recompute the flags.
        let static_eval = self.stack[ply as usize].static_eval;
        let improving = if ply >= 2 && !in_check && static_eval != Value::NONE {
            static_eval > self.stack[(ply - 2) as usize].static_eval
        } else {
            false
        };
        let opponent_worsening = if ply >= 1 && static_eval != Value::NONE {
            let prev_eval = self.stack[(ply - 1) as usize].static_eval;
            prev_eval != Value::NONE && static_eval > -prev_eval
        } else {
            false
        };
        return EvalContext {
            static_eval,
            unadjusted_static_eval: static_eval, correction_value,
            improving,
            opponent_worsening,
        };
    }
    let mut unadjusted_static_eval = Value::NONE;
    // In check there is no usable static eval; otherwise prefer the eval
    // stored in the TT entry over a fresh NNUE evaluation.
    let mut static_eval = if in_check {
        Value::NONE
    } else if tt_ctx.hit && tt_ctx.data.eval != Value::NONE {
        unadjusted_static_eval = tt_ctx.data.eval;
        unadjusted_static_eval
    } else {
        unadjusted_static_eval = self.nnue_evaluate(pos);
        unadjusted_static_eval
    };
    // Apply correction history, then the pass-rights bonus.
    if !in_check && unadjusted_static_eval != Value::NONE {
        static_eval = to_corrected_static_eval(unadjusted_static_eval, correction_value);
        static_eval += evaluate_pass_rights(pos, pos.game_ply() as u16);
    }
    // A TT value with the right bound is a better estimate than the raw eval.
    if !in_check
        && tt_ctx.hit
        && tt_ctx.value != Value::NONE
        && !tt_ctx.value.is_mate_score()
        && ((tt_ctx.value > static_eval && tt_ctx.data.bound == Bound::Lower)
            || (tt_ctx.value < static_eval && tt_ctx.data.bound == Bound::Upper))
    {
        static_eval = tt_ctx.value;
    }
    self.stack[ply as usize].static_eval = static_eval;
    let improving = if ply >= 2 && !in_check {
        static_eval > self.stack[(ply - 2) as usize].static_eval
    } else {
        false
    };
    let opponent_worsening = if ply >= 1 && static_eval != Value::NONE {
        let prev_eval = self.stack[(ply - 1) as usize].static_eval;
        prev_eval != Value::NONE && static_eval > -prev_eval
    } else {
        false
    };
    EvalContext {
        static_eval,
        unadjusted_static_eval,
        correction_value,
        improving,
        opponent_worsening,
    }
}
/// Razoring: at shallow non-PV depths, when the static eval is far below
/// alpha, drop straight into quiescence search and return its value if it
/// confirms the fail-low (value <= alpha); otherwise continue normally.
#[allow(clippy::too_many_arguments)]
fn try_razoring<const NT: u8>(
    &mut self,
    pos: &mut Position,
    depth: Depth,
    alpha: Value,
    beta: Value,
    ply: i32,
    pv_node: bool,
    in_check: bool,
    static_eval: Value,
    limits: &LimitsType,
    time_manager: &mut TimeManagement,
) -> Option<Value> {
    if !pv_node && !in_check && depth <= 3 {
        // Margin widens linearly with depth.
        let razoring_threshold = alpha - Value::new(200 * depth);
        if static_eval < razoring_threshold {
            let value = self.qsearch::<{ NodeType::NonPV as u8 }>(
                pos,
                DEPTH_QS,
                alpha,
                beta,
                ply,
                limits,
                time_manager,
            );
            if value <= alpha {
                return Some(value);
            }
        }
    }
    None
}
/// Reverse futility pruning: at shallow non-PV depths (not in check), if the
/// static eval beats beta by a depth-scaled margin, return the eval as a
/// stand-in for a full search. The margin shrinks when improving or when the
/// opponent is worsening, and grows slightly with the correction magnitude.
fn try_futility_pruning(&self, params: FutilityParams) -> Option<Value> {
    if params.pv_node
        || params.in_check
        || params.depth > 8
        || params.static_eval == Value::NONE
    {
        return None;
    }
    // Slightly smaller margin at cut nodes with no TT entry.
    let futility_mult = FUTILITY_MARGIN_BASE - 20 * (params.cut_node && !params.tt_hit) as i32;
    let margin = futility_mult * params.depth
        - (params.improving as i32) * futility_mult * 2
        - (params.opponent_worsening as i32) * futility_mult / 3
        + (params.correction_value.abs() / 171_290);
    if params.static_eval - Value::new(margin) >= params.beta {
        Some(params.static_eval)
    } else {
        None
    }
}
/// Null-move pruning (using a pass move instead when pass rights allow),
/// with a verification search at high depths.
///
/// Applies only at cut nodes, not in check, with the static eval above beta
/// minus a depth margin, outside the `nmp_min_ply` verification window, and
/// never directly after a null/pass move. Returns `(Some(value), _)` on a
/// successful cutoff; the returned bool is `improving`, possibly upgraded
/// when the static eval already meets beta.
#[allow(clippy::too_many_arguments)]
fn try_null_move_pruning<const NT: u8>(
    &mut self,
    pos: &mut Position,
    depth: Depth,
    beta: Value,
    ply: i32,
    cut_node: bool,
    in_check: bool,
    static_eval: Value,
    mut improving: bool,
    excluded_move: Move,
    limits: &LimitsType,
    time_manager: &mut TimeManagement,
) -> (Option<Value>, bool) {
    if ply < 1 {
        return (None, improving);
    }
    let margin = 18 * depth - 390;
    let prev_move = self.stack[(ply - 1) as usize].current_move;
    let prev_is_pass = prev_move.is_pass();
    if excluded_move.is_none()
        && cut_node
        && !in_check
        && static_eval >= beta - Value::new(margin)
        && ply >= self.nmp_min_ply
        && !beta.is_loss()
        && !prev_move.is_null()
        && !prev_is_pass
    {
        // Depth-scaled null-move reduction.
        let r = 7 + depth / 3;
        // Prefer a real pass move over a null move when the rules allow one.
        let use_pass = pos.is_pass_rights_enabled() && pos.can_pass();
        if use_pass {
            self.stack[ply as usize].current_move = Move::PASS;
        } else {
            self.stack[ply as usize].current_move = Move::NULL;
        }
        self.clear_cont_history_for_null(ply);
        if use_pass {
            pos.do_pass_move();
        } else {
            pos.do_null_move_with_prefetch(self.tt.as_ref());
        }
        self.nnue_push(DirtyPiece::new()); let null_value = -self.search_node::<{ NodeType::NonPV as u8 }>(
            pos,
            depth - r,
            -beta,
            -beta + Value::new(1),
            ply + 1,
            false, limits,
            time_manager,
        );
        self.nnue_pop();
        if use_pass {
            pos.undo_pass_move();
        } else {
            pos.undo_null_move();
        }
        if null_value >= beta && !null_value.is_win() {
            // Trust the result outright at low depth or inside verification.
            if self.nmp_min_ply != 0 || depth < 16 {
                return (Some(null_value), improving);
            }
            // Verification search: disable NMP up to nmp_min_ply and re-search
            // at the same reduced depth with a real move allowed.
            self.nmp_min_ply = ply + 3 * (depth - r) / 4;
            let v = self.search_node::<{ NodeType::NonPV as u8 }>(
                pos,
                depth - r,
                beta - Value::new(1),
                beta,
                ply,
                false, limits,
                time_manager,
            );
            self.nmp_min_ply = 0;
            if v >= beta {
                return (Some(null_value), improving);
            }
        }
    }
    // Even when pruning fails, an eval at or above beta upgrades `improving`.
    if !in_check && static_eval != Value::NONE {
        improving |= static_eval >= beta;
    }
    (None, improving)
}
/// ProbCut: searches captures (and checking moves) that promise to beat
/// `prob_beta` — beta plus a margin — in a reduced-depth search. On success,
/// stores a lower-bound TT entry and returns a value safely above beta.
///
/// Skipped in check, at low depth, without a static eval, near mate scores,
/// or when the TT already shows the value cannot reach `prob_beta`.
#[allow(clippy::too_many_arguments)]
fn try_probcut(
    &mut self,
    pos: &mut Position,
    depth: Depth,
    beta: Value,
    improving: bool,
    tt_ctx: &TTContext,
    ply: i32,
    static_eval: Value,
    unadjusted_static_eval: Value,
    in_check: bool,
    limits: &LimitsType,
    time_manager: &mut TimeManagement,
) -> Option<Value> {
    if in_check || depth < 3 || static_eval == Value::NONE {
        return None;
    }
    let prob_beta = beta + Value::new(215 - 60 * improving as i32);
    // A TT value already below prob_beta disproves the cut attempt.
    if beta.is_mate_score()
        || (tt_ctx.hit
            && tt_ctx.value != Value::NONE
            && tt_ctx.value < prob_beta
            && !tt_ctx.value.is_mate_score())
    {
        return None;
    }
    // SEE threshold a capture must clear to be worth trying.
    let threshold = prob_beta - static_eval;
    if threshold <= Value::ZERO {
        return None;
    }
    // Search shallower the further the eval already sits above beta.
    let dynamic_reduction = (static_eval - beta).raw() / 300;
    let probcut_depth = (depth - 5 - dynamic_reduction).max(0);
    // Collect candidates first: the picker borrows self immutably while the
    // search loop below needs &mut self.
    let probcut_moves = {
        let cont_tables = self.cont_history_tables(ply);
        let mp = MovePicker::new_probcut(
            pos,
            tt_ctx.mv,
            threshold,
            &self.history.main_history,
            &self.history.low_ply_history,
            &self.history.capture_history,
            cont_tables,
            &self.history.pawn_history,
            ply,
            self.generate_all_legal_moves,
        );
        let mut buf = [Move::NONE; crate::movegen::MAX_MOVES];
        let mut len = 0;
        for mv in mp {
            buf[len] = mv;
            len += 1;
        }
        (buf, len)
    };
    let (buf, len) = probcut_moves;
    for &mv in buf[..len].iter() {
        if !pos.is_legal(mv) {
            continue;
        }
        let gives_check = pos.gives_check(mv);
        let is_capture = pos.is_capture(mv);
        let cont_hist_piece = mv.moved_piece_after();
        let cont_hist_to = mv.to();
        self.stack[ply as usize].current_move = mv;
        let dirty_piece = pos.do_move_with_prefetch(mv, gives_check, self.tt.as_ref());
        self.nnue_push(dirty_piece);
        self.nodes += 1;
        self.set_cont_history_for_move(
            ply,
            in_check,
            is_capture,
            cont_hist_piece,
            cont_hist_to,
        );
        // Quick qsearch filter first; only verify promising moves with a
        // reduced-depth full search.
        let mut value = -self.qsearch::<{ NodeType::NonPV as u8 }>(
            pos,
            DEPTH_QS,
            -prob_beta,
            -prob_beta + Value::new(1),
            ply + 1,
            limits,
            time_manager,
        );
        if value >= prob_beta && probcut_depth > 0 {
            value = -self.search_node::<{ NodeType::NonPV as u8 }>(
                pos,
                probcut_depth,
                -prob_beta,
                -prob_beta + Value::new(1),
                ply + 1,
                true,
                limits,
                time_manager,
            );
        }
        self.nnue_pop();
        pos.undo_move(mv);
        if value >= prob_beta {
            let stored_depth = (probcut_depth + 1).max(1);
            tt_ctx.result.write(
                tt_ctx.key,
                value_to_tt(value, ply),
                self.stack[ply as usize].tt_pv,
                Bound::Lower,
                stored_depth,
                mv,
                unadjusted_static_eval,
                self.tt.generation(),
            );
            // Shift the returned value back toward beta (unless near infinite).
            if value.raw().abs() < Value::INFINITE.raw() {
                return Some(value - (prob_beta - beta));
            }
            return Some(value);
        }
    }
    None
}
/// Small ProbCut: if the TT holds a lower bound, at close-enough depth,
/// whose value clears beta by a fixed 417 margin, cut off immediately with
/// that margin-adjusted bound — no search needed. Mate scores are excluded
/// on both sides so mate distances are never distorted.
#[inline]
fn try_small_probcut(&self, depth: Depth, beta: Value, tt_ctx: &TTContext) -> Option<Value> {
    if depth < 1 {
        return None;
    }
    let sp_beta = beta + Value::new(417);
    let qualifies = tt_ctx.hit
        && tt_ctx.data.bound == Bound::Lower
        && tt_ctx.data.depth >= depth - 4
        && tt_ctx.value != Value::NONE
        && tt_ctx.value >= sp_beta
        && !tt_ctx.value.is_mate_score()
        && !beta.is_mate_score();
    qualifies.then_some(sp_beta)
}
/// Builds the ordered candidate-move list for qsearch-style nodes and
/// returns it together with the (possibly cleared) TT move.
///
/// * At qsearch depths, a quiet non-checking TT move is dropped (any TT move
///   is dropped below depth -16).
/// * At exactly `DEPTH_QS` when not in check, quiet checking moves — and a
///   checking pass move, when available — are appended.
/// * At depth <= -5, the list is replaced by recaptures on the destination
///   square of the opponent's last normal move.
/// * Above qsearch depths, a legal pass move is appended.
fn generate_ordered_moves(
    &self,
    pos: &mut Position,
    tt_move: Move,
    depth: Depth,
    in_check: bool,
    ply: i32,
) -> (OrderedMovesBuffer, Move) {
    let mut ordered_moves = OrderedMovesBuffer::new();
    let mut tt_move = tt_move;
    if depth <= DEPTH_QS
        && tt_move.is_some()
        && (!pos.capture_stage(tt_move) && !pos.gives_check(tt_move) || depth < -16)
    {
        tt_move = Move::NONE;
    }
    let cont_tables = self.cont_history_tables(ply);
    let mp = MovePicker::new(
        pos,
        tt_move,
        depth,
        &self.history.main_history,
        &self.history.low_ply_history,
        &self.history.capture_history,
        cont_tables,
        &self.history.pawn_history,
        ply,
        self.generate_all_legal_moves,
    );
    for mv in mp {
        if mv.is_some() {
            ordered_moves.push(mv);
        }
    }
    // First qsearch ply, not in check: also consider quiet checking moves.
    if !in_check && depth == DEPTH_QS {
        let mut buf = crate::movegen::ExtMoveBuffer::new();
        let gen_type = if self.generate_all_legal_moves {
            crate::movegen::GenType::QuietChecksAll
        } else {
            crate::movegen::GenType::QuietChecks
        };
        crate::movegen::generate_with_type(pos, gen_type, &mut buf, None);
        for ext in buf.iter() {
            if ordered_moves.contains(&ext.mv) {
                continue;
            }
            ordered_moves.push(ext.mv);
        }
        if pos.can_pass() && pos.gives_check(Move::PASS) && !ordered_moves.contains(&Move::PASS)
        {
            ordered_moves.push(Move::PASS);
        }
    }
    // Deep in qsearch, restrict the list to recaptures on the last move's
    // destination square.
    //
    // BUG FIX: the previous move used to be read from self.stack[ply - 1]
    // unconditionally, before the `ply >= 1` guard; at ply == 0 the index
    // `(0 - 1) as usize` wraps to usize::MAX and panics. The read now
    // happens only after `ply >= 1` is established.
    if depth <= -5 && ply >= 1 {
        let prev_move = self.stack[(ply - 1) as usize].current_move;
        if prev_move.is_normal() {
            let mut buf = crate::movegen::ExtMoveBuffer::new();
            let rec_sq = prev_move.to();
            let gen_type = if self.generate_all_legal_moves {
                crate::movegen::GenType::RecapturesAll
            } else {
                crate::movegen::GenType::Recaptures
            };
            crate::movegen::generate_with_type(pos, gen_type, &mut buf, Some(rec_sq));
            ordered_moves.clear();
            for ext in buf.iter() {
                ordered_moves.push(ext.mv);
            }
        }
    }
    // Above qsearch depths, a legal pass move is always a candidate.
    if depth > DEPTH_QS && pos.can_pass() && !ordered_moves.contains(&Move::PASS) {
        ordered_moves.push(Move::PASS);
    }
    (ordered_moves, tt_move)
}
/// Per-move pruning in the main move loop (step 14): late-move-count
/// pruning, capture/check futility and SEE pruning, and history-based quiet
/// pruning (including quiet futility, which may raise `best_value` to the
/// futility estimate). Pass moves and root moves are never pruned, nor is
/// anything pruned while `best_value` is still a loss score.
fn step14_pruning(&self, ctx: Step14Context<'_>) -> Step14Outcome {
    if ctx.mv.is_pass() {
        return Step14Outcome::Continue;
    }
    let mut lmr_depth = ctx.lmr_depth;
    if ctx.ply != 0 && !ctx.best_value.is_loss() {
        // Late-move-count pruning: quadratic-in-depth move budget, halved
        // when not improving.
        let lmp_denominator = 2 - ctx.improving as i32;
        debug_assert!(lmp_denominator > 0, "LMP denominator must be positive");
        let lmp_limit = (3 + ctx.depth * ctx.depth) / lmp_denominator;
        if ctx.move_count >= lmp_limit && !ctx.is_capture && !ctx.gives_check {
            return Step14Outcome::Skip { best_value: None };
        }
        if ctx.is_capture || ctx.gives_check {
            let captured = ctx.pos.piece_on(ctx.mv.to());
            let capt_hist = self.history.capture_history.get_with_captured_piece(
                ctx.mv.moved_piece_after(),
                ctx.mv.to(),
                captured,
            ) as i32;
            // Capture futility: eval + margin + captured material + history
            // still below alpha -> skip.
            if !ctx.gives_check && lmr_depth < 7 && !ctx.in_check {
                let futility_value = self.stack[ctx.ply as usize].static_eval
                    + Value::new(232 + 224 * lmr_depth)
                    + Value::new(piece_value(captured))
                    + Value::new(131 * capt_hist / 1024);
                if futility_value <= ctx.alpha {
                    return Step14Outcome::Skip { best_value: None };
                }
            }
            // SEE pruning with a history-adjusted, depth-capped margin.
            let margin = (158 * ctx.depth + capt_hist / 31).clamp(0, 283 * ctx.depth);
            if !ctx.pos.see_ge(ctx.mv, Value::new(-margin)) {
                return Step14Outcome::Skip { best_value: None };
            }
        } else {
            // Quiet move: combine continuation and pawn histories.
            let mut history = 0;
            history += ctx.cont_history_1.get(ctx.mv.moved_piece_after(), ctx.mv.to()) as i32;
            history += ctx.cont_history_2.get(ctx.mv.moved_piece_after(), ctx.mv.to()) as i32;
            history += self.history.pawn_history.get(
                ctx.pos.pawn_history_index(),
                ctx.mv.moved_piece_after(),
                ctx.mv.to(),
            ) as i32;
            if history < -4361 * ctx.depth {
                return Step14Outcome::Skip { best_value: None };
            }
            // Main history extends the effective LMR depth for good quiets.
            history += 71 * self.history.main_history.get(ctx.mover, ctx.mv) as i32 / 32;
            lmr_depth += history / 3233;
            // Quiet futility: smaller base once any best move exists.
            let base_futility = if ctx.best_move.is_some() { 46 } else { 230 };
            let futility_value = self.stack[ctx.ply as usize].static_eval
                + Value::new(base_futility + 131 * lmr_depth)
                + Value::new(
                    91 * (self.stack[ctx.ply as usize].static_eval > ctx.alpha) as i32,
                );
            if !ctx.in_check && lmr_depth < 11 && futility_value <= ctx.alpha {
                // The futility estimate may serve as an improved best_value.
                if ctx.best_value <= futility_value
                    && !ctx.best_value.is_mate_score()
                    && !futility_value.is_win()
                {
                    return Step14Outcome::Skip {
                        best_value: Some(futility_value),
                    };
                }
                return Step14Outcome::Skip { best_value: None };
            }
            lmr_depth = lmr_depth.max(0);
            // SEE pruning for quiets with a quadratic-in-lmr_depth margin.
            if !ctx.pos.see_ge(ctx.mv, Value::new(-26 * lmr_depth * lmr_depth)) {
                return Step14Outcome::Skip { best_value: None };
            }
        }
    }
    Step14Outcome::Continue
}
/// Iterative-deepening driver: for each depth 1..=`depth`, runs
/// `search_root` inside an aspiration window seeded from the previous
/// iteration's score (from depth 4 on), widening the window on each
/// fail-high/fail-low until the score lands inside it. Updates
/// `completed_depth` and `best_move` after every completed iteration.
pub fn search(
    &mut self,
    pos: &mut Position,
    depth: Depth,
    limits: &LimitsType,
    time_manager: &mut TimeManagement,
) {
    self.root_moves = RootMoves::from_legal_moves(pos, &limits.search_moves);
    if self.root_moves.is_empty() {
        self.best_move = Move::NONE;
        return;
    }
    for d in 1..=depth {
        if self.abort {
            break;
        }
        // Effort statistics are per-iteration.
        for rm in self.root_moves.iter_mut() {
            rm.effort = 0.0;
        }
        self.root_depth = d;
        self.sel_depth = 0;
        let prev_score = if d > 1 {
            self.root_moves[0].score
        } else {
            Value::new(-32001)
        };
        // Aspiration window around the previous score; full window below depth 4.
        let mut delta = Value::new(10);
        let mut alpha = if d >= 4 {
            Value::new(prev_score.raw().saturating_sub(delta.raw()).max(-32001))
        } else {
            Value::new(-32001)
        };
        let mut beta = if d >= 4 {
            Value::new(prev_score.raw().saturating_add(delta.raw()).min(32001))
        } else {
            Value::new(32001)
        };
        loop {
            let score = self.search_root(pos, d, alpha, beta, limits, time_manager);
            if self.abort {
                break;
            }
            if score <= alpha {
                // Fail low: pull beta toward alpha, widen alpha downward.
                beta = Value::new((alpha.raw() + beta.raw()) / 2);
                alpha = Value::new(score.raw().saturating_sub(delta.raw()).max(-32001));
            } else if score >= beta {
                // Fail high: widen beta upward.
                beta = Value::new(score.raw().saturating_add(delta.raw()).min(32001));
            } else {
                break;
            }
            // Grow the window geometrically on each re-search.
            delta = Value::new(
                delta.raw().saturating_add(delta.raw() / 3).min(Value::INFINITE.raw()),
            );
        }
        if !self.abort {
            self.completed_depth = d;
            self.best_move = self.root_moves[0].mv();
        }
    }
}
/// Searches all root moves at the given depth: full-window PV search on the
/// first move, zero-window search (with PV re-search on a fail-high inside
/// the window) on the rest. Maintains per-move effort, scores, selective
/// depth, and PVs, then moves the best move to the front and re-sorts.
///
/// Returns the best value found, or `Value::ZERO` when aborted mid-way.
pub(crate) fn search_root(
    &mut self,
    pos: &mut Position,
    depth: Depth,
    alpha: Value,
    beta: Value,
    limits: &LimitsType,
    time_manager: &mut TimeManagement,
) -> Value {
    // Root window width scales reductions throughout the tree.
    self.root_delta = (beta.raw() - alpha.raw()).abs().max(1);
    let mut alpha = alpha;
    let mut best_value = Value::new(-32001);
    let mut pv_idx = 0;
    let root_in_check = pos.in_check();
    self.stack[0].in_check = root_in_check;
    self.stack[0].cont_history_ptr = self.cont_history_sentinel;
    self.stack[0].cont_hist_key = None;
    self.stack[0].pv.clear();
    self.stack[1].pv.clear();
    for rm_idx in 0..self.root_moves.len() {
        if self.check_abort(limits, time_manager) {
            return Value::ZERO;
        }
        self.sel_depth = 0;
        let mv = self.root_moves[rm_idx].mv();
        let gives_check = pos.gives_check(mv);
        let is_capture = pos.is_capture(mv);
        let nodes_before = self.nodes;
        let dirty_piece = pos.do_move_with_prefetch(mv, gives_check, self.tt.as_ref());
        self.nnue_push(dirty_piece);
        self.nodes += 1;
        self.stack[0].current_move = mv;
        if mv.is_pass() {
            self.clear_cont_history_for_null(0);
        } else {
            let cont_hist_piece = mv.moved_piece_after();
            let cont_hist_to = mv.to();
            self.set_cont_history_for_move(
                0,
                root_in_check,
                is_capture,
                cont_hist_piece,
                cont_hist_to,
            );
        }
        // First move: full window. Later moves: zero window, then PV
        // re-search if the score lands strictly inside (alpha, beta).
        let value = if rm_idx == 0 {
            -self.search_node::<{ NodeType::PV as u8 }>(
                pos,
                depth - 1,
                -beta,
                -alpha,
                1,
                false,
                limits,
                time_manager,
            )
        } else {
            let mut value = -self.search_node::<{ NodeType::NonPV as u8 }>(
                pos,
                depth - 1,
                -alpha - Value::new(1),
                -alpha,
                1,
                true,
                limits,
                time_manager,
            );
            if value > alpha && value < beta {
                value = -self.search_node::<{ NodeType::PV as u8 }>(
                    pos,
                    depth - 1,
                    -beta,
                    -alpha,
                    1,
                    false,
                    limits,
                    time_manager,
                );
            }
            value
        };
        self.nnue_pop();
        pos.undo_move(mv);
        // Effort = nodes spent on this root move (for time management).
        let nodes_delta = self.nodes.saturating_sub(nodes_before);
        self.root_moves[rm_idx].effort += nodes_delta as f64;
        if self.abort {
            return Value::ZERO;
        }
        let mut updated_alpha = rm_idx == 0; {
            let rm = &mut self.root_moves[rm_idx];
            rm.score = value;
            rm.sel_depth = self.sel_depth;
            rm.accumulate_score_stats(value);
        }
        if value > best_value {
            best_value = value;
            if value > alpha {
                // A later move overtaking the first counts as instability.
                if rm_idx > 0 {
                    self.best_move_changes += 1.0;
                }
                alpha = value;
                pv_idx = rm_idx;
                updated_alpha = true;
                self.root_moves[rm_idx].pv.truncate(1);
                self.root_moves[rm_idx].pv.extend_from_slice(&self.stack[1].pv);
                if value >= beta {
                    break;
                }
            }
        }
        // Moves that failed low keep only a sentinel score so sorting
        // preserves the previous ordering among them.
        if !updated_alpha {
            self.root_moves[rm_idx].score = Value::new(-Value::INFINITE.raw());
        }
    }
    self.root_moves.move_to_front(pv_idx);
    self.root_moves.sort();
    best_value
}
/// Variant of `search_root` for MultiPV: searches root moves starting at
/// `pv_idx` (earlier moves belong to already-finished PV lines), with a full
/// window on the first candidate and zero-window + re-search on the rest.
/// Moves the best candidate to position `pv_idx` when done.
///
/// Returns the best value found, or `Value::ZERO` when aborted mid-way.
pub(crate) fn search_root_for_pv(
    &mut self,
    pos: &mut Position,
    depth: Depth,
    alpha: Value,
    beta: Value,
    pv_idx: usize,
    limits: &LimitsType,
    time_manager: &mut TimeManagement,
) -> Value {
    self.root_delta = (beta.raw() - alpha.raw()).abs().max(1);
    let mut alpha = alpha;
    let mut best_value = Value::new(-32001);
    let mut best_rm_idx = pv_idx;
    let root_in_check = pos.in_check();
    self.stack[0].in_check = root_in_check;
    self.stack[0].cont_history_ptr = self.cont_history_sentinel;
    self.stack[0].cont_hist_key = None;
    self.stack[0].pv.clear();
    self.stack[1].pv.clear();
    for rm_idx in pv_idx..self.root_moves.len() {
        if self.check_abort(limits, time_manager) {
            return Value::ZERO;
        }
        self.sel_depth = 0;
        let mv = self.root_moves[rm_idx].mv();
        let gives_check = pos.gives_check(mv);
        let is_capture = pos.is_capture(mv);
        let nodes_before = self.nodes;
        let dirty_piece = pos.do_move_with_prefetch(mv, gives_check, self.tt.as_ref());
        self.nnue_push(dirty_piece);
        self.nodes += 1;
        self.stack[0].current_move = mv;
        if mv.is_pass() {
            self.clear_cont_history_for_null(0);
        } else {
            let cont_hist_piece = mv.moved_piece_after();
            let cont_hist_to = mv.to();
            self.set_cont_history_for_move(
                0,
                root_in_check,
                is_capture,
                cont_hist_piece,
                cont_hist_to,
            );
        }
        // First candidate of this PV line: full window; otherwise zero
        // window with a PV re-search on an in-window fail-high.
        let value = if rm_idx == pv_idx {
            -self.search_node::<{ NodeType::PV as u8 }>(
                pos,
                depth - 1,
                -beta,
                -alpha,
                1,
                false,
                limits,
                time_manager,
            )
        } else {
            let mut value = -self.search_node::<{ NodeType::NonPV as u8 }>(
                pos,
                depth - 1,
                -alpha - Value::new(1),
                -alpha,
                1,
                true,
                limits,
                time_manager,
            );
            if value > alpha && value < beta {
                value = -self.search_node::<{ NodeType::PV as u8 }>(
                    pos,
                    depth - 1,
                    -beta,
                    -alpha,
                    1,
                    false,
                    limits,
                    time_manager,
                );
            }
            value
        };
        self.nnue_pop();
        pos.undo_move(mv);
        let nodes_delta = self.nodes.saturating_sub(nodes_before);
        self.root_moves[rm_idx].effort += nodes_delta as f64;
        if self.abort {
            return Value::ZERO;
        }
        let mut updated_alpha = rm_idx == pv_idx; {
            let rm = &mut self.root_moves[rm_idx];
            rm.score = value;
            rm.sel_depth = self.sel_depth;
            rm.accumulate_score_stats(value);
        }
        if value > best_value {
            best_value = value;
            if value > alpha {
                // Instability only tracked for the primary PV line.
                if pv_idx == 0 && rm_idx > pv_idx {
                    self.best_move_changes += 1.0;
                }
                alpha = value;
                best_rm_idx = rm_idx;
                updated_alpha = true;
                self.root_moves[rm_idx].pv.truncate(1);
                self.root_moves[rm_idx].pv.extend_from_slice(&self.stack[1].pv);
                if value >= beta {
                    break;
                }
            }
        }
        // Failed-low moves keep only a sentinel score so later sorting
        // leaves their relative order unchanged.
        if !updated_alpha {
            self.root_moves[rm_idx].score = Value::new(-Value::INFINITE.raw());
        }
    }
    self.root_moves.move_to_index(best_rm_idx, pv_idx);
    best_value
}
/// Recursive alpha-beta search for one interior node.
///
/// `NT` selects the node type at compile time (`Root`/`PV`/`NonPV`);
/// `pv_node` covers both `Root` and `PV`. Returns the node's value from the
/// side to move's point of view, or `Value::ZERO` once the search aborts.
///
/// NOTE(review): the pruning/reduction stages below are order-sensitive and
/// heavily tuned; the comments mark the stages without altering any code.
fn search_node<const NT: u8>(
    &mut self,
    pos: &mut Position,
    depth: Depth,
    alpha: Value,
    beta: Value,
    ply: i32,
    cut_node: bool,
    limits: &LimitsType,
    time_manager: &mut TimeManagement,
) -> Value {
    let pv_node = NT == NodeType::PV as u8 || NT == NodeType::Root as u8;
    let mut depth = depth;
    let in_check = pos.in_check();
    // A node that is neither PV nor cut is an "all" node (expected fail-low).
    let all_node = !(pv_node || cut_node);
    let mut alpha = alpha;
    // Depth exhausted: drop into quiescence search.
    if depth <= DEPTH_QS {
        return self.qsearch::<NT>(pos, depth, alpha, beta, ply, limits, time_manager);
    }
    // Hard ply limit: return a static result instead of recursing further.
    if ply >= MAX_PLY {
        return if in_check {
            Value::ZERO
        } else {
            self.nnue_evaluate(pos)
        };
    }
    if pv_node && self.sel_depth < ply + 1 {
        self.sel_depth = ply + 1;
    }
    if self.check_abort(limits, time_manager) {
        return Value::ZERO;
    }
    // Reset per-ply stack state.
    self.stack[ply as usize].in_check = in_check;
    self.stack[ply as usize].move_count = 0;
    self.stack[(ply + 1) as usize].cutoff_cnt = 0;
    if pv_node {
        self.stack[ply as usize].pv.clear();
        self.stack[(ply + 1) as usize].pv.clear();
    }
    let prior_reduction = self.take_prior_reduction(ply);
    self.stack[ply as usize].reduction = 0;
    let excluded_move = self.stack[ply as usize].excluded_move;
    // Transposition-table probe; may yield an immediate cutoff value.
    let tt_ctx = match self.probe_transposition::<NT>(
        pos,
        depth,
        beta,
        ply,
        pv_node,
        in_check,
        excluded_move,
    ) {
        ProbeOutcome::Continue(ctx) => ctx,
        ProbeOutcome::Cutoff(value) => return value,
    };
    let tt_move = tt_ctx.mv;
    let tt_value = tt_ctx.value;
    let tt_hit = tt_ctx.hit;
    let tt_data = tt_ctx.data;
    let tt_capture = tt_ctx.capture;
    // Corrected static evaluation plus improving/worsening flags.
    let eval_ctx = self.compute_eval_context(pos, ply, in_check, &tt_ctx, excluded_move);
    let mut improving = eval_ctx.improving;
    let opponent_worsening = eval_ctx.opponent_worsening;
    // Re-extend a node the parent reduced heavily, unless the opponent is
    // worsening anyway.
    if prior_reduction
        >= if depth < IIR_DEPTH_BOUNDARY {
            IIR_PRIOR_REDUCTION_THRESHOLD_SHALLOW
        } else {
            IIR_PRIOR_REDUCTION_THRESHOLD_DEEP
        }
        && !opponent_worsening
    {
        depth += 1;
    }
    // Shrink depth when both this node's and the parent's static evals are
    // known and their sum exceeds the threshold.
    if prior_reduction >= 2
        && depth >= 2
        && ply >= 1
        && eval_ctx.static_eval != Value::NONE
        && self.stack[(ply - 1) as usize].static_eval != Value::NONE
        && eval_ctx.static_eval + self.stack[(ply - 1) as usize].static_eval
            > Value::new(IIR_EVAL_SUM_THRESHOLD)
    {
        depth -= 1;
    }
    // Razoring.
    if let Some(v) = self.try_razoring::<NT>(
        pos,
        depth,
        alpha,
        beta,
        ply,
        pv_node,
        in_check,
        eval_ctx.static_eval,
        limits,
        time_manager,
    ) {
        return v;
    }
    // Node-level futility pruning.
    if let Some(v) = self.try_futility_pruning(FutilityParams {
        depth,
        beta,
        static_eval: eval_ctx.static_eval,
        correction_value: eval_ctx.correction_value,
        improving,
        opponent_worsening,
        cut_node,
        tt_hit,
        pv_node,
        in_check,
    }) {
        return v;
    }
    // Null-move pruning; also refreshes the improving flag.
    let (null_value, improving_after_null) = self.try_null_move_pruning::<NT>(
        pos,
        depth,
        beta,
        ply,
        cut_node,
        in_check,
        eval_ctx.static_eval,
        improving,
        excluded_move,
        limits,
        time_manager,
    );
    if let Some(v) = null_value {
        return v;
    }
    improving = improving_after_null;
    // Internal iterative reduction when there is no TT move.
    if !all_node && depth >= 6 && tt_move.is_none() && prior_reduction <= 3 {
        depth -= 1;
    }
    // ProbCut.
    if let Some(v) = self.try_probcut(
        pos,
        depth,
        beta,
        improving,
        &tt_ctx,
        ply,
        eval_ctx.static_eval,
        eval_ctx.unadjusted_static_eval,
        in_check,
        limits,
        time_manager,
    ) {
        return v;
    }
    if let Some(v) = self.try_small_probcut(depth, beta, &tt_ctx) {
        return v;
    }
    // Main move-loop state.
    let mut best_value = Value::new(-32001);
    let mut best_move = Move::NONE;
    let mut move_count = 0;
    let mut quiets_tried = SearchedMoveList::new();
    let mut captures_tried = SearchedMoveList::new();
    let mover = pos.side_to_move();
    // Note: `tt_move` is intentionally re-bound here by the move generator.
    let (ordered_moves, tt_move) =
        self.generate_ordered_moves(pos, tt_move, depth, in_check, ply);
    let tt_pv = self.stack[ply as usize].tt_pv;
    let root_node = NT == NodeType::Root as u8;
    for mv in ordered_moves.iter() {
        if mv == excluded_move {
            continue;
        }
        if !pos.pseudo_legal(mv) {
            continue;
        }
        if !pos.is_legal(mv) {
            continue;
        }
        if self.check_abort(limits, time_manager) {
            return Value::ZERO;
        }
        move_count += 1;
        self.stack[ply as usize].move_count = move_count;
        let is_capture = pos.is_capture(mv);
        let gives_check = pos.gives_check(mv);
        // Track consecutive quiet, non-checking moves; used for reductions.
        self.stack[(ply + 1) as usize].quiet_move_streak = if !is_capture && !gives_check {
            self.stack[ply as usize].quiet_move_streak + 1
        } else {
            0
        };
        let mut new_depth = depth - 1;
        let mut extension = 0i32;
        // Singular extension: test whether the TT move is clearly best by
        // searching all other moves at reduced depth below `singular_beta`.
        if !root_node
            && mv == tt_move
            && excluded_move.is_none()
            && depth >= 6 + tt_pv as i32
            && tt_value != Value::NONE
            && !tt_value.is_mate_score()
            && tt_data.bound.is_lower_or_exact()
            && tt_data.depth >= depth - 3
        {
            let singular_beta_margin = (56 + 79 * (tt_pv && !pv_node) as i32) * depth / 58;
            let singular_beta = tt_value - Value::new(singular_beta_margin);
            let singular_depth = new_depth / 2;
            self.stack[ply as usize].excluded_move = mv;
            let singular_value = self.search_node::<{ NodeType::NonPV as u8 }>(
                pos,
                singular_depth,
                singular_beta - Value::new(1),
                singular_beta,
                ply,
                cut_node,
                limits,
                time_manager,
            );
            self.stack[ply as usize].excluded_move = Move::NONE;
            if singular_value < singular_beta {
                // TT move is singular: extend by 1, possibly 2 or 3.
                let corr_val_adj = eval_ctx.correction_value.abs() / 249_096;
                let double_margin =
                    4 + 205 * pv_node as i32 - 223 * !tt_capture as i32 - corr_val_adj;
                let triple_margin = 80 + 276 * pv_node as i32 - 249 * !tt_capture as i32
                    + 86 * tt_pv as i32
                    - corr_val_adj;
                extension = 1
                    + (singular_value < singular_beta - Value::new(double_margin)) as i32
                    + (singular_value < singular_beta - Value::new(triple_margin)) as i32;
                depth += 1;
            } else if singular_value >= beta && !singular_value.is_mate_score() {
                // Multi-cut: even without the TT move the node fails high.
                return singular_value;
            } else if tt_value >= beta {
                extension = -3;
            } else if cut_node {
                extension = -2;
            }
        }
        // Base late-move reduction, expressed in 1024ths of a ply.
        let delta = (beta.raw() - alpha.raw()).max(0);
        let mut r = reduction(improving, depth, move_count, delta, self.root_delta.max(1));
        if self.stack[ply as usize].tt_pv {
            r += 931;
        }
        let lmr_depth = new_depth - r / 1024;
        // Step-14 move-level pruning (may also raise best_value on skip).
        let step14_ctx = Step14Context {
            pos,
            mv,
            depth,
            ply,
            improving,
            best_move,
            best_value,
            alpha,
            in_check,
            gives_check,
            is_capture,
            lmr_depth,
            mover,
            move_count,
            cont_history_1: self.cont_history_ref(ply, 1),
            cont_history_2: self.cont_history_ref(ply, 2),
        };
        match self.step14_pruning(step14_ctx) {
            Step14Outcome::Skip {
                best_value: updated,
            } => {
                if let Some(v) = updated {
                    best_value = v;
                }
                continue;
            }
            Step14Outcome::Continue => {}
        }
        // Late-move-count pruning for quiets at non-PV nodes.
        if !pv_node && !in_check && !is_capture && !mv.is_pass() {
            let lmp_limit = (3 + depth * depth) / (2 - improving as i32);
            if move_count >= lmp_limit {
                continue;
            }
        }
        // SEE pruning at shallow depth.
        if !pv_node && depth <= 8 && !in_check && !mv.is_pass() {
            let see_threshold = if is_capture {
                Value::new(-20 * depth * depth)
            } else {
                Value::new(-50 * depth)
            };
            if !pos.see_ge(mv, see_threshold) {
                continue;
            }
        }
        // Make the move and set up continuation-history bookkeeping.
        self.stack[ply as usize].current_move = mv;
        let dirty_piece = pos.do_move_with_prefetch(mv, gives_check, self.tt.as_ref());
        self.nnue_push(dirty_piece);
        self.nodes += 1;
        if mv.is_pass() {
            self.clear_cont_history_for_null(ply);
        } else {
            let cont_hist_piece = mv.moved_piece_after();
            let cont_hist_to = mv.to();
            self.set_cont_history_for_move(
                ply,
                in_check,
                is_capture,
                cont_hist_piece,
                cont_hist_to,
            );
        }
        if !mv.is_pass() {
            if is_capture {
                captures_tried.push(mv);
            } else {
                quiets_tried.push(mv);
            }
        }
        new_depth += extension;
        // Adjust the reduction using TT/PV/history information.
        let msb_depth = msb(depth);
        let tt_value_higher = tt_hit && tt_value != Value::NONE && tt_value > alpha;
        let tt_depth_ge = tt_hit && tt_data.depth >= depth;
        if self.stack[ply as usize].tt_pv {
            r -= 2510
                + (pv_node as i32) * 963
                + (tt_value_higher as i32) * 916
                + (tt_depth_ge as i32) * (943 + (cut_node as i32) * 1180);
        }
        r += 679 - 6 * msb_depth;
        r -= move_count * (67 - 2 * msb_depth);
        r -= eval_ctx.correction_value.abs() / 27_160;
        if cut_node {
            let no_tt_move = !tt_hit || tt_move.is_none();
            r += 2998 + 2 * msb_depth + (948 + 14 * msb_depth) * (no_tt_move as i32);
        }
        if tt_capture {
            r += 1402 - 39 * msb_depth;
        }
        if self.stack[(ply + 1) as usize].cutoff_cnt > 2 {
            r += 925 + 33 * msb_depth + (all_node as i32) * (701 + 224 * msb_depth);
        }
        r += self.stack[(ply + 1) as usize].quiet_move_streak * 51;
        if mv == tt_move {
            r -= 2121 + 28 * msb_depth;
        }
        // History statistic for this move (captures vs quiets).
        let stat_score = if mv.is_pass() {
            0
        } else if is_capture {
            let captured = pos.captured_piece();
            let captured_pt = captured.piece_type();
            let moved_piece = mv.moved_piece_after();
            let hist =
                self.history.capture_history.get(moved_piece, mv.to(), captured_pt) as i32;
            782 * piece_value(captured) / 128 + hist
        } else {
            let moved_piece = mv.moved_piece_after();
            let main_hist = self.history.main_history.get(mover, mv) as i32;
            let cont0 = self.cont_history_ref(ply, 1).get(moved_piece, mv.to()) as i32;
            let cont1 = self.cont_history_ref(ply, 2).get(moved_piece, mv.to()) as i32;
            2 * main_hist + cont0 + cont1
        };
        self.stack[ply as usize].stat_score = stat_score;
        r -= stat_score * (729 - 12 * msb_depth) / 8192;
        // LMR: reduced null-window search first, then re-search as needed.
        let mut value = if depth >= 2 && move_count > 1 {
            let d = (std::cmp::max(
                1,
                std::cmp::min(new_depth - r / 1024, new_depth + 1 + pv_node as i32),
            ) + pv_node as i32)
                .max(1);
            let reduction_from_parent = (depth - 1) - d;
            self.stack[ply as usize].reduction = reduction_from_parent;
            let mut value = -self.search_node::<{ NodeType::NonPV as u8 }>(
                pos,
                d,
                -alpha - Value::new(1),
                -alpha,
                ply + 1,
                true,
                limits,
                time_manager,
            );
            self.stack[ply as usize].reduction = 0;
            if value > alpha {
                // Reduced search beat alpha: decide whether the full-depth
                // re-search should go deeper or shallower.
                let do_deeper =
                    d < new_depth && value > (best_value + Value::new(43 + 2 * new_depth));
                let do_shallower = value < best_value + Value::new(9);
                new_depth += do_deeper as i32 - do_shallower as i32;
                if new_depth > d {
                    value = -self.search_node::<{ NodeType::NonPV as u8 }>(
                        pos,
                        new_depth,
                        -alpha - Value::new(1),
                        -alpha,
                        ply + 1,
                        !cut_node,
                        limits,
                        time_manager,
                    );
                }
                // Reward the continuation histories of a move that raised
                // alpha after a reduced search.
                if !mv.is_pass() {
                    let moved_piece = mv.moved_piece_after();
                    let to_sq = mv.to();
                    // (ply offset, weight in 1024ths) pairs.
                    const CONTHIST_BONUSES: &[(i32, i32)] =
                        &[(1, 1108), (2, 652), (3, 273), (4, 572), (5, 126), (6, 449)];
                    for &(offset, weight) in CONTHIST_BONUSES {
                        // When in check, only the two nearest plies count.
                        if self.stack[ply as usize].in_check && offset > 2 {
                            break;
                        }
                        let idx = ply - offset;
                        if idx < 0 {
                            break;
                        }
                        if let Some(key) = self.stack[idx as usize].cont_hist_key {
                            let in_check_idx = key.in_check as usize;
                            let capture_idx = key.capture as usize;
                            let bonus = 1412 * weight / 1024 + if offset < 2 { 80 } else { 0 };
                            self.history.continuation_history[in_check_idx][capture_idx]
                                .update(key.piece, key.to, moved_piece, to_sq, bonus);
                        }
                    }
                }
            } else if value > alpha && value < best_value + Value::new(9) {
                // NOTE(review): unreachable — this is the `else` branch of
                // `value > alpha`, so the condition can never hold; the
                // #[allow] below already acknowledges the dead assignment.
                #[allow(unused_assignments)]
                {
                    new_depth -= 1;
                }
            }
            // Full-window PV re-search when required at a PV node.
            if pv_node && (move_count == 1 || value > alpha) {
                self.stack[ply as usize].reduction = 0;
                -self.search_node::<{ NodeType::PV as u8 }>(
                    pos,
                    depth - 1,
                    -beta,
                    -alpha,
                    ply + 1,
                    false,
                    limits,
                    time_manager,
                )
            } else {
                value
            }
        } else if !pv_node || move_count > 1 {
            // No LMR: null-window search, then a PV re-search if the value
            // lands inside the window at a PV node.
            self.stack[ply as usize].reduction = 0;
            let mut value = -self.search_node::<{ NodeType::NonPV as u8 }>(
                pos,
                depth - 1,
                -alpha - Value::new(1),
                -alpha,
                ply + 1,
                !cut_node,
                limits,
                time_manager,
            );
            self.stack[ply as usize].reduction = 0;
            if pv_node && value > alpha && value < beta {
                self.stack[ply as usize].reduction = 0;
                value = -self.search_node::<{ NodeType::PV as u8 }>(
                    pos,
                    depth - 1,
                    -beta,
                    -alpha,
                    ply + 1,
                    false,
                    limits,
                    time_manager,
                );
                self.stack[ply as usize].reduction = 0;
            }
            value
        } else {
            // First move of a PV node: full-window PV search directly.
            self.stack[ply as usize].reduction = 0;
            -self.search_node::<{ NodeType::PV as u8 }>(
                pos,
                depth - 1,
                -beta,
                -alpha,
                ply + 1,
                false,
                limits,
                time_manager,
            )
        };
        self.nnue_pop();
        pos.undo_move(mv);
        // Pass-move bonus scaled by game ply (skipped for mate scores).
        if mv.is_pass() && !value.is_mate_score() {
            let bonus = get_scaled_pass_move_bonus(pos.game_ply());
            if bonus != 0 {
                value += Value::new(bonus);
            }
        }
        if self.abort {
            return Value::ZERO;
        }
        if value > best_value {
            best_value = value;
            if value > alpha {
                best_move = mv;
                alpha = value;
                if pv_node {
                    let child_pv = self.stack[(ply + 1) as usize].pv.clone();
                    self.stack[ply as usize].update_pv(mv, &child_pv);
                }
                if value >= beta {
                    // Beta cutoff.
                    self.stack[ply as usize].cutoff_cnt += 1;
                    break;
                }
            }
        }
    }
    // No searched move: singular fail-low, checkmate, or stalemate.
    if move_count == 0 {
        if excluded_move.is_some() {
            return alpha;
        }
        if in_check {
            return Value::mated_in(ply);
        } else {
            return Value::ZERO;
        }
    }
    // A best move was found: update history tables (bonus for the best move,
    // malus for the alternatives that were tried).
    if best_move.is_some() && !best_move.is_pass() {
        let is_best_capture = pos.is_capture(best_move);
        let is_tt_move = best_move == tt_move;
        let bonus = stat_bonus(depth, is_tt_move);
        let malus = quiet_malus(depth, quiets_tried.len());
        let us = pos.side_to_move();
        let pawn_key_idx = pos.pawn_history_index();
        let best_moved_pc = pos.moved_piece(best_move);
        // For promotions, credit the piece the mover becomes.
        let best_cont_pc = if best_move.is_promotion() {
            best_moved_pc.promote().unwrap_or(best_moved_pc)
        } else {
            best_moved_pc
        };
        let best_to = best_move.to();
        // While in check only near-ply continuation entries are touched.
        let max_ply_back = if in_check { 2 } else { 6 };
        if !is_best_capture {
            let scaled_bonus = bonus * 978 / 1024;
            self.history.main_history.update(us, best_move, scaled_bonus);
            if ply < LOW_PLY_HISTORY_SIZE as i32 {
                let low_ply_bonus = low_ply_history_bonus(scaled_bonus);
                self.history.low_ply_history.update(ply as usize, best_move, low_ply_bonus);
            }
            for &(ply_back, weight) in CONTINUATION_HISTORY_WEIGHTS.iter() {
                if ply_back > max_ply_back {
                    continue;
                }
                if ply >= ply_back as i32 {
                    let prev_ply = (ply - ply_back as i32) as usize;
                    if let Some(key) = self.stack[prev_ply].cont_hist_key {
                        let in_check_idx = key.in_check as usize;
                        let capture_idx = key.capture as usize;
                        let weighted_bonus = continuation_history_bonus_with_offset(
                            scaled_bonus * weight / 1024,
                            ply_back,
                        );
                        self.history.continuation_history[in_check_idx][capture_idx].update(
                            key.piece,
                            key.to,
                            best_cont_pc,
                            best_to,
                            weighted_bonus,
                        );
                    }
                }
            }
            let pawn_bonus = pawn_history_bonus(scaled_bonus);
            self.history
                .pawn_history
                .update(pawn_key_idx, best_cont_pc, best_to, pawn_bonus);
            // Penalize the quiet moves that were tried but did not cut off.
            let scaled_malus = malus * 1115 / 1024;
            for &m in quiets_tried.iter() {
                if m != best_move {
                    self.history.main_history.update(us, m, -scaled_malus);
                    if ply < LOW_PLY_HISTORY_SIZE as i32 {
                        let low_ply_malus = low_ply_history_bonus(-scaled_malus);
                        self.history.low_ply_history.update(ply as usize, m, low_ply_malus);
                    }
                    let moved_pc = pos.moved_piece(m);
                    let cont_pc = if m.is_promotion() {
                        moved_pc.promote().unwrap_or(moved_pc)
                    } else {
                        moved_pc
                    };
                    let to = m.to();
                    for &(ply_back, weight) in CONTINUATION_HISTORY_WEIGHTS.iter() {
                        if ply_back > max_ply_back {
                            continue;
                        }
                        if ply >= ply_back as i32 {
                            let prev_ply = (ply - ply_back as i32) as usize;
                            if let Some(key) = self.stack[prev_ply].cont_hist_key {
                                let in_check_idx = key.in_check as usize;
                                let capture_idx = key.capture as usize;
                                let weighted_malus = continuation_history_bonus_with_offset(
                                    -scaled_malus * weight / 1024,
                                    ply_back,
                                );
                                self.history.continuation_history[in_check_idx][capture_idx]
                                    .update(key.piece, key.to, cont_pc, to, weighted_malus);
                            }
                        }
                    }
                    let pawn_malus = pawn_history_bonus(-scaled_malus);
                    self.history.pawn_history.update(pawn_key_idx, cont_pc, to, pawn_malus);
                }
            }
        } else {
            // Best move was a capture: update capture history instead.
            let captured_pt = pos.piece_on(best_to).piece_type();
            self.history.capture_history.update(best_cont_pc, best_to, captured_pt, bonus);
        }
        // Penalize the other captures that were tried.
        let cap_malus = capture_malus(depth, captures_tried.len());
        for &m in captures_tried.iter() {
            if m != best_move {
                let moved_pc = pos.moved_piece(m);
                let cont_pc = if m.is_promotion() {
                    moved_pc.promote().unwrap_or(moved_pc)
                } else {
                    moved_pc
                };
                let to = m.to();
                let captured_pt = pos.piece_on(to).piece_type();
                self.history.capture_history.update(
                    cont_pc,
                    to,
                    captured_pt,
                    -cap_malus * 1431 / 1024,
                );
            }
        }
        // Extra penalty when the parent's early (quiet) move led straight
        // into this refutation.
        if ply >= 1 {
            let prev_ply = (ply - 1) as usize;
            let prev_move_count = self.stack[prev_ply].move_count;
            let prev_tt_hit = self.stack[prev_ply].tt_hit;
            if prev_move_count == 1 + (prev_tt_hit as i32)
                && pos.captured_piece() == Piece::NONE
            {
                if let Some(key) = self.stack[prev_ply].cont_hist_key {
                    let prev_sq = key.to;
                    let prev_piece = pos.piece_on(prev_sq);
                    let penalty_base = -cap_malus * 622 / 1024;
                    let prev_in_check = self.stack[prev_ply].in_check;
                    let prev_max_ply_back = if prev_in_check { 2 } else { 6 };
                    for &(ply_back, weight) in CONTINUATION_HISTORY_WEIGHTS.iter() {
                        if ply_back > prev_max_ply_back {
                            continue;
                        }
                        let target_ply = ply - 1 - ply_back as i32;
                        if target_ply >= 0 {
                            if let Some(target_key) =
                                self.stack[target_ply as usize].cont_hist_key
                            {
                                let in_check_idx = target_key.in_check as usize;
                                let capture_idx = target_key.capture as usize;
                                let weighted_penalty = penalty_base * weight / 1024
                                    + if ply_back <= 2 {
                                        CONTINUATION_HISTORY_NEAR_PLY_OFFSET
                                    } else {
                                        0
                                    };
                                self.history.continuation_history[in_check_idx][capture_idx]
                                    .update(
                                        target_key.piece,
                                        target_key.to,
                                        prev_piece,
                                        prev_sq,
                                        weighted_penalty,
                                    );
                            }
                        }
                    }
                }
            }
        }
        // Track TT-move reliability at this ply (non-PV nodes only).
        if !pv_node && tt_move.is_some() {
            if best_move == tt_move {
                self.history.tt_move_history.update(ply as usize, TT_MOVE_HISTORY_BONUS);
            } else {
                self.history.tt_move_history.update(ply as usize, TT_MOVE_HISTORY_MALUS);
            }
        }
    }
    // Fail-low node: reward the parent's quiet move that led here (or give a
    // countermove bonus when the parent move was a capture).
    else if ply >= 1 {
        let prev_ply = (ply - 1) as usize;
        if let Some(prev_key) = self.stack[prev_ply].cont_hist_key {
            let prior_capture = prev_key.capture;
            let prev_sq = prev_key.to;
            if !prior_capture {
                let parent_stat_score = self.stack[prev_ply].stat_score;
                let parent_move_count = self.stack[prev_ply].move_count;
                let parent_in_check = self.stack[prev_ply].in_check;
                let parent_static_eval = self.stack[prev_ply].static_eval;
                let static_eval = self.stack[ply as usize].static_eval;
                // Scale of the bonus, built from several fail-low signals.
                let mut bonus_scale: i32 = -228;
                bonus_scale -= parent_stat_score / 104;
                bonus_scale += (63 * depth).min(508);
                bonus_scale += 184 * (parent_move_count > 8) as i32;
                bonus_scale += 143
                    * (!in_check
                        && static_eval != Value::NONE
                        && best_value <= static_eval - Value::new(92))
                        as i32;
                bonus_scale += 149
                    * (!parent_in_check
                        && parent_static_eval != Value::NONE
                        && best_value <= -parent_static_eval - Value::new(70))
                        as i32;
                bonus_scale = bonus_scale.max(0);
                let scaled_bonus = (144 * depth - 92).min(1365) * bonus_scale;
                let prev_piece = pos.piece_on(prev_sq);
                let prev_max_ply_back = if parent_in_check { 2 } else { 6 };
                let cont_bonus = scaled_bonus * 400 / 32768;
                for &(ply_back, weight) in CONTINUATION_HISTORY_WEIGHTS.iter() {
                    if ply_back > prev_max_ply_back {
                        continue;
                    }
                    let target_ply = ply - 1 - ply_back as i32;
                    if target_ply >= 0 {
                        if let Some(target_key) = self.stack[target_ply as usize].cont_hist_key
                        {
                            let in_check_idx = target_key.in_check as usize;
                            let capture_idx = target_key.capture as usize;
                            let weighted_bonus = cont_bonus * weight / 1024
                                + if ply_back <= 2 {
                                    CONTINUATION_HISTORY_NEAR_PLY_OFFSET
                                } else {
                                    0
                                };
                            self.history.continuation_history[in_check_idx][capture_idx]
                                .update(
                                    target_key.piece,
                                    target_key.to,
                                    prev_piece,
                                    prev_sq,
                                    weighted_bonus,
                                );
                        }
                    }
                }
                let prev_move = self.stack[prev_ply].current_move;
                let main_bonus = scaled_bonus * 220 / 32768;
                let opponent = !pos.side_to_move();
                self.history.main_history.update(opponent, prev_move, main_bonus);
                if prev_piece.piece_type() != PieceType::Pawn && !prev_move.is_promotion() {
                    let pawn_key_idx = pos.pawn_history_index();
                    let pawn_bonus = scaled_bonus * 1164 / 32768;
                    self.history.pawn_history.update(
                        pawn_key_idx,
                        prev_piece,
                        prev_sq,
                        pawn_bonus,
                    );
                }
            } else {
                // Parent move was a capture: small countermove bonus.
                let prev_piece = pos.piece_on(prev_sq);
                let captured_piece = pos.captured_piece();
                debug_assert!(
                    captured_piece != Piece::NONE,
                    "prior_capture is true but captured_piece is NONE"
                );
                if captured_piece != Piece::NONE {
                    self.history.capture_history.update(
                        prev_piece,
                        prev_sq,
                        captured_piece.piece_type(),
                        PRIOR_CAPTURE_COUNTERMOVE_BONUS,
                    );
                }
            }
        }
    }
    // Correction history: learn from the gap between search result and the
    // (corrected) static evaluation.
    if !in_check && best_move.is_some() && !pos.is_capture(best_move) {
        let static_eval = self.stack[ply as usize].static_eval;
        if static_eval != Value::NONE
            && ((best_value < static_eval && best_value < beta) || best_value > static_eval)
        {
            let bonus = ((best_value.raw() - static_eval.raw()) * depth / 8)
                .clamp(-CORRECTION_HISTORY_LIMIT / 4, CORRECTION_HISTORY_LIMIT / 4);
            self.update_correction_history(pos, ply, bonus);
        }
    }
    // Store the result in the transposition table (not during singular
    // verification searches, which exclude a move).
    if excluded_move.is_none() {
        let bound = if best_value >= beta {
            Bound::Lower
        } else if pv_node && best_move.is_some() {
            Bound::Exact
        } else {
            Bound::Upper
        };
        tt_ctx.result.write(
            tt_ctx.key,
            value_to_tt(best_value, ply),
            pv_node,
            bound,
            depth,
            best_move,
            eval_ctx.unadjusted_static_eval,
            self.tt.generation(),
        );
    }
    best_value
}
/// Quiescence search: resolves captures (and near-horizon checks/evasions)
/// until the position is quiet, returning a stand-pat-bounded value.
///
/// NOTE(review): pruning thresholds here (futility base 352, SEE -74,
/// history 5868, recapture depth -5) are tuned constants; comments only.
fn qsearch<const NT: u8>(
    &mut self,
    pos: &mut Position,
    depth: Depth,
    alpha: Value,
    beta: Value,
    ply: i32,
    limits: &LimitsType,
    time_manager: &mut TimeManagement,
) -> Value {
    let pv_node = NT == NodeType::PV as u8;
    let in_check = pos.in_check();
    // Hard ply limit: return a static result.
    if ply >= MAX_PLY {
        return if in_check {
            Value::ZERO
        } else {
            self.nnue_evaluate(pos)
        };
    }
    if pv_node && self.sel_depth < ply + 1 {
        self.sel_depth = ply + 1;
    }
    if self.check_abort(limits, time_manager) {
        return Value::ZERO;
    }
    // Repetition / superior-inferior position handling.
    let rep_state = pos.repetition_state(ply);
    if rep_state.is_repetition() || rep_state.is_superior_inferior() {
        let v = draw_value(rep_state, pos.side_to_move());
        if v != Value::NONE {
            if v == Value::DRAW {
                // Jitter exact draw scores slightly by node count.
                let jittered = Value::new(v.raw() + draw_jitter(self.nodes));
                return value_from_tt(jittered, ply);
            }
            return value_from_tt(v, ply);
        }
    }
    // Adjudicate a draw once the configured move limit is exceeded.
    if self.max_moves_to_draw > 0 && pos.game_ply() > self.max_moves_to_draw {
        return Value::new(Value::DRAW.raw() + draw_jitter(self.nodes));
    }
    // Transposition-table probe.
    let key = pos.key();
    let tt_result = self.tt.probe(key, pos);
    let tt_hit = tt_result.found;
    let tt_data = tt_result.data;
    let pv_hit = tt_hit && tt_data.is_pv;
    self.stack[ply as usize].tt_hit = tt_hit;
    self.stack[ply as usize].tt_pv = pv_hit;
    let mut tt_move = if tt_hit { tt_data.mv } else { Move::NONE };
    let tt_value = if tt_hit {
        value_from_tt(tt_data.value, ply)
    } else {
        Value::NONE
    };
    // TT cutoff at non-PV nodes.
    if !pv_node
        && tt_hit
        && tt_data.depth >= DEPTH_QS
        && tt_value != Value::NONE
        && tt_data.bound.can_cutoff(tt_value, beta)
    {
        return tt_value;
    }
    // Static evaluation / stand-pat value.
    let mut best_move = Move::NONE;
    let correction_value = self.correction_value(pos, ply);
    let mut unadjusted_static_eval = Value::NONE;
    let mut static_eval = if in_check {
        Value::NONE
    } else if tt_hit && tt_data.eval != Value::NONE {
        // Reuse the evaluation stored in the TT entry.
        unadjusted_static_eval = tt_data.eval;
        unadjusted_static_eval
    } else {
        // Without a TT entry, check for a one-ply mate before evaluating.
        if !tt_hit {
            let mate_move = pos.mate_1ply();
            if mate_move.is_some() {
                return Value::mate_in(ply + 1);
            }
        }
        unadjusted_static_eval = self.nnue_evaluate(pos);
        unadjusted_static_eval
    };
    // Apply correction history and pass-rights bonus to the raw eval.
    if !in_check && unadjusted_static_eval != Value::NONE {
        static_eval = to_corrected_static_eval(unadjusted_static_eval, correction_value);
        static_eval += evaluate_pass_rights(pos, pos.game_ply() as u16);
    }
    self.stack[ply as usize].static_eval = static_eval;
    let mut alpha = alpha;
    let mut best_value = if in_check {
        Value::mated_in(ply)
    } else {
        static_eval
    };
    // Use the TT value as a tighter stand-pat when its bound allows.
    if !in_check && tt_hit && tt_value != Value::NONE && !tt_value.is_mate_score() {
        let improves = (tt_value > best_value && tt_data.bound == Bound::Lower)
            || (tt_value < best_value && tt_data.bound == Bound::Upper);
        if improves {
            best_value = tt_value;
            static_eval = tt_value;
            self.stack[ply as usize].static_eval = static_eval;
        }
    }
    // Stand-pat beta cutoff (value smoothed toward beta before storing).
    if !in_check && best_value >= beta {
        let mut v = best_value;
        if !v.is_mate_score() {
            v = Value::new((v.raw() + beta.raw()) / 2);
        }
        if !tt_hit {
            tt_result.write(
                key,
                value_to_tt(v, ply),
                pv_hit,
                Bound::Lower,
                DEPTH_UNSEARCHED,
                Move::NONE,
                unadjusted_static_eval,
                self.tt.generation(),
            );
        }
        return v;
    }
    if !in_check && best_value > alpha {
        alpha = best_value;
    }
    // Base value for futility pruning in the move loop below.
    let futility_base = if in_check {
        Value::NONE
    } else {
        static_eval + Value::new(352)
    };
    // Drop a TT move that this qsearch stage would not generate itself.
    if depth <= DEPTH_QS
        && tt_move.is_some()
        && ((!pos.capture_stage(tt_move) && !pos.gives_check(tt_move)) || depth < -16)
    {
        tt_move = Move::NONE;
    }
    let prev_move = if ply >= 1 {
        self.stack[(ply - 1) as usize].current_move
    } else {
        Move::NONE
    };
    // Build the move list: evasions in check; otherwise picker moves plus
    // quiet checks at the first qsearch ply, or recaptures only when deep.
    let ordered_moves = {
        let cont_tables = self.cont_history_tables(ply);
        let mut buf_moves = OrderedMovesBuffer::new();
        {
            let mp = if in_check {
                MovePicker::new_evasions(
                    pos,
                    tt_move,
                    &self.history.main_history,
                    &self.history.low_ply_history,
                    &self.history.capture_history,
                    cont_tables,
                    &self.history.pawn_history,
                    ply,
                    self.generate_all_legal_moves,
                )
            } else {
                MovePicker::new(
                    pos,
                    tt_move,
                    DEPTH_QS,
                    &self.history.main_history,
                    &self.history.low_ply_history,
                    &self.history.capture_history,
                    cont_tables,
                    &self.history.pawn_history,
                    ply,
                    self.generate_all_legal_moves,
                )
            };
            for mv in mp {
                buf_moves.push(mv);
            }
        }
        // First qsearch ply: also consider quiet checking moves.
        if !in_check && depth == DEPTH_QS {
            let mut buf = crate::movegen::ExtMoveBuffer::new();
            let gen_type = if self.generate_all_legal_moves {
                crate::movegen::GenType::QuietChecksAll
            } else {
                crate::movegen::GenType::QuietChecks
            };
            crate::movegen::generate_with_type(pos, gen_type, &mut buf, None);
            for ext in buf.iter() {
                if buf_moves.contains(&ext.mv) {
                    continue;
                }
                buf_moves.push(ext.mv);
            }
        }
        // Deep in qsearch only recaptures on the previous destination
        // square are searched; the earlier list is discarded.
        if !in_check && depth <= -5 && ply >= 1 && prev_move.is_normal() {
            let mut buf = crate::movegen::ExtMoveBuffer::new();
            let rec_sq = prev_move.to();
            let gen_type = if self.generate_all_legal_moves {
                crate::movegen::GenType::RecapturesAll
            } else {
                crate::movegen::GenType::Recaptures
            };
            crate::movegen::generate_with_type(pos, gen_type, &mut buf, Some(rec_sq));
            buf_moves.clear();
            for ext in buf.iter() {
                buf_moves.push(ext.mv);
            }
        }
        buf_moves
    };
    let mut move_count = 0;
    for mv in ordered_moves.iter() {
        if mv.is_pass() {
            continue;
        }
        if !pos.is_legal(mv) {
            continue;
        }
        let gives_check = pos.gives_check(mv);
        let capture = pos.capture_stage(mv);
        // Only captures and checks are searched when not in check.
        if !in_check && depth <= DEPTH_QS && !capture && !gives_check {
            continue;
        }
        // Skip losing captures by static exchange evaluation.
        if !in_check && capture && !pos.see_ge(mv, Value::ZERO) {
            continue;
        }
        move_count += 1;
        // Pruning below is disabled while the best value is still a loss.
        if !best_value.is_loss() {
            if !gives_check
                && (!prev_move.is_normal() || mv.to() != prev_move.to())
                && futility_base != Value::NONE
            {
                // At most two non-check, non-recapture tries.
                if move_count > 2 {
                    continue;
                }
                let futility_value =
                    futility_base + Value::new(piece_value(pos.piece_on(mv.to())));
                // Futility: even winning this piece cannot reach alpha.
                if futility_value <= alpha {
                    best_value = best_value.max(futility_value);
                    continue;
                }
                if !pos.see_ge(mv, alpha - futility_base) {
                    best_value = best_value.min(alpha.min(futility_base));
                    continue;
                }
            }
            // Prune quiet moves with poor continuation/pawn history.
            if !capture {
                let mut cont_score = 0;
                cont_score +=
                    self.cont_history_ref(ply, 1).get(mv.moved_piece_after(), mv.to()) as i32;
                let pawn_idx = pos.pawn_history_index();
                cont_score +=
                    self.history.pawn_history.get(pawn_idx, pos.moved_piece(mv), mv.to())
                        as i32;
                if cont_score <= 5868 {
                    continue;
                }
            }
            if !pos.see_ge(mv, Value::new(-74)) {
                continue;
            }
        }
        // Make the move, recurse, unmake.
        self.stack[ply as usize].current_move = mv;
        let dirty_piece = pos.do_move_with_prefetch(mv, gives_check, self.tt.as_ref());
        self.nnue_push(dirty_piece);
        self.nodes += 1;
        if mv.is_pass() {
            // NOTE(review): unreachable — pass moves are skipped at the top
            // of this loop, so only the else branch can execute here.
            self.clear_cont_history_for_null(ply);
        } else {
            let cont_hist_pc = mv.moved_piece_after();
            let cont_hist_to = mv.to();
            self.set_cont_history_for_move(ply, in_check, capture, cont_hist_pc, cont_hist_to);
        }
        let value =
            -self.qsearch::<NT>(pos, depth - 1, -beta, -alpha, ply + 1, limits, time_manager);
        self.nnue_pop();
        pos.undo_move(mv);
        if self.abort {
            return Value::ZERO;
        }
        if value > best_value {
            best_value = value;
            best_move = mv;
            if value > alpha {
                alpha = value;
                if value >= beta {
                    // Beta cutoff.
                    break;
                }
            }
        }
    }
    // In check with no legal evasion searched: checkmate.
    if in_check && move_count == 0 {
        return Value::mated_in(ply);
    }
    // Smooth non-mate fail-high values toward beta.
    if !best_value.is_mate_score() && best_value > beta {
        best_value = Value::new((best_value.raw() + beta.raw()) / 2);
    }
    // Store the result in the transposition table.
    let bound = if best_value >= beta {
        Bound::Lower
    } else if pv_node && best_move.is_some() {
        Bound::Exact
    } else {
        Bound::Upper
    };
    tt_result.write(
        key,
        value_to_tt(best_value, ply),
        pv_hit,
        bound,
        DEPTH_QS,
        best_move,
        unadjusted_static_eval,
        self.tt.generation(),
    );
    best_value
}
}
// SAFETY: `SearchWorker` holds raw pointers (e.g. the `NonNull`
// continuation-history sentinel), which suppress the automatic `Send` impl.
// NOTE(review): soundness assumes each worker is driven by a single thread
// at a time — confirm no pointer aliases mutable state shared across workers.
unsafe impl Send for SearchWorker {}
#[cfg(test)]
mod tests {
    use super::*;

    /// Window delta shared by most reduction tests.
    const DELTA: i32 = 32;
    /// Root window delta shared by most reduction tests.
    const ROOT_DELTA: i32 = 64;

    #[test]
    fn test_reduction_values() {
        // Improving nodes must never be reduced more than non-improving ones.
        let when_improving = reduction(true, 10, 5, DELTA, ROOT_DELTA) / 1024;
        let when_not = reduction(false, 10, 5, DELTA, ROOT_DELTA) / 1024;
        assert!(when_improving >= 0);
        assert!(when_not >= when_improving);
    }

    #[test]
    fn test_reduction_bounds() {
        // Zero depth/move-count yields no reduction; maximal inputs stay
        // below 64 plies once scaled back down.
        assert_eq!(reduction(true, 0, 0, DELTA, ROOT_DELTA), 0);
        assert!(reduction(true, 63, 63, DELTA, ROOT_DELTA) / 1024 < 64);
        assert!(reduction(false, 63, 63, DELTA, ROOT_DELTA) / 1024 < 64);
    }

    #[test]
    fn test_reduction_returns_nonzero_for_large_values() {
        let r = reduction(false, 10, 10, DELTA, ROOT_DELTA) / 1024;
        assert!(
            r > 0,
            "reduction should return positive value for depth=10, move_count=10, got {r}"
        );
        let reduced_when_improving = reduction(true, 10, 10, DELTA, ROOT_DELTA) / 1024;
        assert!(
            r >= reduced_when_improving,
            "non-improving should have >= reduction than improving"
        );
    }

    #[test]
    fn test_reduction_small_values() {
        let r = reduction(true, 1, 1, DELTA, ROOT_DELTA) / 1024;
        assert!(r >= 0, "reduction should not be negative");
    }

    #[test]
    fn test_reduction_extremes_no_overflow() {
        // Deliberately extreme inputs: zero window, minimal root delta.
        let r = reduction(false, 63, 63, 0, 1);
        assert!(
            (0..i32::MAX / 2).contains(&r),
            "reduction extreme should be in safe range, got {r}"
        );
    }

    #[test]
    fn test_reduction_zero_root_delta_clamped() {
        let r = reduction(false, 10, 10, 0, 0) / 1024;
        assert!(r >= 0, "reduction should clamp root_delta to >=1 even when 0 is passed");
    }

    #[test]
    fn test_sentinel_initialization() {
        use std::sync::Arc;
        let worker = SearchWorker::new(
            Arc::new(TranspositionTable::new(16)),
            Arc::new(EvalHash::new(1)),
            0,
            0,
        );
        let sentinel = worker.cont_history_sentinel;
        // SAFETY: the sentinel pointer is owned by `worker` and valid here.
        let table = unsafe { sentinel.as_ref() };
        assert_eq!(
            table.get(crate::types::Piece::B_PAWN, crate::types::Square::SQ_11),
            0,
            "sentinel table should be zero-initialized"
        );
        for (i, entry) in worker.stack.iter().enumerate() {
            assert_eq!(
                entry.cont_history_ptr, sentinel,
                "stack[{i}].cont_history_ptr should be initialized to sentinel"
            );
        }
    }

    #[test]
    fn test_cont_history_ptr_returns_sentinel_for_negative_offset() {
        use std::sync::Arc;
        let worker = SearchWorker::new(
            Arc::new(TranspositionTable::new(16)),
            Arc::new(EvalHash::new(1)),
            0,
            0,
        );
        // Offsets that reach before the stack bottom map to the sentinel.
        assert_eq!(worker.cont_history_ptr(0, 1), worker.cont_history_sentinel);
        assert_eq!(worker.cont_history_ptr(3, 5), worker.cont_history_sentinel);
    }
}