use std::ptr::NonNull;
use std::sync::Arc;
#[cfg(not(feature = "search-no-pass-rules"))]
use crate::eval::evaluate_pass_rights;
use crate::eval::{EvalHash, get_scaled_pass_move_bonus};
use crate::nnue::{AccumulatorCacheLayerStacks, AccumulatorStackVariant, get_network};
use crate::position::Position;
use crate::search::PieceToHistory;
use crate::tt::{ProbeResult, TTData, TranspositionTable};
use crate::types::{
Bound, Color, DEPTH_QS, Depth, EnteringKingRule, MAX_PLY, Move, Piece, PieceType,
RepetitionState, Square, Value,
};
use super::history::{
CORRECTION_HISTORY_LIMIT, HistoryCell, HistoryTables, LOW_PLY_HISTORY_SIZE,
continuation_history_bonus_with_offset, continuation_history_weight, low_ply_history_bonus,
pawn_history_bonus, stat_bonus, stat_malus,
};
use super::movepicker::piece_value;
use super::types::{
ContHistKey, NodeType, RootMoves, SEARCHED_MOVES_CAPACITY, STACK_SIZE, SearchedMoveList,
StackArray, draw_value, init_stack_array, value_from_tt, value_to_tt,
};
use super::{LimitsType, MovePicker, SearchTuneParams, TimeManagement};
use super::eval_helpers::{
compute_eval_context, correction_value, probe_transposition, update_correction_history,
};
use super::pruning::{
step14_pruning, try_futility_pruning, try_null_move_pruning, try_probcut, try_razoring,
};
use super::qsearch::qsearch;
use super::search_helpers::{
check_abort, clear_cont_history_for_null, cont_history_ptr, cont_history_tables,
do_move_and_push, nnue_evaluate, nnue_pop, set_cont_history_for_move, take_prior_reduction,
};
#[cfg(feature = "tt-trace")]
use super::tt_sanity::{TtWriteTrace, helper_tt_write_enabled_for_depth, maybe_trace_tt_write};
/// Default contempt ("draw value") for Black, in 1/100ths of a pawn;
/// scaled by `PAWN_VALUE / 100` in `init_draw_value_table`.
pub const DEFAULT_DRAW_VALUE_BLACK: i32 = -2;
/// Default contempt ("draw value") for White; see `init_draw_value_table`.
pub const DEFAULT_DRAW_VALUE_WHITE: i32 = -2;
/// Derives a small, deterministic jitter from the node counter: the node
/// count is masked with `draw_jitter_mask` (clamped to be non-negative)
/// and shifted by `draw_jitter_offset`.
#[inline]
pub(super) fn draw_jitter(nodes: u64, tune_params: &SearchTuneParams) -> i32 {
    let mask = tune_params.draw_jitter_mask.max(0) as u64;
    let masked_bits = (nodes & mask) as i32;
    masked_bits + tune_params.draw_jitter_offset
}
/// Folds the (large, fixed-point) correction term into the raw static eval
/// and clamps the result strictly inside the non-mate score window.
#[inline]
pub(super) fn to_corrected_static_eval(unadjusted: Value, correction_value: i32) -> Value {
    let lo = Value::MATED_IN_MAX_PLY.raw() + 1;
    let hi = Value::MATE_IN_MAX_PLY.raw() - 1;
    let adjusted = unadjusted.raw() + correction_value / 131_072;
    Value::new(adjusted.clamp(lo, hi))
}
/// Applies a weighted `bonus` to the continuation-history tables of the
/// moves played 1..=N plies before `base_ply` (N = 2 while in check,
/// otherwise 6), keyed by the piece/square of the current move.
///
/// Fix: the original iterated a constant `1..=6` and used `continue` to
/// skip plies beyond `max_ply_back`, computing `continuation_history_weight`
/// even for skipped and keyless plies; the loop now runs only over the
/// relevant range and the weight is computed only when it is used.
#[inline]
fn update_continuation_histories(
    h: &mut HistoryTables,
    stack: &StackArray,
    tune_params: &SearchTuneParams,
    base_ply: i32,
    in_check: bool,
    pc: Piece,
    to: Square,
    bonus: i32,
) {
    // While in check only the two most recent plies carry a usable signal.
    let max_ply_back = if in_check { 2 } else { 6 };
    for ply_back in 1..=max_ply_back {
        let target_ply = base_ply - ply_back as i32;
        if target_ply < 0 {
            continue;
        }
        let Some(key) = stack[target_ply as usize].cont_hist_key else {
            continue;
        };
        // The null/pass sentinel key has no piece; nothing to update.
        if key.piece.is_none() {
            continue;
        }
        let weight = continuation_history_weight(tune_params, ply_back);
        let weighted_bonus = continuation_history_bonus_with_offset(
            bonus * weight / 1024,
            ply_back,
            tune_params,
        );
        h.continuation_history[key.in_check as usize][key.capture as usize].update(
            key.piece,
            key.to,
            pc,
            to,
            weighted_bonus,
        );
    }
}
/// Credits `bonus` for a quiet move across all quiet-history tables:
/// main (butterfly) history, low-ply history (first plies only),
/// continuation histories, and pawn-structure history.
#[inline]
fn update_quiet_histories(
    h: &mut HistoryTables,
    stack: &StackArray,
    tune_params: &SearchTuneParams,
    pos: &Position,
    ply: i32,
    in_check: bool,
    mv: Move,
    bonus: i32,
) {
    let side = pos.side_to_move();
    h.main_history.update(side, mv, bonus);
    // Low-ply history only tracks the first LOW_PLY_HISTORY_SIZE plies.
    if ply < LOW_PLY_HISTORY_SIZE as i32 {
        let lp_bonus = low_ply_history_bonus(bonus, tune_params);
        h.low_ply_history.update(ply as usize, mv, lp_bonus);
    }
    // Promotions are credited as the piece they promote into.
    let piece_before = pos.moved_piece(mv);
    let piece_after = if mv.is_promotion() {
        piece_before.promote().unwrap_or(piece_before)
    } else {
        piece_before
    };
    let dest = mv.to();
    let cont_bonus = bonus * tune_params.continuation_history_multiplier / 1024;
    update_continuation_histories(
        h, stack, tune_params, ply, in_check, piece_after, dest, cont_bonus,
    );
    let pawn_bonus = pawn_history_bonus(bonus, tune_params);
    h.pawn_history.update(pos.pawn_history_index(), piece_after, dest, pawn_bonus);
}
/// LMR reduction lookup table, indexed by clamped depth or move count.
type Reductions = [i32; crate::movegen::MAX_MOVES];
/// Builds the logarithmic LMR reduction table. `coeff` is a fixed-point
/// scale where 128 corresponds to 1.0; entry 0 is left at 0.
pub(crate) fn build_reductions(coeff: i32) -> Box<Reductions> {
    let boxed_slice = vec![0i32; crate::movegen::MAX_MOVES].into_boxed_slice();
    // Convert the boxed slice into a boxed fixed-size array without a
    // stack-allocated intermediate.
    let mut table: Box<Reductions> = boxed_slice.try_into().expect("size mismatch");
    let scale = f64::from(coeff) / 128.0;
    for i in 1..table.len() {
        table[i] = (scale * (i as f64).ln()) as i32;
    }
    table
}
/// Computes the raw LMR reduction (fixed-point, divided by 1024 at the call
/// site) from the precomputed log table, the aspiration-window width
/// (`delta` vs. `root_delta`) and the improving flag.
#[inline]
pub(crate) fn reduction(
    reductions: &Reductions,
    tune_params: &SearchTuneParams,
    imp: bool,
    depth: i32,
    move_count: i32,
    delta: i32,
    root_delta: i32,
) -> i32 {
    // Degenerate inputs reduce nothing.
    if depth <= 0 || move_count <= 0 {
        return 0;
    }
    let max_idx = (crate::movegen::MAX_MOVES as i32) - 1;
    let depth_idx = depth.clamp(1, max_idx) as usize;
    let mc_idx = move_count.clamp(1, max_idx) as usize;
    let base = reductions[depth_idx] * reductions[mc_idx];
    // Shrink the reduction when this node's window is wide relative to the
    // root window.
    let delta_term = delta.max(0) * tune_params.lmr_reduction_delta_scale / root_delta.max(1);
    // Reduce more when the eval is not improving.
    let non_improving_term = if imp {
        0
    } else {
        base * tune_params.lmr_reduction_non_improving_mult
            / tune_params.lmr_reduction_non_improving_div.max(1)
    };
    base - delta_term + non_improving_term + tune_params.lmr_reduction_base_offset
}
#[cfg(feature = "search-stats")]
use super::stats::{STATS_MAX_DEPTH, SearchStats};
use super::stats::{inc_stat, inc_stat_by_depth};
/// Bundle of transposition-table probe results for the current node.
pub(super) struct TTContext {
    /// Hash key the table was probed with.
    pub(super) key: u64,
    /// Raw probe result; also used later to write back via `result.write(..)`.
    pub(super) result: ProbeResult,
    /// Entry payload returned by the probe.
    pub(super) data: TTData,
    /// Whether the probe found a matching entry.
    pub(super) hit: bool,
    /// TT move (at the root, the current best root move is used instead).
    pub(super) mv: Move,
    /// Entry value via `value_from_tt`; `Value::NONE` on a miss.
    pub(super) value: Value,
    /// Whether `mv` is a capture in the current position.
    pub(super) capture: bool,
}
/// Outcome of the TT probe step: either continue searching with the
/// gathered context, or cut the node off immediately with a TT-backed value.
pub(super) enum ProbeOutcome {
    /// No cutoff — carry the probe context into the rest of the node.
    Continue(TTContext),
    /// Immediate cutoff with the TT value and the move/capture flags
    /// the caller still needs.
    Cutoff {
        value: Value,
        tt_move: Move,
        tt_capture: bool,
    },
}
/// Evaluation-related values for the node being searched
/// (produced by `compute_eval_context`).
pub(super) struct EvalContext {
    /// Evaluation used by the search/pruning decisions.
    pub(super) eval: Value,
    /// Corrected static evaluation.
    pub(super) static_eval: Value,
    /// Raw NNUE evaluation before the correction-history adjustment.
    pub(super) unadjusted_static_eval: Value,
    /// Correction-history term applied to the static eval.
    pub(super) correction_value: i32,
    /// "Improving" flag fed into reductions/pruning.
    pub(super) improving: bool,
    /// Whether the opponent's evaluation is worsening.
    pub(super) opponent_worsening: bool,
}
/// Result of the step-14 (per-move) pruning checks in `step14_pruning`.
pub(super) enum Step14Outcome {
    /// Skip the current move; may carry a value to fold into the node's
    /// running best value.
    Skip { best_value: Option<Value> },
    /// Keep searching the move.
    Continue,
}
/// Inputs to the futility-pruning decision (`try_futility_pruning`).
#[derive(Clone, Copy)]
pub(super) struct FutilityParams {
    /// Remaining search depth.
    pub(super) depth: Depth,
    /// Current beta bound.
    pub(super) beta: Value,
    /// Corrected static evaluation of the node.
    pub(super) static_eval: Value,
    /// Correction-history term for the node.
    pub(super) correction_value: i32,
    /// "Improving" flag for the node.
    pub(super) improving: bool,
    /// Whether the opponent's eval is worsening.
    pub(super) opponent_worsening: bool,
    // Transposition-table flags for the node.
    pub(super) tt_hit: bool,
    pub(super) tt_move_exists: bool,
    pub(super) tt_capture: bool,
    pub(super) tt_pv: bool,
    /// Whether the side to move is in check.
    pub(super) in_check: bool,
}
/// Per-move inputs to the step-14 pruning checks (`step14_pruning`).
pub(super) struct Step14Context<'a> {
    pub(super) pos: &'a Position,
    /// Move under consideration.
    pub(super) mv: Move,
    pub(super) depth: Depth,
    pub(super) ply: i32,
    /// Best value found so far at this node.
    pub(super) best_value: Value,
    pub(super) in_check: bool,
    /// Whether `mv` gives check.
    pub(super) gives_check: bool,
    pub(super) is_capture: bool,
    /// Depth after the anticipated LMR reduction.
    pub(super) lmr_depth: i32,
    /// Side making the move.
    pub(super) mover: Color,
    // Two continuation-history tables (presumably the last one and two
    // plies back — confirm against the caller).
    pub(super) cont_history_1: &'a PieceToHistory,
    pub(super) cont_history_2: &'a PieceToHistory,
    pub(super) static_eval: Value,
    pub(super) alpha: Value,
    pub(super) best_move: Move,
    pub(super) pawn_history_index: usize,
}
/// Read-only search dependencies and settings, borrowed from a
/// `SearchWorker` for the duration of a search call.
pub struct SearchContext<'a> {
    /// Shared transposition table.
    pub tt: &'a TranspositionTable,
    /// Shared evaluation hash.
    pub eval_hash: &'a EvalHash,
    /// History tables cell (read via `as_ref_unchecked` in this module).
    pub history: &'a HistoryCell,
    /// Neutral continuation-history table used when no move applies.
    pub cont_history_sentinel: NonNull<PieceToHistory>,
    /// When set, the move picker generates all legal moves.
    pub generate_all_legal_moves: bool,
    // Move-count cap for draw adjudication — confirm exact semantics.
    pub max_moves_to_draw: i32,
    /// Worker thread index (thread 0 drives time management).
    pub thread_id: usize,
    /// Whether TT writes are permitted for this search.
    pub allow_tt_write: bool,
    /// Search tuning parameters.
    pub tune_params: &'a SearchTuneParams,
    /// Precomputed LMR reduction table.
    pub reductions: &'a Reductions,
    /// Per-color draw values (filled by `init_draw_value_table`).
    pub draw_value_table: [Value; 2],
}
/// Mutable per-thread search state: counters, the per-ply search stack,
/// root-move bookkeeping and the NNUE evaluation stacks.
pub struct SearchState {
    /// Nodes visited in the current search.
    pub nodes: u64,
    /// Per-ply search stack.
    pub stack: StackArray,
    /// Width of the root aspiration window (kept >= 1).
    pub root_delta: i32,
    /// Set when the search must stop; checked in `check_abort`.
    pub abort: bool,
    /// Maximum ply reached (selective depth).
    pub sel_depth: i32,
    /// Depth of the current iterative-deepening iteration.
    pub root_depth: Depth,
    /// Deepest fully completed iteration.
    pub completed_depth: Depth,
    /// Best move found so far.
    pub best_move: Move,
    /// Decaying count of best-move changes (halved via
    /// `decay_best_move_changes`, used by time management).
    pub best_move_changes: f64,
    // Ply threshold used by null-move pruning — set elsewhere; confirm.
    pub nmp_min_ply: i32,
    /// Root move list with scores, PVs and search effort.
    pub root_moves: RootMoves,
    /// PV of the previous iteration, used for PV-following.
    pub previous_pv: Vec<Move>,
    /// NNUE accumulator stack.
    pub nnue_stack: AccumulatorStackVariant,
    /// Accumulator cache for layer-stack networks (created lazily in
    /// `prepare_search`).
    pub acc_cache: Option<AccumulatorCacheLayerStacks>,
    /// Countdown until the next time/node check in `check_abort`.
    pub calls_cnt: i32,
    /// Search statistics (only with the `search-stats` feature).
    #[cfg(feature = "search-stats")]
    pub stats: SearchStats,
}
impl SearchState {
    /// Creates a fresh search state: zeroed counters, an initialised search
    /// stack, a default NNUE accumulator stack, and no accumulator cache
    /// (it is created lazily in `prepare_search`).
    pub fn new() -> Self {
        Self {
            nodes: 0,
            stack: init_stack_array(),
            root_delta: 1,
            abort: false,
            sel_depth: 0,
            root_depth: 0,
            completed_depth: 0,
            best_move: Move::NONE,
            best_move_changes: 0.0,
            nmp_min_ply: 0,
            root_moves: RootMoves::new(),
            previous_pv: Vec::new(),
            nnue_stack: AccumulatorStackVariant::new_default(),
            acc_cache: None,
            calls_cnt: 0,
            #[cfg(feature = "search-stats")]
            stats: SearchStats::default(),
        }
    }
}
impl Default for SearchState {
fn default() -> Self {
Self::new()
}
}
impl SearchState {
    /// Replaces the stored previous principal variation with `pv`.
    #[inline]
    pub fn set_previous_pv(&mut self, pv: &[Move]) {
        self.previous_pv.clear();
        self.previous_pv.extend_from_slice(pv);
    }
    /// Marks the root stack entry as following the previous PV.
    #[inline]
    pub fn set_root_follow_pv(&mut self) {
        self.stack[0].follow_pv = true;
    }
    /// Propagates the follow-PV flag one ply down: the child follows the
    /// previous PV only when the parent does and `mv` matches the stored
    /// PV move at the parent's ply.
    #[inline]
    pub fn set_child_follow_pv(&mut self, parent_ply: i32, mv: Move) {
        let parent = parent_ply as usize;
        let on_pv_move = self.previous_pv.get(parent) == Some(&mv);
        self.stack[parent + 1].follow_pv = self.stack[parent].follow_pv && on_pv_move;
    }
}
/// Per-thread search worker. Owns its history tables, tuning parameters and
/// search state; the transposition table and eval hash are shared `Arc`s.
pub struct SearchWorker {
    /// Shared transposition table.
    pub tt: Arc<TranspositionTable>,
    /// Shared evaluation hash.
    pub eval_hash: Arc<EvalHash>,
    /// Owned history tables; `cont_history_sentinel` points into this box.
    pub history: Box<HistoryCell>,
    /// Neutral continuation-history table inside `history` (set in `new`).
    pub cont_history_sentinel: NonNull<PieceToHistory>,
    /// When set, the move picker generates all legal moves.
    pub generate_all_legal_moves: bool,
    // Move-count cap for draw adjudication — confirm exact semantics.
    pub max_moves_to_draw: i32,
    /// Thread index; thread 0 drives time management (see `check_abort`).
    pub thread_id: usize,
    /// Whether this worker may write TT entries.
    pub allow_tt_write: bool,
    /// Search tuning parameters.
    pub search_tune_params: SearchTuneParams,
    /// LMR reduction table built from `search_tune_params.lmr_table_coeff`.
    pub reductions: Box<Reductions>,
    // Configured draw (contempt) values in 1/100ths of a pawn per side.
    pub draw_value_black: i32,
    pub draw_value_white: i32,
    /// Per-color draw values scaled to `Value` by `init_draw_value_table`.
    pub draw_value_table: [Value; 2],
    /// Rule variant for entering-king wins.
    pub entering_king_rule: EnteringKingRule,
    /// Mutable per-search state.
    pub state: SearchState,
}
impl SearchWorker {
/// Stat score for a quiet root move: twice the main (butterfly) history
/// plus twice the sentinel continuation history for the moved piece.
/// NOTE(review): the original wrote `2 * main + cont + cont`, i.e. the
/// continuation term is doubled as well — confirm this doubling is
/// intentional and not a missed second continuation table.
#[inline]
fn root_quiet_stat_score(&self, mover: Color, mv: Move) -> i32 {
    let piece = mv.moved_piece_after();
    let dest = mv.to();
    let hist = unsafe { self.history.as_ref_unchecked() };
    let main_hist = hist.main_history.get(mover, mv) as i32;
    let cont_hist = unsafe { self.cont_history_sentinel.as_ref() }.get(piece, dest) as i32;
    2 * (main_hist + cont_hist)
}
/// Creates a heap-allocated worker with fresh history tables and an LMR
/// reduction table built from the tuning parameters.
pub fn new(
    tt: Arc<TranspositionTable>,
    eval_hash: Arc<EvalHash>,
    max_moves_to_draw: i32,
    thread_id: usize,
    search_tune_params: SearchTuneParams,
) -> Box<Self> {
    let history = HistoryCell::new_boxed();
    // The sentinel points into `history`'s heap allocation; it stays valid
    // when the Box is moved into the struct below because only the Box
    // pointer moves, not the allocation it owns.
    let cont_history_sentinel = {
        let h = unsafe { history.as_ref_unchecked() };
        NonNull::from(h.continuation_history[0][0].get_table(Piece::NONE, Square::SQ_11))
    };
    let reductions = build_reductions(search_tune_params.lmr_table_coeff);
    let mut worker = Box::new(Self {
        tt,
        eval_hash,
        history,
        cont_history_sentinel,
        generate_all_legal_moves: false,
        max_moves_to_draw,
        thread_id,
        allow_tt_write: true,
        search_tune_params,
        reductions,
        draw_value_black: DEFAULT_DRAW_VALUE_BLACK,
        draw_value_white: DEFAULT_DRAW_VALUE_WHITE,
        draw_value_table: [Value::ZERO; 2],
        entering_king_rule: EnteringKingRule::default(),
        state: SearchState::new(),
    });
    // Every stack entry's continuation pointer starts at the sentinel.
    worker.reset_cont_history_ptrs();
    worker
}
/// Packages the worker's shared, read-only search dependencies and
/// settings into a [`SearchContext`] borrowing from `self`.
#[inline]
pub fn create_context(&self) -> SearchContext<'_> {
    SearchContext {
        tt: &self.tt,
        eval_hash: &self.eval_hash,
        history: &self.history,
        cont_history_sentinel: self.cont_history_sentinel,
        generate_all_legal_moves: self.generate_all_legal_moves,
        max_moves_to_draw: self.max_moves_to_draw,
        thread_id: self.thread_id,
        allow_tt_write: self.allow_tt_write,
        tune_params: &self.search_tune_params,
        reductions: &self.reductions,
        draw_value_table: self.draw_value_table,
    }
}
/// Computes the root static eval (NNUE eval, correction-history adjustment,
/// plus the pass-rights term), stores it in `stack[0].static_eval`, and
/// returns the *unadjusted* NNUE eval together with the correction value.
/// While in check both evals are `Value::NONE`.
#[inline]
fn init_root_static_eval(&mut self, pos: &Position, root_in_check: bool) -> (Value, i32) {
    let unadjusted_static_eval = if root_in_check {
        Value::NONE
    } else {
        nnue_evaluate(&mut self.state, pos)
    };
    let ctx = self.create_context();
    let corr = correction_value(&self.state, &ctx, pos, 0);
    let static_eval = if root_in_check || unadjusted_static_eval == Value::NONE {
        Value::NONE
    } else {
        // The pass-rights evaluation is compiled out by the
        // `search-no-pass-rules` feature.
        #[cfg(feature = "search-no-pass-rules")]
        let pass_rights_eval = Value::ZERO;
        #[cfg(not(feature = "search-no-pass-rules"))]
        let pass_rights_eval = evaluate_pass_rights(pos, pos.game_ply() as u16);
        to_corrected_static_eval(unadjusted_static_eval, corr) + pass_rights_eval
    };
    self.state.stack[0].static_eval = static_eval;
    (unadjusted_static_eval, corr)
}
/// Mutable access to the per-search state.
#[inline]
pub fn state_mut(&mut self) -> &mut SearchState {
    &mut self.state
}
/// Fills `draw_value_table`: the side to move gets its configured contempt
/// (in 1/100ths of a pawn, scaled by `PAWN_VALUE / 100`) and the opponent
/// gets the negation.
#[inline]
fn init_draw_value_table(&mut self, us: Color) {
    let contempt = match us {
        Color::Black => self.draw_value_black,
        _ => self.draw_value_white,
    };
    let scaled = contempt * Value::PAWN_VALUE / 100;
    self.draw_value_table[us as usize] = Value::new(scaled);
    self.draw_value_table[(!us) as usize] = Value::new(-scaled);
}
/// Shared access to the per-search state.
#[inline]
pub fn state(&self) -> &SearchState {
    &self.state
}
/// Clears the accumulated search statistics (`search-stats` builds only).
#[cfg(feature = "search-stats")]
pub fn reset_stats(&mut self) {
    self.state.stats.reset();
}
/// No-op stub when the `search-stats` feature is disabled.
#[cfg(not(feature = "search-stats"))]
pub fn reset_stats(&mut self) {}
/// Formats the collected search statistics (`search-stats` builds only).
#[cfg(feature = "search-stats")]
pub fn get_stats_report(&self) -> String {
    self.state.stats.format_report()
}
/// Returns an empty report when the `search-stats` feature is disabled.
#[cfg(not(feature = "search-stats"))]
pub fn get_stats_report(&self) -> String {
    String::new()
}
/// Points every stack entry's continuation-history pointer back at the
/// neutral sentinel table.
fn reset_cont_history_ptrs(&mut self) {
    let sentinel = self.cont_history_sentinel;
    self.state
        .stack
        .iter_mut()
        .for_each(|entry| entry.cont_history_ptr = sentinel);
}
/// Points ply `ply`'s continuation-history pointer at the table selected by
/// (`in_check`, `capture`, `piece`, `to`) and records the matching key so
/// later history updates can address the same table.
#[inline]
pub(super) fn set_cont_history_for_move(
    &mut self,
    ply: i32,
    in_check: bool,
    capture: bool,
    piece: Piece,
    to: Square,
) {
    debug_assert!(ply >= 0 && (ply as usize) < STACK_SIZE, "ply out of bounds: {ply}");
    let in_check_idx = in_check as usize;
    let capture_idx = capture as usize;
    // Pointer into the history box's heap allocation; stable for the
    // worker's lifetime.
    let table = {
        let h = unsafe { self.history.as_ref_unchecked() };
        NonNull::from(h.continuation_history[in_check_idx][capture_idx].get_table(piece, to))
    };
    self.state.stack[ply as usize].cont_history_ptr = table;
    self.state.stack[ply as usize].cont_hist_key =
        Some(ContHistKey::new(in_check, capture, piece, to));
}
/// After a null/pass move: parks the ply's continuation-history pointer on
/// the sentinel table and records the null-sentinel key.
#[inline]
pub(super) fn clear_cont_history_for_null(&mut self, ply: i32) {
    let entry = &mut self.state.stack[ply as usize];
    entry.cont_history_ptr = self.cont_history_sentinel;
    entry.cont_hist_key = Some(ContHistKey::null_sentinel());
}
/// Clears all history tables and rebuilds the LMR reduction table from the
/// current tuning parameters.
pub fn clear(&mut self) {
    unsafe { self.history.as_mut_unchecked() }.clear_with_params(&self.search_tune_params);
    self.reductions = build_reductions(self.search_tune_params.lmr_table_coeff);
}
/// Resets all per-search state (counters, root moves, NNUE stacks) ahead of
/// a new search. History tables are retained, except the low-ply history
/// which is re-initialised from the tuning parameters.
///
/// Fix (idiom): the lazy creation + invalidation of `acc_cache` previously
/// used an `is_none()` check followed by a separate `if let`; it now uses
/// `Option::get_or_insert_with`, which is equivalent and performs a single
/// access.
pub fn prepare_search(&mut self) {
    self.state.nodes = 0;
    self.state.sel_depth = 0;
    self.state.root_depth = 0;
    self.state.root_delta = 1;
    self.state.completed_depth = 0;
    self.state.best_move = Move::NONE;
    self.state.abort = false;
    self.state.best_move_changes = 0.0;
    self.state.nmp_min_ply = 0;
    self.state.root_moves.clear();
    self.reset_stats();
    unsafe { self.history.as_mut_unchecked() }
        .low_ply_history
        .clear_with_init(self.search_tune_params.low_ply_history_init as i16);
    if let Some(network) = get_network() {
        // Rebuild the accumulator stack if the loaded network changed,
        // otherwise just rewind it.
        if !self.state.nnue_stack.matches_network(&network) {
            self.state.nnue_stack = AccumulatorStackVariant::from_network(&network);
        } else {
            self.state.nnue_stack.reset();
        }
        if network.is_layer_stacks() {
            // Lazily create the layer-stack cache, then invalidate it so no
            // stale entries carry over between searches.
            self.state
                .acc_cache
                .get_or_insert_with(AccumulatorCacheLayerStacks::new)
                .invalidate();
        } else {
            self.state.acc_cache = None;
        }
    } else {
        self.state.nnue_stack.reset();
        self.state.acc_cache = None;
    }
    self.state.calls_cnt = 0;
}
/// Halves the best-move-change counter (aging between iterations).
/// Multiplying by 0.5 is exact for IEEE doubles, identical to dividing by 2.
pub fn decay_best_move_changes(&mut self) {
    self.state.best_move_changes *= 0.5;
}
/// Toggles full legal-move generation for subsequent searches.
pub fn set_generate_all_legal_moves(&mut self, flag: bool) {
    self.generate_all_legal_moves = flag;
}
/// Pops the top NNUE accumulator entry (pairs with `do_move_and_push`).
#[inline]
pub(super) fn nnue_pop(&mut self) {
    self.state.nnue_stack.pop();
}
/// Periodic abort check called from the search loop. A countdown
/// (`calls_cnt`) makes the comparatively expensive time/node checks run
/// only once every ~512 calls. Returns `true` when the search must stop.
#[inline]
pub(super) fn check_abort(
    &mut self,
    limits: &LimitsType,
    time_manager: &mut TimeManagement,
) -> bool {
    if self.state.abort {
        #[cfg(debug_assertions)]
        eprintln!("check_abort: abort flag already set");
        return true;
    }
    // Only run the full checks when the countdown expires.
    self.state.calls_cnt -= 1;
    if self.state.calls_cnt > 0 {
        return false;
    }
    // Re-arm the countdown; with a node limit, poll more often so the
    // limit is not overshot by much.
    self.state.calls_cnt = if limits.nodes > 0 {
        std::cmp::min(512, (limits.nodes / 1024) as i32).max(1)
    } else {
        512
    };
    if time_manager.stop_requested() {
        #[cfg(debug_assertions)]
        eprintln!("check_abort: stop requested");
        self.state.abort = true;
        return true;
    }
    if limits.nodes > 0 && self.state.nodes >= limits.nodes {
        #[cfg(debug_assertions)]
        eprintln!(
            "check_abort: node limit reached nodes={} limit={}",
            self.state.nodes, limits.nodes
        );
        self.state.abort = true;
        return true;
    }
    // Time management is driven by the main thread only.
    if self.thread_id == 0 {
        if time_manager.take_ponderhit() {
            time_manager.on_ponderhit();
        }
        let elapsed = time_manager.elapsed();
        let elapsed_effective = time_manager.elapsed_from_ponderhit();
        if time_manager.search_end() > 0 && elapsed >= time_manager.search_end() {
            #[cfg(debug_assertions)]
            eprintln!(
                "check_abort: search_end reached elapsed={} search_end={}",
                elapsed,
                time_manager.search_end()
            );
            self.state.abort = true;
            return true;
        }
        // Out of budget (or a stop was requested on ponderhit): schedule
        // the search end instead of aborting immediately.
        if !time_manager.is_pondering()
            && time_manager.search_end() == 0
            && limits.use_time_management()
            && (elapsed_effective > time_manager.maximum() || time_manager.stop_on_ponderhit())
        {
            time_manager.set_search_end(elapsed);
        }
    }
    false
}
/// When `mv` is the TT (best root) move, guarantees at least one ply of
/// search if the TT entry looks trustworthy: a mate score at positive TT
/// depth, or a reasonably deep entry once the root depth exceeds 8.
fn root_extend_new_depth(
    &self,
    mv: Move,
    tt_move_root: Move,
    tt_value_root: Value,
    tt_depth: Depth,
    new_depth: Depth,
) -> Depth {
    if mv != tt_move_root {
        return new_depth;
    }
    let mate_backed =
        tt_value_root != Value::NONE && tt_value_root.is_mate_score() && tt_depth > 0;
    let deep_backed = tt_depth > 1 && self.state.root_depth > 8;
    if mate_backed || deep_backed {
        new_depth.max(1)
    } else {
        new_depth
    }
}
/// Root search for the primary PV. Iterates the root moves through a
/// `MovePicker`, applying LMR-style reductions with null-window searches
/// and full-window re-searches, and updates `self.state.root_moves`
/// (scores, selective depth, PVs, effort) in place. Returns the best value
/// found, or `Value::ZERO` if the search was aborted.
pub(crate) fn search_root(
    &mut self,
    pos: &mut Position,
    mut depth: Depth,
    alpha: Value,
    beta: Value,
    limits: &LimitsType,
    time_manager: &mut TimeManagement,
) -> Value {
    self.init_draw_value_table(pos.side_to_move());
    self.state.root_delta = (beta.raw() - alpha.raw()).abs().max(1);
    let mut alpha = alpha;
    let mut best_value = Value::new(-32001);
    let root_in_check = pos.in_check();
    self.state.stack[0].in_check = root_in_check;
    // Seed PV-following from the best root move of the previous iteration.
    self.state.set_previous_pv(&self.state.root_moves[0].pv.clone());
    self.state.set_root_follow_pv();
    self.state.stack[0].cont_history_ptr = self.cont_history_sentinel;
    self.state.stack[0].cont_hist_key = None;
    self.state.stack[0].stat_score = 0;
    self.state.stack[2].cutoff_cnt = 0;
    let (root_unadjusted_static_eval, root_correction_value) =
        self.init_root_static_eval(pos, root_in_check);
    let root_improving = if root_in_check {
        false
    } else {
        self.state.stack[0].static_eval >= beta
    };
    // TT probe; at the root the "TT move" is the current best root move.
    let key = pos.key();
    let tt_result = self.tt.probe(key, pos);
    let tt_hit = tt_result.found;
    let tt_data = tt_result.data;
    let tt_move_root = self.state.root_moves[0].mv();
    let tt_value_root = if tt_hit {
        value_from_tt(tt_data.value, 0)
    } else {
        Value::NONE
    };
    self.state.stack[0].tt_pv = true;
    self.state.stack[0].tt_hit = tt_hit;
    let tt_capture_root = tt_move_root.is_some() && pos.capture_stage(tt_move_root);
    let root_static_eval = self.state.stack[0].static_eval;
    let tt_ctx_root = TTContext {
        key,
        result: tt_result,
        data: tt_data,
        hit: tt_hit,
        mv: tt_move_root,
        value: tt_value_root,
        capture: tt_capture_root,
    };
    {
        // Context built inline (not via `create_context`), presumably so
        // the field borrows stay disjoint from `&mut self.state` — confirm.
        let ctx = SearchContext {
            tt: &self.tt,
            eval_hash: &self.eval_hash,
            history: &self.history,
            cont_history_sentinel: self.cont_history_sentinel,
            generate_all_legal_moves: self.generate_all_legal_moves,
            max_moves_to_draw: self.max_moves_to_draw,
            thread_id: self.thread_id,
            allow_tt_write: self.allow_tt_write,
            tune_params: &self.search_tune_params,
            reductions: &self.reductions,
            draw_value_table: self.draw_value_table,
        };
        if let Some(v) = try_probcut(
            &mut self.state,
            &ctx,
            pos,
            depth,
            beta,
            root_improving,
            &tt_ctx_root,
            0,
            root_static_eval,
            root_unadjusted_static_eval,
            root_in_check,
            false,
            Move::NONE,
            limits,
            time_manager,
            Self::search_node::<{ NodeType::NonPV as u8 }>,
        ) {
            return v;
        }
    }
    {
        // "Small probcut": cut off early when a sufficiently deep TT entry
        // already proves a non-mate score well above beta.
        let small_probcut_beta = beta + Value::new(418);
        if tt_data.bound.is_lower_or_exact()
            && tt_data.depth >= depth - 4
            && tt_value_root != Value::NONE
            && tt_value_root >= small_probcut_beta
            && !beta.is_mate_score()
            && !tt_value_root.is_mate_score()
        {
            return small_probcut_beta;
        }
    }
    self.state.stack[0].pv.clear();
    self.state.stack[1].pv.clear();
    let mut quiets_tried = SearchedMoveList::new();
    let mut captures_tried = SearchedMoveList::new();
    // No prior moves at the root: all continuation tables are the sentinel.
    let sentinel_ref: &PieceToHistory = unsafe { self.cont_history_sentinel.as_ref() };
    let cont_tables = [sentinel_ref; 6];
    let mut mp = MovePicker::new(
        pos,
        tt_move_root,
        depth,
        0,
        cont_tables,
        self.generate_all_legal_moves,
    );
    let mut best_move = Move::NONE;
    let mut move_count = 0i32;
    loop {
        let mv = {
            let h = unsafe { self.history.as_ref_unchecked() };
            mp.next_move(pos, h)
        };
        if mv == Move::NONE {
            break;
        }
        if !pos.pseudo_legal(mv) {
            continue;
        }
        if !pos.is_legal(mv) {
            continue;
        }
        // Only moves present in the root move list are searched.
        let rm_idx = match self.state.root_moves.find_from(mv, 0) {
            Some(idx) => idx,
            None => continue,
        };
        if self.check_abort(limits, time_manager) {
            return Value::ZERO;
        }
        move_count += 1;
        let gives_check = pos.gives_check(mv);
        let is_capture = pos.is_capture(mv);
        do_move_and_push(&mut self.state, pos, mv, gives_check, self.tt.as_ref());
        let nodes_before = self.state.nodes;
        self.state.stack[0].current_move = mv;
        self.state.stack[0].move_count = move_count;
        if mv.is_pass() {
            self.clear_cont_history_for_null(0);
        } else {
            let cont_hist_piece = mv.moved_piece_after();
            let cont_hist_to = mv.to();
            self.set_cont_history_for_move(
                0,
                root_in_check,
                is_capture,
                cont_hist_piece,
                cont_hist_to,
            );
        }
        let mut new_depth = depth - 1;
        // Stat score feeding the LMR formulas below: capture history for
        // captures, main + continuation history for quiets.
        let root_stat_score = if mv.is_pass() {
            0
        } else if is_capture {
            let captured = pos.captured_piece();
            let captured_pt = captured.piece_type();
            let moved_piece = mv.moved_piece_after();
            let hist = unsafe { self.history.as_ref_unchecked() }.capture_history.get(
                moved_piece,
                mv.to(),
                captured_pt,
            ) as i32;
            self.search_tune_params.lmr_step16_capture_stat_scale_num * piece_value(captured)
                / 128
                + hist
        } else {
            // The move has already been made, so the mover is the side
            // that is no longer to move.
            let mover = !pos.side_to_move();
            self.root_quiet_stat_score(mover, mv)
        };
        self.state.stack[0].stat_score = root_stat_score;
        let value = if move_count == 1 {
            // First move: full-window PV search, possibly extended when it
            // is a well-backed TT move.
            new_depth = self.root_extend_new_depth(
                mv,
                tt_move_root,
                tt_value_root,
                tt_data.depth,
                new_depth,
            );
            -self.search_node_wrapper::<{ NodeType::PV as u8 }>(
                pos,
                new_depth,
                -beta,
                -alpha,
                1,
                false,
                limits,
                time_manager,
            )
        } else if depth >= 2 && move_count >= 2 {
            // Later moves at depth >= 2: reduced null-window search first,
            // then possible deeper/shallower and full-window re-searches.
            let (d, deeper_base, deeper_mul, shallower_thr) = {
                let tune = &self.search_tune_params;
                let delta = (beta.raw() - alpha.raw()).abs().max(1);
                let root_delta = self.state.root_delta.max(1);
                // `r` is in 1/1024 depth units until the division below.
                let mut r = reduction(
                    &self.reductions,
                    tune,
                    root_improving,
                    depth,
                    move_count,
                    delta,
                    root_delta,
                );
                r += tune.lmr_ttpv_add;
                let tt_value_higher = (tt_value_root > alpha) as i32;
                let tt_depth_ge = (tt_data.depth >= depth) as i32;
                r -= tune.lmr_step16_ttpv_sub_base
                    + tune.lmr_step16_ttpv_sub_pv_node
                    + tt_value_higher * tune.lmr_step16_ttpv_sub_tt_value
                    + tt_depth_ge * tune.lmr_step16_ttpv_sub_tt_depth;
                r += tune.lmr_step16_base_add;
                r -= move_count * tune.lmr_step16_move_count_mul;
                r -= root_correction_value.abs() / tune.lmr_step16_correction_div.max(1);
                if tt_capture_root {
                    r += tune.lmr_step16_tt_capture_add;
                }
                if self.state.stack[1].cutoff_cnt > 2 {
                    r += tune.lmr_step16_cutoff_count_add;
                }
                if mv == tt_move_root {
                    r -= tune.lmr_step16_tt_move_penalty;
                }
                let stat_score = self.state.stack[0].stat_score;
                r -= stat_score * tune.lmr_step16_stat_score_scale_num / 8192;
                // Reduced depth, clamped to [1, new_depth + 2], plus one.
                let d =
                    std::cmp::max(1, std::cmp::min(new_depth - r / 1024, new_depth + 2)) + 1;
                (
                    d,
                    tune.lmr_research_deeper_base,
                    tune.lmr_research_deeper_depth_mul,
                    tune.lmr_research_shallower_threshold,
                )
            };
            self.state.stack[0].reduction = new_depth - d;
            let mut value = -self.search_node_wrapper::<{ NodeType::NonPV as u8 }>(
                pos,
                d,
                -alpha - Value::new(1),
                -alpha,
                1,
                true,
                limits,
                time_manager,
            );
            self.state.stack[0].reduction = 0;
            if value > alpha {
                // The reduced search beat alpha: optionally re-search
                // deeper (or shallower) with a null window first.
                let deeper_threshold = deeper_base + deeper_mul * new_depth;
                let do_deeper =
                    d < new_depth && value > (best_value + Value::new(deeper_threshold));
                let do_shallower = value < best_value + Value::new(shallower_thr);
                new_depth += do_deeper as i32 - do_shallower as i32;
                if new_depth > d {
                    value = -self.search_node_wrapper::<{ NodeType::NonPV as u8 }>(
                        pos,
                        new_depth,
                        -alpha - Value::new(1),
                        -alpha,
                        1,
                        true,
                        limits,
                        time_manager,
                    );
                }
            }
            if value > alpha {
                // Still above alpha: confirm with a full-window PV search.
                new_depth = self.root_extend_new_depth(
                    mv,
                    tt_move_root,
                    tt_value_root,
                    tt_data.depth,
                    new_depth,
                );
                value = -self.search_node_wrapper::<{ NodeType::PV as u8 }>(
                    pos,
                    new_depth,
                    -beta,
                    -alpha,
                    1,
                    false,
                    limits,
                    time_manager,
                );
            }
            value
        } else {
            // Shallow case: null-window search at a depth trimmed by the
            // same step-16 reduction terms.
            let step18_depth = {
                let tune = &self.search_tune_params;
                let delta = (beta.raw() - alpha.raw()).abs().max(1);
                let root_delta = self.state.root_delta.max(1);
                let mut r = reduction(
                    &self.reductions,
                    tune,
                    root_improving,
                    depth,
                    move_count,
                    delta,
                    root_delta,
                );
                r += tune.lmr_ttpv_add;
                let tt_value_higher = (tt_value_root > alpha) as i32;
                let tt_depth_ge = (tt_data.depth >= depth) as i32;
                r -= tune.lmr_step16_ttpv_sub_base
                    + tune.lmr_step16_ttpv_sub_pv_node
                    + tt_value_higher * tune.lmr_step16_ttpv_sub_tt_value
                    + tt_depth_ge * tune.lmr_step16_ttpv_sub_tt_depth;
                r += tune.lmr_step16_base_add;
                r -= move_count * tune.lmr_step16_move_count_mul;
                r -= root_correction_value.abs() / tune.lmr_step16_correction_div.max(1);
                if tt_capture_root {
                    r += tune.lmr_step16_tt_capture_add;
                }
                if self.state.stack[1].cutoff_cnt > 2 {
                    r += tune.lmr_step16_cutoff_count_add;
                }
                if mv == tt_move_root {
                    r -= tune.lmr_step16_tt_move_penalty;
                }
                let stat_score = self.state.stack[0].stat_score;
                r -= stat_score * tune.lmr_step16_stat_score_scale_num / 8192;
                if tt_move_root.is_none() {
                    r += tune.full_depth_no_tt_add;
                }
                new_depth
                    - (r > tune.full_depth_r_threshold1) as i32
                    - ((r > tune.full_depth_r_threshold2 && new_depth > 2) as i32)
            };
            let mut value = -self.search_node_wrapper::<{ NodeType::NonPV as u8 }>(
                pos,
                step18_depth,
                -alpha - Value::new(1),
                -alpha,
                1,
                true,
                limits,
                time_manager,
            );
            if value > alpha {
                value = -self.search_node_wrapper::<{ NodeType::PV as u8 }>(
                    pos,
                    new_depth,
                    -beta,
                    -alpha,
                    1,
                    false,
                    limits,
                    time_manager,
                );
            }
            value
        };
        self.nnue_pop();
        pos.undo_move(mv);
        // Attribute the nodes spent on this move (time-management effort).
        let nodes_delta = self.state.nodes.saturating_sub(nodes_before);
        self.state.root_moves[rm_idx].effort += nodes_delta as f64;
        if self.state.abort {
            return Value::ZERO;
        }
        {
            let rm = &mut self.state.root_moves[rm_idx];
            rm.accumulate_score_stats(value);
        }
        if move_count == 1 || value > alpha {
            // First or new-best move: record score, sel_depth and PV.
            let rm = &mut self.state.root_moves[rm_idx];
            rm.score = value;
            rm.sel_depth = self.state.sel_depth;
            rm.pv.truncate(1);
            rm.pv.extend_from_slice(&self.state.stack[1].pv);
            if move_count > 1 {
                self.state.best_move_changes += 1.0;
            }
        } else {
            // Fail-low moves sort behind every scored move.
            self.state.root_moves[rm_idx].score = Value::new(-Value::INFINITE.raw());
        }
        // Occasional one-point tie-break at very low root depth so equal
        // non-winning moves may swap.
        let inc = Value::new(
            if value == best_value
                && 2 >= self.state.root_depth
                && (self.state.nodes as i32 & 14) == 0
                && !Value::new(value.raw().abs() + 1).is_win()
            {
                1
            } else {
                0
            },
        );
        if value + inc > best_value {
            best_value = value;
            if value + inc > alpha {
                best_move = mv;
                if value >= beta {
                    break;
                }
                // New best move at medium depth: shrink the remaining depth
                // so the rest of the moves are re-verified faster.
                if depth > 2 && depth < 14 && !value.is_mate_score() {
                    depth -= 2;
                }
                alpha = value;
            }
        }
        // Remember non-best moves for the history malus updates below.
        if mv != best_move && !mv.is_pass() && move_count <= SEARCHED_MOVES_CAPACITY as i32 {
            if is_capture {
                captures_tried.push(mv);
            } else {
                quiets_tried.push(mv);
            }
        }
    }
    let root_move_count = move_count;
    // Soften non-mate fail-high scores toward beta, weighted by depth.
    if best_value >= beta && !best_value.is_mate_score() && !alpha.is_mate_score() {
        best_value = Value::new((best_value.raw() * depth + beta.raw()) / (depth + 1).max(1));
    }
    {
        // History updates: bonus for the best move, malus for the tried
        // alternatives; quiets and captures go to different tables.
        let best_move_for_stats = best_move;
        if best_move_for_stats.is_some() && !best_move_for_stats.is_pass() {
            let is_best_capture = pos.capture_stage(best_move_for_stats);
            let is_tt_move = best_move_for_stats == tt_move_root;
            let tune = self.search_tune_params;
            let bonus = stat_bonus(depth, is_tt_move, &tune);
            let malus = stat_malus(depth, root_move_count, &tune);
            let us = pos.side_to_move();
            let pawn_key_idx = pos.pawn_history_index();
            let best_moved_pc = pos.moved_piece(best_move_for_stats);
            // Promotions are credited as the promoted piece.
            let best_cont_pc = if best_move_for_stats.is_promotion() {
                best_moved_pc.promote().unwrap_or(best_moved_pc)
            } else {
                best_moved_pc
            };
            let best_to = best_move_for_stats.to();
            if !is_best_capture {
                let scaled_bonus = bonus * tune.update_all_stats_quiet_bonus_scale_num / 1024;
                let scaled_malus = malus * tune.update_all_stats_quiet_malus_scale_num / 1024;
                {
                    let h = unsafe { self.history.as_mut_unchecked() };
                    h.main_history.update(us, best_move_for_stats, scaled_bonus);
                    let low_ply_bonus = low_ply_history_bonus(scaled_bonus, &tune);
                    h.low_ply_history.update(0, best_move_for_stats, low_ply_bonus);
                    let pawn_bonus = pawn_history_bonus(scaled_bonus, &tune);
                    h.pawn_history.update(pawn_key_idx, best_cont_pc, best_to, pawn_bonus);
                    // Penalise the quiet moves that did not become best.
                    for &m in quiets_tried.iter() {
                        h.main_history.update(us, m, -scaled_malus);
                        let low_ply_malus = low_ply_history_bonus(-scaled_malus, &tune);
                        h.low_ply_history.update(0, m, low_ply_malus);
                        let moved_pc = pos.moved_piece(m);
                        let cont_pc = if m.is_promotion() {
                            moved_pc.promote().unwrap_or(moved_pc)
                        } else {
                            moved_pc
                        };
                        let to = m.to();
                        let pawn_malus = pawn_history_bonus(-scaled_malus, &tune);
                        h.pawn_history.update(pawn_key_idx, cont_pc, to, pawn_malus);
                    }
                }
            } else {
                let captured_pt = pos.piece_on(best_to).piece_type();
                {
                    let h = unsafe { self.history.as_mut_unchecked() };
                    h.capture_history.update(
                        best_cont_pc,
                        best_to,
                        captured_pt,
                        bonus * tune.update_all_stats_capture_bonus_scale_num / 1024,
                    );
                }
            }
            // Penalise the captures that did not become best.
            if !captures_tried.is_empty() {
                {
                    let h = unsafe { self.history.as_mut_unchecked() };
                    for &m in captures_tried.iter() {
                        let moved_pc = pos.moved_piece(m);
                        let cont_pc = if m.is_promotion() {
                            moved_pc.promote().unwrap_or(moved_pc)
                        } else {
                            moved_pc
                        };
                        let to = m.to();
                        let captured_pt = pos.piece_on(to).piece_type();
                        h.capture_history.update(
                            cont_pc,
                            to,
                            captured_pt,
                            -malus * tune.update_all_stats_capture_malus_scale_num / 1024,
                        );
                    }
                }
            }
        }
    }
    if self.allow_tt_write {
        let bound = if best_value >= beta {
            Bound::Lower
        } else if best_move.is_some() {
            Bound::Exact
        } else {
            Bound::Upper
        };
        // With no searched root moves, store a deeper bound so the entry
        // sticks around longer.
        let stored_depth = if root_move_count != 0 {
            depth
        } else {
            (depth + 6).min(MAX_PLY - 1)
        };
        tt_ctx_root.result.write(
            key,
            value_to_tt(best_value, 0),
            true,
            bound,
            stored_depth,
            best_move,
            root_unadjusted_static_eval,
            self.tt.generation(),
        );
    }
    {
        // Update correction history when the search result disagrees with
        // the static eval in the direction implied by having (or not
        // having) a quiet best move, and we were not in check.
        let cond_check = !(root_in_check || best_move.is_some() && pos.is_capture(best_move));
        let cond_eval = (best_value > self.state.stack[0].static_eval) == best_move.is_some();
        let do_update = cond_check && cond_eval;
        if do_update {
            let static_eval = self.state.stack[0].static_eval;
            let divisor = if best_move.is_some() { 10 } else { 8 };
            let bonus = ((best_value.raw() - static_eval.raw()) * depth / divisor)
                .clamp(-CORRECTION_HISTORY_LIMIT / 4, CORRECTION_HISTORY_LIMIT / 4);
            // Built inline for the same borrow reasons as the probcut
            // context above.
            let ctx = SearchContext {
                tt: &self.tt,
                eval_hash: &self.eval_hash,
                history: &self.history,
                cont_history_sentinel: self.cont_history_sentinel,
                generate_all_legal_moves: self.generate_all_legal_moves,
                max_moves_to_draw: self.max_moves_to_draw,
                thread_id: self.thread_id,
                allow_tt_write: self.allow_tt_write,
                tune_params: &self.search_tune_params,
                reductions: &self.reductions,
                draw_value_table: self.draw_value_table,
            };
            update_correction_history(&self.state, &ctx, pos, 0, bonus);
        }
    }
    best_value
}
/// Searches the root move list for MultiPV line `pv_idx` (must be > 0; the
/// primary PV line is searched elsewhere) inside the aspiration window
/// `[alpha, beta]`, returning the best value found.
///
/// Moves `root_moves[pv_idx..]` are examined in order: the first move gets a
/// full-window PV search, later moves first get a reduced null-window probe
/// (late move reduction) and are only re-searched at full depth / full
/// window when they beat `alpha`. The best move is swapped into slot
/// `pv_idx` before returning.
///
/// Returns `Value::ZERO` immediately if the search is aborted.
pub(crate) fn search_root_for_pv(
    &mut self,
    pos: &mut Position,
    depth: Depth,
    alpha: Value,
    beta: Value,
    pv_idx: usize,
    limits: &LimitsType,
    time_manager: &mut TimeManagement,
) -> Value {
    debug_assert!(pv_idx > 0);
    self.init_draw_value_table(pos.side_to_move());
    // The root window width feeds the reduction formula used below.
    self.state.root_delta = (beta.raw() - alpha.raw()).abs().max(1);
    let mut alpha = alpha;
    // Sentinel below any reachable score.
    let mut best_value = Value::new(-32001);
    let mut best_rm_idx = pv_idx;
    let root_in_check = pos.in_check();
    self.state.stack[0].in_check = root_in_check;
    // Remember last iteration's PV for this line so descendants can
    // follow it (PV-following state).
    let previous_pv = self.state.root_moves[pv_idx].pv.clone();
    self.state.set_previous_pv(&previous_pv);
    self.state.set_root_follow_pv();
    // Reset ply-0 search-stack bookkeeping before iterating moves.
    self.state.stack[0].cont_history_ptr = self.cont_history_sentinel;
    self.state.stack[0].cont_hist_key = None;
    self.state.stack[0].stat_score = 0;
    self.state.stack[2].cutoff_cnt = 0;
    let (_root_unadjusted_static_eval, root_correction_value) =
        self.init_root_static_eval(pos, root_in_check);
    // Root "improving" heuristic: static eval already at or above beta
    // (never considered improving while in check).
    let root_improving = if root_in_check {
        false
    } else {
        self.state.stack[0].static_eval >= beta
    };
    let key = pos.key();
    let tt_result = self.tt.probe(key, pos);
    let tt_hit = tt_result.found;
    let tt_data = tt_result.data;
    // At the root, the "TT move" is this PV slot's current best move.
    let tt_move_root = self.state.root_moves[pv_idx].mv();
    let tt_value_root = if tt_hit {
        value_from_tt(tt_data.value, 0)
    } else {
        Value::NONE
    };
    let tt_capture_root = tt_move_root.is_some() && pos.capture_stage(tt_move_root);
    self.state.stack[0].tt_hit = tt_hit;
    self.state.stack[0].tt_pv = true;
    self.state.stack[0].pv.clear();
    self.state.stack[1].pv.clear();
    for rm_idx in pv_idx..self.state.root_moves.len() {
        if self.check_abort(limits, time_manager) {
            return Value::ZERO;
        }
        let mv = self.state.root_moves[rm_idx].mv();
        let gives_check = pos.gives_check(mv);
        let is_capture = pos.is_capture(mv);
        do_move_and_push(&mut self.state, pos, mv, gives_check, self.tt.as_ref());
        // Node count before the subtree, for per-move effort accounting.
        let nodes_before = self.state.nodes;
        self.state.stack[0].current_move = mv;
        self.state.stack[0].move_count = (rm_idx + 1) as i32;
        if mv.is_pass() {
            self.clear_cont_history_for_null(0);
        } else {
            let cont_hist_piece = mv.moved_piece_after();
            let cont_hist_to = mv.to();
            self.set_cont_history_for_move(
                0,
                root_in_check,
                is_capture,
                cont_hist_piece,
                cont_hist_to,
            );
        }
        // History-based statistics score for this move; used below to
        // shrink the LMR reduction for moves with good history.
        self.state.stack[0].stat_score = if mv.is_pass() {
            0
        } else if is_capture {
            let captured = pos.captured_piece();
            let captured_pt = captured.piece_type();
            let moved_piece = mv.moved_piece_after();
            let hist = unsafe { self.history.as_ref_unchecked() }.capture_history.get(
                moved_piece,
                mv.to(),
                captured_pt,
            ) as i32;
            self.search_tune_params.lmr_step16_capture_stat_scale_num * piece_value(captured)
                / 128
                + hist
        } else {
            // The move has already been made above, so the mover is the
            // side NOT currently to move.
            let mover = !pos.side_to_move();
            self.root_quiet_stat_score(mover, mv)
        };
        let mut new_depth = depth - 1;
        let value = if rm_idx == pv_idx {
            // First move of this PV line: full-window PV search,
            // with depth possibly extended from TT information.
            new_depth = self.root_extend_new_depth(
                mv,
                tt_move_root,
                tt_value_root,
                tt_data.depth,
                new_depth,
            );
            -self.search_node_wrapper::<{ NodeType::PV as u8 }>(
                pos,
                new_depth,
                -beta,
                -alpha,
                1,
                false,
                limits,
                time_manager,
            )
        } else if depth >= 2 {
            // Later move at sufficient depth: reduced null-window probe
            // first (LMR); re-search deeper / full-window on fail-high.
            let (d, deeper_base, deeper_mul, shallower_thr) = {
                let tune = &self.search_tune_params;
                let delta = (beta.raw() - alpha.raw()).abs().max(1);
                let root_delta = self.state.root_delta.max(1);
                // `r` is a reduction measured in 1/1024ths of a ply.
                let mut r = reduction(
                    &self.reductions,
                    tune,
                    root_improving,
                    depth,
                    (rm_idx + 1) as i32,
                    delta,
                    root_delta,
                );
                r += tune.lmr_ttpv_add;
                let tt_value_higher = (tt_value_root > alpha) as i32;
                let tt_depth_ge = (tt_data.depth >= depth) as i32;
                r -= tune.lmr_step16_ttpv_sub_base
                    + tune.lmr_step16_ttpv_sub_pv_node
                    + tt_value_higher * tune.lmr_step16_ttpv_sub_tt_value
                    + tt_depth_ge * tune.lmr_step16_ttpv_sub_tt_depth;
                r += tune.lmr_step16_base_add;
                r -= (rm_idx + 1) as i32 * tune.lmr_step16_move_count_mul;
                r -= root_correction_value.abs() / tune.lmr_step16_correction_div.max(1);
                if tt_capture_root {
                    r += tune.lmr_step16_tt_capture_add;
                }
                if self.state.stack[1].cutoff_cnt > 2 {
                    r += tune.lmr_step16_cutoff_count_add;
                }
                if mv == tt_move_root {
                    r -= tune.lmr_step16_tt_move_penalty;
                }
                let stat_score = self.state.stack[0].stat_score;
                r -= stat_score * tune.lmr_step16_stat_score_scale_num / 8192;
                // Reduced depth, clamped to [1, new_depth + 2], plus 1
                // (the root is always a PV node).
                let d =
                    std::cmp::max(1, std::cmp::min(new_depth - r / 1024, new_depth + 2)) + 1;
                (
                    d,
                    tune.lmr_research_deeper_base,
                    tune.lmr_research_deeper_depth_mul,
                    tune.lmr_research_shallower_threshold,
                )
            };
            self.state.stack[0].reduction = new_depth - d;
            let mut value = -self.search_node_wrapper::<{ NodeType::NonPV as u8 }>(
                pos,
                d,
                -alpha - Value::new(1),
                -alpha,
                1,
                true,
                limits,
                time_manager,
            );
            self.state.stack[0].reduction = 0;
            if value > alpha {
                // Fail-high on the reduced probe: adjust depth up/down
                // and, if it grew past `d`, re-search at the null window.
                let deeper_threshold = deeper_base + deeper_mul * new_depth;
                let do_deeper =
                    d < new_depth && value > (best_value + Value::new(deeper_threshold));
                let do_shallower = value < best_value + Value::new(shallower_thr);
                new_depth += do_deeper as i32 - do_shallower as i32;
                if new_depth > d {
                    value = -self.search_node_wrapper::<{ NodeType::NonPV as u8 }>(
                        pos,
                        new_depth,
                        -alpha - Value::new(1),
                        -alpha,
                        1,
                        true,
                        limits,
                        time_manager,
                    );
                }
            }
            if value > alpha {
                // Still above alpha: confirm with a full-window PV search.
                new_depth = self.root_extend_new_depth(
                    mv,
                    tt_move_root,
                    tt_value_root,
                    tt_data.depth,
                    new_depth,
                );
                value = -self.search_node_wrapper::<{ NodeType::PV as u8 }>(
                    pos,
                    new_depth,
                    -beta,
                    -alpha,
                    1,
                    false,
                    limits,
                    time_manager,
                );
            }
            value
        } else {
            // Shallow root depth (< 2): no LMR, but the reduction `r`
            // still decides small depth trims for the null-window probe.
            let tune = &self.search_tune_params;
            let delta = (beta.raw() - alpha.raw()).abs().max(1);
            let root_delta = self.state.root_delta.max(1);
            let mut r = reduction(
                &self.reductions,
                tune,
                root_improving,
                depth,
                (rm_idx + 1) as i32,
                delta,
                root_delta,
            );
            r += tune.lmr_ttpv_add;
            let tt_value_higher = (tt_value_root > alpha) as i32;
            let tt_depth_ge = (tt_data.depth >= depth) as i32;
            r -= tune.lmr_step16_ttpv_sub_base
                + tune.lmr_step16_ttpv_sub_pv_node
                + tt_value_higher * tune.lmr_step16_ttpv_sub_tt_value
                + tt_depth_ge * tune.lmr_step16_ttpv_sub_tt_depth;
            r += tune.lmr_step16_base_add;
            r -= (rm_idx + 1) as i32 * tune.lmr_step16_move_count_mul;
            r -= root_correction_value.abs() / tune.lmr_step16_correction_div.max(1);
            if tt_capture_root {
                r += tune.lmr_step16_tt_capture_add;
            }
            if self.state.stack[1].cutoff_cnt > 2 {
                r += tune.lmr_step16_cutoff_count_add;
            }
            if mv == tt_move_root {
                r -= tune.lmr_step16_tt_move_penalty;
            }
            let stat_score = self.state.stack[0].stat_score;
            r -= stat_score * tune.lmr_step16_stat_score_scale_num / 8192;
            if tt_move_root.is_none() {
                r += tune.full_depth_no_tt_add;
            }
            let step18_depth = new_depth
                - (r > tune.full_depth_r_threshold1) as i32
                - ((r > tune.full_depth_r_threshold2 && new_depth > 2) as i32);
            let mut value = -self.search_node_wrapper::<{ NodeType::NonPV as u8 }>(
                pos,
                step18_depth,
                -alpha - Value::new(1),
                -alpha,
                1,
                true,
                limits,
                time_manager,
            );
            if value > alpha {
                value = -self.search_node_wrapper::<{ NodeType::PV as u8 }>(
                    pos,
                    new_depth,
                    -beta,
                    -alpha,
                    1,
                    false,
                    limits,
                    time_manager,
                );
            }
            value
        };
        self.nnue_pop();
        pos.undo_move(mv);
        // Record how many nodes this move's subtree cost (time management).
        let nodes_delta = self.state.nodes.saturating_sub(nodes_before);
        self.state.root_moves[rm_idx].effort += nodes_delta as f64;
        if self.state.abort {
            return Value::ZERO;
        }
        // The first move of the line always keeps its real score; later
        // moves keep it only if they raise alpha (see fail-low marking
        // below).
        let mut updated_alpha = rm_idx == pv_idx;
        {
            let rm = &mut self.state.root_moves[rm_idx];
            rm.score = value;
            rm.sel_depth = self.state.sel_depth;
            rm.accumulate_score_stats(value);
        }
        if value > best_value {
            best_value = value;
            if value > alpha {
                // NOTE(review): `pv_idx` is asserted > 0 at function entry,
                // so this `pv_idx == 0` branch looks unreachable here —
                // confirm whether it is intentionally shared with the
                // first-line search path.
                if pv_idx == 0 && rm_idx > pv_idx {
                    self.state.best_move_changes += 1.0;
                }
                best_rm_idx = rm_idx;
                updated_alpha = true;
                // Rebuild this root move's PV: keep the root move itself
                // and append the child PV collected at ply 1.
                self.state.root_moves[rm_idx].pv.truncate(1);
                self.state.root_moves[rm_idx].pv.extend_from_slice(&self.state.stack[1].pv);
                if value >= beta {
                    // Fail-high: stop searching the remaining root moves.
                    break;
                }
                alpha = value;
            }
        }
        if !updated_alpha {
            // Mark fail-low moves with -INFINITE so later sorting keeps
            // the previous iteration's ordering for them.
            self.state.root_moves[rm_idx].score = Value::new(-Value::INFINITE.raw());
        }
    }
    // Promote the best move found into its MultiPV slot.
    self.state.root_moves.move_to_index(best_rm_idx, pv_idx);
    best_value
}
/// Thin entry point into the recursive search: snapshots the read-only
/// search configuration from `self` into a [`SearchContext`] and forwards
/// to the associated function `search_node`, so the recursion can borrow
/// the mutable `SearchState` and the shared configuration separately.
#[inline]
pub(super) fn search_node_wrapper<const NT: u8>(
    &mut self,
    pos: &mut Position,
    depth: Depth,
    alpha: Value,
    beta: Value,
    ply: i32,
    cut_node: bool,
    limits: &LimitsType,
    time_manager: &mut TimeManagement,
) -> Value {
    // Before descending, propagate the PV-following state from the parent
    // ply's current move (no parent exists at ply 0).
    if ply > 0 {
        let parent = ply - 1;
        let parent_mv = self.state.stack[parent as usize].current_move;
        self.state.set_child_follow_pv(parent, parent_mv);
    }
    let search_ctx = SearchContext {
        tt: &self.tt,
        eval_hash: &self.eval_hash,
        history: &self.history,
        cont_history_sentinel: self.cont_history_sentinel,
        generate_all_legal_moves: self.generate_all_legal_moves,
        max_moves_to_draw: self.max_moves_to_draw,
        thread_id: self.thread_id,
        allow_tt_write: self.allow_tt_write,
        tune_params: &self.search_tune_params,
        reductions: &self.reductions,
        draw_value_table: self.draw_value_table,
    };
    Self::search_node::<NT>(
        &mut self.state,
        &search_ctx,
        pos,
        depth,
        alpha,
        beta,
        ply,
        cut_node,
        limits,
        time_manager,
    )
}
pub(super) fn search_node<const NT: u8>(
st: &mut SearchState,
ctx: &SearchContext<'_>,
pos: &mut Position,
depth: Depth,
alpha: Value,
beta: Value,
ply: i32,
cut_node: bool,
limits: &LimitsType,
time_manager: &mut TimeManagement,
) -> Value {
inc_stat!(st, nodes_searched);
inc_stat_by_depth!(st, nodes_by_depth, depth);
let pv_node = NT == NodeType::PV as u8 || NT == NodeType::Root as u8;
let mut depth = depth;
let in_check = pos.in_check();
let all_node = !(pv_node || cut_node);
let mut alpha = alpha;
let mut beta = beta;
if depth <= DEPTH_QS {
return qsearch::<NT>(st, ctx, pos, alpha, beta, ply, limits, time_manager);
}
if ply >= MAX_PLY {
return if in_check {
Value::ZERO
} else {
nnue_evaluate(st, pos)
};
}
if pv_node && st.sel_depth < ply + 1 {
st.sel_depth = ply + 1;
}
if check_abort(st, ctx, limits, time_manager) {
return Value::ZERO;
}
if NT != NodeType::Root as u8 {
let rep_state = pos.repetition_state(ply);
if rep_state.is_repetition() || rep_state.is_superior_inferior() {
let v = draw_value(rep_state, pos.side_to_move(), &ctx.draw_value_table);
if v != Value::NONE {
if rep_state == RepetitionState::Draw {
let jittered = Value::new(v.raw() + draw_jitter(st.nodes, ctx.tune_params));
return jittered;
}
return value_from_tt(v, ply);
}
}
if ctx.max_moves_to_draw > 0 && pos.game_ply() > ctx.max_moves_to_draw {
return Value::new(
ctx.draw_value_table[pos.side_to_move() as usize].raw()
+ draw_jitter(st.nodes, ctx.tune_params),
);
}
}
if NT != NodeType::Root as u8 {
alpha = alpha.max(Value::mated_in(ply));
beta = beta.min(Value::mate_in(ply + 1));
if alpha >= beta {
return alpha;
}
}
let ss = unsafe { st.stack.get_unchecked_mut(ply as usize) };
ss.in_check = in_check;
ss.move_count = 0;
ss.stat_score = 0;
unsafe { st.stack.get_unchecked_mut((ply + 2) as usize) }.cutoff_cnt = 0;
if pv_node {
st.stack[ply as usize].pv.clear();
st.stack[(ply + 1) as usize].pv.clear();
}
let prior_reduction = take_prior_reduction(st, ply);
unsafe { st.stack.get_unchecked_mut(ply as usize) }.reduction = 0;
let excluded_move = unsafe { st.stack.get_unchecked(ply as usize) }.excluded_move;
let prior_capture = pos.captured_piece().is_some();
let tt_ctx = match probe_transposition::<NT>(
st,
ctx,
pos,
depth,
beta,
ply,
pv_node,
in_check,
excluded_move,
cut_node,
) {
ProbeOutcome::Continue(c) => c,
ProbeOutcome::Cutoff {
value,
tt_move: cutoff_tt_move,
tt_capture: cutoff_tt_capture,
} => {
inc_stat!(st, tt_cutoff);
inc_stat_by_depth!(st, tt_cutoff_by_depth, depth);
if cutoff_tt_move.is_some() && value.raw() >= beta.raw() {
if !cutoff_tt_capture {
let bonus = (130 * depth - 71).min(1043);
{
let h = unsafe { ctx.history.as_mut_unchecked() };
update_quiet_histories(
h,
&st.stack,
ctx.tune_params,
pos,
ply,
in_check,
cutoff_tt_move,
bonus,
);
}
}
if ply >= 1 {
let prev_ply = (ply - 1) as usize;
let prev_move_count = st.stack[prev_ply].move_count;
let prev_move = st.stack[prev_ply].current_move;
if prev_move.is_normal() && prev_move_count <= 4 && !prior_capture {
let prev_sq = prev_move.to();
let prev_piece = pos.piece_on(prev_sq);
let prev_in_check = st.stack[prev_ply].in_check;
{
let h = unsafe { ctx.history.as_mut_unchecked() };
update_continuation_histories(
h,
&st.stack,
ctx.tune_params,
ply - 1,
prev_in_check,
prev_piece,
prev_sq,
ctx.tune_params.tt_cutoff_cont_hist_penalty,
);
}
}
}
}
return value;
}
};
let tt_move = tt_ctx.mv;
let tt_value = tt_ctx.value;
let tt_hit = tt_ctx.hit;
let tt_data = tt_ctx.data;
let _tt_capture = tt_ctx.capture;
let eval_ctx =
compute_eval_context(st, ctx, pos, ply, in_check, pv_node, &tt_ctx, excluded_move);
let mut improving = eval_ctx.improving;
let opponent_worsening = eval_ctx.opponent_worsening;
if !in_check && ply >= 1 {
let prev_ply = (ply - 1) as usize;
let prev_move = st.stack[prev_ply].current_move;
let prev_in_check = st.stack[prev_ply].in_check;
if prev_move.is_normal() && !prev_in_check && !prior_capture {
let prev_eval = st.stack[prev_ply].static_eval.raw();
let curr_eval = eval_ctx.static_eval.raw();
let tune = ctx.tune_params;
let eval_diff = (-(prev_eval + curr_eval))
.clamp(tune.eval_diff_clamp_min, tune.eval_diff_clamp_max)
+ tune.eval_diff_offset;
let opponent = !pos.side_to_move();
let prev_sq = prev_move.to();
{
let h = unsafe { ctx.history.as_mut_unchecked() };
h.main_history.update(
opponent,
prev_move,
eval_diff * tune.eval_diff_main_hist_mult,
);
if !tt_hit {
let prev_piece = pos.piece_on(prev_sq);
if prev_piece.piece_type() != PieceType::Pawn && !prev_move.is_promotion() {
let pawn_idx = pos.pawn_history_index();
h.pawn_history.update(
pawn_idx,
prev_piece,
prev_sq,
eval_diff * tune.eval_diff_pawn_hist_mult,
);
}
}
}
}
}
if !in_check {
if prior_reduction
>= if depth < ctx.tune_params.iir_depth_boundary {
ctx.tune_params.iir_prior_reduction_threshold_shallow
} else {
ctx.tune_params.iir_prior_reduction_threshold_deep
}
&& !opponent_worsening
{
depth += 1;
}
if prior_reduction >= 2
&& depth >= 2
&& ply >= 1
&& eval_ctx.static_eval + st.stack[(ply - 1) as usize].static_eval
> Value::new(ctx.tune_params.iir_eval_sum_threshold)
{
depth -= 1;
}
}
if let Some(v) = try_razoring::<NT>(
st,
ctx,
pos,
depth,
alpha,
beta,
ply,
pv_node,
in_check,
eval_ctx.eval,
limits,
time_manager,
) {
return v;
}
let tt_capture = tt_move.is_some() && pos.capture_stage(tt_move);
if let Some(v) = try_futility_pruning(
FutilityParams {
depth,
beta,
static_eval: eval_ctx.eval,
correction_value: eval_ctx.correction_value,
improving,
opponent_worsening,
tt_hit,
tt_move_exists: tt_move.is_some(),
tt_capture,
tt_pv: st.stack[ply as usize].tt_pv,
in_check,
},
ctx.tune_params,
) {
inc_stat!(st, futility_pruned);
inc_stat_by_depth!(st, futility_by_depth, depth);
return v;
}
let (null_value, improving_after_null) = try_null_move_pruning::<NT, _>(
st,
ctx,
pos,
depth,
beta,
ply,
cut_node,
in_check,
eval_ctx.static_eval,
improving,
excluded_move,
limits,
time_manager,
Self::search_node::<{ NodeType::NonPV as u8 }>,
);
if let Some(v) = null_value {
return v;
}
improving = improving_after_null;
if !in_check
&& !all_node
&& depth >= 6
&& tt_move.is_none()
&& prior_reduction <= 3
&& !st.stack[ply as usize].follow_pv
{
depth -= 1;
}
if let Some(v) = try_probcut(
st,
ctx,
pos,
depth,
beta,
improving,
&tt_ctx,
ply,
st.stack[ply as usize].static_eval,
eval_ctx.unadjusted_static_eval,
in_check,
cut_node,
excluded_move,
limits,
time_manager,
Self::search_node::<{ NodeType::NonPV as u8 }>,
) {
return v;
}
{
let small_probcut_beta = beta + Value::new(418);
if tt_data.bound.is_lower_or_exact()
&& tt_data.depth >= depth - 4
&& tt_value != Value::NONE
&& tt_value >= small_probcut_beta
&& !beta.is_mate_score()
&& !tt_value.is_mate_score()
{
return small_probcut_beta;
}
}
let mut best_value = Value::new(-32001);
let mut best_move = Move::NONE;
let mut move_count = 0;
let mut quiets_tried = SearchedMoveList::new();
let mut captures_tried = SearchedMoveList::new();
let mover = pos.side_to_move();
let tt_move = if depth <= DEPTH_QS
&& tt_move.is_some()
&& (!pos.capture_stage(tt_move) && !pos.gives_check(tt_move) || depth < -16)
{
Move::NONE
} else {
tt_move
};
let cont_tables = cont_history_tables(st, ctx, ply);
let cont_hist_ptr_1 = cont_history_ptr(st, ctx, ply, 1);
let cont_hist_ptr_2 = cont_history_ptr(st, ctx, ply, 2);
let mut mp =
MovePicker::new(pos, tt_move, depth, ply, cont_tables, ctx.generate_all_legal_moves);
let tt_pv = st.stack[ply as usize].tt_pv;
let root_node = NT == NodeType::Root as u8;
let mut lmp_triggered = false;
loop {
let mv = {
let h = unsafe { ctx.history.as_ref_unchecked() };
mp.next_move(pos, h)
};
if mv == Move::NONE {
break;
}
if mv == excluded_move {
continue;
}
if !pos.pseudo_legal(mv) {
continue;
}
if !pos.is_legal(mv) {
continue;
}
if check_abort(st, ctx, limits, time_manager) {
return Value::ZERO;
}
move_count += 1;
st.stack[ply as usize].move_count = move_count;
let is_capture = pos.is_capture(mv);
let gives_check = pos.gives_check(mv);
let mut new_depth = depth - 1;
let mut extension = 0i32;
let original_depth = depth;
let delta = (beta.raw() - alpha.raw()).max(0);
let mut r = reduction(
ctx.reductions,
ctx.tune_params,
improving,
original_depth,
move_count,
delta,
st.root_delta.max(1),
);
if st.stack[ply as usize].tt_pv {
r += ctx.tune_params.lmr_ttpv_add;
}
let lmr_depth = new_depth - r / 1024;
if !root_node && !best_value.is_loss() {
let lmp_limit = (3 + original_depth * original_depth) / (2 - improving as i32);
if move_count >= lmp_limit && !lmp_triggered {
mp.skip_quiets();
lmp_triggered = true;
}
}
let step14_ctx = Step14Context {
pos,
mv,
depth: original_depth,
ply,
best_value,
in_check,
gives_check,
is_capture,
lmr_depth,
mover,
cont_history_1: unsafe { cont_hist_ptr_1.as_ref() },
cont_history_2: unsafe { cont_hist_ptr_2.as_ref() },
static_eval: st.stack[ply as usize].static_eval,
alpha,
best_move,
pawn_history_index: pos.pawn_history_index(),
};
match step14_pruning(ctx, step14_ctx) {
Step14Outcome::Skip {
best_value: updated,
} => {
inc_stat!(st, move_loop_pruned);
if let Some(v) = updated {
best_value = v;
}
continue;
}
Step14Outcome::Continue => {}
}
if !root_node
&& mv == tt_move
&& excluded_move.is_none()
&& depth
>= ctx.tune_params.singular_min_depth_base
+ ctx.tune_params.singular_min_depth_tt_pv_add * tt_pv as i32
&& tt_value != Value::NONE
&& !tt_value.is_mate_score()
&& tt_data.bound.is_lower_or_exact()
&& tt_data.depth >= depth - ctx.tune_params.singular_tt_depth_margin
{
let singular_beta_margin = (ctx.tune_params.singular_beta_margin_base
+ ctx.tune_params.singular_beta_margin_tt_pv_non_pv_add
* (tt_pv && !pv_node) as i32)
* depth
/ ctx.tune_params.singular_beta_margin_div.max(1);
let singular_beta = tt_value - Value::new(singular_beta_margin);
let singular_depth = new_depth / ctx.tune_params.singular_depth_div.max(1);
st.stack[ply as usize].excluded_move = mv;
let singular_value = Self::search_node::<{ NodeType::NonPV as u8 }>(
st,
ctx,
pos,
singular_depth,
singular_beta - Value::new(1),
singular_beta,
ply,
cut_node,
limits,
time_manager,
);
st.stack[ply as usize].excluded_move = Move::NONE;
let tt_pv = st.stack[ply as usize].tt_pv;
if singular_value < singular_beta {
inc_stat!(st, singular_extension);
let corr_val_adj = eval_ctx.correction_value.abs()
/ ctx.tune_params.singular_corr_val_adj_div.max(1);
let tt_move_hist =
unsafe { ctx.history.as_ref_unchecked() }.tt_move_history.get() as i32;
let double_margin = ctx.tune_params.singular_double_margin_base
+ ctx.tune_params.singular_double_margin_pv_node * pv_node as i32
+ ctx.tune_params.singular_double_margin_non_tt_capture
* !tt_capture as i32
- corr_val_adj
+ ctx.tune_params.singular_double_margin_tt_move_hist_mult * tt_move_hist
/ ctx.tune_params.singular_double_margin_tt_move_hist_div.max(1)
- (ply > st.root_depth) as i32
* ctx.tune_params.singular_double_margin_late_ply_penalty;
let triple_margin = ctx.tune_params.singular_triple_margin_base
+ ctx.tune_params.singular_triple_margin_pv_node * pv_node as i32
+ ctx.tune_params.singular_triple_margin_non_tt_capture
* !tt_capture as i32
+ ctx.tune_params.singular_triple_margin_tt_pv * tt_pv as i32
- corr_val_adj
- (ply * 2 > st.root_depth * 3) as i32
* ctx.tune_params.singular_triple_margin_late_ply_penalty;
extension = 1
+ (singular_value < singular_beta - Value::new(double_margin)) as i32
+ (singular_value < singular_beta - Value::new(triple_margin)) as i32;
depth += 1;
} else if singular_value >= beta && !singular_value.is_mate_score() {
{
let h = unsafe { ctx.history.as_mut_unchecked() };
h.tt_move_history
.update(super::tt_history::TTMoveHistory::multi_cut_bonus(depth));
}
inc_stat!(st, multi_cut);
return singular_value;
} else if tt_value >= beta {
extension = ctx.tune_params.singular_negative_extension_tt_fail_high;
} else if cut_node {
extension = ctx.tune_params.singular_negative_extension_cut_node;
}
}
st.stack[ply as usize].current_move = mv;
do_move_and_push(st, pos, mv, gives_check, ctx.tt);
if mv.is_pass() {
clear_cont_history_for_null(st, ctx, ply);
} else {
let cont_hist_piece = mv.moved_piece_after();
let cont_hist_to = mv.to();
set_cont_history_for_move(
st,
ctx,
ply,
in_check,
is_capture,
cont_hist_piece,
cont_hist_to,
);
}
new_depth += extension;
let tt_value_higher = tt_value > alpha;
let tt_depth_ge = tt_data.depth >= depth;
if st.stack[ply as usize].tt_pv {
r -= ctx.tune_params.lmr_step16_ttpv_sub_base
+ (pv_node as i32) * ctx.tune_params.lmr_step16_ttpv_sub_pv_node
+ (tt_value_higher as i32) * ctx.tune_params.lmr_step16_ttpv_sub_tt_value
+ (tt_depth_ge as i32)
* (ctx.tune_params.lmr_step16_ttpv_sub_tt_depth
+ (cut_node as i32) * ctx.tune_params.lmr_step16_ttpv_sub_cut_node);
}
r += ctx.tune_params.lmr_step16_base_add;
r -= move_count * ctx.tune_params.lmr_step16_move_count_mul;
r -= eval_ctx.correction_value.abs() / ctx.tune_params.lmr_step16_correction_div.max(1);
if cut_node {
let no_tt_move = !tt_hit || tt_move.is_none();
r += ctx.tune_params.lmr_step16_cut_node_add
+ ctx.tune_params.lmr_step16_cut_node_no_tt_add * (no_tt_move as i32);
}
if tt_capture {
r += ctx.tune_params.lmr_step16_tt_capture_add;
}
if st.stack[(ply + 1) as usize].cutoff_cnt > 2 {
r += ctx.tune_params.lmr_step16_cutoff_count_add
+ (all_node as i32) * ctx.tune_params.lmr_step16_cutoff_count_all_node_add;
}
if mv == tt_move {
r -= ctx.tune_params.lmr_step16_tt_move_penalty;
}
let stat_score = if mv.is_pass() {
0 } else if is_capture {
let captured = pos.captured_piece();
let captured_pt = captured.piece_type();
let moved_piece = mv.moved_piece_after();
let hist = unsafe { ctx.history.as_ref_unchecked() }.capture_history.get(
moved_piece,
mv.to(),
captured_pt,
) as i32;
ctx.tune_params.lmr_step16_capture_stat_scale_num * piece_value(captured) / 128
+ hist
} else {
let moved_piece = mv.moved_piece_after();
let main_hist =
unsafe { ctx.history.as_ref_unchecked() }.main_history.get(mover, mv) as i32;
let cont0 = unsafe { cont_hist_ptr_1.as_ref() }.get(moved_piece, mv.to()) as i32;
let cont1 = unsafe { cont_hist_ptr_2.as_ref() }.get(moved_piece, mv.to()) as i32;
2 * main_hist + cont0 + cont1
};
st.stack[ply as usize].stat_score = stat_score;
r -= stat_score * ctx.tune_params.lmr_step16_stat_score_scale_num / 8192;
let mut value = if depth >= 2 && move_count > 1 {
inc_stat!(st, lmr_applied);
let d = std::cmp::max(1, std::cmp::min(new_depth - r / 1024, new_depth + 2))
+ pv_node as i32;
#[cfg(feature = "search-stats")]
{
let reduction = (r / 1024).max(0) as usize;
let reduction_idx = reduction.min(15);
st.stats.lmr_reduction_histogram[reduction_idx] += 1;
let new_depth_idx = (d as usize).min(STATS_MAX_DEPTH - 1);
st.stats.lmr_new_depth_histogram[new_depth_idx] += 1;
}
#[cfg(feature = "search-stats")]
if d == 1 {
let parent_depth_idx = (depth as usize).min(STATS_MAX_DEPTH - 1);
st.stats.lmr_to_depth1_from[parent_depth_idx] += 1;
}
#[cfg(feature = "search-stats")]
{
if cut_node {
st.stats.lmr_cut_node_applied += 1;
if d == 1 {
st.stats.lmr_cut_node_to_depth1 += 1;
}
} else {
st.stats.lmr_non_cut_node_applied += 1;
if d == 1 {
st.stats.lmr_non_cut_node_to_depth1 += 1;
}
}
}
let reduction_from_parent = new_depth - d;
st.stack[ply as usize].reduction = reduction_from_parent;
st.set_child_follow_pv(ply, mv);
let mut value = -Self::search_node::<{ NodeType::NonPV as u8 }>(
st,
ctx,
pos,
d,
-alpha - Value::new(1),
-alpha,
ply + 1,
true,
limits,
time_manager,
);
st.stack[ply as usize].reduction = 0;
if value > alpha {
let deeper_threshold = ctx.tune_params.lmr_research_deeper_base
+ ctx.tune_params.lmr_research_deeper_depth_mul * new_depth;
let do_deeper =
d < new_depth && value > (best_value + Value::new(deeper_threshold));
let do_shallower = value
< best_value + Value::new(ctx.tune_params.lmr_research_shallower_threshold);
new_depth += do_deeper as i32 - do_shallower as i32;
if new_depth > d {
inc_stat!(st, lmr_research);
st.set_child_follow_pv(ply, mv);
value = -Self::search_node::<{ NodeType::NonPV as u8 }>(
st,
ctx,
pos,
new_depth,
-alpha - Value::new(1),
-alpha,
ply + 1,
!cut_node,
limits,
time_manager,
);
}
if !mv.is_pass() {
let moved_piece = mv.moved_piece_after();
let to_sq = mv.to();
for offset in 1..=6 {
if st.stack[ply as usize].in_check && offset > 2 {
break;
}
let weight = match offset {
1 => ctx.tune_params.fail_high_continuation_weight_1,
2 => ctx.tune_params.fail_high_continuation_weight_2,
3 => ctx.tune_params.fail_high_continuation_weight_3,
4 => ctx.tune_params.fail_high_continuation_weight_4,
5 => ctx.tune_params.fail_high_continuation_weight_5,
6 => ctx.tune_params.fail_high_continuation_weight_6,
_ => 0,
};
let idx = ply - offset;
if idx < 0 {
break;
}
if let Some(key) =
unsafe { st.stack.get_unchecked(idx as usize) }.cont_hist_key
{
if key.piece.is_none() {
continue;
}
let in_check_idx = key.in_check as usize;
let capture_idx = key.capture as usize;
let bonus =
ctx.tune_params.fail_high_continuation_base_num * weight / 1024
+ if offset < 2 {
ctx.tune_params.fail_high_continuation_near_ply_offset
} else {
0
};
{
let h = unsafe { ctx.history.as_mut_unchecked() };
h.continuation_history[in_check_idx][capture_idx].update(
key.piece,
key.to,
moved_piece,
to_sq,
bonus,
);
}
}
}
}
} else if value > alpha && value < best_value + Value::new(9) {
new_depth -= 1;
}
if pv_node && (move_count == 1 || value > alpha) {
if mv == tt_move
&& ((tt_value != Value::NONE
&& tt_value.is_mate_score()
&& tt_data.depth > 0)
|| (tt_data.depth > 1 && st.root_depth > 8))
{
new_depth = new_depth.max(1);
}
st.stack[ply as usize].reduction = 0;
st.set_child_follow_pv(ply, mv);
-Self::search_node::<{ NodeType::PV as u8 }>(
st,
ctx,
pos,
new_depth,
-beta,
-alpha,
ply + 1,
false,
limits,
time_manager,
)
} else {
value
}
} else if !pv_node || move_count > 1 {
let mut non_lmr_depth = new_depth;
if tt_move.is_none() {
r += ctx.tune_params.full_depth_no_tt_add;
}
non_lmr_depth -= (r > ctx.tune_params.full_depth_r_threshold1) as i32;
non_lmr_depth -=
(r > ctx.tune_params.full_depth_r_threshold2 && new_depth > 2) as i32;
st.stack[ply as usize].reduction = 0;
st.set_child_follow_pv(ply, mv);
let mut value = -Self::search_node::<{ NodeType::NonPV as u8 }>(
st,
ctx,
pos,
non_lmr_depth,
-alpha - Value::new(1),
-alpha,
ply + 1,
!cut_node,
limits,
time_manager,
);
st.stack[ply as usize].reduction = 0;
if pv_node && value > alpha {
if mv == tt_move
&& ((tt_value != Value::NONE
&& tt_value.is_mate_score()
&& tt_data.depth > 0)
|| (tt_data.depth > 1 && st.root_depth > 8))
{
new_depth = new_depth.max(1);
}
st.stack[ply as usize].reduction = 0;
st.set_child_follow_pv(ply, mv);
value = -Self::search_node::<{ NodeType::PV as u8 }>(
st,
ctx,
pos,
new_depth,
-beta,
-alpha,
ply + 1,
false,
limits,
time_manager,
);
st.stack[ply as usize].reduction = 0;
}
value
} else {
if mv == tt_move
&& ((tt_value != Value::NONE && tt_value.is_mate_score() && tt_data.depth > 0)
|| (tt_data.depth > 1 && st.root_depth > 8))
{
new_depth = new_depth.max(1);
}
st.stack[ply as usize].reduction = 0;
st.set_child_follow_pv(ply, mv);
-Self::search_node::<{ NodeType::PV as u8 }>(
st,
ctx,
pos,
new_depth,
-beta,
-alpha,
ply + 1,
false,
limits,
time_manager,
)
};
nnue_pop(st);
pos.undo_move(mv);
if mv.is_pass() && !value.is_mate_score() {
let bonus = get_scaled_pass_move_bonus(pos.game_ply());
if bonus != 0 {
value += Value::new(bonus);
}
}
if st.abort {
return Value::ZERO;
}
let inc = Value::new(
if value == best_value
&& ply + 2 >= st.root_depth
&& (st.nodes as i32 & 14) == 0
&& !Value::new(value.raw().abs() + 1).is_win()
{
1
} else {
0
},
);
if value + inc > best_value {
best_value = value;
if value + inc > alpha {
best_move = mv;
if pv_node {
let child_pv = st.stack[(ply + 1) as usize].pv.clone();
st.stack[ply as usize].update_pv(mv, &child_pv);
}
if value >= beta {
st.stack[ply as usize].cutoff_cnt += (extension < 2 || pv_node) as i32;
inc_stat_by_depth!(st, cutoff_by_depth, depth);
if move_count == 1 {
inc_stat_by_depth!(st, first_move_cutoff_by_depth, depth);
}
#[cfg(feature = "search-stats")]
{
let d = (depth as usize).min(STATS_MAX_DEPTH - 1);
st.stats.move_count_sum_by_depth[d] += move_count as u64;
}
break;
}
if depth > 2 && depth < 14 && !value.is_mate_score() {
depth -= 2;
}
alpha = value;
}
}
if mv != best_move && !mv.is_pass() && move_count <= SEARCHED_MOVES_CAPACITY as i32 {
if is_capture {
captures_tried.push(mv);
} else {
quiets_tried.push(mv);
}
}
}
if best_value >= beta && !best_value.is_mate_score() && !alpha.is_mate_score() {
best_value = Value::new((best_value.raw() * depth + beta.raw()) / (depth + 1).max(1));
}
if move_count == 0 {
best_value = if excluded_move.is_some() {
alpha
} else if in_check {
Value::mated_in(ply)
} else {
Value::ZERO
};
} else if best_move.is_some() && !best_move.is_pass() {
let is_best_capture = pos.capture_stage(best_move);
let is_tt_move = best_move == tt_move;
let bonus = stat_bonus(depth, is_tt_move, ctx.tune_params);
let malus = stat_malus(depth, move_count, ctx.tune_params);
let us = pos.side_to_move();
let pawn_key_idx = pos.pawn_history_index();
let best_moved_pc = pos.moved_piece(best_move);
let best_cont_pc = if best_move.is_promotion() {
best_moved_pc.promote().unwrap_or(best_moved_pc)
} else {
best_moved_pc
};
let best_to = best_move.to();
let max_ply_back = if in_check { 2 } else { 6 };
if !is_best_capture {
let scaled_bonus =
bonus * ctx.tune_params.update_all_stats_quiet_bonus_scale_num / 1024;
let scaled_malus =
malus * ctx.tune_params.update_all_stats_quiet_malus_scale_num / 1024;
{
let h = unsafe { ctx.history.as_mut_unchecked() };
h.main_history.update(us, best_move, scaled_bonus);
if ply < LOW_PLY_HISTORY_SIZE as i32 {
let low_ply_bonus = low_ply_history_bonus(scaled_bonus, ctx.tune_params);
h.low_ply_history.update(ply as usize, best_move, low_ply_bonus);
}
let cont_scaled_bonus =
scaled_bonus * ctx.tune_params.continuation_history_multiplier / 1024;
for ply_back in 1..=6 {
if ply_back > max_ply_back {
continue;
}
let weight = continuation_history_weight(ctx.tune_params, ply_back);
if ply >= ply_back as i32 {
let prev_ply = (ply - ply_back as i32) as usize;
if let Some(key) =
unsafe { st.stack.get_unchecked(prev_ply) }.cont_hist_key
{
if key.piece.is_none() {
continue;
}
let in_check_idx = key.in_check as usize;
let capture_idx = key.capture as usize;
let weighted_bonus = continuation_history_bonus_with_offset(
cont_scaled_bonus * weight / 1024,
ply_back,
ctx.tune_params,
);
h.continuation_history[in_check_idx][capture_idx].update(
key.piece,
key.to,
best_cont_pc,
best_to,
weighted_bonus,
);
}
}
}
let pawn_bonus = pawn_history_bonus(scaled_bonus, ctx.tune_params);
h.pawn_history.update(pawn_key_idx, best_cont_pc, best_to, pawn_bonus);
for &m in quiets_tried.iter() {
h.main_history.update(us, m, -scaled_malus);
if ply < LOW_PLY_HISTORY_SIZE as i32 {
let low_ply_malus =
low_ply_history_bonus(-scaled_malus, ctx.tune_params);
h.low_ply_history.update(ply as usize, m, low_ply_malus);
}
let moved_pc = pos.moved_piece(m);
let cont_pc = if m.is_promotion() {
moved_pc.promote().unwrap_or(moved_pc)
} else {
moved_pc
};
let to = m.to();
let cont_scaled_malus =
-scaled_malus * ctx.tune_params.continuation_history_multiplier / 1024;
for ply_back in 1..=6 {
if ply_back > max_ply_back {
continue;
}
let weight = continuation_history_weight(ctx.tune_params, ply_back);
if ply >= ply_back as i32 {
let prev_ply = (ply - ply_back as i32) as usize;
if let Some(key) =
unsafe { st.stack.get_unchecked(prev_ply) }.cont_hist_key
{
if key.piece.is_none() {
continue;
}
let in_check_idx = key.in_check as usize;
let capture_idx = key.capture as usize;
let weighted_malus = continuation_history_bonus_with_offset(
cont_scaled_malus * weight / 1024,
ply_back,
ctx.tune_params,
);
h.continuation_history[in_check_idx][capture_idx].update(
key.piece,
key.to,
cont_pc,
to,
weighted_malus,
);
}
}
}
let pawn_malus = pawn_history_bonus(-scaled_malus, ctx.tune_params);
h.pawn_history.update(pawn_key_idx, cont_pc, to, pawn_malus);
}
}
} else {
let captured_pt = pos.piece_on(best_to).piece_type();
{
let h = unsafe { ctx.history.as_mut_unchecked() };
h.capture_history.update(
best_cont_pc,
best_to,
captured_pt,
bonus * ctx.tune_params.update_all_stats_capture_bonus_scale_num / 1024,
);
}
}
{
let h = unsafe { ctx.history.as_mut_unchecked() };
for &m in captures_tried.iter() {
let moved_pc = pos.moved_piece(m);
let cont_pc = if m.is_promotion() {
moved_pc.promote().unwrap_or(moved_pc)
} else {
moved_pc
};
let to = m.to();
let captured_pt = pos.piece_on(to).piece_type();
h.capture_history.update(
cont_pc,
to,
captured_pt,
-malus * ctx.tune_params.update_all_stats_capture_malus_scale_num / 1024,
);
}
}
if ply >= 1 {
let prev_ply = (ply - 1) as usize;
let prev_move_count = st.stack[prev_ply].move_count;
let prev_tt_hit = st.stack[prev_ply].tt_hit;
if prev_move_count == 1 + (prev_tt_hit as i32)
&& pos.captured_piece() == Piece::NONE
&& let Some(key) = st.stack[prev_ply].cont_hist_key
{
if !key.piece.is_none() {
let prev_sq = key.to;
let prev_piece = pos.piece_on(prev_sq);
let penalty_base = -malus
* ctx.tune_params.update_all_stats_early_refutation_penalty_scale_num
/ 1024;
let prev_in_check = st.stack[prev_ply].in_check;
let prev_max_ply_back = if prev_in_check { 2 } else { 6 };
{
let h = unsafe { ctx.history.as_mut_unchecked() };
for ply_back in 1..=6 {
if ply_back > prev_max_ply_back {
continue;
}
let weight = continuation_history_weight(ctx.tune_params, ply_back);
let target_ply = ply - 1 - ply_back as i32;
if target_ply >= 0
&& let Some(target_key) =
st.stack[target_ply as usize].cont_hist_key
{
if target_key.piece.is_none() {
continue;
}
let in_check_idx = target_key.in_check as usize;
let capture_idx = target_key.capture as usize;
let weighted_penalty = penalty_base * weight / 1024
+ if ply_back < 2 {
ctx.tune_params.continuation_history_near_ply_offset
} else {
0
};
h.continuation_history[in_check_idx][capture_idx].update(
target_key.piece,
target_key.to,
prev_piece,
prev_sq,
weighted_penalty,
);
}
}
}
}
}
}
if !pv_node {
let bonus = if best_move == tt_move {
ctx.tune_params.tt_move_history_bonus
} else {
ctx.tune_params.tt_move_history_malus
};
unsafe { ctx.history.as_mut_unchecked() }.tt_move_history.update(bonus);
}
}
else if ply >= 1 {
let prev_ply = (ply - 1) as usize;
if let Some(prev_key) = st.stack[prev_ply].cont_hist_key {
if !prev_key.piece.is_none() {
let prior_capture = prev_key.capture;
let prev_sq = prev_key.to;
if !prior_capture {
let parent_stat_score = st.stack[prev_ply].stat_score;
let parent_move_count = st.stack[prev_ply].move_count;
let parent_in_check = st.stack[prev_ply].in_check;
let parent_static_eval = st.stack[prev_ply].static_eval;
let static_eval = st.stack[ply as usize].static_eval;
let mut bonus_scale: i32 =
ctx.tune_params.prior_quiet_countermove_bonus_scale_base;
bonus_scale -= parent_stat_score
/ ctx.tune_params.prior_quiet_countermove_parent_stat_div.max(1);
bonus_scale += (ctx.tune_params.prior_quiet_countermove_depth_mul * depth)
.min(ctx.tune_params.prior_quiet_countermove_depth_cap);
bonus_scale += ctx.tune_params.prior_quiet_countermove_move_count_bonus
* (parent_move_count > 8) as i32;
bonus_scale += ctx.tune_params.prior_quiet_countermove_eval_bonus
* (!in_check
&& best_value
<= static_eval
- Value::new(
ctx.tune_params.prior_quiet_countermove_eval_margin,
)) as i32;
bonus_scale += ctx.tune_params.prior_quiet_countermove_parent_eval_bonus
* (!parent_in_check
&& best_value
<= -parent_static_eval
- Value::new(
ctx.tune_params
.prior_quiet_countermove_parent_eval_margin,
)) as i32;
bonus_scale = bonus_scale.max(0);
let scaled_bonus =
(ctx.tune_params.prior_quiet_countermove_scaled_depth_mul * depth
+ ctx.tune_params.prior_quiet_countermove_scaled_offset)
.min(ctx.tune_params.prior_quiet_countermove_scaled_cap)
as i64
* bonus_scale as i64;
let prev_piece = pos.piece_on(prev_sq);
let prev_max_ply_back = if parent_in_check { 2 } else { 6 };
let cont_bonus = (scaled_bonus
* ctx.tune_params.prior_quiet_countermove_cont_scale_num as i64
/ 32768) as i32;
let prev_move = st.stack[prev_ply].current_move;
let main_bonus = (scaled_bonus
* ctx.tune_params.prior_quiet_countermove_main_scale_num as i64
/ 32768) as i32;
let opponent = !pos.side_to_move();
let pawn_key_idx = pos.pawn_history_index();
let pawn_bonus = (scaled_bonus
* ctx.tune_params.prior_quiet_countermove_pawn_scale_num as i64
/ 32768) as i32;
let update_pawn =
prev_piece.piece_type() != PieceType::Pawn && !prev_move.is_promotion();
{
let h = unsafe { ctx.history.as_mut_unchecked() };
for ply_back in 1..=6 {
if ply_back > prev_max_ply_back {
continue;
}
let weight = continuation_history_weight(ctx.tune_params, ply_back);
let target_ply = ply - 1 - ply_back as i32;
if target_ply >= 0
&& let Some(target_key) =
st.stack[target_ply as usize].cont_hist_key
{
if target_key.piece.is_none() {
continue;
}
let in_check_idx = target_key.in_check as usize;
let capture_idx = target_key.capture as usize;
let weighted_bonus = cont_bonus * weight / 1024
+ if ply_back < 2 {
ctx.tune_params.continuation_history_near_ply_offset
} else {
0
};
h.continuation_history[in_check_idx][capture_idx].update(
target_key.piece,
target_key.to,
prev_piece,
prev_sq,
weighted_bonus,
);
}
}
h.main_history.update(opponent, prev_move, main_bonus);
if update_pawn {
h.pawn_history.update(
pawn_key_idx,
prev_piece,
prev_sq,
pawn_bonus,
);
}
}
} else {
let prev_piece = pos.piece_on(prev_sq);
let captured_piece = pos.captured_piece();
debug_assert!(
captured_piece != Piece::NONE,
"prior_capture is true but captured_piece is NONE"
);
if captured_piece != Piece::NONE {
unsafe { ctx.history.as_mut_unchecked() }.capture_history.update(
prev_piece,
prev_sq,
captured_piece.piece_type(),
ctx.tune_params.prior_capture_countermove_bonus,
);
}
}
}
}
}
if best_value <= alpha {
st.stack[ply as usize].tt_pv = st.stack[ply as usize].tt_pv
|| if ply >= 1 {
st.stack[(ply - 1) as usize].tt_pv
} else {
false
};
}
if excluded_move.is_none() {
let bound = if best_value >= beta {
Bound::Lower
} else if pv_node && best_move.is_some() {
Bound::Exact
} else {
Bound::Upper
};
let stored_depth = if move_count != 0 {
depth
} else {
(depth + 6).min(MAX_PLY - 1)
};
#[cfg(feature = "tt-trace")]
let allow_write = ctx.allow_tt_write
&& helper_tt_write_enabled_for_depth(ctx.thread_id, bound, stored_depth);
#[cfg(not(feature = "tt-trace"))]
let allow_write = ctx.allow_tt_write;
if allow_write {
#[cfg(feature = "tt-trace")]
maybe_trace_tt_write(TtWriteTrace {
stage: "ab_store",
thread_id: ctx.thread_id,
ply,
key: tt_ctx.key,
depth: stored_depth,
bound,
is_pv: st.stack[ply as usize].tt_pv,
tt_move: best_move,
stored_value: value_to_tt(best_value, ply),
eval: eval_ctx.unadjusted_static_eval,
root_move: if ply >= 1 {
st.stack[0].current_move
} else {
Move::NONE
},
});
tt_ctx.result.write(
tt_ctx.key,
value_to_tt(best_value, ply),
st.stack[ply as usize].tt_pv,
bound,
stored_depth,
best_move,
eval_ctx.unadjusted_static_eval,
ctx.tt.generation(),
);
inc_stat_by_depth!(st, tt_write_by_depth, stored_depth);
}
}
if !(in_check || best_move.is_some() && pos.is_capture(best_move))
&& (best_value > st.stack[ply as usize].static_eval) == best_move.is_some()
{
let static_eval = st.stack[ply as usize].static_eval;
let divisor = if best_move.is_some() { 10 } else { 8 };
let bonus = ((best_value.raw() - static_eval.raw()) * depth / divisor)
.clamp(-CORRECTION_HISTORY_LIMIT / 4, CORRECTION_HISTORY_LIMIT / 4);
update_correction_history(st, ctx, pos, ply, bonus);
}
best_value
}
}
// SAFETY: manual `Send` opt-in, presumably because `SearchWorker` holds raw
// pointer fields (e.g. `NonNull`, see the `HistoryCell`/`ctx.history.as_mut_unchecked()`
// usage above) that suppress the auto-derived `Send` impl. The soundness argument —
// that each worker exclusively owns the data behind those pointers while running on
// its own thread — is not visible from this file. NOTE(review): confirm that no
// pointed-to state is shared mutably between workers without synchronization.
unsafe impl Send for SearchWorker {}