use std::ptr::NonNull;
use std::sync::Arc;
use crate::eval::{get_scaled_pass_move_bonus, EvalHash};
use crate::nnue::{get_network, AccumulatorStackVariant, DirtyPiece};
use crate::position::Position;
use crate::search::PieceToHistory;
use crate::tt::{ProbeResult, TTData, TranspositionTable};
use crate::types::{Bound, Color, Depth, Move, Piece, PieceType, Square, Value, DEPTH_QS, MAX_PLY};
use super::history::{
capture_malus, continuation_history_bonus_with_offset, low_ply_history_bonus,
pawn_history_bonus, quiet_malus, stat_bonus, HistoryCell, CONTINUATION_HISTORY_NEAR_PLY_OFFSET,
CONTINUATION_HISTORY_WEIGHTS, CORRECTION_HISTORY_LIMIT, LOW_PLY_HISTORY_SIZE,
PRIOR_CAPTURE_COUNTERMOVE_BONUS, TT_MOVE_HISTORY_BONUS, TT_MOVE_HISTORY_MALUS,
};
use super::movepicker::piece_value;
use super::types::{
init_stack_array, value_to_tt, ContHistKey, NodeType, RootMoves, SearchedMoveList, StackArray,
STACK_SIZE,
};
use super::{LimitsType, MovePicker, TimeManagement};
use super::eval_helpers::{compute_eval_context, probe_transposition, update_correction_history};
use super::pruning::{
step14_pruning, try_futility_pruning, try_null_move_pruning, try_probcut, try_razoring,
try_small_probcut,
};
use super::qsearch::qsearch;
use super::search_helpers::{
check_abort, clear_cont_history_for_null, cont_history_ref, cont_history_tables, nnue_evaluate,
nnue_pop, nnue_push, set_cont_history_for_move, take_prior_reduction,
};
// Thresholds on the parent's reduction ("prior reduction") above which the
// current node's depth is extended by one; a larger prior reduction is
// required once depth reaches IIR_DEPTH_BOUNDARY.
const IIR_PRIOR_REDUCTION_THRESHOLD_SHALLOW: i32 = 1;
const IIR_PRIOR_REDUCTION_THRESHOLD_DEEP: i32 = 3;
const IIR_DEPTH_BOUNDARY: Depth = 10;
// When the parent was reduced by >= 2 and the sum of the current and previous
// static evals exceeds this value, depth is instead reduced by one.
const IIR_EVAL_SUM_THRESHOLD: i32 = 177;
use std::sync::LazyLock;
// Draw-score jitter parameters: bit 1 of the node counter picks the sign.
const DRAW_JITTER_MASK: u64 = 0x2;
const DRAW_JITTER_OFFSET: i32 = -1;
/// Small deterministic jitter mixed into draw scores, derived from the
/// running node counter: -1 when bit 1 of `nodes` is clear, +1 when set.
#[inline]
pub(super) fn draw_jitter(nodes: u64) -> i32 {
    if nodes & DRAW_JITTER_MASK == 0 {
        DRAW_JITTER_OFFSET
    } else {
        DRAW_JITTER_MASK as i32 + DRAW_JITTER_OFFSET
    }
}
/// Index of the most significant set bit of `x`, i.e. `floor(log2(x))`.
///
/// Returns 0 for non-positive inputs (which is also the correct value for
/// `x == 1`).
#[inline]
fn msb(x: i32) -> i32 {
    if x <= 0 {
        return 0;
    }
    // Standard-library equivalent of the hand-rolled `31 - leading_zeros`
    // bit trick.
    x.ilog2() as i32
}
/// Blend the correction-history term into an unadjusted static evaluation.
///
/// The correction value is scaled down by 2^17 and the result is clamped to
/// stay strictly inside the mate-score range, so a corrected eval can never
/// be mistaken for a proven mate/mated score.
#[inline]
pub(super) fn to_corrected_static_eval(unadjusted: Value, correction_value: i32) -> Value {
    let lo = Value::MATED_IN_MAX_PLY.raw() + 1;
    let hi = Value::MATE_IN_MAX_PLY.raw() - 1;
    let raw = unadjusted.raw() + correction_value / 131_072;
    Value::new(raw.clamp(lo, hi))
}
/// Base-reduction lookup table, indexed by clamped depth or move count.
type Reductions = [i32; 64];
// Tuning constants for `reduction` (fixed point, 1024 units per ply).
const REDUCTION_DELTA_SCALE: i32 = 757;
const REDUCTION_NON_IMPROVING_MULT: i32 = 218;
const REDUCTION_NON_IMPROVING_DIV: i32 = 512;
const REDUCTION_BASE_OFFSET: i32 = 1200;
/// `REDUCTIONS[i] == (2782/128 * ln(i)) as i32`; entry 0 stays zero and is
/// never read because `reduction` clamps both indices into `1..=63`.
static REDUCTIONS: LazyLock<Reductions> = LazyLock::new(|| {
    std::array::from_fn(|i| {
        if i == 0 {
            0
        } else {
            (2782.0 / 128.0 * (i as f64).ln()) as i32
        }
    })
});
/// Late-move-reduction amount (fixed point, 1024 = one ply) for a move at
/// `depth` with the given `move_count`.
///
/// * `imp` — node is "improving"; non-improving nodes get extra reduction.
/// * `delta` / `root_delta` — current and root window widths; a window that
///   is wide relative to the root's shrinks the reduction.
///
/// Returns 0 when `depth` or `move_count` is non-positive.
#[inline]
pub(crate) fn reduction(
    imp: bool,
    depth: i32,
    move_count: i32,
    delta: i32,
    root_delta: i32,
) -> i32 {
    if depth <= 0 || move_count <= 0 {
        return 0;
    }
    let depth_term = REDUCTIONS[depth.clamp(1, 63) as usize];
    let count_term = REDUCTIONS[move_count.clamp(1, 63) as usize];
    let scale = depth_term * count_term;
    let window_term = delta.max(0) * REDUCTION_DELTA_SCALE / root_delta.max(1);
    let non_improving_term = if imp {
        0
    } else {
        scale * REDUCTION_NON_IMPROVING_MULT / REDUCTION_NON_IMPROVING_DIV
    };
    scale - window_term + non_improving_term + REDUCTION_BASE_OFFSET
}
use super::stats::{inc_stat, inc_stat_by_depth};
#[cfg(feature = "search-stats")]
use super::stats::{SearchStats, STATS_MAX_DEPTH};
/// Snapshot of a transposition-table probe for the current node.
pub(super) struct TTContext {
// Position hash key used for the probe.
pub(super) key: u64,
// Raw probe result handle returned by the TT.
pub(super) result: ProbeResult,
// Decoded entry payload (depth, bound, stored move, ...).
pub(super) data: TTData,
// True when the probe found a usable entry for this position.
pub(super) hit: bool,
// Move from the TT entry; may be `Move::NONE` when none was stored.
pub(super) mv: Move,
// Score from the TT entry, as prepared by `probe_transposition`.
pub(super) value: Value,
// Whether the TT move was classified as a capture at probe time.
pub(super) capture: bool,
}
/// Outcome of `probe_transposition`: either continue searching with the
/// probe snapshot, or return immediately with a TT cutoff score.
pub(super) enum ProbeOutcome {
Continue(TTContext),
Cutoff(Value),
}
/// Static-evaluation summary for a node, built by `compute_eval_context`.
pub(super) struct EvalContext {
// Static eval after the correction-history adjustment.
pub(super) static_eval: Value,
// Static eval before the correction was applied.
pub(super) unadjusted_static_eval: Value,
// Correction-history term blended into `static_eval`.
pub(super) correction_value: i32,
// "Improving" flag consumed by the reduction/pruning heuristics.
pub(super) improving: bool,
// Set when the opponent's eval trend looks worse than before.
pub(super) opponent_worsening: bool,
}
/// Verdict of `step14_pruning` for a single candidate move.
pub(super) enum Step14Outcome {
// Prune the move; optionally raise `best_value` to the supplied score.
Skip { best_value: Option<Value> },
// Keep searching the move.
Continue,
}
#[derive(Clone, Copy)]
pub(super) struct FutilityParams {
pub(super) depth: Depth,
pub(super) beta: Value,
pub(super) static_eval: Value,
pub(super) correction_value: i32,
pub(super) improving: bool,
pub(super) opponent_worsening: bool,
pub(super) tt_hit: bool,
pub(super) tt_move_exists: bool, pub(super) tt_capture: bool, pub(super) pv_node: bool,
pub(super) in_check: bool,
}
pub(super) struct Step14Context<'a> {
pub(super) pos: &'a Position,
pub(super) mv: Move,
pub(super) depth: Depth,
pub(super) ply: i32,
pub(super) best_value: Value,
pub(super) in_check: bool,
pub(super) gives_check: bool,
pub(super) is_capture: bool,
pub(super) lmr_depth: i32,
pub(super) mover: Color,
pub(super) cont_history_1: &'a PieceToHistory,
pub(super) cont_history_2: &'a PieceToHistory,
pub(super) static_eval: Value,
pub(super) alpha: Value,
pub(super) tt_move: Move, pub(super) pawn_history_index: usize, }
/// Read-only bundle of shared search resources, borrowed from a
/// `SearchWorker` and passed down the search tree.
pub struct SearchContext<'a> {
pub tt: &'a TranspositionTable,
pub eval_hash: &'a EvalHash,
pub history: &'a HistoryCell,
// Sentinel continuation-history table used for "no previous move" frames.
pub cont_history_sentinel: NonNull<PieceToHistory>,
// Forwarded to `MovePicker::new` to request full legal-move generation.
pub generate_all_legal_moves: bool,
// Move count at which the game is adjudicated as a draw.
pub max_moves_to_draw: i32,
pub thread_id: usize,
}
/// Mutable per-thread search state: counters, the per-ply stack, root moves
/// and the NNUE accumulator stack.
pub struct SearchState {
// Nodes visited during the current search.
pub nodes: u64,
// Per-ply search stack frames.
pub stack: StackArray,
// Root aspiration-window width, kept >= 1 (see `search_root`).
pub root_delta: i32,
// Latched when the search must stop as soon as possible.
pub abort: bool,
// Deepest ply reached in the current iteration (for reporting).
pub sel_depth: i32,
pub root_depth: Depth,
// Last iteration depth that finished without aborting.
pub completed_depth: Depth,
pub best_move: Move,
// Counts how often a new root best move appeared; halved by
// `decay_best_move_changes`.
pub best_move_changes: f64,
// NOTE(review): appears to gate re-enabling null-move pruning by ply —
// used outside this chunk; confirm.
pub nmp_min_ply: i32,
pub root_moves: RootMoves,
// NNUE accumulator stack mirroring the do/undo move sequence.
pub nnue_stack: AccumulatorStackVariant,
// Countdown until the next limit/time check in `check_abort`.
pub calls_cnt: i32,
#[cfg(feature = "search-stats")]
pub stats: SearchStats,
}
impl SearchState {
pub fn new() -> Self {
Self {
nodes: 0,
stack: init_stack_array(),
root_delta: 1,
abort: false,
sel_depth: 0,
root_depth: 0,
completed_depth: 0,
best_move: Move::NONE,
best_move_changes: 0.0,
nmp_min_ply: 0,
root_moves: RootMoves::new(),
nnue_stack: AccumulatorStackVariant::new_default(),
calls_cnt: 0,
#[cfg(feature = "search-stats")]
stats: SearchStats::default(),
}
}
}
impl Default for SearchState {
fn default() -> Self {
Self::new()
}
}
/// One search thread: owns its history tables and mutable state, shares the
/// transposition table and eval hash with other threads via `Arc`.
pub struct SearchWorker {
pub tt: Arc<TranspositionTable>,
pub eval_hash: Arc<EvalHash>,
// Heap-allocated so the sentinel pointer below stays valid even when the
// worker value itself is moved.
pub history: Box<HistoryCell>,
// Points at a fixed table inside `history`; installed into stack frames
// that have no continuation-history context (e.g. after a null move).
pub cont_history_sentinel: NonNull<PieceToHistory>,
pub generate_all_legal_moves: bool,
pub max_moves_to_draw: i32,
pub thread_id: usize,
pub state: SearchState,
}
impl SearchWorker {
/// Build a boxed worker wired to the shared TT and eval hash.
///
/// The continuation-history sentinel is captured before `history` is moved
/// into the struct; the pointee lives on the heap inside the
/// `Box<HistoryCell>`, so moving the box does not invalidate the pointer.
pub fn new(
tt: Arc<TranspositionTable>,
eval_hash: Arc<EvalHash>,
max_moves_to_draw: i32,
thread_id: usize,
) -> Box<Self> {
let history = HistoryCell::new_boxed();
// Sentinel: the (Piece::NONE, SQ_11) table of the [not-in-check][quiet]
// continuation-history bucket serves as the "no previous move" entry.
let cont_history_sentinel = history.with_read(|h| {
NonNull::from(h.continuation_history[0][0].get_table(Piece::NONE, Square::SQ_11))
});
let mut worker = Box::new(Self {
tt,
eval_hash,
history,
cont_history_sentinel,
generate_all_legal_moves: false,
max_moves_to_draw,
thread_id,
state: SearchState::new(),
});
// Every stack frame starts out pointing at the sentinel table.
worker.reset_cont_history_ptrs();
worker
}
/// Borrow this worker's shared tables and settings as a `SearchContext`.
#[inline]
pub fn create_context(&self) -> SearchContext<'_> {
    SearchContext {
        cont_history_sentinel: self.cont_history_sentinel,
        generate_all_legal_moves: self.generate_all_legal_moves,
        max_moves_to_draw: self.max_moves_to_draw,
        thread_id: self.thread_id,
        tt: &self.tt,
        eval_hash: &self.eval_hash,
        history: &self.history,
    }
}
/// Mutable access to the per-thread search state.
#[inline]
pub fn state_mut(&mut self) -> &mut SearchState {
&mut self.state
}
/// Shared access to the per-thread search state.
#[inline]
pub fn state(&self) -> &SearchState {
&self.state
}
/// Reset the per-search statistics counters.
#[cfg(feature = "search-stats")]
pub fn reset_stats(&mut self) {
self.state.stats.reset();
}
/// No-op stub so callers need not care whether stats are compiled in.
#[cfg(not(feature = "search-stats"))]
pub fn reset_stats(&mut self) {}
/// Human-readable statistics report for this worker.
#[cfg(feature = "search-stats")]
pub fn get_stats_report(&self) -> String {
self.state.stats.format_report()
}
/// Stats are compiled out: always returns an empty string.
#[cfg(not(feature = "search-stats"))]
pub fn get_stats_report(&self) -> String {
String::new()
}
/// Point every stack frame's continuation-history pointer back at the
/// shared sentinel ("no previous move") table.
fn reset_cont_history_ptrs(&mut self) {
    let sentinel = self.cont_history_sentinel;
    self.state
        .stack
        .iter_mut()
        .for_each(|frame| frame.cont_history_ptr = sentinel);
}
/// Install the continuation-history table matching the move just played at
/// `ply`, and remember the key that selected it (so later updates can find
/// the same table).
#[inline]
pub(super) fn set_cont_history_for_move(
    &mut self,
    ply: i32,
    in_check: bool,
    capture: bool,
    piece: Piece,
    to: Square,
) {
    debug_assert!(ply >= 0 && (ply as usize) < STACK_SIZE, "ply out of bounds: {ply}");
    // Look up the table under a read lock; only the pointer escapes.
    let table = self.history.with_read(|h| {
        NonNull::from(
            h.continuation_history[in_check as usize][capture as usize].get_table(piece, to),
        )
    });
    let frame = &mut self.state.stack[ply as usize];
    frame.cont_history_ptr = table;
    frame.cont_hist_key = Some(ContHistKey::new(in_check, capture, piece, to));
}
/// Reset `ply`'s continuation-history slot after a null (pass) move: point
/// it back at the sentinel table and drop the key.
#[inline]
pub(super) fn clear_cont_history_for_null(&mut self, ply: i32) {
    let sentinel = self.cont_history_sentinel;
    let frame = &mut self.state.stack[ply as usize];
    frame.cont_history_ptr = sentinel;
    frame.cont_hist_key = None;
}
/// Clear all history tables; search counters are reset separately by
/// `prepare_search`.
pub fn clear(&mut self) {
self.history.clear();
}
/// Reset all per-search state ahead of a new search: counters, root moves,
/// low-ply history, and the NNUE accumulator stack.
pub fn prepare_search(&mut self) {
    // Zero the per-search counters and flags.
    self.state.nodes = 0;
    self.state.sel_depth = 0;
    self.state.root_depth = 0;
    self.state.root_delta = 1;
    self.state.completed_depth = 0;
    self.state.best_move = Move::NONE;
    self.state.abort = false;
    self.state.best_move_changes = 0.0;
    self.state.nmp_min_ply = 0;
    self.state.calls_cnt = 0;
    self.state.root_moves.clear();
    self.reset_stats();
    // Low-ply history is per-search; the other history tables persist.
    self.history.with_write(|h| h.low_ply_history.clear());
    // Rebuild the NNUE stack when the loaded network changed; otherwise
    // (same network, or no network at all) just reset it.
    match get_network() {
        Some(network) if !self.state.nnue_stack.matches_network(network) => {
            self.state.nnue_stack = AccumulatorStackVariant::from_network(network);
        }
        _ => self.state.nnue_stack.reset(),
    }
}
/// Halve the accumulated best-move-change counter (see `best_move_changes`).
pub fn decay_best_move_changes(&mut self) {
self.state.best_move_changes /= 2.0;
}
/// Toggle full legal-move generation; forwarded to `MovePicker::new`.
pub fn set_generate_all_legal_moves(&mut self, flag: bool) {
self.generate_all_legal_moves = flag;
}
/// Push a move's accumulator delta onto the NNUE stack (called right after
/// `Position::do_move_with_prefetch`).
#[inline]
pub(super) fn nnue_push(&mut self, dirty_piece: DirtyPiece) {
self.state.nnue_stack.push(dirty_piece);
}
/// Pop the top NNUE accumulator entry (called before `Position::undo_move`).
#[inline]
pub(super) fn nnue_pop(&mut self) {
self.state.nnue_stack.pop();
}
/// Periodically check node/time limits and the external stop flag.
///
/// Returns `true` (and latches `state.abort`) when the search must stop.
/// The expensive checks only run once every `calls_cnt` invocations.
#[inline]
pub(super) fn check_abort(
&mut self,
limits: &LimitsType,
time_manager: &mut TimeManagement,
) -> bool {
if self.state.abort {
#[cfg(debug_assertions)]
eprintln!("check_abort: abort flag already set");
return true;
}
// Throttle: skip the checks below until the countdown expires.
self.state.calls_cnt -= 1;
if self.state.calls_cnt > 0 {
return false;
}
// Reload the countdown. With a node limit, check more often for small
// limits (at least every call) so the limit is not overshot badly.
self.state.calls_cnt = if limits.nodes > 0 {
std::cmp::min(512, (limits.nodes / 1024) as i32).max(1)
} else {
512
};
if time_manager.stop_requested() {
#[cfg(debug_assertions)]
eprintln!("check_abort: stop requested");
self.state.abort = true;
return true;
}
if limits.nodes > 0 && self.state.nodes >= limits.nodes {
#[cfg(debug_assertions)]
eprintln!(
"check_abort: node limit reached nodes={} limit={}",
self.state.nodes, limits.nodes
);
self.state.abort = true;
return true;
}
// Time handling is performed by the main thread only.
if self.thread_id == 0 {
if time_manager.take_ponderhit() {
time_manager.on_ponderhit();
}
let elapsed = time_manager.elapsed();
let elapsed_effective = time_manager.elapsed_from_ponderhit();
// A previously scheduled end time has been reached: stop now.
if time_manager.search_end() > 0 && elapsed >= time_manager.search_end() {
#[cfg(debug_assertions)]
eprintln!(
"check_abort: search_end reached elapsed={} search_end={}",
elapsed,
time_manager.search_end()
);
self.state.abort = true;
return true;
}
// Time budget exhausted (or a ponderhit asked us to wrap up): record
// the stop time; the branch above aborts on a subsequent check.
if !time_manager.is_pondering()
&& time_manager.search_end() == 0
&& limits.use_time_management()
&& (elapsed_effective > time_manager.maximum() || time_manager.stop_on_ponderhit())
{
time_manager.set_search_end(elapsed);
}
}
false
}
/// Iterative-deepening driver: searches the root position at depths
/// `1..=depth`, wrapping each iteration in an aspiration window centered on
/// the previous score.
///
/// Results are left in `state.root_moves` / `state.best_move`;
/// `state.completed_depth` records the last fully finished iteration.
pub fn search(
&mut self,
pos: &mut Position,
depth: Depth,
limits: &LimitsType,
time_manager: &mut TimeManagement,
) {
self.state.root_moves = RootMoves::from_legal_moves(pos, &limits.search_moves);
// No legal moves: nothing to search.
if self.state.root_moves.is_empty() {
self.state.best_move = Move::NONE;
return;
}
for d in 1..=depth {
if self.state.abort {
break;
}
for rm in self.state.root_moves.iter_mut() {
rm.effort = 0.0;
}
self.state.root_depth = d;
self.state.sel_depth = 0;
// `root_moves` is sorted at the end of `search_root`, so index 0
// holds the previous iteration's best score.
// NOTE(review): the literal +-32001 appears to mirror Value::INFINITE
// — confirm against the Value definition.
let prev_score = if d > 1 {
self.state.root_moves[0].score
} else {
Value::new(-32001)
};
let mut delta = Value::new(10);
// From depth 4 on, start with a narrow window around the previous
// score; shallower iterations use the full window.
let mut alpha = if d >= 4 {
Value::new(prev_score.raw().saturating_sub(delta.raw()).max(-32001))
} else {
Value::new(-32001)
};
let mut beta = if d >= 4 {
Value::new(prev_score.raw().saturating_add(delta.raw()).min(32001))
} else {
Value::new(32001)
};
// Aspiration loop: widen the window on fail-low/high and re-search
// until the score fits strictly inside it.
loop {
let score = self.search_root(pos, d, alpha, beta, limits, time_manager);
if self.state.abort {
break;
}
if score <= alpha {
// Fail low: drop alpha below the score and pull beta toward
// alpha to keep the re-search window tight.
beta = Value::new((alpha.raw() + beta.raw()) / 2);
alpha = Value::new(score.raw().saturating_sub(delta.raw()).max(-32001));
} else if score >= beta {
// Fail high: raise beta only.
beta = Value::new(score.raw().saturating_add(delta.raw()).min(32001));
} else {
break;
}
// Grow the half-width by ~1/3 for the next attempt.
delta = Value::new(
delta.raw().saturating_add(delta.raw() / 3).min(Value::INFINITE.raw()),
);
}
// Only record results from iterations that ran to completion.
if !self.state.abort {
self.state.completed_depth = d;
self.state.best_move = self.state.root_moves[0].mv();
}
}
}
/// Search every root move at `depth` inside the window `[alpha, beta]`.
///
/// The first move gets a full-window PV search; later moves get a
/// null-window scout search and are re-searched with the full window only
/// when they beat alpha. On return the best move has been rotated to the
/// front of `root_moves` and the list re-sorted.
pub(crate) fn search_root(
&mut self,
pos: &mut Position,
depth: Depth,
alpha: Value,
beta: Value,
limits: &LimitsType,
time_manager: &mut TimeManagement,
) -> Value {
self.state.root_delta = (beta.raw() - alpha.raw()).abs().max(1);
let mut alpha = alpha;
let mut best_value = Value::new(-32001);
let mut pv_idx = 0;
let root_in_check = pos.in_check();
// Initialize the root stack frame; ply 0 has no previous move, so its
// continuation history points at the sentinel.
self.state.stack[0].in_check = root_in_check;
self.state.stack[0].cont_history_ptr = self.cont_history_sentinel;
self.state.stack[0].cont_hist_key = None;
self.state.stack[0].pv.clear();
self.state.stack[1].pv.clear();
for rm_idx in 0..self.state.root_moves.len() {
if self.check_abort(limits, time_manager) {
return Value::ZERO;
}
self.state.sel_depth = 0;
let mv = self.state.root_moves[rm_idx].mv();
let gives_check = pos.gives_check(mv);
let is_capture = pos.is_capture(mv);
// Nodes spent on this move are tracked as "effort".
let nodes_before = self.state.nodes;
let dirty_piece = pos.do_move_with_prefetch(mv, gives_check, self.tt.as_ref());
self.nnue_push(dirty_piece);
self.state.nodes += 1;
self.state.stack[0].current_move = mv;
if mv.is_pass() {
self.clear_cont_history_for_null(0);
} else {
let cont_hist_piece = mv.moved_piece_after();
let cont_hist_to = mv.to();
self.set_cont_history_for_move(
0,
root_in_check,
is_capture,
cont_hist_piece,
cont_hist_to,
);
}
let value = if rm_idx == 0 {
// First move: full-window PV search.
-self.search_node_wrapper::<{ NodeType::PV as u8 }>(
pos,
depth - 1,
-beta,
-alpha,
1,
false,
limits,
time_manager,
)
} else {
// Later moves: null-window scout search first ...
let mut value = -self.search_node_wrapper::<{ NodeType::NonPV as u8 }>(
pos,
depth - 1,
-alpha - Value::new(1),
-alpha,
1,
true,
limits,
time_manager,
);
// ... then a full-window re-search if the scout beat alpha.
if value > alpha && value < beta {
value = -self.search_node_wrapper::<{ NodeType::PV as u8 }>(
pos,
depth - 1,
-beta,
-alpha,
1,
false,
limits,
time_manager,
);
}
value
};
self.nnue_pop();
pos.undo_move(mv);
let nodes_delta = self.state.nodes.saturating_sub(nodes_before);
self.state.root_moves[rm_idx].effort += nodes_delta as f64;
if self.state.abort {
return Value::ZERO;
}
let mut updated_alpha = rm_idx == 0; {
let rm = &mut self.state.root_moves[rm_idx];
rm.score = value;
rm.sel_depth = self.state.sel_depth;
rm.accumulate_score_stats(value);
}
if value > best_value {
best_value = value;
if value > alpha {
// A move other than the previous best raised alpha.
if rm_idx > 0 {
self.state.best_move_changes += 1.0;
}
alpha = value;
pv_idx = rm_idx;
updated_alpha = true;
// Rebuild this root move's PV: the move itself plus the child
// PV collected at ply 1.
self.state.root_moves[rm_idx].pv.truncate(1);
self.state.root_moves[rm_idx].pv.extend_from_slice(&self.state.stack[1].pv);
if value >= beta {
break;
}
}
}
// Moves that never raised alpha only have a null-window bound; park
// them at -INFINITE so they sort behind moves with real scores.
if !updated_alpha {
self.state.root_moves[rm_idx].score = Value::new(-Value::INFINITE.raw());
}
}
self.state.root_moves.move_to_front(pv_idx);
self.state.root_moves.sort();
best_value
}
/// Variant of `search_root` that searches only the root moves from index
/// `pv_idx` onward (earlier entries are treated as already settled for this
/// iteration) and moves the best move found to position `pv_idx`.
/// NOTE(review): the shape strongly suggests MultiPV support — confirm at
/// the call site.
pub(crate) fn search_root_for_pv(
&mut self,
pos: &mut Position,
depth: Depth,
alpha: Value,
beta: Value,
pv_idx: usize,
limits: &LimitsType,
time_manager: &mut TimeManagement,
) -> Value {
self.state.root_delta = (beta.raw() - alpha.raw()).abs().max(1);
let mut alpha = alpha;
let mut best_value = Value::new(-32001);
let mut best_rm_idx = pv_idx;
let root_in_check = pos.in_check();
// Initialize the root stack frame; ply 0 has no previous move, so its
// continuation history points at the sentinel.
self.state.stack[0].in_check = root_in_check;
self.state.stack[0].cont_history_ptr = self.cont_history_sentinel;
self.state.stack[0].cont_hist_key = None;
self.state.stack[0].pv.clear();
self.state.stack[1].pv.clear();
for rm_idx in pv_idx..self.state.root_moves.len() {
if self.check_abort(limits, time_manager) {
return Value::ZERO;
}
self.state.sel_depth = 0;
let mv = self.state.root_moves[rm_idx].mv();
let gives_check = pos.gives_check(mv);
let is_capture = pos.is_capture(mv);
// Nodes spent on this move are tracked as "effort".
let nodes_before = self.state.nodes;
let dirty_piece = pos.do_move_with_prefetch(mv, gives_check, self.tt.as_ref());
self.nnue_push(dirty_piece);
self.state.nodes += 1;
self.state.stack[0].current_move = mv;
if mv.is_pass() {
self.clear_cont_history_for_null(0);
} else {
let cont_hist_piece = mv.moved_piece_after();
let cont_hist_to = mv.to();
self.set_cont_history_for_move(
0,
root_in_check,
is_capture,
cont_hist_piece,
cont_hist_to,
);
}
let value = if rm_idx == pv_idx {
// First move of this PV line: full-window PV search.
-self.search_node_wrapper::<{ NodeType::PV as u8 }>(
pos,
depth - 1,
-beta,
-alpha,
1,
false,
limits,
time_manager,
)
} else {
// Later moves: null-window scout search first ...
let mut value = -self.search_node_wrapper::<{ NodeType::NonPV as u8 }>(
pos,
depth - 1,
-alpha - Value::new(1),
-alpha,
1,
true,
limits,
time_manager,
);
// ... then a full-window re-search if the scout beat alpha.
if value > alpha && value < beta {
value = -self.search_node_wrapper::<{ NodeType::PV as u8 }>(
pos,
depth - 1,
-beta,
-alpha,
1,
false,
limits,
time_manager,
);
}
value
};
self.nnue_pop();
pos.undo_move(mv);
let nodes_delta = self.state.nodes.saturating_sub(nodes_before);
self.state.root_moves[rm_idx].effort += nodes_delta as f64;
if self.state.abort {
return Value::ZERO;
}
let mut updated_alpha = rm_idx == pv_idx; {
let rm = &mut self.state.root_moves[rm_idx];
rm.score = value;
rm.sel_depth = self.state.sel_depth;
rm.accumulate_score_stats(value);
}
if value > best_value {
best_value = value;
if value > alpha {
// Only the primary line feeds the best-move-change counter.
if pv_idx == 0 && rm_idx > pv_idx {
self.state.best_move_changes += 1.0;
}
alpha = value;
best_rm_idx = rm_idx;
updated_alpha = true;
// Rebuild this root move's PV: the move itself plus the child
// PV collected at ply 1.
self.state.root_moves[rm_idx].pv.truncate(1);
self.state.root_moves[rm_idx].pv.extend_from_slice(&self.state.stack[1].pv);
if value >= beta {
break;
}
}
}
// Moves that never raised alpha only have a null-window bound; park
// them at -INFINITE so they sort behind moves with real scores.
if !updated_alpha {
self.state.root_moves[rm_idx].score = Value::new(-Value::INFINITE.raw());
}
}
self.state.root_moves.move_to_index(best_rm_idx, pv_idx);
best_value
}
/// Instance-method entry into the free-function `search_node`.
///
/// The `SearchContext` is built field-by-field here instead of calling
/// `create_context`: per-field borrows take only `self.tt` / `self.eval_hash`
/// / `self.history`, leaving `self.state` free for the simultaneous `&mut`
/// borrow passed to `search_node`. Calling `self.create_context()` would
/// keep all of `self` borrowed and conflict with that `&mut self.state`.
#[inline]
pub(super) fn search_node_wrapper<const NT: u8>(
&mut self,
pos: &mut Position,
depth: Depth,
alpha: Value,
beta: Value,
ply: i32,
cut_node: bool,
limits: &LimitsType,
time_manager: &mut TimeManagement,
) -> Value {
let ctx = SearchContext {
tt: &self.tt,
eval_hash: &self.eval_hash,
history: &self.history,
cont_history_sentinel: self.cont_history_sentinel,
generate_all_legal_moves: self.generate_all_legal_moves,
max_moves_to_draw: self.max_moves_to_draw,
thread_id: self.thread_id,
};
Self::search_node::<NT>(
&mut self.state,
&ctx,
pos,
depth,
alpha,
beta,
ply,
cut_node,
limits,
time_manager,
)
}
pub(super) fn search_node<const NT: u8>(
st: &mut SearchState,
ctx: &SearchContext<'_>,
pos: &mut Position,
depth: Depth,
alpha: Value,
beta: Value,
ply: i32,
cut_node: bool,
limits: &LimitsType,
time_manager: &mut TimeManagement,
) -> Value {
inc_stat!(st, nodes_searched);
inc_stat_by_depth!(st, nodes_by_depth, depth);
let pv_node = NT == NodeType::PV as u8 || NT == NodeType::Root as u8;
let mut depth = depth;
let in_check = pos.in_check();
let all_node = !(pv_node || cut_node);
let mut alpha = alpha;
let mut beta = beta;
if depth <= DEPTH_QS {
return qsearch::<NT>(st, ctx, pos, depth, alpha, beta, ply, limits, time_manager);
}
if ply >= MAX_PLY {
return if in_check {
Value::ZERO
} else {
nnue_evaluate(st, pos)
};
}
if pv_node && st.sel_depth < ply + 1 {
st.sel_depth = ply + 1;
}
if check_abort(st, ctx, limits, time_manager) {
return Value::ZERO;
}
if NT != NodeType::Root as u8 {
alpha = alpha.max(Value::mated_in(ply));
beta = beta.min(Value::mate_in(ply + 1));
if alpha >= beta {
return alpha;
}
}
st.stack[ply as usize].in_check = in_check;
st.stack[ply as usize].move_count = 0;
st.stack[(ply + 1) as usize].cutoff_cnt = 0;
if pv_node {
st.stack[ply as usize].pv.clear();
st.stack[(ply + 1) as usize].pv.clear();
}
let prior_reduction = take_prior_reduction(st, ply);
st.stack[ply as usize].reduction = 0;
let excluded_move = st.stack[ply as usize].excluded_move;
let tt_ctx = match probe_transposition::<NT>(
st,
ctx,
pos,
depth,
beta,
ply,
pv_node,
in_check,
excluded_move,
) {
ProbeOutcome::Continue(c) => c,
ProbeOutcome::Cutoff(value) => {
inc_stat!(st, tt_cutoff);
inc_stat_by_depth!(st, tt_cutoff_by_depth, depth);
return value;
}
};
let tt_move = tt_ctx.mv;
let tt_value = tt_ctx.value;
let tt_hit = tt_ctx.hit;
let tt_data = tt_ctx.data;
let _tt_capture = tt_ctx.capture;
let eval_ctx =
compute_eval_context(st, ctx, pos, ply, in_check, pv_node, &tt_ctx, excluded_move);
let mut improving = eval_ctx.improving;
let opponent_worsening = eval_ctx.opponent_worsening;
if ply >= 1 {
let prev_ply = (ply - 1) as usize;
let prev_move = st.stack[prev_ply].current_move;
let prev_in_check = st.stack[prev_ply].in_check;
let prior_capture = st.stack[prev_ply].cont_hist_key.is_some_and(|k| k.capture);
if prev_move.is_normal()
&& !prev_in_check
&& !prior_capture
&& eval_ctx.static_eval != Value::NONE
&& st.stack[prev_ply].static_eval != Value::NONE
{
let prev_eval = st.stack[prev_ply].static_eval.raw();
let curr_eval = eval_ctx.static_eval.raw();
let eval_diff = (-(prev_eval + curr_eval)).clamp(-200, 156) + 58;
let opponent = !pos.side_to_move();
let prev_sq = prev_move.to();
ctx.history.with_write(|h| {
h.main_history.update(opponent, prev_move, eval_diff * 9);
if !tt_hit {
let prev_piece = pos.piece_on(prev_sq);
if prev_piece.piece_type() != PieceType::Pawn && !prev_move.is_promotion() {
let pawn_idx = pos.pawn_history_index();
h.pawn_history.update(pawn_idx, prev_piece, prev_sq, eval_diff * 14);
}
}
});
}
}
if prior_reduction
>= if depth < IIR_DEPTH_BOUNDARY {
IIR_PRIOR_REDUCTION_THRESHOLD_SHALLOW
} else {
IIR_PRIOR_REDUCTION_THRESHOLD_DEEP
}
&& !opponent_worsening
{
depth += 1;
}
if prior_reduction >= 2
&& depth >= 2
&& ply >= 1
&& eval_ctx.static_eval != Value::NONE
&& st.stack[(ply - 1) as usize].static_eval != Value::NONE
&& eval_ctx.static_eval + st.stack[(ply - 1) as usize].static_eval
> Value::new(IIR_EVAL_SUM_THRESHOLD)
{
depth -= 1;
}
if let Some(v) = try_razoring::<NT>(
st,
ctx,
pos,
depth,
alpha,
beta,
ply,
pv_node,
in_check,
eval_ctx.static_eval,
limits,
time_manager,
) {
return v;
}
let tt_capture = tt_move.is_some() && pos.is_capture(tt_move);
if let Some(v) = try_futility_pruning(FutilityParams {
depth,
beta,
static_eval: eval_ctx.static_eval,
correction_value: eval_ctx.correction_value,
improving,
opponent_worsening,
tt_hit,
tt_move_exists: tt_move.is_some(),
tt_capture,
pv_node,
in_check,
}) {
inc_stat!(st, futility_pruned);
inc_stat_by_depth!(st, futility_by_depth, depth);
return v;
}
let (null_value, improving_after_null) = try_null_move_pruning::<NT, _>(
st,
ctx,
pos,
depth,
beta,
ply,
cut_node,
in_check,
eval_ctx.static_eval,
improving,
excluded_move,
limits,
time_manager,
Self::search_node::<{ NodeType::NonPV as u8 }>,
);
if let Some(v) = null_value {
return v;
}
improving = improving_after_null;
if !all_node && depth >= 6 && tt_move.is_none() && prior_reduction <= 3 {
depth -= 1;
}
if let Some(v) = try_probcut(
st,
ctx,
pos,
depth,
beta,
improving,
&tt_ctx,
ply,
eval_ctx.static_eval,
eval_ctx.unadjusted_static_eval,
in_check,
limits,
time_manager,
Self::search_node::<{ NodeType::NonPV as u8 }>,
) {
return v;
}
if let Some(v) = try_small_probcut(depth, beta, &tt_ctx) {
return v;
}
let mut best_value = Value::new(-32001);
let mut best_move = Move::NONE;
let mut move_count = 0;
let mut quiets_tried = SearchedMoveList::new();
let mut captures_tried = SearchedMoveList::new();
let mover = pos.side_to_move();
let tt_move = if depth <= DEPTH_QS
&& tt_move.is_some()
&& (!pos.capture_stage(tt_move) && !pos.gives_check(tt_move) || depth < -16)
{
Move::NONE
} else {
tt_move
};
let cont_tables = cont_history_tables(st, ctx, ply);
let mut mp =
MovePicker::new(pos, tt_move, depth, ply, cont_tables, ctx.generate_all_legal_moves);
let tt_pv = st.stack[ply as usize].tt_pv;
let root_node = NT == NodeType::Root as u8;
let mut lmp_triggered = false;
loop {
let mv = ctx.history.with_read(|h| mp.next_move(pos, h));
if mv == Move::NONE {
break;
}
if mv == excluded_move {
continue;
}
if !pos.pseudo_legal(mv) {
continue;
}
if !pos.is_legal(mv) {
continue;
}
if check_abort(st, ctx, limits, time_manager) {
return Value::ZERO;
}
move_count += 1;
st.stack[ply as usize].move_count = move_count;
let is_capture = pos.is_capture(mv);
let gives_check = pos.gives_check(mv);
st.stack[(ply + 1) as usize].quiet_move_streak = if !is_capture && !gives_check {
st.stack[ply as usize].quiet_move_streak + 1
} else {
0
};
let mut new_depth = depth - 1;
let mut extension = 0i32;
if !root_node
&& mv == tt_move
&& excluded_move.is_none()
&& depth >= 6 + tt_pv as i32
&& tt_value != Value::NONE
&& !tt_value.is_mate_score()
&& tt_data.bound.is_lower_or_exact()
&& tt_data.depth >= depth - 3
{
let singular_beta_margin = (56 + 79 * (tt_pv && !pv_node) as i32) * depth / 58;
let singular_beta = tt_value - Value::new(singular_beta_margin);
let singular_depth = new_depth / 2;
st.stack[ply as usize].excluded_move = mv;
let singular_value = Self::search_node::<{ NodeType::NonPV as u8 }>(
st,
ctx,
pos,
singular_depth,
singular_beta - Value::new(1),
singular_beta,
ply,
cut_node,
limits,
time_manager,
);
st.stack[ply as usize].excluded_move = Move::NONE;
if singular_value < singular_beta {
inc_stat!(st, singular_extension);
let corr_val_adj = eval_ctx.correction_value.abs() / 249_096;
let tt_move_hist = ctx.history.with_read(|h| h.tt_move_history.get() as i32);
let double_margin = 4 + 205 * pv_node as i32
- 223 * !tt_capture as i32
- corr_val_adj
- 921 * tt_move_hist / 127649
- (ply > st.root_depth) as i32 * 45;
let triple_margin = 80 + 276 * pv_node as i32 - 249 * !tt_capture as i32
+ 86 * tt_pv as i32
- corr_val_adj
- (ply * 2 > st.root_depth * 3) as i32 * 52;
extension = 1
+ (singular_value < singular_beta - Value::new(double_margin)) as i32
+ (singular_value < singular_beta - Value::new(triple_margin)) as i32;
depth += 1;
} else if singular_value >= beta && !singular_value.is_mate_score() {
ctx.history.with_write(|h| {
h.tt_move_history
.update(super::tt_history::TTMoveHistory::multi_cut_bonus(depth));
});
inc_stat!(st, multi_cut);
return singular_value;
} else if tt_value >= beta {
extension = -3;
} else if cut_node {
extension = -2;
}
}
let delta = (beta.raw() - alpha.raw()).max(0);
let mut r = reduction(improving, depth, move_count, delta, st.root_delta.max(1));
if st.stack[ply as usize].tt_pv {
r += 931;
}
let lmr_depth = new_depth - r / 1024;
if !pv_node && !in_check && !is_capture && !best_value.is_loss() && !mv.is_pass() {
let lmp_limit = (3 + depth * depth) / (2 - improving as i32);
if move_count >= lmp_limit {
if !lmp_triggered && mp.is_quiet_stage() {
mp.skip_quiets();
lmp_triggered = true;
}
continue;
}
}
let step14_ctx = Step14Context {
pos,
mv,
depth,
ply,
best_value,
in_check,
gives_check,
is_capture,
lmr_depth,
mover,
cont_history_1: cont_history_ref(st, ctx, ply, 1),
cont_history_2: cont_history_ref(st, ctx, ply, 2),
static_eval: eval_ctx.static_eval,
alpha,
tt_move,
pawn_history_index: pos.pawn_history_index(),
};
match step14_pruning(ctx, step14_ctx) {
Step14Outcome::Skip {
best_value: updated,
} => {
inc_stat!(st, move_loop_pruned);
if let Some(v) = updated {
best_value = v;
}
continue;
}
Step14Outcome::Continue => {}
}
st.stack[ply as usize].current_move = mv;
let dirty_piece = pos.do_move_with_prefetch(mv, gives_check, ctx.tt);
nnue_push(st, dirty_piece);
st.nodes += 1;
if mv.is_pass() {
clear_cont_history_for_null(st, ctx, ply);
} else {
let cont_hist_piece = mv.moved_piece_after();
let cont_hist_to = mv.to();
set_cont_history_for_move(
st,
ctx,
ply,
in_check,
is_capture,
cont_hist_piece,
cont_hist_to,
);
}
if !mv.is_pass() {
if is_capture {
captures_tried.push(mv);
} else {
quiets_tried.push(mv);
}
}
new_depth += extension;
let msb_depth = msb(depth);
let tt_value_higher = tt_hit && tt_value != Value::NONE && tt_value > alpha;
let tt_depth_ge = tt_hit && tt_data.depth >= depth;
if st.stack[ply as usize].tt_pv {
r -= 2510
+ (pv_node as i32) * 963
+ (tt_value_higher as i32) * 916
+ (tt_depth_ge as i32) * (943 + (cut_node as i32) * 1180);
}
r += 679 - 6 * msb_depth;
r -= move_count * (67 - 2 * msb_depth);
r -= eval_ctx.correction_value.abs() / 27_160;
if cut_node {
let no_tt_move = !tt_hit || tt_move.is_none();
r += 2998 + 2 * msb_depth + (948 + 14 * msb_depth) * (no_tt_move as i32);
}
if tt_capture {
r += 1402 - 39 * msb_depth;
}
if st.stack[(ply + 1) as usize].cutoff_cnt > 2 {
r += 925 + 33 * msb_depth + (all_node as i32) * (701 + 224 * msb_depth);
}
r += st.stack[(ply + 1) as usize].quiet_move_streak * 51;
if mv == tt_move {
r -= 2121 + 28 * msb_depth;
}
let stat_score = if mv.is_pass() {
0 } else if is_capture {
let captured = pos.captured_piece();
let captured_pt = captured.piece_type();
let moved_piece = mv.moved_piece_after();
let hist = ctx
.history
.with_read(|h| h.capture_history.get(moved_piece, mv.to(), captured_pt) as i32);
782 * piece_value(captured) / 128 + hist
} else {
let moved_piece = mv.moved_piece_after();
let main_hist = ctx.history.with_read(|h| h.main_history.get(mover, mv) as i32);
let cont0 = cont_history_ref(st, ctx, ply, 1).get(moved_piece, mv.to()) as i32;
let cont1 = cont_history_ref(st, ctx, ply, 2).get(moved_piece, mv.to()) as i32;
2 * main_hist + cont0 + cont1
};
st.stack[ply as usize].stat_score = stat_score;
r -= stat_score * (729 - 12 * msb_depth) / 8192;
let mut value = if depth >= 2 && move_count > 1 {
inc_stat!(st, lmr_applied);
let d = std::cmp::max(1, std::cmp::min(new_depth - r / 1024, new_depth + 2))
+ pv_node as i32;
#[cfg(feature = "search-stats")]
{
let reduction = (r / 1024).max(0) as usize;
let reduction_idx = reduction.min(15);
st.stats.lmr_reduction_histogram[reduction_idx] += 1;
let new_depth_idx = (d as usize).min(STATS_MAX_DEPTH - 1);
st.stats.lmr_new_depth_histogram[new_depth_idx] += 1;
}
#[cfg(feature = "search-stats")]
if d == 1 {
let parent_depth_idx = (depth as usize).min(STATS_MAX_DEPTH - 1);
st.stats.lmr_to_depth1_from[parent_depth_idx] += 1;
}
#[cfg(feature = "search-stats")]
{
if cut_node {
st.stats.lmr_cut_node_applied += 1;
if d == 1 {
st.stats.lmr_cut_node_to_depth1 += 1;
}
} else {
st.stats.lmr_non_cut_node_applied += 1;
if d == 1 {
st.stats.lmr_non_cut_node_to_depth1 += 1;
}
}
}
let reduction_from_parent = (depth - 1) - d;
st.stack[ply as usize].reduction = reduction_from_parent;
let mut value = -Self::search_node::<{ NodeType::NonPV as u8 }>(
st,
ctx,
pos,
d,
-alpha - Value::new(1),
-alpha,
ply + 1,
true,
limits,
time_manager,
);
st.stack[ply as usize].reduction = 0;
if value > alpha {
let do_deeper =
d < new_depth && value > (best_value + Value::new(43 + 2 * new_depth));
let do_shallower = value < best_value + Value::new(9);
new_depth += do_deeper as i32 - do_shallower as i32;
if new_depth > d {
inc_stat!(st, lmr_research);
value = -Self::search_node::<{ NodeType::NonPV as u8 }>(
st,
ctx,
pos,
new_depth,
-alpha - Value::new(1),
-alpha,
ply + 1,
!cut_node,
limits,
time_manager,
);
}
if !mv.is_pass() {
let moved_piece = mv.moved_piece_after();
let to_sq = mv.to();
const CONTHIST_BONUSES: &[(i32, i32)] =
&[(1, 1108), (2, 652), (3, 273), (4, 572), (5, 126), (6, 449)];
for &(offset, weight) in CONTHIST_BONUSES {
if st.stack[ply as usize].in_check && offset > 2 {
break;
}
let idx = ply - offset;
if idx < 0 {
break;
}
if let Some(key) = st.stack[idx as usize].cont_hist_key {
let in_check_idx = key.in_check as usize;
let capture_idx = key.capture as usize;
let bonus = 1412 * weight / 1024 + if offset < 2 { 80 } else { 0 };
ctx.history.with_write(|h| {
h.continuation_history[in_check_idx][capture_idx].update(
key.piece,
key.to,
moved_piece,
to_sq,
bonus,
);
});
}
}
}
} else if value > alpha && value < best_value + Value::new(9) {
#[allow(unused_assignments)]
{
new_depth -= 1;
}
}
if pv_node && (move_count == 1 || value > alpha) {
st.stack[ply as usize].reduction = 0;
-Self::search_node::<{ NodeType::PV as u8 }>(
st,
ctx,
pos,
depth - 1,
-beta,
-alpha,
ply + 1,
false,
limits,
time_manager,
)
} else {
value
}
} else if !pv_node || move_count > 1 {
st.stack[ply as usize].reduction = 0;
let mut value = -Self::search_node::<{ NodeType::NonPV as u8 }>(
st,
ctx,
pos,
depth - 1,
-alpha - Value::new(1),
-alpha,
ply + 1,
!cut_node,
limits,
time_manager,
);
st.stack[ply as usize].reduction = 0;
if pv_node && value > alpha && value < beta {
st.stack[ply as usize].reduction = 0;
value = -Self::search_node::<{ NodeType::PV as u8 }>(
st,
ctx,
pos,
depth - 1,
-beta,
-alpha,
ply + 1,
false,
limits,
time_manager,
);
st.stack[ply as usize].reduction = 0;
}
value
} else {
st.stack[ply as usize].reduction = 0;
-Self::search_node::<{ NodeType::PV as u8 }>(
st,
ctx,
pos,
depth - 1,
-beta,
-alpha,
ply + 1,
false,
limits,
time_manager,
)
};
nnue_pop(st);
pos.undo_move(mv);
if mv.is_pass() && !value.is_mate_score() {
let bonus = get_scaled_pass_move_bonus(pos.game_ply());
if bonus != 0 {
value += Value::new(bonus);
}
}
if st.abort {
return Value::ZERO;
}
if value > best_value {
best_value = value;
if value > alpha {
best_move = mv;
alpha = value;
if pv_node {
let child_pv = st.stack[(ply + 1) as usize].pv.clone();
st.stack[ply as usize].update_pv(mv, &child_pv);
}
if value >= beta {
st.stack[ply as usize].cutoff_cnt += 1;
inc_stat_by_depth!(st, cutoff_by_depth, depth);
if move_count == 1 {
inc_stat_by_depth!(st, first_move_cutoff_by_depth, depth);
}
#[cfg(feature = "search-stats")]
{
let d = (depth as usize).min(STATS_MAX_DEPTH - 1);
st.stats.move_count_sum_by_depth[d] += move_count as u64;
}
break;
}
}
}
}
if move_count == 0 {
if excluded_move.is_some() {
return alpha;
}
if in_check {
return Value::mated_in(ply);
} else {
return Value::ZERO;
}
}
if best_move.is_some() && !best_move.is_pass() {
let is_best_capture = pos.is_capture(best_move);
let is_tt_move = best_move == tt_move;
let bonus = stat_bonus(depth, is_tt_move);
let malus = quiet_malus(depth, quiets_tried.len());
let us = pos.side_to_move();
let pawn_key_idx = pos.pawn_history_index();
let best_moved_pc = pos.moved_piece(best_move);
let best_cont_pc = if best_move.is_promotion() {
best_moved_pc.promote().unwrap_or(best_moved_pc)
} else {
best_moved_pc
};
let best_to = best_move.to();
let max_ply_back = if in_check { 2 } else { 6 };
if !is_best_capture {
let scaled_bonus = bonus * 978 / 1024;
let scaled_malus = malus * 1115 / 1024;
ctx.history.with_write(|h| {
h.main_history.update(us, best_move, scaled_bonus);
if ply < LOW_PLY_HISTORY_SIZE as i32 {
let low_ply_bonus = low_ply_history_bonus(scaled_bonus);
h.low_ply_history.update(ply as usize, best_move, low_ply_bonus);
}
for &(ply_back, weight) in CONTINUATION_HISTORY_WEIGHTS.iter() {
if ply_back > max_ply_back {
continue;
}
if ply >= ply_back as i32 {
let prev_ply = (ply - ply_back as i32) as usize;
if let Some(key) = st.stack[prev_ply].cont_hist_key {
let in_check_idx = key.in_check as usize;
let capture_idx = key.capture as usize;
let weighted_bonus = continuation_history_bonus_with_offset(
scaled_bonus * weight / 1024,
ply_back,
);
h.continuation_history[in_check_idx][capture_idx].update(
key.piece,
key.to,
best_cont_pc,
best_to,
weighted_bonus,
);
}
}
}
let pawn_bonus = pawn_history_bonus(scaled_bonus);
h.pawn_history.update(pawn_key_idx, best_cont_pc, best_to, pawn_bonus);
for &m in quiets_tried.iter() {
if m != best_move {
h.main_history.update(us, m, -scaled_malus);
if ply < LOW_PLY_HISTORY_SIZE as i32 {
let low_ply_malus = low_ply_history_bonus(-scaled_malus);
h.low_ply_history.update(ply as usize, m, low_ply_malus);
}
let moved_pc = pos.moved_piece(m);
let cont_pc = if m.is_promotion() {
moved_pc.promote().unwrap_or(moved_pc)
} else {
moved_pc
};
let to = m.to();
for &(ply_back, weight) in CONTINUATION_HISTORY_WEIGHTS.iter() {
if ply_back > max_ply_back {
continue;
}
if ply >= ply_back as i32 {
let prev_ply = (ply - ply_back as i32) as usize;
if let Some(key) = st.stack[prev_ply].cont_hist_key {
let in_check_idx = key.in_check as usize;
let capture_idx = key.capture as usize;
let weighted_malus = continuation_history_bonus_with_offset(
-scaled_malus * weight / 1024,
ply_back,
);
h.continuation_history[in_check_idx][capture_idx].update(
key.piece,
key.to,
cont_pc,
to,
weighted_malus,
);
}
}
}
let pawn_malus = pawn_history_bonus(-scaled_malus);
h.pawn_history.update(pawn_key_idx, cont_pc, to, pawn_malus);
}
}
});
} else {
let captured_pt = pos.piece_on(best_to).piece_type();
ctx.history.with_write(|h| {
h.capture_history.update(best_cont_pc, best_to, captured_pt, bonus)
});
}
let cap_malus = capture_malus(depth, captures_tried.len());
ctx.history.with_write(|h| {
for &m in captures_tried.iter() {
if m != best_move {
let moved_pc = pos.moved_piece(m);
let cont_pc = if m.is_promotion() {
moved_pc.promote().unwrap_or(moved_pc)
} else {
moved_pc
};
let to = m.to();
let captured_pt = pos.piece_on(to).piece_type();
h.capture_history.update(
cont_pc,
to,
captured_pt,
-cap_malus * 1431 / 1024,
);
}
}
});
if ply >= 1 {
let prev_ply = (ply - 1) as usize;
let prev_move_count = st.stack[prev_ply].move_count;
let prev_tt_hit = st.stack[prev_ply].tt_hit;
if prev_move_count == 1 + (prev_tt_hit as i32)
&& pos.captured_piece() == Piece::NONE
{
if let Some(key) = st.stack[prev_ply].cont_hist_key {
let prev_sq = key.to;
let prev_piece = pos.piece_on(prev_sq);
let penalty_base = -cap_malus * 622 / 1024;
let prev_in_check = st.stack[prev_ply].in_check;
let prev_max_ply_back = if prev_in_check { 2 } else { 6 };
ctx.history.with_write(|h| {
for &(ply_back, weight) in CONTINUATION_HISTORY_WEIGHTS.iter() {
if ply_back > prev_max_ply_back {
continue;
}
let target_ply = ply - 1 - ply_back as i32;
if target_ply >= 0 {
if let Some(target_key) =
st.stack[target_ply as usize].cont_hist_key
{
let in_check_idx = target_key.in_check as usize;
let capture_idx = target_key.capture as usize;
let weighted_penalty = penalty_base * weight / 1024
+ if ply_back <= 2 {
CONTINUATION_HISTORY_NEAR_PLY_OFFSET
} else {
0
};
h.continuation_history[in_check_idx][capture_idx].update(
target_key.piece,
target_key.to,
prev_piece,
prev_sq,
weighted_penalty,
);
}
}
}
});
}
}
}
if !pv_node && tt_move.is_some() {
let bonus = if best_move == tt_move {
TT_MOVE_HISTORY_BONUS
} else {
TT_MOVE_HISTORY_MALUS
};
ctx.history.with_write(|h| h.tt_move_history.update(bonus));
}
}
else if ply >= 1 {
let prev_ply = (ply - 1) as usize;
if let Some(prev_key) = st.stack[prev_ply].cont_hist_key {
let prior_capture = prev_key.capture;
let prev_sq = prev_key.to;
if !prior_capture {
let parent_stat_score = st.stack[prev_ply].stat_score;
let parent_move_count = st.stack[prev_ply].move_count;
let parent_in_check = st.stack[prev_ply].in_check;
let parent_static_eval = st.stack[prev_ply].static_eval;
let static_eval = st.stack[ply as usize].static_eval;
let mut bonus_scale: i32 = -228;
bonus_scale -= parent_stat_score / 104;
bonus_scale += (63 * depth).min(508);
bonus_scale += 184 * (parent_move_count > 8) as i32;
bonus_scale += 143
* (!in_check
&& static_eval != Value::NONE
&& best_value <= static_eval - Value::new(92))
as i32;
bonus_scale += 149
* (!parent_in_check
&& parent_static_eval != Value::NONE
&& best_value <= -parent_static_eval - Value::new(70))
as i32;
bonus_scale = bonus_scale.max(0);
let scaled_bonus = (144 * depth - 92).min(1365) as i64 * bonus_scale as i64;
let prev_piece = pos.piece_on(prev_sq);
let prev_max_ply_back = if parent_in_check { 2 } else { 6 };
let cont_bonus = (scaled_bonus * 400 / 32768) as i32;
let prev_move = st.stack[prev_ply].current_move;
let main_bonus = (scaled_bonus * 220 / 32768) as i32;
let opponent = !pos.side_to_move();
let pawn_key_idx = pos.pawn_history_index();
let pawn_bonus = (scaled_bonus * 1164 / 32768) as i32;
let update_pawn =
prev_piece.piece_type() != PieceType::Pawn && !prev_move.is_promotion();
ctx.history.with_write(|h| {
for &(ply_back, weight) in CONTINUATION_HISTORY_WEIGHTS.iter() {
if ply_back > prev_max_ply_back {
continue;
}
let target_ply = ply - 1 - ply_back as i32;
if target_ply >= 0 {
if let Some(target_key) =
st.stack[target_ply as usize].cont_hist_key
{
let in_check_idx = target_key.in_check as usize;
let capture_idx = target_key.capture as usize;
let weighted_bonus = cont_bonus * weight / 1024
+ if ply_back <= 2 {
CONTINUATION_HISTORY_NEAR_PLY_OFFSET
} else {
0
};
h.continuation_history[in_check_idx][capture_idx].update(
target_key.piece,
target_key.to,
prev_piece,
prev_sq,
weighted_bonus,
);
}
}
}
h.main_history.update(opponent, prev_move, main_bonus);
if update_pawn {
h.pawn_history.update(pawn_key_idx, prev_piece, prev_sq, pawn_bonus);
}
});
} else {
let prev_piece = pos.piece_on(prev_sq);
let captured_piece = pos.captured_piece();
debug_assert!(
captured_piece != Piece::NONE,
"prior_capture is true but captured_piece is NONE"
);
if captured_piece != Piece::NONE {
ctx.history.with_write(|h| {
h.capture_history.update(
prev_piece,
prev_sq,
captured_piece.piece_type(),
PRIOR_CAPTURE_COUNTERMOVE_BONUS,
);
});
}
}
}
}
if !in_check && best_move.is_some() && !pos.is_capture(best_move) {
let static_eval = st.stack[ply as usize].static_eval;
if static_eval != Value::NONE
&& ((best_value < static_eval && best_value < beta) || best_value > static_eval)
{
let bonus = ((best_value.raw() - static_eval.raw()) * depth / 8)
.clamp(-CORRECTION_HISTORY_LIMIT / 4, CORRECTION_HISTORY_LIMIT / 4);
update_correction_history(st, ctx, pos, ply, bonus);
}
}
if excluded_move.is_none() {
let bound = if best_value >= beta {
Bound::Lower
} else if pv_node && best_move.is_some() {
Bound::Exact
} else {
Bound::Upper
};
tt_ctx.result.write(
tt_ctx.key,
value_to_tt(best_value, ply),
pv_node,
bound,
depth,
best_move,
eval_ctx.unadjusted_static_eval,
ctx.tt.generation(),
);
inc_stat_by_depth!(st, tt_write_by_depth, depth);
}
best_value
}
}
// SAFETY: NOTE(review) — this asserts `SearchWorker` may be moved across
// threads despite containing fields that are not auto-`Send` (presumably raw
// pointers / `NonNull` into search state, given the imports above). The
// soundness argument — that each worker's pointed-to data is owned by and
// accessed from exactly one thread at a time — is not visible in this file;
// confirm against the thread-spawn site before relying on this impl.
unsafe impl Send for SearchWorker {}