use chess::Board;
use ndarray::Array1;
use crate::similarity_search::{SimilaritySearch, SearchResult};
use crate::tactical_search::{TacticalSearch, TacticalConfig};
use crate::position_encoder::PositionEncoder;
use std::collections::HashMap;
use std::time::{Duration, Instant};

/// Snapshot of cache occupancy and configuration, as reported by
/// `CoreEvaluator::get_cache_stats`.
#[derive(Debug, Clone)]
pub struct CacheStats {
    pub position_cache_size: usize,
    pub evaluation_cache_size: usize,
    pub max_cache_size: usize,
    pub cache_ttl_secs: u64,
}

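/// Top-level evaluator combining tactical search, similarity lookup over
/// previously seen positions, and lightweight strategic heuristics, behind
/// TTL-bounded caches keyed by FEN.
///
/// A minimal usage sketch (marked `ignore` because the import path depends
/// on this crate's layout):
///
/// ```ignore
/// let mut evaluator = CoreEvaluator::new();
/// let board = chess::Board::default();
/// let result = evaluator.evaluate_position(&board);
/// assert!(result.final_evaluation.is_finite());
/// ```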
pub struct CoreEvaluator {
    /// Tactical search over concrete lines.
    tactical_evaluator: TacticalSearch,
    /// Nearest-neighbour lookup over previously learned positions.
    pub similarity_engine: SimilarityEngine,
    /// Heuristic development/center/activity analysis.
    strategic_analyzer: StrategicAnalyzer,
    /// Weighted combination of the three evaluation sources.
    evaluation_blender: EvaluationBlender,
    /// Encodes a `Board` into a fixed-length feature vector.
    position_encoder: PositionEncoder,
    /// FEN -> (encoded vector, insertion time); bounded by `max_cache_size`.
    position_cache: HashMap<String, (Array1<f32>, Instant)>,
    /// FEN -> (evaluation result, insertion time); entries expire after `cache_ttl`.
    evaluation_cache: HashMap<String, (CoreEvaluationResult, Instant)>,
    max_cache_size: usize,
    cache_ttl: Duration,
}

impl CoreEvaluator {
    pub fn new() -> Self {
        Self {
            tactical_evaluator: TacticalSearch::new(TacticalConfig::default()),
            similarity_engine: SimilarityEngine::new(),
            strategic_analyzer: StrategicAnalyzer::new(),
            evaluation_blender: EvaluationBlender::new(),
            position_encoder: PositionEncoder::new(1024),
            position_cache: HashMap::with_capacity(1000),
            evaluation_cache: HashMap::with_capacity(500),
            max_cache_size: 1000,
            cache_ttl: Duration::from_secs(300),
        }
    }

    pub fn new_with_cache_config(max_cache_size: usize, cache_ttl_secs: u64) -> Self {
        Self {
            tactical_evaluator: TacticalSearch::new(TacticalConfig::default()),
            similarity_engine: SimilarityEngine::new(),
            strategic_analyzer: StrategicAnalyzer::new(),
            evaluation_blender: EvaluationBlender::new(),
            position_encoder: PositionEncoder::new(1024),
            position_cache: HashMap::with_capacity(max_cache_size),
            evaluation_cache: HashMap::with_capacity(max_cache_size / 2),
            max_cache_size,
            cache_ttl: Duration::from_secs(cache_ttl_secs),
        }
    }

    pub fn evaluate_position(&mut self, board: &Board) -> CoreEvaluationResult {
        let fen = board.to_string();
        let now = Instant::now();

        // Serve a cached result if it is still within the TTL.
        if let Some((cached_result, cached_time)) = self.evaluation_cache.get(&fen) {
            if now.duration_since(*cached_time) < self.cache_ttl {
                return cached_result.clone();
            }
        }

        self.evict_expired_cache_entries(now);

        let tactical_result = self.tactical_evaluator.search(board);
        let tactical_eval = tactical_result.evaluation;

        let position_vector = self.get_cached_position_vector(board, &fen, now);

        let similarity_insights = self
            .similarity_engine
            .find_strategic_insights_with_vector(&position_vector);

        // When the similarity lookup is confident, the cheaper strategic pass suffices.
        let strategic_insights = if similarity_insights.confidence > 0.8 {
            self.strategic_analyzer.analyze_initiative_fast(board)
        } else {
            self.strategic_analyzer.analyze_initiative(board)
        };

        let final_evaluation = self.evaluation_blender.blend_all(
            tactical_eval,
            &similarity_insights,
            &strategic_insights,
        );

        let unique_insights_provided =
            self.provides_unique_insights(&similarity_insights, &strategic_insights);

        let result = CoreEvaluationResult {
            final_evaluation,
            tactical_component: tactical_eval,
            similarity_insights,
            strategic_insights,
            unique_insights_provided,
        };

        self.evaluation_cache.insert(fen, (result.clone(), now));
        // Bound the evaluation cache the same way the position cache is bounded,
        // so it cannot grow without limit inside a single TTL window.
        if self.evaluation_cache.len() > self.max_cache_size {
            if let Some(oldest_key) = self
                .evaluation_cache
                .iter()
                .min_by_key(|(_, (_, time))| *time)
                .map(|(key, _)| key.clone())
            {
                self.evaluation_cache.remove(&oldest_key);
            }
        }

        result
    }

    pub fn learn_from_position(&mut self, board: &Board, evaluation: f32) {
        let fen = board.to_string();
        let now = Instant::now();
        let position_vector = self.get_cached_position_vector(board, &fen, now);
        self.similarity_engine.add_position_with_vector(position_vector, evaluation);
    }

    /// Returns the cached encoding for `fen` if still fresh; otherwise encodes
    /// the board, caches the vector, and evicts the oldest entry once the
    /// cache exceeds `max_cache_size`.
    fn get_cached_position_vector(&mut self, board: &Board, fen: &str, now: Instant) -> Array1<f32> {
        if let Some((cached_vector, cached_time)) = self.position_cache.get(fen) {
            if now.duration_since(*cached_time) < self.cache_ttl {
                return cached_vector.clone();
            }
        }

        let position_vector = self.position_encoder.encode(board);
        self.position_cache.insert(fen.to_string(), (position_vector.clone(), now));

        if self.position_cache.len() > self.max_cache_size {
            self.evict_oldest_position_cache_entry();
        }

        position_vector
    }

    /// Drops entries older than the TTL from both caches.
    fn evict_expired_cache_entries(&mut self, now: Instant) {
        self.evaluation_cache.retain(|_, (_, cached_time)| {
            now.duration_since(*cached_time) < self.cache_ttl
        });

        self.position_cache.retain(|_, (_, cached_time)| {
            now.duration_since(*cached_time) < self.cache_ttl
        });
    }

    fn evict_oldest_position_cache_entry(&mut self) {
        if let Some(oldest_key) = self.position_cache
            .iter()
            .min_by_key(|(_, (_, time))| *time)
            .map(|(key, _)| key.clone())
        {
            self.position_cache.remove(&oldest_key);
        }
    }

    pub fn get_cache_stats(&self) -> CacheStats {
        CacheStats {
            position_cache_size: self.position_cache.len(),
            evaluation_cache_size: self.evaluation_cache.len(),
            max_cache_size: self.max_cache_size,
            cache_ttl_secs: self.cache_ttl.as_secs(),
        }
    }

    pub fn clear_caches(&mut self) {
        self.position_cache.clear();
        self.evaluation_cache.clear();
    }

    /// True when similarity or strategic analysis contributed signal beyond
    /// the tactical search alone.
    fn provides_unique_insights(
        &self,
        similarity: &SimilarityInsights,
        strategic: &StrategicInsights,
    ) -> bool {
        !similarity.similar_positions.is_empty() || strategic.initiative_advantage.abs() > 0.1
    }
}

#[derive(Debug, Clone)]
pub struct CoreEvaluationResult {
    /// Blended evaluation from all three sources.
    pub final_evaluation: f32,
    /// Raw evaluation from the tactical search.
    pub tactical_component: f32,
    pub similarity_insights: SimilarityInsights,
    pub strategic_insights: StrategicInsights,
    pub unique_insights_provided: bool,
}

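/// Stores encoded positions with their evaluations and answers
/// nearest-neighbour queries against them.
///
/// A minimal sketch of the board-based entry points (marked `ignore` because
/// the import path depends on this crate's layout):
///
/// ```ignore
/// let mut engine = SimilarityEngine::new();
/// engine.add_position(&chess::Board::default(), 0.0);
/// let insights = engine.find_strategic_insights(&chess::Board::default());
/// assert!(insights.confidence.is_finite());
/// ```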
pub struct SimilarityEngine {
    similarity_search: SimilaritySearch,
    /// Every (vector, evaluation) pair ever added; kept alongside the search
    /// index but not consulted during search in this module.
    position_database: Vec<(Array1<f32>, f32)>,
}

impl SimilarityEngine {
    pub fn new() -> Self {
        Self {
            similarity_search: SimilaritySearch::new(1024),
            position_database: Vec::new(),
        }
    }

    pub fn add_position(&mut self, board: &Board, evaluation: f32) {
        let vector = self.encode_position(board);
        self.add_position_with_vector(vector, evaluation);
    }

    pub fn add_position_with_vector(&mut self, vector: Array1<f32>, evaluation: f32) {
        self.position_database.push((vector.clone(), evaluation));
        self.similarity_search.add_position(vector, evaluation);
    }

    pub fn find_strategic_insights(&self, board: &Board) -> SimilarityInsights {
        let query_vector = self.encode_position(board);
        self.find_strategic_insights_with_vector(&query_vector)
    }

    pub fn find_strategic_insights_with_vector(&self, query_vector: &Array1<f32>) -> SimilarityInsights {
        let raw_results = self.similarity_search.search_optimized(query_vector, 3);

        let similar_positions: Vec<SearchResult> = raw_results
            .into_iter()
            .map(|(vector, evaluation, similarity)| SearchResult {
                vector,
                evaluation,
                similarity,
            })
            .collect();

        let average_evaluation = if !similar_positions.is_empty() {
            similar_positions.iter().map(|s| s.evaluation).sum::<f32>() / similar_positions.len() as f32
        } else {
            0.0
        };

        let confidence = self.calculate_confidence_from_results(&similar_positions);

        SimilarityInsights {
            similar_positions,
            suggested_evaluation: average_evaluation,
            confidence,
        }
    }

    /// Lightweight material encoding used by the board-based entry points:
    /// signed piece values written sequentially in square-scan order.
    /// `CoreEvaluator` feeds vectors from `PositionEncoder::encode` into the
    /// same index, so this encoding must stay consistent with it whenever
    /// both entry points are used.
    fn encode_position(&self, board: &Board) -> Array1<f32> {
        let mut vector = Array1::zeros(1024);

        // Pawn, knight, bishop, rook, queen, king (the king carries no material value).
        let piece_values = [1.0, 3.0, 3.0, 5.0, 9.0, 0.0];
        let mut material_index = 0;

        for square in chess::ALL_SQUARES {
            if let Some(piece) = board.piece_on(square) {
                let value = piece_values[piece as usize];
                if material_index < 1024 {
                    if board.color_on(square) == Some(chess::Color::White) {
                        vector[material_index] = value;
                    } else {
                        vector[material_index] = -value;
                    }
                    material_index += 1;
                }
            }
        }

        vector
    }

    /// Confidence is the mean similarity score of the returned neighbours.
    fn calculate_confidence_from_results(&self, similar_positions: &[SearchResult]) -> f32 {
        if similar_positions.is_empty() {
            0.0
        } else {
            similar_positions.iter().map(|s| s.similarity).sum::<f32>() / similar_positions.len() as f32
        }
    }
}

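/// Heuristic positional analysis: development, central pawn presence, and a
/// (currently stubbed) piece-activity term, averaged into an initiative score.
///
/// A small sketch (marked `ignore`; paths depend on crate layout):
///
/// ```ignore
/// let analyzer = StrategicAnalyzer::new();
/// let insights = analyzer.analyze_initiative(&chess::Board::default());
/// // The starting position is symmetric, so neither side has the initiative.
/// assert_eq!(insights.initiative_advantage, 0.0);
/// ```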
pub struct StrategicAnalyzer {
    // NOTE: stored but not yet consulted; analyze_initiative currently
    // averages its three factors with equal weight.
    initiative_factors: InitiativeFactors,
}

impl StrategicAnalyzer {
    pub fn new() -> Self {
        Self {
            initiative_factors: InitiativeFactors::default(),
        }
    }

    pub fn analyze_initiative(&self, board: &Board) -> StrategicInsights {
        let development_advantage = self.calculate_development_advantage(board);
        let center_control = self.calculate_center_control(board);
        let piece_activity = self.calculate_piece_activity(board);

        let initiative_advantage = (development_advantage + center_control + piece_activity) / 3.0;

        StrategicInsights {
            initiative_advantage,
            development_advantage,
            center_control,
            piece_activity,
            strategic_recommendation: self.generate_recommendation(initiative_advantage),
        }
    }

    /// Cheaper variant used when similarity confidence is high: skips the
    /// piece-activity term and averages the remaining two factors.
    pub fn analyze_initiative_fast(&self, board: &Board) -> StrategicInsights {
        let development_advantage = self.calculate_development_advantage(board);
        let center_control = self.calculate_center_control(board);
        let piece_activity = 0.0;

        let initiative_advantage = (development_advantage + center_control) / 2.0;

        StrategicInsights {
            initiative_advantage,
            development_advantage,
            center_control,
            piece_activity,
            strategic_recommendation: self.generate_recommendation(initiative_advantage),
        }
    }

    /// +0.1 per White minor piece moved off its home square, -0.1 per Black
    /// minor piece likewise (an empty home square is treated as development).
    fn calculate_development_advantage(&self, board: &Board) -> f32 {
        let mut advantage = 0.0;

        let starting_squares = [
            (chess::Square::B1, chess::Color::White), (chess::Square::G1, chess::Color::White),
            (chess::Square::C1, chess::Color::White), (chess::Square::F1, chess::Color::White),
            (chess::Square::B8, chess::Color::Black), (chess::Square::G8, chess::Color::Black),
            (chess::Square::C8, chess::Color::Black), (chess::Square::F8, chess::Color::Black),
        ];

        for (square, color) in starting_squares {
            if board.piece_on(square).is_none() {
                match color {
                    chess::Color::White => advantage += 0.1,
                    chess::Color::Black => advantage -= 0.1,
                }
            }
        }

        advantage
    }

    /// +/-0.2 per pawn occupying one of the four central squares.
    fn calculate_center_control(&self, board: &Board) -> f32 {
        let mut control = 0.0;
        let center_squares = [chess::Square::D4, chess::Square::D5, chess::Square::E4, chess::Square::E5];

        for square in center_squares {
            if let Some(piece) = board.piece_on(square) {
                if piece == chess::Piece::Pawn {
                    match board.color_on(square) {
                        Some(chess::Color::White) => control += 0.2,
                        Some(chess::Color::Black) => control -= 0.2,
                        None => {}
                    }
                }
            }
        }

        control
    }

    /// Placeholder: piece activity is not yet implemented and always
    /// contributes zero.
    fn calculate_piece_activity(&self, _board: &Board) -> f32 {
        0.0
    }

    fn generate_recommendation(&self, initiative_advantage: f32) -> String {
        if initiative_advantage > 0.2 {
            "Maintain aggressive stance, capitalize on initiative".to_string()
        } else if initiative_advantage < -0.2 {
            "Consolidate position, seek counterplay opportunities".to_string()
        } else {
            "Balanced position, seek gradual improvements".to_string()
        }
    }
}

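/// Linear blend of the three evaluation sources. With the default weights the
/// result is `0.6 * tactical + 0.25 * similarity + 0.15 * initiative`, where
/// the similarity term is only included when its confidence exceeds 0.5.
///
/// For example, tactical 1.0, a confident similarity suggestion of 0.4, and
/// initiative 0.2 blend to `0.6 + 0.1 + 0.03 = 0.73`.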
pub struct EvaluationBlender {
    tactical_weight: f32,
    similarity_weight: f32,
    strategic_weight: f32,
}

impl EvaluationBlender {
    pub fn new() -> Self {
        Self {
            tactical_weight: 0.6,
            similarity_weight: 0.25,
            strategic_weight: 0.15,
        }
    }

    pub fn blend_all(
        &self,
        tactical_eval: f32,
        similarity: &SimilarityInsights,
        strategic: &StrategicInsights,
    ) -> f32 {
        let mut final_eval = tactical_eval * self.tactical_weight;

        // Only trust the similarity suggestion when its confidence is decent.
        if similarity.confidence > 0.5 {
            final_eval += similarity.suggested_evaluation * self.similarity_weight;
        }

        final_eval += strategic.initiative_advantage * self.strategic_weight;

        final_eval
    }
}

#[derive(Debug, Clone)]
pub struct SimilarityInsights {
    pub similar_positions: Vec<SearchResult>,
    pub suggested_evaluation: f32,
    pub confidence: f32,
}

#[derive(Debug, Clone)]
pub struct StrategicInsights {
    pub initiative_advantage: f32,
    pub development_advantage: f32,
    pub center_control: f32,
    pub piece_activity: f32,
    pub strategic_recommendation: String,
}

#[derive(Debug)]
struct InitiativeFactors {
    development_weight: f32,
    center_weight: f32,
    activity_weight: f32,
}

impl Default for InitiativeFactors {
    fn default() -> Self {
        Self {
            development_weight: 0.4,
            center_weight: 0.3,
            activity_weight: 0.3,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use chess::Board;
    use std::str::FromStr;

    #[test]
    fn test_core_evaluator_basic() {
        let mut evaluator = CoreEvaluator::new();
        let board = Board::default();

        evaluator.learn_from_position(&board, 0.0);

        let result = evaluator.evaluate_position(&board);

        assert!(result.final_evaluation.is_finite());
        assert!(result.tactical_component.is_finite());
    }
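
    // A sketch of the cache plumbing, using only methods defined above:
    // custom size/TTL configuration shows up in the reported stats, and
    // clearing empties both caches.
    #[test]
    fn test_cache_config_and_clear() {
        let mut evaluator = CoreEvaluator::new_with_cache_config(10, 60);
        let board = Board::default();

        let _ = evaluator.evaluate_position(&board);
        let stats = evaluator.get_cache_stats();
        assert_eq!(stats.max_cache_size, 10);
        assert_eq!(stats.cache_ttl_secs, 60);
        assert!(stats.position_cache_size > 0);
        assert!(stats.evaluation_cache_size > 0);

        evaluator.clear_caches();
        let stats = evaluator.get_cache_stats();
        assert_eq!(stats.position_cache_size, 0);
        assert_eq!(stats.evaluation_cache_size, 0);
    }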

    #[test]
    fn test_provides_unique_insights() {
        let mut evaluator = CoreEvaluator::new();

        let positions = [
            ("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1", 0.0),
            ("rnbqkbnr/pppp1ppp/8/4p3/4P3/8/PPPP1PPP/RNBQKBNR w KQkq e6 0 2", 0.0),
        ];

        for (fen, eval) in positions {
            let board = Board::from_str(fen).unwrap();
            evaluator.learn_from_position(&board, eval);
        }

        let test_board = Board::from_str("rnbqkbnr/pppp1ppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq - 1 2").unwrap();
        let result = evaluator.evaluate_position(&test_board);

        assert!(result.unique_insights_provided);
        assert!(!result.similarity_insights.similar_positions.is_empty());
    }
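
    // Worked example of the blend arithmetic in EvaluationBlender::blend_all:
    // with the default weights (0.6 tactical, 0.25 similarity, 0.15 strategic)
    // and a confident similarity suggestion, inputs 1.0 / 0.4 / 0.2 blend to
    // 0.6 + 0.1 + 0.03 = 0.73.
    #[test]
    fn test_evaluation_blender_weighting() {
        let blender = EvaluationBlender::new();
        let similarity = SimilarityInsights {
            similar_positions: Vec::new(),
            suggested_evaluation: 0.4,
            confidence: 0.9,
        };
        let strategic = StrategicInsights {
            initiative_advantage: 0.2,
            development_advantage: 0.0,
            center_control: 0.0,
            piece_activity: 0.0,
            strategic_recommendation: String::new(),
        };

        let blended = blender.blend_all(1.0, &similarity, &strategic);
        assert!((blended - 0.73).abs() < 1e-6);
    }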
}