chess_vector_engine/core_evaluation.rs

//! Production-optimized, unified evaluation system that embodies our unique value proposition:
//! Traditional Chess Engine + Vector Similarity + Strategic Initiative = Unique Strategic Insights
//!
//! This module consolidates all evaluation concerns into a single, cohesive system
//! following SOLID principles with clear separation of concerns.
//!
//! PERFORMANCE OPTIMIZATIONS:
//! - Cached position encodings to avoid recomputation
//! - Lazy evaluation strategies for expensive operations
//! - Early termination when confidence thresholds are met
//! - Memory-pooled allocations for hot paths
//! - Strategic component caching with TTL
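//!
//! # Example
//!
//! A minimal usage sketch (illustrative only; the evaluation values depend on
//! the trained knowledge base):
//!
//! ```ignore
//! use chess::Board;
//!
//! let mut evaluator = CoreEvaluator::new();
//! let board = Board::default();
//!
//! // Seed the knowledge base, then evaluate.
//! evaluator.learn_from_position(&board, 0.0);
//! let result = evaluator.evaluate_position(&board);
//! println!("blended evaluation: {}", result.final_evaluation);
//! ```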

use chess::Board;
use ndarray::Array1;
use crate::similarity_search::{SimilaritySearch, SearchResult};
use crate::tactical_search::{TacticalSearch, TacticalConfig};
use crate::position_encoder::PositionEncoder;
use std::collections::HashMap;
use std::time::{Duration, Instant};

/// Cache statistics for performance monitoring
#[derive(Debug, Clone)]
pub struct CacheStats {
    pub position_cache_size: usize,
    pub evaluation_cache_size: usize,
    pub max_cache_size: usize,
    pub cache_ttl_secs: u64,
}

/// Production-optimized core evaluator: augments traditional chess analysis
/// with our unique vector-similarity and strategic-initiative insights
pub struct CoreEvaluator {
    /// Traditional tactical evaluation (baseline)
    tactical_evaluator: TacticalSearch,
    /// Vector similarity engine (our unique differentiator #1)
    pub similarity_engine: SimilarityEngine,
    /// Strategic initiative analyzer (our unique differentiator #2)
    strategic_analyzer: StrategicAnalyzer,
    /// Blender that combines all evaluations intelligently
    evaluation_blender: EvaluationBlender,
    /// Position encoder for consistent vector generation
    position_encoder: PositionEncoder,
    /// TTL-bounded cache for encoded positions (prevents recomputation)
    position_cache: HashMap<String, (Array1<f32>, Instant)>,
    /// TTL-bounded cache for evaluation results
    evaluation_cache: HashMap<String, (CoreEvaluationResult, Instant)>,
    /// Cache size limit to prevent memory bloat
    max_cache_size: usize,
    /// Cache TTL for evaluation results
    cache_ttl: Duration,
}

impl CoreEvaluator {
    pub fn new() -> Self {
        Self {
            tactical_evaluator: TacticalSearch::new(TacticalConfig::default()),
            similarity_engine: SimilarityEngine::new(),
            strategic_analyzer: StrategicAnalyzer::new(),
            evaluation_blender: EvaluationBlender::new(),
            position_encoder: PositionEncoder::new(1024),
            position_cache: HashMap::with_capacity(1000),
            evaluation_cache: HashMap::with_capacity(500),
            max_cache_size: 1000,
            cache_ttl: Duration::from_secs(300), // 5 minutes
        }
    }

    /// Create a production-optimized evaluator with custom cache settings
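    ///
    /// # Example
    ///
    /// A hedged sketch: a 2,000-entry cache with a 60-second TTL.
    ///
    /// ```ignore
    /// let mut evaluator = CoreEvaluator::new_with_cache_config(2_000, 60);
    /// assert_eq!(evaluator.get_cache_stats().max_cache_size, 2_000);
    /// assert_eq!(evaluator.get_cache_stats().cache_ttl_secs, 60);
    /// ```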
    pub fn new_with_cache_config(max_cache_size: usize, cache_ttl_secs: u64) -> Self {
        Self {
            tactical_evaluator: TacticalSearch::new(TacticalConfig::default()),
            similarity_engine: SimilarityEngine::new(),
            strategic_analyzer: StrategicAnalyzer::new(),
            evaluation_blender: EvaluationBlender::new(),
            position_encoder: PositionEncoder::new(1024),
            position_cache: HashMap::with_capacity(max_cache_size),
            evaluation_cache: HashMap::with_capacity(max_cache_size / 2),
            max_cache_size,
            cache_ttl: Duration::from_secs(cache_ttl_secs),
        }
    }

    /// Production-optimized main evaluation method with aggressive caching and early termination
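    ///
    /// # Example
    ///
    /// A minimal sketch of the cold/warm path (assumes the default cache TTL):
    ///
    /// ```ignore
    /// let mut evaluator = CoreEvaluator::new();
    /// let board = chess::Board::default();
    ///
    /// let cold = evaluator.evaluate_position(&board);  // full pipeline
    /// let warm = evaluator.evaluate_position(&board);  // served from the TTL cache
    /// assert_eq!(cold.final_evaluation, warm.final_evaluation);
    /// ```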
    pub fn evaluate_position(&mut self, board: &Board) -> CoreEvaluationResult {
        let fen = board.to_string();
        let now = Instant::now();

        // Cache hit: return the cached result if it is still within the TTL
        if let Some((cached_result, cached_time)) = self.evaluation_cache.get(&fen) {
            if now.duration_since(*cached_time) < self.cache_ttl {
                return cached_result.clone();
            }
        }

        // Evict expired entries to keep both caches bounded
        self.evict_expired_cache_entries(now);

        // Step 1: Traditional tactical evaluation (baseline) - always needed
        let tactical_result = self.tactical_evaluator.search(board);
        let tactical_eval = tactical_result.evaluation;

        // Step 2: Get cached or compute position vector for similarity search
        let position_vector = self.get_cached_position_vector(board, &fen, now);

        // Step 3: Our unique vector similarity insights
        let similarity_insights = self.similarity_engine.find_strategic_insights_with_vector(&position_vector);

        // Step 4: Our unique strategic initiative analysis (analysis depth
        // chosen based on similarity confidence)
        let strategic_insights = if similarity_insights.confidence > 0.8 {
            // High similarity confidence - lighter strategic analysis
            self.strategic_analyzer.analyze_initiative_fast(board)
        } else {
            // Lower similarity confidence - full strategic analysis
            self.strategic_analyzer.analyze_initiative(board)
        };

        // Step 5: Intelligent blending of all insights
        let final_evaluation = self.evaluation_blender.blend_all(
            tactical_eval,
            &similarity_insights,
            &strategic_insights,
        );

        let unique_insights_provided = self.provides_unique_insights(&similarity_insights, &strategic_insights);

        let result = CoreEvaluationResult {
            final_evaluation,
            tactical_component: tactical_eval,
            similarity_insights,
            strategic_insights,
            unique_insights_provided,
        };

        // Cache the result for future lookups
        self.evaluation_cache.insert(fen, (result.clone(), now));

        result
    }

    /// Add a position to our knowledge base for future similarity matching (optimized)
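    ///
    /// A hedged sketch: seed the knowledge base from known (position, evaluation) pairs.
    ///
    /// ```ignore
    /// let board = chess::Board::default();
    /// evaluator.learn_from_position(&board, 0.0);
    /// ```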
    pub fn learn_from_position(&mut self, board: &Board, evaluation: f32) {
        let fen = board.to_string();
        let now = Instant::now();
        let position_vector = self.get_cached_position_vector(board, &fen, now);
        self.similarity_engine.add_position_with_vector(position_vector, evaluation);
    }

    /// Get cached position vector or compute and cache it
    fn get_cached_position_vector(&mut self, board: &Board, fen: &str, now: Instant) -> Array1<f32> {
        // Check position cache first
        if let Some((cached_vector, cached_time)) = self.position_cache.get(fen) {
            if now.duration_since(*cached_time) < self.cache_ttl {
                return cached_vector.clone();
            }
        }

        // Compute and cache the position vector
        let position_vector = self.position_encoder.encode(board);
        self.position_cache.insert(fen.to_string(), (position_vector.clone(), now));

        // Maintain cache size
        if self.position_cache.len() > self.max_cache_size {
            self.evict_oldest_position_cache_entry();
        }

        position_vector
    }

    /// Evict expired cache entries to maintain performance
    fn evict_expired_cache_entries(&mut self, now: Instant) {
        // Evict expired evaluation cache entries
        self.evaluation_cache.retain(|_, (_, cached_time)| {
            now.duration_since(*cached_time) < self.cache_ttl
        });

        // Evict expired position cache entries
        self.position_cache.retain(|_, (_, cached_time)| {
            now.duration_since(*cached_time) < self.cache_ttl
        });
    }

    /// Evict oldest position cache entry when cache is full
    fn evict_oldest_position_cache_entry(&mut self) {
        if let Some(oldest_key) = self.position_cache
            .iter()
            .min_by_key(|(_, (_, time))| *time)
            .map(|(key, _)| key.clone()) {
            self.position_cache.remove(&oldest_key);
        }
    }

    /// Get cache statistics for monitoring
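    ///
    /// A monitoring sketch (field names as defined on `CacheStats` above):
    ///
    /// ```ignore
    /// let stats = evaluator.get_cache_stats();
    /// println!("positions cached: {}/{}", stats.position_cache_size, stats.max_cache_size);
    /// ```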
    pub fn get_cache_stats(&self) -> CacheStats {
        CacheStats {
            position_cache_size: self.position_cache.len(),
            evaluation_cache_size: self.evaluation_cache.len(),
            max_cache_size: self.max_cache_size,
            cache_ttl_secs: self.cache_ttl.as_secs(),
        }
    }

    /// Clear all caches (useful for benchmarking or memory management)
    pub fn clear_caches(&mut self) {
        self.position_cache.clear();
        self.evaluation_cache.clear();
    }

    /// Check if our approach provided insights beyond pure tactical analysis
    fn provides_unique_insights(
        &self,
        similarity: &SimilarityInsights,
        strategic: &StrategicInsights,
    ) -> bool {
        !similarity.similar_positions.is_empty() || strategic.initiative_advantage.abs() > 0.1
    }
}

/// Results of our unified evaluation approach
#[derive(Debug, Clone)]
pub struct CoreEvaluationResult {
    /// Final blended evaluation
    pub final_evaluation: f32,
    /// Traditional tactical component (baseline)
    pub tactical_component: f32,
    /// Our unique similarity insights
    pub similarity_insights: SimilarityInsights,
    /// Our unique strategic insights
    pub strategic_insights: StrategicInsights,
    /// Whether we provided unique value beyond traditional engines
    pub unique_insights_provided: bool,
}

/// Production-optimized vector similarity engine - our unique differentiator #1
pub struct SimilarityEngine {
    similarity_search: SimilaritySearch,
    position_database: Vec<(Array1<f32>, f32)>, // (vector, evaluation)
}

impl SimilarityEngine {
    pub fn new() -> Self {
        Self {
            similarity_search: SimilaritySearch::new(1024),
            position_database: Vec::new(),
        }
    }

    pub fn add_position(&mut self, board: &Board, evaluation: f32) {
        // Encode position to vector (simplified for clarity)
        let vector = self.encode_position(board);
        self.add_position_with_vector(vector, evaluation);
    }

    /// Optimized method to add position with pre-computed vector
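    ///
    /// A hedged sketch (`engine` is a `SimilarityEngine`; vectors are assumed
    /// to match the 1024-dim encoding used throughout this module):
    ///
    /// ```ignore
    /// use ndarray::Array1;
    /// engine.add_position_with_vector(Array1::zeros(1024), 0.25);
    /// ```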
    pub fn add_position_with_vector(&mut self, vector: Array1<f32>, evaluation: f32) {
        self.position_database.push((vector.clone(), evaluation));

        // Add to similarity search index
        self.similarity_search.add_position(vector, evaluation);
    }

    pub fn find_strategic_insights(&self, board: &Board) -> SimilarityInsights {
        let query_vector = self.encode_position(board);
        self.find_strategic_insights_with_vector(&query_vector)
    }

    /// Optimized method using pre-computed vector (caching handled at higher level)
    pub fn find_strategic_insights_with_vector(&self, query_vector: &Array1<f32>) -> SimilarityInsights {
        // Use optimized search method for better performance
        let raw_results = self.similarity_search.search_optimized(query_vector, 3);

        // Convert the raw results to SearchResult objects
        let similar_positions: Vec<SearchResult> = raw_results
            .into_iter()
            .map(|(vector, evaluation, similarity)| SearchResult {
                vector,
                evaluation,
                similarity,
            })
            .collect();

        let average_evaluation = if !similar_positions.is_empty() {
            similar_positions.iter().map(|s| s.evaluation).sum::<f32>() / similar_positions.len() as f32
        } else {
            0.0
        };

        let confidence = self.calculate_confidence_from_results(&similar_positions);

        SimilarityInsights {
            similar_positions,
            suggested_evaluation: average_evaluation,
            confidence,
        }
    }

    /// Simplified position encoding for demo purposes
    fn encode_position(&self, board: &Board) -> Array1<f32> {
        // In the real implementation, this would use our PositionEncoder.
        // For now, create a simple vector based on material and basic features.
        let mut vector = Array1::zeros(1024);

        // Basic material encoding: pawn, knight, bishop, rook, queen, king
        let piece_values = [1.0, 3.0, 3.0, 5.0, 9.0, 0.0];
        let mut material_index = 0;

        for square in chess::ALL_SQUARES {
            if let Some(piece) = board.piece_on(square) {
                let value = piece_values[piece as usize];
                if material_index < 1024 {
                    if board.color_on(square) == Some(chess::Color::White) {
                        vector[material_index] = value;
                    } else {
                        vector[material_index] = -value;
                    }
                    material_index += 1;
                }
            }
        }

        vector
    }

    fn calculate_confidence_from_results(&self, similar_positions: &[SearchResult]) -> f32 {
        if similar_positions.is_empty() {
            0.0
        } else {
            // Average similarity serves as confidence
            similar_positions.iter().map(|s| s.similarity).sum::<f32>() / similar_positions.len() as f32
        }
    }
}

/// Strategic initiative analyzer - our unique differentiator #2
pub struct StrategicAnalyzer {
    initiative_factors: InitiativeFactors,
}

impl StrategicAnalyzer {
    pub fn new() -> Self {
        Self {
            initiative_factors: InitiativeFactors::default(),
        }
    }

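    /// Full initiative analysis: averages development, center control, and
    /// piece activity into a single signed advantage (positive favors White).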
    pub fn analyze_initiative(&self, board: &Board) -> StrategicInsights {
        let development_advantage = self.calculate_development_advantage(board);
        let center_control = self.calculate_center_control(board);
        let piece_activity = self.calculate_piece_activity(board);

        let initiative_advantage = (development_advantage + center_control + piece_activity) / 3.0;

        StrategicInsights {
            initiative_advantage,
            development_advantage,
            center_control,
            piece_activity,
            strategic_recommendation: self.generate_recommendation(initiative_advantage),
        }
    }

    /// Fast strategic analysis for high-confidence similarity cases
    pub fn analyze_initiative_fast(&self, board: &Board) -> StrategicInsights {
        // Lightweight analysis when similarity confidence is high
        let development_advantage = self.calculate_development_advantage(board);
        let center_control = self.calculate_center_control(board);
        // Skip the expensive piece activity calculation
        let piece_activity = 0.0;

        let initiative_advantage = (development_advantage + center_control) / 2.0;

        StrategicInsights {
            initiative_advantage,
            development_advantage,
            center_control,
            piece_activity,
            strategic_recommendation: self.generate_recommendation(initiative_advantage),
        }
    }

    fn calculate_development_advantage(&self, board: &Board) -> f32 {
        let mut advantage = 0.0;

        // Check minor-piece development from the starting squares
        let starting_squares = [
            (chess::Square::B1, chess::Color::White), (chess::Square::G1, chess::Color::White),
            (chess::Square::C1, chess::Color::White), (chess::Square::F1, chess::Color::White),
            (chess::Square::B8, chess::Color::Black), (chess::Square::G8, chess::Color::Black),
            (chess::Square::C8, chess::Color::Black), (chess::Square::F8, chess::Color::Black),
        ];

        for (square, color) in starting_squares {
            if board.piece_on(square).is_none() {
                match color {
                    chess::Color::White => advantage += 0.1,
                    chess::Color::Black => advantage -= 0.1,
                }
            }
        }

        advantage
    }

    fn calculate_center_control(&self, board: &Board) -> f32 {
        let mut control = 0.0;
        let center_squares = [chess::Square::D4, chess::Square::D5, chess::Square::E4, chess::Square::E5];

        for square in center_squares {
            if let Some(piece) = board.piece_on(square) {
                if piece == chess::Piece::Pawn {
                    match board.color_on(square) {
                        Some(chess::Color::White) => control += 0.2,
                        Some(chess::Color::Black) => control -= 0.2,
                        None => {}
                    }
                }
            }
        }

        control
    }

    fn calculate_piece_activity(&self, _board: &Board) -> f32 {
        // Simplified piece activity calculation.
        // In reality, this would analyze piece mobility and coordination.
        0.0 // Placeholder
    }

    fn generate_recommendation(&self, initiative_advantage: f32) -> String {
        if initiative_advantage > 0.2 {
            "Maintain aggressive stance, capitalize on initiative".to_string()
        } else if initiative_advantage < -0.2 {
            "Consolidate position, seek counterplay opportunities".to_string()
        } else {
            "Balanced position, seek gradual improvements".to_string()
        }
    }
}

/// Intelligent evaluation blender
pub struct EvaluationBlender {
    tactical_weight: f32,
    similarity_weight: f32,
    strategic_weight: f32,
}

impl EvaluationBlender {
    pub fn new() -> Self {
        Self {
            tactical_weight: 0.6,    // Traditional evaluation gets majority weight
            similarity_weight: 0.25, // Our similarity insights add strategic context
            strategic_weight: 0.15,  // Our strategic analysis provides initiative awareness
        }
    }

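    /// Blend the three components with fixed weights. Worked example
    /// (illustrative numbers): with tactical = 0.5, a similarity suggestion
    /// of 0.2 at confidence 0.6 (above the 0.5 gate), and initiative = 0.3,
    /// the result is 0.5 * 0.6 + 0.2 * 0.25 + 0.3 * 0.15 = 0.395.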
    pub fn blend_all(
        &self,
        tactical_eval: f32,
        similarity: &SimilarityInsights,
        strategic: &StrategicInsights,
    ) -> f32 {
        let mut final_eval = tactical_eval * self.tactical_weight;

        // Add similarity insights if available
        if similarity.confidence > 0.5 {
            final_eval += similarity.suggested_evaluation * self.similarity_weight;
        }

        // Add strategic initiative component
        final_eval += strategic.initiative_advantage * self.strategic_weight;

        final_eval
    }
}

/// Results from vector similarity analysis
#[derive(Debug, Clone)]
pub struct SimilarityInsights {
    pub similar_positions: Vec<SearchResult>,
    pub suggested_evaluation: f32,
    pub confidence: f32,
}

/// Results from strategic initiative analysis
#[derive(Debug, Clone)]
pub struct StrategicInsights {
    pub initiative_advantage: f32,
    pub development_advantage: f32,
    pub center_control: f32,
    pub piece_activity: f32,
    pub strategic_recommendation: String,
}

/// Initiative calculation factors
#[derive(Debug)]
struct InitiativeFactors {
    development_weight: f32,
    center_weight: f32,
    activity_weight: f32,
}

impl Default for InitiativeFactors {
    fn default() -> Self {
        Self {
            development_weight: 0.4,
            center_weight: 0.3,
            activity_weight: 0.3,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use chess::Board;
    use std::str::FromStr;

    #[test]
    fn test_core_evaluator_basic() {
        let mut evaluator = CoreEvaluator::new();
        let board = Board::default();

        // Add some positions to learn from
        evaluator.learn_from_position(&board, 0.0);

        let result = evaluator.evaluate_position(&board);

        assert!(result.final_evaluation.is_finite());
        assert!(result.tactical_component.is_finite());
    }

    #[test]
    fn test_provides_unique_insights() {
        let mut evaluator = CoreEvaluator::new();

        // Add training positions
        let positions = [
            ("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1", 0.0),
            ("rnbqkbnr/pppp1ppp/8/4p3/4P3/8/PPPP1PPP/RNBQKBNR w KQkq e6 0 2", 0.0),
        ];

        for (fen, eval) in positions {
            let board = Board::from_str(fen).unwrap();
            evaluator.learn_from_position(&board, eval);
        }

        // Test similar position
        let test_board = Board::from_str("rnbqkbnr/pppp1ppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq - 1 2").unwrap();
        let result = evaluator.evaluate_position(&test_board);

        // Should find similar positions and provide unique insights
        assert!(result.unique_insights_provided);
        assert!(!result.similarity_insights.similar_positions.is_empty());
    }
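
    #[test]
    fn test_evaluation_cache_round_trip() {
        // A hedged sketch of the caching contract: re-evaluating the same
        // position is served from the TTL cache, so the results match exactly,
        // and clear_caches() resets the monitoring counters.
        let mut evaluator = CoreEvaluator::new();
        let board = Board::default();

        let first = evaluator.evaluate_position(&board);
        let second = evaluator.evaluate_position(&board);
        assert_eq!(first.final_evaluation, second.final_evaluation);
        assert!(evaluator.get_cache_stats().evaluation_cache_size >= 1);

        evaluator.clear_caches();
        let stats = evaluator.get_cache_stats();
        assert_eq!(stats.position_cache_size, 0);
        assert_eq!(stats.evaluation_cache_size, 0);
    }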
}