Skip to main content

jugar_web/
ai.rs

1//! AI opponent module for Pong game.
2//!
3//! This module provides an adaptive AI opponent that uses a pre-trained
4//! `.apr` model file for difficulty profiles. The AI implements Dynamic
5//! Difficulty Adjustment (DDA) based on Flow Theory (Csikszentmihalyi).
6//!
7//! ## Research Foundation
8//!
9//! - **Flow Theory**: Three-channel model (boredom ↔ flow ↔ anxiety)
10//! - **DDA**: [Zohaib 2018](https://onlinelibrary.wiley.com/doi/10.1155/2018/5681652)
11//! - **Reproducibility**: Deterministic RNG with seed stored in model
12//!
13//! ## Architecture
14//!
15//! ```text
16//! ┌─────────────────────────────────────────────────────────────┐
17//! │                    PongAIModel (.apr)                        │
18//! │  - Metadata (name, version, author, license)                │
19//! │  - Determinism config (seed, algorithm)                     │
20//! │  - Flow Theory params (thresholds, adaptation rate)         │
21//! │  - 10 difficulty profiles (0-9)                             │
22//! └─────────────────────────────┬───────────────────────────────┘
23//!                               │
24//!                               ▼
25//! ┌─────────────────────────────────────────────────────────────┐
26//! │                      PongAI                                  │
27//! │  - Loads model from .apr bytes                              │
28//! │  - Tracks player performance (FlowState)                    │
29//! │  - Detects flow channel (boredom/flow/anxiety)              │
30//! │  - Adapts difficulty to maintain flow state                 │
31//! └─────────────────────────────────────────────────────────────┘
32//! ```
33
34use serde::{Deserialize, Serialize};
35
36// ============================================================================
37// Model Metadata (for .apr showcase)
38// ============================================================================
39
/// Model metadata for the `.apr` file format.
///
/// This showcases aprender's portable model format. All fields are plain
/// strings so the JSON stays human-readable and hand-editable.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelMetadata {
    /// Model name (human-readable, shown in the UI/download)
    pub name: String,
    /// Semantic version (e.g., "1.0.0")
    pub version: String,
    /// Model description
    pub description: String,
    /// Author/organization
    pub author: String,
    /// License (e.g., "MIT")
    pub license: String,
    /// Creation timestamp (ISO 8601) — stored as a string, not validated here
    pub created: String,
}
58
59impl Default for ModelMetadata {
60    fn default() -> Self {
61        Self {
62            name: "Pong AI v1".to_string(),
63            version: "1.0.0".to_string(),
64            description: "Flow Theory-based adaptive Pong opponent".to_string(),
65            author: "PAIML".to_string(),
66            license: "MIT".to_string(),
67            created: "2025-01-01T00:00:00Z".to_string(),
68        }
69    }
70}
71
72// ============================================================================
73// Determinism Configuration (for reproducibility)
74// ============================================================================
75
/// Determinism configuration for reproducible AI behavior.
///
/// The seed initializes the AI's internal xorshift64 state, so identical
/// seeds replay identical error sequences.
///
/// Based on [arXiv Reproducibility (2022)](https://arxiv.org/abs/2203.01075).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeterminismConfig {
    /// Random seed for reproducibility
    pub seed: u64,
    /// RNG algorithm name (informational; "xorshift64" in default models)
    pub rng_algorithm: String,
}
86
87impl Default for DeterminismConfig {
88    fn default() -> Self {
89        Self {
90            seed: 12345,
91            rng_algorithm: "xorshift64".to_string(),
92        }
93    }
94}
95
96// ============================================================================
97// Flow Theory Configuration
98// ============================================================================
99
/// Flow Theory parameters for Dynamic Difficulty Adjustment.
///
/// Based on Csikszentmihalyi's three-channel model:
/// - **Boredom**: Challenge too low for skill level
/// - **Flow**: Challenge matches skill (optimal engagement)
/// - **Anxiety**: Challenge too high for skill level
///
/// The thresholds are compared against the player's recent win rate
/// (see `PlayerMetrics::detect_flow_channel`).
///
/// Reference: [Think Game Design](https://thinkgamedesign.com/flow-theory-game-design/)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlowTheoryConfig {
    /// Number of recent points to consider for skill estimation
    pub skill_window_size: usize,
    /// Rate at which AI adapts difficulty (0.0-1.0)
    pub adaptation_rate: f32,
    /// Win rate threshold at/above which player is "bored" (e.g., 0.7)
    pub boredom_threshold: f32,
    /// Win rate threshold at/below which player is "anxious" (e.g., 0.3)
    pub anxiety_threshold: f32,
    /// Target win rate for optimal flow (typically 0.5)
    pub target_win_rate: f32,
}
121
122impl Default for FlowTheoryConfig {
123    fn default() -> Self {
124        Self {
125            skill_window_size: 10,
126            adaptation_rate: 0.15,
127            boredom_threshold: 0.7,
128            anxiety_threshold: 0.3,
129            target_win_rate: 0.5,
130        }
131    }
132}
133
134// ============================================================================
135// Difficulty Profile
136// ============================================================================
137
/// A single difficulty profile defining AI behavior.
///
/// Difficulty curve formula (from design spec), with `t = level/9`:
/// - `reaction = 500 * (1-t)² + 50`
/// - `accuracy = 0.30 + 0.65 * t`
/// - `speed = 200 + 400 * t`
/// - `error = 50 * (1-t)² + 5`
///
/// Level 9 bypasses these formulas entirely and is hard-coded as a perfect,
/// unbeatable profile (see `PongAIModel::generate_default_profiles`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DifficultyProfile {
    /// Difficulty level (0-9)
    pub level: u8,
    /// Human-readable name for this level
    pub name: String,
    /// Base reaction delay in milliseconds
    pub reaction_delay_ms: f32,
    /// Ball position prediction accuracy (0.0-1.0)
    pub prediction_accuracy: f32,
    /// Maximum paddle movement speed (pixels/second)
    pub max_paddle_speed: f32,
    /// Random error magnitude in target position (pixels)
    pub error_magnitude: f32,
    /// Aggression factor - how much to anticipate vs react (0.0-1.0)
    pub aggression: f32,
}
162
163impl Default for DifficultyProfile {
164    fn default() -> Self {
165        Self {
166            level: 5,
167            name: "Challenging".to_string(),
168            reaction_delay_ms: 180.0,
169            prediction_accuracy: 0.66,
170            max_paddle_speed: 422.0,
171            error_magnitude: 24.0,
172            aggression: 0.55,
173        }
174    }
175}
176
177// ============================================================================
178// Pong AI Model (.apr format)
179// ============================================================================
180
/// Pong AI Model - the complete `.apr` file structure.
///
/// This is the downloadable model that showcases aprender's format.
/// Users can inspect, modify, and reload this JSON model.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PongAIModel {
    /// Schema version for forward compatibility.
    /// Serialized under the `$schema` key; filled by `default_schema()`
    /// when absent from the JSON.
    #[serde(rename = "$schema", default = "default_schema")]
    pub schema: String,

    /// Model metadata
    pub metadata: ModelMetadata,

    /// Model type identifier ("behavior_profile" for default models)
    pub model_type: String,

    /// Determinism configuration for reproducibility
    pub determinism: DeterminismConfig,

    /// Flow Theory parameters for DDA
    pub flow_theory: FlowTheoryConfig,

    /// Difficulty profiles (0-9), indexed by level
    pub difficulty_profiles: Vec<DifficultyProfile>,
}
206
/// Returns the default `$schema` URL used when the JSON omits one.
fn default_schema() -> String {
    String::from("https://paiml.com/schemas/apr/v1")
}
210
211impl Default for PongAIModel {
212    fn default() -> Self {
213        Self {
214            schema: default_schema(),
215            metadata: ModelMetadata::default(),
216            model_type: "behavior_profile".to_string(),
217            determinism: DeterminismConfig::default(),
218            flow_theory: FlowTheoryConfig::default(),
219            difficulty_profiles: Self::generate_default_profiles(),
220        }
221    }
222}
223
224impl PongAIModel {
225    /// Creates a new AI model with custom metadata.
226    #[must_use]
227    pub fn new(name: &str, description: &str) -> Self {
228        Self {
229            metadata: ModelMetadata {
230                name: name.to_string(),
231                description: description.to_string(),
232                ..ModelMetadata::default()
233            },
234            ..Default::default()
235        }
236    }
237
238    /// Generates the default 10-level difficulty curve.
239    ///
240    /// Based on game design research:
241    /// - Level 0: "Training wheels" (500ms reaction, 30% accuracy)
242    /// - Level 5: "Challenging" (180ms reaction, 66% accuracy)
243    /// - Level 9: "Perfect" (0ms reaction, 100% accuracy, 0 error - UNBEATABLE)
244    #[must_use]
245    #[allow(clippy::suboptimal_flops)] // mul_add is less readable here
246    pub fn generate_default_profiles() -> Vec<DifficultyProfile> {
247        const LEVEL_NAMES: [&str; 10] = [
248            "Training Wheels",
249            "Beginner",
250            "Easy",
251            "Casual",
252            "Normal",
253            "Challenging",
254            "Hard",
255            "Very Hard",
256            "Expert",
257            "Perfect",
258        ];
259
260        (0..10)
261            .map(|level| {
262                let t = f32::from(level) / 9.0; // 0.0 to 1.0
263
264                // Level 9 is PERFECT - unbeatable AI
265                if level == 9 {
266                    return DifficultyProfile {
267                        level,
268                        name: LEVEL_NAMES[level as usize].to_string(),
269                        reaction_delay_ms: 0.0,   // Instant reaction
270                        prediction_accuracy: 1.0, // Perfect prediction
271                        max_paddle_speed: 1000.0, // Very fast movement
272                        error_magnitude: 0.0,     // Zero error
273                        aggression: 1.0,          // Maximum aggression
274                    };
275                }
276
277                DifficultyProfile {
278                    level,
279                    name: LEVEL_NAMES[level as usize].to_string(),
280                    // Reaction delay: 500ms -> 50ms (exponential decay)
281                    reaction_delay_ms: 500.0 * (1.0 - t).powi(2) + 50.0,
282                    // Prediction accuracy: 30% -> 95% (linear, except level 9)
283                    prediction_accuracy: 0.3 + 0.65 * t,
284                    // Max speed: 200 -> 600 px/s (linear)
285                    max_paddle_speed: 200.0 + 400.0 * t,
286                    // Error magnitude: 50 -> 5 pixels (exponential decay)
287                    error_magnitude: 50.0 * (1.0 - t).powi(2) + 5.0,
288                    // Aggression: 10% -> 90% (linear)
289                    aggression: 0.1 + 0.8 * t,
290                }
291            })
292            .collect()
293    }
294
295    /// Gets the difficulty profile for a given level (clamped to 0-9).
296    #[must_use]
297    pub fn get_profile(&self, level: u8) -> &DifficultyProfile {
298        let index = (level as usize).min(self.difficulty_profiles.len().saturating_sub(1));
299        &self.difficulty_profiles[index]
300    }
301
302    /// Exports the model as a pretty-printed JSON string (APR format).
303    #[must_use]
304    pub fn to_json(&self) -> String {
305        serde_json::to_string_pretty(self).unwrap_or_else(|_| "{}".to_string())
306    }
307
308    /// Exports the model as compact JSON (for size measurement).
309    #[must_use]
310    pub fn to_json_compact(&self) -> String {
311        serde_json::to_string(self).unwrap_or_else(|_| "{}".to_string())
312    }
313
314    /// Returns the approximate size of the model when serialized.
315    #[must_use]
316    pub fn serialized_size(&self) -> usize {
317        self.to_json_compact().len()
318    }
319
320    /// Loads a model from JSON bytes.
321    ///
322    /// # Errors
323    ///
324    /// Returns an error if the JSON is invalid.
325    pub fn from_json(json: &str) -> Result<Self, String> {
326        serde_json::from_str(json).map_err(|e| format!("Failed to parse model: {e}"))
327    }
328}
329
330// ============================================================================
331// Flow State (Player Engagement Tracking)
332// ============================================================================
333
/// Flow state based on Csikszentmihalyi's three-channel model.
///
/// The channel is derived from the player's recent win rate and tells the
/// DDA logic which direction to adjust difficulty.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum FlowChannel {
    /// Player winning too easily - increase difficulty
    Boredom,
    /// Player optimally challenged - maintain difficulty
    #[default]
    Flow,
    /// Player struggling - decrease difficulty
    Anxiety,
}

impl FlowChannel {
    /// Returns a human-readable label for UI display.
    #[must_use]
    pub const fn label(self) -> &'static str {
        match self {
            FlowChannel::Flow => "In Flow (optimal)",
            FlowChannel::Boredom => "Bored (too easy)",
            FlowChannel::Anxiety => "Anxious (too hard)",
        }
    }
}
357
/// Player performance metrics for flow state detection.
///
/// `point_history` and `rally_history` are rolling windows trimmed to the
/// configured window size each time a point ends; the cumulative counters
/// (`*_points`, `player_hits`, `player_misses`) are never trimmed.
#[derive(Debug, Clone, Default)]
pub struct PlayerMetrics {
    /// Recent point outcomes (true = player won)
    pub point_history: Vec<bool>,
    /// Total points won by player
    pub player_points: u32,
    /// Total points won by AI
    pub ai_points: u32,
    /// Current rally length (player hits since the last point ended)
    pub current_rally: u32,
    /// Rally lengths history (rolling window)
    pub rally_history: Vec<u32>,
    /// Total player paddle hits
    pub player_hits: u32,
    /// Total player misses
    pub player_misses: u32,
}
376
377impl PlayerMetrics {
378    /// Creates new empty metrics.
379    #[must_use]
380    pub fn new() -> Self {
381        Self::default()
382    }
383
384    /// Records a player hit (successfully returned the ball).
385    #[allow(clippy::missing_const_for_fn)]
386    pub fn record_hit(&mut self) {
387        self.player_hits += 1;
388        self.current_rally += 1;
389    }
390
391    /// Records that the player scored a point.
392    pub fn record_player_scored(&mut self, window_size: usize) {
393        self.player_points += 1;
394        self.point_history.push(true);
395        self.finalize_rally(window_size);
396    }
397
398    /// Records that the AI scored a point (player missed).
399    pub fn record_ai_scored(&mut self, window_size: usize) {
400        self.ai_points += 1;
401        self.player_misses += 1;
402        self.point_history.push(false);
403        self.finalize_rally(window_size);
404    }
405
406    /// Finalizes current rally and trims history.
407    fn finalize_rally(&mut self, window_size: usize) {
408        self.rally_history.push(self.current_rally);
409        self.current_rally = 0;
410
411        // Trim histories to window size
412        while self.point_history.len() > window_size {
413            let _ = self.point_history.remove(0);
414        }
415        while self.rally_history.len() > window_size {
416            let _ = self.rally_history.remove(0);
417        }
418    }
419
420    /// Calculates player win rate from recent history.
421    #[must_use]
422    pub fn recent_win_rate(&self) -> f32 {
423        if self.point_history.is_empty() {
424            return 0.5; // Unknown, assume balanced
425        }
426        let wins = self.point_history.iter().filter(|&&w| w).count() as f32;
427        wins / self.point_history.len() as f32
428    }
429
430    /// Calculates average rally length.
431    #[must_use]
432    pub fn average_rally(&self) -> f32 {
433        if self.rally_history.is_empty() {
434            return 5.0; // Default assumption
435        }
436        self.rally_history.iter().sum::<u32>() as f32 / self.rally_history.len() as f32
437    }
438
439    /// Estimates player skill (0.0-1.0).
440    #[must_use]
441    #[allow(clippy::suboptimal_flops)]
442    pub fn estimate_skill(&self) -> f32 {
443        if self.player_hits + self.player_misses == 0 {
444            return 0.5; // Unknown
445        }
446
447        let hit_rate = self.player_hits as f32 / (self.player_hits + self.player_misses) as f32;
448        let rally_factor = (self.average_rally() / 20.0).min(1.0);
449
450        // Weighted combination
451        (hit_rate * 0.6 + rally_factor * 0.4).clamp(0.0, 1.0)
452    }
453
454    /// Detects the current flow channel based on recent performance.
455    #[must_use]
456    pub fn detect_flow_channel(&self, config: &FlowTheoryConfig) -> FlowChannel {
457        let win_rate = self.recent_win_rate();
458
459        if win_rate >= config.boredom_threshold {
460            FlowChannel::Boredom
461        } else if win_rate <= config.anxiety_threshold {
462            FlowChannel::Anxiety
463        } else {
464            FlowChannel::Flow
465        }
466    }
467
468    /// Resets all metrics.
469    pub fn reset(&mut self) {
470        *self = Self::default();
471    }
472}
473
474// ============================================================================
475// SHAP-style Decision Explanation
476// ============================================================================
477
/// AI decision state for explainability visualization.
///
/// Set each frame by `PongAI::update` based on ball direction, the reaction
/// delay window, and distance to the target position.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)]
pub enum DecisionState {
    /// Ball moving away from AI - returning to center
    #[default]
    Idle,
    /// Ball approaching but within reaction delay window
    Reacting,
    /// Actively tracking and moving toward predicted position
    Tracking,
    /// At target position (within 5px), waiting for ball
    Ready,
}
491
492impl DecisionState {
493    /// Returns a human-readable label for the decision state.
494    #[must_use]
495    pub const fn label(self) -> &'static str {
496        match self {
497            Self::Idle => "Idle (centering)",
498            Self::Reacting => "Reacting (delay)",
499            Self::Tracking => "Tracking ball",
500            Self::Ready => "Ready (at target)",
501        }
502    }
503
504    /// Returns a short code for compact display.
505    #[must_use]
506    pub const fn code(self) -> &'static str {
507        match self {
508            Self::Idle => "IDLE",
509            Self::Reacting => "REACT",
510            Self::Tracking => "TRACK",
511            Self::Ready => "READY",
512        }
513    }
514}
515
/// SHAP-style feature contribution for AI decision explainability.
///
/// Based on Lundberg & Lee (2017) SHAP values - shows how each input
/// feature contributed to the final decision (paddle velocity).
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct FeatureContribution {
    /// Feature name (e.g. "Ball Direction", "Reaction Delay")
    pub name: String,
    /// Raw feature value (units depend on the feature)
    pub value: f32,
    /// Contribution to output (positive = move down, negative = move up)
    pub contribution: f32,
    /// Normalized importance (0.0 - 1.0) for bar visualization
    pub importance: f32,
}
531
/// Complete AI decision explanation for real-time visualization.
///
/// This provides SHAP-style explainability for the AI's decisions,
/// showing which factors from the `.apr` model influenced the move.
/// Rebuilt from scratch every frame by `PongAI::update`.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DecisionExplanation {
    /// Current decision state
    pub state: DecisionState,
    /// Output velocity (positive = down, negative = up)
    pub output_velocity: f32,
    /// Target Y position the AI is moving toward
    pub target_y: f32,
    /// Current paddle Y position
    pub paddle_y: f32,
    /// Distance to target (signed: target_y - paddle_y)
    pub distance_to_target: f32,

    // === .apr Model Parameters (from profile) ===
    /// Current difficulty level (0-9)
    pub difficulty_level: u8,
    /// Difficulty name from model
    pub difficulty_name: String,
    /// Reaction delay from model (ms)
    pub reaction_delay_ms: f32,
    /// Time spent reacting so far (ms)
    pub reaction_elapsed_ms: f32,
    /// Prediction accuracy from model (0-1)
    pub prediction_accuracy: f32,
    /// Max paddle speed from model (px/s)
    pub max_paddle_speed: f32,
    /// Error magnitude from model (px)
    pub error_magnitude: f32,
    /// Applied error this frame (px)
    pub applied_error: f32,

    // === Input Features ===
    /// Ball X position (0 = left, canvas_width = right)
    pub ball_x: f32,
    /// Ball Y position
    pub ball_y: f32,
    /// Ball X velocity (positive = moving right toward AI)
    pub ball_vx: f32,
    /// Ball Y velocity
    pub ball_vy: f32,
    /// Is ball approaching AI side?
    pub ball_approaching: bool,
    /// Is ball on AI's half of court?
    pub ball_on_ai_side: bool,

    // === SHAP-style Feature Contributions ===
    /// Ordered list of feature contributions (highest importance first)
    pub contributions: Vec<FeatureContribution>,

    // === Decision Rationale ===
    /// Human-readable explanation of the decision
    pub rationale: String,
}
589
impl DecisionExplanation {
    /// Creates a new empty explanation.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Exports explanation as JSON for the widget.
    ///
    /// Falls back to `"{}"` if serialization fails.
    #[must_use]
    pub fn to_json(&self) -> String {
        serde_json::to_string(self).unwrap_or_else(|_| "{}".to_string())
    }

    /// Computes SHAP-style feature contributions based on current state.
    ///
    /// Rebuilds `self.contributions` from the current inputs and model
    /// parameters, sorted by descending absolute contribution, with
    /// `importance` normalized against the largest contribution.
    pub fn compute_contributions(&mut self) {
        self.contributions.clear();

        // Compute absolute contributions for normalization.
        // Each tuple is (feature name, raw value, signed contribution).
        // NOTE(review): the scalings (300px, 1000px/s, 50px) are heuristic
        // normalizers, not exact SHAP values computed from a model.
        let mut raw_contributions = vec![
            (
                "Ball Direction",
                self.ball_vx,
                if self.ball_approaching { 1.0 } else { -1.0 },
            ),
            (
                "Reaction Delay",
                self.reaction_delay_ms,
                // Only counts while the AI is actively inside its delay window
                if self.state == DecisionState::Reacting {
                    1.0
                } else {
                    0.0
                },
            ),
            (
                "Distance to Target",
                self.distance_to_target,
                self.distance_to_target.abs() / 300.0,
            ),
            (
                "Prediction Acc",
                self.prediction_accuracy,
                self.prediction_accuracy,
            ),
            (
                "Max Speed",
                self.max_paddle_speed,
                self.max_paddle_speed / 1000.0,
            ),
            (
                "Applied Error",
                self.applied_error,
                self.applied_error.abs() / 50.0,
            ),
        ];

        // Find max for normalization
        // (floor of 0.001 avoids division by zero when all contributions are 0)
        let max_contrib = raw_contributions
            .iter()
            .map(|(_, _, c)| c.abs())
            .fold(0.0f32, f32::max)
            .max(0.001);

        // Sort by absolute contribution, descending (NaN-safe: incomparable
        // pairs are treated as equal)
        raw_contributions.sort_by(|a, b| {
            b.2.abs()
                .partial_cmp(&a.2.abs())
                .unwrap_or(std::cmp::Ordering::Equal)
        });

        // Convert to FeatureContribution
        for (name, value, contrib) in raw_contributions {
            self.contributions.push(FeatureContribution {
                name: name.to_string(),
                value,
                contribution: contrib,
                importance: (contrib.abs() / max_contrib).min(1.0),
            });
        }
    }

    /// Generates human-readable rationale for the decision.
    ///
    /// Writes a one-line summary into `self.rationale` matching the current
    /// `state`.
    pub fn generate_rationale(&mut self) {
        self.rationale = match self.state {
            DecisionState::Idle => {
                format!(
                    "Ball moving away (vx={:.0}). Returning to center.",
                    self.ball_vx
                )
            }
            DecisionState::Reacting => {
                // Clamp so the displayed remaining time never goes negative
                let remaining = (self.reaction_delay_ms - self.reaction_elapsed_ms).max(0.0);
                format!(
                    "Reaction delay: {:.0}ms remaining of {:.0}ms (Level {} profile)",
                    remaining, self.reaction_delay_ms, self.difficulty_level
                )
            }
            DecisionState::Tracking => {
                let direction = if self.output_velocity > 0.0 {
                    "DOWN"
                } else {
                    "UP"
                };
                format!(
                    "Tracking ball → target Y={:.0} (acc={:.0}%, speed={:.0}px/s). Moving {}.",
                    self.target_y,
                    self.prediction_accuracy * 100.0,
                    self.max_paddle_speed,
                    direction
                )
            }
            DecisionState::Ready => {
                format!(
                    "At target Y={:.0} (within 5px). Waiting for ball.",
                    self.target_y
                )
            }
        };
    }
}
709
710// ============================================================================
711// Pong AI (Runtime State)
712// ============================================================================
713
/// AI opponent with Flow Theory-based Dynamic Difficulty Adjustment.
///
/// Runtime state wrapping a loaded [`PongAIModel`]; call `update` once per
/// frame to obtain the paddle velocity.
#[derive(Debug, Clone)]
pub struct PongAI {
    /// The loaded AI model
    model: PongAIModel,
    /// Current difficulty level (0-9, clamped on write)
    difficulty: u8,
    /// Player performance metrics
    metrics: PlayerMetrics,
    /// Current flow channel
    flow_channel: FlowChannel,
    /// Time since ball crossed to AI side, in seconds (for reaction delay)
    time_since_ball_visible: f32,
    /// Current target Y position for paddle
    target_y: f32,
    /// Whether AI has "seen" the ball this rally
    ball_acquired: bool,
    /// Deterministic RNG state (xorshift64, seeded from the model)
    rng_state: u64,
    /// Last applied error for explainability
    last_error: f32,
    /// Last decision explanation for SHAP-style widget
    last_explanation: DecisionExplanation,
}
738
739impl Default for PongAI {
740    fn default() -> Self {
741        let model = PongAIModel::default();
742        let seed = model.determinism.seed;
743        Self::new(model, 5, seed)
744    }
745}
746
747impl PongAI {
748    /// Creates a new AI with the given model, initial difficulty, and seed.
749    #[must_use]
750    pub fn new(model: PongAIModel, difficulty: u8, seed: u64) -> Self {
751        Self {
752            model,
753            difficulty: difficulty.min(9),
754            metrics: PlayerMetrics::new(),
755            flow_channel: FlowChannel::Flow,
756            time_since_ball_visible: 0.0,
757            target_y: 300.0,
758            ball_acquired: false,
759            rng_state: seed,
760            last_error: 0.0,
761            last_explanation: DecisionExplanation::new(),
762        }
763    }
764
    /// Creates AI with default model and specified difficulty.
    ///
    /// The seed is taken from the default model's determinism config.
    #[must_use]
    pub fn with_difficulty(difficulty: u8) -> Self {
        let model = PongAIModel::default();
        let seed = model.determinism.seed;
        Self::new(model, difficulty, seed)
    }

    /// Sets the difficulty level (0-9). Values above 9 are clamped to 9.
    pub fn set_difficulty(&mut self, level: u8) {
        self.difficulty = level.min(9);
    }

    /// Gets the current difficulty level.
    #[must_use]
    pub const fn difficulty(&self) -> u8 {
        self.difficulty
    }

    /// Gets the current difficulty name (from the model's profile).
    #[must_use]
    pub fn difficulty_name(&self) -> &str {
        &self.model.get_profile(self.difficulty).name
    }

    /// Gets the current difficulty profile.
    #[must_use]
    pub fn profile(&self) -> &DifficultyProfile {
        self.model.get_profile(self.difficulty)
    }

    /// Gets the current flow channel.
    #[must_use]
    pub const fn flow_channel(&self) -> FlowChannel {
        self.flow_channel
    }

    /// Returns a reference to player metrics.
    #[must_use]
    pub const fn metrics(&self) -> &PlayerMetrics {
        &self.metrics
    }

    /// Returns a mutable reference to player metrics.
    #[allow(clippy::missing_const_for_fn)]
    pub fn metrics_mut(&mut self) -> &mut PlayerMetrics {
        &mut self.metrics
    }

    /// Returns the underlying model.
    #[must_use]
    pub const fn model(&self) -> &PongAIModel {
        &self.model
    }

    /// Exports the model as pretty-printed JSON for download.
    #[must_use]
    pub fn export_model(&self) -> String {
        self.model.to_json()
    }

    /// Returns the last decision explanation for SHAP-style visualization.
    #[must_use]
    pub fn explanation(&self) -> &DecisionExplanation {
        &self.last_explanation
    }

    /// Exports the last decision explanation as JSON for the widget.
    #[must_use]
    pub fn export_explanation(&self) -> String {
        self.last_explanation.to_json()
    }
837
838    /// Deterministic pseudo-random number generator (xorshift64).
839    fn next_random(&mut self) -> f32 {
840        self.rng_state ^= self.rng_state << 13;
841        self.rng_state ^= self.rng_state >> 7;
842        self.rng_state ^= self.rng_state << 17;
843        self.rng_state as f32 / u64::MAX as f32
844    }
845
    /// Updates AI state and returns the desired paddle velocity.
    ///
    /// Call once per frame. Ball/paddle coordinates are in canvas pixels;
    /// `dt` is the frame time in seconds. The return value is the paddle
    /// velocity in pixels/second (positive = down, negative = up). Also
    /// rebuilds `last_explanation` for the SHAP-style widget.
    #[allow(clippy::too_many_arguments)]
    pub fn update(
        &mut self,
        ball_x: f32,
        ball_y: f32,
        ball_vx: f32,
        ball_vy: f32,
        paddle_y: f32,
        _paddle_height: f32,
        canvas_width: f32,
        canvas_height: f32,
        dt: f32,
    ) -> f32 {
        let profile = self.model.get_profile(self.difficulty).clone();

        // Check if ball is moving toward AI (right side)
        let ball_approaching = ball_vx > 0.0;
        let ball_on_ai_side = ball_x > canvas_width * 0.5;

        if ball_approaching && ball_on_ai_side {
            if !self.ball_acquired {
                // First frame the ball is visible this rally: start the
                // reaction-delay clock and lock in a predicted target.
                self.ball_acquired = true;
                self.time_since_ball_visible = 0.0;

                self.target_y = self.calculate_target(
                    ball_x,
                    ball_y,
                    ball_vx,
                    ball_vy,
                    canvas_width,
                    canvas_height,
                    &profile,
                );
            }
            self.time_since_ball_visible += dt;
        } else {
            // Ball moving away or on the player's half: recenter the paddle.
            self.ball_acquired = false;
            self.time_since_ball_visible = 0.0;
            self.target_y = canvas_height / 2.0;
        }

        // Check reaction delay (profile stores ms, timer runs in seconds)
        let reaction_delay_sec = profile.reaction_delay_ms / 1000.0;
        let in_reaction_delay =
            self.time_since_ball_visible < reaction_delay_sec && self.ball_acquired;

        // Move toward target; within 5px counts as "arrived" (dead zone)
        let diff = self.target_y - paddle_y;
        let max_speed = profile.max_paddle_speed;

        // diff / dt caps the speed so the paddle doesn't overshoot the
        // target within a single frame; min/max clamps to the profile speed.
        let output_velocity = if in_reaction_delay || diff.abs() < 5.0 {
            0.0
        } else if diff > 0.0 {
            max_speed.min(diff / dt)
        } else {
            (-max_speed).max(diff / dt)
        };

        // Determine decision state for explainability
        let state = if !ball_approaching || !ball_on_ai_side {
            DecisionState::Idle
        } else if in_reaction_delay {
            DecisionState::Reacting
        } else if diff.abs() < 5.0 {
            DecisionState::Ready
        } else {
            DecisionState::Tracking
        };

        // Update explanation for SHAP widget (rebuilt from scratch each frame)
        self.last_explanation = DecisionExplanation {
            state,
            output_velocity,
            target_y: self.target_y,
            paddle_y,
            distance_to_target: diff,
            difficulty_level: self.difficulty,
            difficulty_name: profile.name.clone(),
            reaction_delay_ms: profile.reaction_delay_ms,
            reaction_elapsed_ms: self.time_since_ball_visible * 1000.0,
            prediction_accuracy: profile.prediction_accuracy,
            max_paddle_speed: profile.max_paddle_speed,
            error_magnitude: profile.error_magnitude,
            applied_error: self.last_error,
            ball_x,
            ball_y,
            ball_vx,
            ball_vy,
            ball_approaching,
            ball_on_ai_side,
            contributions: Vec::new(),
            rationale: String::new(),
        };
        self.last_explanation.compute_contributions();
        self.last_explanation.generate_rationale();

        output_velocity
    }
945
946    /// Calculates the target Y position for the paddle.
947    #[allow(clippy::too_many_arguments, clippy::suboptimal_flops)]
948    fn calculate_target(
949        &mut self,
950        ball_x: f32,
951        ball_y: f32,
952        ball_vx: f32,
953        ball_vy: f32,
954        canvas_width: f32,
955        canvas_height: f32,
956        profile: &DifficultyProfile,
957    ) -> f32 {
958        let paddle_x = canvas_width - 35.0;
959        let time_to_paddle = if ball_vx > 0.0 {
960            (paddle_x - ball_x) / ball_vx
961        } else {
962            0.0
963        };
964
965        let mut predicted_y = ball_y + ball_vy * time_to_paddle * profile.prediction_accuracy;
966        predicted_y = predicted_y.clamp(50.0, canvas_height - 50.0);
967
968        // Add deterministic random error (save for explainability)
969        self.last_error = (self.next_random() - 0.5) * 2.0 * profile.error_magnitude;
970        predicted_y += self.last_error;
971
972        predicted_y.clamp(50.0, canvas_height - 50.0)
973    }
974
    /// Records that the player successfully hit the ball.
    ///
    /// Delegates to `PlayerMetrics`, which increments the hit counter and
    /// extends the current rally.
    pub fn record_player_hit(&mut self) {
        self.metrics.record_hit();
    }
979
980    /// Records that the player scored (AI missed).
981    pub fn record_player_scored(&mut self) {
982        let window = self.model.flow_theory.skill_window_size;
983        self.metrics.record_player_scored(window);
984    }
985
986    /// Records that the player missed (AI scored).
987    pub fn record_player_miss(&mut self) {
988        let window = self.model.flow_theory.skill_window_size;
989        self.metrics.record_ai_scored(window);
990    }
991
992    /// Adapts difficulty based on Flow Theory.
993    ///
994    /// This is the core DDA algorithm based on Csikszentmihalyi's model.
995    pub fn adapt_difficulty(&mut self) {
996        // Detect current flow channel
997        self.flow_channel = self.metrics.detect_flow_channel(&self.model.flow_theory);
998
999        // Calculate target difficulty adjustment
1000        let adjustment: f32 = match self.flow_channel {
1001            FlowChannel::Boredom => 1.0,  // Increase difficulty
1002            FlowChannel::Flow => 0.0,     // Maintain
1003            FlowChannel::Anxiety => -1.0, // Decrease difficulty
1004        };
1005
1006        if adjustment.abs() < 0.01 {
1007            return; // No adjustment needed
1008        }
1009
1010        // Apply adaptation rate
1011        let rate = self.model.flow_theory.adaptation_rate;
1012        let current = f32::from(self.difficulty);
1013        let new_difficulty = (adjustment * rate).mul_add(9.0, current);
1014
1015        self.difficulty = (new_difficulty.round() as u8).clamp(0, 9);
1016    }
1017
1018    /// Resets AI state for a new game.
1019    pub fn reset(&mut self) {
1020        self.metrics.reset();
1021        self.flow_channel = FlowChannel::Flow;
1022        self.time_since_ball_visible = 0.0;
1023        self.target_y = 300.0;
1024        self.ball_acquired = false;
1025        // Reset RNG to model seed for reproducibility
1026        self.rng_state = self.model.determinism.seed;
1027    }
1028
1029    /// Gets model info as JSON string for display.
1030    #[must_use]
1031    pub fn model_info_json(&self) -> String {
1032        serde_json::json!({
1033            "name": self.model.metadata.name,
1034            "version": self.model.metadata.version,
1035            "description": self.model.metadata.description,
1036            "author": self.model.metadata.author,
1037            "license": self.model.metadata.license,
1038            "difficulty_levels": self.model.difficulty_profiles.len(),
1039            "current_difficulty": self.difficulty,
1040            "current_difficulty_name": self.profile().name,
1041            "flow_channel": self.flow_channel.label(),
1042            "player_win_rate": self.metrics.recent_win_rate(),
1043            "model_size_bytes": self.model.serialized_size(),
1044        })
1045        .to_string()
1046    }
1047
1048    /// Loads a model from JSON bytes.
1049    ///
1050    /// # Errors
1051    ///
1052    /// Returns an error string if the model fails to load.
1053    pub fn load_model_from_json(&mut self, json: &str) -> Result<(), String> {
1054        let model = PongAIModel::from_json(json)?;
1055        self.rng_state = model.determinism.seed;
1056        self.model = model;
1057        Ok(())
1058    }
1059
1060    /// Loads a model from bytes (for .apr file loading via aprender).
1061    ///
1062    /// # Errors
1063    ///
1064    /// Returns an error string if the model fails to load.
1065    pub fn load_model_from_bytes(&mut self, bytes: &[u8]) -> Result<(), String> {
1066        match aprender::format::load_from_bytes::<PongAIModel>(
1067            bytes,
1068            aprender::format::ModelType::Custom,
1069        ) {
1070            Ok(model) => {
1071                self.rng_state = model.determinism.seed;
1072                self.model = model;
1073                Ok(())
1074            }
1075            Err(e) => Err(format!("Failed to load AI model: {e}")),
1076        }
1077    }
1078}
1079
1080// ============================================================================
1081// Tests
1082// ============================================================================
1083
1084#[cfg(test)]
1085#[allow(clippy::float_cmp, clippy::unwrap_used)]
1086mod tests {
1087    use super::*;
1088
    // ==================== Model Metadata Tests ====================
    // Pins the defaults baked into `ModelMetadata::default()`; update in
    // lockstep with the impl.

    #[test]
    fn test_model_metadata_default() {
        let meta = ModelMetadata::default();
        assert_eq!(meta.name, "Pong AI v1");
        assert_eq!(meta.version, "1.0.0");
        assert_eq!(meta.author, "PAIML");
        assert_eq!(meta.license, "MIT");
    }

    // ==================== DifficultyProfile Tests ====================

    #[test]
    fn test_difficulty_profile_default() {
        // Default profile is the mid-curve level 5 ("Challenging").
        let profile = DifficultyProfile::default();
        assert_eq!(profile.level, 5);
        assert_eq!(profile.name, "Challenging");
        assert!((profile.reaction_delay_ms - 180.0).abs() < 1.0);
    }

    #[test]
    fn test_difficulty_profile_values_valid() {
        // Sanity-check that every tunable sits in its documented range.
        let profile = DifficultyProfile::default();
        assert!(profile.reaction_delay_ms > 0.0);
        assert!(profile.prediction_accuracy >= 0.0 && profile.prediction_accuracy <= 1.0);
        assert!(profile.max_paddle_speed > 0.0);
        assert!(profile.error_magnitude >= 0.0);
        assert!(profile.aggression >= 0.0 && profile.aggression <= 1.0);
    }

    // ==================== PongAIModel Tests ====================

    #[test]
    fn test_pong_ai_model_default() {
        let model = PongAIModel::default();
        assert_eq!(model.metadata.version, "1.0.0");
        assert_eq!(model.difficulty_profiles.len(), 10);
        assert_eq!(model.model_type, "behavior_profile");
    }

    #[test]
    fn test_pong_ai_model_new() {
        let model = PongAIModel::new("Test AI", "A test AI model");
        assert_eq!(model.metadata.name, "Test AI");
        assert_eq!(model.metadata.description, "A test AI model");
        assert_eq!(model.difficulty_profiles.len(), 10);
    }

    #[test]
    fn test_generate_default_profiles_count() {
        let profiles = PongAIModel::generate_default_profiles();
        assert_eq!(profiles.len(), 10);
    }

    #[test]
    fn test_generate_default_profiles_levels() {
        // Profile index must equal its embedded level (0..=9).
        let profiles = PongAIModel::generate_default_profiles();
        for (i, profile) in profiles.iter().enumerate() {
            assert_eq!(profile.level, i as u8);
        }
    }

    #[test]
    fn test_generate_default_profiles_difficulty_curve() {
        let profiles = PongAIModel::generate_default_profiles();

        // Level 0 should be easiest (slowest, least accurate)
        assert!(profiles[0].reaction_delay_ms > profiles[9].reaction_delay_ms);
        assert!(profiles[0].prediction_accuracy < profiles[9].prediction_accuracy);
        assert!(profiles[0].max_paddle_speed < profiles[9].max_paddle_speed);
        assert!(profiles[0].error_magnitude > profiles[9].error_magnitude);
    }

    #[test]
    fn test_generate_default_profiles_names() {
        let profiles = PongAIModel::generate_default_profiles();
        assert_eq!(profiles[0].name, "Training Wheels");
        assert_eq!(profiles[5].name, "Challenging");
        assert_eq!(profiles[9].name, "Perfect");
    }

    #[test]
    fn test_get_profile_valid_level() {
        let model = PongAIModel::default();
        let profile = model.get_profile(5);
        assert_eq!(profile.level, 5);
    }

    #[test]
    fn test_get_profile_clamped_high() {
        // Out-of-range levels clamp to the hardest profile, not panic.
        let model = PongAIModel::default();
        let profile = model.get_profile(100);
        assert_eq!(profile.level, 9);
    }

    #[test]
    fn test_model_serialization_roundtrip() {
        // JSON round-trip must preserve identity, profile count, and seed.
        let model = PongAIModel::default();
        let json = model.to_json();
        let parsed = PongAIModel::from_json(&json).unwrap();

        assert_eq!(parsed.metadata.name, model.metadata.name);
        assert_eq!(
            parsed.difficulty_profiles.len(),
            model.difficulty_profiles.len()
        );
        assert_eq!(parsed.determinism.seed, model.determinism.seed);
    }

    #[test]
    fn test_model_size_reasonable() {
        let model = PongAIModel::default();
        let size = model.serialized_size();
        // Model should be reasonably small (under 5KB)
        assert!(size < 5000, "Model size {size} bytes exceeds 5KB");
    }

    // ==================== FlowChannel Tests ====================

    #[test]
    fn test_flow_channel_labels() {
        // Labels are user-facing strings shown in the UI widget.
        assert_eq!(FlowChannel::Boredom.label(), "Bored (too easy)");
        assert_eq!(FlowChannel::Flow.label(), "In Flow (optimal)");
        assert_eq!(FlowChannel::Anxiety.label(), "Anxious (too hard)");
    }
1215
    // ==================== PlayerMetrics Tests ====================
    // Exercises hit/score bookkeeping, the sliding history window, win-rate
    // math, flow-channel detection, and skill estimation.

    #[test]
    fn test_player_metrics_new() {
        let metrics = PlayerMetrics::new();
        assert_eq!(metrics.player_hits, 0);
        assert_eq!(metrics.player_misses, 0);
        assert_eq!(metrics.current_rally, 0);
    }

    #[test]
    fn test_player_metrics_record_hit() {
        let mut metrics = PlayerMetrics::new();
        metrics.record_hit();
        assert_eq!(metrics.player_hits, 1);
        assert_eq!(metrics.current_rally, 1);
    }

    #[test]
    fn test_player_metrics_record_player_scored() {
        // Scoring finalizes the rally (length 2 here) and resets the count.
        let mut metrics = PlayerMetrics::new();
        metrics.record_hit();
        metrics.record_hit();
        metrics.record_player_scored(10);

        assert_eq!(metrics.player_points, 1);
        assert_eq!(metrics.current_rally, 0);
        assert_eq!(metrics.rally_history.len(), 1);
        assert_eq!(metrics.rally_history[0], 2);
        assert!(metrics.point_history[0]); // Player won
    }

    #[test]
    fn test_player_metrics_record_ai_scored() {
        let mut metrics = PlayerMetrics::new();
        metrics.record_hit();
        metrics.record_ai_scored(10);

        assert_eq!(metrics.ai_points, 1);
        assert_eq!(metrics.player_misses, 1);
        assert!(!metrics.point_history[0]); // Player lost
    }

    #[test]
    fn test_player_metrics_window_size() {
        // 15 points recorded with window 5 -> history trimmed to 5.
        let mut metrics = PlayerMetrics::new();

        for _ in 0..15 {
            metrics.record_player_scored(5);
        }

        assert_eq!(metrics.point_history.len(), 5);
    }

    #[test]
    fn test_player_metrics_recent_win_rate_unknown() {
        // No history: win rate defaults to the neutral 0.5.
        let metrics = PlayerMetrics::new();
        let rate = metrics.recent_win_rate();
        assert!((rate - 0.5).abs() < 0.01);
    }

    #[test]
    fn test_player_metrics_recent_win_rate_all_wins() {
        let mut metrics = PlayerMetrics::new();
        for _ in 0..5 {
            metrics.record_player_scored(10);
        }
        let rate = metrics.recent_win_rate();
        assert!((rate - 1.0).abs() < 0.01);
    }

    #[test]
    fn test_player_metrics_recent_win_rate_all_losses() {
        let mut metrics = PlayerMetrics::new();
        for _ in 0..5 {
            metrics.record_ai_scored(10);
        }
        let rate = metrics.recent_win_rate();
        assert!((rate - 0.0).abs() < 0.01);
    }

    #[test]
    fn test_player_metrics_detect_flow_channel_boredom() {
        // 80% win rate should clear the default boredom threshold.
        let mut metrics = PlayerMetrics::new();
        // Win 8 out of 10 points
        for _ in 0..8 {
            metrics.record_player_scored(10);
        }
        for _ in 0..2 {
            metrics.record_ai_scored(10);
        }

        let config = FlowTheoryConfig::default();
        let channel = metrics.detect_flow_channel(&config);
        assert_eq!(channel, FlowChannel::Boredom);
    }

    #[test]
    fn test_player_metrics_detect_flow_channel_anxiety() {
        // 20% win rate should fall below the default anxiety threshold.
        let mut metrics = PlayerMetrics::new();
        // Win only 2 out of 10 points
        for _ in 0..2 {
            metrics.record_player_scored(10);
        }
        for _ in 0..8 {
            metrics.record_ai_scored(10);
        }

        let config = FlowTheoryConfig::default();
        let channel = metrics.detect_flow_channel(&config);
        assert_eq!(channel, FlowChannel::Anxiety);
    }

    #[test]
    fn test_player_metrics_detect_flow_channel_flow() {
        let mut metrics = PlayerMetrics::new();
        // Win 5 out of 10 points (balanced)
        for _ in 0..5 {
            metrics.record_player_scored(10);
        }
        for _ in 0..5 {
            metrics.record_ai_scored(10);
        }

        let config = FlowTheoryConfig::default();
        let channel = metrics.detect_flow_channel(&config);
        assert_eq!(channel, FlowChannel::Flow);
    }

    #[test]
    fn test_player_metrics_estimate_skill_unknown() {
        // No data: skill estimate defaults to the neutral 0.5.
        let metrics = PlayerMetrics::new();
        let skill = metrics.estimate_skill();
        assert!((skill - 0.5).abs() < 0.01);
    }

    #[test]
    fn test_player_metrics_estimate_skill_good_player() {
        let mut metrics = PlayerMetrics::new();
        // Good player: high hit rate, long rallies
        for _ in 0..10 {
            metrics.record_hit();
        }
        metrics.record_ai_scored(10);

        let skill = metrics.estimate_skill();
        assert!(skill > 0.7);
    }
1364
    // ==================== PongAI Tests ====================
    // Covers construction, difficulty clamping, the deterministic RNG,
    // per-frame update behavior, DDA reactions, reset, and model I/O.

    #[test]
    fn test_pong_ai_default() {
        let ai = PongAI::default();
        assert_eq!(ai.difficulty(), 5);
        assert_eq!(ai.flow_channel(), FlowChannel::Flow);
    }

    #[test]
    fn test_pong_ai_with_difficulty() {
        let ai = PongAI::with_difficulty(3);
        assert_eq!(ai.difficulty(), 3);
    }

    #[test]
    fn test_pong_ai_set_difficulty() {
        let mut ai = PongAI::default();
        ai.set_difficulty(7);
        assert_eq!(ai.difficulty(), 7);
    }

    #[test]
    fn test_pong_ai_set_difficulty_clamped() {
        // Out-of-range levels clamp to 9, mirroring `get_profile`.
        let mut ai = PongAI::default();
        ai.set_difficulty(100);
        assert_eq!(ai.difficulty(), 9);
    }

    #[test]
    fn test_pong_ai_profile() {
        let ai = PongAI::default();
        let profile = ai.profile();
        assert_eq!(profile.level, 5);
        assert_eq!(profile.name, "Challenging");
    }

    #[test]
    fn test_pong_ai_deterministic_rng() {
        let mut ai1 = PongAI::default();
        let mut ai2 = PongAI::default();

        // Same seed should produce same sequence
        let r1 = ai1.next_random();
        let r2 = ai2.next_random();

        assert!((r1 - r2).abs() < 0.0001);
    }

    #[test]
    fn test_pong_ai_update_ball_not_approaching() {
        let mut ai = PongAI::default();

        let velocity = ai.update(
            400.0, 300.0, // ball position
            -200.0, 100.0, // ball velocity (moving left)
            300.0, 100.0, // paddle position and height
            800.0, 600.0, // canvas size
            0.016, // dt
        );

        // Output must always respect the profile's speed cap.
        assert!(velocity.abs() < ai.profile().max_paddle_speed + 1.0);
    }

    #[test]
    fn test_pong_ai_adapt_difficulty_on_boredom() {
        let mut ai = PongAI::with_difficulty(5);

        // Simulate player winning too much
        for _ in 0..10 {
            ai.record_player_scored();
        }

        let initial = ai.difficulty();
        ai.adapt_difficulty();

        // Difficulty should increase
        assert!(ai.difficulty() >= initial);
        assert_eq!(ai.flow_channel(), FlowChannel::Boredom);
    }

    #[test]
    fn test_pong_ai_adapt_difficulty_on_anxiety() {
        let mut ai = PongAI::with_difficulty(5);

        // Simulate player losing too much
        for _ in 0..10 {
            ai.record_player_miss();
        }

        let initial = ai.difficulty();
        ai.adapt_difficulty();

        // Difficulty should decrease
        assert!(ai.difficulty() <= initial);
        assert_eq!(ai.flow_channel(), FlowChannel::Anxiety);
    }

    #[test]
    fn test_pong_ai_reset() {
        let mut ai = PongAI::default();
        ai.record_player_hit();
        ai.set_difficulty(9);
        ai.time_since_ball_visible = 1.0;

        ai.reset();

        // Metrics and tracking cleared; note difficulty is NOT reset.
        assert_eq!(ai.metrics().player_hits, 0);
        assert!((ai.time_since_ball_visible - 0.0).abs() < 0.01);
        assert_eq!(ai.flow_channel(), FlowChannel::Flow);
    }

    #[test]
    fn test_pong_ai_export_model() {
        let ai = PongAI::default();
        let json = ai.export_model();

        assert!(json.contains("Pong AI v1"));
        assert!(json.contains("difficulty_profiles"));
        assert!(json.contains("flow_theory"));
    }

    #[test]
    fn test_pong_ai_model_info_json() {
        let ai = PongAI::default();
        let json = ai.model_info_json();

        assert!(json.contains("name"));
        assert!(json.contains("version"));
        assert!(json.contains("current_difficulty"));
        assert!(json.contains("flow_channel"));
    }

    #[test]
    fn test_pong_ai_load_model_from_json() {
        let mut ai = PongAI::default();
        let model = PongAIModel::new("Custom AI", "Custom description");
        let json = model.to_json();

        ai.load_model_from_json(&json).unwrap();

        assert_eq!(ai.model().metadata.name, "Custom AI");
    }
1508
    // ==================== Integration Tests ====================
    // Multi-frame simulations exercising update + DDA end to end.

    #[test]
    fn test_full_rally_simulation() {
        let mut ai = PongAI::with_difficulty(5);

        let mut ball_x = 400.0;
        let mut ball_y = 300.0;
        let ball_vx = 300.0;
        let ball_vy = 150.0;
        let mut paddle_y = 300.0;

        // Run for 60 frames (1 second at 60fps)
        for _ in 0..60 {
            let velocity = ai.update(
                ball_x,
                ball_y,
                ball_vx,
                ball_vy,
                paddle_y,
                100.0,
                800.0,
                600.0,
                1.0 / 60.0,
            );

            // Integrate paddle motion, keeping it inside the playfield.
            paddle_y += velocity * (1.0 / 60.0);
            paddle_y = paddle_y.clamp(50.0, 550.0);

            ball_x += ball_vx * (1.0 / 60.0);
            ball_y += ball_vy * (1.0 / 60.0);
        }

        // AI should have moved toward the ball
        assert!(paddle_y != 300.0 || ball_x < 500.0);
    }

    #[test]
    fn test_difficulty_affects_behavior() {
        let mut ai_easy = PongAI::with_difficulty(0);
        let mut ai_hard = PongAI::with_difficulty(9);

        // Same ball position
        let ball_x = 600.0;
        let ball_y = 400.0;
        let ball_vx = 200.0;
        let ball_vy = 100.0;

        // Run enough time to pass reaction delays
        for _ in 0..120 {
            let _ = ai_easy.update(
                ball_x,
                ball_y,
                ball_vx,
                ball_vy,
                300.0,
                100.0,
                800.0,
                600.0,
                1.0 / 60.0,
            );
            let _ = ai_hard.update(
                ball_x,
                ball_y,
                ball_vx,
                ball_vy,
                300.0,
                100.0,
                800.0,
                600.0,
                1.0 / 60.0,
            );
        }

        // Hard AI should have acquired ball faster
        assert!(ai_hard.ball_acquired);
    }

    #[test]
    fn test_flow_theory_dda_keeps_player_engaged() {
        let mut ai = PongAI::with_difficulty(5);

        // Simulate alternating wins/losses (balanced play)
        for i in 0..20 {
            if i % 2 == 0 {
                ai.record_player_scored();
            } else {
                ai.record_player_miss();
            }
            ai.adapt_difficulty();
        }

        // Should stay in flow (difficulty should hover around middle)
        assert!(ai.difficulty() >= 3 && ai.difficulty() <= 7);
        assert_eq!(ai.flow_channel(), FlowChannel::Flow);
    }
1605
    // ==================== Coverage Gap Tests ====================
    // Error paths and window trimming not reached by the happy-path tests.

    #[test]
    fn test_load_model_from_bytes_error_path() {
        let mut ai = PongAI::default();

        // Invalid bytes should return an error
        let invalid_bytes: &[u8] = &[0, 1, 2, 3, 4, 5];
        let result = ai.load_model_from_bytes(invalid_bytes);
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("Failed to load AI model"));
    }

    #[test]
    fn test_load_model_from_bytes_empty() {
        let mut ai = PongAI::default();

        // Empty bytes should return an error
        let empty_bytes: &[u8] = &[];
        let result = ai.load_model_from_bytes(empty_bytes);
        assert!(result.is_err());
    }

    #[test]
    fn test_rng_determinism_with_seed() {
        // Create two AIs with the same seed
        let mut ai1 = PongAI::default();
        let mut ai2 = PongAI::default();

        // Set the same RNG state
        ai1.rng_state = 12345;
        ai2.rng_state = 12345;

        // Run several updates - they should produce identical results
        for _ in 0..10 {
            let v1 = ai1.update(
                400.0, 300.0, 200.0, 100.0, 300.0, 100.0, 800.0, 600.0, 0.016,
            );
            let v2 = ai2.update(
                400.0, 300.0, 200.0, 100.0, 300.0, 100.0, 800.0, 600.0, 0.016,
            );
            assert_eq!(v1, v2);
        }
    }

    #[test]
    fn test_finalize_rally_window_trimming() {
        let mut ai = PongAI::default();

        // Record many rallies to trigger window trimming
        for _ in 0..100 {
            ai.record_player_scored();
        }

        // The history should be limited to window size (30 for DDA)
        // This tests the finalize_rally path indirectly via metrics
        assert!(ai.metrics.rally_history.len() <= 30);
    }
1664
    // ==================== High-Priority Coverage Tests ====================
    // Additional coverage for model construction/serialization and the
    // DecisionExplanation (SHAP-style) widget plumbing.
    // NOTE(review): a few of these overlap earlier tests (e.g. the
    // `PongAIModel::new` and `generate_default_profiles` cases) — kept as-is.

    #[test]
    fn test_pong_ai_model_new_with_metadata() {
        let model = PongAIModel::new("TestAI", "A test AI model");
        assert_eq!(model.metadata.name, "TestAI");
        assert_eq!(model.metadata.description, "A test AI model");
        // Inherits default profiles
        assert_eq!(model.difficulty_profiles.len(), 10);
    }

    #[test]
    fn test_pong_ai_model_generate_default_profiles() {
        let profiles = PongAIModel::generate_default_profiles();
        assert_eq!(profiles.len(), 10);

        // Level 0 - Training Wheels
        assert_eq!(profiles[0].level, 0);
        assert_eq!(profiles[0].name, "Training Wheels");
        assert!(profiles[0].reaction_delay_ms > 400.0); // High delay

        // Level 9 - Perfect (UNBEATABLE)
        let perfect = &profiles[9];
        assert_eq!(perfect.level, 9);
        assert_eq!(perfect.name, "Perfect");
        assert!((perfect.reaction_delay_ms - 0.0).abs() < 0.001); // Instant
        assert!((perfect.prediction_accuracy - 1.0).abs() < 0.001); // Perfect
        assert!((perfect.error_magnitude - 0.0).abs() < 0.001); // Zero error
    }

    #[test]
    fn test_pong_ai_model_to_json_compact() {
        let model = PongAIModel::default();
        let json = model.to_json_compact();
        assert!(!json.is_empty());
        assert!(json.len() < model.to_json().len()); // Compact is smaller
    }

    #[test]
    fn test_pong_ai_model_from_json() {
        let model = PongAIModel::default();
        let json = model.to_json();
        let loaded = PongAIModel::from_json(&json);
        assert!(loaded.is_ok());
        let loaded = loaded.unwrap();
        assert_eq!(loaded.metadata.name, model.metadata.name);
        assert_eq!(loaded.difficulty_profiles.len(), 10);
    }

    #[test]
    fn test_pong_ai_model_from_json_invalid() {
        let result = PongAIModel::from_json("invalid json");
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.contains("Failed to parse model"));
    }

    #[test]
    fn test_player_metrics_average_rally() {
        let mut metrics = PlayerMetrics::default();
        // Empty history returns default
        assert!((metrics.average_rally() - 5.0).abs() < 0.001);

        // Add some rallies
        metrics.rally_history.push(10);
        metrics.rally_history.push(20);
        metrics.rally_history.push(30);
        assert!((metrics.average_rally() - 20.0).abs() < 0.001);
    }

    #[test]
    fn test_decision_explanation_compute_contributions() {
        let mut explanation = DecisionExplanation {
            ball_approaching: true,
            ball_vx: 200.0,
            reaction_delay_ms: 100.0,
            distance_to_target: 50.0,
            prediction_accuracy: 0.8,
            max_paddle_speed: 400.0,
            applied_error: 10.0,
            state: DecisionState::Tracking,
            ..Default::default()
        };

        explanation.compute_contributions();
        assert!(!explanation.contributions.is_empty());
        assert_eq!(explanation.contributions.len(), 6);
        // Each contribution should have a name and importance
        for contrib in &explanation.contributions {
            assert!(!contrib.name.is_empty());
            assert!(contrib.importance >= 0.0 && contrib.importance <= 1.0);
        }
    }

    #[test]
    fn test_decision_explanation_generate_rationale() {
        // Each decision state must yield its own human-readable rationale.

        // Test Idle state
        let mut idle = DecisionExplanation {
            state: DecisionState::Idle,
            ball_vx: -100.0,
            ..Default::default()
        };
        idle.generate_rationale();
        assert!(idle.rationale.contains("Ball moving away"));

        // Test Reacting state
        let mut reacting = DecisionExplanation {
            state: DecisionState::Reacting,
            reaction_delay_ms: 150.0,
            reaction_elapsed_ms: 50.0,
            difficulty_level: 3,
            ..Default::default()
        };
        reacting.generate_rationale();
        assert!(reacting.rationale.contains("Reaction delay"));
        assert!(reacting.rationale.contains("Level 3"));

        // Test Tracking state
        let mut tracking = DecisionExplanation {
            state: DecisionState::Tracking,
            target_y: 300.0,
            prediction_accuracy: 0.75,
            max_paddle_speed: 400.0,
            output_velocity: -50.0, // Moving UP
            ..Default::default()
        };
        tracking.generate_rationale();
        assert!(tracking.rationale.contains("Tracking ball"));
        assert!(tracking.rationale.contains("UP"));

        // Test Ready state
        let mut ready = DecisionExplanation {
            state: DecisionState::Ready,
            target_y: 350.0,
            ..Default::default()
        };
        ready.generate_rationale();
        assert!(ready.rationale.contains("At target"));
    }

    #[test]
    fn test_pong_ai_with_difficulty_constructor() {
        let ai = PongAI::with_difficulty(7);
        assert_eq!(ai.difficulty(), 7);
        assert_eq!(ai.difficulty_name(), "Very Hard");
    }

    #[test]
    fn test_pong_ai_difficulty_name_levels() {
        let ai = PongAI::with_difficulty(0);
        assert_eq!(ai.difficulty_name(), "Training Wheels");

        let ai = PongAI::with_difficulty(5);
        assert_eq!(ai.difficulty_name(), "Challenging");
    }

    #[test]
    fn test_pong_ai_metrics_mut() {
        let mut ai = PongAI::default();
        let metrics = ai.metrics_mut();
        metrics.player_hits = 100;
        assert_eq!(ai.metrics().player_hits, 100);
    }

    #[test]
    fn test_pong_ai_explanation_accessor() {
        // A fresh AI has not decided anything yet, so it reports Idle.
        let ai = PongAI::default();
        let explanation = ai.explanation();
        assert!(matches!(explanation.state, DecisionState::Idle));
    }

    #[test]
    fn test_pong_ai_export_explanation() {
        let ai = PongAI::default();
        let json = ai.export_explanation();
        assert!(json.contains("state"));
        assert!(json.contains("target_y"));
    }
1843
1844    #[test]
1845    fn test_decision_explanation_compute_contributions_reacting() {
1846        let mut explanation = DecisionExplanation {
1847            ball_approaching: true,
1848            ball_vx: 200.0,
1849            reaction_delay_ms: 200.0,
1850            distance_to_target: 0.0,
1851            prediction_accuracy: 0.5,
1852            max_paddle_speed: 300.0,
1853            applied_error: 0.0,
1854            state: DecisionState::Reacting, // Reacting state gives different contribution
1855            ..Default::default()
1856        };
1857
1858        explanation.compute_contributions();
1859        // Find reaction delay contribution
1860        let reaction_contrib = explanation
1861            .contributions
1862            .iter()
1863            .find(|c| c.name == "Reaction Delay");
1864        assert!(reaction_contrib.is_some());
1865        assert!(reaction_contrib.unwrap().contribution > 0.0); // Should be positive when reacting
1866    }
1867}