// pc_rl_core/pc_actor.rs
1// Author: Julian Bolivar
2// Version: 1.0.0
3// Date: 2026-03-25
4
5//! Predictive Coding Actor Network.
6//!
7//! Implements an actor that uses iterative top-down/bottom-up predictive coding
8//! inference loops instead of standard feedforward passes. The prediction error
9//! (surprise score) drives learning rate modulation in the actor-critic agent.
10
11use rand::Rng;
12use serde::{Deserialize, Serialize};
13
14use crate::activation::Activation;
15use crate::error::PcError;
16use crate::layer::{Layer, LayerDef};
17use crate::linalg::cpu::CpuLinAlg;
18use crate::linalg::LinAlg;
19
/// Configuration for the predictive coding actor network.
///
/// # Examples
///
/// ```
/// use pc_rl_core::activation::Activation;
/// use pc_rl_core::layer::LayerDef;
/// use pc_rl_core::pc_actor::PcActorConfig;
///
/// let config = PcActorConfig {
///     input_size: 9,
///     hidden_layers: vec![LayerDef { size: 18, activation: Activation::Tanh }],
///     output_size: 9,
///     output_activation: Activation::Tanh,
///     alpha: 0.1,
///     tol: 0.01,
///     min_steps: 1,
///     max_steps: 20,
///     lr_weights: 0.01,
///     synchronous: true,
///     temperature: 1.0,
///     local_lambda: 1.0,
///     residual: false,
///     rezero_init: 0.001,
/// };
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PcActorConfig {
    /// Number of input features (e.g. 9 for tic-tac-toe board).
    pub input_size: usize,
    /// Hidden layer topology definitions.
    pub hidden_layers: Vec<LayerDef>,
    /// Number of output actions.
    pub output_size: usize,
    /// Activation function for the output layer.
    pub output_activation: Activation,
    /// Inference learning rate for PC loop state updates (`h += alpha * error`).
    /// Set to 0.0 to disable PC inference (network behaves as standard MLP).
    /// Active regardless of `residual` setting. Default: 0.1.
    #[serde(default = "default_alpha")]
    pub alpha: f64,
    /// Convergence threshold for RMS prediction error.
    /// PC loop exits early when surprise < tol (after at least `min_steps`).
    /// Active regardless of `residual` setting. Default: 0.01.
    #[serde(default = "default_tol")]
    pub tol: f64,
    /// Minimum PC inference steps before convergence check is allowed.
    /// Active regardless of `residual` setting. Default: 1.
    #[serde(default = "default_min_steps")]
    pub min_steps: usize,
    /// Maximum PC inference steps per action.
    /// Active regardless of `residual` setting. Default: 20.
    #[serde(default = "default_max_steps")]
    pub max_steps: usize,
    /// Base learning rate for weight updates. Default: 0.01.
    #[serde(default = "default_lr_weights")]
    pub lr_weights: f64,
    /// If true, use synchronous snapshot mode; otherwise in-place. Default: true.
    #[serde(default = "default_synchronous")]
    pub synchronous: bool,
    /// Softmax temperature for action selection. Default: 1.0.
    #[serde(default = "default_temperature")]
    pub temperature: f64,
    /// Blend factor for hidden layer weight updates, range `[0.0, 1.0]`.
    ///
    /// Controls how hidden layers combine two gradient signals:
    /// `delta = lambda * backprop_grad + (1 - lambda) * pc_prediction_error`
    ///
    /// - `1.0` — Pure backprop: reward signal propagated from output (default).
    /// - `0.0` — Pure local PC: prediction errors from inference loop
    ///   used as gradients (Millidge et al. 2022). No vanishing gradient
    ///   but no reward signal reaches hidden layers.
    /// - `0.0 < lambda < 1.0` — Hybrid: reward-aware backprop regularized
    ///   by local PC consistency errors.
    ///
    /// The output layer always uses standard backprop regardless of this value.
    #[serde(default = "default_local_lambda")]
    pub local_lambda: f64,
    /// Enable residual skip connections between hidden layers.
    /// When false, `rezero_init` is ignored. When true, skip connections
    /// with learnable ReZero scaling are added between consecutive hidden
    /// layers (all layers `i >= 1`, never the first, since `input_size`
    /// typically differs from the first hidden size). Adjacent hidden
    /// layers of *different* sizes are bridged by a Xavier-initialized
    /// projection matrix on the skip path (same-size pairs use identity).
    #[serde(default)]
    pub residual: bool,
    /// Initial value for ReZero scaling factors on residual connections.
    /// Only used when `residual = true`. Controls initial contribution of
    /// the nonlinear component: `h[i] = rezero_init * tanh(...) + h[i-1]`.
    ///
    /// - `0.001` — Near-identity start (ReZero: network learns depth gradually)
    /// - `1.0` — Standard ResNet residual (full contribution from start)
    ///
    /// Ignored when `residual = false`.
    #[serde(default = "default_rezero_init")]
    pub rezero_init: f64,
}
116
/// Default PC inference learning rate (`alpha`).
fn default_alpha() -> f64 { 0.1 }

/// Default convergence tolerance (`tol`) for the PC loop.
fn default_tol() -> f64 { 0.01 }

/// Default minimum PC inference steps (`min_steps`).
fn default_min_steps() -> usize { 1 }

/// Default maximum PC inference steps (`max_steps`).
fn default_max_steps() -> usize { 20 }

/// Default base learning rate (`lr_weights`) for weight updates.
fn default_lr_weights() -> f64 { 0.01 }

/// Default synchronous (snapshot) mode.
fn default_synchronous() -> bool { true }

/// Default softmax temperature for action selection.
fn default_temperature() -> f64 { 1.0 }

/// Default `local_lambda`: 1.0, i.e. pure backprop.
fn default_local_lambda() -> f64 { 1.0 }

/// Default `rezero_init`: 0.001, i.e. near-identity at start.
fn default_rezero_init() -> f64 { 0.001 }
161
/// Result of the predictive coding inference loop.
///
/// Contains converged output logits, hidden state representations,
/// and diagnostic information about the inference process.
/// Produced by [`PcActor::infer`] and consumed by the weight-update path.
///
/// Generic over a [`LinAlg`] backend `L`. Defaults to [`CpuLinAlg`].
#[derive(Debug, Clone)]
pub struct InferResult<L: LinAlg = CpuLinAlg> {
    /// Converged output logits.
    pub y_conv: L::Vector,
    /// All hidden states concatenated into one vector (fed to critic).
    pub latent_concat: L::Vector,
    /// Per-layer hidden state activations, bottom (first hidden) to top.
    pub hidden_states: Vec<L::Vector>,
    /// Per-layer prediction errors from the last PC inference step.
    /// Ordered from top hidden layer to bottom (reverse layer order).
    pub prediction_errors: Vec<L::Vector>,
    /// RMS prediction error across layers (the "surprise" signal).
    pub surprise_score: f64,
    /// Number of inference steps performed (0 when `max_steps == 0`).
    pub steps_used: usize,
    /// Whether the inference loop converged within tolerance.
    pub converged: bool,
    /// Per-layer tanh components for residual layers.
    /// `None` for non-skip layers, `Some(tanh_out)` for skip-eligible layers.
    /// Needed for correct backward pass (derivative on tanh_out, not full h\[i\]).
    pub tanh_components: Vec<Option<L::Vector>>,
}
190
/// Action selection mode, used by [`PcActor::select_action`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SelectionMode {
    /// Stochastic sampling from the masked softmax distribution.
    Training,
    /// Deterministic argmax selection over valid actions.
    Play,
}
199
/// Predictive coding actor network.
///
/// Uses iterative top-down/bottom-up inference loops to produce
/// stable hidden representations and output logits.
///
/// Generic over a [`LinAlg`] backend `L`. Defaults to [`CpuLinAlg`].
///
/// # Examples
///
/// ```
/// use pc_rl_core::activation::Activation;
/// use pc_rl_core::layer::LayerDef;
/// use pc_rl_core::linalg::cpu::CpuLinAlg;
/// use pc_rl_core::pc_actor::{PcActor, PcActorConfig, SelectionMode};
/// use rand::SeedableRng;
/// use rand::rngs::StdRng;
///
/// let config = PcActorConfig {
///     input_size: 9,
///     hidden_layers: vec![LayerDef { size: 18, activation: Activation::Tanh }],
///     output_size: 9,
///     output_activation: Activation::Tanh,
///     alpha: 0.1, tol: 0.01, min_steps: 1, max_steps: 20,
///     lr_weights: 0.01, synchronous: true, temperature: 1.0,
///     local_lambda: 1.0,
///     residual: false,
///     rezero_init: 0.001,
/// };
/// let mut rng = StdRng::seed_from_u64(42);
/// let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
/// let result = actor.infer(&[0.0; 9]);
/// assert_eq!(result.y_conv.len(), 9);
/// ```
#[derive(Debug)]
pub struct PcActor<L: LinAlg = CpuLinAlg> {
    /// Network layers: hidden_layers.len() + 1 (last one is the output layer).
    pub(crate) layers: Vec<Layer<L>>,
    /// Actor configuration (topology and hyperparameters).
    pub config: PcActorConfig,
    /// ReZero scaling factors for skip connections. One per skip layer (all i >= 1 when residual=true).
    pub(crate) rezero_alpha: Vec<f64>,
    /// Projection matrices for skip connections between layers of different sizes.
    /// One entry per skip layer: `None` for identity (same size), `Some(Matrix)` for projection.
    pub(crate) skip_projections: Vec<Option<L::Matrix>>,
    /// Backend used for linear algebra operations.
    pub(crate) backend: L,
}
247
248impl<L: LinAlg> PcActor<L> {
249    /// Creates a new PC actor with Xavier-initialized layers.
250    ///
251    /// # Arguments
252    ///
253    /// * `config` - Actor configuration specifying topology and hyperparameters.
254    /// * `rng` - Random number generator for weight initialization.
255    ///
256    /// # Errors
257    ///
258    /// Returns `PcError::ConfigValidation` if `input_size`, `output_size`,
259    /// or `temperature` are invalid.
260    pub fn new(backend: L, config: PcActorConfig, rng: &mut impl Rng) -> Result<Self, PcError> {
261        if config.input_size == 0 {
262            return Err(PcError::ConfigValidation("input_size must be > 0".into()));
263        }
264        if config.output_size == 0 {
265            return Err(PcError::ConfigValidation("output_size must be > 0".into()));
266        }
267        if config.temperature <= 0.0 {
268            return Err(PcError::ConfigValidation(format!(
269                "temperature must be positive, got {}",
270                config.temperature
271            )));
272        }
273        if !(0.0..=1.0).contains(&config.local_lambda) {
274            return Err(PcError::ConfigValidation(format!(
275                "local_lambda must be in [0.0, 1.0], got {}",
276                config.local_lambda
277            )));
278        }
279        if config.rezero_init < 0.0 {
280            return Err(PcError::ConfigValidation(format!(
281                "rezero_init must be >= 0, got {}",
282                config.rezero_init
283            )));
284        }
285        let mut layers: Vec<Layer<L>> = Vec::new();
286        let mut prev_size = config.input_size;
287
288        for def in &config.hidden_layers {
289            layers.push(Layer::<L>::new(
290                prev_size,
291                def.size,
292                def.activation,
293                &backend,
294                rng,
295            ));
296            prev_size = def.size;
297        }
298
299        // Output layer
300        layers.push(Layer::<L>::new(
301            prev_size,
302            config.output_size,
303            config.output_activation,
304            &backend,
305            rng,
306        ));
307
308        // Compute rezero_alpha and skip_projections: one per skip layer (all i >= 1)
309        let (rezero_alpha, skip_projections) = if config.residual {
310            let mut alphas = Vec::new();
311            let mut projs = Vec::new();
312            for i in 1..config.hidden_layers.len() {
313                alphas.push(config.rezero_init);
314                if config.hidden_layers[i].size != config.hidden_layers[i - 1].size {
315                    projs.push(Some(backend.xavier_mat(
316                        config.hidden_layers[i].size,
317                        config.hidden_layers[i - 1].size,
318                        rng,
319                    )));
320                } else {
321                    projs.push(None);
322                }
323            }
324            (alphas, projs)
325        } else {
326            (Vec::new(), Vec::new())
327        };
328
329        Ok(Self {
330            layers,
331            config,
332            rezero_alpha,
333            skip_projections,
334            backend,
335        })
336    }
337
    /// Creates a child actor by crossing over two parent actors using CCA neuron alignment.
    ///
    /// Aligns hidden neurons functionally via CCA before blending weights.
    /// Input and output layers use positional crossover (no permutation problem).
    /// Layers whose shapes cannot be matched fall back to fresh Xavier init.
    ///
    /// # Arguments
    ///
    /// * `parent_a` - First parent (reference, typically higher fitness).
    /// * `parent_b` - Second parent (aligned to A via CCA).
    /// * `caches_a` - Per-layer activation matrices for parent A `[batch × neurons]`.
    /// * `caches_b` - Per-layer activation matrices for parent B `[batch × neurons]`.
    /// * `alpha` - Blending weight: 1.0 = all A, 0.0 = all B.
    /// * `child_config` - Topology configuration for the child network.
    /// * `rng` - Random number generator for Xavier initialization.
    ///
    /// # Errors
    ///
    /// Returns `PcError::ConfigValidation` if `child_config` is invalid.
    pub fn crossover(
        parent_a: &PcActor<L>,
        parent_b: &PcActor<L>,
        caches_a: &[L::Matrix],
        caches_b: &[L::Matrix],
        alpha: f64,
        child_config: PcActorConfig,
        rng: &mut impl Rng,
    ) -> Result<Self, PcError> {
        let num_child_hidden = child_config.hidden_layers.len();
        if num_child_hidden == 0 {
            return Err(PcError::ConfigValidation(
                "crossover requires at least one hidden layer".into(),
            ));
        }
        let num_parent_a_hidden = parent_a.config.hidden_layers.len();
        let num_parent_b_hidden = parent_b.config.hidden_layers.len();

        let mut layers: Vec<Layer<L>> = Vec::new();
        // Track the previous layer's CCA permutation for column propagation:
        // reordering layer i's rows requires reordering layer i+1's columns.
        let mut prev_perm: Option<Vec<usize>> = None;

        // ── Input layer (layer 0): CCA-aligned crossover ─────────
        let child_h0 = &child_config.hidden_layers[0];

        if parent_a.config.input_size == child_config.input_size
            && parent_b.config.input_size == child_config.input_size
        {
            let cache_a_0 = caches_a.first();
            let cache_b_0 = caches_b.first();
            let (layer, perm) = cca_align_and_blend_layer(
                &parent_a.backend,
                &parent_a.layers[0],
                &parent_b.layers[0],
                cache_a_0,
                cache_b_0,
                None, // No previous perm for first layer
                child_h0.size,
                parent_a.backend.mat_cols(&parent_a.layers[0].weights),
                child_h0.activation,
                alpha,
                rng,
            )?;
            layers.push(layer);
            prev_perm = perm;
        } else {
            // Input sizes differ: fresh Xavier-initialized layer for the child.
            layers.push(Layer::<L>::new(
                child_config.input_size,
                child_h0.size,
                child_h0.activation,
                &parent_a.backend,
                rng,
            ));
        }

        // ── Hidden layers 1..n: CCA-aligned crossover ────────────
        for h_idx in 1..num_child_hidden {
            let child_def = &child_config.hidden_layers[h_idx];
            let prev_child_size = child_config.hidden_layers[h_idx - 1].size;

            let a_has = h_idx < num_parent_a_hidden;
            let b_has = h_idx < num_parent_b_hidden;

            if a_has && b_has {
                let cache_a_h = caches_a.get(h_idx);
                let cache_b_h = caches_b.get(h_idx);
                let (layer, perm) = cca_align_and_blend_layer(
                    &parent_a.backend,
                    &parent_a.layers[h_idx],
                    &parent_b.layers[h_idx],
                    cache_a_h,
                    cache_b_h,
                    prev_perm.as_deref(),
                    child_def.size,
                    prev_child_size,
                    child_def.activation,
                    alpha,
                    rng,
                )?;
                layers.push(layer);
                prev_perm = perm;
            } else {
                // Only one (or neither) parent has this depth: Xavier init,
                // and drop the permutation chain since alignment is broken.
                layers.push(Layer::<L>::new(
                    prev_child_size,
                    child_def.size,
                    child_def.activation,
                    &parent_a.backend,
                    rng,
                ));
                prev_perm = None;
            }
        }

        // ── Output layer: positional crossover or Xavier ─────────
        // unwrap is safe: num_child_hidden > 0 was checked above.
        let last_child_hidden = child_config.hidden_layers.last().map(|d| d.size).unwrap();
        let a_out = parent_a.layers.last().unwrap();
        let b_out = parent_b.layers.last().unwrap();
        let a_out_in = parent_a.backend.mat_cols(&a_out.weights);
        let b_out_in = parent_a.backend.mat_cols(&b_out.weights);

        if a_out_in == last_child_hidden && b_out_in == last_child_hidden {
            // Positional crossover with column permutation from last hidden layer
            let b_out_permuted = if let Some(ref pp) = prev_perm {
                permute_cols(&parent_a.backend, &b_out.weights, pp)
            } else {
                b_out.weights.clone()
            };
            let out_rows = child_config.output_size;
            let mut weights = parent_a.backend.zeros_mat(out_rows, last_child_hidden);
            let mut biases = parent_a.backend.zeros_vec(out_rows);
            // Blend only rows present in both parents; extra child rows stay zero.
            let blend_rows = out_rows
                .min(parent_a.backend.mat_rows(&a_out.weights))
                .min(parent_a.backend.mat_rows(&b_out_permuted));
            for r in 0..blend_rows {
                for c in 0..last_child_hidden {
                    let va = parent_a.backend.mat_get(&a_out.weights, r, c);
                    let vb = parent_a.backend.mat_get(&b_out_permuted, r, c);
                    parent_a
                        .backend
                        .mat_set(&mut weights, r, c, alpha * va + (1.0 - alpha) * vb);
                }
                let ba = parent_a.backend.vec_get(&a_out.bias, r);
                let bb = parent_a.backend.vec_get(&b_out.bias, r);
                parent_a
                    .backend
                    .vec_set(&mut biases, r, alpha * ba + (1.0 - alpha) * bb);
            }
            layers.push(Layer {
                weights,
                bias: biases,
                activation: child_config.output_activation,
                backend: parent_a.backend.clone(),
            });
        } else {
            layers.push(Layer::<L>::new(
                last_child_hidden,
                child_config.output_size,
                child_config.output_activation,
                &parent_a.backend,
                rng,
            ));
        }

        // ── Residual components ──────────────────────────────────
        let (rezero_alpha, skip_projections) = if child_config.residual {
            let mut alphas = Vec::new();
            let mut projs = Vec::new();
            for i in 1..num_child_hidden {
                // ReZero alpha: blend if both parents have it
                let a_has_rz = i - 1 < parent_a.rezero_alpha.len();
                let b_has_rz = i - 1 < parent_b.rezero_alpha.len();
                let rz = if a_has_rz && b_has_rz {
                    alpha * parent_a.rezero_alpha[i - 1]
                        + (1.0 - alpha) * parent_b.rezero_alpha[i - 1]
                } else if a_has_rz {
                    parent_a.rezero_alpha[i - 1]
                } else if b_has_rz {
                    parent_b.rezero_alpha[i - 1]
                } else {
                    child_config.rezero_init
                };
                alphas.push(rz);

                // Skip projections: blend only when both parents have a
                // projection of exactly the child's shape; otherwise Xavier.
                let cur_size = child_config.hidden_layers[i].size;
                let prev_size = child_config.hidden_layers[i - 1].size;
                if cur_size != prev_size {
                    let a_proj = parent_a
                        .skip_projections
                        .get(i - 1)
                        .and_then(|p| p.as_ref());
                    let b_proj = parent_b
                        .skip_projections
                        .get(i - 1)
                        .and_then(|p| p.as_ref());
                    if let (Some(ap), Some(bp)) = (a_proj, b_proj) {
                        if parent_a.backend.mat_rows(ap) == cur_size
                            && parent_a.backend.mat_cols(ap) == prev_size
                            && parent_a.backend.mat_rows(bp) == cur_size
                            && parent_a.backend.mat_cols(bp) == prev_size
                        {
                            // Blend projections
                            let mut proj = parent_a.backend.zeros_mat(cur_size, prev_size);
                            for r in 0..cur_size {
                                for c in 0..prev_size {
                                    let va = parent_a.backend.mat_get(ap, r, c);
                                    let vb = parent_a.backend.mat_get(bp, r, c);
                                    parent_a.backend.mat_set(
                                        &mut proj,
                                        r,
                                        c,
                                        alpha * va + (1.0 - alpha) * vb,
                                    );
                                }
                            }
                            projs.push(Some(proj));
                        } else {
                            projs.push(Some(parent_a.backend.xavier_mat(cur_size, prev_size, rng)));
                        }
                    } else {
                        projs.push(Some(parent_a.backend.xavier_mat(cur_size, prev_size, rng)));
                    }
                } else {
                    projs.push(None);
                }
            }
            (alphas, projs)
        } else {
            (Vec::new(), Vec::new())
        };

        Ok(Self {
            layers,
            config: child_config,
            rezero_alpha,
            skip_projections,
            backend: parent_a.backend.clone(),
        })
    }
575
576    /// Returns the total size of the latent concatenation (sum of hidden layer sizes).
577    pub fn latent_size(&self) -> usize {
578        self.config.hidden_layers.iter().map(|def| def.size).sum()
579    }
580
581    /// Runs the predictive coding inference loop on the given input.
582    ///
583    /// This method is `&self` — it never modifies weights.
584    ///
585    /// # Arguments
586    ///
587    /// * `input` - Input vector of length `input_size`.
588    ///
589    /// # Panics
590    ///
591    /// Panics if `input.len() != config.input_size`.
592    /// Returns whether hidden layer `i` has a skip connection (identity or projection).
593    fn is_skip_layer(&self, i: usize) -> bool {
594        self.config.residual && i >= 1
595    }
596
597    /// Returns the rezero_alpha/skip_projections index for hidden layer `i`.
598    fn skip_alpha_index(&self, i: usize) -> Option<usize> {
599        if !self.is_skip_layer(i) {
600            return None;
601        }
602        Some(i - 1)
603    }
604
    /// Runs the predictive coding inference loop on the given input.
    ///
    /// A standard forward pass initializes the hidden states and output,
    /// then up to `max_steps` top-down refinement steps nudge each hidden
    /// state toward the value predicted from the layer above
    /// (`h += alpha * error`), stopping early once the RMS prediction
    /// error drops below `tol` (after at least `min_steps` steps).
    /// This method is `&self` — it never modifies weights.
    ///
    /// # Arguments
    ///
    /// * `input` - Input vector of length `input_size`.
    ///
    /// # Panics
    ///
    /// Panics if `input.len() != config.input_size`.
    pub fn infer(&self, input: &[f64]) -> InferResult<L> {
        assert_eq!(
            input.len(),
            self.config.input_size,
            "input size mismatch: got {}, expected {}",
            input.len(),
            self.config.input_size
        );

        let input_vec = self.backend.vec_from_slice(input);
        let n_hidden = self.config.hidden_layers.len();

        // Forward pass to initialize hidden states and output
        let mut hidden_states: Vec<L::Vector> = Vec::with_capacity(n_hidden);
        let mut tanh_components: Vec<Option<L::Vector>> = Vec::with_capacity(n_hidden);
        let mut prev = input_vec.clone();
        for (i, layer) in self.layers[..n_hidden].iter().enumerate() {
            let tanh_out = layer.forward(&prev);
            if let Some(alpha_idx) = self.skip_alpha_index(i) {
                // Residual layer: h[i] = skip(prev) + rezero_alpha * tanh_out
                let alpha = self.rezero_alpha[alpha_idx];
                let scaled = self.backend.vec_scale(&tanh_out, alpha);
                let skip_path = if let Some(ref proj) = self.skip_projections[alpha_idx] {
                    self.backend.mat_vec_mul(proj, &prev)
                } else {
                    prev.clone()
                };
                prev = self.backend.vec_add(&skip_path, &scaled);
                tanh_components.push(Some(tanh_out));
            } else {
                prev = tanh_out;
                tanh_components.push(None);
            }
            hidden_states.push(prev.clone());
        }
        // Output from last hidden (or input if no hidden)
        let last_input = if n_hidden > 0 {
            &hidden_states[n_hidden - 1]
        } else {
            &input_vec
        };
        let mut y = self.layers[n_hidden].forward(last_input);

        // PC inference loop
        let mut steps_used = 0;
        let mut converged = false;
        let mut surprise_score = 0.0;
        let mut last_errors: Vec<L::Vector> = Vec::new();

        for step in 0..self.config.max_steps {
            steps_used = step + 1;

            // Synchronous mode freezes states before updating (snapshot);
            // in-place mode reads live states that include prior updates.
            // Both modes need an owned copy of target[i] since we write
            // hidden_states[i] within the loop body.
            let snap_h: Vec<L::Vector>;
            let snap_tc: Vec<Option<L::Vector>>;
            let use_snapshot = self.config.synchronous;
            if use_snapshot {
                snap_h = hidden_states.clone();
                snap_tc = tanh_components.clone();
            } else {
                snap_h = Vec::new();
                snap_tc = Vec::new();
            }

            let mut error_vecs: Vec<L::Vector> = Vec::new();

            // Sweep layers top-down: each hidden state is pulled toward the
            // prediction made by the layer above it.
            for i in (0..n_hidden).rev() {
                // state_above: sync reads frozen snapshot, in-place reads live
                let state_above = if i == n_hidden - 1 {
                    &y
                } else if use_snapshot {
                    snap_tc[i + 1].as_ref().unwrap_or(&snap_h[i + 1])
                } else {
                    tanh_components[i + 1]
                        .as_ref()
                        .unwrap_or(&hidden_states[i + 1])
                };

                // target: always read pre-update value (clone to own it)
                let target = if use_snapshot {
                    snap_tc[i].as_ref().unwrap_or(&snap_h[i]).clone()
                } else {
                    tanh_components[i]
                        .as_ref()
                        .unwrap_or(&hidden_states[i])
                        .clone()
                };

                let prediction = self.layers[i + 1]
                    .transpose_forward(state_above, self.config.hidden_layers[i].activation);

                let error = self.backend.vec_sub(&prediction, &target);
                error_vecs.push(error.clone());

                // PC state update: h += alpha * (prediction - h)
                let updated_target = self
                    .backend
                    .vec_add(&target, &self.backend.vec_scale(&error, self.config.alpha));
                if let Some(alpha_idx) = self.skip_alpha_index(i) {
                    // Residual layer: the PC update applies to the tanh
                    // component; recompose h[i] = skip(h[i-1]) + rezero * tanh.
                    tanh_components[i] = Some(updated_target.clone());
                    let alpha = self.rezero_alpha[alpha_idx];
                    let prev_h = if i > 0 {
                        &hidden_states[i - 1]
                    } else {
                        &input_vec
                    };
                    let skip_path = if let Some(ref proj) = self.skip_projections[alpha_idx] {
                        self.backend.mat_vec_mul(proj, prev_h)
                    } else {
                        prev_h.clone()
                    };
                    hidden_states[i] = self
                        .backend
                        .vec_add(&skip_path, &self.backend.vec_scale(&updated_target, alpha));
                } else {
                    hidden_states[i] = updated_target;
                }
            }

            // Refresh the output from the (possibly updated) top hidden state.
            let top_hidden = if n_hidden > 0 {
                &hidden_states[n_hidden - 1]
            } else {
                &input_vec
            };
            y = self.layers[n_hidden].forward(top_hidden);

            let refs: Vec<&L::Vector> = error_vecs.iter().collect();
            surprise_score = self.backend.rms_error(&refs);
            last_errors = error_vecs;

            // Convergence check (alpha must be > 0 for meaningful convergence)
            if self.config.alpha > 0.0
                && step + 1 >= self.config.min_steps
                && surprise_score < self.config.tol
            {
                converged = true;
                break;
            }
        }

        // Build latent_concat (uses vec_to_vec for GPU compatibility)
        let mut latent_raw: Vec<f64> = Vec::new();
        for h in &hidden_states {
            latent_raw.extend_from_slice(&self.backend.vec_to_vec(h));
        }
        let latent_concat = self.backend.vec_from_slice(&latent_raw);

        InferResult {
            y_conv: y,
            latent_concat,
            hidden_states,
            prediction_errors: last_errors,
            surprise_score,
            steps_used,
            converged,
            tanh_components,
        }
    }
764
765    /// Selects an action given converged output logits and valid actions.
766    ///
767    /// # Arguments
768    ///
769    /// * `y_conv` - Output logits from inference.
770    /// * `valid_actions` - Indices of valid actions.
771    /// * `mode` - Training (stochastic) or Play (deterministic).
772    /// * `rng` - Random number generator (used only in Training mode).
773    ///
774    /// # Panics
775    ///
776    /// Panics if `valid_actions` is empty.
777    pub fn select_action(
778        &self,
779        y_conv: &L::Vector,
780        valid_actions: &[usize],
781        mode: SelectionMode,
782        rng: &mut impl Rng,
783    ) -> usize {
784        assert!(!valid_actions.is_empty(), "valid_actions must not be empty");
785
786        // Scale logits by temperature
787        let scaled = self
788            .backend
789            .vec_scale(y_conv, 1.0 / self.config.temperature);
790
791        let probs = self.backend.softmax_masked(&scaled, valid_actions);
792
793        match mode {
794            SelectionMode::Play => self.backend.argmax_masked(&probs, valid_actions),
795            SelectionMode::Training => self.backend.sample_from_probs(&probs, valid_actions, rng),
796        }
797    }
798
799    /// Updates network weights using a blend of backprop and local PC error.
800    ///
801    /// The `local_lambda` config controls the blend: 1.0 = pure backprop,
802    /// 0.0 = pure local PC learning (Millidge et al. 2022), intermediate = hybrid.
803    ///
804    /// # Arguments
805    ///
806    /// * `output_delta` - Error signal at the output layer.
807    /// * `infer_result` - Result from the most recent inference.
808    /// * `input` - Original input that was fed to `infer`.
809    /// * `surprise_scale` - Multiplier on learning rate based on surprise.
810    ///
811    /// # Panics
812    ///
813    /// Panics if `input.len() != config.input_size`.
814    pub fn update_weights(
815        &mut self,
816        output_delta: &[f64],
817        infer_result: &InferResult<L>,
818        input: &[f64],
819        surprise_scale: f64,
820    ) {
821        assert_eq!(
822            input.len(),
823            self.config.input_size,
824            "input size mismatch: got {}, expected {}",
825            input.len(),
826            self.config.input_size
827        );
828
829        self.update_weights_hybrid(
830            output_delta,
831            infer_result,
832            input,
833            surprise_scale,
834            self.config.local_lambda,
835        );
836    }
837
    /// Hybrid weight update blending backprop and local PC error signals.
    ///
    /// For hidden layers, the effective delta is:
    /// `delta = lambda * backprop_delta + (1 - lambda) * pc_error`
    ///
    /// * `lambda = 1.0` → pure backprop (standard mode).
    /// * `lambda = 0.0` → pure local PC learning (Millidge et al. 2022).
    /// * `0 < lambda < 1` → hybrid blend.
    ///
    /// The output layer always uses standard backprop from `output_delta`.
    /// Each per-layer `backward` call applies that layer's weight update
    /// (scaled by `lr_weights` and `surprise_scale`) and returns the delta
    /// propagated to the layer below.
    fn update_weights_hybrid(
        &mut self,
        output_delta: &[f64],
        infer_result: &InferResult<L>,
        input: &[f64],
        surprise_scale: f64,
        lambda: f64,
    ) {
        let input_vec = self.backend.vec_from_slice(input);
        let output_delta_vec = self.backend.vec_from_slice(output_delta);
        let n_hidden = self.config.hidden_layers.len();
        let n_layers = self.layers.len();

        // Output layer: always standard backward
        // Its input is the top hidden state, or the raw input vector when
        // the network has no hidden layers at all.
        let output_input = if n_hidden > 0 {
            &infer_result.hidden_states[n_hidden - 1]
        } else {
            &input_vec
        };
        let output_output = &infer_result.y_conv;
        let mut bp_delta = self.layers[n_layers - 1].backward(
            output_input,
            output_output,
            &output_delta_vec,
            self.config.lr_weights,
            surprise_scale,
        );

        // Hidden layers (from top to bottom)
        for i in (0..n_hidden).rev() {
            let layer_input = if i > 0 {
                &infer_result.hidden_states[i - 1]
            } else {
                &input_vec
            };

            // Blend backprop delta with local PC error
            // The epsilon comparisons pick the two pure fast paths (no blend
            // vectors allocated) when lambda is exactly 1.0 or 0.0.
            let effective_delta = if (lambda - 1.0).abs() < f64::EPSILON {
                bp_delta.clone()
            } else if lambda.abs() < f64::EPSILON {
                // prediction_errors is indexed in reverse layer order here:
                // index 0 corresponds to the topmost hidden layer (i = n_hidden - 1).
                let error_idx = n_hidden - 1 - i;
                infer_result.prediction_errors[error_idx].clone()
            } else {
                let error_idx = n_hidden - 1 - i;
                let pc_error = &infer_result.prediction_errors[error_idx];
                let bp_scaled = self.backend.vec_scale(&bp_delta, lambda);
                let pc_scaled = self.backend.vec_scale(pc_error, 1.0 - lambda);
                self.backend.vec_add(&bp_scaled, &pc_scaled)
            };

            if let Some(alpha_idx) = self.skip_alpha_index(i) {
                // Skip-eligible layer: use tanh_out for derivative, scale by alpha,
                // add identity path to propagated gradient, update alpha.
                // NOTE(review): unwrap assumes infer() always records a tanh
                // component for skip-eligible layers — confirm in infer().
                let tanh_out = infer_result.tanh_components[i].as_ref().unwrap();
                let alpha = self.rezero_alpha[alpha_idx];
                let effective_lr = self.config.lr_weights * surprise_scale;

                // Scale delta by rezero_alpha for the nonlinear path
                let scaled_delta = self.backend.vec_scale(&effective_delta, alpha);

                // Backward through the layer using tanh_out (not hidden_states[i])
                let propagated = self.layers[i].backward(
                    layer_input,
                    tanh_out,
                    &scaled_delta,
                    self.config.lr_weights,
                    surprise_scale,
                );

                // Update rezero_alpha: dL/d(alpha) = delta · tanh_out
                let grad_alpha: f64 = self.backend.vec_dot(&effective_delta, tanh_out);
                self.rezero_alpha[alpha_idx] -= effective_lr * grad_alpha;

                // Propagated delta = nonlinear path + skip path (identity or projection)
                if let Some(ref mut proj) = self.skip_projections[alpha_idx] {
                    // Projection path: W_proj^T × delta
                    let proj_t = self.backend.mat_transpose(proj);
                    let skip_delta = self.backend.mat_vec_mul(&proj_t, &effective_delta);
                    // Update projection: W_proj -= lr × outer(delta, layer_input)
                    // (skip_delta is computed from the pre-update transpose above).
                    let dw_proj = self.backend.outer_product(&effective_delta, layer_input);
                    self.backend.mat_scale_add(proj, &dw_proj, -effective_lr);
                    bp_delta = self.backend.vec_add(&propagated, &skip_delta);
                } else {
                    // Identity path: + delta
                    bp_delta = self.backend.vec_add(&propagated, &effective_delta);
                }
            } else {
                // Standard layer: use hidden_states[i] as output
                let layer_output = &infer_result.hidden_states[i];
                bp_delta = self.layers[i].backward(
                    layer_input,
                    layer_output,
                    &effective_delta,
                    self.config.lr_weights,
                    surprise_scale,
                );
            }
        }
    }
947
948    /// Extracts a serializable snapshot of current weights.
949    ///
950    /// Converts generic layers and skip projections to CPU-backed types.
951    pub fn to_weights(&self) -> crate::serializer::PcActorWeights {
952        let cpu_layers: Vec<Layer<CpuLinAlg>> = self
953            .layers
954            .iter()
955            .map(|layer| {
956                let rows = self.backend.mat_rows(&layer.weights);
957                let cols = self.backend.mat_cols(&layer.weights);
958                let mut cpu_weights = crate::matrix::Matrix::zeros(rows, cols);
959                for r in 0..rows {
960                    for c in 0..cols {
961                        cpu_weights.set(r, c, self.backend.mat_get(&layer.weights, r, c));
962                    }
963                }
964                let bias_data = self.backend.vec_to_vec(&layer.bias);
965                Layer {
966                    weights: cpu_weights,
967                    bias: bias_data,
968                    activation: layer.activation,
969                    backend: CpuLinAlg::new(),
970                }
971            })
972            .collect();
973        let cpu_projs: Vec<Option<crate::matrix::Matrix>> = self
974            .skip_projections
975            .iter()
976            .map(|opt| {
977                opt.as_ref().map(|m| {
978                    let rows = self.backend.mat_rows(m);
979                    let cols = self.backend.mat_cols(m);
980                    let mut cpu_m = crate::matrix::Matrix::zeros(rows, cols);
981                    for r in 0..rows {
982                        for c in 0..cols {
983                            cpu_m.set(r, c, self.backend.mat_get(m, r, c));
984                        }
985                    }
986                    cpu_m
987                })
988            })
989            .collect();
990        crate::serializer::PcActorWeights {
991            layers: cpu_layers,
992            rezero_alpha: self.rezero_alpha.clone(),
993            skip_projections: cpu_projs,
994        }
995    }
996
997    /// Restores an actor from saved weights without requiring an RNG.
998    ///
999    /// Converts CPU-backed weight snapshots to the target backend `L`.
1000    /// Validates that all weight matrix dimensions and bias lengths match
1001    /// the expected topology from `config`.
1002    ///
1003    /// # Errors
1004    ///
1005    /// Returns `PcError::DimensionMismatch` if any weight matrix or bias
1006    /// vector has dimensions inconsistent with the config topology.
1007    pub fn from_weights(
1008        backend: L,
1009        config: PcActorConfig,
1010        weights: crate::serializer::PcActorWeights,
1011    ) -> Result<Self, PcError> {
1012        let n_hidden = config.hidden_layers.len();
1013        let expected_layers = n_hidden + 1;
1014
1015        if weights.layers.len() != expected_layers {
1016            return Err(PcError::DimensionMismatch {
1017                expected: expected_layers,
1018                got: weights.layers.len(),
1019                context: "actor layer count",
1020            });
1021        }
1022
1023        // Validate each layer's dimensions
1024        let mut prev_size = config.input_size;
1025        for (i, cpu_layer) in weights.layers.iter().enumerate() {
1026            let (expected_rows, expected_cols) = if i < n_hidden {
1027                (config.hidden_layers[i].size, prev_size)
1028            } else {
1029                (config.output_size, prev_size)
1030            };
1031
1032            if cpu_layer.weights.rows != expected_rows {
1033                return Err(PcError::DimensionMismatch {
1034                    expected: expected_rows,
1035                    got: cpu_layer.weights.rows,
1036                    context: "actor layer weight rows",
1037                });
1038            }
1039            if cpu_layer.weights.cols != expected_cols {
1040                return Err(PcError::DimensionMismatch {
1041                    expected: expected_cols,
1042                    got: cpu_layer.weights.cols,
1043                    context: "actor layer weight cols",
1044                });
1045            }
1046            if cpu_layer.bias.len() != expected_rows {
1047                return Err(PcError::DimensionMismatch {
1048                    expected: expected_rows,
1049                    got: cpu_layer.bias.len(),
1050                    context: "actor layer bias length",
1051                });
1052            }
1053
1054            if i < n_hidden {
1055                prev_size = config.hidden_layers[i].size;
1056            }
1057        }
1058
1059        // Validate residual components
1060        if config.residual {
1061            let expected_residual = n_hidden.saturating_sub(1);
1062            if weights.rezero_alpha.len() != expected_residual {
1063                return Err(PcError::DimensionMismatch {
1064                    expected: expected_residual,
1065                    got: weights.rezero_alpha.len(),
1066                    context: "actor rezero_alpha count",
1067                });
1068            }
1069            if weights.skip_projections.len() != expected_residual {
1070                return Err(PcError::DimensionMismatch {
1071                    expected: expected_residual,
1072                    got: weights.skip_projections.len(),
1073                    context: "actor skip_projections count",
1074                });
1075            }
1076            // Validate skip projection dimensions (rows/cols)
1077            for (i, proj_opt) in weights.skip_projections.iter().enumerate() {
1078                if let Some(ref proj) = proj_opt {
1079                    let expected_rows = config.hidden_layers[i + 1].size;
1080                    let expected_cols = config.hidden_layers[i].size;
1081                    if proj.rows != expected_rows || proj.cols != expected_cols {
1082                        return Err(PcError::DimensionMismatch {
1083                            expected: expected_rows * expected_cols,
1084                            got: proj.rows * proj.cols,
1085                            context: "actor skip_projection dimensions",
1086                        });
1087                    }
1088                }
1089            }
1090        }
1091
1092        // Convert layers
1093        let layers: Vec<Layer<L>> = weights
1094            .layers
1095            .into_iter()
1096            .map(|cpu_layer| {
1097                let rows = cpu_layer.weights.rows;
1098                let cols = cpu_layer.weights.cols;
1099                let mut mat = backend.zeros_mat(rows, cols);
1100                for r in 0..rows {
1101                    for c in 0..cols {
1102                        backend.mat_set(&mut mat, r, c, cpu_layer.weights.get(r, c));
1103                    }
1104                }
1105                let bias = backend.vec_from_slice(&cpu_layer.bias);
1106                Layer {
1107                    weights: mat,
1108                    bias,
1109                    activation: cpu_layer.activation,
1110                    backend: backend.clone(),
1111                }
1112            })
1113            .collect();
1114        let skip_projections: Vec<Option<L::Matrix>> = weights
1115            .skip_projections
1116            .into_iter()
1117            .map(|opt| {
1118                opt.map(|cpu_m| {
1119                    let rows = cpu_m.rows;
1120                    let cols = cpu_m.cols;
1121                    let mut mat = backend.zeros_mat(rows, cols);
1122                    for r in 0..rows {
1123                        for c in 0..cols {
1124                            backend.mat_set(&mut mat, r, c, cpu_m.get(r, c));
1125                        }
1126                    }
1127                    mat
1128                })
1129            })
1130            .collect();
1131        Ok(Self {
1132            layers,
1133            config,
1134            rezero_alpha: weights.rezero_alpha,
1135            skip_projections,
1136            backend,
1137        })
1138    }
1139}
1140
1141/// Permute columns of a weight matrix according to a permutation.
1142/// `perm[i]` = source column index for destination column i.
1143pub(crate) fn permute_cols<L: LinAlg>(backend: &L, m: &L::Matrix, perm: &[usize]) -> L::Matrix {
1144    let rows = backend.mat_rows(m);
1145    let cols = backend.mat_cols(m);
1146    let perm_len = perm.len();
1147    let mut result = backend.zeros_mat(rows, cols);
1148    for (dst, &src) in perm.iter().enumerate().take(cols.min(perm_len)) {
1149        if src < cols {
1150            for r in 0..rows {
1151                backend.mat_set(&mut result, r, dst, backend.mat_get(m, r, src));
1152            }
1153        }
1154    }
1155    // Copy remaining columns (beyond permutation length) in original order
1156    for dst in perm_len..cols {
1157        for r in 0..rows {
1158            backend.mat_set(&mut result, r, dst, backend.mat_get(m, r, dst));
1159        }
1160    }
1161    result
1162}
1163
1164/// Permute rows of a weight matrix according to a permutation.
1165/// `perm[i]` = source row index for destination row i.
1166pub(crate) fn permute_rows<L: LinAlg>(
1167    backend: &L,
1168    m: &L::Matrix,
1169    perm: &[usize],
1170    n: usize,
1171) -> L::Matrix {
1172    let cols = backend.mat_cols(m);
1173    let perm_len = perm.len();
1174    let mut result = backend.zeros_mat(n, cols);
1175    for (dst, &src) in perm.iter().enumerate().take(n.min(perm_len)) {
1176        if src < backend.mat_rows(m) {
1177            for c in 0..cols {
1178                backend.mat_set(&mut result, dst, c, backend.mat_get(m, src, c));
1179            }
1180        }
1181    }
1182    // Copy remaining rows (unmatched) in original order
1183    for dst in perm_len..n {
1184        if dst < backend.mat_rows(m) {
1185            for c in 0..cols {
1186                backend.mat_set(&mut result, dst, c, backend.mat_get(m, dst, c));
1187            }
1188        }
1189    }
1190    result
1191}
1192
1193/// Permute elements of a bias vector according to a permutation.
1194pub(crate) fn permute_vec<L: LinAlg>(
1195    backend: &L,
1196    v: &L::Vector,
1197    perm: &[usize],
1198    n: usize,
1199) -> L::Vector {
1200    let perm_len = perm.len();
1201    let mut result = backend.zeros_vec(n);
1202    for (dst, &src) in perm.iter().enumerate().take(n.min(perm_len)) {
1203        if src < backend.vec_len(v) {
1204            backend.vec_set(&mut result, dst, backend.vec_get(v, src));
1205        }
1206    }
1207    for dst in perm_len..n {
1208        if dst < backend.vec_len(v) {
1209            backend.vec_set(&mut result, dst, backend.vec_get(v, dst));
1210        }
1211    }
1212    result
1213}
1214
/// Blend weights from two parent layers into a child layer.
/// Handles all 4 dimension cases (equal, child smaller, parents differ, child larger).
///
/// The child's rows are filled in three zones:
/// 1. `[0, min(n_a, n_b))` — convex blend of both parents (`alpha` weights A).
/// 2. `[min, max)` — straight copy from the larger parent.
/// 3. `[max, n_child)` — freshly Xavier-initialized rows (biases stay zero).
///
/// * `parent_a` - (weights, bias, neuron_count) for parent A.
/// * `parent_b` - (weights, bias, neuron_count) for parent B (already CCA-aligned).
/// * `child_cols` - Number of columns (input size) for child layer.
#[allow(clippy::too_many_arguments)]
pub(crate) fn blend_layer_weights<L: LinAlg>(
    backend: &L,
    parent_a: (&L::Matrix, &L::Vector, usize),
    parent_b: (&L::Matrix, &L::Vector, usize),
    n_child: usize,
    child_cols: usize,
    alpha: f64,
    rng: &mut impl Rng,
) -> (L::Matrix, L::Vector) {
    let (a_weights, a_biases, n_a) = parent_a;
    let (b_weights, b_biases, n_b) = parent_b;
    let n_min = n_a.min(n_b);
    let n_max = n_a.max(n_b);
    let a_cols = backend.mat_cols(a_weights);
    let b_cols = backend.mat_cols(b_weights);
    // Only columns present in BOTH parents can be blended; child columns
    // beyond `use_cols` remain zero within the blending zone.
    let use_cols = child_cols.min(a_cols).min(b_cols);

    let mut weights = backend.zeros_mat(n_child, child_cols);
    let mut biases = backend.zeros_vec(n_child);

    // Blending zone [0..min(n_min, n_child))
    let blend_end = n_min.min(n_child);
    for r in 0..blend_end {
        for c in 0..use_cols {
            let va = backend.mat_get(a_weights, r, c);
            let vb = backend.mat_get(b_weights, r, c);
            // Convex combination: alpha weights parent A, (1 - alpha) parent B.
            backend.mat_set(&mut weights, r, c, alpha * va + (1.0 - alpha) * vb);
        }
        let ba = backend.vec_get(a_biases, r);
        let bb = backend.vec_get(b_biases, r);
        backend.vec_set(&mut biases, r, alpha * ba + (1.0 - alpha) * bb);
    }

    // Copy zone [n_min..min(n_max, n_child)) from the larger parent
    // (unreachable when n_a == n_b, since then copy_end == blend_end).
    let copy_end = n_max.min(n_child);
    if copy_end > blend_end {
        let (larger_w, larger_b) = if n_a >= n_b {
            (a_weights, a_biases)
        } else {
            (b_weights, b_biases)
        };
        let larger_cols = backend.mat_cols(larger_w);
        for r in blend_end..copy_end {
            for c in 0..child_cols.min(larger_cols) {
                backend.mat_set(&mut weights, r, c, backend.mat_get(larger_w, r, c));
            }
            backend.vec_set(&mut biases, r, backend.vec_get(larger_b, r));
        }
    }

    // Xavier zone [n_max..n_child) for new neurons
    // One xavier_mat call generates all new rows at once.
    if n_child > n_max {
        let xavier = backend.xavier_mat(n_child - n_max, child_cols, rng);
        for r in n_max..n_child {
            for c in 0..child_cols {
                backend.mat_set(&mut weights, r, c, backend.mat_get(&xavier, r - n_max, c));
            }
            // biases stay zero for Xavier zone
        }
    }

    (weights, biases)
}
1285
1286/// CCA-aligns and blends a single hidden layer from two parents.
1287///
1288/// Handles the common pattern: CCA alignment → column permutation from
1289/// previous layer → row permutation → blend. Returns the blended layer
1290/// and the CCA permutation applied (for column propagation to the next layer).
1291///
1292/// * `prev_perm` — Permutation from the previous layer to apply to columns.
1293///   Pass `None` to skip column propagation.
1294#[allow(clippy::too_many_arguments)]
1295pub(crate) fn cca_align_and_blend_layer<L: LinAlg>(
1296    backend: &L,
1297    a_layer: &Layer<L>,
1298    b_layer: &Layer<L>,
1299    cache_a: Option<&L::Matrix>,
1300    cache_b: Option<&L::Matrix>,
1301    prev_perm: Option<&[usize]>,
1302    child_rows: usize,
1303    child_cols: usize,
1304    child_activation: Activation,
1305    alpha: f64,
1306    rng: &mut impl Rng,
1307) -> Result<(Layer<L>, Option<Vec<usize>>), crate::error::PcError> {
1308    let n_a = backend.mat_rows(&a_layer.weights);
1309    let n_b = backend.mat_rows(&b_layer.weights);
1310
1311    // CCA alignment
1312    let perm = if let (Some(ca), Some(cb)) = (cache_a, cache_b) {
1313        Some(crate::matrix::cca_neuron_alignment(backend, ca, cb)?)
1314    } else {
1315        None
1316    };
1317
1318    // Apply previous layer's permutation to columns of parent B
1319    let b_weights_col = if let Some(pp) = prev_perm {
1320        permute_cols(backend, &b_layer.weights, pp)
1321    } else {
1322        b_layer.weights.clone()
1323    };
1324
1325    // Apply CCA row permutation to parent B
1326    let b_weights_aligned = if let Some(ref p) = perm {
1327        permute_rows(backend, &b_weights_col, p, n_b)
1328    } else {
1329        b_weights_col
1330    };
1331    let b_bias_aligned = if let Some(ref p) = perm {
1332        permute_vec(backend, &b_layer.bias, p, n_b)
1333    } else {
1334        b_layer.bias.clone()
1335    };
1336
1337    let (weights, biases) = blend_layer_weights(
1338        backend,
1339        (&a_layer.weights, &a_layer.bias, n_a),
1340        (&b_weights_aligned, &b_bias_aligned, n_b),
1341        child_rows,
1342        child_cols,
1343        alpha,
1344        rng,
1345    );
1346
1347    Ok((
1348        Layer {
1349            weights,
1350            bias: biases,
1351            activation: child_activation,
1352            backend: backend.clone(),
1353        },
1354        perm,
1355    ))
1356}
1357
1358#[cfg(test)]
1359mod tests {
1360    use super::*;
1361    use crate::activation::Activation;
1362    use crate::layer::LayerDef;
1363    use crate::matrix::WEIGHT_CLIP;
1364    use rand::rngs::StdRng;
1365    use rand::SeedableRng;
1366
1367    fn make_rng() -> StdRng {
1368        StdRng::seed_from_u64(42)
1369    }
1370
1371    fn default_config() -> PcActorConfig {
1372        PcActorConfig {
1373            input_size: 9,
1374            hidden_layers: vec![LayerDef {
1375                size: 18,
1376                activation: Activation::Tanh,
1377            }],
1378            output_size: 9,
1379            output_activation: Activation::Tanh,
1380            alpha: 0.1,
1381            tol: 0.01,
1382            min_steps: 1,
1383            max_steps: 20,
1384            lr_weights: 0.01,
1385            synchronous: true,
1386            temperature: 1.0,
1387            local_lambda: 1.0,
1388            residual: false,
1389            rezero_init: 0.001,
1390        }
1391    }
1392
1393    fn two_hidden_config() -> PcActorConfig {
1394        PcActorConfig {
1395            hidden_layers: vec![
1396                LayerDef {
1397                    size: 18,
1398                    activation: Activation::Tanh,
1399                },
1400                LayerDef {
1401                    size: 12,
1402                    activation: Activation::Tanh,
1403                },
1404            ],
1405            ..default_config()
1406        }
1407    }
1408
1409    // ── Inference Tests ──────────────────────────────────────────────
1410
1411    #[test]
1412    fn test_infer_converges_on_zero_board() {
1413        let mut rng = make_rng();
1414        let actor: PcActor = PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1415        let result = actor.infer(&[0.0; 9]);
1416        // Should complete without panic; all finite
1417        for &v in &result.y_conv {
1418            assert!(v.is_finite());
1419        }
1420    }
1421
1422    #[test]
1423    fn test_infer_steps_used_at_least_min_steps() {
1424        let mut rng = make_rng();
1425        let config = PcActorConfig {
1426            min_steps: 3,
1427            ..default_config()
1428        };
1429        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
1430        let result = actor.infer(&[0.0; 9]);
1431        assert!(result.steps_used >= 3);
1432    }
1433
1434    #[test]
1435    fn test_infer_alpha_zero_does_not_converge() {
1436        let mut rng = make_rng();
1437        let config = PcActorConfig {
1438            alpha: 0.0,
1439            ..default_config()
1440        };
1441        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
1442        let result = actor.infer(&[0.0; 9]);
1443        assert!(!result.converged);
1444        assert_eq!(result.steps_used, 20);
1445    }
1446
1447    #[test]
1448    fn test_infer_does_not_modify_weights() {
1449        let mut rng = make_rng();
1450        let actor: PcActor = PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1451        let weights_before: Vec<Vec<f64>> = actor
1452            .layers
1453            .iter()
1454            .map(|l| l.weights.data.clone())
1455            .collect();
1456        let _ = actor.infer(&[0.0; 9]);
1457        for (i, layer) in actor.layers.iter().enumerate() {
1458            assert_eq!(layer.weights.data, weights_before[i]);
1459        }
1460    }
1461
1462    #[test]
1463    fn test_infer_latent_size_single_hidden() {
1464        let mut rng = make_rng();
1465        let actor: PcActor = PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1466        let result = actor.infer(&[0.0; 9]);
1467        assert_eq!(result.latent_concat.len(), 18);
1468    }
1469
1470    #[test]
1471    fn test_infer_latent_size_two_hidden() {
1472        let mut rng = make_rng();
1473        let actor: PcActor = PcActor::new(CpuLinAlg::new(), two_hidden_config(), &mut rng).unwrap();
1474        let result = actor.infer(&[0.0; 9]);
1475        assert_eq!(result.latent_concat.len(), 30);
1476    }
1477
1478    #[test]
1479    fn test_infer_latent_size_matches_latent_size_method() {
1480        let mut rng = make_rng();
1481        let actor: PcActor = PcActor::new(CpuLinAlg::new(), two_hidden_config(), &mut rng).unwrap();
1482        let result = actor.infer(&[0.0; 9]);
1483        assert_eq!(result.latent_concat.len(), actor.latent_size());
1484    }
1485
1486    #[test]
1487    fn test_infer_y_conv_length_equals_output_size() {
1488        let mut rng = make_rng();
1489        let actor: PcActor = PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1490        let result = actor.infer(&[0.0; 9]);
1491        assert_eq!(result.y_conv.len(), 9);
1492    }
1493
1494    #[test]
1495    fn test_infer_hidden_states_count_matches_hidden_layers() {
1496        let mut rng = make_rng();
1497        let actor: PcActor = PcActor::new(CpuLinAlg::new(), two_hidden_config(), &mut rng).unwrap();
1498        let result = actor.infer(&[0.0; 9]);
1499        assert_eq!(result.hidden_states.len(), 2);
1500    }
1501
1502    #[test]
1503    fn test_infer_all_outputs_finite() {
1504        let mut rng = make_rng();
1505        let actor: PcActor = PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1506        let result = actor.infer(&[1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5]);
1507        for &v in &result.y_conv {
1508            assert!(v.is_finite());
1509        }
1510        for &v in &result.latent_concat {
1511            assert!(v.is_finite());
1512        }
1513        assert!(result.surprise_score.is_finite());
1514    }
1515
1516    #[test]
1517    fn test_infer_surprise_score_nonnegative() {
1518        let mut rng = make_rng();
1519        let actor: PcActor = PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1520        let result = actor.infer(&[0.0; 9]);
1521        assert!(result.surprise_score >= 0.0);
1522    }
1523
1524    #[test]
1525    fn test_infer_synchronous_and_inplace_both_converge() {
1526        let mut rng = make_rng();
1527        let sync_actor: PcActor =
1528            PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1529        let mut rng2 = make_rng();
1530        let inplace_config = PcActorConfig {
1531            synchronous: false,
1532            ..default_config()
1533        };
1534        let inplace_actor: PcActor =
1535            PcActor::new(CpuLinAlg::new(), inplace_config, &mut rng2).unwrap();
1536        let sync_result = sync_actor.infer(&[0.0; 9]);
1537        let inplace_result = inplace_actor.infer(&[0.0; 9]);
1538        // Both should complete without panic; at least one should converge or use all steps
1539        assert!(sync_result.steps_used > 0);
1540        assert!(inplace_result.steps_used > 0);
1541    }
1542
1543    #[test]
1544    fn test_infer_synchronous_produces_different_result_than_inplace() {
1545        let mut rng = make_rng();
1546        let config = PcActorConfig {
1547            hidden_layers: vec![
1548                LayerDef {
1549                    size: 18,
1550                    activation: Activation::Tanh,
1551                },
1552                LayerDef {
1553                    size: 12,
1554                    activation: Activation::Tanh,
1555                },
1556            ],
1557            alpha: 0.3,
1558            tol: 1e-15,
1559            min_steps: 1,
1560            max_steps: 3,
1561            ..default_config()
1562        };
1563        let sync_actor: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng).unwrap();
1564        let mut rng2 = make_rng();
1565        let inplace_config = PcActorConfig {
1566            synchronous: false,
1567            ..config
1568        };
1569        let inplace_actor: PcActor =
1570            PcActor::new(CpuLinAlg::new(), inplace_config, &mut rng2).unwrap();
1571        let input = [1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
1572        let sync_result = sync_actor.infer(&input);
1573        let inplace_result = inplace_actor.infer(&input);
1574        // Different update orders should produce different hidden representations
1575        let differs = sync_result
1576            .latent_concat
1577            .iter()
1578            .zip(inplace_result.latent_concat.iter())
1579            .any(|(a, b)| (a - b).abs() > 1e-12);
1580        assert!(
1581            differs,
1582            "Synchronous and in-place should produce different results"
1583        );
1584    }
1585
1586    #[test]
1587    #[should_panic(expected = "input size")]
1588    fn test_infer_panics_wrong_input_length() {
1589        let mut rng = make_rng();
1590        let actor: PcActor = PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1591        let _ = actor.infer(&[0.0; 5]);
1592    }
1593
1594    // ── Action Selection Tests ───────────────────────────────────────
1595
1596    #[test]
1597    fn test_select_action_training_always_in_valid() {
1598        let mut rng = make_rng();
1599        let actor: PcActor = PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1600        let logits = vec![0.1, -0.2, 0.5, -0.1, 0.3, 0.0, -0.3, 0.2, 0.4];
1601        let valid = vec![0, 2, 4, 6, 8];
1602        for _ in 0..20 {
1603            let action = actor.select_action(&logits, &valid, SelectionMode::Training, &mut rng);
1604            assert!(valid.contains(&action));
1605        }
1606    }
1607
1608    #[test]
1609    fn test_select_action_play_mode_deterministic() {
1610        let mut rng1 = StdRng::seed_from_u64(1);
1611        let mut rng2 = StdRng::seed_from_u64(99);
1612        let mut rng_init = make_rng();
1613        let actor: PcActor =
1614            PcActor::new(CpuLinAlg::new(), default_config(), &mut rng_init).unwrap();
1615        let logits = vec![0.1, -0.2, 0.5, -0.1, 0.3, 0.0, -0.3, 0.2, 0.4];
1616        let valid = vec![0, 2, 4, 6, 8];
1617        let a1 = actor.select_action(&logits, &valid, SelectionMode::Play, &mut rng1);
1618        let a2 = actor.select_action(&logits, &valid, SelectionMode::Play, &mut rng2);
1619        assert_eq!(a1, a2, "Play mode should be deterministic");
1620    }
1621
1622    #[test]
1623    fn test_select_action_temperature_gt_one_more_uniform() {
1624        let mut rng = make_rng();
1625        let hot_config = PcActorConfig {
1626            temperature: 5.0,
1627            ..default_config()
1628        };
1629        let actor: PcActor = PcActor::new(CpuLinAlg::new(), hot_config, &mut rng).unwrap();
1630        // With high temperature, sampling should visit more actions
1631        let logits = vec![10.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0];
1632        let valid: Vec<usize> = (0..9).collect();
1633        let mut seen = std::collections::HashSet::new();
1634        let mut rng2 = StdRng::seed_from_u64(123);
1635        for _ in 0..100 {
1636            let a = actor.select_action(&logits, &valid, SelectionMode::Training, &mut rng2);
1637            seen.insert(a);
1638        }
1639        assert!(seen.len() > 1, "High temperature should explore more");
1640    }
1641
1642    #[test]
1643    #[should_panic]
1644    fn test_select_action_empty_valid_panics() {
1645        let mut rng = make_rng();
1646        let actor: PcActor = PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1647        let logits = vec![0.1; 9];
1648        let _ = actor.select_action(&logits, &[], SelectionMode::Training, &mut rng);
1649    }
1650
1651    // ── Weight Update Tests ──────────────────────────────────────────
1652
1653    #[test]
1654    fn test_update_weights_changes_first_layer() {
1655        let mut rng = make_rng();
1656        let mut actor: PcActor =
1657            PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1658        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
1659        let infer_result = actor.infer(&input);
1660        let weights_before = actor.layers[0].weights.data.clone();
1661        let delta = vec![0.1; 9];
1662        actor.update_weights(&delta, &infer_result, &input, 1.0);
1663        assert_ne!(actor.layers[0].weights.data, weights_before);
1664    }
1665
1666    #[test]
1667    fn test_update_weights_clips_all_layers() {
1668        let mut rng = make_rng();
1669        let mut actor: PcActor =
1670            PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1671        let input = vec![1.0; 9];
1672        let infer_result = actor.infer(&input);
1673        let delta = vec![1e6; 9];
1674        actor.update_weights(&delta, &infer_result, &input, 1.0);
1675        for layer in &actor.layers {
1676            for &w in &layer.weights.data {
1677                assert!(
1678                    w.abs() <= WEIGHT_CLIP + 1e-12,
1679                    "Weight {w} exceeds WEIGHT_CLIP"
1680                );
1681            }
1682        }
1683    }
1684
1685    #[test]
1686    fn test_update_weights_two_hidden_changes_both_layers() {
1687        let mut rng = make_rng();
1688        let mut actor: PcActor =
1689            PcActor::new(CpuLinAlg::new(), two_hidden_config(), &mut rng).unwrap();
1690        let input = vec![0.5; 9];
1691        let infer_result = actor.infer(&input);
1692        let w0_before = actor.layers[0].weights.data.clone();
1693        let w1_before = actor.layers[1].weights.data.clone();
1694        let delta = vec![0.1; 9];
1695        actor.update_weights(&delta, &infer_result, &input, 1.0);
1696        assert_ne!(
1697            actor.layers[0].weights.data, w0_before,
1698            "Layer 0 should change"
1699        );
1700        assert_ne!(
1701            actor.layers[1].weights.data, w1_before,
1702            "Layer 1 should change"
1703        );
1704    }
1705
1706    #[test]
1707    #[should_panic(expected = "input size")]
1708    fn test_update_weights_panics_wrong_x_size() {
1709        let mut rng = make_rng();
1710        let mut actor: PcActor =
1711            PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1712        let input = vec![0.0; 9];
1713        let infer_result = actor.infer(&input);
1714        let delta = vec![0.1; 9];
1715        actor.update_weights(&delta, &infer_result, &[0.0; 5], 1.0);
1716    }
1717
1718    // ── Zero Hidden Layers Test ─────────────────────────────────
1719
1720    #[test]
1721    fn test_infer_zero_hidden_layers_produces_finite_output() {
1722        let mut rng = make_rng();
1723        let config = PcActorConfig {
1724            hidden_layers: vec![],
1725            ..default_config()
1726        };
1727        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
1728        let result = actor.infer(&[0.5; 9]);
1729        assert_eq!(result.y_conv.len(), 9);
1730        assert!(result.y_conv.iter().all(|v| v.is_finite()));
1731        assert!(result.latent_concat.is_empty());
1732        assert!(result.hidden_states.is_empty());
1733    }
1734
1735    // ── Config Validation Tests ─────────────────────────────────
1736
1737    #[test]
1738    fn test_new_zero_input_size_returns_error() {
1739        let mut rng = make_rng();
1740        let config = PcActorConfig {
1741            input_size: 0,
1742            ..default_config()
1743        };
1744        let result: Result<PcActor, _> = PcActor::new(CpuLinAlg::new(), config, &mut rng);
1745        assert!(result.is_err());
1746        let err = result.unwrap_err();
1747        assert!(matches!(err, crate::error::PcError::ConfigValidation(_)));
1748    }
1749
1750    #[test]
1751    fn test_new_zero_output_size_returns_error() {
1752        let mut rng = make_rng();
1753        let config = PcActorConfig {
1754            output_size: 0,
1755            ..default_config()
1756        };
1757        let result: Result<PcActor, _> = PcActor::new(CpuLinAlg::new(), config, &mut rng);
1758        assert!(result.is_err());
1759    }
1760
1761    #[test]
1762    fn test_new_zero_temperature_returns_error() {
1763        let mut rng = make_rng();
1764        let config = PcActorConfig {
1765            temperature: 0.0,
1766            ..default_config()
1767        };
1768        let result: Result<PcActor, _> = PcActor::new(CpuLinAlg::new(), config, &mut rng);
1769        assert!(result.is_err());
1770    }
1771
1772    #[test]
1773    fn test_new_negative_temperature_returns_error() {
1774        let mut rng = make_rng();
1775        let config = PcActorConfig {
1776            temperature: -1.0,
1777            ..default_config()
1778        };
1779        let result: Result<PcActor, _> = PcActor::new(CpuLinAlg::new(), config, &mut rng);
1780        assert!(result.is_err());
1781    }
1782
1783    // ── Residual / ReZero Config Tests ────────────────────────
1784
1785    #[test]
1786    fn test_default_config_residual_false() {
1787        let config = default_config();
1788        assert!(!config.residual);
1789    }
1790
1791    #[test]
1792    fn test_default_config_rezero_init() {
1793        let config = default_config();
1794        assert!((config.rezero_init - 0.001).abs() < 1e-12);
1795    }
1796
1797    #[test]
1798    fn test_new_negative_rezero_init_returns_error() {
1799        let mut rng = make_rng();
1800        let config = PcActorConfig {
1801            residual: true,
1802            rezero_init: -0.1,
1803            ..default_config()
1804        };
1805        let result: Result<PcActor, _> = PcActor::new(CpuLinAlg::new(), config, &mut rng);
1806        assert!(result.is_err());
1807    }
1808
1809    #[test]
1810    fn test_residual_mixed_sizes_accepted() {
1811        let mut rng = make_rng();
1812        let config = PcActorConfig {
1813            residual: true,
1814            hidden_layers: vec![
1815                LayerDef {
1816                    size: 27,
1817                    activation: Activation::Tanh,
1818                },
1819                LayerDef {
1820                    size: 18,
1821                    activation: Activation::Tanh,
1822                },
1823            ],
1824            ..default_config()
1825        };
1826        let result: Result<PcActor, _> = PcActor::new(CpuLinAlg::new(), config, &mut rng);
1827        assert!(result.is_ok());
1828    }
1829
1830    #[test]
1831    fn test_residual_mixed_sizes_all_skip() {
1832        // [27, 27, 18]: ALL layers i>=1 get skip — identity for 27→27, projection for 27→18
1833        let mut rng = make_rng();
1834        let config = PcActorConfig {
1835            residual: true,
1836            hidden_layers: vec![
1837                LayerDef {
1838                    size: 27,
1839                    activation: Activation::Tanh,
1840                },
1841                LayerDef {
1842                    size: 27,
1843                    activation: Activation::Tanh,
1844                },
1845                LayerDef {
1846                    size: 18,
1847                    activation: Activation::Tanh,
1848                },
1849            ],
1850            ..default_config()
1851        };
1852        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
1853        // 2 skips: layer 1 (identity) + layer 2 (projection)
1854        assert_eq!(actor.rezero_alpha.len(), 2);
1855    }
1856
1857    #[test]
1858    fn test_residual_heterogeneous_has_projection() {
1859        // [27, 18]: different sizes → projection matrix created
1860        let mut rng = make_rng();
1861        let config = PcActorConfig {
1862            residual: true,
1863            hidden_layers: vec![
1864                LayerDef {
1865                    size: 27,
1866                    activation: Activation::Tanh,
1867                },
1868                LayerDef {
1869                    size: 18,
1870                    activation: Activation::Tanh,
1871                },
1872            ],
1873            ..default_config()
1874        };
1875        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
1876        assert_eq!(actor.rezero_alpha.len(), 1);
1877        assert_eq!(actor.skip_projections.len(), 1);
1878        assert!(actor.skip_projections[0].is_some());
1879        let proj = actor.skip_projections[0].as_ref().unwrap();
1880        assert_eq!(proj.rows, 18); // output dim
1881        assert_eq!(proj.cols, 27); // input dim
1882    }
1883
1884    #[test]
1885    fn test_residual_homogeneous_no_projection() {
1886        // [27, 27]: same sizes → no projection needed
1887        let mut rng = make_rng();
1888        let actor: PcActor =
1889            PcActor::new(CpuLinAlg::new(), residual_two_hidden_config(), &mut rng).unwrap();
1890        assert_eq!(actor.skip_projections.len(), 1);
1891        assert!(actor.skip_projections[0].is_none());
1892    }
1893
1894    #[test]
1895    fn test_residual_mixed_sizes_infer_finite() {
1896        let mut rng = make_rng();
1897        let config = PcActorConfig {
1898            residual: true,
1899            hidden_layers: vec![
1900                LayerDef {
1901                    size: 27,
1902                    activation: Activation::Tanh,
1903                },
1904                LayerDef {
1905                    size: 27,
1906                    activation: Activation::Tanh,
1907                },
1908                LayerDef {
1909                    size: 18,
1910                    activation: Activation::Tanh,
1911                },
1912            ],
1913            ..default_config()
1914        };
1915        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
1916        let result = actor.infer(&[0.5; 9]);
1917        for &v in &result.y_conv {
1918            assert!(v.is_finite());
1919        }
1920        assert_eq!(result.hidden_states.len(), 3);
1921        assert_eq!(result.latent_concat.len(), 27 + 27 + 18);
1922    }
1923
1924    #[test]
1925    fn test_residual_same_size_hidden_layers_accepted() {
1926        let mut rng = make_rng();
1927        let config = PcActorConfig {
1928            residual: true,
1929            hidden_layers: vec![
1930                LayerDef {
1931                    size: 27,
1932                    activation: Activation::Tanh,
1933                },
1934                LayerDef {
1935                    size: 27,
1936                    activation: Activation::Tanh,
1937                },
1938            ],
1939            ..default_config()
1940        };
1941        let result: Result<PcActor, _> = PcActor::new(CpuLinAlg::new(), config, &mut rng);
1942        assert!(result.is_ok());
1943    }
1944
    /// Shared fixture: residual-enabled config with two equal-width (27)
    /// tanh hidden layers; every other field comes from `default_config()`.
    fn residual_two_hidden_config() -> PcActorConfig {
        PcActorConfig {
            residual: true,
            hidden_layers: vec![
                LayerDef {
                    size: 27,
                    activation: Activation::Tanh,
                },
                LayerDef {
                    size: 27,
                    activation: Activation::Tanh,
                },
            ],
            ..default_config()
        }
    }
1961
1962    #[test]
1963    fn test_non_residual_actor_empty_rezero_alpha() {
1964        let mut rng = make_rng();
1965        let actor: PcActor = PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
1966        assert!(actor.rezero_alpha.is_empty());
1967    }
1968
1969    #[test]
1970    fn test_residual_two_hidden_one_rezero_alpha() {
1971        let mut rng = make_rng();
1972        let actor: PcActor =
1973            PcActor::new(CpuLinAlg::new(), residual_two_hidden_config(), &mut rng).unwrap();
1974        assert_eq!(actor.rezero_alpha.len(), 1);
1975    }
1976
1977    #[test]
1978    fn test_residual_three_hidden_two_rezero_alpha() {
1979        let mut rng = make_rng();
1980        let config = PcActorConfig {
1981            residual: true,
1982            hidden_layers: vec![
1983                LayerDef {
1984                    size: 27,
1985                    activation: Activation::Tanh,
1986                },
1987                LayerDef {
1988                    size: 27,
1989                    activation: Activation::Tanh,
1990                },
1991                LayerDef {
1992                    size: 27,
1993                    activation: Activation::Tanh,
1994                },
1995            ],
1996            ..default_config()
1997        };
1998        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
1999        assert_eq!(actor.rezero_alpha.len(), 2);
2000    }
2001
2002    #[test]
2003    fn test_rezero_alpha_initialized_to_rezero_init() {
2004        let mut rng = make_rng();
2005        let config = PcActorConfig {
2006            rezero_init: 0.005,
2007            ..residual_two_hidden_config()
2008        };
2009        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
2010        assert!((actor.rezero_alpha[0] - 0.005).abs() < 1e-12);
2011    }
2012
2013    #[test]
2014    fn test_residual_single_hidden_zero_rezero_alpha() {
2015        let mut rng = make_rng();
2016        let config = PcActorConfig {
2017            residual: true,
2018            ..default_config()
2019        };
2020        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
2021        assert!(actor.rezero_alpha.is_empty());
2022    }
2023
2024    #[test]
2025    fn test_residual_single_hidden_accepted() {
2026        let mut rng = make_rng();
2027        let config = PcActorConfig {
2028            residual: true,
2029            ..default_config()
2030        };
2031        let result: Result<PcActor, _> = PcActor::new(CpuLinAlg::new(), config, &mut rng);
2032        assert!(result.is_ok());
2033    }
2034
    // NOTE: The Local Learning (PC-based weight updates) tests live further
    // down in this module, after the residual sections — see
    // `local_learning_config` below.

    // ── Residual Inference Tests ──────────────────────────────

    /// `residual: false` must be a strict no-op on the forward path: an actor
    /// built with the flag explicitly off produces outputs numerically
    /// identical (within 1e-12) to one built from the plain config. Both
    /// actors use identically seeded RNGs, so their weights match.
    #[test]
    fn test_residual_false_identical_to_non_residual() {
        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
        let mut rng1 = make_rng();
        let actor1: PcActor =
            PcActor::new(CpuLinAlg::new(), two_hidden_config(), &mut rng1).unwrap();
        let result1 = actor1.infer(&input);

        let mut rng2 = make_rng();
        let config2 = PcActorConfig {
            residual: false,
            ..two_hidden_config()
        };
        let actor2: PcActor = PcActor::new(CpuLinAlg::new(), config2, &mut rng2).unwrap();
        let result2 = actor2.infer(&input);

        for (a, b) in result1.y_conv.iter().zip(result2.y_conv.iter()) {
            assert!((a - b).abs() < 1e-12);
        }
    }
2059
2060    #[test]
2061    fn test_residual_rezero_zero_second_hidden_near_identity() {
2062        let mut rng = make_rng();
2063        let config = PcActorConfig {
2064            rezero_init: 0.0,
2065            alpha: 0.0,
2066            ..residual_two_hidden_config()
2067        };
2068        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
2069        let result = actor.infer(&[0.5; 9]);
2070        let h0 = &result.hidden_states[0];
2071        let h1 = &result.hidden_states[1];
2072        for (a, b) in h0.iter().zip(h1.iter()) {
2073            assert!(
2074                (a - b).abs() < 1e-12,
2075                "With rezero_init=0, h[1] should equal h[0]"
2076            );
2077        }
2078    }
2079
2080    #[test]
2081    fn test_residual_infer_all_outputs_finite() {
2082        let mut rng = make_rng();
2083        let actor: PcActor =
2084            PcActor::new(CpuLinAlg::new(), residual_two_hidden_config(), &mut rng).unwrap();
2085        let result = actor.infer(&[0.5; 9]);
2086        for &v in &result.y_conv {
2087            assert!(v.is_finite());
2088        }
2089        for &v in &result.latent_concat {
2090            assert!(v.is_finite());
2091        }
2092        assert!(result.surprise_score.is_finite());
2093    }
2094
2095    #[test]
2096    fn test_residual_latent_concat_size() {
2097        let mut rng = make_rng();
2098        let actor: PcActor =
2099            PcActor::new(CpuLinAlg::new(), residual_two_hidden_config(), &mut rng).unwrap();
2100        let result = actor.infer(&[0.5; 9]);
2101        assert_eq!(result.latent_concat.len(), 54); // 27 + 27
2102    }
2103
2104    #[test]
2105    fn test_residual_pc_loop_completes() {
2106        let mut rng = make_rng();
2107        let config = PcActorConfig {
2108            alpha: 0.03,
2109            max_steps: 5,
2110            ..residual_two_hidden_config()
2111        };
2112        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
2113        let result = actor.infer(&[0.5; 9]);
2114        assert!(result.steps_used > 0);
2115        assert!(result.steps_used <= 5);
2116    }
2117
2118    #[test]
2119    fn test_residual_hidden_states_count() {
2120        let mut rng = make_rng();
2121        let actor: PcActor =
2122            PcActor::new(CpuLinAlg::new(), residual_two_hidden_config(), &mut rng).unwrap();
2123        let result = actor.infer(&[0.5; 9]);
2124        assert_eq!(result.hidden_states.len(), 2);
2125    }
2126
2127    #[test]
2128    fn test_residual_infer_does_not_modify_weights() {
2129        let mut rng = make_rng();
2130        let actor: PcActor =
2131            PcActor::new(CpuLinAlg::new(), residual_two_hidden_config(), &mut rng).unwrap();
2132        let weights_before: Vec<Vec<f64>> = actor
2133            .layers
2134            .iter()
2135            .map(|l| l.weights.data.clone())
2136            .collect();
2137        let alpha_before = actor.rezero_alpha.clone();
2138        let _ = actor.infer(&[0.5; 9]);
2139        for (i, layer) in actor.layers.iter().enumerate() {
2140            assert_eq!(layer.weights.data, weights_before[i]);
2141        }
2142        assert_eq!(actor.rezero_alpha, alpha_before);
2143    }
2144
2145    #[test]
2146    fn test_residual_three_hidden_infer_finite() {
2147        let mut rng = make_rng();
2148        let config = PcActorConfig {
2149            residual: true,
2150            hidden_layers: vec![
2151                LayerDef {
2152                    size: 27,
2153                    activation: Activation::Tanh,
2154                },
2155                LayerDef {
2156                    size: 27,
2157                    activation: Activation::Tanh,
2158                },
2159                LayerDef {
2160                    size: 27,
2161                    activation: Activation::Tanh,
2162                },
2163            ],
2164            ..default_config()
2165        };
2166        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
2167        let result = actor.infer(&[0.5; 9]);
2168        for &v in &result.y_conv {
2169            assert!(v.is_finite());
2170        }
2171    }
2172
2173    #[test]
2174    fn test_residual_tanh_components_populated() {
2175        let mut rng = make_rng();
2176        let actor: PcActor =
2177            PcActor::new(CpuLinAlg::new(), residual_two_hidden_config(), &mut rng).unwrap();
2178        let result = actor.infer(&[0.5; 9]);
2179        assert_eq!(result.tanh_components.len(), 2);
2180        assert!(result.tanh_components[0].is_none()); // layer 0: no skip
2181        assert!(result.tanh_components[1].is_some()); // layer 1: has skip
2182        assert_eq!(result.tanh_components[1].as_ref().unwrap().len(), 27);
2183    }
2184
    /// Regression guard: with the ReZero gate fully open (rezero_init = 1.0)
    /// the post-skip state h[1] differs markedly from the tanh component, so
    /// a PC loop that predicts from the wrong quantity would surface here as
    /// divergent or non-finite surprise / prediction errors.
    #[test]
    fn test_residual_pc_prediction_uses_tanh_component_not_full_state() {
        // With rezero_init=1.0, h[1] = tanh_out + h[0] (significantly different
        // from tanh_out alone). If PC prediction uses h[1] instead of tanh_out,
        // the surprise score and convergence will differ.
        // Two runs with same weights: one with alpha=0 (no PC), one with alpha>0.
        // The PC loop should converge meaningfully (surprise decreases).
        let mut rng = make_rng();
        let config = PcActorConfig {
            rezero_init: 1.0,
            alpha: 0.1,
            max_steps: 20,
            tol: 0.001,
            min_steps: 1,
            ..residual_two_hidden_config()
        };
        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
        let result = actor.infer(&[1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5]);
        // With proper PC predictions, surprise should be finite and non-negative
        assert!(result.surprise_score.is_finite());
        assert!(result.surprise_score >= 0.0);
        // Prediction errors should all be finite
        for errors in &result.prediction_errors {
            for &e in errors {
                assert!(e.is_finite(), "PC prediction error not finite: {e}");
            }
        }
    }
2213
2214    // ── Residual Backward Tests ────────────────────────────────
2215
    /// `residual: false` must be a strict no-op on the backward path too:
    /// two identically seeded actors — one from the plain config, one with
    /// the flag spelled out — end up with exactly equal weights after the
    /// same infer + update sequence.
    #[test]
    fn test_residual_false_update_identical_to_non_residual() {
        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
        let delta = vec![0.1; 9];

        let mut rng1 = make_rng();
        let mut actor1: PcActor =
            PcActor::new(CpuLinAlg::new(), two_hidden_config(), &mut rng1).unwrap();
        let infer1 = actor1.infer(&input);
        actor1.update_weights(&delta, &infer1, &input, 1.0);

        let mut rng2 = make_rng();
        let config2 = PcActorConfig {
            residual: false,
            ..two_hidden_config()
        };
        let mut actor2: PcActor = PcActor::new(CpuLinAlg::new(), config2, &mut rng2).unwrap();
        let infer2 = actor2.infer(&input);
        actor2.update_weights(&delta, &infer2, &input, 1.0);

        for i in 0..actor1.layers.len() {
            assert_eq!(actor1.layers[i].weights.data, actor2.layers[i].weights.data);
        }
    }
2240
2241    #[test]
2242    fn test_residual_update_changes_all_layer_weights() {
2243        let mut rng = make_rng();
2244        let mut actor: PcActor =
2245            PcActor::new(CpuLinAlg::new(), residual_two_hidden_config(), &mut rng).unwrap();
2246        let input = vec![0.5; 9];
2247        let infer_result = actor.infer(&input);
2248        let w0 = actor.layers[0].weights.data.clone();
2249        let w1 = actor.layers[1].weights.data.clone();
2250        let w2 = actor.layers[2].weights.data.clone();
2251        actor.update_weights(&[0.1; 9], &infer_result, &input, 1.0);
2252        assert_ne!(actor.layers[0].weights.data, w0, "Layer 0 should change");
2253        assert_ne!(actor.layers[1].weights.data, w1, "Layer 1 should change");
2254        assert_ne!(
2255            actor.layers[2].weights.data, w2,
2256            "Output layer should change"
2257        );
2258    }
2259
2260    #[test]
2261    fn test_residual_update_changes_rezero_alpha() {
2262        let mut rng = make_rng();
2263        let mut actor: PcActor =
2264            PcActor::new(CpuLinAlg::new(), residual_two_hidden_config(), &mut rng).unwrap();
2265        let input = vec![0.5; 9];
2266        let infer_result = actor.infer(&input);
2267        let alpha_before = actor.rezero_alpha.clone();
2268        actor.update_weights(&[0.1; 9], &infer_result, &input, 1.0);
2269        assert_ne!(
2270            actor.rezero_alpha, alpha_before,
2271            "rezero_alpha should be updated by backprop"
2272        );
2273    }
2274
2275    #[test]
2276    fn test_residual_update_clips_weights() {
2277        let mut rng = make_rng();
2278        let mut actor: PcActor =
2279            PcActor::new(CpuLinAlg::new(), residual_two_hidden_config(), &mut rng).unwrap();
2280        let input = vec![1.0; 9];
2281        let infer_result = actor.infer(&input);
2282        actor.update_weights(&[1e6; 9], &infer_result, &input, 1.0);
2283        for layer in &actor.layers {
2284            for &w in &layer.weights.data {
2285                assert!(
2286                    w.abs() <= WEIGHT_CLIP + 1e-12,
2287                    "Weight {w} exceeds WEIGHT_CLIP"
2288                );
2289            }
2290        }
2291    }
2292
    /// With the ReZero gate fully open (rezero_init = 1.0), layer 0 should
    /// receive a larger total weight change than in an otherwise identical
    /// non-residual [27, 27] network — the expectation being that the skip
    /// path strengthens the signal reaching the earliest layer.
    #[test]
    fn test_residual_gradient_stronger_than_non_residual() {
        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
        let delta = vec![0.1; 9];

        // Non-residual 2 hidden layers (27, 27)
        let mut rng1 = make_rng();
        let config1 = PcActorConfig {
            hidden_layers: vec![
                LayerDef {
                    size: 27,
                    activation: Activation::Tanh,
                },
                LayerDef {
                    size: 27,
                    activation: Activation::Tanh,
                },
            ],
            ..default_config()
        };
        let mut actor1: PcActor = PcActor::new(CpuLinAlg::new(), config1, &mut rng1).unwrap();
        let w0_before1 = actor1.layers[0].weights.data.clone();
        let infer1 = actor1.infer(&input);
        actor1.update_weights(&delta, &infer1, &input, 1.0);
        // L1 norm of the change in layer 0's weights
        let change1: f64 = actor1.layers[0]
            .weights
            .data
            .iter()
            .zip(w0_before1.iter())
            .map(|(a, b)| (a - b).abs())
            .sum();

        // Residual 2 hidden layers (27, 27) with rezero_init=1.0
        let mut rng2 = make_rng();
        let config2 = PcActorConfig {
            rezero_init: 1.0,
            ..residual_two_hidden_config()
        };
        let mut actor2: PcActor = PcActor::new(CpuLinAlg::new(), config2, &mut rng2).unwrap();
        let w0_before2 = actor2.layers[0].weights.data.clone();
        let infer2 = actor2.infer(&input);
        actor2.update_weights(&delta, &infer2, &input, 1.0);
        let change2: f64 = actor2.layers[0]
            .weights
            .data
            .iter()
            .zip(w0_before2.iter())
            .map(|(a, b)| (a - b).abs())
            .sum();

        assert!(
            change2 > change1,
            "Residual should propagate stronger gradient to layer 0: residual={change2:.6}, non-residual={change1:.6}"
        );
    }
2348
2349    #[test]
2350    fn test_residual_hybrid_lambda_works() {
2351        let mut rng = make_rng();
2352        let config = PcActorConfig {
2353            local_lambda: 0.99,
2354            ..residual_two_hidden_config()
2355        };
2356        let mut actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
2357        let input = vec![0.5; 9];
2358        let infer_result = actor.infer(&input);
2359        let w0_before = actor.layers[0].weights.data.clone();
2360        actor.update_weights(&[0.1; 9], &infer_result, &input, 1.0);
2361        assert_ne!(actor.layers[0].weights.data, w0_before);
2362    }
2363
    /// Fixture: default topology with local_lambda set to 0.0 — see the
    /// `PcActorConfig::local_lambda` docs for the mixing semantics.
    fn local_learning_config() -> PcActorConfig {
        PcActorConfig {
            local_lambda: 0.0,
            ..default_config()
        }
    }
2370
2371    #[test]
2372    fn test_infer_prediction_errors_count_matches_hidden_layers() {
2373        let mut rng = make_rng();
2374        let actor: PcActor = PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
2375        let result = actor.infer(&[0.0; 9]);
2376        assert_eq!(result.prediction_errors.len(), 1);
2377    }
2378
2379    #[test]
2380    fn test_infer_prediction_errors_two_hidden() {
2381        let mut rng = make_rng();
2382        let actor: PcActor = PcActor::new(CpuLinAlg::new(), two_hidden_config(), &mut rng).unwrap();
2383        let result = actor.infer(&[0.0; 9]);
2384        assert_eq!(result.prediction_errors.len(), 2);
2385    }
2386
2387    #[test]
2388    fn test_infer_prediction_errors_zero_hidden_is_empty() {
2389        let mut rng = make_rng();
2390        let config = PcActorConfig {
2391            hidden_layers: vec![],
2392            ..default_config()
2393        };
2394        let actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
2395        let result = actor.infer(&[0.5; 9]);
2396        assert!(result.prediction_errors.is_empty());
2397    }
2398
2399    #[test]
2400    fn test_infer_prediction_errors_all_finite() {
2401        let mut rng = make_rng();
2402        let actor: PcActor = PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
2403        let result = actor.infer(&[1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5]);
2404        for errors in &result.prediction_errors {
2405            for &e in errors {
2406                assert!(e.is_finite(), "prediction error not finite: {e}");
2407            }
2408        }
2409    }
2410
2411    #[test]
2412    fn test_infer_prediction_errors_size_matches_hidden_layer_size() {
2413        let mut rng = make_rng();
2414        let actor: PcActor = PcActor::new(CpuLinAlg::new(), default_config(), &mut rng).unwrap();
2415        let result = actor.infer(&[0.0; 9]);
2416        // default_config has one hidden layer of size 18
2417        assert_eq!(result.prediction_errors[0].len(), 18);
2418    }
2419
2420    #[test]
2421    fn test_local_learning_config_accepted() {
2422        let mut rng = make_rng();
2423        let config = local_learning_config();
2424        assert!((config.local_lambda).abs() < f64::EPSILON);
2425        let actor: Result<PcActor, _> = PcActor::new(CpuLinAlg::new(), config, &mut rng);
2426        assert!(actor.is_ok());
2427    }
2428
2429    #[test]
2430    fn test_local_learning_update_changes_weights() {
2431        let mut rng = make_rng();
2432        let mut actor: PcActor =
2433            PcActor::new(CpuLinAlg::new(), local_learning_config(), &mut rng).unwrap();
2434        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2435        let infer_result = actor.infer(&input);
2436        let weights_before = actor.layers[0].weights.data.clone();
2437        let delta = vec![0.1; 9];
2438        actor.update_weights(&delta, &infer_result, &input, 1.0);
2439        assert_ne!(actor.layers[0].weights.data, weights_before);
2440    }
2441
2442    #[test]
2443    fn test_local_learning_clips_weights() {
2444        let mut rng = make_rng();
2445        let mut actor: PcActor =
2446            PcActor::new(CpuLinAlg::new(), local_learning_config(), &mut rng).unwrap();
2447        let input = vec![1.0; 9];
2448        let infer_result = actor.infer(&input);
2449        let delta = vec![1e6; 9];
2450        actor.update_weights(&delta, &infer_result, &input, 1.0);
2451        for layer in &actor.layers {
2452            for &w in &layer.weights.data {
2453                assert!(
2454                    w.abs() <= WEIGHT_CLIP + 1e-12,
2455                    "Weight {w} exceeds WEIGHT_CLIP"
2456                );
2457            }
2458        }
2459    }
2460
2461    #[test]
2462    fn test_local_learning_two_hidden_changes_both() {
2463        let mut rng = make_rng();
2464        let config = PcActorConfig {
2465            local_lambda: 0.0,
2466            ..two_hidden_config()
2467        };
2468        let mut actor: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng).unwrap();
2469        let input = vec![0.5; 9];
2470        let infer_result = actor.infer(&input);
2471        let w0_before = actor.layers[0].weights.data.clone();
2472        let w1_before = actor.layers[1].weights.data.clone();
2473        let delta = vec![0.1; 9];
2474        actor.update_weights(&delta, &infer_result, &input, 1.0);
2475        assert_ne!(
2476            actor.layers[0].weights.data, w0_before,
2477            "Layer 0 should change"
2478        );
2479        assert_ne!(
2480            actor.layers[1].weights.data, w1_before,
2481            "Layer 1 should change"
2482        );
2483    }
2484
2485    #[test]
2486    fn test_local_learning_differs_from_backprop() {
2487        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2488        let delta = vec![0.1; 9];
2489
2490        // Backprop actor
2491        let mut rng1 = make_rng();
2492        let mut bp_actor: PcActor =
2493            PcActor::new(CpuLinAlg::new(), default_config(), &mut rng1).unwrap();
2494        let bp_infer = bp_actor.infer(&input);
2495        bp_actor.update_weights(&delta, &bp_infer, &input, 1.0);
2496
2497        // Local learning actor (same initial weights)
2498        let mut rng2 = make_rng();
2499        let mut ll_actor: PcActor =
2500            PcActor::new(CpuLinAlg::new(), local_learning_config(), &mut rng2).unwrap();
2501        let ll_infer = ll_actor.infer(&input);
2502        ll_actor.update_weights(&delta, &ll_infer, &input, 1.0);
2503
2504        // Hidden layer weights should differ between the two approaches
2505        assert_ne!(
2506            bp_actor.layers[0].weights.data, ll_actor.layers[0].weights.data,
2507            "Local learning should produce different weight updates than backprop"
2508        );
2509    }
2510
2511    // ── Hybrid Learning (local_lambda) Tests ────────────────────
2512
2513    fn hybrid_config(lambda: f64) -> PcActorConfig {
2514        PcActorConfig {
2515            local_lambda: lambda,
2516            ..default_config()
2517        }
2518    }
2519
2520    #[test]
2521    fn test_local_lambda_one_equals_backprop() {
2522        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2523        let delta = vec![0.1; 9];
2524
2525        // Pure backprop (local_learning=false, default)
2526        let mut rng1 = make_rng();
2527        let mut bp_actor: PcActor =
2528            PcActor::new(CpuLinAlg::new(), default_config(), &mut rng1).unwrap();
2529        let bp_infer = bp_actor.infer(&input);
2530        bp_actor.update_weights(&delta, &bp_infer, &input, 1.0);
2531
2532        // lambda=1.0 should be identical to backprop
2533        let mut rng2 = make_rng();
2534        let mut lam_actor: PcActor =
2535            PcActor::new(CpuLinAlg::new(), hybrid_config(1.0), &mut rng2).unwrap();
2536        let lam_infer = lam_actor.infer(&input);
2537        lam_actor.update_weights(&delta, &lam_infer, &input, 1.0);
2538
2539        assert_eq!(
2540            bp_actor.layers[0].weights.data, lam_actor.layers[0].weights.data,
2541            "lambda=1.0 should produce identical weights to pure backprop"
2542        );
2543    }
2544
2545    #[test]
2546    fn test_local_lambda_zero_equals_local_learning() {
2547        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2548        let delta = vec![0.1; 9];
2549
2550        // Pure local (local_learning=true)
2551        let mut rng1 = make_rng();
2552        let mut ll_actor: PcActor =
2553            PcActor::new(CpuLinAlg::new(), local_learning_config(), &mut rng1).unwrap();
2554        let ll_infer = ll_actor.infer(&input);
2555        ll_actor.update_weights(&delta, &ll_infer, &input, 1.0);
2556
2557        // lambda=0.0 should be identical to pure local
2558        let mut rng2 = make_rng();
2559        let mut lam_actor: PcActor =
2560            PcActor::new(CpuLinAlg::new(), hybrid_config(0.0), &mut rng2).unwrap();
2561        let lam_infer = lam_actor.infer(&input);
2562        lam_actor.update_weights(&delta, &lam_infer, &input, 1.0);
2563
2564        assert_eq!(
2565            ll_actor.layers[0].weights.data, lam_actor.layers[0].weights.data,
2566            "lambda=0.0 should produce identical weights to pure local learning"
2567        );
2568    }
2569
2570    #[test]
2571    fn test_local_lambda_half_differs_from_both_pure_modes() {
2572        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2573        let delta = vec![0.1; 9];
2574
2575        // Pure backprop
2576        let mut rng1 = make_rng();
2577        let mut bp_actor: PcActor =
2578            PcActor::new(CpuLinAlg::new(), default_config(), &mut rng1).unwrap();
2579        let bp_infer = bp_actor.infer(&input);
2580        bp_actor.update_weights(&delta, &bp_infer, &input, 1.0);
2581
2582        // Pure local
2583        let mut rng2 = make_rng();
2584        let mut ll_actor: PcActor =
2585            PcActor::new(CpuLinAlg::new(), local_learning_config(), &mut rng2).unwrap();
2586        let ll_infer = ll_actor.infer(&input);
2587        ll_actor.update_weights(&delta, &ll_infer, &input, 1.0);
2588
2589        // Hybrid lambda=0.5
2590        let mut rng3 = make_rng();
2591        let mut hy_actor: PcActor =
2592            PcActor::new(CpuLinAlg::new(), hybrid_config(0.5), &mut rng3).unwrap();
2593        let hy_infer = hy_actor.infer(&input);
2594        hy_actor.update_weights(&delta, &hy_infer, &input, 1.0);
2595
2596        assert_ne!(
2597            hy_actor.layers[0].weights.data, bp_actor.layers[0].weights.data,
2598            "lambda=0.5 should differ from pure backprop"
2599        );
2600        assert_ne!(
2601            hy_actor.layers[0].weights.data, ll_actor.layers[0].weights.data,
2602            "lambda=0.5 should differ from pure local"
2603        );
2604    }
2605
2606    #[test]
2607    fn test_local_lambda_changes_weights() {
2608        let mut rng = make_rng();
2609        let mut actor: PcActor =
2610            PcActor::new(CpuLinAlg::new(), hybrid_config(0.5), &mut rng).unwrap();
2611        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2612        let infer_result = actor.infer(&input);
2613        let weights_before = actor.layers[0].weights.data.clone();
2614        let delta = vec![0.1; 9];
2615        actor.update_weights(&delta, &infer_result, &input, 1.0);
2616        assert_ne!(actor.layers[0].weights.data, weights_before);
2617    }
2618
2619    #[test]
2620    fn test_local_lambda_clips_weights() {
2621        let mut rng = make_rng();
2622        let mut actor: PcActor =
2623            PcActor::new(CpuLinAlg::new(), hybrid_config(0.5), &mut rng).unwrap();
2624        let input = vec![1.0; 9];
2625        let infer_result = actor.infer(&input);
2626        let delta = vec![1e6; 9];
2627        actor.update_weights(&delta, &infer_result, &input, 1.0);
2628        for layer in &actor.layers {
2629            for &w in &layer.weights.data {
2630                assert!(
2631                    w.abs() <= WEIGHT_CLIP + 1e-12,
2632                    "Weight {w} exceeds WEIGHT_CLIP"
2633                );
2634            }
2635        }
2636    }
2637
2638    #[test]
2639    fn test_local_lambda_negative_returns_error() {
2640        let mut rng = make_rng();
2641        let config = hybrid_config(-0.1);
2642        let result: Result<PcActor, _> = PcActor::new(CpuLinAlg::new(), config, &mut rng);
2643        assert!(result.is_err());
2644    }
2645
2646    #[test]
2647    fn test_local_lambda_above_one_returns_error() {
2648        let mut rng = make_rng();
2649        let config = hybrid_config(1.1);
2650        let result: Result<PcActor, _> = PcActor::new(CpuLinAlg::new(), config, &mut rng);
2651        assert!(result.is_err());
2652    }
2653
2654    // ── Phase 5 Cycle 5.1: Crossover same topology ─────────────
2655
2656    fn crossover_config_27() -> PcActorConfig {
2657        PcActorConfig {
2658            input_size: 9,
2659            hidden_layers: vec![LayerDef {
2660                size: 27,
2661                activation: Activation::Tanh,
2662            }],
2663            output_size: 9,
2664            output_activation: Activation::Linear,
2665            alpha: 0.03,
2666            tol: 0.01,
2667            min_steps: 1,
2668            max_steps: 5,
2669            lr_weights: 0.005,
2670            synchronous: true,
2671            temperature: 1.0,
2672            local_lambda: 0.99,
2673            residual: false,
2674            rezero_init: 0.001,
2675        }
2676    }
2677
2678    fn make_caches_for_actor(actor: &PcActor, batch_size: usize) -> Vec<Vec<Vec<f64>>> {
2679        let num_hidden = actor.config.hidden_layers.len();
2680        let mut layers: Vec<Vec<Vec<f64>>> = (0..num_hidden).map(|_| Vec::new()).collect();
2681        for i in 0..batch_size {
2682            let input: Vec<f64> = (0..actor.config.input_size)
2683                .map(|j| ((i * actor.config.input_size + j) as f64 * 0.01).sin())
2684                .collect();
2685            let result = actor.infer(&input);
2686            for (layer_idx, state) in result.hidden_states.iter().enumerate() {
2687                layers[layer_idx].push(state.clone());
2688            }
2689        }
2690        layers
2691    }
2692
2693    fn build_cache_matrix(
2694        cache_layers: &[Vec<Vec<f64>>],
2695        layer_idx: usize,
2696    ) -> crate::matrix::Matrix {
2697        use crate::linalg::LinAlg;
2698        let samples = &cache_layers[layer_idx];
2699        let batch_size = samples.len();
2700        let n_neurons = samples[0].len();
2701        let mut mat = CpuLinAlg::new().zeros_mat(batch_size, n_neurons);
2702        for (r, sample) in samples.iter().enumerate() {
2703            for (c, &val) in sample.iter().enumerate() {
2704                CpuLinAlg::new().mat_set(&mut mat, r, c, val);
2705            }
2706        }
2707        mat
2708    }
2709
2710    #[test]
2711    fn test_crossover_same_topology_produces_valid_actor() {
2712        let mut rng_a = StdRng::seed_from_u64(42);
2713        let mut rng_b = StdRng::seed_from_u64(123);
2714        let config = crossover_config_27();
2715        let actor_a: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_a).unwrap();
2716        let actor_b: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_b).unwrap();
2717
2718        let caches_a = make_caches_for_actor(&actor_a, 50);
2719        let caches_b = make_caches_for_actor(&actor_b, 50);
2720        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2721        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2722
2723        let mut rng_child = StdRng::seed_from_u64(99);
2724        let child: PcActor = PcActor::crossover(
2725            &actor_a,
2726            &actor_b,
2727            &cache_mats_a,
2728            &cache_mats_b,
2729            0.5,
2730            config,
2731            &mut rng_child,
2732        )
2733        .unwrap();
2734
2735        // Child has same topology
2736        assert_eq!(child.layers.len(), actor_a.layers.len());
2737        for (i, layer) in child.layers.iter().enumerate() {
2738            assert_eq!(
2739                CpuLinAlg::new().mat_rows(&layer.weights),
2740                CpuLinAlg::new().mat_rows(&actor_a.layers[i].weights)
2741            );
2742            assert_eq!(
2743                CpuLinAlg::new().mat_cols(&layer.weights),
2744                CpuLinAlg::new().mat_cols(&actor_a.layers[i].weights)
2745            );
2746        }
2747    }
2748
2749    #[test]
2750    fn test_crossover_same_topology_child_differs_from_parents() {
2751        let mut rng_a = StdRng::seed_from_u64(42);
2752        let mut rng_b = StdRng::seed_from_u64(123);
2753        let config = crossover_config_27();
2754        let actor_a: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_a).unwrap();
2755        let actor_b: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_b).unwrap();
2756
2757        let caches_a = make_caches_for_actor(&actor_a, 50);
2758        let caches_b = make_caches_for_actor(&actor_b, 50);
2759        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2760        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2761
2762        let mut rng_child = StdRng::seed_from_u64(99);
2763        let child: PcActor = PcActor::crossover(
2764            &actor_a,
2765            &actor_b,
2766            &cache_mats_a,
2767            &cache_mats_b,
2768            0.5,
2769            config,
2770            &mut rng_child,
2771        )
2772        .unwrap();
2773
2774        // Child weights differ from both parents (blended)
2775        assert_ne!(child.layers[0].weights.data, actor_a.layers[0].weights.data);
2776        assert_ne!(child.layers[0].weights.data, actor_b.layers[0].weights.data);
2777    }
2778
2779    #[test]
2780    fn test_crossover_alpha_one_approximates_parent_a() {
2781        let mut rng_a = StdRng::seed_from_u64(42);
2782        let mut rng_b = StdRng::seed_from_u64(123);
2783        let config = crossover_config_27();
2784        let actor_a: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_a).unwrap();
2785        let actor_b: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_b).unwrap();
2786
2787        let caches_a = make_caches_for_actor(&actor_a, 50);
2788        let caches_b = make_caches_for_actor(&actor_b, 50);
2789        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2790        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2791
2792        let mut rng_child = StdRng::seed_from_u64(99);
2793        let child: PcActor = PcActor::crossover(
2794            &actor_a,
2795            &actor_b,
2796            &cache_mats_a,
2797            &cache_mats_b,
2798            1.0, // alpha=1.0 → child ≈ parent A
2799            config,
2800            &mut rng_child,
2801        )
2802        .unwrap();
2803
2804        // Input layer (layer 0): positional crossover, should be close to parent A
2805        let a_w = &actor_a.layers[0].weights.data;
2806        let child_w = &child.layers[0].weights.data;
2807        let max_diff: f64 = a_w
2808            .iter()
2809            .zip(child_w.iter())
2810            .map(|(a, c)| (a - c).abs())
2811            .fold(0.0_f64, f64::max);
2812        assert!(
2813            max_diff < 1e-10,
2814            "alpha=1.0: input layer max diff from parent A = {max_diff}"
2815        );
2816    }
2817
2818    #[test]
2819    fn test_crossover_child_weights_finite() {
2820        let mut rng_a = StdRng::seed_from_u64(42);
2821        let mut rng_b = StdRng::seed_from_u64(123);
2822        let config = crossover_config_27();
2823        let actor_a: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_a).unwrap();
2824        let actor_b: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_b).unwrap();
2825
2826        let caches_a = make_caches_for_actor(&actor_a, 50);
2827        let caches_b = make_caches_for_actor(&actor_b, 50);
2828        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2829        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2830
2831        let mut rng_child = StdRng::seed_from_u64(99);
2832        let child: PcActor = PcActor::crossover(
2833            &actor_a,
2834            &actor_b,
2835            &cache_mats_a,
2836            &cache_mats_b,
2837            0.5,
2838            config,
2839            &mut rng_child,
2840        )
2841        .unwrap();
2842
2843        for (i, layer) in child.layers.iter().enumerate() {
2844            for &w in &layer.weights.data {
2845                assert!(w.is_finite(), "NaN/Inf in layer {i} weights");
2846            }
2847            for b in CpuLinAlg::new().vec_to_vec(&layer.bias) {
2848                assert!(b.is_finite(), "NaN/Inf in layer {i} biases");
2849            }
2850        }
2851    }
2852
2853    // ── Phase 5 Cycle 5.2: Crossover child smaller ──────────────
2854
2855    #[test]
2856    fn test_crossover_child_smaller() {
2857        let mut rng_a = StdRng::seed_from_u64(42);
2858        let mut rng_b = StdRng::seed_from_u64(123);
2859        let config_27 = PcActorConfig {
2860            hidden_layers: vec![
2861                LayerDef {
2862                    size: 27,
2863                    activation: Activation::Tanh,
2864                },
2865                LayerDef {
2866                    size: 27,
2867                    activation: Activation::Tanh,
2868                },
2869            ],
2870            ..crossover_config_27()
2871        };
2872        let actor_a: PcActor =
2873            PcActor::new(CpuLinAlg::new(), config_27.clone(), &mut rng_a).unwrap();
2874        let actor_b: PcActor = PcActor::new(CpuLinAlg::new(), config_27, &mut rng_b).unwrap();
2875
2876        let caches_a = make_caches_for_actor(&actor_a, 50);
2877        let caches_b = make_caches_for_actor(&actor_b, 50);
2878        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
2879        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();
2880
2881        let child_config = PcActorConfig {
2882            hidden_layers: vec![
2883                LayerDef {
2884                    size: 18,
2885                    activation: Activation::Tanh,
2886                },
2887                LayerDef {
2888                    size: 18,
2889                    activation: Activation::Tanh,
2890                },
2891            ],
2892            ..crossover_config_27()
2893        };
2894
2895        let mut rng_child = StdRng::seed_from_u64(99);
2896        let child: PcActor = PcActor::crossover(
2897            &actor_a,
2898            &actor_b,
2899            &cache_mats_a,
2900            &cache_mats_b,
2901            0.5,
2902            child_config,
2903            &mut rng_child,
2904        )
2905        .unwrap();
2906
2907        // Child hidden layers have 18 neurons
2908        use crate::linalg::LinAlg;
2909        assert_eq!(CpuLinAlg::new().mat_rows(&child.layers[0].weights), 18);
2910        assert_eq!(CpuLinAlg::new().mat_rows(&child.layers[1].weights), 18);
2911    }
2912
2913    // ── Phase 5 Cycle 5.3: Crossover parents differ ─────────────
2914
2915    #[test]
2916    fn test_crossover_parents_different_sizes() {
2917        let mut rng_a = StdRng::seed_from_u64(42);
2918        let mut rng_b = StdRng::seed_from_u64(123);
2919        let config_a = crossover_config_27(); // [27]
2920        let config_b = PcActorConfig {
2921            hidden_layers: vec![LayerDef {
2922                size: 18,
2923                activation: Activation::Tanh,
2924            }],
2925            ..crossover_config_27()
2926        }; // [18]
2927
2928        let actor_a: PcActor = PcActor::new(CpuLinAlg::new(), config_a, &mut rng_a).unwrap();
2929        let actor_b: PcActor = PcActor::new(CpuLinAlg::new(), config_b, &mut rng_b).unwrap();
2930
2931        let caches_a = make_caches_for_actor(&actor_a, 50);
2932        let caches_b = make_caches_for_actor(&actor_b, 50);
2933        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2934        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2935
2936        // Child has [27] → blending zone [0..18), copy zone [18..27) from parent A
2937        let child_config = crossover_config_27();
2938        let mut rng_child = StdRng::seed_from_u64(99);
2939        let child: PcActor = PcActor::crossover(
2940            &actor_a,
2941            &actor_b,
2942            &cache_mats_a,
2943            &cache_mats_b,
2944            0.5,
2945            child_config,
2946            &mut rng_child,
2947        )
2948        .unwrap();
2949
2950        use crate::linalg::LinAlg;
2951        // Child has correct dimensions [27]
2952        assert_eq!(CpuLinAlg::new().mat_rows(&child.layers[0].weights), 27);
2953        // All weights finite
2954        for &w in &child.layers[0].weights.data {
2955            assert!(w.is_finite());
2956        }
2957    }
2958
2959    // ── Phase 5 Cycle 5.4: Crossover child larger ───────────────
2960
2961    #[test]
2962    fn test_crossover_child_larger() {
2963        let mut rng_a = StdRng::seed_from_u64(42);
2964        let mut rng_b = StdRng::seed_from_u64(123);
2965        let config_18 = PcActorConfig {
2966            hidden_layers: vec![LayerDef {
2967                size: 18,
2968                activation: Activation::Tanh,
2969            }],
2970            ..crossover_config_27()
2971        };
2972        let actor_a: PcActor =
2973            PcActor::new(CpuLinAlg::new(), config_18.clone(), &mut rng_a).unwrap();
2974        let actor_b: PcActor = PcActor::new(CpuLinAlg::new(), config_18, &mut rng_b).unwrap();
2975
2976        let caches_a = make_caches_for_actor(&actor_a, 50);
2977        let caches_b = make_caches_for_actor(&actor_b, 50);
2978        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2979        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2980
2981        // Child has [27] → blending zone [0..18), Xavier zone [18..27)
2982        let child_config = crossover_config_27();
2983        let mut rng_child = StdRng::seed_from_u64(99);
2984        let child: PcActor = PcActor::crossover(
2985            &actor_a,
2986            &actor_b,
2987            &cache_mats_a,
2988            &cache_mats_b,
2989            0.5,
2990            child_config,
2991            &mut rng_child,
2992        )
2993        .unwrap();
2994
2995        use crate::linalg::LinAlg;
2996        assert_eq!(CpuLinAlg::new().mat_rows(&child.layers[0].weights), 27);
2997        // All weights finite
2998        for &w in &child.layers[0].weights.data {
2999            assert!(w.is_finite());
3000        }
3001        // Xavier zone weights are not all zero (random init)
3002        let xavier_zone_nonzero = (18..27).any(|r| {
3003            (0..CpuLinAlg::new().mat_cols(&child.layers[0].weights)).any(|c| {
3004                CpuLinAlg::new()
3005                    .mat_get(&child.layers[0].weights, r, c)
3006                    .abs()
3007                    > 1e-15
3008            })
3009        });
3010        assert!(
3011            xavier_zone_nonzero,
3012            "Xavier zone [18..27) should have non-zero weights"
3013        );
3014    }
3015
3016    // ── Phase 5 Cycle 5.5: Crossover layer count mismatch ───────
3017
3018    #[test]
3019    fn test_crossover_child_more_layers() {
3020        let mut rng_a = StdRng::seed_from_u64(42);
3021        let mut rng_b = StdRng::seed_from_u64(123);
3022        let config_2l = PcActorConfig {
3023            hidden_layers: vec![
3024                LayerDef {
3025                    size: 27,
3026                    activation: Activation::Tanh,
3027                },
3028                LayerDef {
3029                    size: 27,
3030                    activation: Activation::Tanh,
3031                },
3032            ],
3033            ..crossover_config_27()
3034        };
3035        let actor_a: PcActor =
3036            PcActor::new(CpuLinAlg::new(), config_2l.clone(), &mut rng_a).unwrap();
3037        let actor_b: PcActor = PcActor::new(CpuLinAlg::new(), config_2l, &mut rng_b).unwrap();
3038
3039        let caches_a = make_caches_for_actor(&actor_a, 50);
3040        let caches_b = make_caches_for_actor(&actor_b, 50);
3041        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
3042        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();
3043
3044        // Child has 3 hidden layers → layers 0-1 crossover, layer 2 Xavier
3045        let child_config = PcActorConfig {
3046            hidden_layers: vec![
3047                LayerDef {
3048                    size: 27,
3049                    activation: Activation::Tanh,
3050                },
3051                LayerDef {
3052                    size: 27,
3053                    activation: Activation::Tanh,
3054                },
3055                LayerDef {
3056                    size: 18,
3057                    activation: Activation::Tanh,
3058                },
3059            ],
3060            ..crossover_config_27()
3061        };
3062
3063        let mut rng_child = StdRng::seed_from_u64(99);
3064        let child: PcActor = PcActor::crossover(
3065            &actor_a,
3066            &actor_b,
3067            &cache_mats_a,
3068            &cache_mats_b,
3069            0.5,
3070            child_config,
3071            &mut rng_child,
3072        )
3073        .unwrap();
3074
3075        use crate::linalg::LinAlg;
3076        // Child has 4 layers (3 hidden + 1 output)
3077        assert_eq!(child.layers.len(), 4);
3078        // Layer 2 (new) has 18 rows
3079        assert_eq!(CpuLinAlg::new().mat_rows(&child.layers[2].weights), 18);
3080        // All weights finite
3081        for (i, layer) in child.layers.iter().enumerate() {
3082            for &w in &layer.weights.data {
3083                assert!(w.is_finite(), "NaN/Inf in layer {i}");
3084            }
3085        }
3086    }
3087
3088    #[test]
3089    fn test_crossover_child_fewer_layers() {
3090        let mut rng_a = StdRng::seed_from_u64(42);
3091        let mut rng_b = StdRng::seed_from_u64(123);
3092        let config_3l = PcActorConfig {
3093            hidden_layers: vec![
3094                LayerDef {
3095                    size: 27,
3096                    activation: Activation::Tanh,
3097                },
3098                LayerDef {
3099                    size: 27,
3100                    activation: Activation::Tanh,
3101                },
3102                LayerDef {
3103                    size: 18,
3104                    activation: Activation::Tanh,
3105                },
3106            ],
3107            ..crossover_config_27()
3108        };
3109        let actor_a: PcActor =
3110            PcActor::new(CpuLinAlg::new(), config_3l.clone(), &mut rng_a).unwrap();
3111        let actor_b: PcActor = PcActor::new(CpuLinAlg::new(), config_3l, &mut rng_b).unwrap();
3112
3113        let caches_a = make_caches_for_actor(&actor_a, 50);
3114        let caches_b = make_caches_for_actor(&actor_b, 50);
3115        let cache_mats_a: Vec<_> = (0..3).map(|i| build_cache_matrix(&caches_a, i)).collect();
3116        let cache_mats_b: Vec<_> = (0..3).map(|i| build_cache_matrix(&caches_b, i)).collect();
3117
3118        // Child has 2 hidden layers → layers 0-1 crossover, layer 2 discarded
3119        let child_config = PcActorConfig {
3120            hidden_layers: vec![
3121                LayerDef {
3122                    size: 27,
3123                    activation: Activation::Tanh,
3124                },
3125                LayerDef {
3126                    size: 27,
3127                    activation: Activation::Tanh,
3128                },
3129            ],
3130            ..crossover_config_27()
3131        };
3132
3133        let mut rng_child = StdRng::seed_from_u64(99);
3134        let child: PcActor = PcActor::crossover(
3135            &actor_a,
3136            &actor_b,
3137            &cache_mats_a,
3138            &cache_mats_b,
3139            0.5,
3140            child_config,
3141            &mut rng_child,
3142        )
3143        .unwrap();
3144
3145        use crate::linalg::LinAlg;
3146        // Child has 3 layers (2 hidden + 1 output)
3147        assert_eq!(child.layers.len(), 3);
3148        // Output layer input_size = 27 (last hidden size)
3149        assert_eq!(CpuLinAlg::new().mat_cols(&child.layers[2].weights), 27);
3150    }
3151
3152    // ── Phase 5 Cycle 5.6: Crossover residual components ────────
3153
3154    #[test]
3155    fn test_crossover_residual_rezero_blended() {
3156        let mut rng_a = StdRng::seed_from_u64(42);
3157        let mut rng_b = StdRng::seed_from_u64(123);
3158        let config = PcActorConfig {
3159            hidden_layers: vec![
3160                LayerDef {
3161                    size: 27,
3162                    activation: Activation::Softsign,
3163                },
3164                LayerDef {
3165                    size: 27,
3166                    activation: Activation::Softsign,
3167                },
3168            ],
3169            residual: true,
3170            rezero_init: 0.1,
3171            ..crossover_config_27()
3172        };
3173        let actor_a: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_a).unwrap();
3174        let actor_b: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_b).unwrap();
3175
3176        let caches_a = make_caches_for_actor(&actor_a, 50);
3177        let caches_b = make_caches_for_actor(&actor_b, 50);
3178        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
3179        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();
3180
3181        let mut rng_child = StdRng::seed_from_u64(99);
3182        let child: PcActor = PcActor::crossover(
3183            &actor_a,
3184            &actor_b,
3185            &cache_mats_a,
3186            &cache_mats_b,
3187            0.5,
3188            config,
3189            &mut rng_child,
3190        )
3191        .unwrap();
3192
3193        // Child has rezero_alpha values
3194        assert!(!child.rezero_alpha.is_empty());
3195        // Blended rezero_alpha: with alpha=0.5 and both parents same init,
3196        // child should be close to parent values
3197        for &rz in &child.rezero_alpha {
3198            assert!(rz.is_finite(), "rezero_alpha is not finite");
3199        }
3200    }
3201
3202    #[test]
3203    fn test_crossover_residual_skip_projections_blended() {
3204        let mut rng_a = StdRng::seed_from_u64(42);
3205        let mut rng_b = StdRng::seed_from_u64(123);
3206        let config = PcActorConfig {
3207            hidden_layers: vec![
3208                LayerDef {
3209                    size: 27,
3210                    activation: Activation::Softsign,
3211                },
3212                LayerDef {
3213                    size: 18,
3214                    activation: Activation::Softsign,
3215                },
3216            ],
3217            residual: true,
3218            rezero_init: 0.1,
3219            ..crossover_config_27()
3220        };
3221        let actor_a: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_a).unwrap();
3222        let actor_b: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_b).unwrap();
3223
3224        let caches_a = make_caches_for_actor(&actor_a, 50);
3225        let caches_b = make_caches_for_actor(&actor_b, 50);
3226        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
3227        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();
3228
3229        let mut rng_child = StdRng::seed_from_u64(99);
3230        let child: PcActor = PcActor::crossover(
3231            &actor_a,
3232            &actor_b,
3233            &cache_mats_a,
3234            &cache_mats_b,
3235            0.5,
3236            config,
3237            &mut rng_child,
3238        )
3239        .unwrap();
3240
3241        // Child should have skip_projections for size mismatch (27→18)
3242        assert!(!child.skip_projections.is_empty());
3243        // At least one projection should be Some (27→18 needs projection)
3244        let has_projection = child.skip_projections.iter().any(|p| p.is_some());
3245        assert!(has_projection, "Expected at least one skip projection");
3246
3247        // Projection weights are finite
3248        for mat in child.skip_projections.iter().flatten() {
3249            for &w in &mat.data {
3250                assert!(w.is_finite(), "NaN/Inf in skip projection");
3251            }
3252        }
3253    }
3254
3255    // ── Fix #1: Column permutation propagation ──────────────────
3256
    #[test]
    fn test_crossover_multilayer_column_permutation_consistency() {
        // Fix #1 regression test: when CCA alignment reorders parent B's
        // layer-0 neurons (a row permutation of B's layer-0 weights), the
        // columns of B's layer 1 must be reordered by the SAME permutation
        // before blending — otherwise column c of the child's layer 1 would
        // connect to the wrong layer-0 neuron.
        //
        // The test crosses two independently seeded parents, computes the
        // layer-0 CCA permutation itself, and — when that permutation is
        // non-trivial — checks numerically that the child's layer-1 columns
        // match the permuted blend rather than the naive positional blend.
        // (If CCA returns the identity permutation the two blends coincide
        // and nothing can be distinguished, so the test skips.)
        use crate::linalg::LinAlg;
        let mut rng_a = StdRng::seed_from_u64(42);
        let mut rng_b = StdRng::seed_from_u64(123);
        let config = PcActorConfig {
            hidden_layers: vec![
                LayerDef {
                    size: 8,
                    activation: Activation::Tanh,
                },
                LayerDef {
                    size: 8,
                    activation: Activation::Tanh,
                },
            ],
            input_size: 4,
            output_size: 4,
            ..crossover_config_27()
        };
        let actor_a: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_a).unwrap();
        let actor_b: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_b).unwrap();

        let caches_a = make_caches_for_actor(&actor_a, 100);
        let caches_b = make_caches_for_actor(&actor_b, 100);
        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();

        // Compute the layer-0 CCA permutation directly so we know whether the
        // column check below can distinguish permuted from unpermuted blends.
        let perm0 = crate::matrix::cca_neuron_alignment(
            &CpuLinAlg::new(),
            &cache_mats_a[0],
            &cache_mats_b[0],
        )
        .unwrap();
        let is_nontrivial = perm0.iter().enumerate().any(|(i, &p)| i != p);

        // Only test column propagation if CCA produced a non-trivial permutation
        if !is_nontrivial {
            // Parents too similar for meaningful test — skip
            return;
        }

        // Crossover with alpha=0.5
        let mut rng_child = StdRng::seed_from_u64(99);
        let child: PcActor = PcActor::crossover(
            &actor_a,
            &actor_b,
            &cache_mats_a,
            &cache_mats_b,
            0.5,
            config.clone(),
            &mut rng_child,
        )
        .unwrap();

        // Verify the column permutation was actually applied: for each
        // non-identity position c, the child's layer-1 column c must equal
        // the blend with parent B's column perm0[c] (correct) and differ
        // from the blend with B's column c (the broken, unpermuted form).
        let b_layer1 = &actor_b.layers[1];
        let b_cols = CpuLinAlg::new().mat_cols(&b_layer1.weights);

        // Expected: child layer 1 col[c] = 0.5 * A.layer1.col[c] + 0.5 * B.layer1.col[perm0[c]]
        // If column permutation is NOT applied, it would be:
        // child layer 1 col[c] = 0.5 * A.layer1.col[c] + 0.5 * B.layer1.col[c]  (wrong!)
        let a_layer1 = &actor_a.layers[1];
        let child_layer1 = &child.layers[1];
        let n_rows = CpuLinAlg::new().mat_rows(&child_layer1.weights);

        let mut has_col_permutation = false;
        for (c, &src_col) in perm0.iter().enumerate().take(b_cols.min(perm0.len())) {
            if src_col == c {
                continue; // Identity position, can't distinguish
            }
            // Check if child col c matches the permuted blend (correct)
            // vs the unpermuted blend (broken)
            for r in 0..n_rows {
                let a_val = CpuLinAlg::new().mat_get(&a_layer1.weights, r, c);
                let b_val_permuted = CpuLinAlg::new().mat_get(&b_layer1.weights, r, src_col);
                let b_val_unpermuted = CpuLinAlg::new().mat_get(&b_layer1.weights, r, c);
                let child_val = CpuLinAlg::new().mat_get(&child_layer1.weights, r, c);

                let expected_permuted = 0.5 * a_val + 0.5 * b_val_permuted;
                let expected_unpermuted = 0.5 * a_val + 0.5 * b_val_unpermuted;

                // A single entry where only the permuted expectation matches
                // is enough evidence the permutation was applied.
                if (child_val - expected_permuted).abs() < 1e-10
                    && (child_val - expected_unpermuted).abs() > 1e-10
                {
                    has_col_permutation = true;
                }
            }
        }

        assert!(
            has_col_permutation,
            "Layer 1 columns should be permuted to match layer 0's CCA \
             permutation of parent B. perm0={perm0:?}"
        );
    }
3378
3379    // ── Fix #5: Empty hidden_layers guard ────────────────────────
3380
3381    #[test]
3382    fn test_crossover_empty_hidden_layers_returns_error() {
3383        let mut rng_a = StdRng::seed_from_u64(42);
3384        let mut rng_b = StdRng::seed_from_u64(123);
3385        let config = crossover_config_27();
3386        let actor_a: PcActor = PcActor::new(CpuLinAlg::new(), config.clone(), &mut rng_a).unwrap();
3387        let actor_b: PcActor = PcActor::new(CpuLinAlg::new(), config, &mut rng_b).unwrap();
3388
3389        let caches_a = make_caches_for_actor(&actor_a, 50);
3390        let caches_b = make_caches_for_actor(&actor_b, 50);
3391        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
3392        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
3393
3394        // Child config with empty hidden layers should return error, not panic
3395        let empty_config = PcActorConfig {
3396            hidden_layers: vec![],
3397            ..crossover_config_27()
3398        };
3399
3400        let mut rng_child = StdRng::seed_from_u64(99);
3401        let result = PcActor::crossover(
3402            &actor_a,
3403            &actor_b,
3404            &cache_mats_a,
3405            &cache_mats_b,
3406            0.5,
3407            empty_config,
3408            &mut rng_child,
3409        );
3410        assert!(
3411            result.is_err(),
3412            "Crossover with empty hidden_layers should return error"
3413        );
3414    }
3415
3416    // ── from_weights dimension validation tests ──────────────────────
3417
3418    /// Helper: build valid PcActorWeights from a config by constructing
3419    /// an actor and extracting its weights.
3420    fn valid_weights_for(config: &PcActorConfig) -> crate::serializer::PcActorWeights {
3421        let mut rng = make_rng();
3422        let actor = PcActor::<CpuLinAlg>::new(CpuLinAlg::new(), config.clone(), &mut rng).unwrap();
3423        actor.to_weights()
3424    }
3425
3426    #[test]
3427    fn test_from_weights_valid_returns_ok() {
3428        let config = default_config();
3429        let weights = valid_weights_for(&config);
3430        let result = PcActor::<CpuLinAlg>::from_weights(CpuLinAlg::new(), config, weights);
3431        assert!(result.is_ok());
3432    }
3433
3434    #[test]
3435    fn test_from_weights_wrong_weight_rows_returns_err() {
3436        let config = default_config(); // input=9, hidden=[18], output=9
3437        let mut weights = valid_weights_for(&config);
3438        // Layer 0 should be 18x9; corrupt rows to 10x9
3439        weights.layers[0].weights = crate::matrix::Matrix::zeros(10, 9);
3440        weights.layers[0].bias = vec![0.0; 10];
3441        let result = PcActor::<CpuLinAlg>::from_weights(CpuLinAlg::new(), config, weights);
3442        assert!(result.is_err());
3443        let err = result.unwrap_err();
3444        assert!(
3445            matches!(err, PcError::DimensionMismatch { .. }),
3446            "Expected DimensionMismatch, got: {err}"
3447        );
3448    }
3449
3450    #[test]
3451    fn test_from_weights_wrong_weight_cols_returns_err() {
3452        let config = default_config(); // input=9, hidden=[18], output=9
3453        let mut weights = valid_weights_for(&config);
3454        // Layer 0 should be 18x9; corrupt cols to 18x5
3455        weights.layers[0].weights = crate::matrix::Matrix::zeros(18, 5);
3456        let result = PcActor::<CpuLinAlg>::from_weights(CpuLinAlg::new(), config, weights);
3457        assert!(result.is_err());
3458        let err = result.unwrap_err();
3459        assert!(
3460            matches!(err, PcError::DimensionMismatch { .. }),
3461            "Expected DimensionMismatch, got: {err}"
3462        );
3463    }
3464
3465    #[test]
3466    fn test_from_weights_wrong_bias_length_returns_err() {
3467        let config = default_config(); // hidden=[18], so layer 0 bias should be len 18
3468        let mut weights = valid_weights_for(&config);
3469        weights.layers[0].bias = vec![0.0; 5]; // wrong length
3470        let result = PcActor::<CpuLinAlg>::from_weights(CpuLinAlg::new(), config, weights);
3471        assert!(result.is_err());
3472        let err = result.unwrap_err();
3473        assert!(
3474            matches!(err, PcError::DimensionMismatch { .. }),
3475            "Expected DimensionMismatch, got: {err}"
3476        );
3477    }
3478
3479    #[test]
3480    fn test_from_weights_wrong_output_layer_dims_returns_err() {
3481        let config = default_config(); // output layer should be 9x18
3482        let mut weights = valid_weights_for(&config);
3483        let last = weights.layers.len() - 1;
3484        weights.layers[last].weights = crate::matrix::Matrix::zeros(9, 10); // wrong cols
3485        let result = PcActor::<CpuLinAlg>::from_weights(CpuLinAlg::new(), config, weights);
3486        assert!(result.is_err());
3487    }
3488
3489    #[test]
3490    fn test_from_weights_wrong_rezero_alpha_count_returns_err() {
3491        let mut config = default_config();
3492        config.hidden_layers = vec![
3493            LayerDef {
3494                size: 18,
3495                activation: Activation::Tanh,
3496            },
3497            LayerDef {
3498                size: 18,
3499                activation: Activation::Tanh,
3500            },
3501        ];
3502        config.residual = true;
3503        let mut weights = valid_weights_for(&config);
3504        // residual with 2 hidden layers expects 1 rezero_alpha; give 0
3505        weights.rezero_alpha = vec![];
3506        let result = PcActor::<CpuLinAlg>::from_weights(CpuLinAlg::new(), config, weights);
3507        assert!(result.is_err());
3508        let err = result.unwrap_err();
3509        assert!(
3510            matches!(err, PcError::DimensionMismatch { .. }),
3511            "Expected DimensionMismatch, got: {err}"
3512        );
3513    }
3514
3515    #[test]
3516    fn test_from_weights_wrong_skip_projection_dims_returns_err() {
3517        // N1: skip projection dimensions (rows/cols) should be validated
3518        let mut config = default_config();
3519        config.hidden_layers = vec![
3520            LayerDef {
3521                size: 27,
3522                activation: Activation::Softsign,
3523            },
3524            LayerDef {
3525                size: 18,
3526                activation: Activation::Softsign,
3527            },
3528        ];
3529        config.residual = true;
3530        let mut weights = valid_weights_for(&config);
3531        // Skip projection should be 18x27; corrupt to 10x5
3532        weights.skip_projections[0] = Some(crate::matrix::Matrix::zeros(10, 5));
3533        let result = PcActor::<CpuLinAlg>::from_weights(CpuLinAlg::new(), config, weights);
3534        assert!(result.is_err());
3535        let err = result.unwrap_err();
3536        assert!(
3537            matches!(err, PcError::DimensionMismatch { .. }),
3538            "Expected DimensionMismatch, got: {err}"
3539        );
3540    }
3541
3542    #[test]
3543    fn test_from_weights_wrong_skip_projections_count_returns_err() {
3544        let mut config = default_config();
3545        config.hidden_layers = vec![
3546            LayerDef {
3547                size: 18,
3548                activation: Activation::Tanh,
3549            },
3550            LayerDef {
3551                size: 18,
3552                activation: Activation::Tanh,
3553            },
3554        ];
3555        config.residual = true;
3556        let mut weights = valid_weights_for(&config);
3557        // Should have 1 skip_projection; give 3
3558        weights.skip_projections = vec![None, None, None];
3559        let result = PcActor::<CpuLinAlg>::from_weights(CpuLinAlg::new(), config, weights);
3560        assert!(result.is_err());
3561        let err = result.unwrap_err();
3562        assert!(
3563            matches!(err, PcError::DimensionMismatch { .. }),
3564            "Expected DimensionMismatch, got: {err}"
3565        );
3566    }
3567}