use scirs2_core::random::ChaCha8Rng;
use scirs2_core::random::{thread_rng, Rng, SeedableRng};
use scirs2_core::RngExt;
use std::collections::HashMap;
use std::time::{Duration, Instant};
use thiserror::Error;
use crate::ising::{IsingError, IsingModel};
use crate::simulator::{AnnealingParams, AnnealingSolution, TemperatureSchedule};
/// Errors produced while predicting, refining, or training annealing schedules.
#[derive(Error, Debug)]
pub enum AdaptiveScheduleError {
/// Propagated error from the underlying Ising model.
#[error("Ising error: {0}")]
IsingError(#[from] IsingError),
/// Failure inside the prediction network (e.g. dimension mismatch).
#[error("Neural network error: {0}")]
NeuralNetworkError(String),
/// Failure during supervised or RL training.
#[error("Training error: {0}")]
TrainingError(String),
/// Invalid scheduler/network configuration.
#[error("Configuration error: {0}")]
ConfigurationError(String),
/// Failure while preparing or transforming data.
#[error("Data processing error: {0}")]
DataError(String),
/// Failure in the schedule optimization step.
#[error("Optimization error: {0}")]
OptimizationError(String),
}
/// Convenience alias for results returned throughout this module.
pub type AdaptiveScheduleResult<T> = Result<T, AdaptiveScheduleError>;
/// Neural-network-driven generator of annealing schedules: a supervised
/// network predicts schedule parameters from problem features, and an RL
/// agent refines the prediction.
#[derive(Debug, Clone)]
pub struct NeuralAnnealingScheduler {
/// Supervised network mapping problem features to schedule parameters.
pub network: SchedulePredictionNetwork,
/// RL agent that applies discrete refinements to predicted schedules.
pub rl_agent: ScheduleRLAgent,
/// Scheduler-wide configuration.
pub config: SchedulerConfig,
/// Losses, rewards, and timings accumulated across training epochs.
pub training_history: TrainingHistory,
/// Features of previously seen problems, keyed by a feature signature.
pub feature_cache: HashMap<String, ProblemFeatures>,
/// Aggregate runtime statistics for solved problems.
pub performance_stats: PerformanceStatistics,
}
/// Tunable knobs for the neural annealing scheduler.
#[derive(Debug, Clone)]
pub struct SchedulerConfig {
/// Width of each network layer (input layer first).
pub network_layers: Vec<usize>,
/// Learning rate for network training.
pub learning_rate: f64,
/// Number of epochs run by `train`.
pub training_epochs: usize,
/// Capacity hint for experience/replay buffers.
pub buffer_size: usize,
/// Exploration probability for the RL agent.
pub exploration_rate: f64,
/// RL discount factor (gamma).
pub discount_factor: f64,
/// How often (in steps) networks are updated.
pub update_frequency: usize,
/// Whether to reuse knowledge across problem families.
pub use_transfer_learning: bool,
/// RNG seed for reproducible weight initialization; `None` = random.
pub seed: Option<u64>,
}
impl Default for SchedulerConfig {
/// Reasonable defaults for mid-sized problems; unseeded (non-reproducible) RNG.
fn default() -> Self {
Self {
network_layers: vec![32, 64, 32, 16],
learning_rate: 0.001,
training_epochs: 100,
buffer_size: 1000,
exploration_rate: 0.1,
discount_factor: 0.95,
update_frequency: 10,
use_transfer_learning: true,
seed: None,
}
}
}
/// Simple fully-connected feed-forward network used to predict schedule
/// parameters from a problem feature vector.
#[derive(Debug, Clone)]
pub struct SchedulePredictionNetwork {
/// Dense layers, applied in order by `forward`.
pub layers: Vec<NetworkLayer>,
/// Normalization statistics for the input features.
pub input_normalization: NormalizationParams,
/// Scaling statistics for the network outputs.
pub output_scaling: NormalizationParams,
/// Bookkeeping for the training loop (epoch, losses, metrics).
pub training_state: NetworkTrainingState,
}
/// One dense layer: `output[i] = activation(weights[i] · input + biases[i])`.
#[derive(Debug, Clone)]
pub struct NetworkLayer {
/// Weight matrix, one row per output neuron (row length = input size).
pub weights: Vec<Vec<f64>>,
/// One bias per output neuron.
pub biases: Vec<f64>,
/// Nonlinearity applied to each neuron's pre-activation.
pub activation: ActivationFunction,
}
/// Supported layer nonlinearities.
#[derive(Debug, Clone, PartialEq)]
pub enum ActivationFunction {
/// max(0, x)
ReLU,
/// 1 / (1 + e^-x)
Sigmoid,
/// tanh(x)
Tanh,
/// Identity (used for output layers).
Linear,
/// x if x > 0, else alpha * x; the field is the negative-slope alpha.
LeakyReLU(f64),
}
/// Per-dimension normalization statistics (mean/std and min/max).
#[derive(Debug, Clone)]
pub struct NormalizationParams {
/// Per-dimension means.
pub means: Vec<f64>,
/// Per-dimension standard deviations.
pub stds: Vec<f64>,
/// Per-dimension minimums.
pub mins: Vec<f64>,
/// Per-dimension maximums.
pub maxs: Vec<f64>,
}
/// Mutable training-loop state of a prediction network.
#[derive(Debug, Clone)]
pub struct NetworkTrainingState {
/// Number of backward passes performed so far.
pub epoch: usize,
/// Most recent training loss.
pub training_loss: f64,
/// Most recent validation loss.
pub validation_loss: f64,
/// Current learning rate.
pub learning_rate: f64,
/// Arbitrary named training metrics.
pub metrics: HashMap<String, f64>,
}
/// DQN-style agent that refines predicted schedules with discrete actions.
#[derive(Debug, Clone)]
pub struct ScheduleRLAgent {
/// Online Q-value network.
pub q_network: SchedulePredictionNetwork,
/// Periodically-synced copy of the Q-network for stable targets.
pub target_network: SchedulePredictionNetwork,
/// Bounded replay buffer of past transitions.
pub experience_buffer: Vec<ScheduleExperience>,
/// Agent hyper-parameters.
pub config: RLAgentConfig,
/// Running statistics (rewards, exploration, action usage).
pub stats: RLStats,
}
/// Hyper-parameters for the schedule RL agent.
#[derive(Debug, Clone)]
pub struct RLAgentConfig {
/// Number of discrete refinement actions.
pub action_space_size: usize,
/// Dimensionality of the RL state vector.
pub state_space_size: usize,
/// Mini-batch size for replay training.
pub batch_size: usize,
/// Steps between target-network syncs.
pub target_update_frequency: usize,
/// Multiplicative decay applied to epsilon per step.
pub epsilon_decay: f64,
/// Floor for the exploration probability.
pub min_epsilon: f64,
}
/// One (state, action, reward, next state) transition stored for replay.
#[derive(Debug, Clone)]
pub struct ScheduleExperience {
/// State before the action.
pub state: Vec<f64>,
/// Index of the refinement action taken.
pub action: usize,
/// Reward observed for the transition.
pub reward: f64,
/// State after the action.
pub next_state: Vec<f64>,
/// Whether the episode terminated here.
pub done: bool,
/// Context about the problem this transition came from.
pub metadata: ExperienceMetadata,
}
/// Problem context attached to a stored experience.
#[derive(Debug, Clone)]
pub struct ExperienceMetadata {
/// Debug-formatted problem type label.
pub problem_type: String,
/// Number of qubits/variables in the problem.
pub problem_size: usize,
/// Wall-clock time of the annealing run.
pub execution_time: Duration,
/// Final energy reached by the run.
pub final_energy: f64,
}
/// Running statistics collected by the RL agent.
#[derive(Debug, Clone)]
pub struct RLStats {
/// Total reward per episode.
pub episode_rewards: Vec<f64>,
/// Mean of `episode_rewards`.
pub average_reward: f64,
/// Epsilon value over time.
pub exploration_history: Vec<f64>,
/// Training loss over time.
pub loss_history: Vec<f64>,
/// How often each action index was selected.
pub action_frequency: HashMap<usize, usize>,
}
/// Feature summary of an Ising problem, used as network input.
#[derive(Debug, Clone)]
pub struct ProblemFeatures {
/// Number of qubits/variables.
pub size: usize,
/// Fraction of possible pairwise couplings that are non-zero.
pub connectivity_density: f64,
/// Statistics over coupling magnitudes.
pub coupling_stats: CouplingStatistics,
/// Heuristic classification of the problem.
pub problem_type: ProblemType,
/// Heuristic energy-landscape descriptors.
pub landscape_features: LandscapeFeatures,
/// Past (schedule, performance) observations for this problem family.
pub historical_performance: Vec<PerformancePoint>,
}
/// Summary statistics over the absolute values of non-zero couplings.
#[derive(Debug, Clone)]
pub struct CouplingStatistics {
/// Mean magnitude.
pub mean: f64,
/// Standard deviation of magnitudes.
pub std: f64,
/// Largest magnitude.
pub max_abs: f64,
/// max_abs minus the smallest magnitude.
pub range: f64,
/// Third standardized moment of the magnitudes.
pub skewness: f64,
}
/// Heuristic problem classification derived from connectivity density.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ProblemType {
/// Dense, unstructured coupling graph.
Random,
/// Sparse, structured coupling graph.
Structured,
/// Intermediate density, treated as a generic optimization problem.
Optimization,
/// ML-derived problem (not currently assigned by feature extraction).
MachineLearning,
/// Domain-specific problem with a free-form label.
IndustrySpecific(String),
/// Classification unavailable.
Unknown,
}
/// Heuristic descriptors of the energy landscape (estimated, not measured).
#[derive(Debug, Clone)]
pub struct LandscapeFeatures {
/// Estimated number of local minima.
pub num_local_minima: usize,
/// Coupling std relative to mean — a roughness proxy.
pub ruggedness: f64,
/// Connectivity of the energy landscape (currently = connectivity density).
pub energy_connectivity: f64,
/// Estimated heights of energy barriers.
pub barrier_heights: Vec<f64>,
/// Degree of funnel structure in [0, 1].
pub funnel_structure: f64,
}
/// One observed (schedule, performance) pairing with its context.
#[derive(Debug, Clone)]
pub struct PerformancePoint {
/// Schedule that was run.
pub schedule_params: ScheduleParameters,
/// Measured outcome of the run.
pub performance: PerformanceMetrics,
/// Where/when the run took place.
pub context: ProblemContext,
}
/// Internal representation of an annealing schedule before conversion to
/// the simulator's `AnnealingParams`.
#[derive(Debug, Clone)]
pub struct ScheduleParameters {
/// Starting temperature.
pub initial_temp: f64,
/// Ending temperature (should be below `initial_temp`).
pub final_temp: f64,
/// Number of Monte Carlo sweeps.
pub num_sweeps: usize,
/// Cooling rate in (0, 1).
pub cooling_rate: f64,
/// Shape of the temperature curve.
pub schedule_type: ScheduleType,
/// Extra named parameters for custom schedules.
pub additional_params: HashMap<String, f64>,
}
/// Shape of the temperature-vs-time curve.
#[derive(Debug, Clone, PartialEq)]
pub enum ScheduleType {
/// Linear decay.
Linear,
/// Exponential decay.
Exponential,
/// Logarithmic decay.
Logarithmic,
/// Explicit temperature sequence.
Custom(Vec<f64>),
/// Schedule adjusted on the fly.
Adaptive,
}
/// Measured outcome of one annealing run.
#[derive(Debug, Clone)]
pub struct PerformanceMetrics {
/// Energy of the best solution found.
pub final_energy: f64,
/// Number of energy evaluations performed.
pub num_evaluations: usize,
/// Wall-clock duration of the run.
pub execution_time: Duration,
/// Fraction of runs reaching the target.
pub success_rate: f64,
/// How quickly the run converged.
pub convergence_speed: f64,
/// Quality score of the final solution.
pub solution_quality: f64,
}
/// Environment in which a performance observation was made.
#[derive(Debug, Clone)]
pub struct ProblemContext {
/// Identifier of the problem instance.
pub problem_id: String,
/// When the observation was recorded.
pub timestamp: Instant,
/// Hardware the run executed on.
pub hardware_type: String,
/// Named environment measurements.
pub environment: HashMap<String, f64>,
}
/// Per-epoch training record for the scheduler.
#[derive(Debug, Clone)]
pub struct TrainingHistory {
/// Supervised network loss per epoch.
pub network_losses: Vec<f64>,
/// Mean RL reward per epoch.
pub rl_rewards: Vec<f64>,
/// Validation scores per epoch.
pub validation_scores: Vec<f64>,
/// Feature-importance snapshots per epoch.
pub feature_importance: Vec<HashMap<String, f64>>,
/// Wall-clock time per epoch.
pub training_times: Vec<Duration>,
}
/// Aggregate scheduler statistics across all solved problems.
#[derive(Debug, Clone)]
pub struct PerformanceStatistics {
/// Total problems scheduled.
pub problems_solved: usize,
/// Mean improvement over a baseline schedule.
pub avg_improvement: f64,
/// Best single improvement observed.
pub best_improvement: f64,
/// Time spent generating the most recent schedule.
pub adaptation_time: Duration,
/// Overall success rate.
pub success_rate: f64,
/// How well transfer learning carried across problem families.
pub transfer_effectiveness: f64,
}
impl NeuralAnnealingScheduler {
/// Constructs a scheduler from `config`, building the prediction network
/// and the RL agent with fixed hyper-parameters.
///
/// # Errors
/// Returns a `ConfigurationError` if `config.network_layers` describes
/// fewer than two layers.
pub fn new(config: SchedulerConfig) -> AdaptiveScheduleResult<Self> {
    // Build the prediction network first: it validates the layer layout,
    // so the `network_layers[0]` index below is safe once `?` passes.
    let network = SchedulePredictionNetwork::new(&config.network_layers, config.seed)?;
    let rl_agent = ScheduleRLAgent::new(RLAgentConfig {
        action_space_size: 10,
        // RL state = feature vector + 4 schedule parameters.
        state_space_size: config.network_layers[0] + 4,
        batch_size: 32,
        target_update_frequency: 100,
        epsilon_decay: 0.995,
        min_epsilon: 0.01,
    })?;
    let training_history = TrainingHistory {
        network_losses: Vec::new(),
        rl_rewards: Vec::new(),
        validation_scores: Vec::new(),
        feature_importance: Vec::new(),
        training_times: Vec::new(),
    };
    let performance_stats = PerformanceStatistics {
        problems_solved: 0,
        avg_improvement: 0.0,
        best_improvement: 0.0,
        adaptation_time: Duration::from_secs(0),
        success_rate: 0.0,
        transfer_effectiveness: 0.0,
    };
    Ok(Self {
        network,
        rl_agent,
        config,
        training_history,
        feature_cache: HashMap::new(),
        performance_stats,
    })
}
/// Produces annealing parameters for `problem`: extracts features, checks
/// the similarity cache, predicts a schedule with the network, and refines
/// it with the RL agent. Records the elapsed adaptation time.
pub fn generate_schedule(
&mut self,
problem: &IsingModel,
) -> AdaptiveScheduleResult<AnnealingParams> {
let start_time = Instant::now();
let features = self.extract_problem_features(problem)?;
// Cache hit: a similar problem was seen before — reuse a derived schedule.
if let Some(cached_schedule) = self.check_feature_cache(&features) {
return Ok(cached_schedule);
}
let predicted_params = self.predict_schedule_parameters(&features)?;
let refined_params = self.refine_with_rl(&features, predicted_params)?;
let schedule = self.convert_to_annealing_params(refined_params)?;
self.cache_schedule(&features, schedule.clone());
// Track how long this adaptation took.
self.performance_stats.adaptation_time = start_time.elapsed();
Ok(schedule)
}
/// Scans the problem's coupling matrix and derives summary features:
/// connectivity density, coupling statistics, a heuristic problem-type
/// label, and estimated landscape descriptors.
///
/// Fix: for trivial problems (`size <= 1`) the number of possible
/// couplings is zero; previously this divided by zero and produced a NaN
/// `connectivity_density` that poisoned every downstream feature.
fn extract_problem_features(
    &self,
    problem: &IsingModel,
) -> AdaptiveScheduleResult<ProblemFeatures> {
    let size = problem.num_qubits;
    // Collect magnitudes of all non-negligible couplings (upper triangle).
    let mut coupling_values = Vec::new();
    for i in 0..size {
        for j in (i + 1)..size {
            if let Ok(coupling) = problem.get_coupling(i, j) {
                if coupling.abs() > 1e-10 {
                    coupling_values.push(coupling.abs());
                }
            }
        }
    }
    let num_couplings = coupling_values.len();
    let max_possible_couplings = size * (size - 1) / 2;
    // Guard against division by zero for size <= 1 (no possible couplings).
    let connectivity_density = if max_possible_couplings > 0 {
        num_couplings as f64 / max_possible_couplings as f64
    } else {
        0.0
    };
    let coupling_stats = if coupling_values.is_empty() {
        CouplingStatistics {
            mean: 0.0,
            std: 0.0,
            max_abs: 0.0,
            range: 0.0,
            skewness: 0.0,
        }
    } else {
        let n = coupling_values.len() as f64;
        let mean = coupling_values.iter().sum::<f64>() / n;
        let variance = coupling_values
            .iter()
            .map(|x| (x - mean).powi(2))
            .sum::<f64>()
            / n;
        let std = variance.sqrt();
        let max_abs = coupling_values.iter().fold(0.0f64, |a, &b| a.max(b));
        let min_val = coupling_values.iter().fold(f64::INFINITY, |a, &b| a.min(b));
        let range = max_abs - min_val;
        // Skewness is undefined for (near-)zero spread; report 0 instead.
        let skewness = if std > 1e-10 {
            coupling_values
                .iter()
                .map(|x| ((x - mean) / std).powi(3))
                .sum::<f64>()
                / n
        } else {
            0.0
        };
        CouplingStatistics {
            mean,
            std,
            max_abs,
            range,
            skewness,
        }
    };
    // Heuristic classification purely by coupling density.
    let problem_type = if connectivity_density > 0.8 {
        ProblemType::Random
    } else if connectivity_density < 0.2 {
        ProblemType::Structured
    } else {
        ProblemType::Optimization
    };
    // Landscape descriptors are rough estimates derived from the
    // statistics above, not measured from actual energy evaluations.
    let landscape_features = LandscapeFeatures {
        num_local_minima: (size as f64 * connectivity_density * 10.0) as usize,
        ruggedness: coupling_stats.std / coupling_stats.mean.max(1e-10),
        energy_connectivity: connectivity_density,
        barrier_heights: vec![coupling_stats.mean; 5],
        funnel_structure: 1.0 - connectivity_density,
    };
    Ok(ProblemFeatures {
        size,
        connectivity_density,
        coupling_stats,
        problem_type,
        landscape_features,
        historical_performance: Vec::new(),
    })
}
/// Looks for a cached problem whose features are close to `features`
/// (size within 10%, density within 0.1, same type) and, if found,
/// derives a schedule directly from the current features.
fn check_feature_cache(&self, features: &ProblemFeatures) -> Option<AnnealingParams> {
    let size = features.size as f64;
    let is_similar = |cached: &&ProblemFeatures| {
        (cached.size as f64 - size).abs() / size < 0.1
            && (cached.connectivity_density - features.connectivity_density).abs() < 0.1
            && cached.problem_type == features.problem_type
    };
    // Only similarity matters: the schedule itself is re-derived from the
    // *current* features, not read back from the cache.
    self.feature_cache.values().find(is_similar).map(|_| AnnealingParams {
        num_sweeps: 1000 + features.size * 10,
        initial_temperature: features.coupling_stats.max_abs * 10.0,
        final_temperature: features.coupling_stats.max_abs * 0.001,
        ..Default::default()
    })
}
fn predict_schedule_parameters(
&self,
features: &ProblemFeatures,
) -> AdaptiveScheduleResult<ScheduleParameters> {
let input = self.features_to_input_vector(features);
let output = self.network.forward(&input)?;
let initial_temp = output[0].max(1.0).min(100.0);
let mut final_temp = output[1].max(0.001).min(1.0);
if initial_temp <= final_temp {
final_temp = initial_temp * 0.01; }
let schedule_params = ScheduleParameters {
initial_temp,
final_temp,
num_sweeps: (output[2].max(100.0).min(100_000.0)) as usize,
cooling_rate: output[3].max(0.01).min(0.99),
schedule_type: ScheduleType::Exponential, additional_params: HashMap::new(),
};
Ok(schedule_params)
}
/// Flattens problem features into the fixed 10-element network input:
/// [size, density, mean, std, max_abs, skewness, ruggedness,
/// energy_connectivity, type_code, funnel_structure].
fn features_to_input_vector(&self, features: &ProblemFeatures) -> Vec<f64> {
    // Encode the categorical problem type as a numeric code.
    let type_code = match features.problem_type {
        ProblemType::Random => 1.0,
        ProblemType::Structured => 2.0,
        ProblemType::Optimization => 3.0,
        ProblemType::MachineLearning => 4.0,
        _ => 0.0,
    };
    let stats = &features.coupling_stats;
    let landscape = &features.landscape_features;
    vec![
        features.size as f64 / 1000.0, // coarse size normalization
        features.connectivity_density,
        stats.mean,
        stats.std,
        stats.max_abs,
        stats.skewness,
        landscape.ruggedness,
        landscape.energy_connectivity,
        type_code,
        landscape.funnel_structure,
    ]
}
/// Encodes (features, candidate schedule) as an RL state, lets the agent
/// pick an action, and applies the corresponding parameter adjustment.
fn refine_with_rl(
    &self,
    features: &ProblemFeatures,
    initial_params: ScheduleParameters,
) -> AdaptiveScheduleResult<ScheduleParameters> {
    let state = self.create_rl_state(features, &initial_params);
    let action = self.rl_agent.select_action(&state)?;
    self.apply_rl_action(initial_params, action)
}
/// Builds the RL state: the problem feature vector followed by the four
/// (normalized) schedule parameters.
///
/// Fix: extends from a fixed-size array instead of a temporary `Vec`,
/// avoiding a heap allocation per call; the no-op `/ 1.0` on `final_temp`
/// is dropped (final_temp is already on the (0, 1] scale).
fn create_rl_state(&self, features: &ProblemFeatures, params: &ScheduleParameters) -> Vec<f64> {
    let mut state = self.features_to_input_vector(features);
    state.extend([
        params.initial_temp / 100.0, // normalized by the 100.0 upper clamp
        params.final_temp,
        params.num_sweeps as f64 / 10_000.0,
        params.cooling_rate,
    ]);
    state
}
/// Applies one discrete RL action as a multiplicative tweak to the
/// schedule parameters; out-of-range actions leave the schedule unchanged.
fn apply_rl_action(
    &self,
    mut params: ScheduleParameters,
    action: usize,
) -> AdaptiveScheduleResult<ScheduleParameters> {
    match action {
        // Temperature endpoint adjustments.
        0 => params.initial_temp *= 1.2,
        1 => params.initial_temp *= 0.8,
        2 => params.final_temp *= 1.5,
        3 => params.final_temp *= 0.7,
        // Sweep-count adjustments.
        4 => params.num_sweeps = (params.num_sweeps as f64 * 1.3) as usize,
        5 => params.num_sweeps = (params.num_sweeps as f64 * 0.8) as usize,
        // Cooling-rate adjustments, bounded to [0.01, 0.99].
        6 => params.cooling_rate = (params.cooling_rate * 1.1).min(0.99),
        7 => params.cooling_rate = (params.cooling_rate * 0.9).max(0.01),
        // Schedule-shape switches.
        8 => params.schedule_type = ScheduleType::Linear,
        9 => params.schedule_type = ScheduleType::Logarithmic,
        // Unknown actions: no-op.
        _ => {}
    }
    Ok(params)
}
/// Maps internal `ScheduleParameters` onto the simulator's `AnnealingParams`.
fn convert_to_annealing_params(
&self,
params: ScheduleParameters,
) -> AdaptiveScheduleResult<AnnealingParams> {
Ok(AnnealingParams {
num_sweeps: params.num_sweeps,
initial_temperature: params.initial_temp,
final_temperature: params.final_temp,
temperature_schedule: match params.schedule_type {
ScheduleType::Linear => TemperatureSchedule::Linear,
ScheduleType::Exponential => TemperatureSchedule::Exponential(1.0),
// Logarithmic (and Custom/Adaptive) have no direct simulator
// equivalent; a slower exponential decay approximates them.
ScheduleType::Logarithmic => TemperatureSchedule::Exponential(0.5), _ => TemperatureSchedule::Exponential(1.0),
},
..Default::default()
})
}
/// Records the problem's features under a signature key so similar future
/// problems hit `check_feature_cache`.
///
/// NOTE(review): the `schedule` argument is accepted but never stored —
/// only the features are cached, and `check_feature_cache` re-derives a
/// schedule from them. Confirm whether the schedule itself was meant to
/// be cached.
fn cache_schedule(&mut self, features: &ProblemFeatures, schedule: AnnealingParams) {
let cache_key = format!(
"{}_{:.2}_{:?}",
features.size, features.connectivity_density, features.problem_type
);
self.feature_cache.insert(cache_key, features.clone());
}
/// Runs `training_epochs` epochs of combined supervised (network) and RL
/// (agent) training over `training_data`, logging progress every 10 epochs.
pub fn train(&mut self, training_data: &[TrainingExample]) -> AdaptiveScheduleResult<()> {
println!(
"Training neural annealing scheduler with {} examples",
training_data.len()
);
for epoch in 0..self.config.training_epochs {
let start_time = Instant::now();
let network_loss = self.train_network_epoch(training_data)?;
let rl_reward = self.train_rl_epoch(training_data)?;
// Record per-epoch loss, reward, and wall-clock time.
self.training_history.network_losses.push(network_loss);
self.training_history.rl_rewards.push(rl_reward);
self.training_history
.training_times
.push(start_time.elapsed());
if epoch % 10 == 0 {
println!(
"Epoch {epoch}: Network Loss = {network_loss:.6}, RL Reward = {rl_reward:.6}"
);
}
}
Ok(())
}
/// One supervised epoch: forward each example, accumulate MSE against the
/// optimal parameters, and invoke `backward`. Returns the mean loss.
///
/// Fix: an empty `training_data` slice previously returned `0.0 / 0.0`
/// (NaN); now it returns `Ok(0.0)`.
fn train_network_epoch(
    &mut self,
    training_data: &[TrainingExample],
) -> AdaptiveScheduleResult<f64> {
    if training_data.is_empty() {
        return Ok(0.0);
    }
    let mut total_loss = 0.0;
    for example in training_data {
        let input = self.features_to_input_vector(&example.features);
        let target = self.params_to_output_vector(&example.optimal_params);
        let prediction = self.network.forward(&input)?;
        // Mean squared error over the output vector.
        let loss: f64 = prediction
            .iter()
            .zip(target.iter())
            .map(|(pred, targ)| (pred - targ).powi(2))
            .sum::<f64>()
            / prediction.len() as f64;
        total_loss += loss;
        self.network.backward(&input, &target, &prediction)?;
    }
    Ok(total_loss / training_data.len() as f64)
}
/// One RL epoch: converts each training example into a synthetic
/// (baseline → optimal) transition, stores it, and triggers an agent
/// training step. Returns the mean reward.
///
/// Fix: an empty `training_data` slice previously returned `0.0 / 0.0`
/// (NaN); now it returns `Ok(0.0)`. Also drops a redundant `state.clone()`
/// — the local was never used again after being moved into the experience.
fn train_rl_epoch(&mut self, training_data: &[TrainingExample]) -> AdaptiveScheduleResult<f64> {
    if training_data.is_empty() {
        return Ok(0.0);
    }
    let mut total_reward = 0.0;
    for example in training_data {
        let state = self.create_rl_state(&example.features, &example.baseline_params);
        let optimal_state = self.create_rl_state(&example.features, &example.optimal_params);
        let reward = example.performance_improvement;
        let experience = ScheduleExperience {
            state,
            action: 0, // placeholder: examples carry no action label
            reward,
            next_state: optimal_state,
            done: true,
            metadata: ExperienceMetadata {
                problem_type: format!("{:?}", example.features.problem_type),
                problem_size: example.features.size,
                execution_time: Duration::from_secs(1), // placeholder timing
                final_energy: 0.0,                      // unknown at training time
            },
        };
        self.rl_agent.store_experience(experience);
        total_reward += reward;
    }
    self.rl_agent.train()?;
    Ok(total_reward / training_data.len() as f64)
}
/// Encodes schedule parameters into the network's 4-element output layout:
/// [initial_temp, final_temp, num_sweeps, cooling_rate], using the same
/// normalization as `create_rl_state`.
fn params_to_output_vector(&self, params: &ScheduleParameters) -> Vec<f64> {
    let scaled_initial = params.initial_temp / 100.0;
    let scaled_sweeps = params.num_sweeps as f64 / 10_000.0;
    vec![
        scaled_initial,
        params.final_temp,
        scaled_sweeps,
        params.cooling_rate,
    ]
}
}
/// One supervised training example: a problem, a baseline schedule, the
/// schedule that worked best, and how much better it performed.
#[derive(Debug, Clone)]
pub struct TrainingExample {
/// Features of the problem instance.
pub features: ProblemFeatures,
/// Schedule used as the reference point.
pub baseline_params: ScheduleParameters,
/// Best-known schedule for this problem.
pub optimal_params: ScheduleParameters,
/// Improvement of optimal over baseline (used as RL reward).
pub performance_improvement: f64,
/// Arbitrary named metadata.
pub metadata: HashMap<String, f64>,
}
impl SchedulePredictionNetwork {
/// Builds a fully-connected network: hidden layers use ReLU, the output
/// layer is linear, and weights use He-style initialization scaled by
/// sqrt(2 / fan_in). A `seed` makes the initialization reproducible.
///
/// # Errors
/// Returns a `ConfigurationError` when fewer than two layer sizes are given.
pub fn new(layer_sizes: &[usize], seed: Option<u64>) -> AdaptiveScheduleResult<Self> {
    if layer_sizes.len() < 2 {
        return Err(AdaptiveScheduleError::ConfigurationError(
            "Network must have at least input and output layers".to_string(),
        ));
    }
    // Seeded generator for reproducibility; otherwise derive a one-off
    // seed from the thread-local RNG.
    let mut rng = match seed {
        Some(s) => ChaCha8Rng::seed_from_u64(s),
        None => ChaCha8Rng::seed_from_u64(thread_rng().random()),
    };
    let last_hidden = layer_sizes.len() - 2;
    let mut layers = Vec::with_capacity(layer_sizes.len() - 1);
    for (idx, pair) in layer_sizes.windows(2).enumerate() {
        let (fan_in, fan_out) = (pair[0], pair[1]);
        // He initialization: uniform in [-scale, scale), scale = sqrt(2/fan_in).
        let scale = (2.0 / fan_in as f64).sqrt();
        let weights: Vec<Vec<f64>> = (0..fan_out)
            .map(|_| (0..fan_in).map(|_| rng.random_range(-scale..scale)).collect())
            .collect();
        layers.push(NetworkLayer {
            weights,
            biases: vec![0.0; fan_out],
            activation: if idx == last_hidden {
                ActivationFunction::Linear
            } else {
                ActivationFunction::ReLU
            },
        });
    }
    let input_size = layer_sizes[0];
    let output_size = layer_sizes[layer_sizes.len() - 1];
    // Identity normalization until fitted on real data.
    let identity = |n: usize| NormalizationParams {
        means: vec![0.0; n],
        stds: vec![1.0; n],
        mins: vec![0.0; n],
        maxs: vec![1.0; n],
    };
    Ok(Self {
        layers,
        input_normalization: identity(input_size),
        output_scaling: identity(output_size),
        training_state: NetworkTrainingState {
            epoch: 0,
            training_loss: 0.0,
            validation_loss: 0.0,
            learning_rate: 0.001,
            metrics: HashMap::new(),
        },
    })
}
/// Runs the full forward pass, threading the activation vector through
/// every layer in order.
///
/// # Errors
/// Propagates a `NeuralNetworkError` when a layer's input size mismatches.
pub fn forward(&self, input: &[f64]) -> AdaptiveScheduleResult<Vec<f64>> {
    self.layers
        .iter()
        .try_fold(input.to_vec(), |activations, layer| {
            self.layer_forward(&activations, layer)
        })
}
/// Applies one dense layer: for each neuron, dot(input, weights) + bias,
/// then the layer's activation function.
///
/// Fix: the input-size check read `layer.weights[0]`, which panicked on a
/// zero-neuron layer; an empty weight matrix now yields a recoverable
/// `NeuralNetworkError` instead.
fn layer_forward(
    &self,
    input: &[f64],
    layer: &NetworkLayer,
) -> AdaptiveScheduleResult<Vec<f64>> {
    let expected = match layer.weights.first() {
        Some(row) => row.len(),
        None => {
            return Err(AdaptiveScheduleError::NeuralNetworkError(
                "Layer has no neurons (empty weight matrix)".to_string(),
            ))
        }
    };
    if input.len() != expected {
        return Err(AdaptiveScheduleError::NeuralNetworkError(format!(
            "Input size {} doesn't match layer input size {}",
            input.len(),
            expected
        )));
    }
    let output = layer
        .weights
        .iter()
        .zip(&layer.biases)
        .map(|(neuron_weights, &bias)| {
            // Pre-activation: weighted sum plus bias.
            let pre: f64 = bias
                + input
                    .iter()
                    .zip(neuron_weights)
                    .map(|(&x, &w)| x * w)
                    .sum::<f64>();
            match layer.activation {
                ActivationFunction::ReLU => pre.max(0.0),
                ActivationFunction::Sigmoid => 1.0 / (1.0 + (-pre).exp()),
                ActivationFunction::Tanh => pre.tanh(),
                ActivationFunction::Linear => pre,
                ActivationFunction::LeakyReLU(alpha) => {
                    if pre > 0.0 {
                        pre
                    } else {
                        alpha * pre
                    }
                }
            }
        })
        .collect();
    Ok(output)
}
/// Placeholder for backpropagation: only advances the epoch counter — no
/// gradients are computed and no weights are updated.
///
/// NOTE(review): because this is a no-op, supervised training in
/// `train_network_epoch` never improves the network's predictions.
/// Confirm whether the stub is intentional.
pub const fn backward(
&mut self,
_input: &[f64],
_target: &[f64],
_prediction: &[f64],
) -> AdaptiveScheduleResult<()> {
// Epoch bookkeeping only.
self.training_state.epoch += 1;
Ok(())
}
}
impl ScheduleRLAgent {
/// Creates an agent with a fixed [state, 64, 32, action] Q-network
/// topology; the target network starts as an exact copy of the Q-network.
pub fn new(config: RLAgentConfig) -> AdaptiveScheduleResult<Self> {
    let topology = [config.state_space_size, 64, 32, config.action_space_size];
    let q_network = SchedulePredictionNetwork::new(&topology, None)?;
    let target_network = q_network.clone();
    let stats = RLStats {
        episode_rewards: Vec::new(),
        average_reward: 0.0,
        exploration_history: Vec::new(),
        loss_history: Vec::new(),
        action_frequency: HashMap::new(),
    };
    Ok(Self {
        q_network,
        target_network,
        experience_buffer: Vec::new(),
        config,
        stats,
    })
}
/// Epsilon-greedy action selection: with small probability pick a random
/// action, otherwise take the argmax of the Q-network's output.
///
/// Fix: previously a fresh `ChaCha8Rng` was constructed and seeded from
/// `thread_rng()` on every call; using the thread-local RNG directly is
/// equivalent for unseeded sampling and avoids the per-call setup.
///
/// NOTE(review): exploration uses `min_epsilon` as a *fixed* rate; a
/// decaying epsilon (via `epsilon_decay`) appears intended — confirm.
pub fn select_action(&self, state: &[f64]) -> AdaptiveScheduleResult<usize> {
    let mut rng = thread_rng();
    if rng.random::<f64>() < self.config.min_epsilon {
        // Explore: uniformly random action.
        Ok(rng.random_range(0..self.config.action_space_size))
    } else {
        // Exploit: greedy action w.r.t. Q-values (NaNs compare Equal, so
        // they never displace a valid maximum).
        let q_values = self.q_network.forward(state)?;
        let best_action = q_values
            .iter()
            .enumerate()
            .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
            .map_or(0, |(idx, _)| idx);
        Ok(best_action)
    }
}
/// Appends a transition to the replay buffer, evicting the oldest entries
/// so the buffer never exceeds 1000 items.
pub fn store_experience(&mut self, experience: ScheduleExperience) {
    self.experience_buffer.push(experience);
    let overflow = self.experience_buffer.len().saturating_sub(1000);
    if overflow > 0 {
        self.experience_buffer.drain(..overflow);
    }
}
/// Placeholder training step: returns early when fewer than `batch_size`
/// experiences are buffered, and otherwise does nothing.
///
/// NOTE(review): Q-network updates from the replay buffer are not
/// implemented — confirm this stub is intentional.
pub fn train(&mut self) -> AdaptiveScheduleResult<()> {
if self.experience_buffer.len() < self.config.batch_size {
return Ok(());
}
Ok(())
}
}
/// Creates a scheduler with the default configuration.
pub fn create_neural_scheduler() -> AdaptiveScheduleResult<NeuralAnnealingScheduler> {
NeuralAnnealingScheduler::new(SchedulerConfig::default())
}
/// Creates a scheduler overriding the network layout, learning rate, and
/// exploration rate; all other settings come from `SchedulerConfig::default()`.
pub fn create_custom_neural_scheduler(
    network_layers: Vec<usize>,
    learning_rate: f64,
    exploration_rate: f64,
) -> AdaptiveScheduleResult<NeuralAnnealingScheduler> {
    NeuralAnnealingScheduler::new(SchedulerConfig {
        network_layers,
        learning_rate,
        exploration_rate,
        ..Default::default()
    })
}
#[cfg(test)]
mod tests {
use super::*;
// Default config should produce the documented default layer layout.
#[test]
fn test_neural_scheduler_creation() {
let scheduler = create_neural_scheduler().expect("Failed to create scheduler");
assert_eq!(scheduler.config.network_layers, vec![32, 64, 32, 16]);
}
// Three layer sizes build two dense layers with matching dimensions.
#[test]
fn test_network_creation() {
let network = SchedulePredictionNetwork::new(&[10, 20, 5], Some(42))
.expect("Failed to create network");
assert_eq!(network.layers.len(), 2);
assert_eq!(network.layers[0].weights.len(), 20);
assert_eq!(network.layers[0].weights[0].len(), 10);
}
// Forward pass preserves the configured output width.
#[test]
fn test_network_forward_pass() {
let network =
SchedulePredictionNetwork::new(&[3, 5, 2], Some(42)).expect("Failed to create network");
let input = vec![1.0, 0.5, -0.5];
let output = network.forward(&input).expect("Failed forward pass");
assert_eq!(output.len(), 2);
}
// Feature extraction on a small model yields positive density and stats.
#[test]
fn test_feature_extraction() {
let mut ising = IsingModel::new(4);
ising.set_bias(0, 1.0).expect("Failed to set bias");
ising
.set_coupling(0, 1, -0.5)
.expect("Failed to set coupling");
ising
.set_coupling(1, 2, 0.3)
.expect("Failed to set coupling");
let scheduler = create_neural_scheduler().expect("Failed to create scheduler");
let features = scheduler
.extract_problem_features(&ising)
.expect("Failed to extract features");
assert_eq!(features.size, 4);
assert!(features.connectivity_density > 0.0);
assert!(features.coupling_stats.mean > 0.0);
}
// Agent construction keeps the provided config.
#[test]
fn test_rl_agent_creation() {
let config = RLAgentConfig {
action_space_size: 10,
state_space_size: 15,
batch_size: 32,
target_update_frequency: 100,
epsilon_decay: 0.995,
min_epsilon: 0.01,
};
let agent = ScheduleRLAgent::new(config).expect("Failed to create RL agent");
assert_eq!(agent.config.action_space_size, 10);
assert_eq!(agent.config.state_space_size, 15);
}
// End-to-end schedule generation should yield a sane, cooling schedule.
#[test]
fn test_schedule_generation() {
let mut scheduler = create_custom_neural_scheduler(
vec![10, 16, 8, 4], 0.001,
0.1,
)
.expect("Failed to create custom scheduler");
let mut ising = IsingModel::new(5);
ising.set_bias(0, 1.0).expect("Failed to set bias");
ising
.set_coupling(0, 1, -0.5)
.expect("Failed to set coupling");
let schedule = scheduler
.generate_schedule(&ising)
.expect("Failed to generate schedule");
assert!(schedule.num_sweeps > 0);
assert!(schedule.initial_temperature > 0.0);
assert!(schedule.final_temperature > 0.0);
assert!(schedule.initial_temperature > schedule.final_temperature);
}
}