continuous_rl/continuous_rl.rs

//! Quantum Continuous Reinforcement Learning Example
//!
//! This example demonstrates quantum reinforcement learning for continuous
//! action spaces, such as QDDPG and QSAC. The demo itself trains Quantum DDPG
//! (QDDPG) on a pendulum control task and compares it against simple baselines.

use quantrs2_ml::autodiff::optimizers::Adam;
use quantrs2_ml::prelude::*;
use scirs2_core::ndarray::Array1;
use scirs2_core::random::prelude::*;

fn main() -> Result<()> {
    println!("=== Quantum Continuous RL Demo ===\n");

    // Step 1: Test pendulum environment
    println!("1. Testing Pendulum Environment...");
    test_pendulum_dynamics()?;

    // Step 2: Train QDDPG on pendulum
    println!("\n2. Training Quantum DDPG on Pendulum Control...");
    train_qddpg_pendulum()?;

    // Step 3: Compare with random policy
    println!("\n3. Comparing with Random Policy...");
    compare_policies()?;

    // Step 4: Demonstrate custom continuous environment
    println!("\n4. Custom Continuous Environment Example...");
    custom_environment_demo()?;

    println!("\n=== Continuous RL Demo Complete ===");

    Ok(())
}

/// Test pendulum environment dynamics
fn test_pendulum_dynamics() -> Result<()> {
    let mut env = PendulumEnvironment::new();

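    // The pendulum observation is [cos θ, sin θ, θ_dot] and the action is a
    // single torque value; the bounds printed below are ±2.0 for this task.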
    println!("   Initial state: {:?}", env.state());
    println!("   Action bounds: {:?}", env.action_bounds());

    // Run a few steps with different actions
    let actions = vec![
        Array1::from_vec(vec![0.0]),  // No torque
        Array1::from_vec(vec![2.0]),  // Max positive torque
        Array1::from_vec(vec![-2.0]), // Max negative torque
    ];

    for (i, action) in actions.iter().enumerate() {
        let state = env.reset();
        let (next_state, reward, done) = env.step(action.clone())?;

        println!("\n   Step {} with action {:.1}:", i + 1, action[0]);
        println!(
            "     State: [θ_cos={:.3}, θ_sin={:.3}, θ_dot={:.3}]",
            state[0], state[1], state[2]
        );
        println!(
            "     Next: [θ_cos={:.3}, θ_sin={:.3}, θ_dot={:.3}]",
            next_state[0], next_state[1], next_state[2]
        );
        println!("     Reward: {reward:.3}, Done: {done}");
    }

    Ok(())
}

/// Train QDDPG on pendulum control
fn train_qddpg_pendulum() -> Result<()> {
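    // Problem and agent hyperparameters (see `QuantumDDPG::new` below): a
    // 3-dimensional pendulum observation, a single torque action bounded to
    // [-2, 2], 4 qubits for the agent's quantum circuits, and a replay buffer
    // holding up to 10,000 transitions.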
    let state_dim = 3;
    let action_dim = 1;
    let action_bounds = vec![(-2.0, 2.0)];
    let num_qubits = 4;
    let buffer_capacity = 10000;

    // Create QDDPG agent
    let mut agent = QuantumDDPG::new(
        state_dim,
        action_dim,
        action_bounds,
        num_qubits,
        buffer_capacity,
    )?;

    // Create environment
    let mut env = PendulumEnvironment::new();

    // Create optimizers
    let mut actor_optimizer = Adam::new(0.001);
    let mut critic_optimizer = Adam::new(0.001);

    // Train for a few episodes (reduced for demo)
    let episodes = 50;
    println!("   Training QDDPG for {episodes} episodes...");

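    // Quantum DDPG follows the usual DDPG recipe: transitions gathered from
    // the environment go into the replay buffer, the critic is trained on
    // temporal-difference targets, and the actor is updated along the critic's
    // gradient. `train` returns the per-episode rewards used in the statistics below.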
    let rewards = agent.train(
        &mut env,
        episodes,
        &mut actor_optimizer,
        &mut critic_optimizer,
    )?;

    // Print training statistics
    let avg_initial = rewards[..10].iter().sum::<f64>() / 10.0;
    let avg_final = rewards[rewards.len() - 10..].iter().sum::<f64>() / 10.0;

    println!("\n   Training Statistics:");
    println!("   - Average initial reward: {avg_initial:.2}");
    println!("   - Average final reward: {avg_final:.2}");
    println!("   - Improvement: {:.2}", avg_final - avg_initial);

    // Test trained agent
    println!("\n   Testing trained agent...");
    test_trained_agent(&agent, &mut env)?;

    Ok(())
}

/// Test a trained agent
fn test_trained_agent(agent: &QuantumDDPG, env: &mut dyn ContinuousEnvironment) -> Result<()> {
    let test_episodes = 5;
    let mut test_rewards = Vec::new();

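    // Roll out the greedy policy: `get_action(&state, false)` turns off
    // exploration, and each test episode is capped at 200 steps.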
    for episode in 0..test_episodes {
        let mut state = env.reset();
        let mut episode_reward = 0.0;
        let mut done = false;
        let mut steps = 0;

        while !done && steps < 200 {
            let action = agent.get_action(&state, false)?; // No exploration
            let (next_state, reward, is_done) = env.step(action.clone())?;

            state = next_state;
            episode_reward += reward;
            done = is_done;
            steps += 1;
        }

        test_rewards.push(episode_reward);
        println!(
            "   Test episode {}: Reward = {:.2}, Steps = {}",
            episode + 1,
            episode_reward,
            steps
        );
    }

    let avg_test = test_rewards.iter().sum::<f64>() / f64::from(test_episodes);
    println!("   Average test reward: {avg_test:.2}");

    Ok(())
}

/// Compare a random policy with a simple proportional-control baseline
fn compare_policies() -> Result<()> {
    let mut env = PendulumEnvironment::new();
    let episodes = 10;

    // Random policy performance
    println!("   Random Policy Performance:");
    let mut random_rewards = Vec::new();

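    // Baseline 1: torques drawn uniformly at random from the [-2, 2] action range.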
    for _ in 0..episodes {
        let mut state = env.reset();
        let mut episode_reward = 0.0;
        let mut done = false;

        while !done {
            // Random action in bounds
            let action = Array1::from_vec(vec![4.0f64.mul_add(thread_rng().gen::<f64>(), -2.0)]);

            let (next_state, reward, is_done) = env.step(action)?;
            state = next_state;
            episode_reward += reward;
            done = is_done;
        }

        random_rewards.push(episode_reward);
    }

    let avg_random = random_rewards.iter().sum::<f64>() / f64::from(episodes);
    println!("   Average random policy reward: {avg_random:.2}");

    // Simple control policy (proportional control)
    println!("\n   Simple Control Policy Performance:");
    let mut control_rewards = Vec::new();

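    // Baseline 2: a proportional controller. atan2(sin θ, cos θ) recovers the
    // angle θ, and torque = -2θ (clamped to the bounds) pushes the pendulum
    // toward θ = 0, the upright position in the usual pendulum convention.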
    for _ in 0..episodes {
        let mut state = env.reset();
        let mut episode_reward = 0.0;
        let mut done = false;

        while !done {
            // Proportional control: torque = -k * theta
            let theta = state[1].atan2(state[0]); // Reconstruct angle
            let action = Array1::from_vec(vec![(-2.0 * theta).clamp(-2.0, 2.0)]);

            let (next_state, reward, is_done) = env.step(action)?;
            state = next_state;
            episode_reward += reward;
            done = is_done;
        }

        control_rewards.push(episode_reward);
    }

    let avg_control = control_rewards.iter().sum::<f64>() / f64::from(episodes);
    println!("   Average control policy reward: {avg_control:.2}");

    println!("\n   Performance Summary:");
    println!("   - Random policy: {avg_random:.2}");
    println!("   - Simple control: {avg_control:.2}");
    println!("   - Improvement: {:.2}", avg_control - avg_random);

    Ok(())
}

/// Custom continuous environment example
fn custom_environment_demo() -> Result<()> {
    // Define a simple 2D navigation environment
    struct Navigation2D {
        position: Array1<f64>,
        goal: Array1<f64>,
        max_steps: usize,
        current_step: usize,
    }

    impl Navigation2D {
        fn new() -> Self {
            Self {
                position: Array1::zeros(2),
                goal: Array1::from_vec(vec![5.0, 5.0]),
                max_steps: 50,
                current_step: 0,
            }
        }
    }

    impl ContinuousEnvironment for Navigation2D {
        fn state(&self) -> Array1<f64> {
            // State includes position and relative goal position
            let mut state = Array1::zeros(4);
            state[0] = self.position[0];
            state[1] = self.position[1];
            state[2] = self.goal[0] - self.position[0];
            state[3] = self.goal[1] - self.position[1];
            state
        }

        fn action_bounds(&self) -> Vec<(f64, f64)> {
            vec![(-1.0, 1.0), (-1.0, 1.0)] // Velocity in x and y
        }

        fn step(&mut self, action: Array1<f64>) -> Result<(Array1<f64>, f64, bool)> {
            // Update position
            self.position = &self.position + &action;

            // Compute distance to goal
            let distance = (self.position[0] - self.goal[0]).hypot(self.position[1] - self.goal[1]);

            // Reward is negative distance (closer is better)
            let reward = -distance;

            self.current_step += 1;
            let done = distance < 0.5 || self.current_step >= self.max_steps;

            Ok((self.state(), reward, done))
        }

        fn reset(&mut self) -> Array1<f64> {
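            // Start each episode at a uniformly random position in [-5, 5)^2.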
            self.position = Array1::from_vec(vec![
                10.0f64.mul_add(thread_rng().gen::<f64>(), -5.0),
                10.0f64.mul_add(thread_rng().gen::<f64>(), -5.0),
            ]);
            self.current_step = 0;
            self.state()
        }

        fn state_dim(&self) -> usize {
            4
        }
        fn action_dim(&self) -> usize {
            2
        }
    }

    println!("   Created 2D Navigation Environment");

    let mut nav_env = Navigation2D::new();
    let state = nav_env.reset();

    println!("   Initial position: [{:.2}, {:.2}]", state[0], state[1]);
    println!("   Goal position: [5.00, 5.00]");
    println!("   Action space: 2D velocity vectors in [-1, 1]");

    // Demonstrate a few steps
    println!("\n   Taking some steps:");
    for i in 0..3 {
        let action = Array1::from_vec(vec![
            0.5 * 2.0f64.mul_add(thread_rng().gen::<f64>(), -1.0),
            0.5 * 2.0f64.mul_add(thread_rng().gen::<f64>(), -1.0),
        ]);

        let (next_state, reward, done) = nav_env.step(action.clone())?;

        println!(
            "   Step {}: action=[{:.2}, {:.2}], pos=[{:.2}, {:.2}], reward={:.2}, done={}",
            i + 1,
            action[0],
            action[1],
            next_state[0],
            next_state[1],
            reward,
            done
        );
    }

    println!("\n   This demonstrates how to create custom continuous environments");
    println!("   for quantum RL algorithms!");

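    // A minimal sketch (not executed here) of how the QDDPG setup from step 2
    // could be trained on this custom environment. It assumes `QuantumDDPG::new`
    // and `QuantumDDPG::train` accept any type implementing
    // `ContinuousEnvironment`, exactly as they are used with the pendulum above:
    //
    //     let mut agent = QuantumDDPG::new(
    //         nav_env.state_dim(),      // 4: position + relative goal
    //         nav_env.action_dim(),     // 2: velocity in x and y
    //         nav_env.action_bounds(),  // [(-1.0, 1.0), (-1.0, 1.0)]
    //         4,                        // number of qubits
    //         10000,                    // replay buffer capacity
    //     )?;
    //     let mut actor_opt = Adam::new(0.001);
    //     let mut critic_opt = Adam::new(0.001);
    //     let _rewards = agent.train(&mut nav_env, 50, &mut actor_opt, &mut critic_opt)?;
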
    Ok(())
}