Struct QuantumDDPG
pub struct QuantumDDPG { /* private fields */ }

Quantum Deep Deterministic Policy Gradient (QDDPG) agent for reinforcement learning over continuous action spaces. As in classical DDPG, a deterministic actor (policy) is trained alongside a Q-value critic, off-policy, from a replay buffer of stored experiences.
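
For orientation, the standard DDPG update rules that a DDPG-style agent follows (a classical recap, not code from this crate; the discount factor gamma and soft-update rate tau are assumed to be internal hyperparameters that this API does not expose):

    y_t = r_t + \gamma \, Q'\bigl(s_{t+1}, \mu'(s_{t+1})\bigr)                % TD target from target networks
    L(\phi) = \tfrac{1}{N} \sum_t \bigl( Q_\phi(s_t, a_t) - y_t \bigr)^2      % critic loss
    \nabla_\theta J \approx \tfrac{1}{N} \sum_t
        \nabla_a Q_\phi(s_t, a)\big|_{a=\mu_\theta(s_t)} \,
        \nabla_\theta \mu_\theta(s_t)                                         % deterministic actor gradient
    \theta' \leftarrow \tau \theta + (1 - \tau)\, \theta'                     % soft target update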

Implementations

impl QuantumDDPG

pub fn new(
    state_dim: usize,
    action_dim: usize,
    action_bounds: Vec<(f64, f64)>,
    num_qubits: usize,
    buffer_capacity: usize,
) -> Result<Self>

Create a new QDDPG agent. action_bounds gives the (min, max) range for each action dimension.

Examples found in repository: examples/continuous_rl.rs (lines 77-83)
69 fn train_qddpg_pendulum() -> Result<()> {
70    let state_dim = 3;
71    let action_dim = 1;
72    let action_bounds = vec![(-2.0, 2.0)];
73    let num_qubits = 4;
74    let buffer_capacity = 10000;
75
76    // Create QDDPG agent
77    let mut agent = QuantumDDPG::new(
78        state_dim,
79        action_dim,
80        action_bounds,
81        num_qubits,
82        buffer_capacity,
83    )?;
84
85    // Create environment
86    let mut env = PendulumEnvironment::new();
87
88    // Create optimizers
89    let mut actor_optimizer = Adam::new(0.001);
90    let mut critic_optimizer = Adam::new(0.001);
91
92    // Train for a few episodes (reduced for demo)
93    let episodes = 50;
94    println!("   Training QDDPG for {episodes} episodes...");
95
96    let rewards = agent.train(
97        &mut env,
98        episodes,
99        &mut actor_optimizer,
100        &mut critic_optimizer,
101    )?;
102
103    // Print training statistics
104    let avg_initial = rewards[..10].iter().sum::<f64>() / 10.0;
105    let avg_final = rewards[rewards.len() - 10..].iter().sum::<f64>() / 10.0;
106
107    println!("\n   Training Statistics:");
108    println!("   - Average initial reward: {avg_initial:.2}");
109    println!("   - Average final reward: {avg_final:.2}");
110    println!("   - Improvement: {:.2}", avg_final - avg_initial);
111
112    // Test trained agent
113    println!("\n   Testing trained agent...");
114    test_trained_agent(&agent, &mut env)?;
115
116    Ok(())
117 }

pub fn get_action(
    &self,
    state: &Array1<f64>,
    training: bool,
) -> Result<Array1<f64>>

Get an action for the given state. With training set to true the action includes exploration noise; pass false for deterministic actions.

Examples found in repository: examples/continuous_rl.rs (line 131)
120 fn test_trained_agent(agent: &QuantumDDPG, env: &mut dyn ContinuousEnvironment) -> Result<()> {
121    let test_episodes = 5;
122    let mut test_rewards = Vec::new();
123
124    for episode in 0..test_episodes {
125        let mut state = env.reset();
126        let mut episode_reward = 0.0;
127        let mut done = false;
128        let mut steps = 0;
129
130        while !done && steps < 200 {
131            let action = agent.get_action(&state, false)?; // No exploration
132            let (next_state, reward, is_done) = env.step(action.clone())?;
133
134            state = next_state;
135            episode_reward += reward;
136            done = is_done;
137            steps += 1;
138        }
139
140        test_rewards.push(episode_reward);
141        println!(
142            "   Test episode {}: Reward = {:.2}, Steps = {}",
143            episode + 1,
144            episode_reward,
145            steps
146        );
147    }
148
149    let avg_test = test_rewards.iter().sum::<f64>() / f64::from(test_episodes);
150    println!("   Average test reward: {avg_test:.2}");
151
152    Ok(())
153 }

pub fn store_experience(&mut self, exp: Experience)

Store an experience (one transition) in the replay buffer.
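
A sketch of filling the buffer by hand from one environment step. The Experience field names below (state, action, reward, next_state, done) are an assumption for illustration; this page does not document the struct's layout.

    // Hypothetical Experience layout; adjust field names to the real struct.
    let state = env.reset();
    let action = agent.get_action(&state, true)?; // training = true: with exploration
    let (next_state, reward, done) = env.step(action.clone())?;
    agent.store_experience(Experience {
        state,
        action,
        reward,
        next_state,
        done,
    });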


pub fn update(
    &mut self,
    actor_optimizer: &mut dyn Optimizer,
    critic_optimizer: &mut dyn Optimizer,
) -> Result<()>

Update the actor and critic networks using the supplied optimizers.
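
A hand-rolled training loop assembled only from methods on this page, as an alternative to calling train. This is a sketch: the one-update-per-step schedule and the 200-step episode cap are arbitrary choices, and the Experience fields are the same assumption as above.

    let mut actor_opt = Adam::new(0.001);
    let mut critic_opt = Adam::new(0.001);

    for _episode in 0..50 {
        let mut state = env.reset();
        let mut done = false;
        let mut steps = 0;
        while !done && steps < 200 {
            let action = agent.get_action(&state, true)?; // exploratory action
            let (next_state, reward, is_done) = env.step(action.clone())?;
            agent.store_experience(Experience {
                state: state.clone(), // assumed field names, see above
                action,
                reward,
                next_state: next_state.clone(),
                done: is_done,
            });
            // One actor/critic update per environment step; update() may be
            // a no-op until the buffer holds a full batch (assumption).
            agent.update(&mut actor_opt, &mut critic_opt)?;
            state = next_state;
            done = is_done;
            steps += 1;
        }
    }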


pub fn train(
    &mut self,
    env: &mut dyn ContinuousEnvironment,
    episodes: usize,
    actor_optimizer: &mut dyn Optimizer,
    critic_optimizer: &mut dyn Optimizer,
) -> Result<Vec<f64>>

Train the agent on the given environment for the requested number of episodes, returning the reward collected in each episode.

Examples found in repository: examples/continuous_rl.rs (lines 96-101)
92    // Train for a few episodes (reduced for demo)
93    let episodes = 50;
94    println!("   Training QDDPG for {episodes} episodes...");
95
96    let rewards = agent.train(
97        &mut env,
98        episodes,
99        &mut actor_optimizer,
100        &mut critic_optimizer,
101    )?;

(Excerpt; the full listing appears under new above.)

Auto Trait Implementations

Blanket Implementations

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true, and into a Right variant otherwise.

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true, and into a Right variant otherwise.

impl<T> Pointable for T

const ALIGN: usize

The alignment of the pointer.

type Init = T

The type for initializers.

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointer with the given initializer.

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer.

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer.

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer.

impl<T> Same for T

type Output = T

Should always be Self.

impl<SS, SP> SupersetOf<SS> for SP
where SS: SubsetOf<SP>,

fn to_subset(&self) -> Option<SS>

The inverse inclusion map: attempts to construct self from the equivalent element of its superset.

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset SS (and can be converted to it).

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset but without any property checks. Always succeeds.

fn from_subset(element: &SS) -> SP

The inclusion map: converts the given element of the subset to the equivalent element of its superset.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

fn vzip(self) -> V