mod actor_critic;
pub mod critics;
mod dqn;
pub mod features;
pub mod policies;
pub mod schedules;

pub use actor_critic::{ActorCriticAgent, ActorCriticConfig};
pub use dqn::{DqnActor, DqnAgent, DqnConfig};

use crate::logging::StatsLogger;
use crate::torch::modules::{AsModule, Module};
use crate::torch::optimizers::{opt_expect_ok_log, Optimizer};
use serde::{Deserialize, Serialize};
use std::cell::RefCell;
use std::fmt;
use std::time::Instant;
use tch::{Device, Tensor};

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
enum ToLog {
    /// Don't log the absolute loss value (loss improvements may still be logged).
    NoAbsLoss,
    /// Log everything.
    All,
}

/// Take `n` backward steps of a loss function with logging.
///
/// # Note
/// The output of `sample_minibatch` is cloned on each call to `loss_fn`, so it should be cheap to
/// clone (e.g. a reference or an `Rc`).
/// `loss_fn` does not simply take `&D` because a current compiler limitation makes it difficult
/// to construct closures that take a reference with an arbitrary lifetime.
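///
/// # Example
///
/// A sketch of the intended call pattern (not a doctest); `optimizer`, `batch`, `compute_loss`,
/// and `logger` are hypothetical placeholders with the appropriate trait bounds.
///
/// ```ignore
/// n_backward_steps(
///     &mut optimizer,
///     || Rc::clone(&batch),  // minibatch sampling; an `Rc` is cheap to clone
///     |b| compute_loss(&b),  // maps the sampled minibatch to a loss `Tensor`
///     10,                    // number of optimizer steps
///     &mut logger,
///     ToLog::All,
///     "error in backward step",
/// );
/// ```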
fn n_backward_steps<O, G, F, L, D>(
    optimizer: &mut O,
    mut sample_minibatch: G,
    mut loss_fn: F,
    n: u64,
    mut logger: L,
    to_log: ToLog,
    err_msg: &str,
) where
    O: Optimizer + ?Sized,
    G: FnMut() -> D,
    F: FnMut(D) -> Tensor,
    L: StatsLogger,
    D: Clone,
{
    let mut step_logger = (&mut logger).with_scope("step");
    let mut prev_loss = None;
    let mut prev_start = Instant::now();
    for _ in 0..n {
        let minibatch = sample_minibatch();
        let mut minibatch_loss_fn = || loss_fn(minibatch.clone());
        let result = optimizer.backward_step(&mut minibatch_loss_fn, &mut step_logger);
        let loss = opt_expect_ok_log(result, err_msg).map(f64::from);

        if let Some(loss_improvement) = prev_loss.and_then(|p| loss.map(|l| p - l)) {
            step_logger.log_scalar("loss_improvement", loss_improvement);
        }
        prev_loss = loss;
        let end = Instant::now();
        step_logger.log_duration("time", end - prev_start);
        prev_start = end;
    }
    if matches!(to_log, ToLog::All) {
        if let Some(loss) = prev_loss {
            logger.log_scalar("loss", loss);
        }
    }
}

/// Wraps a module with a lazily-initialized CPU copy for when the master copy is not already in
/// CPU memory.
///
/// This is useful for models used both in training and in simulation because large-batch
/// training is most efficient on the GPU while batch-size-1 simulation is most efficient on the
/// CPU.
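///
/// # Example
///
/// A minimal sketch (not a doctest); `policy` stands for any value implementing [`AsModule`]
/// whose tensors are stored on `device`.
///
/// ```ignore
/// let wrapped = WithCpuCopy::new(policy, device);
/// // Training uses the master copy on `device` (e.g. a GPU)...
/// let _module = wrapped.as_module();
/// // ...while simulation takes a CPU copy, created lazily and cached on first use.
/// let cpu_module = wrapped.shallow_clone_module_cpu();
/// ```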
#[derive(Serialize, Deserialize)]
pub struct WithCpuCopy<T: AsModule> {
    inner: T,
    /// Device on which `inner` (the master copy) is stored.
    // Tensors will deserialize to CPU
    #[serde(skip, default = "cpu_device")]
    device: Device,
    #[serde(skip, default)]
    cpu_module: RefCell<Option<T::Module>>,
}

/// Serde default device; tensors deserialize into CPU memory.
const fn cpu_device() -> Device {
    Device::Cpu
}

impl<T: AsModule> WithCpuCopy<T> {
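    /// Wrap `inner`, recording the device on which its tensors are currently stored.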
    pub const fn new(inner: T, device: Device) -> Self {
        Self {
            inner,
            device,
            cpu_module: RefCell::new(None),
        }
    }
}

impl<T: AsModule + Clone> Clone for WithCpuCopy<T> {
    fn clone(&self) -> Self {
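        // The lazily-cached CPU copy is not cloned; the clone rebuilds it on demand.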
        Self::new(self.inner.clone(), self.device)
    }
}

impl<T: AsModule> AsModule for WithCpuCopy<T> {
    type Module = T::Module;

    #[inline]
    fn as_module(&self) -> &Self::Module {
        self.as_inner().as_module()
    }

    /// Get a mutable reference to the module. Invalidates the cached CPU copy, if any.
    #[inline]
    fn as_module_mut(&mut self) -> &mut Self::Module {
        self.as_inner_mut().as_module_mut()
    }
}

impl<T: AsModule> WithCpuCopy<T> {
    /// Get a reference to the inner struct.
    #[inline]
    pub const fn as_inner(&self) -> &T {
        &self.inner
    }

    /// Get a mutable reference to the inner struct. Invalidates the cached CPU copy, if any.
    #[inline]
    pub fn as_inner_mut(&mut self) -> &mut T {
        self.cpu_module.take();
        &mut self.inner
    }

    /// Create a shallow clone of the module in CPU memory.
    ///
    /// If the module is not already on the CPU then a deep copy is first created on the CPU and
    /// cached. The cached copy is reused on future calls.
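    ///
    /// # Example
    ///
    /// A sketch of the caching behaviour (not a doctest); `wrapped` is a hypothetical
    /// `WithCpuCopy<T>` whose master copy lives on a GPU.
    ///
    /// ```ignore
    /// let m1 = wrapped.shallow_clone_module_cpu(); // first call: deep copy to CPU, then cached
    /// let m2 = wrapped.shallow_clone_module_cpu(); // reuses the cached CPU copy
    /// wrapped.as_module_mut();                     // mutable access invalidates the cache
    /// let m3 = wrapped.shallow_clone_module_cpu(); // copies to the CPU again
    /// ```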
    #[inline]
    pub fn shallow_clone_module_cpu(&self) -> T::Module {
        if self.device == Device::Cpu {
            self.as_module().shallow_clone()
        } else {
            self.cpu_module
                .borrow_mut()
                .get_or_insert_with(|| self.as_module().clone_to_device(Device::Cpu))
                .shallow_clone()
        }
    }
}

impl<T: AsModule + fmt::Debug> fmt::Debug for WithCpuCopy<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("WithCpuCopy")
            .field("inner", &self.inner)
            .field("device", &self.device)
            .field(
                "cpu_module",
                &self.cpu_module.borrow().as_ref().map(|_| "..."),
            )
            .finish()
    }
}

impl<T: AsModule + PartialEq> PartialEq for WithCpuCopy<T> {
    fn eq(&self, other: &Self) -> bool {
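        // The cached CPU copy is derived state and is ignored for equality.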
        self.device == other.device && self.inner == other.inner
    }
}