use crate::{TrainOutput, TrainStep};
use burn_core::{
    data::dataloader::DataLoaderIterator, module::AutodiffModule, tensor::backend::AutodiffBackend,
};
use std::sync::mpsc::{Receiver, Sender};
use std::thread::spawn;

/// Multi-device train step.
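///
/// Spawns one worker thread per device. Each call to [`MultiDevicesTrainStep::step`]
/// sends a copy of the model and one dataloader item to every worker, then collects
/// the resulting [`TrainOutput`]s over an mpsc channel.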
pub struct MultiDevicesTrainStep<B: AutodiffBackend, M, TI, TO> {
    workers: Vec<Worker<B, M, TI>>,
    receiver: Receiver<TrainOutput<TO>>,
}

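/// Work item sent to a worker: one dataloader item plus a copy of the current model.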
struct Message<M, TI> {
    item: TI,
    model: M,
}

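/// Per-device worker holding the sending half of its input channel.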
struct Worker<B: AutodiffBackend, M, TI> {
    sender_input: Sender<Message<M, TI>>,
    device: B::Device,
}

impl<B, M, TI> Worker<B, M, TI>
where
    B: AutodiffBackend,
    M: AutodiffModule<B>,
{
    fn register(&self, item: TI, model: &M) {
        let message = Message {
            item,
            model: model.clone(),
        };
        self.sender_input.send(message).unwrap();
    }

    fn start<TO>(
        &self,
        sender_output: Sender<TrainOutput<TO>>,
        receiver_input: Receiver<Message<M, TI>>,
    ) where
        TI: Send + 'static,
        TO: Send + 'static,
        M: TrainStep<TI, TO> + Send + 'static,
    {
        let device = self.device.clone();

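        // Worker loop: runs on a dedicated thread until the input channel is closed
        // (all senders dropped), then exits.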
        spawn(move || loop {
            match receiver_input.recv() {
                Ok(item) => {
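                    // Fork the received model onto this worker's device, then run
                    // one training step on the item.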
                    let step = item.model.fork(&device);
                    let output = step.step(item.item);

                    sender_output.send(output).unwrap();
                }
                Err(_err) => {
                    log::info!("Closing thread on device {:?}", device);
                    break;
                }
            }
        });
    }
}

impl<B, M, TI, TO> MultiDevicesTrainStep<B, M, TI, TO>
where
    B: AutodiffBackend,
    M: AutodiffModule<B> + TrainStep<TI, TO> + Send + Clone + 'static,
    TI: Send + 'static,
    TO: Send + 'static,
{
    /// Create a new multi-device train step.
    ///
    /// # Arguments
    ///
    /// * `devices` - The devices to train on; one worker thread is spawned per device.
    ///
    /// # Returns
    ///
    /// MultiDevicesTrainStep instance.
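    ///
    /// # Example
    ///
    /// A minimal sketch; `MyAutodiffBackend`, `MyModel`, `MyBatch`, `MyOutput`,
    /// `device_0`, and `device_1` are placeholders for your own backend, [`TrainStep`]
    /// implementation, item/output types, and devices.
    ///
    /// ```rust,ignore
    /// let devices = vec![device_0, device_1];
    /// let multi_step =
    ///     MultiDevicesTrainStep::<MyAutodiffBackend, MyModel, MyBatch, MyOutput>::new(&devices);
    /// ```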
    pub fn new(devices: &[B::Device]) -> Self {
        let (sender_output, receiver_output) = std::sync::mpsc::channel();
        let workers = devices
            .iter()
            .map(|device| {
                let (sender_input, receiver_input) = std::sync::mpsc::channel();
                let worker = Worker {
                    sender_input,
                    device: device.clone(),
                };

                worker.start(sender_output.clone(), receiver_input);
                worker
            })
            .collect();

        Self {
            workers,
            receiver: receiver_output,
        }
    }

    /// Collect outputs from workers for one step.
    ///
    /// # Arguments
    ///
    /// * `dataloader` - The dataloader iterator providing training items.
    /// * `model` - The current model; a clone is sent to each worker.
    ///
    /// # Returns
    ///
    /// One [`TrainOutput`] per dispatched item. Fewer outputs (possibly none) are
    /// returned once the dataloader is exhausted.
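    ///
    /// # Example
    ///
    /// A sketch of one epoch; `multi_step`, `dataloader`, and `model` are placeholder
    /// names, and how the outputs are aggregated is up to the caller.
    ///
    /// ```rust,ignore
    /// let mut iterator = dataloader.iter();
    ///
    /// loop {
    ///     let outputs = multi_step.step(&mut iterator, &model);
    ///     if outputs.is_empty() {
    ///         // Dataloader exhausted: the epoch is done.
    ///         break;
    ///     }
    ///     // Aggregate the gradients from `outputs` and update `model` here.
    /// }
    /// ```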
    pub fn step<'a>(
        &self,
        dataloader: &mut Box<dyn DataLoaderIterator<TI> + 'a>,
        model: &M,
    ) -> Vec<TrainOutput<TO>> {
        let mut num_send = 0;

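        // Dispatch at most one item to each worker; the dataloader may be exhausted
        // before every worker receives one.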
        for worker in self.workers.iter() {
            if let Some(item) = dataloader.next() {
                worker.register(item, model);
                num_send += 1;
            }
        }

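        // Wait for exactly as many outputs as items were dispatched.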
        let mut outputs = Vec::with_capacity(num_send);

        for _ in 0..num_send {
            let output = self.receiver.recv().unwrap();
            outputs.push(output);
        }

        outputs
    }
}