mod decoder;
mod encoder;
mod mha;

pub use decoder::*;
pub use encoder::*;
pub use mha::*;

use num_traits::Float;
use rand_distr::uniform::SampleUniform;

use crate::{shapes::*, tensor::*, tensor_ops::*};

use super::*;

pub mod builder {
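    /// Type-level description of a [super::Transformer]. `BuildOnDevice`
    /// (implemented below) turns this blueprint into a usable module on a
    /// concrete device and dtype.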
    #[derive(Debug, Clone)]
    pub struct Transformer<
        const MODEL_DIM: usize,
        const NUM_HEADS: usize,
        const NUM_ENCODER_LAYERS: usize,
        const NUM_DECODER_LAYERS: usize,
        const FF_DIM: usize,
    >;

    pub use super::decoder::builder::{TransformerDecoder, TransformerDecoderBlock};
    pub use super::encoder::builder::{TransformerEncoder, TransformerEncoderBlock};
    pub use super::mha::builder::MultiHeadAttention;
}

impl<const M: usize, const H: usize, const A: usize, const B: usize, const F: usize, E, D>
    BuildOnDevice<D, E> for builder::Transformer<M, H, A, B, F>
where
    E: Dtype,
    D: Device<E>,
    Transformer<M, H, A, B, F, E, D>: BuildModule<D, E>,
{
    type Built = Transformer<M, H, A, B, F, E, D>;
    fn try_build_on_device(device: &D) -> Result<Self::Built, D::Err> {
        Self::Built::try_build(device)
    }
}

/// Transformer architecture as described in
/// [Attention is all you need](https://arxiv.org/abs/1706.03762).
///
/// This is comprised of a [TransformerEncoder] and a [TransformerDecoder].
///
/// Generics:
/// - `MODEL_DIM`: Size of the input features to the encoder/decoder.
/// - `NUM_HEADS`: Number of heads for [MultiHeadAttention].
/// - `NUM_ENCODER_LAYERS`: Number of [TransformerEncoderBlock]s to use.
/// - `NUM_DECODER_LAYERS`: Number of [TransformerDecoderBlock]s to use.
/// - `FF_DIM`: Feedforward hidden dimension for both the encoder and decoder.
///
/// **Pytorch equivalent**:
/// ```python
/// torch.nn.Transformer(
///     d_model=MODEL_DIM,
///     nhead=NUM_HEADS,
///     num_encoder_layers=NUM_ENCODER_LAYERS,
///     num_decoder_layers=NUM_DECODER_LAYERS,
///     dim_feedforward=FF_DIM,
///     batch_first=True,
/// )
/// ```
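///
/// **Example usage** (a minimal sketch mirroring the tests below; `dev` is
/// assumed to be an already-constructed device and `f32` is assumed as the dtype):
/// ```ignore
/// type Model = builder::Transformer<16, 4, 3, 3, 8>;
/// let mut t = dev.build_module::<Model, f32>();
///
/// // src: (source sequence length, MODEL_DIM), tgt: (target sequence length, MODEL_DIM)
/// let src = dev.sample_normal::<Rank2<7, 16>>();
/// let tgt = dev.sample_normal::<Rank2<9, 16>>();
///
/// // the output has the target's shape
/// let out: Tensor<Rank2<9, 16>, _, _, _> = t.forward_mut((src, tgt));
/// ```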
#[derive(Debug, Clone)]
pub struct Transformer<
    const MODEL_DIM: usize,
    const NUM_HEADS: usize,
    const NUM_ENCODER_LAYERS: usize,
    const NUM_DECODER_LAYERS: usize,
    const FF_DIM: usize,
    E: Dtype,
    D: Storage<E>,
> {
    pub encoder: TransformerEncoder<MODEL_DIM, NUM_HEADS, FF_DIM, NUM_ENCODER_LAYERS, E, D>,
    pub decoder: TransformerDecoder<MODEL_DIM, NUM_HEADS, FF_DIM, NUM_DECODER_LAYERS, E, D>,
}

impl<const M: usize, const H: usize, const A: usize, const B: usize, const F: usize, E, D>
    TensorCollection<E, D> for Transformer<M, H, A, B, F, E, D>
where
    E: Dtype + Float + SampleUniform,
    D: Device<E>,
{
    type To<E2: Dtype, D2: Device<E2>> = Transformer<M, H, A, B, F, E2, D2>;

    fn iter_tensors<V: ModuleVisitor<Self, E, D>>(
        visitor: &mut V,
    ) -> Result<Option<Self::To<V::E2, V::D2>>, V::Err> {
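        // Visit the encoder and decoder submodules so their parameters are
        // traversed (and can be rebuilt with another dtype/device via `To`).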
        visitor.visit_fields(
            (
                Self::module("encoder", |s| &s.encoder, |s| &mut s.encoder),
                Self::module("decoder", |s| &s.decoder, |s| &mut s.decoder),
            ),
            |(encoder, decoder)| Transformer { encoder, decoder },
        )
    }
}

impl<
        const M: usize,
        const H: usize,
        const EL: usize,
        const DL: usize,
        const F: usize,
        E: Dtype,
        D: Device<E>,
        Src: SplitTape,
        Tgt: PutTape<Src::Tape>,
    > Module<(Src, Tgt)> for Transformer<M, H, EL, DL, F, E, D>
where
    TransformerEncoder<M, H, F, EL, E, D>: Module<Src, Output = Src, Error = D::Err>,
    TransformerDecoder<M, H, F, DL, E, D>: Module<
        (<Tgt as PutTape<Src::Tape>>::Output, Src::NoTape),
        Output = <Tgt as PutTape<Src::Tape>>::Output,
        Error = D::Err,
    >,
{
    type Output = <Tgt as PutTape<Src::Tape>>::Output;
    type Error = D::Err;

    fn try_forward(&self, (src, tgt): (Src, Tgt)) -> Result<Self::Output, D::Err> {
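        // Encode the source sequence, then split off the gradient tape so the
        // encoder output ("memory") can be passed to the decoder untaped.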
        let (mem, tape) = self.encoder.try_forward(src)?.split_tape();
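        // Move the tape onto the target sequence and decode it against the memory.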
        self.decoder.try_forward((tgt.put_tape(tape), mem))
    }
}

impl<const M: usize, const H: usize, const A: usize, const B: usize, const F: usize, E, D>
    NonMutableModule for Transformer<M, H, A, B, F, E, D>
where
    E: Dtype,
    D: Device<E>,
{
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{optim::*, tests::*};

    #[test]
    fn test_forward() {
        let dev = TestDevice::seed_from_u64(0);
        type Model = builder::Transformer<16, 4, 3, 3, 8>;
        let mut t = dev.build_module::<Model, TestDtype>();

        // unbatched
        let src = dev.sample_normal::<Rank2<7, 16>>();
        let tgt = dev.sample_normal::<Rank2<9, 16>>();
        let _: Tensor<Rank2<9, 16>, _, _, _> = t.forward_mut((src, tgt));

        // batched
        let src = dev.sample_normal::<Rank3<4, 12, 16>>();
        let tgt = dev.sample_normal::<Rank3<4, 6, 16>>();
        let _: Tensor<Rank3<4, 6, 16>, _, _, _> = t.forward_mut((src, tgt));
    }

    #[test]
    fn test_backward() {
        let dev = TestDevice::seed_from_u64(0);
        type Model = builder::Transformer<16, 4, 3, 3, 8>;
        let mut t = dev.build_module::<Model, TestDtype>();

        let src = dev.sample_normal::<Rank3<4, 12, 16>>();
        let tgt = dev.sample_normal::<Rank3<4, 6, 16>>();
        let out: Tensor<Rank3<4, 6, 16>, _, _, _> = t.forward_mut((src.leaky_trace(), tgt));
        let g = out.mean().backward();

        let mut opt = Sgd::new(&t, Default::default());
        opt.update(&mut t, &g).expect("");
    }
}