candle_transformers/models/mamba.rs

use crate::models::with_tracing::{linear, linear_no_bias, Linear};
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{RmsNorm, VarBuilder};

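// Fixed Mamba hyper-parameters: causal conv kernel width and SSM state dimension.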
const D_CONV: usize = 4;
const D_STATE: usize = 16;

#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
    pub d_model: usize,
    pub n_layer: usize,
    pub vocab_size: usize,
    pub pad_vocab_size_multiple: usize,
}

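// Derived sizes: the vocabulary is padded up to a multiple of `pad_vocab_size_multiple`.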
impl Config {
    fn vocab_size(&self) -> usize {
        let pad = self.pad_vocab_size_multiple;
        self.vocab_size.div_ceil(pad) * pad
    }

    fn dt_rank(&self) -> usize {
        self.d_model.div_ceil(16)
    }

    fn d_inner(&self) -> usize {
        self.d_model * 2
    }
}

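/// Per-layer recurrent state carried across decoding steps: the SSM hidden states `hs`
/// and a rolling buffer of the last `D_CONV` inputs for the causal conv1d.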
pub struct State {
    pub hs: Vec<Tensor>,
    pub prev_xs: Vec<[Tensor; D_CONV]>,
    pub pos: usize,
}

impl State {
    pub fn new(batch_size: usize, cfg: &Config, dtype: DType, device: &Device) -> Result<Self> {
        let mut hs = Vec::with_capacity(cfg.n_layer);
        let mut prev_xs = Vec::with_capacity(cfg.n_layer);
        for _i in 0..cfg.n_layer {
            let h = Tensor::zeros((batch_size, cfg.d_inner(), D_STATE), dtype, device)?;
            let x = Tensor::zeros((batch_size, cfg.d_inner()), dtype, device)?;
            hs.push(h);
            prev_xs.push([x.clone(), x.clone(), x.clone(), x.clone()]);
        }
        Ok(Self {
            hs,
            prev_xs,
            pos: 0,
        })
    }
}

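/// A single Mamba mixer block, applied to one token per call.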
#[derive(Clone, Debug)]
pub struct MambaBlock {
    in_proj: Linear,
    conv1d_bias: Tensor,
    conv1d_weights: [Tensor; D_CONV],
    x_proj: Linear,
    dt_proj: Linear,
    a_log: Tensor,
    d: Tensor,
    out_proj: Linear,
    dt_rank: usize,
    layer_index: usize,
    d_inner: usize,
}

impl MambaBlock {
    pub fn new(layer_index: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let d_inner = cfg.d_inner();
        let dt_rank = cfg.dt_rank();
        let in_proj = linear_no_bias(cfg.d_model, d_inner * 2, vb.pp("in_proj"))?;
        let x_proj = linear_no_bias(d_inner, dt_rank + D_STATE * 2, vb.pp("x_proj"))?;
        let dt_proj = linear(dt_rank, d_inner, vb.pp("dt_proj"))?;
        let a_log = vb.get((d_inner, D_STATE), "A_log")?;
        let d = vb.get(d_inner, "D")?;
        let out_proj = linear_no_bias(d_inner, cfg.d_model, vb.pp("out_proj"))?;
        let conv1d_bias = vb.get(d_inner, "conv1d.bias")?;
        let conv1d_weight = vb.get((d_inner, 1, D_CONV), "conv1d.weight")?;
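        // Unroll the depthwise conv kernel into one weight vector per time offset so the
        // convolution can be evaluated step by step from the rolling buffer.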
        let conv1d_weights = [
            conv1d_weight.i((.., 0, 0))?,
            conv1d_weight.i((.., 0, 1))?,
            conv1d_weight.i((.., 0, 2))?,
            conv1d_weight.i((.., 0, 3))?,
        ];
        Ok(Self {
            in_proj,
            conv1d_bias,
            conv1d_weights,
            x_proj,
            dt_proj,
            a_log,
            d,
            out_proj,
            dt_rank,
            layer_index,
            d_inner,
        })
    }

    pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
        let (b_sz, _dim) = xs.dims2()?;
        let li = self.layer_index;
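        // Project the input and split it into the conv/SSM branch and the gating branch.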
        let mut xs = xs.apply(&self.in_proj)?.chunk(2, D::Minus1)?;
        let proj_for_silu = xs.remove(1);
        state.prev_xs[li][state.pos % D_CONV] = xs.remove(0);
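        // Causal depthwise conv1d over the last D_CONV inputs, evaluated from the
        // rolling buffer, followed by the silu activation.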
        let mut proj_for_conv = self.conv1d_bias.broadcast_as((b_sz, self.d_inner))?;
        for d_c in 0..D_CONV {
            proj_for_conv = (proj_for_conv
                + self.conv1d_weights[d_c]
                    .broadcast_mul(&state.prev_xs[li][(d_c + 1 + state.pos) % D_CONV])?)?;
        }
        let proj_for_conv = candle_nn::ops::silu(&proj_for_conv)?;
        let x_proj = self.x_proj.forward(&proj_for_conv)?;
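        // Split the projection into the input-dependent step size delta and the SSM
        // parameters B and C.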
        let delta = x_proj.narrow(D::Minus1, 0, self.dt_rank)?.contiguous()?;
        let b = x_proj.narrow(D::Minus1, self.dt_rank, D_STATE)?;
        let c = x_proj.narrow(D::Minus1, self.dt_rank + D_STATE, D_STATE)?;

        let delta = delta.apply(&self.dt_proj)?;
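        // Softplus keeps the step size positive.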
        let delta = (delta.exp()? + 1.)?.log()?;
        let a = self.a_log.to_dtype(delta.dtype())?.exp()?.neg()?;
        let d = self.d.to_dtype(delta.dtype())?;

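        // Selective scan, single step: h_t = exp(delta * A) * h_{t-1} + delta * B * x_t.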
        let delta = delta
            .unsqueeze(D::Minus1)?
            .broadcast_as((b_sz, self.d_inner, D_STATE))?;
        let a = a.broadcast_as((b_sz, self.d_inner, D_STATE))?;
        let b = b.broadcast_as((b_sz, self.d_inner, D_STATE))?;
        let proj_for_conv_b = proj_for_conv
            .unsqueeze(D::Minus1)?
            .broadcast_as((b_sz, self.d_inner, D_STATE))?;
        state.hs[li] = ((&state.hs[li] * (&delta * &a)?.exp()?)? + &delta * &b * &proj_for_conv_b)?;
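        // Output: y_t = C * h_t + D * x_t, gated by silu(z) and projected back to d_model.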
        let ss = (state.hs[li]
            .matmul(&c.unsqueeze(D::Minus1)?)?
            .squeeze(D::Minus1)?
            + proj_for_conv.broadcast_mul(&d)?)?;

        let ys = (ss * candle_nn::ops::silu(&proj_for_silu))?;
        ys.apply(&self.out_proj)
    }
}

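/// Pre-norm residual wrapper: RmsNorm, then the Mamba mixer, plus a skip connection.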
#[derive(Clone, Debug)]
pub struct ResidualBlock {
    mixer: MambaBlock,
    norm: RmsNorm,
}

impl ResidualBlock {
    pub fn new(layer_index: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let norm = candle_nn::rms_norm(cfg.d_model, 1e-5, vb.pp("norm"))?;
        let mixer = MambaBlock::new(layer_index, cfg, vb.pp("mixer"))?;
        Ok(Self { mixer, norm })
    }

    fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
        self.mixer.forward(&xs.apply(&self.norm)?, state)? + xs
    }
}

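/// Mamba language model: token embedding, a stack of residual blocks, a final norm,
/// and an lm_head whose weights are tied to the embedding.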
#[derive(Clone, Debug)]
pub struct Model {
    embedding: candle_nn::Embedding,
    layers: Vec<ResidualBlock>,
    norm_f: RmsNorm,
    lm_head: Linear,
    dtype: DType,
}

impl Model {
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let embedding = candle_nn::embedding(cfg.vocab_size(), cfg.d_model, vb.pp("embedding"))?;
        let mut layers = Vec::with_capacity(cfg.n_layer);
        let vb_l = vb.pp("layers");
        for layer_idx in 0..cfg.n_layer {
            let layer = ResidualBlock::new(layer_idx, cfg, vb_l.pp(layer_idx))?;
            layers.push(layer)
        }
        let norm_f = candle_nn::rms_norm(cfg.d_model, 1e-5, vb.pp("norm_f"))?;
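        // Weight tying: the LM head reuses the embedding matrix.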
        let lm_head = Linear::from_weights(embedding.embeddings().clone(), None);
        Ok(Self {
            embedding,
            layers,
            norm_f,
            lm_head,
            dtype: vb.dtype(),
        })
    }

    pub fn forward(&self, input_ids: &Tensor, state: &mut State) -> Result<Tensor> {
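        // One decoding step: `input_ids` holds a single token id per batch element and
        // the shared `state` is advanced by one position.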
        let _b_size = input_ids.dims1()?;
        let mut xs = self.embedding.forward(input_ids)?;
        for layer in self.layers.iter() {
            xs = layer.forward(&xs, state)?
        }
        state.pos += 1;
        xs.apply(&self.norm_f)?.apply(&self.lm_head)
    }

    pub fn dtype(&self) -> DType {
        self.dtype
    }
}