// syntaxdot_transformers/models/squeeze_bert/mod.rs
//! SqueezeBERT (Iandola et al., 2020)
//!
//! SqueezeBERT follows the same architecture as BERT, but replaces most
//! matrix multiplications by grouped convolutions. This reduces the
//! number of parameters and speeds up inference.

7mod config;
8pub use config::SqueezeBertConfig;
9
10mod embeddings;
11
12mod encoder;
13pub use encoder::SqueezeBertEncoder;
14
15mod layer;
16pub(crate) use layer::SqueezeBertLayer;