//! opentslm 0.1.0
//!
//! Rust implementation of OpenTSLM using Burn, WGPU, and llama.cpp.
//! Model components for opentslm.
//!
//! The model is split into three cooperating sub-modules:
//!
//! ```text
//! ┌─────────────────────────────────────────────────────┐
//! │  encoder  — TransformerCnnEncoder                   │
//! │             Conv1d patch embedding + Transformer    │
//! │             [B, L] → [B, N_patches, 128]            │
//! ├─────────────────────────────────────────────────────┤
//! │  projector — MlpProjector  (unused in SP variant)  │
//! │              LayerNorm → Linear → GELU → Dropout    │
//! │              [B, N, 128] → [B, N, llm_hidden]       │
//! ├─────────────────────────────────────────────────────┤
//! │  llm       — LlamaCppBackend (frozen GGUF backbone) │
//! │            + OpenTslmSp (trainable encoder + head)  │
//! └─────────────────────────────────────────────────────┘
//! ```
//!
//! The `projector` module is retained for completeness (it mirrors
//! `MLPProjector.py`) but is not used by the SP variant, which projects
//! directly to vocabulary logits via `LogitBiasHead`.

// Time-series encoder (`TransformerCnnEncoder`): Conv1d patch embedding
// followed by a Transformer, mapping [B, L] -> [B, N_patches, 128].
pub mod encoder;
// Frozen GGUF llama.cpp backbone (`LlamaCppBackend`) plus the trainable
// SP model (`OpenTslmSp`: encoder + head).
pub mod llm;
// `MlpProjector` (LayerNorm -> Linear -> GELU -> Dropout). Retained to
// mirror `MLPProjector.py`; NOT used by the SP variant, which projects
// directly to vocabulary logits via `LogitBiasHead`.
pub mod projector;