m2m/inference/
mod.rs

1//! Hydra ML inference for intelligent algorithm routing.
2//!
3//! The [Hydra SLM](https://huggingface.co/infernet/hydra) is a small language model
4//! optimized for M2M protocol tasks.
5//!
6//! # Inference Backends
7//!
8//! - **Native (safetensors)**: Pure Rust inference from safetensors weights
9//! - **ONNX Runtime**: Optional, requires `onnx` feature flag
10//! - **Heuristic fallback**: Rule-based fallback when model unavailable
11//!
12//! # Tokenizers
13//!
14//! Hydra supports multiple tokenizer backends:
15//!
16//! - **Llama 3** (128K vocab): Primary tokenizer for open source ecosystem
17//! - **o200k_base** (200K vocab): OpenAI GPT-4o, o1, o3
18//! - **cl100k_base** (100K vocab): OpenAI GPT-3.5, GPT-4
19//! - **Fallback**: Byte-level tokenizer when nothing else available
20//!
21//! # Tasks
22//!
23//! - **Compression selection**: Predicts optimal algorithm (None/BPE/Brotli/Zlib)
24//! - **Security detection**: Classifies prompt injection and jailbreak attempts
25//! - **Token estimation**: Fast approximate token counting
26//!
27//! # Model Architecture
28//!
29//! ```text
30//! vocab_size: 128000 (Llama 3), hidden_size: 192, num_layers: 4, num_experts: 4
31//! ```
32//!
33//! The Hydra model is a Mixture of Experts classifier:
34//! - 4 MoE layers with top-2 expert routing
35//! - Heterogeneous expert architectures (different depths/widths)
36//! - ~100MB model size (float32 weights with 128K vocab)
37//!
38//! # Download
39//!
40//! ```bash
41//! huggingface-cli download infernet/hydra --local-dir ./models/hydra
42//! ```
43//!
44//! # Example
45//!
46//! ```rust,ignore
47//! use m2m::inference::{HydraModel, CompressionDecision, Llama3Tokenizer};
48//!
//! // Load the model
//! let model = HydraModel::load("./models/hydra")?;
//!
//! let decision = model.predict_compression(&content)?;
//! match decision.algorithm {
//!     Algorithm::Brotli => { /* use brotli */ }
//!     _ => { /* use other */ }
//! }
57//! ```
58
59pub mod bitnet;
60mod hydra;
61pub mod tokenizer;
62
63pub use bitnet::HydraBitNet;
64pub use hydra::{CompressionDecision, HydraModel, SecurityDecision, ThreatType};
65
66// Tokenizer exports
67pub use tokenizer::{
68    boxed, load_tokenizer, load_tokenizer_by_type, BoxedTokenizer, FallbackTokenizer,
69    HydraByteTokenizer, HydraTokenizer, Llama3Tokenizer, TiktokenTokenizer, TokenizerType,
70    MAX_SEQUENCE_LENGTH,
71};
72
/// Version of the Hydra model weights this crate targets.
pub const MODEL_VERSION: &str = "2.0.0";

/// Default path to the model weights (safetensors format), relative to the
/// process working directory; matches the `huggingface-cli download`
/// location shown in the module docs above.
pub const DEFAULT_MODEL_PATH: &str = "./models/hydra/model.safetensors";

/// Default path to the tokenizer definition (`tokenizer.json`), expected
/// alongside the model weights in the same directory.
pub const DEFAULT_TOKENIZER_PATH: &str = "./models/hydra/tokenizer.json";