use minillm::inference::InferenceEngine;
/// Demonstrates tokenizer round-trips with the GPT-2 tokenizer.
///
/// Loads the `openai-community/gpt2` model, then for each sample text:
/// encodes it to token IDs, decodes those IDs back to a string, and
/// reports whether the round trip reproduced the original text
/// (ignoring leading/trailing whitespace).
///
/// # Errors
/// Propagates any `minillm` error from model loading, tokenization,
/// or decoding.
fn main() -> minillm::Result<()> {
    println!("🔤 MiniLLM Tokenization Example");

    let engine = InferenceEngine::new("openai-community/gpt2")?;

    // A fixed sample set only iterated once: a plain array avoids the
    // needless heap allocation of `vec![...]` (clippy::useless_vec).
    let texts = [
        "Hello, world!",
        "The quick brown fox jumps over the lazy dog.",
        "Rust is a systems programming language.",
        "Transformer models revolutionized natural language processing.",
    ];

    for text in texts {
        println!("\n📝 Text: \"{}\"", text);

        let tokens = engine.tokenize(text)?;
        println!("🔢 Token count: {}", tokens.len());
        println!("🎯 Token IDs: {:?}", tokens);

        let decoded = engine.decode(&tokens)?;
        println!("🔄 Decoded: \"{}\"", decoded);

        // Compare trimmed forms so the check is insensitive to
        // leading/trailing whitespace the tokenizer may introduce or drop
        // (presumed tokenizer behavior — the exact cause lives in minillm).
        if text.trim() == decoded.trim() {
            println!("✅ Round-trip successful");
        } else {
            println!("⚠️ Round-trip mismatch");
        }
    }

    Ok(())
}