#![allow(clippy::all)]
#![allow(unused_variables)]
use trustformers::{pipeline, Result};
#[tokio::main]
async fn main() -> Result<()> {
    // Entry point: run each demo in sequence; any pipeline error aborts the run.
    println!("🚀 TrustformeRS Basic Pipeline Examples\n");

    // Each example prints its own section header and results.
    text_classification_example().await?;
    text_generation_example().await?;
    question_answering_example().await?;

    println!("\n✅ All examples completed successfully!");
    Ok(())
}
/// Demonstrates the text-classification pipeline on single inputs and on a batch.
async fn text_classification_example() -> Result<()> {
    println!("📝 Text Classification Example");
    println!("==============================");

    // Default model/config (both None) for the text-classification task.
    let classifier = pipeline("text-classification", None, None)?;

    let samples = [
        "I love this new transformer library!",
        "This is the worst software I've ever used.",
        "The weather is nice today.",
        "TrustformeRS makes ML so much easier!",
    ];

    println!("Classifying texts:");
    for sample in samples.iter() {
        println!(" Input: \"{}\"", sample);
        let outcome = classifier.__call__((*sample).to_string())?;
        println!(" Result: {:?}", outcome);
        println!();
    }

    // Same inputs again, but submitted as one batched call.
    println!("Batch processing example:");
    let owned: Vec<String> = samples.iter().map(|s| (*s).to_string()).collect();
    let batch_results = classifier.batch(owned.clone())?;
    for (sample, outcome) in owned.iter().zip(&batch_results) {
        println!(" \"{}\" -> {:?}", sample, outcome);
    }
    println!();
    Ok(())
}
/// Demonstrates the text-generation pipeline over a few fixed prompts.
async fn text_generation_example() -> Result<()> {
    println!("✍️ Text Generation Example");
    println!("===========================");

    // Default model/config for the text-generation task.
    let generator = pipeline("text-generation", None, None)?;

    let prompts = [
        "The future of artificial intelligence",
        "Once upon a time in a distant galaxy",
        "The benefits of renewable energy include",
    ];

    println!("Generating text:");
    for &prompt in prompts.iter() {
        println!(" Prompt: \"{}\"", prompt);
        let generated = generator.__call__(prompt.to_string())?;
        println!(" Generated: {:?}", generated);
        println!();
    }
    Ok(())
}
/// Demonstrates question answering by packing question + context into one input string.
async fn question_answering_example() -> Result<()> {
    println!("❓ Question Answering Example");
    println!("=============================");

    // Default model/config for the question-answering task.
    let qa_pipeline = pipeline("question-answering", None, None)?;

    // Shared context paragraph; the `\` continuations keep it one single-line string.
    let context = "TrustformeRS is a high-performance machine learning library written in Rust. \
It provides state-of-the-art transformer models with excellent performance \
and memory efficiency. The library supports multiple model architectures \
including BERT, GPT, T5, and many others.";

    let questions = [
        "What is TrustformeRS?",
        "What programming language is TrustformeRS written in?",
        "What model architectures does it support?",
    ];

    println!("Context: {}", context);
    println!("\nAnswering questions:");
    for &question in &questions {
        println!(" Question: \"{}\"", question);
        // The pipeline takes a single string, so fold question and context together.
        let packed = format!("Question: {} Context: {}", question, context);
        let answer = qa_pipeline.__call__(packed)?;
        println!(" Answer: {:?}", answer);
        println!();
    }
    Ok(())
}
/// Prints a short informational banner for the given model name, followed by a blank line.
pub fn print_model_info(model_name: &str) {
    let banner = [
        "📊 Model Information".to_string(),
        format!("Model: {}", model_name),
        "Framework: TrustformeRS".to_string(),
        "Language: Rust".to_string(),
    ];
    for line in banner {
        println!("{}", line);
    }
    println!();
}
/// Interactive loop: classifies user-entered text until the user types "quit"
/// or stdin is exhausted/unreadable.
///
/// # Errors
/// Returns an error if the classification pipeline cannot be constructed.
/// Per-input classification errors are printed and the loop continues.
#[allow(dead_code)]
pub async fn interactive_example() -> Result<()> {
    use std::io::{self, Write};
    println!("🎯 Interactive TrustformeRS Example");
    println!("===================================");
    let classifier = pipeline("text-classification", None, None)?;
    loop {
        print!("Enter text to classify (or 'quit' to exit): ");
        // Flush so the prompt appears before we block on stdin.
        let _ = io::stdout().flush();
        let mut input = String::new();
        // Fix: the original `.expect()` panicked on a read error, and an EOF
        // (Ok(0), e.g. piped or closed stdin) left `input` empty, which hit the
        // `is_empty` -> `continue` path and busy-looped forever. Break on both.
        match io::stdin().read_line(&mut input) {
            Ok(0) => break, // EOF: no more input will ever arrive
            Ok(_) => {}
            Err(e) => {
                println!("Failed to read user input: {}", e);
                break;
            }
        }
        let input = input.trim();
        if input == "quit" {
            break;
        }
        if input.is_empty() {
            continue;
        }
        match classifier.__call__(input.to_string()) {
            Ok(result) => println!("Classification result: {:?}\n", result),
            Err(e) => println!("Error: {}\n", e),
        }
    }
    Ok(())
}
/// Compares per-item inference latency against a single batched call
/// over the same ten inputs, then reports the speedup ratio.
#[allow(dead_code)]
pub async fn performance_comparison_example() -> Result<()> {
    use std::time::Instant;
    println!("⚡ Performance Comparison Example");
    println!("=================================");

    let classifier = pipeline("text-classification", None, None)?;

    // 100 synthetic sentences; only the first 10 are actually timed below.
    let test_texts: Vec<String> =
        (0..100).map(|i| format!("This is test sentence number {}", i)).collect();
    let subset = &test_texts[..10];

    println!("Single inference benchmark:");
    let single_start = Instant::now();
    for text in subset {
        let _ = classifier.__call__(text.clone())?;
    }
    let single_time = single_start.elapsed();
    println!(" 10 single inferences: {:?}", single_time);

    println!("Batch inference benchmark:");
    let batch_start = Instant::now();
    let _ = classifier.batch(subset.to_vec())?;
    let batch_time = batch_start.elapsed();
    println!(" 1 batch of 10: {:?}", batch_time);

    println!(
        " Speedup: {:.2}x",
        single_time.as_nanos() as f64 / batch_time.as_nanos() as f64
    );
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Smoke test for the model-info banner helper.
    #[test]
    fn test_model_info() {
        print_model_info("test-model");
    }

    /// Placeholder async test; running real pipelines needs model assets.
    #[tokio::test]
    async fn test_basic_examples() {
        println!("Testing basic pipeline examples...");
    }
}