// stream_response/stream_response.rs

// examples/stream_response.rs
// Example: Streaming response generation
//
// This example demonstrates using the `stream_response()` method to get
// real-time incremental updates from the Foundation Model. This provides
// a much better user experience with immediate feedback.
//
// Usage: cargo run --example stream_response

use fm_bindings::LanguageModelSession;
use std::io::{self, Write};
12
13fn main() -> Result<(), Box<dyn std::error::Error>> {
14    println!("=== Foundation Models - Streaming Response Example ===\n");
15
16    // Create a new session
17    println!("Creating session...");
18    let session = LanguageModelSession::new()?;
19    println!("Session created!\n");
20
21    // Define the prompt
22    let prompt = "Tell me a short story about a robot learning to paint.";
23    println!("Prompt: \"{}\"\n", prompt);
24    println!("Streaming response:\n");
25    println!("---");
26
27    // Stream the response chunk by chunk
28    // The callback is called for each chunk as it's generated
29    session.stream_response(prompt, |chunk| {
30        // Print each chunk immediately
31        print!("{}", chunk);
32
33        // Flush stdout to ensure immediate display
34        io::stdout().flush().unwrap();
35    })?;
36
37    // Print completion message
38    println!("\n---");
39    println!("\n=== Stream Complete ===");
40
41    Ok(())
42}