Skip to main content

stream_response/
stream_response.rs

// Example: Streaming response generation
//
// This example demonstrates using the `stream_response()` method to get
// real-time incremental updates from the Foundation Model. This provides
// a much better user experience with immediate feedback.
//
// Usage: cargo run --example stream_response

use fm_bindings::LanguageModelSession;
use std::io::{self, Write};

12fn main() -> Result<(), Box<dyn std::error::Error>> {
13    println!("=== Foundation Models - Streaming Response Example ===\n");
14
15    // Create a new session with instructions
16    println!("Creating session...");
17    let session = LanguageModelSession::with_instructions(
18        "You are a creative storyteller. Write engaging, vivid narratives.",
19    )?;
20    println!("Session created!\n");
21
22    // Define the prompt
23    let prompt = "Tell me a short story about a robot learning to paint.";
24    println!("Prompt: \"{}\"\n", prompt);
25    println!("Streaming response:\n");
26    println!("---");
27
28    // Stream the response chunk by chunk
29    // The callback is called for each chunk as it's generated
30    session.stream_response(prompt, |chunk| {
31        // Print each chunk immediately
32        print!("{}", chunk);
33
34        // Flush stdout to ensure immediate display
35        io::stdout().flush().unwrap();
36    })?;
37
38    // Print completion message
39    println!("\n---");
40    println!("\n=== Stream Complete ===");
41
42    Ok(())
43}