pub struct LanguageModelSession { /* private fields */ }

Expand description
A session for interacting with Apple’s Foundation Models
This provides access to on-device language models via the FoundationModels framework. Requires macOS 26+ or iOS 26+ with Apple Intelligence enabled.
§Examples
§Blocking response
let session = LanguageModelSession::new()?;
let response = session.response("What is Rust?")?;
println!("{}", response);

§Streaming response
let session = LanguageModelSession::new()?;
session.stream_response("What is Rust?", |chunk| {
print!("{}", chunk);
})?;

Implementations§
Source§impl LanguageModelSession
impl LanguageModelSession
Source

pub fn new() -> Result<Self>
pub fn new() -> Result<Self>
Creates a new language model session
This checks that the Foundation Model is available on the system.
§Errors
Returns Error::ModelNotAvailable if Apple Intelligence is not enabled
or the system model is unavailable.
Examples found in repository?
11fn main() -> Result<(), Box<dyn std::error::Error>> {
12 println!("=== Foundation Models - Blocking Response Example ===\n");
13
14 // Create a new session
15 println!("Creating session...");
16 let session = LanguageModelSession::new()?;
17 println!("Session created!\n");
18
19 // Define the prompt
20 let prompt = "What is Rust programming language? Please explain in 2-3 sentences.";
21 println!("Prompt: \"{}\"\n", prompt);
22 println!("Generating response...\n");
23
24 // Get the complete response
25 // This blocks until the entire response is generated
26 let response = session.response(prompt)?;
27
28 // Print the response
29 println!("Response:\n{}\n", response);
30 println!("=== Complete ===");
31
32 Ok(())
33}

More examples
13fn main() -> Result<(), Box<dyn std::error::Error>> {
14 println!("=== Foundation Models - Streaming Response Example ===\n");
15
16 // Create a new session
17 println!("Creating session...");
18 let session = LanguageModelSession::new()?;
19 println!("Session created!\n");
20
21 // Define the prompt
22 let prompt = "Tell me a short story about a robot learning to paint.";
23 println!("Prompt: \"{}\"\n", prompt);
24 println!("Streaming response:\n");
25 println!("---");
26
27 // Stream the response chunk by chunk
28 // The callback is called for each chunk as it's generated
29 session.stream_response(prompt, |chunk| {
30 // Print each chunk immediately
31 print!("{}", chunk);
32
33 // Flush stdout to ensure immediate display
34 io::stdout().flush().unwrap();
35 })?;
36
37 // Print completion message
38 println!("\n---");
39 println!("\n=== Stream Complete ===");
40
41 Ok(())
42}

Source

pub fn response(&self, prompt: &str) -> Result<String>
pub fn response(&self, prompt: &str) -> Result<String>
Generates a complete response to the given prompt
This method blocks until the entire response is generated and returned as a String.
For a better user experience with incremental updates, use stream_response instead.
§Arguments
- prompt – The input text to send to the model
§Errors
- Error::ModelNotAvailable – If the Foundation Model is not available
- Error::InvalidInput – If the prompt is empty or invalid
- Error::GenerationError – If an error occurs during generation
§Examples
let session = LanguageModelSession::new()?;
let response = session.response("Explain Rust ownership")?;
println!("Response: {}", response);

Examples found in repository?
11fn main() -> Result<(), Box<dyn std::error::Error>> {
12 println!("=== Foundation Models - Blocking Response Example ===\n");
13
14 // Create a new session
15 println!("Creating session...");
16 let session = LanguageModelSession::new()?;
17 println!("Session created!\n");
18
19 // Define the prompt
20 let prompt = "What is Rust programming language? Please explain in 2-3 sentences.";
21 println!("Prompt: \"{}\"\n", prompt);
22 println!("Generating response...\n");
23
24 // Get the complete response
25 // This blocks until the entire response is generated
26 let response = session.response(prompt)?;
27
28 // Print the response
29 println!("Response:\n{}\n", response);
30 println!("=== Complete ===");
31
32 Ok(())
33}

Source

pub fn stream_response<F>(&self, prompt: &str, on_chunk: F) -> Result<()>
pub fn stream_response<F>(&self, prompt: &str, on_chunk: F) -> Result<()>
Generates a streaming response to the given prompt
This method calls the provided callback for each chunk as it’s generated, providing immediate feedback to the user. The callback receives string slices containing incremental text deltas.
§Arguments
- prompt – The input text to send to the model
- on_chunk – Callback function called for each generated chunk
§Errors
- Error::ModelNotAvailable – If the Foundation Model is not available
- Error::InvalidInput – If the prompt is empty or invalid
- Error::GenerationError – If an error occurs during generation
§Examples
let session = LanguageModelSession::new()?;
session.stream_response("Tell me a story", |chunk| {
print!("{}", chunk);
let _ = io::stdout().flush();
})?;
println!(); // newline after stream completes

Examples found in repository?
13fn main() -> Result<(), Box<dyn std::error::Error>> {
14 println!("=== Foundation Models - Streaming Response Example ===\n");
15
16 // Create a new session
17 println!("Creating session...");
18 let session = LanguageModelSession::new()?;
19 println!("Session created!\n");
20
21 // Define the prompt
22 let prompt = "Tell me a short story about a robot learning to paint.";
23 println!("Prompt: \"{}\"\n", prompt);
24 println!("Streaming response:\n");
25 println!("---");
26
27 // Stream the response chunk by chunk
28 // The callback is called for each chunk as it's generated
29 session.stream_response(prompt, |chunk| {
30 // Print each chunk immediately
31 print!("{}", chunk);
32
33 // Flush stdout to ensure immediate display
34 io::stdout().flush().unwrap();
35 })?;
36
37 // Print completion message
38 println!("\n---");
39 println!("\n=== Stream Complete ===");
40
41 Ok(())
42}

Source

pub fn cancel_stream(&self)
pub fn cancel_stream(&self)
Cancels the current streaming response
This method immediately cancels any ongoing streaming operation started with
stream_response. The streaming callback will stop receiving tokens and the
stream will complete with the tokens received so far.
§Notes
- This is a global operation that cancels the current stream
- Safe to call even if no stream is active
- After cancellation, the stream_response method will return normally
§Examples
let session = LanguageModelSession::new()?;
let session_clone = session.clone();
// Start streaming in a thread
thread::spawn(move || {
session_clone.stream_response("Long prompt...", |chunk| {
print!("{}", chunk);
}).ok();
});
// Cancel after a delay
thread::sleep(Duration::from_secs(2));
session.cancel_stream();

Trait Implementations§
Source§impl Clone for LanguageModelSession
impl Clone for LanguageModelSession
Source§fn clone(&self) -> LanguageModelSession
fn clone(&self) -> LanguageModelSession
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more