pub struct LanguageModelSession { /* private fields */ }

Description
A session for interacting with Apple’s Foundation Models
This provides access to on-device language models via the FoundationModels framework. Requires macOS 26+ or iOS 26+ with Apple Intelligence enabled.
§Session State
Each session maintains a transcript of all interactions (prompts, responses, etc.). The transcript can be serialized to JSON for persistence and used to restore sessions across app launches.
§Creating Sessions
- LanguageModelSession::new() — create without instructions
- LanguageModelSession::with_instructions() — create with a system prompt
- LanguageModelSession::from_transcript_json() — restore from a saved transcript
§Getting Responses
- response() — blocking response (waits for completion)
- stream_response() — streaming response (real-time chunks)
- cancel_stream() — cancel an ongoing stream
§Examples
See the method-level documentation for detailed examples:
- new() and response() for basic usage
- stream_response() for streaming
- transcript_json() and from_transcript_json() for persistence
Implementations§
Source§impl LanguageModelSession
impl LanguageModelSession
pub fn new() -> Result<Self>
pub fn new() -> Result<Self>
Creates a new language model session without instructions
This is equivalent to calling with_instructions(None).
§Errors
Returns Error::ModelNotAvailable if Apple Intelligence is not enabled
or the system model is unavailable.
pub fn with_instructions(instructions: &str) -> Result<Self>
pub fn with_instructions(instructions: &str) -> Result<Self>
Creates a new language model session with instructions
Instructions define the model’s persona, behavior, and guidelines for the entire session. They are always the first entry in the session transcript.
§Arguments
instructions — system prompt that guides the model’s behavior
§Errors
- Error::ModelNotAvailable — if Apple Intelligence is not enabled
- Error::InvalidInput — if instructions contain a null byte
§Examples
let session = LanguageModelSession::with_instructions(
"You are a helpful coding assistant. Provide concise answers."
)?;

Examples found in repository:
29fn create_and_save() -> Result<(), Box<dyn std::error::Error>> {
30 // Create a new session with instructions
31 let session = LanguageModelSession::with_instructions(
32 "You are a knowledgeable travel guide specializing in Japanese culture and tourism.",
33 )?;
34 println!("Created new session with travel guide persona.\n");
35
36 // Have a conversation
37 println!("User: Tell me about Tokyo.");
38 let response1 = session.response("Tell me about Tokyo.")?;
39 println!("Assistant: {}\n", response1);
40
41 println!("User: What's the best time to visit?");
42 let response2 = session.response("What's the best time to visit?")?;
43 println!("Assistant: {}\n", response2);
44
45 // Save the transcript
46 let transcript_json = session.transcript_json()?;
47 fs::write(TRANSCRIPT_FILE, &transcript_json)?;
48 println!("--- Session saved to {} ---", TRANSCRIPT_FILE);
49 println!("Run this example again to restore and continue the conversation.\n");
50
51 Ok(())
52}

More examples:
12fn main() -> Result<(), Box<dyn std::error::Error>> {
13 println!("=== Foundation Models - Streaming Response Example ===\n");
14
15 // Create a new session with instructions
16 println!("Creating session...");
17 let session = LanguageModelSession::with_instructions(
18 "You are a creative storyteller. Write engaging, vivid narratives.",
19 )?;
20 println!("Session created!\n");
21
22 // Define the prompt
23 let prompt = "Tell me a short story about a robot learning to paint.";
24 println!("Prompt: \"{}\"\n", prompt);
25 println!("Streaming response:\n");
26 println!("---");
27
28 // Stream the response chunk by chunk
29 // The callback is called for each chunk as it's generated
30 session.stream_response(prompt, |chunk| {
31 // Print each chunk immediately
32 print!("{}", chunk);
33
34 // Flush stdout to ensure immediate display
35 io::stdout().flush().unwrap();
36 })?;
37
38 // Print completion message
39 println!("\n---");
40 println!("\n=== Stream Complete ===");
41
42 Ok(())
43}

10fn main() -> Result<(), Box<dyn std::error::Error>> {
11 println!("=== Foundation Models - Blocking Response Example ===\n");
12
13 // Create a new session with instructions
14 println!("Creating session with instructions...");
15 let session = LanguageModelSession::with_instructions(
16 "You are a helpful assistant. Provide concise, accurate answers.",
17 )?;
18 println!("Session created!\n");
19
20 // Define the prompt
21 let prompt = "What is Rust programming language? Please explain in 2-3 sentences.";
22 println!("Prompt: \"{}\"\n", prompt);
23 println!("Generating response...\n");
24
25 // Get the complete response
26 // This blocks until the entire response is generated
27 let response = session.response(prompt)?;
28
29 // Print the response
30 println!("Response:\n{}\n", response);
31
32 // Multi-turn conversation example
33 println!("--- Multi-turn conversation ---\n");
34
35 let follow_up = "What are its main advantages?";
36 println!("Follow-up: \"{}\"\n", follow_up);
37
38 let response2 = session.response(follow_up)?;
39 println!("Response:\n{}\n", response2);
40
41 println!("=== Complete ===");
42
43 Ok(())
44}

pub fn from_transcript_json(transcript_json: &str) -> Result<Self>
pub fn from_transcript_json(transcript_json: &str) -> Result<Self>
Creates a session from a serialized transcript JSON
This restores a previous session state, including the original instructions and full conversation history. Use this to resume conversations across app launches.
§Arguments
transcript_json — JSON string produced by transcript_json()
§Errors
- Error::ModelNotAvailable — if Apple Intelligence is not enabled
- Error::InvalidInput — if the JSON contains a null byte or is invalid
§Examples
let json = std::fs::read_to_string("session.json")?;
let session = LanguageModelSession::from_transcript_json(&json)?;

Examples found in repository:
54fn restore_and_continue() -> Result<(), Box<dyn std::error::Error>> {
55 // Load the saved transcript
56 let transcript_json = fs::read_to_string(TRANSCRIPT_FILE)?;
57
58 // Restore the session
59 let session = LanguageModelSession::from_transcript_json(&transcript_json)?;
60 println!("Session restored with previous conversation context.\n");
61
62 // Continue the conversation - the model remembers the previous context
63 println!("User: What about the food there?");
64 let response = session.response("What about the food there?")?;
65 println!("Assistant: {}\n", response);
66
67 println!("User: Any restaurant recommendations?");
68 let response2 = session.response("Any restaurant recommendations?")?;
69 println!("Assistant: {}\n", response2);
70
71 // Clean up the saved file
72 fs::remove_file(TRANSCRIPT_FILE)?;
73 println!("--- Cleaned up saved session ---\n");
74
75 Ok(())
76}

pub fn transcript_json(&self) -> Result<String>
pub fn transcript_json(&self) -> Result<String>
Gets the current session transcript as JSON
The returned JSON can be persisted and later passed to from_transcript_json()
to restore the session state.
§Returns
JSON string representing the full transcript (instructions, prompts, responses)
§Errors
Error::InternalError- If transcript serialization fails
§Examples
let session = LanguageModelSession::new()?;
let _ = session.response("Hello")?;
let json = session.transcript_json()?;
std::fs::write("session.json", &json)?;

Examples found in repository:
29fn create_and_save() -> Result<(), Box<dyn std::error::Error>> {
30 // Create a new session with instructions
31 let session = LanguageModelSession::with_instructions(
32 "You are a knowledgeable travel guide specializing in Japanese culture and tourism.",
33 )?;
34 println!("Created new session with travel guide persona.\n");
35
36 // Have a conversation
37 println!("User: Tell me about Tokyo.");
38 let response1 = session.response("Tell me about Tokyo.")?;
39 println!("Assistant: {}\n", response1);
40
41 println!("User: What's the best time to visit?");
42 let response2 = session.response("What's the best time to visit?")?;
43 println!("Assistant: {}\n", response2);
44
45 // Save the transcript
46 let transcript_json = session.transcript_json()?;
47 fs::write(TRANSCRIPT_FILE, &transcript_json)?;
48 println!("--- Session saved to {} ---", TRANSCRIPT_FILE);
49 println!("Run this example again to restore and continue the conversation.\n");
50
51 Ok(())
52}

pub fn response(&self, prompt: &str) -> Result<String>
pub fn response(&self, prompt: &str) -> Result<String>
Generates a complete response to the given prompt
This method blocks until the entire response is generated and returned as a String. The prompt and response are added to the session transcript.
For a better user experience with incremental updates, use stream_response instead.
§Arguments
prompt- The input text to send to the model
§Errors
- Error::InvalidInput — if the prompt is empty or contains a null byte
- Error::GenerationError — if an error occurs during generation
§Examples
let session = LanguageModelSession::new()?;
let response = session.response("Explain Rust ownership")?;
println!("Response: {}", response);

Examples found in repository:
29fn create_and_save() -> Result<(), Box<dyn std::error::Error>> {
30 // Create a new session with instructions
31 let session = LanguageModelSession::with_instructions(
32 "You are a knowledgeable travel guide specializing in Japanese culture and tourism.",
33 )?;
34 println!("Created new session with travel guide persona.\n");
35
36 // Have a conversation
37 println!("User: Tell me about Tokyo.");
38 let response1 = session.response("Tell me about Tokyo.")?;
39 println!("Assistant: {}\n", response1);
40
41 println!("User: What's the best time to visit?");
42 let response2 = session.response("What's the best time to visit?")?;
43 println!("Assistant: {}\n", response2);
44
45 // Save the transcript
46 let transcript_json = session.transcript_json()?;
47 fs::write(TRANSCRIPT_FILE, &transcript_json)?;
48 println!("--- Session saved to {} ---", TRANSCRIPT_FILE);
49 println!("Run this example again to restore and continue the conversation.\n");
50
51 Ok(())
52}
53
54fn restore_and_continue() -> Result<(), Box<dyn std::error::Error>> {
55 // Load the saved transcript
56 let transcript_json = fs::read_to_string(TRANSCRIPT_FILE)?;
57
58 // Restore the session
59 let session = LanguageModelSession::from_transcript_json(&transcript_json)?;
60 println!("Session restored with previous conversation context.\n");
61
62 // Continue the conversation - the model remembers the previous context
63 println!("User: What about the food there?");
64 let response = session.response("What about the food there?")?;
65 println!("Assistant: {}\n", response);
66
67 println!("User: Any restaurant recommendations?");
68 let response2 = session.response("Any restaurant recommendations?")?;
69 println!("Assistant: {}\n", response2);
70
71 // Clean up the saved file
72 fs::remove_file(TRANSCRIPT_FILE)?;
73 println!("--- Cleaned up saved session ---\n");
74
75 Ok(())
76}

More examples:
10fn main() -> Result<(), Box<dyn std::error::Error>> {
11 println!("=== Foundation Models - Blocking Response Example ===\n");
12
13 // Create a new session with instructions
14 println!("Creating session with instructions...");
15 let session = LanguageModelSession::with_instructions(
16 "You are a helpful assistant. Provide concise, accurate answers.",
17 )?;
18 println!("Session created!\n");
19
20 // Define the prompt
21 let prompt = "What is Rust programming language? Please explain in 2-3 sentences.";
22 println!("Prompt: \"{}\"\n", prompt);
23 println!("Generating response...\n");
24
25 // Get the complete response
26 // This blocks until the entire response is generated
27 let response = session.response(prompt)?;
28
29 // Print the response
30 println!("Response:\n{}\n", response);
31
32 // Multi-turn conversation example
33 println!("--- Multi-turn conversation ---\n");
34
35 let follow_up = "What are its main advantages?";
36 println!("Follow-up: \"{}\"\n", follow_up);
37
38 let response2 = session.response(follow_up)?;
39 println!("Response:\n{}\n", response2);
40
41 println!("=== Complete ===");
42
43 Ok(())
44}

pub fn stream_response<F>(&self, prompt: &str, on_chunk: F) -> Result<()>
pub fn stream_response<F>(&self, prompt: &str, on_chunk: F) -> Result<()>
Generates a streaming response to the given prompt
This method calls the provided callback for each chunk as it’s generated, providing immediate feedback to the user. The prompt and complete response are added to the session transcript.
§Arguments
- prompt — the input text to send to the model
- on_chunk — callback function called for each generated chunk
§Errors
- Error::InvalidInput — if the prompt is empty or contains a null byte
- Error::GenerationError — if an error occurs during generation
§Examples
let session = LanguageModelSession::new()?;
session.stream_response("Tell me a story", |chunk| {
print!("{}", chunk);
let _ = io::stdout().flush();
})?;
println!(); // newline after stream completes

Examples found in repository:
12fn main() -> Result<(), Box<dyn std::error::Error>> {
13 println!("=== Foundation Models - Streaming Response Example ===\n");
14
15 // Create a new session with instructions
16 println!("Creating session...");
17 let session = LanguageModelSession::with_instructions(
18 "You are a creative storyteller. Write engaging, vivid narratives.",
19 )?;
20 println!("Session created!\n");
21
22 // Define the prompt
23 let prompt = "Tell me a short story about a robot learning to paint.";
24 println!("Prompt: \"{}\"\n", prompt);
25 println!("Streaming response:\n");
26 println!("---");
27
28 // Stream the response chunk by chunk
29 // The callback is called for each chunk as it's generated
30 session.stream_response(prompt, |chunk| {
31 // Print each chunk immediately
32 print!("{}", chunk);
33
34 // Flush stdout to ensure immediate display
35 io::stdout().flush().unwrap();
36 })?;
37
38 // Print completion message
39 println!("\n---");
40 println!("\n=== Stream Complete ===");
41
42 Ok(())
43}

pub fn cancel_stream(&self)
pub fn cancel_stream(&self)
Cancels the current streaming response
This method immediately cancels any ongoing streaming operation. The streaming callback will stop receiving tokens and the stream will complete with the tokens received so far.
§Notes
- Safe to call even if no stream is active
- After cancellation, stream_response will return normally
§Examples
let session = Arc::new(LanguageModelSession::new()?);
let session_clone = Arc::clone(&session);
// Start streaming in a thread
let handle = thread::spawn(move || {
session_clone.stream_response("Write a long essay...", |chunk| {
print!("{}", chunk);
})
});
// Cancel after a delay
thread::sleep(Duration::from_secs(2));
session.cancel_stream();
handle.join().unwrap()?;