pub struct Session { /* private fields */ }
A session that interacts with a language model.
A session maintains state between requests, allowing for multi-turn conversations. You can reuse the same session for multiple prompts or create a new one each time.
§Example
use fm_rs::{Session, SystemLanguageModel, GenerationOptions};
let model = SystemLanguageModel::new()?;
let session = Session::new(&model)?;
let response = session.respond("Hello!", &GenerationOptions::default())?;
println!("{}", response.content());
Implementations§
impl Session
pub fn new(model: &SystemLanguageModel) -> Result<Self>
Creates a new session with the given model.
pub fn with_instructions(
    model: &SystemLanguageModel,
    instructions: &str,
) -> Result<Self>
Creates a new session with instructions.
Instructions define the model’s behavior and role.
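A minimal sketch of pairing instructions with a follow-up prompt; the instruction text here is illustrative.
§Example
use fm_rs::{Session, SystemLanguageModel, GenerationOptions};
let model = SystemLanguageModel::new()?;
let session = Session::with_instructions(
    &model,
    "You are a concise assistant. Answer in one sentence.",
)?;
let response = session.respond("What is Rust?", &GenerationOptions::default())?;
println!("{}", response.content());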
pub fn with_tools(
    model: &SystemLanguageModel,
    tools: &[Arc<dyn Tool>],
) -> Result<Self>
Creates a new session with tools.
Tools allow the model to call external functions during generation.
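A hedged sketch of wiring tools into a session. Since this page does not show the Tool trait's methods, my_tool stands in for any existing implementation, and the Result import is assumed to be the crate's alias.
§Example
use std::sync::Arc;
use fm_rs::{Result, Session, SystemLanguageModel, Tool};
// `my_tool` is a placeholder for any value implementing Tool;
// the trait itself is documented elsewhere.
fn session_with_tools(
    model: &SystemLanguageModel,
    my_tool: Arc<dyn Tool>,
) -> Result<Session> {
    Session::with_tools(model, &[my_tool])
}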
pub fn with_instructions_and_tools(
    model: &SystemLanguageModel,
    instructions: &str,
    tools: &[Arc<dyn Tool>],
) -> Result<Self>
Creates a new session with both instructions and tools.
pub fn from_transcript(
    model: &SystemLanguageModel,
    transcript_json: &str,
) -> Result<Self>
Creates a session from a transcript JSON string.
This allows restoring a previous conversation.
Note: restored sessions do not carry tools; use with_tools when creating a new session that needs them.
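A round-trip sketch pairing this constructor with transcript_json (documented below); it assumes the transcript string round-trips unchanged.
§Example
use fm_rs::{Session, SystemLanguageModel, GenerationOptions};
let model = SystemLanguageModel::new()?;
let session = Session::new(&model)?;
session.respond("My name is Ada.", &GenerationOptions::default())?;
// Persist the conversation (e.g. write the JSON to disk), then restore it later.
let transcript = session.transcript_json()?;
let restored = Session::from_transcript(&model, &transcript)?;
let reply = restored.respond("What is my name?", &GenerationOptions::default())?;
println!("{}", reply.content());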
pub fn respond(
    &self,
    prompt: &str,
    options: &GenerationOptions,
) -> Result<Response>
Sends a prompt and waits for the complete response.
This method blocks until the model finishes generating.
pub fn respond_with_timeout(
    &self,
    prompt: &str,
    options: &GenerationOptions,
    timeout: Duration,
) -> Result<Response>
Sends a prompt and waits for the complete response, with a timeout.
If timeout is zero, this behaves like respond.
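A sketch of bounding a request to 30 seconds; the exact error returned on expiry is not documented on this page.
§Example
use fm_rs::{Session, SystemLanguageModel, GenerationOptions};
use std::time::Duration;
let model = SystemLanguageModel::new()?;
let session = Session::new(&model)?;
let response = session.respond_with_timeout(
    "Summarize the Rust borrow checker.",
    &GenerationOptions::default(),
    Duration::from_secs(30),
)?;
println!("{}", response.content());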
pub fn stream_response<F>(
    &self,
    prompt: &str,
    options: &GenerationOptions,
    on_chunk: F,
) -> Result<()>
Sends a prompt and streams the response.
The on_chunk callback is called for each text chunk as it arrives.
This method blocks until streaming is complete.
§Example
use fm_rs::{Session, SystemLanguageModel, GenerationOptions};
let model = SystemLanguageModel::new()?;
let session = Session::new(&model)?;
session.stream_response("Tell me a story", &GenerationOptions::default(), |chunk| {
    print!("{}", chunk);
})?;
pub fn is_responding(&self) -> bool
Checks if the session is currently generating a response.
pub fn transcript_json(&self) -> Result<String>
Gets the session transcript as a JSON string.
This can be used to persist and restore conversations.
pub fn context_usage(&self, limit: &ContextLimit) -> Result<ContextUsage>
Estimates current context usage based on the session transcript.
pub fn ensure_context_within(&self, limit: &ContextLimit) -> Result<()>
Returns an error if the estimated context usage exceeds the configured limit.
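A hedged sketch of checking usage before sending another prompt. This page does not show how to construct a ContextLimit, so one is taken as a parameter, and Debug formatting on ContextUsage is an assumption.
§Example
use fm_rs::{ContextLimit, Result, Session};
fn check_budget(session: &Session, limit: &ContextLimit) -> Result<()> {
    // Inspect the estimate first (Debug formatting is an assumption).
    let usage = session.context_usage(limit)?;
    println!("context usage: {usage:?}");
    // Then fail fast instead of sending a prompt that would overflow.
    session.ensure_context_within(limit)
}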
pub fn prewarm(&self, prompt_prefix: Option<&str>) -> Result<()>
Prewarms the model with an optional prompt prefix.
This can reduce latency for the first response.
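A minimal sketch; the prefix text is illustrative, and passing None prewarms without one.
§Example
use fm_rs::{Session, SystemLanguageModel};
let model = SystemLanguageModel::new()?;
let session = Session::new(&model)?;
// Warm the model before the user submits anything.
session.prewarm(Some("You are a helpful assistant."))?;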
pub fn respond_json(
    &self,
    prompt: &str,
    schema: &Value,
    options: &GenerationOptions,
) -> Result<String>
Sends a prompt and returns a structured JSON response.
The schema is a JSON Schema that describes the expected output format. The model is instructed to produce JSON that matches the schema.
§Example
use fm_rs::{Session, SystemLanguageModel, GenerationOptions};
use serde::Deserialize;
use serde_json::json;
#[derive(Deserialize)]
struct Person {
    name: String,
    age: u32,
}
let model = SystemLanguageModel::new()?;
let session = Session::new(&model)?;
let schema = json!({
    "type": "object",
    "properties": {
        "name": { "type": "string" },
        "age": { "type": "integer" }
    },
    "required": ["name", "age"]
});
let json_str = session.respond_json(
    "Generate a fictional person",
    &schema,
    &GenerationOptions::default()
)?;
let person: Person = serde_json::from_str(&json_str)?;
pub fn respond_structured<T: DeserializeOwned>(
    &self,
    prompt: &str,
    schema: &Value,
    options: &GenerationOptions,
) -> Result<T>
Sends a prompt and returns a deserialized structured response.
This is a convenience method that calls respond_json and deserializes
the result into the specified type.
§Example
use fm_rs::{Session, SystemLanguageModel, GenerationOptions};
use serde::Deserialize;
use serde_json::json;
#[derive(Deserialize)]
struct Person {
    name: String,
    age: u32,
}
let model = SystemLanguageModel::new()?;
let session = Session::new(&model)?;
let schema = json!({
    "type": "object",
    "properties": {
        "name": { "type": "string" },
        "age": { "type": "integer" }
    },
    "required": ["name", "age"]
});
let person: Person = session.respond_structured(
    "Generate a fictional person",
    &schema,
    &GenerationOptions::default()
)?;
pub fn respond_structured_gen<T>(
    &self,
    prompt: &str,
    options: &GenerationOptions,
) -> Result<T>
where
    T: Generable + DeserializeOwned,
Sends a prompt and returns a deserialized structured response using a derived schema.
This uses the type's Generable implementation to obtain the JSON schema, so no explicit schema argument is required.
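A hedged sketch; it assumes fm_rs exposes a Generable derive macro, since only the trait itself is confirmed by this page. If there is no derive, implement Generable manually instead.
§Example
use fm_rs::{Generable, Session, SystemLanguageModel, GenerationOptions};
use serde::Deserialize;
// The derive is an assumption; the schema comes from the Generable impl.
#[derive(Generable, Deserialize)]
struct Person {
    name: String,
    age: u32,
}
let model = SystemLanguageModel::new()?;
let session = Session::new(&model)?;
let person: Person = session.respond_structured_gen(
    "Generate a fictional person",
    &GenerationOptions::default(),
)?;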
pub fn stream_json<F>(
    &self,
    prompt: &str,
    schema: &Value,
    options: &GenerationOptions,
    on_chunk: F,
) -> Result<()>
Streams a structured JSON response.
The on_chunk callback receives partial JSON as it’s generated.
Note that partial chunks may not be valid JSON until streaming completes.
§Example
use fm_rs::{Session, SystemLanguageModel, GenerationOptions};
use serde_json::json;
let model = SystemLanguageModel::new()?;
let session = Session::new(&model)?;
let schema = json!({
    "type": "object",
    "properties": {
        "items": { "type": "array", "items": { "type": "string" } }
    }
});
session.stream_json(
    "List 5 programming languages",
    &schema,
    &GenerationOptions::default(),
    |chunk| {
        print!("{chunk}");
    }
)?;