pub struct TypedModel<'c, T> { /* private fields */ }
Expand description
Type-safe wrapper for GenerativeModel, guaranteeing response type T.
This type enforces schema contracts through Rust’s type system while maintaining compatibility with Google’s Generative AI API. Use when:
- You need structured output from the model
- Response schema stability is critical
- You want compile-time validation of response handling
§Example
use google_ai_rs::{Client, GenerativeModel, AsSchema};
#[derive(AsSchema)]
struct Recipe {
name: String,
ingredients: Vec<String>,
}
let client = Client::new(auth).await?;
let model = client.typed_model::<Recipe>("gemini-pro");
Implementations§
Source§impl<'c, T> TypedModel<'c, T>where
T: AsSchema,
impl<'c, T> TypedModel<'c, T>where
T: AsSchema,
Sourcepub fn new(client: &'c Client, name: &str) -> Self
pub fn new(client: &'c Client, name: &str) -> Self
Creates a new typed model with schema validation.
§Arguments
client: Authenticated API client
name: Model name (e.g., “gemini-pro”)
Sourcepub async fn generate_typed_content<I>(
&self,
contents: I,
) -> Result<TypedResponse<T>, Error>where
I: TryIntoContents,
T: TryFromCandidates,
pub async fn generate_typed_content<I>(
&self,
contents: I,
) -> Result<TypedResponse<T>, Error>where
I: TryIntoContents,
T: TryFromCandidates,
Generates content with full response metadata.
Returns both parsed content and raw API response.
§Example
let model = TypedModel::<StockAnalysis>::new(&client, "gemini-pro");
let analysis: TypedResponse<StockAnalysis> = model.generate_typed_content((
"Analyze NVDA stock performance",
"Consider PE ratio and recent earnings"
)).await?;
println!("Analysis: {:?}", analysis);
Sourcepub async fn generate_content<I>(&self, contents: I) -> Result<T, Error>where
I: TryIntoContents,
T: TryFromCandidates,
pub async fn generate_content<I>(&self, contents: I) -> Result<T, Error>where
I: TryIntoContents,
T: TryFromCandidates,
Generates content and parses it directly into type T.
This is the primary method for most users wanting type-safe responses without dealing with raw API structures. For 90% of use cases where you just want structured data from the AI, this is what you need.
§Serde Integration
When the serde
feature is enabled, any type implementing serde::Deserialize
automatically works with this method. Just define your response structure and
let the library handle parsing.
§Example: Simple JSON Response
#[derive(AsSchema, Deserialize)]
struct StoryResponse {
title: String,
length: usize,
tags: Vec<String>,
}
let model = TypedModel::<StoryResponse>::new(&client, "gemini-pro");
let story = model.generate_content("Write a short story about a robot astronaut").await?;
println!("{} ({} words)", story.title, story.length);
§Example: Multi-part Input
#[derive(AsSchema, Deserialize)]
struct Analysis { safety_rating: u8 }
let model = TypedModel::<Analysis>::new(&client, "gemini-pro-vision");
let analysis = model.generate_content((
"Analyze this scene safety:",
Part::blob("image/jpeg", image_data),
"Consider vehicles, pedestrians, and weather"
)).await?;
§Errors
Error::InvalidArgument if input validation fails
Error::Service for model errors
Error::Net for network failures
Methods from Deref<Target = GenerativeModel<'c>>§
Sourcepub fn start_chat(&self) -> Session<'_>
pub fn start_chat(&self) -> Session<'_>
Starts a new chat session with empty history
Sourcepub async fn generate_content<T>(
&self,
contents: T,
) -> Result<GenerateContentResponse, Error>where
T: TryIntoContents,
pub async fn generate_content<T>(
&self,
contents: T,
) -> Result<GenerateContentResponse, Error>where
T: TryIntoContents,
Generates content from flexible input types
§Example
use google_ai_rs::Part;
// Simple text generation
let response = model.generate_content("Hello world!").await?;
// Multi-part content
model.generate_content((
"What's in this image?",
Part::blob("image/jpeg", image_data)
)).await?;
§Errors
Returns Error::InvalidArgument
for empty input or invalid combinations.
Error::Service
for model errors or Error::Net
for transport failures
Sourcepub async fn typed_generate_content<I, T>( &self, contents: I, ) -> Result<T, Error>
pub async fn typed_generate_content<I, T>( &self, contents: I, ) -> Result<T, Error>
Generates content and parses the response directly into type T.
Sourcepub async fn stream_generate_content<T>(
&self,
contents: T,
) -> Result<ResponseStream, Error>where
T: TryIntoContents,
pub async fn stream_generate_content<T>(
&self,
contents: T,
) -> Result<ResponseStream, Error>where
T: TryIntoContents,
Generates a streaming response from flexible input
§Example
let mut stream = model.stream_generate_content("Tell me a story.").await?;
while let Some(chunk) = stream.next().await? {
// Process streaming response
}
§Errors
Returns Error::Service
for model errors or Error::Net
for transport failures
Sourcepub async fn count_tokens<T>(
&self,
contents: T,
) -> Result<CountTokensResponse, Error>where
T: TryIntoContents,
pub async fn count_tokens<T>(
&self,
contents: T,
) -> Result<CountTokensResponse, Error>where
T: TryIntoContents,
Estimates token usage for given content
Useful for cost estimation and validation before full generation
§Arguments
contents
- Content input that can be converted to parts
§Example
let token_count = model.count_tokens(content).await?;
println!("Estimated cost: ${}", token_count.total() * COST_PER_TOKEN);
§Errors
Returns Error::InvalidArgument
for empty input
pub fn full_name(&self) -> &str
Sourcepub async fn info(&self) -> Result<Info, Error>
pub async fn info(&self) -> Result<Info, Error>
Returns information about the model: Info::Tuned if the current model is a fine-tuned one, otherwise Info::Model.
Sourcepub fn with_cloned_instruction<I: IntoContent>(&self, instruction: I) -> Self
pub fn with_cloned_instruction<I: IntoContent>(&self, instruction: I) -> Self
Creates a copy with new system instructions
Trait Implementations§
Source§impl<'c, T: Debug> Debug for TypedModel<'c, T>
impl<'c, T: Debug> Debug for TypedModel<'c, T>
Source§impl<'c, T> Deref for TypedModel<'c, T>
impl<'c, T> Deref for TypedModel<'c, T>
Source§impl<'c, T> From<GenerativeModel<'c>> for TypedModel<'c, T>where
T: AsSchema,
impl<'c, T> From<GenerativeModel<'c>> for TypedModel<'c, T>where
T: AsSchema,
Source§fn from(value: GenerativeModel<'c>) -> Self
fn from(value: GenerativeModel<'c>) -> Self
Auto Trait Implementations§
impl<'c, T> Freeze for TypedModel<'c, T>
impl<'c, T> !RefUnwindSafe for TypedModel<'c, T>
impl<'c, T> Send for TypedModel<'c, T>where
T: Send,
impl<'c, T> Sync for TypedModel<'c, T>where
T: Sync,
impl<'c, T> Unpin for TypedModel<'c, T>where
T: Unpin,
impl<'c, T> !UnwindSafe for TypedModel<'c, T>
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> Instrument for T
impl<T> Instrument for T
Source§fn instrument(self, span: Span) -> Instrumented<Self>
fn instrument(self, span: Span) -> Instrumented<Self>
Source§fn in_current_span(self) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
Source§impl<T> IntoRequest<T> for T
impl<T> IntoRequest<T> for T
Source§fn into_request(self) -> Request<T>
fn into_request(self) -> Request<T>
Wraps the input message T in a tonic::Request.