pub struct GenerativeModel<'c> {
pub system_instruction: Option<Content>,
pub tools: Option<Vec<Tool>>,
pub tool_config: Option<ToolConfig>,
pub safety_settings: Option<Vec<SafetySetting>>,
pub generation_config: Option<GenerationConfig>,
pub cached_content: Option<String>,
/* private fields */
}
Expand description
Configured interface for a specific generative AI model
§Example
use google_ai_rs::{Client, GenerativeModel};
let client = Client::new(auth).await?;
let model = client.generative_model("gemini-pro")
.with_system_instruction("You are a helpful assistant")
.with_response_format("application/json");
Fields§
system_instruction: Option<Content>
System prompt guiding model behavior
tools: Option<Vec<Tool>>
Available functions/tools the model can use
tool_config: Option<ToolConfig>
Configuration for tool usage
safety_settings: Option<Vec<SafetySetting>>
Content safety filters
generation_config: Option<GenerationConfig>
Generation parameters (temperature, top-k, etc.)
cached_content: Option<String>
Fullname of the cached content to use as context (e.g., “cachedContents/NAME”)
Implementations§
Source§impl GenerativeModel<'_>
impl GenerativeModel<'_>
Sourcepub fn start_chat(&self) -> Session<'_>
pub fn start_chat(&self) -> Session<'_>
Starts a new chat session with empty history
Source§impl<'c> GenerativeModel<'c>
impl<'c> GenerativeModel<'c>
Sourcepub fn new(client: &'c Client, name: &str) -> Self
pub fn new(client: &'c Client, name: &str) -> Self
Creates a new model interface with default configuration
§Arguments
client
- Authenticated API client
name
- Model identifier (e.g., “gemini-pro”)
To access a tuned model named NAME, pass “tunedModels/NAME”.
Sourcepub async fn generate_content<T>(
&self,
contents: T,
) -> Result<GenerateContentResponse, Error>where
T: IntoContents,
pub async fn generate_content<T>(
&self,
contents: T,
) -> Result<GenerateContentResponse, Error>where
T: IntoContents,
Generates content from flexible input types
§Example
use google_ai_rs::Part;
// Simple text generation
let response = model.generate_content("Hello world!").await?;
// Multi-part content
model.generate_content((
"What's in this image?",
Part::blob("image/jpeg", image_data)
)).await?;
§Errors
Returns Error::InvalidArgument
for empty input or invalid combinations.
Error::Service
for model errors or Error::Net
for transport failures
Sourcepub async fn stream_generate_content<T>(
&self,
contents: T,
) -> Result<ResponseStream, Error>where
T: IntoContents,
pub async fn stream_generate_content<T>(
&self,
contents: T,
) -> Result<ResponseStream, Error>where
T: IntoContents,
Generates a streaming response from flexible input
§Example
let mut stream = model.stream_generate_content("Tell me a story.").await?;
while let Some(chunk) = stream.next().await? {
// Process streaming response
}
§Errors
Returns Error::Service
for model errors or Error::Net
for transport failures
Sourcepub async fn count_tokens<T>(
&self,
contents: T,
) -> Result<CountTokensResponse, Error>where
T: IntoContents,
pub async fn count_tokens<T>(
&self,
contents: T,
) -> Result<CountTokensResponse, Error>where
T: IntoContents,
Estimates token usage for given content
Useful for cost estimation and validation before full generation
§Arguments
contents
- Content input that can be converted to parts
§Example
let token_count = model.count_tokens(content).await?;
println!("Estimated cost: ${}", token_count.total() * COST_PER_TOKEN);
§Errors
Returns Error::InvalidArgument
for empty input
pub fn change_model(&mut self, to: &str)
pub fn full_name(&self) -> &str
Sourcepub async fn info(&self) -> Result<Info, Error>
pub async fn info(&self) -> Result<Info, Error>
Returns information about the model.
Returns Info::Tuned
if the current model is a fine-tuned one,
otherwise Info::Model.
Sourcepub fn with_system_instruction(self, instruction: &str) -> Self
pub fn with_system_instruction(self, instruction: &str) -> Self
Sets system-level behavior instructions
Sourcepub fn with_cached_content(self, c: &CachedContent) -> Result<Self, Error>
pub fn with_cached_content(self, c: &CachedContent) -> Result<Self, Error>
Sets cached content for persisted context
§Example
use google_ai_rs::content::IntoContents as _;
let content = "You are a helpful assistant".into_cached_content_for("gemini-1.0-pro");
let cached_content = client.create_cached_content(content).await?;
let model = client.generative_model("gemini-pro")
.with_cached_content(&cached_content);
Sourcepub fn with_response_format(self, mime_type: &str) -> Self
pub fn with_response_format(self, mime_type: &str) -> Self
Specifies expected response format (e.g., “application/json”)
Sourcepub fn as_response_schema<T: AsSchema>(self) -> Self
pub fn as_response_schema<T: AsSchema>(self) -> Self
Set response schema with explicit Schema object using types implementing AsSchema
Similar to with_response_schema
.
§Example
use google_ai_rs::AsSchema;
#[derive(Debug, AsSchema)]
#[schema(description = "A primary colour")]
struct PrimaryColor {
#[schema(description = "The name of the colour")]
name: String,
#[schema(description = "The RGB value of the color, in hex", rename = "RGB")]
rgb: String
}
let model = client.generative_model("gemini-pro")
.as_response_schema::<Vec<PrimaryColor>>();
Sourcepub fn with_response_schema(self, schema: Schema) -> Self
pub fn with_response_schema(self, schema: Schema) -> Self
Set response schema with explicit Schema object
Use when you need full control over schema details. Automatically sets response format to JSON if not specified.
§Example
use google_ai_rs::Schema;
use google_ai_rs::SchemaType;
let model = client.generative_model("gemini-pro")
.with_response_schema(Schema {
r#type: SchemaType::String as i32,
format: "enum".into(),
..Default::default()
});
Sourcepub fn with_cloned_instruction(&self, instruction: &str) -> Self
pub fn with_cloned_instruction(&self, instruction: &str) -> Self
Creates a copy with new system instructions
pub fn set_candidate_count(&mut self, x: i32)
pub fn set_max_output_tokens(&mut self, x: i32)
pub fn set_temperature(&mut self, x: f32)
pub fn set_top_p(&mut self, x: f32)
pub fn set_top_k(&mut self, x: i32)
Trait Implementations§
Source§impl<'c> Clone for GenerativeModel<'c>
impl<'c> Clone for GenerativeModel<'c>
Source§fn clone(&self) -> GenerativeModel<'c>
fn clone(&self) -> GenerativeModel<'c>
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Auto Trait Implementations§
impl<'c> Freeze for GenerativeModel<'c>
impl<'c> !RefUnwindSafe for GenerativeModel<'c>
impl<'c> Send for GenerativeModel<'c>
impl<'c> Sync for GenerativeModel<'c>
impl<'c> Unpin for GenerativeModel<'c>
impl<'c> !UnwindSafe for GenerativeModel<'c>
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for Twhere
T: Clone,
impl<T> CloneToUninit for Twhere
T: Clone,
Source§impl<T> Instrument for T
impl<T> Instrument for T
Source§fn instrument(self, span: Span) -> Instrumented<Self>
fn instrument(self, span: Span) -> Instrumented<Self>
Source§fn in_current_span(self) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
Source§impl<T> IntoRequest<T> for T
impl<T> IntoRequest<T> for T
Source§fn into_request(self) -> Request<T>
fn into_request(self) -> Request<T>
Wraps the input message T
in a tonic::Request