pub struct ContentBuilder {
pub contents: Vec<Content>,
/* private fields */
}
Builder for content generation requests
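A minimal usage sketch (assuming a Tokio async context and the GEMINI_API_KEY environment variable; the prompt strings are illustrative):

use gemini_rust::Gemini;

async fn sketch() -> Result<(), Box<dyn std::error::Error>> {
    // Build a request fluently, then send it with `execute`.
    let client = Gemini::new(std::env::var("GEMINI_API_KEY")?);
    let response = client
        .generate_content()
        .with_system_prompt("You are a concise assistant.")
        .with_user_message("Say hello")
        .execute()
        .await?;
    println!("{}", response.text());
    Ok(())
}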
Fields

contents: Vec<Content>

Implementations

impl ContentBuilder
pub fn with_system_prompt(self, text: impl Into<String>) -> Self

Add a system prompt to the request
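A focused sketch of this method in isolation (assuming `client: Gemini` and an async context, as in the repository examples below; the persona text is a placeholder):

let response = client
    .generate_content()
    .with_system_prompt("You are a pirate. Answer in pirate speak.")
    .with_user_message("Describe the weather today.")
    .execute()
    .await?;
println!("{}", response.text());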
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get API key from environment variable
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
    // Using custom base URL
    let custom_base_url = "https://generativelanguage.googleapis.com/v1beta/";
    let client_custom = Gemini::with_model_and_base_url(
        api_key,
        "models/gemini-2.5-flash-lite-preview-06-17".to_string(),
        custom_base_url.to_string(),
    );
    println!("Custom base URL client created successfully");
    let response = client_custom
        .generate_content()
        .with_system_prompt("You are a helpful assistant.")
        .with_user_message("Hello, can you tell me a joke about programming?")
        .with_generation_config(GenerationConfig {
            temperature: Some(0.7),
            max_output_tokens: Some(100),
            ..Default::default()
        })
        .execute()
        .await?;

    println!("Response: {}", response.text());

    Ok(())
}
More examples
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get API key from environment variable
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");

    // Create client
    let client = Gemini::new(api_key);

    // Using the full generation config
    println!("--- Using full generation config ---");
    let response1 = client
        .generate_content()
        .with_system_prompt("You are a helpful assistant.")
        .with_user_message("Write a short poem about Rust programming language.")
        .with_generation_config(GenerationConfig {
            temperature: Some(0.9),
            top_p: Some(0.8),
            top_k: Some(20),
            max_output_tokens: Some(200),
            candidate_count: Some(1),
            stop_sequences: Some(vec!["END".to_string()]),
            response_mime_type: None,
            response_schema: None,
            thinking_config: None,
            ..Default::default()
        })
        .execute()
        .await?;

    println!(
        "Response with high temperature (0.9):\n{}\n",
        response1.text()
    );

    // Using individual generation parameters
    println!("--- Using individual generation parameters ---");
    let response2 = client
        .generate_content()
        .with_system_prompt("You are a helpful assistant.")
        .with_user_message("Write a short poem about Rust programming language.")
        .with_temperature(0.2)
        .with_max_output_tokens(100)
        .execute()
        .await?;

    println!(
        "Response with low temperature (0.2):\n{}\n",
        response2.text()
    );

    // Setting multiple parameters individually
    println!("--- Setting multiple parameters individually ---");
    let response3 = client
        .generate_content()
        .with_system_prompt("You are a helpful assistant.")
        .with_user_message("List 3 benefits of using Rust.")
        .with_temperature(0.7)
        .with_top_p(0.9)
        .with_max_output_tokens(150)
        .with_stop_sequences(vec!["4.".to_string()])
        .execute()
        .await?;

    println!(
        "Response with custom parameters and stop sequence:\n{}",
        response3.text()
    );

    Ok(())
}
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get API key from environment variable
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");

    // Create client
    let client = Gemini::new(api_key);

    // Using response_schema for structured output
    println!("--- Structured Response Example ---");

    // Define a JSON schema for the response
    let schema = json!({
        "type": "object",
        "properties": {
            "name": {
                "type": "string",
                "description": "Name of the programming language"
            },
            "year_created": {
                "type": "integer",
                "description": "Year the programming language was created"
            },
            "creator": {
                "type": "string",
                "description": "Person or organization who created the language"
            },
            "key_features": {
                "type": "array",
                "items": {
                    "type": "string"
                },
                "description": "Key features of the programming language"
            },
            "popularity_score": {
                "type": "integer",
                "description": "Subjective popularity score from 1-10"
            }
        },
        "required": ["name", "year_created", "creator", "key_features", "popularity_score"]
    });

    let response = client
        .generate_content()
        .with_system_prompt("You provide information about programming languages in JSON format.")
        .with_user_message("Tell me about the Rust programming language.")
        .with_response_mime_type("application/json")
        .with_response_schema(schema)
        .execute()
        .await?;

    println!("Structured JSON Response:");
    println!("{}", response.text());

    // Parse the JSON response
    let json_response: serde_json::Value = serde_json::from_str(&response.text())?;

    println!("\nAccessing specific fields:");
    println!("Language: {}", json_response["name"]);
    println!("Created in: {}", json_response["year_created"]);
    println!("Created by: {}", json_response["creator"]);
    println!("Popularity: {}/10", json_response["popularity_score"]);

    println!("\nKey Features:");
    if let Some(features) = json_response["key_features"].as_array() {
        for (i, feature) in features.iter().enumerate() {
            println!("{}. {}", i + 1, feature);
        }
    }

    Ok(())
}
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get API key from environment variable
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");

    // Create client
    let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());

    println!("=== Gemini 2.5 Thinking Basic Example ===\n");

    // Example 1: Using default dynamic thinking
    println!(
        "--- Example 1: Dynamic thinking (model automatically determines thinking budget) ---"
    );
    let response1 = client
        .generate_content()
        .with_system_prompt("You are a helpful mathematics assistant.")
        .with_user_message(
            "Explain Occam's razor principle and provide a simple example from daily life.",
        )
        .with_dynamic_thinking()
        .with_thoughts_included(true)
        .execute()
        .await?;

    // Display thinking process
    let thoughts = response1.thoughts();
    if !thoughts.is_empty() {
        println!("Thinking summary:");
        for (i, thought) in thoughts.iter().enumerate() {
            println!("Thought {}: {}\n", i + 1, thought);
        }
    }

    println!("Answer: {}\n", response1.text());

    // Display token usage
    if let Some(usage) = &response1.usage_metadata {
        println!("Token usage:");
        println!("  Prompt tokens: {}", usage.prompt_token_count);
        println!(
            "  Response tokens: {}",
            usage.candidates_token_count.unwrap_or(0)
        );
        if let Some(thinking_tokens) = usage.thoughts_token_count {
            println!("  Thinking tokens: {}", thinking_tokens);
        }
        println!("  Total tokens: {}\n", usage.total_token_count);
    }

    // Example 2: Set specific thinking budget
    println!("--- Example 2: Set thinking budget (1024 tokens) ---");
    let response2 = client
        .generate_content()
        .with_system_prompt("You are a helpful programming assistant.")
        .with_user_message("List 3 main advantages of using the Rust programming language")
        .with_thinking_budget(1024)
        .with_thoughts_included(true)
        .execute()
        .await?;

    // Display thinking process
    let thoughts2 = response2.thoughts();
    if !thoughts2.is_empty() {
        println!("Thinking summary:");
        for (i, thought) in thoughts2.iter().enumerate() {
            println!("Thought {}: {}\n", i + 1, thought);
        }
    }

    println!("Answer: {}\n", response2.text());

    // Example 3: Disable thinking feature
    println!("--- Example 3: Disable thinking feature ---");
    let response3 = client
        .generate_content()
        .with_system_prompt("You are a helpful assistant.")
        .with_user_message("What is artificial intelligence?")
        .execute()
        .await?;

    println!("Answer: {}\n", response3.text());

    // Example 4: Use GenerationConfig to set thinking
    println!("--- Example 4: Use GenerationConfig to set thinking ---");
    let thinking_config = ThinkingConfig::new()
        .with_thinking_budget(2048)
        .with_thoughts_included(true);

    let generation_config = GenerationConfig {
        temperature: Some(0.7),
        max_output_tokens: Some(500),
        thinking_config: Some(thinking_config),
        ..Default::default()
    };

    let response4 = client
        .generate_content()
        .with_system_prompt("You are a creative writing assistant.")
        .with_user_message(
            "Write the opening of a short story about a robot learning to feel emotions.",
        )
        .with_generation_config(generation_config)
        .execute()
        .await?;

    // Display thinking process
    let thoughts4 = response4.thoughts();
    if !thoughts4.is_empty() {
        println!("Thinking summary:");
        for (i, thought) in thoughts4.iter().enumerate() {
            println!("Thought {}: {}\n", i + 1, thought);
        }
    }

    println!("Answer: {}\n", response4.text());

    Ok(())
}
pub fn with_system_instruction(self, text: impl Into<String>) -> Self

Set the system instruction directly (matching the API format in the curl example)
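A minimal sketch isolating this method (assuming `client: Gemini` and an async context; the persona text echoes the curl example below):

let response = client
    .generate_content()
    .with_system_instruction("You are a cat. Your name is Neko.")
    .with_user_message("Hello there")
    .execute()
    .await?;
println!("{}", response.text());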
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Replace with your actual API key
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");

    // Create a Gemini client
    let gemini = Gemini::pro(api_key);

    // This example matches the exact curl request format:
    // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
    //   -H 'Content-Type: application/json' \
    //   -d '{
    //     "system_instruction": {
    //       "parts": [
    //         {
    //           "text": "You are a cat. Your name is Neko."
    //         }
    //       ]
    //     },
    //     "contents": [
    //       {
    //         "parts": [
    //           {
    //             "text": "Hello there"
    //           }
    //         ]
    //       }
    //     ]
    //   }'
    let response = gemini
        .generate_content()
        .with_system_instruction("You are a cat. Your name is Neko.")
        .with_user_message("Hello there")
        .execute()
        .await?;

    // Print the response
    println!("Response: {}", response.text());

    Ok(())
}
pub fn with_user_message(self, text: impl Into<String>) -> Self

Add a user message to the request
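The most common entry point for a request; each call appends another user-role message to the conversation. A minimal sketch (assuming `client: Gemini` and an async context):

let response = client
    .generate_content()
    .with_user_message("Say hello")
    .execute()
    .await?;
println!("{}", response.text());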
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let api_key = env::var("GEMINI_API_KEY")?;

    // Create client with the default model (gemini-2.0-flash)
    let client = Gemini::new(api_key);

    println!("Sending request to Gemini API...");

    // Simple text completion with minimal content
    let response = client
        .generate_content()
        .with_user_message("Say hello")
        .execute()
        .await?;

    println!("Response: {}", response.text());

    Ok(())
}
More examples
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get API key from environment variable
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");

    // Create client
    let client = Gemini::new(api_key);

    println!("--- Google Search tool example ---");

    // Create a Google Search tool
    let google_search_tool = Tool::google_search();

    // Create a request with Google Search tool
    let response = client
        .generate_content()
        .with_user_message("What is the current Google stock price?")
        .with_tool(google_search_tool)
        .execute()
        .await?;

    println!("Response: {}", response.text());

    Ok(())
}
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Read mp4 video file
    let mut file = File::open("examples/sample.mp4")?;
    let mut buffer = Vec::new();
    file.read_to_end(&mut buffer)?;
    let b64 = general_purpose::STANDARD.encode(&buffer);

    // Get API key
    let api_key = env::var("GEMINI_API_KEY")?;
    let gemini = Gemini::pro(api_key);

    // Example 1: Add mp4 blob using Message struct
    let video_content = Content::inline_data("video/mp4", b64.clone());
    let response1 = gemini
        .generate_content()
        .with_user_message("Please describe the content of this video (Message example)")
        .with_message(gemini_rust::Message {
            content: video_content,
            role: gemini_rust::Role::User,
        })
        .execute()
        .await?;

    println!("AI description (Message): {}", response1.text());

    // Example 2: Add mp4 blob directly using builder's with_inline_data
    let response2 = gemini
        .generate_content()
        .with_user_message("Please describe the content of this video (with_inline_data example)")
        .with_inline_data(b64, "video/mp4")
        .execute()
        .await?;

    println!("AI description (with_inline_data): {}", response2.text());
    Ok(())
}
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get API key from environment variable
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");

    // This is equivalent to the curl example:
    // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$YOUR_API_KEY" \
    //   -H 'Content-Type: application/json' \
    //   -X POST \
    //   -d '{
    //     "contents": [
    //       {
    //         "parts": [
    //           {
    //             "text": "Explain how AI works in a few words"
    //           }
    //         ]
    //       }
    //     ]
    //   }'

    // Create client - now using gemini-2.0-flash by default
    let client = Gemini::new(api_key);

    // Method 1: Using the high-level API (simplest approach)
    println!("--- Method 1: Using the high-level API ---");

    let response = client
        .generate_content()
        .with_user_message("Explain how AI works in a few words")
        .execute()
        .await?;

    println!("Response: {}", response.text());

    // Method 2: Using Content directly to match the curl example exactly
    println!("\n--- Method 2: Matching curl example structure exactly ---");

    // Create a content part that matches the JSON in the curl example
    let text_part = Part::Text {
        text: "Explain how AI works in a few words".to_string(),
        thought: None,
    };

    let content = Content {
        parts: Some(vec![text_part]),
        role: None,
    };

    // Add the content directly to the request
    // This exactly mirrors the JSON structure in the curl example
    let mut content_builder = client.generate_content();
    content_builder.contents.push(content);
    let response = content_builder.execute().await?;

    println!("Response: {}", response.text());

    Ok(())
}
Additional examples:
- examples/generation_config.rs
- examples/structured_response.rs
- examples/streaming.rs
- examples/simple_image_generation.rs
- examples/advanced.rs
- examples/simple.rs
- examples/thinking_basic.rs
- examples/image_editing.rs
- examples/google_search_with_functions.rs
- examples/simple_speech_generation.rs
- examples/thinking_curl_equivalent.rs
- examples/image_generation.rs
- examples/thinking_advanced.rs
- examples/tools.rs
- examples/multi_speaker_tts.rs
pub fn with_model_message(self, text: impl Into<String>) -> Self

Add a model message to the request
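Each request replays the whole conversation, so prior model replies are added back with this method. A minimal sketch (assuming `client: Gemini` and an earlier `response1` from a first turn, as in the repository example below):

// Replay the first exchange, then ask a follow-up question.
let response2 = client
    .generate_content()
    .with_user_message("Recommend a sci-fi novel.")
    .with_model_message(response1.text()) // the model's earlier reply
    .with_user_message("Why did you pick that one?")
    .execute()
    .await?;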
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get API key from environment variable
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");

    // Create client
    let client = Gemini::new(api_key);

    // Simple streaming generation
    println!("--- Streaming generation ---");

    let mut stream = client
        .generate_content()
        .with_system_prompt("You are a helpful, creative assistant.")
        .with_user_message("Write a short story about a robot who learns to feel emotions.")
        .execute_stream()
        .await?;

    print!("Streaming response: ");
    while let Some(chunk_result) = stream.next().await {
        match chunk_result {
            Ok(chunk) => {
                print!("{}", chunk.text());
                std::io::Write::flush(&mut std::io::stdout())?;
            }
            Err(e) => eprintln!("Error in stream: {}", e),
        }
    }
    println!("\n");

    // Multi-turn conversation
    println!("--- Multi-turn conversation ---");

    // First turn
    let response1 = client
        .generate_content()
        .with_system_prompt("You are a helpful travel assistant.")
        .with_user_message("I'm planning a trip to Japan. What are the best times to visit?")
        .execute()
        .await?;

    println!("User: I'm planning a trip to Japan. What are the best times to visit?");
    println!("Assistant: {}\n", response1.text());

    // Second turn (continuing the conversation)
    let response2 = client
        .generate_content()
        .with_system_prompt("You are a helpful travel assistant.")
        .with_user_message("I'm planning a trip to Japan. What are the best times to visit?")
        .with_model_message(response1.text())
        .with_user_message("What about cherry blossom season? When exactly does that happen?")
        .execute()
        .await?;

    println!("User: What about cherry blossom season? When exactly does that happen?");
    println!("Assistant: {}\n", response2.text());

    // Third turn (continuing the conversation)
    let response3 = client
        .generate_content()
        .with_system_prompt("You are a helpful travel assistant.")
        .with_user_message("I'm planning a trip to Japan. What are the best times to visit?")
        .with_model_message(response1.text())
        .with_user_message("What about cherry blossom season? When exactly does that happen?")
        .with_model_message(response2.text())
        .with_user_message("What are some must-visit places in Tokyo?")
        .execute()
        .await?;

    println!("User: What are some must-visit places in Tokyo?");
    println!("Assistant: {}", response3.text());

    Ok(())
}
pub fn with_inline_data(
    self,
    data: impl Into<String>,
    mime_type: impl Into<String>,
) -> Self

Add inline data (blob data) to the request
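The data is passed as a base64 string together with its MIME type. A minimal sketch (assuming `client: Gemini` and the base64 crate, as used in the repository examples; the file path is illustrative):

use base64::{engine::general_purpose, Engine as _};

// Read raw bytes and base64-encode them before attaching.
let bytes = std::fs::read("photo.jpg")?; // hypothetical input file
let b64 = general_purpose::STANDARD.encode(&bytes);
let response = client
    .generate_content()
    .with_user_message("What is in this picture?")
    .with_inline_data(b64, "image/jpeg")
    .execute()
    .await?;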
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get API key from environment variable
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");

    // Image file path (in the same directory)
    let image_path = Path::new(file!())
        .parent()
        .unwrap_or(Path::new("."))
        .join("image-example.webp"); // Replace with your image filename

    // Read the image file
    let mut file = File::open(&image_path)?;
    let mut buffer = Vec::new();
    file.read_to_end(&mut buffer)?;

    // Convert to base64
    let data = general_purpose::STANDARD.encode(&buffer);

    println!("Image loaded: {}", image_path.display());

    // Create client
    let client = Gemini::new(api_key);

    println!("--- Describe Image ---");
    let response = client
        .generate_content()
        .with_inline_data(data, "image/webp")
        .with_response_mime_type("text/plain")
        .with_generation_config(GenerationConfig {
            temperature: Some(0.7),
            max_output_tokens: Some(400),
            ..Default::default()
        })
        .execute()
        .await?;

    println!("Response: {}", response.text());

    Ok(())
}
More examples
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get API key from environment variable
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");

    // Create client with the image generation model
    let client = Gemini::with_model(api_key, "models/gemini-2.5-flash-image-preview".to_string());

    println!("🎨 Image Editing with Gemini");
    println!("This example shows how to edit images using text descriptions.");
    println!();

    // First, let's generate a base image to edit
    println!("📸 Step 1: Generating a base image...");
    let base_response = client
        .generate_content()
        .with_user_message(
            "Create a simple landscape image with a blue sky, green grass, \
             and a single white house in the center. The style should be \
             clean and minimalist.",
        )
        .execute()
        .await?;

    // Save the base image
    let mut base_image_data = None;
    for candidate in base_response.candidates.iter() {
        if let Some(parts) = &candidate.content.parts {
            for part in parts.iter() {
                if let gemini_rust::Part::InlineData { inline_data } = part {
                    base_image_data = Some(inline_data.data.clone());
                    let image_bytes = BASE64.decode(&inline_data.data)?;
                    fs::write("base_landscape.png", image_bytes)?;
                    println!("✅ Base image saved as: base_landscape.png");
                    break;
                }
            }
        }
    }

    let base_data = match base_image_data {
        Some(data) => data,
        None => {
            println!("❌ Failed to generate base image");
            return Ok(());
        }
    };

    println!();
    println!("🖌️ Step 2: Editing the image...");

    // Example 1: Add elements to the image
    println!("  Adding a red barn to the scene...");
    let edit_response1 = client
        .generate_content()
        .with_user_message(
            "Add a red barn to the left side of this landscape image. \
             The barn should fit naturally into the scene and match \
             the minimalist style. Keep everything else exactly the same.",
        )
        .with_inline_data(&base_data, "image/png")
        .execute()
        .await?;

    save_generated_images(&edit_response1, "landscape_with_barn")?;

    // Example 2: Change the weather/atmosphere
    println!("  Changing the scene to sunset...");
    let edit_response2 = client
        .generate_content()
        .with_user_message(
            "Transform this landscape into a beautiful sunset scene. \
             Change the sky to warm orange and pink colors, add a \
             setting sun, and adjust the lighting to match golden hour. \
             Keep the house and grass but make them glow with sunset light.",
        )
        .with_inline_data(&base_data, "image/png")
        .execute()
        .await?;

    save_generated_images(&edit_response2, "sunset_landscape")?;

    // Example 3: Style transfer
    println!("  Converting to watercolor style...");
    let edit_response3 = client
        .generate_content()
        .with_user_message(
            "Transform this landscape into a watercolor painting style. \
             Preserve the composition but render it with soft, flowing \
             watercolor brushstrokes, gentle color bleeding, and the \
             characteristic transparency of watercolor art.",
        )
        .with_inline_data(&base_data, "image/png")
        .execute()
        .await?;

    save_generated_images(&edit_response3, "watercolor_landscape")?;

    println!();
    println!("🎉 Image editing examples completed!");
    println!("Check the generated files:");
    println!("  - base_landscape.png (original)");
    println!("  - landscape_with_barn_*.png (with added barn)");
    println!("  - sunset_landscape_*.png (sunset version)");
    println!("  - watercolor_landscape_*.png (watercolor style)");

    Ok(())
}
pub fn with_function_response(
    self,
    name: impl Into<String>,
    response: Value,
) -> Self

Add a function response to the request using a JSON value
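Typically used to feed a tool's result back to the model after it issued a function call. A minimal sketch (assuming `client: Gemini`; the payload is fabricated for illustration):

use serde_json::json;

// A real exchange would also replay the model's function-call turn first;
// see the repository examples below for the full pattern.
let builder = client
    .generate_content()
    .with_user_message("What's the weather like in Tokyo right now?")
    .with_function_response("get_weather", json!({
        "temperature": 22,
        "unit": "celsius",
        "condition": "sunny"
    }));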
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let api_key = env::var("GEMINI_API_KEY")?;

    // Create client
    let client = Gemini::new(api_key);

    // Define a weather function
    let get_weather = FunctionDeclaration::new(
        "get_weather",
        "Get the current weather for a location",
        FunctionParameters::object()
            .with_property(
                "location",
                PropertyDetails::string("The city and state, e.g., San Francisco, CA"),
                true,
            )
            .with_property(
                "unit",
                PropertyDetails::enum_type("The unit of temperature", ["celsius", "fahrenheit"]),
                false,
            ),
    );

    // Create a request with function calling
    println!("Sending function call request...");
    let response = client
        .generate_content()
        .with_user_message("What's the weather like in Tokyo right now?")
        .with_function(get_weather)
        .with_function_calling_mode(FunctionCallingMode::Any)
        .execute()
        .await?;

    // Check if there are function calls
    if let Some(function_call) = response.function_calls().first() {
        println!(
            "Function call received: {} with args: {}",
            function_call.name, function_call.args
        );

        // Get parameters from the function call
        let location: String = function_call.get("location")?;
        let unit = function_call
            .get::<String>("unit")
            .unwrap_or_else(|_| String::from("celsius"));

        println!("Location: {}, Unit: {}", location, unit);

        // Simulate function execution (in a real app, this would call a weather API)
        // Create a JSON response object
        let weather_response = serde_json::json!({
            "temperature": 22,
            "unit": unit,
            "condition": "sunny",
            "location": location
        });

        // Continue the conversation with the function result
        // We need to replay the entire conversation with the function response
        println!("Sending function response...");

        // First, need to recreate the original prompt and the model's response
        let mut final_request = client
            .generate_content()
            .with_user_message("What's the weather like in Tokyo right now?");

        // Add the function call from the model's response
        let mut call_content = Content::default();
        call_content.parts = Some(vec![Part::FunctionCall {
            function_call: (*function_call).clone(),
        }]);
        final_request.contents.push(call_content);

        // Now add the function response using the JSON value
        final_request = final_request.with_function_response("get_weather", weather_response);

        // Execute the request
        let final_response = final_request.execute().await?;

        println!("Final response: {}", final_response.text());
    } else {
        println!("No function calls in the response.");
        println!("Response text: {}", response.text());
    }

    Ok(())
}
More examples
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get API key from environment variable
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");

    // Create client
    let client = Gemini::new(api_key);

    println!("--- Meeting Scheduler Function Calling example ---");

    // Define a meeting scheduler function that matches the curl example
    let schedule_meeting = FunctionDeclaration::new(
        "schedule_meeting",
        "Schedules a meeting with specified attendees at a given time and date.",
        FunctionParameters::object()
            .with_property(
                "attendees",
                PropertyDetails::array(
                    "List of people attending the meeting.",
                    PropertyDetails::string("Attendee name"),
                ),
                true,
            )
            .with_property(
                "date",
                PropertyDetails::string("Date of the meeting (e.g., '2024-07-29')"),
                true,
            )
            .with_property(
                "time",
                PropertyDetails::string("Time of the meeting (e.g., '15:00')"),
                true,
            )
            .with_property(
                "topic",
                PropertyDetails::string("The subject or topic of the meeting."),
                true,
            ),
    );

    // Create function tool
    let function_tool = Tool::new(schedule_meeting);

    // Create a request with the tool - matching the curl example
    let response = client
        .generate_content()
        .with_user_message("Schedule a meeting with Bob and Alice for 03/27/2025 at 10:00 AM about the Q3 planning.")
        .with_tool(function_tool.clone())
        .with_function_calling_mode(FunctionCallingMode::Any)
        .execute()
        .await?;

    // Check if there are function calls
    if let Some(function_call) = response.function_calls().first() {
        println!(
            "Function call: {} with args: {}",
            function_call.name, function_call.args
        );

        // Handle the schedule_meeting function
        if function_call.name == "schedule_meeting" {
            let attendees: Vec<String> = function_call.get("attendees")?;
            let date: String = function_call.get("date")?;
            let time: String = function_call.get("time")?;
            let topic: String = function_call.get("topic")?;

            println!("Scheduling meeting:");
            println!("  Attendees: {:?}", attendees);
            println!("  Date: {}", date);
            println!("  Time: {}", time);
            println!("  Topic: {}", topic);

            // Simulate scheduling the meeting
            let meeting_id = format!(
                "meeting_{}",
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .unwrap()
                    .as_secs()
            );

            let function_response = json!({
                "success": true,
                "meeting_id": meeting_id,
                "message": format!("Meeting '{}' scheduled for {} at {} with {:?}", topic, date, time, attendees)
            });

            // Create conversation with function response
            let mut conversation = client.generate_content();

            // 1. Add original user message
            conversation = conversation
                .with_user_message("Schedule a meeting with Bob and Alice for 03/27/2025 at 10:00 AM about the Q3 planning.");

            // 2. Add model message with function call
            let model_function_call =
                FunctionCall::new("schedule_meeting", function_call.args.clone());
            let model_content = Content::function_call(model_function_call).with_role(Role::Model);
            let model_message = Message {
                content: model_content,
                role: Role::Model,
            };
            conversation = conversation.with_message(model_message);

            // 3. Add function response
            conversation =
                conversation.with_function_response("schedule_meeting", function_response);

            // Execute final request
            let final_response = conversation.execute().await?;

            println!("Final response: {}", final_response.text());
        } else {
            println!("Unknown function call: {}", function_call.name);
        }
    } else {
        println!("No function calls in the response.");
        println!("Direct response: {}", response.text());
    }

    Ok(())
}
pub fn with_function_response_str(
    self,
    name: impl Into<String>,
    response: impl Into<String>,
) -> Result<Self, Error>

Add a function response to the request using a JSON string
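Unlike with_function_response, this variant parses a JSON string and therefore returns a Result. A minimal sketch (assuming `client: Gemini`; the payload is fabricated for illustration):

// The `?` propagates a parse error if the string is not valid JSON.
let builder = client
    .generate_content()
    .with_user_message("What's the weather like in Tokyo right now?")
    .with_function_response_str("get_weather", r#"{"temperature": 22, "condition": "sunny"}"#)?;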
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get API key from environment variable
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");

    // Create client
    let client = Gemini::new(api_key);

    println!("--- Tools example with multiple functions ---");

    // Define a weather function
    let get_weather = FunctionDeclaration::new(
        "get_weather",
        "Get the current weather for a location",
        FunctionParameters::object()
            .with_property(
                "location",
                PropertyDetails::string("The city and state, e.g., San Francisco, CA"),
                true,
            )
            .with_property(
                "unit",
                PropertyDetails::enum_type("The unit of temperature", ["celsius", "fahrenheit"]),
                false,
            ),
    );

    // Define a calculator function
    let calculate = FunctionDeclaration::new(
        "calculate",
        "Perform a calculation",
        FunctionParameters::object()
            .with_property(
                "operation",
                PropertyDetails::enum_type(
                    "The mathematical operation to perform",
                    ["add", "subtract", "multiply", "divide"],
                ),
                true,
            )
            .with_property("a", PropertyDetails::number("The first number"), true)
            .with_property("b", PropertyDetails::number("The second number"), true),
    );

    // Create a tool with multiple functions
    let tool = Tool::with_functions(vec![get_weather, calculate]);

    // Create a request with tool functions
    let response = client
        .generate_content()
        .with_system_prompt(
            "You are a helpful assistant that can check weather and perform calculations.",
        )
        .with_user_message("What's 42 times 12?")
        .with_tool(tool)
        .with_function_calling_mode(FunctionCallingMode::Any)
        .execute()
        .await?;

    // Process function calls
    if let Some(function_call) = response.function_calls().first() {
        println!(
            "Function call: {} with args: {}",
            function_call.name, function_call.args
        );

        // Handle different function calls
        match function_call.name.as_str() {
            "calculate" => {
                let operation: String = function_call.get("operation")?;
                let a: f64 = function_call.get("a")?;
                let b: f64 = function_call.get("b")?;

                println!("Calculation: {} {} {}", a, operation, b);

                let result = match operation.as_str() {
                    "add" => a + b,
                    "subtract" => a - b,
                    "multiply" => a * b,
                    "divide" => a / b,
                    _ => panic!("Unknown operation"),
                };

                let function_response = json!({
                    "result": result,
                })
                .to_string();

                // Based on the curl example, we need to structure the conversation properly:
                // 1. A user message with the original query
                // 2. A model message containing the function call
                // 3. A user message containing the function response

                // Construct conversation following the exact curl pattern
                let mut conversation = client.generate_content();

                // 1. Add user message with original query and system prompt
                conversation = conversation
                    .with_system_prompt("You are a helpful assistant that can check weather and perform calculations.")
                    .with_user_message("What's 42 times 12?");

                // 2. Create model content with function call
                let model_content = Content::function_call((*function_call).clone());

                // Add as model message
                let model_message = Message {
                    content: model_content,
                    role: Role::Model,
                };
                conversation = conversation.with_message(model_message);

                // 3. Add user message with function response
                conversation =
                    conversation.with_function_response_str("calculate", function_response)?;

                // Execute the request
                let final_response = conversation.execute().await?;

                println!("Final response: {}", final_response.text());
            }
            "get_weather" => {
                let location: String = function_call.get("location")?;
                let unit = function_call
                    .get::<String>("unit")
                    .unwrap_or_else(|_| String::from("celsius"));

                println!("Weather request for: {}, Unit: {}", location, unit);

                let weather_response = json!({
                    "temperature": 22,
                    "unit": unit,
                    "condition": "sunny"
                })
                .to_string();

                // Based on the curl example, we need to structure the conversation properly:
                // 1. A user message with the original query
                // 2. A model message containing the function call
                // 3. A user message containing the function response

                // Construct conversation following the exact curl pattern
                let mut conversation = client.generate_content();

                // 1. Add user message with original query and system prompt
                conversation = conversation
                    .with_system_prompt("You are a helpful assistant that can check weather and perform calculations.")
                    .with_user_message("What's 42 times 12?");

                // 2. Create model content with function call
                let model_content = Content::function_call((*function_call).clone());

                // Add as model message
                let model_message = Message {
                    content: model_content,
                    role: Role::Model,
                };
                conversation = conversation.with_message(model_message);

                // 3. Add user message with function response
                conversation =
                    conversation.with_function_response_str("get_weather", weather_response)?;

                // Execute the request
                let final_response = conversation.execute().await?;

                println!("Final response: {}", final_response.text());
            }
            _ => println!("Unknown function"),
        }
    } else {
        println!("No function calls in the response.");
        println!("Response: {}", response.text());
    }

    Ok(())
}
pub fn with_message(self, message: Message) -> Self

Add a message to the request
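Useful when the role or content needs to be constructed explicitly rather than through the text helpers. A minimal sketch (assuming `client: Gemini` and an async context, with `Message::user` as used in the batch examples below):

use gemini_rust::Message;

let response = client
    .generate_content()
    .with_message(Message::user("What is the meaning of life?"))
    .execute()
    .await?;
println!("{}", response.text());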
Examples found in repository
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get the API key from the environment
    let api_key = std::env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY not set");

    // Create a new Gemini client
    let gemini = Gemini::new(api_key);

    // Create the first request
    let request1 = gemini
        .generate_content()
        .with_message(Message::user("What is the meaning of life?"))
        .build();

    // Create the second request
    let request2 = gemini
        .generate_content()
        .with_message(Message::user("What is the best programming language?"))
        .build();

    // Create the batch request
    let batch = gemini
        .batch_generate_content_sync()
        .with_request(request1)
        .with_request(request2)
        .execute()
        .await?;

    // Print the batch information
    println!("Batch created successfully!");
    println!("Batch Name: {}", batch.name());

    // Wait for the batch to complete
    println!("Waiting for batch to complete...");
    match batch.wait_for_completion(Duration::from_secs(5)).await {
        Ok(final_status) => {
            // Print the final status
            match final_status {
                BatchStatus::Succeeded { results } => {
                    println!("Batch succeeded!");
                    for item in results {
                        match item {
                            BatchResultItem::Success { key, response } => {
                                println!("--- Response for Key {} ---", key);
                                println!("{}", response.text());
                            }
                            BatchResultItem::Error { key, error } => {
                                println!("--- Error for Key {} ---", key);
                                println!("Code: {}, Message: {}", error.code, error.message);
                                if let Some(details) = &error.details {
                                    println!("Details: {}", details);
                                }
                            }
                        }
                    }
                }
                BatchStatus::Cancelled => {
                    println!("Batch was cancelled.");
                }
                BatchStatus::Expired => {
                    println!("Batch expired.");
                }
                _ => {
                    println!(
                        "Batch finished with an unexpected status: {:?}",
                        final_status
                    );
                }
            }
        }
        Err((_batch, e)) => {
            println!(
                "Batch failed: {}. You can retry with the returned batch.",
                e
            );
            // Here you could retry: batch.wait_for_completion(Duration::from_secs(5)).await, etc.
        }
    }

    Ok(())
}
async fn main() -> Result<()> {
    // Get the API key from the environment
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY must be set");

    // Create the Gemini client
    let gemini = Gemini::new(api_key);

    // Create a batch with multiple requests
    let mut batch_generate_content = gemini
        .batch_generate_content_sync()
        .with_name("batch_cancel_example".to_string());

    // Add several requests to make the batch take some time to process
    for i in 1..=10 {
        let request = gemini
            .generate_content()
            .with_message(Message::user(format!(
                "Write a creative story about a robot learning to paint, part {}. Make it at least 100 words long.",
                i
            )))
            .build();

        batch_generate_content = batch_generate_content.with_request(request);
    }

    // Build and start the batch
    let batch = batch_generate_content.execute().await?;
    println!("Batch created successfully!");
    println!("Batch Name: {}", batch.name());
    println!("Press CTRL-C to cancel the batch operation...");

    // Wrap the batch in an Arc<Mutex<Option<Batch>>> to allow safe sharing
    let batch = Arc::new(Mutex::new(Some(batch)));
    let batch_clone = Arc::clone(&batch);

    // Spawn a task to handle CTRL-C
    let cancel_task = tokio::spawn(async move {
        // Wait for CTRL-C signal
        signal::ctrl_c().await.expect("Failed to listen for CTRL-C");
        println!("Received CTRL-C, canceling batch operation...");

        // Take the batch from the Option, leaving None.
        // The lock is released immediately after this block.
        let mut batch_to_cancel = batch_clone.lock().await;

        if let Some(batch) = batch_to_cancel.take() {
            // Cancel the batch operation
            match batch.cancel().await {
                Ok(()) => {
                    println!("Batch canceled successfully!");
                }
                Err((batch, e)) => {
                    println!("Failed to cancel batch: {}. Retrying...", e);
                    // Retry once
                    match batch.cancel().await {
                        Ok(()) => {
                            println!("Batch canceled successfully on retry!");
                        }
                        Err((_, retry_error)) => {
                            eprintln!("Failed to cancel batch even on retry: {}", retry_error);
                        }
                    }
                }
            }
        } else {
            println!("Batch was already processed.");
        }
    });

    // Wait for a short moment to ensure the cancel task is ready
    tokio::time::sleep(Duration::from_millis(100)).await;

    // Wait for the batch to complete or be canceled
    if let Some(batch) = batch.lock().await.take() {
        println!("Waiting for batch to complete or be canceled...");
        match batch.wait_for_completion(Duration::from_secs(5)).await {
            Ok(final_status) => {
                // Cancel task is no longer needed since batch completed
                cancel_task.abort();

                println!("Batch completed with status: {:?}", final_status);

                // Print some details about the results
                match final_status {
                    gemini_rust::BatchStatus::Succeeded { .. } => {
                        println!("Batch succeeded!");
                    }
                    gemini_rust::BatchStatus::Cancelled => {
                        println!("Batch was cancelled as requested.");
                    }
                    gemini_rust::BatchStatus::Expired => {
                        println!("Batch expired.");
                    }
                    _ => {
                        println!("Batch finished with an unexpected status.");
                    }
                }
            }
            Err((batch, e)) => {
                // This could happen if there was a network error while polling
                println!("Error while waiting for batch completion: {}", e);

                // Try one more time to get the status
                match batch.status().await {
                    Ok(status) => println!("Current batch status: {:?}", status),
                    Err(status_error) => println!("Error getting final status: {}", status_error),
                }
            }
        }
    }

    Ok(())
}
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Create client
14 let client = Gemini::new(api_key);
15
16 println!("--- Meeting Scheduler Function Calling example ---");
17
18 // Define a meeting scheduler function that matches the curl example
19 let schedule_meeting = FunctionDeclaration::new(
20 "schedule_meeting",
21 "Schedules a meeting with specified attendees at a given time and date.",
22 FunctionParameters::object()
23 .with_property(
24 "attendees",
25 PropertyDetails::array(
26 "List of people attending the meeting.",
27 PropertyDetails::string("Attendee name"),
28 ),
29 true,
30 )
31 .with_property(
32 "date",
33 PropertyDetails::string("Date of the meeting (e.g., '2024-07-29')"),
34 true,
35 )
36 .with_property(
37 "time",
38 PropertyDetails::string("Time of the meeting (e.g., '15:00')"),
39 true,
40 )
41 .with_property(
42 "topic",
43 PropertyDetails::string("The subject or topic of the meeting."),
44 true,
45 ),
46 );
47
48 // Create function tool
49 let function_tool = Tool::new(schedule_meeting);
50
51 // Create a request with the tool - matching the curl example
52 let response = client
53 .generate_content()
54 .with_user_message("Schedule a meeting with Bob and Alice for 03/27/2025 at 10:00 AM about the Q3 planning.")
55 .with_tool(function_tool.clone())
56 .with_function_calling_mode(FunctionCallingMode::Any)
57 .execute()
58 .await?;
59
60 // Check if there are function calls
61 if let Some(function_call) = response.function_calls().first() {
62 println!(
63 "Function call: {} with args: {}",
64 function_call.name, function_call.args
65 );
66
67 // Handle the schedule_meeting function
68 if function_call.name == "schedule_meeting" {
69 let attendees: Vec<String> = function_call.get("attendees")?;
70 let date: String = function_call.get("date")?;
71 let time: String = function_call.get("time")?;
72 let topic: String = function_call.get("topic")?;
73
74 println!("Scheduling meeting:");
75 println!(" Attendees: {:?}", attendees);
76 println!(" Date: {}", date);
77 println!(" Time: {}", time);
78 println!(" Topic: {}", topic);
79
80 // Simulate scheduling the meeting
81 let meeting_id = format!(
82 "meeting_{}",
83 std::time::SystemTime::now()
84 .duration_since(std::time::UNIX_EPOCH)
85 .unwrap()
86 .as_secs()
87 );
88
89 let function_response = json!({
90 "success": true,
91 "meeting_id": meeting_id,
92 "message": format!("Meeting '{}' scheduled for {} at {} with {:?}", topic, date, time, attendees)
93 });
94
95 // Create conversation with function response
96 let mut conversation = client.generate_content();
97
98 // 1. Add original user message
99 conversation = conversation
100 .with_user_message("Schedule a meeting with Bob and Alice for 03/27/2025 at 10:00 AM about the Q3 planning.");
101
102 // 2. Add model message with function call
103 let model_function_call =
104 FunctionCall::new("schedule_meeting", function_call.args.clone());
105 let model_content = Content::function_call(model_function_call).with_role(Role::Model);
106 let model_message = Message {
107 content: model_content,
108 role: Role::Model,
109 };
110 conversation = conversation.with_message(model_message);
111
112 // 3. Add function response
113 conversation =
114 conversation.with_function_response("schedule_meeting", function_response);
115
116 // Execute final request
117 let final_response = conversation.execute().await?;
118
119 println!("Final response: {}", final_response.text());
120 } else {
121 println!("Unknown function call: {}", function_call.name);
122 }
123 } else {
124 println!("No function calls in the response.");
125 println!("Direct response: {}", response.text());
126 }
127
128 Ok(())
129}
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Create client
14 let client = Gemini::new(api_key);
15
16 println!("--- Tools example with multiple functions ---");
17
18 // Define a weather function
19 let get_weather = FunctionDeclaration::new(
20 "get_weather",
21 "Get the current weather for a location",
22 FunctionParameters::object()
23 .with_property(
24 "location",
25 PropertyDetails::string("The city and state, e.g., San Francisco, CA"),
26 true,
27 )
28 .with_property(
29 "unit",
30 PropertyDetails::enum_type("The unit of temperature", ["celsius", "fahrenheit"]),
31 false,
32 ),
33 );
34
35 // Define a calculator function
36 let calculate = FunctionDeclaration::new(
37 "calculate",
38 "Perform a calculation",
39 FunctionParameters::object()
40 .with_property(
41 "operation",
42 PropertyDetails::enum_type(
43 "The mathematical operation to perform",
44 ["add", "subtract", "multiply", "divide"],
45 ),
46 true,
47 )
48 .with_property("a", PropertyDetails::number("The first number"), true)
49 .with_property("b", PropertyDetails::number("The second number"), true),
50 );
51
52 // Create a tool with multiple functions
53 let tool = Tool::with_functions(vec![get_weather, calculate]);
54
55 // Create a request with tool functions
56 let response = client
57 .generate_content()
58 .with_system_prompt(
59 "You are a helpful assistant that can check weather and perform calculations.",
60 )
61 .with_user_message("What's 42 times 12?")
62 .with_tool(tool)
63 .with_function_calling_mode(FunctionCallingMode::Any)
64 .execute()
65 .await?;
66
67 // Process function calls
68 if let Some(function_call) = response.function_calls().first() {
69 println!(
70 "Function call: {} with args: {}",
71 function_call.name, function_call.args
72 );
73
74 // Handle different function calls
75 match function_call.name.as_str() {
76 "calculate" => {
77 let operation: String = function_call.get("operation")?;
78 let a: f64 = function_call.get("a")?;
79 let b: f64 = function_call.get("b")?;
80
81 println!("Calculation: {} {} {}", a, operation, b);
82
83 let result = match operation.as_str() {
84 "add" => a + b,
85 "subtract" => a - b,
86 "multiply" => a * b,
87 "divide" => a / b,
88 _ => panic!("Unknown operation"),
89 };
90
91 let function_response = json!({
92 "result": result,
93 })
94 .to_string();
95
96 // Based on the curl example, we need to structure the conversation properly:
97 // 1. A user message with the original query
98 // 2. A model message containing the function call
99 // 3. A user message containing the function response
100
101 // Construct conversation following the exact curl pattern
102 let mut conversation = client.generate_content();
103
104 // 1. Add user message with original query and system prompt
105 conversation = conversation
106 .with_system_prompt("You are a helpful assistant that can check weather and perform calculations.")
107 .with_user_message("What's 42 times 12?");
108
109 // 2. Create model content with function call
110 let model_content = Content::function_call((*function_call).clone());
111
112 // Add as model message
113 let model_message = Message {
114 content: model_content,
115 role: Role::Model,
116 };
117 conversation = conversation.with_message(model_message);
118
119 // 3. Add user message with function response
120 conversation =
121 conversation.with_function_response_str("calculate", function_response)?;
122
123 // Execute the request
124 let final_response = conversation.execute().await?;
125
126 println!("Final response: {}", final_response.text());
127 }
128 "get_weather" => {
129 let location: String = function_call.get("location")?;
130 let unit = function_call
131 .get::<String>("unit")
132 .unwrap_or_else(|_| String::from("celsius"));
133
134 println!("Weather request for: {}, Unit: {}", location, unit);
135
136 let weather_response = json!({
137 "temperature": 22,
138 "unit": unit,
139 "condition": "sunny"
140 })
141 .to_string();
142
143 // Based on the curl example, we need to structure the conversation properly:
144 // 1. A user message with the original query
145 // 2. A model message containing the function call
146 // 3. A user message containing the function response
147
148 // Construct conversation following the exact curl pattern
149 let mut conversation = client.generate_content();
150
151 // 1. Add user message with original query and system prompt
152 conversation = conversation
153 .with_system_prompt("You are a helpful assistant that can check weather and perform calculations.")
154 .with_user_message("What's 42 times 12?");
155
156 // 2. Create model content with function call
157 let model_content = Content::function_call((*function_call).clone());
158
159 // Add as model message
160 let model_message = Message {
161 content: model_content,
162 role: Role::Model,
163 };
164 conversation = conversation.with_message(model_message);
165
166 // 3. Add user message with function response
167 conversation =
168 conversation.with_function_response_str("get_weather", weather_response)?;
169
170 // Execute the request
171 let final_response = conversation.execute().await?;
172
173 println!("Final response: {}", final_response.text());
174 }
175 _ => println!("Unknown function"),
176 }
177 } else {
178 println!("No function calls in the response.");
179 println!("Response: {}", response.text());
180 }
181
182 Ok(())
183}
pub fn with_messages(self, messages: impl IntoIterator<Item = Message>) -> Self
Add multiple messages to the request
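No examples for this method are scraped from the repository. Below is a minimal, hypothetical sketch: the use paths and the #[tokio::main] attribute are assumptions, while Message::user, the Message/Content/Part struct literals, and the builder calls all follow the scraped examples elsewhere on this page.
use gemini_rust::{Content, Gemini, Message, Part, Role};
use std::env;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
    let client = Gemini::new(api_key);

    // Reconstruct a short multi-turn history; the model turn is built the
    // same way the scraped examples build model messages
    let model_reply = Message {
        content: Content {
            parts: Some(vec![Part::Text {
                text: "Nice to meet you, Ada! How can I help?".to_string(),
                thought: None,
            }]),
            role: None,
        },
        role: Role::Model,
    };
    let history = vec![
        Message::user("My name is Ada."),
        model_reply,
        Message::user("What did I say my name was?"),
    ];

    // Attach the whole history in one call instead of repeated with_message
    let response = client
        .generate_content()
        .with_messages(history)
        .execute()
        .await?;

    println!("Response: {}", response.text());
    Ok(())
}
with_messages keeps the builder chain flat when the history is assembled elsewhere, for example when it is loaded from a session store.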
pub fn with_generation_config(self, config: GenerationConfig) -> Self
Set the generation config for the request
Examples found in repository
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8 // Using custom base URL
9 let custom_base_url = "https://generativelanguage.googleapis.com/v1beta/";
10 let client_custom = Gemini::with_model_and_base_url(
11 api_key,
12 "models/gemini-2.5-flash-lite-preview-06-17".to_string(),
13 custom_base_url.to_string(),
14 );
15 println!("Custom base URL client created successfully");
16 let response = client_custom
17 .generate_content()
18 .with_system_prompt("You are a helpful assistant.")
19 .with_user_message("Hello, can you tell me a joke about programming?")
20 .with_generation_config(GenerationConfig {
21 temperature: Some(0.7),
22 max_output_tokens: Some(100),
23 ..Default::default()
24 })
25 .execute()
26 .await?;
27
28 println!("Response: {}", response.text());
29
30 Ok(())
31}
More examples
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Image file path (in the same directory)
14 let image_path = Path::new(file!())
15 .parent()
16 .unwrap_or(Path::new("."))
17 .join("image-example.webp"); // Replace with your image filename
18
19 // Read the image file
20 let mut file = File::open(&image_path)?;
21 let mut buffer = Vec::new();
22 file.read_to_end(&mut buffer)?;
23
24 // Convert to base64
25 let data = general_purpose::STANDARD.encode(&buffer);
26
27 println!("Image loaded: {}", image_path.display());
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 println!("--- Describe Image ---");
33 let response = client
34 .generate_content()
35 .with_inline_data(data, "image/webp")
36 .with_response_mime_type("text/plain")
37 .with_generation_config(GenerationConfig {
38 temperature: Some(0.7),
39 max_output_tokens: Some(400),
40 ..Default::default()
41 })
42 .execute()
43 .await?;
44
45 println!("Response: {}", response.text());
46
47 Ok(())
48}
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 // Using the full generation config
13 println!("--- Using full generation config ---");
14 let response1 = client
15 .generate_content()
16 .with_system_prompt("You are a helpful assistant.")
17 .with_user_message("Write a short poem about Rust programming language.")
18 .with_generation_config(GenerationConfig {
19 temperature: Some(0.9),
20 top_p: Some(0.8),
21 top_k: Some(20),
22 max_output_tokens: Some(200),
23 candidate_count: Some(1),
24 stop_sequences: Some(vec!["END".to_string()]),
25 response_mime_type: None,
26 response_schema: None,
27 thinking_config: None,
28 ..Default::default()
29 })
30 .execute()
31 .await?;
32
33 println!(
34 "Response with high temperature (0.9):\n{}\n",
35 response1.text()
36 );
37
38 // Using individual generation parameters
39 println!("--- Using individual generation parameters ---");
40 let response2 = client
41 .generate_content()
42 .with_system_prompt("You are a helpful assistant.")
43 .with_user_message("Write a short poem about Rust programming language.")
44 .with_temperature(0.2)
45 .with_max_output_tokens(100)
46 .execute()
47 .await?;
48
49 println!(
50 "Response with low temperature (0.2):\n{}\n",
51 response2.text()
52 );
53
54 // Setting multiple parameters individually
55 println!("--- Setting multiple parameters individually ---");
56 let response3 = client
57 .generate_content()
58 .with_system_prompt("You are a helpful assistant.")
59 .with_user_message("List 3 benefits of using Rust.")
60 .with_temperature(0.7)
61 .with_top_p(0.9)
62 .with_max_output_tokens(150)
63 .with_stop_sequences(vec!["4.".to_string()])
64 .execute()
65 .await?;
66
67 println!(
68 "Response with custom parameters and stop sequence:\n{}",
69 response3.text()
70 );
71
72 Ok(())
73}
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Create client with the image generation model
14 // Use Gemini 2.5 Flash Image Preview for image generation
15 let client = Gemini::with_model(api_key, "models/gemini-2.5-flash-image-preview".to_string());
16
17 println!("🎨 Generating image with Gemini...");
18
19 // Generate an image from text description
20 let response = client
21 .generate_content()
22 .with_user_message(
23 "Create a photorealistic image of a cute robot sitting in a garden, \
24 surrounded by colorful flowers. The robot should have a friendly \
25 expression and be made of polished metal. The lighting should be \
26 soft and natural, as if taken during golden hour.",
27 )
28 .with_generation_config(GenerationConfig {
29 temperature: Some(0.8),
30 max_output_tokens: Some(8192),
31 ..Default::default()
32 })
33 .execute()
34 .await?;
35
36 // Process the response
37 let mut images_saved = 0;
38 for candidate in response.candidates.iter() {
39 if let Some(parts) = &candidate.content.parts {
40 for part in parts.iter() {
41 match part {
42 gemini_rust::Part::Text { text, .. } => {
43 println!("📝 Model response: {}", text);
44 }
45 gemini_rust::Part::InlineData { inline_data } => {
46 println!("🖼️ Image generated!");
47 println!(" MIME type: {}", inline_data.mime_type);
48
49 // Decode and save the image
50 match BASE64.decode(&inline_data.data) {
51 Ok(image_bytes) => {
52 images_saved += 1;
53 let filename = format!("robot_garden_{}.png", images_saved);
54 fs::write(&filename, image_bytes)?;
55 println!("✅ Image saved as: {}", filename);
56 }
57 Err(e) => {
58 println!("❌ Failed to decode image: {}", e);
59 }
60 }
61 }
62 _ => {
63 println!("🔍 Other content type in response");
64 }
65 }
66 }
67 }
68 }
69
70 if images_saved == 0 {
71 println!("⚠️ No images were generated. This might be due to:");
72 println!(" - Content policy restrictions");
73 println!(" - API limitations");
74 println!(" - Model configuration issues");
75 } else {
76 println!("🎉 Successfully generated {} image(s)!", images_saved);
77 }
78
79 Ok(())
80}
8async fn main() -> Result<(), Box<dyn std::error::Error>> {
9 // Get API key from environment variable
10 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
11
12 // Create client
13 let client = Gemini::new(api_key);
14
15 // Simple generation
16 println!("--- Simple generation ---");
17 let response = client
18 .generate_content()
19 .with_user_message("Hello, can you tell me a joke about programming?")
20 .with_generation_config(GenerationConfig {
21 temperature: Some(0.7),
22 max_output_tokens: Some(1000),
23 ..Default::default()
24 })
25 .execute()
26 .await?;
27
28 println!("Response: {}", response.text());
29
30 // Function calling example
31 println!("\n--- Function calling example ---");
32
33 // Define a weather function
34 let get_weather = FunctionDeclaration::new(
35 "get_weather",
36 "Get the current weather for a location",
37 FunctionParameters::object()
38 .with_property(
39 "location",
40 PropertyDetails::string("The city and state, e.g., San Francisco, CA"),
41 true,
42 )
43 .with_property(
44 "unit",
45 PropertyDetails::enum_type("The unit of temperature", ["celsius", "fahrenheit"]),
46 false,
47 ),
48 );
49
50 // Create a request with function calling
51 let response = client
52 .generate_content()
53 .with_system_prompt("You are a helpful weather assistant.")
54 .with_user_message("What's the weather like in San Francisco right now?")
55 .with_function(get_weather)
56 .with_function_calling_mode(FunctionCallingMode::Any)
57 .execute()
58 .await?;
59
60 // Check if there are function calls
61 if let Some(function_call) = response.function_calls().first() {
62 println!(
63 "Function call: {} with args: {}",
64 function_call.name, function_call.args
65 );
66
67 // Get parameters from the function call
68 let location: String = function_call.get("location")?;
69 let unit = function_call
70 .get::<String>("unit")
71 .unwrap_or_else(|_| String::from("celsius"));
72
73 println!("Location: {}, Unit: {}", location, unit);
74
75 // Create model content with function call
76 let model_content = Content::function_call((*function_call).clone());
77
78 // Add as model message
79 let model_message = Message {
80 content: model_content,
81 role: Role::Model,
82 };
83
84 // Simulate function execution
85 let weather_response = format!(
86 "{{\"temperature\": 22, \"unit\": \"{}\", \"condition\": \"sunny\"}}",
87 unit
88 );
89
90 // Continue the conversation with the function result
91 let final_response = client
92 .generate_content()
93 .with_system_prompt("You are a helpful weather assistant.")
94 .with_user_message("What's the weather like in San Francisco right now?")
95 .with_message(model_message)
96 .with_function_response_str("get_weather", weather_response)?
97 .with_generation_config(GenerationConfig {
98 temperature: Some(0.7),
99 max_output_tokens: Some(100),
100 ..Default::default()
101 })
102 .execute()
103 .await?;
104
105 println!("Final response: {}", final_response.text());
106 } else {
107 println!("No function calls in the response.");
108 }
109
110 Ok(())
111}
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
11
12 println!("=== Gemini 2.5 Thinking Basic Example ===\n");
13
14 // Example 1: Using default dynamic thinking
15 println!(
16 "--- Example 1: Dynamic thinking (model automatically determines thinking budget) ---"
17 );
18 let response1 = client
19 .generate_content()
20 .with_system_prompt("You are a helpful mathematics assistant.")
21 .with_user_message(
22 "Explain Occam's razor principle and provide a simple example from daily life.",
23 )
24 .with_dynamic_thinking()
25 .with_thoughts_included(true)
26 .execute()
27 .await?;
28
29 // Display thinking process
30 let thoughts = response1.thoughts();
31 if !thoughts.is_empty() {
32 println!("Thinking summary:");
33 for (i, thought) in thoughts.iter().enumerate() {
34 println!("Thought {}: {}\n", i + 1, thought);
35 }
36 }
37
38 println!("Answer: {}\n", response1.text());
39
40 // Display token usage
41 if let Some(usage) = &response1.usage_metadata {
42 println!("Token usage:");
43 println!(" Prompt tokens: {}", usage.prompt_token_count);
44 println!(
45 " Response tokens: {}",
46 usage.candidates_token_count.unwrap_or(0)
47 );
48 if let Some(thinking_tokens) = usage.thoughts_token_count {
49 println!(" Thinking tokens: {}", thinking_tokens);
50 }
51 println!(" Total tokens: {}\n", usage.total_token_count);
52 }
53
54 // Example 2: Set specific thinking budget
55 println!("--- Example 2: Set thinking budget (1024 tokens) ---");
56 let response2 = client
57 .generate_content()
58 .with_system_prompt("You are a helpful programming assistant.")
59 .with_user_message("List 3 main advantages of using the Rust programming language")
60 .with_thinking_budget(1024)
61 .with_thoughts_included(true)
62 .execute()
63 .await?;
64
65 // Display thinking process
66 let thoughts2 = response2.thoughts();
67 if !thoughts2.is_empty() {
68 println!("Thinking summary:");
69 for (i, thought) in thoughts2.iter().enumerate() {
70 println!("Thought {}: {}\n", i + 1, thought);
71 }
72 }
73
74 println!("Answer: {}\n", response2.text());
75
76 // Example 3: Disable thinking feature
77 println!("--- Example 3: Disable thinking feature ---");
78 let response3 = client
79 .generate_content()
80 .with_system_prompt("You are a helpful assistant.")
81 .with_user_message("What is artificial intelligence?")
82 .execute()
83 .await?;
84
85 println!("Answer: {}\n", response3.text());
86
87 // Example 4: Use GenerationConfig to set thinking
88 println!("--- Example 4: Use GenerationConfig to set thinking ---");
89 let thinking_config = ThinkingConfig::new()
90 .with_thinking_budget(2048)
91 .with_thoughts_included(true);
92
93 let generation_config = GenerationConfig {
94 temperature: Some(0.7),
95 max_output_tokens: Some(500),
96 thinking_config: Some(thinking_config),
97 ..Default::default()
98 };
99
100 let response4 = client
101 .generate_content()
102 .with_system_prompt("You are a creative writing assistant.")
103 .with_user_message(
104 "Write the opening of a short story about a robot learning to feel emotions.",
105 )
106 .with_generation_config(generation_config)
107 .execute()
108 .await?;
109
110 // Display thinking process
111 let thoughts4 = response4.thoughts();
112 if !thoughts4.is_empty() {
113 println!("Thinking summary:");
114 for (i, thought) in thoughts4.iter().enumerate() {
115 println!("Thought {}: {}\n", i + 1, thought);
116 }
117 }
118
119 println!("Answer: {}\n", response4.text());
120
121 Ok(())
122}
pub fn with_temperature(self, temperature: f32) -> Self
Set the temperature for the request
Examples found in repository
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 // Using the full generation config
13 println!("--- Using full generation config ---");
14 let response1 = client
15 .generate_content()
16 .with_system_prompt("You are a helpful assistant.")
17 .with_user_message("Write a short poem about Rust programming language.")
18 .with_generation_config(GenerationConfig {
19 temperature: Some(0.9),
20 top_p: Some(0.8),
21 top_k: Some(20),
22 max_output_tokens: Some(200),
23 candidate_count: Some(1),
24 stop_sequences: Some(vec!["END".to_string()]),
25 response_mime_type: None,
26 response_schema: None,
27 thinking_config: None,
28 ..Default::default()
29 })
30 .execute()
31 .await?;
32
33 println!(
34 "Response with high temperature (0.9):\n{}\n",
35 response1.text()
36 );
37
38 // Using individual generation parameters
39 println!("--- Using individual generation parameters ---");
40 let response2 = client
41 .generate_content()
42 .with_system_prompt("You are a helpful assistant.")
43 .with_user_message("Write a short poem about Rust programming language.")
44 .with_temperature(0.2)
45 .with_max_output_tokens(100)
46 .execute()
47 .await?;
48
49 println!(
50 "Response with low temperature (0.2):\n{}\n",
51 response2.text()
52 );
53
54 // Setting multiple parameters individually
55 println!("--- Setting multiple parameters individually ---");
56 let response3 = client
57 .generate_content()
58 .with_system_prompt("You are a helpful assistant.")
59 .with_user_message("List 3 benefits of using Rust.")
60 .with_temperature(0.7)
61 .with_top_p(0.9)
62 .with_max_output_tokens(150)
63 .with_stop_sequences(vec!["4.".to_string()])
64 .execute()
65 .await?;
66
67 println!(
68 "Response with custom parameters and stop sequence:\n{}",
69 response3.text()
70 );
71
72 Ok(())
73}
pub fn with_top_p(self, top_p: f32) -> Self
Set the top-p value for the request
Examples found in repository
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 // Using the full generation config
13 println!("--- Using full generation config ---");
14 let response1 = client
15 .generate_content()
16 .with_system_prompt("You are a helpful assistant.")
17 .with_user_message("Write a short poem about Rust programming language.")
18 .with_generation_config(GenerationConfig {
19 temperature: Some(0.9),
20 top_p: Some(0.8),
21 top_k: Some(20),
22 max_output_tokens: Some(200),
23 candidate_count: Some(1),
24 stop_sequences: Some(vec!["END".to_string()]),
25 response_mime_type: None,
26 response_schema: None,
27 thinking_config: None,
28 ..Default::default()
29 })
30 .execute()
31 .await?;
32
33 println!(
34 "Response with high temperature (0.9):\n{}\n",
35 response1.text()
36 );
37
38 // Using individual generation parameters
39 println!("--- Using individual generation parameters ---");
40 let response2 = client
41 .generate_content()
42 .with_system_prompt("You are a helpful assistant.")
43 .with_user_message("Write a short poem about Rust programming language.")
44 .with_temperature(0.2)
45 .with_max_output_tokens(100)
46 .execute()
47 .await?;
48
49 println!(
50 "Response with low temperature (0.2):\n{}\n",
51 response2.text()
52 );
53
54 // Setting multiple parameters individually
55 println!("--- Setting multiple parameters individually ---");
56 let response3 = client
57 .generate_content()
58 .with_system_prompt("You are a helpful assistant.")
59 .with_user_message("List 3 benefits of using Rust.")
60 .with_temperature(0.7)
61 .with_top_p(0.9)
62 .with_max_output_tokens(150)
63 .with_stop_sequences(vec!["4.".to_string()])
64 .execute()
65 .await?;
66
67 println!(
68 "Response with custom parameters and stop sequence:\n{}",
69 response3.text()
70 );
71
72 Ok(())
73}
pub fn with_top_k(self, top_k: i32) -> Self
Set the top-k value for the request
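No examples for this method are scraped from the repository. A minimal, hypothetical sketch follows (the imports and the tokio attribute are assumptions; the other builder calls appear in the scraped examples above):
use gemini_rust::Gemini;
use std::env;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
    let client = Gemini::new(api_key);

    // Sample only from the 40 most probable tokens at each step;
    // smaller values make the output more deterministic
    let response = client
        .generate_content()
        .with_user_message("Suggest a name for a Rust crate that parses CSV files.")
        .with_temperature(0.8)
        .with_top_k(40)
        .execute()
        .await?;

    println!("Response: {}", response.text());
    Ok(())
}
Top-k combines with temperature and top-p, as in the GenerationConfig example above (top_k: Some(20)).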
pub fn with_max_output_tokens(self, max_output_tokens: i32) -> Self
Set the maximum number of output tokens for the request
Examples found in repository
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 // Using the full generation config
13 println!("--- Using full generation config ---");
14 let response1 = client
15 .generate_content()
16 .with_system_prompt("You are a helpful assistant.")
17 .with_user_message("Write a short poem about Rust programming language.")
18 .with_generation_config(GenerationConfig {
19 temperature: Some(0.9),
20 top_p: Some(0.8),
21 top_k: Some(20),
22 max_output_tokens: Some(200),
23 candidate_count: Some(1),
24 stop_sequences: Some(vec!["END".to_string()]),
25 response_mime_type: None,
26 response_schema: None,
27 thinking_config: None,
28 ..Default::default()
29 })
30 .execute()
31 .await?;
32
33 println!(
34 "Response with high temperature (0.9):\n{}\n",
35 response1.text()
36 );
37
38 // Using individual generation parameters
39 println!("--- Using individual generation parameters ---");
40 let response2 = client
41 .generate_content()
42 .with_system_prompt("You are a helpful assistant.")
43 .with_user_message("Write a short poem about Rust programming language.")
44 .with_temperature(0.2)
45 .with_max_output_tokens(100)
46 .execute()
47 .await?;
48
49 println!(
50 "Response with low temperature (0.2):\n{}\n",
51 response2.text()
52 );
53
54 // Setting multiple parameters individually
55 println!("--- Setting multiple parameters individually ---");
56 let response3 = client
57 .generate_content()
58 .with_system_prompt("You are a helpful assistant.")
59 .with_user_message("List 3 benefits of using Rust.")
60 .with_temperature(0.7)
61 .with_top_p(0.9)
62 .with_max_output_tokens(150)
63 .with_stop_sequences(vec!["4.".to_string()])
64 .execute()
65 .await?;
66
67 println!(
68 "Response with custom parameters and stop sequence:\n{}",
69 response3.text()
70 );
71
72 Ok(())
73}
pub fn with_candidate_count(self, candidate_count: i32) -> Self
Set the candidate count for the request
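No examples for this method are scraped from the repository. A minimal, hypothetical sketch follows (the imports and the tokio attribute are assumptions; iterating response.candidates mirrors the image-generation example above). Note that some models may reject candidate counts greater than one.
use gemini_rust::{Gemini, Part};
use std::env;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
    let client = Gemini::new(api_key);

    // Request two alternative completions in a single call
    let response = client
        .generate_content()
        .with_user_message("Write a one-line tagline for a programming blog.")
        .with_candidate_count(2)
        .execute()
        .await?;

    // Each candidate carries its own content
    for (i, candidate) in response.candidates.iter().enumerate() {
        if let Some(parts) = &candidate.content.parts {
            for part in parts {
                if let Part::Text { text, .. } = part {
                    println!("Candidate {}: {}", i + 1, text);
                }
            }
        }
    }

    Ok(())
}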
pub fn with_stop_sequences(self, stop_sequences: Vec<String>) -> Self
Set the stop sequences for the request
Examples found in repository
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 // Using the full generation config
13 println!("--- Using full generation config ---");
14 let response1 = client
15 .generate_content()
16 .with_system_prompt("You are a helpful assistant.")
17 .with_user_message("Write a short poem about Rust programming language.")
18 .with_generation_config(GenerationConfig {
19 temperature: Some(0.9),
20 top_p: Some(0.8),
21 top_k: Some(20),
22 max_output_tokens: Some(200),
23 candidate_count: Some(1),
24 stop_sequences: Some(vec!["END".to_string()]),
25 response_mime_type: None,
26 response_schema: None,
27 thinking_config: None,
28 ..Default::default()
29 })
30 .execute()
31 .await?;
32
33 println!(
34 "Response with high temperature (0.9):\n{}\n",
35 response1.text()
36 );
37
38 // Using individual generation parameters
39 println!("--- Using individual generation parameters ---");
40 let response2 = client
41 .generate_content()
42 .with_system_prompt("You are a helpful assistant.")
43 .with_user_message("Write a short poem about Rust programming language.")
44 .with_temperature(0.2)
45 .with_max_output_tokens(100)
46 .execute()
47 .await?;
48
49 println!(
50 "Response with low temperature (0.2):\n{}\n",
51 response2.text()
52 );
53
54 // Setting multiple parameters individually
55 println!("--- Setting multiple parameters individually ---");
56 let response3 = client
57 .generate_content()
58 .with_system_prompt("You are a helpful assistant.")
59 .with_user_message("List 3 benefits of using Rust.")
60 .with_temperature(0.7)
61 .with_top_p(0.9)
62 .with_max_output_tokens(150)
63 .with_stop_sequences(vec!["4.".to_string()])
64 .execute()
65 .await?;
66
67 println!(
68 "Response with custom parameters and stop sequence:\n{}",
69 response3.text()
70 );
71
72 Ok(())
73}
pub fn with_response_mime_type(self, mime_type: impl Into<String>) -> Self
Set the response MIME type for the request
Examples found in repository
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Image file path (in the same directory)
14 let image_path = Path::new(file!())
15 .parent()
16 .unwrap_or(Path::new("."))
17 .join("image-example.webp"); // Replace with your image filename
18
19 // Read the image file
20 let mut file = File::open(&image_path)?;
21 let mut buffer = Vec::new();
22 file.read_to_end(&mut buffer)?;
23
24 // Convert to base64
25 let data = general_purpose::STANDARD.encode(&buffer);
26
27 println!("Image loaded: {}", image_path.display());
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 println!("--- Describe Image ---");
33 let response = client
34 .generate_content()
35 .with_inline_data(data, "image/webp")
36 .with_response_mime_type("text/plain")
37 .with_generation_config(GenerationConfig {
38 temperature: Some(0.7),
39 max_output_tokens: Some(400),
40 ..Default::default()
41 })
42 .execute()
43 .await?;
44
45 println!("Response: {}", response.text());
46
47 Ok(())
48}
More examples
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7 // Get API key from environment variable
8 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
9
10 // Create client
11 let client = Gemini::new(api_key);
12
13 // Using response_schema for structured output
14 println!("--- Structured Response Example ---");
15
16 // Define a JSON schema for the response
17 let schema = json!({
18 "type": "object",
19 "properties": {
20 "name": {
21 "type": "string",
22 "description": "Name of the programming language"
23 },
24 "year_created": {
25 "type": "integer",
26 "description": "Year the programming language was created"
27 },
28 "creator": {
29 "type": "string",
30 "description": "Person or organization who created the language"
31 },
32 "key_features": {
33 "type": "array",
34 "items": {
35 "type": "string"
36 },
37 "description": "Key features of the programming language"
38 },
39 "popularity_score": {
40 "type": "integer",
41 "description": "Subjective popularity score from 1-10"
42 }
43 },
44 "required": ["name", "year_created", "creator", "key_features", "popularity_score"]
45 });
46
47 let response = client
48 .generate_content()
49 .with_system_prompt("You provide information about programming languages in JSON format.")
50 .with_user_message("Tell me about the Rust programming language.")
51 .with_response_mime_type("application/json")
52 .with_response_schema(schema)
53 .execute()
54 .await?;
55
56 println!("Structured JSON Response:");
57 println!("{}", response.text());
58
59 // Parse the JSON response
60 let json_response: serde_json::Value = serde_json::from_str(&response.text())?;
61
62 println!("\nAccessing specific fields:");
63 println!("Language: {}", json_response["name"]);
64 println!("Created in: {}", json_response["year_created"]);
65 println!("Created by: {}", json_response["creator"]);
66 println!("Popularity: {}/10", json_response["popularity_score"]);
67
68 println!("\nKey Features:");
69 if let Some(features) = json_response["key_features"].as_array() {
70 for (i, feature) in features.iter().enumerate() {
71 println!("{}. {}", i + 1, feature);
72 }
73 }
74
75 Ok(())
76}
pub fn with_response_schema(self, schema: Value) -> Self
Set the response schema for structured output
Examples found in repository
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7 // Get API key from environment variable
8 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
9
10 // Create client
11 let client = Gemini::new(api_key);
12
13 // Using response_schema for structured output
14 println!("--- Structured Response Example ---");
15
16 // Define a JSON schema for the response
17 let schema = json!({
18 "type": "object",
19 "properties": {
20 "name": {
21 "type": "string",
22 "description": "Name of the programming language"
23 },
24 "year_created": {
25 "type": "integer",
26 "description": "Year the programming language was created"
27 },
28 "creator": {
29 "type": "string",
30 "description": "Person or organization who created the language"
31 },
32 "key_features": {
33 "type": "array",
34 "items": {
35 "type": "string"
36 },
37 "description": "Key features of the programming language"
38 },
39 "popularity_score": {
40 "type": "integer",
41 "description": "Subjective popularity score from 1-10"
42 }
43 },
44 "required": ["name", "year_created", "creator", "key_features", "popularity_score"]
45 });
46
47 let response = client
48 .generate_content()
49 .with_system_prompt("You provide information about programming languages in JSON format.")
50 .with_user_message("Tell me about the Rust programming language.")
51 .with_response_mime_type("application/json")
52 .with_response_schema(schema)
53 .execute()
54 .await?;
55
56 println!("Structured JSON Response:");
57 println!("{}", response.text());
58
59 // Parse the JSON response
60 let json_response: serde_json::Value = serde_json::from_str(&response.text())?;
61
62 println!("\nAccessing specific fields:");
63 println!("Language: {}", json_response["name"]);
64 println!("Created in: {}", json_response["year_created"]);
65 println!("Created by: {}", json_response["creator"]);
66 println!("Popularity: {}/10", json_response["popularity_score"]);
67
68 println!("\nKey Features:");
69 if let Some(features) = json_response["key_features"].as_array() {
70 for (i, feature) in features.iter().enumerate() {
71 println!("{}. {}", i + 1, feature);
72 }
73 }
74
75 Ok(())
76}
pub fn with_tool(self, tool: Tool) -> Self
Add a tool to the request
Examples found in repository
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 println!("--- Google Search tool example ---");
13
14 // Create a Google Search tool
15 let google_search_tool = Tool::google_search();
16
17 // Create a request with Google Search tool
18 let response = client
19 .generate_content()
20 .with_user_message("What is the current Google stock price?")
21 .with_tool(google_search_tool)
22 .execute()
23 .await?;
24
25 println!("Response: {}", response.text());
26
27 Ok(())
28}
More examples
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 println!("--- Curl equivalent with Google Search tool ---");
10
11 // This is equivalent to the curl example:
12 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
13 // -H "Content-Type: application/json" \
14 // -d '{
15 // "contents": [
16 // {
17 // "parts": [
18 // {"text": "What is the current Google stock price?"}
19 // ]
20 // }
21 // ],
22 // "tools": [
23 // {
24 // "google_search": {}
25 // }
26 // ]
27 // }'
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 // Create a content part that matches the JSON in the curl example
33 let text_part = Part::Text {
34 text: "What is the current Google stock price?".to_string(),
35 thought: None,
36 };
37
38 let content = Content {
39 parts: vec![text_part].into(),
40 role: None,
41 };
42
43 // Create a Google Search tool
44 let google_search_tool = Tool::google_search();
45
46 // Add the content and tool directly to the request
47 // This exactly mirrors the JSON structure in the curl example
48 let mut content_builder = client.generate_content();
49 content_builder.contents.push(content);
50 content_builder = content_builder.with_tool(google_search_tool);
51
52 let response = content_builder.execute().await?;
53
54 println!("Response: {}", response.text());
55
56 Ok(())
57}
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Create client
14 let client = Gemini::new(api_key);
15
16 println!("--- Meeting Scheduler Function Calling example ---");
17
18 // Define a meeting scheduler function that matches the curl example
19 let schedule_meeting = FunctionDeclaration::new(
20 "schedule_meeting",
21 "Schedules a meeting with specified attendees at a given time and date.",
22 FunctionParameters::object()
23 .with_property(
24 "attendees",
25 PropertyDetails::array(
26 "List of people attending the meeting.",
27 PropertyDetails::string("Attendee name"),
28 ),
29 true,
30 )
31 .with_property(
32 "date",
33 PropertyDetails::string("Date of the meeting (e.g., '2024-07-29')"),
34 true,
35 )
36 .with_property(
37 "time",
38 PropertyDetails::string("Time of the meeting (e.g., '15:00')"),
39 true,
40 )
41 .with_property(
42 "topic",
43 PropertyDetails::string("The subject or topic of the meeting."),
44 true,
45 ),
46 );
47
48 // Create function tool
49 let function_tool = Tool::new(schedule_meeting);
50
51 // Create a request with the tool - matching the curl example
52 let response = client
53 .generate_content()
54 .with_user_message("Schedule a meeting with Bob and Alice for 03/27/2025 at 10:00 AM about the Q3 planning.")
55 .with_tool(function_tool.clone())
56 .with_function_calling_mode(FunctionCallingMode::Any)
57 .execute()
58 .await?;
59
60 // Check if there are function calls
61 if let Some(function_call) = response.function_calls().first() {
62 println!(
63 "Function call: {} with args: {}",
64 function_call.name, function_call.args
65 );
66
67 // Handle the schedule_meeting function
68 if function_call.name == "schedule_meeting" {
69 let attendees: Vec<String> = function_call.get("attendees")?;
70 let date: String = function_call.get("date")?;
71 let time: String = function_call.get("time")?;
72 let topic: String = function_call.get("topic")?;
73
74 println!("Scheduling meeting:");
75 println!(" Attendees: {:?}", attendees);
76 println!(" Date: {}", date);
77 println!(" Time: {}", time);
78 println!(" Topic: {}", topic);
79
80 // Simulate scheduling the meeting
81 let meeting_id = format!(
82 "meeting_{}",
83 std::time::SystemTime::now()
84 .duration_since(std::time::UNIX_EPOCH)
85 .unwrap()
86 .as_secs()
87 );
88
89 let function_response = json!({
90 "success": true,
91 "meeting_id": meeting_id,
92 "message": format!("Meeting '{}' scheduled for {} at {} with {:?}", topic, date, time, attendees)
93 });
94
95 // Create conversation with function response
96 let mut conversation = client.generate_content();
97
98 // 1. Add original user message
99 conversation = conversation
100 .with_user_message("Schedule a meeting with Bob and Alice for 03/27/2025 at 10:00 AM about the Q3 planning.");
101
102 // 2. Add model message with function call
103 let model_function_call =
104 FunctionCall::new("schedule_meeting", function_call.args.clone());
105 let model_content = Content::function_call(model_function_call).with_role(Role::Model);
106 let model_message = Message {
107 content: model_content,
108 role: Role::Model,
109 };
110 conversation = conversation.with_message(model_message);
111
112 // 3. Add function response
113 conversation =
114 conversation.with_function_response("schedule_meeting", function_response);
115
116 // Execute final request
117 let final_response = conversation.execute().await?;
118
119 println!("Final response: {}", final_response.text());
120 } else {
121 println!("Unknown function call: {}", function_call.name);
122 }
123 } else {
124 println!("No function calls in the response.");
125 println!("Direct response: {}", response.text());
126 }
127
128 Ok(())
129}
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Create client
14 let client = Gemini::new(api_key);
15
16 println!("--- Tools example with multiple functions ---");
17
18 // Define a weather function
19 let get_weather = FunctionDeclaration::new(
20 "get_weather",
21 "Get the current weather for a location",
22 FunctionParameters::object()
23 .with_property(
24 "location",
25 PropertyDetails::string("The city and state, e.g., San Francisco, CA"),
26 true,
27 )
28 .with_property(
29 "unit",
30 PropertyDetails::enum_type("The unit of temperature", ["celsius", "fahrenheit"]),
31 false,
32 ),
33 );
34
35 // Define a calculator function
36 let calculate = FunctionDeclaration::new(
37 "calculate",
38 "Perform a calculation",
39 FunctionParameters::object()
40 .with_property(
41 "operation",
42 PropertyDetails::enum_type(
43 "The mathematical operation to perform",
44 ["add", "subtract", "multiply", "divide"],
45 ),
46 true,
47 )
48 .with_property("a", PropertyDetails::number("The first number"), true)
49 .with_property("b", PropertyDetails::number("The second number"), true),
50 );
51
52 // Create a tool with multiple functions
53 let tool = Tool::with_functions(vec![get_weather, calculate]);
54
55 // Create a request with tool functions
56 let response = client
57 .generate_content()
58 .with_system_prompt(
59 "You are a helpful assistant that can check weather and perform calculations.",
60 )
61 .with_user_message("What's 42 times 12?")
62 .with_tool(tool)
63 .with_function_calling_mode(FunctionCallingMode::Any)
64 .execute()
65 .await?;
66
67 // Process function calls
68 if let Some(function_call) = response.function_calls().first() {
69 println!(
70 "Function call: {} with args: {}",
71 function_call.name, function_call.args
72 );
73
74 // Handle different function calls
75 match function_call.name.as_str() {
76 "calculate" => {
77 let operation: String = function_call.get("operation")?;
78 let a: f64 = function_call.get("a")?;
79 let b: f64 = function_call.get("b")?;
80
81 println!("Calculation: {} {} {}", a, operation, b);
82
83 let result = match operation.as_str() {
84 "add" => a + b,
85 "subtract" => a - b,
86 "multiply" => a * b,
87 "divide" => a / b,
88 _ => panic!("Unknown operation"),
89 };
90
91 let function_response = json!({
92 "result": result,
93 })
94 .to_string();
95
96 // Based on the curl example, we need to structure the conversation properly:
97 // 1. A user message with the original query
98 // 2. A model message containing the function call
99 // 3. A user message containing the function response
100
101 // Construct conversation following the exact curl pattern
102 let mut conversation = client.generate_content();
103
104 // 1. Add user message with original query and system prompt
105 conversation = conversation
106 .with_system_prompt("You are a helpful assistant that can check weather and perform calculations.")
107 .with_user_message("What's 42 times 12?");
108
109 // 2. Create model content with function call
110 let model_content = Content::function_call((*function_call).clone());
111
112 // Add as model message
113 let model_message = Message {
114 content: model_content,
115 role: Role::Model,
116 };
117 conversation = conversation.with_message(model_message);
118
119 // 3. Add user message with function response
120 conversation =
121 conversation.with_function_response_str("calculate", function_response)?;
122
123 // Execute the request
124 let final_response = conversation.execute().await?;
125
126 println!("Final response: {}", final_response.text());
127 }
128 "get_weather" => {
129 let location: String = function_call.get("location")?;
130 let unit = function_call
131 .get::<String>("unit")
132 .unwrap_or_else(|_| String::from("celsius"));
133
134 println!("Weather request for: {}, Unit: {}", location, unit);
135
136 let weather_response = json!({
137 "temperature": 22,
138 "unit": unit,
139 "condition": "sunny"
140 })
141 .to_string();
142
143 // Based on the curl example, we need to structure the conversation properly:
144 // 1. A user message with the original query
145 // 2. A model message containing the function call
146 // 3. A user message containing the function response
147
148 // Construct conversation following the exact curl pattern
149 let mut conversation = client.generate_content();
150
151 // 1. Add user message with original query and system prompt
152 conversation = conversation
153 .with_system_prompt("You are a helpful assistant that can check weather and perform calculations.")
154 .with_user_message("What's 42 times 12?");
155
156 // 2. Create model content with function call
157 let model_content = Content::function_call((*function_call).clone());
158
159 // Add as model message
160 let model_message = Message {
161 content: model_content,
162 role: Role::Model,
163 };
164 conversation = conversation.with_message(model_message);
165
166 // 3. Add user message with function response
167 conversation =
168 conversation.with_function_response_str("get_weather", weather_response)?;
169
170 // Execute the request
171 let final_response = conversation.execute().await?;
172
173 println!("Final response: {}", final_response.text());
174 }
175 _ => println!("Unknown function"),
176 }
177 } else {
178 println!("No function calls in the response.");
179 println!("Response: {}", response.text());
180 }
181
182 Ok(())
183}
pub fn with_function(self, function: FunctionDeclaration) -> Self
Add a function declaration as a tool
Examples found in repository
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 let api_key = env::var("GEMINI_API_KEY")?;
11
12 // Create client
13 let client = Gemini::new(api_key);
14
15 // Define a weather function
16 let get_weather = FunctionDeclaration::new(
17 "get_weather",
18 "Get the current weather for a location",
19 FunctionParameters::object()
20 .with_property(
21 "location",
22 PropertyDetails::string("The city and state, e.g., San Francisco, CA"),
23 true,
24 )
25 .with_property(
26 "unit",
27 PropertyDetails::enum_type("The unit of temperature", ["celsius", "fahrenheit"]),
28 false,
29 ),
30 );
31
32 // Create a request with function calling
33 println!("Sending function call request...");
34 let response = client
35 .generate_content()
36 .with_user_message("What's the weather like in Tokyo right now?")
37 .with_function(get_weather)
38 .with_function_calling_mode(FunctionCallingMode::Any)
39 .execute()
40 .await?;
41
42 // Check if there are function calls
43 if let Some(function_call) = response.function_calls().first() {
44 println!(
45 "Function call received: {} with args: {}",
46 function_call.name, function_call.args
47 );
48
49 // Get parameters from the function call
50 let location: String = function_call.get("location")?;
51 let unit = function_call
52 .get::<String>("unit")
53 .unwrap_or_else(|_| String::from("celsius"));
54
55 println!("Location: {}, Unit: {}", location, unit);
56
57 // Simulate function execution (in a real app, this would call a weather API)
58 // Create a JSON response object
59 let weather_response = serde_json::json!({
60 "temperature": 22,
61 "unit": unit,
62 "condition": "sunny",
63 "location": location
64 });
65
66 // Continue the conversation with the function result
67 // We need to replay the entire conversation with the function response
68 println!("Sending function response...");
69
70 // First, need to recreate the original prompt and the model's response
71 let mut final_request = client
72 .generate_content()
73 .with_user_message("What's the weather like in Tokyo right now?");
74
75 // Add the function call from the model's response
76 let mut call_content = Content::default();
77 call_content.parts = Some(vec![Part::FunctionCall {
78 function_call: (*function_call).clone(),
79 }]);
80 final_request.contents.push(call_content);
81
82 // Now add the function response using the JSON value
83 final_request = final_request.with_function_response("get_weather", weather_response);
84
85 // Execute the request
86 let final_response = final_request.execute().await?;
87
88 println!("Final response: {}", final_response.text());
89 } else {
90 println!("No function calls in the response.");
91 println!("Response text: {}", response.text());
92 }
93
94 Ok(())
95}
More examples
8async fn main() -> Result<(), Box<dyn std::error::Error>> {
9 // Get API key from environment variable
10 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
11
12 // Create client
13 let client = Gemini::new(api_key);
14
15 // Simple generation
16 println!("--- Simple generation ---");
17 let response = client
18 .generate_content()
19 .with_user_message("Hello, can you tell me a joke about programming?")
20 .with_generation_config(GenerationConfig {
21 temperature: Some(0.7),
22 max_output_tokens: Some(1000),
23 ..Default::default()
24 })
25 .execute()
26 .await?;
27
28 println!("Response: {}", response.text());
29
30 // Function calling example
31 println!("\n--- Function calling example ---");
32
33 // Define a weather function
34 let get_weather = FunctionDeclaration::new(
35 "get_weather",
36 "Get the current weather for a location",
37 FunctionParameters::object()
38 .with_property(
39 "location",
40 PropertyDetails::string("The city and state, e.g., San Francisco, CA"),
41 true,
42 )
43 .with_property(
44 "unit",
45 PropertyDetails::enum_type("The unit of temperature", ["celsius", "fahrenheit"]),
46 false,
47 ),
48 );
49
50 // Create a request with function calling
51 let response = client
52 .generate_content()
53 .with_system_prompt("You are a helpful weather assistant.")
54 .with_user_message("What's the weather like in San Francisco right now?")
55 .with_function(get_weather)
56 .with_function_calling_mode(FunctionCallingMode::Any)
57 .execute()
58 .await?;
59
60 // Check if there are function calls
61 if let Some(function_call) = response.function_calls().first() {
62 println!(
63 "Function call: {} with args: {}",
64 function_call.name, function_call.args
65 );
66
67 // Get parameters from the function call
68 let location: String = function_call.get("location")?;
69 let unit = function_call
70 .get::<String>("unit")
71 .unwrap_or_else(|_| String::from("celsius"));
72
73 println!("Location: {}, Unit: {}", location, unit);
74
75 // Create model content with function call
76 let model_content = Content::function_call((*function_call).clone());
77
78 // Add as model message
79 let model_message = Message {
80 content: model_content,
81 role: Role::Model,
82 };
83
84 // Simulate function execution
85 let weather_response = format!(
86 "{{\"temperature\": 22, \"unit\": \"{}\", \"condition\": \"sunny\"}}",
87 unit
88 );
89
90 // Continue the conversation with the function result
91 let final_response = client
92 .generate_content()
93 .with_system_prompt("You are a helpful weather assistant.")
94 .with_user_message("What's the weather like in San Francisco right now?")
95 .with_message(model_message)
96 .with_function_response_str("get_weather", weather_response)?
97 .with_generation_config(GenerationConfig {
98 temperature: Some(0.7),
99 max_output_tokens: Some(100),
100 ..Default::default()
101 })
102 .execute()
103 .await?;
104
105 println!("Final response: {}", final_response.text());
106 } else {
107 println!("No function calls in the response.");
108 }
109
110 Ok(())
111}
8async fn main() -> Result<(), Box<dyn std::error::Error>> {
9 // Get API key from environment variable
10 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
11
12 // Create client
13 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
14
15 println!("=== Gemini 2.5 Thinking Advanced Example ===\n");
16
17 // Example 1: Streaming with thinking
18 println!("--- Example 1: Streaming with thinking ---");
19 let mut stream = client
20 .generate_content()
21 .with_system_prompt("You are a mathematics expert skilled at solving complex mathematical problems.")
22 .with_user_message("Solve this math problem: Find the sum of the first 50 prime numbers. Please explain your solution process in detail.")
23 .with_thinking_budget(2048)
24 .with_thoughts_included(true)
25 .execute_stream()
26 .await?;
27
28 println!("Streaming response:");
29 let mut thoughts_shown = false;
30 while let Some(chunk_result) = stream.next().await {
31 match chunk_result {
32 Ok(chunk) => {
33 // Check if there's thinking content
34 let thoughts = chunk.thoughts();
35 if !thoughts.is_empty() && !thoughts_shown {
36 println!("\nThinking process:");
37 for (i, thought) in thoughts.iter().enumerate() {
38 println!("Thought {}: {}", i + 1, thought);
39 }
40 println!("\nAnswer:");
41 thoughts_shown = true;
42 }
43
44 // Display general text content
45 print!("{}", chunk.text());
46 std::io::Write::flush(&mut std::io::stdout())?;
47 }
48 Err(e) => eprintln!("Streaming error: {}", e),
49 }
50 }
51 println!("\n");
52
53 // Example 2: Thinking combined with function calls
54 println!("--- Example 2: Thinking combined with function calls ---");
55
56 // Define a calculator function
57 let calculator = FunctionDeclaration::new(
58 "calculate",
59 "Perform basic mathematical calculations",
60 FunctionParameters::object()
61 .with_property(
62 "expression",
63 PropertyDetails::string(
64 "The mathematical expression to calculate, e.g., '2 + 3 * 4'",
65 ),
66 true,
67 )
68 .with_property(
69 "operation_type",
70 PropertyDetails::enum_type("Type of calculation", ["arithmetic", "advanced"]),
71 false,
72 ),
73 );
74
75 let response = client
76 .generate_content()
77 .with_system_prompt("You are a mathematics assistant. When calculations are needed, use the provided calculator function.")
78 .with_user_message("Calculate the result of (15 + 25) * 3 - 8 and explain the calculation steps.")
79 .with_function(calculator)
80 .with_thinking_budget(1024)
81 .with_thoughts_included(true)
82 .execute()
83 .await?;
84
85 // Display thinking process
86 let thoughts = response.thoughts();
87 if !thoughts.is_empty() {
88 println!("Thinking process:");
89 for (i, thought) in thoughts.iter().enumerate() {
90 println!("Thought {}: {}\n", i + 1, thought);
91 }
92 }
93
94 // Check for function calls
95 let function_calls = response.function_calls();
96 if !function_calls.is_empty() {
97 println!("Function calls:");
98 for (i, call) in function_calls.iter().enumerate() {
99 println!("Call {}: {} Args: {}", i + 1, call.name, call.args);
100 }
101 println!();
102 }
103
104 println!("Answer: {}\n", response.text());
105
106 // Example 3: Complex reasoning task
107 println!("--- Example 3: Complex reasoning task ---");
108 let complex_response = client
109 .generate_content()
110 .with_system_prompt("You are a logical reasoning expert.")
111 .with_user_message(
112            "There are three people: Alice, Bob, and Carol, who live in red, green, and blue houses respectively.\n\
113            Given:\n\
114            1. The person in the red house owns a cat\n\
115            2. Bob does not live in the green house\n\
116            3. Carol owns a dog\n\
117            4. The green house is to the left of the red house\n\
118            5. Alice does not own a cat\n\
119            Please reason out which color house each person lives in and what pets they own.",
120 )
121 .with_thinking_config(
122 ThinkingConfig::new()
123 .with_thinking_budget(3072)
124 .with_thoughts_included(true),
125 )
126 .execute()
127 .await?;
128
129 // Display thinking process
130 let complex_thoughts = complex_response.thoughts();
131 if !complex_thoughts.is_empty() {
132 println!("Reasoning process:");
133 for (i, thought) in complex_thoughts.iter().enumerate() {
134 println!("Reasoning step {}: {}\n", i + 1, thought);
135 }
136 }
137
138 println!("Conclusion: {}\n", complex_response.text());
139
140 // Display token usage statistics
141 if let Some(usage) = &complex_response.usage_metadata {
142 println!("Token usage statistics:");
143 println!(" Prompt tokens: {}", usage.prompt_token_count);
144 println!(
145 " Response tokens: {}",
146 usage.candidates_token_count.unwrap_or(0)
147 );
148 if let Some(thinking_tokens) = usage.thoughts_token_count {
149 println!(" Thinking tokens: {}", thinking_tokens);
150 }
151 println!(" Total tokens: {}", usage.total_token_count);
152 }
153
154 Ok(())
155}
pub fn with_function_calling_mode(self, mode: FunctionCallingMode) -> Self
Set the function calling mode for the request
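A minimal sketch of the call site, not taken from the repository; `client` and `get_weather` are assumed to be a `Gemini` client and a `FunctionDeclaration` like the ones built in the examples below:

// `client` and `get_weather` are assumed bindings (see the examples below).
let response = client
    .generate_content()
    .with_user_message("What's the weather in Paris right now?")
    .with_function(get_weather)
    // `Any` forces the model to answer with a function call rather than text.
    .with_function_calling_mode(FunctionCallingMode::Any)
    .execute()
    .await?;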
Examples found in repository?
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 let api_key = env::var("GEMINI_API_KEY")?;
11
12 // Create client
13 let client = Gemini::new(api_key);
14
15 // Define a weather function
16 let get_weather = FunctionDeclaration::new(
17 "get_weather",
18 "Get the current weather for a location",
19 FunctionParameters::object()
20 .with_property(
21 "location",
22 PropertyDetails::string("The city and state, e.g., San Francisco, CA"),
23 true,
24 )
25 .with_property(
26 "unit",
27 PropertyDetails::enum_type("The unit of temperature", ["celsius", "fahrenheit"]),
28 false,
29 ),
30 );
31
32 // Create a request with function calling
33 println!("Sending function call request...");
34 let response = client
35 .generate_content()
36 .with_user_message("What's the weather like in Tokyo right now?")
37 .with_function(get_weather)
38 .with_function_calling_mode(FunctionCallingMode::Any)
39 .execute()
40 .await?;
41
42 // Check if there are function calls
43 if let Some(function_call) = response.function_calls().first() {
44 println!(
45 "Function call received: {} with args: {}",
46 function_call.name, function_call.args
47 );
48
49 // Get parameters from the function call
50 let location: String = function_call.get("location")?;
51 let unit = function_call
52 .get::<String>("unit")
53 .unwrap_or_else(|_| String::from("celsius"));
54
55 println!("Location: {}, Unit: {}", location, unit);
56
57 // Simulate function execution (in a real app, this would call a weather API)
58 // Create a JSON response object
59 let weather_response = serde_json::json!({
60 "temperature": 22,
61 "unit": unit,
62 "condition": "sunny",
63 "location": location
64 });
65
66 // Continue the conversation with the function result
67 // We need to replay the entire conversation with the function response
68 println!("Sending function response...");
69
70 // First, need to recreate the original prompt and the model's response
71 let mut final_request = client
72 .generate_content()
73 .with_user_message("What's the weather like in Tokyo right now?");
74
75 // Add the function call from the model's response
76 let mut call_content = Content::default();
77 call_content.parts = Some(vec![Part::FunctionCall {
78 function_call: (*function_call).clone(),
79 }]);
80 final_request.contents.push(call_content);
81
82 // Now add the function response using the JSON value
83 final_request = final_request.with_function_response("get_weather", weather_response);
84
85 // Execute the request
86 let final_response = final_request.execute().await?;
87
88 println!("Final response: {}", final_response.text());
89 } else {
90 println!("No function calls in the response.");
91 println!("Response text: {}", response.text());
92 }
93
94 Ok(())
95}
More examples
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Create client
14 let client = Gemini::new(api_key);
15
16 println!("--- Meeting Scheduler Function Calling example ---");
17
18 // Define a meeting scheduler function that matches the curl example
19 let schedule_meeting = FunctionDeclaration::new(
20 "schedule_meeting",
21 "Schedules a meeting with specified attendees at a given time and date.",
22 FunctionParameters::object()
23 .with_property(
24 "attendees",
25 PropertyDetails::array(
26 "List of people attending the meeting.",
27 PropertyDetails::string("Attendee name"),
28 ),
29 true,
30 )
31 .with_property(
32 "date",
33 PropertyDetails::string("Date of the meeting (e.g., '2024-07-29')"),
34 true,
35 )
36 .with_property(
37 "time",
38 PropertyDetails::string("Time of the meeting (e.g., '15:00')"),
39 true,
40 )
41 .with_property(
42 "topic",
43 PropertyDetails::string("The subject or topic of the meeting."),
44 true,
45 ),
46 );
47
48 // Create function tool
49 let function_tool = Tool::new(schedule_meeting);
50
51 // Create a request with the tool - matching the curl example
52 let response = client
53 .generate_content()
54 .with_user_message("Schedule a meeting with Bob and Alice for 03/27/2025 at 10:00 AM about the Q3 planning.")
55 .with_tool(function_tool.clone())
56 .with_function_calling_mode(FunctionCallingMode::Any)
57 .execute()
58 .await?;
59
60 // Check if there are function calls
61 if let Some(function_call) = response.function_calls().first() {
62 println!(
63 "Function call: {} with args: {}",
64 function_call.name, function_call.args
65 );
66
67 // Handle the schedule_meeting function
68 if function_call.name == "schedule_meeting" {
69 let attendees: Vec<String> = function_call.get("attendees")?;
70 let date: String = function_call.get("date")?;
71 let time: String = function_call.get("time")?;
72 let topic: String = function_call.get("topic")?;
73
74 println!("Scheduling meeting:");
75 println!(" Attendees: {:?}", attendees);
76 println!(" Date: {}", date);
77 println!(" Time: {}", time);
78 println!(" Topic: {}", topic);
79
80 // Simulate scheduling the meeting
81 let meeting_id = format!(
82 "meeting_{}",
83 std::time::SystemTime::now()
84 .duration_since(std::time::UNIX_EPOCH)
85 .unwrap()
86 .as_secs()
87 );
88
89 let function_response = json!({
90 "success": true,
91 "meeting_id": meeting_id,
92 "message": format!("Meeting '{}' scheduled for {} at {} with {:?}", topic, date, time, attendees)
93 });
94
95 // Create conversation with function response
96 let mut conversation = client.generate_content();
97
98 // 1. Add original user message
99 conversation = conversation
100 .with_user_message("Schedule a meeting with Bob and Alice for 03/27/2025 at 10:00 AM about the Q3 planning.");
101
102 // 2. Add model message with function call
103 let model_function_call =
104 FunctionCall::new("schedule_meeting", function_call.args.clone());
105 let model_content = Content::function_call(model_function_call).with_role(Role::Model);
106 let model_message = Message {
107 content: model_content,
108 role: Role::Model,
109 };
110 conversation = conversation.with_message(model_message);
111
112 // 3. Add function response
113 conversation =
114 conversation.with_function_response("schedule_meeting", function_response);
115
116 // Execute final request
117 let final_response = conversation.execute().await?;
118
119 println!("Final response: {}", final_response.text());
120 } else {
121 println!("Unknown function call: {}", function_call.name);
122 }
123 } else {
124 println!("No function calls in the response.");
125 println!("Direct response: {}", response.text());
126 }
127
128 Ok(())
129}
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Create client
14 let client = Gemini::new(api_key);
15
16 println!("--- Tools example with multiple functions ---");
17
18 // Define a weather function
19 let get_weather = FunctionDeclaration::new(
20 "get_weather",
21 "Get the current weather for a location",
22 FunctionParameters::object()
23 .with_property(
24 "location",
25 PropertyDetails::string("The city and state, e.g., San Francisco, CA"),
26 true,
27 )
28 .with_property(
29 "unit",
30 PropertyDetails::enum_type("The unit of temperature", ["celsius", "fahrenheit"]),
31 false,
32 ),
33 );
34
35 // Define a calculator function
36 let calculate = FunctionDeclaration::new(
37 "calculate",
38 "Perform a calculation",
39 FunctionParameters::object()
40 .with_property(
41 "operation",
42 PropertyDetails::enum_type(
43 "The mathematical operation to perform",
44 ["add", "subtract", "multiply", "divide"],
45 ),
46 true,
47 )
48 .with_property("a", PropertyDetails::number("The first number"), true)
49 .with_property("b", PropertyDetails::number("The second number"), true),
50 );
51
52 // Create a tool with multiple functions
53 let tool = Tool::with_functions(vec![get_weather, calculate]);
54
55 // Create a request with tool functions
56 let response = client
57 .generate_content()
58 .with_system_prompt(
59 "You are a helpful assistant that can check weather and perform calculations.",
60 )
61 .with_user_message("What's 42 times 12?")
62 .with_tool(tool)
63 .with_function_calling_mode(FunctionCallingMode::Any)
64 .execute()
65 .await?;
66
67 // Process function calls
68 if let Some(function_call) = response.function_calls().first() {
69 println!(
70 "Function call: {} with args: {}",
71 function_call.name, function_call.args
72 );
73
74 // Handle different function calls
75 match function_call.name.as_str() {
76 "calculate" => {
77 let operation: String = function_call.get("operation")?;
78 let a: f64 = function_call.get("a")?;
79 let b: f64 = function_call.get("b")?;
80
81 println!("Calculation: {} {} {}", a, operation, b);
82
83 let result = match operation.as_str() {
84 "add" => a + b,
85 "subtract" => a - b,
86 "multiply" => a * b,
87 "divide" => a / b,
88 _ => panic!("Unknown operation"),
89 };
90
91 let function_response = json!({
92 "result": result,
93 })
94 .to_string();
95
96 // Based on the curl example, we need to structure the conversation properly:
97 // 1. A user message with the original query
98 // 2. A model message containing the function call
99 // 3. A user message containing the function response
100
101 // Construct conversation following the exact curl pattern
102 let mut conversation = client.generate_content();
103
104 // 1. Add user message with original query and system prompt
105 conversation = conversation
106 .with_system_prompt("You are a helpful assistant that can check weather and perform calculations.")
107 .with_user_message("What's 42 times 12?");
108
109 // 2. Create model content with function call
110 let model_content = Content::function_call((*function_call).clone());
111
112 // Add as model message
113 let model_message = Message {
114 content: model_content,
115 role: Role::Model,
116 };
117 conversation = conversation.with_message(model_message);
118
119 // 3. Add user message with function response
120 conversation =
121 conversation.with_function_response_str("calculate", function_response)?;
122
123 // Execute the request
124 let final_response = conversation.execute().await?;
125
126 println!("Final response: {}", final_response.text());
127 }
128 "get_weather" => {
129 let location: String = function_call.get("location")?;
130 let unit = function_call
131 .get::<String>("unit")
132 .unwrap_or_else(|_| String::from("celsius"));
133
134 println!("Weather request for: {}, Unit: {}", location, unit);
135
136 let weather_response = json!({
137 "temperature": 22,
138 "unit": unit,
139 "condition": "sunny"
140 })
141 .to_string();
142
143 // Based on the curl example, we need to structure the conversation properly:
144 // 1. A user message with the original query
145 // 2. A model message containing the function call
146 // 3. A user message containing the function response
147
148 // Construct conversation following the exact curl pattern
149 let mut conversation = client.generate_content();
150
151 // 1. Add user message with original query and system prompt
152 conversation = conversation
153 .with_system_prompt("You are a helpful assistant that can check weather and perform calculations.")
154 .with_user_message("What's 42 times 12?");
155
156 // 2. Create model content with function call
157 let model_content = Content::function_call((*function_call).clone());
158
159 // Add as model message
160 let model_message = Message {
161 content: model_content,
162 role: Role::Model,
163 };
164 conversation = conversation.with_message(model_message);
165
166 // 3. Add user message with function response
167 conversation =
168 conversation.with_function_response_str("get_weather", weather_response)?;
169
170 // Execute the request
171 let final_response = conversation.execute().await?;
172
173 println!("Final response: {}", final_response.text());
174 }
175 _ => println!("Unknown function"),
176 }
177 } else {
178 println!("No function calls in the response.");
179 println!("Response: {}", response.text());
180 }
181
182 Ok(())
183}
pub fn with_thinking_config(self, thinking_config: ThinkingConfig) -> Self
Set the thinking configuration for the request (Gemini 2.5 series only)
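A minimal sketch, assuming `client` is a `Gemini` client bound to a 2.5-series model as in the example referenced below:

// `client` is an assumed 2.5-series `Gemini` client.
let response = client
    .generate_content()
    .with_user_message("Why is the sky blue?")
    .with_thinking_config(
        ThinkingConfig::new()
            .with_thinking_budget(1024)    // cap internal reasoning at 1024 tokens
            .with_thoughts_included(true), // also return thought summaries
    )
    .execute()
    .await?;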
Examples found in repository?
See the Gemini 2.5 thinking advanced example shown above, whose Example 3 passes a ThinkingConfig to with_thinking_config.
pub fn with_thinking_budget(self, budget: i32) -> Self
Set the thinking budget for the request (Gemini 2.5 series only)
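A minimal sketch; judging by the examples below, this is shorthand for supplying a `ThinkingConfig` with only the budget set (`client` is an assumed 2.5-series `Gemini` client):

// `client` is an assumed 2.5-series `Gemini` client.
let response = client
    .generate_content()
    .with_user_message("Summarize the halting problem in two sentences.")
    .with_thinking_budget(512) // allow up to 512 tokens of internal reasoning
    .execute()
    .await?;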
Examples found in repository?
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
11
12 println!("=== Gemini 2.5 Thinking Basic Example ===\n");
13
14 // Example 1: Using default dynamic thinking
15 println!(
16 "--- Example 1: Dynamic thinking (model automatically determines thinking budget) ---"
17 );
18 let response1 = client
19 .generate_content()
20 .with_system_prompt("You are a helpful mathematics assistant.")
21 .with_user_message(
22 "Explain Occam's razor principle and provide a simple example from daily life.",
23 )
24 .with_dynamic_thinking()
25 .with_thoughts_included(true)
26 .execute()
27 .await?;
28
29 // Display thinking process
30 let thoughts = response1.thoughts();
31 if !thoughts.is_empty() {
32 println!("Thinking summary:");
33 for (i, thought) in thoughts.iter().enumerate() {
34 println!("Thought {}: {}\n", i + 1, thought);
35 }
36 }
37
38 println!("Answer: {}\n", response1.text());
39
40 // Display token usage
41 if let Some(usage) = &response1.usage_metadata {
42 println!("Token usage:");
43 println!(" Prompt tokens: {}", usage.prompt_token_count);
44 println!(
45 " Response tokens: {}",
46 usage.candidates_token_count.unwrap_or(0)
47 );
48 if let Some(thinking_tokens) = usage.thoughts_token_count {
49 println!(" Thinking tokens: {}", thinking_tokens);
50 }
51 println!(" Total tokens: {}\n", usage.total_token_count);
52 }
53
54 // Example 2: Set specific thinking budget
55 println!("--- Example 2: Set thinking budget (1024 tokens) ---");
56 let response2 = client
57 .generate_content()
58 .with_system_prompt("You are a helpful programming assistant.")
59 .with_user_message("List 3 main advantages of using the Rust programming language")
60 .with_thinking_budget(1024)
61 .with_thoughts_included(true)
62 .execute()
63 .await?;
64
65 // Display thinking process
66 let thoughts2 = response2.thoughts();
67 if !thoughts2.is_empty() {
68 println!("Thinking summary:");
69 for (i, thought) in thoughts2.iter().enumerate() {
70 println!("Thought {}: {}\n", i + 1, thought);
71 }
72 }
73
74 println!("Answer: {}\n", response2.text());
75
76 // Example 3: Disable thinking feature
77 println!("--- Example 3: Disable thinking feature ---");
78 let response3 = client
79 .generate_content()
80 .with_system_prompt("You are a helpful assistant.")
81 .with_user_message("What is artificial intelligence?")
82 .execute()
83 .await?;
84
85 println!("Answer: {}\n", response3.text());
86
87 // Example 4: Use GenerationConfig to set thinking
88 println!("--- Example 4: Use GenerationConfig to set thinking ---");
89 let thinking_config = ThinkingConfig::new()
90 .with_thinking_budget(2048)
91 .with_thoughts_included(true);
92
93 let generation_config = GenerationConfig {
94 temperature: Some(0.7),
95 max_output_tokens: Some(500),
96 thinking_config: Some(thinking_config),
97 ..Default::default()
98 };
99
100 let response4 = client
101 .generate_content()
102 .with_system_prompt("You are a creative writing assistant.")
103 .with_user_message(
104 "Write the opening of a short story about a robot learning to feel emotions.",
105 )
106 .with_generation_config(generation_config)
107 .execute()
108 .await?;
109
110 // Display thinking process
111 let thoughts4 = response4.thoughts();
112 if !thoughts4.is_empty() {
113 println!("Thinking summary:");
114 for (i, thought) in thoughts4.iter().enumerate() {
115 println!("Thought {}: {}\n", i + 1, thought);
116 }
117 }
118
119 println!("Answer: {}\n", response4.text());
120
121 Ok(())
122}
More examples
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // This is equivalent to the following curl example:
10 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:generateContent" \
11 // -H "x-goog-api-key: $GEMINI_API_KEY" \
12 // -H 'Content-Type: application/json' \
13 // -X POST \
14 // -d '{
15 // "contents": [
16 // {
17 // "parts": [
18 // {
19 // "text": "Provide a list of the top 3 famous physicists and their major contributions"
20 // }
21 // ]
22 // }
23 // ],
24 // "generationConfig": {
25 // "thinkingConfig": {
26 // "thinkingBudget": 1024,
27 // "includeThoughts": true
28 // }
29 // }
30 // }'
31
32 // Create client
33 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
34
35 println!("=== Thinking Curl Equivalent Example ===\n");
36
37 // Method 1: Using high-level API (simplest approach)
38 println!("--- Method 1: Using high-level API ---");
39
40 let response1 = client
41 .generate_content()
42 .with_user_message(
43 "Provide a list of the top 3 famous physicists and their major contributions",
44 )
45 .with_thinking_budget(1024)
46 .with_thoughts_included(true)
47 .execute()
48 .await?;
49
50 // Display thinking process
51 let thoughts1 = response1.thoughts();
52 if !thoughts1.is_empty() {
53 println!("Thinking summary:");
54 for (i, thought) in thoughts1.iter().enumerate() {
55 println!("Thought {}: {}\n", i + 1, thought);
56 }
57 }
58
59 println!("Answer: {}\n", response1.text());
60
61 // Method 2: Using GenerationConfig to fully match curl example structure
62 println!("--- Method 2: Fully matching curl example structure ---");
63
64 let thinking_config = ThinkingConfig {
65 thinking_budget: Some(1024),
66 include_thoughts: Some(true),
67 };
68
69 let generation_config = GenerationConfig {
70 thinking_config: Some(thinking_config),
71 ..Default::default()
72 };
73
74 let response2 = client
75 .generate_content()
76 .with_user_message(
77 "Provide a list of the top 3 famous physicists and their major contributions",
78 )
79 .with_generation_config(generation_config)
80 .execute()
81 .await?;
82
83 // Display thinking process
84 let thoughts2 = response2.thoughts();
85 if !thoughts2.is_empty() {
86 println!("Thinking summary:");
87 for (i, thought) in thoughts2.iter().enumerate() {
88 println!("Thought {}: {}\n", i + 1, thought);
89 }
90 }
91
92 println!("Answer: {}\n", response2.text());
93
94 // Show token usage
95 if let Some(usage) = &response2.usage_metadata {
96 println!("Token usage:");
97 println!(" Prompt tokens: {}", usage.prompt_token_count);
98 println!(
99 " Response tokens: {}",
100 usage.candidates_token_count.unwrap_or(0)
101 );
102 if let Some(thinking_tokens) = usage.thoughts_token_count {
103 println!(" Thinking tokens: {}", thinking_tokens);
104 }
105 println!(" Total tokens: {}", usage.total_token_count);
106 }
107
108 // Method 3: Demonstrate different thinking budget settings
109 println!("\n--- Method 3: Different thinking budget comparison ---");
110
111 // Thinking disabled
112 println!("Thinking disabled:");
113 let response_no_thinking = client
114 .generate_content()
115 .with_user_message("Explain the basic principles of quantum mechanics")
116 .execute()
117 .await?;
118 println!("Answer: {}\n", response_no_thinking.text());
119
120 // Dynamic thinking
121 println!("Dynamic thinking:");
122 let response_dynamic = client
123 .generate_content()
124 .with_user_message("Explain the basic principles of quantum mechanics")
125 .with_dynamic_thinking()
126 .with_thoughts_included(true)
127 .execute()
128 .await?;
129
130 let thoughts_dynamic = response_dynamic.thoughts();
131 if !thoughts_dynamic.is_empty() {
132 println!("Thinking summary:");
133 for (i, thought) in thoughts_dynamic.iter().enumerate() {
134 println!("Thought {}: {}\n", i + 1, thought);
135 }
136 }
137 println!("Answer: {}\n", response_dynamic.text());
138
139 // High thinking budget
140 println!("High thinking budget (4096 tokens):");
141 let response_high_budget = client
142 .generate_content()
143 .with_user_message("Explain the basic principles of quantum mechanics")
144 .with_thinking_budget(4096)
145 .with_thoughts_included(true)
146 .execute()
147 .await?;
148
149 let thoughts_high = response_high_budget.thoughts();
150 if !thoughts_high.is_empty() {
151 println!("Thinking summary:");
152 for (i, thought) in thoughts_high.iter().enumerate() {
153 println!("Thought {}: {}\n", i + 1, thought);
154 }
155 }
156 println!("Answer: {}", response_high_budget.text());
157
158 Ok(())
159}
pub fn with_dynamic_thinking(self) -> Self
Enable dynamic thinking, letting the model decide its own thinking budget (Gemini 2.5 series only)
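A minimal sketch (`client` is an assumed 2.5-series `Gemini` client):

// `client` is an assumed 2.5-series `Gemini` client.
let response = client
    .generate_content()
    .with_user_message("Plan a three-day trip to Kyoto.")
    .with_dynamic_thinking()      // let the model choose its own thinking budget
    .with_thoughts_included(true) // and return its thought summaries
    .execute()
    .await?;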
Examples found in repository?
See the thinking basic and curl-equivalent examples shown above under with_thinking_budget; both call with_dynamic_thinking.
pub fn with_thoughts_included(self, include: bool) -> Self
Include thought summaries in the response (Gemini 2.5 series only)
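A minimal sketch (`client` is an assumed 2.5-series `Gemini` client); the returned summaries are read back with `thoughts()`:

// `client` is an assumed 2.5-series `Gemini` client.
let response = client
    .generate_content()
    .with_user_message("Prove that the square root of 2 is irrational.")
    .with_thinking_budget(1024)
    .with_thoughts_included(true) // request thought summaries in the response
    .execute()
    .await?;
for (i, thought) in response.thoughts().iter().enumerate() {
    println!("Thought {}: {}", i + 1, thought);
}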
Examples found in repository?
8async fn main() -> Result<(), Box<dyn std::error::Error>> {
9 // Get API key from environment variable
10 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
11
12 // Create client
13 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
14
15 println!("=== Gemini 2.5 Thinking Advanced Example ===\n");
16
17 // Example 1: Streaming with thinking
18 println!("--- Example 1: Streaming with thinking ---");
19 let mut stream = client
20 .generate_content()
21 .with_system_prompt("You are a mathematics expert skilled at solving complex mathematical problems.")
22 .with_user_message("Solve this math problem: Find the sum of the first 50 prime numbers. Please explain your solution process in detail.")
23 .with_thinking_budget(2048)
24 .with_thoughts_included(true)
25 .execute_stream()
26 .await?;
27
28 println!("Streaming response:");
29 let mut thoughts_shown = false;
30 while let Some(chunk_result) = stream.next().await {
31 match chunk_result {
32 Ok(chunk) => {
33 // Check if there's thinking content
34 let thoughts = chunk.thoughts();
35 if !thoughts.is_empty() && !thoughts_shown {
36 println!("\nThinking process:");
37 for (i, thought) in thoughts.iter().enumerate() {
38 println!("Thought {}: {}", i + 1, thought);
39 }
40 println!("\nAnswer:");
41 thoughts_shown = true;
42 }
43
44 // Display general text content
45 print!("{}", chunk.text());
46 std::io::Write::flush(&mut std::io::stdout())?;
47 }
48 Err(e) => eprintln!("Streaming error: {}", e),
49 }
50 }
51 println!("\n");
52
53 // Example 2: Thinking combined with function calls
54 println!("--- Example 2: Thinking combined with function calls ---");
55
56 // Define a calculator function
57 let calculator = FunctionDeclaration::new(
58 "calculate",
59 "Perform basic mathematical calculations",
60 FunctionParameters::object()
61 .with_property(
62 "expression",
63 PropertyDetails::string(
64 "The mathematical expression to calculate, e.g., '2 + 3 * 4'",
65 ),
66 true,
67 )
68 .with_property(
69 "operation_type",
70 PropertyDetails::enum_type("Type of calculation", ["arithmetic", "advanced"]),
71 false,
72 ),
73 );
74
75 let response = client
76 .generate_content()
77 .with_system_prompt("You are a mathematics assistant. When calculations are needed, use the provided calculator function.")
78 .with_user_message("Calculate the result of (15 + 25) * 3 - 8 and explain the calculation steps.")
79 .with_function(calculator)
80 .with_thinking_budget(1024)
81 .with_thoughts_included(true)
82 .execute()
83 .await?;
84
85 // Display thinking process
86 let thoughts = response.thoughts();
87 if !thoughts.is_empty() {
88 println!("Thinking process:");
89 for (i, thought) in thoughts.iter().enumerate() {
90 println!("Thought {}: {}\n", i + 1, thought);
91 }
92 }
93
94 // Check for function calls
95 let function_calls = response.function_calls();
96 if !function_calls.is_empty() {
97 println!("Function calls:");
98 for (i, call) in function_calls.iter().enumerate() {
99 println!("Call {}: {} Args: {}", i + 1, call.name, call.args);
100 }
101 println!();
102 }
103
104 println!("Answer: {}\n", response.text());
105
106 // Example 3: Complex reasoning task
107 println!("--- Example 3: Complex reasoning task ---");
108 let complex_response = client
109 .generate_content()
110 .with_system_prompt("You are a logical reasoning expert.")
111 .with_user_message(
112            "There are three people: Alice, Bob, and Carol, who live in red, green, and blue houses respectively. \
113            Given: \
114            1. The person in the red house owns a cat \
115            2. Bob does not live in the green house \
116            3. Carol owns a dog \
117            4. The green house is to the left of the red house \
118            5. Alice does not own a cat \
119            Please reason out which color house each person lives in and what pets they own.",
120 )
121 .with_thinking_config(
122 ThinkingConfig::new()
123 .with_thinking_budget(3072)
124 .with_thoughts_included(true),
125 )
126 .execute()
127 .await?;
128
129 // Display thinking process
130 let complex_thoughts = complex_response.thoughts();
131 if !complex_thoughts.is_empty() {
132 println!("Reasoning process:");
133 for (i, thought) in complex_thoughts.iter().enumerate() {
134 println!("Reasoning step {}: {}\n", i + 1, thought);
135 }
136 }
137
138 println!("Conclusion: {}\n", complex_response.text());
139
140 // Display token usage statistics
141 if let Some(usage) = &complex_response.usage_metadata {
142 println!("Token usage statistics:");
143 println!(" Prompt tokens: {}", usage.prompt_token_count);
144 println!(
145 " Response tokens: {}",
146 usage.candidates_token_count.unwrap_or(0)
147 );
148 if let Some(thinking_tokens) = usage.thoughts_token_count {
149 println!(" Thinking tokens: {}", thinking_tokens);
150 }
151 println!(" Total tokens: {}", usage.total_token_count);
152 }
153
154 Ok(())
155}
Sourcepub fn with_audio_output(self) -> Self
pub fn with_audio_output(self) -> Self
Enable audio output (text-to-speech)
Sourcepub fn with_speech_config(self, speech_config: SpeechConfig) -> Self
pub fn with_speech_config(self, speech_config: SpeechConfig) -> Self
Set speech configuration for text-to-speech generation
Sourcepub fn with_voice(self, voice_name: impl Into<String>) -> Self
pub fn with_voice(self, voice_name: impl Into<String>) -> Self
Set a single voice for text-to-speech generation
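No repository example is scraped for the speech builder methods above, so here is a minimal hypothetical sketch of a single-voice text-to-speech request. It assumes a `client: Gemini` constructed as in the earlier examples and that "Kore" is a valid prebuilt voice name; see examples/simple_speech_generation.rs for the crate's real usage, including how audio bytes are read from the response.
// Hypothetical sketch (not from the repository): single-voice TTS.
// Assumes `client: Gemini` and that "Kore" names a prebuilt voice.
let response = client
    .generate_content()
    .with_user_message("Please read this sentence aloud.")
    .with_audio_output()
    .with_voice("Kore")
    .execute()
    .await?;
// Extracting the audio payload from `response` is crate-specific;
// see examples/simple_speech_generation.rs for the exact accessor.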
Sourcepub fn with_multi_speaker_config(
self,
speakers: Vec<SpeakerVoiceConfig>,
) -> Self
pub fn with_multi_speaker_config( self, speakers: Vec<SpeakerVoiceConfig>, ) -> Self
Set multi-speaker configuration for text-to-speech generation
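Likewise, a hypothetical multi-speaker sketch: how `SpeakerVoiceConfig` values are built is not shown on this page, so the `build_speaker_configs` helper below is invented purely for illustration; examples/multi_speaker_tts.rs contains the real construction.
// Hypothetical sketch (not from the repository). `build_speaker_configs`
// is an invented placeholder for constructing `SpeakerVoiceConfig` values;
// see examples/multi_speaker_tts.rs for the actual API.
let speakers: Vec<SpeakerVoiceConfig> = build_speaker_configs();
let response = client
    .generate_content()
    .with_user_message("Alice: Hi Bob!\nBob: Hey Alice, how have you been?")
    .with_audio_output()
    .with_multi_speaker_config(speakers)
    .execute()
    .await?;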
Sourcepub fn build(self) -> GenerateContentRequest
pub fn build(self) -> GenerateContentRequest
Examples found in repository?
18async fn main() -> Result<(), Box<dyn std::error::Error>> {
19 // Get the API key from the environment
20 let api_key = std::env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY not set");
21
22 // Create a new Gemini client
23 let gemini = Gemini::new(api_key);
24
25 // Create the first request
26 let request1 = gemini
27 .generate_content()
28 .with_message(Message::user("What is the meaning of life?"))
29 .build();
30
31 // Create the second request
32 let request2 = gemini
33 .generate_content()
34 .with_message(Message::user("What is the best programming language?"))
35 .build();
36
37 // Create the batch request
38 let batch = gemini
39 .batch_generate_content_sync()
40 .with_request(request1)
41 .with_request(request2)
42 .execute()
43 .await?;
44
45 // Print the batch information
46 println!("Batch created successfully!");
47 println!("Batch Name: {}", batch.name());
48
49 // Wait for the batch to complete
50 println!("Waiting for batch to complete...");
51 match batch.wait_for_completion(Duration::from_secs(5)).await {
52 Ok(final_status) => {
53 // Print the final status
54 match final_status {
55 BatchStatus::Succeeded { results } => {
56 println!("Batch succeeded!");
57 for item in results {
58 match item {
59 BatchResultItem::Success { key, response } => {
60 println!("--- Response for Key {} ---", key);
61 println!("{}", response.text());
62 }
63 BatchResultItem::Error { key, error } => {
64 println!("--- Error for Key {} ---", key);
65 println!("Code: {}, Message: {}", error.code, error.message);
66 if let Some(details) = &error.details {
67 println!("Details: {}", details);
68 }
69 }
70 }
71 }
72 }
73 BatchStatus::Cancelled => {
74 println!("Batch was cancelled.");
75 }
76 BatchStatus::Expired => {
77 println!("Batch expired.");
78 }
79 _ => {
80 println!(
81 "Batch finished with an unexpected status: {:?}",
82 final_status
83 );
84 }
85 }
86 }
87 Err((_batch, e)) => {
88 println!(
89 "Batch failed: {}. You can retry with the returned batch.",
90 e
91 );
92 // Here you could retry: batch.wait_for_completion(Duration::from_secs(5)).await, etc.
93 }
94 }
95
96 Ok(())
97}
More examples
15async fn main() -> Result<()> {
16 // Get the API key from the environment
17 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY must be set");
18
19 // Create the Gemini client
20 let gemini = Gemini::new(api_key);
21
22 // Create a batch with multiple requests
23 let mut batch_generate_content = gemini
24 .batch_generate_content_sync()
25 .with_name("batch_cancel_example".to_string());
26
27 // Add several requests to make the batch take some time to process
28 for i in 1..=10 {
29 let request = gemini
30 .generate_content()
31 .with_message(Message::user(format!(
32 "Write a creative story about a robot learning to paint, part {}. Make it at least 100 words long.",
33 i
34 )))
35 .build();
36
37 batch_generate_content = batch_generate_content.with_request(request);
38 }
39
40 // Build and start the batch
41 let batch = batch_generate_content.execute().await?;
42 println!("Batch created successfully!");
43 println!("Batch Name: {}", batch.name());
44 println!("Press CTRL-C to cancel the batch operation...");
45
46 // Wrap the batch in an Arc<Mutex<Option<Batch>>> to allow safe sharing
47 let batch = Arc::new(Mutex::new(Some(batch)));
48 let batch_clone = Arc::clone(&batch);
49
50 // Spawn a task to handle CTRL-C
51 let cancel_task = tokio::spawn(async move {
52 // Wait for CTRL-C signal
53 signal::ctrl_c().await.expect("Failed to listen for CTRL-C");
54 println!("Received CTRL-C, canceling batch operation...");
55
56 // Take the batch from the Option, leaving None.
57 // The lock is released immediately after this block.
58 let mut batch_to_cancel = batch_clone.lock().await;
59
60 if let Some(batch) = batch_to_cancel.take() {
61 // Cancel the batch operation
62 match batch.cancel().await {
63 Ok(()) => {
64 println!("Batch canceled successfully!");
65 }
66 Err((batch, e)) => {
67 println!("Failed to cancel batch: {}. Retrying...", e);
68 // Retry once
69 match batch.cancel().await {
70 Ok(()) => {
71 println!("Batch canceled successfully on retry!");
72 }
73 Err((_, retry_error)) => {
74 eprintln!("Failed to cancel batch even on retry: {}", retry_error);
75 }
76 }
77 }
78 }
79 } else {
80 println!("Batch was already processed.");
81 }
82 });
83
84 // Wait for a short moment to ensure the cancel task is ready
85 tokio::time::sleep(Duration::from_millis(100)).await;
86
87 // Wait for the batch to complete or be canceled
88 if let Some(batch) = batch.lock().await.take() {
89 println!("Waiting for batch to complete or be canceled...");
90 match batch.wait_for_completion(Duration::from_secs(5)).await {
91 Ok(final_status) => {
92 // Cancel task is no longer needed since batch completed
93 cancel_task.abort();
94
95 println!("Batch completed with status: {:?}", final_status);
96
97 // Print some details about the results
98 match final_status {
99 gemini_rust::BatchStatus::Succeeded { .. } => {
100 println!("Batch succeeded!");
101 }
102 gemini_rust::BatchStatus::Cancelled => {
103 println!("Batch was cancelled as requested.");
104 }
105 gemini_rust::BatchStatus::Expired => {
106 println!("Batch expired.");
107 }
108 _ => {
109 println!("Batch finished with an unexpected status.");
110 }
111 }
112 }
113 Err((batch, e)) => {
114 // This could happen if there was a network error while polling
115 println!("Error while waiting for batch completion: {}", e);
116
117 // Try one more time to get the status
118 match batch.status().await {
119 Ok(status) => println!("Current batch status: {:?}", status),
120 Err(status_error) => println!("Error getting final status: {}", status_error),
121 }
122 }
123 }
124 }
125
126 Ok(())
127}
Sourcepub async fn execute(self) -> Result<GenerationResponse>
pub async fn execute(self) -> Result<GenerationResponse>
Execute the request
Examples found in repository?
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 let api_key = env::var("GEMINI_API_KEY")?;
7
8 // Create client with the default model (gemini-2.0-flash)
9 let client = Gemini::new(api_key);
10
11 println!("Sending request to Gemini API...");
12
13 // Simple text completion with minimal content
14 let response = client
15 .generate_content()
16 .with_user_message("Say hello")
17 .execute()
18 .await?;
19
20 println!("Response: {}", response.text());
21
22 Ok(())
23}
More examples
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 println!("--- Google Search tool example ---");
13
14 // Create a Google Search tool
15 let google_search_tool = Tool::google_search();
16
17 // Create a request with Google Search tool
18 let response = client
19 .generate_content()
20 .with_user_message("What is the current Google stock price?")
21 .with_tool(google_search_tool)
22 .execute()
23 .await?;
24
25 println!("Response: {}", response.text());
26
27 Ok(())
28}
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8 // Using custom base URL
9 let custom_base_url = "https://generativelanguage.googleapis.com/v1beta/";
10 let client_custom = Gemini::with_model_and_base_url(
11 api_key,
12 "models/gemini-2.5-flash-lite-preview-06-17".to_string(),
13 custom_base_url.to_string(),
14 );
15 println!("Custom base URL client created successfully");
16 let response = client_custom
17 .generate_content()
18 .with_system_prompt("You are a helpful assistant.")
19 .with_user_message("Hello, can you tell me a joke about programming?")
20 .with_generation_config(GenerationConfig {
21 temperature: Some(0.7),
22 max_output_tokens: Some(100),
23 ..Default::default()
24 })
25 .execute()
26 .await?;
27
28 println!("Response: {}", response.text());
29
30 Ok(())
31}
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7 // Replace with your actual API key
8 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
9
10 // Create a Gemini client
11 let gemini = Gemini::pro(api_key);
12
13 // This example matches the exact curl request format:
14 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
15 // -H 'Content-Type: application/json' \
16 // -d '{
17 // "system_instruction": {
18 // "parts": [
19 // {
20 // "text": "You are a cat. Your name is Neko."
21 // }
22 // ]
23 // },
24 // "contents": [
25 // {
26 // "parts": [
27 // {
28 // "text": "Hello there"
29 // }
30 // ]
31 // }
32 // ]
33 // }'
34 let response = gemini
35 .generate_content()
36 .with_system_instruction("You are a cat. Your name is Neko.")
37 .with_user_message("Hello there")
38 .execute()
39 .await?;
40
41 // Print the response
42 println!("Response: {}", response.text());
43
44 Ok(())
45}
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Image file path (in the same directory)
14 let image_path = Path::new(file!())
15 .parent()
16 .unwrap_or(Path::new("."))
17 .join("image-example.webp"); // Replace with your image filename
18
19 // Read the image file
20 let mut file = File::open(&image_path)?;
21 let mut buffer = Vec::new();
22 file.read_to_end(&mut buffer)?;
23
24 // Convert to base64
25 let data = general_purpose::STANDARD.encode(&buffer);
26
27 println!("Image loaded: {}", image_path.display());
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 println!("--- Describe Image ---");
33 let response = client
34 .generate_content()
35 .with_inline_data(data, "image/webp")
36 .with_response_mime_type("text/plain")
37 .with_generation_config(GenerationConfig {
38 temperature: Some(0.7),
39 max_output_tokens: Some(400),
40 ..Default::default()
41 })
42 .execute()
43 .await?;
44
45 println!("Response: {}", response.text());
46
47 Ok(())
48}
11async fn main() -> Result<(), Box<dyn std::error::Error>> {
12 // Read mp4 video file
13 let mut file = File::open("examples/sample.mp4")?;
14 let mut buffer = Vec::new();
15 file.read_to_end(&mut buffer)?;
16 let b64 = general_purpose::STANDARD.encode(&buffer);
17
18 // Get API key
19 let api_key = env::var("GEMINI_API_KEY")?;
20 let gemini = Gemini::pro(api_key);
21
22 // Example 1: Add mp4 blob using Message struct
23 let video_content = Content::inline_data("video/mp4", b64.clone());
24 let response1 = gemini
25 .generate_content()
26 .with_user_message("Please describe the content of this video (Message example)")
27 .with_message(gemini_rust::Message {
28 content: video_content,
29 role: gemini_rust::Role::User,
30 })
31 .execute()
32 .await?;
33
34 println!("AI description (Message): {}", response1.text());
35
36 // Example 2: Add mp4 blob directly using builder's with_inline_data
37 let response2 = gemini
38 .generate_content()
39 .with_user_message("Please describe the content of this video (with_inline_data example)")
40 .with_inline_data(b64, "video/mp4")
41 .execute()
42 .await?;
43
44 println!("AI description (with_inline_data): {}", response2.text());
45 Ok(())
46}
Additional examples can be found in:
- examples/curl_google_search.rs
- examples/curl_equivalent.rs
- examples/generation_config.rs
- examples/structured_response.rs
- examples/streaming.rs
- examples/simple_image_generation.rs
- examples/advanced.rs
- examples/simple.rs
- examples/thinking_basic.rs
- examples/image_editing.rs
- examples/google_search_with_functions.rs
- examples/simple_speech_generation.rs
- examples/thinking_curl_equivalent.rs
- examples/image_generation.rs
- examples/thinking_advanced.rs
- examples/tools.rs
- examples/multi_speaker_tts.rs
Sourcepub async fn execute_stream(
self,
) -> Result<Pin<Box<dyn Stream<Item = Result<GenerationResponse>> + Send>>>
pub async fn execute_stream( self, ) -> Result<Pin<Box<dyn Stream<Item = Result<GenerationResponse>> + Send>>>
Execute the request with streaming
Examples found in repository?
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7 // Get API key from environment variable
8 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
9
10 // Create client
11 let client = Gemini::new(api_key);
12
13 // Simple streaming generation
14 println!("--- Streaming generation ---");
15
16 let mut stream = client
17 .generate_content()
18 .with_system_prompt("You are a helpful, creative assistant.")
19 .with_user_message("Write a short story about a robot who learns to feel emotions.")
20 .execute_stream()
21 .await?;
22
23 print!("Streaming response: ");
24 while let Some(chunk_result) = stream.next().await {
25 match chunk_result {
26 Ok(chunk) => {
27 print!("{}", chunk.text());
28 std::io::Write::flush(&mut std::io::stdout())?;
29 }
30 Err(e) => eprintln!("Error in stream: {}", e),
31 }
32 }
33 println!("\n");
34
35 // Multi-turn conversation
36 println!("--- Multi-turn conversation ---");
37
38 // First turn
39 let response1 = client
40 .generate_content()
41 .with_system_prompt("You are a helpful travel assistant.")
42 .with_user_message("I'm planning a trip to Japan. What are the best times to visit?")
43 .execute()
44 .await?;
45
46 println!("User: I'm planning a trip to Japan. What are the best times to visit?");
47 println!("Assistant: {}\n", response1.text());
48
49 // Second turn (continuing the conversation)
50 let response2 = client
51 .generate_content()
52 .with_system_prompt("You are a helpful travel assistant.")
53 .with_user_message("I'm planning a trip to Japan. What are the best times to visit?")
54 .with_model_message(response1.text())
55 .with_user_message("What about cherry blossom season? When exactly does that happen?")
56 .execute()
57 .await?;
58
59 println!("User: What about cherry blossom season? When exactly does that happen?");
60 println!("Assistant: {}\n", response2.text());
61
62 // Third turn (continuing the conversation)
63 let response3 = client
64 .generate_content()
65 .with_system_prompt("You are a helpful travel assistant.")
66 .with_user_message("I'm planning a trip to Japan. What are the best times to visit?")
67 .with_model_message(response1.text())
68 .with_user_message("What about cherry blossom season? When exactly does that happen?")
69 .with_model_message(response2.text())
70 .with_user_message("What are some must-visit places in Tokyo?")
71 .execute()
72 .await?;
73
74 println!("User: What are some must-visit places in Tokyo?");
75 println!("Assistant: {}", response3.text());
76
77 Ok(())
78}
More examples
8async fn main() -> Result<(), Box<dyn std::error::Error>> {
9 // Get API key from environment variable
10 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
11
12 // Create client
13 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
14
15 println!("=== Gemini 2.5 Thinking Advanced Example ===\n");
16
17 // Example 1: Streaming with thinking
18 println!("--- Example 1: Streaming with thinking ---");
19 let mut stream = client
20 .generate_content()
21 .with_system_prompt("You are a mathematics expert skilled at solving complex mathematical problems.")
22 .with_user_message("Solve this math problem: Find the sum of the first 50 prime numbers. Please explain your solution process in detail.")
23 .with_thinking_budget(2048)
24 .with_thoughts_included(true)
25 .execute_stream()
26 .await?;
27
28 println!("Streaming response:");
29 let mut thoughts_shown = false;
30 while let Some(chunk_result) = stream.next().await {
31 match chunk_result {
32 Ok(chunk) => {
33 // Check if there's thinking content
34 let thoughts = chunk.thoughts();
35 if !thoughts.is_empty() && !thoughts_shown {
36 println!("\nThinking process:");
37 for (i, thought) in thoughts.iter().enumerate() {
38 println!("Thought {}: {}", i + 1, thought);
39 }
40 println!("\nAnswer:");
41 thoughts_shown = true;
42 }
43
44 // Display general text content
45 print!("{}", chunk.text());
46 std::io::Write::flush(&mut std::io::stdout())?;
47 }
48 Err(e) => eprintln!("Streaming error: {}", e),
49 }
50 }
51 println!("\n");
52
53 // Example 2: Thinking combined with function calls
54 println!("--- Example 2: Thinking combined with function calls ---");
55
56 // Define a calculator function
57 let calculator = FunctionDeclaration::new(
58 "calculate",
59 "Perform basic mathematical calculations",
60 FunctionParameters::object()
61 .with_property(
62 "expression",
63 PropertyDetails::string(
64 "The mathematical expression to calculate, e.g., '2 + 3 * 4'",
65 ),
66 true,
67 )
68 .with_property(
69 "operation_type",
70 PropertyDetails::enum_type("Type of calculation", ["arithmetic", "advanced"]),
71 false,
72 ),
73 );
74
75 let response = client
76 .generate_content()
77 .with_system_prompt("You are a mathematics assistant. When calculations are needed, use the provided calculator function.")
78 .with_user_message("Calculate the result of (15 + 25) * 3 - 8 and explain the calculation steps.")
79 .with_function(calculator)
80 .with_thinking_budget(1024)
81 .with_thoughts_included(true)
82 .execute()
83 .await?;
84
85 // Display thinking process
86 let thoughts = response.thoughts();
87 if !thoughts.is_empty() {
88 println!("Thinking process:");
89 for (i, thought) in thoughts.iter().enumerate() {
90 println!("Thought {}: {}\n", i + 1, thought);
91 }
92 }
93
94 // Check for function calls
95 let function_calls = response.function_calls();
96 if !function_calls.is_empty() {
97 println!("Function calls:");
98 for (i, call) in function_calls.iter().enumerate() {
99 println!("Call {}: {} Args: {}", i + 1, call.name, call.args);
100 }
101 println!();
102 }
103
104 println!("Answer: {}\n", response.text());
105
106 // Example 3: Complex reasoning task
107 println!("--- Example 3: Complex reasoning task ---");
108 let complex_response = client
109 .generate_content()
110 .with_system_prompt("You are a logical reasoning expert.")
111 .with_user_message(
112            "There are three people: Alice, Bob, and Carol, who live in red, green, and blue houses respectively. \
113            Given: \
114            1. The person in the red house owns a cat \
115            2. Bob does not live in the green house \
116            3. Carol owns a dog \
117            4. The green house is to the left of the red house \
118            5. Alice does not own a cat \
119            Please reason out which color house each person lives in and what pets they own.",
120 )
121 .with_thinking_config(
122 ThinkingConfig::new()
123 .with_thinking_budget(3072)
124 .with_thoughts_included(true),
125 )
126 .execute()
127 .await?;
128
129 // Display thinking process
130 let complex_thoughts = complex_response.thoughts();
131 if !complex_thoughts.is_empty() {
132 println!("Reasoning process:");
133 for (i, thought) in complex_thoughts.iter().enumerate() {
134 println!("Reasoning step {}: {}\n", i + 1, thought);
135 }
136 }
137
138 println!("Conclusion: {}\n", complex_response.text());
139
140 // Display token usage statistics
141 if let Some(usage) = &complex_response.usage_metadata {
142 println!("Token usage statistics:");
143 println!(" Prompt tokens: {}", usage.prompt_token_count);
144 println!(
145 " Response tokens: {}",
146 usage.candidates_token_count.unwrap_or(0)
147 );
148 if let Some(thinking_tokens) = usage.thoughts_token_count {
149 println!(" Thinking tokens: {}", thinking_tokens);
150 }
151 println!(" Total tokens: {}", usage.total_token_count);
152 }
153
154 Ok(())
155}