pub struct GenerationResponse {
pub candidates: Vec<Candidate>,
pub prompt_feedback: Option<PromptFeedback>,
pub usage_metadata: Option<UsageMetadata>,
pub model_version: Option<String>,
pub response_id: Option<String>,
}
Response from the Gemini API for content generation.
Fields
candidates: Vec<Candidate>
The candidates generated
prompt_feedback: Option<PromptFeedback>
The prompt feedback
usage_metadata: Option<UsageMetadata>
Usage metadata
model_version: Option<String>
Model version used
response_id: Option<String>
Response ID
Implementations
impl GenerationResponse
pub fn text(&self) -> String
Get the text of the first candidate
Examples found in repository?
examples/test_api.rs (line 20)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 let api_key = env::var("GEMINI_API_KEY")?;
7
8 // Create client with the default model (gemini-2.0-flash)
9 let client = Gemini::new(api_key);
10
11 println!("Sending request to Gemini API...");
12
13 // Simple text completion with minimal content
14 let response = client
15 .generate_content()
16 .with_user_message("Say hello")
17 .execute()
18 .await?;
19
20 println!("Response: {}", response.text());
21
22 Ok(())
23}
More examples
examples/google_search.rs (line 25)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 println!("--- Google Search tool example ---");
13
14 // Create a Google Search tool
15 let google_search_tool = Tool::google_search();
16
17 // Create a request with Google Search tool
18 let response = client
19 .generate_content()
20 .with_user_message("What is the current Google stock price?")
21 .with_tool(google_search_tool)
22 .execute()
23 .await?;
24
25 println!("Response: {}", response.text());
26
27 Ok(())
28}
examples/custom_base_url.rs (line 28)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8 // Using custom base URL
9 let custom_base_url = "https://generativelanguage.googleapis.com/v1beta/";
10 let client_custom = Gemini::with_model_and_base_url(
11 api_key,
12 "models/gemini-2.5-flash-lite-preview-06-17".to_string(),
13 custom_base_url.to_string(),
14 );
15 println!("Custom base URL client created successfully");
16 let response = client_custom
17 .generate_content()
18 .with_system_prompt("You are a helpful assistant.")
19 .with_user_message("Hello, can you tell me a joke about programming?")
20 .with_generation_config(GenerationConfig {
21 temperature: Some(0.7),
22 max_output_tokens: Some(100),
23 ..Default::default()
24 })
25 .execute()
26 .await?;
27
28 println!("Response: {}", response.text());
29
30 Ok(())
31}
examples/gemini_pro_example.rs (line 42)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7 // Replace with your actual API key
8 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
9
10 // Create a Gemini client
11 let gemini = Gemini::pro(api_key);
12
13 // This example matches the exact curl request format:
14 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
15 // -H 'Content-Type: application/json' \
16 // -d '{
17 // "system_instruction": {
18 // "parts": [
19 // {
20 // "text": "You are a cat. Your name is Neko."
21 // }
22 // ]
23 // },
24 // "contents": [
25 // {
26 // "parts": [
27 // {
28 // "text": "Hello there"
29 // }
30 // ]
31 // }
32 // ]
33 // }'
34 let response = gemini
35 .generate_content()
36 .with_system_instruction("You are a cat. Your name is Neko.")
37 .with_user_message("Hello there")
38 .execute()
39 .await?;
40
41 // Print the response
42 println!("Response: {}", response.text());
43
44 Ok(())
45}
examples/blob.rs (line 45)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Image file path (in the same directory)
14 let image_path = Path::new(file!())
15 .parent()
16 .unwrap_or(Path::new("."))
17 .join("image-example.webp"); // Replace with your image filename
18
19 // Read the image file
20 let mut file = File::open(&image_path)?;
21 let mut buffer = Vec::new();
22 file.read_to_end(&mut buffer)?;
23
24 // Convert to base64
25 let data = general_purpose::STANDARD.encode(&buffer);
26
27 println!("Image loaded: {}", image_path.display());
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 println!("--- Describe Image ---");
33 let response = client
34 .generate_content()
35 .with_inline_data(data, "image/webp")
36 .with_response_mime_type("text/plain")
37 .with_generation_config(GenerationConfig {
38 temperature: Some(0.7),
39 max_output_tokens: Some(400),
40 ..Default::default()
41 })
42 .execute()
43 .await?;
44
45 println!("Response: {}", response.text());
46
47 Ok(())
48}
examples/mp4_describe.rs (line 34)
11async fn main() -> Result<(), Box<dyn std::error::Error>> {
12 // Read mp4 video file
13 let mut file = File::open("examples/sample.mp4")?;
14 let mut buffer = Vec::new();
15 file.read_to_end(&mut buffer)?;
16 let b64 = general_purpose::STANDARD.encode(&buffer);
17
18 // Get API key
19 let api_key = env::var("GEMINI_API_KEY")?;
20 let gemini = Gemini::pro(api_key);
21
22 // Example 1: Add mp4 blob using Message struct
23 let video_content = Content::inline_data("video/mp4", b64.clone());
24 let response1 = gemini
25 .generate_content()
26 .with_user_message("Please describe the content of this video (Message example)")
27 .with_message(gemini_rust::Message {
28 content: video_content,
29 role: gemini_rust::Role::User,
30 })
31 .execute()
32 .await?;
33
34 println!("AI description (Message): {}", response1.text());
35
36 // Example 2: Add mp4 blob directly using builder's with_inline_data
37 let response2 = gemini
38 .generate_content()
39 .with_user_message("Please describe the content of this video (with_inline_data example)")
40 .with_inline_data(b64, "video/mp4")
41 .execute()
42 .await?;
43
44 println!("AI description (with_inline_data): {}", response2.text());
45 Ok(())
46}
Additional examples can be found in:
- examples/curl_google_search.rs
- examples/curl_equivalent.rs
- examples/generation_config.rs
- examples/structured_response.rs
- examples/streaming.rs
- examples/batch_generate.rs
- examples/advanced.rs
- examples/simple.rs
- examples/thinking_basic.rs
- examples/google_search_with_functions.rs
- examples/thinking_curl_equivalent.rs
- examples/thinking_advanced.rs
- examples/tools.rs
pub fn function_calls(&self) -> Vec<&FunctionCall>
Get function calls from the response
Examples found in repository?
examples/advanced.rs (line 43)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 let api_key = env::var("GEMINI_API_KEY")?;
11
12 // Create client
13 let client = Gemini::new(api_key);
14
15 // Define a weather function
16 let get_weather = FunctionDeclaration::new(
17 "get_weather",
18 "Get the current weather for a location",
19 FunctionParameters::object()
20 .with_property(
21 "location",
22 PropertyDetails::string("The city and state, e.g., San Francisco, CA"),
23 true,
24 )
25 .with_property(
26 "unit",
27 PropertyDetails::enum_type("The unit of temperature", ["celsius", "fahrenheit"]),
28 false,
29 ),
30 );
31
32 // Create a request with function calling
33 println!("Sending function call request...");
34 let response = client
35 .generate_content()
36 .with_user_message("What's the weather like in Tokyo right now?")
37 .with_function(get_weather)
38 .with_function_calling_mode(FunctionCallingMode::Any)
39 .execute()
40 .await?;
41
42 // Check if there are function calls
43 if let Some(function_call) = response.function_calls().first() {
44 println!(
45 "Function call received: {} with args: {}",
46 function_call.name, function_call.args
47 );
48
49 // Get parameters from the function call
50 let location: String = function_call.get("location")?;
51 let unit = function_call
52 .get::<String>("unit")
53 .unwrap_or_else(|_| String::from("celsius"));
54
55 println!("Location: {}, Unit: {}", location, unit);
56
57 // Simulate function execution (in a real app, this would call a weather API)
58 // Create a JSON response object
59 let weather_response = serde_json::json!({
60 "temperature": 22,
61 "unit": unit,
62 "condition": "sunny",
63 "location": location
64 });
65
66 // Continue the conversation with the function result
67 // We need to replay the entire conversation with the function response
68 println!("Sending function response...");
69
70 // First, need to recreate the original prompt and the model's response
71 let mut final_request = client
72 .generate_content()
73 .with_user_message("What's the weather like in Tokyo right now?");
74
75 // Add the function call from the model's response
76 let mut call_content = Content::default();
77 call_content.parts = Some(vec![Part::FunctionCall {
78 function_call: (*function_call).clone(),
79 }]);
80 final_request.contents.push(call_content);
81
82 // Now add the function response using the JSON value
83 final_request = final_request.with_function_response("get_weather", weather_response);
84
85 // Execute the request
86 let final_response = final_request.execute().await?;
87
88 println!("Final response: {}", final_response.text());
89 } else {
90 println!("No function calls in the response.");
91 println!("Response text: {}", response.text());
92 }
93
94 Ok(())
95}
More examples
examples/simple.rs (line 61)
8async fn main() -> Result<(), Box<dyn std::error::Error>> {
9 // Get API key from environment variable
10 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
11
12 // Create client
13 let client = Gemini::new(api_key);
14
15 // Simple generation
16 println!("--- Simple generation ---");
17 let response = client
18 .generate_content()
19 .with_user_message("Hello, can you tell me a joke about programming?")
20 .with_generation_config(GenerationConfig {
21 temperature: Some(0.7),
22 max_output_tokens: Some(1000),
23 ..Default::default()
24 })
25 .execute()
26 .await?;
27
28 println!("Response: {}", response.text());
29
30 // Function calling example
31 println!("\n--- Function calling example ---");
32
33 // Define a weather function
34 let get_weather = FunctionDeclaration::new(
35 "get_weather",
36 "Get the current weather for a location",
37 FunctionParameters::object()
38 .with_property(
39 "location",
40 PropertyDetails::string("The city and state, e.g., San Francisco, CA"),
41 true,
42 )
43 .with_property(
44 "unit",
45 PropertyDetails::enum_type("The unit of temperature", ["celsius", "fahrenheit"]),
46 false,
47 ),
48 );
49
50 // Create a request with function calling
51 let response = client
52 .generate_content()
53 .with_system_prompt("You are a helpful weather assistant.")
54 .with_user_message("What's the weather like in San Francisco right now?")
55 .with_function(get_weather)
56 .with_function_calling_mode(FunctionCallingMode::Any)
57 .execute()
58 .await?;
59
60 // Check if there are function calls
61 if let Some(function_call) = response.function_calls().first() {
62 println!(
63 "Function call: {} with args: {}",
64 function_call.name, function_call.args
65 );
66
67 // Get parameters from the function call
68 let location: String = function_call.get("location")?;
69 let unit = function_call
70 .get::<String>("unit")
71 .unwrap_or_else(|_| String::from("celsius"));
72
73 println!("Location: {}, Unit: {}", location, unit);
74
75 // Create model content with function call
76 let model_content = Content::function_call((*function_call).clone());
77
78 // Add as model message
79 let model_message = Message {
80 content: model_content,
81 role: Role::Model,
82 };
83
84 // Simulate function execution
85 let weather_response = format!(
86 "{{\"temperature\": 22, \"unit\": \"{}\", \"condition\": \"sunny\"}}",
87 unit
88 );
89
90 // Continue the conversation with the function result
91 let final_response = client
92 .generate_content()
93 .with_system_prompt("You are a helpful weather assistant.")
94 .with_user_message("What's the weather like in San Francisco right now?")
95 .with_message(model_message)
96 .with_function_response_str("get_weather", weather_response)?
97 .with_generation_config(GenerationConfig {
98 temperature: Some(0.7),
99 max_output_tokens: Some(100),
100 ..Default::default()
101 })
102 .execute()
103 .await?;
104
105 println!("Final response: {}", final_response.text());
106 } else {
107 println!("No function calls in the response.");
108 }
109
110 Ok(())
111}
examples/google_search_with_functions.rs (line 61)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Create client
14 let client = Gemini::new(api_key);
15
16 println!("--- Meeting Scheduler Function Calling example ---");
17
18 // Define a meeting scheduler function that matches the curl example
19 let schedule_meeting = FunctionDeclaration::new(
20 "schedule_meeting",
21 "Schedules a meeting with specified attendees at a given time and date.",
22 FunctionParameters::object()
23 .with_property(
24 "attendees",
25 PropertyDetails::array(
26 "List of people attending the meeting.",
27 PropertyDetails::string("Attendee name"),
28 ),
29 true,
30 )
31 .with_property(
32 "date",
33 PropertyDetails::string("Date of the meeting (e.g., '2024-07-29')"),
34 true,
35 )
36 .with_property(
37 "time",
38 PropertyDetails::string("Time of the meeting (e.g., '15:00')"),
39 true,
40 )
41 .with_property(
42 "topic",
43 PropertyDetails::string("The subject or topic of the meeting."),
44 true,
45 ),
46 );
47
48 // Create function tool
49 let function_tool = Tool::new(schedule_meeting);
50
51 // Create a request with the tool - matching the curl example
52 let response = client
53 .generate_content()
54 .with_user_message("Schedule a meeting with Bob and Alice for 03/27/2025 at 10:00 AM about the Q3 planning.")
55 .with_tool(function_tool.clone())
56 .with_function_calling_mode(FunctionCallingMode::Any)
57 .execute()
58 .await?;
59
60 // Check if there are function calls
61 if let Some(function_call) = response.function_calls().first() {
62 println!(
63 "Function call: {} with args: {}",
64 function_call.name, function_call.args
65 );
66
67 // Handle the schedule_meeting function
68 if function_call.name == "schedule_meeting" {
69 let attendees: Vec<String> = function_call.get("attendees")?;
70 let date: String = function_call.get("date")?;
71 let time: String = function_call.get("time")?;
72 let topic: String = function_call.get("topic")?;
73
74 println!("Scheduling meeting:");
75 println!(" Attendees: {:?}", attendees);
76 println!(" Date: {}", date);
77 println!(" Time: {}", time);
78 println!(" Topic: {}", topic);
79
80 // Simulate scheduling the meeting
81 let meeting_id = format!(
82 "meeting_{}",
83 std::time::SystemTime::now()
84 .duration_since(std::time::UNIX_EPOCH)
85 .unwrap()
86 .as_secs()
87 );
88
89 let function_response = json!({
90 "success": true,
91 "meeting_id": meeting_id,
92 "message": format!("Meeting '{}' scheduled for {} at {} with {:?}", topic, date, time, attendees)
93 });
94
95 // Create conversation with function response
96 let mut conversation = client.generate_content();
97
98 // 1. Add original user message
99 conversation = conversation
100 .with_user_message("Schedule a meeting with Bob and Alice for 03/27/2025 at 10:00 AM about the Q3 planning.");
101
102 // 2. Add model message with function call
103 let model_function_call =
104 FunctionCall::new("schedule_meeting", function_call.args.clone());
105 let model_content = Content::function_call(model_function_call).with_role(Role::Model);
106 let model_message = Message {
107 content: model_content,
108 role: Role::Model,
109 };
110 conversation = conversation.with_message(model_message);
111
112 // 3. Add function response
113 conversation =
114 conversation.with_function_response("schedule_meeting", function_response);
115
116 // Execute final request
117 let final_response = conversation.execute().await?;
118
119 println!("Final response: {}", final_response.text());
120 } else {
121 println!("Unknown function call: {}", function_call.name);
122 }
123 } else {
124 println!("No function calls in the response.");
125 println!("Direct response: {}", response.text());
126 }
127
128 Ok(())
129}
examples/thinking_advanced.rs (line 95)
8async fn main() -> Result<(), Box<dyn std::error::Error>> {
9 // Get API key from environment variable
10 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
11
12 // Create client
13 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
14
15 println!("=== Gemini 2.5 Thinking Advanced Example ===\n");
16
17 // Example 1: Streaming with thinking
18 println!("--- Example 1: Streaming with thinking ---");
19 let mut stream = client
20 .generate_content()
21 .with_system_prompt("You are a mathematics expert skilled at solving complex mathematical problems.")
22 .with_user_message("Solve this math problem: Find the sum of the first 50 prime numbers. Please explain your solution process in detail.")
23 .with_thinking_budget(2048)
24 .with_thoughts_included(true)
25 .execute_stream()
26 .await?;
27
28 println!("Streaming response:");
29 let mut thoughts_shown = false;
30 while let Some(chunk_result) = stream.next().await {
31 match chunk_result {
32 Ok(chunk) => {
33 // Check if there's thinking content
34 let thoughts = chunk.thoughts();
35 if !thoughts.is_empty() && !thoughts_shown {
36 println!("\nThinking process:");
37 for (i, thought) in thoughts.iter().enumerate() {
38 println!("Thought {}: {}", i + 1, thought);
39 }
40 println!("\nAnswer:");
41 thoughts_shown = true;
42 }
43
44 // Display general text content
45 print!("{}", chunk.text());
46 std::io::Write::flush(&mut std::io::stdout())?;
47 }
48 Err(e) => eprintln!("Streaming error: {}", e),
49 }
50 }
51 println!("\n");
52
53 // Example 2: Thinking combined with function calls
54 println!("--- Example 2: Thinking combined with function calls ---");
55
56 // Define a calculator function
57 let calculator = FunctionDeclaration::new(
58 "calculate",
59 "Perform basic mathematical calculations",
60 FunctionParameters::object()
61 .with_property(
62 "expression",
63 PropertyDetails::string(
64 "The mathematical expression to calculate, e.g., '2 + 3 * 4'",
65 ),
66 true,
67 )
68 .with_property(
69 "operation_type",
70 PropertyDetails::enum_type("Type of calculation", ["arithmetic", "advanced"]),
71 false,
72 ),
73 );
74
75 let response = client
76 .generate_content()
77 .with_system_prompt("You are a mathematics assistant. When calculations are needed, use the provided calculator function.")
78 .with_user_message("Calculate the result of (15 + 25) * 3 - 8 and explain the calculation steps.")
79 .with_function(calculator)
80 .with_thinking_budget(1024)
81 .with_thoughts_included(true)
82 .execute()
83 .await?;
84
85 // Display thinking process
86 let thoughts = response.thoughts();
87 if !thoughts.is_empty() {
88 println!("Thinking process:");
89 for (i, thought) in thoughts.iter().enumerate() {
90 println!("Thought {}: {}\n", i + 1, thought);
91 }
92 }
93
94 // Check for function calls
95 let function_calls = response.function_calls();
96 if !function_calls.is_empty() {
97 println!("Function calls:");
98 for (i, call) in function_calls.iter().enumerate() {
99 println!("Call {}: {} Args: {}", i + 1, call.name, call.args);
100 }
101 println!();
102 }
103
104 println!("Answer: {}\n", response.text());
105
106 // Example 3: Complex reasoning task
107 println!("--- Example 3: Complex reasoning task ---");
108 let complex_response = client
109 .generate_content()
110 .with_system_prompt("You are a logical reasoning expert.")
111 .with_user_message(
112 "There are three people: Alice, Bob, and Carol, who live in red, green, and blue houses respectively.\
113 Given:\
114 1. The person in the red house owns a cat\
115 2. Bob does not live in the green house\
116 3. Carol owns a dog\
117 4. The green house is to the left of the red house\
118 5. Alice does not own a cat\
119 Please reason out which color house each person lives in and what pets they own.",
120 )
121 .with_thinking_config(
122 ThinkingConfig::new()
123 .with_thinking_budget(3072)
124 .with_thoughts_included(true),
125 )
126 .execute()
127 .await?;
128
129 // Display thinking process
130 let complex_thoughts = complex_response.thoughts();
131 if !complex_thoughts.is_empty() {
132 println!("Reasoning process:");
133 for (i, thought) in complex_thoughts.iter().enumerate() {
134 println!("Reasoning step {}: {}\n", i + 1, thought);
135 }
136 }
137
138 println!("Conclusion: {}\n", complex_response.text());
139
140 // Display token usage statistics
141 if let Some(usage) = &complex_response.usage_metadata {
142 println!("Token usage statistics:");
143 println!(" Prompt tokens: {}", usage.prompt_token_count);
144 println!(
145 " Response tokens: {}",
146 usage.candidates_token_count.unwrap_or(0)
147 );
148 if let Some(thinking_tokens) = usage.thoughts_token_count {
149 println!(" Thinking tokens: {}", thinking_tokens);
150 }
151 println!(" Total tokens: {}", usage.total_token_count);
152 }
153
154 Ok(())
155}
examples/tools.rs (line 68)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Create client
14 let client = Gemini::new(api_key);
15
16 println!("--- Tools example with multiple functions ---");
17
18 // Define a weather function
19 let get_weather = FunctionDeclaration::new(
20 "get_weather",
21 "Get the current weather for a location",
22 FunctionParameters::object()
23 .with_property(
24 "location",
25 PropertyDetails::string("The city and state, e.g., San Francisco, CA"),
26 true,
27 )
28 .with_property(
29 "unit",
30 PropertyDetails::enum_type("The unit of temperature", ["celsius", "fahrenheit"]),
31 false,
32 ),
33 );
34
35 // Define a calculator function
36 let calculate = FunctionDeclaration::new(
37 "calculate",
38 "Perform a calculation",
39 FunctionParameters::object()
40 .with_property(
41 "operation",
42 PropertyDetails::enum_type(
43 "The mathematical operation to perform",
44 ["add", "subtract", "multiply", "divide"],
45 ),
46 true,
47 )
48 .with_property("a", PropertyDetails::number("The first number"), true)
49 .with_property("b", PropertyDetails::number("The second number"), true),
50 );
51
52 // Create a tool with multiple functions
53 let tool = Tool::with_functions(vec![get_weather, calculate]);
54
55 // Create a request with tool functions
56 let response = client
57 .generate_content()
58 .with_system_prompt(
59 "You are a helpful assistant that can check weather and perform calculations.",
60 )
61 .with_user_message("What's 42 times 12?")
62 .with_tool(tool)
63 .with_function_calling_mode(FunctionCallingMode::Any)
64 .execute()
65 .await?;
66
67 // Process function calls
68 if let Some(function_call) = response.function_calls().first() {
69 println!(
70 "Function call: {} with args: {}",
71 function_call.name, function_call.args
72 );
73
74 // Handle different function calls
75 match function_call.name.as_str() {
76 "calculate" => {
77 let operation: String = function_call.get("operation")?;
78 let a: f64 = function_call.get("a")?;
79 let b: f64 = function_call.get("b")?;
80
81 println!("Calculation: {} {} {}", a, operation, b);
82
83 let result = match operation.as_str() {
84 "add" => a + b,
85 "subtract" => a - b,
86 "multiply" => a * b,
87 "divide" => a / b,
88 _ => panic!("Unknown operation"),
89 };
90
91 let function_response = json!({
92 "result": result,
93 })
94 .to_string();
95
96 // Based on the curl example, we need to structure the conversation properly:
97 // 1. A user message with the original query
98 // 2. A model message containing the function call
99 // 3. A user message containing the function response
100
101 // Construct conversation following the exact curl pattern
102 let mut conversation = client.generate_content();
103
104 // 1. Add user message with original query and system prompt
105 conversation = conversation
106 .with_system_prompt("You are a helpful assistant that can check weather and perform calculations.")
107 .with_user_message("What's 42 times 12?");
108
109 // 2. Create model content with function call
110 let model_content = Content::function_call((*function_call).clone());
111
112 // Add as model message
113 let model_message = Message {
114 content: model_content,
115 role: Role::Model,
116 };
117 conversation = conversation.with_message(model_message);
118
119 // 3. Add user message with function response
120 conversation =
121 conversation.with_function_response_str("calculate", function_response)?;
122
123 // Execute the request
124 let final_response = conversation.execute().await?;
125
126 println!("Final response: {}", final_response.text());
127 }
128 "get_weather" => {
129 let location: String = function_call.get("location")?;
130 let unit = function_call
131 .get::<String>("unit")
132 .unwrap_or_else(|_| String::from("celsius"));
133
134 println!("Weather request for: {}, Unit: {}", location, unit);
135
136 let weather_response = json!({
137 "temperature": 22,
138 "unit": unit,
139 "condition": "sunny"
140 })
141 .to_string();
142
143 // Based on the curl example, we need to structure the conversation properly:
144 // 1. A user message with the original query
145 // 2. A model message containing the function call
146 // 3. A user message containing the function response
147
148 // Construct conversation following the exact curl pattern
149 let mut conversation = client.generate_content();
150
151 // 1. Add user message with original query and system prompt
152 conversation = conversation
153 .with_system_prompt("You are a helpful assistant that can check weather and perform calculations.")
154 .with_user_message("What's 42 times 12?");
155
156 // 2. Create model content with function call
157 let model_content = Content::function_call((*function_call).clone());
158
159 // Add as model message
160 let model_message = Message {
161 content: model_content,
162 role: Role::Model,
163 };
164 conversation = conversation.with_message(model_message);
165
166 // 3. Add user message with function response
167 conversation =
168 conversation.with_function_response_str("get_weather", weather_response)?;
169
170 // Execute the request
171 let final_response = conversation.execute().await?;
172
173 println!("Final response: {}", final_response.text());
174 }
175 _ => println!("Unknown function"),
176 }
177 } else {
178 println!("No function calls in the response.");
179 println!("Response: {}", response.text());
180 }
181
182 Ok(())
183}
pub fn thoughts(&self) -> Vec<String>
Get thought summaries from the response
Examples found in repository?
examples/thinking_basic.rs (line 30)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
11
12 println!("=== Gemini 2.5 Thinking Basic Example ===\n");
13
14 // Example 1: Using default dynamic thinking
15 println!(
16 "--- Example 1: Dynamic thinking (model automatically determines thinking budget) ---"
17 );
18 let response1 = client
19 .generate_content()
20 .with_system_prompt("You are a helpful mathematics assistant.")
21 .with_user_message(
22 "Explain Occam's razor principle and provide a simple example from daily life.",
23 )
24 .with_dynamic_thinking()
25 .with_thoughts_included(true)
26 .execute()
27 .await?;
28
29 // Display thinking process
30 let thoughts = response1.thoughts();
31 if !thoughts.is_empty() {
32 println!("Thinking summary:");
33 for (i, thought) in thoughts.iter().enumerate() {
34 println!("Thought {}: {}\n", i + 1, thought);
35 }
36 }
37
38 println!("Answer: {}\n", response1.text());
39
40 // Display token usage
41 if let Some(usage) = &response1.usage_metadata {
42 println!("Token usage:");
43 println!(" Prompt tokens: {}", usage.prompt_token_count);
44 println!(
45 " Response tokens: {}",
46 usage.candidates_token_count.unwrap_or(0)
47 );
48 if let Some(thinking_tokens) = usage.thoughts_token_count {
49 println!(" Thinking tokens: {}", thinking_tokens);
50 }
51 println!(" Total tokens: {}\n", usage.total_token_count);
52 }
53
54 // Example 2: Set specific thinking budget
55 println!("--- Example 2: Set thinking budget (1024 tokens) ---");
56 let response2 = client
57 .generate_content()
58 .with_system_prompt("You are a helpful programming assistant.")
59 .with_user_message("List 3 main advantages of using the Rust programming language")
60 .with_thinking_budget(1024)
61 .with_thoughts_included(true)
62 .execute()
63 .await?;
64
65 // Display thinking process
66 let thoughts2 = response2.thoughts();
67 if !thoughts2.is_empty() {
68 println!("Thinking summary:");
69 for (i, thought) in thoughts2.iter().enumerate() {
70 println!("Thought {}: {}\n", i + 1, thought);
71 }
72 }
73
74 println!("Answer: {}\n", response2.text());
75
76 // Example 3: Disable thinking feature
77 println!("--- Example 3: Disable thinking feature ---");
78 let response3 = client
79 .generate_content()
80 .with_system_prompt("You are a helpful assistant.")
81 .with_user_message("What is artificial intelligence?")
82 .execute()
83 .await?;
84
85 println!("Answer: {}\n", response3.text());
86
87 // Example 4: Use GenerationConfig to set thinking
88 println!("--- Example 4: Use GenerationConfig to set thinking ---");
89 let thinking_config = ThinkingConfig::new()
90 .with_thinking_budget(2048)
91 .with_thoughts_included(true);
92
93 let generation_config = GenerationConfig {
94 temperature: Some(0.7),
95 max_output_tokens: Some(500),
96 thinking_config: Some(thinking_config),
97 ..Default::default()
98 };
99
100 let response4 = client
101 .generate_content()
102 .with_system_prompt("You are a creative writing assistant.")
103 .with_user_message(
104 "Write the opening of a short story about a robot learning to feel emotions.",
105 )
106 .with_generation_config(generation_config)
107 .execute()
108 .await?;
109
110 // Display thinking process
111 let thoughts4 = response4.thoughts();
112 if !thoughts4.is_empty() {
113 println!("Thinking summary:");
114 for (i, thought) in thoughts4.iter().enumerate() {
115 println!("Thought {}: {}\n", i + 1, thought);
116 }
117 }
118
119 println!("Answer: {}\n", response4.text());
120
121 Ok(())
122}
More examples
examples/thinking_curl_equivalent.rs (line 51)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // This is equivalent to the following curl example:
10 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:generateContent" \
11 // -H "x-goog-api-key: $GEMINI_API_KEY" \
12 // -H 'Content-Type: application/json' \
13 // -X POST \
14 // -d '{
15 // "contents": [
16 // {
17 // "parts": [
18 // {
19 // "text": "Provide a list of the top 3 famous physicists and their major contributions"
20 // }
21 // ]
22 // }
23 // ],
24 // "generationConfig": {
25 // "thinkingConfig": {
26 // "thinkingBudget": 1024,
27 // "includeThoughts": true
28 // }
29 // }
30 // }'
31
32 // Create client
33 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
34
35 println!("=== Thinking Curl Equivalent Example ===\n");
36
37 // Method 1: Using high-level API (simplest approach)
38 println!("--- Method 1: Using high-level API ---");
39
40 let response1 = client
41 .generate_content()
42 .with_user_message(
43 "Provide a list of the top 3 famous physicists and their major contributions",
44 )
45 .with_thinking_budget(1024)
46 .with_thoughts_included(true)
47 .execute()
48 .await?;
49
50 // Display thinking process
51 let thoughts1 = response1.thoughts();
52 if !thoughts1.is_empty() {
53 println!("Thinking summary:");
54 for (i, thought) in thoughts1.iter().enumerate() {
55 println!("Thought {}: {}\n", i + 1, thought);
56 }
57 }
58
59 println!("Answer: {}\n", response1.text());
60
61 // Method 2: Using GenerationConfig to fully match curl example structure
62 println!("--- Method 2: Fully matching curl example structure ---");
63
64 let thinking_config = ThinkingConfig {
65 thinking_budget: Some(1024),
66 include_thoughts: Some(true),
67 };
68
69 let generation_config = GenerationConfig {
70 thinking_config: Some(thinking_config),
71 ..Default::default()
72 };
73
74 let response2 = client
75 .generate_content()
76 .with_user_message(
77 "Provide a list of the top 3 famous physicists and their major contributions",
78 )
79 .with_generation_config(generation_config)
80 .execute()
81 .await?;
82
83 // Display thinking process
84 let thoughts2 = response2.thoughts();
85 if !thoughts2.is_empty() {
86 println!("Thinking summary:");
87 for (i, thought) in thoughts2.iter().enumerate() {
88 println!("Thought {}: {}\n", i + 1, thought);
89 }
90 }
91
92 println!("Answer: {}\n", response2.text());
93
94 // Show token usage
95 if let Some(usage) = &response2.usage_metadata {
96 println!("Token usage:");
97 println!(" Prompt tokens: {}", usage.prompt_token_count);
98 println!(
99 " Response tokens: {}",
100 usage.candidates_token_count.unwrap_or(0)
101 );
102 if let Some(thinking_tokens) = usage.thoughts_token_count {
103 println!(" Thinking tokens: {}", thinking_tokens);
104 }
105 println!(" Total tokens: {}", usage.total_token_count);
106 }
107
108 // Method 3: Demonstrate different thinking budget settings
109 println!("\n--- Method 3: Different thinking budget comparison ---");
110
111 // Thinking disabled
112 println!("Thinking disabled:");
113 let response_no_thinking = client
114 .generate_content()
115 .with_user_message("Explain the basic principles of quantum mechanics")
116 .execute()
117 .await?;
118 println!("Answer: {}\n", response_no_thinking.text());
119
120 // Dynamic thinking
121 println!("Dynamic thinking:");
122 let response_dynamic = client
123 .generate_content()
124 .with_user_message("Explain the basic principles of quantum mechanics")
125 .with_dynamic_thinking()
126 .with_thoughts_included(true)
127 .execute()
128 .await?;
129
130 let thoughts_dynamic = response_dynamic.thoughts();
131 if !thoughts_dynamic.is_empty() {
132 println!("Thinking summary:");
133 for (i, thought) in thoughts_dynamic.iter().enumerate() {
134 println!("Thought {}: {}\n", i + 1, thought);
135 }
136 }
137 println!("Answer: {}\n", response_dynamic.text());
138
139 // High thinking budget
140 println!("High thinking budget (4096 tokens):");
141 let response_high_budget = client
142 .generate_content()
143 .with_user_message("Explain the basic principles of quantum mechanics")
144 .with_thinking_budget(4096)
145 .with_thoughts_included(true)
146 .execute()
147 .await?;
148
149 let thoughts_high = response_high_budget.thoughts();
150 if !thoughts_high.is_empty() {
151 println!("Thinking summary:");
152 for (i, thought) in thoughts_high.iter().enumerate() {
153 println!("Thought {}: {}\n", i + 1, thought);
154 }
155 }
156 println!("Answer: {}", response_high_budget.text());
157
158 Ok(())
159}
examples/thinking_advanced.rs (line 34)
8async fn main() -> Result<(), Box<dyn std::error::Error>> {
9 // Get API key from environment variable
10 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
11
12 // Create client
13 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
14
15 println!("=== Gemini 2.5 Thinking Advanced Example ===\n");
16
17 // Example 1: Streaming with thinking
18 println!("--- Example 1: Streaming with thinking ---");
19 let mut stream = client
20 .generate_content()
21 .with_system_prompt("You are a mathematics expert skilled at solving complex mathematical problems.")
22 .with_user_message("Solve this math problem: Find the sum of the first 50 prime numbers. Please explain your solution process in detail.")
23 .with_thinking_budget(2048)
24 .with_thoughts_included(true)
25 .execute_stream()
26 .await?;
27
28 println!("Streaming response:");
29 let mut thoughts_shown = false;
30 while let Some(chunk_result) = stream.next().await {
31 match chunk_result {
32 Ok(chunk) => {
33 // Check if there's thinking content
34 let thoughts = chunk.thoughts();
35 if !thoughts.is_empty() && !thoughts_shown {
36 println!("\nThinking process:");
37 for (i, thought) in thoughts.iter().enumerate() {
38 println!("Thought {}: {}", i + 1, thought);
39 }
40 println!("\nAnswer:");
41 thoughts_shown = true;
42 }
43
44 // Display general text content
45 print!("{}", chunk.text());
46 std::io::Write::flush(&mut std::io::stdout())?;
47 }
48 Err(e) => eprintln!("Streaming error: {}", e),
49 }
50 }
51 println!("\n");
52
53 // Example 2: Thinking combined with function calls
54 println!("--- Example 2: Thinking combined with function calls ---");
55
56 // Define a calculator function
57 let calculator = FunctionDeclaration::new(
58 "calculate",
59 "Perform basic mathematical calculations",
60 FunctionParameters::object()
61 .with_property(
62 "expression",
63 PropertyDetails::string(
64 "The mathematical expression to calculate, e.g., '2 + 3 * 4'",
65 ),
66 true,
67 )
68 .with_property(
69 "operation_type",
70 PropertyDetails::enum_type("Type of calculation", ["arithmetic", "advanced"]),
71 false,
72 ),
73 );
74
75 let response = client
76 .generate_content()
77 .with_system_prompt("You are a mathematics assistant. When calculations are needed, use the provided calculator function.")
78 .with_user_message("Calculate the result of (15 + 25) * 3 - 8 and explain the calculation steps.")
79 .with_function(calculator)
80 .with_thinking_budget(1024)
81 .with_thoughts_included(true)
82 .execute()
83 .await?;
84
85 // Display thinking process
86 let thoughts = response.thoughts();
87 if !thoughts.is_empty() {
88 println!("Thinking process:");
89 for (i, thought) in thoughts.iter().enumerate() {
90 println!("Thought {}: {}\n", i + 1, thought);
91 }
92 }
93
94 // Check for function calls
95 let function_calls = response.function_calls();
96 if !function_calls.is_empty() {
97 println!("Function calls:");
98 for (i, call) in function_calls.iter().enumerate() {
99 println!("Call {}: {} Args: {}", i + 1, call.name, call.args);
100 }
101 println!();
102 }
103
104 println!("Answer: {}\n", response.text());
105
106 // Example 3: Complex reasoning task
107 println!("--- Example 3: Complex reasoning task ---");
108 let complex_response = client
109 .generate_content()
110 .with_system_prompt("You are a logical reasoning expert.")
111 .with_user_message(
112 "There are three people: Alice, Bob, and Carol, who live in red, green, and blue houses respectively.\
113 Given:\
114 1. The person in the red house owns a cat\
115 2. Bob does not live in the green house\
116 3. Carol owns a dog\
117 4. The green house is to the left of the red house\
118 5. Alice does not own a cat\
119 Please reason out which color house each person lives in and what pets they own.",
120 )
121 .with_thinking_config(
122 ThinkingConfig::new()
123 .with_thinking_budget(3072)
124 .with_thoughts_included(true),
125 )
126 .execute()
127 .await?;
128
129 // Display thinking process
130 let complex_thoughts = complex_response.thoughts();
131 if !complex_thoughts.is_empty() {
132 println!("Reasoning process:");
133 for (i, thought) in complex_thoughts.iter().enumerate() {
134 println!("Reasoning step {}: {}\n", i + 1, thought);
135 }
136 }
137
138 println!("Conclusion: {}\n", complex_response.text());
139
140 // Display token usage statistics
141 if let Some(usage) = &complex_response.usage_metadata {
142 println!("Token usage statistics:");
143 println!(" Prompt tokens: {}", usage.prompt_token_count);
144 println!(
145 " Response tokens: {}",
146 usage.candidates_token_count.unwrap_or(0)
147 );
148 if let Some(thinking_tokens) = usage.thoughts_token_count {
149 println!(" Thinking tokens: {}", thinking_tokens);
150 }
151 println!(" Total tokens: {}", usage.total_token_count);
152 }
153
154 Ok(())
155}
Trait Implementations§
Source§impl Clone for GenerationResponse
impl Clone for GenerationResponse
Source§fn clone(&self) -> GenerationResponse
fn clone(&self) -> GenerationResponse
Returns a duplicate of the value. Read more
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Source§impl Debug for GenerationResponse
impl Debug for GenerationResponse
Source§impl<'de> Deserialize<'de> for GenerationResponse
impl<'de> Deserialize<'de> for GenerationResponse
Source§fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error> where
__D: Deserializer<'de>,
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error> where
__D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer. Read more
Source§impl PartialEq for GenerationResponse
impl PartialEq for GenerationResponse
Source§impl Serialize for GenerationResponse
impl Serialize for GenerationResponse
impl StructuralPartialEq for GenerationResponse
Auto Trait Implementations§
impl Freeze for GenerationResponse
impl RefUnwindSafe for GenerationResponse
impl Send for GenerationResponse
impl Sync for GenerationResponse
impl Unpin for GenerationResponse
impl UnwindSafe for GenerationResponse
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more