pub struct Gemini { /* private fields */ }
Expand description
Client for the Gemini API
Implementations§
Source§impl Gemini
impl Gemini
Sourcepub fn new(api_key: impl Into<String>) -> Self
pub fn new(api_key: impl Into<String>) -> Self
Create a new client with the specified API key
Examples found in repository?
examples/test_api.rs (line 9)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 let api_key = env::var("GEMINI_API_KEY")?;
7
8 // Create client with the default model (gemini-2.0-flash)
9 let client = Gemini::new(api_key);
10
11 println!("Sending request to Gemini API...");
12
13 // Simple text completion with minimal content
14 let response = client
15 .generate_content()
16 .with_user_message("Say hello")
17 .execute()
18 .await?;
19
20 println!("Response: {}", response.text());
21
22 Ok(())
23}
More examples
examples/google_search.rs (line 10)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 println!("--- Google Search tool example ---");
13
14 // Create a Google Search tool
15 let google_search_tool = Tool::google_search();
16
17 // Create a request with Google Search tool
18 let response = client
19 .generate_content()
20 .with_user_message("What is the current Google stock price?")
21 .with_tool(google_search_tool)
22 .execute()
23 .await?;
24
25 println!("Response: {}", response.text());
26
27 Ok(())
28}
examples/batch_list.rs (line 21)
16async fn main() -> Result<(), Box<dyn std::error::Error>> {
17 // Get the API key from the environment
18 let api_key = std::env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY not set");
19
20 // Create a new Gemini client
21 let gemini = Gemini::new(api_key);
22
23 println!("Listing all batch operations...");
24
25 // List all batch operations using the stream
26 let stream = gemini.list_batches(5); // page_size of 5
27 tokio::pin!(stream);
28
29 while let Some(result) = stream.next().await {
30 match result {
31 Ok(operation) => {
32 println!(
33 " - Batch: {}, State: {:?}, Created: {}",
34 operation.name, operation.metadata.state, operation.metadata.create_time
35 );
36 }
37 Err(e) => {
38 eprintln!("Error fetching batch operation: {}", e);
39 }
40 }
41 }
42
43 println!("\nFinished listing operations.");
44
45 Ok(())
46}
examples/blob.rs (line 30)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Image file path (in the same directory)
14 let image_path = Path::new(file!())
15 .parent()
16 .unwrap_or(Path::new("."))
17 .join("image-example.webp"); // Replace with your image filename
18
19 // Read the image file
20 let mut file = File::open(&image_path)?;
21 let mut buffer = Vec::new();
22 file.read_to_end(&mut buffer)?;
23
24 // Convert to base64
25 let data = general_purpose::STANDARD.encode(&buffer);
26
27 println!("Image loaded: {}", image_path.display());
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 println!("--- Describe Image ---");
33 let response = client
34 .generate_content()
35 .with_inline_data(data, "image/webp")
36 .with_response_mime_type("text/plain")
37 .with_generation_config(GenerationConfig {
38 temperature: Some(0.7),
39 max_output_tokens: Some(400),
40 ..Default::default()
41 })
42 .execute()
43 .await?;
44
45 println!("Response: {}", response.text());
46
47 Ok(())
48}
examples/batch_delete.rs (line 31)
23async fn main() -> Result<(), Box<dyn std::error::Error>> {
24 // Get the API key from the environment
25 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY not set");
26
27 // Get the batch name from the environment
28 let batch_name = env::var("BATCH_NAME").expect("BATCH_NAME not set");
29
30 // Create a new Gemini client
31 let gemini = Gemini::new(api_key);
32
33 // Get the batch operation
34 let batch = gemini.get_batch(&batch_name);
35
36 // Check the batch status
37 match batch.status().await {
38 Ok(status) => {
39 println!("Batch status: {:?}", status);
40
41 // Only delete completed batches (succeeded, cancelled, or expired)
42 match status {
43 BatchStatus::Succeeded { .. } | BatchStatus::Cancelled | BatchStatus::Expired => {
44 println!("Deleting batch operation...");
45 // We need to handle the std::result::Result<(), (Batch, Error)> return type
46 match batch.delete().await {
47 Ok(()) => println!("Batch deleted successfully!"),
48 Err((_batch, e)) => {
49 println!("Failed to delete batch: {}. You can retry with the returned batch.", e);
50 // Here you could retry: batch.delete().await, etc.
51 }
52 }
53 }
54 _ => {
55 println!("Batch is still running or pending. Use cancel() to stop it, or wait for completion before deleting.");
56 }
57 }
58 }
59 Err(e) => println!("Failed to get batch status: {}", e),
60 }
61
62 Ok(())
63}
examples/curl_google_search.rs (line 30)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 println!("--- Curl equivalent with Google Search tool ---");
10
11 // This is equivalent to the curl example:
12 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
13 // -H "Content-Type: application/json" \
14 // -d '{
15 // "contents": [
16 // {
17 // "parts": [
18 // {"text": "What is the current Google stock price?"}
19 // ]
20 // }
21 // ],
22 // "tools": [
23 // {
24 // "google_search": {}
25 // }
26 // ]
27 // }'
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 // Create a content part that matches the JSON in the curl example
33 let text_part = Part::Text {
34 text: "What is the current Google stock price?".to_string(),
35 thought: None,
36 };
37
38 let content = Content {
39 parts: vec![text_part].into(),
40 role: None,
41 };
42
43 // Create a Google Search tool
44 let google_search_tool = Tool::google_search();
45
46 // Add the content and tool directly to the request
47 // This exactly mirrors the JSON structure in the curl example
48 let mut content_builder = client.generate_content();
49 content_builder.contents.push(content);
50 content_builder = content_builder.with_tool(google_search_tool);
51
52 let response = content_builder.execute().await?;
53
54 println!("Response: {}", response.text());
55
56 Ok(())
57}
Sourcepub fn pro(api_key: impl Into<String>) -> Self
pub fn pro(api_key: impl Into<String>) -> Self
Create a new client for the Gemini Pro model
Examples found in repository?
examples/gemini_pro_example.rs (line 11)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7 // Replace with your actual API key
8 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
9
10 // Create a Gemini client
11 let gemini = Gemini::pro(api_key);
12
13 // This example matches the exact curl request format:
14 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
15 // -H 'Content-Type: application/json' \
16 // -d '{
17 // "system_instruction": {
18 // "parts": [
19 // {
20 // "text": "You are a cat. Your name is Neko."
21 // }
22 // ]
23 // },
24 // "contents": [
25 // {
26 // "parts": [
27 // {
28 // "text": "Hello there"
29 // }
30 // ]
31 // }
32 // ]
33 // }'
34 let response = gemini
35 .generate_content()
36 .with_system_instruction("You are a cat. Your name is Neko.")
37 .with_user_message("Hello there")
38 .execute()
39 .await?;
40
41 // Print the response
42 println!("Response: {}", response.text());
43
44 Ok(())
45}
More examples
examples/mp4_describe.rs (line 20)
11async fn main() -> Result<(), Box<dyn std::error::Error>> {
12 // Read mp4 video file
13 let mut file = File::open("examples/sample.mp4")?;
14 let mut buffer = Vec::new();
15 file.read_to_end(&mut buffer)?;
16 let b64 = general_purpose::STANDARD.encode(&buffer);
17
18 // Get API key
19 let api_key = env::var("GEMINI_API_KEY")?;
20 let gemini = Gemini::pro(api_key);
21
22 // Example 1: Add mp4 blob using Message struct
23 let video_content = Content::inline_data("video/mp4", b64.clone());
24 let response1 = gemini
25 .generate_content()
26 .with_user_message("Please describe the content of this video (Message example)")
27 .with_message(gemini_rust::Message {
28 content: video_content,
29 role: gemini_rust::Role::User,
30 })
31 .execute()
32 .await?;
33
34 println!("AI description (Message): {}", response1.text());
35
36 // Example 2: Add mp4 blob directly using builder's with_inline_data
37 let response2 = gemini
38 .generate_content()
39 .with_user_message("Please describe the content of this video (with_inline_data example)")
40 .with_inline_data(b64, "video/mp4")
41 .execute()
42 .await?;
43
44 println!("AI description (with_inline_data): {}", response2.text());
45 Ok(())
46}
Sourcepub fn with_model(api_key: impl Into<String>, model: String) -> Self
pub fn with_model(api_key: impl Into<String>, model: String) -> Self
Create a new client with the specified API key and model
Examples found in repository?
examples/embedding.rs (line 8)
4async fn main() -> Result<(), Box<dyn std::error::Error>> {
5 let api_key = std::env::var("GEMINI_API_KEY")?;
6
7 // Create client with the embedding model (text-embedding-004)
8 let client = Gemini::with_model(api_key, "models/text-embedding-004".to_string());
9
10 println!("Sending embedding request to Gemini API...");
11
12 // Simple text embedding
13 let response = client
14 .embed_content()
15 .with_text("Hello")
16 .with_task_type(TaskType::RetrievalDocument)
17 .execute()
18 .await?;
19
20 println!("Response: {:?}", response.embedding.values);
21
22 Ok(())
23}
More examples
examples/batch_embedding.rs (line 8)
4async fn main() -> Result<(), Box<dyn std::error::Error>> {
5 let api_key = std::env::var("GEMINI_API_KEY")?;
6
7 // Create client with the embedding model (text-embedding-004)
8 let client = Gemini::with_model(api_key, "models/text-embedding-004".to_string());
9
10 println!("Sending batch embedding request to Gemini API...");
11
12 // Simple text embedding
13 let response = client
14 .embed_content()
15 .with_chunks(vec!["Hello", "World", "Test embedding 3"])
16 .with_task_type(TaskType::RetrievalDocument)
17 .execute_batch()
18 .await?;
19
20 println!("Response: ");
21 for (i, e) in response.embeddings.iter().enumerate() {
22 println!("|{}|: {:?}\n", i, e.values);
23 }
24
25 Ok(())
26}
examples/thinking_basic.rs (line 10)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
11
12 println!("=== Gemini 2.5 Thinking Basic Example ===\n");
13
14 // Example 1: Using default dynamic thinking
15 println!(
16 "--- Example 1: Dynamic thinking (model automatically determines thinking budget) ---"
17 );
18 let response1 = client
19 .generate_content()
20 .with_system_prompt("You are a helpful mathematics assistant.")
21 .with_user_message(
22 "Explain Occam's razor principle and provide a simple example from daily life.",
23 )
24 .with_dynamic_thinking()
25 .with_thoughts_included(true)
26 .execute()
27 .await?;
28
29 // Display thinking process
30 let thoughts = response1.thoughts();
31 if !thoughts.is_empty() {
32 println!("Thinking summary:");
33 for (i, thought) in thoughts.iter().enumerate() {
34 println!("Thought {}: {}\n", i + 1, thought);
35 }
36 }
37
38 println!("Answer: {}\n", response1.text());
39
40 // Display token usage
41 if let Some(usage) = &response1.usage_metadata {
42 println!("Token usage:");
43 println!(" Prompt tokens: {}", usage.prompt_token_count);
44 println!(
45 " Response tokens: {}",
46 usage.candidates_token_count.unwrap_or(0)
47 );
48 if let Some(thinking_tokens) = usage.thoughts_token_count {
49 println!(" Thinking tokens: {}", thinking_tokens);
50 }
51 println!(" Total tokens: {}\n", usage.total_token_count);
52 }
53
54 // Example 2: Set specific thinking budget
55 println!("--- Example 2: Set thinking budget (1024 tokens) ---");
56 let response2 = client
57 .generate_content()
58 .with_system_prompt("You are a helpful programming assistant.")
59 .with_user_message("List 3 main advantages of using the Rust programming language")
60 .with_thinking_budget(1024)
61 .with_thoughts_included(true)
62 .execute()
63 .await?;
64
65 // Display thinking process
66 let thoughts2 = response2.thoughts();
67 if !thoughts2.is_empty() {
68 println!("Thinking summary:");
69 for (i, thought) in thoughts2.iter().enumerate() {
70 println!("Thought {}: {}\n", i + 1, thought);
71 }
72 }
73
74 println!("Answer: {}\n", response2.text());
75
76 // Example 3: Disable thinking feature
77 println!("--- Example 3: Disable thinking feature ---");
78 let response3 = client
79 .generate_content()
80 .with_system_prompt("You are a helpful assistant.")
81 .with_user_message("What is artificial intelligence?")
82 .execute()
83 .await?;
84
85 println!("Answer: {}\n", response3.text());
86
87 // Example 4: Use GenerationConfig to set thinking
88 println!("--- Example 4: Use GenerationConfig to set thinking ---");
89 let thinking_config = ThinkingConfig::new()
90 .with_thinking_budget(2048)
91 .with_thoughts_included(true);
92
93 let generation_config = GenerationConfig {
94 temperature: Some(0.7),
95 max_output_tokens: Some(500),
96 thinking_config: Some(thinking_config),
97 ..Default::default()
98 };
99
100 let response4 = client
101 .generate_content()
102 .with_system_prompt("You are a creative writing assistant.")
103 .with_user_message(
104 "Write the opening of a short story about a robot learning to feel emotions.",
105 )
106 .with_generation_config(generation_config)
107 .execute()
108 .await?;
109
110 // Display thinking process
111 let thoughts4 = response4.thoughts();
112 if !thoughts4.is_empty() {
113 println!("Thinking summary:");
114 for (i, thought) in thoughts4.iter().enumerate() {
115 println!("Thought {}: {}\n", i + 1, thought);
116 }
117 }
118
119 println!("Answer: {}\n", response4.text());
120
121 Ok(())
122}
examples/thinking_curl_equivalent.rs (line 33)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // This is equivalent to the following curl example:
10 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:generateContent" \
11 // -H "x-goog-api-key: $GEMINI_API_KEY" \
12 // -H 'Content-Type: application/json' \
13 // -X POST \
14 // -d '{
15 // "contents": [
16 // {
17 // "parts": [
18 // {
19 // "text": "Provide a list of the top 3 famous physicists and their major contributions"
20 // }
21 // ]
22 // }
23 // ],
24 // "generationConfig": {
25 // "thinkingConfig": {
26 // "thinkingBudget": 1024,
27 // "includeThoughts": true
28 // }
29 // }
30 // }'
31
32 // Create client
33 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
34
35 println!("=== Thinking Curl Equivalent Example ===\n");
36
37 // Method 1: Using high-level API (simplest approach)
38 println!("--- Method 1: Using high-level API ---");
39
40 let response1 = client
41 .generate_content()
42 .with_user_message(
43 "Provide a list of the top 3 famous physicists and their major contributions",
44 )
45 .with_thinking_budget(1024)
46 .with_thoughts_included(true)
47 .execute()
48 .await?;
49
50 // Display thinking process
51 let thoughts1 = response1.thoughts();
52 if !thoughts1.is_empty() {
53 println!("Thinking summary:");
54 for (i, thought) in thoughts1.iter().enumerate() {
55 println!("Thought {}: {}\n", i + 1, thought);
56 }
57 }
58
59 println!("Answer: {}\n", response1.text());
60
61 // Method 2: Using GenerationConfig to fully match curl example structure
62 println!("--- Method 2: Fully matching curl example structure ---");
63
64 let thinking_config = ThinkingConfig {
65 thinking_budget: Some(1024),
66 include_thoughts: Some(true),
67 };
68
69 let generation_config = GenerationConfig {
70 thinking_config: Some(thinking_config),
71 ..Default::default()
72 };
73
74 let response2 = client
75 .generate_content()
76 .with_user_message(
77 "Provide a list of the top 3 famous physicists and their major contributions",
78 )
79 .with_generation_config(generation_config)
80 .execute()
81 .await?;
82
83 // Display thinking process
84 let thoughts2 = response2.thoughts();
85 if !thoughts2.is_empty() {
86 println!("Thinking summary:");
87 for (i, thought) in thoughts2.iter().enumerate() {
88 println!("Thought {}: {}\n", i + 1, thought);
89 }
90 }
91
92 println!("Answer: {}\n", response2.text());
93
94 // Show token usage
95 if let Some(usage) = &response2.usage_metadata {
96 println!("Token usage:");
97 println!(" Prompt tokens: {}", usage.prompt_token_count);
98 println!(
99 " Response tokens: {}",
100 usage.candidates_token_count.unwrap_or(0)
101 );
102 if let Some(thinking_tokens) = usage.thoughts_token_count {
103 println!(" Thinking tokens: {}", thinking_tokens);
104 }
105 println!(" Total tokens: {}", usage.total_token_count);
106 }
107
108 // Method 3: Demonstrate different thinking budget settings
109 println!("\n--- Method 3: Different thinking budget comparison ---");
110
111 // Thinking disabled
112 println!("Thinking disabled:");
113 let response_no_thinking = client
114 .generate_content()
115 .with_user_message("Explain the basic principles of quantum mechanics")
116 .execute()
117 .await?;
118 println!("Answer: {}\n", response_no_thinking.text());
119
120 // Dynamic thinking
121 println!("Dynamic thinking:");
122 let response_dynamic = client
123 .generate_content()
124 .with_user_message("Explain the basic principles of quantum mechanics")
125 .with_dynamic_thinking()
126 .with_thoughts_included(true)
127 .execute()
128 .await?;
129
130 let thoughts_dynamic = response_dynamic.thoughts();
131 if !thoughts_dynamic.is_empty() {
132 println!("Thinking summary:");
133 for (i, thought) in thoughts_dynamic.iter().enumerate() {
134 println!("Thought {}: {}\n", i + 1, thought);
135 }
136 }
137 println!("Answer: {}\n", response_dynamic.text());
138
139 // High thinking budget
140 println!("High thinking budget (4096 tokens):");
141 let response_high_budget = client
142 .generate_content()
143 .with_user_message("Explain the basic principles of quantum mechanics")
144 .with_thinking_budget(4096)
145 .with_thoughts_included(true)
146 .execute()
147 .await?;
148
149 let thoughts_high = response_high_budget.thoughts();
150 if !thoughts_high.is_empty() {
151 println!("Thinking summary:");
152 for (i, thought) in thoughts_high.iter().enumerate() {
153 println!("Thought {}: {}\n", i + 1, thought);
154 }
155 }
156 println!("Answer: {}", response_high_budget.text());
157
158 Ok(())
159}
examples/thinking_advanced.rs (line 13)
8async fn main() -> Result<(), Box<dyn std::error::Error>> {
9 // Get API key from environment variable
10 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
11
12 // Create client
13 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
14
15 println!("=== Gemini 2.5 Thinking Advanced Example ===\n");
16
17 // Example 1: Streaming with thinking
18 println!("--- Example 1: Streaming with thinking ---");
19 let mut stream = client
20 .generate_content()
21 .with_system_prompt("You are a mathematics expert skilled at solving complex mathematical problems.")
22 .with_user_message("Solve this math problem: Find the sum of the first 50 prime numbers. Please explain your solution process in detail.")
23 .with_thinking_budget(2048)
24 .with_thoughts_included(true)
25 .execute_stream()
26 .await?;
27
28 println!("Streaming response:");
29 let mut thoughts_shown = false;
30 while let Some(chunk_result) = stream.next().await {
31 match chunk_result {
32 Ok(chunk) => {
33 // Check if there's thinking content
34 let thoughts = chunk.thoughts();
35 if !thoughts.is_empty() && !thoughts_shown {
36 println!("\nThinking process:");
37 for (i, thought) in thoughts.iter().enumerate() {
38 println!("Thought {}: {}", i + 1, thought);
39 }
40 println!("\nAnswer:");
41 thoughts_shown = true;
42 }
43
44 // Display general text content
45 print!("{}", chunk.text());
46 std::io::Write::flush(&mut std::io::stdout())?;
47 }
48 Err(e) => eprintln!("Streaming error: {}", e),
49 }
50 }
51 println!("\n");
52
53 // Example 2: Thinking combined with function calls
54 println!("--- Example 2: Thinking combined with function calls ---");
55
56 // Define a calculator function
57 let calculator = FunctionDeclaration::new(
58 "calculate",
59 "Perform basic mathematical calculations",
60 FunctionParameters::object()
61 .with_property(
62 "expression",
63 PropertyDetails::string(
64 "The mathematical expression to calculate, e.g., '2 + 3 * 4'",
65 ),
66 true,
67 )
68 .with_property(
69 "operation_type",
70 PropertyDetails::enum_type("Type of calculation", ["arithmetic", "advanced"]),
71 false,
72 ),
73 );
74
75 let response = client
76 .generate_content()
77 .with_system_prompt("You are a mathematics assistant. When calculations are needed, use the provided calculator function.")
78 .with_user_message("Calculate the result of (15 + 25) * 3 - 8 and explain the calculation steps.")
79 .with_function(calculator)
80 .with_thinking_budget(1024)
81 .with_thoughts_included(true)
82 .execute()
83 .await?;
84
85 // Display thinking process
86 let thoughts = response.thoughts();
87 if !thoughts.is_empty() {
88 println!("Thinking process:");
89 for (i, thought) in thoughts.iter().enumerate() {
90 println!("Thought {}: {}\n", i + 1, thought);
91 }
92 }
93
94 // Check for function calls
95 let function_calls = response.function_calls();
96 if !function_calls.is_empty() {
97 println!("Function calls:");
98 for (i, call) in function_calls.iter().enumerate() {
99 println!("Call {}: {} Args: {}", i + 1, call.name, call.args);
100 }
101 println!();
102 }
103
104 println!("Answer: {}\n", response.text());
105
106 // Example 3: Complex reasoning task
107 println!("--- Example 3: Complex reasoning task ---");
108 let complex_response = client
109 .generate_content()
110 .with_system_prompt("You are a logical reasoning expert.")
111 .with_user_message(
112 "There are three people: Alice, Bob, and Carol, who live in red, green, and blue houses respectively.\
113 Given:\
114 1. The person in the red house owns a cat\
115 2. Bob does not live in the green house\
116 3. Carol owns a dog\
117 4. The green house is to the left of the red house\
118 5. Alice does not own a cat\
119 Please reason out which color house each person lives in and what pets they own.",
120 )
121 .with_thinking_config(
122 ThinkingConfig::new()
123 .with_thinking_budget(3072)
124 .with_thoughts_included(true),
125 )
126 .execute()
127 .await?;
128
129 // Display thinking process
130 let complex_thoughts = complex_response.thoughts();
131 if !complex_thoughts.is_empty() {
132 println!("Reasoning process:");
133 for (i, thought) in complex_thoughts.iter().enumerate() {
134 println!("Reasoning step {}: {}\n", i + 1, thought);
135 }
136 }
137
138 println!("Conclusion: {}\n", complex_response.text());
139
140 // Display token usage statistics
141 if let Some(usage) = &complex_response.usage_metadata {
142 println!("Token usage statistics:");
143 println!(" Prompt tokens: {}", usage.prompt_token_count);
144 println!(
145 " Response tokens: {}",
146 usage.candidates_token_count.unwrap_or(0)
147 );
148 if let Some(thinking_tokens) = usage.thoughts_token_count {
149 println!(" Thinking tokens: {}", thinking_tokens);
150 }
151 println!(" Total tokens: {}", usage.total_token_count);
152 }
153
154 Ok(())
155}
Sourcepub fn with_base_url(api_key: impl Into<String>, base_url: String) -> Self
pub fn with_base_url(api_key: impl Into<String>, base_url: String) -> Self
Create a new client with custom base URL
Sourcepub fn with_model_and_base_url(
api_key: impl Into<String>,
model: String,
base_url: String,
) -> Self
pub fn with_model_and_base_url( api_key: impl Into<String>, model: String, base_url: String, ) -> Self
Create a new client with the specified API key, model, and base URL
Examples found in repository?
examples/custom_base_url.rs (lines 10-14)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8 // Using custom base URL
9 let custom_base_url = "https://generativelanguage.googleapis.com/v1beta/";
10 let client_custom = Gemini::with_model_and_base_url(
11 api_key,
12 "models/gemini-2.5-flash-lite-preview-06-17".to_string(),
13 custom_base_url.to_string(),
14 );
15 println!("Custom base URL client created successfully");
16 let response = client_custom
17 .generate_content()
18 .with_system_prompt("You are a helpful assistant.")
19 .with_user_message("Hello, can you tell me a joke about programming?")
20 .with_generation_config(GenerationConfig {
21 temperature: Some(0.7),
22 max_output_tokens: Some(100),
23 ..Default::default()
24 })
25 .execute()
26 .await?;
27
28 println!("Response: {}", response.text());
29
30 Ok(())
31}
Sourcepub fn generate_content(&self) -> ContentBuilder
pub fn generate_content(&self) -> ContentBuilder
Start building a content generation request
Examples found in repository?
examples/test_api.rs (line 15)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 let api_key = env::var("GEMINI_API_KEY")?;
7
8 // Create client with the default model (gemini-2.0-flash)
9 let client = Gemini::new(api_key);
10
11 println!("Sending request to Gemini API...");
12
13 // Simple text completion with minimal content
14 let response = client
15 .generate_content()
16 .with_user_message("Say hello")
17 .execute()
18 .await?;
19
20 println!("Response: {}", response.text());
21
22 Ok(())
23}
More examples
examples/google_search.rs (line 19)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 println!("--- Google Search tool example ---");
13
14 // Create a Google Search tool
15 let google_search_tool = Tool::google_search();
16
17 // Create a request with Google Search tool
18 let response = client
19 .generate_content()
20 .with_user_message("What is the current Google stock price?")
21 .with_tool(google_search_tool)
22 .execute()
23 .await?;
24
25 println!("Response: {}", response.text());
26
27 Ok(())
28}
examples/custom_base_url.rs (line 17)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8 // Using custom base URL
9 let custom_base_url = "https://generativelanguage.googleapis.com/v1beta/";
10 let client_custom = Gemini::with_model_and_base_url(
11 api_key,
12 "models/gemini-2.5-flash-lite-preview-06-17".to_string(),
13 custom_base_url.to_string(),
14 );
15 println!("Custom base URL client created successfully");
16 let response = client_custom
17 .generate_content()
18 .with_system_prompt("You are a helpful assistant.")
19 .with_user_message("Hello, can you tell me a joke about programming?")
20 .with_generation_config(GenerationConfig {
21 temperature: Some(0.7),
22 max_output_tokens: Some(100),
23 ..Default::default()
24 })
25 .execute()
26 .await?;
27
28 println!("Response: {}", response.text());
29
30 Ok(())
31}
examples/gemini_pro_example.rs (line 35)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7 // Replace with your actual API key
8 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
9
10 // Create a Gemini client
11 let gemini = Gemini::pro(api_key);
12
13 // This example matches the exact curl request format:
14 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
15 // -H 'Content-Type: application/json' \
16 // -d '{
17 // "system_instruction": {
18 // "parts": [
19 // {
20 // "text": "You are a cat. Your name is Neko."
21 // }
22 // ]
23 // },
24 // "contents": [
25 // {
26 // "parts": [
27 // {
28 // "text": "Hello there"
29 // }
30 // ]
31 // }
32 // ]
33 // }'
34 let response = gemini
35 .generate_content()
36 .with_system_instruction("You are a cat. Your name is Neko.")
37 .with_user_message("Hello there")
38 .execute()
39 .await?;
40
41 // Print the response
42 println!("Response: {}", response.text());
43
44 Ok(())
45}
examples/blob.rs (line 34)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Image file path (in the same directory)
14 let image_path = Path::new(file!())
15 .parent()
16 .unwrap_or(Path::new("."))
17 .join("image-example.webp"); // Replace with your image filename
18
19 // Read the image file
20 let mut file = File::open(&image_path)?;
21 let mut buffer = Vec::new();
22 file.read_to_end(&mut buffer)?;
23
24 // Convert to base64
25 let data = general_purpose::STANDARD.encode(&buffer);
26
27 println!("Image loaded: {}", image_path.display());
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 println!("--- Describe Image ---");
33 let response = client
34 .generate_content()
35 .with_inline_data(data, "image/webp")
36 .with_response_mime_type("text/plain")
37 .with_generation_config(GenerationConfig {
38 temperature: Some(0.7),
39 max_output_tokens: Some(400),
40 ..Default::default()
41 })
42 .execute()
43 .await?;
44
45 println!("Response: {}", response.text());
46
47 Ok(())
48}
examples/mp4_describe.rs (line 25)
11async fn main() -> Result<(), Box<dyn std::error::Error>> {
12 // Read mp4 video file
13 let mut file = File::open("examples/sample.mp4")?;
14 let mut buffer = Vec::new();
15 file.read_to_end(&mut buffer)?;
16 let b64 = general_purpose::STANDARD.encode(&buffer);
17
18 // Get API key
19 let api_key = env::var("GEMINI_API_KEY")?;
20 let gemini = Gemini::pro(api_key);
21
22 // Example 1: Add mp4 blob using Message struct
23 let video_content = Content::inline_data("video/mp4", b64.clone());
24 let response1 = gemini
25 .generate_content()
26 .with_user_message("Please describe the content of this video (Message example)")
27 .with_message(gemini_rust::Message {
28 content: video_content,
29 role: gemini_rust::Role::User,
30 })
31 .execute()
32 .await?;
33
34 println!("AI description (Message): {}", response1.text());
35
36 // Example 2: Add mp4 blob directly using builder's with_inline_data
37 let response2 = gemini
38 .generate_content()
39 .with_user_message("Please describe the content of this video (with_inline_data example)")
40 .with_inline_data(b64, "video/mp4")
41 .execute()
42 .await?;
43
44 println!("AI description (with_inline_data): {}", response2.text());
45 Ok(())
46}
Additional examples can be found in:
- examples/curl_google_search.rs
- examples/curl_equivalent.rs
- examples/generation_config.rs
- examples/structured_response.rs
- examples/streaming.rs
- examples/batch_generate.rs
- examples/advanced.rs
- examples/simple.rs
- examples/thinking_basic.rs
- examples/google_search_with_functions.rs
- examples/batch_cancel.rs
- examples/thinking_curl_equivalent.rs
- examples/thinking_advanced.rs
- examples/tools.rs
Sourcepub fn embed_content(&self) -> EmbedBuilder
pub fn embed_content(&self) -> EmbedBuilder
Start building a content embedding request
Examples found in repository?
examples/embedding.rs (line 14)
4async fn main() -> Result<(), Box<dyn std::error::Error>> {
5 let api_key = std::env::var("GEMINI_API_KEY")?;
6
7    // Create client with the embedding model (models/text-embedding-004)
8 let client = Gemini::with_model(api_key, "models/text-embedding-004".to_string());
9
10 println!("Sending embedding request to Gemini API...");
11
12 // Simple text embedding
13 let response = client
14 .embed_content()
15 .with_text("Hello")
16 .with_task_type(TaskType::RetrievalDocument)
17 .execute()
18 .await?;
19
20 println!("Response: {:?}", response.embedding.values);
21
22 Ok(())
23}
More examples
examples/batch_embedding.rs (line 14)
4async fn main() -> Result<(), Box<dyn std::error::Error>> {
5 let api_key = std::env::var("GEMINI_API_KEY")?;
6
7    // Create client with the embedding model (models/text-embedding-004)
8 let client = Gemini::with_model(api_key, "models/text-embedding-004".to_string());
9
10 println!("Sending batch embedding request to Gemini API...");
11
12 // Simple text embedding
13 let response = client
14 .embed_content()
15 .with_chunks(vec!["Hello", "World", "Test embedding 3"])
16 .with_task_type(TaskType::RetrievalDocument)
17 .execute_batch()
18 .await?;
19
20 println!("Response: ");
21 for (i, e) in response.embeddings.iter().enumerate() {
22 println!("|{}|: {:?}\n", i, e.values);
23 }
24
25 Ok(())
26}
Sourcepub fn batch_generate_content_sync(&self) -> BatchBuilder
pub fn batch_generate_content_sync(&self) -> BatchBuilder
Start building a synchronous batch content generation request
Examples found in repository?
examples/batch_generate.rs (line 39)
18async fn main() -> Result<(), Box<dyn std::error::Error>> {
19 // Get the API key from the environment
20 let api_key = std::env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY not set");
21
22 // Create a new Gemini client
23 let gemini = Gemini::new(api_key);
24
25 // Create the first request
26 let request1 = gemini
27 .generate_content()
28 .with_message(Message::user("What is the meaning of life?"))
29 .build();
30
31 // Create the second request
32 let request2 = gemini
33 .generate_content()
34 .with_message(Message::user("What is the best programming language?"))
35 .build();
36
37 // Create the batch request
38 let batch = gemini
39 .batch_generate_content_sync()
40 .with_request(request1)
41 .with_request(request2)
42 .execute()
43 .await?;
44
45 // Print the batch information
46 println!("Batch created successfully!");
47 println!("Batch Name: {}", batch.name());
48
49 // Wait for the batch to complete
50 println!("Waiting for batch to complete...");
51 match batch.wait_for_completion(Duration::from_secs(5)).await {
52 Ok(final_status) => {
53 // Print the final status
54 match final_status {
55 BatchStatus::Succeeded { results } => {
56 println!("Batch succeeded!");
57 for item in results {
58 match item {
59 BatchResultItem::Success { key, response } => {
60 println!("--- Response for Key {} ---", key);
61 println!("{}", response.text());
62 }
63 BatchResultItem::Error { key, error } => {
64 println!("--- Error for Key {} ---", key);
65 println!("Code: {}, Message: {}", error.code, error.message);
66 if let Some(details) = &error.details {
67 println!("Details: {}", details);
68 }
69 }
70 }
71 }
72 }
73 BatchStatus::Cancelled => {
74 println!("Batch was cancelled.");
75 }
76 BatchStatus::Expired => {
77 println!("Batch expired.");
78 }
79 _ => {
80 println!(
81 "Batch finished with an unexpected status: {:?}",
82 final_status
83 );
84 }
85 }
86 }
87 Err((_batch, e)) => {
88 println!(
89 "Batch failed: {}. You can retry with the returned batch.",
90 e
91 );
92 // Here you could retry: batch.wait_for_completion(Duration::from_secs(5)).await, etc.
93 }
94 }
95
96 Ok(())
97}
More examples
examples/batch_cancel.rs (line 24)
15async fn main() -> Result<()> {
16 // Get the API key from the environment
17 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY must be set");
18
19 // Create the Gemini client
20 let gemini = Gemini::new(api_key);
21
22 // Create a batch with multiple requests
23 let mut batch_generate_content = gemini
24 .batch_generate_content_sync()
25 .with_name("batch_cancel_example".to_string());
26
27 // Add several requests to make the batch take some time to process
28 for i in 1..=10 {
29 let request = gemini
30 .generate_content()
31 .with_message(Message::user(format!(
32 "Write a creative story about a robot learning to paint, part {}. Make it at least 100 words long.",
33 i
34 )))
35 .build();
36
37 batch_generate_content = batch_generate_content.with_request(request);
38 }
39
40 // Build and start the batch
41 let batch = batch_generate_content.execute().await?;
42 println!("Batch created successfully!");
43 println!("Batch Name: {}", batch.name());
44 println!("Press CTRL-C to cancel the batch operation...");
45
46 // Wrap the batch in an Arc<Mutex<Option<Batch>>> to allow safe sharing
47 let batch = Arc::new(Mutex::new(Some(batch)));
48 let batch_clone = Arc::clone(&batch);
49
50 // Spawn a task to handle CTRL-C
51 let cancel_task = tokio::spawn(async move {
52 // Wait for CTRL-C signal
53 signal::ctrl_c().await.expect("Failed to listen for CTRL-C");
54 println!("Received CTRL-C, canceling batch operation...");
55
56 // Take the batch from the Option, leaving None.
57 // The lock is released immediately after this block.
58 let mut batch_to_cancel = batch_clone.lock().await;
59
60 if let Some(batch) = batch_to_cancel.take() {
61 // Cancel the batch operation
62 match batch.cancel().await {
63 Ok(()) => {
64 println!("Batch canceled successfully!");
65 }
66 Err((batch, e)) => {
67 println!("Failed to cancel batch: {}. Retrying...", e);
68 // Retry once
69 match batch.cancel().await {
70 Ok(()) => {
71 println!("Batch canceled successfully on retry!");
72 }
73 Err((_, retry_error)) => {
74 eprintln!("Failed to cancel batch even on retry: {}", retry_error);
75 }
76 }
77 }
78 }
79 } else {
80 println!("Batch was already processed.");
81 }
82 });
83
84 // Wait for a short moment to ensure the cancel task is ready
85 tokio::time::sleep(Duration::from_millis(100)).await;
86
87 // Wait for the batch to complete or be canceled
88 if let Some(batch) = batch.lock().await.take() {
89 println!("Waiting for batch to complete or be canceled...");
90 match batch.wait_for_completion(Duration::from_secs(5)).await {
91 Ok(final_status) => {
92 // Cancel task is no longer needed since batch completed
93 cancel_task.abort();
94
95 println!("Batch completed with status: {:?}", final_status);
96
97 // Print some details about the results
98 match final_status {
99 gemini_rust::BatchStatus::Succeeded { .. } => {
100 println!("Batch succeeded!");
101 }
102 gemini_rust::BatchStatus::Cancelled => {
103 println!("Batch was cancelled as requested.");
104 }
105 gemini_rust::BatchStatus::Expired => {
106 println!("Batch expired.");
107 }
108 _ => {
109 println!("Batch finished with an unexpected status.");
110 }
111 }
112 }
113 Err((batch, e)) => {
114 // This could happen if there was a network error while polling
115 println!("Error while waiting for batch completion: {}", e);
116
117 // Try one more time to get the status
118 match batch.status().await {
119 Ok(status) => println!("Current batch status: {:?}", status),
120 Err(status_error) => println!("Error getting final status: {}", status_error),
121 }
122 }
123 }
124 }
125
126 Ok(())
127}
Sourcepub fn get_batch(&self, name: &str) -> Batch
pub fn get_batch(&self, name: &str) -> Batch
Get a handle to a batch operation by its name.
Examples found in repository?
examples/batch_delete.rs (line 34)
23async fn main() -> Result<(), Box<dyn std::error::Error>> {
24 // Get the API key from the environment
25 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY not set");
26
27 // Get the batch name from the environment
28 let batch_name = env::var("BATCH_NAME").expect("BATCH_NAME not set");
29
30 // Create a new Gemini client
31 let gemini = Gemini::new(api_key);
32
33 // Get the batch operation
34 let batch = gemini.get_batch(&batch_name);
35
36 // Check the batch status
37 match batch.status().await {
38 Ok(status) => {
39 println!("Batch status: {:?}", status);
40
41 // Only delete completed batches (succeeded, failed, cancelled, or expired)
42 match status {
43 BatchStatus::Succeeded { .. } | BatchStatus::Cancelled | BatchStatus::Expired => {
44 println!("Deleting batch operation...");
45 // We need to handle the std::result::Result<(), (Batch, Error)> return type
46 match batch.delete().await {
47 Ok(()) => println!("Batch deleted successfully!"),
48 Err((_batch, e)) => {
49 println!("Failed to delete batch: {}. You can retry with the returned batch.", e);
50 // Here you could retry: batch.delete().await, etc.
51 }
52 }
53 }
54 _ => {
55 println!("Batch is still running or pending. Use cancel() to stop it, or wait for completion before deleting.");
56 }
57 }
58 }
59 Err(e) => println!("Failed to get batch status: {}", e),
60 }
61
62 Ok(())
63}
Sourcepub fn list_batches(
&self,
page_size: impl Into<Option<u32>>,
) -> impl Stream<Item = Result<BatchOperation>> + Send
pub fn list_batches( &self, page_size: impl Into<Option<u32>>, ) -> impl Stream<Item = Result<BatchOperation>> + Send
Lists batch operations.
This method returns a stream that handles pagination automatically.
Examples found in repository?
examples/batch_list.rs (line 26)
16async fn main() -> Result<(), Box<dyn std::error::Error>> {
17 // Get the API key from the environment
18 let api_key = std::env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY not set");
19
20 // Create a new Gemini client
21 let gemini = Gemini::new(api_key);
22
23 println!("Listing all batch operations...");
24
25 // List all batch operations using the stream
26 let stream = gemini.list_batches(5); // page_size of 5
27 tokio::pin!(stream);
28
29 while let Some(result) = stream.next().await {
30 match result {
31 Ok(operation) => {
32 println!(
33 " - Batch: {}, State: {:?}, Created: {}",
34 operation.name, operation.metadata.state, operation.metadata.create_time
35 );
36 }
37 Err(e) => {
38 eprintln!("Error fetching batch operation: {}", e);
39 }
40 }
41 }
42
43 println!("\nFinished listing operations.");
44
45 Ok(())
46}
Trait Implementations§
Auto Trait Implementations§
impl Freeze for Gemini
impl !RefUnwindSafe for Gemini
impl Send for Gemini
impl Sync for Gemini
impl Unpin for Gemini
impl !UnwindSafe for Gemini
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more