pub struct Gemini { /* private fields */ }
Expand description
Client for the Gemini API
Implementations§
Source§impl Gemini
impl Gemini
Sourcepub fn new(api_key: impl Into<String>) -> Self
pub fn new(api_key: impl Into<String>) -> Self
Create a new client with the specified API key
Examples found in repository?
examples/test_api.rs (line 9)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 let api_key = env::var("GEMINI_API_KEY")?;
7
8 // Create client with the default model (gemini-2.0-flash)
9 let client = Gemini::new(api_key);
10
11 println!("Sending request to Gemini API...");
12
13 // Simple text completion with minimal content
14 let response = client
15 .generate_content()
16 .with_user_message("Say hello")
17 .execute()
18 .await?;
19
20 println!("Response: {}", response.text());
21
22 Ok(())
23}
More examples
examples/google_search.rs (line 10)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 println!("--- Google Search tool example ---");
13
14 // Create a Google Search tool
15 let google_search_tool = Tool::google_search();
16
17 // Create a request with Google Search tool
18 let response = client
19 .generate_content()
20 .with_user_message("What is the current Google stock price?")
21 .with_tool(google_search_tool)
22 .execute()
23 .await?;
24
25 println!("Response: {}", response.text());
26
27 Ok(())
28}
examples/blob.rs (line 30)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Image file path (in the same directory)
14 let image_path = Path::new(file!())
15 .parent()
16 .unwrap_or(Path::new("."))
17 .join("image-example.webp"); // Replace with your image filename
18
19 // Read the image file
20 let mut file = File::open(&image_path)?;
21 let mut buffer = Vec::new();
22 file.read_to_end(&mut buffer)?;
23
24 // Convert to base64
25 let data = general_purpose::STANDARD.encode(&buffer);
26
27 println!("Image loaded: {}", image_path.display());
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 println!("--- Describe Image ---");
33 let response = client
34 .generate_content()
35 .with_inline_data(data, "image/webp")
36 .with_response_mime_type("text/plain")
37 .with_generation_config(GenerationConfig {
38 temperature: Some(0.7),
39 max_output_tokens: Some(400),
40 ..Default::default()
41 })
42 .execute()
43 .await?;
44
45 println!("Response: {}", response.text());
46
47 Ok(())
48}
examples/curl_google_search.rs (line 30)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 println!("--- Curl equivalent with Google Search tool ---");
10
11 // This is equivalent to the curl example:
12 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
13 // -H "Content-Type: application/json" \
14 // -d '{
15 // "contents": [
16 // {
17 // "parts": [
18 // {"text": "What is the current Google stock price?"}
19 // ]
20 // }
21 // ],
22 // "tools": [
23 // {
24 // "google_search": {}
25 // }
26 // ]
27 // }'
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 // Create a content part that matches the JSON in the curl example
33 let text_part = Part::Text {
34 text: "What is the current Google stock price?".to_string(),
35 thought: None,
36 };
37
38 let content = Content {
39 parts: vec![text_part],
40 role: None,
41 };
42
43 // Create a Google Search tool
44 let google_search_tool = Tool::google_search();
45
46 // Add the content and tool directly to the request
47 // This exactly mirrors the JSON structure in the curl example
48 let mut content_builder = client.generate_content();
49 content_builder.contents.push(content);
50 content_builder = content_builder.with_tool(google_search_tool);
51
52 let response = content_builder.execute().await?;
53
54 println!("Response: {}", response.text());
55
56 Ok(())
57}
examples/curl_equivalent.rs (line 26)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // This is equivalent to the curl example:
10 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$YOUR_API_KEY" \
11 // -H 'Content-Type: application/json' \
12 // -X POST \
13 // -d '{
14 // "contents": [
15 // {
16 // "parts": [
17 // {
18 // "text": "Explain how AI works in a few words"
19 // }
20 // ]
21 // }
22 // ]
23 // }'
24
25 // Create client - now using gemini-2.0-flash by default
26 let client = Gemini::new(api_key);
27
28 // Method 1: Using the high-level API (simplest approach)
29 println!("--- Method 1: Using the high-level API ---");
30
31 let response = client
32 .generate_content()
33 .with_user_message("Explain how AI works in a few words")
34 .execute()
35 .await?;
36
37 println!("Response: {}", response.text());
38
39 // Method 2: Using Content directly to match the curl example exactly
40 println!("\n--- Method 2: Matching curl example structure exactly ---");
41
42 // Create a content part that matches the JSON in the curl example
43 let text_part = Part::Text {
44 text: "Explain how AI works in a few words".to_string(),
45 thought: None,
46 };
47
48 let content = Content {
49 parts: vec![text_part],
50 role: None,
51 };
52
53 // Add the content directly to the request
54 // This exactly mirrors the JSON structure in the curl example
55 let mut content_builder = client.generate_content();
56 content_builder.contents.push(content);
57 let response = content_builder.execute().await?;
58
59 println!("Response: {}", response.text());
60
61 Ok(())
62}
examples/generation_config.rs (line 10)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 // Using the full generation config
13 println!("--- Using full generation config ---");
14 let response1 = client
15 .generate_content()
16 .with_system_prompt("You are a helpful assistant.")
17 .with_user_message("Write a short poem about Rust programming language.")
18 .with_generation_config(GenerationConfig {
19 temperature: Some(0.9),
20 top_p: Some(0.8),
21 top_k: Some(20),
22 max_output_tokens: Some(200),
23 candidate_count: Some(1),
24 stop_sequences: Some(vec!["END".to_string()]),
25 response_mime_type: None,
26 response_schema: None,
27 thinking_config: None,
28 })
29 .execute()
30 .await?;
31
32 println!(
33 "Response with high temperature (0.9):\n{}\n",
34 response1.text()
35 );
36
37 // Using individual generation parameters
38 println!("--- Using individual generation parameters ---");
39 let response2 = client
40 .generate_content()
41 .with_system_prompt("You are a helpful assistant.")
42 .with_user_message("Write a short poem about Rust programming language.")
43 .with_temperature(0.2)
44 .with_max_output_tokens(100)
45 .execute()
46 .await?;
47
48 println!(
49 "Response with low temperature (0.2):\n{}\n",
50 response2.text()
51 );
52
53 // Setting multiple parameters individually
54 println!("--- Setting multiple parameters individually ---");
55 let response3 = client
56 .generate_content()
57 .with_system_prompt("You are a helpful assistant.")
58 .with_user_message("List 3 benefits of using Rust.")
59 .with_temperature(0.7)
60 .with_top_p(0.9)
61 .with_max_output_tokens(150)
62 .with_stop_sequences(vec!["4.".to_string()])
63 .execute()
64 .await?;
65
66 println!(
67 "Response with custom parameters and stop sequence:\n{}",
68 response3.text()
69 );
70
71 Ok(())
72}
Sourcepub fn pro(api_key: impl Into<String>) -> Self
pub fn pro(api_key: impl Into<String>) -> Self
Create a new client for the Gemini Pro model
Examples found in repository?
examples/gemini_pro_example.rs (line 11)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7 // Replace with your actual API key
8 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
9
10 // Create a Gemini client
11 let gemini = Gemini::pro(api_key);
12
13 // This example matches the exact curl request format:
14 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
15 // -H 'Content-Type: application/json' \
16 // -d '{
17 // "system_instruction": {
18 // "parts": [
19 // {
20 // "text": "You are a cat. Your name is Neko."
21 // }
22 // ]
23 // },
24 // "contents": [
25 // {
26 // "parts": [
27 // {
28 // "text": "Hello there"
29 // }
30 // ]
31 // }
32 // ]
33 // }'
34 let response = gemini
35 .generate_content()
36 .with_system_instruction("You are a cat. Your name is Neko.")
37 .with_user_message("Hello there")
38 .execute()
39 .await?;
40
41 // Print the response
42 println!("Response: {}", response.text());
43
44 Ok(())
45}
Sourcepub fn with_model(api_key: impl Into<String>, model: String) -> Self
pub fn with_model(api_key: impl Into<String>, model: String) -> Self
Create a new client with the specified API key and model
Examples found in repository?
examples/embedding.rs (line 8)
4async fn main() -> Result<(), Box<dyn std::error::Error>> {
5 let api_key = std::env::var("GEMINI_API_KEY")?;
6
7 // Create client with the embedding model (text-embedding-004)
8 let client = Gemini::with_model(api_key, "models/text-embedding-004".to_string());
9
10 println!("Sending embedding request to Gemini API...");
11
12 // Simple text embedding
13 let response = client
14 .embed_content()
15 .with_text("Hello")
16 .with_task_type(TaskType::RetrievalDocument)
17 .execute()
18 .await?;
19
20 println!("Response: {:?}", response.embedding.values);
21
22 Ok(())
23}
More examples
examples/batch_embedding.rs (line 8)
4async fn main() -> Result<(), Box<dyn std::error::Error>> {
5 let api_key = std::env::var("GEMINI_API_KEY")?;
6
7 // Create client with the embedding model (text-embedding-004)
8 let client = Gemini::with_model(api_key, "models/text-embedding-004".to_string());
9
10 println!("Sending batch embedding request to Gemini API...");
11
12 // Batch text embedding of multiple chunks
13 let response = client
14 .embed_content()
15 .with_chunks(vec!["Hello", "World", "Test embedding 3"])
16 .with_task_type(TaskType::RetrievalDocument)
17 .execute_batch()
18 .await?;
19
20 println!("Response: ");
21 for (i, e) in response.embeddings.iter().enumerate() {
22 println!("|{}|: {:?}\n", i, e.values);
23 }
24
25 Ok(())
26}
examples/thinking_basic.rs (line 10)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
11
12 println!("=== Gemini 2.5 Thinking Basic Example ===\n");
13
14 // Example 1: Using default dynamic thinking
15 println!(
16 "--- Example 1: Dynamic thinking (model automatically determines thinking budget) ---"
17 );
18 let response1 = client
19 .generate_content()
20 .with_system_prompt("You are a helpful mathematics assistant.")
21 .with_user_message(
22 "Explain Occam's razor principle and provide a simple example from daily life.",
23 )
24 .with_dynamic_thinking()
25 .with_thoughts_included(true)
26 .execute()
27 .await?;
28
29 // Display thinking process
30 let thoughts = response1.thoughts();
31 if !thoughts.is_empty() {
32 println!("Thinking summary:");
33 for (i, thought) in thoughts.iter().enumerate() {
34 println!("Thought {}: {}\n", i + 1, thought);
35 }
36 }
37
38 println!("Answer: {}\n", response1.text());
39
40 // Display token usage
41 if let Some(usage) = &response1.usage_metadata {
42 println!("Token usage:");
43 println!(" Prompt tokens: {}", usage.prompt_token_count);
44 println!(
45 " Response tokens: {}",
46 usage.candidates_token_count.unwrap_or(0)
47 );
48 if let Some(thinking_tokens) = usage.thoughts_token_count {
49 println!(" Thinking tokens: {}", thinking_tokens);
50 }
51 println!(" Total tokens: {}\n", usage.total_token_count);
52 }
53
54 // Example 2: Set specific thinking budget
55 println!("--- Example 2: Set thinking budget (1024 tokens) ---");
56 let response2 = client
57 .generate_content()
58 .with_system_prompt("You are a helpful programming assistant.")
59 .with_user_message("List 3 main advantages of using the Rust programming language")
60 .with_thinking_budget(1024)
61 .with_thoughts_included(true)
62 .execute()
63 .await?;
64
65 // Display thinking process
66 let thoughts2 = response2.thoughts();
67 if !thoughts2.is_empty() {
68 println!("Thinking summary:");
69 for (i, thought) in thoughts2.iter().enumerate() {
70 println!("Thought {}: {}\n", i + 1, thought);
71 }
72 }
73
74 println!("Answer: {}\n", response2.text());
75
76 // Example 3: Disable thinking feature
77 println!("--- Example 3: Disable thinking feature ---");
78 let response3 = client
79 .generate_content()
80 .with_system_prompt("You are a helpful assistant.")
81 .with_user_message("What is artificial intelligence?")
82 .execute()
83 .await?;
84
85 println!("Answer: {}\n", response3.text());
86
87 // Example 4: Use GenerationConfig to set thinking
88 println!("--- Example 4: Use GenerationConfig to set thinking ---");
89 let thinking_config = ThinkingConfig::new()
90 .with_thinking_budget(2048)
91 .with_thoughts_included(true);
92
93 let generation_config = GenerationConfig {
94 temperature: Some(0.7),
95 max_output_tokens: Some(500),
96 thinking_config: Some(thinking_config),
97 ..Default::default()
98 };
99
100 let response4 = client
101 .generate_content()
102 .with_system_prompt("You are a creative writing assistant.")
103 .with_user_message(
104 "Write the opening of a short story about a robot learning to feel emotions.",
105 )
106 .with_generation_config(generation_config)
107 .execute()
108 .await?;
109
110 // Display thinking process
111 let thoughts4 = response4.thoughts();
112 if !thoughts4.is_empty() {
113 println!("Thinking summary:");
114 for (i, thought) in thoughts4.iter().enumerate() {
115 println!("Thought {}: {}\n", i + 1, thought);
116 }
117 }
118
119 println!("Answer: {}\n", response4.text());
120
121 Ok(())
122}
examples/thinking_curl_equivalent.rs (line 33)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // This is equivalent to the following curl example:
10 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:generateContent" \
11 // -H "x-goog-api-key: $GEMINI_API_KEY" \
12 // -H 'Content-Type: application/json' \
13 // -X POST \
14 // -d '{
15 // "contents": [
16 // {
17 // "parts": [
18 // {
19 // "text": "Provide a list of the top 3 famous physicists and their major contributions"
20 // }
21 // ]
22 // }
23 // ],
24 // "generationConfig": {
25 // "thinkingConfig": {
26 // "thinkingBudget": 1024,
27 // "includeThoughts": true
28 // }
29 // }
30 // }'
31
32 // Create client
33 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
34
35 println!("=== Thinking Curl Equivalent Example ===\n");
36
37 // Method 1: Using high-level API (simplest approach)
38 println!("--- Method 1: Using high-level API ---");
39
40 let response1 = client
41 .generate_content()
42 .with_user_message(
43 "Provide a list of the top 3 famous physicists and their major contributions",
44 )
45 .with_thinking_budget(1024)
46 .with_thoughts_included(true)
47 .execute()
48 .await?;
49
50 // Display thinking process
51 let thoughts1 = response1.thoughts();
52 if !thoughts1.is_empty() {
53 println!("Thinking summary:");
54 for (i, thought) in thoughts1.iter().enumerate() {
55 println!("Thought {}: {}\n", i + 1, thought);
56 }
57 }
58
59 println!("Answer: {}\n", response1.text());
60
61 // Method 2: Using GenerationConfig to fully match curl example structure
62 println!("--- Method 2: Fully matching curl example structure ---");
63
64 let thinking_config = ThinkingConfig {
65 thinking_budget: Some(1024),
66 include_thoughts: Some(true),
67 };
68
69 let generation_config = GenerationConfig {
70 thinking_config: Some(thinking_config),
71 ..Default::default()
72 };
73
74 let response2 = client
75 .generate_content()
76 .with_user_message(
77 "Provide a list of the top 3 famous physicists and their major contributions",
78 )
79 .with_generation_config(generation_config)
80 .execute()
81 .await?;
82
83 // Display thinking process
84 let thoughts2 = response2.thoughts();
85 if !thoughts2.is_empty() {
86 println!("Thinking summary:");
87 for (i, thought) in thoughts2.iter().enumerate() {
88 println!("Thought {}: {}\n", i + 1, thought);
89 }
90 }
91
92 println!("Answer: {}\n", response2.text());
93
94 // Show token usage
95 if let Some(usage) = &response2.usage_metadata {
96 println!("Token usage:");
97 println!(" Prompt tokens: {}", usage.prompt_token_count);
98 println!(
99 " Response tokens: {}",
100 usage.candidates_token_count.unwrap_or(0)
101 );
102 if let Some(thinking_tokens) = usage.thoughts_token_count {
103 println!(" Thinking tokens: {}", thinking_tokens);
104 }
105 println!(" Total tokens: {}", usage.total_token_count);
106 }
107
108 // Method 3: Demonstrate different thinking budget settings
109 println!("\n--- Method 3: Different thinking budget comparison ---");
110
111 // Thinking disabled
112 println!("Thinking disabled:");
113 let response_no_thinking = client
114 .generate_content()
115 .with_user_message("Explain the basic principles of quantum mechanics")
116 .execute()
117 .await?;
118 println!("Answer: {}\n", response_no_thinking.text());
119
120 // Dynamic thinking
121 println!("Dynamic thinking:");
122 let response_dynamic = client
123 .generate_content()
124 .with_user_message("Explain the basic principles of quantum mechanics")
125 .with_dynamic_thinking()
126 .with_thoughts_included(true)
127 .execute()
128 .await?;
129
130 let thoughts_dynamic = response_dynamic.thoughts();
131 if !thoughts_dynamic.is_empty() {
132 println!("Thinking summary:");
133 for (i, thought) in thoughts_dynamic.iter().enumerate() {
134 println!("Thought {}: {}\n", i + 1, thought);
135 }
136 }
137 println!("Answer: {}\n", response_dynamic.text());
138
139 // High thinking budget
140 println!("High thinking budget (4096 tokens):");
141 let response_high_budget = client
142 .generate_content()
143 .with_user_message("Explain the basic principles of quantum mechanics")
144 .with_thinking_budget(4096)
145 .with_thoughts_included(true)
146 .execute()
147 .await?;
148
149 let thoughts_high = response_high_budget.thoughts();
150 if !thoughts_high.is_empty() {
151 println!("Thinking summary:");
152 for (i, thought) in thoughts_high.iter().enumerate() {
153 println!("Thought {}: {}\n", i + 1, thought);
154 }
155 }
156 println!("Answer: {}", response_high_budget.text());
157
158 Ok(())
159}
examples/thinking_advanced.rs (line 13)
8async fn main() -> Result<(), Box<dyn std::error::Error>> {
9 // Get API key from environment variable
10 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
11
12 // Create client
13 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
14
15 println!("=== Gemini 2.5 Thinking Advanced Example ===\n");
16
17 // Example 1: Streaming with thinking
18 println!("--- Example 1: Streaming with thinking ---");
19 let mut stream = client
20 .generate_content()
21 .with_system_prompt("You are a mathematics expert skilled at solving complex mathematical problems.")
22 .with_user_message("Solve this math problem: Find the sum of the first 50 prime numbers. Please explain your solution process in detail.")
23 .with_thinking_budget(2048)
24 .with_thoughts_included(true)
25 .execute_stream()
26 .await?;
27
28 println!("Streaming response:");
29 let mut thoughts_shown = false;
30 while let Some(chunk_result) = stream.next().await {
31 match chunk_result {
32 Ok(chunk) => {
33 // Check if there's thinking content
34 let thoughts = chunk.thoughts();
35 if !thoughts.is_empty() && !thoughts_shown {
36 println!("\nThinking process:");
37 for (i, thought) in thoughts.iter().enumerate() {
38 println!("Thought {}: {}", i + 1, thought);
39 }
40 println!("\nAnswer:");
41 thoughts_shown = true;
42 }
43
44 // Display general text content
45 print!("{}", chunk.text());
46 std::io::Write::flush(&mut std::io::stdout())?;
47 }
48 Err(e) => eprintln!("Streaming error: {}", e),
49 }
50 }
51 println!("\n");
52
53 // Example 2: Thinking combined with function calls
54 println!("--- Example 2: Thinking combined with function calls ---");
55
56 // Define a calculator function
57 let calculator = FunctionDeclaration::new(
58 "calculate",
59 "Perform basic mathematical calculations",
60 FunctionParameters::object()
61 .with_property(
62 "expression",
63 PropertyDetails::string(
64 "The mathematical expression to calculate, e.g., '2 + 3 * 4'",
65 ),
66 true,
67 )
68 .with_property(
69 "operation_type",
70 PropertyDetails::enum_type("Type of calculation", ["arithmetic", "advanced"]),
71 false,
72 ),
73 );
74
75 let response = client
76 .generate_content()
77 .with_system_prompt("You are a mathematics assistant. When calculations are needed, use the provided calculator function.")
78 .with_user_message("Calculate the result of (15 + 25) * 3 - 8 and explain the calculation steps.")
79 .with_function(calculator)
80 .with_thinking_budget(1024)
81 .with_thoughts_included(true)
82 .execute()
83 .await?;
84
85 // Display thinking process
86 let thoughts = response.thoughts();
87 if !thoughts.is_empty() {
88 println!("Thinking process:");
89 for (i, thought) in thoughts.iter().enumerate() {
90 println!("Thought {}: {}\n", i + 1, thought);
91 }
92 }
93
94 // Check for function calls
95 let function_calls = response.function_calls();
96 if !function_calls.is_empty() {
97 println!("Function calls:");
98 for (i, call) in function_calls.iter().enumerate() {
99 println!("Call {}: {} Args: {}", i + 1, call.name, call.args);
100 }
101 println!();
102 }
103
104 println!("Answer: {}\n", response.text());
105
106 // Example 3: Complex reasoning task
107 println!("--- Example 3: Complex reasoning task ---");
108 let complex_response = client
109 .generate_content()
110 .with_system_prompt("You are a logical reasoning expert.")
111 .with_user_message(
112 "There are three people: Alice, Bob, and Carol, who live in red, green, and blue houses respectively.\
113 Given:\
114 1. The person in the red house owns a cat\
115 2. Bob does not live in the green house\
116 3. Carol owns a dog\
117 4. The green house is to the left of the red house\
118 5. Alice does not own a cat\
119 Please reason out which color house each person lives in and what pets they own.",
120 )
121 .with_thinking_config(
122 ThinkingConfig::new()
123 .with_thinking_budget(3072)
124 .with_thoughts_included(true),
125 )
126 .execute()
127 .await?;
128
129 // Display thinking process
130 let complex_thoughts = complex_response.thoughts();
131 if !complex_thoughts.is_empty() {
132 println!("Reasoning process:");
133 for (i, thought) in complex_thoughts.iter().enumerate() {
134 println!("Reasoning step {}: {}\n", i + 1, thought);
135 }
136 }
137
138 println!("Conclusion: {}\n", complex_response.text());
139
140 // Display token usage statistics
141 if let Some(usage) = &complex_response.usage_metadata {
142 println!("Token usage statistics:");
143 println!(" Prompt tokens: {}", usage.prompt_token_count);
144 println!(
145 " Response tokens: {}",
146 usage.candidates_token_count.unwrap_or(0)
147 );
148 if let Some(thinking_tokens) = usage.thoughts_token_count {
149 println!(" Thinking tokens: {}", thinking_tokens);
150 }
151 println!(" Total tokens: {}", usage.total_token_count);
152 }
153
154 Ok(())
155}
Sourcepub fn with_base_url(api_key: impl Into<String>, base_url: String) -> Self
pub fn with_base_url(api_key: impl Into<String>, base_url: String) -> Self
Create a new client with custom base URL
Sourcepub fn with_model_and_base_url(
api_key: impl Into<String>,
model: String,
base_url: String,
) -> Self
pub fn with_model_and_base_url( api_key: impl Into<String>, model: String, base_url: String, ) -> Self
Create a new client with the specified API key, model, and base URL
Examples found in repository?
examples/custom_base_url.rs (lines 10-14)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8 // Using custom base URL
9 let custom_base_url = "https://generativelanguage.googleapis.com/v1beta/";
10 let client_custom = Gemini::with_model_and_base_url(
11 api_key,
12 "models/gemini-2.5-flash-lite-preview-06-17".to_string(),
13 custom_base_url.to_string(),
14 );
15 println!("Custom base URL client created successfully");
16 let response = client_custom
17 .generate_content()
18 .with_system_prompt("You are a helpful assistant.")
19 .with_user_message("Hello, can you tell me a joke about programming?")
20 .with_generation_config(GenerationConfig {
21 temperature: Some(0.7),
22 max_output_tokens: Some(100),
23 ..Default::default()
24 })
25 .execute()
26 .await?;
27
28 println!("Response: {}", response.text());
29
30 Ok(())
31}
Sourcepub fn generate_content(&self) -> ContentBuilder
pub fn generate_content(&self) -> ContentBuilder
Start building a content generation request
Examples found in repository?
examples/test_api.rs (line 15)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 let api_key = env::var("GEMINI_API_KEY")?;
7
8 // Create client with the default model (gemini-2.0-flash)
9 let client = Gemini::new(api_key);
10
11 println!("Sending request to Gemini API...");
12
13 // Simple text completion with minimal content
14 let response = client
15 .generate_content()
16 .with_user_message("Say hello")
17 .execute()
18 .await?;
19
20 println!("Response: {}", response.text());
21
22 Ok(())
23}
More examples
examples/google_search.rs (line 19)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 println!("--- Google Search tool example ---");
13
14 // Create a Google Search tool
15 let google_search_tool = Tool::google_search();
16
17 // Create a request with Google Search tool
18 let response = client
19 .generate_content()
20 .with_user_message("What is the current Google stock price?")
21 .with_tool(google_search_tool)
22 .execute()
23 .await?;
24
25 println!("Response: {}", response.text());
26
27 Ok(())
28}
examples/custom_base_url.rs (line 17)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8 // Using custom base URL
9 let custom_base_url = "https://generativelanguage.googleapis.com/v1beta/";
10 let client_custom = Gemini::with_model_and_base_url(
11 api_key,
12 "models/gemini-2.5-flash-lite-preview-06-17".to_string(),
13 custom_base_url.to_string(),
14 );
15 println!("Custom base URL client created successfully");
16 let response = client_custom
17 .generate_content()
18 .with_system_prompt("You are a helpful assistant.")
19 .with_user_message("Hello, can you tell me a joke about programming?")
20 .with_generation_config(GenerationConfig {
21 temperature: Some(0.7),
22 max_output_tokens: Some(100),
23 ..Default::default()
24 })
25 .execute()
26 .await?;
27
28 println!("Response: {}", response.text());
29
30 Ok(())
31}
examples/gemini_pro_example.rs (line 35)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7 // Replace with your actual API key
8 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
9
10 // Create a Gemini client
11 let gemini = Gemini::pro(api_key);
12
13 // This example matches the exact curl request format:
14 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
15 // -H 'Content-Type: application/json' \
16 // -d '{
17 // "system_instruction": {
18 // "parts": [
19 // {
20 // "text": "You are a cat. Your name is Neko."
21 // }
22 // ]
23 // },
24 // "contents": [
25 // {
26 // "parts": [
27 // {
28 // "text": "Hello there"
29 // }
30 // ]
31 // }
32 // ]
33 // }'
34 let response = gemini
35 .generate_content()
36 .with_system_instruction("You are a cat. Your name is Neko.")
37 .with_user_message("Hello there")
38 .execute()
39 .await?;
40
41 // Print the response
42 println!("Response: {}", response.text());
43
44 Ok(())
45}
examples/blob.rs (line 34)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Image file path (in the same directory)
14 let image_path = Path::new(file!())
15 .parent()
16 .unwrap_or(Path::new("."))
17 .join("image-example.webp"); // Replace with your image filename
18
19 // Read the image file
20 let mut file = File::open(&image_path)?;
21 let mut buffer = Vec::new();
22 file.read_to_end(&mut buffer)?;
23
24 // Convert to base64
25 let data = general_purpose::STANDARD.encode(&buffer);
26
27 println!("Image loaded: {}", image_path.display());
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 println!("--- Describe Image ---");
33 let response = client
34 .generate_content()
35 .with_inline_data(data, "image/webp")
36 .with_response_mime_type("text/plain")
37 .with_generation_config(GenerationConfig {
38 temperature: Some(0.7),
39 max_output_tokens: Some(400),
40 ..Default::default()
41 })
42 .execute()
43 .await?;
44
45 println!("Response: {}", response.text());
46
47 Ok(())
48}
examples/curl_google_search.rs (line 48)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 println!("--- Curl equivalent with Google Search tool ---");
10
11 // This is equivalent to the curl example:
12 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
13 // -H "Content-Type: application/json" \
14 // -d '{
15 // "contents": [
16 // {
17 // "parts": [
18 // {"text": "What is the current Google stock price?"}
19 // ]
20 // }
21 // ],
22 // "tools": [
23 // {
24 // "google_search": {}
25 // }
26 // ]
27 // }'
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 // Create a content part that matches the JSON in the curl example
33 let text_part = Part::Text {
34 text: "What is the current Google stock price?".to_string(),
35 thought: None,
36 };
37
38 let content = Content {
39 parts: vec![text_part],
40 role: None,
41 };
42
43 // Create a Google Search tool
44 let google_search_tool = Tool::google_search();
45
46 // Add the content and tool directly to the request
47 // This exactly mirrors the JSON structure in the curl example
48 let mut content_builder = client.generate_content();
49 content_builder.contents.push(content);
50 content_builder = content_builder.with_tool(google_search_tool);
51
52 let response = content_builder.execute().await?;
53
54 println!("Response: {}", response.text());
55
56 Ok(())
57}
Additional examples can be found in the repository's examples directory.
Sourcepub fn embed_content(&self) -> EmbedBuilder
pub fn embed_content(&self) -> EmbedBuilder
Start building a content embedding request
Examples found in repository?
examples/embedding.rs (line 14)
4async fn main() -> Result<(), Box<dyn std::error::Error>> {
5 let api_key = std::env::var("GEMINI_API_KEY")?;
6
7 // Create client with the embedding model (text-embedding-004)
8 let client = Gemini::with_model(api_key, "models/text-embedding-004".to_string());
9
10 println!("Sending embedding request to Gemini API...");
11
12 // Simple text embedding
13 let response = client
14 .embed_content()
15 .with_text("Hello")
16 .with_task_type(TaskType::RetrievalDocument)
17 .execute()
18 .await?;
19
20 println!("Response: {:?}", response.embedding.values);
21
22 Ok(())
23}
More examples
examples/batch_embedding.rs (line 14)
4async fn main() -> Result<(), Box<dyn std::error::Error>> {
5 let api_key = std::env::var("GEMINI_API_KEY")?;
6
7 // Create client with the embedding model (text-embedding-004)
8 let client = Gemini::with_model(api_key, "models/text-embedding-004".to_string());
9
10 println!("Sending batch embedding request to Gemini API...");
11
12 // Batch text embedding of multiple chunks
13 let response = client
14 .embed_content()
15 .with_chunks(vec!["Hello", "World", "Test embedding 3"])
16 .with_task_type(TaskType::RetrievalDocument)
17 .execute_batch()
18 .await?;
19
20 println!("Response: ");
21 for (i, e) in response.embeddings.iter().enumerate() {
22 println!("|{}|: {:?}\n", i, e.values);
23 }
24
25 Ok(())
26}
Trait Implementations§
Auto Trait Implementations§
impl Freeze for Gemini
impl !RefUnwindSafe for Gemini
impl Send for Gemini
impl Sync for Gemini
impl Unpin for Gemini
impl !UnwindSafe for Gemini
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more