pub struct Gemini { /* private fields */ }
Client for the Gemini API
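A minimal usage sketch, assuming the constructors and builder methods documented below; the #[tokio::main] attribute and the use gemini_rust::Gemini; import mirror how the repository examples are set up and are assumptions rather than part of this page:

use gemini_rust::Gemini;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Construct a client; new() uses the default model, while pro(),
    // with_model(), and the other constructors below select alternatives.
    let client = Gemini::new(std::env::var("GEMINI_API_KEY")?);

    // Build and execute a simple text generation request.
    let response = client
        .generate_content()
        .with_user_message("Say hello")
        .execute()
        .await?;
    println!("{}", response.text());

    Ok(())
}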
Implementations
impl Gemini
pub fn new(api_key: impl Into<String>) -> Self
Create a new client with the specified API key
Examples found in repository:
examples/test_api.rs (line 9)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 let api_key = env::var("GEMINI_API_KEY")?;
7
8 // Create client with the default model (gemini-2.0-flash)
9 let client = Gemini::new(api_key);
10
11 println!("Sending request to Gemini API...");
12
13 // Simple text completion with minimal content
14 let response = client
15 .generate_content()
16 .with_user_message("Say hello")
17 .execute()
18 .await?;
19
20 println!("Response: {}", response.text());
21
22 Ok(())
23}
More examples
examples/google_search.rs (line 10)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 println!("--- Google Search tool example ---");
13
14 // Create a Google Search tool
15 let google_search_tool = Tool::google_search();
16
17 // Create a request with Google Search tool
18 let response = client
19 .generate_content()
20 .with_user_message("What is the current Google stock price?")
21 .with_tool(google_search_tool)
22 .execute()
23 .await?;
24
25 println!("Response: {}", response.text());
26
27 Ok(())
28}
examples/batch_list.rs (line 21)
16async fn main() -> Result<(), Box<dyn std::error::Error>> {
17 // Get the API key from the environment
18 let api_key = std::env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY not set");
19
20 // Create a new Gemini client
21 let gemini = Gemini::new(api_key);
22
23 println!("Listing all batch operations...");
24
25 // List all batch operations using the stream
26 let stream = gemini.list_batches(5); // page_size of 5
27 tokio::pin!(stream);
28
29 while let Some(result) = stream.next().await {
30 match result {
31 Ok(operation) => {
32 println!(
33 " - Batch: {}, State: {:?}, Created: {}",
34 operation.name, operation.metadata.state, operation.metadata.create_time
35 );
36 }
37 Err(e) => {
38 eprintln!("Error fetching batch operation: {}", e);
39 }
40 }
41 }
42
43 println!("\nFinished listing operations.");
44
45 Ok(())
46}
examples/blob.rs (line 30)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Image file path (in the same directory)
14 let image_path = Path::new(file!())
15 .parent()
16 .unwrap_or(Path::new("."))
17 .join("image-example.webp"); // Replace with your image filename
18
19 // Read the image file
20 let mut file = File::open(&image_path)?;
21 let mut buffer = Vec::new();
22 file.read_to_end(&mut buffer)?;
23
24 // Convert to base64
25 let data = general_purpose::STANDARD.encode(&buffer);
26
27 println!("Image loaded: {}", image_path.display());
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 println!("--- Describe Image ---");
33 let response = client
34 .generate_content()
35 .with_inline_data(data, "image/webp")
36 .with_response_mime_type("text/plain")
37 .with_generation_config(GenerationConfig {
38 temperature: Some(0.7),
39 max_output_tokens: Some(400),
40 ..Default::default()
41 })
42 .execute()
43 .await?;
44
45 println!("Response: {}", response.text());
46
47 Ok(())
48}
examples/batch_delete.rs (line 31)
23async fn main() -> Result<(), Box<dyn std::error::Error>> {
24 // Get the API key from the environment
25 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY not set");
26
27 // Get the batch name from the environment
28 let batch_name = env::var("BATCH_NAME").expect("BATCH_NAME not set");
29
30 // Create a new Gemini client
31 let gemini = Gemini::new(api_key);
32
33 // Get the batch operation
34 let batch = gemini.get_batch(&batch_name);
35
36 // Check the batch status
37 match batch.status().await {
38 Ok(status) => {
39 println!("Batch status: {:?}", status);
40
41 // Only delete completed batches (succeeded, failed, cancelled, or expired)
42 match status {
43 BatchStatus::Succeeded { .. } | BatchStatus::Cancelled | BatchStatus::Expired => {
44 println!("Deleting batch operation...");
45 // We need to handle the std::result::Result<(), (Batch, Error)> return type
46 match batch.delete().await {
47 Ok(()) => println!("Batch deleted successfully!"),
48 Err((_batch, e)) => {
49 println!("Failed to delete batch: {}. You can retry with the returned batch.", e);
50 // Here you could retry: batch.delete().await, etc.
51 }
52 }
53 }
54 _ => {
55 println!("Batch is still running or pending. Use cancel() to stop it, or wait for completion before deleting.");
56 }
57 }
58 }
59 Err(e) => println!("Failed to get batch status: {}", e),
60 }
61
62 Ok(())
63}
examples/curl_google_search.rs (line 30)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 println!("--- Curl equivalent with Google Search tool ---");
10
11 // This is equivalent to the curl example:
12 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
13 // -H "Content-Type: application/json" \
14 // -d '{
15 // "contents": [
16 // {
17 // "parts": [
18 // {"text": "What is the current Google stock price?"}
19 // ]
20 // }
21 // ],
22 // "tools": [
23 // {
24 // "google_search": {}
25 // }
26 // ]
27 // }'
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 // Create a content part that matches the JSON in the curl example
33 let text_part = Part::Text {
34 text: "What is the current Google stock price?".to_string(),
35 thought: None,
36 };
37
38 let content = Content {
39 parts: vec![text_part].into(),
40 role: None,
41 };
42
43 // Create a Google Search tool
44 let google_search_tool = Tool::google_search();
45
46 // Add the content and tool directly to the request
47 // This exactly mirrors the JSON structure in the curl example
48 let mut content_builder = client.generate_content();
49 content_builder.contents.push(content);
50 content_builder = content_builder.with_tool(google_search_tool);
51
52 let response = content_builder.execute().await?;
53
54 println!("Response: {}", response.text());
55
56 Ok(())
57}
pub fn pro(api_key: impl Into<String>) -> Self
Create a new client for the Gemini Pro model
Examples found in repository:
examples/gemini_pro_example.rs (line 11)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    // Get the API key from the environment variable
8 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
9
10 // Create a Gemini client
11 let gemini = Gemini::pro(api_key);
12
13 // This example matches the exact curl request format:
14 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
15 // -H 'Content-Type: application/json' \
16 // -d '{
17 // "system_instruction": {
18 // "parts": [
19 // {
20 // "text": "You are a cat. Your name is Neko."
21 // }
22 // ]
23 // },
24 // "contents": [
25 // {
26 // "parts": [
27 // {
28 // "text": "Hello there"
29 // }
30 // ]
31 // }
32 // ]
33 // }'
34 let response = gemini
35 .generate_content()
36 .with_system_instruction("You are a cat. Your name is Neko.")
37 .with_user_message("Hello there")
38 .execute()
39 .await?;
40
41 // Print the response
42 println!("Response: {}", response.text());
43
44 Ok(())
45}
More examples
examples/mp4_describe.rs (line 20)
11async fn main() -> Result<(), Box<dyn std::error::Error>> {
12 // Read mp4 video file
13 let mut file = File::open("examples/sample.mp4")?;
14 let mut buffer = Vec::new();
15 file.read_to_end(&mut buffer)?;
16 let b64 = general_purpose::STANDARD.encode(&buffer);
17
18 // Get API key
19 let api_key = env::var("GEMINI_API_KEY")?;
20 let gemini = Gemini::pro(api_key);
21
22 // Example 1: Add mp4 blob using Message struct
23 let video_content = Content::inline_data("video/mp4", b64.clone());
24 let response1 = gemini
25 .generate_content()
26 .with_user_message("Please describe the content of this video (Message example)")
27 .with_message(gemini_rust::Message {
28 content: video_content,
29 role: gemini_rust::Role::User,
30 })
31 .execute()
32 .await?;
33
34 println!("AI description (Message): {}", response1.text());
35
36 // Example 2: Add mp4 blob directly using builder's with_inline_data
37 let response2 = gemini
38 .generate_content()
39 .with_user_message("Please describe the content of this video (with_inline_data example)")
40 .with_inline_data(b64, "video/mp4")
41 .execute()
42 .await?;
43
44 println!("AI description (with_inline_data): {}", response2.text());
45 Ok(())
46}
pub fn with_model(api_key: impl Into<String>, model: String) -> Self
Create a new client with the specified API key and model
Examples found in repository:
examples/embedding.rs (line 8)
4async fn main() -> Result<(), Box<dyn std::error::Error>> {
5 let api_key = std::env::var("GEMINI_API_KEY")?;
6
7    // Create client with the embedding model (text-embedding-004)
8 let client = Gemini::with_model(api_key, "models/text-embedding-004".to_string());
9
10 println!("Sending embedding request to Gemini API...");
11
12 // Simple text embedding
13 let response = client
14 .embed_content()
15 .with_text("Hello")
16 .with_task_type(TaskType::RetrievalDocument)
17 .execute()
18 .await?;
19
20 println!("Response: {:?}", response.embedding.values);
21
22 Ok(())
23}
More examples
examples/batch_embedding.rs (line 8)
4async fn main() -> Result<(), Box<dyn std::error::Error>> {
5 let api_key = std::env::var("GEMINI_API_KEY")?;
6
7    // Create client with the embedding model (text-embedding-004)
8 let client = Gemini::with_model(api_key, "models/text-embedding-004".to_string());
9
10 println!("Sending batch embedding request to Gemini API...");
11
12    // Batch text embedding of several chunks
13 let response = client
14 .embed_content()
15 .with_chunks(vec!["Hello", "World", "Test embedding 3"])
16 .with_task_type(TaskType::RetrievalDocument)
17 .execute_batch()
18 .await?;
19
20 println!("Response: ");
21 for (i, e) in response.embeddings.iter().enumerate() {
22 println!("|{}|: {:?}\n", i, e.values);
23 }
24
25 Ok(())
26}
examples/simple_image_generation.rs (line 15)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Create client with the image generation model
14 // Use Gemini 2.5 Flash Image Preview for image generation
15 let client = Gemini::with_model(api_key, "models/gemini-2.5-flash-image-preview".to_string());
16
17 println!("🎨 Generating image with Gemini...");
18
19 // Generate an image from text description
20 let response = client
21 .generate_content()
22 .with_user_message(
23 "Create a photorealistic image of a cute robot sitting in a garden, \
24 surrounded by colorful flowers. The robot should have a friendly \
25 expression and be made of polished metal. The lighting should be \
26 soft and natural, as if taken during golden hour.",
27 )
28 .with_generation_config(GenerationConfig {
29 temperature: Some(0.8),
30 max_output_tokens: Some(8192),
31 ..Default::default()
32 })
33 .execute()
34 .await?;
35
36 // Process the response
37 let mut images_saved = 0;
38 for candidate in response.candidates.iter() {
39 if let Some(parts) = &candidate.content.parts {
40 for part in parts.iter() {
41 match part {
42 gemini_rust::Part::Text { text, .. } => {
43 println!("📝 Model response: {}", text);
44 }
45 gemini_rust::Part::InlineData { inline_data } => {
46 println!("🖼️ Image generated!");
47 println!(" MIME type: {}", inline_data.mime_type);
48
49 // Decode and save the image
50 match BASE64.decode(&inline_data.data) {
51 Ok(image_bytes) => {
52 images_saved += 1;
53 let filename = format!("robot_garden_{}.png", images_saved);
54 fs::write(&filename, image_bytes)?;
55 println!("✅ Image saved as: {}", filename);
56 }
57 Err(e) => {
58 println!("❌ Failed to decode image: {}", e);
59 }
60 }
61 }
62 _ => {
63 println!("🔍 Other content type in response");
64 }
65 }
66 }
67 }
68 }
69
70 if images_saved == 0 {
71 println!("⚠️ No images were generated. This might be due to:");
72 println!(" - Content policy restrictions");
73 println!(" - API limitations");
74 println!(" - Model configuration issues");
75 } else {
76 println!("🎉 Successfully generated {} image(s)!", images_saved);
77 }
78
79 Ok(())
80}
examples/thinking_basic.rs (line 10)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::with_model(api_key, "models/gemini-2.5-pro".to_string());
11
12 println!("=== Gemini 2.5 Thinking Basic Example ===\n");
13
14 // Example 1: Using default dynamic thinking
15 println!(
16 "--- Example 1: Dynamic thinking (model automatically determines thinking budget) ---"
17 );
18 let response1 = client
19 .generate_content()
20 .with_system_prompt("You are a helpful mathematics assistant.")
21 .with_user_message(
22 "Explain Occam's razor principle and provide a simple example from daily life.",
23 )
24 .with_dynamic_thinking()
25 .with_thoughts_included(true)
26 .execute()
27 .await?;
28
29 // Display thinking process
30 let thoughts = response1.thoughts();
31 if !thoughts.is_empty() {
32 println!("Thinking summary:");
33 for (i, thought) in thoughts.iter().enumerate() {
34 println!("Thought {}: {}\n", i + 1, thought);
35 }
36 }
37
38 println!("Answer: {}\n", response1.text());
39
40 // Display token usage
41 if let Some(usage) = &response1.usage_metadata {
42 println!("Token usage:");
43 println!(" Prompt tokens: {}", usage.prompt_token_count);
44 println!(
45 " Response tokens: {}",
46 usage.candidates_token_count.unwrap_or(0)
47 );
48 if let Some(thinking_tokens) = usage.thoughts_token_count {
49 println!(" Thinking tokens: {}", thinking_tokens);
50 }
51 println!(" Total tokens: {}\n", usage.total_token_count);
52 }
53
54 // Example 2: Set specific thinking budget
55 println!("--- Example 2: Set thinking budget (1024 tokens) ---");
56 let response2 = client
57 .generate_content()
58 .with_system_prompt("You are a helpful programming assistant.")
59 .with_user_message("List 3 main advantages of using the Rust programming language")
60 .with_thinking_budget(1024)
61 .with_thoughts_included(true)
62 .execute()
63 .await?;
64
65 // Display thinking process
66 let thoughts2 = response2.thoughts();
67 if !thoughts2.is_empty() {
68 println!("Thinking summary:");
69 for (i, thought) in thoughts2.iter().enumerate() {
70 println!("Thought {}: {}\n", i + 1, thought);
71 }
72 }
73
74 println!("Answer: {}\n", response2.text());
75
76 // Example 3: Disable thinking feature
77 println!("--- Example 3: Disable thinking feature ---");
78 let response3 = client
79 .generate_content()
80 .with_system_prompt("You are a helpful assistant.")
81 .with_user_message("What is artificial intelligence?")
82 .execute()
83 .await?;
84
85 println!("Answer: {}\n", response3.text());
86
87 // Example 4: Use GenerationConfig to set thinking
88 println!("--- Example 4: Use GenerationConfig to set thinking ---");
89 let thinking_config = ThinkingConfig::new()
90 .with_thinking_budget(2048)
91 .with_thoughts_included(true);
92
93 let generation_config = GenerationConfig {
94 temperature: Some(0.7),
95 max_output_tokens: Some(500),
96 thinking_config: Some(thinking_config),
97 ..Default::default()
98 };
99
100 let response4 = client
101 .generate_content()
102 .with_system_prompt("You are a creative writing assistant.")
103 .with_user_message(
104 "Write the opening of a short story about a robot learning to feel emotions.",
105 )
106 .with_generation_config(generation_config)
107 .execute()
108 .await?;
109
110 // Display thinking process
111 let thoughts4 = response4.thoughts();
112 if !thoughts4.is_empty() {
113 println!("Thinking summary:");
114 for (i, thought) in thoughts4.iter().enumerate() {
115 println!("Thought {}: {}\n", i + 1, thought);
116 }
117 }
118
119 println!("Answer: {}\n", response4.text());
120
121 Ok(())
122}
examples/image_editing.rs (line 14)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Create client with the image generation model
14 let client = Gemini::with_model(api_key, "models/gemini-2.5-flash-image-preview".to_string());
15
16 println!("🎨 Image Editing with Gemini");
17 println!("This example shows how to edit images using text descriptions.");
18 println!();
19
20 // First, let's generate a base image to edit
21 println!("📸 Step 1: Generating a base image...");
22 let base_response = client
23 .generate_content()
24 .with_user_message(
25 "Create a simple landscape image with a blue sky, green grass, \
26 and a single white house in the center. The style should be \
27 clean and minimalist.",
28 )
29 .execute()
30 .await?;
31
32 // Save the base image
33 let mut base_image_data = None;
34 for candidate in base_response.candidates.iter() {
35 if let Some(parts) = &candidate.content.parts {
36 for part in parts.iter() {
37 if let gemini_rust::Part::InlineData { inline_data } = part {
38 base_image_data = Some(inline_data.data.clone());
39 let image_bytes = BASE64.decode(&inline_data.data)?;
40 fs::write("base_landscape.png", image_bytes)?;
41 println!("✅ Base image saved as: base_landscape.png");
42 break;
43 }
44 }
45 }
46 }
47
48 let base_data = match base_image_data {
49 Some(data) => data,
50 None => {
51 println!("❌ Failed to generate base image");
52 return Ok(());
53 }
54 };
55
56 println!();
57 println!("🖌️ Step 2: Editing the image...");
58
59 // Example 1: Add elements to the image
60 println!(" Adding a red barn to the scene...");
61 let edit_response1 = client
62 .generate_content()
63 .with_user_message(
64 "Add a red barn to the left side of this landscape image. \
65 The barn should fit naturally into the scene and match \
66 the minimalist style. Keep everything else exactly the same.",
67 )
68 .with_inline_data(&base_data, "image/png")
69 .execute()
70 .await?;
71
72 save_generated_images(&edit_response1, "landscape_with_barn")?;
73
74 // Example 2: Change the weather/atmosphere
75 println!(" Changing the scene to sunset...");
76 let edit_response2 = client
77 .generate_content()
78 .with_user_message(
79 "Transform this landscape into a beautiful sunset scene. \
80 Change the sky to warm orange and pink colors, add a \
81 setting sun, and adjust the lighting to match golden hour. \
82 Keep the house and grass but make them glow with sunset light.",
83 )
84 .with_inline_data(&base_data, "image/png")
85 .execute()
86 .await?;
87
88 save_generated_images(&edit_response2, "sunset_landscape")?;
89
90 // Example 3: Style transfer
91 println!(" Converting to watercolor style...");
92 let edit_response3 = client
93 .generate_content()
94 .with_user_message(
95 "Transform this landscape into a watercolor painting style. \
96 Preserve the composition but render it with soft, flowing \
97 watercolor brushstrokes, gentle color bleeding, and the \
98 characteristic transparency of watercolor art.",
99 )
100 .with_inline_data(&base_data, "image/png")
101 .execute()
102 .await?;
103
104 save_generated_images(&edit_response3, "watercolor_landscape")?;
105
106 println!();
107 println!("🎉 Image editing examples completed!");
108 println!("Check the generated files:");
109 println!(" - base_landscape.png (original)");
110 println!(" - landscape_with_barn_*.png (with added barn)");
111 println!(" - sunset_landscape_*.png (sunset version)");
112 println!(" - watercolor_landscape_*.png (watercolor style)");
113
114 Ok(())
115}
examples/simple_speech_generation.rs (line 13)
7async fn main() -> Result<(), Box<dyn std::error::Error>> {
8 // Load API key from environment variable
9 let api_key =
10 std::env::var("GEMINI_API_KEY").expect("Please set GEMINI_API_KEY environment variable");
11
12 // Create client with TTS-enabled model
13 let client = Gemini::with_model(api_key, "models/gemini-2.5-flash-preview-tts".to_string());
14
15 println!("🎤 Gemini Speech Generation Example");
16 println!("Generating audio from text...\n");
17
18 // Create generation config with speech settings
19 let generation_config = GenerationConfig {
20 response_modalities: Some(vec!["AUDIO".to_string()]),
21 speech_config: Some(SpeechConfig {
22 voice_config: Some(VoiceConfig {
23 prebuilt_voice_config: Some(PrebuiltVoiceConfig {
24 voice_name: "Puck".to_string(),
25 }),
26 }),
27 multi_speaker_voice_config: None,
28 }),
29 ..Default::default()
30 };
31
32 match client
33 .generate_content()
34 .with_user_message("Hello! This is a demonstration of text-to-speech using Google's Gemini API. The voice you're hearing is generated entirely by AI.")
35 .with_generation_config(generation_config)
36 .execute()
37 .await {
38 Ok(response) => {
39 println!("✅ Speech generation completed!");
40
41 // Check if we have candidates
42 for (i, candidate) in response.candidates.iter().enumerate() {
43 if let Some(parts) = &candidate.content.parts {
44 for (j, part) in parts.iter().enumerate() {
45 match part {
46 // Look for inline data with audio MIME type
47 Part::InlineData { inline_data } => {
48 if inline_data.mime_type.starts_with("audio/") {
49 println!("📄 Found audio data: {}", inline_data.mime_type);
50
51 // Decode base64 audio data using the new API
52 match general_purpose::STANDARD.decode(&inline_data.data) {
53 Ok(audio_bytes) => {
54 let filename = format!("speech_output_{}_{}.pcm", i, j);
55
56 // Save audio to file
57 match File::create(&filename) {
58 Ok(mut file) => {
59 if let Err(e) = file.write_all(&audio_bytes) {
60 eprintln!("❌ Error writing audio file: {}", e);
61 } else {
62 println!("💾 Audio saved as: {}", filename);
63 println!("🔊 You can play it with: aplay {} (Linux) or afplay {} (macOS)", filename, filename);
64 }
65 },
66 Err(e) => eprintln!("❌ Error creating audio file: {}", e),
67 }
68 },
69 Err(e) => eprintln!("❌ Error decoding base64 audio: {}", e),
70 }
71 }
72 },
73 // Display any text content
74 Part::Text { text, thought } => {
75 if thought.unwrap_or(false) {
76 println!("💭 Thought: {}", text);
77 } else {
78 println!("📝 Text content: {}", text);
79 }
80 },
81 _ => {
82 // Handle other part types if needed
83 }
84 }
85 }
86 }
87 }
88
89 // Display usage metadata if available
90 if let Some(usage_metadata) = &response.usage_metadata {
91 println!("\n📊 Usage Statistics:");
92 println!(" Prompt tokens: {}", usage_metadata.prompt_token_count);
93 println!(" Total tokens: {}", usage_metadata.total_token_count);
94 }
95 },
96 Err(e) => {
97 eprintln!("❌ Error generating speech: {}", e);
98 eprintln!("\n💡 Troubleshooting tips:");
99 eprintln!(" 1. Make sure GEMINI_API_KEY environment variable is set");
100 eprintln!(" 2. Verify you have access to the Gemini TTS model");
101 eprintln!(" 3. Check your internet connection");
102 eprintln!(" 4. Ensure the model 'gemini-2.5-flash-preview-tts' is available");
103 }
104 }
105
106 Ok(())
107}
pub fn with_base_url(api_key: impl Into<String>, base_url: String) -> Self
Create a new client with a custom base URL
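No repository example exercises this constructor, so the following is a minimal sketch assuming the same builder flow used elsewhere on this page; the base URL shown is the public endpoint from examples/custom_base_url.rs and stands in for whatever endpoint you actually need:

use gemini_rust::Gemini;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let api_key = std::env::var("GEMINI_API_KEY")?;

    // Keep the default model but point requests at a custom endpoint,
    // e.g. a proxy or a different API revision (URL is illustrative).
    let client = Gemini::with_base_url(
        api_key,
        "https://generativelanguage.googleapis.com/v1beta/".to_string(),
    );

    let response = client
        .generate_content()
        .with_user_message("Hello")
        .execute()
        .await?;
    println!("Response: {}", response.text());

    Ok(())
}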
pub fn with_model_and_base_url(
    api_key: impl Into<String>,
    model: String,
    base_url: String,
) -> Self
Create a new client with the specified API key, model, and base URL
Examples found in repository:
examples/custom_base_url.rs (lines 10-14)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8 // Using custom base URL
9 let custom_base_url = "https://generativelanguage.googleapis.com/v1beta/";
10 let client_custom = Gemini::with_model_and_base_url(
11 api_key,
12 "models/gemini-2.5-flash-lite-preview-06-17".to_string(),
13 custom_base_url.to_string(),
14 );
15 println!("Custom base URL client created successfully");
16 let response = client_custom
17 .generate_content()
18 .with_system_prompt("You are a helpful assistant.")
19 .with_user_message("Hello, can you tell me a joke about programming?")
20 .with_generation_config(GenerationConfig {
21 temperature: Some(0.7),
22 max_output_tokens: Some(100),
23 ..Default::default()
24 })
25 .execute()
26 .await?;
27
28 println!("Response: {}", response.text());
29
30 Ok(())
31}
pub fn generate_content(&self) -> ContentBuilder
Start building a content generation request
Examples found in repository:
examples/test_api.rs (line 15)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 let api_key = env::var("GEMINI_API_KEY")?;
7
8 // Create client with the default model (gemini-2.0-flash)
9 let client = Gemini::new(api_key);
10
11 println!("Sending request to Gemini API...");
12
13 // Simple text completion with minimal content
14 let response = client
15 .generate_content()
16 .with_user_message("Say hello")
17 .execute()
18 .await?;
19
20 println!("Response: {}", response.text());
21
22 Ok(())
23}
More examples
examples/google_search.rs (line 19)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 // Create client
10 let client = Gemini::new(api_key);
11
12 println!("--- Google Search tool example ---");
13
14 // Create a Google Search tool
15 let google_search_tool = Tool::google_search();
16
17 // Create a request with Google Search tool
18 let response = client
19 .generate_content()
20 .with_user_message("What is the current Google stock price?")
21 .with_tool(google_search_tool)
22 .execute()
23 .await?;
24
25 println!("Response: {}", response.text());
26
27 Ok(())
28}
examples/custom_base_url.rs (line 17)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8 // Using custom base URL
9 let custom_base_url = "https://generativelanguage.googleapis.com/v1beta/";
10 let client_custom = Gemini::with_model_and_base_url(
11 api_key,
12 "models/gemini-2.5-flash-lite-preview-06-17".to_string(),
13 custom_base_url.to_string(),
14 );
15 println!("Custom base URL client created successfully");
16 let response = client_custom
17 .generate_content()
18 .with_system_prompt("You are a helpful assistant.")
19 .with_user_message("Hello, can you tell me a joke about programming?")
20 .with_generation_config(GenerationConfig {
21 temperature: Some(0.7),
22 max_output_tokens: Some(100),
23 ..Default::default()
24 })
25 .execute()
26 .await?;
27
28 println!("Response: {}", response.text());
29
30 Ok(())
31}
examples/gemini_pro_example.rs (line 35)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7    // Get the API key from the environment variable
8 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
9
10 // Create a Gemini client
11 let gemini = Gemini::pro(api_key);
12
13 // This example matches the exact curl request format:
14 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
15 // -H 'Content-Type: application/json' \
16 // -d '{
17 // "system_instruction": {
18 // "parts": [
19 // {
20 // "text": "You are a cat. Your name is Neko."
21 // }
22 // ]
23 // },
24 // "contents": [
25 // {
26 // "parts": [
27 // {
28 // "text": "Hello there"
29 // }
30 // ]
31 // }
32 // ]
33 // }'
34 let response = gemini
35 .generate_content()
36 .with_system_instruction("You are a cat. Your name is Neko.")
37 .with_user_message("Hello there")
38 .execute()
39 .await?;
40
41 // Print the response
42 println!("Response: {}", response.text());
43
44 Ok(())
45}
examples/blob.rs (line 34)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 // Get API key from environment variable
11 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
12
13 // Image file path (in the same directory)
14 let image_path = Path::new(file!())
15 .parent()
16 .unwrap_or(Path::new("."))
17 .join("image-example.webp"); // Replace with your image filename
18
19 // Read the image file
20 let mut file = File::open(&image_path)?;
21 let mut buffer = Vec::new();
22 file.read_to_end(&mut buffer)?;
23
24 // Convert to base64
25 let data = general_purpose::STANDARD.encode(&buffer);
26
27 println!("Image loaded: {}", image_path.display());
28
29 // Create client
30 let client = Gemini::new(api_key);
31
32 println!("--- Describe Image ---");
33 let response = client
34 .generate_content()
35 .with_inline_data(data, "image/webp")
36 .with_response_mime_type("text/plain")
37 .with_generation_config(GenerationConfig {
38 temperature: Some(0.7),
39 max_output_tokens: Some(400),
40 ..Default::default()
41 })
42 .execute()
43 .await?;
44
45 println!("Response: {}", response.text());
46
47 Ok(())
48}
examples/mp4_describe.rs (line 25)
11async fn main() -> Result<(), Box<dyn std::error::Error>> {
12 // Read mp4 video file
13 let mut file = File::open("examples/sample.mp4")?;
14 let mut buffer = Vec::new();
15 file.read_to_end(&mut buffer)?;
16 let b64 = general_purpose::STANDARD.encode(&buffer);
17
18 // Get API key
19 let api_key = env::var("GEMINI_API_KEY")?;
20 let gemini = Gemini::pro(api_key);
21
22 // Example 1: Add mp4 blob using Message struct
23 let video_content = Content::inline_data("video/mp4", b64.clone());
24 let response1 = gemini
25 .generate_content()
26 .with_user_message("Please describe the content of this video (Message example)")
27 .with_message(gemini_rust::Message {
28 content: video_content,
29 role: gemini_rust::Role::User,
30 })
31 .execute()
32 .await?;
33
34 println!("AI description (Message): {}", response1.text());
35
36 // Example 2: Add mp4 blob directly using builder's with_inline_data
37 let response2 = gemini
38 .generate_content()
39 .with_user_message("Please describe the content of this video (with_inline_data example)")
40 .with_inline_data(b64, "video/mp4")
41 .execute()
42 .await?;
43
44 println!("AI description (with_inline_data): {}", response2.text());
45 Ok(())
46}
Additional examples can be found in:
- examples/curl_google_search.rs
- examples/curl_equivalent.rs
- examples/generation_config.rs
- examples/structured_response.rs
- examples/streaming.rs
- examples/batch_generate.rs
- examples/simple_image_generation.rs
- examples/advanced.rs
- examples/simple.rs
- examples/thinking_basic.rs
- examples/image_editing.rs
- examples/batch_cancel.rs
- examples/google_search_with_functions.rs
- examples/simple_speech_generation.rs
- examples/thinking_curl_equivalent.rs
- examples/image_generation.rs
- examples/thinking_advanced.rs
- examples/tools.rs
- examples/multi_speaker_tts.rs
pub fn embed_content(&self) -> EmbedBuilder
Start building a content embedding request
Examples found in repository:
examples/embedding.rs (line 14)
4async fn main() -> Result<(), Box<dyn std::error::Error>> {
5 let api_key = std::env::var("GEMINI_API_KEY")?;
6
7    // Create client with the embedding model (text-embedding-004)
8 let client = Gemini::with_model(api_key, "models/text-embedding-004".to_string());
9
10 println!("Sending embedding request to Gemini API...");
11
12 // Simple text embedding
13 let response = client
14 .embed_content()
15 .with_text("Hello")
16 .with_task_type(TaskType::RetrievalDocument)
17 .execute()
18 .await?;
19
20 println!("Response: {:?}", response.embedding.values);
21
22 Ok(())
23}
More examples
examples/batch_embedding.rs (line 14)
4async fn main() -> Result<(), Box<dyn std::error::Error>> {
5 let api_key = std::env::var("GEMINI_API_KEY")?;
6
7    // Create client with the embedding model (text-embedding-004)
8 let client = Gemini::with_model(api_key, "models/text-embedding-004".to_string());
9
10 println!("Sending batch embedding request to Gemini API...");
11
12    // Batch text embedding of several chunks
13 let response = client
14 .embed_content()
15 .with_chunks(vec!["Hello", "World", "Test embedding 3"])
16 .with_task_type(TaskType::RetrievalDocument)
17 .execute_batch()
18 .await?;
19
20 println!("Response: ");
21 for (i, e) in response.embeddings.iter().enumerate() {
22 println!("|{}|: {:?}\n", i, e.values);
23 }
24
25 Ok(())
26}
pub fn batch_generate_content_sync(&self) -> BatchBuilder
Start building a synchronous batch content generation request
Examples found in repository:
examples/batch_generate.rs (line 39)
18async fn main() -> Result<(), Box<dyn std::error::Error>> {
19 // Get the API key from the environment
20 let api_key = std::env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY not set");
21
22 // Create a new Gemini client
23 let gemini = Gemini::new(api_key);
24
25 // Create the first request
26 let request1 = gemini
27 .generate_content()
28 .with_message(Message::user("What is the meaning of life?"))
29 .build();
30
31 // Create the second request
32 let request2 = gemini
33 .generate_content()
34 .with_message(Message::user("What is the best programming language?"))
35 .build();
36
37 // Create the batch request
38 let batch = gemini
39 .batch_generate_content_sync()
40 .with_request(request1)
41 .with_request(request2)
42 .execute()
43 .await?;
44
45 // Print the batch information
46 println!("Batch created successfully!");
47 println!("Batch Name: {}", batch.name());
48
49 // Wait for the batch to complete
50 println!("Waiting for batch to complete...");
51 match batch.wait_for_completion(Duration::from_secs(5)).await {
52 Ok(final_status) => {
53 // Print the final status
54 match final_status {
55 BatchStatus::Succeeded { results } => {
56 println!("Batch succeeded!");
57 for item in results {
58 match item {
59 BatchResultItem::Success { key, response } => {
60 println!("--- Response for Key {} ---", key);
61 println!("{}", response.text());
62 }
63 BatchResultItem::Error { key, error } => {
64 println!("--- Error for Key {} ---", key);
65 println!("Code: {}, Message: {}", error.code, error.message);
66 if let Some(details) = &error.details {
67 println!("Details: {}", details);
68 }
69 }
70 }
71 }
72 }
73 BatchStatus::Cancelled => {
74 println!("Batch was cancelled.");
75 }
76 BatchStatus::Expired => {
77 println!("Batch expired.");
78 }
79 _ => {
80 println!(
81 "Batch finished with an unexpected status: {:?}",
82 final_status
83 );
84 }
85 }
86 }
87 Err((_batch, e)) => {
88 println!(
89 "Batch failed: {}. You can retry with the returned batch.",
90 e
91 );
92 // Here you could retry: batch.wait_for_completion(Duration::from_secs(5)).await, etc.
93 }
94 }
95
96 Ok(())
97}
More examples
examples/batch_cancel.rs (line 24)
15async fn main() -> Result<()> {
16 // Get the API key from the environment
17 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY must be set");
18
19 // Create the Gemini client
20 let gemini = Gemini::new(api_key);
21
22 // Create a batch with multiple requests
23 let mut batch_generate_content = gemini
24 .batch_generate_content_sync()
25 .with_name("batch_cancel_example".to_string());
26
27 // Add several requests to make the batch take some time to process
28 for i in 1..=10 {
29 let request = gemini
30 .generate_content()
31 .with_message(Message::user(format!(
32 "Write a creative story about a robot learning to paint, part {}. Make it at least 100 words long.",
33 i
34 )))
35 .build();
36
37 batch_generate_content = batch_generate_content.with_request(request);
38 }
39
40 // Build and start the batch
41 let batch = batch_generate_content.execute().await?;
42 println!("Batch created successfully!");
43 println!("Batch Name: {}", batch.name());
44 println!("Press CTRL-C to cancel the batch operation...");
45
46 // Wrap the batch in an Arc<Mutex<Option<Batch>>> to allow safe sharing
47 let batch = Arc::new(Mutex::new(Some(batch)));
48 let batch_clone = Arc::clone(&batch);
49
50 // Spawn a task to handle CTRL-C
51 let cancel_task = tokio::spawn(async move {
52 // Wait for CTRL-C signal
53 signal::ctrl_c().await.expect("Failed to listen for CTRL-C");
54 println!("Received CTRL-C, canceling batch operation...");
55
56 // Take the batch from the Option, leaving None.
57 // The lock is released immediately after this block.
58 let mut batch_to_cancel = batch_clone.lock().await;
59
60 if let Some(batch) = batch_to_cancel.take() {
61 // Cancel the batch operation
62 match batch.cancel().await {
63 Ok(()) => {
64 println!("Batch canceled successfully!");
65 }
66 Err((batch, e)) => {
67 println!("Failed to cancel batch: {}. Retrying...", e);
68 // Retry once
69 match batch.cancel().await {
70 Ok(()) => {
71 println!("Batch canceled successfully on retry!");
72 }
73 Err((_, retry_error)) => {
74 eprintln!("Failed to cancel batch even on retry: {}", retry_error);
75 }
76 }
77 }
78 }
79 } else {
80 println!("Batch was already processed.");
81 }
82 });
83
84 // Wait for a short moment to ensure the cancel task is ready
85 tokio::time::sleep(Duration::from_millis(100)).await;
86
87 // Wait for the batch to complete or be canceled
88 if let Some(batch) = batch.lock().await.take() {
89 println!("Waiting for batch to complete or be canceled...");
90 match batch.wait_for_completion(Duration::from_secs(5)).await {
91 Ok(final_status) => {
92 // Cancel task is no longer needed since batch completed
93 cancel_task.abort();
94
95 println!("Batch completed with status: {:?}", final_status);
96
97 // Print some details about the results
98 match final_status {
99 gemini_rust::BatchStatus::Succeeded { .. } => {
100 println!("Batch succeeded!");
101 }
102 gemini_rust::BatchStatus::Cancelled => {
103 println!("Batch was cancelled as requested.");
104 }
105 gemini_rust::BatchStatus::Expired => {
106 println!("Batch expired.");
107 }
108 _ => {
109 println!("Batch finished with an unexpected status.");
110 }
111 }
112 }
113 Err((batch, e)) => {
114 // This could happen if there was a network error while polling
115 println!("Error while waiting for batch completion: {}", e);
116
117 // Try one more time to get the status
118 match batch.status().await {
119 Ok(status) => println!("Current batch status: {:?}", status),
120 Err(status_error) => println!("Error getting final status: {}", status_error),
121 }
122 }
123 }
124 }
125
126 Ok(())
127}
pub fn get_batch(&self, name: &str) -> Batch
Get a handle to a batch operation by its name.
Examples found in repository:
examples/batch_delete.rs (line 34)
23async fn main() -> Result<(), Box<dyn std::error::Error>> {
24 // Get the API key from the environment
25 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY not set");
26
27 // Get the batch name from the environment
28 let batch_name = env::var("BATCH_NAME").expect("BATCH_NAME not set");
29
30 // Create a new Gemini client
31 let gemini = Gemini::new(api_key);
32
33 // Get the batch operation
34 let batch = gemini.get_batch(&batch_name);
35
36 // Check the batch status
37 match batch.status().await {
38 Ok(status) => {
39 println!("Batch status: {:?}", status);
40
41 // Only delete completed batches (succeeded, failed, cancelled, or expired)
42 match status {
43 BatchStatus::Succeeded { .. } | BatchStatus::Cancelled | BatchStatus::Expired => {
44 println!("Deleting batch operation...");
45 // We need to handle the std::result::Result<(), (Batch, Error)> return type
46 match batch.delete().await {
47 Ok(()) => println!("Batch deleted successfully!"),
48 Err((_batch, e)) => {
49 println!("Failed to delete batch: {}. You can retry with the returned batch.", e);
50 // Here you could retry: batch.delete().await, etc.
51 }
52 }
53 }
54 _ => {
55 println!("Batch is still running or pending. Use cancel() to stop it, or wait for completion before deleting.");
56 }
57 }
58 }
59 Err(e) => println!("Failed to get batch status: {}", e),
60 }
61
62 Ok(())
63}
pub fn list_batches(
    &self,
    page_size: impl Into<Option<u32>>,
) -> impl Stream<Item = Result<BatchOperation>> + Send
Lists batch operations.
This method returns a stream that handles pagination automatically.
Examples found in repository:
examples/batch_list.rs (line 26)
16async fn main() -> Result<(), Box<dyn std::error::Error>> {
17 // Get the API key from the environment
18 let api_key = std::env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY not set");
19
20 // Create a new Gemini client
21 let gemini = Gemini::new(api_key);
22
23 println!("Listing all batch operations...");
24
25 // List all batch operations using the stream
26 let stream = gemini.list_batches(5); // page_size of 5
27 tokio::pin!(stream);
28
29 while let Some(result) = stream.next().await {
30 match result {
31 Ok(operation) => {
32 println!(
33 " - Batch: {}, State: {:?}, Created: {}",
34 operation.name, operation.metadata.state, operation.metadata.create_time
35 );
36 }
37 Err(e) => {
38 eprintln!("Error fetching batch operation: {}", e);
39 }
40 }
41 }
42
43 println!("\nFinished listing operations.");
44
45 Ok(())
46}
Trait Implementations
Auto Trait Implementations
impl Freeze for Gemini
impl !RefUnwindSafe for Gemini
impl Send for Gemini
impl Sync for Gemini
impl Unpin for Gemini
impl !UnwindSafe for Gemini
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.