/// Response from the Gemini API for content generation.
pub struct GenerationResponse {
/// The candidates generated for the request.
pub candidates: Vec<Candidate>,
/// The prompt feedback, if the API returned any.
pub prompt_feedback: Option<PromptFeedback>,
/// Usage metadata (token counts), when provided by the API.
pub usage_metadata: Option<UsageMetadata>,
}
Expand description
Response from the Gemini API for content generation
Fields§
§candidates: Vec<Candidate>
The candidates generated
§prompt_feedback: Option<PromptFeedback>
The prompt feedback
§usage_metadata: Option<UsageMetadata>
Usage metadata
Implementations§
Source§impl GenerationResponse
impl GenerationResponse
Source§pub fn text(&self) -> String
pub fn text(&self) -> String
Get the text of the first candidate
Examples found in repository?
examples/test_api.rs (line 20)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 let api_key = env::var("GEMINI_API_KEY")?;
7
8 // Create client with the default model (gemini-2.0-flash)
9 let client = Gemini::new(api_key);
10
11 println!("Sending request to Gemini API...");
12
13 // Simple text completion with minimal content
14 let response = client
15 .generate_content()
16 .with_user_message("Say hello")
17 .execute()
18 .await?;
19
20 println!("Response: {}", response.text());
21
22 Ok(())
23}
More examples
examples/curl_equivalent.rs (line 38)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY")
8 .expect("GEMINI_API_KEY environment variable not set");
9
10 // This is equivalent to the curl example:
11 // curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$YOUR_API_KEY" \
12 // -H 'Content-Type: application/json' \
13 // -X POST \
14 // -d '{
15 // "contents": [
16 // {
17 // "parts": [
18 // {
19 // "text": "Explain how AI works in a few words"
20 // }
21 // ]
22 // }
23 // ]
24 // }'
25
26 // Create client - now using gemini-2.0-flash by default
27 let client = Gemini::new(api_key);
28
29 // Method 1: Using the high-level API (simplest approach)
30 println!("--- Method 1: Using the high-level API ---");
31
32 let response = client
33 .generate_content()
34 .with_user_message("Explain how AI works in a few words")
35 .execute()
36 .await?;
37
38 println!("Response: {}", response.text());
39
40 // Method 2: Using Content directly to match the curl example exactly
41 println!("\n--- Method 2: Matching curl example structure exactly ---");
42
43 // Create a content part that matches the JSON in the curl example
44 let text_part = Part::Text {
45 text: "Explain how AI works in a few words".to_string()
46 };
47
48 let content = Content {
49 parts: vec![text_part],
50 role: None,
51 };
52
53 // Add the content directly to the request
54 // This exactly mirrors the JSON structure in the curl example
55 let mut content_builder = client.generate_content();
56 content_builder.contents.push(content);
57 let response = content_builder.execute().await?;
58
59 println!("Response: {}", response.text());
60
61 Ok(())
62}
examples/generation_config.rs (line 34)
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 // Get API key from environment variable
7 let api_key = env::var("GEMINI_API_KEY")
8 .expect("GEMINI_API_KEY environment variable not set");
9
10 // Create client
11 let client = Gemini::new(api_key);
12
13 // Using the full generation config
14 println!("--- Using full generation config ---");
15 let response1 = client
16 .generate_content()
17 .with_system_prompt("You are a helpful assistant.")
18 .with_user_message("Write a short poem about Rust programming language.")
19 .with_generation_config(
20 GenerationConfig {
21 temperature: Some(0.9),
22 top_p: Some(0.8),
23 top_k: Some(20),
24 max_output_tokens: Some(200),
25 candidate_count: Some(1),
26 stop_sequences: Some(vec!["END".to_string()]),
27 response_mime_type: None,
28 response_schema: None,
29 }
30 )
31 .execute()
32 .await?;
33
34 println!("Response with high temperature (0.9):\n{}\n", response1.text());
35
36 // Using individual generation parameters
37 println!("--- Using individual generation parameters ---");
38 let response2 = client
39 .generate_content()
40 .with_system_prompt("You are a helpful assistant.")
41 .with_user_message("Write a short poem about Rust programming language.")
42 .with_temperature(0.2)
43 .with_max_output_tokens(100)
44 .execute()
45 .await?;
46
47 println!("Response with low temperature (0.2):\n{}\n", response2.text());
48
49 // Setting multiple parameters individually
50 println!("--- Setting multiple parameters individually ---");
51 let response3 = client
52 .generate_content()
53 .with_system_prompt("You are a helpful assistant.")
54 .with_user_message("List 3 benefits of using Rust.")
55 .with_temperature(0.7)
56 .with_top_p(0.9)
57 .with_max_output_tokens(150)
58 .with_stop_sequences(vec!["4.".to_string()])
59 .execute()
60 .await?;
61
62 println!("Response with custom parameters and stop sequence:\n{}", response3.text());
63
64 Ok(())
65}
examples/structured_response.rs (line 58)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7 // Get API key from environment variable
8 let api_key = env::var("GEMINI_API_KEY")
9 .expect("GEMINI_API_KEY environment variable not set");
10
11 // Create client
12 let client = Gemini::new(api_key);
13
14 // Using response_schema for structured output
15 println!("--- Structured Response Example ---");
16
17 // Define a JSON schema for the response
18 let schema = json!({
19 "type": "object",
20 "properties": {
21 "name": {
22 "type": "string",
23 "description": "Name of the programming language"
24 },
25 "year_created": {
26 "type": "integer",
27 "description": "Year the programming language was created"
28 },
29 "creator": {
30 "type": "string",
31 "description": "Person or organization who created the language"
32 },
33 "key_features": {
34 "type": "array",
35 "items": {
36 "type": "string"
37 },
38 "description": "Key features of the programming language"
39 },
40 "popularity_score": {
41 "type": "integer",
42 "description": "Subjective popularity score from 1-10"
43 }
44 },
45 "required": ["name", "year_created", "creator", "key_features", "popularity_score"]
46 });
47
48 let response = client
49 .generate_content()
50 .with_system_prompt("You provide information about programming languages in JSON format.")
51 .with_user_message("Tell me about the Rust programming language.")
52 .with_response_mime_type("application/json")
53 .with_response_schema(schema)
54 .execute()
55 .await?;
56
57 println!("Structured JSON Response:");
58 println!("{}", response.text());
59
60 // Parse the JSON response
61 let json_response: serde_json::Value = serde_json::from_str(&response.text())?;
62
63 println!("\nAccessing specific fields:");
64 println!("Language: {}", json_response["name"]);
65 println!("Created in: {}", json_response["year_created"]);
66 println!("Created by: {}", json_response["creator"]);
67 println!("Popularity: {}/10", json_response["popularity_score"]);
68
69 println!("\nKey Features:");
70 if let Some(features) = json_response["key_features"].as_array() {
71 for (i, feature) in features.iter().enumerate() {
72 println!("{}. {}", i+1, feature);
73 }
74 }
75
76 Ok(())
77}
examples/streaming.rs (line 28)
6async fn main() -> Result<(), Box<dyn std::error::Error>> {
7 // Get API key from environment variable
8 let api_key = env::var("GEMINI_API_KEY")
9 .expect("GEMINI_API_KEY environment variable not set");
10
11 // Create client
12 let client = Gemini::new(api_key);
13
14 // Simple streaming generation
15 println!("--- Streaming generation ---");
16
17 let mut stream = client
18 .generate_content()
19 .with_system_prompt("You are a helpful, creative assistant.")
20 .with_user_message("Write a short story about a robot who learns to feel emotions.")
21 .execute_stream()
22 .await?;
23
24 print!("Streaming response: ");
25 while let Some(chunk_result) = stream.next().await {
26 match chunk_result {
27 Ok(chunk) => {
28 print!("{}", chunk.text());
29 std::io::Write::flush(&mut std::io::stdout())?;
30 }
31 Err(e) => eprintln!("Error in stream: {}", e),
32 }
33 }
34 println!("\n");
35
36 // Multi-turn conversation
37 println!("--- Multi-turn conversation ---");
38
39 // First turn
40 let response1 = client
41 .generate_content()
42 .with_system_prompt("You are a helpful travel assistant.")
43 .with_user_message("I'm planning a trip to Japan. What are the best times to visit?")
44 .execute()
45 .await?;
46
47 println!("User: I'm planning a trip to Japan. What are the best times to visit?");
48 println!("Assistant: {}\n", response1.text());
49
50 // Second turn (continuing the conversation)
51 let response2 = client
52 .generate_content()
53 .with_system_prompt("You are a helpful travel assistant.")
54 .with_user_message("I'm planning a trip to Japan. What are the best times to visit?")
55 .with_model_message(response1.text())
56 .with_user_message("What about cherry blossom season? When exactly does that happen?")
57 .execute()
58 .await?;
59
60 println!("User: What about cherry blossom season? When exactly does that happen?");
61 println!("Assistant: {}\n", response2.text());
62
63 // Third turn (continuing the conversation)
64 let response3 = client
65 .generate_content()
66 .with_system_prompt("You are a helpful travel assistant.")
67 .with_user_message("I'm planning a trip to Japan. What are the best times to visit?")
68 .with_model_message(response1.text())
69 .with_user_message("What about cherry blossom season? When exactly does that happen?")
70 .with_model_message(response2.text())
71 .with_user_message("What are some must-visit places in Tokyo?")
72 .execute()
73 .await?;
74
75 println!("User: What are some must-visit places in Tokyo?");
76 println!("Assistant: {}", response3.text());
77
78 Ok(())
79}
Source§pub fn function_calls(&self) -> Vec<&FunctionCall>
pub fn function_calls(&self) -> Vec<&FunctionCall>
Get function calls from the response
Trait Implementations§
Source§impl Clone for GenerationResponse
impl Clone for GenerationResponse
Source§fn clone(&self) -> GenerationResponse
fn clone(&self) -> GenerationResponse
Returns a copy of the value. Read more
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Source§impl Debug for GenerationResponse
impl Debug for GenerationResponse
Source§impl<'de> Deserialize<'de> for GenerationResponse
impl<'de> Deserialize<'de> for GenerationResponse
Source§fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error> where __D: Deserializer<'de>,
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error> where __D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer. Read more
Auto Trait Implementations§
impl Freeze for GenerationResponse
impl RefUnwindSafe for GenerationResponse
impl Send for GenerationResponse
impl Sync for GenerationResponse
impl Unpin for GenerationResponse
impl UnwindSafe for GenerationResponse
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where T: ?Sized,
impl<T> BorrowMut<T> for T where T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more