// generation_config/generation_config.rs
use gemini_rust::{Gemini, GenerationConfig};
use std::env;

4#[tokio::main]
5async fn main() -> Result<(), Box<dyn std::error::Error>> {
6 let api_key = env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");
8
9 let client = Gemini::new(api_key);
11
12 println!("--- Using full generation config ---");
14 let response1 = client
15 .generate_content()
16 .with_system_prompt("You are a helpful assistant.")
17 .with_user_message("Write a short poem about Rust programming language.")
18 .with_generation_config(GenerationConfig {
19 temperature: Some(0.9),
20 top_p: Some(0.8),
21 top_k: Some(20),
22 max_output_tokens: Some(200),
23 candidate_count: Some(1),
24 stop_sequences: Some(vec!["END".to_string()]),
25 response_mime_type: None,
26 response_schema: None,
27 thinking_config: None,
28 ..Default::default()
29 })
30 .execute()
31 .await?;
32
33 println!(
34 "Response with high temperature (0.9):\n{}\n",
35 response1.text()
36 );
37
38 println!("--- Using individual generation parameters ---");
40 let response2 = client
41 .generate_content()
42 .with_system_prompt("You are a helpful assistant.")
43 .with_user_message("Write a short poem about Rust programming language.")
44 .with_temperature(0.2)
45 .with_max_output_tokens(100)
46 .execute()
47 .await?;
48
49 println!(
50 "Response with low temperature (0.2):\n{}\n",
51 response2.text()
52 );
53
54 println!("--- Setting multiple parameters individually ---");
56 let response3 = client
57 .generate_content()
58 .with_system_prompt("You are a helpful assistant.")
59 .with_user_message("List 3 benefits of using Rust.")
60 .with_temperature(0.7)
61 .with_top_p(0.9)
62 .with_max_output_tokens(150)
63 .with_stop_sequences(vec!["4.".to_string()])
64 .execute()
65 .await?;
66
67 println!(
68 "Response with custom parameters and stop sequence:\n{}",
69 response3.text()
70 );
71
72 Ok(())
73}