gemini/gemini.rs

use llm_api_rs::{
    core::{ChatCompletionRequest, ChatMessage},
    providers::gemini::Gemini,
    LlmProvider,
};

#[tokio::main]
async fn main() {
    // Read the API key from the environment; fail with a clear message if it is missing.
    let api_key =
        std::env::var("GEMINI_API_KEY").expect("GEMINI_API_KEY environment variable not set");

    let client = Gemini::new(api_key);

    // Build the chat completion request. Gemini's chat API uses the "model" role
    // (rather than "assistant") for non-user turns, so the system-style prompt is
    // sent with that role here.
    let request = ChatCompletionRequest {
        model: "gemini-2.0-flash-exp".to_string(),
        messages: vec![
            ChatMessage {
                role: "model".to_string(),
                content: "You are a helpful assistant.".to_string(),
            },
            ChatMessage {
                role: "user".to_string(),
                content: "Hello!".to_string(),
            },
        ],
        temperature: Some(0.7),
        max_tokens: Some(50),
    };

    // Send the request and print each returned choice, or report the error.
    match client.chat_completion(request).await {
        Ok(response) => {
            for choice in response.choices {
                println!("Response: {}", choice.message.content);
            }
        }
        Err(e) => eprintln!("Error: {}", e),
    }
}