use adk_gemini::{Gemini, Model};
use display_error_chain::DisplayErrorChain;
use std::env;
use std::process::ExitCode;
use tracing::info;
#[tokio::main]
async fn main() -> ExitCode {
tracing_subscriber::fmt()
.with_env_filter(
tracing_subscriber::EnvFilter::builder()
.with_default_directive(tracing::level_filters::LevelFilter::INFO.into())
.from_env_lossy(),
)
.init();
match do_main().await {
Ok(()) => ExitCode::SUCCESS,
Err(e) => {
let error_chain = DisplayErrorChain::new(e.as_ref());
tracing::error!(error.debug = ?e, error.chained = %error_chain, "execution failed");
ExitCode::FAILURE
}
}
}
/// Demonstrates the different ways to configure a Gemini client:
/// the default constructor, the `pro` convenience constructor,
/// `with_model` with `Model` enum variants, a raw model-id string,
/// and `Model::Custom` — then sends a test prompt through two of them.
///
/// # Errors
///
/// Returns an error if `GEMINI_API_KEY` is unset, if client
/// construction fails, or if a generate-content request fails.
#[allow(unused)]
async fn do_main() -> Result<(), Box<dyn std::error::Error>> {
    // Propagate a descriptive error instead of panicking: a missing env var
    // is an environment problem, not a bug, and `main` already reports
    // errors through the tracing pipeline.
    let api_key = env::var("GEMINI_API_KEY")
        .map_err(|_| "GEMINI_API_KEY environment variable not set")?;
    info!("demonstrating different model configuration options");

    let client_default = Gemini::new(api_key.clone())?;
    info!("created client with default Gemini 2.5 Flash model");

    let client_pro = Gemini::pro(api_key.clone())?;
    info!("created client with Gemini 2.5 Pro model using convenience method");

    let client_flash_lite = Gemini::with_model(api_key.clone(), Model::Gemini25FlashLite)?;
    info!("created client with Gemini 2.5 Flash Lite using Model enum");

    let client_embedding = Gemini::with_model(api_key.clone(), Model::GeminiEmbedding001)?;
    info!("created client with Gemini Embedding 001 model using Model enum");

    let client_custom_string =
        Gemini::with_model(api_key.clone(), "models/gemini-2.5-flash-image-preview".to_string())?;
    info!("created client with custom model string for image generation");

    // Last use of the key: move it instead of cloning.
    let client_custom_enum = Gemini::with_model(
        api_key,
        Model::Custom("models/gemini-2.5-flash-preview-tts".to_string()),
    )?;
    info!("created client with Model::Custom for text-to-speech model");

    // Exercise two of the clients end-to-end with the same prompt.
    let test_message = "Hello! Can you tell me which model you are?";

    let response =
        client_default.generate_content().with_user_message(test_message).execute().await?;
    info!(
        model = "default (Gemini 2.5 Flash)",
        response = response.text(),
        "received response from default model"
    );

    let response_pro =
        client_pro.generate_content().with_user_message(test_message).execute().await?;
    info!(
        model = "Gemini 2.5 Pro",
        response = response_pro.text(),
        "received response from Pro model"
    );

    info!("✅ Successfully demonstrated all model configuration options!");
    info!("Default model response: {}", response.text());
    info!("Pro model response: {}", response_pro.text());
    Ok(())
}