use gh_models::{GHModels, types::ChatMessage};
use std::env;
#[tokio::main]
async fn main() {
    // Authentication comes from the GITHUB_TOKEN environment variable.
    // Exit non-zero on failure so shell scripts / CI can detect it
    // (a bare `return` from main would exit 0 and mask the error).
    let token = match env::var("GITHUB_TOKEN") {
        Ok(t) => t,
        Err(_) => {
            eprintln!("GITHUB_TOKEN environment variable not set.");
            std::process::exit(1);
        }
    };

    let client = GHModels::new(token);

    // A minimal two-message conversation: system prompt + user question.
    let messages = vec![
        ChatMessage {
            role: "system".into(),
            content: "You are a helpful assistant.".into(),
        },
        ChatMessage {
            role: "user".into(),
            content: "What is the capital of France?".into(),
        },
    ];

    // Positional arguments: model id, messages, temperature, max_tokens, top_p.
    match client.chat_completion("openai/gpt-4o", &messages, 1.0, 4096, 1.0).await {
        Ok(response) => match response.choices.first() {
            // NOTE(review): `choices` can be empty in practice (e.g. content
            // filtering); `.first()` avoids the panic `choices[0]` would cause.
            Some(choice) => println!("Response: {}", choice.message.content),
            None => {
                eprintln!("Request succeeded but returned no choices.");
                std::process::exit(1);
            }
        },
        Err(e) => {
            eprintln!("Request failed: {}", e);
            std::process::exit(1);
        }
    }
}