use chat_gpt_lib_rs::OpenAIClient;
use chat_gpt_lib_rs::api_resources::chat::{
ChatMessage, ChatRole, CreateChatCompletionRequest, create_chat_completion,
};
use chat_gpt_lib_rs::error::OpenAIError;
use console::{StyledObject, style};
use indicatif::{ProgressBar, ProgressStyle};
use std::env;
use std::io::{Write, stdin, stdout};
use std::iter::Skip;
use std::time::Duration;
#[tokio::main]
async fn main() -> Result<(), OpenAIError> {
    // Load environment variables from a `.env` file if one exists; absence is fine.
    dotenvy::dotenv().ok();
    // `None` means the client picks up the API key from the environment
    // (e.g. OPENAI_API_KEY); fails early if no key is available.
    let client = OpenAIClient::new(None)?;

    // Opt-in Nerd Font icons in the prompt labels (USE_ICONS=true).
    let use_icons = env::var("USE_ICONS")
        .map(|v| v.eq_ignore_ascii_case("true"))
        .unwrap_or(false);
    let model = env::var("CHAT_MODEL").unwrap_or_else(|_| "gpt-4o".to_string());
    let system_prompt = env::var("SYSTEM_PROMPT").unwrap_or_else(|_| {
        "You are a high quality tech lead and are specialized in idiomatic Rust".to_string()
    });
    // Unparseable or missing values fall back to the defaults (150 tokens, 0.7).
    let max_tokens: Option<u32> = env::var("MAX_TOKENS")
        .ok()
        .and_then(|val| val.parse::<u32>().ok())
        .or(Some(150));
    let temperature: Option<f64> = env::var("TEMPERATURE")
        .ok()
        .and_then(|val| val.parse::<f64>().ok())
        .or(Some(0.7));

    // The running conversation, seeded with the system prompt; grows with each turn.
    let mut messages = vec![ChatMessage {
        role: ChatRole::System,
        content: system_prompt,
        name: None,
    }];

    // Any CLI arguments are joined into a single first message and sent immediately.
    let mut args: Skip<env::Args> = env::args().skip(1);
    if let Some(first_arg) = args.next() {
        let user_message_content = args.fold(first_arg, |acc, arg| acc + " " + &arg);
        process_user_input(
            &client,
            &mut messages,
            &user_message_content,
            &model,
            max_tokens,
            temperature,
        )
        .await?;
    }

    // Interactive REPL: prompt, read a line, send, print the reply.
    loop {
        let input_prompt: StyledObject<&str> = if use_icons {
            style("\u{f0ede} Input: ").green()
        } else {
            style("Input: ").green()
        };
        print!("{}", input_prompt);
        stdout().flush().expect("failed to flush stdout");

        let mut user_message_content = String::new();
        let bytes_read = stdin()
            .read_line(&mut user_message_content)
            .expect("failed to read from stdin");
        // `read_line` returns Ok(0) on EOF (Ctrl-D or a closed pipe); without this
        // check the loop would spin forever sending empty messages to the API.
        if bytes_read == 0 {
            println!();
            return Ok(());
        }
        // Skip blank lines instead of wasting an API call on an empty message.
        if user_message_content.trim().is_empty() {
            continue;
        }
        process_user_input(
            &client,
            &mut messages,
            &user_message_content,
            &model,
            max_tokens,
            temperature,
        )
        .await?;
    }
}
/// Sends one user turn to the chat-completion API and prints the reply.
///
/// Appends the (trimmed) user message to `messages`, performs the request while
/// showing a spinner, prints the assistant's reply, and appends that reply to
/// `messages` so the conversation context accumulates across calls.
///
/// # Errors
///
/// Returns any [`OpenAIError`] produced by the underlying API call.
async fn process_user_input(
    client: &OpenAIClient,
    messages: &mut Vec<ChatMessage>,
    user_message_content: &str,
    model: &str,
    max_tokens: Option<u32>,
    temperature: Option<f64>,
) -> Result<(), OpenAIError> {
    messages.push(ChatMessage {
        role: ChatRole::User,
        content: user_message_content.trim().to_string(),
        name: None,
    });

    // The API needs the full conversation each time, hence the clone.
    let request = CreateChatCompletionRequest {
        model: model.into(),
        messages: messages.clone(),
        max_tokens,
        temperature,
        ..Default::default()
    };

    let spinner = ProgressBar::new_spinner();
    spinner.set_style(
        ProgressStyle::default_spinner()
            .tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈ ")
            .template("{spinner:.green} Processing...")
            .unwrap(),
    );
    spinner.enable_steady_tick(Duration::from_millis(100));
    // Capture the result first so the spinner is cleared on BOTH success and
    // error; propagating with `?` before `finish_and_clear` would leave a live
    // spinner thread corrupting terminal output.
    let result = create_chat_completion(client, &request).await;
    spinner.finish_and_clear();
    let chat = result?;

    // `.first()` instead of `[0]`: an empty `choices` array from the API must
    // not panic the whole program. An empty reply is printed as-is.
    let assistant_message = chat
        .choices
        .first()
        .map(|choice| choice.message.content.clone())
        .unwrap_or_default();

    let computer_label: StyledObject<&str> = if env::var("USE_ICONS")
        .map(|v| v.eq_ignore_ascii_case("true"))
        .unwrap_or(false)
    {
        style("\u{f12ca} Computer: ").color256(39)
    } else {
        style("Computer: ").color256(39)
    };
    let computer_response: StyledObject<String> = style(assistant_message.clone());
    println!("{}{}", computer_label, computer_response);

    // Record the assistant turn so the next request carries the full context.
    messages.push(ChatMessage {
        role: ChatRole::Assistant,
        content: assistant_message,
        name: None,
    });
    Ok(())
}