mod agent;
mod pipe;

use anyhow::{Context, Result};
use async_openai::{
config::OpenAIConfig,
types::{
ChatCompletionRequestAssistantMessage, ChatCompletionRequestAssistantMessageContent,
ChatCompletionRequestMessage, ChatCompletionRequestSystemMessage,
ChatCompletionRequestSystemMessageContent, ChatCompletionRequestUserMessage,
ChatCompletionRequestUserMessageContent,
},
Client,
};
use tracing::{debug, info};
use crate::config::AiConfig;
use crate::engine::CommandResult;
use super::prompts::{ERROR_INVESTIGATION_PROMPT, SYSTEM_PROMPT};
use super::types::{AiResponse, ConversationOrigin, ConversationResult, ConversationState};
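
/// Wraps plain text in an assistant-role chat completion message.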
fn build_text_assistant_message(text: String) -> ChatCompletionRequestMessage {
ChatCompletionRequestMessage::Assistant(ChatCompletionRequestAssistantMessage {
content: Some(ChatCompletionRequestAssistantMessageContent::Text(text)),
refusal: None,
name: None,
audio: None,
tool_calls: None,
#[allow(deprecated)]
function_call: None,
})
}
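
/// AI client backed by the OpenAI chat completions API, holding the model
/// name and the runtime settings taken from [`AiConfig`].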
pub struct JarvisAI {
client: Client<OpenAIConfig>,
model: String,
max_rounds: usize,
markdown_rendering: bool,
ai_pipe_max_chars: usize,
ai_redirect_max_chars: usize,
temperature: f32,
}

impl JarvisAI {
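    /// Creates a client from `ai_config`. Reads the API key from the
    /// `OPENAI_API_KEY` environment variable and fails if it is missing,
    /// empty, or still set to the placeholder value.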
pub fn new(ai_config: &AiConfig) -> Result<Self> {
let api_key = std::env::var("OPENAI_API_KEY")
.context("OPENAI_API_KEY is not set. AI features are disabled.")?;
if api_key.is_empty() || api_key == "your_openai_api_key" {
anyhow::bail!("OPENAI_API_KEY is not configured. Please set a valid API key in .env");
}
let config = OpenAIConfig::new().with_api_key(&api_key);
let client = Client::with_config(config);
Ok(Self {
client,
model: ai_config.model.clone(),
max_rounds: ai_config.max_rounds,
markdown_rendering: ai_config.markdown_rendering,
ai_pipe_max_chars: ai_config.ai_pipe_max_chars,
ai_redirect_max_chars: ai_config.ai_redirect_max_chars,
temperature: ai_config.temperature,
})
}
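
    /// Applies updated settings from `ai_config` to the existing client and
    /// logs the new values.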
pub fn update_config(&mut self, ai_config: &AiConfig) {
self.model = ai_config.model.clone();
self.max_rounds = ai_config.max_rounds;
self.markdown_rendering = ai_config.markdown_rendering;
self.ai_pipe_max_chars = ai_config.ai_pipe_max_chars;
self.ai_redirect_max_chars = ai_config.ai_redirect_max_chars;
self.temperature = ai_config.temperature;
info!(
model = %self.model,
max_rounds = self.max_rounds,
markdown_rendering = self.markdown_rendering,
ai_pipe_max_chars = self.ai_pipe_max_chars,
ai_redirect_max_chars = self.ai_redirect_max_chars,
temperature = self.temperature,
"AI config updated"
);
}
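
    /// Handles a natural-language request: builds the system prompt
    /// (appending `context` when non-empty), adds the user input, runs the
    /// agent loop, and returns the response along with the conversation
    /// state for follow-ups.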
pub async fn process_input(&self, input: &str, context: &str) -> Result<ConversationResult> {
debug!(
user_input = %input,
context_length = context.len(),
context_empty = context.is_empty(),
"process_input() called"
);
let system_content = if context.is_empty() {
SYSTEM_PROMPT.to_string()
} else {
format!("{SYSTEM_PROMPT}\n\n{context}")
};
debug!(
system_prompt_length = system_content.len(),
"System prompt assembled"
);
debug!(system_prompt = %system_content, "Full system prompt content");
let mut messages: Vec<ChatCompletionRequestMessage> = vec![
ChatCompletionRequestMessage::System(ChatCompletionRequestSystemMessage {
content: ChatCompletionRequestSystemMessageContent::Text(system_content),
name: None,
}),
ChatCompletionRequestMessage::User(ChatCompletionRequestUserMessage {
content: ChatCompletionRequestUserMessageContent::Text(input.to_string()),
name: None,
}),
];
let response = self.run_agent_loop(&mut messages).await?;
Ok(ConversationResult {
response,
conversation: ConversationState {
messages,
origin: ConversationOrigin::NaturalLanguage,
},
})
}
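
    /// Asks the model to investigate a failed command. The user message
    /// includes the command, its exit code, and any captured stdout/stderr.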
pub async fn investigate_error(
&self,
command: &str,
result: &CommandResult,
context: &str,
) -> Result<ConversationResult> {
debug!(
command = %command,
exit_code = result.exit_code,
stdout_len = result.stdout.len(),
stderr_len = result.stderr.len(),
"investigate_error() called"
);
let mut error_details = format!(
"The following command failed:\n\
Command: {command}\n\
Exit code: {}\n",
result.exit_code
);
if !result.stdout.is_empty() {
error_details.push_str(&format!("\nstdout:\n{}\n", result.stdout));
}
if !result.stderr.is_empty() {
error_details.push_str(&format!("\nstderr:\n{}\n", result.stderr));
}
error_details.push_str("\nPlease investigate the error and suggest a fix.");
let system_content = if context.is_empty() {
ERROR_INVESTIGATION_PROMPT.to_string()
} else {
format!("{ERROR_INVESTIGATION_PROMPT}\n\n{context}")
};
let mut messages: Vec<ChatCompletionRequestMessage> = vec![
ChatCompletionRequestMessage::System(ChatCompletionRequestSystemMessage {
content: ChatCompletionRequestSystemMessageContent::Text(system_content),
name: None,
}),
ChatCompletionRequestMessage::User(ChatCompletionRequestUserMessage {
content: ChatCompletionRequestUserMessageContent::Text(error_details),
name: None,
}),
];
let response = self.run_agent_loop(&mut messages).await?;
Ok(ConversationResult {
response,
conversation: ConversationState {
messages,
origin: ConversationOrigin::Investigation,
},
})
}
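
    /// Appends `input` as a user message to an existing conversation and
    /// runs another agent loop pass, returning only the new response.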
pub async fn continue_conversation(
&self,
state: &mut ConversationState,
input: &str,
) -> Result<AiResponse> {
debug!(
user_input = %input,
messages_count = state.messages.len(),
"continue_conversation() called"
);
state.messages.push(ChatCompletionRequestMessage::User(
ChatCompletionRequestUserMessage {
content: ChatCompletionRequestUserMessageContent::Text(input.to_string()),
name: None,
},
));
self.run_agent_loop(&mut state.messages).await
}
}

#[cfg(test)]
mod tests {
use super::*;
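
    // Mutating process env vars here can race with other tests running in
    // parallel; the original value is restored at the end of the test.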
#[test]
fn new_fails_without_api_key() {
let original = std::env::var("OPENAI_API_KEY").ok();
std::env::remove_var("OPENAI_API_KEY");
let result = JarvisAI::new(&AiConfig::default());
assert!(result.is_err());
if let Some(key) = original {
std::env::set_var("OPENAI_API_KEY", key);
}
}
}