use async_openai::types::ChatCompletionRequestMessage;
/// A response produced by the AI backend, classified by how it should be handled.
///
/// `PartialEq`/`Eq` are derived (both variants carry `String`) so responses can
/// be compared directly in match guards and tests.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AiResponse {
    /// A shell/command string the caller is expected to execute.
    Command(String),
    /// Free-form natural-language text to display to the user.
    NaturalLanguage(String),
}
/// Where a conversation was started from.
///
/// Fieldless and `Copy`; `Hash` is derived (free for a fieldless enum) so the
/// origin can key `HashMap`/`HashSet` collections.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ConversationOrigin {
    /// Conversation initiated by a natural-language user query.
    NaturalLanguage,
    /// Conversation initiated by an investigation flow.
    Investigation,
}
/// Accumulated state of an ongoing conversation with the AI backend.
///
/// `Debug`/`Clone` are derived for consistency with `AiResponse` and so the
/// state can be logged and snapshotted; `ChatCompletionRequestMessage`
/// derives both in `async_openai`.
#[derive(Debug, Clone)]
pub struct ConversationState {
    /// Full message history sent to the chat-completion API; crate-private so
    /// only this crate can mutate the transcript.
    pub(crate) messages: Vec<ChatCompletionRequestMessage>,
    /// How this conversation was started (natural-language query vs. investigation).
    pub origin: ConversationOrigin,
}
/// The outcome of one AI round-trip: the response to act on plus the updated
/// conversation state to carry into the next turn.
pub struct ConversationResult {
    /// The AI's classified response (command to run or text to show).
    pub response: AiResponse,
    /// Conversation state after this exchange; pass back in to continue the thread.
    pub conversation: ConversationState,
}