#[cfg(test)]
mod unit_tests {
    use crate::agent::utils::convert_messages_to_prompt_args;
    use crate::prompt_args;
    use crate::schemas::Message;
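
    // The assertions below pin down the contract these tests rely on:
    // `convert_messages_to_prompt_args` lifts a "messages" vector into
    // agent-style prompt args, mapping the last human message to "input",
    // surfacing prior turns under "chat_history", and passing every other
    // key through unchanged.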
    #[test]
    fn test_convert_messages_with_human_message() {
        let messages = vec![
            Message::new_system_message("You are helpful"),
            Message::new_human_message("Hello"),
            Message::new_ai_message("Hi there!"),
        ];
        let input = prompt_args! {
            "messages" => messages
        };

        let result = convert_messages_to_prompt_args(input);
        assert!(result.is_ok(), "Conversion should succeed");

        let args = result.unwrap();
        assert_eq!(args["input"], serde_json::json!("Hello"));
        assert!(args.contains_key("chat_history"));
    }
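
    // With no human message present, the helper falls back to the most
    // recent message in the list.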
    #[test]
    fn test_convert_messages_without_human_message() {
        let messages = vec![
            Message::new_system_message("You are helpful"),
            Message::new_ai_message("I am an AI"),
        ];
        let input = prompt_args! {
            "messages" => messages
        };

        let result = convert_messages_to_prompt_args(input);
        assert!(result.is_ok());

        let args = result.unwrap();
        assert_eq!(args["input"], serde_json::json!("I am an AI"));
    }
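
    // Keys other than "messages" must survive the conversion untouched.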
    #[test]
    fn test_convert_messages_preserves_custom_keys() {
        let messages = vec![Message::new_human_message("Hello")];
        let input = prompt_args! {
            "messages" => messages,
            "custom_key" => "custom_value",
            "another_key" => 42
        };

        let result = convert_messages_to_prompt_args(input);
        assert!(result.is_ok());

        let args = result.unwrap();
        assert_eq!(args["custom_key"], serde_json::json!("custom_value"));
        assert_eq!(args["another_key"], serde_json::json!(42));
    }
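
    // An explicit "chat_history" supplied by the caller is preserved rather
    // than being overwritten by history derived from "messages".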
    #[test]
    fn test_convert_messages_preserves_chat_history() {
        let messages = vec![
            Message::new_human_message("First"),
            Message::new_ai_message("Response"),
        ];
        let input = prompt_args! {
            "messages" => messages,
            "chat_history" => serde_json::json!(vec![
                Message::new_human_message("Previous")
            ])
        };

        let result = convert_messages_to_prompt_args(input);
        assert!(result.is_ok());

        let args = result.unwrap();
        let chat_history = args["chat_history"]
            .as_array()
            .expect("chat_history should be an array");
        assert_eq!(chat_history.len(), 1);
        assert_eq!(chat_history[0]["content"], serde_json::json!("Previous"));
    }
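
    // A missing "messages" key is a usage error and should be reported.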
    #[test]
    fn test_convert_messages_missing_messages_key() {
        let input = prompt_args! {
            "other_key" => "value"
        };

        let result = convert_messages_to_prompt_args(input);
        assert!(result.is_err(), "Should error on missing 'messages' key");
    }
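
    // "messages" holds a plain string here, not a list of messages, so the
    // conversion is expected to reject it.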
    #[test]
    fn test_convert_messages_invalid_json() {
        let input = prompt_args! {
            "messages" => "not a valid json array"
        };

        let result = convert_messages_to_prompt_args(input);
        assert!(result.is_err(), "Should error on invalid JSON");
    }
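
    // An empty message list still succeeds, yielding an empty "input".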
    #[test]
    fn test_convert_empty_messages() {
        let messages: Vec<Message> = vec![];
        let input = prompt_args! {
            "messages" => messages
        };

        let result = convert_messages_to_prompt_args(input);
        assert!(result.is_ok());

        let args = result.unwrap();
        assert_eq!(args["input"], serde_json::json!(""));
    }
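
    // With several human messages, the most recent one wins.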
    #[test]
    fn test_convert_messages_multiple_human_takes_last() {
        let messages = vec![
            Message::new_human_message("First human"),
            Message::new_ai_message("AI response"),
            Message::new_human_message("Second human"),
        ];
        let input = prompt_args! {
            "messages" => messages
        };

        let result = convert_messages_to_prompt_args(input);
        assert!(result.is_ok());

        let args = result.unwrap();
        assert_eq!(args["input"], serde_json::json!("Second human"));
    }
}
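
// Live-endpoint smoke tests. Both are `#[ignore]`d so that a plain
// `cargo test` run stays hermetic; opt in with `cargo test -- --ignored`.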
#[cfg(test)]
mod integration_tests {
    use crate::agent::create_agent_from_llm;
    use crate::chain::Chain;
    use crate::llm::openai::{OpenAI, OpenAIModel};
    use crate::prompt_args;

    #[tokio::test]
    #[ignore = "Requires OPENAI_API_KEY environment variable"]
    async fn test_agent_with_openai() -> Result<(), Box<dyn std::error::Error>> {
        let llm = OpenAI::new(
            crate::llm::openai::OpenAIConfig::new().with_api_key(std::env::var("OPENAI_API_KEY")?),
        )
        .with_model(OpenAIModel::Gpt35.to_string());
        let agent = create_agent_from_llm(llm, &[], Some("You are a helpful assistant."))?;

        let result = agent
            .invoke(prompt_args! { "input" => "Hello, how are you?" })
            .await?;
        assert!(!result.is_empty(), "Response should not be empty");
        Ok(())
    }
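
    // Same smoke test against a local Ollama daemon; additionally gated
    // behind the `ollama` cargo feature.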
    #[tokio::test]
    #[ignore = "Requires running Ollama instance (ollama serve)"]
    #[cfg(feature = "ollama")]
    async fn test_agent_with_ollama() -> Result<(), Box<dyn std::error::Error>> {
        let llm = crate::llm::ollama::Ollama::default().with_model("llama3");
        let agent = create_agent_from_llm(llm, &[], Some("You are a helpful assistant."))?;

        let result = agent
            .invoke(prompt_args! { "input" => "Hello, how are you?" })
            .await?;
        assert!(!result.is_empty(), "Response should not be empty");
        Ok(())
    }
}
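
// End-to-end check of the prompt-template -> LLM chain wiring, independent
// of the agent utilities exercised above.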
#[cfg(test)]
mod workflow_tests {
    use crate::chain::Chain;
    use crate::fmt_template;
    use crate::llm::openai::{OpenAI, OpenAIModel};
    use crate::message_formatter;
    use crate::prompt::HumanMessagePromptTemplate;
    use crate::prompt_args;
    use crate::template_fstring;

    #[tokio::test]
    #[ignore = "Requires OPENAI_API_KEY - run with: cargo test --features openai test_basic_workflow -- --ignored"]
    async fn test_basic_workflow() {
        let prompt = HumanMessagePromptTemplate::new(template_fstring!(
            "Translate this to French: {text}",
            "text",
        ));
        let formatter = message_formatter![fmt_template!(prompt)];
        let llm = OpenAI::default().with_model(OpenAIModel::Gpt35.to_string());
        let chain = crate::chain::LLMChainBuilder::new()
            .prompt(formatter)
            .llm(llm)
            .build()
            .expect("Failed to build chain");

        let result = chain.invoke(prompt_args! { "text" => "Hello world" }).await;
        assert!(result.is_ok(), "Chain should execute successfully");

        let response = result.unwrap();
        // LLM output is nondeterministic, so accept either the expected
        // translation or an explicit mention of French.
        assert!(
            response.contains("Bonjour") || response.contains("French"),
            "Response should contain French translation"
        );
    }
}