use async_trait::async_trait;
use serde_json::Value;
use prompty::interfaces::{InvokerError, Processor};
use prompty::model::Prompty;
/// Unit-struct [`Processor`] implementation for Foundry; actual response and
/// stream handling are delegated to the `prompty_openai` crate by the
/// `Processor` impl.
// Derives are free on a unit struct; `Debug` in particular is expected on
// public types for diagnostics.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct FoundryProcessor;
#[async_trait]
impl Processor for FoundryProcessor {
    /// Post-processes a raw model `response` for `agent` by delegating to
    /// `prompty_openai::process_response`.
    async fn process(&self, agent: &Prompty, response: Value) -> Result<Value, InvokerError> {
        let raw = response;
        prompty_openai::process_response(agent, &raw)
    }

    /// Converts a stream of raw JSON values into `StreamChunk`s, reusing the
    /// stream handling of `prompty_openai::OpenAIProcessor` unchanged.
    fn process_stream(
        &self,
        inner: std::pin::Pin<Box<dyn futures::Stream<Item = Value> + Send>>,
    ) -> Result<
        std::pin::Pin<Box<dyn futures::Stream<Item = prompty::types::StreamChunk> + Send>>,
        InvokerError,
    > {
        let delegate = prompty_openai::OpenAIProcessor;
        delegate.process_stream(inner)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use prompty::model::context::LoadContext;
    use serde_json::json;

    /// Builds a minimal agent definition used by every test in this module.
    fn make_agent() -> Prompty {
        let spec = json!({
            "name": "test",
            "kind": "prompt",
            "model": {
                "id": "gpt-4",
                "connection": {
                    "kind": "key",
                    "endpoint": "https://myresource.openai.azure.com",
                    "apiKey": "test-key"
                }
            },
            "instructions": "test"
        });
        Prompty::load_from_value(&spec, &LoadContext::default())
    }

    #[tokio::test]
    async fn test_process_chat_response() {
        // A chat-completion style payload should be reduced to the message content.
        let agent = make_agent();
        let raw = json!({
            "choices": [{
                "message": {
                    "role": "assistant",
                    "content": "Hello from Azure!"
                },
                "finish_reason": "stop"
            }]
        });
        let processed = FoundryProcessor
            .process(&agent, raw)
            .await
            .unwrap();
        assert_eq!(processed, "Hello from Azure!");
    }

    #[tokio::test]
    async fn test_process_embedding_response() {
        // An embedding-list payload should come back as a JSON array.
        let agent = make_agent();
        let raw = json!({
            "object": "list",
            "data": [
                {"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}
            ]
        });
        let processed = FoundryProcessor
            .process(&agent, raw)
            .await
            .unwrap();
        assert!(processed.is_array());
    }
}