use async_trait::async_trait;
use futures_util::Stream;
use serde::{Deserialize, Serialize};
use std::pin::Pin;
use crate::schema::Message;
use crate::RunnableConfig;
use crate::core::tools::ToolCall;
use super::BaseLanguageModel;
/// Result of a single chat-model invocation: generated content plus metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LLMResult {
/// Generated text content of the model response.
pub content: String,
/// Identifier of the model that produced this result.
pub model: String,
/// Token accounting for this call, when the provider reports it.
pub token_usage: Option<TokenUsage>,
/// Tool invocations requested by the model, if any were emitted.
pub tool_calls: Option<Vec<ToolCall>>,
}
/// Token counts for one model call, as reported by the provider.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TokenUsage {
/// Tokens consumed by the input (prompt) side of the call.
pub prompt_tokens: usize,
/// Tokens generated in the model's completion.
pub completion_tokens: usize,
/// Sum of prompt and completion tokens.
pub total_tokens: usize,
}
#[async_trait]
pub trait BaseChatModel: BaseLanguageModel<Vec<Message>, LLMResult> {
    /// Send a full conversation to the model and await the complete response.
    ///
    /// # Errors
    /// Returns the implementor's [`BaseLanguageModel::Error`] on failure.
    async fn chat(
        &self,
        messages: Vec<Message>,
        config: Option<RunnableConfig>
    ) -> Result<LLMResult, Self::Error>;

    /// Send a conversation and receive the response incrementally as a
    /// stream of text chunks.
    ///
    /// # Errors
    /// Fails either up front (when opening the stream) or per-chunk via the
    /// stream's `Result` items.
    async fn stream_chat(
        &self,
        messages: Vec<Message>,
        config: Option<RunnableConfig>
    ) -> Result<Pin<Box<dyn Stream<Item = Result<String, Self::Error>> + Send>>, Self::Error>;

    /// Convenience wrapper: prepend a system message to `messages`, then
    /// delegate to [`BaseChatModel::chat`] with no config override.
    async fn chat_with_system(
        &self,
        system: String,
        messages: Vec<Message>
    ) -> Result<LLMResult, Self::Error> {
        // Build the conversation up front: one system message followed by
        // the caller-supplied history, sized exactly to avoid reallocation.
        let mut conversation = Vec::with_capacity(messages.len() + 1);
        conversation.push(Message::system(system));
        conversation.extend(messages);
        self.chat(conversation, None).await
    }
}