pub trait LlmProvider: Send + Sync {
// Required method
fn chat<'life0, 'life1, 'life2, 'life3, 'async_trait>(
&'life0 self,
system_prompt: &'life1 str,
user_message: &'life2 str,
model: Option<&'life3 str>,
) -> Pin<Box<dyn Future<Output = Result<String, SkillError>> + Send + 'async_trait>>
where Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait;
// Provided method
fn chat_stream<'life0, 'life1, 'life2, 'life3, 'life4, 'async_trait>(
&'life0 self,
system_prompt: &'life1 str,
user_message: &'life2 str,
model: Option<&'life3 str>,
_callback: Box<dyn Fn(&'life4 str) + Send>,
) -> Pin<Box<dyn Future<Output = Result<String, SkillError>> + Send + 'async_trait>>
where Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
'life4: 'async_trait { ... }
}

Expand description
LLM Provider trait(应用层实现)
定义 LLM 调用的抽象接口,由应用层(如 ProxyCast)实现具体的 API 调用逻辑。
§设计说明
- chat: 必须实现的核心聊天方法(非流式)
- chat_stream: 可选的流式聊天方法,默认回退到非流式实现
§线程安全
实现必须是 Send + Sync,以支持在异步上下文中使用。
§示例
ⓘ
use aster::skills::executor::LlmProvider;
use aster::skills::error::SkillError;
use async_trait::async_trait;
struct OpenAIProvider {
api_key: String,
}
#[async_trait]
impl LlmProvider for OpenAIProvider {
async fn chat(
&self,
system_prompt: &str,
user_message: &str,
model: Option<&str>,
) -> Result<String, SkillError> {
let model = model.unwrap_or("gpt-4");
// 调用 OpenAI API...
Ok("响应内容".to_string())
}
}

Required Methods§
fn chat<'life0, 'life1, 'life2, 'life3, 'async_trait>(
&'life0 self,
system_prompt: &'life1 str,
user_message: &'life2 str,
model: Option<&'life3 str>,
) -> Pin<Box<dyn Future<Output = Result<String, SkillError>> + Send + 'async_trait>> where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
fn chat<'life0, 'life1, 'life2, 'life3, 'async_trait>(
&'life0 self,
system_prompt: &'life1 str,
user_message: &'life2 str,
model: Option<&'life3 str>,
) -> Pin<Box<dyn Future<Output = Result<String, SkillError>> + Send + 'async_trait>> where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
Provided Methods§
fn chat_stream<'life0, 'life1, 'life2, 'life3, 'life4, 'async_trait>(
&'life0 self,
system_prompt: &'life1 str,
user_message: &'life2 str,
model: Option<&'life3 str>,
_callback: Box<dyn Fn(&'life4 str) + Send>,
) -> Pin<Box<dyn Future<Output = Result<String, SkillError>> + Send + 'async_trait>> where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
'life4: 'async_trait,
fn chat_stream<'life0, 'life1, 'life2, 'life3, 'life4, 'async_trait>(
&'life0 self,
system_prompt: &'life1 str,
user_message: &'life2 str,
model: Option<&'life3 str>,
_callback: Box<dyn Fn(&'life4 str) + Send>,
) -> Pin<Box<dyn Future<Output = Result<String, SkillError>> + Send + 'async_trait>> where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
'life4: 'async_trait,
流式聊天(可选实现)
支持流式响应的 LLM 调用,通过回调函数实时返回生成的内容片段。
§Arguments
- system_prompt - 系统提示词
- user_message - 用户消息
- model - 可选的模型名称
- callback - 流式回调函数,每次收到新内容时调用
§Returns
成功时返回完整的 LLM 响应文本,失败时返回 SkillError
§Default Implementation
默认实现回退到非流式 chat 方法,忽略 callback 参数。
如果需要真正的流式支持,应用层应覆盖此方法。
§示例
ⓘ
let callback = Box::new(|chunk: &str| {
print!("{}", chunk);
});
let result = provider.chat_stream(
"你是一个助手",
"你好",
Some("gpt-4"),
callback,
).await?;