//! j_cli/command/chat/api.rs
//!
//! Chat-completion helpers for the `chat` command, built on the
//! `async-openai` client (works against any OpenAI-compatible API base).
use async_openai::{
    Client,
    config::OpenAIConfig,
    types::chat::{
        ChatCompletionRequestAssistantMessageArgs, ChatCompletionRequestMessage,
        ChatCompletionRequestSystemMessageArgs, ChatCompletionRequestUserMessageArgs,
        CreateChatCompletionRequestArgs,
    },
};
use futures::StreamExt;

use super::model::{ChatMessage, ModelProvider};
12
13/// 根据 ModelProvider 配置创建 async-openai Client
14pub fn create_openai_client(provider: &ModelProvider) -> Client<OpenAIConfig> {
15    let config = OpenAIConfig::new()
16        .with_api_key(&provider.api_key)
17        .with_api_base(&provider.api_base);
18    Client::with_config(config)
19}
20
21/// 将内部 ChatMessage 转换为 async-openai 的请求消息格式
22pub fn to_openai_messages(messages: &[ChatMessage]) -> Vec<ChatCompletionRequestMessage> {
23    messages
24        .iter()
25        .filter_map(|msg| match msg.role.as_str() {
26            "system" => ChatCompletionRequestSystemMessageArgs::default()
27                .content(msg.content.as_str())
28                .build()
29                .ok()
30                .map(ChatCompletionRequestMessage::System),
31            "user" => ChatCompletionRequestUserMessageArgs::default()
32                .content(msg.content.as_str())
33                .build()
34                .ok()
35                .map(ChatCompletionRequestMessage::User),
36            "assistant" => ChatCompletionRequestAssistantMessageArgs::default()
37                .content(msg.content.as_str())
38                .build()
39                .ok()
40                .map(ChatCompletionRequestMessage::Assistant),
41            _ => None,
42        })
43        .collect()
44}
45
46/// 使用 async-openai 流式调用 API,通过回调逐步输出
47/// 返回完整的助手回复内容
48pub async fn call_openai_stream_async(
49    provider: &ModelProvider,
50    messages: &[ChatMessage],
51    on_chunk: &mut dyn FnMut(&str),
52) -> Result<String, String> {
53    let client = create_openai_client(provider);
54    let openai_messages = to_openai_messages(messages);
55
56    let request = CreateChatCompletionRequestArgs::default()
57        .model(&provider.model)
58        .messages(openai_messages)
59        .build()
60        .map_err(|e| format!("构建请求失败: {}", e))?;
61
62    let mut stream = client
63        .chat()
64        .create_stream(request)
65        .await
66        .map_err(|e| format!("API 请求失败: {}", e))?;
67
68    let mut full_content = String::new();
69
70    while let Some(result) = stream.next().await {
71        match result {
72            Ok(response) => {
73                for choice in &response.choices {
74                    if let Some(ref content) = choice.delta.content {
75                        full_content.push_str(content);
76                        on_chunk(content);
77                    }
78                }
79            }
80            Err(e) => {
81                return Err(format!("流式响应错误: {}", e));
82            }
83        }
84    }
85
86    Ok(full_content)
87}
88
89/// 同步包装:创建 tokio runtime 执行异步流式调用
90pub fn call_openai_stream(
91    provider: &ModelProvider,
92    messages: &[ChatMessage],
93    on_chunk: &mut dyn FnMut(&str),
94) -> Result<String, String> {
95    let rt = tokio::runtime::Runtime::new().map_err(|e| format!("创建异步运行时失败: {}", e))?;
96    rt.block_on(call_openai_stream_async(provider, messages, on_chunk))
97}