ai_lib/api/chat.rs

use async_trait::async_trait;
use futures::stream::Stream;

use crate::types::{AiLibError, ChatCompletionRequest, ChatCompletionResponse, Role};

/// Generic chat API interface.
///
/// This trait defines the core capabilities that all AI services should have,
/// without depending on any specific model implementation details.
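///
/// # Example
///
/// A minimal sketch of calling through this trait. `provider` stands for any
/// hypothetical type implementing `ChatApi`, and `request` is a
/// `ChatCompletionRequest` built elsewhere; neither is defined in this module.
///
/// ```ignore
/// // Send a one-shot completion, then query the available models.
/// let response = provider.chat_completion(request).await?;
/// let models = provider.list_models().await?;
/// ```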
#[async_trait]
pub trait ChatApi: Send + Sync {
    /// Send a chat completion request.
    ///
    /// # Arguments
    /// * `request` - Generic chat completion request
    ///
    /// # Returns
    /// * `Result<ChatCompletionResponse, AiLibError>` - Returns the response on success, an error on failure
    async fn chat_completion(
        &self,
        request: ChatCompletionRequest,
    ) -> Result<ChatCompletionResponse, AiLibError>;

    /// Streaming chat completion request.
    ///
    /// # Arguments
    /// * `request` - Generic chat completion request
    ///
    /// # Returns
    /// * `Result<Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, AiLibError>` - Returns a streaming response on success
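    ///
    /// # Example
    ///
    /// A minimal sketch of draining the stream. It assumes `provider` is some
    /// `ChatApi` implementor and `request` was built elsewhere;
    /// `futures::StreamExt` supplies `next`.
    ///
    /// ```ignore
    /// use futures::StreamExt;
    ///
    /// let mut stream = provider.chat_completion_stream(request).await?;
    /// while let Some(chunk) = stream.next().await {
    ///     // Each chunk carries incremental deltas; print the text as it arrives.
    ///     for choice in chunk?.choices {
    ///         if let Some(text) = choice.delta.content {
    ///             print!("{}", text);
    ///         }
    ///     }
    /// }
    /// ```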
    async fn chat_completion_stream(
        &self,
        request: ChatCompletionRequest,
    ) -> Result<Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, AiLibError>;

    /// Get the list of supported models.
    ///
    /// # Returns
    /// * `Result<Vec<String>, AiLibError>` - Returns the model list on success, an error on failure
    async fn list_models(&self) -> Result<Vec<String>, AiLibError>;

    /// Get model information.
    ///
    /// # Arguments
    /// * `model_id` - Model ID
    ///
    /// # Returns
    /// * `Result<ModelInfo, AiLibError>` - Returns model information on success, an error on failure
    async fn get_model_info(&self, model_id: &str) -> Result<ModelInfo, AiLibError>;
}

/// Streaming response data chunk.
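///
/// # Example
///
/// A sketch of collecting the text carried by one chunk; `chunk` would come
/// from `chat_completion_stream` and is not constructed here.
///
/// ```ignore
/// let mut text = String::new();
/// for choice in &chunk.choices {
///     // `content` is None for deltas that only carry role or finish metadata.
///     if let Some(part) = &choice.delta.content {
///         text.push_str(part);
///     }
/// }
/// ```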
#[derive(Debug, Clone)]
pub struct ChatCompletionChunk {
    pub id: String,
    pub object: String,
    pub created: u64,
    pub model: String,
    pub choices: Vec<ChoiceDelta>,
}

/// Streaming response choice delta.
#[derive(Debug, Clone)]
pub struct ChoiceDelta {
    pub index: u32,
    pub delta: MessageDelta,
    pub finish_reason: Option<String>,
}

/// Message delta.
#[derive(Debug, Clone)]
pub struct MessageDelta {
    pub role: Option<Role>,
    pub content: Option<String>,
}

/// Model information.
#[derive(Debug, Clone)]
pub struct ModelInfo {
    pub id: String,
    pub object: String,
    pub created: u64,
    pub owned_by: String,
    pub permission: Vec<ModelPermission>,
}

/// Model permission.
#[derive(Debug, Clone)]
pub struct ModelPermission {
    pub id: String,
    pub object: String,
    pub created: u64,
    pub allow_create_engine: bool,
    pub allow_sampling: bool,
    pub allow_logprobs: bool,
    pub allow_search_indices: bool,
    pub allow_view: bool,
    pub allow_fine_tuning: bool,
    pub organization: String,
    pub group: Option<String>,
    pub is_blocking: bool,
}