use axum::{
    body::Body,
    extract::{Query, State},
    http::{HeaderMap, StatusCode},
    response::{IntoResponse, Json, Response},
};
use futures::StreamExt;
use serde::Deserialize;
use serde_json::{json, Value};
use std::convert::Infallible;
use tracing::{error, info, warn};

use crate::adapters::{ClientAdapter, FormatDetector};
use crate::api::{AppState, convert};

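/// OpenAI-style chat completion request body. Only the fields this handler
/// needs for validation, conversion, and routing are modeled.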
#[derive(Debug, Deserialize)]
#[allow(dead_code)]
pub struct OpenAIChatRequest {
    pub model: String,
    pub messages: Vec<Value>,
    pub stream: Option<bool>,
    pub max_tokens: Option<u32>,
    pub temperature: Option<f32>,
    pub tools: Option<Vec<Value>>,
    pub tool_choice: Option<Value>,
}

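/// Query parameters accepted by the models listing endpoint (currently none).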
#[derive(Debug, Deserialize)]
pub struct OpenAIModelsParams {}

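/// Handles an OpenAI-compatible chat completion request: enforces the API
/// key, validates the requested model, converts the payload into internal
/// types, and dispatches to the streaming or non-streaming path.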
#[allow(dead_code)]
pub async fn chat(
    headers: HeaderMap,
    State(state): State<AppState>,
    Json(request): Json<OpenAIChatRequest>,
) -> Result<Response, StatusCode> {
    enforce_api_key(&headers, &state)?;

    info!(
        "📝 Received request - model: {}, stream: {:?}, messages count: {}",
        request.model,
        request.stream,
        request.messages.len()
    );

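    // Validate the requested model against the backend before doing any conversion work.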
    if !request.model.is_empty() {
        let validation_result = {
            let llm_service = state.llm_service.read().unwrap();
            llm_service.validate_model(&request.model).await
        };

        match validation_result {
            Ok(false) => {
                error!("❌ Model validation failed: model '{}' not found", request.model);
                return Err(StatusCode::BAD_REQUEST);
            }
            Err(e) => {
                error!("❌ Model validation error: {:?}", e);
                return Err(StatusCode::INTERNAL_SERVER_ERROR);
            }
            Ok(true) => {
                info!("✅ Model '{}' validated successfully", request.model);
            }
        }
    }

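    // Convert the OpenAI-format messages into the internal llm_connector types.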
    match convert::openai_messages_to_llm(request.messages) {
        Ok(messages) => {
            info!("✅ Successfully converted {} messages", messages.len());
            let model = if request.model.is_empty() {
                None
            } else {
                Some(request.model.as_str())
            };

            let tools = request.tools.map(convert::openai_tools_to_llm);
            if let Some(tools) = tools.as_ref() {
                info!("🔧 Request includes {} tools", tools.len());
                if let Some(first_tool) = tools.first() {
                    info!("🔧 First tool: {:?}", serde_json::to_value(first_tool).ok());
                }
            }

            if request.stream.unwrap_or(false) {
                handle_streaming_request(headers, state, model, messages, tools).await
            } else {
                handle_non_streaming_request(state, model, messages, tools).await
            }
        }
        Err(e) => {
            error!("❌ Failed to convert OpenAI messages: {:?}", e);
            Err(StatusCode::BAD_REQUEST)
        }
    }
}

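/// Streams the chat response to the client, rewriting each chunk into the
/// format preferred by the detected client adapter. If the upstream stream
/// cannot be started, falls back to a buffered (non-streaming) response.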
#[allow(dead_code)]
async fn handle_streaming_request(
    headers: HeaderMap,
    state: AppState,
    model: Option<&str>,
    messages: Vec<llm_connector::types::Message>,
    tools: Option<Vec<llm_connector::types::Tool>>,
) -> Result<Response, StatusCode> {
    let config = state.config.read().unwrap();
    let client_adapter = detect_openai_client(&headers, &config);
    let (_stream_format, _) = FormatDetector::determine_format(&headers);
    // Release the config read lock before any await; the adapter's preferred
    // format decides the wire format for this response.
    drop(config);
    let final_format = client_adapter.preferred_format();
    let content_type = FormatDetector::get_content_type(final_format);

    info!("📡 Starting OpenAI streaming response - Format: {:?} ({})", final_format, content_type);

    let stream_result = {
        let llm_service = state.llm_service.read().unwrap();
        llm_service
            .chat_stream_openai(model, messages.clone(), tools.clone(), final_format)
            .await
    };

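    // If the stream cannot be started, fall back to the non-streaming path below.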
    match stream_result {
        Ok(rx) => {
            info!("✅ OpenAI streaming response started successfully");

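            // Rewrite each chunk: strip SSE framing, apply client-specific
            // adaptations, then re-emit in the negotiated format.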
            let config_clone = state.config.clone();
            let adapted_stream = rx.map(move |data| {
                // Strip a leading "data: " prefix, if present, before parsing.
                let json_str = data.strip_prefix("data: ").unwrap_or(&data);

                // Pass keep-alives and the [DONE] sentinel through untouched.
                if json_str.trim().is_empty() || json_str.trim() == "[DONE]" {
                    return data.to_string();
                }

                if let Ok(mut json_data) = serde_json::from_str::<Value>(json_str) {
                    tracing::debug!("📝 Parsed JSON chunk, applying adaptations...");
                    let config = config_clone.read().unwrap();
                    client_adapter.apply_response_adaptations(&config, &mut json_data);

                    match final_format {
                        llm_connector::StreamFormat::SSE => format!("data: {}\n\n", json_data),
                        llm_connector::StreamFormat::NDJSON => format!("{}\n", json_data),
                        llm_connector::StreamFormat::Json => json_data.to_string(),
                    }
                } else {
                    tracing::debug!("⚠️ Failed to parse chunk as JSON: {}", json_str);
                    data.to_string()
                }
            });

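            // Chunk production cannot fail, so the body stream error type is Infallible.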
            let body_stream = adapted_stream.map(Ok::<_, Infallible>);
            let body = Body::from_stream(body_stream);

            let response = Response::builder()
                .status(200)
                .header("content-type", content_type)
                .header("cache-control", "no-cache")
                .body(body)
                .unwrap();

            Ok(response)
        }
        Err(e) => {
            warn!("⚠️ OpenAI streaming failed, falling back to non-streaming: {:?}", e);
            handle_non_streaming_request(state, model, messages, tools).await
        }
    }
}

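/// Performs a buffered chat request and returns the whole response as a
/// single OpenAI-format JSON body.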
#[allow(dead_code)]
async fn handle_non_streaming_request(
    state: AppState,
    model: Option<&str>,
    messages: Vec<llm_connector::types::Message>,
    tools: Option<Vec<llm_connector::types::Tool>>,
) -> Result<Response, StatusCode> {
    let chat_result = {
        let llm_service = state.llm_service.read().unwrap();
        llm_service.chat(model, messages, tools).await
    };

    match chat_result {
        Ok(response) => {
            let openai_response = convert::response_to_openai(response);
            Ok(Json(openai_response).into_response())
        }
        Err(e) => {
            error!("❌ OpenAI chat request failed: {:?}", e);
            Err(StatusCode::INTERNAL_SERVER_ERROR)
        }
    }
}

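/// Lists the backend's models in OpenAI's list-response shape, annotated
/// with the currently configured provider.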
#[allow(dead_code)]
pub async fn models(
    headers: HeaderMap,
    State(state): State<AppState>,
    Query(_params): Query<OpenAIModelsParams>,
) -> Result<impl IntoResponse, StatusCode> {
    enforce_api_key(&headers, &state)?;

    let models_result = {
        let llm_service = state.llm_service.read().unwrap();
        llm_service.list_models().await
    };

    match models_result {
        Ok(models) => {
            let openai_models: Vec<Value> = models
                .into_iter()
                .map(|model| {
                    json!({
                        "id": model.id,
                        "object": "model",
                        "created": chrono::Utc::now().timestamp(),
                        "owned_by": "system"
                    })
                })
                .collect();

            let config = state.config.read().unwrap();
            let current_provider = match &config.llm_backend {
                crate::settings::LlmBackendSettings::OpenAI { .. } => "openai",
                crate::settings::LlmBackendSettings::Anthropic { .. } => "anthropic",
                crate::settings::LlmBackendSettings::Zhipu { .. } => "zhipu",
                crate::settings::LlmBackendSettings::Ollama { .. } => "ollama",
                crate::settings::LlmBackendSettings::Aliyun { .. } => "aliyun",
                crate::settings::LlmBackendSettings::Volcengine { .. } => "volcengine",
                crate::settings::LlmBackendSettings::Tencent { .. } => "tencent",
                crate::settings::LlmBackendSettings::Longcat { .. } => "longcat",
            };

            let response = json!({
                "object": "list",
                "data": openai_models,
                "provider": current_provider,
            });
            Ok(Json(response))
        }
        Err(_) => Err(StatusCode::INTERNAL_SERVER_ERROR),
    }
}

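/// Checks the configured API key, if one is set, against the request headers.
/// Accepts either a bare token or a `Bearer `-prefixed value; when no key is
/// configured (or the OpenAI API is disabled), all requests pass.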
#[allow(dead_code)]
fn enforce_api_key(headers: &HeaderMap, state: &AppState) -> Result<(), StatusCode> {
    let config = state.config.read().unwrap();
    if let Some(cfg) = &config.apis.openai {
        if cfg.enabled {
            if let Some(expected_key) = cfg.api_key.as_ref() {
                // The key may live in a custom header; default to `Authorization`.
                let header_name = cfg
                    .api_key_header
                    .as_deref()
                    .unwrap_or("authorization")
                    .to_ascii_lowercase();

                let value_opt = if header_name == "authorization" {
                    headers.get(axum::http::header::AUTHORIZATION)
                } else {
                    match axum::http::HeaderName::from_bytes(header_name.as_bytes()) {
                        Ok(name) => headers.get(name),
                        Err(_) => None,
                    }
                };

                if let Some(value) = value_opt {
                    if let Ok(value_str) = value.to_str() {
                        let token = value_str.strip_prefix("Bearer ").unwrap_or(value_str);

                        if token == expected_key {
                            info!("✅ OpenAI API key authentication successful");
                            return Ok(());
                        }
                    }
                }

                warn!("🚫 OpenAI API key authentication failed");
                return Err(StatusCode::UNAUTHORIZED);
            }
        }
    }
    Ok(())
}

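/// Picks the client adapter for an incoming OpenAI-format request. Header and
/// config-based detection is not implemented yet; every client currently gets
/// the stock OpenAI adapter.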
#[allow(dead_code)]
fn detect_openai_client(_headers: &HeaderMap, _config: &crate::settings::Settings) -> ClientAdapter {
    ClientAdapter::OpenAI
}