use axum::{
    body::Body,
    extract::{Query, State},
    http::{HeaderMap, StatusCode},
    response::{IntoResponse, Json, Response},
};
use futures::StreamExt;
use serde::Deserialize;
use serde_json::{json, Value};
use std::convert::Infallible;
use tracing::{error, info, warn};

use crate::adapters::{ClientAdapter, FormatDetector};
use crate::api::{convert, AppState};

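/// OpenAI-compatible chat completion request body. `messages`, `tools`, and
/// `tool_choice` are kept as raw JSON values and converted to the internal
/// types in `crate::api::convert`.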
#[derive(Debug, Deserialize)]
#[allow(dead_code)]
pub struct OpenAIChatRequest {
    pub model: String,
    pub messages: Vec<Value>,
    pub stream: Option<bool>,
    pub max_tokens: Option<u32>,
    pub temperature: Option<f32>,
    pub tools: Option<Vec<Value>>,
    pub tool_choice: Option<Value>,
}

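/// Query parameters for the models endpoint (currently none are recognized).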
#[derive(Debug, Deserialize)]
pub struct OpenAIModelsParams {}

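/// OpenAI-compatible chat completion handler: authenticates the caller,
/// validates the requested model when one is given, converts the
/// OpenAI-shaped messages and tools into internal types, then dispatches
/// to the streaming or non-streaming path based on the `stream` flag.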
#[allow(dead_code)]
pub async fn chat(
    headers: HeaderMap,
    State(state): State<AppState>,
    Json(request): Json<OpenAIChatRequest>,
) -> Result<Response, StatusCode> {
    enforce_api_key(&headers, &state)?;

    info!(
        "📝 Received request - model: {}, stream: {:?}, messages count: {}",
        request.model, request.stream, request.messages.len()
    );

    // Validate the requested model before converting the payload.
    if !request.model.is_empty() {
        let validation_result = {
            let llm_service = state.llm_service.read()
                .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
            llm_service.validate_model(&request.model).await
        };

        match validation_result {
            Ok(false) => {
                error!("❌ Model validation failed: model '{}' not found", request.model);
                return Err(StatusCode::BAD_REQUEST);
            }
            Err(e) => {
                error!("❌ Model validation error: {:?}", e);
                return Err(StatusCode::INTERNAL_SERVER_ERROR);
            }
            Ok(true) => {
                info!("✅ Model '{}' validated successfully", request.model);
            }
        }
    }

    match convert::openai_messages_to_llm(request.messages) {
        Ok(messages) => {
            info!("✅ Successfully converted {} messages", messages.len());
            let model = if request.model.is_empty() {
                None
            } else {
                Some(request.model.as_str())
            };

            let tools = request.tools.map(convert::openai_tools_to_llm);
            if let Some(ref tools_ref) = tools {
                info!("🔧 Request includes {} tools", tools_ref.len());
                if let Some(first_tool) = tools_ref.first() {
                    info!("🔧 First tool: {:?}", serde_json::to_value(first_tool).ok());
                }
            }

            // Dispatch based on the request's `stream` flag (default: non-streaming).
            if request.stream.unwrap_or(false) {
                handle_streaming_request(headers, state, model, messages, tools).await
            } else {
                handle_non_streaming_request(state, model, messages, tools).await
            }
        }
        Err(e) => {
            error!("❌ Failed to convert OpenAI messages: {:?}", e);
            Err(StatusCode::BAD_REQUEST)
        }
    }
}

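/// Streams chunks back to the client, rewriting each JSON chunk through the
/// detected client adapter and re-framing it in the negotiated format.
/// Falls back to the non-streaming path if the upstream stream cannot start.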
#[allow(dead_code)]
async fn handle_streaming_request(
    headers: HeaderMap,
    state: AppState,
    model: Option<&str>,
    messages: Vec<llm_connector::types::Message>,
    tools: Option<Vec<llm_connector::types::Tool>>,
) -> Result<Response, StatusCode> {
    let config = state.config.read()
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    let client_adapter = detect_openai_client(&headers, &config);
    let (_stream_format, _) = FormatDetector::determine_format(&headers);
    drop(config);

    // The client adapter's preferred format decides the wire framing.
    let final_format = client_adapter.preferred_format();
    let content_type = FormatDetector::get_content_type(final_format);

    info!("📡 Starting OpenAI streaming response - Format: {:?} ({})", final_format, content_type);

    let llm_service = state.llm_service.read()
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    let stream_result = llm_service.chat_stream_openai(model, messages.clone(), tools.clone(), final_format).await;
    // Release the service lock before building the response stream.
    drop(llm_service);

    match stream_result {
        Ok(rx) => {
            info!("✅ OpenAI streaming response started successfully");

            let config_clone = state.config.clone();
            let adapted_stream = rx.map(move |data| {
                // Strip the SSE "data: " prefix, if present, before parsing.
                let json_str = if data.starts_with("data: ") {
                    &data[6..]
                } else {
                    &data
                };

                // Pass keep-alive blanks and the [DONE] sentinel through unchanged.
                if json_str.trim().is_empty() || json_str.trim() == "[DONE]" {
                    return data.to_string();
                }

                if let Ok(mut json_data) = serde_json::from_str::<Value>(json_str) {
                    tracing::debug!("📝 Parsed JSON chunk, applying adaptations...");
                    if let Ok(config) = config_clone.read() {
                        client_adapter.apply_response_adaptations(&config, &mut json_data);
                    } else {
                        warn!("Failed to acquire read lock for config in OpenAI stream, skipping adaptations");
                    }

                    // Re-frame the adapted chunk in the negotiated stream format.
                    match final_format {
                        llm_connector::StreamFormat::SSE => format!("data: {}\n\n", json_data),
                        llm_connector::StreamFormat::NDJSON => format!("{}\n", json_data),
                        llm_connector::StreamFormat::Json => json_data.to_string(),
                    }
                } else {
                    tracing::debug!("⚠️ Failed to parse chunk as JSON: {}", json_str);
                    data.to_string()
                }
            });

            let body_stream = adapted_stream.map(Ok::<_, Infallible>);
            let body = Body::from_stream(body_stream);

            let response = Response::builder()
                .status(200)
                .header("content-type", content_type)
                .header("cache-control", "no-cache")
                .body(body)
                .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;

            Ok(response)
        }
        Err(e) => {
            warn!("⚠️ OpenAI streaming failed, falling back to non-streaming: {:?}", e);
            handle_non_streaming_request(state, model, messages, tools).await
        }
    }
}

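/// Performs a single non-streaming chat call and returns the result converted
/// back into the OpenAI response shape.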
#[allow(dead_code)]
async fn handle_non_streaming_request(
    state: AppState,
    model: Option<&str>,
    messages: Vec<llm_connector::types::Message>,
    tools: Option<Vec<llm_connector::types::Tool>>,
) -> Result<Response, StatusCode> {
    let llm_service = state.llm_service.read()
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    let chat_result = llm_service.chat(model, messages, tools).await;

    match chat_result {
        Ok(response) => {
            let openai_response = convert::response_to_openai(response);
            Ok(Json(openai_response).into_response())
        }
        Err(e) => {
            error!("❌ OpenAI chat request failed: {:?}", e);
            Err(StatusCode::INTERNAL_SERVER_ERROR)
        }
    }
}

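/// OpenAI-compatible model listing: wraps each backend model in an OpenAI
/// `model` object and annotates the list with the active provider.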
#[allow(dead_code)]
pub async fn models(
    headers: HeaderMap,
    State(state): State<AppState>,
    Query(_params): Query<OpenAIModelsParams>,
) -> Result<impl IntoResponse, StatusCode> {
    enforce_api_key(&headers, &state)?;

    let llm_service = state.llm_service.read()
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    let models_result = llm_service.list_models().await;

    match models_result {
        Ok(models) => {
            let openai_models: Vec<Value> = models
                .into_iter()
                .map(|model| {
                    json!({
                        "id": model.id,
                        "object": "model",
                        "created": chrono::Utc::now().timestamp(),
                        "owned_by": "system"
                    })
                })
                .collect();

            let config = state.config.read()
                .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
            let current_provider = match &config.llm_backend {
                crate::settings::LlmBackendSettings::OpenAI { .. } => "openai",
                crate::settings::LlmBackendSettings::Anthropic { .. } => "anthropic",
                crate::settings::LlmBackendSettings::Zhipu { .. } => "zhipu",
                crate::settings::LlmBackendSettings::Ollama { .. } => "ollama",
                crate::settings::LlmBackendSettings::Aliyun { .. } => "aliyun",
                crate::settings::LlmBackendSettings::Volcengine { .. } => "volcengine",
                crate::settings::LlmBackendSettings::Tencent { .. } => "tencent",
                crate::settings::LlmBackendSettings::Longcat { .. } => "longcat",
                crate::settings::LlmBackendSettings::Moonshot { .. } => "moonshot",
            };

            let response = json!({
                "object": "list",
                "data": openai_models,
                "provider": current_provider,
            });
            Ok(Json(response))
        }
        Err(_) => Err(StatusCode::INTERNAL_SERVER_ERROR),
    }
}

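/// Checks the configured API key, if any, against the incoming request.
/// Accepts either a bare key or a `Bearer`-prefixed value in the configured
/// header (`Authorization` by default). Rejects with `401 UNAUTHORIZED` on
/// mismatch and succeeds when no key is configured.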
#[allow(dead_code)]
fn enforce_api_key(headers: &HeaderMap, state: &AppState) -> Result<(), StatusCode> {
    let config = state.config.read()
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    if let Some(cfg) = &config.apis.openai {
        if cfg.enabled {
            if let Some(expected_key) = cfg.api_key.as_ref() {
                // Default to the standard Authorization header unless one is configured.
                let header_name = cfg.api_key_header.as_deref().unwrap_or("authorization").to_ascii_lowercase();

                let value_opt = if header_name == "authorization" {
                    headers.get(axum::http::header::AUTHORIZATION)
                } else {
                    match axum::http::HeaderName::from_bytes(header_name.as_bytes()) {
                        Ok(name) => headers.get(name),
                        Err(_) => None,
                    }
                };

                if let Some(value) = value_opt {
                    if let Ok(value_str) = value.to_str() {
                        // Accept both raw keys and "Bearer <key>" values.
                        let token = value_str.strip_prefix("Bearer ").unwrap_or(value_str);

                        if token == expected_key {
                            info!("✅ OpenAI API key authentication successful");
                            return Ok(());
                        }
                    }
                }

                warn!("🚫 OpenAI API key authentication failed");
                return Err(StatusCode::UNAUTHORIZED);
            }
        }
    }
    Ok(())
}

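/// Client detection for the OpenAI-compatible endpoints; currently always
/// selects the plain OpenAI adapter regardless of headers or configuration.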
#[allow(dead_code)]
fn detect_openai_client(_headers: &HeaderMap, _config: &crate::settings::Settings) -> ClientAdapter {
    ClientAdapter::OpenAI
}