use crate::cli::WebChatProxyCommands;
use anyhow::Result;
use colored::*;
use axum::{
    extract::State,
    http::StatusCode,
    response::{Html, Json},
    routing::{get, post},
    Router,
};
use std::sync::Arc;

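/// Shared state made available to handlers through axum's `State` extractor.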
#[derive(Clone)]
struct AppState {
    config: crate::config::Config,
}

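/// Dispatches a `WebChatProxyCommands` variant to its handler.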
pub async fn handle(command: WebChatProxyCommands) -> Result<()> {
    match command {
        WebChatProxyCommands::Start { port, host, cors } => {
            handle_start(port, host, cors).await
        }
    }
}

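/// Prints the startup banner and endpoint listing, then launches the server.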
async fn handle_start(port: u16, host: String, cors: bool) -> Result<()> {
    println!(
        "{} Starting Web Chat Proxy server...",
        "🌐".blue()
    );
    println!(" {} {}:{}", "Address:".bold(), host, port);
    println!(" {} {}", "CORS:".bold(), if cors { "Enabled".green() } else { "Disabled".yellow() });

    println!("\n{}", "Available endpoints:".bold().blue());
    println!(" {} http://{}:{}/", "•".blue(), host, port);
    println!("   Web interface for chat");
    println!(" {} http://{}:{}/models", "•".blue(), host, port);
    println!("   List available models");
    println!(" {} http://{}:{}/v1/models", "•".blue(), host, port);
    println!("   OpenAI-compatible models endpoint");
    println!(" {} http://{}:{}/chat/completions", "•".blue(), host, port);
    println!("   Chat completions endpoint");
    println!(" {} http://{}:{}/v1/chat/completions", "•".blue(), host, port);
    println!("   OpenAI-compatible chat endpoint");

    println!("\n{} Press Ctrl+C to stop the server\n", "💡".yellow());

    start_webchat_server(host, port, cors).await
}

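/// Builds the router, optionally layers on CORS, binds the TCP listener, and
/// serves until the process is stopped.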
async fn start_webchat_server(host: String, port: u16, cors: bool) -> Result<()> {
    let config = crate::config::Config::load()?;
    let state = Arc::new(AppState { config });

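    // Bare and /v1-prefixed routes share the same handlers, so both plain HTTP
    // clients and OpenAI-style SDKs can talk to this server.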
    let mut app = Router::new()
        .route("/", get(serve_index))
        .route("/models", get(list_models))
        .route("/v1/models", get(list_models))
        .route("/chat/completions", post(chat_completions))
        .route("/v1/chat/completions", post(chat_completions))
        .with_state(state);

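    // Permissive CORS lets browser pages served from other origins call the API.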
    if cors {
        use tower_http::cors::CorsLayer;
        app = app.layer(CorsLayer::permissive());
    }

    let addr = format!("{}:{}", host, port);
    println!("{} Server listening on http://{}", "✓".green(), addr);

    let listener = tokio::net::TcpListener::bind(&addr).await?;
    axum::serve(listener, app).await?;

    Ok(())
}

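/// Serves the embedded single-page chat UI.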
async fn serve_index() -> Html<&'static str> {
    Html(r#"
<!DOCTYPE html>
<html>
<head>
    <title>LC Web Chat</title>
    <style>
        body {
            font-family: system-ui, -apple-system, sans-serif;
            max-width: 800px;
            margin: 0 auto;
            padding: 20px;
            background: #f5f5f5;
        }
        h1 {
            color: #333;
        }
        .container {
            background: white;
            border-radius: 8px;
            padding: 20px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
        }
        .chat-box {
            height: 400px;
            overflow-y: auto;
            border: 1px solid #ddd;
            border-radius: 4px;
            padding: 10px;
            margin-bottom: 10px;
            background: #fafafa;
        }
        .message {
            margin: 10px 0;
            padding: 10px;
            border-radius: 4px;
        }
        .user-message {
            background: #007bff;
            color: white;
            text-align: right;
        }
        .assistant-message {
            background: #e9ecef;
            color: #333;
        }
        .input-group {
            display: flex;
            gap: 10px;
        }
        input[type="text"] {
            flex: 1;
            padding: 10px;
            border: 1px solid #ddd;
            border-radius: 4px;
        }
        button {
            padding: 10px 20px;
            background: #007bff;
            color: white;
            border: none;
            border-radius: 4px;
            cursor: pointer;
        }
        button:hover {
            background: #0056b3;
        }
        button:disabled {
            background: #ccc;
            cursor: not-allowed;
        }
        .model-select {
            margin-bottom: 10px;
        }
        select {
            padding: 8px;
            border: 1px solid #ddd;
            border-radius: 4px;
            width: 100%;
        }
    </style>
</head>
<body>
    <h1>🤖 LC Web Chat</h1>
    <div class="container">
        <div class="model-select">
            <label for="model">Model:</label>
            <select id="model">
                <option value="gpt-4o">gpt-4o</option>
                <option value="gpt-4o-mini">gpt-4o-mini</option>
                <option value="claude-3-5-sonnet-latest">claude-3-5-sonnet-latest</option>
                <option value="claude-3-5-haiku-latest">claude-3-5-haiku-latest</option>
            </select>
        </div>
        <div id="chat" class="chat-box"></div>
        <div class="input-group">
            <input type="text" id="message" placeholder="Type your message..." autofocus>
            <button id="send" onclick="sendMessage()">Send</button>
        </div>
    </div>

    <script>
        // Load available models
        fetch('/models')
            .then(res => res.json())
            .then(data => {
                const select = document.getElementById('model');
                select.innerHTML = '';
                data.data.forEach(model => {
                    const option = document.createElement('option');
                    option.value = model.id;
                    option.textContent = model.id;
                    select.appendChild(option);
                });
            })
            .catch(err => console.error('Failed to load models:', err));

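        // Full conversation history; the whole array is resent with every
        // request so the model keeps context across turns.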
        const messages = [];

        function addMessage(role, content) {
            messages.push({ role, content });
            const chat = document.getElementById('chat');
            const div = document.createElement('div');
            div.className = `message ${role}-message`;
            div.textContent = content;
            chat.appendChild(div);
            chat.scrollTop = chat.scrollHeight;
        }

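        // Send the conversation to the proxy and render the assistant's reply.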
        async function sendMessage() {
            const input = document.getElementById('message');
            const button = document.getElementById('send');
            const model = document.getElementById('model').value;
            const message = input.value.trim();

            if (!message) return;

            input.value = '';
            button.disabled = true;

            addMessage('user', message);

            try {
                const response = await fetch('/chat/completions', {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json',
                    },
                    body: JSON.stringify({
                        model: model,
                        messages: messages,
                    }),
                });

                if (!response.ok) {
                    throw new Error(`HTTP ${response.status}`);
                }

                const data = await response.json();
                if (data.choices && data.choices[0]) {
                    addMessage('assistant', data.choices[0].message.content);
                }
            } catch (error) {
                addMessage('assistant', `Error: ${error.message}`);
            } finally {
                button.disabled = false;
                input.focus();
            }
        }

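        // Submit on plain Enter (Shift+Enter is ignored).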
        document.getElementById('message').addEventListener('keypress', (e) => {
            if (e.key === 'Enter' && !e.shiftKey) {
                e.preventDefault();
                sendMessage();
            }
        });
    </script>
</body>
</html>
    "#)
}

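/// Lists all cached models in the OpenAI `/v1/models` response shape; model ids
/// use the `provider:model` form that `chat_completions` expects.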
async fn list_models(
    State(_state): State<Arc<AppState>>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    use crate::services::proxy::{ProxyModel, ProxyModelsResponse};
    use crate::models::cache::ModelsCache;

    let mut models = Vec::new();
    let current_time = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or(std::time::Duration::from_secs(0))
        .as_secs();

    let cache = ModelsCache::load().map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;

    let cached_models = cache.get_all_models();

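    // Advertise each cached model as "provider:model" so a request can be
    // routed back to the right provider.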
    for cached_model in cached_models {
        let provider_name = &cached_model.provider;
        let model_name = &cached_model.model;
        let model_id = format!("{}:{}", provider_name, model_name);

        models.push(ProxyModel {
            id: model_id,
            object: "model".to_string(),
            created: current_time,
            owned_by: provider_name.clone(),
        });
    }

    let response = ProxyModelsResponse {
        object: "list".to_string(),
        data: models,
    };

    Ok(Json(serde_json::to_value(response).map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?))
}

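/// OpenAI-compatible chat completions endpoint.
///
/// Resolves the requested model id to a provider, forwards the conversation to
/// that provider, and wraps the reply in an OpenAI-style response. A sketch of
/// a request against a running instance (host, port, and model id are
/// illustrative):
///
/// ```text
/// curl -s http://127.0.0.1:8080/v1/chat/completions \
///   -H 'Content-Type: application/json' \
///   -d '{"model": "openai:gpt-4o", "messages": [{"role": "user", "content": "hi"}]}'
/// ```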
async fn chat_completions(
    State(state): State<Arc<AppState>>,
    Json(request): Json<serde_json::Value>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    use crate::services::proxy::{ProxyChatRequest, ProxyChatResponse, ProxyChoice, ProxyUsage};
    use crate::core::provider::{ChatRequest, Message, MessageContent};

    let proxy_request: ProxyChatRequest = serde_json::from_value(request)
        .map_err(|_| StatusCode::BAD_REQUEST)?;

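    // Resolve the requested model id (e.g. "provider:model") to a provider
    // name and a model name.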
    let (provider_name, model_name) = crate::services::proxy::parse_model_string(&proxy_request.model, &state.config)
        .map_err(|_| StatusCode::BAD_REQUEST)?;

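    // The client constructor takes `&mut Config` (e.g. to cache refreshed
    // credentials), so authenticate against a clone rather than shared state.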
    let mut config_mut = state.config.clone();
    let client = crate::core::chat::create_authenticated_client(&mut config_mut, &provider_name)
        .await
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;

    let chat_request = ChatRequest {
        model: model_name.clone(),
        messages: proxy_request.messages,
        max_tokens: proxy_request.max_tokens,
        temperature: proxy_request.temperature,
        tools: None,
        stream: None,
    };

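    // Forward to the upstream provider and wait for the complete
    // (non-streaming) reply.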
    let response_text = client
        .chat(&chat_request)
        .await
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;

    let current_time = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or(std::time::Duration::from_secs(0))
        .as_secs();

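    // Wrap the reply in an OpenAI-style envelope. Token usage is not tracked
    // here, so all counts are reported as zero.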
    let response = ProxyChatResponse {
        id: format!("chatcmpl-{}", uuid::Uuid::new_v4()),
        object: "chat.completion".to_string(),
        created: current_time,
        model: proxy_request.model,
        choices: vec![ProxyChoice {
            index: 0,
            message: Message {
                role: "assistant".to_string(),
                content_type: MessageContent::Text {
                    content: Some(response_text),
                },
                tool_calls: None,
                tool_call_id: None,
            },
            finish_reason: "stop".to_string(),
        }],
        usage: ProxyUsage {
            prompt_tokens: 0,
            completion_tokens: 0,
            total_tokens: 0,
        },
    };

    Ok(Json(serde_json::to_value(response).map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?))
}