// offline_intelligence/api/title_api.rs
use axum::{
4 extract::{State, Json},
5 http::StatusCode,
6};
use serde::{Deserialize, Serialize};
use tracing::{error, info};

use crate::shared_state::UnifiedAppState;
11
/// Request body for the title-generation endpoint.
#[derive(Debug, Deserialize)]
pub struct GenerateTitleRequest {
    /// The user prompt the generated title should summarise.
    pub prompt: String,
    /// Token budget for the generated title; falls back to
    /// `default_max_tokens()` (20) when omitted from the request JSON.
    #[serde(default = "default_max_tokens")]
    pub max_tokens: u32,
}
18
/// Serde default for [`GenerateTitleRequest::max_tokens`]: a 20-token
/// budget, which is ample for a 1-5 word title.
fn default_max_tokens() -> u32 {
    const DEFAULT_TITLE_TOKENS: u32 = 20;
    DEFAULT_TITLE_TOKENS
}
22
/// Success response body: the generated chat title.
#[derive(Debug, Serialize)]
pub struct GenerateTitleResponse {
    /// Short title (intended to be 1-5 words) produced by the LLM worker.
    pub title: String,
}
27
/// Error response body returned with non-2xx status codes.
#[derive(Debug, Serialize)]
pub struct ErrorResponse {
    /// Human-readable description of what went wrong.
    pub error: String,
}
32
33pub async fn generate_title(
36 State(state): State<UnifiedAppState>,
37 Json(req): Json<GenerateTitleRequest>,
38) -> Result<Json<GenerateTitleResponse>, (StatusCode, Json<ErrorResponse>)> {
39 info!("Generating title for prompt (length: {} chars)", req.prompt.len());
40
41 if req.prompt.is_empty() {
42 return Err((
43 StatusCode::BAD_REQUEST,
44 Json(ErrorResponse {
45 error: "Prompt cannot be empty".to_string(),
46 }),
47 ));
48 }
49
50 let llm_worker = state.llm_worker.clone();
51
52 let title_instruction = format!(
54 "User prompt: {}\n\n\
55 Create a short, meaningful chat title using 1-5 words maximum that captures the essence of this prompt.",
56 req.prompt
57 );
58
59 match llm_worker.generate_title(&title_instruction, req.max_tokens.min(20)).await {
61 Ok(title) => {
62 let word_count = title.split_whitespace().count();
63 info!("Generated title: '{}' ({} words)", title, word_count);
64 Ok(Json(GenerateTitleResponse { title }))
65 }
66 Err(e) => {
67 info!("Title generation failed: {}", e);
68 Err((
69 StatusCode::INTERNAL_SERVER_ERROR,
70 Json(ErrorResponse {
71 error: format!("Title generation failed: {}", e),
72 }),
73 ))
74 }
75 }
76}