// rustapi_toon/llm_response.rs

1//! # LLM-Optimized Response Wrapper
2//!
3//! Provides `LlmResponse<T>` for AI/LLM endpoints with automatic
4//! token counting and format optimization.
5//!
6//! ## Features
7//!
8//! - Automatic content negotiation (JSON vs TOON)
9//! - Token counting headers
10//! - Token savings calculation
11//!
12//! ## Response Headers
13//!
14//! - `X-Token-Count-JSON`: Estimated token count in JSON format
15//! - `X-Token-Count-TOON`: Estimated token count in TOON format
16//! - `X-Token-Savings`: Percentage of tokens saved with TOON
17//!
18//! ## Example
19//!
20//! ```rust,ignore
21//! use rustapi_rs::prelude::*;
22//! use rustapi_rs::toon::{LlmResponse, AcceptHeader};
23//!
24//! #[derive(Serialize)]
25//! struct ChatResponse {
26//!     messages: Vec<Message>,
27//! }
28//!
29//! async fn chat(accept: AcceptHeader) -> LlmResponse<ChatResponse> {
30//!     let response = ChatResponse {
31//!         messages: vec![...],
32//!     };
33//!     LlmResponse::new(response, accept.preferred)
34//! }
35//! ```
36
37use crate::{OutputFormat, JSON_CONTENT_TYPE, TOON_CONTENT_TYPE};
38use bytes::Bytes;
39use http::{header, StatusCode};
40use http_body_util::Full;
41use rustapi_core::{ApiError, IntoResponse, Response};
42use rustapi_openapi::{MediaType, Operation, OperationModifier, ResponseModifier, ResponseSpec, SchemaRef};
43use serde::Serialize;
44use std::collections::HashMap;
45
// Custom response header names. Kept lowercase — presumably to match
// HTTP/2 conventions and the `http` crate's canonical form (TODO confirm).

/// Response header carrying the estimated token count of the JSON rendering.
pub const X_TOKEN_COUNT_JSON: &str = "x-token-count-json";
/// Response header carrying the estimated token count of the TOON rendering.
pub const X_TOKEN_COUNT_TOON: &str = "x-token-count-toon";
/// Response header carrying the percentage of tokens saved by TOON vs JSON.
pub const X_TOKEN_SAVINGS: &str = "x-token-savings";
/// Response header naming the format actually sent ("json" or "toon").
pub const X_FORMAT_USED: &str = "x-format-used";
54
55/// LLM-optimized response wrapper with token counting.
56///
57/// This wrapper automatically:
58/// 1. Serializes to the requested format (JSON or TOON)
59/// 2. Calculates estimated token counts for both formats
60/// 3. Adds informative headers about token usage
61///
62/// ## Token Estimation
63///
64/// Token counts are estimated using a simple heuristic:
65/// - ~4 characters per token (GPT-3/4 average)
66///
67/// For more accurate counts, use a proper tokenizer.
68///
69/// ## Example
70///
71/// ```rust,ignore
72/// use rustapi_rs::prelude::*;
73/// use rustapi_rs::toon::{LlmResponse, AcceptHeader, OutputFormat};
74///
75/// #[derive(Serialize)]
76/// struct ApiData {
77///     items: Vec<Item>,
78/// }
79///
80/// // With content negotiation
81/// async fn get_items(accept: AcceptHeader) -> LlmResponse<ApiData> {
82///     let data = ApiData { items: vec![...] };
83///     LlmResponse::new(data, accept.preferred)
84/// }
85///
86/// // Always TOON format
87/// async fn get_items_toon() -> LlmResponse<ApiData> {
88///     let data = ApiData { items: vec![...] };
89///     LlmResponse::toon(data)
90/// }
91/// ```
#[derive(Debug, Clone)]
pub struct LlmResponse<T> {
    // Payload serialized by `into_response`.
    data: T,
    // Wire format to emit (JSON or TOON).
    format: OutputFormat,
    // When true (the default), the X-Token-* headers are added.
    include_token_headers: bool,
}
98
99impl<T> LlmResponse<T> {
100    /// Create a new LLM response with the specified format.
101    pub fn new(data: T, format: OutputFormat) -> Self {
102        Self {
103            data,
104            format,
105            include_token_headers: true,
106        }
107    }
108
109    /// Create a JSON-formatted LLM response.
110    pub fn json(data: T) -> Self {
111        Self::new(data, OutputFormat::Json)
112    }
113
114    /// Create a TOON-formatted LLM response.
115    pub fn toon(data: T) -> Self {
116        Self::new(data, OutputFormat::Toon)
117    }
118
119    /// Disable token counting headers.
120    pub fn without_token_headers(mut self) -> Self {
121        self.include_token_headers = false;
122        self
123    }
124
125    /// Enable token counting headers (default).
126    pub fn with_token_headers(mut self) -> Self {
127        self.include_token_headers = true;
128        self
129    }
130}
131
/// Rough token estimate using the common "~4 characters per token"
/// rule of thumb (GPT-3/4 average), rounded up.
///
/// Note: this works on `str::len()`, i.e. UTF-8 *byte* length, so
/// multi-byte text is estimated slightly high.
fn estimate_tokens(text: &str) -> usize {
    const BYTES_PER_TOKEN: usize = 4;
    // Ceiling division so any trailing fragment still counts as a token.
    (text.len() + BYTES_PER_TOKEN - 1) / BYTES_PER_TOKEN
}
140
/// Percentage of tokens saved by TOON relative to JSON, rounded to two
/// decimal places.
///
/// Returns `0.0` when `json_tokens` is zero (no baseline) and clamps to
/// `0.0` when TOON is not smaller than JSON.
fn calculate_savings(json_tokens: usize, toon_tokens: usize) -> f64 {
    match json_tokens {
        0 => 0.0,
        total => {
            let saved = total.saturating_sub(toon_tokens);
            let pct = saved as f64 / total as f64 * 100.0;
            // Two-decimal rounding keeps the header value stable.
            (pct * 100.0).round() / 100.0
        }
    }
}
149
150impl<T: Serialize> IntoResponse for LlmResponse<T> {
151    fn into_response(self) -> Response {
152        // Always serialize to both formats for token counting
153        let json_result = serde_json::to_string(&self.data);
154        let toon_result = toon_format::encode_default(&self.data);
155
156        // Calculate token counts if enabled
157        let (json_tokens, toon_tokens, savings) = if self.include_token_headers {
158            let json_tokens = json_result.as_ref().map(|s| estimate_tokens(s)).unwrap_or(0);
159            let toon_tokens = toon_result.as_ref().map(|s| estimate_tokens(s)).unwrap_or(0);
160            let savings = calculate_savings(json_tokens, toon_tokens);
161            (Some(json_tokens), Some(toon_tokens), Some(savings))
162        } else {
163            (None, None, None)
164        };
165
166        // Serialize to the requested format
167        let (body, content_type) = match self.format {
168            OutputFormat::Json => match json_result {
169                Ok(json) => (json, JSON_CONTENT_TYPE),
170                Err(e) => {
171                    tracing::error!("Failed to serialize to JSON: {}", e);
172                    return ApiError::internal(format!("JSON serialization error: {}", e))
173                        .into_response();
174                }
175            },
176            OutputFormat::Toon => match toon_result {
177                Ok(toon) => (toon, TOON_CONTENT_TYPE),
178                Err(e) => {
179                    tracing::error!("Failed to serialize to TOON: {}", e);
180                    return ApiError::internal(format!("TOON serialization error: {}", e))
181                        .into_response();
182                }
183            },
184        };
185
186        // Build response with headers
187        let mut builder = http::Response::builder()
188            .status(StatusCode::OK)
189            .header(header::CONTENT_TYPE, content_type)
190            .header(X_FORMAT_USED, match self.format {
191                OutputFormat::Json => "json",
192                OutputFormat::Toon => "toon",
193            });
194
195        // Token counting headers
196        if let Some(json_tokens) = json_tokens {
197            builder = builder.header(X_TOKEN_COUNT_JSON, json_tokens.to_string());
198        }
199        if let Some(toon_tokens) = toon_tokens {
200            builder = builder.header(X_TOKEN_COUNT_TOON, toon_tokens.to_string());
201        }
202        if let Some(savings) = savings {
203            builder = builder.header(X_TOKEN_SAVINGS, format!("{:.2}%", savings));
204        }
205
206        builder.body(Full::new(Bytes::from(body))).unwrap()
207    }
208}
209
// OpenAPI support
impl<T: Send> OperationModifier for LlmResponse<T> {
    /// No-op: `LlmResponse` only appears in the response position, so it
    /// contributes no parameters or request body to the operation. The
    /// response documentation is added by `ResponseModifier` instead.
    fn update_operation(_op: &mut Operation) {
        // LlmResponse is a response type, no request body modification needed
    }
}
216
217impl<T: Serialize> ResponseModifier for LlmResponse<T> {
218    fn update_response(op: &mut Operation) {
219        let mut content = HashMap::new();
220
221        // JSON response
222        content.insert(
223            JSON_CONTENT_TYPE.to_string(),
224            MediaType {
225                schema: SchemaRef::Inline(serde_json::json!({
226                    "type": "object",
227                    "description": "JSON formatted response with token counting headers"
228                })),
229            },
230        );
231
232        // TOON response
233        content.insert(
234            TOON_CONTENT_TYPE.to_string(),
235            MediaType {
236                schema: SchemaRef::Inline(serde_json::json!({
237                    "type": "string",
238                    "description": "TOON (Token-Oriented Object Notation) formatted response with token counting headers"
239                })),
240            },
241        );
242
243        let response = ResponseSpec {
244            description: "LLM-optimized response with token counting headers (X-Token-Count-JSON, X-Token-Count-TOON, X-Token-Savings)".to_string(),
245            content: Some(content),
246        };
247        op.responses.insert("200".to_string(), response);
248    }
249}
250
#[cfg(test)]
mod tests {
    use super::*;
    use serde::Serialize;

    #[derive(Serialize)]
    struct TestData {
        id: u64,
        name: String,
        active: bool,
    }

    /// Shared fixture for the wrapper tests.
    fn sample() -> TestData {
        TestData {
            id: 1,
            name: "Test".to_string(),
            active: true,
        }
    }

    #[test]
    fn test_estimate_tokens() {
        // One token per 4 bytes, rounded up.
        let cases = [("", 0), ("test", 1), ("hello world", 3), ("a", 1)];
        for (input, expected) in cases.iter().copied() {
            assert_eq!(estimate_tokens(input), expected, "input: {:?}", input);
        }
    }

    #[test]
    fn test_calculate_savings() {
        let cases = [
            (100usize, 70usize, 30.0),
            (100, 80, 20.0),
            (100, 100, 0.0),
            (0, 0, 0.0),
        ];
        for (json, toon, expected) in cases.iter().copied() {
            assert_eq!(calculate_savings(json, toon), expected);
        }
    }

    #[test]
    fn test_llm_response_json_format() {
        let response = LlmResponse::json(sample());
        assert!(matches!(response.format, OutputFormat::Json));
    }

    #[test]
    fn test_llm_response_toon_format() {
        let response = LlmResponse::toon(sample());
        assert!(matches!(response.format, OutputFormat::Toon));
    }

    #[test]
    fn test_llm_response_without_headers() {
        let response = LlmResponse::json(sample()).without_token_headers();
        assert!(!response.include_token_headers);
    }

    #[test]
    fn test_llm_response_with_headers() {
        // Re-enabling after disabling restores the default.
        let response = LlmResponse::toon(sample())
            .without_token_headers()
            .with_token_headers();
        assert!(response.include_token_headers);
    }
}