// File: token_count/output/verbose.rs
//! Verbose formatter - outputs detailed information
3use crate::output::OutputFormatter;
4use crate::tokenizers::TokenizationResult;
5
/// Verbose formatter that outputs model info and context window percentage.
///
/// Unit struct: carries no state; formatting behavior lives in its
/// [`OutputFormatter`] implementation.
pub struct VerboseFormatter;
8
9impl OutputFormatter for VerboseFormatter {
10    fn format(&self, result: &TokenizationResult) -> String {
11        // Safe conversion with overflow and division-by-zero protection
12        let percentage = if result.model_info.context_window == 0 {
13            0.0 // Prevent division by zero
14        } else {
15            let token_count_f64 = result.token_count as f64;
16            let context_window_f64 = result.model_info.context_window as f64;
17
18            // Calculate percentage with overflow check
19            let raw_percentage = (token_count_f64 / context_window_f64) * 100.0;
20            if raw_percentage.is_finite() {
21                raw_percentage
22            } else {
23                f64::MAX // Saturate to maximum representable value
24            }
25        };
26
27        format!(
28            "Model: {} ({})\nTokens: {}\nContext window: {} tokens ({:.4}% used)",
29            result.model_info.name,
30            result.model_info.encoding,
31            result.token_count,
32            result.model_info.context_window,
33            percentage
34        )
35    }
36}
37
#[cfg(test)]
mod tests {
    use super::*;
    use crate::tokenizers::ModelInfo;

    #[test]
    fn test_verbose_formatter() {
        let result = TokenizationResult {
            token_count: 2,
            model_info: ModelInfo {
                name: "gpt-4".to_string(),
                encoding: "cl100k_base".to_string(),
                context_window: 128000,
                description: "GPT-4".to_string(),
            },
            token_details: None,
        };

        let output = VerboseFormatter.format(&result);

        // Every labelled field from the result must surface in the output.
        for needle in ["Model: gpt-4", "Tokens: 2", "Context window: 128000", "%"] {
            assert!(
                output.contains(needle),
                "expected {:?} in {:?}",
                needle,
                output
            );
        }
    }

    #[test]
    fn test_percentage_calculation() {
        // 64000 of 128000 tokens is exactly half the window: 50.0000%.
        let result = TokenizationResult {
            token_count: 64000,
            model_info: ModelInfo {
                name: "gpt-4".to_string(),
                encoding: "cl100k_base".to_string(),
                context_window: 128000,
                description: "GPT-4".to_string(),
            },
            token_details: None,
        };

        let output = VerboseFormatter.format(&result);
        assert!(output.contains("50.0000%"));
    }
}
81}