agtrace_providers/codex/
models.rs

1use std::collections::HashMap;
2
3/// Model specification with named fields for type safety
/// Model specification with named fields for type safety
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct ModelSpec {
    /// Model-name prefix this spec applies to (e.g. "gpt-5.1-codex").
    pub prefix: &'static str,
    /// Maximum context window, in tokens.
    pub context_window: u64,
    /// Compaction buffer percentage (0-100)
    /// When input tokens exceed (100% - compaction_buffer_pct), compaction is triggered
    pub compaction_buffer_pct: f64,
}

impl ModelSpec {
    /// Const constructor so specs can be declared in `const`/`static` tables.
    pub const fn new(prefix: &'static str, context_window: u64, compaction_buffer_pct: f64) -> Self {
        ModelSpec { prefix, context_window, compaction_buffer_pct }
    }
}
26
/// Compaction buffer percentage for Codex/OpenAI models
/// NOTE: Set to 0 as the actual compaction behavior is not yet known
/// (0 means compaction would trigger only at 100% of the context window).
const COMPACTION_BUFFER_PCT: f64 = 0.0;

/// Codex/OpenAI provider model specifications
///
/// NOTE(review): entries appear to be ordered longest-prefix-first within each
/// series (e.g. "gpt-5.1-codex-max" before "gpt-5.1-codex" before "gpt-5.1"),
/// which suggests they are matched by first-prefix-wins elsewhere — confirm
/// against the matching code before reordering.
const MODEL_SPECS: &[ModelSpec] = &[
    // GPT-5.2 series (as of 2025-12-17)
    ModelSpec::new("gpt-5.2", 400_000, COMPACTION_BUFFER_PCT),
    // GPT-5.1 series
    ModelSpec::new("gpt-5.1-codex-max", 400_000, COMPACTION_BUFFER_PCT),
    ModelSpec::new("gpt-5.1-codex-mini", 400_000, COMPACTION_BUFFER_PCT),
    ModelSpec::new("gpt-5.1-codex", 400_000, COMPACTION_BUFFER_PCT),
    ModelSpec::new("gpt-5.1", 400_000, COMPACTION_BUFFER_PCT),
    // GPT-5 series
    ModelSpec::new("gpt-5-codex-mini", 400_000, COMPACTION_BUFFER_PCT),
    ModelSpec::new("gpt-5-codex", 400_000, COMPACTION_BUFFER_PCT),
    ModelSpec::new("gpt-5", 400_000, COMPACTION_BUFFER_PCT),
];
45
46/// Returns model prefix -> (context window, compaction buffer %) mapping
47pub fn get_model_limits() -> HashMap<&'static str, (u64, f64)> {
48    MODEL_SPECS
49        .iter()
50        .map(|spec| {
51            (
52                spec.prefix,
53                (spec.context_window, spec.compaction_buffer_pct),
54            )
55        })
56        .collect()
57}
58
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashSet;

    /// Every prefix must be unique; a duplicate would make prefix lookup
    /// ambiguous and silently drop an entry from the HashMap.
    #[test]
    fn test_no_duplicate_prefixes() {
        let all: Vec<&str> = MODEL_SPECS.iter().map(|s| s.prefix).collect();
        let distinct: HashSet<&str> = all.iter().copied().collect();

        assert_eq!(
            all.len(),
            distinct.len(),
            "Duplicate prefixes found in MODEL_SPECS: {:?}",
            all.iter()
                .enumerate()
                .filter(|(i, p)| all.iter().skip(i + 1).any(|other| other == *p))
                .map(|(_, p)| p)
                .collect::<Vec<_>>()
        );
    }

    /// Spot-checks that every known model prefix maps to the expected
    /// (context window, compaction buffer %) pair.
    #[test]
    fn test_model_limits_coverage() {
        let limits = get_model_limits();

        // All currently-listed models share the same limits.
        let expected_prefixes = [
            // GPT-5.2 series
            "gpt-5.2",
            // GPT-5.1 series
            "gpt-5.1-codex-max",
            "gpt-5.1-codex-mini",
            "gpt-5.1-codex",
            "gpt-5.1",
            // GPT-5 series
            "gpt-5-codex-mini",
            "gpt-5-codex",
            "gpt-5",
        ];
        for &prefix in expected_prefixes.iter() {
            assert_eq!(limits.get(prefix), Some(&(400_000, 0.0)));
        }
    }

    /// The map must contain exactly one entry per spec (no silent drops).
    #[test]
    fn test_all_specs_converted() {
        let limits = get_model_limits();
        assert_eq!(
            limits.len(),
            MODEL_SPECS.len(),
            "HashMap size should match MODEL_SPECS length"
        );
    }
}