aster/providers/codex.rs

use anyhow::Result;
use async_trait::async_trait;
use serde_json::json;
use std::ffi::OsString;
use std::path::PathBuf;
use std::process::Stdio;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::Command;

use super::base::{ConfigKey, Provider, ProviderMetadata, ProviderUsage, Usage};
use super::errors::ProviderError;
use super::utils::{filter_extensions_from_system_prompt, RequestLog};
use crate::config::base::{
    CodexCommand, CodexEnableSkills, CodexReasoningEffort, CodexSkipGitCheck,
};
use crate::config::search_path::SearchPaths;
use crate::config::{AsterMode, Config};
use crate::conversation::message::{Message, MessageContent};
use crate::model::ModelConfig;
use crate::subprocess::configure_command_no_window;
use rmcp::model::Role;
use rmcp::model::Tool;

pub const CODEX_DEFAULT_MODEL: &str = "gpt-5.3-codex";
pub const CODEX_KNOWN_MODELS: &[&str] = &["gpt-5.3-codex", "gpt-5.2-codex", "gpt-5.2"];
pub const CODEX_DOC_URL: &str = "https://developers.openai.com/codex/cli";

/// Valid reasoning effort levels for Codex
pub const CODEX_REASONING_LEVELS: &[&str] = &["low", "medium", "high"];

#[derive(Debug, serde::Serialize)]
pub struct CodexProvider {
    command: PathBuf,
    model: ModelConfig,
    #[serde(skip)]
    name: String,
    /// Reasoning effort level (low, medium, high)
    reasoning_effort: String,
    /// Whether to enable skills
    enable_skills: bool,
    /// Whether to skip git repo check
    skip_git_check: bool,
}

impl CodexProvider {
    pub async fn from_env(model: ModelConfig) -> Result<Self> {
        let config = Config::global();
        let command: OsString = config.get_codex_command().unwrap_or_default().into();
        let resolved_command = SearchPaths::builder().with_npm().resolve(command)?;

        // Get reasoning effort from config, default to "high"
        let reasoning_effort = config
            .get_codex_reasoning_effort()
            .map(|r| r.to_string())
            .unwrap_or_else(|_| "high".to_string());

        // Validate reasoning effort
        let reasoning_effort = if CODEX_REASONING_LEVELS.contains(&reasoning_effort.as_str()) {
            reasoning_effort
        } else {
            tracing::warn!(
                "Invalid CODEX_REASONING_EFFORT '{}', using 'high'",
                reasoning_effort
            );
            "high".to_string()
        };

        // Get enable_skills from config, default to false (skills feature may not exist in all Codex versions)
        let enable_skills = config
            .get_codex_enable_skills()
            .map(|s| s.to_lowercase() == "true")
            .unwrap_or(false);

        // Get skip_git_check from config, default to false
        let skip_git_check = config
            .get_codex_skip_git_check()
            .map(|s| s.to_lowercase() == "true")
            .unwrap_or(false);

        Ok(Self {
            command: resolved_command,
            model,
            name: Self::metadata().name,
            reasoning_effort,
            enable_skills,
            skip_git_check,
        })
    }

    /// Convert aster messages to a simple text prompt format.
    /// Similar to Gemini CLI, we use Human:/Assistant: prefixes.
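    ///
    /// For example, a system prompt plus one exchange renders as:
    ///
    /// ```text
    /// You are a helpful assistant.
    ///
    /// Human: Hello
    ///
    /// Assistant: Hi there!
    ///
    /// Assistant:
    /// ```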
    fn messages_to_prompt(&self, system: &str, messages: &[Message]) -> String {
        let mut full_prompt = String::new();

        let filtered_system = filter_extensions_from_system_prompt(system);
        if !filtered_system.is_empty() {
            full_prompt.push_str(&filtered_system);
            full_prompt.push_str("\n\n");
        }

        // Add conversation history
        for message in messages.iter().filter(|m| m.is_agent_visible()) {
            let role_prefix = match message.role {
                Role::User => "Human: ",
                Role::Assistant => "Assistant: ",
            };
            full_prompt.push_str(role_prefix);

            for content in &message.content {
                if let MessageContent::Text(text_content) = content {
                    full_prompt.push_str(&text_content.text);
                    full_prompt.push('\n');
                }
            }
            full_prompt.push('\n');
        }

        full_prompt.push_str("Assistant: ");
        full_prompt
    }

    /// Apply permission flags based on ASTER_MODE setting.
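    ///
    /// Mode-to-flag mapping (mirrors the match arms below): `Auto` -> `--yolo`,
    /// `SmartApprove` -> `--full-auto`, `Approve` -> no extra flags,
    /// `Chat` -> `--sandbox read-only`.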
    fn apply_permission_flags(cmd: &mut Command) -> Result<(), ProviderError> {
        let config = Config::global();
        let aster_mode = config.get_aster_mode().unwrap_or(AsterMode::Auto);

        match aster_mode {
            AsterMode::Auto => {
                // --yolo is shorthand for --dangerously-bypass-approvals-and-sandbox
                cmd.arg("--yolo");
            }
            AsterMode::SmartApprove => {
                // --full-auto applies workspace-write sandbox and approvals only on failure
                cmd.arg("--full-auto");
            }
            AsterMode::Approve => {
                // Default codex behavior - interactive approvals
                // No special flags needed
            }
            AsterMode::Chat => {
                // Read-only sandbox mode
                cmd.arg("--sandbox").arg("read-only");
            }
        }
        Ok(())
    }

    /// Read all non-empty lines from the reader
    async fn read_lines_from<R: tokio::io::AsyncBufRead + Unpin>(reader: &mut R) -> Vec<String> {
        let mut lines = Vec::new();
        let mut line = String::new();
        loop {
            line.clear();
            match reader.read_line(&mut line).await {
                Ok(0) => break,
                Ok(_) => {
                    let trimmed = line.trim();
                    if !trimmed.is_empty() {
                        lines.push(trimmed.to_string());
                    }
                }
                Err(_) => break,
            }
        }
        lines
    }

    /// Execute the codex CLI command.
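    ///
    /// With default settings in `Auto` mode, the spawned command is roughly:
    ///
    /// ```text
    /// codex exec -m gpt-5.3-codex -c model_reasoning_effort="high" --json --yolo -
    /// ```
    ///
    /// with the rendered prompt written to stdin.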
    async fn execute_command(
        &self,
        system: &str,
        messages: &[Message],
        _tools: &[Tool],
    ) -> Result<Vec<String>, ProviderError> {
        let prompt = self.messages_to_prompt(system, messages);

        if std::env::var("ASTER_CODEX_DEBUG").is_ok() {
            println!("=== CODEX PROVIDER DEBUG ===");
            println!("Command: {:?}", self.command);
            println!("Model: {}", self.model.model_name);
            println!("Reasoning effort: {}", self.reasoning_effort);
            println!("Enable skills: {}", self.enable_skills);
            println!("Skip git check: {}", self.skip_git_check);
            println!("Prompt length: {} chars", prompt.len());
            println!("Prompt: {}", prompt);
            println!("============================");
        }

        let mut cmd = Command::new(&self.command);
        configure_command_no_window(&mut cmd);

        // Use 'exec' subcommand for non-interactive mode
        cmd.arg("exec");

        // Only pass the model parameter if it's in the known models list.
        // This allows users to set ASTER_PROVIDER=codex without needing to specify a model.
        if CODEX_KNOWN_MODELS.contains(&self.model.model_name.as_str()) {
            cmd.arg("-m").arg(&self.model.model_name);
        }

        // Reasoning effort configuration
        cmd.arg("-c").arg(format!(
            "model_reasoning_effort=\"{}\"",
            self.reasoning_effort
        ));

        // Enable skills if configured
        if self.enable_skills {
            cmd.arg("--enable").arg("skills");
        }

        // JSON output format for structured parsing
        cmd.arg("--json");

        // Apply permission mode based on ASTER_MODE
        Self::apply_permission_flags(&mut cmd)?;

        // Skip the git repo check if configured
        if self.skip_git_check {
            cmd.arg("--skip-git-repo-check");
        }

        // Pass the prompt via stdin using the '-' argument
        cmd.arg("-");

        cmd.stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped());

        let mut child = cmd.spawn().map_err(|e| {
            ProviderError::RequestFailed(format!(
                "Failed to spawn Codex CLI command '{:?}': {}. \
                Make sure the Codex CLI is installed (npm i -g @openai/codex) \
                and available in the configured search paths.",
                self.command, e
            ))
        })?;

        // Write prompt to stdin
        if let Some(mut stdin) = child.stdin.take() {
            use tokio::io::AsyncWriteExt;
            stdin.write_all(prompt.as_bytes()).await.map_err(|e| {
                ProviderError::RequestFailed(format!("Failed to write to stdin: {}", e))
            })?;
            // Close stdin to signal end of input
            drop(stdin);
        }

        let stdout = child
            .stdout
            .take()
            .ok_or_else(|| ProviderError::RequestFailed("Failed to capture stdout".to_string()))?;

        // Also capture stderr for error messages
        let stderr = child
            .stderr
            .take()
            .ok_or_else(|| ProviderError::RequestFailed("Failed to capture stderr".to_string()))?;

        let mut stdout_reader = BufReader::new(stdout);
        let mut stderr_reader = BufReader::new(stderr);

        // Drain stdout and stderr concurrently; reading them one after the
        // other can deadlock if the child fills the stderr pipe buffer while
        // we are still blocked reading stdout.
        let (lines, stderr_lines) = tokio::join!(
            Self::read_lines_from(&mut stdout_reader),
            Self::read_lines_from(&mut stderr_reader)
        );

        let exit_status = child.wait().await.map_err(|e| {
            ProviderError::RequestFailed(format!("Failed to wait for command: {}", e))
        })?;

        if !exit_status.success() {
            let stderr_output = stderr_lines.join("\n");
            let error_detail = if stderr_output.is_empty() {
                format!("exit code: {:?}", exit_status.code())
            } else {
                // Detect common errors and provide an upgrade hint
                let upgrade_hint = if stderr_output.contains("Unknown feature flag: skills") {
                    "\n\nHint: upgrade the Codex CLI to the latest version: npm i -g @openai/codex@latest\n\
                     or set CODEX_ENABLE_SKILLS=false to disable the skills feature"
                } else {
                    ""
                };
                format!(
                    "exit code: {:?}, stderr: {}{}",
                    exit_status.code(),
                    stderr_output,
                    upgrade_hint
                )
            };
            return Err(ProviderError::RequestFailed(format!(
                "Codex command failed with {}",
                error_detail
            )));
        }

        tracing::debug!("Codex CLI executed successfully, got {} lines", lines.len());

        Ok(lines)
    }

    /// Extract text content from an item.completed event (agent_message only, skip reasoning).
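    ///
    /// For example:
    ///
    /// ```text
    /// {"type":"item.completed","item":{"id":"item_1","type":"agent_message","text":"Hello there!"}}
    /// ```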
    fn extract_text_from_item(item: &serde_json::Value) -> Option<String> {
        let item_type = item.get("type").and_then(|t| t.as_str());
        if item_type == Some("agent_message") {
            item.get("text")
                .and_then(|t| t.as_str())
                .filter(|text| !text.trim().is_empty())
                .map(|s| s.to_string())
        } else {
            None
        }
    }

    /// Extract usage information from a JSON object
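    /// (e.g. `{"input_tokens":100,"output_tokens":50,"cached_input_tokens":30}`;
    /// cached token counts are currently ignored).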
    fn extract_usage(usage_info: &serde_json::Value, usage: &mut Usage) {
        if usage.input_tokens.is_none() {
            usage.input_tokens = usage_info
                .get("input_tokens")
                .and_then(|v| v.as_i64())
                .map(|v| v as i32);
        }
        if usage.output_tokens.is_none() {
            usage.output_tokens = usage_info
                .get("output_tokens")
                .and_then(|v| v.as_i64())
                .map(|v| v as i32);
        }
    }

    /// Extract error message from an error event
    fn extract_error(parsed: &serde_json::Value) -> Option<String> {
        parsed
            .get("message")
            .and_then(|m| m.as_str())
            .map(|s| s.to_string())
            .or_else(|| {
                parsed
                    .get("error")
                    .and_then(|e| e.get("message"))
                    .and_then(|m| m.as_str())
                    .map(|s| s.to_string())
            })
    }

    /// Extract text from legacy message formats
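    /// (older event shapes: a `"content"` array of objects with `"text"` fields,
    /// a top-level `"text"` field, or a top-level `"result"` field).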
    fn extract_legacy_text(parsed: &serde_json::Value) -> Vec<String> {
        let mut texts = Vec::new();
        if let Some(content) = parsed.get("content").and_then(|c| c.as_array()) {
            for item in content {
                if let Some(text) = item.get("text").and_then(|t| t.as_str()) {
                    texts.push(text.to_string());
                }
            }
        }
        if let Some(text) = parsed.get("text").and_then(|t| t.as_str()) {
            texts.push(text.to_string());
        }
        if let Some(text) = parsed.get("result").and_then(|r| r.as_str()) {
            texts.push(text.to_string());
        }
        texts
    }

    /// Build fallback text from non-JSON lines
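    /// (plain lines, plus JSON lines that lack a `"type"` field).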
    fn build_fallback_text(lines: &[String]) -> Option<String> {
        let response_text: String = lines
            .iter()
            .filter(|line| {
                !line.starts_with('{')
                    || serde_json::from_str::<serde_json::Value>(line)
                        .map(|v| v.get("type").is_none())
                        .unwrap_or(true)
            })
            .cloned()
            .collect::<Vec<_>>()
            .join("\n");
        if response_text.trim().is_empty() {
            None
        } else {
            Some(response_text)
        }
    }

    /// Parse the newline-delimited JSON response from the Codex CLI.
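    ///
    /// The CLI emits one JSON event per line, for example:
    ///
    /// ```text
    /// {"type":"thread.started","thread_id":"test-123"}
    /// {"type":"turn.started"}
    /// {"type":"item.completed","item":{"id":"item_1","type":"agent_message","text":"Hello there!"}}
    /// {"type":"turn.completed","usage":{"input_tokens":100,"output_tokens":50}}
    /// ```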
    fn parse_response(&self, lines: &[String]) -> Result<(Message, Usage), ProviderError> {
        let mut all_text_content = Vec::new();
        let mut usage = Usage::default();
        let mut error_message: Option<String> = None;

        for line in lines {
            if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(line) {
                if let Some(event_type) = parsed.get("type").and_then(|t| t.as_str()) {
                    match event_type {
                        "item.completed" => {
                            if let Some(item) = parsed.get("item") {
                                if let Some(text) = Self::extract_text_from_item(item) {
                                    all_text_content.push(text);
                                }
                            }
                        }
                        "turn.completed" | "result" | "done" => {
                            if let Some(usage_info) = parsed.get("usage") {
                                Self::extract_usage(usage_info, &mut usage);
                            }
                            all_text_content.extend(Self::extract_legacy_text(&parsed));
                        }
                        "error" | "turn.failed" => {
                            error_message = Self::extract_error(&parsed);
                        }
                        "message" | "assistant" => {
                            all_text_content.extend(Self::extract_legacy_text(&parsed));
                        }
                        _ => {}
                    }
                }
            }
        }

        // Only fail on a reported error if no usable text was produced
        if let Some(err) = error_message {
            if all_text_content.is_empty() {
                return Err(ProviderError::RequestFailed(format!(
                    "Codex CLI error: {}",
                    err
                )));
            }
        }

        if all_text_content.is_empty() {
            if let Some(fallback) = Self::build_fallback_text(lines) {
                all_text_content.push(fallback);
            }
        }

        if let (Some(input), Some(output)) = (usage.input_tokens, usage.output_tokens) {
            usage.total_tokens = Some(input + output);
        }

        let combined_text = all_text_content.join("\n\n");
        if combined_text.is_empty() {
            return Err(ProviderError::RequestFailed(
                "Empty response from Codex CLI".to_string(),
            ));
        }

        let message = Message::new(
            Role::Assistant,
            chrono::Utc::now().timestamp(),
            vec![MessageContent::text(combined_text)],
        );

        Ok((message, usage))
    }

    /// Generate a simple session description without calling a subprocess.
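    /// e.g. "Refactor the parser module now" becomes "Refactor the parser module".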
    fn generate_simple_session_description(
        &self,
        messages: &[Message],
    ) -> Result<(Message, ProviderUsage), ProviderError> {
        // Extract the first user message text
        let description = messages
            .iter()
            .find(|m| m.role == Role::User)
            .and_then(|m| {
                m.content.iter().find_map(|c| match c {
                    MessageContent::Text(text_content) => Some(&text_content.text),
                    _ => None,
                })
            })
            .map(|text| {
                // Take the first four words
                text.split_whitespace()
                    .take(4)
                    .collect::<Vec<_>>()
                    .join(" ")
            })
            .unwrap_or_else(|| "Simple task".to_string());

        if std::env::var("ASTER_CODEX_DEBUG").is_ok() {
            println!("=== CODEX PROVIDER DEBUG ===");
            println!("Generated simple session description: {}", description);
            println!("Skipped subprocess call for session description");
            println!("============================");
        }

        let message = Message::new(
            Role::Assistant,
            chrono::Utc::now().timestamp(),
            vec![MessageContent::text(description.clone())],
        );

        let usage = Usage::default();

        Ok((
            message,
            ProviderUsage::new(self.model.model_name.clone(), usage),
        ))
    }
}

#[async_trait]
impl Provider for CodexProvider {
    fn metadata() -> ProviderMetadata {
        ProviderMetadata::new(
            "codex",
            "OpenAI Codex CLI",
            "Execute OpenAI models via the Codex CLI tool. Requires the codex CLI to be installed.",
            CODEX_DEFAULT_MODEL,
            CODEX_KNOWN_MODELS.to_vec(),
            CODEX_DOC_URL,
            vec![
                ConfigKey::from_value_type::<CodexCommand>(true, false),
                ConfigKey::from_value_type::<CodexReasoningEffort>(false, false),
                ConfigKey::from_value_type::<CodexEnableSkills>(false, false),
                ConfigKey::from_value_type::<CodexSkipGitCheck>(false, false),
            ],
        )
    }

    fn get_name(&self) -> &str {
        &self.name
    }

    fn get_model_config(&self) -> ModelConfig {
        self.model.clone()
    }

    #[tracing::instrument(
        skip(self, model_config, system, messages, tools),
        fields(model_config, input, output, input_tokens, output_tokens, total_tokens)
    )]
    async fn complete_with_model(
        &self,
        model_config: &ModelConfig,
        system: &str,
        messages: &[Message],
        tools: &[Tool],
    ) -> Result<(Message, ProviderUsage), ProviderError> {
        // Check if this is a session description request
        if system.contains("four words or less") || system.contains("4 words or less") {
            return self.generate_simple_session_description(messages);
        }

        let lines = self.execute_command(system, messages, tools).await?;

        let (message, usage) = self.parse_response(&lines)?;

        // Create a payload for debug tracing
        let payload = json!({
            "command": self.command,
            "model": model_config.model_name,
            "reasoning_effort": self.reasoning_effort,
            "enable_skills": self.enable_skills,
            "system_length": system.len(),
            "messages_count": messages.len()
        });

        let mut log = RequestLog::start(model_config, &payload).map_err(|e| {
            ProviderError::RequestFailed(format!("Failed to start request log: {}", e))
        })?;

        let response = json!({
            "lines": lines.len(),
            "usage": usage
        });

        log.write(&response, Some(&usage)).map_err(|e| {
            ProviderError::RequestFailed(format!("Failed to write request log: {}", e))
        })?;

        Ok((
            message,
            ProviderUsage::new(model_config.model_name.clone(), usage),
        ))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_codex_metadata() {
        let metadata = CodexProvider::metadata();
        assert_eq!(metadata.name, "codex");
        assert_eq!(metadata.default_model, CODEX_DEFAULT_MODEL);
        assert!(!metadata.known_models.is_empty());
        // Check that the default model is in the known models
        assert!(metadata
            .known_models
            .iter()
            .any(|m| m.name == CODEX_DEFAULT_MODEL));
    }

    #[test]
    fn test_messages_to_prompt_empty() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        let prompt = provider.messages_to_prompt("", &[]);
        assert_eq!(prompt, "Assistant: ");
    }

    #[test]
    fn test_messages_to_prompt_with_system() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        let prompt = provider.messages_to_prompt("You are a helpful assistant.", &[]);
        assert!(prompt.starts_with("You are a helpful assistant."));
        assert!(prompt.ends_with("Assistant: "));
    }

    #[test]
    fn test_messages_to_prompt_with_messages() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        let messages = vec![
            Message::new(
                Role::User,
                chrono::Utc::now().timestamp(),
                vec![MessageContent::text("Hello")],
            ),
            Message::new(
                Role::Assistant,
                chrono::Utc::now().timestamp(),
                vec![MessageContent::text("Hi there!")],
            ),
        ];

        let prompt = provider.messages_to_prompt("", &messages);
        assert!(prompt.contains("Human: Hello"));
        assert!(prompt.contains("Assistant: Hi there!"));
    }

    #[test]
    fn test_parse_response_plain_text() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        let lines = vec!["Hello, world!".to_string()];
        let result = provider.parse_response(&lines);
        assert!(result.is_ok());

        let (message, _usage) = result.unwrap();
        assert_eq!(message.role, Role::Assistant);
        assert_eq!(message.content.len(), 1);
    }

    #[test]
    fn test_parse_response_json_events() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        // Test with actual Codex CLI output format
        let lines = vec![
            r#"{"type":"thread.started","thread_id":"test-123"}"#.to_string(),
            r#"{"type":"turn.started"}"#.to_string(),
            r#"{"type":"item.completed","item":{"id":"item_0","type":"reasoning","text":"Thinking..."}}"#.to_string(),
            r#"{"type":"item.completed","item":{"id":"item_1","type":"agent_message","text":"Hello there!"}}"#.to_string(),
            r#"{"type":"turn.completed","usage":{"input_tokens":100,"output_tokens":50,"cached_input_tokens":30}}"#.to_string(),
        ];
        let result = provider.parse_response(&lines);
        assert!(result.is_ok());

        let (message, usage) = result.unwrap();
        // Should only contain agent_message text, not reasoning
        if let MessageContent::Text(text) = &message.content[0] {
            assert!(text.text.contains("Hello there!"));
            assert!(!text.text.contains("Thinking"));
        }
        assert_eq!(usage.input_tokens, Some(100));
        assert_eq!(usage.output_tokens, Some(50));
        assert_eq!(usage.total_tokens, Some(150));
    }

    #[test]
    fn test_parse_response_empty() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        let lines: Vec<String> = vec![];
        let result = provider.parse_response(&lines);
        assert!(result.is_err());
    }

    #[test]
    fn test_reasoning_level_validation() {
        assert!(CODEX_REASONING_LEVELS.contains(&"low"));
        assert!(CODEX_REASONING_LEVELS.contains(&"medium"));
        assert!(CODEX_REASONING_LEVELS.contains(&"high"));
        assert!(!CODEX_REASONING_LEVELS.contains(&"invalid"));
    }

    #[test]
    fn test_known_models() {
        assert!(CODEX_KNOWN_MODELS.contains(&"gpt-5.3-codex"));
        assert!(CODEX_KNOWN_MODELS.contains(&"gpt-5.2-codex"));
        assert!(CODEX_KNOWN_MODELS.contains(&"gpt-5.2"));
    }

    #[test]
    fn test_parse_response_item_completed() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        let lines = vec![
            r#"{"type":"item.completed","item":{"id":"item_0","type":"agent_message","text":"Hello from codex"}}"#.to_string(),
        ];
        let result = provider.parse_response(&lines);
        assert!(result.is_ok());

        let (message, _usage) = result.unwrap();
        if let MessageContent::Text(text) = &message.content[0] {
            assert!(text.text.contains("Hello from codex"));
        } else {
            panic!("Expected text content");
        }
    }

    #[test]
    fn test_parse_response_turn_completed_usage() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        let lines = vec![
            r#"{"type":"item.completed","item":{"id":"item_0","type":"agent_message","text":"Response"}}"#.to_string(),
            r#"{"type":"turn.completed","usage":{"input_tokens":5000,"output_tokens":100,"cached_input_tokens":3000}}"#.to_string(),
        ];
        let result = provider.parse_response(&lines);
        assert!(result.is_ok());

        let (_message, usage) = result.unwrap();
        assert_eq!(usage.input_tokens, Some(5000));
        assert_eq!(usage.output_tokens, Some(100));
        assert_eq!(usage.total_tokens, Some(5100));
    }

    #[test]
    fn test_parse_response_error_event() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        let lines = vec![
            r#"{"type":"thread.started","thread_id":"test"}"#.to_string(),
            r#"{"type":"error","message":"Model not supported"}"#.to_string(),
        ];
        let result = provider.parse_response(&lines);
        assert!(result.is_err());

        let err = result.unwrap_err();
        assert!(err.to_string().contains("Model not supported"));
    }

    #[test]
    fn test_parse_response_skips_reasoning() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        let lines = vec![
            r#"{"type":"item.completed","item":{"id":"item_0","type":"reasoning","text":"Let me think about this..."}}"#.to_string(),
            r#"{"type":"item.completed","item":{"id":"item_1","type":"agent_message","text":"The answer is 42"}}"#.to_string(),
        ];
        let result = provider.parse_response(&lines);
        assert!(result.is_ok());

        let (message, _usage) = result.unwrap();
        if let MessageContent::Text(text) = &message.content[0] {
            assert!(text.text.contains("The answer is 42"));
            assert!(!text.text.contains("Let me think"));
        } else {
            panic!("Expected text content");
        }
    }

    #[test]
    fn test_session_description_generation() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        let messages = vec![Message::new(
            Role::User,
            chrono::Utc::now().timestamp(),
            vec![MessageContent::text(
                "This is a very long message that should be truncated to four words",
            )],
        )];

        let result = provider.generate_simple_session_description(&messages);
        assert!(result.is_ok());

        let (message, _usage) = result.unwrap();
        if let MessageContent::Text(text) = &message.content[0] {
            // Should be truncated to 4 words
            let word_count = text.text.split_whitespace().count();
            assert!(word_count <= 4);
        } else {
            panic!("Expected text content");
        }
    }

    #[test]
    fn test_session_description_empty_messages() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        let messages: Vec<Message> = vec![];

        let result = provider.generate_simple_session_description(&messages);
        assert!(result.is_ok());

        let (message, _usage) = result.unwrap();
        if let MessageContent::Text(text) = &message.content[0] {
            assert_eq!(text.text, "Simple task");
        } else {
            panic!("Expected text content");
        }
    }

    #[test]
    fn test_config_keys() {
        let metadata = CodexProvider::metadata();
        assert_eq!(metadata.config_keys.len(), 4);

        // First key should be CODEX_COMMAND (required)
        assert_eq!(metadata.config_keys[0].name, "CODEX_COMMAND");
        assert!(metadata.config_keys[0].required);
        assert!(!metadata.config_keys[0].secret);

        // Second key should be CODEX_REASONING_EFFORT (optional)
        assert_eq!(metadata.config_keys[1].name, "CODEX_REASONING_EFFORT");
        assert!(!metadata.config_keys[1].required);

        // Third key should be CODEX_ENABLE_SKILLS (optional)
        assert_eq!(metadata.config_keys[2].name, "CODEX_ENABLE_SKILLS");
        assert!(!metadata.config_keys[2].required);

        // Fourth key should be CODEX_SKIP_GIT_CHECK (optional)
        assert_eq!(metadata.config_keys[3].name, "CODEX_SKIP_GIT_CHECK");
        assert!(!metadata.config_keys[3].required);
    }

    #[test]
    fn test_messages_to_prompt_filters_non_text() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        // Create messages with both text and non-text content
        let messages = vec![Message::new(
            Role::User,
            chrono::Utc::now().timestamp(),
            vec![
                MessageContent::text("Hello"),
                // Tool requests would be filtered out as they're not text
            ],
        )];

        let prompt = provider.messages_to_prompt("System prompt", &messages);
        assert!(prompt.contains("System prompt"));
        assert!(prompt.contains("Human: Hello"));
    }

    #[test]
    fn test_parse_response_multiple_agent_messages() {
        let provider = CodexProvider {
            command: PathBuf::from("codex"),
            model: ModelConfig::new("gpt-5.2-codex").unwrap(),
            name: "codex".to_string(),
            reasoning_effort: "high".to_string(),
            enable_skills: true,
            skip_git_check: false,
        };

        let lines = vec![
            r#"{"type":"item.completed","item":{"id":"item_0","type":"agent_message","text":"First part"}}"#.to_string(),
            r#"{"type":"item.completed","item":{"id":"item_1","type":"agent_message","text":"Second part"}}"#.to_string(),
        ];
        let result = provider.parse_response(&lines);
        assert!(result.is_ok());

        let (message, _usage) = result.unwrap();
        if let MessageContent::Text(text) = &message.content[0] {
            assert!(text.text.contains("First part"));
            assert!(text.text.contains("Second part"));
        } else {
            panic!("Expected text content");
        }
    }

    #[test]
    fn test_doc_url() {
        assert_eq!(CODEX_DOC_URL, "https://developers.openai.com/codex/cli");
    }

    #[test]
    fn test_default_model() {
        assert_eq!(CODEX_DEFAULT_MODEL, "gpt-5.3-codex");
    }
}