agcodex_core/
config.rs

use crate::config_profile::ConfigProfile;
use crate::config_types::History;
use crate::config_types::McpServerConfig;
use crate::config_types::SandboxWorkspaceWrite;
use crate::config_types::ShellEnvironmentPolicy;
use crate::config_types::ShellEnvironmentPolicyToml;
use crate::config_types::Tui;
use crate::config_types::UriBasedFileOpener;
use crate::error::CodexErr;
use crate::model_family::ModelFamily;
use crate::model_family::find_family_for_model;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::built_in_model_providers;
use crate::openai_model_info::get_model_info;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use agcodex_login::AuthMode;
use agcodex_protocol::config_types::ReasoningEffort;
use agcodex_protocol::config_types::ReasoningSummary;
use agcodex_protocol::config_types::SandboxMode;
use dirs::home_dir;
use serde::Deserialize;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use tempfile::NamedTempFile;
use toml::Value as TomlValue;
use toml_edit::DocumentMut;

const OPENAI_DEFAULT_MODEL: &str = "gpt-5";

/// Maximum number of bytes of project documentation that will be embedded.
/// Larger files are *silently truncated* to this size so we do not take up
/// too much of the context window.
pub(crate) const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // 32 KiB

const CONFIG_TOML_FILE: &str = "config.toml";

const DEFAULT_RESPONSES_ORIGINATOR_HEADER: &str = "agcodex_cli_rs";

/// Application configuration loaded from disk and merged with overrides.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
    /// The model selected for the session, after all overrides are applied.
    pub model: String,

    pub model_family: ModelFamily,

    /// Size of the context window for the model, in tokens.
    pub model_context_window: Option<u64>,

    /// Maximum number of output tokens.
    pub model_max_output_tokens: Option<u64>,

    /// Key into the model_providers map that specifies which provider to use.
    pub model_provider_id: String,

    /// Info needed to make an API request to the model.
    pub model_provider: ModelProviderInfo,

    /// Approval policy for executing commands.
    pub approval_policy: AskForApproval,

    pub sandbox_policy: SandboxPolicy,

    pub shell_environment_policy: ShellEnvironmentPolicy,

    /// When `true`, `AgentReasoning` events emitted by the backend will be
    /// suppressed from the frontend output. This can reduce visual noise when
    /// users are only interested in the final agent responses.
    pub hide_agent_reasoning: bool,

    /// When set to `true`, `AgentReasoningRawContentEvent` events will be shown in the UI/output.
    /// Defaults to `false`.
    pub show_raw_agent_reasoning: bool,

    /// Disable server-side response storage (sends the full conversation
    /// context with every request). Currently necessary for OpenAI customers
    /// who have opted into Zero Data Retention (ZDR).
    pub disable_response_storage: bool,

    /// User-provided instructions from AGENTS.md.
    pub user_instructions: Option<String>,

    /// Base instructions override.
    pub base_instructions: Option<String>,

    /// Optional external notifier command. When set, Codex will spawn this
    /// program after each completed *turn* (i.e. when the agent finishes
    /// processing a user submission). The value must be the full command
    /// broken into argv tokens **without** the trailing JSON argument; Codex
    /// appends one extra argument containing a JSON payload describing the
    /// event.
    ///
    /// Example `~/.agcodex/config.toml` snippet:
    ///
    /// ```toml
    /// notify = ["notify-send", "Codex"]
    /// ```
    ///
    /// which will be invoked as:
    ///
    /// ```shell
    /// notify-send Codex '{"type":"agent-turn-complete","turn-id":"12345"}'
    /// ```
    ///
    /// If unset, the feature is disabled.
    pub notify: Option<Vec<String>>,

    /// The directory that should be treated as the current working directory
    /// for the session. All relative paths inside the business-logic layer are
    /// resolved against this path.
    pub cwd: PathBuf,

    /// Definition for MCP servers that Codex can reach out to for tool calls.
    pub mcp_servers: HashMap<String, McpServerConfig>,

    /// Combined provider map (defaults merged with user-defined overrides).
    pub model_providers: HashMap<String, ModelProviderInfo>,

    /// Maximum number of bytes to include from an AGENTS.md project doc file.
    pub project_doc_max_bytes: usize,

    /// Directory containing all Codex state (defaults to `~/.agcodex` but can be
    /// overridden by the `CODEX_HOME` environment variable).
    pub codex_home: PathBuf,

    /// Settings that govern if and what will be written to `~/.agcodex/history.jsonl`.
    pub history: History,

    /// Optional URI-based file opener. If set, citations to files in the model
    /// output will be hyperlinked using the specified URI scheme.
    pub file_opener: UriBasedFileOpener,

    /// Collection of settings that are specific to the TUI.
    pub tui: Tui,

    /// Path to the `codex-linux-sandbox` executable. This must be set if
    /// [`crate::exec::SandboxType::LinuxSeccomp`] is used. Note that this
    /// cannot be set in the config file: it must be set in code via
    /// [`ConfigOverrides`].
    ///
    /// When this program is invoked, arg0 will be set to `codex-linux-sandbox`.
    pub codex_linux_sandbox_exe: Option<PathBuf>,

    /// Value to use for `reasoning.effort` when making a request using the
    /// Responses API.
    pub model_reasoning_effort: ReasoningEffort,

    /// If not "none", the value to use for `reasoning.summary` when making a
    /// request using the Responses API.
    pub model_reasoning_summary: ReasoningSummary,

    /// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
    pub chatgpt_base_url: String,

    /// Experimental rollout resume path (absolute path to .jsonl; undocumented).
    pub experimental_resume: Option<PathBuf>,

    /// Include an experimental plan tool that the model can use to update its current plan and status of each step.
    pub include_plan_tool: bool,

    /// Include the `apply_patch` tool for models that benefit from invoking
    /// file edits as a structured tool call. When unset, this falls back to the
    /// model family's default preference.
    pub include_apply_patch_tool: bool,

    /// The value for the `originator` header included with Responses API requests.
    pub responses_originator_header: String,

    /// Preferred authentication method; defaults to `AuthMode::ChatGPT`.
    pub preferred_auth_method: AuthMode,
}

impl Config {
    /// Load configuration with *generic* CLI overrides (`-c key=value`) applied
    /// **in between** the values parsed from `config.toml` and the
    /// strongly-typed overrides specified via [`ConfigOverrides`].
    ///
    /// The precedence order is therefore: `config.toml` < `-c` overrides <
    /// `ConfigOverrides`.
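    ///
    /// A minimal sketch (the `agcodex_core::config` import path is an
    /// assumption based on this file's location in the crate):
    ///
    /// ```no_run
    /// use agcodex_core::config::{Config, ConfigOverrides};
    /// use toml::Value as TomlValue;
    ///
    /// // Apply one `-c`-style override; leave the typed overrides empty.
    /// let cli_overrides = vec![("model".to_string(), TomlValue::String("o3".to_string()))];
    /// let _config = Config::load_with_cli_overrides(cli_overrides, ConfigOverrides::default())?;
    /// # Ok::<(), std::io::Error>(())
    /// ```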
    pub fn load_with_cli_overrides(
        cli_overrides: Vec<(String, TomlValue)>,
        overrides: ConfigOverrides,
    ) -> std::io::Result<Self> {
        // Resolve the directory that stores Codex state (e.g. ~/.agcodex or the
        // value of $CODEX_HOME) so we can embed it into the resulting
        // `Config` instance.
        let codex_home = find_agcodex_home()?;

        // Step 1: parse `config.toml` into a generic TOML value.
        let mut root_value = load_config_as_toml(&codex_home)?;

        // Step 2: apply the `-c` overrides.
        for (path, value) in cli_overrides.into_iter() {
            apply_toml_override(&mut root_value, &path, value);
        }

        // Step 3: deserialize into `ConfigToml` so that Serde can enforce the
        // correct types.
        let cfg: ConfigToml = root_value.try_into().map_err(|e| {
            tracing::error!("Failed to deserialize overridden config: {e}");
            std::io::Error::new(std::io::ErrorKind::InvalidData, e)
        })?;

        // Step 4: merge with the strongly-typed overrides.
        Self::load_from_base_config_with_overrides(cfg, overrides, codex_home)
    }
}

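/// Like [`Config::load_with_cli_overrides`], but stops after applying the `-c`
/// overrides and returns the typed [`ConfigToml`] without building a [`Config`].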
pub fn load_config_as_toml_with_cli_overrides(
    codex_home: &Path,
    cli_overrides: Vec<(String, TomlValue)>,
) -> std::io::Result<ConfigToml> {
    let mut root_value = load_config_as_toml(codex_home)?;

    for (path, value) in cli_overrides.into_iter() {
        apply_toml_override(&mut root_value, &path, value);
    }

    let cfg: ConfigToml = root_value.try_into().map_err(|e| {
        tracing::error!("Failed to deserialize overridden config: {e}");
        std::io::Error::new(std::io::ErrorKind::InvalidData, e)
    })?;

    Ok(cfg)
}

/// Read `CODEX_HOME/config.toml` and return it as a generic TOML value. Returns
/// an empty TOML table when the file does not exist.
pub fn load_config_as_toml(codex_home: &Path) -> std::io::Result<TomlValue> {
    let config_path = codex_home.join(CONFIG_TOML_FILE);
    match std::fs::read_to_string(&config_path) {
        Ok(contents) => match toml::from_str::<TomlValue>(&contents) {
            Ok(val) => Ok(val),
            Err(e) => {
                tracing::error!("Failed to parse config.toml: {e}");
                Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e))
            }
        },
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            tracing::info!("config.toml not found, using defaults");
            Ok(TomlValue::Table(Default::default()))
        }
        Err(e) => {
            tracing::error!("Failed to read config.toml: {e}");
            Err(e)
        }
    }
}

/// Mark `project_path` as trusted in `CODEX_HOME/config.toml`, creating the
/// file if necessary. Use with caution: this rewrites the config file in place.
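///
/// For example (illustrative path), a call with `/home/user/proj` leaves the
/// config containing:
///
/// ```toml
/// [projects."/home/user/proj"]
/// trust_level = "trusted"
/// ```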
pub fn set_project_trusted(codex_home: &Path, project_path: &Path) -> crate::error::Result<()> {
    let config_path = codex_home.join(CONFIG_TOML_FILE);
    // Parse the existing config if present; otherwise start a new document.
    let mut doc = match std::fs::read_to_string(&config_path) {
        Ok(s) => s
            .parse::<DocumentMut>()
            .map_err(|e| crate::error::CodexErr::InvalidConfig(e.to_string()))?,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => DocumentMut::new(),
        Err(e) => return Err(e.into()),
    };

    // Mark the project as trusted; toml_edit transparently creates any
    // missing intermediate tables.
    let project_key = project_path.to_string_lossy().to_string();
    doc["projects"][project_key.as_str()]["trust_level"] = toml_edit::value("trusted");

    // Ensure codex_home exists.
    std::fs::create_dir_all(codex_home)?;

    // Write to a temporary file first...
    let tmp_file = NamedTempFile::new_in(codex_home)?;
    std::fs::write(tmp_file.path(), doc.to_string())?;

    // ...then atomically move it into place as config.toml.
    tmp_file
        .persist(config_path)
        .map_err(|e| CodexErr::Io(e.error))?;

    Ok(())
}

/// Apply a single dotted-path override onto a TOML value.
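///
/// For example, the pair `("sandbox_workspace_write.network_access", true)`
/// creates the intermediate table as needed and produces:
///
/// ```toml
/// [sandbox_workspace_write]
/// network_access = true
/// ```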
fn apply_toml_override(root: &mut TomlValue, path: &str, value: TomlValue) {
    use toml::value::Table;

    let segments: Vec<&str> = path.split('.').collect();
    let mut current = root;

    for (idx, segment) in segments.iter().enumerate() {
        let is_last = idx == segments.len() - 1;

        if is_last {
            match current {
                TomlValue::Table(table) => {
                    table.insert((*segment).to_string(), value);
                }
                _ => {
                    let mut table = Table::new();
                    table.insert((*segment).to_string(), value);
                    *current = TomlValue::Table(table);
                }
            }
            return;
        }

        // Traverse or create intermediate tables, replacing any non-table
        // value that stands in the way.
        match current {
            TomlValue::Table(table) => {
                current = table
                    .entry((*segment).to_string())
                    .or_insert_with(|| TomlValue::Table(Table::new()));
            }
            _ => {
                *current = TomlValue::Table(Table::new());
                if let TomlValue::Table(tbl) = current {
                    current = tbl
                        .entry((*segment).to_string())
                        .or_insert_with(|| TomlValue::Table(Table::new()));
                }
            }
        }
    }
}

/// Base config deserialized from `~/.agcodex/config.toml`.
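///
/// A minimal example of the keys this struct accepts (values taken from the
/// tests below):
///
/// ```toml
/// model = "o3"
/// approval_policy = "untrusted"
/// profile = "gpt3"
///
/// [profiles.gpt3]
/// model = "gpt-3.5-turbo"
/// model_provider = "openai-chat-completions"
/// ```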
#[derive(Deserialize, Debug, Clone, Default)]
pub struct ConfigToml {
    /// Optional override of model selection.
    pub model: Option<String>,

    /// Provider to use from the model_providers map.
    pub model_provider: Option<String>,

    /// Size of the context window for the model, in tokens.
    pub model_context_window: Option<u64>,

    /// Maximum number of output tokens.
    pub model_max_output_tokens: Option<u64>,

    /// Default approval policy for executing commands.
    pub approval_policy: Option<AskForApproval>,

    #[serde(default)]
    pub shell_environment_policy: ShellEnvironmentPolicyToml,

    /// Sandbox mode to use.
    pub sandbox_mode: Option<SandboxMode>,

    /// Sandbox configuration to apply if `sandbox_mode` is `workspace-write`.
    pub sandbox_workspace_write: Option<SandboxWorkspaceWrite>,

    /// Disable server-side response storage (sends the full conversation
    /// context with every request). Currently necessary for OpenAI customers
    /// who have opted into Zero Data Retention (ZDR).
    pub disable_response_storage: Option<bool>,

    /// Optional external command to spawn for end-user notifications.
    #[serde(default)]
    pub notify: Option<Vec<String>>,

    /// System instructions.
    pub instructions: Option<String>,

    /// Definition for MCP servers that Codex can reach out to for tool calls.
    #[serde(default)]
    pub mcp_servers: HashMap<String, McpServerConfig>,

    /// User-defined provider entries that extend/override the built-in list.
    #[serde(default)]
    pub model_providers: HashMap<String, ModelProviderInfo>,

    /// Maximum number of bytes to include from an AGENTS.md project doc file.
    pub project_doc_max_bytes: Option<usize>,

    /// Profile to use from the `profiles` map.
    pub profile: Option<String>,

    /// Named profiles to facilitate switching between different configurations.
    #[serde(default)]
    pub profiles: HashMap<String, ConfigProfile>,

    /// Settings that govern if and what will be written to `~/.agcodex/history.jsonl`.
    #[serde(default)]
    pub history: Option<History>,

    /// Optional URI-based file opener. If set, citations to files in the model
    /// output will be hyperlinked using the specified URI scheme.
    pub file_opener: Option<UriBasedFileOpener>,

    /// Collection of settings that are specific to the TUI.
    pub tui: Option<Tui>,

    /// When set to `true`, `AgentReasoning` events will be hidden from the
    /// UI/output. Defaults to `false`.
    pub hide_agent_reasoning: Option<bool>,

    /// When set to `true`, `AgentReasoningRawContentEvent` events will be shown in the UI/output.
    /// Defaults to `false`.
    pub show_raw_agent_reasoning: Option<bool>,

    pub model_reasoning_effort: Option<ReasoningEffort>,
    pub model_reasoning_summary: Option<ReasoningSummary>,

    /// Override to force-enable reasoning summaries for the configured model.
    pub model_supports_reasoning_summaries: Option<bool>,

    /// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
    pub chatgpt_base_url: Option<String>,

    /// Experimental rollout resume path (absolute path to .jsonl; undocumented).
    pub experimental_resume: Option<PathBuf>,

    /// Experimental path to a file whose contents replace the built-in BASE_INSTRUCTIONS.
    pub experimental_instructions_file: Option<PathBuf>,

    /// The value for the `originator` header included with Responses API requests.
    pub responses_originator_header_internal_override: Option<String>,

    pub projects: Option<HashMap<String, ProjectConfig>>,

    /// Preferred authentication method; defaults to `AuthMode::ChatGPT` when unset.
    pub preferred_auth_method: Option<AuthMode>,
}

#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct ProjectConfig {
    pub trust_level: Option<String>,
}

impl ConfigToml {
    /// Derive the effective sandbox policy from the configuration.
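    ///
    /// For example, this `config.toml` fragment resolves to a
    /// `SandboxPolicy::WorkspaceWrite` with one extra writable root:
    ///
    /// ```toml
    /// sandbox_mode = "workspace-write"
    ///
    /// [sandbox_workspace_write]
    /// writable_roots = ["/my/workspace"]
    /// ```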
    fn derive_sandbox_policy(&self, sandbox_mode_override: Option<SandboxMode>) -> SandboxPolicy {
        let resolved_sandbox_mode = sandbox_mode_override
            .or(self.sandbox_mode)
            .unwrap_or_default();
        match resolved_sandbox_mode {
            SandboxMode::ReadOnly => SandboxPolicy::new_read_only_policy(),
            SandboxMode::WorkspaceWrite => match self.sandbox_workspace_write.as_ref() {
                Some(SandboxWorkspaceWrite {
                    writable_roots,
                    network_access,
                    exclude_tmpdir_env_var,
                    exclude_slash_tmp,
                }) => SandboxPolicy::WorkspaceWrite {
                    writable_roots: writable_roots.clone(),
                    network_access: *network_access,
                    exclude_tmpdir_env_var: *exclude_tmpdir_env_var,
                    exclude_slash_tmp: *exclude_slash_tmp,
                },
                None => SandboxPolicy::new_workspace_write_policy(),
            },
            SandboxMode::DangerFullAccess => SandboxPolicy::DangerFullAccess,
        }
    }

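    /// Returns `true` when `config.toml` marks `resolved_cwd` as a trusted
    /// project, i.e. its `projects` entry has `trust_level = "trusted"`.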
    pub fn is_cwd_trusted(&self, resolved_cwd: &Path) -> bool {
        self.projects
            .as_ref()
            .and_then(|projects| projects.get(resolved_cwd.to_string_lossy().as_ref()))
            .is_some_and(|p| p.trust_level.as_deref() == Some("trusted"))
    }

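    /// Resolve the active profile: an explicit `override_profile` wins over the
    /// top-level `profile` key; with neither set, the default profile is used.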
    pub fn get_config_profile(
        &self,
        override_profile: Option<String>,
    ) -> Result<ConfigProfile, std::io::Error> {
        let profile = override_profile.or_else(|| self.profile.clone());

        match profile {
            Some(key) => {
                if let Some(profile) = self.profiles.get(key.as_str()) {
                    return Ok(profile.clone());
                }

                Err(std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    format!("config profile `{key}` not found"),
                ))
            }
            None => Ok(ConfigProfile::default()),
        }
    }
}

/// Optional overrides for user configuration (e.g., from CLI flags).
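///
/// ```
/// # use agcodex_core::config::ConfigOverrides;
/// // Sketch (the crate/module path is an assumption): set only the fields
/// // you need and default the rest.
/// let _overrides = ConfigOverrides {
///     model: Some("o3".to_string()),
///     ..Default::default()
/// };
/// ```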
#[derive(Default, Debug, Clone)]
pub struct ConfigOverrides {
    pub model: Option<String>,
    pub cwd: Option<PathBuf>,
    pub approval_policy: Option<AskForApproval>,
    pub sandbox_mode: Option<SandboxMode>,
    pub model_provider: Option<String>,
    pub config_profile: Option<String>,
    pub codex_linux_sandbox_exe: Option<PathBuf>,
    pub base_instructions: Option<String>,
    pub include_plan_tool: Option<bool>,
    pub include_apply_patch_tool: Option<bool>,
    pub disable_response_storage: Option<bool>,
    pub show_raw_agent_reasoning: Option<bool>,
}

impl Config {
    /// Meant to be used exclusively for tests: `load_with_cli_overrides()`
    /// should be used in all other cases.
    pub fn load_from_base_config_with_overrides(
        cfg: ConfigToml,
        overrides: ConfigOverrides,
        codex_home: PathBuf,
    ) -> std::io::Result<Self> {
        let user_instructions = Self::load_instructions(Some(&codex_home));

        // Destructure ConfigOverrides fully to ensure all overrides are applied.
        let ConfigOverrides {
            model,
            cwd,
            approval_policy,
            sandbox_mode,
            model_provider,
            config_profile: config_profile_key,
            codex_linux_sandbox_exe,
            base_instructions,
            include_plan_tool,
            include_apply_patch_tool,
            disable_response_storage,
            show_raw_agent_reasoning,
        } = overrides;

        let config_profile = match config_profile_key.as_ref().or(cfg.profile.as_ref()) {
            Some(key) => cfg
                .profiles
                .get(key)
                .ok_or_else(|| {
                    std::io::Error::new(
                        std::io::ErrorKind::NotFound,
                        format!("config profile `{key}` not found"),
                    )
                })?
                .clone(),
            None => ConfigProfile::default(),
        };

        let sandbox_policy = cfg.derive_sandbox_policy(sandbox_mode);

        let mut model_providers = built_in_model_providers();
        // Merge user-defined providers into the built-in list.
        for (key, provider) in cfg.model_providers.into_iter() {
            model_providers.entry(key).or_insert(provider);
        }

        let model_provider_id = model_provider
            .or(config_profile.model_provider)
            .or(cfg.model_provider)
            .unwrap_or_else(|| "openai".to_string());
        let model_provider = model_providers
            .get(&model_provider_id)
            .ok_or_else(|| {
                std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    format!("Model provider `{model_provider_id}` not found"),
                )
            })?
            .clone();

        let shell_environment_policy = cfg.shell_environment_policy.into();

        let resolved_cwd = {
            use std::env;

            match cwd {
                None => {
                    tracing::info!("cwd not set, using current dir");
                    env::current_dir()?
                }
                Some(p) if p.is_absolute() => p,
                Some(p) => {
                    // Resolve relative path against the current working directory.
                    tracing::info!("cwd is relative, resolving against current dir");
                    let mut current = env::current_dir()?;
                    current.push(p);
                    current
                }
            }
        };

        let history = cfg.history.unwrap_or_default();

        let model = model
            .or(config_profile.model)
            .or(cfg.model)
            .unwrap_or_else(default_model);
        let model_family = find_family_for_model(&model).unwrap_or_else(|| {
            let supports_reasoning_summaries =
                cfg.model_supports_reasoning_summaries.unwrap_or(false);
            ModelFamily {
                slug: model.clone(),
                family: model.clone(),
                needs_special_apply_patch_instructions: false,
                supports_reasoning_summaries,
                uses_local_shell_tool: false,
                uses_apply_patch_tool: false,
            }
        });

        let openai_model_info = get_model_info(&model_family);
        let model_context_window = cfg
            .model_context_window
            .or_else(|| openai_model_info.as_ref().map(|info| info.context_window));
        let model_max_output_tokens = cfg.model_max_output_tokens.or_else(|| {
            openai_model_info
                .as_ref()
                .map(|info| info.max_output_tokens)
        });

        let experimental_resume = cfg.experimental_resume;

        // Load base instructions override from a file if specified. If the
        // path is relative, resolve it against the effective cwd so the
        // behaviour matches other path-like config values.
        let experimental_instructions_path = config_profile
            .experimental_instructions_file
            .as_ref()
            .or(cfg.experimental_instructions_file.as_ref());
        let file_base_instructions =
            Self::get_base_instructions(experimental_instructions_path, &resolved_cwd)?;
        let base_instructions = base_instructions.or(file_base_instructions);

        let include_apply_patch_tool_val =
            include_apply_patch_tool.unwrap_or(model_family.uses_apply_patch_tool);

        let responses_originator_header: String = cfg
            .responses_originator_header_internal_override
            .unwrap_or(DEFAULT_RESPONSES_ORIGINATOR_HEADER.to_owned());

        let config = Self {
            model,
            model_family,
            model_context_window,
            model_max_output_tokens,
            model_provider_id,
            model_provider,
            cwd: resolved_cwd,
            approval_policy: approval_policy
                .or(config_profile.approval_policy)
                .or(cfg.approval_policy)
                .unwrap_or_else(AskForApproval::default),
            sandbox_policy,
            shell_environment_policy,
            // Typed overrides take precedence over the profile, which in turn
            // takes precedence over config.toml.
            disable_response_storage: disable_response_storage
                .or(config_profile.disable_response_storage)
                .or(cfg.disable_response_storage)
                .unwrap_or(false),
            notify: cfg.notify,
            user_instructions,
            base_instructions,
            mcp_servers: cfg.mcp_servers,
            model_providers,
            project_doc_max_bytes: cfg.project_doc_max_bytes.unwrap_or(PROJECT_DOC_MAX_BYTES),
            codex_home,
            history,
            file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode),
            tui: cfg.tui.unwrap_or_default(),
            codex_linux_sandbox_exe,

            hide_agent_reasoning: cfg.hide_agent_reasoning.unwrap_or(false),
            show_raw_agent_reasoning: show_raw_agent_reasoning
                .or(cfg.show_raw_agent_reasoning)
                .unwrap_or(false),
            model_reasoning_effort: config_profile
                .model_reasoning_effort
                .or(cfg.model_reasoning_effort)
                .unwrap_or(ReasoningEffort::High),
            model_reasoning_summary: config_profile
                .model_reasoning_summary
                .or(cfg.model_reasoning_summary)
                .unwrap_or(ReasoningSummary::Detailed),

            chatgpt_base_url: config_profile
                .chatgpt_base_url
                .or(cfg.chatgpt_base_url)
                .unwrap_or("https://chatgpt.com/backend-api/".to_string()),

            experimental_resume,
            include_plan_tool: include_plan_tool.unwrap_or(false),
            include_apply_patch_tool: include_apply_patch_tool_val,
            responses_originator_header,
            preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT),
        };
        Ok(config)
    }

    fn load_instructions(codex_dir: Option<&Path>) -> Option<String> {
        let mut p = match codex_dir {
            Some(p) => p.to_path_buf(),
            None => return None,
        };

        p.push("AGENTS.md");
        std::fs::read_to_string(&p).ok().and_then(|s| {
            let s = s.trim();
            if s.is_empty() {
                None
            } else {
                Some(s.to_string())
            }
        })
    }

    fn get_base_instructions(
        path: Option<&PathBuf>,
        cwd: &Path,
    ) -> std::io::Result<Option<String>> {
        let p = match path.as_ref() {
            None => return Ok(None),
            Some(p) => p,
        };

        // Resolve relative paths against the provided cwd to make CLI
        // overrides consistent regardless of where the process was launched
        // from.
        let full_path = if p.is_relative() {
            cwd.join(p)
        } else {
            p.to_path_buf()
        };

        let contents = std::fs::read_to_string(&full_path).map_err(|e| {
            std::io::Error::new(
                e.kind(),
                format!(
                    "failed to read experimental instructions file {}: {e}",
                    full_path.display()
                ),
            )
        })?;

        let s = contents.trim().to_string();
        if s.is_empty() {
            Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!(
                    "experimental instructions file is empty: {}",
                    full_path.display()
                ),
            ))
        } else {
            Ok(Some(s))
        }
    }
}

fn default_model() -> String {
    OPENAI_DEFAULT_MODEL.to_string()
}

/// Returns the path to the Codex configuration directory, which can be
/// specified by the `CODEX_HOME` environment variable. If not set, defaults to
/// `~/.agcodex`.
///
/// - If `CODEX_HOME` is set, the value will be canonicalized and this
///   function will Err if the path does not exist.
/// - If `CODEX_HOME` is not set, this function does not verify that the
///   directory exists.
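///
/// ```shell
/// # Illustrative only: point Codex at an existing scratch state directory.
/// export CODEX_HOME=/tmp/agcodex-home
/// ```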
pub fn find_agcodex_home() -> std::io::Result<PathBuf> {
    // Honor the `CODEX_HOME` environment variable when it is set to allow users
    // (and tests) to override the default location.
    if let Ok(val) = std::env::var("CODEX_HOME")
        && !val.is_empty()
    {
        return PathBuf::from(val).canonicalize();
    }

    let mut p = home_dir().ok_or_else(|| {
        std::io::Error::new(
            std::io::ErrorKind::NotFound,
            "Could not find home directory",
        )
    })?;
    p.push(".agcodex");
    Ok(p)
}

/// Returns the path to the folder where Codex logs are stored. Does not verify
/// that the directory exists.
pub fn log_dir(cfg: &Config) -> std::io::Result<PathBuf> {
    let mut p = cfg.codex_home.clone();
    p.push("log");
    Ok(p)
}

#[cfg(test)]
mod tests {
    use crate::config_types::HistoryPersistence;

    use super::*;
    use pretty_assertions::assert_eq;
    use tempfile::TempDir;

    #[test]
    fn test_toml_parsing() {
        let history_with_persistence = r#"
[history]
persistence = "save-all"
"#;
        let history_with_persistence_cfg = toml::from_str::<ConfigToml>(history_with_persistence)
            .expect("TOML deserialization should succeed");
        assert_eq!(
            Some(History {
                persistence: HistoryPersistence::SaveAll,
                max_bytes: None,
            }),
            history_with_persistence_cfg.history
        );

        let history_no_persistence = r#"
[history]
persistence = "none"
"#;

        let history_no_persistence_cfg = toml::from_str::<ConfigToml>(history_no_persistence)
            .expect("TOML deserialization should succeed");
        assert_eq!(
            Some(History {
                persistence: HistoryPersistence::None,
                max_bytes: None,
            }),
            history_no_persistence_cfg.history
        );
    }

    #[test]
    fn test_sandbox_config_parsing() {
        let sandbox_full_access = r#"
sandbox_mode = "danger-full-access"

[sandbox_workspace_write]
network_access = false  # This should be ignored.
"#;
        let sandbox_full_access_cfg = toml::from_str::<ConfigToml>(sandbox_full_access)
            .expect("TOML deserialization should succeed");
        let sandbox_mode_override = None;
        assert_eq!(
            SandboxPolicy::DangerFullAccess,
            sandbox_full_access_cfg.derive_sandbox_policy(sandbox_mode_override)
        );

        let sandbox_read_only = r#"
sandbox_mode = "read-only"

[sandbox_workspace_write]
network_access = true  # This should be ignored.
"#;

        let sandbox_read_only_cfg = toml::from_str::<ConfigToml>(sandbox_read_only)
            .expect("TOML deserialization should succeed");
        let sandbox_mode_override = None;
        assert_eq!(
            SandboxPolicy::ReadOnly,
            sandbox_read_only_cfg.derive_sandbox_policy(sandbox_mode_override)
        );

        let sandbox_workspace_write = r#"
sandbox_mode = "workspace-write"

[sandbox_workspace_write]
writable_roots = [
    "/my/workspace",
]
exclude_tmpdir_env_var = true
exclude_slash_tmp = true
"#;

        let sandbox_workspace_write_cfg = toml::from_str::<ConfigToml>(sandbox_workspace_write)
            .expect("TOML deserialization should succeed");
        let sandbox_mode_override = None;
        assert_eq!(
            SandboxPolicy::WorkspaceWrite {
                writable_roots: vec![PathBuf::from("/my/workspace")],
                network_access: false,
                exclude_tmpdir_env_var: true,
                exclude_slash_tmp: true,
            },
            sandbox_workspace_write_cfg.derive_sandbox_policy(sandbox_mode_override)
        );
    }

    struct PrecedenceTestFixture {
        cwd: TempDir,
        codex_home: TempDir,
        cfg: ConfigToml,
        model_provider_map: HashMap<String, ModelProviderInfo>,
        openai_provider: ModelProviderInfo,
        openai_chat_completions_provider: ModelProviderInfo,
    }

    impl PrecedenceTestFixture {
        fn cwd(&self) -> PathBuf {
            self.cwd.path().to_path_buf()
        }

        fn codex_home(&self) -> PathBuf {
            self.codex_home.path().to_path_buf()
        }
    }

    fn create_test_fixture() -> std::io::Result<PrecedenceTestFixture> {
        let toml = r#"
model = "o3"
approval_policy = "untrusted"
disable_response_storage = false

# Can be used to determine which profile to use if not specified by
# `ConfigOverrides`.
profile = "gpt3"

[model_providers.openai-chat-completions]
name = "OpenAI using Chat Completions"
base_url = "https://api.openai.com/v1"
env_key = "OPENAI_API_KEY"
wire_api = "chat"
request_max_retries = 4            # retry failed HTTP requests
stream_max_retries = 10            # retry dropped SSE streams
stream_idle_timeout_ms = 300000    # 5m idle timeout

[profiles.o3]
model = "o3"
model_provider = "openai"
approval_policy = "never"
model_reasoning_effort = "high"
model_reasoning_summary = "detailed"

[profiles.gpt3]
model = "gpt-3.5-turbo"
model_provider = "openai-chat-completions"

[profiles.zdr]
model = "o3"
model_provider = "openai"
approval_policy = "on-failure"
disable_response_storage = true
"#;

        let cfg: ConfigToml = toml::from_str(toml).expect("TOML deserialization should succeed");

        // Use a temporary directory for the cwd so it does not contain an
        // AGENTS.md file.
        let cwd_temp_dir = TempDir::new().unwrap();
        let cwd = cwd_temp_dir.path().to_path_buf();
        // Make it look like a Git repo so it does not search for AGENTS.md in
        // a parent folder, either.
        std::fs::write(cwd.join(".git"), "gitdir: nowhere")?;

        let codex_home_temp_dir = TempDir::new().unwrap();

        let openai_chat_completions_provider = ModelProviderInfo {
            name: "OpenAI using Chat Completions".to_string(),
            base_url: Some("https://api.openai.com/v1".to_string()),
            env_key: Some("OPENAI_API_KEY".to_string()),
            wire_api: crate::WireApi::Chat,
            env_key_instructions: None,
            query_params: None,
            http_headers: None,
            env_http_headers: None,
            request_max_retries: Some(4),
            stream_max_retries: Some(10),
            stream_idle_timeout_ms: Some(300_000),
            requires_openai_auth: false,
        };
        let model_provider_map = {
            let mut model_provider_map = built_in_model_providers();
            model_provider_map.insert(
                "openai-chat-completions".to_string(),
                openai_chat_completions_provider.clone(),
            );
            model_provider_map
        };

        let openai_provider = model_provider_map
            .get("openai")
            .expect("openai provider should exist")
            .clone();

        Ok(PrecedenceTestFixture {
            cwd: cwd_temp_dir,
            codex_home: codex_home_temp_dir,
            cfg,
            model_provider_map,
            openai_provider,
            openai_chat_completions_provider,
        })
    }

    /// Users can specify config values at multiple levels that have the
    /// following precedence:
    ///
    /// 1. custom command-line argument, e.g. `--model o3`
    /// 2. as part of a profile, where the `--profile` is specified via a CLI
    ///    (or in the config file itself)
    /// 3. as an entry in `config.toml`, e.g. `model = "o3"`
    /// 4. the default value for a required field defined in code, e.g.
    ///    `OPENAI_DEFAULT_MODEL` at the top of this module
    ///
    /// Note that profiles are the recommended way to specify a group of
    /// configuration options together.
    #[test]
    fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> {
        let fixture = create_test_fixture()?;

        let o3_profile_overrides = ConfigOverrides {
            config_profile: Some("o3".to_string()),
            cwd: Some(fixture.cwd()),
            ..Default::default()
        };
        let o3_profile_config: Config = Config::load_from_base_config_with_overrides(
            fixture.cfg.clone(),
            o3_profile_overrides,
            fixture.codex_home(),
        )?;
        assert_eq!(
            Config {
                model: "o3".to_string(),
                model_family: find_family_for_model("o3").expect("known model slug"),
                model_context_window: Some(200_000),
                model_max_output_tokens: Some(100_000),
                model_provider_id: "openai".to_string(),
                model_provider: fixture.openai_provider.clone(),
                approval_policy: AskForApproval::Never,
                sandbox_policy: SandboxPolicy::new_read_only_policy(),
                shell_environment_policy: ShellEnvironmentPolicy::default(),
                disable_response_storage: false,
                user_instructions: None,
                notify: None,
                cwd: fixture.cwd(),
                mcp_servers: HashMap::new(),
                model_providers: fixture.model_provider_map.clone(),
                project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
                codex_home: fixture.codex_home(),
                history: History::default(),
                file_opener: UriBasedFileOpener::VsCode,
                tui: Tui::default(),
                codex_linux_sandbox_exe: None,
                hide_agent_reasoning: false,
                show_raw_agent_reasoning: false,
                model_reasoning_effort: ReasoningEffort::High,
                model_reasoning_summary: ReasoningSummary::Detailed,
                chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
                experimental_resume: None,
                base_instructions: None,
                include_plan_tool: false,
                include_apply_patch_tool: false,
                responses_originator_header: "agcodex_cli_rs".to_string(),
                preferred_auth_method: AuthMode::ChatGPT,
            },
            o3_profile_config
        );
        Ok(())
    }

    #[test]
    fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> {
        let fixture = create_test_fixture()?;

        let gpt3_profile_overrides = ConfigOverrides {
            config_profile: Some("gpt3".to_string()),
            cwd: Some(fixture.cwd()),
            ..Default::default()
        };
        let gpt3_profile_config = Config::load_from_base_config_with_overrides(
            fixture.cfg.clone(),
            gpt3_profile_overrides,
            fixture.codex_home(),
        )?;
        let expected_gpt3_profile_config = Config {
            model: "gpt-3.5-turbo".to_string(),
            model_family: find_family_for_model("gpt-3.5-turbo").expect("known model slug"),
            model_context_window: Some(16_385),
            model_max_output_tokens: Some(4_096),
            model_provider_id: "openai-chat-completions".to_string(),
            model_provider: fixture.openai_chat_completions_provider.clone(),
            approval_policy: AskForApproval::UnlessTrusted,
            sandbox_policy: SandboxPolicy::new_read_only_policy(),
            shell_environment_policy: ShellEnvironmentPolicy::default(),
            disable_response_storage: false,
            user_instructions: None,
            notify: None,
            cwd: fixture.cwd(),
            mcp_servers: HashMap::new(),
            model_providers: fixture.model_provider_map.clone(),
            project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
            codex_home: fixture.codex_home(),
            history: History::default(),
            file_opener: UriBasedFileOpener::VsCode,
            tui: Tui::default(),
            codex_linux_sandbox_exe: None,
            hide_agent_reasoning: false,
            show_raw_agent_reasoning: false,
            model_reasoning_effort: ReasoningEffort::High,
            model_reasoning_summary: ReasoningSummary::Detailed,
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            experimental_resume: None,
            base_instructions: None,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            responses_originator_header: "agcodex_cli_rs".to_string(),
            preferred_auth_method: AuthMode::ChatGPT,
        };

        assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);

        // Verify that loading without specifying a profile in ConfigOverrides
        // uses the default profile from the config file (which is "gpt3").
        let default_profile_overrides = ConfigOverrides {
            cwd: Some(fixture.cwd()),
            ..Default::default()
        };

        let default_profile_config = Config::load_from_base_config_with_overrides(
            fixture.cfg.clone(),
            default_profile_overrides,
            fixture.codex_home(),
        )?;

        assert_eq!(expected_gpt3_profile_config, default_profile_config);
        Ok(())
    }

    #[test]
    fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> {
        let fixture = create_test_fixture()?;

        let zdr_profile_overrides = ConfigOverrides {
            config_profile: Some("zdr".to_string()),
            cwd: Some(fixture.cwd()),
            ..Default::default()
        };
        let zdr_profile_config = Config::load_from_base_config_with_overrides(
            fixture.cfg.clone(),
            zdr_profile_overrides,
            fixture.codex_home(),
        )?;
        let expected_zdr_profile_config = Config {
            model: "o3".to_string(),
            model_family: find_family_for_model("o3").expect("known model slug"),
            model_context_window: Some(200_000),
            model_max_output_tokens: Some(100_000),
            model_provider_id: "openai".to_string(),
            model_provider: fixture.openai_provider.clone(),
            approval_policy: AskForApproval::OnFailure,
            sandbox_policy: SandboxPolicy::new_read_only_policy(),
            shell_environment_policy: ShellEnvironmentPolicy::default(),
            disable_response_storage: true,
            user_instructions: None,
            notify: None,
            cwd: fixture.cwd(),
            mcp_servers: HashMap::new(),
            model_providers: fixture.model_provider_map.clone(),
            project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
            codex_home: fixture.codex_home(),
            history: History::default(),
            file_opener: UriBasedFileOpener::VsCode,
            tui: Tui::default(),
            codex_linux_sandbox_exe: None,
            hide_agent_reasoning: false,
            show_raw_agent_reasoning: false,
            model_reasoning_effort: ReasoningEffort::High,
            model_reasoning_summary: ReasoningSummary::Detailed,
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            experimental_resume: None,
            base_instructions: None,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            responses_originator_header: "agcodex_cli_rs".to_string(),
            preferred_auth_method: AuthMode::ChatGPT,
        };

        assert_eq!(expected_zdr_profile_config, zdr_profile_config);

        Ok(())
    }
}