use std::path::PathBuf;
use secrecy::SecretString;
use crate::bootstrap::ironclaw_base_dir;
use crate::config::helpers::{optional_env, parse_optional_env, validate_base_url};
use crate::error::ConfigError;
use crate::llm::config::*;
use crate::llm::registry::{ProviderProtocol, ProviderRegistry};
use crate::llm::session::SessionConfig;
use crate::settings::Settings;
impl LlmConfig {
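    /// Builds a minimal config for tests: the nearai backend pointed at an
    /// unroutable localhost URL, with retries and response caching disabled.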
#[cfg(feature = "libsql")]
pub fn for_testing() -> Self {
Self {
backend: "nearai".to_string(),
session: SessionConfig {
auth_base_url: "http://localhost:0".to_string(),
session_path: std::env::temp_dir().join("ironclaw-test-session.json"),
},
nearai: NearAiConfig {
model: "test-model".to_string(),
cheap_model: None,
base_url: "http://localhost:0".to_string(),
api_key: None,
fallback_model: None,
max_retries: 0,
circuit_breaker_threshold: None,
circuit_breaker_recovery_secs: 30,
response_cache_enabled: false,
response_cache_ttl_secs: 3600,
response_cache_max_entries: 100,
failover_cooldown_secs: 300,
failover_cooldown_threshold: 3,
smart_routing_cascade: false,
},
provider: None,
bedrock: None,
gemini_oauth: None,
openai_codex: None,
request_timeout_secs: 120,
cheap_model: None,
smart_routing_cascade: false,
}
}
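    /// Resolves a model name with the precedence: the `env_var` override,
    /// then `settings.selected_model`, then the provided default.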
fn resolve_model(
env_var: &str,
settings: &Settings,
default: &str,
) -> Result<String, ConfigError> {
Ok(optional_env(env_var)?
.or_else(|| settings.selected_model.clone())
.unwrap_or_else(|| default.to_string()))
}
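    /// Resolves the full LLM configuration from environment variables and
    /// persisted settings. The backend comes from `LLM_BACKEND`, then
    /// `settings.llm_backend`, then defaults to `"nearai"`; common aliases
    /// (e.g. `near`, `aws`, `codex`) are normalized to canonical ids.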
pub(crate) fn resolve(settings: &Settings) -> Result<Self, ConfigError> {
let registry = ProviderRegistry::load();
let backend = if let Some(b) = optional_env("LLM_BACKEND")? {
b
} else if let Some(ref b) = settings.llm_backend {
b.clone()
} else {
"nearai".to_string()
};
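        // Normalize common aliases for the built-in (non-registry) backends.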
let backend_lower = backend.to_lowercase();
let is_nearai =
backend_lower == "nearai" || backend_lower == "near_ai" || backend_lower == "near";
let is_bedrock =
backend_lower == "bedrock" || backend_lower == "aws_bedrock" || backend_lower == "aws";
let is_gemini_oauth = backend_lower == "gemini_oauth" || backend_lower == "gemini-oauth";
let is_openai_codex = backend_lower == "openai_codex"
|| backend_lower == "openai-codex"
|| backend_lower == "codex";
if !is_nearai
&& !is_bedrock
&& !is_gemini_oauth
&& !is_openai_codex
&& registry.find(&backend_lower).is_none()
{
            tracing::warn!(
                "Unknown LLM backend '{}'; attempting to use it as an openai_compatible provider.",
                backend
            );
}
let nearai_auth_url = optional_env("NEARAI_AUTH_URL")?
.unwrap_or_else(|| "https://private.near.ai".to_string());
validate_base_url(&nearai_auth_url, "NEARAI_AUTH_URL")?;
let session = SessionConfig {
auth_base_url: nearai_auth_url,
session_path: optional_env("NEARAI_SESSION_PATH")?
.map(PathBuf::from)
.unwrap_or_else(default_session_path),
};
let nearai_api_key = optional_env("NEARAI_API_KEY")?.map(SecretString::from);
let nearai = NearAiConfig {
model: Self::resolve_model("NEARAI_MODEL", settings, crate::llm::DEFAULT_MODEL)?,
cheap_model: optional_env("NEARAI_CHEAP_MODEL")?,
base_url: {
let url = optional_env("NEARAI_BASE_URL")?.unwrap_or_else(|| {
if nearai_api_key.is_some() {
"https://cloud-api.near.ai".to_string()
} else {
"https://private.near.ai".to_string()
}
});
validate_base_url(&url, "NEARAI_BASE_URL")?;
url
},
api_key: nearai_api_key,
fallback_model: optional_env("NEARAI_FALLBACK_MODEL")?,
max_retries: parse_optional_env("NEARAI_MAX_RETRIES", 3)?,
circuit_breaker_threshold: optional_env("CIRCUIT_BREAKER_THRESHOLD")?
.map(|s| s.parse())
.transpose()
.map_err(|e| ConfigError::InvalidValue {
key: "CIRCUIT_BREAKER_THRESHOLD".to_string(),
message: format!("must be a positive integer: {e}"),
})?,
circuit_breaker_recovery_secs: parse_optional_env("CIRCUIT_BREAKER_RECOVERY_SECS", 30)?,
response_cache_enabled: parse_optional_env("RESPONSE_CACHE_ENABLED", false)?,
response_cache_ttl_secs: parse_optional_env("RESPONSE_CACHE_TTL_SECS", 3600)?,
response_cache_max_entries: parse_optional_env("RESPONSE_CACHE_MAX_ENTRIES", 1000)?,
failover_cooldown_secs: parse_optional_env("LLM_FAILOVER_COOLDOWN_SECS", 300)?,
failover_cooldown_threshold: parse_optional_env("LLM_FAILOVER_THRESHOLD", 3)?,
smart_routing_cascade: parse_optional_env("SMART_ROUTING_CASCADE", true)?,
};
let provider = if is_nearai || is_bedrock || is_gemini_oauth || is_openai_codex {
None
} else {
Some(Self::resolve_registry_provider(
&backend_lower,
                &registry,
settings,
)?)
};
let bedrock = if is_bedrock {
let explicit_region =
optional_env("BEDROCK_REGION")?.or_else(|| settings.bedrock_region.clone());
if explicit_region.is_none() {
tracing::info!("BEDROCK_REGION not set, defaulting to us-east-1");
}
let region = explicit_region.unwrap_or_else(|| "us-east-1".to_string());
let model = optional_env("BEDROCK_MODEL")?
.or_else(|| settings.selected_model.clone())
.ok_or_else(|| ConfigError::MissingRequired {
key: "BEDROCK_MODEL".to_string(),
hint: "Set BEDROCK_MODEL when LLM_BACKEND=bedrock".to_string(),
})?;
let cross_region = optional_env("BEDROCK_CROSS_REGION")?
.or_else(|| settings.bedrock_cross_region.clone());
if let Some(ref cr) = cross_region
&& !matches!(cr.as_str(), "us" | "eu" | "apac" | "global")
{
return Err(ConfigError::InvalidValue {
key: "BEDROCK_CROSS_REGION".to_string(),
message: format!(
"'{}' is not valid, expected one of: us, eu, apac, global",
cr
),
});
}
let profile = optional_env("AWS_PROFILE")?.or_else(|| settings.bedrock_profile.clone());
Some(BedrockConfig {
region,
model,
cross_region,
profile,
})
} else {
None
};
let openai_codex = if is_openai_codex {
let model = optional_env("OPENAI_CODEX_MODEL")?
.or(optional_env("OPENAI_MODEL")?)
.or_else(|| settings.selected_model.clone())
.unwrap_or_else(|| "gpt-5.3-codex".to_string());
let auth_endpoint = optional_env("OPENAI_CODEX_AUTH_URL")?
.unwrap_or_else(|| "https://auth.openai.com".to_string());
validate_base_url(&auth_endpoint, "OPENAI_CODEX_AUTH_URL")?;
let api_base_url = optional_env("OPENAI_CODEX_API_URL")?
.unwrap_or_else(|| "https://chatgpt.com/backend-api/codex".to_string());
validate_base_url(&api_base_url, "OPENAI_CODEX_API_URL")?;
let client_id = optional_env("OPENAI_CODEX_CLIENT_ID")?
.unwrap_or_else(|| "app_EMoamEEZ73f0CkXaXp7hrann".to_string());
let session_path = optional_env("OPENAI_CODEX_SESSION_PATH")?
.map(PathBuf::from)
.unwrap_or_else(|| ironclaw_base_dir().join("openai_codex_session.json"));
let token_refresh_margin_secs =
parse_optional_env("OPENAI_CODEX_REFRESH_MARGIN_SECS", 300)?;
Some(OpenAiCodexConfig {
model,
auth_endpoint,
api_base_url,
client_id,
session_path,
token_refresh_margin_secs,
})
} else {
None
};
let request_timeout_secs = parse_optional_env("LLM_REQUEST_TIMEOUT_SECS", 120)?;
        let gemini_oauth = if is_gemini_oauth {
let model = Self::resolve_model("GEMINI_MODEL", settings, "gemini-2.5-flash")?;
let credentials_path = optional_env("GEMINI_CREDENTIALS_PATH")?
.map(PathBuf::from)
.unwrap_or_else(GeminiOauthConfig::default_credentials_path);
Some(GeminiOauthConfig {
model,
credentials_path,
})
} else {
None
};
let cheap_model = optional_env("LLM_CHEAP_MODEL")?;
let smart_routing_cascade = parse_optional_env("SMART_ROUTING_CASCADE", true)?;
Ok(Self {
backend: if is_nearai {
"nearai".to_string()
} else if is_bedrock {
"bedrock".to_string()
} else if is_gemini_oauth {
"gemini_oauth".to_string()
} else if is_openai_codex {
"openai_codex".to_string()
} else if let Some(ref p) = provider {
p.provider_id.clone()
} else {
backend_lower
},
session,
nearai,
provider,
bedrock,
gemini_oauth,
openai_codex,
request_timeout_secs,
cheap_model,
smart_routing_cascade,
})
}
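    /// Resolves a provider defined in the registry. Backends without a registry
    /// entry fall back to generic openai_compatible defaults (`LLM_API_KEY`,
    /// `LLM_BASE_URL`, `LLM_MODEL`), which is how arbitrary OpenAI-compatible
    /// endpoints work without being listed.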
fn resolve_registry_provider(
backend: &str,
registry: &ProviderRegistry,
settings: &Settings,
) -> Result<RegistryProviderConfig, ConfigError> {
let def = registry
.find(backend)
.or_else(|| registry.find("openai_compatible"));
let (
canonical_id,
protocol,
api_key_env,
base_url_env,
model_env,
default_model,
default_base_url,
extra_headers_env,
api_key_required,
base_url_required,
unsupported_params,
) = if let Some(def) = def {
(
def.id.as_str(),
def.protocol,
def.api_key_env.as_deref(),
def.base_url_env.as_deref(),
def.model_env.as_str(),
def.default_model.as_str(),
def.default_base_url.as_deref(),
def.extra_headers_env.as_deref(),
def.api_key_required,
def.base_url_required,
def.unsupported_params.clone(),
)
} else {
(
backend,
ProviderProtocol::OpenAiCompletions,
Some("LLM_API_KEY"),
Some("LLM_BASE_URL"),
"LLM_MODEL",
"default",
None,
Some("LLM_EXTRA_HEADERS"),
false,
true,
Vec::new(),
)
};
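        // Optionally reuse Codex CLI credentials (LLM_USE_CODEX_AUTH) instead of
        // an env-provided API key; ChatGPT-mode credentials also override the
        // base URL below.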
let mut codex_base_url_override: Option<String> = None;
let codex_creds = if parse_optional_env("LLM_USE_CODEX_AUTH", false)? {
let path = optional_env("CODEX_AUTH_PATH")?
.map(std::path::PathBuf::from)
.unwrap_or_else(crate::llm::codex_auth::default_codex_auth_path);
crate::llm::codex_auth::load_codex_credentials(&path)
} else {
None
};
let codex_refresh_token = codex_creds.as_ref().and_then(|c| c.refresh_token.clone());
let codex_auth_path = codex_creds.as_ref().and_then(|c| c.auth_path.clone());
let api_key = if let Some(creds) = codex_creds {
if creds.is_chatgpt_mode {
codex_base_url_override = Some(creds.base_url().to_string());
}
Some(creds.token)
} else if let Some(env_var) = api_key_env {
optional_env(env_var)?.map(SecretString::from)
} else {
None
};
if api_key_required && api_key.is_none() {
if let Some(env_var) = api_key_env {
tracing::debug!(
"API key not found in {env_var} for backend '{backend}'. \
Will be injected from secrets store if available."
);
}
}
let is_codex_chatgpt = codex_base_url_override.is_some();
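        // Base URL priority: Codex ChatGPT override, then env var, then saved
        // settings, then the registry default. Env read errors inside the
        // closure are treated as unset rather than propagated.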
let base_url = codex_base_url_override
.or_else(|| {
if let Some(env_var) = base_url_env {
optional_env(env_var).ok().flatten()
} else {
None
}
})
.or_else(|| {
match backend {
"ollama" => settings.ollama_base_url.clone(),
"openai_compatible" | "openrouter" => {
settings.openai_compatible_base_url.clone()
}
_ => None,
}
})
.or_else(|| default_base_url.map(String::from))
.unwrap_or_default();
if base_url_required
&& base_url.is_empty()
&& let Some(env_var) = base_url_env
{
return Err(ConfigError::MissingRequired {
key: env_var.to_string(),
hint: format!("Set {env_var} when LLM_BACKEND={backend}"),
});
}
if !base_url.is_empty() {
let field = base_url_env.unwrap_or("LLM_BASE_URL");
validate_base_url(&base_url, field)?;
}
let model = Self::resolve_model(model_env, settings, default_model)?;
let extra_headers = if let Some(env_var) = extra_headers_env {
optional_env(env_var)?
.map(|val| parse_extra_headers_with_key(&val, env_var))
.transpose()?
.unwrap_or_default()
} else {
Vec::new()
};
let extra_headers = if canonical_id == "github_copilot" {
merge_extra_headers(
crate::llm::github_copilot_auth::default_headers(),
extra_headers,
)
} else {
extra_headers
};
let oauth_token = if canonical_id == "anthropic" {
optional_env("ANTHROPIC_OAUTH_TOKEN")?.map(SecretString::from)
} else {
None
};
let api_key = if api_key.is_none() && oauth_token.is_some() {
Some(SecretString::from(OAUTH_PLACEHOLDER.to_string()))
} else {
api_key
};
let cache_retention: CacheRetention = if canonical_id == "anthropic" {
optional_env("ANTHROPIC_CACHE_RETENTION")?
.and_then(|val| match val.parse::<CacheRetention>() {
Ok(r) => Some(r),
Err(e) => {
tracing::warn!(
"Invalid ANTHROPIC_CACHE_RETENTION: {e}; defaulting to short"
);
None
}
})
.unwrap_or_default()
} else {
CacheRetention::default()
};
Ok(RegistryProviderConfig {
protocol,
provider_id: canonical_id.to_string(),
api_key,
base_url,
model,
extra_headers,
oauth_token,
is_codex_chatgpt,
refresh_token: codex_refresh_token,
auth_path: codex_auth_path,
cache_retention,
unsupported_params,
})
}
}
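/// Parses a comma-separated `Key:Value` header list from an env var value,
/// e.g. `"HTTP-Referer:https://myapp.com,X-Title:MyApp"`. Whitespace around
/// keys and values is trimmed, empty entries are skipped, and only the first
/// `:` splits, so values such as `Bearer abc:def` keep their colons.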
fn parse_extra_headers_with_key(
val: &str,
env_var_name: &str,
) -> Result<Vec<(String, String)>, ConfigError> {
if val.trim().is_empty() {
return Ok(Vec::new());
}
let mut headers = Vec::new();
for pair in val.split(',') {
let pair = pair.trim();
if pair.is_empty() {
continue;
}
let Some((key, value)) = pair.split_once(':') else {
return Err(ConfigError::InvalidValue {
key: env_var_name.to_string(),
message: format!("malformed header entry '{}', expected Key:Value", pair),
});
};
let key = key.trim();
if key.is_empty() {
return Err(ConfigError::InvalidValue {
key: env_var_name.to_string(),
message: format!("empty header name in entry '{}'", pair),
});
}
headers.push((key.to_string(), value.trim().to_string()));
}
Ok(headers)
}
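/// Merges default headers with overrides. Keys are compared case-insensitively;
/// an override replaces the matching default in place, so first-seen ordering
/// is preserved.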
fn merge_extra_headers(
defaults: Vec<(String, String)>,
overrides: Vec<(String, String)>,
) -> Vec<(String, String)> {
let mut merged = Vec::new();
let mut positions = std::collections::HashMap::<String, usize>::new();
for (key, value) in defaults.into_iter().chain(overrides) {
let normalized = key.to_ascii_lowercase();
if let Some(existing_index) = positions.get(&normalized).copied() {
merged[existing_index] = (key, value);
} else {
positions.insert(normalized, merged.len());
merged.push((key, value));
}
}
merged
}
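/// Default location of the NEAR AI session file under the ironclaw base dir.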
pub fn default_session_path() -> PathBuf {
ironclaw_base_dir().join("session.json")
}
#[cfg(test)]
mod tests {
use super::*;
use crate::config::helpers::lock_env;
use crate::settings::Settings;
use crate::testing::credentials::*;
fn parse_extra_headers(val: &str) -> Result<Vec<(String, String)>, ConfigError> {
parse_extra_headers_with_key(val, "TEST_HEADERS")
}
fn clear_openai_compatible_env() {
unsafe {
std::env::remove_var("LLM_BACKEND");
std::env::remove_var("LLM_BASE_URL");
std::env::remove_var("LLM_MODEL");
}
}
#[test]
fn openai_compatible_uses_selected_model_when_llm_model_unset() {
let _guard = lock_env();
clear_openai_compatible_env();
let settings = Settings {
llm_backend: Some("openai_compatible".to_string()),
openai_compatible_base_url: Some("https://openrouter.ai/api/v1".to_string()),
selected_model: Some("openai/gpt-5.1-codex".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
let provider = cfg.provider.expect("provider config should be present");
assert_eq!(provider.model, "openai/gpt-5.1-codex");
}
#[test]
fn openai_compatible_llm_model_env_overrides_selected_model() {
let _guard = lock_env();
clear_openai_compatible_env();
unsafe {
std::env::set_var("LLM_MODEL", "openai/gpt-5-codex");
}
let settings = Settings {
llm_backend: Some("openai_compatible".to_string()),
openai_compatible_base_url: Some("https://openrouter.ai/api/v1".to_string()),
selected_model: Some("openai/gpt-5.1-codex".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
let provider = cfg.provider.expect("provider config should be present");
assert_eq!(provider.model, "openai/gpt-5-codex");
unsafe {
std::env::remove_var("LLM_MODEL");
}
}
#[test]
fn test_extra_headers_parsed() {
let result = parse_extra_headers("HTTP-Referer:https://myapp.com,X-Title:MyApp").unwrap();
assert_eq!(
result,
vec![
("HTTP-Referer".to_string(), "https://myapp.com".to_string()),
("X-Title".to_string(), "MyApp".to_string()),
]
);
}
#[test]
fn test_extra_headers_empty_string() {
let result = parse_extra_headers("").unwrap();
assert!(result.is_empty());
}
#[test]
fn test_extra_headers_whitespace_only() {
let result = parse_extra_headers(" ").unwrap();
assert!(result.is_empty());
}
#[test]
fn test_extra_headers_malformed() {
let result = parse_extra_headers("NoColonHere");
assert!(result.is_err());
}
#[test]
fn test_extra_headers_empty_key() {
let result = parse_extra_headers(":value");
assert!(result.is_err());
}
#[test]
fn test_extra_headers_value_with_colons() {
let result = parse_extra_headers("Authorization:Bearer abc:def").unwrap();
assert_eq!(
result,
vec![("Authorization".to_string(), "Bearer abc:def".to_string())]
);
}
#[test]
fn test_extra_headers_trailing_comma() {
let result = parse_extra_headers("X-Title:MyApp,").unwrap();
assert_eq!(result, vec![("X-Title".to_string(), "MyApp".to_string())]);
}
#[test]
fn test_extra_headers_with_spaces() {
let result =
parse_extra_headers(" HTTP-Referer : https://myapp.com , X-Title : MyApp ").unwrap();
assert_eq!(
result,
vec![
("HTTP-Referer".to_string(), "https://myapp.com".to_string()),
("X-Title".to_string(), "MyApp".to_string()),
]
);
}
#[test]
fn merge_extra_headers_prefers_overrides_case_insensitively() {
let merged = merge_extra_headers(
vec![
("User-Agent".to_string(), "default-agent".to_string()),
("X-Test".to_string(), "default".to_string()),
],
vec![
("user-agent".to_string(), "override-agent".to_string()),
("X-Extra".to_string(), "present".to_string()),
],
);
assert_eq!(
merged,
vec![
("user-agent".to_string(), "override-agent".to_string()),
("X-Test".to_string(), "default".to_string()),
("X-Extra".to_string(), "present".to_string()),
]
);
}
fn clear_ollama_env() {
unsafe {
std::env::remove_var("LLM_BACKEND");
std::env::remove_var("OLLAMA_BASE_URL");
std::env::remove_var("OLLAMA_MODEL");
}
}
#[test]
fn ollama_uses_selected_model_when_ollama_model_unset() {
let _guard = lock_env();
clear_ollama_env();
let settings = Settings {
llm_backend: Some("ollama".to_string()),
selected_model: Some("llama3.2".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
let provider = cfg.provider.expect("provider config should be present");
assert_eq!(provider.model, "llama3.2");
}
#[test]
fn ollama_model_env_overrides_selected_model() {
let _guard = lock_env();
clear_ollama_env();
unsafe {
std::env::set_var("OLLAMA_MODEL", "mistral:latest");
}
let settings = Settings {
llm_backend: Some("ollama".to_string()),
selected_model: Some("llama3.2".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
let provider = cfg.provider.expect("provider config should be present");
assert_eq!(provider.model, "mistral:latest");
unsafe {
std::env::remove_var("OLLAMA_MODEL");
}
}
#[test]
fn openai_compatible_preserves_dotted_model_name() {
let _guard = lock_env();
clear_openai_compatible_env();
let settings = Settings {
llm_backend: Some("openai_compatible".to_string()),
openai_compatible_base_url: Some("http://localhost:11434/v1".to_string()),
selected_model: Some("llama3.2".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
let provider = cfg.provider.expect("provider config should be present");
assert_eq!(
provider.model, "llama3.2",
"model name with dot must not be truncated"
);
}
#[test]
fn registry_provider_resolves_groq() {
let _guard = lock_env();
unsafe {
std::env::remove_var("LLM_BACKEND");
std::env::remove_var("GROQ_API_KEY");
std::env::remove_var("GROQ_MODEL");
}
let settings = Settings {
llm_backend: Some("groq".to_string()),
selected_model: Some("llama-3.3-70b-versatile".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
assert_eq!(cfg.backend, "groq");
let provider = cfg.provider.expect("provider config should be present");
assert_eq!(provider.provider_id, "groq");
assert_eq!(provider.model, "llama-3.3-70b-versatile");
assert_eq!(provider.base_url, "https://api.groq.com/openai/v1");
assert_eq!(provider.protocol, ProviderProtocol::OpenAiCompletions);
}
#[test]
fn registry_provider_resolves_tinfoil() {
let _guard = lock_env();
unsafe {
std::env::remove_var("LLM_BACKEND");
std::env::remove_var("TINFOIL_API_KEY");
std::env::remove_var("TINFOIL_MODEL");
}
let settings = Settings {
llm_backend: Some("tinfoil".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
assert_eq!(cfg.backend, "tinfoil");
let provider = cfg.provider.expect("provider config should be present");
assert_eq!(provider.base_url, "https://inference.tinfoil.sh/v1");
assert_eq!(provider.model, "kimi-k2-5");
assert!(
provider
.unsupported_params
.contains(&"temperature".to_string()),
"tinfoil should propagate unsupported_params from registry"
);
}
#[test]
fn registry_provider_alias_resolves_zai() {
let _guard = lock_env();
unsafe {
std::env::remove_var("LLM_BACKEND");
std::env::remove_var("ZAI_API_KEY");
std::env::remove_var("ZAI_MODEL");
}
let settings = Settings {
llm_backend: Some("bigmodel".to_string()),
selected_model: Some("glm-5".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
assert_eq!(cfg.backend, "zai");
let provider = cfg.provider.expect("provider config should be present");
assert_eq!(provider.provider_id, "zai");
assert_eq!(provider.model, "glm-5");
assert_eq!(provider.base_url, "https://api.z.ai/api/paas/v4");
assert_eq!(provider.protocol, ProviderProtocol::OpenAiCompletions);
}
#[test]
fn registry_provider_resolves_github_copilot_alias() {
let _guard = lock_env();
unsafe {
std::env::set_var("LLM_BACKEND", "github-copilot");
std::env::set_var("GITHUB_COPILOT_TOKEN", "gho_test_token");
std::env::set_var(
"GITHUB_COPILOT_EXTRA_HEADERS",
"Copilot-Integration-Id:custom-chat,X-Test:enabled",
);
}
let settings = Settings::default();
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
assert_eq!(cfg.backend, "github_copilot");
let provider = cfg.provider.expect("provider config should be present");
assert_eq!(provider.provider_id, "github_copilot");
assert_eq!(provider.base_url, "https://api.githubcopilot.com");
assert_eq!(provider.model, "gpt-4o");
assert!(
provider
.extra_headers
.iter()
.any(|(key, value)| { key == "Copilot-Integration-Id" && value == "custom-chat" })
);
assert!(
provider
.extra_headers
.iter()
.any(|(key, value)| key == "User-Agent" && value == "GitHubCopilotChat/0.26.7")
);
assert!(
provider
.extra_headers
.iter()
.any(|(key, value)| key == "X-Test" && value == "enabled")
);
unsafe {
std::env::remove_var("LLM_BACKEND");
std::env::remove_var("GITHUB_COPILOT_TOKEN");
std::env::remove_var("GITHUB_COPILOT_EXTRA_HEADERS");
}
}
#[test]
fn nearai_backend_has_no_registry_provider() {
let _guard = lock_env();
unsafe {
std::env::remove_var("LLM_BACKEND");
}
let settings = Settings::default();
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
assert_eq!(cfg.backend, "nearai");
assert!(cfg.provider.is_none());
}
#[test]
fn backend_alias_normalized_to_canonical_id() {
let _guard = lock_env();
clear_openai_compatible_env();
unsafe {
std::env::set_var("LLM_BACKEND", "open_ai");
std::env::set_var("OPENAI_API_KEY", TEST_API_KEY);
}
let settings = Settings::default();
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
assert_eq!(
cfg.backend, "openai",
"alias 'open_ai' should be normalized to canonical 'openai'"
);
let provider = cfg.provider.expect("should have provider config");
assert_eq!(provider.provider_id, "openai");
unsafe {
std::env::remove_var("LLM_BACKEND");
std::env::remove_var("OPENAI_API_KEY");
}
}
#[test]
fn unknown_backend_falls_back_to_openai_compatible() {
let _guard = lock_env();
clear_openai_compatible_env();
unsafe {
std::env::set_var("LLM_BACKEND", "some_custom_provider");
std::env::set_var("LLM_BASE_URL", "http://localhost:8080/v1");
}
let settings = Settings::default();
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
assert_eq!(cfg.backend, "openai_compatible");
let provider = cfg.provider.expect("should have provider config");
assert_eq!(provider.provider_id, "openai_compatible");
assert_eq!(provider.base_url, "http://localhost:8080/v1");
unsafe {
std::env::remove_var("LLM_BACKEND");
std::env::remove_var("LLM_BASE_URL");
}
}
#[test]
fn nearai_aliases_all_resolve_to_nearai() {
let _guard = lock_env();
for alias in &["nearai", "near_ai", "near"] {
unsafe {
std::env::set_var("LLM_BACKEND", alias);
}
let settings = Settings::default();
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
assert_eq!(
cfg.backend, "nearai",
"alias '{alias}' should resolve to 'nearai'"
);
assert!(
cfg.provider.is_none(),
"nearai should not have a registry provider"
);
}
unsafe {
std::env::remove_var("LLM_BACKEND");
}
}
#[test]
fn base_url_resolution_priority() {
let _guard = lock_env();
clear_openai_compatible_env();
unsafe {
std::env::set_var("LLM_BACKEND", "openai_compatible");
std::env::set_var("LLM_BASE_URL", "http://localhost:8000/v1");
}
let settings = Settings {
llm_backend: Some("openai_compatible".to_string()),
openai_compatible_base_url: Some("http://localhost:9000/v1".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
let provider = cfg.provider.expect("should have provider config");
assert_eq!(
provider.base_url, "http://localhost:8000/v1",
"env var should take priority over settings"
);
unsafe {
std::env::remove_var("LLM_BASE_URL");
}
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
let provider = cfg.provider.expect("should have provider config");
assert_eq!(
provider.base_url, "http://localhost:9000/v1",
"settings should take priority over registry default"
);
unsafe {
std::env::remove_var("LLM_BACKEND");
}
}
fn clear_anthropic_env() {
unsafe {
std::env::remove_var("LLM_BACKEND");
std::env::remove_var("ANTHROPIC_API_KEY");
std::env::remove_var("ANTHROPIC_OAUTH_TOKEN");
std::env::remove_var("ANTHROPIC_MODEL");
std::env::remove_var("ANTHROPIC_BASE_URL");
}
}
#[test]
fn anthropic_oauth_token_sets_placeholder_api_key() {
use secrecy::ExposeSecret;
let _guard = lock_env();
clear_anthropic_env();
unsafe {
std::env::set_var("ANTHROPIC_OAUTH_TOKEN", TEST_ANTHROPIC_OAUTH_TOKEN);
}
let settings = Settings {
llm_backend: Some("anthropic".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
let provider = cfg.provider.expect("provider config should be present");
assert_eq!(
provider
.api_key
.as_ref()
.map(|k| k.expose_secret().to_string()),
Some(OAUTH_PLACEHOLDER.to_string()),
"api_key should be the OAuth placeholder when only OAuth token is set"
);
assert!(
provider.oauth_token.is_some(),
"oauth_token should be populated"
);
assert_eq!(
provider.oauth_token.as_ref().unwrap().expose_secret(),
TEST_ANTHROPIC_OAUTH_TOKEN
);
clear_anthropic_env();
}
#[test]
fn anthropic_api_key_takes_priority_over_oauth() {
use secrecy::ExposeSecret;
let _guard = lock_env();
clear_anthropic_env();
unsafe {
std::env::set_var("ANTHROPIC_API_KEY", TEST_ANTHROPIC_API_KEY);
std::env::set_var("ANTHROPIC_OAUTH_TOKEN", TEST_ANTHROPIC_OAUTH_TOKEN);
}
let settings = Settings {
llm_backend: Some("anthropic".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
let provider = cfg.provider.expect("provider config should be present");
assert_eq!(
provider
.api_key
.as_ref()
.map(|k| k.expose_secret().to_string()),
Some(TEST_ANTHROPIC_API_KEY.to_string()),
"real API key should take priority over OAuth placeholder"
);
assert!(
provider.oauth_token.is_some(),
"oauth_token should still be populated"
);
clear_anthropic_env();
}
#[test]
fn non_anthropic_provider_has_no_oauth_token() {
let _guard = lock_env();
clear_anthropic_env();
unsafe {
std::env::set_var("ANTHROPIC_OAUTH_TOKEN", TEST_ANTHROPIC_OAUTH_TOKEN);
}
let settings = Settings {
llm_backend: Some("openai".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
let provider = cfg.provider.expect("provider config should be present");
assert!(
provider.oauth_token.is_none(),
"non-Anthropic providers should not pick up ANTHROPIC_OAUTH_TOKEN"
);
clear_anthropic_env();
}
#[test]
fn cache_retention_from_str_primary_values() {
assert_eq!(
"none".parse::<CacheRetention>().unwrap(),
CacheRetention::None
);
assert_eq!(
"short".parse::<CacheRetention>().unwrap(),
CacheRetention::Short
);
assert_eq!(
"long".parse::<CacheRetention>().unwrap(),
CacheRetention::Long
);
}
#[test]
fn cache_retention_from_str_aliases() {
assert_eq!(
"off".parse::<CacheRetention>().unwrap(),
CacheRetention::None
);
assert_eq!(
"disabled".parse::<CacheRetention>().unwrap(),
CacheRetention::None
);
assert_eq!(
"5m".parse::<CacheRetention>().unwrap(),
CacheRetention::Short
);
assert_eq!(
"ephemeral".parse::<CacheRetention>().unwrap(),
CacheRetention::Short
);
assert_eq!(
"1h".parse::<CacheRetention>().unwrap(),
CacheRetention::Long
);
}
#[test]
fn cache_retention_from_str_case_insensitive() {
assert_eq!(
"NONE".parse::<CacheRetention>().unwrap(),
CacheRetention::None
);
assert_eq!(
"Short".parse::<CacheRetention>().unwrap(),
CacheRetention::Short
);
assert_eq!(
"LONG".parse::<CacheRetention>().unwrap(),
CacheRetention::Long
);
assert_eq!(
"Ephemeral".parse::<CacheRetention>().unwrap(),
CacheRetention::Short
);
}
#[test]
fn cache_retention_from_str_invalid() {
let err = "bogus".parse::<CacheRetention>().unwrap_err();
assert!(
err.contains("bogus"),
"error should mention the invalid value"
);
}
#[test]
fn cache_retention_display_round_trip() {
for variant in [
CacheRetention::None,
CacheRetention::Short,
CacheRetention::Long,
] {
let s = variant.to_string();
let parsed: CacheRetention = s.parse().unwrap();
assert_eq!(parsed, variant, "round-trip failed for {s}");
}
}
#[test]
fn test_request_timeout_defaults_to_120() {
let _guard = lock_env();
unsafe {
std::env::remove_var("LLM_REQUEST_TIMEOUT_SECS");
}
let config = LlmConfig::resolve(&Settings::default()).expect("resolve");
assert_eq!(config.request_timeout_secs, 120);
}
#[test]
fn test_request_timeout_configurable() {
let _guard = lock_env();
unsafe {
std::env::set_var("LLM_REQUEST_TIMEOUT_SECS", "300");
}
let config = LlmConfig::resolve(&Settings::default()).expect("resolve");
assert_eq!(config.request_timeout_secs, 300);
unsafe {
std::env::remove_var("LLM_REQUEST_TIMEOUT_SECS");
}
}
fn clear_openai_codex_env() {
unsafe {
std::env::remove_var("LLM_BACKEND");
std::env::remove_var("OPENAI_CODEX_MODEL");
std::env::remove_var("OPENAI_MODEL");
}
}
#[test]
fn openai_codex_resolves_config() {
let _guard = lock_env();
clear_openai_codex_env();
let settings = Settings {
llm_backend: Some("openai_codex".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
assert_eq!(cfg.backend, "openai_codex");
let codex = cfg.openai_codex.expect("codex config should be present");
        assert_eq!(codex.model, "gpt-5.3-codex");
        assert!(
cfg.provider.is_none(),
"codex should not use registry provider"
);
}
#[test]
fn openai_codex_model_env_resolution() {
let _guard = lock_env();
clear_openai_codex_env();
unsafe {
std::env::set_var("OPENAI_CODEX_MODEL", "o3-pro");
}
let settings = Settings {
llm_backend: Some("openai_codex".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
let codex = cfg.openai_codex.expect("codex config should be present");
assert_eq!(codex.model, "o3-pro");
unsafe {
std::env::remove_var("OPENAI_CODEX_MODEL");
}
}
#[test]
fn openai_codex_falls_back_to_openai_model() {
let _guard = lock_env();
clear_openai_codex_env();
unsafe {
std::env::set_var("OPENAI_MODEL", "gpt-4o");
}
let settings = Settings {
llm_backend: Some("openai_codex".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
let codex = cfg.openai_codex.expect("codex config should be present");
assert_eq!(codex.model, "gpt-4o");
unsafe {
std::env::remove_var("OPENAI_MODEL");
}
}
#[test]
fn openai_codex_falls_back_to_selected_model() {
let _guard = lock_env();
clear_openai_codex_env();
let settings = Settings {
llm_backend: Some("openai_codex".to_string()),
selected_model: Some("gpt-4o-mini".to_string()),
..Default::default()
};
let cfg = LlmConfig::resolve(&settings).expect("resolve should succeed");
let codex = cfg.openai_codex.expect("codex config should be present");
assert_eq!(codex.model, "gpt-4o-mini");
}
#[test]
fn openai_codex_rejects_ssrf_api_url() {
let _guard = lock_env();
clear_openai_codex_env();
unsafe {
std::env::set_var(
"OPENAI_CODEX_API_URL",
"http://169.254.169.254/latest/meta-data",
);
}
let settings = Settings {
llm_backend: Some("openai_codex".to_string()),
..Default::default()
};
let err = LlmConfig::resolve(&settings).unwrap_err();
let msg = err.to_string();
assert!(
msg.contains("OPENAI_CODEX_API_URL"),
"error should reference the field name: {msg}"
);
unsafe {
std::env::remove_var("OPENAI_CODEX_API_URL");
}
}
#[test]
fn openai_codex_rejects_ssrf_auth_url() {
let _guard = lock_env();
clear_openai_codex_env();
unsafe {
std::env::set_var("OPENAI_CODEX_AUTH_URL", "http://10.0.0.1");
}
let settings = Settings {
llm_backend: Some("openai_codex".to_string()),
..Default::default()
};
let err = LlmConfig::resolve(&settings).unwrap_err();
let msg = err.to_string();
assert!(
msg.contains("OPENAI_CODEX_AUTH_URL"),
"error should reference the field name: {msg}"
);
unsafe {
std::env::remove_var("OPENAI_CODEX_AUTH_URL");
}
}
}