//! Agent definition types (`distri_types/agent.rs`): strategy, reflection,
//! planning, tool-delivery, and runtime configuration for agents.
1use crate::AgentError;
2use crate::a2a::AgentSkill;
3use crate::browser::{BrowserAgentConfig, BrowsrClientConfig};
4use crate::configuration::DefinitionOverrides;
5use schemars::JsonSchema;
6use serde::{Deserialize, Serialize};
7use std::default::Default;
8
/// Default timeout for external tool execution in seconds (2 minutes).
/// Used by `AgentStrategy::get_external_tool_timeout_secs` when no override is set.
pub const DEFAULT_EXTERNAL_TOOL_TIMEOUT_SECS: u64 = 120;
11
/// A reference to a stored skill that an agent can load on demand
/// (e.g. via the `load_skill` builtin tool).
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
pub struct AvailableSkill {
    /// The skill ID (UUID)
    pub id: String,
    /// Human-readable skill name (for display in the partial)
    pub name: String,
    /// Brief description of what this skill does (shown to the agent);
    /// omitted from serialized output when absent.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
23
24/// Unified Agent Strategy Configuration
25#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
26#[serde(deny_unknown_fields, rename_all = "snake_case")]
27#[derive(Default)]
28pub struct AgentStrategy {
29    /// Depth of reasoning (shallow, standard, deep)
30    #[serde(default, skip_serializing_if = "Option::is_none")]
31    pub reasoning_depth: Option<ReasoningDepth>,
32
33    /// Execution mode - tools vs code
34    #[serde(default, skip_serializing_if = "Option::is_none")]
35    pub execution_mode: Option<ExecutionMode>,
36    /// When to replan
37    #[serde(default, skip_serializing_if = "Option::is_none")]
38    pub replanning: Option<ReplanningConfig>,
39
40    /// Timeout in seconds for external tool execution (default: 120)
41    /// External tools are tools that delegate execution to the frontend/client.
42    #[serde(default, skip_serializing_if = "Option::is_none")]
43    pub external_tool_timeout_secs: Option<u64>,
44}
45
46impl AgentStrategy {
47    /// Get reasoning depth with default fallback
48    pub fn get_reasoning_depth(&self) -> ReasoningDepth {
49        self.reasoning_depth.clone().unwrap_or_default()
50    }
51
52    /// Get execution mode with default fallback
53    pub fn get_execution_mode(&self) -> ExecutionMode {
54        self.execution_mode.clone().unwrap_or_default()
55    }
56
57    /// Get replanning config with default fallback
58    pub fn get_replanning(&self) -> ReplanningConfig {
59        self.replanning.clone().unwrap_or_default()
60    }
61
62    /// Get external tool timeout with default fallback
63    pub fn get_external_tool_timeout_secs(&self) -> u64 {
64        self.external_tool_timeout_secs
65            .unwrap_or(DEFAULT_EXTERNAL_TOOL_TIMEOUT_SECS)
66    }
67}
68
/// Programming language used for code-mode execution (`ExecutionMode::Code`).
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum CodeLanguage {
    /// TypeScript — currently the only supported language.
    #[default]
    Typescript,
}
75
76impl std::fmt::Display for CodeLanguage {
77    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
78        match self {
79            CodeLanguage::Typescript => write!(f, "typescript"),
80        }
81    }
82}
83
/// Reflection configuration (post-execution analysis by a subagent).
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
pub struct ReflectionConfig {
    /// Whether to enable reflection (defaults to false).
    #[serde(default)]
    pub enabled: bool,
    /// Name of the agent definition to use for reflection.
    /// Must be an agent that has the "reflect" tool configured.
    /// If not set, uses the built-in reflection_agent.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reflection_agent: Option<String>,
    /// When to trigger reflection (defaults to `EndOfExecution`).
    #[serde(default)]
    pub trigger: ReflectionTrigger,
    /// Depth of reflection (defaults to `Light`).
    #[serde(default)]
    pub depth: ReflectionDepth,
}
102
/// When to trigger reflection during/after agent execution.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "snake_case")]
pub enum ReflectionTrigger {
    /// At the end of execution (default)
    #[default]
    EndOfExecution,
    /// After each step
    AfterEachStep,
    /// After failures only
    AfterFailures,
    /// After every N steps (N carried as the variant payload)
    AfterNSteps(usize),
}
117
/// Depth of reflection performed by the reflection subagent.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "snake_case")]
pub enum ReflectionDepth {
    /// Light reflection (default)
    #[default]
    Light,
    /// Standard reflection
    Standard,
    /// Deep reflection
    Deep,
}
130
/// Configuration for planning operations.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct PlanConfig {
    /// The model settings for the planning agent; `None` inherits defaults.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub model_settings: Option<ModelSettings>,
    /// The maximum number of iterations allowed during planning (default: 10).
    #[serde(default = "default_plan_max_iterations")]
    pub max_iterations: usize,
}
141
142impl Default for PlanConfig {
143    fn default() -> Self {
144        Self {
145            model_settings: None,
146            max_iterations: default_plan_max_iterations(),
147        }
148    }
149}
150
/// Serde default for `PlanConfig::max_iterations`.
fn default_plan_max_iterations() -> usize {
    10
}
154
/// Depth of reasoning for planning.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ReasoningDepth {
    /// Shallow reasoning - direct action with minimal thought, skip reasoning sections
    Shallow,
    /// Standard reasoning - moderate planning and thought (default)
    #[default]
    Standard,
    /// Deep reasoning - extensive planning, multi-step analysis, and comprehensive thinking
    Deep,
}
167
/// Execution mode - tools vs code.
/// Internally tagged on `type` (e.g. `{ "type": "code", "language": "typescript" }`).
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default, PartialEq)]
#[serde(rename_all = "snake_case", tag = "type")]
pub enum ExecutionMode {
    /// Use tools for execution (default)
    #[default]
    Tools,
    /// Use code execution in the given language
    Code { language: CodeLanguage },
}
178
/// Replanning configuration.
/// Use the `get_trigger`/`is_enabled` accessors for defaulted reads.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "snake_case")]
pub struct ReplanningConfig {
    /// When to trigger replanning (defaults to `Never` when unset)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub trigger: Option<ReplanningTrigger>,
    /// Whether to replan at all (unset counts as disabled)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
}
190
/// When to trigger replanning.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "snake_case")]
pub enum ReplanningTrigger {
    /// Never replan (default)
    #[default]
    Never,
    /// Replan after execution reflection
    AfterReflection,
    /// Replan after every N iterations (N carried as the variant payload)
    AfterNIterations(usize),
    /// Replan after failures
    AfterFailures,
}
205
206impl ReplanningConfig {
207    /// Get trigger with default fallback
208    pub fn get_trigger(&self) -> ReplanningTrigger {
209        self.trigger.clone().unwrap_or_default()
210    }
211
212    /// Get enabled with default fallback
213    pub fn is_enabled(&self) -> bool {
214        self.enabled.unwrap_or(false)
215    }
216}
217
/// Kind of execution loop for an agent.
/// NOTE(review): semantics of the variants are not visible in this file —
/// presumably `Retriable` retries failed steps while `Interleaved` mixes
/// planning and execution; confirm against the executor.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "snake_case")]
pub enum ExecutionKind {
    /// Default execution kind.
    #[default]
    Retriable,
    Interleaved,
}
225
/// Kind of memory an agent maintains across interactions.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "snake_case")]
pub enum MemoryKind {
    /// No persistent memory (default).
    #[default]
    None,
    /// Short-term memory.
    ShortTerm,
    /// Long-term memory.
    LongTerm,
}
234
/// How tools are delivered to the LLM in the prompt.
///
/// Controls the tradeoff between prompt size and tool discoverability:
/// - `Full`: All tools get full schemas (classic behavior, largest prompt)
/// - `Deferred`: Core tools get full schemas; others are name+description only
/// - `NamesOnly`: Maximum savings — only core tools have schemas
///
/// Legacy spellings are accepted via serde aliases (`all_tools`, `tool_search`).
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ToolDeliveryMode {
    /// All tools get full schemas in the prompt.
    #[serde(alias = "all_tools")]
    Full,
    /// Core tools get full schemas; others get name+description only (default).
    #[default]
    #[serde(alias = "tool_search")]
    Deferred,
    /// Only tool names are listed. Model must use `tool_search` for everything
    /// except core tools. Maximum context savings.
    NamesOnly,
}
255
/// Which OpenAI-family API format to use when talking to the LLM.
///
/// - `Auto` (default): Auto-detects from the model name. Codex models use Responses API,
///   everything else uses Chat Completions.
/// - `Completions`: Forces the Chat Completions API (`/v1/chat/completions`)
/// - `Responses`: Forces the Responses API (`/v1/responses`)
///
/// Most OpenAI models (GPT-4o, GPT-4.1, GPT-5, o1, etc.) support both APIs.
/// Codex models (`codex-*`, `*-codex`) are Responses API only.
/// OpenAI recommends the Responses API for new projects (better caching, reasoning).
///
/// Can be set at the model_settings level in agent definitions:
/// ```toml
/// [model_settings]
/// model = "codex-mini-latest"
/// api_format = "responses"  # or "completions" or "auto"
/// ```
///
/// Use [`OpenAiApiFormat::resolve`] to obtain the effective (non-`Auto`) format.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum OpenAiApiFormat {
    /// Auto-detect based on model name (codex models → Responses, everything else → Completions)
    #[default]
    Auto,
    /// Chat Completions API (`/v1/chat/completions`)
    Completions,
    /// Responses API (`/v1/responses`) — required for Codex models, recommended for new projects
    Responses,
}
284
285impl OpenAiApiFormat {
286    /// Resolve the effective format given a model name.
287    /// When `Auto`, inspects the model name to decide.
288    pub fn resolve(&self, model: &str) -> ResolvedOpenAiApiFormat {
289        match self {
290            OpenAiApiFormat::Completions => ResolvedOpenAiApiFormat::Completions,
291            OpenAiApiFormat::Responses => ResolvedOpenAiApiFormat::Responses,
292            OpenAiApiFormat::Auto => {
293                if Self::model_requires_responses_api(model) {
294                    ResolvedOpenAiApiFormat::Responses
295                } else {
296                    ResolvedOpenAiApiFormat::Completions
297                }
298            }
299        }
300    }
301
302    /// Heuristic: models that require the Responses API.
303    ///
304    /// These models return errors on /v1/chat/completions and MUST use /v1/responses:
305    /// - Codex models: codex-mini-latest, gpt-5.1-codex, gpt-5.3-codex, etc.
306    /// - Pro models: gpt-5-pro, gpt-5.2-pro, gpt-5.4-pro, o3-pro
307    /// - Deep research models: o3-deep-research, o4-mini-deep-research
308    fn model_requires_responses_api(model: &str) -> bool {
309        let m = model.to_lowercase();
310        // Codex models (codex-*, *-codex, */codex*)
311        m.starts_with("codex")
312            || m.ends_with("-codex")
313            || m.contains("/codex")
314            // Pro models (*-pro) — require multi-turn interactions only Responses supports
315            || m.ends_with("-pro")
316            // Deep research models (*-deep-research)
317            || m.ends_with("-deep-research")
318    }
319}
320
/// Resolved (non-`Auto`) API format, produced by [`OpenAiApiFormat::resolve`].
///
/// Fieldless and cheap — derives `Copy` and `Eq` so callers can compare and
/// pass it by value without cloning.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ResolvedOpenAiApiFormat {
    /// Chat Completions API (`/v1/chat/completions`)
    Completions,
    /// Responses API (`/v1/responses`)
    Responses,
}
327
/// Supported tool call formats
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum ToolCallFormat {
    /// New XML format: Streaming-capable XML tool calls
    /// Example: <search><query>test</query></search>
    #[default]
    Xml,
    /// New JSON format: JSONL with tool_calls blocks
    /// Example: ```tool_calls\n{"name":"search","arguments":{"query":"test"}}```
    JsonL,

    /// Code execution format: TypeScript/JavaScript code blocks
    /// Example: ```typescript ... ```
    Code,
    /// Provider-native tool calling (serialized as "provider").
    /// NOTE(review): presumably delegates to the LLM provider's built-in
    /// function-calling API — confirm against the executor.
    #[serde(rename = "provider")]
    Provider,
    /// No tool-call parsing.
    None,
}
347
/// Custom user-message construction (dynamic prompting) for an agent.
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema, Default)]
pub struct UserMessageOverrides {
    /// The parts to include in the user message
    // NOTE(review): no #[serde(default)] here, so deserialization requires an
    // explicit `parts` array — confirm this is intentional.
    pub parts: Vec<PartDefinition>,
    /// If true, artifacts will be expanded to their actual content (e.g., image artifacts become Part::Image)
    #[serde(default)]
    pub include_artifacts: bool,
    /// If true (default), step count information will be included at the end of the user message
    #[serde(default = "default_include_step_count")]
    pub include_step_count: Option<bool>,
}
359
/// Serde default for `UserMessageOverrides::include_step_count` (enabled).
fn default_include_step_count() -> Option<bool> {
    Some(true)
}
363
/// One part of a custom user message.
/// Adjacently tagged: `{ "type": "template", "source": "<key>" }`.
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
#[serde(tag = "type", content = "source", rename_all = "snake_case")]
pub enum PartDefinition {
    Template(String),   // Prompt Template Key
    SessionKey(String), // Session key reference
}
370
/// Minimal LLM-backed agent definition (name + model + tool-format settings).
/// Unknown keys are rejected (`deny_unknown_fields`).
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct LlmDefinition {
    /// The name of the agent.
    pub name: String,
    /// Settings related to the model used by the agent.
    /// Use `ms()` / `ms_mut()` for access with a user-facing error when unset.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub model_settings: Option<ModelSettings>,
    /// Tool calling configuration
    #[serde(default)]
    pub tool_format: ToolCallFormat,
    /// How tools are delivered to the LLM (all upfront vs on-demand search)
    #[serde(default)]
    pub tool_delivery_mode: ToolDeliveryMode,
}
386
387impl LlmDefinition {
388    /// Get a reference to model_settings.
389    /// Returns an error if model_settings is None.
390    pub fn ms(&self) -> Result<&ModelSettings, String> {
391        self.model_settings.as_ref().ok_or_else(|| {
392            "No model configured. Please set a default model in Agent Settings → Default Model."
393                .to_string()
394        })
395    }
396
397    /// Get a mutable reference to model_settings.
398    /// Returns an error if model_settings is None.
399    pub fn ms_mut(&mut self) -> Result<&mut ModelSettings, String> {
400        self.model_settings.as_mut().ok_or_else(|| {
401            "No model configured. Please set a default model in Agent Settings → Default Model."
402                .to_string()
403        })
404    }
405}
406
/// Runtime environment in which the agent is executing.
/// Determines which built-in agent variants and tools are available.
/// Serialized in snake_case ("cli", "cloud", "browser").
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, PartialEq, Eq, Hash, Default)]
#[serde(rename_all = "snake_case")]
pub enum RuntimeMode {
    /// Running from distri-cli with local filesystem access
    Cli,
    /// Running on distri-cloud server (browsr sandbox for code execution); default
    #[default]
    Cloud,
    /// Running in browser with IndexedDB filesystem
    Browser,
}
420
/// Agent definition - complete configuration for an agent
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
pub struct StandardDefinition {
    /// The name of the agent.
    pub name: String,
    /// A brief description of the agent's purpose.
    #[serde(default)]
    pub description: String,

    /// The version of the agent.
    #[serde(default = "default_agent_version")]
    pub version: Option<String>,

    /// Instructions for the agent - serves as an introduction defining what the agent is and does.
    #[serde(default)]
    pub instructions: String,

    /// A list of MCP server definitions associated with the agent.
    #[serde(default)]
    pub mcp_servers: Option<Vec<McpDefinition>>,
    /// Settings related to the model used by the agent.
    /// When `None`, the agent inherits model settings from the orchestrator context defaults.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub model_settings: Option<ModelSettings>,
    /// Optional lower-level model settings for lightweight analysis helpers
    /// (falls back to `model_settings` — see `analysis_model_settings_config`).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub analysis_model_settings: Option<ModelSettings>,

    /// The size of the history to maintain for the agent.
    #[serde(default = "default_history_size")]
    pub history_size: Option<usize>,
    /// The new strategy configuration for the agent.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub strategy: Option<AgentStrategy>,
    /// A2A-specific fields
    /// URL of an icon to display on the agent card.
    #[serde(default)]
    pub icon_url: Option<String>,

    /// Maximum number of execution iterations for this agent.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub max_iterations: Option<usize>,

    /// A2A agent card skills metadata (describes capabilities for agent-to-agent protocol)
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub skills_description: Vec<AgentSkill>,

    /// Skills available for on-demand loading by this agent
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub available_skills: Vec<AvailableSkill>,

    /// List of sub-agents that this agent can transfer control to
    #[serde(default)]
    pub sub_agents: Vec<String>,

    /// Tool calling configuration
    #[serde(default)]
    pub tool_format: ToolCallFormat,

    /// How tools are delivered to the LLM (all upfront vs on-demand search)
    #[serde(default)]
    pub tool_delivery_mode: ToolDeliveryMode,

    /// Tools configuration for this agent
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tools: Option<ToolsConfig>,

    /// Custom handlebars partials (name -> template path) for use in custom prompts
    #[serde(default, skip_serializing_if = "std::collections::HashMap::is_empty")]
    pub partials: std::collections::HashMap<String, String>,

    /// Reflection configuration for post-execution analysis using a subagent
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reflection: Option<ReflectionConfig>,
    /// Whether to enable TODO management functionality
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enable_todos: Option<bool>,

    /// Browser configuration for this agent (enables shared Chromium automation)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub browser_config: Option<BrowserAgentConfig>,

    /// Whether to include shell/code execution tools (start_shell, execute_shell, stop_shell)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub include_shell: Option<bool>,

    /// Context size override for this agent (overrides model_settings.context_size)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub context_size: Option<u32>,

    /// Strategy for prompt construction (append default template vs fully custom)
    /// Defaults to Some(true) when the field is absent during deserialization.
    #[serde(
        skip_serializing_if = "Option::is_none",
        default = "default_append_default_instructions"
    )]
    pub append_default_instructions: Option<bool>,
    /// Whether to include the built-in scratchpad/history in prompts (default: true)
    #[serde(
        skip_serializing_if = "Option::is_none",
        default = "default_include_scratchpad"
    )]
    pub include_scratchpad: Option<bool>,

    /// Optional hook names to attach to this agent
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub hooks: Vec<String>,

    /// Custom user message construction (dynamic prompting)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub user_message_overrides: Option<UserMessageOverrides>,

    /// Whether context compaction is enabled for this agent (default: true).
    /// Only serialized when disabled (the `is_true` skip helper elides the default).
    #[serde(
        default = "default_compaction_enabled",
        skip_serializing_if = "is_true"
    )]
    pub compaction_enabled: bool,

    /// **DEPRECATED**: prefer `runtime = ["cli"]` instead.
    ///
    /// When true, this is treated as `runtime = [Cli]` — the agent needs a
    /// CLI-style local environment (filesystem, shell exec). In a Cloud
    /// runtime the orchestrator forks the call into a sandbox via
    /// `BackgroundRunner`. Kept for backwards compatibility with existing
    /// agent definitions and the `--remote` CLI flag / `DefinitionOverrides.remote`.
    #[serde(default, alias = "deepagent")]
    pub remote: bool,

    /// Runtime constraint for this agent. Like Docker's `platforms` field:
    ///
    /// - empty / omitted → runs in any runtime (default).
    /// - `["cli"]` → only runs when `ExecutorContext.runtime_mode == Cli`,
    ///   OR via a `BackgroundRunner` providing `Cli` (e.g. `SandboxLauncher`
    ///   spawning `distri-cli` inside a browsr container).
    /// - `["cli", "cloud"]` → runs in either Cli or Cloud, but not Browser.
    ///
    /// When the current runtime doesn't match any allowed value and no
    /// compatible runner exists, the orchestrator fails fast at request entry.
    ///
    /// Accepts both scalar (`runtime = "cli"`) and array (`runtime = ["cli"]`)
    /// syntax in TOML/JSON for ergonomics.
    #[serde(
        default,
        deserialize_with = "deserialize_runtime_modes",
        skip_serializing_if = "Vec::is_empty"
    )]
    pub runtime: Vec<RuntimeMode>,
}
567
568/// Accept either a single `RuntimeMode` string or an array of them.
569fn deserialize_runtime_modes<'de, D>(deserializer: D) -> Result<Vec<RuntimeMode>, D::Error>
570where
571    D: serde::Deserializer<'de>,
572{
573    use serde::de::{self, Deserialize};
574
575    #[derive(Deserialize)]
576    #[serde(untagged)]
577    enum OneOrMany {
578        One(RuntimeMode),
579        Many(Vec<RuntimeMode>),
580    }
581
582    match Option::<OneOrMany>::deserialize(deserializer)? {
583        None => Ok(Vec::new()),
584        Some(OneOrMany::One(rt)) => Ok(vec![rt]),
585        Some(OneOrMany::Many(v)) => {
586            // Reject duplicates so authors notice typos like ["cli", "cli"].
587            let mut seen = std::collections::HashSet::new();
588            for rt in &v {
589                let key = format!("{:?}", rt);
590                if !seen.insert(key) {
591                    return Err(de::Error::custom(format!(
592                        "duplicate runtime entry: {:?}",
593                        rt
594                    )));
595                }
596            }
597            Ok(v)
598        }
599    }
600}
/// Serde default: append the built-in instruction template (Some(true)).
fn default_append_default_instructions() -> Option<bool> {
    Some(true)
}
/// Serde default: include the scratchpad/history in prompts (Some(true)).
fn default_include_scratchpad() -> Option<bool> {
    Some(true)
}
/// Serde default: context compaction is on unless explicitly disabled.
fn default_compaction_enabled() -> bool {
    true
}
/// skip_serializing_if helper: elide `compaction_enabled` when it is the default (true).
fn is_true(v: &bool) -> bool {
    *v
}
impl StandardDefinition {
    /// The set of runtimes this agent is allowed to run in, with the
    /// deprecated `remote: true` flag merged in (treated as `[Cli]` when
    /// `runtime` is empty).
    ///
    /// Empty result = no constraint = runs anywhere.
    pub fn allowed_runtimes(&self) -> Vec<RuntimeMode> {
        // An explicit `runtime` list takes precedence over the legacy flag.
        if !self.runtime.is_empty() {
            return self.runtime.clone();
        }
        if self.remote {
            return vec![RuntimeMode::Cli];
        }
        Vec::new()
    }

    /// Whether this agent can execute given the caller's `current` runtime,
    /// optionally with a `BackgroundRunner` providing an alternative runtime
    /// via remote dispatch.
    ///
    /// Returns true when:
    /// - the agent has no runtime constraint, OR
    /// - the current runtime matches one of the allowed runtimes, OR
    /// - a runner is available whose `provided_runtime` matches one of the
    ///   allowed runtimes.
    pub fn is_runnable_in(
        &self,
        current: &RuntimeMode,
        runner_provides: Option<&RuntimeMode>,
    ) -> bool {
        let allowed = self.allowed_runtimes();
        if allowed.is_empty() {
            return true;
        }
        if allowed.iter().any(|rt| rt == current) {
            return true;
        }
        match runner_provides {
            Some(p) => allowed.iter().any(|rt| rt == p),
            None => false,
        }
    }

    /// Check if browser should be initialized automatically in orchestrator (default: false)
    pub fn should_use_browser(&self) -> bool {
        self.browser_config
            .as_ref()
            .map(|cfg| cfg.is_enabled())
            .unwrap_or(false)
    }

    /// Returns browser config if defined
    pub fn browser_settings(&self) -> Option<&BrowserAgentConfig> {
        self.browser_config.as_ref()
    }

    /// Returns the runtime Chromium driver configuration if enabled
    pub fn browser_runtime_config(&self) -> Option<BrowsrClientConfig> {
        self.browser_config.as_ref().map(|cfg| cfg.runtime_config())
    }

    /// Should browser session state be serialized after tool runs
    pub fn should_persist_browser_session(&self) -> bool {
        self.browser_config
            .as_ref()
            .map(|cfg| cfg.should_persist_session())
            .unwrap_or(false)
    }

    /// Check if reflection is enabled (default: false)
    pub fn is_reflection_enabled(&self) -> bool {
        self.reflection.as_ref().map(|r| r.enabled).unwrap_or(false)
    }

    /// Get the reflection configuration, if any.
    /// Returns None when reflection is configured but disabled.
    pub fn reflection_config(&self) -> Option<&ReflectionConfig> {
        self.reflection.as_ref().filter(|r| r.enabled)
    }
    /// Check if TODO management functionality is enabled (default: false)
    pub fn is_todos_enabled(&self) -> bool {
        self.enable_todos.unwrap_or(false)
    }

    /// Check if shell/code execution tools should be included (default: false)
    pub fn should_include_shell(&self) -> bool {
        self.include_shell.unwrap_or(false)
    }

    /// Get model settings if configured.
    pub fn model_settings(&self) -> Option<&ModelSettings> {
        self.model_settings.as_ref()
    }

    /// Get a mutable reference to model settings, if present.
    pub fn model_settings_mut(&mut self) -> Option<&mut ModelSettings> {
        self.model_settings.as_mut()
    }

    /// Get the effective context size (agent-level override or model settings).
    /// Zero values are treated as unset; falls back to `default_context_size()`.
    pub fn get_effective_context_size(&self) -> u32 {
        self.context_size
            .filter(|&s| s > 0)
            .or_else(|| {
                self.model_settings()
                    .map(|ms| ms.inner.context_size)
                    .filter(|&s| s > 0)
            })
            .unwrap_or_else(default_context_size)
    }

    /// Model settings to use for lightweight browser analysis helpers (e.g., observe_summary commands).
    /// Falls back to the main `model_settings` when no analysis-specific ones are set.
    pub fn analysis_model_settings_config(&self) -> Option<&ModelSettings> {
        self.analysis_model_settings
            .as_ref()
            .or_else(|| self.model_settings())
    }

    /// Whether to include the persistent scratchpad/history in prompts
    pub fn include_scratchpad(&self) -> bool {
        self.include_scratchpad.unwrap_or(true)
    }

    /// Apply definition overrides to this agent definition
    pub fn apply_overrides(&mut self, overrides: DefinitionOverrides) {
        // Override model settings (only if model_settings already exists)
        if let Some(ref mut ms) = self.model_settings {
            if let Some(model) = overrides.model {
                // Strip provider prefix if present (e.g. "custom_microsoft_foundry/gpt-5.4" → "gpt-5.4")
                ms.model = model
                    .split_once('/')
                    .map(|(_, m)| m.to_string())
                    .unwrap_or(model);
            }
            if let Some(temperature) = overrides.temperature {
                ms.inner.temperature = Some(temperature);
            }
            if let Some(max_tokens) = overrides.max_tokens {
                ms.inner.max_tokens = Some(max_tokens);
            }
        }

        // Override max_iterations
        if let Some(max_iterations) = overrides.max_iterations {
            self.max_iterations = Some(max_iterations);
        }

        // Override instructions
        if let Some(instructions) = overrides.instructions {
            self.instructions = instructions;
        }

        if let Some(remote) = overrides.remote {
            self.remote = remote;
        }

        // Browser override: create a default config on demand if none exists yet.
        if let Some(use_browser) = overrides.use_browser {
            let mut config = self.browser_config.clone().unwrap_or_default();
            config.enabled = use_browser;
            self.browser_config = Some(config);
        }

        // Append dynamic tool factories (additive — never replaces existing ones)
        if let Some(dynamic_tools) = overrides.dynamic_tools {
            let tools = self.tools.get_or_insert_with(ToolsConfig::default);
            tools.dynamic.extend(dynamic_tools);
        }
    }
}
781
/// Canonical list of valid builtin tool names.
///
/// Includes both server-executed tools (search, start_shell, etc.) and
/// client-executed tools (http_request). Agent definitions reference these
/// by name in `tools.builtin = [...]`.
pub const VALID_BUILTIN_TOOLS: &[&str] = &[
    // Agent control
    "final",
    "reflect",
    "transfer_to_agent",
    // Browser & scraping
    "browsr_scrape",
    "browsr_browser",
    "browsr_crawl",
    "browser_step",
    "search",
    // Shell
    "start_shell",
    "execute_shell",
    "stop_shell",
    // Code execution
    "distri_execute_code",
    // Tool discovery
    "tool_search",
    "load_skill",
    // Connection & secrets
    "inject_connection_env",
    // Logging
    "console_log",
    // Artifacts & filesystem
    "artifact_tool",
    // Todos
    "todos",
];

/// Tools that always get full schemas, never deferred.
/// These are the most commonly used tools that agents need immediately.
// NOTE(review): "write_todos" appears here but VALID_BUILTIN_TOOLS only lists
// "todos" — confirm whether one of the two names is stale.
pub const CORE_TOOLS: &[&str] = &[
    "final",
    "transfer_to_agent",
    "tool_search",
    "write_todos",
    "execute_shell",
    "start_shell",
    "load_skill",
];

/// Default threshold: defer tools when total count exceeds this.
/// See `ToolsConfig::deferred_threshold` for the per-agent override.
pub const DEFAULT_DEFERRED_THRESHOLD: usize = 15;
832
/// Tools configuration for agents. Unknown keys are rejected (`deny_unknown_fields`).
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
#[serde(deny_unknown_fields)]
pub struct ToolsConfig {
    /// Built-in tools to include (e.g., ["final", "transfer_to_agent"])
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub builtin: Vec<String>,

    /// Dynamic tool factories — each creates a named tool at runtime.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub dynamic: Vec<crate::dynamic_tool::DynamicToolFactory>,

    /// MCP server tool configurations
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub mcp: Vec<McpToolConfig>,

    /// External tools to include from client
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub external: Option<Vec<String>>,

    /// How tools are delivered to the model. Defaults to `Deferred`
    /// (`ToolDeliveryMode::default()`): only core tools get full schemas;
    /// others appear as name+description and must be fetched via `tool_search`.
    #[serde(default, skip_serializing_if = "is_default_delivery_mode")]
    pub delivery_mode: ToolDeliveryMode,

    /// Tool count threshold for automatic deferral.
    /// When `delivery_mode` is `Deferred` and total tools exceed this,
    /// non-core tools are deferred. Default: 15 (DEFAULT_DEFERRED_THRESHOLD).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub deferred_threshold: Option<usize>,

    /// Additional tool names to always include with full schemas (beyond CORE_TOOLS).
    /// Useful for agent-specific tools that should never be deferred.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub always_full_schema: Vec<String>,
}
869
/// Serde helper: treats `Deferred` as the skip-on-serialize value for
/// `ToolsConfig::delivery_mode`.
///
/// NOTE(review): this skips `Deferred`, while the field's `#[serde(default)]`
/// re-materializes `ToolDeliveryMode::default()` on deserialization. If that
/// default is not `Deferred`, serialization is lossy for `Deferred` configs —
/// confirm against the enum's `Default` impl.
fn is_default_delivery_mode(mode: &ToolDeliveryMode) -> bool {
    *mode == ToolDeliveryMode::Deferred
}
873
874impl ToolsConfig {
875    /// Validate that all builtin tool names are recognized.
876    /// Returns a list of invalid tool names, or empty if all are valid.
877    pub fn invalid_builtin_tools(&self) -> Vec<String> {
878        self.builtin
879            .iter()
880            .filter(|name| !VALID_BUILTIN_TOOLS.contains(&name.as_str()))
881            .cloned()
882            .collect()
883    }
884
885    /// Whether a tool should always get a full schema (never deferred).
886    pub fn is_core_tool(&self, name: &str) -> bool {
887        CORE_TOOLS.contains(&name)
888            || self.always_full_schema.iter().any(|n| n == name)
889            // call_* agent tools are always core (the model needs to know how to call sub-agents)
890            || name.starts_with("call_")
891    }
892
893    /// Effective threshold for automatic tool deferral.
894    pub fn effective_threshold(&self) -> usize {
895        self.deferred_threshold
896            .unwrap_or(DEFAULT_DEFERRED_THRESHOLD)
897    }
898
899    /// Determine the effective delivery mode given the total tool count.
900    /// If mode is `Full` but tool count exceeds threshold, stays `Full`
901    /// Deferred always stays Deferred — context efficiency is the default.
902    pub fn effective_delivery_mode(&self, _total_tools: usize) -> ToolDeliveryMode {
903        self.delivery_mode.clone()
904    }
905}
906
// Historical note: a setting for where filesystem and artifact tools should
// execute used to live here. Deprecated: filesystem tools are no longer
// included as server builtins.
909
/// Configuration for tools from an MCP server
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct McpToolConfig {
    /// Name of the MCP server
    pub server: String,

    /// Include patterns (glob-style, e.g., ["fetch_*", "extract_text"])
    /// Use ["*"] to include all tools from the server
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub include: Vec<String>,

    /// Exclude patterns (glob-style, e.g., ["delete_*", "rm_*"])
    /// NOTE(review): presumably applied after the include patterns — confirm
    /// against the tool-loading code that consumes this config.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub exclude: Vec<String>,
}
926
/// A reference to an MCP server together with an optional tool-name filter
/// and per-server authentication configuration.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct McpDefinition {
    /// The filter applied to the tools in this MCP definition.
    /// `None` presumably means "no filtering" — confirm against consumers.
    #[serde(default)]
    pub filter: Option<Vec<String>>,
    /// The name of the MCP server.
    pub name: String,
    /// The type of the MCP server (Tool or Agent).
    #[serde(default)]
    pub r#type: McpServerType,
    /// Authentication configuration for this MCP server.
    #[serde(default)]
    pub auth_config: Option<crate::a2a::SecurityScheme>,
}
942
/// Kind of MCP server: a plain tool server (the default) or an agent exposed
/// over MCP. Serialized in lowercase ("tool" / "agent").
#[derive(Debug, Clone, Serialize, Deserialize, Default, JsonSchema, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum McpServerType {
    #[default]
    Tool,
    Agent,
}
950
/// Supported LLM backends. Serialized internally tagged on `name`
/// (e.g. `{ "name": "openai" }`); inline `api_key` fields, where present,
/// take precedence over secrets (see `required_secret_keys`).
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(deny_unknown_fields, rename_all = "lowercase", tag = "name")]
pub enum ModelProvider {
    /// OpenAI's hosted API; no inline config — credentials come from secrets.
    #[serde(rename = "openai")]
    OpenAI {},
    /// Any endpoint speaking the OpenAI API (self-hosted or third-party).
    #[serde(rename = "openai_compat")]
    OpenAICompatible {
        base_url: String,
        api_key: Option<String>,
        project_id: Option<String>,
    },
    /// Azure OpenAI; model is addressed via `deployment`.
    #[serde(rename = "azure_openai")]
    AzureOpenAI {
        base_url: String,
        api_key: Option<String>,
        deployment: String,
        #[serde(default = "ModelProvider::azure_api_version")]
        api_version: String,
    },
    /// Anthropic; `base_url` defaults to `None` (client library default).
    #[serde(rename = "anthropic")]
    Anthropic {
        #[serde(default = "ModelProvider::anthropic_base_url")]
        base_url: Option<String>,
        api_key: Option<String>,
    },
    /// Google Gemini via its OpenAI-compatible endpoint.
    #[serde(rename = "gemini")]
    Gemini {
        #[serde(default = "ModelProvider::gemini_base_url")]
        base_url: String,
        api_key: Option<String>,
    },
    /// Azure AI Foundry (model inference endpoints).
    #[serde(rename = "azure_ai_foundry")]
    AzureAiFoundry {
        base_url: String,
        api_key: Option<String>,
    },
    /// AWS Bedrock.
    #[serde(rename = "aws_bedrock")]
    AwsBedrock {
        base_url: String,
        api_key: Option<String>,
    },
    /// Google Vertex AI.
    #[serde(rename = "google_vertex")]
    GoogleVertex {
        base_url: String,
        api_key: Option<String>,
        project_id: Option<String>,
    },
    /// Alibaba Cloud DashScope via its OpenAI-compatible endpoint.
    #[serde(rename = "alibaba_cloud")]
    AlibabaCloud {
        #[serde(default = "ModelProvider::alibaba_cloud_base_url")]
        base_url: String,
        api_key: Option<String>,
    },
}
/// Defines the secret requirements for a provider.
/// Sourced from `default_models.json` via `ModelProvider::all_provider_definitions`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProviderSecretDefinition {
    /// Provider identifier (e.g., "openai", "anthropic")
    pub id: String,
    /// Human-readable label
    pub label: String,
    /// List of required secret keys with metadata
    pub keys: Vec<SecretKeyDefinition>,
}
1015
/// Defines a single secret key requirement
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SecretKeyDefinition {
    /// The environment variable / secret store key (e.g., "OPENAI_API_KEY")
    pub key: String,
    /// Human-readable label
    pub label: String,
    /// Placeholder for UI input
    pub placeholder: String,
    /// Whether this secret is required (vs optional). Defaults to true.
    #[serde(default = "default_required")]
    pub required: bool,
    /// Whether this field contains sensitive data (masked in UI, stored encrypted).
    /// Defaults to true. Set to false for non-sensitive config like URLs, project IDs.
    #[serde(default = "default_sensitive")]
    pub sensitive: bool,
}
1033
/// Serde default for `SecretKeyDefinition::required`: secrets are required
/// unless explicitly marked optional.
fn default_required() -> bool {
    true
}
1037
/// Serde default for `SecretKeyDefinition::sensitive`: fields are treated as
/// sensitive (masked/encrypted) unless explicitly opted out.
fn default_sensitive() -> bool {
    true
}
1041
/// A model entry within a provider.
/// NOTE(review): not referenced elsewhere in this chunk — confirm it is still used.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelInfo {
    /// Model identifier (e.g., "gpt-4o", "claude-sonnet-4")
    pub id: String,
    /// Human-readable name
    pub name: String,
}
1050
/// Combined provider definition used in default_models.json.
/// Merges secret key definitions and well-known models into one entry per provider.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct DefaultProviderEntry {
    /// Provider identifier (e.g., "openai").
    id: String,
    /// Human-readable provider name.
    label: String,
    /// Secret keys this provider needs.
    keys: Vec<SecretKeyDefinition>,
    /// Well-known models for this provider (may be empty).
    models: Vec<crate::models::Model>,
}
1060
/// Top-level shape of the embedded `default_models.json` file.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct DefaultModelsFile {
    providers: Vec<DefaultProviderEntry>,
}
1065
1066fn load_default_providers() -> &'static [DefaultProviderEntry] {
1067    use std::sync::OnceLock;
1068    static PROVIDERS: OnceLock<Vec<DefaultProviderEntry>> = OnceLock::new();
1069    PROVIDERS.get_or_init(|| {
1070        let json = include_str!("default_models.json");
1071        let file: DefaultModelsFile =
1072            serde_json::from_str(json).expect("Failed to parse default_models.json");
1073        file.providers
1074    })
1075}
1076
/// Models grouped by provider (see `ModelProvider::well_known_models`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProviderModels {
    /// Provider identifier
    pub provider_id: String,
    /// Human-readable provider name
    pub provider_label: String,
    /// Available models for this provider
    pub models: Vec<crate::models::Model>,
}
1087
/// Provider models with configuration status (returned by API).
/// Same shape as `ProviderModels` plus the `configured` flag.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProviderModelsStatus {
    /// Provider identifier
    pub provider_id: String,
    /// Human-readable provider name
    pub provider_label: String,
    /// Whether the provider's API key is configured
    pub configured: bool,
    /// Available models for this provider
    pub models: Vec<crate::models::Model>,
}
1100
/// The default provider is OpenAI (the variant with no inline config fields).
/// Note: `ModelSettings::merge` relies on this when deciding whether a
/// provider was "explicitly" set.
impl Default for ModelProvider {
    fn default() -> Self {
        ModelProvider::OpenAI {}
    }
}
1106
1107impl ModelProvider {
1108    pub fn openai_base_url() -> String {
1109        "https://api.openai.com/v1".to_string()
1110    }
1111
1112    pub fn anthropic_base_url() -> Option<String> {
1113        None
1114    }
1115
1116    pub fn gemini_base_url() -> String {
1117        "https://generativelanguage.googleapis.com/v1beta/openai".to_string()
1118    }
1119
1120    pub fn azure_api_version() -> String {
1121        "2024-06-01".to_string()
1122    }
1123
1124    pub fn alibaba_cloud_base_url() -> String {
1125        "https://dashscope-intl.aliyuncs.com/compatible-mode/v1".to_string()
1126    }
1127
1128    /// Returns the provider type enum for this provider.
1129    pub fn provider_type(&self) -> crate::models::ProviderType {
1130        match self {
1131            ModelProvider::OpenAI {} => crate::models::ProviderType::OpenAI,
1132            ModelProvider::OpenAICompatible { .. } => {
1133                crate::models::ProviderType::Custom("openai_compat".to_string())
1134            }
1135            ModelProvider::AzureOpenAI { .. } => crate::models::ProviderType::Azure,
1136            ModelProvider::Anthropic { .. } => crate::models::ProviderType::Anthropic,
1137            ModelProvider::Gemini { .. } => crate::models::ProviderType::Gemini,
1138            ModelProvider::AzureAiFoundry { .. } => crate::models::ProviderType::AzureAiFoundry,
1139            ModelProvider::AwsBedrock { .. } => crate::models::ProviderType::AwsBedrock,
1140            ModelProvider::GoogleVertex { .. } => crate::models::ProviderType::GoogleVertex,
1141            ModelProvider::AlibabaCloud { .. } => crate::models::ProviderType::AlibabaCloud,
1142        }
1143    }
1144
1145    /// Returns the provider ID string for secret lookup and "provider/model" format.
1146    pub fn provider_id(&self) -> &str {
1147        match self {
1148            ModelProvider::OpenAI {} => "openai",
1149            ModelProvider::OpenAICompatible { .. } => "openai_compat",
1150            ModelProvider::AzureOpenAI { .. } => "azure_openai",
1151            ModelProvider::Anthropic { .. } => "anthropic",
1152            ModelProvider::Gemini { .. } => "gemini",
1153            ModelProvider::AzureAiFoundry { .. } => "azure_ai_foundry",
1154            ModelProvider::AwsBedrock { .. } => "aws_bedrock",
1155            ModelProvider::GoogleVertex { .. } => "google_vertex",
1156            ModelProvider::AlibabaCloud { .. } => "alibaba_cloud",
1157        }
1158    }
1159
1160    /// Returns the required secret keys for this provider.
1161    pub fn required_secret_keys(&self) -> Vec<&'static str> {
1162        match self {
1163            ModelProvider::OpenAI {} => vec!["OPENAI_API_KEY"],
1164            ModelProvider::OpenAICompatible { api_key, .. } => {
1165                if api_key.is_some() {
1166                    vec![]
1167                } else {
1168                    vec!["OPENAI_API_KEY"]
1169                }
1170            }
1171            ModelProvider::AzureOpenAI { api_key, .. } => {
1172                if api_key.is_some() {
1173                    vec![]
1174                } else {
1175                    vec!["AZURE_OPENAI_API_KEY"]
1176                }
1177            }
1178            ModelProvider::Anthropic { api_key, .. } => {
1179                if api_key.is_some() {
1180                    vec![]
1181                } else {
1182                    vec!["ANTHROPIC_API_KEY"]
1183                }
1184            }
1185            ModelProvider::Gemini { api_key, .. } => {
1186                if api_key.is_some() {
1187                    vec![]
1188                } else {
1189                    vec!["GEMINI_API_KEY"]
1190                }
1191            }
1192            ModelProvider::AzureAiFoundry { api_key, .. } => {
1193                if api_key.is_some() {
1194                    vec![]
1195                } else {
1196                    vec!["AZURE_AI_FOUNDRY_API_KEY"]
1197                }
1198            }
1199            ModelProvider::AwsBedrock { api_key, .. } => {
1200                if api_key.is_some() {
1201                    vec![]
1202                } else {
1203                    vec!["AWS_ACCESS_KEY_ID"]
1204                }
1205            }
1206            ModelProvider::GoogleVertex { api_key, .. } => {
1207                if api_key.is_some() {
1208                    vec![]
1209                } else {
1210                    vec!["GOOGLE_VERTEX_API_KEY"]
1211                }
1212            }
1213            ModelProvider::AlibabaCloud { api_key, .. } => {
1214                if api_key.is_some() {
1215                    vec![]
1216                } else {
1217                    vec!["DASHSCOPE_API_KEY"]
1218                }
1219            }
1220        }
1221    }
1222
1223    /// Returns all provider secret definitions, loaded from default_models.json.
1224    pub fn all_provider_definitions() -> Vec<ProviderSecretDefinition> {
1225        load_default_providers()
1226            .iter()
1227            .map(|p| ProviderSecretDefinition {
1228                id: p.id.clone(),
1229                label: p.label.clone(),
1230                keys: p.keys.clone(),
1231            })
1232            .collect()
1233    }
1234
1235    /// Returns the well-known models grouped by provider, loaded from default_models.json.
1236    pub fn well_known_models() -> Vec<ProviderModels> {
1237        load_default_providers()
1238            .iter()
1239            .filter(|p| !p.models.is_empty())
1240            .map(|p| ProviderModels {
1241                provider_id: p.id.clone(),
1242                provider_label: p.label.clone(),
1243                models: p.models.clone(),
1244            })
1245            .collect()
1246    }
1247
1248    /// Get the human-readable name for a provider
1249    pub fn display_name(&self) -> &'static str {
1250        match self {
1251            ModelProvider::OpenAI {} => "OpenAI",
1252            ModelProvider::OpenAICompatible { .. } => "OpenAI Compatible",
1253            ModelProvider::AzureOpenAI { .. } => "Azure",
1254            ModelProvider::Anthropic { .. } => "Anthropic",
1255            ModelProvider::Gemini { .. } => "Google Gemini",
1256            ModelProvider::AzureAiFoundry { .. } => "Azure AI Foundry",
1257            ModelProvider::AwsBedrock { .. } => "AWS Bedrock",
1258            ModelProvider::GoogleVertex { .. } => "Google Vertex AI",
1259            ModelProvider::AlibabaCloud { .. } => "Alibaba Cloud",
1260        }
1261    }
1262
1263    /// OTel `gen_ai.provider.name` attribute value for this provider.
1264    /// Uses the semantic convention identifiers from the 2025 GenAI spec.
1265    pub fn otel_provider_name(&self) -> &'static str {
1266        match self {
1267            ModelProvider::OpenAI { .. } => "openai",
1268            ModelProvider::OpenAICompatible { .. } => "openai",
1269            ModelProvider::AzureOpenAI { .. } => "azure.ai.openai",
1270            ModelProvider::Anthropic { .. } => "anthropic",
1271            ModelProvider::Gemini { .. } => "google.gemini",
1272            ModelProvider::AzureAiFoundry { .. } => "azure.ai.inference",
1273            ModelProvider::AwsBedrock { .. } => "aws.bedrock",
1274            ModelProvider::GoogleVertex { .. } => "gcp.vertex_ai",
1275            ModelProvider::AlibabaCloud { .. } => "alibaba_cloud",
1276        }
1277    }
1278}
1279
/// Model settings configuration.
/// A `ModelSettings` always has a valid model string.
/// Use `Option<ModelSettings>` when no model is configured yet.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct ModelSettings {
    /// Model identifier (e.g., "gpt-4o", "claude-sonnet-4").
    pub model: String,
    /// Optional/defaultable parameters, flattened into the same serialized object.
    #[serde(flatten)]
    pub inner: ModelSettingsInner,
}
1289
/// Optional/defaultable model parameters. Split from `ModelSettings` so callers
/// can construct `ModelSettings { model: "...", ..Default::default() }` easily
/// via the `inner` field having `Default`.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)]
pub struct ModelSettingsInner {
    /// Sampling temperature, if overridden.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,
    /// Maximum completion tokens, if overridden.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u32>,
    /// Context window budget; defaults to `default_context_size()` (20000).
    /// Not optional, so "unset" is indistinguishable from an explicit 20000
    /// (see `ModelSettings::merge`).
    #[serde(default = "default_context_size")]
    pub context_size: u32,
    /// Nucleus sampling parameter, if overridden.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,
    /// Frequency penalty, if overridden.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub frequency_penalty: Option<f32>,
    /// Presence penalty, if overridden.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub presence_penalty: Option<f32>,
    /// Backend provider; defaults to OpenAI.
    #[serde(default = "default_model_provider")]
    pub provider: ModelProvider,
    /// Additional parameters for the agent, if any.
    #[serde(default)]
    pub parameters: Option<serde_json::Value>,
    /// The format of the response, if specified.
    #[serde(default)]
    pub response_format: Option<serde_json::Value>,
    /// Which OpenAI-family API format to use (auto-detected by default).
    /// Only relevant for OpenAI, OpenAI-compatible, and Azure OpenAI providers.
    #[serde(default, skip_serializing_if = "is_default_api_format")]
    pub api_format: OpenAiApiFormat,
}
1320
1321impl ModelSettings {
1322    /// Create a new `ModelSettings` with the given model and default inner settings.
1323    pub fn new(model: impl Into<String>) -> Self {
1324        Self {
1325            model: model.into(),
1326            inner: ModelSettingsInner::default(),
1327        }
1328    }
1329
1330    /// Parse a "provider/model" string (e.g. "anthropic/claude-sonnet-4") into ModelSettings.
1331    /// Returns None if the format is invalid.
1332    /// For custom providers (prefixed with "custom_"), returns an OpenAICompatible provider
1333    /// with empty base_url/api_key — the caller must fill these from secrets/config.
1334    /// Parse "provider/model" string into ModelSettings.
1335    /// Returns Err with a descriptive message if the provider is not recognized.
1336    /// Returns Ok(None) if the input is empty or has no slash.
1337    pub fn from_provider_model_str(s: &str) -> Result<Option<Self>, String> {
1338        let Some((provider_str, model_id)) = s.split_once('/') else {
1339            return Ok(None);
1340        };
1341        if model_id.is_empty() {
1342            return Ok(None);
1343        }
1344        let provider = match provider_str {
1345            "openai" => ModelProvider::OpenAI {},
1346            "anthropic" => ModelProvider::Anthropic {
1347                base_url: None,
1348                api_key: None,
1349            },
1350            "azure_openai" | "azure" => ModelProvider::AzureOpenAI {
1351                base_url: String::new(),
1352                api_key: None,
1353                deployment: model_id.to_string(),
1354                api_version: ModelProvider::azure_api_version(),
1355            },
1356            "gemini" => ModelProvider::Gemini {
1357                base_url: ModelProvider::gemini_base_url(),
1358                api_key: None,
1359            },
1360            "azure_ai_foundry" => ModelProvider::AzureAiFoundry {
1361                base_url: String::new(),
1362                api_key: None,
1363            },
1364            "aws_bedrock" => ModelProvider::AwsBedrock {
1365                base_url: String::new(),
1366                api_key: None,
1367            },
1368            "google_vertex" => ModelProvider::GoogleVertex {
1369                base_url: String::new(),
1370                api_key: None,
1371                project_id: None,
1372            },
1373            "alibaba_cloud" => ModelProvider::AlibabaCloud {
1374                base_url: ModelProvider::alibaba_cloud_base_url(),
1375                api_key: None,
1376            },
1377            _ if provider_str.starts_with("custom_") => ModelProvider::OpenAICompatible {
1378                base_url: String::new(),
1379                api_key: None,
1380                project_id: None,
1381            },
1382            // Unknown providers — treat as OpenAI-compatible
1383            _ => ModelProvider::OpenAICompatible {
1384                base_url: String::new(),
1385                api_key: None,
1386                project_id: None,
1387            },
1388        };
1389        Ok(Some(Self {
1390            model: model_id.to_string(),
1391            inner: ModelSettingsInner {
1392                provider,
1393                ..Default::default()
1394            },
1395        }))
1396    }
1397
1398    /// Merge base (workspace) model settings with agent/request-level overrides.
1399    ///
1400    /// Provider resolution:
1401    /// - If the override explicitly sets a provider (not the default OpenAI),
1402    ///   the override's provider and model are used.
1403    /// - If only the base has a non-default provider and the override uses default
1404    ///   OpenAI, the base's provider AND model win — the override's bare model name
1405    ///   is ignored because it may not exist on the base provider.
1406    /// - Otherwise, the override's model wins if non-empty.
1407    ///
1408    /// Scalar fields (temperature, max_tokens, etc.) use override if present, else base.
1409    ///
1410    /// Returns `None` if the final model string is empty.
1411    pub fn merge(&self, override_settings: &ModelSettings) -> Option<ModelSettings> {
1412        let default_provider = ModelProvider::OpenAI {};
1413        let override_has_explicit_provider = std::mem::discriminant(&override_settings.inner.provider)
1414            != std::mem::discriminant(&default_provider);
1415        let base_has_explicit_provider = std::mem::discriminant(&self.inner.provider)
1416            != std::mem::discriminant(&default_provider);
1417
1418        let (provider, model) = if override_has_explicit_provider {
1419            // Override explicitly set a provider — use override's provider and model.
1420            let model = if !override_settings.model.is_empty() {
1421                override_settings.model.clone()
1422            } else {
1423                self.model.clone()
1424            };
1425            (override_settings.inner.provider.clone(), model)
1426        } else if base_has_explicit_provider {
1427            // Base uses a non-default provider and override didn't specify one — use
1428            // base's provider AND model to avoid mismatching model names.
1429            let model = if !self.model.is_empty() {
1430                self.model.clone()
1431            } else if !override_settings.model.is_empty() {
1432                override_settings.model.clone()
1433            } else {
1434                String::new()
1435            };
1436            (self.inner.provider.clone(), model)
1437        } else {
1438            // Both use default OpenAI — override model can win.
1439            let model = if !override_settings.model.is_empty() {
1440                override_settings.model.clone()
1441            } else {
1442                self.model.clone()
1443            };
1444            (self.inner.provider.clone(), model)
1445        };
1446
1447        if model.is_empty() {
1448            return None;
1449        }
1450
1451        let default_context_size = 20000u32;
1452        Some(ModelSettings {
1453            model,
1454            inner: ModelSettingsInner {
1455                temperature: override_settings
1456                    .inner
1457                    .temperature
1458                    .or(self.inner.temperature),
1459                max_tokens: override_settings.inner.max_tokens.or(self.inner.max_tokens),
1460                context_size: if override_settings.inner.context_size != default_context_size {
1461                    override_settings.inner.context_size
1462                } else {
1463                    self.inner.context_size
1464                },
1465                top_p: override_settings.inner.top_p.or(self.inner.top_p),
1466                frequency_penalty: override_settings
1467                    .inner
1468                    .frequency_penalty
1469                    .or(self.inner.frequency_penalty),
1470                presence_penalty: override_settings
1471                    .inner
1472                    .presence_penalty
1473                    .or(self.inner.presence_penalty),
1474                provider,
1475                parameters: if override_settings.inner.parameters.is_some() {
1476                    override_settings.inner.parameters.clone()
1477                } else {
1478                    self.inner.parameters.clone()
1479                },
1480                response_format: if override_settings.inner.response_format.is_some() {
1481                    override_settings.inner.response_format.clone()
1482                } else {
1483                    self.inner.response_format.clone()
1484                },
1485                api_format: if override_settings.inner.api_format != OpenAiApiFormat::Auto {
1486                    override_settings.inner.api_format.clone()
1487                } else {
1488                    self.inner.api_format.clone()
1489                },
1490            },
1491        })
1492    }
1493}
1494
// Default functions
/// Default version string stamped onto agent definitions.
pub fn default_agent_version() -> Option<String> {
    Some("0.2.2".to_string())
}
1499
/// Serde default for `ModelSettingsInner::provider`.
fn default_model_provider() -> ModelProvider {
    ModelProvider::OpenAI {}
}
1503
/// Serde default for `ModelSettingsInner::context_size`.
/// Also consulted by `ModelSettings::merge` to detect an "unset" override.
fn default_context_size() -> u32 {
    20000 // Default limit for general use - agents can override with higher values as needed
}
1507
/// Serde helper: skip serializing `api_format` when it is `Auto` (the default).
fn is_default_api_format(f: &OpenAiApiFormat) -> bool {
    *f == OpenAiApiFormat::Auto
}
1511
/// Serde default history size.
/// NOTE(review): not referenced in this chunk — presumably the default for a
/// history field declared elsewhere in the file; confirm it is still used.
fn default_history_size() -> Option<usize> {
    Some(5)
}
1515
1516impl StandardDefinition {
1517    pub fn validate(&self) -> anyhow::Result<()> {
1518        // Basic validation - can be expanded
1519        if self.name.is_empty() {
1520            return Err(anyhow::anyhow!("Agent name cannot be empty"));
1521        }
1522
1523        // Validate reflection configuration
1524        if let Some(ref reflection) = self.reflection
1525            && reflection.enabled
1526        {
1527            // If a custom reflection_agent is specified, validate the name
1528            if let Some(ref agent_name) = reflection.reflection_agent
1529                && agent_name.is_empty()
1530            {
1531                return Err(anyhow::anyhow!(
1532                    "Reflection agent name cannot be empty when specified"
1533                ));
1534            }
1535        }
1536
1537        Ok(())
1538    }
1539
1540    /// Validate that a reflection agent definition has the "reflect" tool configured.
1541    /// This is called at registration time when we have access to the full agent config.
1542    pub fn validate_reflection_agent(agent_def: &StandardDefinition) -> anyhow::Result<()> {
1543        let has_reflect_tool = agent_def
1544            .tools
1545            .as_ref()
1546            .map(|t| t.builtin.iter().any(|name| name == "reflect"))
1547            .unwrap_or(false);
1548
1549        if !has_reflect_tool {
1550            // The built-in reflection_agent gets the reflect tool automatically,
1551            // but custom reflection agents must explicitly list it
1552            anyhow::bail!(
1553                "Reflection agent '{}' must have the 'reflect' tool in its tools.builtin configuration",
1554                agent_def.name
1555            );
1556        }
1557
1558        Ok(())
1559    }
1560}
1561
1562impl From<StandardDefinition> for LlmDefinition {
1563    fn from(definition: StandardDefinition) -> Self {
1564        let model_settings = match (definition.model_settings, definition.context_size) {
1565            (Some(mut ms), Some(ctx)) => {
1566                ms.inner.context_size = ctx;
1567                Some(ms)
1568            }
1569            (ms, _) => ms,
1570        };
1571
1572        Self {
1573            name: definition.name,
1574            model_settings,
1575            tool_format: definition.tool_format,
1576            tool_delivery_mode: definition.tool_delivery_mode,
1577        }
1578    }
1579}
1580
1581impl ToolsConfig {
1582    /// Create a simple configuration with just built-in tools
1583    pub fn builtin_only(tools: Vec<&str>) -> Self {
1584        Self {
1585            builtin: tools.into_iter().map(|s| s.to_string()).collect(),
1586            ..Default::default()
1587        }
1588    }
1589
1590    /// Create a configuration that includes all tools from an MCP server
1591    pub fn mcp_all(server: &str) -> Self {
1592        Self {
1593            mcp: vec![McpToolConfig {
1594                server: server.to_string(),
1595                include: vec!["*".to_string()],
1596                exclude: vec![],
1597            }],
1598            ..Default::default()
1599        }
1600    }
1601
1602    /// Create a configuration with specific MCP tool patterns
1603    pub fn mcp_filtered(server: &str, include: Vec<&str>, exclude: Vec<&str>) -> Self {
1604        Self {
1605            mcp: vec![McpToolConfig {
1606                server: server.to_string(),
1607                include: include.into_iter().map(|s| s.to_string()).collect(),
1608                exclude: exclude.into_iter().map(|s| s.to_string()).collect(),
1609            }],
1610            ..Default::default()
1611        }
1612    }
1613}
1614
1615pub async fn parse_agent_markdown_content(content: &str) -> Result<StandardDefinition, AgentError> {
1616    // Split by --- to separate TOML frontmatter from markdown content
1617    let parts: Vec<&str> = content.split("---").collect();
1618
1619    if parts.len() < 3 {
1620        return Err(AgentError::Validation(
1621            "Invalid agent markdown format. Expected TOML frontmatter between --- markers"
1622                .to_string(),
1623        ));
1624    }
1625
1626    // Parse TOML frontmatter (parts[1] is between the first two --- markers)
1627    let toml_content = parts[1].trim();
1628    let mut agent_def: crate::StandardDefinition =
1629        toml::from_str(toml_content).map_err(|e| AgentError::Validation(e.to_string()))?;
1630
1631    // Validate agent name format using centralized validation
1632    if let Err(validation_error) = validate_plugin_name(&agent_def.name) {
1633        return Err(AgentError::Validation(format!(
1634            "Invalid agent name '{}': {}",
1635            agent_def.name, validation_error
1636        )));
1637    }
1638
1639    // Validate that agent name characters are valid (alphanumeric, underscore, or single '/' for namespacing)
1640    if !agent_def
1641        .name
1642        .chars()
1643        .all(|c| c.is_alphanumeric() || c == '_' || c == '/')
1644        || agent_def
1645            .name
1646            .chars()
1647            .next()
1648            .is_some_and(|c| c.is_numeric())
1649        || agent_def.name.chars().filter(|&c| c == '/').count() > 1
1650    {
1651        return Err(AgentError::Validation(format!(
1652            "Invalid agent name '{}': Agent names must be alphanumeric with underscores, at most one '/' for namespacing (e.g. '_system/plan'), cannot start with number.",
1653            agent_def.name
1654        )));
1655    }
1656
1657    // Extract markdown instructions (everything after the second ---)
1658    let instructions = parts[2..].join("---").trim().to_string();
1659
1660    // Set the instructions in the agent definition
1661    agent_def.instructions = instructions;
1662
1663    Ok(agent_def)
1664}
1665
/// Validate plugin name follows naming conventions
/// Plugin names must be valid identifiers. At most one '/' is allowed for workspace namespacing (e.g. 'workspace/agent').
pub fn validate_plugin_name(name: &str) -> Result<(), String> {
    if name.is_empty() {
        return Err("Plugin name cannot be empty".to_string());
    }

    if name.contains('-') {
        return Err(format!(
            "Plugin name '{}' cannot contain hyphens. Use underscores instead.",
            name
        ));
    }

    // At most one '/' separator is permitted (workspace/agent form).
    if name.matches('/').count() > 1 {
        return Err(format!(
            "Plugin name '{}' can contain at most one '/' for workspace namespacing (e.g. 'workspace/agent')",
            name
        ));
    }

    // Each slash-delimited segment must be a valid identifier:
    // non-empty, starting with a letter or underscore, then only
    // ASCII alphanumerics or underscores.
    for segment in name.split('/') {
        let mut chars = segment.chars();
        match chars.next() {
            None => {
                return Err(format!(
                    "Plugin name '{}' has an empty segment around '/'",
                    name
                ));
            }
            Some(first) if !first.is_ascii_alphabetic() && first != '_' => {
                return Err(format!(
                    "Each segment in '{}' must start with a letter or underscore",
                    name
                ));
            }
            Some(_) => {}
        }
        // The first char already satisfies the body-character rule, so
        // checking only the remaining chars is equivalent to checking all.
        if !chars.all(|c| c.is_ascii_alphanumeric() || c == '_') {
            return Err(format!(
                "Plugin name '{}' can only contain letters, numbers, underscores, and at most one '/' for namespacing",
                name
            ));
        }
    }

    Ok(())
}
1720
#[cfg(test)]
mod tests {
    use super::*;

    // ── StandardDefinition::compaction_enabled serde behavior ──────────────
    // compaction_enabled defaults to true when absent, and `true` is skipped
    // on serialization (only the non-default `false` is written out).

    #[test]
    fn test_compaction_enabled_defaults_to_true_via_serde() {
        // serde default uses default_compaction_enabled() -> true
        let json = r#"{"name": "test"}"#;
        let def: StandardDefinition = serde_json::from_str(json).unwrap();
        assert!(def.compaction_enabled);
    }

    #[test]
    fn test_compaction_enabled_deserializes_true_when_absent() {
        let json = r#"{"name": "test", "description": "test agent"}"#;
        let def: StandardDefinition = serde_json::from_str(json).unwrap();
        assert!(def.compaction_enabled);
    }

    #[test]
    fn test_compaction_enabled_deserializes_false() {
        let json = r#"{"name": "test", "description": "test agent", "compaction_enabled": false}"#;
        let def: StandardDefinition = serde_json::from_str(json).unwrap();
        assert!(!def.compaction_enabled);
    }

    #[test]
    fn test_compaction_enabled_true_skipped_in_serialization() {
        // Default value (true) should not appear in serialized output.
        let def = StandardDefinition {
            name: "test".to_string(),
            compaction_enabled: true,
            ..Default::default()
        };
        let json = serde_json::to_string(&def).unwrap();
        assert!(!json.contains("compaction_enabled"));
    }

    #[test]
    fn test_compaction_enabled_false_serialized() {
        // Non-default value (false) must round-trip through serialization.
        let def = StandardDefinition {
            name: "test".to_string(),
            compaction_enabled: false,
            ..Default::default()
        };
        let json = serde_json::to_string(&def).unwrap();
        assert!(json.contains("\"compaction_enabled\":false"));
    }

    // ── ModelSettings::max_tokens optionality ───────────────────────────────
    // max_tokens is Option<u64>: None when absent, and None is skipped on
    // serialization.

    #[test]
    fn test_max_tokens_optional_defaults_to_none() {
        let def = StandardDefinition::default();
        assert!(def.model_settings().is_none());
    }

    #[test]
    fn test_max_tokens_deserializes_when_present() {
        let json =
            r#"{"name": "test", "model_settings": {"model": "gpt-4.1", "max_tokens": 4096}}"#;
        let def: StandardDefinition = serde_json::from_str(json).unwrap();
        assert_eq!(def.model_settings().unwrap().inner.max_tokens, Some(4096));
    }

    #[test]
    fn test_max_tokens_none_when_absent() {
        let json = r#"{"name": "test", "model_settings": {"model": "gpt-4.1"}}"#;
        let def: StandardDefinition = serde_json::from_str(json).unwrap();
        assert!(def.model_settings().unwrap().inner.max_tokens.is_none());
    }

    #[test]
    fn test_max_tokens_none_skipped_in_serialization() {
        let settings = ModelSettings {
            model: "test-model".to_string(),
            inner: ModelSettingsInner {
                max_tokens: None,
                provider: ModelProvider::OpenAI {},
                ..Default::default()
            },
        };
        let json = serde_json::to_string(&settings).unwrap();
        assert!(!json.contains("max_tokens"));
    }

    #[test]
    fn test_max_tokens_some_serialized() {
        let settings = ModelSettings {
            model: "test-model".to_string(),
            inner: ModelSettingsInner {
                max_tokens: Some(2048),
                provider: ModelProvider::OpenAI {},
                ..Default::default()
            },
        };
        let json = serde_json::to_string(&settings).unwrap();
        assert!(json.contains("\"max_tokens\":2048"));
    }

    // ── OpenAiApiFormat::Auto model-name detection ──────────────────────────
    // Auto resolves to the Responses API for codex/pro/deep-research model
    // names and to Completions for everything else; explicit variants
    // override detection.

    #[test]
    fn test_api_format_auto_detect_codex_prefix() {
        let fmt = OpenAiApiFormat::Auto;
        assert_eq!(
            fmt.resolve("codex-mini-latest"),
            ResolvedOpenAiApiFormat::Responses
        );
        assert_eq!(
            fmt.resolve("codex-mini-2025-01-24"),
            ResolvedOpenAiApiFormat::Responses
        );
    }

    #[test]
    fn test_api_format_auto_detect_codex_suffix() {
        let fmt = OpenAiApiFormat::Auto;
        assert_eq!(
            fmt.resolve("gpt-5.1-codex"),
            ResolvedOpenAiApiFormat::Responses
        );
        assert_eq!(
            fmt.resolve("gpt-5.3-codex"),
            ResolvedOpenAiApiFormat::Responses
        );
    }

    #[test]
    fn test_api_format_auto_detect_pro_models() {
        let fmt = OpenAiApiFormat::Auto;
        assert_eq!(fmt.resolve("gpt-5-pro"), ResolvedOpenAiApiFormat::Responses);
        assert_eq!(
            fmt.resolve("gpt-5.2-pro"),
            ResolvedOpenAiApiFormat::Responses
        );
        assert_eq!(
            fmt.resolve("gpt-5.4-pro"),
            ResolvedOpenAiApiFormat::Responses
        );
        assert_eq!(fmt.resolve("o3-pro"), ResolvedOpenAiApiFormat::Responses);
    }

    #[test]
    fn test_api_format_auto_detect_deep_research_models() {
        let fmt = OpenAiApiFormat::Auto;
        assert_eq!(
            fmt.resolve("o3-deep-research"),
            ResolvedOpenAiApiFormat::Responses
        );
        assert_eq!(
            fmt.resolve("o4-mini-deep-research"),
            ResolvedOpenAiApiFormat::Responses
        );
    }

    #[test]
    fn test_api_format_auto_detect_non_codex() {
        // Plain chat models fall through to the Completions API.
        let fmt = OpenAiApiFormat::Auto;
        assert_eq!(fmt.resolve("gpt-4o"), ResolvedOpenAiApiFormat::Completions);
        assert_eq!(fmt.resolve("gpt-4.1"), ResolvedOpenAiApiFormat::Completions);
        assert_eq!(fmt.resolve("gpt-5"), ResolvedOpenAiApiFormat::Completions);
        assert_eq!(fmt.resolve("o1"), ResolvedOpenAiApiFormat::Completions);
        assert_eq!(
            fmt.resolve("gpt-5.4-mini"),
            ResolvedOpenAiApiFormat::Completions
        );
        assert_eq!(fmt.resolve("o3-mini"), ResolvedOpenAiApiFormat::Completions);
    }

    #[test]
    fn test_api_format_explicit_override() {
        // Explicit Responses overrides auto-detect even for non-codex models
        assert_eq!(
            OpenAiApiFormat::Responses.resolve("gpt-4o"),
            ResolvedOpenAiApiFormat::Responses
        );
        // Explicit Completions overrides auto-detect even for codex models
        assert_eq!(
            OpenAiApiFormat::Completions.resolve("codex-mini-latest"),
            ResolvedOpenAiApiFormat::Completions
        );
    }

    #[test]
    fn test_api_format_defaults_to_auto() {
        let inner = ModelSettingsInner::default();
        assert_eq!(inner.api_format, OpenAiApiFormat::Auto);
    }

    #[test]
    fn test_api_format_auto_skipped_in_serialization() {
        // Default (Auto) is omitted from serialized output.
        let settings = ModelSettings {
            model: "test-model".to_string(),
            inner: ModelSettingsInner {
                provider: ModelProvider::OpenAI {},
                ..Default::default()
            },
        };
        let json = serde_json::to_string(&settings).unwrap();
        assert!(!json.contains("api_format"));
    }

    #[test]
    fn test_api_format_responses_serialized() {
        // Non-default variant serializes in snake_case.
        let settings = ModelSettings {
            model: "test-model".to_string(),
            inner: ModelSettingsInner {
                api_format: OpenAiApiFormat::Responses,
                provider: ModelProvider::OpenAI {},
                ..Default::default()
            },
        };
        let json = serde_json::to_string(&settings).unwrap();
        assert!(json.contains("\"api_format\":\"responses\""));
    }

    #[test]
    fn test_api_format_deserializes_from_toml() {
        // ModelSettings must also deserialize from TOML (agent frontmatter).
        let toml_str = r#"
            model = "codex-mini-latest"
            api_format = "responses"
            [provider]
            name = "openai"
        "#;
        let settings: ModelSettings = toml::from_str(toml_str).unwrap();
        assert_eq!(settings.inner.api_format, OpenAiApiFormat::Responses);
    }

    // ── ToolDeliveryMode tests ────────────────────────────────────

    #[test]
    fn test_tool_delivery_mode_defaults_to_deferred() {
        let mode: ToolDeliveryMode = Default::default();
        assert_eq!(mode, ToolDeliveryMode::Deferred);
    }

    #[test]
    fn test_tool_delivery_mode_backwards_compat_all_tools() {
        // Old configs that used "all_tools" should deserialize to Full
        let json = r#""all_tools""#;
        let mode: ToolDeliveryMode = serde_json::from_str(json).unwrap();
        assert_eq!(mode, ToolDeliveryMode::Full);
    }

    #[test]
    fn test_tool_delivery_mode_backwards_compat_tool_search() {
        // Old configs that used "tool_search" should deserialize to Deferred
        let json = r#""tool_search""#;
        let mode: ToolDeliveryMode = serde_json::from_str(json).unwrap();
        assert_eq!(mode, ToolDeliveryMode::Deferred);
    }

    #[test]
    fn test_tools_config_is_core_tool() {
        let config = ToolsConfig::default();
        assert!(config.is_core_tool("final"));
        assert!(config.is_core_tool("tool_search"));
        assert!(config.is_core_tool("execute_shell"));
        assert!(config.is_core_tool("call_coder"));
        assert!(!config.is_core_tool("browsr_scrape"));
    }

    #[test]
    fn test_tools_config_always_full_schema() {
        // Tools listed in always_full_schema are treated as core tools.
        let config = ToolsConfig {
            always_full_schema: vec!["browsr_scrape".to_string()],
            ..Default::default()
        };
        assert!(config.is_core_tool("browsr_scrape"));
        assert!(!config.is_core_tool("browsr_browser"));
    }

    #[test]
    fn test_effective_delivery_mode_full_stays_full() {
        let config = ToolsConfig {
            delivery_mode: ToolDeliveryMode::Full,
            ..Default::default()
        };
        // Even with many tools, Full stays Full
        assert_eq!(config.effective_delivery_mode(100), ToolDeliveryMode::Full);
    }

    #[test]
    fn test_effective_delivery_mode_deferred_stays_deferred() {
        let config = ToolsConfig {
            delivery_mode: ToolDeliveryMode::Deferred,
            deferred_threshold: Some(20),
            ..Default::default()
        };
        // Deferred always stays Deferred regardless of count
        assert_eq!(
            config.effective_delivery_mode(10),
            ToolDeliveryMode::Deferred
        );
    }

    #[test]
    fn test_effective_delivery_mode_deferred_over_threshold() {
        let config = ToolsConfig {
            delivery_mode: ToolDeliveryMode::Deferred,
            deferred_threshold: Some(10),
            ..Default::default()
        };
        // Over threshold: stays Deferred
        assert_eq!(
            config.effective_delivery_mode(15),
            ToolDeliveryMode::Deferred
        );
    }

    // ── RuntimeMode serde round-trip ────────────────────────────────────────

    #[test]
    fn test_runtime_mode_serde() {
        let mode: RuntimeMode = serde_json::from_str("\"cloud\"").unwrap();
        assert_eq!(mode, RuntimeMode::Cloud);
        let mode: RuntimeMode = serde_json::from_str("\"cli\"").unwrap();
        assert_eq!(mode, RuntimeMode::Cli);
        let mode: RuntimeMode = serde_json::from_str("\"browser\"").unwrap();
        assert_eq!(mode, RuntimeMode::Browser);
        assert_eq!(RuntimeMode::default(), RuntimeMode::Cloud);
        let json = serde_json::to_string(&RuntimeMode::Cli).unwrap();
        assert_eq!(json, "\"cli\"");
    }

    // ── ModelSettings::merge tests ──────────────────────────────────────────
    // Merge semantics under test: `base` is the workspace-level config,
    // `agent` is the per-agent override. Which side wins depends on whether
    // the base carries an explicit (non-default-OpenAI) provider.

    #[test]
    fn merge_both_default_openai_agent_model_wins() {
        let base = ModelSettings::new("gpt-5.1");
        let agent = ModelSettings::new("gpt-4.1-mini");

        let result = base.merge(&agent).unwrap();
        assert_eq!(result.model, "gpt-4.1-mini");
        assert!(matches!(result.inner.provider, ModelProvider::OpenAI {}));
    }

    #[test]
    fn merge_both_default_openai_base_model_used_when_agent_empty() {
        let base = ModelSettings::new("gpt-5.1");
        let agent = ModelSettings::new("");

        let result = base.merge(&agent).unwrap();
        assert_eq!(result.model, "gpt-5.1");
    }

    #[test]
    fn merge_agent_explicit_provider_wins() {
        // An agent with an explicitly chosen provider takes both its model
        // and provider over the base's custom provider.
        let base = ModelSettings {
            model: "gpt-5.1".into(),
            inner: ModelSettingsInner {
                provider: ModelProvider::OpenAICompatible {
                    base_url: "https://custom.com/v1".into(),
                    api_key: Some("key".into()),
                    project_id: None,
                },
                ..Default::default()
            },
        };
        let agent = ModelSettings {
            model: "claude-sonnet-4".into(),
            inner: ModelSettingsInner {
                provider: ModelProvider::Anthropic {
                    base_url: None,
                    api_key: None,
                },
                ..Default::default()
            },
        };

        let result = base.merge(&agent).unwrap();
        assert_eq!(result.model, "claude-sonnet-4");
        assert!(matches!(result.inner.provider, ModelProvider::Anthropic { .. }));
    }

    #[test]
    fn merge_agent_explicit_provider_no_model_uses_base() {
        // Agent provider wins, but an empty agent model falls back to base.
        let base = ModelSettings::new("gpt-5.1");
        let agent = ModelSettings {
            model: "".into(),
            inner: ModelSettingsInner {
                provider: ModelProvider::Anthropic {
                    base_url: None,
                    api_key: None,
                },
                ..Default::default()
            },
        };

        let result = base.merge(&agent).unwrap();
        assert_eq!(result.model, "gpt-5.1");
        assert!(matches!(result.inner.provider, ModelProvider::Anthropic { .. }));
    }

    #[test]
    fn merge_workspace_custom_provider_overrides_agent_model() {
        let base = ModelSettings {
            model: "gpt-5.4".into(),
            inner: ModelSettingsInner {
                provider: ModelProvider::OpenAICompatible {
                    base_url: "https://custom.azure.com/openai/v1".into(),
                    api_key: Some("test-key".into()),
                    project_id: None,
                },
                ..Default::default()
            },
        };
        // Agent has no explicit provider (default OpenAI) but different model
        let agent = ModelSettings::new("gpt-5.1");

        let result = base.merge(&agent).unwrap();
        assert_eq!(result.model, "gpt-5.4");
        assert!(matches!(result.inner.provider, ModelProvider::OpenAICompatible { .. }));
    }

    #[test]
    fn merge_workspace_custom_provider_agent_empty_model() {
        let base = ModelSettings {
            model: "gpt-5.4".into(),
            inner: ModelSettingsInner {
                provider: ModelProvider::OpenAICompatible {
                    base_url: "https://custom.azure.com/openai/v1".into(),
                    api_key: Some("test-key".into()),
                    project_id: None,
                },
                ..Default::default()
            },
        };
        let agent = ModelSettings::new("");

        let result = base.merge(&agent).unwrap();
        assert_eq!(result.model, "gpt-5.4");
    }

    #[test]
    fn merge_both_empty_returns_none() {
        // No usable model on either side: merge yields None.
        let base = ModelSettings::new("");
        let agent = ModelSettings::new("");

        assert!(base.merge(&agent).is_none());
    }

    #[test]
    fn merge_workspace_empty_agent_empty_returns_none() {
        // A custom provider alone (no model anywhere) is still None.
        let base = ModelSettings {
            model: "".into(),
            inner: ModelSettingsInner {
                provider: ModelProvider::OpenAICompatible {
                    base_url: "https://custom.com".into(),
                    api_key: None,
                    project_id: None,
                },
                ..Default::default()
            },
        };
        let agent = ModelSettings::new("");

        assert!(base.merge(&agent).is_none());
    }

    #[test]
    fn merge_temperature_max_tokens_override() {
        // Per-field merge: agent values override when set, base values
        // survive when the agent leaves them unset.
        let base = ModelSettings {
            model: "gpt-5.1".into(),
            inner: ModelSettingsInner {
                temperature: Some(0.5),
                max_tokens: Some(1000),
                top_p: Some(0.9),
                ..Default::default()
            },
        };
        let agent = ModelSettings {
            model: "gpt-4.1-mini".into(),
            inner: ModelSettingsInner {
                temperature: Some(0.9),
                max_tokens: None, // no override
                ..Default::default()
            },
        };

        let result = base.merge(&agent).unwrap();
        assert_eq!(result.model, "gpt-4.1-mini");
        assert_eq!(result.inner.temperature, Some(0.9));
        assert_eq!(result.inner.max_tokens, Some(1000)); // base value preserved
        assert_eq!(result.inner.top_p, Some(0.9));      // base value preserved
    }

    #[test]
    fn merge_context_size_non_default_wins() {
        // context_size is not Option-wrapped, so merge treats the default
        // value (20000) as "unset".
        let base = ModelSettings {
            model: "gpt-5.1".into(),
            inner: ModelSettingsInner {
                context_size: 20000, // default
                ..Default::default()
            },
        };
        let agent = ModelSettings {
            model: "gpt-4.1-mini".into(),
            inner: ModelSettingsInner {
                context_size: 100000, // explicitly set
                ..Default::default()
            },
        };

        let result = base.merge(&agent).unwrap();
        assert_eq!(result.inner.context_size, 100000);
    }

    #[test]
    fn merge_context_size_default_falls_back() {
        let base = ModelSettings {
            model: "gpt-5.1".into(),
            inner: ModelSettingsInner {
                context_size: 128000,
                ..Default::default()
            },
        };
        let agent = ModelSettings {
            model: "gpt-4.1-mini".into(),
            inner: ModelSettingsInner {
                context_size: 20000, // default — should use base
                ..Default::default()
            },
        };

        let result = base.merge(&agent).unwrap();
        assert_eq!(result.inner.context_size, 128000);
    }

    #[test]
    fn merge_azure_ai_foundry_base_url_preserved() {
        // Provider payload fields (base_url, api_key) survive the merge.
        let base = ModelSettings {
            model: "gpt-5.4".into(),
            inner: ModelSettingsInner {
                provider: ModelProvider::AzureAiFoundry {
                    base_url: "https://myresource.openai.azure.com".into(),
                    api_key: Some("test-key".into()),
                },
                ..Default::default()
            },
        };
        let agent = ModelSettings::new("gpt-5.1");

        let result = base.merge(&agent).unwrap();
        assert_eq!(result.model, "gpt-5.4"); // workspace model wins
        assert!(matches!(result.inner.provider, ModelProvider::AzureAiFoundry { .. }));
        if let ModelProvider::AzureAiFoundry { base_url, .. } = result.inner.provider {
            assert_eq!(base_url, "https://myresource.openai.azure.com");
        }
    }

    #[test]
    fn merge_anthropic_provider_preserves_base_url() {
        let base = ModelSettings {
            model: "claude-sonnet-4".into(),
            inner: ModelSettingsInner {
                provider: ModelProvider::Anthropic {
                    base_url: Some("https://custom.anthropic.com".into()),
                    api_key: Some("key".into()),
                },
                temperature: Some(0.7),
                ..Default::default()
            },
        };
        let agent = ModelSettings::new("");

        let result = base.merge(&agent).unwrap();
        assert_eq!(result.model, "claude-sonnet-4");
        assert_eq!(result.inner.temperature, Some(0.7));
        if let ModelProvider::Anthropic { base_url, api_key } = result.inner.provider {
            assert_eq!(base_url, Some("https://custom.anthropic.com".into()));
            assert_eq!(api_key, Some("key".into()));
        }
    }

    #[test]
    fn merge_response_format_agent_wins() {
        let base = ModelSettings {
            model: "gpt-5.1".into(),
            inner: ModelSettingsInner {
                response_format: Some(serde_json::json!({"type": "text"})),
                ..Default::default()
            },
        };
        let agent = ModelSettings {
            model: "gpt-4.1-mini".into(),
            inner: ModelSettingsInner {
                response_format: Some(serde_json::json!({"type": "json_object"})),
                ..Default::default()
            },
        };

        let result = base.merge(&agent).unwrap();
        assert_eq!(
            result.inner.response_format,
            Some(serde_json::json!({"type": "json_object"}))
        );
    }

    #[test]
    fn merge_response_format_base_fallback() {
        let base = ModelSettings {
            model: "gpt-5.1".into(),
            inner: ModelSettingsInner {
                response_format: Some(serde_json::json!({"type": "text"})),
                ..Default::default()
            },
        };
        let agent = ModelSettings::new("gpt-4.1-mini");

        let result = base.merge(&agent).unwrap();
        assert_eq!(
            result.inner.response_format,
            Some(serde_json::json!({"type": "text"}))
        );
    }

    #[test]
    fn merge_parameters_agent_wins() {
        let base = ModelSettings {
            model: "gpt-5.1".into(),
            inner: ModelSettingsInner {
                parameters: Some(serde_json::json!({"key": "base"})),
                ..Default::default()
            },
        };
        let agent = ModelSettings {
            model: "gpt-4.1-mini".into(),
            inner: ModelSettingsInner {
                parameters: Some(serde_json::json!({"key": "agent"})),
                ..Default::default()
            },
        };

        let result = base.merge(&agent).unwrap();
        assert_eq!(
            result.inner.parameters,
            Some(serde_json::json!({"key": "agent"}))
        );
    }
}