// open_agent/types.rs

//! Core type definitions for the Open Agent SDK.
//!
//! This module contains the fundamental data structures used throughout the SDK for
//! configuring and interacting with AI agents. The type system is organized into three
//! main categories:
//!
//! # Agent Configuration
//!
//! - [`AgentOptions`]: Main configuration struct for agent behavior, model settings,
//!   and tool management
//! - [`AgentOptionsBuilder`]: Builder pattern implementation for constructing
//!   [`AgentOptions`] with validation
//!
//! # Message System
//!
//! The SDK uses a flexible message system that supports multi-modal content:
//!
//! - [`Message`]: Container for conversation messages with role and content
//! - [`MessageRole`]: Enum defining who sent the message (System, User, Assistant, Tool)
//! - [`ContentBlock`]: Enum for different content types (text, tool use, tool results)
//! - [`TextBlock`]: Simple text content
//! - [`ToolUseBlock`]: Represents an AI request to execute a tool
//! - [`ToolResultBlock`]: Contains the result of a tool execution
//!
//! # OpenAI API Compatibility
//!
//! The SDK communicates with LLM providers using the OpenAI-compatible API format.
//! These types handle serialization/deserialization for streaming responses:
//!
//! - [`OpenAIRequest`]: Request payload sent to the API
//! - [`OpenAIMessage`]: Message format for the OpenAI API
//! - [`OpenAIChunk`]: Streaming response chunk from the API
//! - [`OpenAIToolCall`], [`OpenAIFunction`]: Tool calling format
//! - [`OpenAIDelta`], [`OpenAIToolCallDelta`]: Incremental updates in streaming
//!
//! # Architecture Overview
//!
//! The type system is designed to:
//!
//! 1. **Separate concerns**: Internal SDK types (Message, ContentBlock) are distinct
//!    from the API wire format (OpenAI types), allowing flexibility in provider support
//! 2. **Enable streaming**: OpenAI types support incremental delta parsing for
//!    real-time responses
//! 3. **Support tool use**: First-class support for function calling with proper
//!    request/response tracking
//! 4. **Provide ergonomics**: Builder pattern and convenience constructors make
//!    common operations simple
//!
//! # Example
//!
//! ```no_run
//! use open_agent::{AgentOptions, Message};
//!
//! // Build agent configuration
//! let options = AgentOptions::builder()
//!     .model("qwen2.5-32b-instruct")
//!     .base_url("http://localhost:1234/v1")
//!     .system_prompt("You are a helpful assistant")
//!     .max_turns(10)
//!     .auto_execute_tools(true)
//!     .build()
//!     .expect("Valid configuration");
//!
//! // Create a user message
//! let msg = Message::user("Hello, how are you?");
//! ```

use crate::Error;
use crate::hooks::Hooks;
use crate::tools::Tool;
use serde::{Deserialize, Serialize};
use std::sync::Arc;

// ============================================================================
// NEWTYPE WRAPPERS FOR COMPILE-TIME TYPE SAFETY
// ============================================================================

/// Validated model name with compile-time type safety.
///
/// This newtype wrapper ensures that model names are validated once, at
/// construction, rather than at every use site, catching invalid
/// configurations early in development.
///
/// # Validation Rules
///
/// - Must not be empty
/// - Must not be only whitespace
///
/// # Example
///
/// ```
/// use open_agent::ModelName;
///
/// // Valid model name
/// let model = ModelName::new("qwen2.5-32b-instruct").unwrap();
/// assert_eq!(model.as_str(), "qwen2.5-32b-instruct");
///
/// // Invalid: empty string
/// assert!(ModelName::new("").is_err());
///
/// // Invalid: whitespace only
/// assert!(ModelName::new("   ").is_err());
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ModelName(String);

impl ModelName {
    /// Creates a new `ModelName` after validation.
    ///
    /// # Errors
    ///
    /// Returns an error if the model name is empty or contains only whitespace.
    pub fn new(name: impl Into<String>) -> crate::Result<Self> {
        let name = name.into();
        let trimmed = name.trim();

        if trimmed.is_empty() {
            return Err(Error::invalid_input(
                "Model name cannot be empty or whitespace",
            ));
        }

        Ok(ModelName(name))
    }

    /// Returns the model name as a string slice.
    pub fn as_str(&self) -> &str {
        &self.0
    }

    /// Consumes the `ModelName` and returns the inner `String`.
    pub fn into_inner(self) -> String {
        self.0
    }
}

impl std::fmt::Display for ModelName {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

/// Validated base URL with compile-time type safety.
///
/// This newtype wrapper ensures that base URLs are validated once, at
/// construction, rather than at every use site, catching invalid
/// configurations early in development.
///
/// # Validation Rules
///
/// - Must not be empty
/// - Must start with `http://` or `https://`
///
/// # Example
///
/// ```
/// use open_agent::BaseUrl;
///
/// // Valid base URLs
/// let url = BaseUrl::new("http://localhost:1234/v1").unwrap();
/// assert_eq!(url.as_str(), "http://localhost:1234/v1");
///
/// let url = BaseUrl::new("https://api.openai.com/v1").unwrap();
/// assert_eq!(url.as_str(), "https://api.openai.com/v1");
///
/// // Invalid: no http/https prefix
/// assert!(BaseUrl::new("localhost:1234").is_err());
///
/// // Invalid: empty string
/// assert!(BaseUrl::new("").is_err());
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct BaseUrl(String);

impl BaseUrl {
    /// Creates a new `BaseUrl` after validation.
    ///
    /// # Errors
    ///
    /// Returns an error if the URL is empty or doesn't start with http:// or https://.
    pub fn new(url: impl Into<String>) -> crate::Result<Self> {
        let url = url.into();
        let trimmed = url.trim();

        if trimmed.is_empty() {
            return Err(Error::invalid_input("base_url cannot be empty"));
        }

        if !trimmed.starts_with("http://") && !trimmed.starts_with("https://") {
            return Err(Error::invalid_input(
                "base_url must start with http:// or https://",
            ));
        }

        Ok(BaseUrl(url))
    }

    /// Returns the base URL as a string slice.
    pub fn as_str(&self) -> &str {
        &self.0
    }

    /// Consumes the `BaseUrl` and returns the inner `String`.
    pub fn into_inner(self) -> String {
        self.0
    }
}

impl std::fmt::Display for BaseUrl {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

/// Validated temperature value with compile-time type safety.
///
/// This newtype wrapper ensures that temperature values are validated once, at
/// construction, rather than at every use site, catching invalid
/// configurations early in development.
///
/// # Validation Rules
///
/// - Must be between 0.0 and 2.0 (inclusive)
///
/// # Example
///
/// ```
/// use open_agent::Temperature;
///
/// // Valid temperatures
/// let temp = Temperature::new(0.7).unwrap();
/// assert_eq!(temp.value(), 0.7);
///
/// let temp = Temperature::new(0.0).unwrap();
/// assert_eq!(temp.value(), 0.0);
///
/// let temp = Temperature::new(2.0).unwrap();
/// assert_eq!(temp.value(), 2.0);
///
/// // Invalid: below range
/// assert!(Temperature::new(-0.1).is_err());
///
/// // Invalid: above range
/// assert!(Temperature::new(2.1).is_err());
/// ```
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Temperature(f32);

impl Temperature {
    /// Creates a new `Temperature` after validation.
    ///
    /// # Errors
    ///
    /// Returns an error if the temperature is not between 0.0 and 2.0 (inclusive).
    pub fn new(temp: f32) -> crate::Result<Self> {
        if !(0.0..=2.0).contains(&temp) {
            return Err(Error::invalid_input(
                "temperature must be between 0.0 and 2.0",
            ));
        }

        Ok(Temperature(temp))
    }

    /// Returns the temperature value.
    pub fn value(&self) -> f32 {
        self.0
    }
}

impl std::fmt::Display for Temperature {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

// ============================================================================
// AGENT CONFIGURATION
// ============================================================================

/// Configuration options for an AI agent instance.
///
/// `AgentOptions` controls all aspects of agent behavior including model selection,
/// conversation management, tool usage, and lifecycle hooks. This struct should be
/// constructed using [`AgentOptions::builder()`] rather than direct instantiation
/// to ensure required fields are validated.
///
/// # Architecture
///
/// The options are organized into several functional areas:
///
/// - **Model Configuration**: `model`, `base_url`, `api_key`, `temperature`, `max_tokens`
/// - **Conversation Control**: `system_prompt`, `max_turns`, `timeout`
/// - **Tool Management**: `tools`, `auto_execute_tools`, `max_tool_iterations`
/// - **Lifecycle Hooks**: `hooks` for monitoring and interception
///
/// # Thread Safety
///
/// Tools are wrapped in `Arc<Tool>` to allow efficient cloning and sharing across
/// threads, as agents may need to be cloned for parallel processing.
///
/// # Examples
///
/// ```no_run
/// use open_agent::AgentOptions;
///
/// let options = AgentOptions::builder()
///     .model("qwen2.5-32b-instruct")
///     .base_url("http://localhost:1234/v1")
///     .system_prompt("You are a helpful coding assistant")
///     .max_turns(5)
///     .temperature(0.7)
///     .build()
///     .expect("Valid configuration");
/// ```
#[derive(Clone)]
pub struct AgentOptions {
    /// System prompt that defines the agent's behavior and personality.
    ///
    /// This is sent as the first message in the conversation to establish
    /// context and instructions. Can be empty if no system-level guidance
    /// is needed.
    system_prompt: String,

    /// Model identifier for the LLM to use (e.g., "qwen2.5-32b-instruct", "gpt-4").
    ///
    /// This must match a model available at the configured `base_url`.
    /// Different models have varying capabilities for tool use, context
    /// length, and response quality.
    model: String,

    /// OpenAI-compatible API endpoint URL (e.g., "http://localhost:1234/v1").
    ///
    /// The SDK communicates using the OpenAI chat completions API format,
    /// which is widely supported by local inference servers (LM Studio,
    /// llama.cpp, vLLM) and cloud providers.
    base_url: String,

    /// API authentication key for the provider.
    ///
    /// Many local servers don't require authentication, so the default
    /// "not-needed" is often sufficient. For cloud providers like OpenAI,
    /// set this to your actual API key.
    api_key: String,

    /// Maximum number of conversation turns (user message + assistant response = 1 turn).
    ///
    /// This limits how long a conversation can continue. In auto-execution mode
    /// with tools, this prevents infinite loops. Set to 1 for single-shot
    /// interactions or higher for multi-turn conversations.
    max_turns: u32,

    /// Maximum tokens the model should generate in a single response.
    ///
    /// `None` uses the provider's default. Lower values constrain response
    /// length, which can be useful for cost control or ensuring concise answers.
    /// Note this is separate from the model's context window size.
    max_tokens: Option<u32>,

    /// Sampling temperature for response generation (typically 0.0 to 2.0).
    ///
    /// - 0.0: Deterministic, always picks most likely tokens
    /// - 0.7: Balanced creativity and consistency (default)
    /// - 1.0+: More random and creative responses
    ///
    /// Lower temperatures are better for factual tasks, higher for creative ones.
    temperature: f32,

    /// HTTP request timeout in seconds.
    ///
    /// Maximum time to wait for the API to respond. Applies per API call,
    /// not to the entire conversation. Increase for slower models or when
    /// expecting long responses.
    timeout: u64,

    /// Tools available for the agent to use during conversations.
    ///
    /// Tools are wrapped in `Arc` for efficient cloning. When the agent
    /// receives a tool use request, it looks up the tool by name in this
    /// vector. Empty by default.
    tools: Vec<Arc<Tool>>,

    /// Whether to automatically execute tools and continue the conversation.
    ///
    /// - `true`: SDK automatically executes tool calls and sends results back
    ///   to the model, continuing until no more tools are requested
    /// - `false`: Tool calls are returned to the caller, who must manually
    ///   execute them and provide results
    ///
    /// Auto-execution is convenient but gives less control. Manual execution
    /// allows for approval workflows and selective tool access.
    auto_execute_tools: bool,

    /// Maximum iterations of tool execution in automatic mode.
    ///
    /// Prevents infinite loops where the agent continuously requests tools.
    /// Each tool execution attempt counts as one iteration. Only relevant
    /// when `auto_execute_tools` is true.
    max_tool_iterations: u32,

    /// Lifecycle hooks for observing and intercepting agent operations.
    ///
    /// Hooks allow you to inject custom logic at various points:
    /// - Before/after API requests
    /// - Tool execution interception
    /// - Response streaming callbacks
    ///
    /// Useful for logging, metrics, debugging, and implementing custom
    /// authorization logic.
    hooks: Hooks,
}

/// Custom Debug implementation to prevent sensitive data leakage.
///
/// We override the default Debug implementation because:
/// 1. The `api_key` field may contain sensitive credentials that shouldn't
///    appear in logs or error messages
/// 2. The `tools` vector contains Arc-wrapped closures that don't debug nicely,
///    so we show a count instead
///
/// This ensures that debug output is safe for logging while remaining useful
/// for troubleshooting.
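///
/// A hedged sketch of the resulting output (exact formatting may differ):
///
/// ```ignore
/// let options = AgentOptions::builder()
///     .model("qwen2.5-32b-instruct")
///     .base_url("http://localhost:1234/v1")
///     .api_key("sk-secret")
///     .build()
///     .unwrap();
///
/// // Safe to log: prints `AgentOptions { ..., api_key: "***", ... }`
/// println!("{:?}", options);
/// ```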
impl std::fmt::Debug for AgentOptions {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("AgentOptions")
            .field("system_prompt", &self.system_prompt)
            .field("model", &self.model)
            .field("base_url", &self.base_url)
            // Mask API key to prevent credential leakage in logs
            .field("api_key", &"***")
            .field("max_turns", &self.max_turns)
            .field("max_tokens", &self.max_tokens)
            .field("temperature", &self.temperature)
            .field("timeout", &self.timeout)
            // Show tool count instead of trying to debug Arc<Tool> contents
            .field("tools", &format!("{} tools", self.tools.len()))
            .field("auto_execute_tools", &self.auto_execute_tools)
            .field("max_tool_iterations", &self.max_tool_iterations)
            .field("hooks", &self.hooks)
            .finish()
    }
}

/// Default values optimized for common single-turn use cases.
///
/// These defaults are chosen to:
/// - Require explicit configuration of critical fields (model, base_url)
/// - Provide safe, sensible defaults for optional fields
/// - Work with local inference servers that don't need authentication
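///
/// A doc-test sketch of the resulting defaults, read back through the
/// public getters:
///
/// ```
/// use open_agent::AgentOptions;
///
/// let defaults = AgentOptions::default();
/// assert_eq!(defaults.max_turns(), 1);
/// assert_eq!(defaults.max_tokens(), Some(4096));
/// assert_eq!(defaults.temperature(), 0.7);
/// assert!(!defaults.auto_execute_tools());
/// ```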
impl Default for AgentOptions {
    fn default() -> Self {
        Self {
            // Empty string forces users to explicitly set context
            system_prompt: String::new(),
            // Empty string forces users to explicitly choose a model
            model: String::new(),
            // Empty string forces users to explicitly configure the endpoint
            base_url: String::new(),
            // Most local servers (LM Studio, llama.cpp) don't require auth
            api_key: "not-needed".to_string(),
            // Default to single-shot interaction; users opt into conversations
            max_turns: 1,
            // 4096 is a reasonable default that works with most models
            // while preventing runaway generation costs
            max_tokens: Some(4096),
            // 0.7 balances creativity with consistency for general use
            temperature: 0.7,
            // 60 seconds handles most requests without timing out prematurely
            timeout: 60,
            // No tools by default; users explicitly add capabilities
            tools: Vec::new(),
            // Manual tool execution by default for safety and control
            auto_execute_tools: false,
            // 5 iterations prevent infinite loops while allowing multi-step workflows
            max_tool_iterations: 5,
            // Empty hooks for no-op behavior
            hooks: Hooks::new(),
        }
    }
}

impl AgentOptions {
    /// Creates a new builder for constructing [`AgentOptions`].
    ///
    /// The builder pattern is used because:
    /// 1. Some fields are required (model, base_url) and need validation
    /// 2. Many fields have sensible defaults that can be overridden
    /// 3. The API is more discoverable and readable than struct initialization
    ///
    /// # Example
    ///
    /// ```no_run
    /// use open_agent::AgentOptions;
    ///
    /// let options = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .build()
    ///     .expect("Valid configuration");
    /// ```
    pub fn builder() -> AgentOptionsBuilder {
        AgentOptionsBuilder::default()
    }

    /// Returns the system prompt.
    pub fn system_prompt(&self) -> &str {
        &self.system_prompt
    }

    /// Returns the model identifier.
    pub fn model(&self) -> &str {
        &self.model
    }

    /// Returns the base URL.
    pub fn base_url(&self) -> &str {
        &self.base_url
    }

    /// Returns the API key.
    pub fn api_key(&self) -> &str {
        &self.api_key
    }

    /// Returns the maximum number of conversation turns.
    pub fn max_turns(&self) -> u32 {
        self.max_turns
    }

    /// Returns the maximum tokens setting.
    pub fn max_tokens(&self) -> Option<u32> {
        self.max_tokens
    }

    /// Returns the sampling temperature.
    pub fn temperature(&self) -> f32 {
        self.temperature
    }

    /// Returns the HTTP timeout in seconds.
    pub fn timeout(&self) -> u64 {
        self.timeout
    }

    /// Returns a reference to the tools vector.
    pub fn tools(&self) -> &[Arc<Tool>] {
        &self.tools
    }

    /// Returns whether automatic tool execution is enabled.
    pub fn auto_execute_tools(&self) -> bool {
        self.auto_execute_tools
    }

    /// Returns the maximum tool execution iterations.
    pub fn max_tool_iterations(&self) -> u32 {
        self.max_tool_iterations
    }

    /// Returns a reference to the hooks configuration.
    pub fn hooks(&self) -> &Hooks {
        &self.hooks
    }
}

/// Builder for constructing [`AgentOptions`] with validation.
///
/// This builder tracks required fields with `Option<T>` and validates that
/// all of them have been set when [`build()`](AgentOptionsBuilder::build) is
/// called, before creating the final [`AgentOptions`].
///
/// # Required Fields
///
/// - `model`: The LLM model identifier
/// - `base_url`: The API endpoint URL
///
/// All other fields have sensible defaults.
///
/// # Usage Pattern
///
/// 1. Call [`AgentOptions::builder()`]
/// 2. Chain method calls to set configuration
/// 3. Call [`build()`](AgentOptionsBuilder::build) to validate and create the final options
///
/// Methods return `self` for chaining, following the fluent interface pattern.
///
/// # Examples
///
/// ```no_run
/// use open_agent::AgentOptions;
/// use open_agent::Tool;
///
/// let calculator = Tool::new(
///     "calculate",
///     "Perform arithmetic",
///     serde_json::json!({
///         "type": "object",
///         "properties": {
///             "expression": {"type": "string"}
///         }
///     }),
///     |_input| Box::pin(async move {
///         Ok(serde_json::json!({"result": 42}))
///     }),
/// );
///
/// let options = AgentOptions::builder()
///     .model("qwen2.5-32b-instruct")
///     .base_url("http://localhost:1234/v1")
///     .system_prompt("You are a helpful assistant")
///     .max_turns(10)
///     .temperature(0.8)
///     .tool(calculator)
///     .auto_execute_tools(true)
///     .build()
///     .expect("Valid configuration");
/// ```
#[derive(Default)]
pub struct AgentOptionsBuilder {
    /// Optional system prompt; defaults to empty if not set
    system_prompt: Option<String>,
    /// Required: model identifier
    model: Option<String>,
    /// Required: API endpoint URL
    base_url: Option<String>,
    /// Optional API key; defaults to "not-needed"
    api_key: Option<String>,
    /// Optional max turns; defaults to 1
    max_turns: Option<u32>,
    /// Optional max tokens; defaults to Some(4096)
    max_tokens: Option<u32>,
    /// Optional temperature; defaults to 0.7
    temperature: Option<f32>,
    /// Optional timeout; defaults to 60 seconds
    timeout: Option<u64>,
    /// Tools to provide; starts empty
    tools: Vec<Arc<Tool>>,
    /// Optional auto-execute flag; defaults to false
    auto_execute_tools: Option<bool>,
    /// Optional max iterations; defaults to 5
    max_tool_iterations: Option<u32>,
    /// Lifecycle hooks; defaults to empty
    hooks: Hooks,
}

/// Custom Debug implementation for builder to show minimal useful information.
///
/// Similar to [`AgentOptions`], we provide a simplified debug output that:
/// - Omits sensitive fields like API keys (not shown at all in builder)
/// - Shows tool count rather than tool details
/// - Focuses on the most important configuration fields
impl std::fmt::Debug for AgentOptionsBuilder {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("AgentOptionsBuilder")
            .field("system_prompt", &self.system_prompt)
            .field("model", &self.model)
            .field("base_url", &self.base_url)
            .field("tools", &format!("{} tools", self.tools.len()))
            .finish()
    }
}

/// Builder methods for configuring agent options.
///
/// All methods follow the builder pattern: they consume `self`, update a field,
/// and return `self` for method chaining. The generic `impl Into<String>` parameters
/// allow passing `&str`, `String`, or any other type that converts to `String`.
impl AgentOptionsBuilder {
    /// Sets the system prompt that defines agent behavior.
    ///
    /// The system prompt is sent at the beginning of every conversation to
    /// establish context, personality, and instructions for the agent.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// let options = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .system_prompt("You are a helpful coding assistant. Be concise.")
    ///     .build()
    ///     .unwrap();
    /// ```
    pub fn system_prompt(mut self, prompt: impl Into<String>) -> Self {
        self.system_prompt = Some(prompt.into());
        self
    }

    /// Sets the model identifier (required).
    ///
    /// This must match a model available at your configured endpoint.
    /// Common examples: "qwen2.5-32b-instruct", "gpt-4", "claude-3-sonnet".
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// let options = AgentOptions::builder()
    ///     .model("gpt-4")
    ///     .base_url("https://api.openai.com/v1")
    ///     .build()
    ///     .unwrap();
    /// ```
    pub fn model(mut self, model: impl Into<String>) -> Self {
        self.model = Some(model.into());
        self
    }

    /// Sets the API endpoint URL (required).
    ///
    /// Must be an OpenAI-compatible endpoint. Common values:
    /// - Local: "http://localhost:1234/v1" (LM Studio default)
    /// - OpenAI: <https://api.openai.com/v1>
    /// - Custom: Your inference server URL
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// let options = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .build()
    ///     .unwrap();
    /// ```
    pub fn base_url(mut self, url: impl Into<String>) -> Self {
        self.base_url = Some(url.into());
        self
    }

    /// Sets the API key for authentication.
    ///
    /// Required for cloud providers like OpenAI. Most local servers don't
    /// need this - the default "not-needed" works fine.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// let options = AgentOptions::builder()
    ///     .model("gpt-4")
    ///     .base_url("https://api.openai.com/v1")
    ///     .api_key("sk-...")
    ///     .build()
    ///     .unwrap();
    /// ```
    pub fn api_key(mut self, key: impl Into<String>) -> Self {
        self.api_key = Some(key.into());
        self
    }

    /// Sets the maximum number of conversation turns.
    ///
    /// One turn = user message + assistant response. Higher values enable
    /// longer conversations but may increase costs and latency.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// let options = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .max_turns(10)  // Allow multi-turn conversation
    ///     .build()
    ///     .unwrap();
    /// ```
    pub fn max_turns(mut self, turns: u32) -> Self {
        self.max_turns = Some(turns);
        self
    }

    /// Sets the maximum tokens to generate per response.
    ///
    /// Constrains response length. Lower values reduce costs but may truncate
    /// responses. Higher values allow longer, more complete answers.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// let options = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .max_tokens(1000)  // Limit to shorter responses
    ///     .build()
    ///     .unwrap();
    /// ```
    pub fn max_tokens(mut self, tokens: u32) -> Self {
        self.max_tokens = Some(tokens);
        self
    }

    /// Sets the sampling temperature for response generation.
    ///
    /// Controls randomness:
    /// - 0.0: Deterministic, always picks most likely tokens
    /// - 0.7: Balanced (default)
    /// - 1.0+: More creative/random
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// let options = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .temperature(0.0)  // Deterministic for coding tasks
    ///     .build()
    ///     .unwrap();
    /// ```
    pub fn temperature(mut self, temp: f32) -> Self {
        self.temperature = Some(temp);
        self
    }

    /// Sets the HTTP request timeout in seconds.
    ///
    /// How long to wait for the API to respond. Increase for slower models
    /// or when expecting long responses.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// let options = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .timeout(120)  // 2 minutes for complex tasks
    ///     .build()
    ///     .unwrap();
    /// ```
    pub fn timeout(mut self, timeout: u64) -> Self {
        self.timeout = Some(timeout);
        self
    }

    /// Enables or disables automatic tool execution.
    ///
    /// When true, the SDK automatically executes tool calls and continues
    /// the conversation. When false, tool calls are returned for manual
    /// handling, allowing approval workflows.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// let options = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .auto_execute_tools(true)  // Automatic execution
    ///     .build()
    ///     .unwrap();
    /// ```
    pub fn auto_execute_tools(mut self, auto: bool) -> Self {
        self.auto_execute_tools = Some(auto);
        self
    }

    /// Sets the maximum tool execution iterations in automatic mode.
    ///
    /// Prevents infinite loops where the agent continuously calls tools.
    /// Only relevant when `auto_execute_tools` is true.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// let options = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .auto_execute_tools(true)
    ///     .max_tool_iterations(10)  // Allow up to 10 tool calls
    ///     .build()
    ///     .unwrap();
    /// ```
    pub fn max_tool_iterations(mut self, iterations: u32) -> Self {
        self.max_tool_iterations = Some(iterations);
        self
    }

    /// Adds a single tool to the agent's available tools.
    ///
    /// The tool is wrapped in `Arc` for efficient sharing. Can be called
    /// multiple times to add multiple tools.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// # use open_agent::Tool;
    /// let calculator = Tool::new(
    ///     "calculate",
    ///     "Evaluate a math expression",
    ///     serde_json::json!({"type": "object"}),
    ///     |_input| Box::pin(async move { Ok(serde_json::json!({"result": 42})) }),
    /// );
    ///
    /// let options = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .tool(calculator)
    ///     .build()
    ///     .unwrap();
    /// ```
    pub fn tool(mut self, tool: Tool) -> Self {
        self.tools.push(Arc::new(tool));
        self
    }

    /// Adds multiple tools at once to the agent's available tools.
    ///
    /// Convenience method for bulk tool addition. All tools are wrapped
    /// in `Arc` automatically.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// # use open_agent::Tool;
    /// let tools = vec![
    ///     Tool::new("add", "Add numbers", serde_json::json!({}),
    ///         |_input| Box::pin(async move { Ok(serde_json::json!({})) })),
    ///     Tool::new("multiply", "Multiply numbers", serde_json::json!({}),
    ///         |_input| Box::pin(async move { Ok(serde_json::json!({})) })),
    /// ];
    ///
    /// let options = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .tools(tools)
    ///     .build()
    ///     .unwrap();
    /// ```
    pub fn tools(mut self, tools: Vec<Tool>) -> Self {
        self.tools.extend(tools.into_iter().map(Arc::new));
        self
    }

    /// Sets lifecycle hooks for monitoring and intercepting agent operations.
    ///
    /// Hooks allow custom logic at various points: before/after API calls,
    /// tool execution, response streaming, etc. Useful for logging, metrics,
    /// debugging, and custom authorization.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::{AgentOptions, Hooks, HookDecision};
    /// let hooks = Hooks::new()
    ///     .add_user_prompt_submit(|event| async move {
    ///         println!("User prompt: {}", event.prompt);
    ///         Some(HookDecision::continue_())
    ///     });
    ///
    /// let options = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .hooks(hooks)
    ///     .build()
    ///     .unwrap();
    /// ```
    pub fn hooks(mut self, hooks: Hooks) -> Self {
        self.hooks = hooks;
        self
    }

    /// Validates configuration and builds the final [`AgentOptions`].
    ///
    /// This method performs validation to ensure required fields are set and
    /// applies default values for optional fields. Returns an error if
    /// validation fails.
    ///
    /// # Required Fields
    ///
    /// - `model`: Must be set or build() returns an error
    /// - `base_url`: Must be set or build() returns an error
    ///
    /// # Errors
    ///
    /// Returns a configuration error if any required field is missing.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// // Success - all required fields set
    /// let options = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .build()
    ///     .expect("Valid configuration");
    ///
    /// // Error - missing model
    /// let result = AgentOptions::builder()
    ///     .base_url("http://localhost:1234/v1")
    ///     .build();
    /// assert!(result.is_err());
    /// ```
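    ///
    /// Invalid values on optional fields are rejected as well; a sketch:
    ///
    /// ```no_run
    /// # use open_agent::AgentOptions;
    /// // Error - temperature outside the 0.0..=2.0 range
    /// let result = AgentOptions::builder()
    ///     .model("qwen2.5-32b-instruct")
    ///     .base_url("http://localhost:1234/v1")
    ///     .temperature(3.0)
    ///     .build();
    /// assert!(result.is_err());
    /// ```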
    pub fn build(self) -> crate::Result<AgentOptions> {
        // Validate required fields - these must be explicitly set by the user
        // because they're fundamental to connecting to an LLM provider
        let model = self
            .model
            .ok_or_else(|| crate::Error::config("model is required"))?;

        let base_url = self
            .base_url
            .ok_or_else(|| crate::Error::config("base_url is required"))?;

        // Validate model is not empty or whitespace
        if model.trim().is_empty() {
            return Err(crate::Error::invalid_input(
                "model cannot be empty or whitespace",
            ));
        }

        // Validate base_url is not empty and has valid URL format
        if base_url.trim().is_empty() {
            return Err(crate::Error::invalid_input("base_url cannot be empty"));
        }
        // Check if URL has a valid scheme (http:// or https://)
        if !base_url.starts_with("http://") && !base_url.starts_with("https://") {
            return Err(crate::Error::invalid_input(
                "base_url must start with http:// or https://",
            ));
        }

        // Validate temperature is in valid range (0.0 to 2.0)
        let temperature = self.temperature.unwrap_or(0.7);
        if !(0.0..=2.0).contains(&temperature) {
            return Err(crate::Error::invalid_input(
                "temperature must be between 0.0 and 2.0",
            ));
        }
        // Apply the 4096-token default, then validate the value
        let max_tokens = self.max_tokens.or(Some(4096));
        if let Some(tokens) = max_tokens {
            if tokens == 0 {
                return Err(crate::Error::invalid_input(
                    "max_tokens must be greater than 0",
                ));
            }
        }

        // Construct the final options, applying defaults where values weren't set
        Ok(AgentOptions {
            // Empty system prompt is valid - not all use cases need one
            system_prompt: self.system_prompt.unwrap_or_default(),
            model,
            base_url,
            // Default API key works for most local servers
            api_key: self.api_key.unwrap_or_else(|| "not-needed".to_string()),
            // Default to single-turn for simplicity
            max_turns: self.max_turns.unwrap_or(1),
            max_tokens,
            temperature,
            // Conservative timeout that works for most requests
            timeout: self.timeout.unwrap_or(60),
            // Tools vector was built up during configuration, use as-is
            tools: self.tools,
            // Manual execution by default for safety and control
            auto_execute_tools: self.auto_execute_tools.unwrap_or(false),
            // Reasonable limit to prevent runaway tool loops
            max_tool_iterations: self.max_tool_iterations.unwrap_or(5),
            // Hooks were built up during configuration, use as-is
            hooks: self.hooks,
        })
    }
}

/// Identifies the sender/role of a message in the conversation.
///
/// This enum follows the standard chat completion role system used by most
/// LLM APIs. The role determines how the message is interpreted and processed.
///
/// # Serialization
///
/// Serializes to lowercase strings via serde (`"system"`, `"user"`, etc.)
/// to match OpenAI API format.
///
/// # Role Semantics
///
/// - [`System`](MessageRole::System): Establishes context, instructions, and behavior
/// - [`User`](MessageRole::User): Input from the human or calling application
/// - [`Assistant`](MessageRole::Assistant): Response from the AI model
/// - [`Tool`](MessageRole::Tool): Results from tool/function execution
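///
/// # Example
///
/// A short doc-test sketch of the lowercase serialization:
///
/// ```
/// use open_agent::MessageRole;
///
/// // Roles serialize to lowercase strings, matching the OpenAI wire format.
/// assert_eq!(serde_json::to_string(&MessageRole::User).unwrap(), "\"user\"");
/// assert_eq!(serde_json::to_string(&MessageRole::Tool).unwrap(), "\"tool\"");
/// ```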
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum MessageRole {
    /// System message that establishes agent behavior and context.
    ///
    /// Typically the first message in a conversation. Used for instructions,
    /// personality definition, and constraints that apply throughout the
    /// conversation.
    System,

    /// User message representing human or application input.
    ///
    /// The prompt or query that the agent should respond to. In multi-turn
    /// conversations, user messages alternate with assistant messages.
    User,

    /// Assistant message containing the AI model's response.
    ///
    /// Can include text, tool use requests, or both. When the model wants to
    /// call a tool, it includes ToolUseBlock content.
    Assistant,

    /// Tool result message containing function execution results.
    ///
    /// Sent back to the model after executing a requested tool. Contains the
    /// tool's output that the model can use in its next response.
    Tool,
}

/// Multi-modal content blocks that can appear in messages.
///
/// Messages are composed of one or more content blocks, allowing rich,
/// structured communication between the user, assistant, and tools.
///
/// # Serialization
///
/// Uses serde's internally tagged enum format with a `"type"` field:
/// ```json
/// {"type": "text", "text": "Hello"}
/// {"type": "tool_use", "id": "call_123", "name": "search", "input": {...}}
/// {"type": "tool_result", "tool_use_id": "call_123", "content": {...}}
/// ```
///
/// # Block Types
///
/// - [`Text`](ContentBlock::Text): Simple text content
/// - [`ToolUse`](ContentBlock::ToolUse): Request from model to execute a tool
/// - [`ToolResult`](ContentBlock::ToolResult): Result of tool execution
///
/// # Usage
///
/// Messages can contain multiple blocks. For example, an assistant message
/// might include text explaining its reasoning followed by a tool use request.
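///
/// # Example
///
/// A round-trip sketch of the tagged serialization (assuming the derive
/// attributes stay as declared below):
///
/// ```
/// use open_agent::{ContentBlock, TextBlock};
///
/// let block = ContentBlock::Text(TextBlock::new("Hello"));
/// let json = serde_json::to_string(&block).unwrap();
/// assert_eq!(json, r#"{"type":"text","text":"Hello"}"#);
///
/// // Deserializing dispatches on the "type" tag.
/// let back: ContentBlock = serde_json::from_str(&json).unwrap();
/// assert!(matches!(back, ContentBlock::Text(_)));
/// ```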
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ContentBlock {
    /// Text content block containing a string message.
    Text(TextBlock),

    /// Tool use request from the model to execute a function.
    ToolUse(ToolUseBlock),

    /// Tool execution result sent back to the model.
    ToolResult(ToolResultBlock),
}

/// Simple text content in a message.
///
/// The most common content type, representing plain text communication.
/// Both users and assistants primarily use text blocks for their messages.
///
/// # Example
///
/// ```
/// use open_agent::{TextBlock, ContentBlock};
///
/// let block = TextBlock::new("Hello, world!");
/// let content = ContentBlock::Text(block);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TextBlock {
    /// The text content.
    pub text: String,
}

impl TextBlock {
    /// Creates a new text block from any string-like type.
    ///
    /// # Example
    ///
    /// ```
    /// use open_agent::TextBlock;
    ///
    /// let block = TextBlock::new("Hello");
    /// assert_eq!(block.text, "Hello");
    /// ```
    pub fn new(text: impl Into<String>) -> Self {
        Self { text: text.into() }
    }
}

/// Tool use request from the AI model.
///
/// When the model determines it needs to call a tool/function, it returns
/// a ToolUseBlock specifying which tool to call and with what parameters.
/// The application must then execute the tool and return results via
/// [`ToolResultBlock`].
///
/// # Fields
///
/// - `id`: Unique identifier for this tool call, used to correlate results
/// - `name`: Name of the tool to execute (must match a registered tool)
/// - `input`: JSON parameters to pass to the tool
///
/// # Example
///
/// ```
/// use open_agent::{ToolUseBlock, ContentBlock};
/// use serde_json::json;
///
/// let block = ToolUseBlock::new(
///     "call_123",
///     "calculate",
///     json!({"expression": "2 + 2"})
/// );
/// assert_eq!(block.id, "call_123");
/// assert_eq!(block.name, "calculate");
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolUseBlock {
    /// Unique identifier for this tool call.
    ///
    /// Generated by the model. Used to correlate the tool result back to
    /// this specific request, especially when multiple tools are called.
    pub id: String,

    /// Name of the tool to execute.
    ///
    /// Must match the name of a tool that was provided in the agent's
    /// configuration, otherwise execution will fail.
    pub name: String,

    /// JSON parameters to pass to the tool.
    ///
    /// The structure should match the tool's input schema. The tool's
    /// execution function receives this value as input.
    pub input: serde_json::Value,
}

impl ToolUseBlock {
    /// Creates a new tool use block.
    ///
    /// # Parameters
    ///
    /// - `id`: Unique identifier for this tool call
    /// - `name`: Name of the tool to execute
    /// - `input`: JSON parameters for the tool
    ///
    /// # Example
    ///
    /// ```
    /// use open_agent::ToolUseBlock;
    /// use serde_json::json;
    ///
    /// let block = ToolUseBlock::new(
    ///     "call_abc",
    ///     "search",
    ///     json!({"query": "Rust async programming"})
    /// );
    /// ```
    pub fn new(id: impl Into<String>, name: impl Into<String>, input: serde_json::Value) -> Self {
        Self {
            id: id.into(),
            name: name.into(),
            input,
        }
    }
}

/// Tool execution result sent back to the model.
///
/// After executing a tool requested via [`ToolUseBlock`], the application
/// creates a ToolResultBlock containing the tool's output and sends it back
/// to the model. The model then uses this information in its next response.
///
/// # Fields
///
/// - `tool_use_id`: Must match the `id` from the corresponding ToolUseBlock
/// - `content`: JSON result from the tool execution
///
/// # Example
///
/// ```
/// use open_agent::{ToolResultBlock, ContentBlock};
/// use serde_json::json;
///
/// let result = ToolResultBlock::new(
///     "call_123",
///     json!({"result": 4})
/// );
/// assert_eq!(result.tool_use_id, "call_123");
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolResultBlock {
    /// ID of the tool use request this result corresponds to.
    ///
    /// Must match the `id` field from the ToolUseBlock that requested
    /// this tool execution. This correlation is essential for the model
    /// to understand which tool call produced which result.
    pub tool_use_id: String,

    /// JSON result from executing the tool.
    ///
    /// Contains the tool's output data. Can be any valid JSON structure -
    /// the model will interpret it based on the tool's description and
    /// output schema.
    pub content: serde_json::Value,
}

impl ToolResultBlock {
    /// Creates a new tool result block.
    ///
    /// # Parameters
    ///
    /// - `tool_use_id`: ID from the corresponding ToolUseBlock
    /// - `content`: JSON result from tool execution
    ///
    /// # Example
    ///
    /// ```
    /// use open_agent::ToolResultBlock;
    /// use serde_json::json;
    ///
    /// let result = ToolResultBlock::new(
    ///     "call_xyz",
    ///     json!({
    ///         "status": "success",
    ///         "data": {"temperature": 72}
    ///     })
    /// );
    /// ```
    pub fn new(tool_use_id: impl Into<String>, content: serde_json::Value) -> Self {
        Self {
            tool_use_id: tool_use_id.into(),
            content,
        }
    }
}

/// A complete message in a conversation.
///
/// Messages are the primary unit of communication in the agent system. Each
/// message has a role (who sent it) and content (what it contains). Content
/// is structured as a vector of blocks to support multi-modal communication.
///
/// # Structure
///
/// - `role`: Who sent the message ([`MessageRole`])
/// - `content`: What the message contains (one or more [`ContentBlock`]s)
///
/// # Message Patterns
///
/// ## Simple Text Message
/// ```
/// use open_agent::Message;
///
/// let msg = Message::user("What's the weather?");
/// ```
///
/// ## Assistant Response with Tool Call
/// ```
/// use open_agent::{Message, ContentBlock, TextBlock, ToolUseBlock};
/// use serde_json::json;
///
/// let msg = Message::assistant(vec![
///     ContentBlock::Text(TextBlock::new("Let me check that for you.")),
///     ContentBlock::ToolUse(ToolUseBlock::new(
///         "call_123",
///         "get_weather",
///         json!({"location": "San Francisco"})
///     ))
/// ]);
/// ```
///
/// ## Tool Result
/// ```
/// use open_agent::{Message, ContentBlock, ToolResultBlock};
/// use serde_json::json;
///
/// let msg = Message::user_with_blocks(vec![
///     ContentBlock::ToolResult(ToolResultBlock::new(
///         "call_123",
///         json!({"temp": 72, "conditions": "sunny"})
///     ))
/// ]);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Message {
    /// The role/sender of this message.
    pub role: MessageRole,

    /// The content blocks that make up this message.
    ///
    /// A message can contain multiple blocks of different types. For example,
    /// an assistant message might have both text and tool use blocks.
    pub content: Vec<ContentBlock>,
}

impl Message {
    /// Creates a new message with the specified role and content.
    ///
    /// This is the most general constructor. For convenience, use the
    /// role-specific constructors like [`user()`](Message::user),
    /// [`assistant()`](Message::assistant), etc.
    ///
    /// # Example
    ///
    /// ```
    /// use open_agent::{Message, MessageRole, ContentBlock, TextBlock};
    ///
    /// let msg = Message::new(
    ///     MessageRole::User,
    ///     vec![ContentBlock::Text(TextBlock::new("Hello"))]
    /// );
    /// ```
    pub fn new(role: MessageRole, content: Vec<ContentBlock>) -> Self {
        Self { role, content }
    }

    /// Creates a user message with simple text content.
    ///
    /// This is the most common way to create user messages. For more complex
    /// content with multiple blocks, use [`user_with_blocks()`](Message::user_with_blocks).
    ///
    /// # Example
    ///
    /// ```
    /// use open_agent::Message;
    ///
    /// let msg = Message::user("What is 2+2?");
    /// ```
    pub fn user(text: impl Into<String>) -> Self {
        Self {
            role: MessageRole::User,
            content: vec![ContentBlock::Text(TextBlock::new(text))],
        }
    }

    /// Creates an assistant message with the specified content blocks.
    ///
    /// Assistant messages often contain multiple content blocks (text + tool use).
    /// This method takes a vector of blocks for maximum flexibility.
    ///
    /// # Example
    ///
    /// ```
    /// use open_agent::{Message, ContentBlock, TextBlock};
    ///
    /// let msg = Message::assistant(vec![
    ///     ContentBlock::Text(TextBlock::new("The answer is 4"))
    /// ]);
    /// ```
    pub fn assistant(content: Vec<ContentBlock>) -> Self {
        Self {
            role: MessageRole::Assistant,
            content,
        }
    }

    /// Creates a system message with simple text content.
    ///
    /// System messages establish the agent's behavior and context. They're
    /// typically sent at the start of a conversation.
    ///
    /// # Example
    ///
    /// ```
    /// use open_agent::Message;
    ///
    /// let msg = Message::system("You are a helpful assistant. Be concise.");
    /// ```
    pub fn system(text: impl Into<String>) -> Self {
        Self {
            role: MessageRole::System,
            content: vec![ContentBlock::Text(TextBlock::new(text))],
        }
    }

    /// Creates a user message with custom content blocks.
    ///
    /// Use this when you need to send structured content beyond simple text,
    /// such as tool results. For simple text messages, prefer
    /// [`user()`](Message::user).
    ///
    /// # Example
    ///
    /// ```
    /// use open_agent::{Message, ContentBlock, ToolResultBlock};
    /// use serde_json::json;
    ///
    /// let msg = Message::user_with_blocks(vec![
    ///     ContentBlock::ToolResult(ToolResultBlock::new(
    ///         "call_123",
    ///         json!({"result": "success"})
    ///     ))
    /// ]);
    /// ```
    pub fn user_with_blocks(content: Vec<ContentBlock>) -> Self {
        Self {
            role: MessageRole::User,
            content,
        }
    }
}

/// OpenAI API message format for serialization.
///
/// This struct represents the wire format for messages when communicating
/// with OpenAI-compatible APIs. It differs from the internal [`Message`]
/// type to accommodate the specific serialization requirements of the
/// OpenAI API.
///
/// # Key Differences from Internal Message Type
///
/// - Content is a flat string rather than structured blocks
/// - Tool calls are represented in OpenAI's specific format
/// - Supports both sending tool calls (via `tool_calls`) and tool results
///   (via `tool_call_id`)
///
/// # Serialization
///
/// Optional fields are skipped when `None` to keep payloads minimal.
///
/// # Usage
///
/// This type is typically created by the SDK internally when converting
/// from [`Message`] to API format. Users rarely need to construct these
/// directly.
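///
/// # Example
///
/// A hedged sketch of one plausible mapping from a text-only [`Message`] to
/// this wire format (the SDK's internal converter may differ):
///
/// ```ignore
/// use open_agent::{ContentBlock, Message, OpenAIMessage};
///
/// let msg = Message::user("Hello!");
///
/// // Flatten the text blocks into a single string.
/// let text: String = msg
///     .content
///     .iter()
///     .filter_map(|block| match block {
///         ContentBlock::Text(t) => Some(t.text.as_str()),
///         _ => None,
///     })
///     .collect::<Vec<_>>()
///     .join("\n");
///
/// let wire = OpenAIMessage {
///     role: "user".to_string(),
///     content: text,
///     tool_calls: None,
///     tool_call_id: None,
/// };
/// ```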
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OpenAIMessage {
    /// Message role as a string ("system", "user", "assistant", "tool").
    pub role: String,

    /// Text content of the message.
    ///
    /// For assistant messages with tool calls, this may be empty or contain
    /// explanatory text. For tool result messages, this contains the
    /// stringified tool output.
    pub content: String,

    /// Tool calls requested by the assistant (assistant messages only).
    ///
    /// When the model wants to call tools, this field contains the list
    /// of tool invocations with their parameters. Only present in assistant
    /// messages.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<Vec<OpenAIToolCall>>,

    /// ID of the tool call this message is responding to (tool messages only).
    ///
    /// When sending tool results back to the model, this field links the
    /// result to the original tool call request. Only present in tool
    /// messages.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_call_id: Option<String>,
}

/// OpenAI tool call representation in API messages.
///
/// Represents a request from the model to execute a specific function/tool.
/// This is the wire format used in the OpenAI API, distinct from the internal
/// [`ToolUseBlock`] representation.
///
/// # Structure
///
/// Each tool call has:
/// - A unique ID for correlation with results
/// - A type (always "function" in current OpenAI API)
/// - Function details (name and arguments)
///
/// # Example JSON
///
/// ```json
/// {
///   "id": "call_abc123",
///   "type": "function",
///   "function": {
///     "name": "get_weather",
///     "arguments": "{\"location\":\"San Francisco\"}"
///   }
/// }
/// ```
1580#[derive(Debug, Clone, Serialize, Deserialize)]
1581pub struct OpenAIToolCall {
1582    /// Unique identifier for this tool call.
1583    ///
1584    /// Generated by the model. Used to correlate tool results back to
1585    /// this specific call.
1586    pub id: String,
1587
1588    /// Type of the call (always "function" in current API).
1589    ///
1590    /// The `rename` attribute ensures this serializes as `"type"` in JSON
1591    /// since `type` is a Rust keyword.
1592    #[serde(rename = "type")]
1593    pub call_type: String,
1594
1595    /// Function/tool details (name and arguments).
1596    pub function: OpenAIFunction,
1597}
1598
1599/// OpenAI function call details.
1600///
1601/// Contains the function name and its arguments in the OpenAI API format.
1602/// Note that arguments are serialized as a JSON string, not a JSON object,
1603/// which is an OpenAI API quirk.
1604///
1605/// # Arguments Format
1606///
1607/// The `arguments` field is a **JSON string**, not a parsed JSON object.
1608/// For example: `"{\"x\": 1, \"y\": 2}"` not `{"x": 1, "y": 2}`.
1609/// This must be parsed before use.
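///
/// # Example
///
/// A minimal, illustrative sketch of decoding the `arguments` string before
/// use (hypothetical values; not compile-tested):
///
/// ```ignore
/// // `arguments` arrives as a JSON *string*, so it must be parsed first.
/// let func = OpenAIFunction {
///     name: "get_weather".to_string(),
///     arguments: r#"{"location":"NYC","units":"fahrenheit"}"#.to_string(),
/// };
/// let parsed: serde_json::Value = serde_json::from_str(&func.arguments)
///     .expect("arguments should be valid JSON");
/// assert_eq!(parsed["location"], "NYC");
/// ```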
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OpenAIFunction {
    /// Name of the function/tool to call.
    pub name: String,

    /// Function arguments as a **JSON string** (OpenAI API quirk).
    ///
    /// Must be parsed as JSON before use. For example, this might contain
    /// the string `"{\"location\":\"NYC\",\"units\":\"fahrenheit\"}"` which
    /// needs to be parsed into an actual JSON value.
    pub arguments: String,
}

/// Complete request payload for the OpenAI chat completions API.
///
/// This struct is serialized and sent as the request body when making
/// API calls to OpenAI-compatible endpoints. It includes the model,
/// conversation history, and configuration parameters.
///
/// # Streaming
///
/// The SDK always uses streaming mode (`stream: true`) to enable real-time
/// response processing and better user experience.
///
/// # Optional Fields
///
/// Fields marked with `skip_serializing_if` are omitted from the JSON payload
/// when `None`, allowing the API provider to use its defaults.
///
/// # Example
///
/// ```ignore
/// use open_agent::types::{OpenAIRequest, OpenAIMessage};
///
/// let request = OpenAIRequest {
///     model: "gpt-4".to_string(),
///     messages: vec![
///         OpenAIMessage {
///             role: "user".to_string(),
///             content: "Hello!".to_string(),
///             tool_calls: None,
///             tool_call_id: None,
///         }
///     ],
///     stream: true,
///     max_tokens: Some(1000),
///     temperature: Some(0.7),
///     tools: None,
/// };
/// ```
#[derive(Debug, Clone, Serialize)]
pub struct OpenAIRequest {
    /// Model identifier (e.g., "gpt-4", "qwen2.5-32b-instruct").
    pub model: String,

    /// Conversation history as a sequence of messages.
    ///
    /// Includes system prompt, user messages, assistant responses, and
    /// tool results. Order matters: messages are processed sequentially.
    pub messages: Vec<OpenAIMessage>,

    /// Whether to stream the response.
    ///
    /// The SDK always sets this to `true` for better user experience.
    /// Streaming allows incremental processing of responses rather than
    /// waiting for the entire completion.
    pub stream: bool,

    /// Maximum tokens to generate (optional).
    ///
    /// `None` uses the provider's default. Some providers require this
    /// to be set explicitly.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u32>,

    /// Sampling temperature (optional).
    ///
    /// `None` uses the provider's default. Controls randomness in
    /// generation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// Tools/functions available to the model (optional).
    ///
    /// When present, enables function calling. Each tool is described
    /// with a JSON schema defining its parameters. `None` means no
    /// tools are available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<serde_json::Value>>,
}

/// A single chunk from OpenAI's streaming response.
///
/// When the SDK requests streaming responses (`stream: true`), the API
/// returns the response incrementally as a series of chunks. Each chunk
/// represents a small piece of the complete response, allowing the SDK
/// to process and display content as it's generated.
///
/// # Streaming Architecture
///
/// Instead of waiting for the entire response, streaming sends many small
/// chunks in rapid succession. Each chunk contains:
/// - Metadata (id, model, timestamp)
/// - One or more choices (usually just one for single completions)
/// - Incremental deltas with new content
///
/// # Server-Sent Events Format
///
/// Chunks are transmitted as Server-Sent Events (SSE) over HTTP:
/// ```text
/// data: {"id":"chunk_1","object":"chat.completion.chunk",...}
/// data: {"id":"chunk_2","object":"chat.completion.chunk",...}
/// data: [DONE]
/// ```
///
/// # Example Chunk JSON
///
/// ```json
/// {
///   "id": "chatcmpl-123",
///   "object": "chat.completion.chunk",
///   "created": 1677652288,
///   "model": "gpt-4",
///   "choices": [{
///     "index": 0,
///     "delta": {"content": "Hello"},
///     "finish_reason": null
///   }]
/// }
/// ```
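///
/// # Decoding Example
///
/// A minimal sketch of decoding one SSE line into a chunk; the transport
/// handling shown here is illustrative rather than the SDK's actual parser,
/// so the block is not compile-tested:
///
/// ```ignore
/// let line = r#"data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gpt-4","choices":[{"index":0,"delta":{"content":"Hello"},"finish_reason":null}]}"#;
///
/// // Strip the SSE `data: ` prefix, then deserialize. `[DONE]` is a plain
/// // sentinel, not JSON, so it must be checked before parsing.
/// let payload = line.strip_prefix("data: ").expect("SSE data line");
/// if payload != "[DONE]" {
///     let chunk: OpenAIChunk = serde_json::from_str(payload).expect("valid chunk");
///     assert_eq!(chunk.choices[0].delta.content.as_deref(), Some("Hello"));
/// }
/// ```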
#[derive(Debug, Clone, Deserialize)]
pub struct OpenAIChunk {
    /// Unique identifier for this completion.
    ///
    /// All chunks in a single streaming response share the same ID.
    /// Not actively used by the SDK but preserved for debugging.
    #[allow(dead_code)]
    pub id: String,

    /// Object type (always "chat.completion.chunk" for streaming).
    ///
    /// Not actively used by the SDK but preserved for debugging.
    #[allow(dead_code)]
    pub object: String,

    /// Unix timestamp of when this chunk was created.
    ///
    /// Not actively used by the SDK but preserved for debugging.
    #[allow(dead_code)]
    pub created: i64,

    /// Model that generated this chunk.
    ///
    /// Not actively used by the SDK but preserved for debugging.
    #[allow(dead_code)]
    pub model: String,

    /// Array of completion choices (usually contains one element).
    ///
    /// Each choice represents a possible completion. In normal usage,
    /// there's only one choice per chunk. This is the critical field
    /// that the SDK processes to extract content and tool calls.
    pub choices: Vec<OpenAIChoice>,
}

/// A single choice/completion option in a streaming chunk.
///
/// In streaming responses, each chunk can theoretically contain multiple
/// choices (parallel completions), but in practice there's usually just one.
/// Each choice contains a delta with incremental updates and optionally a
/// finish reason when the generation is complete.
///
/// # Delta vs Complete Content
///
/// Unlike non-streaming responses that send complete messages, streaming
/// sends deltas: just the new content added in this chunk. The SDK
/// accumulates these deltas to build the complete response.
///
/// # Finish Reason
///
/// - `None`: More content is coming
/// - `Some("stop")`: Normal completion
/// - `Some("length")`: Hit max token limit
/// - `Some("tool_calls")`: Model wants to call tools
/// - `Some("content_filter")`: Blocked by content policy
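///
/// # Example
///
/// A minimal, illustrative sketch of dispatching on `finish_reason` for a
/// hypothetical `choice: OpenAIChoice` (not compile-tested):
///
/// ```ignore
/// match choice.finish_reason.as_deref() {
///     None => { /* more deltas coming; keep accumulating */ }
///     Some("stop") => { /* completion finished normally */ }
///     Some("tool_calls") => { /* execute the accumulated tool calls */ }
///     Some("length") => { /* output truncated at the token limit */ }
///     Some(other) => eprintln!("unexpected finish_reason: {other}"),
/// }
/// ```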
#[derive(Debug, Clone, Deserialize)]
pub struct OpenAIChoice {
    /// Index of this choice in the choices array.
    ///
    /// Usually 0 since most requests generate a single completion.
    /// Not actively used by the SDK but preserved for debugging.
    #[allow(dead_code)]
    pub index: u32,

    /// Incremental update/delta for this chunk.
    ///
    /// Contains the new content, tool calls, or other updates added in
    /// this specific chunk. The SDK processes this to update its internal
    /// state and accumulate the full response.
    pub delta: OpenAIDelta,

    /// Reason why generation finished (None if still generating).
    ///
    /// Only present in the final chunk of a stream:
    /// - `None`: Generation is still in progress
    /// - `Some("stop")`: Completed normally
    /// - `Some("length")`: Hit token limit
    /// - `Some("tool_calls")`: Model requested tools
    /// - `Some("content_filter")`: Content was filtered
    ///
    /// The SDK uses this to detect completion and determine next actions.
    pub finish_reason: Option<String>,
}

/// Incremental update in a streaming chunk.
///
/// Represents the new content/changes added in this specific chunk.
/// Unlike complete messages, deltas only contain what's new, not the
/// entire accumulated content. The SDK accumulates these deltas to
/// build the complete response.
///
/// # Incremental Nature
///
/// If the complete response is "Hello, world!", the deltas might be:
/// 1. `content: Some("Hello")`
/// 2. `content: Some(", ")`
/// 3. `content: Some("world")`
/// 4. `content: Some("!")`
///
/// The SDK concatenates these to build the full text.
///
/// # Tool Call Deltas
///
/// Tool calls are also streamed incrementally. The first delta might
/// include the tool ID and name, while subsequent deltas stream the
/// arguments JSON string piece by piece.
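///
/// # Example
///
/// A minimal sketch of accumulating text deltas from a hypothetical
/// `chunks: Vec<OpenAIChunk>` into the full response (not compile-tested):
///
/// ```ignore
/// let mut full_text = String::new();
/// for chunk in &chunks {
///     for choice in &chunk.choices {
///         // Only some deltas carry text; others update tool calls.
///         if let Some(text) = &choice.delta.content {
///             full_text.push_str(text);
///         }
///     }
/// }
/// ```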
#[derive(Debug, Clone, Deserialize)]
pub struct OpenAIDelta {
    /// Role of the message (only in first chunk).
    ///
    /// Typically "assistant". Only appears in the first delta of a response
    /// to establish who's speaking. Subsequent deltas omit this field.
    /// Not actively used by the SDK but preserved for completeness.
    #[allow(dead_code)]
    pub role: Option<String>,

    /// Incremental text content added in this chunk.
    ///
    /// Contains the new text tokens generated. `None` if this chunk doesn't
    /// add text (e.g., it might only have tool call updates). The SDK
    /// concatenates these across chunks to build the complete response.
    pub content: Option<String>,

    /// Incremental tool call updates added in this chunk.
    ///
    /// When the model wants to call tools, tool call information is streamed
    /// incrementally. Each delta might add to different parts of the tool
    /// call (ID, name, arguments). The SDK accumulates these to reconstruct
    /// complete tool calls.
    pub tool_calls: Option<Vec<OpenAIToolCallDelta>>,
}

/// Incremental update for a tool call in streaming.
///
/// Tool calls are streamed piece-by-piece, with different chunks potentially
/// updating different parts. The SDK must accumulate these deltas to
/// reconstruct complete tool calls.
///
/// # Streaming Pattern
///
/// A complete tool call is typically streamed as:
/// 1. First chunk: `index: 0, id: Some("call_123"), type: Some("function")`
/// 2. Second chunk: `index: 0, function: Some(FunctionDelta { name: Some("search"), ... })`
/// 3. Multiple chunks: `index: 0, function: Some(FunctionDelta { arguments: Some("part") })`
///
/// The SDK uses the `index` to know which tool call to update, as multiple
/// tool calls can be streamed simultaneously.
///
/// # Index-Based Accumulation
///
/// The `index` field is crucial for tracking which tool call is being updated.
/// When the model calls multiple tools, each has a different index, and deltas
/// specify which one they're updating.
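///
/// # Example
///
/// A minimal sketch of index-based accumulation into a buffer keyed by
/// `index`, for a hypothetical `tool_call_deltas` iterator of
/// `OpenAIToolCallDelta` values; the SDK's internal buffer differs, so this
/// is illustrative and not compile-tested:
///
/// ```ignore
/// use std::collections::BTreeMap;
///
/// // (id, name, accumulated arguments) per tool-call index.
/// let mut calls: BTreeMap<u32, (String, String, String)> = BTreeMap::new();
///
/// for delta in tool_call_deltas {
///     let entry = calls.entry(delta.index).or_default();
///     if let Some(id) = delta.id {
///         entry.0 = id;
///     }
///     if let Some(func) = delta.function {
///         if let Some(name) = func.name {
///             entry.1 = name;
///         }
///         if let Some(args) = func.arguments {
///             // Argument fragments are concatenated, then parsed later.
///             entry.2.push_str(&args);
///         }
///     }
/// }
/// ```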
#[derive(Debug, Clone, Deserialize)]
pub struct OpenAIToolCallDelta {
    /// Index identifying which tool call this delta updates.
    ///
    /// When multiple tools are called, each has an index (0, 1, 2, ...).
    /// The SDK uses this to route delta updates to the correct tool call
    /// in its accumulation buffer.
    pub index: u32,

    /// Tool call ID (only in first delta for this tool call).
    ///
    /// Generated by the model. Present in the first chunk for each tool
    /// call, then omitted in subsequent chunks. The SDK stores this to
    /// correlate results later.
    pub id: Option<String>,

    /// Type of call (always "function" when present).
    ///
    /// Only appears in the first delta for each tool call. Subsequent
    /// deltas omit this field. Not actively used by the SDK but preserved
    /// for completeness.
    #[allow(dead_code)]
    #[serde(rename = "type")]
    pub call_type: Option<String>,

    /// Incremental function details (name and/or arguments).
    ///
    /// Contains partial updates to the function name and arguments.
    /// The SDK accumulates these across chunks to build the complete
    /// function call specification.
    pub function: Option<OpenAIFunctionDelta>,
}

/// Incremental update for function details in streaming tool calls.
///
/// As the model streams a tool call, the function name and arguments are
/// sent incrementally. The name usually comes first in one chunk, then
/// arguments are streamed piece-by-piece as a JSON string.
///
/// # Arguments Streaming
///
/// The `arguments` field is particularly important to understand. It contains
/// **fragments of a JSON string** that must be accumulated and then parsed:
///
/// 1. Chunk 1: `arguments: Some("{")`
/// 2. Chunk 2: `arguments: Some("\"query\":")`
/// 3. Chunk 3: `arguments: Some("\"hello\"")`
/// 4. Chunk 4: `arguments: Some("}")`
///
/// The SDK concatenates these into `"{\"query\":\"hello\"}"` and then
/// parses it as JSON.
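///
/// # Example
///
/// A minimal, illustrative sketch of concatenating fragments and parsing the
/// result (not compile-tested):
///
/// ```ignore
/// // Fragments as they might arrive across chunks.
/// let fragments = ["{\"query\":", "\"hello\"", "}"];
/// let joined: String = fragments.concat();
/// let args: serde_json::Value = serde_json::from_str(&joined)
///     .expect("accumulated arguments should be valid JSON");
/// assert_eq!(args["query"], "hello");
/// ```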
#[derive(Debug, Clone, Deserialize)]
pub struct OpenAIFunctionDelta {
    /// Function/tool name (only in first delta for this function).
    ///
    /// Present when the model first starts calling this function, then
    /// omitted in subsequent chunks. The SDK stores this to know which
    /// tool to execute.
    pub name: Option<String>,

    /// Incremental fragment of the arguments JSON string.
    ///
    /// Contains a piece of the complete JSON arguments string. The SDK
    /// must concatenate all argument fragments across chunks, then parse
    /// the complete string as JSON to get the actual parameters.
    ///
    /// For example, if the complete arguments should be:
    /// `{"x": 1, "y": 2}`
    ///
    /// This might be streamed as:
    /// - `Some("{\"x\": ")`
    /// - `Some("1, \"y\": ")`
    /// - `Some("2}")`
    pub arguments: Option<String>,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_agent_options_builder() {
        let options = AgentOptions::builder()
            .system_prompt("Test prompt")
            .model("test-model")
            .base_url("http://localhost:1234/v1")
            .api_key("test-key")
            .max_turns(5)
            .max_tokens(1000)
            .temperature(0.5)
            .timeout(30)
            .auto_execute_tools(true)
            .max_tool_iterations(10)
            .build()
            .unwrap();

        assert_eq!(options.system_prompt, "Test prompt");
        assert_eq!(options.model, "test-model");
        assert_eq!(options.base_url, "http://localhost:1234/v1");
        assert_eq!(options.api_key, "test-key");
        assert_eq!(options.max_turns, 5);
        assert_eq!(options.max_tokens, Some(1000));
        assert_eq!(options.temperature, 0.5);
        assert_eq!(options.timeout, 30);
        assert!(options.auto_execute_tools);
        assert_eq!(options.max_tool_iterations, 10);
    }

    #[test]
    fn test_agent_options_builder_defaults() {
        let options = AgentOptions::builder()
            .model("test-model")
            .base_url("http://localhost:1234/v1")
            .build()
            .unwrap();

        assert_eq!(options.system_prompt, "");
        assert_eq!(options.api_key, "not-needed");
        assert_eq!(options.max_turns, 1);
        assert_eq!(options.max_tokens, Some(4096));
        assert_eq!(options.temperature, 0.7);
        assert_eq!(options.timeout, 60);
        assert!(!options.auto_execute_tools);
        assert_eq!(options.max_tool_iterations, 5);
    }

    #[test]
    fn test_agent_options_builder_missing_required() {
        // Missing model
        let result = AgentOptions::builder()
            .base_url("http://localhost:1234/v1")
            .build();
        assert!(result.is_err());

        // Missing base_url
        let result = AgentOptions::builder().model("test-model").build();
        assert!(result.is_err());
    }

    #[test]
    fn test_message_user() {
        let msg = Message::user("Hello");
        assert!(matches!(msg.role, MessageRole::User));
        assert_eq!(msg.content.len(), 1);
        match &msg.content[0] {
            ContentBlock::Text(text) => assert_eq!(text.text, "Hello"),
            _ => panic!("Expected TextBlock"),
        }
    }

    #[test]
    fn test_message_system() {
        let msg = Message::system("System prompt");
        assert!(matches!(msg.role, MessageRole::System));
        assert_eq!(msg.content.len(), 1);
        match &msg.content[0] {
            ContentBlock::Text(text) => assert_eq!(text.text, "System prompt"),
            _ => panic!("Expected TextBlock"),
        }
    }

    #[test]
    fn test_message_assistant() {
        let content = vec![ContentBlock::Text(TextBlock::new("Response"))];
        let msg = Message::assistant(content);
        assert!(matches!(msg.role, MessageRole::Assistant));
        assert_eq!(msg.content.len(), 1);
    }

    #[test]
    fn test_text_block() {
        let block = TextBlock::new("Hello");
        assert_eq!(block.text, "Hello");
    }

    #[test]
    fn test_tool_use_block() {
        let input = serde_json::json!({"arg": "value"});
        let block = ToolUseBlock::new("call_123", "tool_name", input.clone());
        assert_eq!(block.id, "call_123");
        assert_eq!(block.name, "tool_name");
        assert_eq!(block.input, input);
    }

    #[test]
    fn test_tool_result_block() {
        let content = serde_json::json!({"result": "success"});
        let block = ToolResultBlock::new("call_123", content.clone());
        assert_eq!(block.tool_use_id, "call_123");
        assert_eq!(block.content, content);
    }

    #[test]
    fn test_message_role_serialization() {
        assert_eq!(
            serde_json::to_string(&MessageRole::User).unwrap(),
            "\"user\""
        );
        assert_eq!(
            serde_json::to_string(&MessageRole::System).unwrap(),
            "\"system\""
        );
        assert_eq!(
            serde_json::to_string(&MessageRole::Assistant).unwrap(),
            "\"assistant\""
        );
        assert_eq!(
            serde_json::to_string(&MessageRole::Tool).unwrap(),
            "\"tool\""
        );
    }

    #[test]
    fn test_openai_request_serialization() {
        let request = OpenAIRequest {
            model: "gpt-3.5".to_string(),
            messages: vec![OpenAIMessage {
                role: "user".to_string(),
                content: "Hello".to_string(),
                tool_calls: None,
                tool_call_id: None,
            }],
            stream: true,
            max_tokens: Some(100),
            temperature: Some(0.7),
            tools: None,
        };

        let json = serde_json::to_string(&request).unwrap();
        assert!(json.contains("gpt-3.5"));
        assert!(json.contains("Hello"));
        assert!(json.contains("\"stream\":true"));
    }

    #[test]
    fn test_openai_chunk_deserialization() {
        let json = r#"{
            "id": "chunk_1",
            "object": "chat.completion.chunk",
            "created": 1234567890,
            "model": "gpt-3.5",
            "choices": [{
                "index": 0,
                "delta": {
                    "content": "Hello"
                },
                "finish_reason": null
            }]
        }"#;

        let chunk: OpenAIChunk = serde_json::from_str(json).unwrap();
        assert_eq!(chunk.id, "chunk_1");
        assert_eq!(chunk.choices.len(), 1);
        assert_eq!(chunk.choices[0].delta.content, Some("Hello".to_string()));
    }

    #[test]
    fn test_content_block_serialization() {
        let text_block = ContentBlock::Text(TextBlock::new("Hello"));
        let json = serde_json::to_string(&text_block).unwrap();
        assert!(json.contains("\"type\":\"text\""));
        assert!(json.contains("Hello"));
    }

    #[test]
    fn test_agent_options_clone() {
        let options1 = AgentOptions::builder()
            .model("test-model")
            .base_url("http://localhost:1234/v1")
            .build()
            .unwrap();

        let options2 = options1.clone();
        assert_eq!(options1.model, options2.model);
        assert_eq!(options1.base_url, options2.base_url);
    }

    #[test]
    fn test_temperature_validation() {
        // Temperature too low (< 0.0)
        let result = AgentOptions::builder()
            .model("test-model")
            .base_url("http://localhost:1234/v1")
            .temperature(-0.1)
            .build();
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("temperature"));

        // Temperature too high (> 2.0)
        let result = AgentOptions::builder()
            .model("test-model")
            .base_url("http://localhost:1234/v1")
            .temperature(2.1)
            .build();
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("temperature"));

        // Valid temperatures should work
        let result = AgentOptions::builder()
            .model("test-model")
            .base_url("http://localhost:1234/v1")
            .temperature(0.0)
            .build();
        assert!(result.is_ok());

        let result = AgentOptions::builder()
            .model("test-model")
            .base_url("http://localhost:1234/v1")
            .temperature(2.0)
            .build();
        assert!(result.is_ok());
    }

    #[test]
    fn test_url_validation() {
        // Empty URL should fail
        let result = AgentOptions::builder()
            .model("test-model")
            .base_url("")
            .build();
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("base_url"));

        // Invalid URL format should fail
        let result = AgentOptions::builder()
            .model("test-model")
            .base_url("not-a-url")
            .build();
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("base_url"));

        // Valid URLs should work
        let result = AgentOptions::builder()
            .model("test-model")
            .base_url("http://localhost:1234/v1")
            .build();
        assert!(result.is_ok());

        let result = AgentOptions::builder()
            .model("test-model")
            .base_url("https://api.openai.com/v1")
            .build();
        assert!(result.is_ok());
    }

    #[test]
    fn test_model_validation() {
        // Empty model should fail
        let result = AgentOptions::builder()
            .model("")
            .base_url("http://localhost:1234/v1")
            .build();
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("model"));

        // Whitespace-only model should fail
        let result = AgentOptions::builder()
            .model("   ")
            .base_url("http://localhost:1234/v1")
            .build();
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("model"));
    }

    #[test]
    fn test_max_tokens_validation() {
        // max_tokens = 0 should fail
        let result = AgentOptions::builder()
            .model("test-model")
            .base_url("http://localhost:1234/v1")
            .max_tokens(0)
            .build();
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("max_tokens"));

        // Valid max_tokens should work
        let result = AgentOptions::builder()
            .model("test-model")
            .base_url("http://localhost:1234/v1")
            .max_tokens(1)
            .build();
        assert!(result.is_ok());
    }

    #[test]
    fn test_agent_options_getters() {
        // Test that AgentOptions provides getter methods for field access
        let options = AgentOptions::builder()
            .model("test-model")
            .base_url("http://localhost:1234/v1")
            .system_prompt("Test prompt")
            .api_key("test-key")
            .max_turns(5)
            .max_tokens(1000)
            .temperature(0.5)
            .timeout(30)
            .auto_execute_tools(true)
            .max_tool_iterations(10)
            .build()
            .unwrap();

        // All fields should also be accessible via getter methods
        assert_eq!(options.system_prompt(), "Test prompt");
        assert_eq!(options.model(), "test-model");
        assert_eq!(options.base_url(), "http://localhost:1234/v1");
        assert_eq!(options.api_key(), "test-key");
        assert_eq!(options.max_turns(), 5);
        assert_eq!(options.max_tokens(), Some(1000));
        assert_eq!(options.temperature(), 0.5);
        assert_eq!(options.timeout(), 30);
        assert!(options.auto_execute_tools());
        assert_eq!(options.max_tool_iterations(), 10);
        assert_eq!(options.tools().len(), 0);
    }
}