swiftide_integrations/openai/
mod.rs

//! This module provides integration with `OpenAI`'s API, enabling the use of language models and
//! embeddings within the Swiftide project. It includes the `OpenAI` struct for managing API clients
//! and default options for embedding and prompt models. The module is conditionally compiled based
//! on the "openai" feature flag.

use anyhow::Context as _;
use async_openai::error::{OpenAIError, StreamError};
use async_openai::types::CreateChatCompletionRequestArgs;
use async_openai::types::CreateEmbeddingRequestArgs;
use async_openai::types::ReasoningEffort;
use derive_builder::Builder;
use reqwest::StatusCode;
use reqwest_eventsource::Error as EventSourceError;
use serde_json::Value;
use std::pin::Pin;
use std::sync::Arc;
use swiftide_core::chat_completion::Usage;
use swiftide_core::chat_completion::errors::LanguageModelError;

mod chat_completion;
mod embed;
mod responses_api;
mod simple_prompt;
mod structured_prompt;

// Expose type aliases to simplify downstream use of the OpenAI builder invocations.
pub use async_openai::config::AzureConfig;
pub use async_openai::config::OpenAIConfig;

#[cfg(feature = "tiktoken")]
use crate::tiktoken::TikToken;
#[cfg(feature = "tiktoken")]
use anyhow::Result;
#[cfg(feature = "tiktoken")]
use swiftide_core::Estimatable;
#[cfg(feature = "tiktoken")]
use swiftide_core::EstimateTokens;

/// The `OpenAI` struct encapsulates an `OpenAI` client and default options for embedding and prompt
/// models. It uses the `Builder` pattern for flexible and customizable instantiation.
///
/// # Example
///
/// ```no_run
/// # use swiftide_integrations::openai::{OpenAI, Options};
/// # use swiftide_integrations::openai::OpenAIConfig;
///
/// // Create an OpenAI client with default options. The client will use the OPENAI_API_KEY environment variable.
/// let openai = OpenAI::builder()
///     .default_embed_model("text-embedding-3-small")
///     .default_prompt_model("gpt-4")
///     .build().unwrap();
///
/// // Create an OpenAI client with a custom api key.
/// let openai = OpenAI::builder()
///     .default_embed_model("text-embedding-3-small")
///     .default_prompt_model("gpt-4")
///     .client(async_openai::Client::with_config(async_openai::config::OpenAIConfig::default().with_api_key("my-api-key")))
///     .build().unwrap();
///
/// // Create an OpenAI client with custom options
/// let openai = OpenAI::builder()
///     .default_embed_model("text-embedding-3-small")
///     .default_prompt_model("gpt-4")
///     .default_options(
///         Options::builder()
///           .temperature(1.0)
///           .parallel_tool_calls(false)
///           .user("MyUserId")
///     )
///     .build().unwrap();
/// ```
pub type OpenAI = GenericOpenAI<OpenAIConfig>;
pub type OpenAIBuilder = GenericOpenAIBuilder<OpenAIConfig>;

#[derive(Builder, Clone)]
#[builder(setter(into, strip_option))]
/// Generic client for `OpenAI` APIs.
pub struct GenericOpenAI<
    C: async_openai::config::Config + Default = async_openai::config::OpenAIConfig,
> {
    /// The `OpenAI` client, wrapped in an `Arc` for thread-safe reference counting.
    /// Defaults to a new instance of `async_openai::Client`.
    #[builder(
        default = "Arc::new(async_openai::Client::<C>::default())",
        setter(custom)
    )]
    client: Arc<async_openai::Client<C>>,

    /// Default options for embedding and prompt models.
    #[builder(default, setter(custom))]
    pub(crate) default_options: Options,

    #[cfg(feature = "tiktoken")]
    #[cfg_attr(feature = "tiktoken", builder(default))]
    pub(crate) tiktoken: TikToken,

    /// Convenience option to stream the full response so far rather than just the delta.
    /// Defaults to true, because nobody has time to reconstruct the delta. Disable this to
    /// receive only the delta when performance matters. This only has an effect when streaming
    /// is enabled.
    #[builder(default = true)]
    pub stream_full: bool,

    #[cfg(feature = "metrics")]
    #[builder(default)]
    /// Optional metadata to attach to metrics emitted by this client.
    metric_metadata: Option<std::collections::HashMap<String, String>>,

    /// Opt-in flag to use `OpenAI`'s Responses API instead of the legacy Chat Completions API.
    #[builder(default)]
    pub(crate) use_responses_api: bool,

    /// A callback function that is called when usage information is available.
    #[builder(default, setter(custom))]
    #[allow(clippy::type_complexity)]
    on_usage: Option<
        Arc<
            dyn for<'a> Fn(
                    &'a Usage,
                ) -> Pin<
                    Box<dyn std::future::Future<Output = anyhow::Result<()>> + Send + 'a>,
                > + Send
                + Sync,
        >,
    >,
}

impl<C: async_openai::config::Config + Default + std::fmt::Debug> std::fmt::Debug
    for GenericOpenAI<C>
{
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GenericOpenAI")
            .field("client", &self.client)
            .field("default_options", &self.default_options)
            .field("stream_full", &self.stream_full)
            .field("use_responses_api", &self.use_responses_api)
            .finish_non_exhaustive()
    }
}

/// The `Options` struct holds configuration options for the `OpenAI` client.
/// It includes optional fields for specifying the embedding and prompt models.
#[derive(Debug, Clone, Builder, Default)]
#[builder(setter(strip_option))]
pub struct Options {
    /// The default embedding model to use, if specified.
    #[builder(default, setter(into))]
    pub embed_model: Option<String>,
    /// The default prompt model to use, if specified.
    #[builder(default, setter(into))]
    pub prompt_model: Option<String>,

    #[builder(default)]
    /// Option to enable or disable parallel tool calls for completions.
    ///
    /// At this moment, o1 and o3-mini do not support it and should be set to `None`.
    pub parallel_tool_calls: Option<bool>,

    /// Maximum number of tokens to generate in the completion.
    ///
    /// By default, the limit is disabled
    #[builder(default)]
    pub max_completion_tokens: Option<u32>,

    /// Temperature setting for the model.
    #[builder(default)]
    pub temperature: Option<f32>,

    /// Reasoning effort for reasoning models.
    #[builder(default, setter(into))]
    pub reasoning_effort: Option<ReasoningEffort>,

    /// This feature is in Beta. If specified, our system will make a best effort to sample
    /// deterministically, such that repeated requests with the same seed and parameters should
    /// return the same result. Determinism is not guaranteed, and you should refer to the
    /// `system_fingerprint` response parameter to monitor changes in the backend.
    #[builder(default)]
    pub seed: Option<i64>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they
    /// appear in the text so far, increasing the model’s likelihood to talk about new topics.
    #[builder(default)]
    pub presence_penalty: Option<f32>,

    /// Developer-defined tags and values used for filtering completions in the dashboard.
    #[builder(default, setter(into))]
    pub metadata: Option<serde_json::Value>,

    /// A unique identifier representing your end-user, which can help `OpenAI` to monitor and
    /// detect abuse.
    #[builder(default, setter(into))]
    pub user: Option<String>,

    #[builder(default)]
    /// The number of dimensions the resulting output embeddings should have. Only supported in
    /// text-embedding-3 and later models.
    pub dimensions: Option<u32>,
}

impl Options {
    /// Creates a new `OptionsBuilder` for constructing `Options` instances.
    pub fn builder() -> OptionsBuilder {
        OptionsBuilder::default()
    }

    /// Extends these options with `other`; any field that is set on `other` overrides the
    /// corresponding field on `self`.
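    ///
    /// # Example
    ///
    /// A minimal sketch of merging two sets of options (the model name is illustrative):
    ///
    /// ```no_run
    /// # use swiftide_integrations::openai::Options;
    /// let mut base = Options::builder()
    ///     .prompt_model("gpt-4o-mini")
    ///     .temperature(0.2)
    ///     .build()
    ///     .unwrap();
    ///
    /// let overrides = Options::builder().temperature(1.0).build().unwrap();
    /// base.merge(&overrides);
    ///
    /// assert_eq!(base.temperature, Some(1.0));
    /// assert_eq!(base.prompt_model, Some("gpt-4o-mini".to_string()));
    /// ```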
    pub fn merge(&mut self, other: &Options) {
        if let Some(embed_model) = &other.embed_model {
            self.embed_model = Some(embed_model.clone());
        }
        if let Some(prompt_model) = &other.prompt_model {
            self.prompt_model = Some(prompt_model.clone());
        }
        if let Some(parallel_tool_calls) = other.parallel_tool_calls {
            self.parallel_tool_calls = Some(parallel_tool_calls);
        }
        if let Some(max_completion_tokens) = other.max_completion_tokens {
            self.max_completion_tokens = Some(max_completion_tokens);
        }
        if let Some(temperature) = other.temperature {
            self.temperature = Some(temperature);
        }
        if let Some(reasoning_effort) = &other.reasoning_effort {
            self.reasoning_effort = Some(reasoning_effort.clone());
        }
        if let Some(seed) = other.seed {
            self.seed = Some(seed);
        }
        if let Some(presence_penalty) = other.presence_penalty {
            self.presence_penalty = Some(presence_penalty);
        }
        if let Some(metadata) = &other.metadata {
            self.metadata = Some(metadata.clone());
        }
        if let Some(user) = &other.user {
            self.user = Some(user.clone());
        }
        if let Some(dimensions) = other.dimensions {
            self.dimensions = Some(dimensions);
        }
    }
}

impl From<OptionsBuilder> for Options {
    fn from(value: OptionsBuilder) -> Self {
        Self {
            embed_model: value.embed_model.flatten(),
            prompt_model: value.prompt_model.flatten(),
            parallel_tool_calls: value.parallel_tool_calls.flatten(),
            max_completion_tokens: value.max_completion_tokens.flatten(),
            temperature: value.temperature.flatten(),
            reasoning_effort: value.reasoning_effort.flatten(),
            presence_penalty: value.presence_penalty.flatten(),
            seed: value.seed.flatten(),
            metadata: value.metadata.flatten(),
            user: value.user.flatten(),
            dimensions: value.dimensions.flatten(),
        }
    }
}

impl From<&mut OptionsBuilder> for Options {
    fn from(value: &mut OptionsBuilder) -> Self {
        let value = value.clone();
        Self {
            embed_model: value.embed_model.flatten(),
            prompt_model: value.prompt_model.flatten(),
            parallel_tool_calls: value.parallel_tool_calls.flatten(),
            max_completion_tokens: value.max_completion_tokens.flatten(),
            temperature: value.temperature.flatten(),
            reasoning_effort: value.reasoning_effort.flatten(),
            presence_penalty: value.presence_penalty.flatten(),
            seed: value.seed.flatten(),
            metadata: value.metadata.flatten(),
            user: value.user.flatten(),
            dimensions: value.dimensions.flatten(),
        }
    }
}

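/// Forces `additionalProperties: false` on a tool's JSON schema, which `OpenAI`'s strict tool
/// schemas expect. Errors if the schema is not a JSON object.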
pub(crate) fn ensure_tool_schema_additional_properties_false(
    parameters: &mut Value,
) -> anyhow::Result<()> {
    let object = parameters
        .as_object_mut()
        .context("tool schema must be a JSON object")?;

    object.insert("additionalProperties".to_string(), Value::Bool(false));

    Ok(())
}

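/// Ensures every key under the schema's `properties` is also listed in its `required` array,
/// creating the array if it is missing and preserving any existing entries. Errors if the
/// schema is not a JSON object or if `required` is not an array.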
pub(crate) fn ensure_tool_schema_required_matches_properties(
    parameters: &mut Value,
) -> anyhow::Result<()> {
    let object = parameters
        .as_object_mut()
        .context("tool schema must be a JSON object")?;

    let property_names: Vec<String> = if let Some(Value::Object(map)) = object.get("properties") {
        map.keys().cloned().collect()
    } else {
        object
            .entry("required".to_string())
            .or_insert_with(|| Value::Array(Vec::new()));
        return Ok(());
    };

    let required_entry = object
        .entry("required".to_string())
        .or_insert_with(|| Value::Array(Vec::new()));

    let required_array = required_entry
        .as_array_mut()
        .context("tool schema 'required' must be an array")?;

    for name in property_names {
        let name_ref = name.as_str();
        let already_present = required_array
            .iter()
            .any(|value| value.as_str().is_some_and(|s| s == name_ref));

        if !already_present {
            required_array.push(Value::String(name));
        }
    }

    Ok(())
}

impl OpenAI {
    /// Creates a new `OpenAIBuilder` for constructing `OpenAI` instances.
    pub fn builder() -> OpenAIBuilder {
        OpenAIBuilder::default()
    }
}

impl<C: async_openai::config::Config + Default + Sync + Send + std::fmt::Debug>
    GenericOpenAIBuilder<C>
{
    /// Adds a callback function that will be called when usage information is available.
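    ///
    /// # Example
    ///
    /// A minimal sketch of registering a synchronous callback (the model name is illustrative):
    ///
    /// ```no_run
    /// # use swiftide_integrations::openai::OpenAI;
    /// let openai = OpenAI::builder()
    ///     .default_prompt_model("gpt-4o-mini")
    ///     .on_usage(|usage| {
    ///         // Record or log the token counts reported in `usage` here.
    ///         let _ = usage;
    ///         Ok(())
    ///     })
    ///     .build()
    ///     .unwrap();
    /// ```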
    pub fn on_usage<F>(&mut self, func: F) -> &mut Self
    where
        F: Fn(&Usage) -> anyhow::Result<()> + Send + Sync + 'static,
    {
        let func = Arc::new(func);
        self.on_usage = Some(Some(Arc::new(move |usage: &Usage| {
            let func = func.clone();
            Box::pin(async move { func(usage) })
        })));

        self
    }

    /// Adds an asynchronous callback function that will be called when usage information is
    /// available.
    pub fn on_usage_async<F>(&mut self, func: F) -> &mut Self
    where
        F: for<'a> Fn(
                &'a Usage,
            )
                -> Pin<Box<dyn std::future::Future<Output = anyhow::Result<()>> + Send + 'a>>
            + Send
            + Sync
            + 'static,
    {
        let func = Arc::new(func);
        self.on_usage = Some(Some(Arc::new(move |usage: &Usage| {
            let func = func.clone();
            Box::pin(async move { func(usage).await })
        })));

        self
    }

    /// Sets the `OpenAI` client for the `OpenAI` instance.
    ///
    /// # Parameters
    /// - `client`: The `OpenAI` client to set.
    ///
    /// # Returns
    /// A mutable reference to the `OpenAIBuilder`.
    pub fn client(&mut self, client: async_openai::Client<C>) -> &mut Self {
        self.client = Some(Arc::new(client));
        self
    }

    /// Sets the default embedding model for the `OpenAI` instance.
    ///
    /// # Parameters
    /// - `model`: The embedding model to set.
    ///
    /// # Returns
    /// A mutable reference to the `OpenAIBuilder`.
    pub fn default_embed_model(&mut self, model: impl Into<String>) -> &mut Self {
        if let Some(options) = self.default_options.as_mut() {
            options.embed_model = Some(model.into());
        } else {
            self.default_options = Some(Options {
                embed_model: Some(model.into()),
                ..Default::default()
            });
        }
        self
    }

    /// Sets the `user` field used by `OpenAI` to monitor usage and detect abuse.
    pub fn for_end_user(&mut self, user: impl Into<String>) -> &mut Self {
        if let Some(options) = self.default_options.as_mut() {
            options.user = Some(user.into());
        } else {
            self.default_options = Some(Options {
                user: Some(user.into()),
                ..Default::default()
            });
        }
        self
    }

    /// Enable or disable parallel tool calls for completions.
    ///
    /// Note that currently reasoning models do not support parallel tool calls
    ///
    /// Defaults to `true`
    pub fn parallel_tool_calls(&mut self, parallel_tool_calls: Option<bool>) -> &mut Self {
        if let Some(options) = self.default_options.as_mut() {
            options.parallel_tool_calls = parallel_tool_calls;
        } else {
            self.default_options = Some(Options {
                parallel_tool_calls,
                ..Default::default()
            });
        }
        self
    }

    /// Sets the default prompt model for the `OpenAI` instance.
    ///
    /// # Parameters
    /// - `model`: The prompt model to set.
    ///
    /// # Returns
    /// A mutable reference to the `OpenAIBuilder`.
    pub fn default_prompt_model(&mut self, model: impl Into<String>) -> &mut Self {
        if let Some(options) = self.default_options.as_mut() {
            options.prompt_model = Some(model.into());
        } else {
            self.default_options = Some(Options {
                prompt_model: Some(model.into()),
                ..Default::default()
            });
        }
        self
    }

    /// Sets the default options to use for requests to the `OpenAI` API.
    ///
    /// Merges with any existing options
    pub fn default_options(&mut self, options: impl Into<Options>) -> &mut Self {
        if let Some(existing_options) = self.default_options.as_mut() {
            existing_options.merge(&options.into());
        } else {
            self.default_options = Some(options.into());
        }
        self
    }
}

impl<C: async_openai::config::Config + Default> GenericOpenAI<C> {
    /// Estimates the number of tokens for implementors of the `Estimatable` trait.
    ///
    /// E.g. `String`, `ChatMessage`, etc.
    ///
    /// # Errors
    ///
    /// Errors if tokenization fails in any way
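    ///
    /// # Example
    ///
    /// A minimal sketch, assuming the `tiktoken` feature is enabled and that `String`
    /// implements `Estimatable` as noted above:
    ///
    /// ```no_run
    /// # use swiftide_integrations::openai::OpenAI;
    /// # async fn example() -> anyhow::Result<()> {
    /// let openai = OpenAI::builder().build().unwrap();
    /// let tokens = openai.estimate_tokens("How many tokens is this?".to_string()).await?;
    /// println!("estimated tokens: {tokens}");
    /// # Ok(())
    /// # }
    /// ```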
    #[cfg(feature = "tiktoken")]
    pub async fn estimate_tokens(&self, value: impl Estimatable) -> Result<usize> {
        self.tiktoken.estimate(value).await
    }

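    /// Sets the default prompt model on an already built client, keeping the other options.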
    pub fn with_default_prompt_model(&mut self, model: impl Into<String>) -> &mut Self {
        self.default_options = Options {
            prompt_model: Some(model.into()),
            ..self.default_options.clone()
        };
        self
    }

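    /// Sets the default embedding model on an already built client, keeping the other options.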
    pub fn with_default_embed_model(&mut self, model: impl Into<String>) -> &mut Self {
        self.default_options = Options {
            embed_model: Some(model.into()),
            ..self.default_options.clone()
        };
        self
    }

    /// Retrieve a reference to the inner `OpenAI` client.
    pub fn client(&self) -> &Arc<async_openai::Client<C>> {
        &self.client
    }

    /// Retrieve a reference to the default options for the `OpenAI` instance.
    pub fn options(&self) -> &Options {
        &self.default_options
    }

    /// Retrieve a mutable reference to the default options for the `OpenAI` instance.
    pub fn options_mut(&mut self) -> &mut Options {
        &mut self.default_options
    }

    /// Returns whether the Responses API is enabled for this client.
    pub fn is_responses_api_enabled(&self) -> bool {
        self.use_responses_api
    }

    fn chat_completion_request_defaults(&self) -> CreateChatCompletionRequestArgs {
        let mut args = CreateChatCompletionRequestArgs::default();

        let options = &self.default_options;

        if let Some(parallel_tool_calls) = options.parallel_tool_calls {
            args.parallel_tool_calls(parallel_tool_calls);
        }

        if let Some(max_tokens) = options.max_completion_tokens {
            args.max_completion_tokens(max_tokens);
        }

        if let Some(temperature) = options.temperature {
            args.temperature(temperature);
        }

        if let Some(reasoning_effort) = &options.reasoning_effort {
            args.reasoning_effort(reasoning_effort.clone());
        }

        if let Some(seed) = options.seed {
            args.seed(seed);
        }

        if let Some(presence_penalty) = options.presence_penalty {
            args.presence_penalty(presence_penalty);
        }

        if let Some(metadata) = &options.metadata {
            args.metadata(metadata.clone());
        }

        if let Some(user) = &options.user {
            args.user(user.clone());
        }

        args
    }

    fn embed_request_defaults(&self) -> CreateEmbeddingRequestArgs {
        let mut args = CreateEmbeddingRequestArgs::default();

        let options = &self.default_options;

        if let Some(user) = &options.user {
            args.user(user.clone());
        }

        if let Some(dimensions) = options.dimensions {
            args.dimensions(dimensions);
        }

        args
    }
}

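/// Maps an [`OpenAIError`] onto Swiftide's [`LanguageModelError`], separating permanent
/// failures from transient ones that are worth retrying.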
pub fn openai_error_to_language_model_error(e: OpenAIError) -> LanguageModelError {
    match e {
        OpenAIError::ApiError(api_error) => {
            // If the response is an ApiError, it could be a context length exceeded error
            if api_error.code == Some("context_length_exceeded".to_string()) {
                LanguageModelError::context_length_exceeded(OpenAIError::ApiError(api_error))
            } else {
                LanguageModelError::permanent(OpenAIError::ApiError(api_error))
            }
        }
        OpenAIError::Reqwest(e) => {
            // async_openai passes any network errors as reqwest errors, so we just assume they are
            // recoverable
            LanguageModelError::transient(e)
        }
        OpenAIError::JSONDeserialize(_, _) => {
            // OpenAI generated a non-json response, probably a temporary problem on their side
            // (i.e. reverse proxy can't find an available backend)
            LanguageModelError::transient(e)
        }
        OpenAIError::StreamError(stream_error) => {
            // Note that this will _retry_ the stream. We have to assume that the stream just
            // started if a 429 happens. For future readers, internally the streaming crate
            // (eventsource) already applies backoff.
            if is_rate_limited_stream_error(&stream_error) {
                LanguageModelError::transient(OpenAIError::StreamError(stream_error))
            } else {
                LanguageModelError::permanent(OpenAIError::StreamError(stream_error))
            }
        }
        OpenAIError::FileSaveError(_)
        | OpenAIError::FileReadError(_)
        | OpenAIError::InvalidArgument(_) => LanguageModelError::permanent(e),
    }
}

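/// Returns true if the stream failed because of a 429 (rate limited) response.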
fn is_rate_limited_stream_error(error: &StreamError) -> bool {
    match error {
        StreamError::ReqwestEventSource(inner) => match inner {
            EventSourceError::InvalidStatusCode(status, _) => {
                *status == StatusCode::TOO_MANY_REQUESTS
            }
            EventSourceError::Transport(source) => {
                source.status() == Some(StatusCode::TOO_MANY_REQUESTS)
            }
            _ => false,
        },
        StreamError::UnknownEvent(_) => false,
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use async_openai::error::{ApiError, OpenAIError, StreamError};
    use eventsource_stream::Event;

    /// test default embed model
    #[test]
    fn test_default_embed_and_prompt_model() {
        let openai: OpenAI = OpenAI::builder()
            .default_embed_model("gpt-3")
            .default_prompt_model("gpt-4")
            .build()
            .unwrap();
        assert_eq!(
            openai.default_options.embed_model,
            Some("gpt-3".to_string())
        );
        assert_eq!(
            openai.default_options.prompt_model,
            Some("gpt-4".to_string())
        );

        let openai: OpenAI = OpenAI::builder()
            .default_prompt_model("gpt-4")
            .default_embed_model("gpt-3")
            .build()
            .unwrap();
        assert_eq!(
            openai.default_options.prompt_model,
            Some("gpt-4".to_string())
        );
        assert_eq!(
            openai.default_options.embed_model,
            Some("gpt-3".to_string())
        );
    }

    #[test]
    fn test_use_responses_api_flag() {
        let openai: OpenAI = OpenAI::builder().use_responses_api(true).build().unwrap();

        assert!(openai.is_responses_api_enabled());
    }

    #[test]
    fn test_context_length_exceeded_error() {
        // Create an API error with the context_length_exceeded code
        let api_error = ApiError {
            message: "This model's maximum context length is 8192 tokens".to_string(),
            r#type: Some("invalid_request_error".to_string()),
            param: Some("messages".to_string()),
            code: Some("context_length_exceeded".to_string()),
        };

        let openai_error = OpenAIError::ApiError(api_error);
        let result = openai_error_to_language_model_error(openai_error);

        // Verify it's categorized as ContextLengthExceeded
        match result {
            LanguageModelError::ContextLengthExceeded(_) => {} // Expected
            _ => panic!("Expected ContextLengthExceeded error, got {result:?}"),
        }
    }

    #[test]
    fn test_api_error_permanent() {
        // Create a generic API error (not context length exceeded)
        let api_error = ApiError {
            message: "Invalid API key".to_string(),
            r#type: Some("invalid_request_error".to_string()),
            param: Some("api_key".to_string()),
            code: Some("invalid_api_key".to_string()),
        };

        let openai_error = OpenAIError::ApiError(api_error);
        let result = openai_error_to_language_model_error(openai_error);

        // Verify it's categorized as PermanentError
        match result {
            LanguageModelError::PermanentError(_) => {} // Expected
            _ => panic!("Expected PermanentError, got {result:?}"),
        }
    }

    #[test]
    fn test_file_save_error_is_permanent() {
        // Create a file save error
        let openai_error = OpenAIError::FileSaveError("Failed to save file".to_string());
        let result = openai_error_to_language_model_error(openai_error);

        // Verify it's categorized as PermanentError
        match result {
            LanguageModelError::PermanentError(_) => {} // Expected
            _ => panic!("Expected PermanentError, got {result:?}"),
        }
    }

    #[test]
    fn test_file_read_error_is_permanent() {
        // Create a file read error
        let openai_error = OpenAIError::FileReadError("Failed to read file".to_string());
        let result = openai_error_to_language_model_error(openai_error);

        // Verify it's categorized as PermanentError
        match result {
            LanguageModelError::PermanentError(_) => {} // Expected
            _ => panic!("Expected PermanentError, got {result:?}"),
        }
    }

    #[test]
    fn test_stream_error_is_permanent() {
        // Create a stream error
        let openai_error = OpenAIError::StreamError(StreamError::UnknownEvent(Event::default()));
        let result = openai_error_to_language_model_error(openai_error);

        // Verify it's categorized as PermanentError
        match result {
            LanguageModelError::PermanentError(_) => {} // Expected
            _ => panic!("Expected PermanentError, got {result:?}"),
        }
    }

    #[test]
    fn test_invalid_argument_is_permanent() {
        // Create an invalid argument error
        let openai_error = OpenAIError::InvalidArgument("Invalid argument".to_string());
        let result = openai_error_to_language_model_error(openai_error);

        // Verify it's categorized as PermanentError
        match result {
            LanguageModelError::PermanentError(_) => {} // Expected
            _ => panic!("Expected PermanentError, got {result:?}"),
        }
    }
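
    // Sanity checks for the tool-schema helpers and `Options::merge`; the schema shape,
    // property names, and model names below are illustrative.
    #[test]
    fn test_tool_schema_helpers() {
        let mut schema = serde_json::json!({
            "type": "object",
            "properties": {
                "query": { "type": "string" }
            }
        });

        ensure_tool_schema_additional_properties_false(&mut schema).unwrap();
        ensure_tool_schema_required_matches_properties(&mut schema).unwrap();

        assert_eq!(schema["additionalProperties"], serde_json::json!(false));
        assert_eq!(schema["required"], serde_json::json!(["query"]));
    }

    #[test]
    fn test_options_merge_prefers_other() {
        let mut base = Options::builder()
            .prompt_model("gpt-4o-mini")
            .max_completion_tokens(256)
            .build()
            .unwrap();
        let overrides = Options::builder().max_completion_tokens(512).build().unwrap();

        base.merge(&overrides);

        // Fields set on `overrides` win; everything else keeps its original value.
        assert_eq!(base.max_completion_tokens, Some(512));
        assert_eq!(base.prompt_model, Some("gpt-4o-mini".to_string()));
    }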
}