openai_ergonomic/
client.rs

1//! Client wrapper for ergonomic `OpenAI` API access.
2//!
3//! This module provides a high-level client that wraps the base `OpenAI` client
4//! with ergonomic builders and response handling.
5
6// Allow this lint at module level for interceptor helper methods
7// that require many parameters for comprehensive context passing
8#![allow(clippy::too_many_arguments)]
9
10use crate::interceptor::{
11    AfterResponseContext, BeforeRequestContext, ErrorContext, InterceptorChain,
12};
13use crate::semantic_conventions::operation_names;
14use crate::{
15    builders::{
16        assistants::{AssistantBuilder, MessageBuilder, RunBuilder},
17        audio::{
18            SpeechBuilder, TranscriptionBuilder, TranscriptionRequest, TranslationBuilder,
19            TranslationRequest,
20        },
21        completions::CompletionsBuilder,
22        embeddings::EmbeddingsBuilder,
23        files::{FileDeleteBuilder, FileListBuilder, FileRetrievalBuilder, FileUploadBuilder},
24        images::{
25            ImageEditBuilder, ImageEditRequest, ImageGenerationBuilder, ImageVariationBuilder,
26            ImageVariationRequest,
27        },
28        models::{ModelDeleteBuilder, ModelRetrievalBuilder},
29        moderations::ModerationBuilder,
30        threads::ThreadRequestBuilder,
31        uploads::UploadBuilder,
32        usage::UsageBuilder,
33        Builder, ChatCompletionBuilder, ResponsesBuilder,
34    },
35    config::Config,
36    errors::Result,
37    responses::ChatCompletionResponseWrapper,
38    Error, UploadPurpose,
39};
40use openai_client_base::apis::Error as ApiError;
41use openai_client_base::{
42    apis::{
43        assistants_api, audio_api, batch_api, chat_api, completions_api,
44        configuration::Configuration, embeddings_api, files_api, fine_tuning_api, images_api,
45        models_api, moderations_api, uploads_api, usage_api, vector_stores_api,
46    },
47    models::{
48        AssistantObject, Batch, CreateBatchRequest, CreateChatCompletionRequest,
49        CreateCompletionResponse, CreateEmbeddingResponse, CreateFineTuningJobRequest,
50        CreateModerationResponse, CreateTranscription200Response, CreateTranslation200Response,
51        DeleteAssistantResponse, DeleteFileResponse, DeleteModelResponse,
52        DeleteVectorStoreFileResponse, DeleteVectorStoreResponse, FineTuningJob, ImagesResponse,
53        ListAssistantsResponse, ListBatchesResponse, ListFilesResponse,
54        ListFineTuningJobCheckpointsResponse, ListFineTuningJobEventsResponse,
55        ListMessagesResponse, ListModelsResponse, ListPaginatedFineTuningJobsResponse,
56        ListRunStepsResponse, ListRunsResponse, ListVectorStoreFilesResponse,
57        ListVectorStoresResponse, MessageObject, Model, OpenAiFile, RunObject, RunStepObject,
58        SubmitToolOutputsRunRequestToolOutputsInner, ThreadObject, Upload, UsageResponse,
59        VectorStoreFileObject, VectorStoreObject, VectorStoreSearchResultsPage,
60    },
61};
62use reqwest_middleware::ClientWithMiddleware as HttpClient;
63use std::sync::Arc;
64use std::time::Instant;
65use tokio::time::Duration;
66
// Helper macro to generate interceptor helper methods for sub-clients.
//
// Each sub-client (e.g. `AudioClient`) holds a `client` field referencing the
// parent `Client<T>`; the generated methods mirror the inherent helpers on
// `Client<T>` but reach the interceptor chain through `self.client.interceptors`.
// NOTE(review): no invocation of this macro is visible in this chunk — it is
// presumably invoked for each sub-client type later in the file; confirm.
macro_rules! impl_interceptor_helpers {
    ($client_type:ty) => {
        impl<T: Default + Send + Sync> $client_type {
            /// Helper to call `before_request` hooks.
            ///
            /// Skips all work when no interceptors are registered. If any
            /// interceptor rejects the request, `on_error` hooks are notified
            /// before the rejection is returned to the caller.
            async fn call_before_request(
                &self,
                operation: &str,
                model: &str,
                request_json: &str,
                state: &mut T,
            ) -> Result<()> {
                if !self.client.interceptors.is_empty() {
                    let mut ctx = BeforeRequestContext {
                        operation,
                        model,
                        request_json,
                        state,
                    };
                    if let Err(e) = self.client.interceptors.before_request(&mut ctx).await {
                        // A hook vetoed the request: surface the error to the
                        // on_error hooks before propagating it.
                        let error_ctx = ErrorContext {
                            operation,
                            model: Some(model),
                            request_json: Some(request_json),
                            error: &e,
                            state: Some(state),
                        };
                        self.client.interceptors.on_error(&error_ctx).await;
                        return Err(e);
                    }
                }
                Ok(())
            }

            /// Helper to handle API errors with interceptor hooks.
            ///
            /// Maps the low-level `openai-client-base` error into this crate's
            /// `Error` type and notifies `on_error` hooks with full request
            /// context before handing the mapped error back to the caller.
            async fn handle_api_error<E>(
                &self,
                error: openai_client_base::apis::Error<E>,
                operation: &str,
                model: &str,
                request_json: &str,
                state: &T,
            ) -> Error {
                let error = map_api_error(error);

                if !self.client.interceptors.is_empty() {
                    let error_ctx = ErrorContext {
                        operation,
                        model: Some(model),
                        request_json: Some(request_json),
                        error: &error,
                        state: Some(state),
                    };
                    self.client.interceptors.on_error(&error_ctx).await;
                }

                error
            }

            /// Helper to call `after_response` hooks.
            ///
            /// Serializes the response for interceptor consumption. Hook
            /// failures are logged at `warn` level and never propagated, so
            /// an observability problem cannot fail a successful API call.
            async fn call_after_response<R>(
                &self,
                response: &R,
                operation: &str,
                model: &str,
                request_json: &str,
                state: &T,
                duration: std::time::Duration,
                input_tokens: Option<i64>,
                output_tokens: Option<i64>,
            ) where
                R: serde::Serialize + Sync,
            {
                if !self.client.interceptors.is_empty() {
                    // Serialization failure degrades to an empty string rather
                    // than aborting the hook invocation.
                    let response_json = serde_json::to_string(response).unwrap_or_default();
                    let ctx = AfterResponseContext {
                        operation,
                        model,
                        request_json,
                        response_json: &response_json,
                        duration,
                        input_tokens,
                        output_tokens,
                        state,
                    };
                    if let Err(e) = self.client.interceptors.after_response(&ctx).await {
                        tracing::warn!("Interceptor after_response failed: {}", e);
                    }
                }
            }
        }
    };
}
160
/// Builder for creating a `Client` with interceptors.
///
/// The builder pattern allows you to configure interceptors before the client
/// is created. Once built, the interceptors are immutable, eliminating the need
/// for runtime locking.
///
/// # Example
///
/// ```rust,ignore
/// let client = Client::from_env()?
///     .with_interceptor(Box::new(my_interceptor))
///     .build();
/// ```
pub struct ClientBuilder<T = ()> {
    // Shared ergonomic configuration (API key, base URL, defaults).
    config: Arc<Config>,
    // HTTP client (reqwest with middleware) used for requests.
    http: HttpClient,
    // Low-level openai-client-base configuration derived from `config`.
    base_configuration: Configuration,
    // Interceptor chain; mutable only until `build()` is called.
    interceptors: InterceptorChain<T>,
}
180
/// Main client for interacting with the `OpenAI` API.
///
/// The client provides ergonomic methods for all `OpenAI` API endpoints,
/// with built-in retry logic, rate limiting, error handling, and support
/// for middleware through interceptors.
///
/// Use `Client::from_env()` or `Client::new()` to create a builder, then call
/// `.build()` to create the client.
///
/// # Example
///
/// ```rust,ignore
/// # use openai_ergonomic::{Client, Config};
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::from_env()?.build();
/// // TODO: Add usage example once builders are implemented
/// # Ok(())
/// # }
/// ```
#[derive(Clone)]
pub struct Client<T = ()> {
    // Shared ergonomic configuration (API key, base URL, defaults).
    config: Arc<Config>,
    // HTTP client (reqwest with middleware); exposed via `http_client()`.
    http: HttpClient,
    // Low-level openai-client-base configuration used for API calls.
    base_configuration: Configuration,
    // Immutable interceptor chain, shared across clones of the client.
    interceptors: Arc<InterceptorChain<T>>,
}
208
209// Custom Debug implementation since InterceptorChain doesn't implement Debug
210impl<T> std::fmt::Debug for Client<T> {
211    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
212        f.debug_struct("Client")
213            .field("config", &self.config)
214            .field("http", &"<HttpClient>")
215            .field("base_configuration", &"<Configuration>")
216            .field("interceptors", &"<InterceptorChain>")
217            .finish()
218    }
219}
220
221// Implementation for ClientBuilder with default state type ()
222impl ClientBuilder {
223    /// Create a new client builder with the given configuration.
224    pub fn new(config: Config) -> Result<Self> {
225        // Use custom HTTP client if provided, otherwise build a default one
226        let http_client = if let Some(client) = config.http_client() {
227            client.clone()
228        } else {
229            let reqwest_client = reqwest::Client::builder()
230                .timeout(Duration::from_secs(120)) // Default timeout: 120 seconds
231                .user_agent(format!("openai-ergonomic/{}", env!("CARGO_PKG_VERSION")))
232                .build()
233                .map_err(Error::Http)?;
234            reqwest_middleware::ClientBuilder::new(reqwest_client).build()
235        };
236
237        // Create openai-client-base configuration
238        let mut base_configuration = Configuration::new();
239        base_configuration.bearer_access_token = Some(config.api_key().to_string());
240        if let Some(base_url) = config.base_url() {
241            base_configuration.base_path = base_url.to_string();
242        }
243        if let Some(org_id) = config.organization_id() {
244            base_configuration.user_agent = Some(format!(
245                "openai-ergonomic/{} org/{}",
246                env!("CARGO_PKG_VERSION"),
247                org_id
248            ));
249        }
250
251        Ok(Self {
252            config: Arc::new(config),
253            http: http_client,
254            base_configuration,
255            interceptors: InterceptorChain::new(),
256        })
257    }
258
259    /// Create a new client builder with default configuration from environment variables.
260    pub fn from_env() -> Result<Self> {
261        Self::new(Config::from_env()?)
262    }
263}
264
265// Implementation for ClientBuilder with any state type
266impl<T> ClientBuilder<T> {
267    /// Add an interceptor to the builder.
268    ///
269    /// Creates a new builder with the interceptor's state type. The interceptor provides
270    /// hooks into the request/response lifecycle for observability, logging, and custom
271    /// processing.
272    ///
273    /// Note: This method transforms the builder's type, so it can only be called once.
274    /// For multiple interceptors with the same state type, use a composite interceptor
275    /// or call this method multiple times (each will replace the previous chain).
276    ///
277    /// # Examples
278    ///
279    /// Simple interceptor (no state):
280    /// ```rust,ignore
281    /// use openai_ergonomic::{Client, Interceptor, BeforeRequestContext};
282    ///
283    /// struct LoggingInterceptor;
284    ///
285    /// #[async_trait::async_trait]
286    /// impl Interceptor for LoggingInterceptor {
287    ///     async fn before_request(&self, ctx: &mut BeforeRequestContext<'_>) -> Result<()> {
288    ///         println!("Calling {}", ctx.operation);
289    ///         Ok(())
290    ///     }
291    /// }
292    ///
293    /// let client = Client::from_env()?
294    ///     .with_interceptor(Box::new(LoggingInterceptor))
295    ///     .build();
296    /// ```
297    ///
298    /// Interceptor with custom state:
299    /// ```rust,ignore
300    /// use openai_ergonomic::{Client, LangfuseInterceptor, LangfuseState};
301    ///
302    /// let interceptor = LangfuseInterceptor::new(tracer, config);
303    /// let client: Client<LangfuseState<_>> = Client::from_env()?
304    ///     .with_interceptor(Box::new(interceptor))
305    ///     .build();
306    /// ```
307    #[must_use]
308    pub fn with_interceptor<U>(
309        self,
310        interceptor: Box<dyn crate::interceptor::Interceptor<U>>,
311    ) -> ClientBuilder<U> {
312        let mut new_chain = InterceptorChain::new();
313        new_chain.add(interceptor);
314
315        ClientBuilder {
316            config: self.config,
317            http: self.http,
318            base_configuration: self.base_configuration,
319            interceptors: new_chain,
320        }
321    }
322
323    /// Add an interceptor that uses the same state type.
324    ///
325    /// This allows chaining multiple interceptors with the same state type without
326    /// type transformation.
327    ///
328    /// # Example
329    ///
330    /// ```rust,ignore
331    /// let client = Client::from_env()?
332    ///     .add_interceptor(Box::new(logger))
333    ///     .add_interceptor(Box::new(metrics))
334    ///     .build();
335    /// ```
336    #[must_use]
337    pub fn add_interceptor(
338        mut self,
339        interceptor: Box<dyn crate::interceptor::Interceptor<T>>,
340    ) -> Self {
341        self.interceptors.add(interceptor);
342        self
343    }
344
345    /// Build the client with the configured interceptors.
346    ///
347    /// After building, the interceptors are immutable, eliminating runtime locking overhead.
348    #[must_use]
349    pub fn build(self) -> Client<T> {
350        Client {
351            config: self.config,
352            http: self.http,
353            base_configuration: self.base_configuration,
354            interceptors: Arc::new(self.interceptors),
355        }
356    }
357}
358
// Implementation for Client
impl Client {
    /// Create a new client builder with the given configuration.
    ///
    /// # Errors
    ///
    /// Returns an error if the underlying HTTP client cannot be constructed.
    pub fn builder(config: Config) -> Result<ClientBuilder> {
        ClientBuilder::new(config)
    }

    /// Create a new client builder with default configuration from environment variables.
    ///
    /// # Errors
    ///
    /// Returns an error if the configuration cannot be loaded from the environment.
    pub fn from_env() -> Result<ClientBuilder> {
        ClientBuilder::from_env()
    }
}
371
impl<T> Client<T> {
    /// Get a reference to the client configuration.
    pub fn config(&self) -> &Config {
        &self.config
    }

    /// Get a reference to the HTTP client (reqwest wrapped in middleware).
    pub fn http_client(&self) -> &HttpClient {
        &self.http
    }
}
383
384// Interceptor helper methods
385impl<T: Default + Send + Sync> Client<T> {
386    /// Helper to call `before_request` hooks
387    async fn call_before_request(
388        &self,
389        operation: &str,
390        model: &str,
391        request_json: &str,
392        state: &mut T,
393    ) -> Result<()> {
394        if !self.interceptors.is_empty() {
395            let mut ctx = BeforeRequestContext {
396                operation,
397                model,
398                request_json,
399                state,
400            };
401            if let Err(e) = self.interceptors.before_request(&mut ctx).await {
402                let error_ctx = ErrorContext {
403                    operation,
404                    model: Some(model),
405                    request_json: Some(request_json),
406                    error: &e,
407                    state: Some(state),
408                };
409                self.interceptors.on_error(&error_ctx).await;
410                return Err(e);
411            }
412        }
413        Ok(())
414    }
415
416    /// Helper to handle API errors with interceptor hooks
417    async fn handle_api_error<E>(
418        &self,
419        error: openai_client_base::apis::Error<E>,
420        operation: &str,
421        model: &str,
422        request_json: &str,
423        state: &T,
424    ) -> Error {
425        let error = map_api_error(error);
426
427        if !self.interceptors.is_empty() {
428            let error_ctx = ErrorContext {
429                operation,
430                model: Some(model),
431                request_json: Some(request_json),
432                error: &error,
433                state: Some(state),
434            };
435            self.interceptors.on_error(&error_ctx).await;
436        }
437
438        error
439    }
440
441    /// Helper to call `after_response` hooks
442    async fn call_after_response<R>(
443        &self,
444        response: &R,
445        operation: &str,
446        model: &str,
447        request_json: &str,
448        state: &T,
449        duration: std::time::Duration,
450        input_tokens: Option<i64>,
451        output_tokens: Option<i64>,
452    ) where
453        R: serde::Serialize + Sync,
454    {
455        if !self.interceptors.is_empty() {
456            let response_json = serde_json::to_string(response).unwrap_or_default();
457            let ctx = AfterResponseContext {
458                operation,
459                model,
460                request_json,
461                response_json: &response_json,
462                duration,
463                input_tokens,
464                output_tokens,
465                state,
466            };
467            if let Err(e) = self.interceptors.after_response(&ctx).await {
468                tracing::warn!("Interceptor after_response failed: {}", e);
469            }
470        }
471    }
472}
473
// Chat API methods
impl<T: Default + Send + Sync> Client<T> {
    /// Create a chat completion builder.
    ///
    /// Uses the configured default model, falling back to `"gpt-4"`.
    pub fn chat(&self) -> ChatCompletionBuilder {
        let model = self.config.default_model().unwrap_or("gpt-4");
        ChatCompletionBuilder::new(model)
    }

    /// Create a chat completion with a simple user message.
    pub fn chat_simple(&self, message: impl Into<String>) -> ChatCompletionBuilder {
        self.chat().user(message)
    }

    /// Create a chat completion with system and user messages.
    pub fn chat_with_system(
        &self,
        system: impl Into<String>,
        user: impl Into<String>,
    ) -> ChatCompletionBuilder {
        self.chat().system(system).user(user)
    }

    /// Execute a chat completion request.
    ///
    /// Runs the full interceptor lifecycle: `before_request` hooks first
    /// (which may veto the call), then the API request, then either
    /// `on_error` hooks (on failure) or `after_response` hooks with timing
    /// and token usage (on success).
    ///
    /// # Errors
    ///
    /// Returns an error if an interceptor rejects the request or the API
    /// call fails.
    pub async fn execute_chat(
        &self,
        request: CreateChatCompletionRequest,
    ) -> Result<ChatCompletionResponseWrapper> {
        let mut state = T::default();
        let operation = operation_names::CHAT;
        let model = request.model.clone();
        // Serialization failure degrades to an empty string; interceptors
        // then see an empty request_json rather than the call aborting.
        let request_json = serde_json::to_string(&request).unwrap_or_default();

        // Call before_request hook
        self.call_before_request(operation, &model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match chat_api::create_chat_completion()
            .configuration(&self.base_configuration)
            .create_chat_completion_request(request)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, &model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook; prompt tokens map to input tokens,
        // completion tokens to output tokens.
        self.call_after_response(
            &response,
            operation,
            &model,
            &request_json,
            &state,
            duration,
            response.usage.as_ref().map(|u| i64::from(u.prompt_tokens)),
            response
                .usage
                .as_ref()
                .map(|u| i64::from(u.completion_tokens)),
        )
        .await;

        Ok(ChatCompletionResponseWrapper::new(response))
    }

    /// Execute a chat completion builder.
    ///
    /// # Errors
    ///
    /// Returns an error if the builder cannot produce a valid request or the
    /// API call fails.
    pub async fn send_chat(
        &self,
        builder: ChatCompletionBuilder,
    ) -> Result<ChatCompletionResponseWrapper> {
        let request = builder.build()?;
        self.execute_chat(request).await
    }
}
558
559// Responses API methods
560impl<T: Default + Send + Sync> Client<T> {
561    /// Create a responses builder for structured outputs.
562    pub fn responses(&self) -> ResponsesBuilder {
563        let model = self.config.default_model().unwrap_or("gpt-4");
564        ResponsesBuilder::new(model)
565    }
566
567    /// Create a simple responses request with a user message.
568    pub fn responses_simple(&self, message: impl Into<String>) -> ResponsesBuilder {
569        self.responses().user(message)
570    }
571
572    /// Execute a responses request.
573    pub async fn execute_responses(
574        &self,
575        request: CreateChatCompletionRequest,
576    ) -> Result<ChatCompletionResponseWrapper> {
577        // The Responses API uses the same underlying endpoint as chat
578        self.execute_chat(request).await
579    }
580
581    /// Execute a responses builder.
582    pub async fn send_responses(
583        &self,
584        builder: ResponsesBuilder,
585    ) -> Result<ChatCompletionResponseWrapper> {
586        let request = builder.build()?;
587        self.execute_responses(request).await
588    }
589}
590
// Sub-client accessors for the remaining API endpoint groups. Each sub-client
// borrows the parent client, so it is cheap to construct on demand.
//
// NOTE(review): the original doc comments marked several of these as
// placeholders; at least the audio and embeddings sub-clients have working
// implementations later in this file — confirm the status of the others.
impl<T: Default + Send + Sync> Client<T> {
    /// Get assistants client (placeholder).
    #[must_use]
    pub fn assistants(&self) -> AssistantsClient<'_, T> {
        AssistantsClient { client: self }
    }

    /// Get audio client (placeholder).
    #[must_use]
    pub fn audio(&self) -> AudioClient<'_, T> {
        AudioClient { client: self }
    }

    /// Get embeddings client (placeholder).
    #[must_use]
    pub fn embeddings(&self) -> EmbeddingsClient<'_, T> {
        EmbeddingsClient { client: self }
    }

    /// Get images client (placeholder).
    #[must_use]
    pub fn images(&self) -> ImagesClient<'_, T> {
        ImagesClient { client: self }
    }

    /// Get files client (placeholder).
    #[must_use]
    pub fn files(&self) -> FilesClient<'_, T> {
        FilesClient { client: self }
    }

    /// Get fine-tuning client (placeholder).
    #[must_use]
    pub fn fine_tuning(&self) -> FineTuningClient<'_, T> {
        FineTuningClient { client: self }
    }

    /// Get batch client (placeholder).
    #[must_use]
    pub fn batch(&self) -> BatchClient<'_, T> {
        BatchClient { client: self }
    }

    /// Get vector stores client (placeholder).
    #[must_use]
    pub fn vector_stores(&self) -> VectorStoresClient<'_, T> {
        VectorStoresClient { client: self }
    }

    /// Get moderations client (placeholder).
    #[must_use]
    pub fn moderations(&self) -> ModerationsClient<'_, T> {
        ModerationsClient { client: self }
    }

    /// Get threads client (placeholder).
    #[must_use]
    pub fn threads(&self) -> ThreadsClient<'_, T> {
        ThreadsClient { client: self }
    }

    /// Get uploads client (placeholder).
    #[must_use]
    pub fn uploads(&self) -> UploadsClient<'_, T> {
        UploadsClient { client: self }
    }

    /// Get models client.
    #[must_use]
    pub fn models(&self) -> ModelsClient<'_, T> {
        ModelsClient { client: self }
    }

    /// Get completions client.
    #[must_use]
    pub fn completions(&self) -> CompletionsClient<'_, T> {
        CompletionsClient { client: self }
    }

    /// Get usage client.
    #[must_use]
    pub fn usage(&self) -> UsageClient<'_, T> {
        UsageClient { client: self }
    }
}
677
678impl<T: Default + Send + Sync> AudioClient<'_, T> {
679    /// Create a speech builder for text-to-speech generation.
680    #[must_use]
681    pub fn speech(
682        &self,
683        model: impl Into<String>,
684        input: impl Into<String>,
685        voice: impl Into<String>,
686    ) -> SpeechBuilder {
687        SpeechBuilder::new(model, input, voice)
688    }
689
690    /// Submit a speech synthesis request and return binary audio data.
691    pub async fn create_speech(&self, builder: SpeechBuilder) -> Result<Vec<u8>> {
692        let request = builder.build()?;
693        let mut state = T::default();
694        let operation = operation_names::AUDIO_SPEECH;
695        let model = request.model.clone();
696        let request_json = serde_json::to_string(&request).unwrap_or_default();
697
698        // Call before_request hook
699        self.call_before_request(operation, &model, &request_json, &mut state)
700            .await?;
701
702        let start_time = Instant::now();
703
704        // Make the API call
705        let response = match audio_api::create_speech()
706            .configuration(&self.client.base_configuration)
707            .create_speech_request(request)
708            .call()
709            .await
710        {
711            Ok(resp) => resp,
712            Err(e) => {
713                let error = self
714                    .handle_api_error(e, operation, &model, &request_json, &state)
715                    .await;
716                return Err(error);
717            }
718        };
719
720        let bytes = response.bytes().await.map_err(Error::Http)?;
721        let duration = start_time.elapsed();
722
723        // Call after_response hook (note: no JSON response for audio)
724        let response_json = format!("{{\"size\": {}}}", bytes.len());
725        self.call_after_response(
726            &response_json,
727            operation,
728            &model,
729            &request_json,
730            &state,
731            duration,
732            None,
733            None,
734        )
735        .await;
736
737        Ok(bytes.to_vec())
738    }
739
740    /// Create a transcription builder for speech-to-text workflows.
741    #[must_use]
742    pub fn transcription(
743        &self,
744        file: impl AsRef<std::path::Path>,
745        model: impl Into<String>,
746    ) -> TranscriptionBuilder {
747        TranscriptionBuilder::new(file, model)
748    }
749
750    /// Submit a transcription request.
751    pub async fn create_transcription(
752        &self,
753        builder: TranscriptionBuilder,
754    ) -> Result<CreateTranscription200Response> {
755        let request = builder.build()?;
756        let model_str = request.model.clone();
757        let mut state = T::default();
758        let operation = operation_names::AUDIO_TRANSCRIPTION;
759        // TranscriptionRequest doesn't implement Serialize, so we'll create a simple JSON representation
760        let request_json = format!(r#"{{"model":"{model_str}","file":"<audio_file>"}}"#);
761
762        // Call before_request hook
763        self.call_before_request(operation, &model_str, &request_json, &mut state)
764            .await?;
765
766        let TranscriptionRequest {
767            file,
768            model,
769            language,
770            prompt,
771            response_format,
772            temperature,
773            stream,
774            chunking_strategy,
775            timestamp_granularities,
776            include,
777        } = request;
778
779        let timestamp_strings = timestamp_granularities.as_ref().map(|values| {
780            values
781                .iter()
782                .map(|granularity| granularity.as_str().to_string())
783                .collect::<Vec<_>>()
784        });
785
786        let start_time = Instant::now();
787
788        // Make the API call
789        let response = match audio_api::create_transcription()
790            .configuration(&self.client.base_configuration)
791            .file(file)
792            .model(&model)
793            .maybe_language(language.as_deref())
794            .maybe_prompt(prompt.as_deref())
795            .maybe_response_format(response_format)
796            .maybe_temperature(temperature)
797            .maybe_stream(stream)
798            .maybe_chunking_strategy(chunking_strategy)
799            .maybe_timestamp_granularities(timestamp_strings)
800            .maybe_include(include)
801            .call()
802            .await
803        {
804            Ok(resp) => resp,
805            Err(e) => {
806                let error = self
807                    .handle_api_error(e, operation, &model_str, &request_json, &state)
808                    .await;
809                return Err(error);
810            }
811        };
812
813        let duration = start_time.elapsed();
814
815        // Call after_response hook
816        self.call_after_response(
817            &response,
818            operation,
819            &model_str,
820            &request_json,
821            &state,
822            duration,
823            None,
824            None,
825        )
826        .await;
827
828        Ok(response)
829    }
830
831    /// Create a translation builder for audio-to-English translation.
832    #[must_use]
833    pub fn translation(
834        &self,
835        file: impl AsRef<std::path::Path>,
836        model: impl Into<String>,
837    ) -> TranslationBuilder {
838        TranslationBuilder::new(file, model)
839    }
840
841    /// Submit an audio translation request.
842    pub async fn create_translation(
843        &self,
844        builder: TranslationBuilder,
845    ) -> Result<CreateTranslation200Response> {
846        let request = builder.build()?;
847        let model_str = request.model.clone();
848
849        // Prepare interceptor context
850        let mut state = T::default();
851        let operation = operation_names::AUDIO_TRANSLATION;
852        let request_json = format!(r#"{{"model":"{model_str}","file":"<audio_file>"}}"#);
853
854        // Call before_request hook
855        self.call_before_request(operation, &model_str, &request_json, &mut state)
856            .await?;
857
858        let TranslationRequest {
859            file,
860            model,
861            prompt,
862            response_format,
863            temperature,
864        } = request;
865
866        let response_format_owned = response_format.map(|format| format.to_string());
867
868        let start_time = Instant::now();
869
870        // Make the API call
871        let response = match audio_api::create_translation()
872            .configuration(&self.client.base_configuration)
873            .file(file)
874            .model(&model)
875            .maybe_prompt(prompt.as_deref())
876            .maybe_response_format(response_format_owned.as_deref())
877            .maybe_temperature(temperature)
878            .call()
879            .await
880        {
881            Ok(resp) => resp,
882            Err(e) => {
883                let error = self
884                    .handle_api_error(e, operation, &model_str, &request_json, &state)
885                    .await;
886                return Err(error);
887            }
888        };
889
890        let duration = start_time.elapsed();
891
892        // Call after_response hook
893        self.call_after_response(
894            &response,
895            operation,
896            &model_str,
897            &request_json,
898            &state,
899            duration,
900            None,
901            None,
902        )
903        .await;
904
905        Ok(response)
906    }
907}
908
909impl<T: Default + Send + Sync> EmbeddingsClient<'_, T> {
910    /// Start a builder for creating embeddings requests with the given model.
911    #[must_use]
912    pub fn builder(&self, model: impl Into<String>) -> EmbeddingsBuilder {
913        EmbeddingsBuilder::new(model)
914    }
915
916    /// Convenience helper for embedding a single string input.
917    #[must_use]
918    pub fn text(&self, model: impl Into<String>, input: impl Into<String>) -> EmbeddingsBuilder {
919        self.builder(model).input_text(input)
920    }
921
922    /// Convenience helper for embedding a single tokenized input.
923    #[must_use]
924    pub fn tokens<I>(&self, model: impl Into<String>, tokens: I) -> EmbeddingsBuilder
925    where
926        I: IntoIterator<Item = i32>,
927    {
928        self.builder(model).input_tokens(tokens)
929    }
930
931    /// Execute an embeddings request built with [`EmbeddingsBuilder`].
932    pub async fn create(&self, builder: EmbeddingsBuilder) -> Result<CreateEmbeddingResponse> {
933        let request = builder.build()?;
934
935        // Prepare interceptor context
936        let mut state = T::default();
937        let operation = operation_names::EMBEDDINGS;
938        let model = request.model.clone();
939        let request_json = serde_json::to_string(&request).unwrap_or_default();
940
941        // Call before_request hook
942        self.call_before_request(operation, &model, &request_json, &mut state)
943            .await?;
944
945        let start_time = Instant::now();
946
947        // Make the API call
948        let response = match embeddings_api::create_embedding()
949            .configuration(&self.client.base_configuration)
950            .create_embedding_request(request)
951            .call()
952            .await
953        {
954            Ok(resp) => resp,
955            Err(e) => {
956                let error = self
957                    .handle_api_error(e, operation, &model, &request_json, &state)
958                    .await;
959                return Err(error);
960            }
961        };
962
963        let duration = start_time.elapsed();
964
965        // Call after_response hook
966        self.call_after_response(
967            &response,
968            operation,
969            &model,
970            &request_json,
971            &state,
972            duration,
973            Some(i64::from(response.usage.prompt_tokens)),
974            Some(i64::from(response.usage.total_tokens)),
975        )
976        .await;
977
978        Ok(response)
979    }
980}
981
982impl<T: Default + Send + Sync> ImagesClient<'_, T> {
983    /// Create a builder for image generation requests.
984    #[must_use]
985    pub fn generate(&self, prompt: impl Into<String>) -> ImageGenerationBuilder {
986        ImageGenerationBuilder::new(prompt)
987    }
988
989    /// Execute an image generation request.
990    pub async fn create(&self, builder: ImageGenerationBuilder) -> Result<ImagesResponse> {
991        let request = builder.build()?;
992
993        // Prepare interceptor context
994        let mut state = T::default();
995        let operation = operation_names::IMAGE_GENERATION;
996        let model = request
997            .model
998            .as_ref()
999            .map_or_else(|| "dall-e-2".to_string(), ToString::to_string);
1000        let request_json = serde_json::to_string(&request).unwrap_or_default();
1001
1002        // Call before_request hook
1003        self.call_before_request(operation, &model, &request_json, &mut state)
1004            .await?;
1005
1006        let start_time = Instant::now();
1007
1008        // Make the API call
1009        let response = match images_api::create_image()
1010            .configuration(&self.client.base_configuration)
1011            .create_image_request(request)
1012            .call()
1013            .await
1014        {
1015            Ok(resp) => resp,
1016            Err(e) => {
1017                let error = self
1018                    .handle_api_error(e, operation, &model, &request_json, &state)
1019                    .await;
1020                return Err(error);
1021            }
1022        };
1023
1024        let duration = start_time.elapsed();
1025
1026        // Call after_response hook
1027        self.call_after_response(
1028            &response,
1029            operation,
1030            &model,
1031            &request_json,
1032            &state,
1033            duration,
1034            None,
1035            None,
1036        )
1037        .await;
1038
1039        Ok(response)
1040    }
1041
1042    /// Create an image edit builder using a base image and prompt.
1043    #[must_use]
1044    pub fn edit(
1045        &self,
1046        image: impl AsRef<std::path::Path>,
1047        prompt: impl Into<String>,
1048    ) -> ImageEditBuilder {
1049        ImageEditBuilder::new(image, prompt)
1050    }
1051
1052    /// Execute an image edit request.
1053    pub async fn create_edit(&self, builder: ImageEditBuilder) -> Result<ImagesResponse> {
1054        let request = builder.build()?;
1055        let model_str = request
1056            .model
1057            .as_ref()
1058            .map_or_else(|| "dall-e-2".to_string(), ToString::to_string);
1059
1060        // Prepare interceptor context
1061        let mut state = T::default();
1062        let operation = operation_names::IMAGE_EDIT;
1063        let request_json = format!(
1064            r#"{{"prompt":"{}","model":"{}"}}"#,
1065            request.prompt, model_str
1066        );
1067
1068        // Call before_request hook
1069        self.call_before_request(operation, &model_str, &request_json, &mut state)
1070            .await?;
1071
1072        let ImageEditRequest {
1073            image,
1074            prompt,
1075            mask,
1076            background,
1077            model,
1078            n,
1079            size,
1080            response_format,
1081            output_format,
1082            output_compression,
1083            user,
1084            input_fidelity,
1085            stream,
1086            partial_images,
1087            quality,
1088        } = request;
1089
1090        let start_time = Instant::now();
1091
1092        // Make the API call
1093        let response = match images_api::create_image_edit()
1094            .configuration(&self.client.base_configuration)
1095            .image(image)
1096            .prompt(&prompt)
1097            .maybe_mask(mask)
1098            .maybe_background(background.as_deref())
1099            .maybe_model(model.as_deref())
1100            .maybe_n(n)
1101            .maybe_size(size.as_deref())
1102            .maybe_response_format(response_format.as_deref())
1103            .maybe_output_format(output_format.as_deref())
1104            .maybe_output_compression(output_compression)
1105            .maybe_user(user.as_deref())
1106            .maybe_input_fidelity(input_fidelity)
1107            .maybe_stream(stream)
1108            .maybe_partial_images(partial_images)
1109            .maybe_quality(quality.as_deref())
1110            .call()
1111            .await
1112        {
1113            Ok(resp) => resp,
1114            Err(e) => {
1115                let error = self
1116                    .handle_api_error(e, operation, &model_str, &request_json, &state)
1117                    .await;
1118                return Err(error);
1119            }
1120        };
1121
1122        let duration = start_time.elapsed();
1123
1124        // Call after_response hook
1125        self.call_after_response(
1126            &response,
1127            operation,
1128            &model_str,
1129            &request_json,
1130            &state,
1131            duration,
1132            None,
1133            None,
1134        )
1135        .await;
1136
1137        Ok(response)
1138    }
1139
1140    /// Create an image variation builder.
1141    #[must_use]
1142    pub fn variation(&self, image: impl AsRef<std::path::Path>) -> ImageVariationBuilder {
1143        ImageVariationBuilder::new(image)
1144    }
1145
1146    /// Execute an image variation request.
1147    pub async fn create_variation(&self, builder: ImageVariationBuilder) -> Result<ImagesResponse> {
1148        let request = builder.build()?;
1149        let model_str = request
1150            .model
1151            .as_ref()
1152            .map_or_else(|| "dall-e-2".to_string(), ToString::to_string);
1153
1154        // Prepare interceptor context
1155        let mut state = T::default();
1156        let operation = operation_names::IMAGE_VARIATION;
1157        let request_json = format!(r#"{{"model":"{model_str}"}}"#);
1158
1159        // Call before_request hook
1160        self.call_before_request(operation, &model_str, &request_json, &mut state)
1161            .await?;
1162
1163        let ImageVariationRequest {
1164            image,
1165            model,
1166            n,
1167            response_format,
1168            size,
1169            user,
1170        } = request;
1171
1172        let start_time = Instant::now();
1173
1174        // Make the API call
1175        let response = match images_api::create_image_variation()
1176            .configuration(&self.client.base_configuration)
1177            .image(image)
1178            .maybe_model(model.as_deref())
1179            .maybe_n(n)
1180            .maybe_response_format(response_format.as_deref())
1181            .maybe_size(size.as_deref())
1182            .maybe_user(user.as_deref())
1183            .call()
1184            .await
1185        {
1186            Ok(resp) => resp,
1187            Err(e) => {
1188                let error = self
1189                    .handle_api_error(e, operation, &model_str, &request_json, &state)
1190                    .await;
1191                return Err(error);
1192            }
1193        };
1194
1195        let duration = start_time.elapsed();
1196
1197        // Call after_response hook
1198        self.call_after_response(
1199            &response,
1200            operation,
1201            &model_str,
1202            &request_json,
1203            &state,
1204            duration,
1205            None,
1206            None,
1207        )
1208        .await;
1209
1210        Ok(response)
1211    }
1212}
1213
1214impl<T: Default + Send + Sync> ThreadsClient<'_, T> {
1215    /// Start building a new thread request.
1216    #[must_use]
1217    pub fn builder(&self) -> ThreadRequestBuilder {
1218        ThreadRequestBuilder::new()
1219    }
1220
1221    /// Create a thread using the provided builder.
1222    pub async fn create(&self, builder: ThreadRequestBuilder) -> Result<ThreadObject> {
1223        let request = builder.build()?;
1224
1225        // Prepare interceptor context
1226        let mut state = T::default();
1227        let operation = operation_names::THREAD_CREATE;
1228        let model = "thread"; // No model for thread operations
1229        let request_json = serde_json::to_string(&request).unwrap_or_default();
1230
1231        // Call before_request hook
1232        self.call_before_request(operation, model, &request_json, &mut state)
1233            .await?;
1234
1235        let start_time = Instant::now();
1236
1237        // Make the API call
1238        let response = match assistants_api::create_thread()
1239            .configuration(&self.client.base_configuration)
1240            .maybe_create_thread_request(Some(request))
1241            .call()
1242            .await
1243        {
1244            Ok(resp) => resp,
1245            Err(e) => {
1246                let error = self
1247                    .handle_api_error(e, operation, model, &request_json, &state)
1248                    .await;
1249                return Err(error);
1250            }
1251        };
1252
1253        let duration = start_time.elapsed();
1254
1255        // Call after_response hook
1256        self.call_after_response(
1257            &response,
1258            operation,
1259            model,
1260            &request_json,
1261            &state,
1262            duration,
1263            None,
1264            None,
1265        )
1266        .await;
1267
1268        Ok(response)
1269    }
1270}
1271
1272impl<T: Default + Send + Sync> UploadsClient<'_, T> {
1273    /// Create a new upload builder for the given file metadata.
1274    #[must_use]
1275    pub fn builder(
1276        &self,
1277        filename: impl Into<String>,
1278        purpose: UploadPurpose,
1279        bytes: i32,
1280        mime_type: impl Into<String>,
1281    ) -> UploadBuilder {
1282        UploadBuilder::new(filename, purpose, bytes, mime_type)
1283    }
1284
1285    /// Create an upload session.
1286    pub async fn create(&self, builder: UploadBuilder) -> Result<Upload> {
1287        let request = builder.build()?;
1288
1289        // Prepare interceptor context
1290        let mut state = T::default();
1291        let operation = operation_names::UPLOAD_CREATE;
1292        let model = "upload"; // No model for upload operations
1293        let request_json = serde_json::to_string(&request).unwrap_or_default();
1294
1295        // Call before_request hook
1296        self.call_before_request(operation, model, &request_json, &mut state)
1297            .await?;
1298
1299        let start_time = Instant::now();
1300
1301        // Make the API call
1302        let response = match uploads_api::create_upload()
1303            .configuration(&self.client.base_configuration)
1304            .create_upload_request(request)
1305            .call()
1306            .await
1307        {
1308            Ok(resp) => resp,
1309            Err(e) => {
1310                let error = self
1311                    .handle_api_error(e, operation, model, &request_json, &state)
1312                    .await;
1313                return Err(error);
1314            }
1315        };
1316
1317        let duration = start_time.elapsed();
1318
1319        // Call after_response hook
1320        self.call_after_response(
1321            &response,
1322            operation,
1323            model,
1324            &request_json,
1325            &state,
1326            duration,
1327            None,
1328            None,
1329        )
1330        .await;
1331
1332        Ok(response)
1333    }
1334}
1335
1336impl<T: Default + Send + Sync> ModerationsClient<'_, T> {
1337    /// Create a moderation builder for checking text content.
1338    ///
1339    /// # Example
1340    ///
1341    /// ```rust,ignore
1342    /// use openai_ergonomic::Client;
1343    ///
1344    /// # async fn example() -> openai_ergonomic::Result<()> {
1345    /// let client = Client::from_env()?;
1346    /// let builder = client.moderations().builder("Text to check");
1347    /// let response = client.moderations().create(builder).await?;
1348    /// println!("Flagged: {}", response.results[0].flagged);
1349    /// # Ok(())
1350    /// # }
1351    /// ```
1352    #[must_use]
1353    pub fn builder(&self, input: impl Into<String>) -> ModerationBuilder {
1354        ModerationBuilder::new(input)
1355    }
1356
1357    /// Convenience method for moderating a single text input.
1358    ///
1359    /// # Example
1360    ///
1361    /// ```rust,ignore
1362    /// use openai_ergonomic::Client;
1363    ///
1364    /// # async fn example() -> openai_ergonomic::Result<()> {
1365    /// let client = Client::from_env()?;
1366    /// let builder = client.moderations().check("Hello world");
1367    /// let response = client.moderations().create(builder).await?;
1368    ///
1369    /// if response.results[0].flagged {
1370    ///     println!("Content was flagged for moderation");
1371    /// }
1372    /// # Ok(())
1373    /// # }
1374    /// ```
1375    #[must_use]
1376    pub fn check(&self, input: impl Into<String>) -> ModerationBuilder {
1377        ModerationBuilder::new(input)
1378    }
1379
1380    /// Execute a moderation request built with [`ModerationBuilder`].
1381    ///
1382    /// # Example
1383    ///
1384    /// ```rust,ignore
1385    /// use openai_ergonomic::Client;
1386    ///
1387    /// # async fn example() -> openai_ergonomic::Result<()> {
1388    /// let client = Client::from_env()?;
1389    ///
1390    /// let builder = client
1391    ///     .moderations()
1392    ///     .check("Is this content appropriate?")
1393    ///     .model("text-moderation-latest");
1394    ///
1395    /// let response = client.moderations().create(builder).await?;
1396    ///
1397    /// println!("Model: {}", response.model);
1398    /// for result in response.results {
1399    ///     println!("Flagged: {}", result.flagged);
1400    ///     println!("Hate: {}", result.categories.hate);
1401    ///     println!("Violence: {}", result.categories.violence);
1402    /// }
1403    /// # Ok(())
1404    /// # }
1405    /// ```
1406    ///
1407    /// # Errors
1408    ///
1409    /// Returns an error if the API request fails or the response cannot be parsed.
1410    pub async fn create(&self, builder: ModerationBuilder) -> Result<CreateModerationResponse> {
1411        let request = builder.build()?;
1412
1413        // Prepare interceptor context
1414        let mut state = T::default();
1415        let operation = operation_names::MODERATION;
1416        let model = request
1417            .model
1418            .as_ref()
1419            .map_or_else(|| "text-moderation-latest".to_string(), ToString::to_string);
1420        let request_json = serde_json::to_string(&request).unwrap_or_default();
1421
1422        // Call before_request hook
1423        self.call_before_request(operation, &model, &request_json, &mut state)
1424            .await?;
1425
1426        let start_time = Instant::now();
1427
1428        // Make the API call
1429        let response = match moderations_api::create_moderation()
1430            .configuration(&self.client.base_configuration)
1431            .create_moderation_request(request)
1432            .call()
1433            .await
1434        {
1435            Ok(resp) => resp,
1436            Err(e) => {
1437                let error = self
1438                    .handle_api_error(e, operation, &model, &request_json, &state)
1439                    .await;
1440                return Err(error);
1441            }
1442        };
1443
1444        let duration = start_time.elapsed();
1445
1446        // Call after_response hook
1447        self.call_after_response(
1448            &response,
1449            operation,
1450            &model,
1451            &request_json,
1452            &state,
1453            duration,
1454            None,
1455            None,
1456        )
1457        .await;
1458
1459        Ok(response)
1460    }
1461}
1462
impl<T: Default + Send + Sync> FilesClient<'_, T> {
    /// Upload a file to `OpenAI`.
    ///
    /// The builder's in-memory content is first written to a temporary file
    /// because the underlying API client takes a filesystem path; the temp
    /// file is removed on every exit path (before-hook failure, API error,
    /// and success).
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    /// use openai_ergonomic::builders::files::FilePurpose;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let builder = client
    ///     .files()
    ///     .upload_text("training.jsonl", FilePurpose::FineTune, "training data");
    /// let file = client.files().create(builder).await?;
    /// println!("Uploaded file: {}", file.id);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn upload(&self, builder: FileUploadBuilder) -> Result<OpenAiFile> {
        // Write content to a temporary file
        // NOTE(review): the temp path is derived from the caller-supplied
        // filename, so two concurrent uploads of the same name collide, and
        // a filename containing path separators would escape temp_dir —
        // confirm whether callers are trusted here.
        let temp_dir = std::env::temp_dir();
        let temp_file_path = temp_dir.join(builder.filename());
        std::fs::write(&temp_file_path, builder.content()).map_err(Error::File)?;

        // Convert FilePurpose to openai_client_base::models::FilePurpose
        // (string round-trip keeps this decoupled from the builder's enum).
        let purpose = match builder.purpose().to_string().as_str() {
            "fine-tune" => openai_client_base::models::FilePurpose::FineTune,
            "vision" => openai_client_base::models::FilePurpose::Vision,
            "batch" => openai_client_base::models::FilePurpose::Batch,
            _ => openai_client_base::models::FilePurpose::Assistants, // Default for "assistants" and unknown
        };

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::FILE_UPLOAD;
        let model = "file-upload"; // No model for file operations
        let request_json = format!(
            r#"{{"filename":"{}","purpose":"{}","size":{}}}"#,
            builder.filename(),
            builder.purpose(),
            builder.content().len()
        );

        // Call before_request hook
        if let Err(e) = self
            .call_before_request(operation, model, &request_json, &mut state)
            .await
        {
            // Clean up temp file before returning
            let _ = std::fs::remove_file(&temp_file_path);
            return Err(e);
        }

        let start_time = Instant::now();

        // Make the API call
        let result = match files_api::create_file()
            .configuration(&self.client.base_configuration)
            .file(temp_file_path.clone())
            .purpose(purpose)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                // Clean up temp file
                let _ = std::fs::remove_file(&temp_file_path);
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        // Clean up temporary file (best effort; failure is ignored)
        let _ = std::fs::remove_file(temp_file_path);

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &result,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(result)
    }

    /// Convenience method to upload a file (alias for upload).
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    /// use openai_ergonomic::builders::files::FilePurpose;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let builder = client
    ///     .files()
    ///     .upload_text("data.txt", FilePurpose::Assistants, "content");
    /// let file = client.files().create(builder).await?;
    /// println!("File ID: {}", file.id);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn create(&self, builder: FileUploadBuilder) -> Result<OpenAiFile> {
        self.upload(builder).await
    }

    /// Create a file upload builder from text content.
    #[must_use]
    pub fn upload_text(
        &self,
        filename: impl Into<String>,
        purpose: crate::builders::files::FilePurpose,
        text: impl Into<String>,
    ) -> FileUploadBuilder {
        FileUploadBuilder::from_text(filename, purpose, text)
    }

    /// Create a file upload builder from bytes.
    #[must_use]
    pub fn upload_bytes(
        &self,
        filename: impl Into<String>,
        purpose: crate::builders::files::FilePurpose,
        content: Vec<u8>,
    ) -> FileUploadBuilder {
        FileUploadBuilder::new(filename, purpose, content)
    }

    /// Create a file upload builder from a file path.
    ///
    /// Returns an error if the file at `path` cannot be read.
    pub fn upload_from_path(
        &self,
        path: impl AsRef<std::path::Path>,
        purpose: crate::builders::files::FilePurpose,
    ) -> Result<FileUploadBuilder> {
        FileUploadBuilder::from_path(path, purpose).map_err(Error::File)
    }

    /// List files.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let builder = client.files().list_builder();
    /// let files = client.files().list(builder).await?;
    /// println!("Found {} files", files.data.len());
    /// # Ok(())
    /// # }
    /// ```
    pub async fn list(&self, builder: FileListBuilder) -> Result<ListFilesResponse> {
        let purpose = builder.purpose_ref().map(ToString::to_string);
        let limit = builder.limit_ref();
        let order = builder.order_ref().map(ToString::to_string);

        // Prepare interceptor context. The JSON snapshot substitutes
        // defaults for unset options (10000 / "desc" presumably mirror the
        // API's own defaults — TODO confirm); the actual API call below
        // sends only the options that were set.
        let mut state = T::default();
        let operation = operation_names::FILE_LIST;
        let model = "files";
        let request_json = format!(
            r#"{{"purpose":"{}","limit":{},"order":"{}"}}"#,
            purpose.as_deref().unwrap_or(""),
            limit.unwrap_or(10000),
            order.as_deref().unwrap_or("desc")
        );

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match files_api::list_files()
            .configuration(&self.client.base_configuration)
            .maybe_purpose(purpose.as_deref())
            .maybe_limit(limit)
            .maybe_order(order.as_deref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Create a list files builder.
    #[must_use]
    pub fn list_builder(&self) -> FileListBuilder {
        FileListBuilder::new()
    }

    /// Retrieve information about a specific file.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let file = client.files().retrieve("file-123").await?;
    /// println!("File: {} ({})", file.filename, file.id);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn retrieve(&self, file_id: impl Into<String>) -> Result<OpenAiFile> {
        let file_id = file_id.into();

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::FILE_RETRIEVE;
        let model = "files"; // No model for file operations
        let request_json = format!(r#"{{"file_id":"{file_id}"}}"#);

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match files_api::retrieve_file()
            .configuration(&self.client.base_configuration)
            .file_id(&file_id)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Retrieve information about a file using a builder.
    pub async fn get(&self, builder: FileRetrievalBuilder) -> Result<OpenAiFile> {
        self.retrieve(builder.file_id()).await
    }

    /// Download file content.
    ///
    /// Returns the content as a `String` (the underlying API client exposes
    /// it as text).
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let content = client.files().download("file-123").await?;
    /// println!("Downloaded {} bytes", content.len());
    /// # Ok(())
    /// # }
    /// ```
    pub async fn download(&self, file_id: impl Into<String>) -> Result<String> {
        let file_id = file_id.into();

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::FILE_DOWNLOAD;
        let model = "files"; // No model for file operations
        let request_json = format!(r#"{{"file_id":"{file_id}"}}"#);

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match files_api::download_file()
            .configuration(&self.client.base_configuration)
            .file_id(&file_id)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook. The content itself is not forwarded to
        // interceptors — only a small summary with its size.
        let response_size = format!(r#"{{"size":{}}}"#, response.len());
        self.call_after_response(
            &response_size,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Download file content as bytes.
    ///
    /// NOTE(review): this round-trips through [`Self::download`]'s `String`,
    /// so truly binary (non-UTF-8) file content may not survive intact —
    /// confirm against the base client's decoding behavior.
    pub async fn download_bytes(&self, file_id: impl Into<String>) -> Result<Vec<u8>> {
        let content = self.download(file_id).await?;
        Ok(content.into_bytes())
    }

    /// Delete a file.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let response = client.files().delete("file-123").await?;
    /// println!("Deleted: {}", response.deleted);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn delete(&self, file_id: impl Into<String>) -> Result<DeleteFileResponse> {
        let file_id = file_id.into();

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::FILE_DELETE;
        let model = "files"; // No model for file operations
        let request_json = format!(r#"{{"file_id":"{file_id}"}}"#);

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match files_api::delete_file()
            .configuration(&self.client.base_configuration)
            .file_id(&file_id)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Delete a file using a builder.
    pub async fn remove(&self, builder: FileDeleteBuilder) -> Result<DeleteFileResponse> {
        self.delete(builder.file_id()).await
    }
}
1897
1898impl<T: Default + Send + Sync> VectorStoresClient<'_, T> {
1899    /// Create a new vector store.
1900    ///
1901    /// # Example
1902    ///
1903    /// ```rust,ignore
1904    /// use openai_ergonomic::Client;
1905    /// use openai_ergonomic::builders::vector_stores::VectorStoreBuilder;
1906    ///
1907    /// # async fn example() -> openai_ergonomic::Result<()> {
1908    /// let client = Client::from_env()?;
1909    /// let builder = VectorStoreBuilder::new()
1910    ///     .name("My Knowledge Base")
1911    ///     .add_file("file-123");
1912    /// let vector_store = client.vector_stores().create(builder).await?;
1913    /// println!("Created vector store: {}", vector_store.id);
1914    /// # Ok(())
1915    /// # }
1916    /// ```
1917    pub async fn create(
1918        &self,
1919        builder: crate::builders::vector_stores::VectorStoreBuilder,
1920    ) -> Result<VectorStoreObject> {
1921        use openai_client_base::models::{CreateVectorStoreRequest, VectorStoreExpirationAfter};
1922
1923        let mut request = CreateVectorStoreRequest::new();
1924        request.name = builder.name_ref().map(String::from);
1925        request.file_ids = if builder.has_files() {
1926            Some(builder.file_ids_ref().to_vec())
1927        } else {
1928            None
1929        };
1930
1931        if let Some(expires_after) = builder.expires_after_ref() {
1932            use openai_client_base::models::vector_store_expiration_after::Anchor;
1933            request.expires_after = Some(Box::new(VectorStoreExpirationAfter::new(
1934                Anchor::LastActiveAt,
1935                expires_after.days,
1936            )));
1937        }
1938
1939        if !builder.metadata_ref().is_empty() {
1940            request.metadata = Some(Some(builder.metadata_ref().clone()));
1941        }
1942
1943        // Prepare interceptor context
1944        let mut state = T::default();
1945        let operation = operation_names::VECTOR_STORE_CREATE;
1946        let model = "vector-store";
1947        let request_json = serde_json::to_string(&request).unwrap_or_default();
1948
1949        // Call before_request hook
1950        self.call_before_request(operation, model, &request_json, &mut state)
1951            .await?;
1952
1953        let start_time = Instant::now();
1954
1955        // Make the API call
1956        let response = match vector_stores_api::create_vector_store()
1957            .configuration(&self.client.base_configuration)
1958            .create_vector_store_request(request)
1959            .call()
1960            .await
1961        {
1962            Ok(resp) => resp,
1963            Err(e) => {
1964                let error = self
1965                    .handle_api_error(e, operation, model, &request_json, &state)
1966                    .await;
1967                return Err(error);
1968            }
1969        };
1970
1971        let duration = start_time.elapsed();
1972
1973        // Call after_response hook
1974        self.call_after_response(
1975            &response,
1976            operation,
1977            model,
1978            &request_json,
1979            &state,
1980            duration,
1981            None,
1982            None,
1983        )
1984        .await;
1985
1986        Ok(response)
1987    }
1988
1989    /// List vector stores.
1990    ///
1991    /// # Example
1992    ///
1993    /// ```rust,ignore
1994    /// use openai_ergonomic::Client;
1995    ///
1996    /// # async fn example() -> openai_ergonomic::Result<()> {
1997    /// let client = Client::from_env()?;
1998    /// let response = client.vector_stores().list(Some(20), None, None, None).await?;
1999    /// println!("Found {} vector stores", response.data.len());
2000    /// # Ok(())
2001    /// # }
2002    /// ```
2003    pub async fn list(
2004        &self,
2005        limit: Option<i32>,
2006        order: Option<&str>,
2007        after: Option<&str>,
2008        before: Option<&str>,
2009    ) -> Result<ListVectorStoresResponse> {
2010        // Prepare interceptor context
2011        let mut state = T::default();
2012        let operation = operation_names::VECTOR_STORE_LIST;
2013        let model = "vector-store";
2014        let request_json = format!(
2015            r#"{{"limit":{},"order":"{}"}}"#,
2016            limit.unwrap_or(20),
2017            order.unwrap_or("desc")
2018        );
2019
2020        // Call before_request hook
2021        self.call_before_request(operation, model, &request_json, &mut state)
2022            .await?;
2023
2024        let start_time = Instant::now();
2025
2026        // Make the API call
2027        let response = match vector_stores_api::list_vector_stores()
2028            .configuration(&self.client.base_configuration)
2029            .maybe_limit(limit)
2030            .maybe_order(order)
2031            .maybe_after(after)
2032            .maybe_before(before)
2033            .call()
2034            .await
2035        {
2036            Ok(resp) => resp,
2037            Err(e) => {
2038                let error = self
2039                    .handle_api_error(e, operation, model, &request_json, &state)
2040                    .await;
2041                return Err(error);
2042            }
2043        };
2044
2045        let duration = start_time.elapsed();
2046
2047        // Call after_response hook
2048        self.call_after_response(
2049            &response,
2050            operation,
2051            model,
2052            &request_json,
2053            &state,
2054            duration,
2055            None,
2056            None,
2057        )
2058        .await;
2059
2060        Ok(response)
2061    }
2062
2063    /// Get a specific vector store by ID.
2064    ///
2065    /// # Example
2066    ///
2067    /// ```rust,ignore
2068    /// use openai_ergonomic::Client;
2069    ///
2070    /// # async fn example() -> openai_ergonomic::Result<()> {
2071    /// let client = Client::from_env()?;
2072    /// let vector_store = client.vector_stores().get("vs_123").await?;
2073    /// println!("Vector store: {}", vector_store.name);
2074    /// # Ok(())
2075    /// # }
2076    /// ```
2077    pub async fn get(&self, vector_store_id: impl Into<String>) -> Result<VectorStoreObject> {
2078        let id = vector_store_id.into();
2079
2080        // Prepare interceptor context
2081        let mut state = T::default();
2082        let operation = operation_names::VECTOR_STORE_RETRIEVE;
2083        let model = "vector-store";
2084        let request_json = format!(r#"{{"vector_store_id":"{id}"}}"#);
2085
2086        // Call before_request hook
2087        self.call_before_request(operation, model, &request_json, &mut state)
2088            .await?;
2089
2090        let start_time = Instant::now();
2091
2092        // Make the API call
2093        let response = match vector_stores_api::get_vector_store()
2094            .configuration(&self.client.base_configuration)
2095            .vector_store_id(&id)
2096            .call()
2097            .await
2098        {
2099            Ok(resp) => resp,
2100            Err(e) => {
2101                let error = self
2102                    .handle_api_error(e, operation, model, &request_json, &state)
2103                    .await;
2104                return Err(error);
2105            }
2106        };
2107
2108        let duration = start_time.elapsed();
2109
2110        // Call after_response hook
2111        self.call_after_response(
2112            &response,
2113            operation,
2114            model,
2115            &request_json,
2116            &state,
2117            duration,
2118            None,
2119            None,
2120        )
2121        .await;
2122
2123        Ok(response)
2124    }
2125
2126    /// Update a vector store.
2127    ///
2128    /// # Example
2129    ///
2130    /// ```rust,ignore
2131    /// use openai_ergonomic::Client;
2132    /// use openai_ergonomic::builders::vector_stores::VectorStoreBuilder;
2133    ///
2134    /// # async fn example() -> openai_ergonomic::Result<()> {
2135    /// let client = Client::from_env()?;
2136    /// let builder = VectorStoreBuilder::new()
2137    ///     .name("Updated Name")
2138    ///     .metadata("updated", "true");
2139    /// let vector_store = client.vector_stores().update("vs_123", builder).await?;
2140    /// println!("Updated: {}", vector_store.name);
2141    /// # Ok(())
2142    /// # }
2143    /// ```
2144    pub async fn update(
2145        &self,
2146        vector_store_id: impl Into<String>,
2147        builder: crate::builders::vector_stores::VectorStoreBuilder,
2148    ) -> Result<VectorStoreObject> {
2149        use openai_client_base::models::{UpdateVectorStoreRequest, VectorStoreExpirationAfter};
2150
2151        let id = vector_store_id.into();
2152        let mut request = UpdateVectorStoreRequest::new();
2153        request.name = builder.name_ref().map(String::from);
2154
2155        if let Some(expires_after) = builder.expires_after_ref() {
2156            use openai_client_base::models::vector_store_expiration_after::Anchor;
2157            request.expires_after = Some(Box::new(VectorStoreExpirationAfter::new(
2158                Anchor::LastActiveAt,
2159                expires_after.days,
2160            )));
2161        }
2162
2163        if !builder.metadata_ref().is_empty() {
2164            request.metadata = Some(Some(builder.metadata_ref().clone()));
2165        }
2166
2167        // Prepare interceptor context
2168        let mut state = T::default();
2169        let operation = operation_names::VECTOR_STORE_UPDATE;
2170        let model = "vector-store";
2171        let request_json = serde_json::to_string(&request).unwrap_or_default();
2172
2173        // Call before_request hook
2174        self.call_before_request(operation, model, &request_json, &mut state)
2175            .await?;
2176
2177        let start_time = Instant::now();
2178
2179        // Make the API call
2180        let response = match vector_stores_api::modify_vector_store()
2181            .configuration(&self.client.base_configuration)
2182            .vector_store_id(&id)
2183            .update_vector_store_request(request)
2184            .call()
2185            .await
2186        {
2187            Ok(resp) => resp,
2188            Err(e) => {
2189                let error = self
2190                    .handle_api_error(e, operation, model, &request_json, &state)
2191                    .await;
2192                return Err(error);
2193            }
2194        };
2195
2196        let duration = start_time.elapsed();
2197
2198        // Call after_response hook
2199        self.call_after_response(
2200            &response,
2201            operation,
2202            model,
2203            &request_json,
2204            &state,
2205            duration,
2206            None,
2207            None,
2208        )
2209        .await;
2210
2211        Ok(response)
2212    }
2213
2214    /// Delete a vector store.
2215    ///
2216    /// # Example
2217    ///
2218    /// ```rust,ignore
2219    /// use openai_ergonomic::Client;
2220    ///
2221    /// # async fn example() -> openai_ergonomic::Result<()> {
2222    /// let client = Client::from_env()?;
2223    /// let response = client.vector_stores().delete("vs_123").await?;
2224    /// println!("Deleted: {}", response.deleted);
2225    /// # Ok(())
2226    /// # }
2227    /// ```
2228    pub async fn delete(
2229        &self,
2230        vector_store_id: impl Into<String>,
2231    ) -> Result<DeleteVectorStoreResponse> {
2232        let id = vector_store_id.into();
2233
2234        // Prepare interceptor context
2235        let mut state = T::default();
2236        let operation = operation_names::VECTOR_STORE_DELETE;
2237        let model = "vector-store";
2238        let request_json = format!(r#"{{"vector_store_id":"{id}"}}"#);
2239
2240        // Call before_request hook
2241        self.call_before_request(operation, model, &request_json, &mut state)
2242            .await?;
2243
2244        let start_time = Instant::now();
2245
2246        // Make the API call
2247        let response = match vector_stores_api::delete_vector_store()
2248            .configuration(&self.client.base_configuration)
2249            .vector_store_id(&id)
2250            .call()
2251            .await
2252        {
2253            Ok(resp) => resp,
2254            Err(e) => {
2255                let error = self
2256                    .handle_api_error(e, operation, model, &request_json, &state)
2257                    .await;
2258                return Err(error);
2259            }
2260        };
2261
2262        let duration = start_time.elapsed();
2263
2264        // Call after_response hook
2265        self.call_after_response(
2266            &response,
2267            operation,
2268            model,
2269            &request_json,
2270            &state,
2271            duration,
2272            None,
2273            None,
2274        )
2275        .await;
2276
2277        Ok(response)
2278    }
2279
2280    /// Add a file to a vector store.
2281    ///
2282    /// # Example
2283    ///
2284    /// ```rust,ignore
2285    /// use openai_ergonomic::Client;
2286    ///
2287    /// # async fn example() -> openai_ergonomic::Result<()> {
2288    /// let client = Client::from_env()?;
2289    /// let file = client.vector_stores().add_file("vs_123", "file-456").await?;
2290    /// println!("Added file: {}", file.id);
2291    /// # Ok(())
2292    /// # }
2293    /// ```
2294    pub async fn add_file(
2295        &self,
2296        vector_store_id: impl Into<String>,
2297        file_id: impl Into<String>,
2298    ) -> Result<VectorStoreFileObject> {
2299        use openai_client_base::models::CreateVectorStoreFileRequest;
2300
2301        let vs_id = vector_store_id.into();
2302        let f_id = file_id.into();
2303        let request = CreateVectorStoreFileRequest::new(f_id.clone());
2304
2305        // Prepare interceptor context
2306        let mut state = T::default();
2307        let operation = operation_names::VECTOR_STORE_FILE_ADD;
2308        let model = "vector-store";
2309        let request_json = format!(r#"{{"vector_store_id":"{vs_id}","file_id":"{f_id}"}}"#);
2310
2311        // Call before_request hook
2312        self.call_before_request(operation, model, &request_json, &mut state)
2313            .await?;
2314
2315        let start_time = Instant::now();
2316
2317        // Make the API call
2318        let response = match vector_stores_api::create_vector_store_file()
2319            .configuration(&self.client.base_configuration)
2320            .vector_store_id(&vs_id)
2321            .create_vector_store_file_request(request)
2322            .call()
2323            .await
2324        {
2325            Ok(resp) => resp,
2326            Err(e) => {
2327                let error = self
2328                    .handle_api_error(e, operation, model, &request_json, &state)
2329                    .await;
2330                return Err(error);
2331            }
2332        };
2333
2334        let duration = start_time.elapsed();
2335
2336        // Call after_response hook
2337        self.call_after_response(
2338            &response,
2339            operation,
2340            model,
2341            &request_json,
2342            &state,
2343            duration,
2344            None,
2345            None,
2346        )
2347        .await;
2348
2349        Ok(response)
2350    }
2351
2352    /// List files in a vector store.
2353    ///
2354    /// # Example
2355    ///
2356    /// ```rust,ignore
2357    /// use openai_ergonomic::Client;
2358    ///
2359    /// # async fn example() -> openai_ergonomic::Result<()> {
2360    /// let client = Client::from_env()?;
2361    /// let response = client.vector_stores().list_files("vs_123", None, None, None, None, None).await?;
2362    /// println!("Found {} files", response.data.len());
2363    /// # Ok(())
2364    /// # }
2365    /// ```
2366    pub async fn list_files(
2367        &self,
2368        vector_store_id: impl Into<String>,
2369        limit: Option<i32>,
2370        order: Option<&str>,
2371        after: Option<&str>,
2372        before: Option<&str>,
2373        filter: Option<&str>,
2374    ) -> Result<ListVectorStoreFilesResponse> {
2375        let id = vector_store_id.into();
2376
2377        // Prepare interceptor context
2378        let mut state = T::default();
2379        let operation = operation_names::VECTOR_STORE_FILE_LIST;
2380        let model = "vector-store";
2381        let request_json = format!(r#"{{"vector_store_id":"{id}"}}"#);
2382
2383        // Call before_request hook
2384        self.call_before_request(operation, model, &request_json, &mut state)
2385            .await?;
2386
2387        let start_time = Instant::now();
2388
2389        // Make the API call
2390        let response = match vector_stores_api::list_vector_store_files()
2391            .configuration(&self.client.base_configuration)
2392            .vector_store_id(&id)
2393            .maybe_limit(limit)
2394            .maybe_order(order)
2395            .maybe_after(after)
2396            .maybe_before(before)
2397            .maybe_filter(filter)
2398            .call()
2399            .await
2400        {
2401            Ok(resp) => resp,
2402            Err(e) => {
2403                let error = self
2404                    .handle_api_error(e, operation, model, &request_json, &state)
2405                    .await;
2406                return Err(error);
2407            }
2408        };
2409
2410        let duration = start_time.elapsed();
2411
2412        // Call after_response hook
2413        self.call_after_response(
2414            &response,
2415            operation,
2416            model,
2417            &request_json,
2418            &state,
2419            duration,
2420            None,
2421            None,
2422        )
2423        .await;
2424
2425        Ok(response)
2426    }
2427
2428    /// Get a file from a vector store.
2429    ///
2430    /// # Example
2431    ///
2432    /// ```rust,ignore
2433    /// use openai_ergonomic::Client;
2434    ///
2435    /// # async fn example() -> openai_ergonomic::Result<()> {
2436    /// let client = Client::from_env()?;
2437    /// let file = client.vector_stores().get_file("vs_123", "file-456").await?;
2438    /// println!("File: {}", file.id);
2439    /// # Ok(())
2440    /// # }
2441    /// ```
2442    pub async fn get_file(
2443        &self,
2444        vector_store_id: impl Into<String>,
2445        file_id: impl Into<String>,
2446    ) -> Result<VectorStoreFileObject> {
2447        let vs_id = vector_store_id.into();
2448        let f_id = file_id.into();
2449
2450        // Prepare interceptor context
2451        let mut state = T::default();
2452        let operation = operation_names::VECTOR_STORE_FILE_RETRIEVE;
2453        let model = "vector-store";
2454        let request_json = format!(r#"{{"vector_store_id":"{vs_id}","file_id":"{f_id}"}}"#);
2455
2456        // Call before_request hook
2457        self.call_before_request(operation, model, &request_json, &mut state)
2458            .await?;
2459
2460        let start_time = Instant::now();
2461
2462        // Make the API call
2463        let response = match vector_stores_api::get_vector_store_file()
2464            .configuration(&self.client.base_configuration)
2465            .vector_store_id(&vs_id)
2466            .file_id(&f_id)
2467            .call()
2468            .await
2469        {
2470            Ok(resp) => resp,
2471            Err(e) => {
2472                let error = self
2473                    .handle_api_error(e, operation, model, &request_json, &state)
2474                    .await;
2475                return Err(error);
2476            }
2477        };
2478
2479        let duration = start_time.elapsed();
2480
2481        // Call after_response hook
2482        self.call_after_response(
2483            &response,
2484            operation,
2485            model,
2486            &request_json,
2487            &state,
2488            duration,
2489            None,
2490            None,
2491        )
2492        .await;
2493
2494        Ok(response)
2495    }
2496
2497    /// Delete a file from a vector store.
2498    ///
2499    /// # Example
2500    ///
2501    /// ```rust,ignore
2502    /// use openai_ergonomic::Client;
2503    ///
2504    /// # async fn example() -> openai_ergonomic::Result<()> {
2505    /// let client = Client::from_env()?;
2506    /// let response = client.vector_stores().delete_file("vs_123", "file-456").await?;
2507    /// println!("Deleted: {}", response.deleted);
2508    /// # Ok(())
2509    /// # }
2510    /// ```
2511    pub async fn delete_file(
2512        &self,
2513        vector_store_id: impl Into<String>,
2514        file_id: impl Into<String>,
2515    ) -> Result<DeleteVectorStoreFileResponse> {
2516        let vs_id = vector_store_id.into();
2517        let f_id = file_id.into();
2518
2519        // Prepare interceptor context
2520        let mut state = T::default();
2521        let operation = operation_names::VECTOR_STORE_FILE_DELETE;
2522        let model = "vector-store";
2523        let request_json = format!(r#"{{"vector_store_id":"{vs_id}","file_id":"{f_id}"}}"#);
2524
2525        // Call before_request hook
2526        self.call_before_request(operation, model, &request_json, &mut state)
2527            .await?;
2528
2529        let start_time = Instant::now();
2530
2531        // Make the API call
2532        let response = match vector_stores_api::delete_vector_store_file()
2533            .configuration(&self.client.base_configuration)
2534            .vector_store_id(&vs_id)
2535            .file_id(&f_id)
2536            .call()
2537            .await
2538        {
2539            Ok(resp) => resp,
2540            Err(e) => {
2541                let error = self
2542                    .handle_api_error(e, operation, model, &request_json, &state)
2543                    .await;
2544                return Err(error);
2545            }
2546        };
2547
2548        let duration = start_time.elapsed();
2549
2550        // Call after_response hook
2551        self.call_after_response(
2552            &response,
2553            operation,
2554            model,
2555            &request_json,
2556            &state,
2557            duration,
2558            None,
2559            None,
2560        )
2561        .await;
2562
2563        Ok(response)
2564    }
2565
2566    /// Search a vector store.
2567    ///
2568    /// # Example
2569    ///
2570    /// ```rust,ignore
2571    /// use openai_ergonomic::Client;
2572    /// use openai_ergonomic::builders::vector_stores::VectorStoreSearchBuilder;
2573    ///
2574    /// # async fn example() -> openai_ergonomic::Result<()> {
2575    /// let client = Client::from_env()?;
2576    /// let builder = VectorStoreSearchBuilder::new("vs_123", "machine learning concepts");
2577    /// let results = client.vector_stores().search(builder).await?;
2578    /// println!("Found {} results", results.data.len());
2579    /// # Ok(())
2580    /// # }
2581    /// ```
2582    pub async fn search(
2583        &self,
2584        builder: crate::builders::vector_stores::VectorStoreSearchBuilder,
2585    ) -> Result<VectorStoreSearchResultsPage> {
2586        use openai_client_base::models::{VectorStoreSearchRequest, VectorStoreSearchRequestQuery};
2587
2588        let query = VectorStoreSearchRequestQuery::new_text(builder.query().to_string());
2589        let mut request = VectorStoreSearchRequest::new(query);
2590
2591        if let Some(limit) = builder.limit_ref() {
2592            request.max_num_results = Some(limit);
2593        }
2594
2595        let vs_id = builder.vector_store_id().to_string();
2596
2597        // Prepare interceptor context
2598        let mut state = T::default();
2599        let operation = operation_names::VECTOR_STORE_SEARCH;
2600        let model = "vector-store";
2601        let request_json = format!(
2602            r#"{{"vector_store_id":"{}","query":"{}"}}"#,
2603            vs_id,
2604            builder.query()
2605        );
2606
2607        // Call before_request hook
2608        self.call_before_request(operation, model, &request_json, &mut state)
2609            .await?;
2610
2611        let start_time = Instant::now();
2612
2613        // Make the API call
2614        let response = match vector_stores_api::search_vector_store()
2615            .configuration(&self.client.base_configuration)
2616            .vector_store_id(&vs_id)
2617            .vector_store_search_request(request)
2618            .call()
2619            .await
2620        {
2621            Ok(resp) => resp,
2622            Err(e) => {
2623                let error = self
2624                    .handle_api_error(e, operation, model, &request_json, &state)
2625                    .await;
2626                return Err(error);
2627            }
2628        };
2629
2630        let duration = start_time.elapsed();
2631
2632        // Call after_response hook
2633        self.call_after_response(
2634            &response,
2635            operation,
2636            model,
2637            &request_json,
2638            &state,
2639            duration,
2640            None,
2641            None,
2642        )
2643        .await;
2644
2645        Ok(response)
2646    }
2647}
2648
2649impl<T: Default + Send + Sync> BatchClient<'_, T> {
2650    /// Create a new batch job.
2651    ///
2652    /// # Example
2653    ///
2654    /// ```rust,ignore
2655    /// use openai_ergonomic::Client;
2656    /// use openai_ergonomic::builders::batch::{BatchJobBuilder, BatchEndpoint};
2657    ///
2658    /// # async fn example() -> openai_ergonomic::Result<()> {
2659    /// let client = Client::from_env()?;
2660    /// let builder = BatchJobBuilder::new("file-batch-input", BatchEndpoint::ChatCompletions);
2661    /// let batch = client.batch().create(builder).await?;
2662    /// println!("Created batch: {}", batch.id);
2663    /// # Ok(())
2664    /// # }
2665    /// ```
2666    pub async fn create(&self, builder: crate::builders::batch::BatchJobBuilder) -> Result<Batch> {
2667        use openai_client_base::models::create_batch_request::{CompletionWindow, Endpoint};
2668
2669        // Map our endpoint to the base client enum
2670        let endpoint = match builder.endpoint() {
2671            crate::builders::batch::BatchEndpoint::ChatCompletions => {
2672                Endpoint::SlashV1SlashChatSlashCompletions
2673            }
2674            crate::builders::batch::BatchEndpoint::Embeddings => Endpoint::SlashV1SlashEmbeddings,
2675            crate::builders::batch::BatchEndpoint::Completions => Endpoint::SlashV1SlashCompletions,
2676        };
2677
2678        let mut request = CreateBatchRequest::new(
2679            builder.input_file_id().to_string(),
2680            endpoint,
2681            CompletionWindow::Variant24h,
2682        );
2683
2684        if builder.has_metadata() {
2685            request.metadata = Some(Some(builder.metadata_ref().clone()));
2686        }
2687
2688        // Prepare interceptor context
2689        let mut state = T::default();
2690        let operation = operation_names::BATCH_CREATE;
2691        let model = "batch";
2692        let request_json = serde_json::to_string(&request).unwrap_or_default();
2693
2694        // Call before_request hook
2695        self.call_before_request(operation, model, &request_json, &mut state)
2696            .await?;
2697
2698        let start_time = Instant::now();
2699
2700        // Make the API call
2701        let response = match batch_api::create_batch()
2702            .configuration(&self.client.base_configuration)
2703            .create_batch_request(request)
2704            .call()
2705            .await
2706        {
2707            Ok(resp) => resp,
2708            Err(e) => {
2709                let error = self
2710                    .handle_api_error(e, operation, model, &request_json, &state)
2711                    .await;
2712                return Err(error);
2713            }
2714        };
2715
2716        let duration = start_time.elapsed();
2717
2718        // Call after_response hook
2719        self.call_after_response(
2720            &response,
2721            operation,
2722            model,
2723            &request_json,
2724            &state,
2725            duration,
2726            None,
2727            None,
2728        )
2729        .await;
2730
2731        Ok(response)
2732    }
2733
2734    /// List batch jobs.
2735    ///
2736    /// # Example
2737    ///
2738    /// ```rust,ignore
2739    /// use openai_ergonomic::Client;
2740    ///
2741    /// # async fn example() -> openai_ergonomic::Result<()> {
2742    /// let client = Client::from_env()?;
2743    /// let response = client.batch().list(None, Some(20)).await?;
2744    /// println!("Found {} batches", response.data.len());
2745    /// # Ok(())
2746    /// # }
2747    /// ```
2748    pub async fn list(
2749        &self,
2750        after: Option<&str>,
2751        limit: Option<i32>,
2752    ) -> Result<ListBatchesResponse> {
2753        // Prepare interceptor context
2754        let mut state = T::default();
2755        let operation = operation_names::BATCH_LIST;
2756        let model = "batch";
2757        let request_json = format!("{{\"after\":{after:?},\"limit\":{limit:?}}}");
2758
2759        // Call before_request hook
2760        self.call_before_request(operation, model, &request_json, &mut state)
2761            .await?;
2762
2763        let start_time = Instant::now();
2764
2765        // Make the API call
2766        let response = match batch_api::list_batches()
2767            .configuration(&self.client.base_configuration)
2768            .maybe_after(after)
2769            .maybe_limit(limit)
2770            .call()
2771            .await
2772        {
2773            Ok(resp) => resp,
2774            Err(e) => {
2775                let error = self
2776                    .handle_api_error(e, operation, model, &request_json, &state)
2777                    .await;
2778                return Err(error);
2779            }
2780        };
2781
2782        let duration = start_time.elapsed();
2783
2784        // Call after_response hook
2785        self.call_after_response(
2786            &response,
2787            operation,
2788            model,
2789            &request_json,
2790            &state,
2791            duration,
2792            None,
2793            None,
2794        )
2795        .await;
2796
2797        Ok(response)
2798    }
2799
2800    /// Get a specific batch job.
2801    ///
2802    /// # Example
2803    ///
2804    /// ```rust,ignore
2805    /// use openai_ergonomic::Client;
2806    ///
2807    /// # async fn example() -> openai_ergonomic::Result<()> {
2808    /// let client = Client::from_env()?;
2809    /// let batch = client.batch().get("batch_123").await?;
2810    /// println!("Batch status: {}", batch.status);
2811    /// # Ok(())
2812    /// # }
2813    /// ```
2814    pub async fn get(&self, batch_id: impl Into<String>) -> Result<Batch> {
2815        let id = batch_id.into();
2816
2817        // Prepare interceptor context
2818        let mut state = T::default();
2819        let operation = operation_names::BATCH_RETRIEVE;
2820        let model = "batch";
2821        let request_json = format!("{{\"batch_id\":\"{id}\"}}");
2822
2823        // Call before_request hook
2824        self.call_before_request(operation, model, &request_json, &mut state)
2825            .await?;
2826
2827        let start_time = Instant::now();
2828
2829        // Make the API call
2830        let response = match batch_api::retrieve_batch()
2831            .configuration(&self.client.base_configuration)
2832            .batch_id(&id)
2833            .call()
2834            .await
2835        {
2836            Ok(resp) => resp,
2837            Err(e) => {
2838                let error = self
2839                    .handle_api_error(e, operation, model, &request_json, &state)
2840                    .await;
2841                return Err(error);
2842            }
2843        };
2844
2845        let duration = start_time.elapsed();
2846
2847        // Call after_response hook
2848        self.call_after_response(
2849            &response,
2850            operation,
2851            model,
2852            &request_json,
2853            &state,
2854            duration,
2855            None,
2856            None,
2857        )
2858        .await;
2859
2860        Ok(response)
2861    }
2862
2863    /// Cancel a batch job.
2864    ///
2865    /// # Example
2866    ///
2867    /// ```rust,ignore
2868    /// use openai_ergonomic::Client;
2869    ///
2870    /// # async fn example() -> openai_ergonomic::Result<()> {
2871    /// let client = Client::from_env()?;
2872    /// let batch = client.batch().cancel("batch_123").await?;
2873    /// println!("Batch cancelled: {}", batch.status);
2874    /// # Ok(())
2875    /// # }
2876    /// ```
2877    pub async fn cancel(&self, batch_id: impl Into<String>) -> Result<Batch> {
2878        let id = batch_id.into();
2879
2880        // Prepare interceptor context
2881        let mut state = T::default();
2882        let operation = operation_names::BATCH_CANCEL;
2883        let model = "batch";
2884        let request_json = format!("{{\"batch_id\":\"{id}\"}}");
2885
2886        // Call before_request hook
2887        self.call_before_request(operation, model, &request_json, &mut state)
2888            .await?;
2889
2890        let start_time = Instant::now();
2891
2892        // Make the API call
2893        let response = match batch_api::cancel_batch()
2894            .configuration(&self.client.base_configuration)
2895            .batch_id(&id)
2896            .call()
2897            .await
2898        {
2899            Ok(resp) => resp,
2900            Err(e) => {
2901                let error = self
2902                    .handle_api_error(e, operation, model, &request_json, &state)
2903                    .await;
2904                return Err(error);
2905            }
2906        };
2907
2908        let duration = start_time.elapsed();
2909
2910        // Call after_response hook
2911        self.call_after_response(
2912            &response,
2913            operation,
2914            model,
2915            &request_json,
2916            &state,
2917            duration,
2918            None,
2919            None,
2920        )
2921        .await;
2922
2923        Ok(response)
2924    }
2925}
2926
2927impl<T: Default + Send + Sync> FineTuningClient<'_, T> {
2928    /// Create a new fine-tuning job.
2929    ///
2930    /// # Example
2931    ///
2932    /// ```rust,ignore
2933    /// use openai_ergonomic::Client;
2934    /// use openai_ergonomic::builders::fine_tuning::FineTuningJobBuilder;
2935    ///
2936    /// # async fn example() -> openai_ergonomic::Result<()> {
2937    /// let client = Client::from_env()?;
2938    /// let builder = FineTuningJobBuilder::new("gpt-3.5-turbo", "file-training-data");
2939    /// let job = client.fine_tuning().create_job(builder).await?;
2940    /// println!("Created job: {}", job.id);
2941    /// # Ok(())
2942    /// # }
2943    /// ```
2944    pub async fn create_job(
2945        &self,
2946        builder: crate::builders::fine_tuning::FineTuningJobBuilder,
2947    ) -> Result<FineTuningJob> {
2948        let mut request = CreateFineTuningJobRequest::new(
2949            builder.model().to_string(),
2950            builder.training_file().to_string(),
2951        );
2952
2953        if let Some(validation_file) = builder.validation_file_ref() {
2954            request.validation_file = Some(validation_file.to_string());
2955        }
2956
2957        if let Some(suffix) = builder.suffix_ref() {
2958            request.suffix = Some(suffix.to_string());
2959        }
2960
2961        // Note: Hyperparameters handling is limited due to base client API limitations
2962        // The generated API appears to have empty struct definitions for hyperparameters
2963        // For now, we skip hyperparameters configuration
2964        // TODO: Update when openai-client-base fixes hyperparameters types
2965
2966        // Prepare interceptor context
2967        let mut state = T::default();
2968        let operation = operation_names::FINE_TUNING_CREATE;
2969        let model = builder.model();
2970        let request_json = serde_json::to_string(&request).unwrap_or_default();
2971
2972        // Call before_request hook
2973        self.call_before_request(operation, model, &request_json, &mut state)
2974            .await?;
2975
2976        let start_time = Instant::now();
2977
2978        // Make the API call
2979        let response = match fine_tuning_api::create_fine_tuning_job()
2980            .configuration(&self.client.base_configuration)
2981            .create_fine_tuning_job_request(request)
2982            .call()
2983            .await
2984        {
2985            Ok(resp) => resp,
2986            Err(e) => {
2987                let error = self
2988                    .handle_api_error(e, operation, model, &request_json, &state)
2989                    .await;
2990                return Err(error);
2991            }
2992        };
2993
2994        let duration = start_time.elapsed();
2995
2996        // Call after_response hook
2997        self.call_after_response(
2998            &response,
2999            operation,
3000            model,
3001            &request_json,
3002            &state,
3003            duration,
3004            None,
3005            None,
3006        )
3007        .await;
3008
3009        Ok(response)
3010    }
3011
3012    /// List fine-tuning jobs.
3013    ///
3014    /// # Example
3015    ///
3016    /// ```rust,ignore
3017    /// use openai_ergonomic::Client;
3018    ///
3019    /// # async fn example() -> openai_ergonomic::Result<()> {
3020    /// let client = Client::from_env()?;
3021    /// let response = client.fine_tuning().list_jobs(None, Some(20)).await?;
3022    /// println!("Found {} jobs", response.data.len());
3023    /// # Ok(())
3024    /// # }
3025    /// ```
3026    pub async fn list_jobs(
3027        &self,
3028        after: Option<&str>,
3029        limit: Option<i32>,
3030    ) -> Result<ListPaginatedFineTuningJobsResponse> {
3031        // Prepare interceptor context
3032        let mut state = T::default();
3033        let operation = operation_names::FINE_TUNING_LIST;
3034        let model = "fine-tuning";
3035        let request_json = format!("{{\"after\":{after:?},\"limit\":{limit:?}}}");
3036
3037        // Call before_request hook
3038        self.call_before_request(operation, model, &request_json, &mut state)
3039            .await?;
3040
3041        let start_time = Instant::now();
3042
3043        // Make the API call
3044        let response = match fine_tuning_api::list_paginated_fine_tuning_jobs()
3045            .configuration(&self.client.base_configuration)
3046            .maybe_after(after)
3047            .maybe_limit(limit)
3048            .call()
3049            .await
3050        {
3051            Ok(resp) => resp,
3052            Err(e) => {
3053                let error = self
3054                    .handle_api_error(e, operation, model, &request_json, &state)
3055                    .await;
3056                return Err(error);
3057            }
3058        };
3059
3060        let duration = start_time.elapsed();
3061
3062        // Call after_response hook
3063        self.call_after_response(
3064            &response,
3065            operation,
3066            model,
3067            &request_json,
3068            &state,
3069            duration,
3070            None,
3071            None,
3072        )
3073        .await;
3074
3075        Ok(response)
3076    }
3077
3078    /// Get a specific fine-tuning job.
3079    ///
3080    /// # Example
3081    ///
3082    /// ```rust,ignore
3083    /// use openai_ergonomic::Client;
3084    ///
3085    /// # async fn example() -> openai_ergonomic::Result<()> {
3086    /// let client = Client::from_env()?;
3087    /// let job = client.fine_tuning().get_job("ftjob-123").await?;
3088    /// println!("Job status: {}", job.status);
3089    /// # Ok(())
3090    /// # }
3091    /// ```
3092    pub async fn get_job(&self, job_id: impl Into<String>) -> Result<FineTuningJob> {
3093        let id = job_id.into();
3094
3095        // Prepare interceptor context
3096        let mut state = T::default();
3097        let operation = operation_names::FINE_TUNING_RETRIEVE;
3098        let model = "fine-tuning";
3099        let request_json = format!("{{\"job_id\":\"{id}\"}}");
3100
3101        // Call before_request hook
3102        self.call_before_request(operation, model, &request_json, &mut state)
3103            .await?;
3104
3105        let start_time = Instant::now();
3106
3107        // Make the API call
3108        let response = match fine_tuning_api::retrieve_fine_tuning_job()
3109            .configuration(&self.client.base_configuration)
3110            .fine_tuning_job_id(&id)
3111            .call()
3112            .await
3113        {
3114            Ok(resp) => resp,
3115            Err(e) => {
3116                let error = self
3117                    .handle_api_error(e, operation, model, &request_json, &state)
3118                    .await;
3119                return Err(error);
3120            }
3121        };
3122
3123        let duration = start_time.elapsed();
3124
3125        // Call after_response hook
3126        self.call_after_response(
3127            &response,
3128            operation,
3129            model,
3130            &request_json,
3131            &state,
3132            duration,
3133            None,
3134            None,
3135        )
3136        .await;
3137
3138        Ok(response)
3139    }
3140
3141    /// Cancel a fine-tuning job.
3142    ///
3143    /// # Example
3144    ///
3145    /// ```rust,ignore
3146    /// use openai_ergonomic::Client;
3147    ///
3148    /// # async fn example() -> openai_ergonomic::Result<()> {
3149    /// let client = Client::from_env()?;
3150    /// let job = client.fine_tuning().cancel_job("ftjob-123").await?;
3151    /// println!("Job cancelled: {}", job.status);
3152    /// # Ok(())
3153    /// # }
3154    /// ```
3155    pub async fn cancel_job(&self, job_id: impl Into<String>) -> Result<FineTuningJob> {
3156        let id = job_id.into();
3157
3158        // Prepare interceptor context
3159        let mut state = T::default();
3160        let operation = operation_names::FINE_TUNING_CANCEL;
3161        let model = "fine-tuning";
3162        let request_json = format!("{{\"job_id\":\"{id}\"}}");
3163
3164        // Call before_request hook
3165        self.call_before_request(operation, model, &request_json, &mut state)
3166            .await?;
3167
3168        let start_time = Instant::now();
3169
3170        // Make the API call
3171        let response = match fine_tuning_api::cancel_fine_tuning_job()
3172            .configuration(&self.client.base_configuration)
3173            .fine_tuning_job_id(&id)
3174            .call()
3175            .await
3176        {
3177            Ok(resp) => resp,
3178            Err(e) => {
3179                let error = self
3180                    .handle_api_error(e, operation, model, &request_json, &state)
3181                    .await;
3182                return Err(error);
3183            }
3184        };
3185
3186        let duration = start_time.elapsed();
3187
3188        // Call after_response hook
3189        self.call_after_response(
3190            &response,
3191            operation,
3192            model,
3193            &request_json,
3194            &state,
3195            duration,
3196            None,
3197            None,
3198        )
3199        .await;
3200
3201        Ok(response)
3202    }
3203
3204    /// List events for a fine-tuning job.
3205    ///
3206    /// # Example
3207    ///
3208    /// ```rust,ignore
3209    /// use openai_ergonomic::Client;
3210    ///
3211    /// # async fn example() -> openai_ergonomic::Result<()> {
3212    /// let client = Client::from_env()?;
3213    /// let events = client.fine_tuning().list_events("ftjob-123", None, Some(20)).await?;
3214    /// println!("Found {} events", events.data.len());
3215    /// # Ok(())
3216    /// # }
3217    /// ```
3218    pub async fn list_events(
3219        &self,
3220        job_id: impl Into<String>,
3221        after: Option<&str>,
3222        limit: Option<i32>,
3223    ) -> Result<ListFineTuningJobEventsResponse> {
3224        let id = job_id.into();
3225
3226        // Prepare interceptor context
3227        let mut state = T::default();
3228        let operation = operation_names::FINE_TUNING_LIST_EVENTS;
3229        let model = "fine-tuning";
3230        let request_json =
3231            format!("{{\"job_id\":\"{id}\",\"after\":{after:?},\"limit\":{limit:?}}}");
3232
3233        // Call before_request hook
3234        self.call_before_request(operation, model, &request_json, &mut state)
3235            .await?;
3236
3237        let start_time = Instant::now();
3238
3239        // Make the API call
3240        let response = match fine_tuning_api::list_fine_tuning_events()
3241            .configuration(&self.client.base_configuration)
3242            .fine_tuning_job_id(&id)
3243            .maybe_after(after)
3244            .maybe_limit(limit)
3245            .call()
3246            .await
3247        {
3248            Ok(resp) => resp,
3249            Err(e) => {
3250                let error = self
3251                    .handle_api_error(e, operation, model, &request_json, &state)
3252                    .await;
3253                return Err(error);
3254            }
3255        };
3256
3257        let duration = start_time.elapsed();
3258
3259        // Call after_response hook
3260        self.call_after_response(
3261            &response,
3262            operation,
3263            model,
3264            &request_json,
3265            &state,
3266            duration,
3267            None,
3268            None,
3269        )
3270        .await;
3271
3272        Ok(response)
3273    }
3274
3275    /// List checkpoints for a fine-tuning job.
3276    ///
3277    /// # Example
3278    ///
3279    /// ```rust,ignore
3280    /// use openai_ergonomic::Client;
3281    ///
3282    /// # async fn example() -> openai_ergonomic::Result<()> {
3283    /// let client = Client::from_env()?;
3284    /// let checkpoints = client.fine_tuning().list_checkpoints("ftjob-123", None, Some(10)).await?;
3285    /// println!("Found {} checkpoints", checkpoints.data.len());
3286    /// # Ok(())
3287    /// # }
3288    /// ```
3289    pub async fn list_checkpoints(
3290        &self,
3291        job_id: impl Into<String>,
3292        after: Option<&str>,
3293        limit: Option<i32>,
3294    ) -> Result<ListFineTuningJobCheckpointsResponse> {
3295        let id = job_id.into();
3296
3297        // Prepare interceptor context
3298        let mut state = T::default();
3299        let operation = operation_names::FINE_TUNING_LIST_CHECKPOINTS;
3300        let model = "fine-tuning";
3301        let request_json =
3302            format!("{{\"job_id\":\"{id}\",\"after\":{after:?},\"limit\":{limit:?}}}");
3303
3304        // Call before_request hook
3305        self.call_before_request(operation, model, &request_json, &mut state)
3306            .await?;
3307
3308        let start_time = Instant::now();
3309
3310        // Make the API call
3311        let response = match fine_tuning_api::list_fine_tuning_job_checkpoints()
3312            .configuration(&self.client.base_configuration)
3313            .fine_tuning_job_id(&id)
3314            .maybe_after(after)
3315            .maybe_limit(limit)
3316            .call()
3317            .await
3318        {
3319            Ok(resp) => resp,
3320            Err(e) => {
3321                let error = self
3322                    .handle_api_error(e, operation, model, &request_json, &state)
3323                    .await;
3324                return Err(error);
3325            }
3326        };
3327
3328        let duration = start_time.elapsed();
3329
3330        // Call after_response hook
3331        self.call_after_response(
3332            &response,
3333            operation,
3334            model,
3335            &request_json,
3336            &state,
3337            duration,
3338            None,
3339            None,
3340        )
3341        .await;
3342
3343        Ok(response)
3344    }
3345}
3346
3347fn map_api_error<T>(error: ApiError<T>) -> Error {
3348    match error {
3349        ApiError::Reqwest(err) => Error::Http(err),
3350        ApiError::ReqwestMiddleware(err) => {
3351            Error::Internal(format!("reqwest middleware error: {err}"))
3352        }
3353        ApiError::Serde(err) => Error::Json(err),
3354        ApiError::Io(err) => Error::File(err),
3355        ApiError::ResponseError(response) => Error::Api {
3356            status: response.status.as_u16(),
3357            message: response.content,
3358            error_type: None,
3359            error_code: None,
3360        },
3361    }
3362}
3363
#[cfg(test)]
mod tests {
    // Unit tests for the error-mapping helper and the request builders.
    use super::*;
    use openai_client_base::apis::{Error as BaseError, ResponseContent};

    #[test]
    fn map_api_error_converts_response() {
        let content = ResponseContent {
            status: reqwest::StatusCode::BAD_REQUEST,
            content: "bad request".to_string(),
            entity: Option::<()>::None,
        };

        // A ResponseError must surface as Error::Api with status and body intact.
        match map_api_error(BaseError::ResponseError(content)) {
            Error::Api {
                status, message, ..
            } => {
                assert_eq!(status, 400);
                assert!(message.contains("bad request"));
            }
            other => panic!("expected API error, got {other:?}"),
        }
    }

    #[test]
    fn test_moderation_builder_creation() {
        use crate::builders::moderations::ModerationBuilder;

        let request = ModerationBuilder::new("Test content").build().unwrap();

        assert_eq!(request.input, "Test content");
        assert!(request.model.is_none());
    }

    #[test]
    fn test_moderation_builder_with_model() {
        use crate::builders::moderations::ModerationBuilder;

        let request = ModerationBuilder::new("Test content")
            .model("text-moderation-stable")
            .build()
            .unwrap();

        assert_eq!(request.input, "Test content");
        assert_eq!(request.model, Some("text-moderation-stable".to_string()));
    }

    #[test]
    fn test_moderation_builder_array_input() {
        use crate::builders::moderations::ModerationBuilder;

        let texts = vec!["First text".to_string(), "Second text".to_string()];
        let request = ModerationBuilder::new_array(texts).build().unwrap();

        // Array inputs are collapsed into a single newline-joined string.
        assert_eq!(request.input, "First text\nSecond text");
    }

    #[test]
    fn test_file_upload_builder_creation() {
        use crate::builders::files::{FilePurpose, FileUploadBuilder};

        let bytes = b"test content".to_vec();
        let upload = FileUploadBuilder::new("test.txt", FilePurpose::Assistants, bytes.clone());

        assert_eq!(upload.filename(), "test.txt");
        assert_eq!(upload.content(), bytes.as_slice());
        assert_eq!(upload.content_size(), bytes.len());
        assert!(!upload.is_empty());
    }

    #[test]
    fn test_file_upload_builder_from_text() {
        use crate::builders::files::{FilePurpose, FileUploadBuilder};

        let upload =
            FileUploadBuilder::from_text("hello.txt", FilePurpose::FineTune, "Hello, world!");

        assert_eq!(upload.filename(), "hello.txt");
        assert_eq!(
            upload.content_as_string(),
            Some("Hello, world!".to_string())
        );
        assert!(!upload.is_empty());
    }

    #[test]
    fn test_file_list_builder() {
        use crate::builders::files::{FileListBuilder, FileOrder, FilePurpose};

        let listing = FileListBuilder::new()
            .purpose(FilePurpose::Assistants)
            .limit(10)
            .order(FileOrder::Desc);

        assert!(listing.purpose_ref().is_some());
        assert_eq!(listing.limit_ref(), Some(10));
        assert!(listing.order_ref().is_some());
    }

    #[test]
    fn test_file_retrieval_builder() {
        use crate::builders::files::FileRetrievalBuilder;

        assert_eq!(FileRetrievalBuilder::new("file-123").file_id(), "file-123");
    }

    #[test]
    fn test_file_delete_builder() {
        use crate::builders::files::FileDeleteBuilder;

        assert_eq!(FileDeleteBuilder::new("file-456").file_id(), "file-456");
    }

    #[test]
    fn test_file_purpose_display() {
        use crate::builders::files::FilePurpose;

        let cases = [
            (FilePurpose::FineTune, "fine-tune"),
            (FilePurpose::Assistants, "assistants"),
            (FilePurpose::Vision, "vision"),
            (FilePurpose::Batch, "batch"),
        ];
        for (purpose, expected) in cases {
            assert_eq!(purpose.to_string(), expected);
        }
    }

    #[test]
    fn test_vector_store_builder_basic() {
        use crate::builders::vector_stores::VectorStoreBuilder;

        let store = VectorStoreBuilder::new()
            .name("Test Store")
            .add_file("file-1")
            .metadata("key", "value");

        assert_eq!(store.name_ref(), Some("Test Store"));
        assert!(store.has_files());
        assert_eq!(store.file_count(), 1);
        assert_eq!(store.metadata_ref().len(), 1);
    }

    #[test]
    fn test_vector_store_builder_with_expiration() {
        use crate::builders::vector_stores::VectorStoreBuilder;

        let store = VectorStoreBuilder::new()
            .name("Temp Store")
            .expires_after_days(30);

        assert_eq!(store.name_ref(), Some("Temp Store"));
        let expiry = store.expires_after_ref();
        assert!(expiry.is_some());
        assert_eq!(expiry.unwrap().days, 30);
    }

    #[test]
    fn test_vector_store_builder_multiple_files() {
        use crate::builders::vector_stores::VectorStoreBuilder;

        let ids = vec!["file-1".to_string(), "file-2".to_string()];
        let store = VectorStoreBuilder::new()
            .name("Multi-File Store")
            .file_ids(ids.clone());

        assert_eq!(store.file_ids_ref(), ids.as_slice());
        assert_eq!(store.file_count(), 2);
    }

    #[test]
    fn test_vector_store_file_builder() {
        use crate::builders::vector_stores::VectorStoreFileBuilder;

        let link = VectorStoreFileBuilder::new("vs-123", "file-456");
        assert_eq!(link.vector_store_id(), "vs-123");
        assert_eq!(link.file_id(), "file-456");
    }

    #[test]
    fn test_vector_store_search_builder() {
        use crate::builders::vector_stores::VectorStoreSearchBuilder;

        let search = VectorStoreSearchBuilder::new("vs-123", "test query")
            .limit(10)
            .filter("category", "docs");

        assert_eq!(search.vector_store_id(), "vs-123");
        assert_eq!(search.query(), "test query");
        assert_eq!(search.limit_ref(), Some(10));
        assert_eq!(search.filter_ref().len(), 1);
    }

    #[test]
    fn test_vector_store_search_builder_default() {
        use crate::builders::vector_stores::VectorStoreSearchBuilder;

        let search = VectorStoreSearchBuilder::new("vs-123", "query");
        assert!(search.limit_ref().is_none());
        assert!(search.filter_ref().is_empty());
    }
}
3564
// Client wrapper types scoped to individual API endpoint groups.
// TODO: some endpoint clients may still need full builder-backed implementations.
3567
/// Client for assistants API.
///
/// A cheap, copyable handle that borrows the parent [`Client`] and reuses its
/// configuration and interceptor chain.
#[derive(Debug, Clone, Copy)]
pub struct AssistantsClient<'a, T = ()> {
    // Parent client providing the base configuration and interceptor state.
    client: &'a Client<T>,
}
3573
3574impl<T: Default + Send + Sync> AssistantsClient<'_, T> {
3575    /// Create a new assistant.
3576    ///
3577    /// # Example
3578    ///
3579    /// ```rust,ignore
3580    /// use openai_ergonomic::Client;
3581    /// use openai_ergonomic::builders::assistants::AssistantBuilder;
3582    ///
3583    /// # async fn example() -> openai_ergonomic::Result<()> {
3584    /// let client = Client::from_env()?;
3585    /// let builder = AssistantBuilder::new("gpt-4")
3586    ///     .name("Math Tutor")
3587    ///     .instructions("You are a helpful math tutor.");
3588    /// let assistant = client.assistants().create(builder).await?;
3589    /// println!("Created assistant: {}", assistant.id);
3590    /// # Ok(())
3591    /// # }
3592    /// ```
3593    pub async fn create(&self, builder: AssistantBuilder) -> Result<AssistantObject> {
3594        let request = builder.build()?;
3595
3596        // Prepare interceptor context
3597        let mut state = T::default();
3598        let operation = operation_names::ASSISTANT_CREATE;
3599        let model = request.model.clone();
3600        let request_json = serde_json::to_string(&request).unwrap_or_default();
3601
3602        // Call before_request hook
3603        self.call_before_request(operation, &model, &request_json, &mut state)
3604            .await?;
3605
3606        let start_time = Instant::now();
3607
3608        // Make the API call
3609        let response = match assistants_api::create_assistant()
3610            .configuration(&self.client.base_configuration)
3611            .create_assistant_request(request)
3612            .call()
3613            .await
3614        {
3615            Ok(resp) => resp,
3616            Err(e) => {
3617                let error = self
3618                    .handle_api_error(e, operation, &model, &request_json, &state)
3619                    .await;
3620                return Err(error);
3621            }
3622        };
3623
3624        let duration = start_time.elapsed();
3625
3626        // Call after_response hook
3627        self.call_after_response(
3628            &response,
3629            operation,
3630            &model,
3631            &request_json,
3632            &state,
3633            duration,
3634            None,
3635            None,
3636        )
3637        .await;
3638
3639        Ok(response)
3640    }
3641
3642    /// List assistants with pagination.
3643    ///
3644    /// # Example
3645    ///
3646    /// ```rust,ignore
3647    /// use openai_ergonomic::Client;
3648    ///
3649    /// # async fn example() -> openai_ergonomic::Result<()> {
3650    /// let client = Client::from_env()?;
3651    /// let response = client.assistants().list(Some(20), None, None, None).await?;
3652    /// println!("Found {} assistants", response.data.len());
3653    /// # Ok(())
3654    /// # }
3655    /// ```
3656    pub async fn list(
3657        &self,
3658        limit: Option<i32>,
3659        order: Option<&str>,
3660        after: Option<&str>,
3661        before: Option<&str>,
3662    ) -> Result<ListAssistantsResponse> {
3663        // Prepare interceptor context
3664        let mut state = T::default();
3665        let operation = operation_names::ASSISTANT_LIST;
3666        let model = "assistants";
3667        let request_json = format!(
3668            "{{\"limit\":{limit:?},\"order\":{order:?},\"after\":{after:?},\"before\":{before:?}}}"
3669        );
3670
3671        // Call before_request hook
3672        self.call_before_request(operation, model, &request_json, &mut state)
3673            .await?;
3674
3675        let start_time = Instant::now();
3676
3677        // Make the API call
3678        let response = match assistants_api::list_assistants()
3679            .configuration(&self.client.base_configuration)
3680            .maybe_limit(limit)
3681            .maybe_order(order)
3682            .maybe_after(after)
3683            .maybe_before(before)
3684            .call()
3685            .await
3686        {
3687            Ok(resp) => resp,
3688            Err(e) => {
3689                let error = self
3690                    .handle_api_error(e, operation, model, &request_json, &state)
3691                    .await;
3692                return Err(error);
3693            }
3694        };
3695
3696        let duration = start_time.elapsed();
3697
3698        // Call after_response hook
3699        self.call_after_response(
3700            &response,
3701            operation,
3702            model,
3703            &request_json,
3704            &state,
3705            duration,
3706            None,
3707            None,
3708        )
3709        .await;
3710
3711        Ok(response)
3712    }
3713
3714    /// Get an assistant by ID.
3715    ///
3716    /// # Example
3717    ///
3718    /// ```rust,ignore
3719    /// use openai_ergonomic::Client;
3720    ///
3721    /// # async fn example() -> openai_ergonomic::Result<()> {
3722    /// let client = Client::from_env()?;
3723    /// let assistant = client.assistants().get("asst_123").await?;
3724    /// println!("Assistant: {}", assistant.name.unwrap_or_default());
3725    /// # Ok(())
3726    /// # }
3727    /// ```
3728    pub async fn get(&self, assistant_id: impl Into<String>) -> Result<AssistantObject> {
3729        let id = assistant_id.into();
3730
3731        // Prepare interceptor context
3732        let mut state = T::default();
3733        let operation = operation_names::ASSISTANT_RETRIEVE;
3734        let model = "assistants";
3735        let request_json = format!("{{\"assistant_id\":\"{id}\"}}");
3736
3737        // Call before_request hook
3738        self.call_before_request(operation, model, &request_json, &mut state)
3739            .await?;
3740
3741        let start_time = Instant::now();
3742
3743        // Make the API call
3744        let response = match assistants_api::get_assistant()
3745            .configuration(&self.client.base_configuration)
3746            .assistant_id(&id)
3747            .call()
3748            .await
3749        {
3750            Ok(resp) => resp,
3751            Err(e) => {
3752                let error = self
3753                    .handle_api_error(e, operation, model, &request_json, &state)
3754                    .await;
3755                return Err(error);
3756            }
3757        };
3758
3759        let duration = start_time.elapsed();
3760
3761        // Call after_response hook
3762        self.call_after_response(
3763            &response,
3764            operation,
3765            model,
3766            &request_json,
3767            &state,
3768            duration,
3769            None,
3770            None,
3771        )
3772        .await;
3773
3774        Ok(response)
3775    }
3776
    /// Update an assistant.
    ///
    /// Reuses [`AssistantBuilder`] (which produces a `CreateAssistantRequest`)
    /// and converts its output field-by-field into a `ModifyAssistantRequest`
    /// before issuing the modify call, wrapped in the interceptor hooks.
    ///
    /// # Errors
    ///
    /// Returns an error if the builder fails to build, an interceptor aborts
    /// the request, or the API call fails.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    /// use openai_ergonomic::builders::assistants::AssistantBuilder;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let builder = AssistantBuilder::new("gpt-4")
    ///     .name("Updated Name")
    ///     .instructions("Updated instructions");
    /// let assistant = client.assistants().update("asst_123", builder).await?;
    /// println!("Updated: {}", assistant.id);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn update(
        &self,
        assistant_id: impl Into<String>,
        builder: AssistantBuilder,
    ) -> Result<AssistantObject> {
        use openai_client_base::models::ModifyAssistantRequest;

        let id = assistant_id.into();
        let request_data = builder.build()?;

        // Convert CreateAssistantRequest to ModifyAssistantRequest
        let mut request = ModifyAssistantRequest::new();
        request.model = Some(request_data.model);
        // Convert Box<CreateAssistantRequestName> to Option<String> by extracting text
        // NOTE(review): the `Null` variant collapses to `None` ("field not
        // sent") rather than an explicit null ("clear the field") — confirm
        // this is the intended semantics for the modify endpoint.
        request.name = request_data.name.and_then(|n| match *n {
            openai_client_base::models::CreateAssistantRequestName::Text(text) => Some(Some(text)),
            openai_client_base::models::CreateAssistantRequestName::Null => None,
        });
        // Same Text-extraction / Null-drop pattern for the description...
        request.description = request_data.description.and_then(|d| match *d {
            openai_client_base::models::CreateAssistantRequestDescription::Text(text) => {
                Some(Some(text))
            }
            openai_client_base::models::CreateAssistantRequestDescription::Null => None,
        });
        // ...and for the instructions.
        request.instructions = request_data.instructions.and_then(|i| match *i {
            openai_client_base::models::CreateAssistantRequestInstructions::Text(text) => {
                Some(Some(text))
            }
            openai_client_base::models::CreateAssistantRequestInstructions::Null => None,
        });
        request.tools = request_data.tools;
        request.metadata = request_data.metadata;

        // Prepare interceptor context; report the requested model when one
        // was supplied, otherwise a generic "assistants" label.
        let mut state = T::default();
        let operation = operation_names::ASSISTANT_UPDATE;
        let model = request
            .model
            .as_ref()
            .map_or_else(|| "assistants".to_string(), Clone::clone);
        let request_json = serde_json::to_string(&request).unwrap_or_default();

        // Call before_request hook
        self.call_before_request(operation, &model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match assistants_api::modify_assistant()
            .configuration(&self.client.base_configuration)
            .assistant_id(&id)
            .modify_assistant_request(request)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, &model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            &model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }
3877
3878    /// Delete an assistant.
3879    ///
3880    /// # Example
3881    ///
3882    /// ```rust,ignore
3883    /// use openai_ergonomic::Client;
3884    ///
3885    /// # async fn example() -> openai_ergonomic::Result<()> {
3886    /// let client = Client::from_env()?;
3887    /// let response = client.assistants().delete("asst_123").await?;
3888    /// println!("Deleted: {}", response.deleted);
3889    /// # Ok(())
3890    /// # }
3891    /// ```
3892    pub async fn delete(&self, assistant_id: impl Into<String>) -> Result<DeleteAssistantResponse> {
3893        let id = assistant_id.into();
3894
3895        // Prepare interceptor context
3896        let mut state = T::default();
3897        let operation = operation_names::ASSISTANT_DELETE;
3898        let model = "assistants";
3899        let request_json = format!("{{\"assistant_id\":\"{id}\"}}");
3900
3901        // Call before_request hook
3902        self.call_before_request(operation, model, &request_json, &mut state)
3903            .await?;
3904
3905        let start_time = Instant::now();
3906
3907        // Make the API call
3908        let response = match assistants_api::delete_assistant()
3909            .configuration(&self.client.base_configuration)
3910            .assistant_id(&id)
3911            .call()
3912            .await
3913        {
3914            Ok(resp) => resp,
3915            Err(e) => {
3916                let error = self
3917                    .handle_api_error(e, operation, model, &request_json, &state)
3918                    .await;
3919                return Err(error);
3920            }
3921        };
3922
3923        let duration = start_time.elapsed();
3924
3925        // Call after_response hook
3926        self.call_after_response(
3927            &response,
3928            operation,
3929            model,
3930            &request_json,
3931            &state,
3932            duration,
3933            None,
3934            None,
3935        )
3936        .await;
3937
3938        Ok(response)
3939    }
3940
3941    /// Create a run on a thread.
3942    ///
3943    /// # Example
3944    ///
3945    /// ```rust,ignore
3946    /// use openai_ergonomic::Client;
3947    /// use openai_ergonomic::builders::assistants::RunBuilder;
3948    ///
3949    /// # async fn example() -> openai_ergonomic::Result<()> {
3950    /// let client = Client::from_env()?;
3951    /// let builder = RunBuilder::new("asst_123");
3952    /// let run = client.assistants().create_run("thread_123", builder).await?;
3953    /// println!("Run created: {}", run.id);
3954    /// # Ok(())
3955    /// # }
3956    /// ```
3957    pub async fn create_run(
3958        &self,
3959        thread_id: impl Into<String>,
3960        builder: RunBuilder,
3961    ) -> Result<RunObject> {
3962        let thread_id = thread_id.into();
3963        let request = builder.build()?;
3964
3965        // Prepare interceptor context
3966        let mut state = T::default();
3967        let operation = operation_names::RUN_CREATE;
3968        let model = request
3969            .model
3970            .as_ref()
3971            .map_or_else(|| "runs".to_string(), Clone::clone);
3972        let request_json = serde_json::to_string(&request).unwrap_or_default();
3973
3974        // Call before_request hook
3975        self.call_before_request(operation, &model, &request_json, &mut state)
3976            .await?;
3977
3978        let start_time = Instant::now();
3979
3980        // Make the API call
3981        let response = match assistants_api::create_run()
3982            .configuration(&self.client.base_configuration)
3983            .thread_id(&thread_id)
3984            .create_run_request(request)
3985            .call()
3986            .await
3987        {
3988            Ok(resp) => resp,
3989            Err(e) => {
3990                let error = self
3991                    .handle_api_error(e, operation, &model, &request_json, &state)
3992                    .await;
3993                return Err(error);
3994            }
3995        };
3996
3997        let duration = start_time.elapsed();
3998
3999        // Call after_response hook
4000        self.call_after_response(
4001            &response,
4002            operation,
4003            &model,
4004            &request_json,
4005            &state,
4006            duration,
4007            None,
4008            None,
4009        )
4010        .await;
4011
4012        Ok(response)
4013    }
4014
4015    /// List runs on a thread.
4016    ///
4017    /// # Example
4018    ///
4019    /// ```rust,ignore
4020    /// use openai_ergonomic::Client;
4021    ///
4022    /// # async fn example() -> openai_ergonomic::Result<()> {
4023    /// let client = Client::from_env()?;
4024    /// let response = client.assistants().list_runs("thread_123", None, None, None, None).await?;
4025    /// println!("Found {} runs", response.data.len());
4026    /// # Ok(())
4027    /// # }
4028    /// ```
4029    pub async fn list_runs(
4030        &self,
4031        thread_id: impl Into<String>,
4032        limit: Option<i32>,
4033        order: Option<&str>,
4034        after: Option<&str>,
4035        before: Option<&str>,
4036    ) -> Result<ListRunsResponse> {
4037        let thread_id = thread_id.into();
4038
4039        // Prepare interceptor context
4040        let mut state = T::default();
4041        let operation = operation_names::RUN_LIST;
4042        let model = "runs";
4043        let request_json = format!(
4044            "{{\"thread_id\":\"{thread_id}\",\"limit\":{limit:?},\"order\":{order:?},\"after\":{after:?},\"before\":{before:?}}}"
4045        );
4046
4047        // Call before_request hook
4048        self.call_before_request(operation, model, &request_json, &mut state)
4049            .await?;
4050
4051        let start_time = Instant::now();
4052
4053        // Make the API call
4054        let response = match assistants_api::list_runs()
4055            .configuration(&self.client.base_configuration)
4056            .thread_id(&thread_id)
4057            .maybe_limit(limit)
4058            .maybe_order(order)
4059            .maybe_after(after)
4060            .maybe_before(before)
4061            .call()
4062            .await
4063        {
4064            Ok(resp) => resp,
4065            Err(e) => {
4066                let error = self
4067                    .handle_api_error(e, operation, model, &request_json, &state)
4068                    .await;
4069                return Err(error);
4070            }
4071        };
4072
4073        let duration = start_time.elapsed();
4074
4075        // Call after_response hook
4076        self.call_after_response(
4077            &response,
4078            operation,
4079            model,
4080            &request_json,
4081            &state,
4082            duration,
4083            None,
4084            None,
4085        )
4086        .await;
4087
4088        Ok(response)
4089    }
4090
4091    /// Get a run.
4092    ///
4093    /// # Example
4094    ///
4095    /// ```rust,ignore
4096    /// use openai_ergonomic::Client;
4097    ///
4098    /// # async fn example() -> openai_ergonomic::Result<()> {
4099    /// let client = Client::from_env()?;
4100    /// let run = client.assistants().get_run("thread_123", "run_123").await?;
4101    /// println!("Run status: {}", run.status);
4102    /// # Ok(())
4103    /// # }
4104    /// ```
4105    pub async fn get_run(
4106        &self,
4107        thread_id: impl Into<String>,
4108        run_id: impl Into<String>,
4109    ) -> Result<RunObject> {
4110        let thread_id = thread_id.into();
4111        let run_id = run_id.into();
4112
4113        // Prepare interceptor context
4114        let mut state = T::default();
4115        let operation = operation_names::RUN_RETRIEVE;
4116        let model = "runs";
4117        let request_json = format!("{{\"thread_id\":\"{thread_id}\",\"run_id\":\"{run_id}\"}}");
4118
4119        // Call before_request hook
4120        self.call_before_request(operation, model, &request_json, &mut state)
4121            .await?;
4122
4123        let start_time = Instant::now();
4124
4125        // Make the API call
4126        let response = match assistants_api::get_run()
4127            .configuration(&self.client.base_configuration)
4128            .thread_id(&thread_id)
4129            .run_id(&run_id)
4130            .call()
4131            .await
4132        {
4133            Ok(resp) => resp,
4134            Err(e) => {
4135                let error = self
4136                    .handle_api_error(e, operation, model, &request_json, &state)
4137                    .await;
4138                return Err(error);
4139            }
4140        };
4141
4142        let duration = start_time.elapsed();
4143
4144        // Call after_response hook
4145        self.call_after_response(
4146            &response,
4147            operation,
4148            model,
4149            &request_json,
4150            &state,
4151            duration,
4152            None,
4153            None,
4154        )
4155        .await;
4156
4157        Ok(response)
4158    }
4159
4160    /// Cancel a run.
4161    ///
4162    /// # Example
4163    ///
4164    /// ```rust,ignore
4165    /// use openai_ergonomic::Client;
4166    ///
4167    /// # async fn example() -> openai_ergonomic::Result<()> {
4168    /// let client = Client::from_env()?;
4169    /// let run = client.assistants().cancel_run("thread_123", "run_123").await?;
4170    /// println!("Run cancelled: {}", run.status);
4171    /// # Ok(())
4172    /// # }
4173    /// ```
4174    pub async fn cancel_run(
4175        &self,
4176        thread_id: impl Into<String>,
4177        run_id: impl Into<String>,
4178    ) -> Result<RunObject> {
4179        let thread_id = thread_id.into();
4180        let run_id = run_id.into();
4181
4182        // Prepare interceptor context
4183        let mut state = T::default();
4184        let operation = operation_names::RUN_CANCEL;
4185        let model = "runs";
4186        let request_json = format!("{{\"thread_id\":\"{thread_id}\",\"run_id\":\"{run_id}\"}}");
4187
4188        // Call before_request hook
4189        self.call_before_request(operation, model, &request_json, &mut state)
4190            .await?;
4191
4192        let start_time = Instant::now();
4193
4194        // Make the API call
4195        let response = match assistants_api::cancel_run()
4196            .configuration(&self.client.base_configuration)
4197            .thread_id(&thread_id)
4198            .run_id(&run_id)
4199            .call()
4200            .await
4201        {
4202            Ok(resp) => resp,
4203            Err(e) => {
4204                let error = self
4205                    .handle_api_error(e, operation, model, &request_json, &state)
4206                    .await;
4207                return Err(error);
4208            }
4209        };
4210
4211        let duration = start_time.elapsed();
4212
4213        // Call after_response hook
4214        self.call_after_response(
4215            &response,
4216            operation,
4217            model,
4218            &request_json,
4219            &state,
4220            duration,
4221            None,
4222            None,
4223        )
4224        .await;
4225
4226        Ok(response)
4227    }
4228
4229    /// Submit tool outputs to a run.
4230    ///
4231    /// # Example
4232    ///
4233    /// ```rust,ignore
4234    /// use openai_ergonomic::Client;
4235    ///
4236    /// # async fn example() -> openai_ergonomic::Result<()> {
4237    /// let client = Client::from_env()?;
4238    /// let outputs = vec![
4239    ///     SubmitToolOutputsRunRequestToolOutputsInner::new("call_123", "output data")
4240    /// ];
4241    /// let run = client.assistants().submit_tool_outputs("thread_123", "run_123", outputs).await?;
4242    /// println!("Tool outputs submitted: {}", run.id);
4243    /// # Ok(())
4244    /// # }
4245    /// ```
4246    pub async fn submit_tool_outputs(
4247        &self,
4248        thread_id: impl Into<String>,
4249        run_id: impl Into<String>,
4250        tool_outputs: Vec<SubmitToolOutputsRunRequestToolOutputsInner>,
4251    ) -> Result<RunObject> {
4252        use openai_client_base::models::SubmitToolOutputsRunRequest;
4253
4254        let thread_id = thread_id.into();
4255        let run_id = run_id.into();
4256        let request = SubmitToolOutputsRunRequest::new(tool_outputs);
4257
4258        // Prepare interceptor context
4259        let mut state = T::default();
4260        let operation = operation_names::RUN_SUBMIT_TOOL_OUTPUTS;
4261        let model = "runs";
4262        let request_json = serde_json::to_string(&request).unwrap_or_default();
4263
4264        // Call before_request hook
4265        self.call_before_request(operation, model, &request_json, &mut state)
4266            .await?;
4267
4268        let start_time = Instant::now();
4269
4270        // Make the API call
4271        let response = match assistants_api::submit_tool_ouputs_to_run()
4272            .configuration(&self.client.base_configuration)
4273            .thread_id(&thread_id)
4274            .run_id(&run_id)
4275            .submit_tool_outputs_run_request(request)
4276            .call()
4277            .await
4278        {
4279            Ok(resp) => resp,
4280            Err(e) => {
4281                let error = self
4282                    .handle_api_error(e, operation, model, &request_json, &state)
4283                    .await;
4284                return Err(error);
4285            }
4286        };
4287
4288        let duration = start_time.elapsed();
4289
4290        // Call after_response hook
4291        self.call_after_response(
4292            &response,
4293            operation,
4294            model,
4295            &request_json,
4296            &state,
4297            duration,
4298            None,
4299            None,
4300        )
4301        .await;
4302
4303        Ok(response)
4304    }
4305
4306    /// Create a message on a thread.
4307    ///
4308    /// # Example
4309    ///
4310    /// ```rust,ignore
4311    /// use openai_ergonomic::Client;
4312    /// use openai_ergonomic::builders::assistants::MessageBuilder;
4313    ///
4314    /// # async fn example() -> openai_ergonomic::Result<()> {
4315    /// let client = Client::from_env()?;
4316    /// let builder = MessageBuilder::new("user", "Hello, assistant!");
4317    /// let message = client.assistants().create_message("thread_123", builder).await?;
4318    /// println!("Message created: {}", message.id);
4319    /// # Ok(())
4320    /// # }
4321    /// ```
4322    pub async fn create_message(
4323        &self,
4324        thread_id: impl Into<String>,
4325        builder: MessageBuilder,
4326    ) -> Result<MessageObject> {
4327        let thread_id = thread_id.into();
4328        let request = builder.build()?;
4329
4330        // Prepare interceptor context
4331        let mut state = T::default();
4332        let operation = operation_names::MESSAGE_CREATE;
4333        let model = "messages";
4334        let request_json = serde_json::to_string(&request).unwrap_or_default();
4335
4336        // Call before_request hook
4337        self.call_before_request(operation, model, &request_json, &mut state)
4338            .await?;
4339
4340        let start_time = Instant::now();
4341
4342        // Make the API call
4343        let response = match assistants_api::create_message()
4344            .configuration(&self.client.base_configuration)
4345            .thread_id(&thread_id)
4346            .create_message_request(request)
4347            .call()
4348            .await
4349        {
4350            Ok(resp) => resp,
4351            Err(e) => {
4352                let error = self
4353                    .handle_api_error(e, operation, model, &request_json, &state)
4354                    .await;
4355                return Err(error);
4356            }
4357        };
4358
4359        let duration = start_time.elapsed();
4360
4361        // Call after_response hook
4362        self.call_after_response(
4363            &response,
4364            operation,
4365            model,
4366            &request_json,
4367            &state,
4368            duration,
4369            None,
4370            None,
4371        )
4372        .await;
4373
4374        Ok(response)
4375    }
4376
4377    /// List messages on a thread.
4378    ///
4379    /// # Example
4380    ///
4381    /// ```rust,ignore
4382    /// use openai_ergonomic::Client;
4383    ///
4384    /// # async fn example() -> openai_ergonomic::Result<()> {
4385    /// let client = Client::from_env()?;
4386    /// let response = client.assistants().list_messages("thread_123", None, None, None, None, None).await?;
4387    /// println!("Found {} messages", response.data.len());
4388    /// # Ok(())
4389    /// # }
4390    /// ```
4391    pub async fn list_messages(
4392        &self,
4393        thread_id: impl Into<String>,
4394        limit: Option<i32>,
4395        order: Option<&str>,
4396        after: Option<&str>,
4397        before: Option<&str>,
4398        run_id: Option<&str>,
4399    ) -> Result<ListMessagesResponse> {
4400        let thread_id = thread_id.into();
4401
4402        // Prepare interceptor context
4403        let mut state = T::default();
4404        let operation = operation_names::MESSAGE_LIST;
4405        let model = "messages";
4406        let request_json = format!("{{\"thread_id\":\"{thread_id}\",\"limit\":{limit:?},\"order\":{order:?},\"after\":{after:?},\"before\":{before:?},\"run_id\":{run_id:?}}}");
4407
4408        // Call before_request hook
4409        self.call_before_request(operation, model, &request_json, &mut state)
4410            .await?;
4411
4412        let start_time = Instant::now();
4413
4414        // Make the API call
4415        let response = match assistants_api::list_messages()
4416            .configuration(&self.client.base_configuration)
4417            .thread_id(&thread_id)
4418            .maybe_limit(limit)
4419            .maybe_order(order)
4420            .maybe_after(after)
4421            .maybe_before(before)
4422            .maybe_run_id(run_id)
4423            .call()
4424            .await
4425        {
4426            Ok(resp) => resp,
4427            Err(e) => {
4428                let error = self
4429                    .handle_api_error(e, operation, model, &request_json, &state)
4430                    .await;
4431                return Err(error);
4432            }
4433        };
4434
4435        let duration = start_time.elapsed();
4436
4437        // Call after_response hook
4438        self.call_after_response(
4439            &response,
4440            operation,
4441            model,
4442            &request_json,
4443            &state,
4444            duration,
4445            None,
4446            None,
4447        )
4448        .await;
4449
4450        Ok(response)
4451    }
4452
4453    /// Get a message.
4454    ///
4455    /// # Example
4456    ///
4457    /// ```rust,ignore
4458    /// use openai_ergonomic::Client;
4459    ///
4460    /// # async fn example() -> openai_ergonomic::Result<()> {
4461    /// let client = Client::from_env()?;
4462    /// let message = client.assistants().get_message("thread_123", "msg_123").await?;
4463    /// println!("Message role: {}", message.role);
4464    /// # Ok(())
4465    /// # }
4466    /// ```
4467    pub async fn get_message(
4468        &self,
4469        thread_id: impl Into<String>,
4470        message_id: impl Into<String>,
4471    ) -> Result<MessageObject> {
4472        let thread_id = thread_id.into();
4473        let message_id = message_id.into();
4474
4475        // Prepare interceptor context
4476        let mut state = T::default();
4477        let operation = operation_names::MESSAGE_RETRIEVE;
4478        let model = "messages";
4479        let request_json =
4480            format!("{{\"thread_id\":\"{thread_id}\",\"message_id\":\"{message_id}\"}}");
4481
4482        // Call before_request hook
4483        self.call_before_request(operation, model, &request_json, &mut state)
4484            .await?;
4485
4486        let start_time = Instant::now();
4487
4488        // Make the API call
4489        let response = match assistants_api::get_message()
4490            .configuration(&self.client.base_configuration)
4491            .thread_id(&thread_id)
4492            .message_id(&message_id)
4493            .call()
4494            .await
4495        {
4496            Ok(resp) => resp,
4497            Err(e) => {
4498                let error = self
4499                    .handle_api_error(e, operation, model, &request_json, &state)
4500                    .await;
4501                return Err(error);
4502            }
4503        };
4504
4505        let duration = start_time.elapsed();
4506
4507        // Call after_response hook
4508        self.call_after_response(
4509            &response,
4510            operation,
4511            model,
4512            &request_json,
4513            &state,
4514            duration,
4515            None,
4516            None,
4517        )
4518        .await;
4519
4520        Ok(response)
4521    }
4522
4523    /// List run steps.
4524    ///
4525    /// # Example
4526    ///
4527    /// ```rust,ignore
4528    /// use openai_ergonomic::Client;
4529    ///
4530    /// # async fn example() -> openai_ergonomic::Result<()> {
4531    /// let client = Client::from_env()?;
4532    /// let response = client.assistants().list_run_steps("thread_123", "run_123", None, None, None, None, None).await?;
4533    /// println!("Found {} run steps", response.data.len());
4534    /// # Ok(())
4535    /// # }
4536    /// ```
4537    #[allow(clippy::too_many_arguments)]
4538    pub async fn list_run_steps(
4539        &self,
4540        thread_id: impl Into<String>,
4541        run_id: impl Into<String>,
4542        limit: Option<i32>,
4543        order: Option<&str>,
4544        after: Option<&str>,
4545        before: Option<&str>,
4546        include: Option<Vec<String>>,
4547    ) -> Result<ListRunStepsResponse> {
4548        let thread_id = thread_id.into();
4549        let run_id = run_id.into();
4550
4551        // Prepare interceptor context
4552        let mut state = T::default();
4553        let operation = operation_names::RUN_STEP_LIST;
4554        let model = "run_steps";
4555        let request_json = format!("{{\"thread_id\":\"{thread_id}\",\"run_id\":\"{run_id}\",\"limit\":{limit:?},\"order\":{order:?},\"after\":{after:?},\"before\":{before:?},\"include\":{include:?}}}");
4556
4557        // Call before_request hook
4558        self.call_before_request(operation, model, &request_json, &mut state)
4559            .await?;
4560
4561        let start_time = Instant::now();
4562
4563        // Make the API call
4564        let response = match assistants_api::list_run_steps()
4565            .configuration(&self.client.base_configuration)
4566            .thread_id(&thread_id)
4567            .run_id(&run_id)
4568            .maybe_limit(limit)
4569            .maybe_order(order)
4570            .maybe_after(after)
4571            .maybe_before(before)
4572            .maybe_include_left_square_bracket_right_square_bracket(include)
4573            .call()
4574            .await
4575        {
4576            Ok(resp) => resp,
4577            Err(e) => {
4578                let error = self
4579                    .handle_api_error(e, operation, model, &request_json, &state)
4580                    .await;
4581                return Err(error);
4582            }
4583        };
4584
4585        let duration = start_time.elapsed();
4586
4587        // Call after_response hook
4588        self.call_after_response(
4589            &response,
4590            operation,
4591            model,
4592            &request_json,
4593            &state,
4594            duration,
4595            None,
4596            None,
4597        )
4598        .await;
4599
4600        Ok(response)
4601    }
4602
4603    /// Get a run step.
4604    ///
4605    /// # Example
4606    ///
4607    /// ```rust,ignore
4608    /// use openai_ergonomic::Client;
4609    ///
4610    /// # async fn example() -> openai_ergonomic::Result<()> {
4611    /// let client = Client::from_env()?;
4612    /// let step = client.assistants().get_run_step("thread_123", "run_123", "step_123", None).await?;
4613    /// println!("Step type: {}", step.type_);
4614    /// # Ok(())
4615    /// # }
4616    /// ```
4617    pub async fn get_run_step(
4618        &self,
4619        thread_id: impl Into<String>,
4620        run_id: impl Into<String>,
4621        step_id: impl Into<String>,
4622        include: Option<Vec<String>>,
4623    ) -> Result<RunStepObject> {
4624        let thread_id = thread_id.into();
4625        let run_id = run_id.into();
4626        let step_id = step_id.into();
4627
4628        // Prepare interceptor context
4629        let mut state = T::default();
4630        let operation = operation_names::RUN_STEP_RETRIEVE;
4631        let model = "run_steps";
4632        let request_json = format!(
4633            "{{\"thread_id\":\"{thread_id}\",\"run_id\":\"{run_id}\",\"step_id\":\"{step_id}\",\"include\":{include:?}}}"
4634        );
4635
4636        // Call before_request hook
4637        self.call_before_request(operation, model, &request_json, &mut state)
4638            .await?;
4639
4640        let start_time = Instant::now();
4641
4642        // Make the API call
4643        let response = match assistants_api::get_run_step()
4644            .configuration(&self.client.base_configuration)
4645            .thread_id(&thread_id)
4646            .run_id(&run_id)
4647            .step_id(&step_id)
4648            .maybe_include_left_square_bracket_right_square_bracket(include)
4649            .call()
4650            .await
4651        {
4652            Ok(resp) => resp,
4653            Err(e) => {
4654                let error = self
4655                    .handle_api_error(e, operation, model, &request_json, &state)
4656                    .await;
4657                return Err(error);
4658            }
4659        };
4660
4661        let duration = start_time.elapsed();
4662
4663        // Call after_response hook
4664        self.call_after_response(
4665            &response,
4666            operation,
4667            model,
4668            &request_json,
4669            &state,
4670            duration,
4671            None,
4672            None,
4673        )
4674        .await;
4675
4676        Ok(response)
4677    }
4678}
4679
/// Sub-client scoped to the audio API.
///
/// Holds a borrow of the parent [`Client`], sharing its configuration and
/// interceptor state type `T` (defaults to `()`).
#[derive(Debug, Clone, Copy)]
// NOTE(review): dead_code allowance presumably predates full endpoint
// coverage on this sub-client — confirm it is still needed.
#[allow(dead_code)]
pub struct AudioClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Sub-client scoped to the embeddings API.
///
/// Borrows the parent [`Client`] and shares its configuration.
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct EmbeddingsClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Sub-client scoped to the images API.
///
/// Borrows the parent [`Client`] and shares its configuration.
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct ImagesClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Sub-client scoped to the files API.
///
/// Borrows the parent [`Client`] and shares its configuration.
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct FilesClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Sub-client scoped to the fine-tuning API.
///
/// Borrows the parent [`Client`] and shares its configuration.
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct FineTuningClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Sub-client scoped to the batch API.
///
/// Borrows the parent [`Client`] and shares its configuration.
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct BatchClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Sub-client scoped to the vector stores API.
///
/// Borrows the parent [`Client`] and shares its configuration.
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct VectorStoresClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Sub-client scoped to the moderations API.
///
/// Borrows the parent [`Client`] and shares its configuration.
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct ModerationsClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Sub-client scoped to the threads API.
///
/// Borrows the parent [`Client`] and shares its configuration.
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct ThreadsClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Sub-client scoped to the uploads API.
///
/// Borrows the parent [`Client`] and shares its configuration.
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct UploadsClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Sub-client scoped to the models API (list / retrieve / delete models).
///
/// Borrows the parent [`Client`] and shares its configuration.
#[derive(Debug, Clone, Copy)]
pub struct ModelsClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Sub-client scoped to the legacy completions API.
///
/// Borrows the parent [`Client`] and shares its configuration.
#[derive(Debug, Clone, Copy)]
pub struct CompletionsClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Sub-client scoped to the organization usage/costs API.
///
/// Borrows the parent [`Client`] and shares its configuration.
#[derive(Debug, Clone, Copy)]
pub struct UsageClient<'a, T = ()> {
    client: &'a Client<T>,
}
4767
// Generate the shared interceptor plumbing for every sub-client. The macro
// emits the `call_before_request`, `call_after_response`, and
// `handle_api_error` helper methods that the endpoint implementations below
// invoke around each generated-API call.
impl_interceptor_helpers!(AssistantsClient<'_, T>);
impl_interceptor_helpers!(AudioClient<'_, T>);
impl_interceptor_helpers!(EmbeddingsClient<'_, T>);
impl_interceptor_helpers!(ImagesClient<'_, T>);
impl_interceptor_helpers!(FilesClient<'_, T>);
impl_interceptor_helpers!(FineTuningClient<'_, T>);
impl_interceptor_helpers!(BatchClient<'_, T>);
impl_interceptor_helpers!(VectorStoresClient<'_, T>);
impl_interceptor_helpers!(ModerationsClient<'_, T>);
impl_interceptor_helpers!(ThreadsClient<'_, T>);
impl_interceptor_helpers!(UploadsClient<'_, T>);
impl_interceptor_helpers!(ModelsClient<'_, T>);
impl_interceptor_helpers!(CompletionsClient<'_, T>);
impl_interceptor_helpers!(UsageClient<'_, T>);
4783
4784impl<T: Default + Send + Sync> ModelsClient<'_, T> {
4785    /// List all available models.
4786    ///
4787    /// # Example
4788    ///
4789    /// ```rust,ignore
4790    /// use openai_ergonomic::Client;
4791    ///
4792    /// # async fn example() -> openai_ergonomic::Result<()> {
4793    /// let client = Client::from_env()?;
4794    /// let models = client.models().list().await?;
4795    /// println!("Available models: {}", models.data.len());
4796    /// # Ok(())
4797    /// # }
4798    /// ```
4799    pub async fn list(&self) -> Result<ListModelsResponse> {
4800        // Prepare interceptor context
4801        let mut state = T::default();
4802        let operation = operation_names::MODEL_LIST;
4803        let model = "models";
4804        let request_json = "{}".to_string();
4805
4806        // Call before_request hook
4807        self.call_before_request(operation, model, &request_json, &mut state)
4808            .await?;
4809
4810        let start_time = Instant::now();
4811
4812        // Make the API call
4813        let response = match models_api::list_models()
4814            .configuration(&self.client.base_configuration)
4815            .call()
4816            .await
4817        {
4818            Ok(resp) => resp,
4819            Err(e) => {
4820                let error = self
4821                    .handle_api_error(e, operation, model, &request_json, &state)
4822                    .await;
4823                return Err(error);
4824            }
4825        };
4826
4827        let duration = start_time.elapsed();
4828
4829        // Call after_response hook
4830        self.call_after_response(
4831            &response,
4832            operation,
4833            model,
4834            &request_json,
4835            &state,
4836            duration,
4837            None,
4838            None,
4839        )
4840        .await;
4841
4842        Ok(response)
4843    }
4844
4845    /// Retrieve information about a specific model.
4846    ///
4847    /// # Example
4848    ///
4849    /// ```rust,ignore
4850    /// use openai_ergonomic::Client;
4851    ///
4852    /// # async fn example() -> openai_ergonomic::Result<()> {
4853    /// let client = Client::from_env()?;
4854    /// let model = client.models().get("gpt-4").await?;
4855    /// println!("Model ID: {}", model.id);
4856    /// # Ok(())
4857    /// # }
4858    /// ```
4859    pub async fn get(&self, model_id: impl Into<String>) -> Result<Model> {
4860        let id = model_id.into();
4861
4862        // Prepare interceptor context
4863        let mut state = T::default();
4864        let operation = operation_names::MODEL_RETRIEVE;
4865        let model = "models";
4866        let request_json = format!("{{\"model_id\":\"{id}\"}}");
4867
4868        // Call before_request hook
4869        self.call_before_request(operation, model, &request_json, &mut state)
4870            .await?;
4871
4872        let start_time = Instant::now();
4873
4874        // Make the API call
4875        let response = match models_api::retrieve_model()
4876            .configuration(&self.client.base_configuration)
4877            .model(&id)
4878            .call()
4879            .await
4880        {
4881            Ok(resp) => resp,
4882            Err(e) => {
4883                let error = self
4884                    .handle_api_error(e, operation, model, &request_json, &state)
4885                    .await;
4886                return Err(error);
4887            }
4888        };
4889
4890        let duration = start_time.elapsed();
4891
4892        // Call after_response hook
4893        self.call_after_response(
4894            &response,
4895            operation,
4896            model,
4897            &request_json,
4898            &state,
4899            duration,
4900            None,
4901            None,
4902        )
4903        .await;
4904
4905        Ok(response)
4906    }
4907
4908    /// Retrieve information about a model using a builder.
4909    pub async fn retrieve(&self, builder: ModelRetrievalBuilder) -> Result<Model> {
4910        self.get(builder.model_id()).await
4911    }
4912
4913    /// Delete a fine-tuned model.
4914    ///
4915    /// You must have the Owner role in your organization to delete a model.
4916    ///
4917    /// # Example
4918    ///
4919    /// ```rust,ignore
4920    /// use openai_ergonomic::Client;
4921    ///
4922    /// # async fn example() -> openai_ergonomic::Result<()> {
4923    /// let client = Client::from_env()?;
4924    /// let response = client.models().delete("ft:gpt-3.5-turbo:my-org:custom:id").await?;
4925    /// println!("Deleted: {}", response.deleted);
4926    /// # Ok(())
4927    /// # }
4928    /// ```
4929    pub async fn delete(&self, model_id: impl Into<String>) -> Result<DeleteModelResponse> {
4930        let id = model_id.into();
4931
4932        // Prepare interceptor context
4933        let mut state = T::default();
4934        let operation = operation_names::MODEL_DELETE;
4935        let model = "models";
4936        let request_json = format!("{{\"model_id\":\"{id}\"}}");
4937
4938        // Call before_request hook
4939        self.call_before_request(operation, model, &request_json, &mut state)
4940            .await?;
4941
4942        let start_time = Instant::now();
4943
4944        // Make the API call
4945        let response = match models_api::delete_model()
4946            .configuration(&self.client.base_configuration)
4947            .model(&id)
4948            .call()
4949            .await
4950        {
4951            Ok(resp) => resp,
4952            Err(e) => {
4953                let error = self
4954                    .handle_api_error(e, operation, model, &request_json, &state)
4955                    .await;
4956                return Err(error);
4957            }
4958        };
4959
4960        let duration = start_time.elapsed();
4961
4962        // Call after_response hook
4963        self.call_after_response(
4964            &response,
4965            operation,
4966            model,
4967            &request_json,
4968            &state,
4969            duration,
4970            None,
4971            None,
4972        )
4973        .await;
4974
4975        Ok(response)
4976    }
4977
4978    /// Delete a fine-tuned model using a builder.
4979    pub async fn remove(&self, builder: ModelDeleteBuilder) -> Result<DeleteModelResponse> {
4980        self.delete(builder.model_id()).await
4981    }
4982}
4983
4984impl<T: Default + Send + Sync> CompletionsClient<'_, T> {
4985    /// Create a completions builder for the specified model.
4986    ///
4987    /// # Example
4988    ///
4989    /// ```rust,ignore
4990    /// use openai_ergonomic::Client;
4991    ///
4992    /// # async fn example() -> openai_ergonomic::Result<()> {
4993    /// let client = Client::from_env()?;
4994    /// let builder = client.completions().builder("gpt-3.5-turbo-instruct");
4995    /// # Ok(())
4996    /// # }
4997    /// ```
4998    #[must_use]
4999    pub fn builder(&self, model: impl Into<String>) -> CompletionsBuilder {
5000        CompletionsBuilder::new(model)
5001    }
5002
5003    /// Execute a completion request.
5004    ///
5005    /// # Example
5006    ///
5007    /// ```rust,ignore
5008    /// use openai_ergonomic::Client;
5009    ///
5010    /// # async fn example() -> openai_ergonomic::Result<()> {
5011    /// let client = Client::from_env()?;
5012    /// let builder = client.completions()
5013    ///     .builder("gpt-3.5-turbo-instruct")
5014    ///     .prompt("Once upon a time")
5015    ///     .max_tokens(50);
5016    /// let response = client.completions().create(builder).await?;
5017    /// println!("Completion: {:?}", response.choices);
5018    /// # Ok(())
5019    /// # }
5020    /// ```
5021    pub async fn create(&self, builder: CompletionsBuilder) -> Result<CreateCompletionResponse> {
5022        let request = builder.build()?;
5023
5024        // Prepare interceptor context
5025        let mut state = T::default();
5026        let operation = operation_names::TEXT_COMPLETION;
5027        let model = request.model.clone();
5028        let request_json = serde_json::to_string(&request).unwrap_or_default();
5029
5030        // Call before_request hook
5031        self.call_before_request(operation, &model, &request_json, &mut state)
5032            .await?;
5033
5034        let start_time = Instant::now();
5035
5036        // Make the API call
5037        let response = match completions_api::create_completion()
5038            .configuration(&self.client.base_configuration)
5039            .create_completion_request(request)
5040            .call()
5041            .await
5042        {
5043            Ok(resp) => resp,
5044            Err(e) => {
5045                let error = self
5046                    .handle_api_error(e, operation, &model, &request_json, &state)
5047                    .await;
5048                return Err(error);
5049            }
5050        };
5051
5052        let duration = start_time.elapsed();
5053
5054        // Call after_response hook
5055        self.call_after_response(
5056            &response,
5057            operation,
5058            &model,
5059            &request_json,
5060            &state,
5061            duration,
5062            response.usage.as_ref().map(|u| i64::from(u.prompt_tokens)),
5063            response
5064                .usage
5065                .as_ref()
5066                .map(|u| i64::from(u.completion_tokens)),
5067        )
5068        .await;
5069
5070        Ok(response)
5071    }
5072}
5073
impl<T: Default + Send + Sync> UsageClient<'_, T> {
    /// Get usage data for audio speeches.
    ///
    /// Time range, filters, grouping, and pagination are all taken from the
    /// supplied [`UsageBuilder`]; only the epoch `start_time` is mandatory.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    /// use openai_ergonomic::builders::usage::UsageBuilder;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let builder = UsageBuilder::new(1704067200, None);
    /// let usage = client.usage().audio_speeches(builder).await?;
    /// println!("Usage: {:?}", usage);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn audio_speeches(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_AUDIO_SPEECHES;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        // Deliberate shadowing: `start_time` above was the builder's epoch
        // timestamp; from here on it is the wall-clock timer used to report
        // request duration to the after-response hook.
        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_audio_speeches()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_user_ids(builder.user_ids_option())
            .maybe_api_key_ids(builder.api_key_ids_option())
            .maybe_models(builder.models_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for audio transcriptions.
    ///
    /// Time range, filters, grouping, and pagination come from the supplied
    /// [`UsageBuilder`].
    pub async fn audio_transcriptions(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_AUDIO_TRANSCRIPTIONS;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        // Shadowing: wall-clock timer for request duration.
        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_audio_transcriptions()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_user_ids(builder.user_ids_option())
            .maybe_api_key_ids(builder.api_key_ids_option())
            .maybe_models(builder.models_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for code interpreter sessions.
    ///
    /// This endpoint accepts a narrower filter set than the others: no
    /// user/API-key/model filters, only project ids, grouping, and paging.
    pub async fn code_interpreter_sessions(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_CODE_INTERPRETER;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        // Shadowing: wall-clock timer for request duration.
        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_code_interpreter_sessions()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for completions.
    ///
    /// Time range, filters, grouping, and pagination come from the supplied
    /// [`UsageBuilder`].
    pub async fn completions(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_COMPLETIONS;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        // Shadowing: wall-clock timer for request duration.
        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_completions()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_user_ids(builder.user_ids_option())
            .maybe_api_key_ids(builder.api_key_ids_option())
            .maybe_models(builder.models_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for embeddings.
    ///
    /// Time range, filters, grouping, and pagination come from the supplied
    /// [`UsageBuilder`].
    pub async fn embeddings(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_EMBEDDINGS;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        // Shadowing: wall-clock timer for request duration.
        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_embeddings()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_user_ids(builder.user_ids_option())
            .maybe_api_key_ids(builder.api_key_ids_option())
            .maybe_models(builder.models_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for images.
    ///
    /// Time range, filters, grouping, and pagination come from the supplied
    /// [`UsageBuilder`].
    pub async fn images(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_IMAGES;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        // Shadowing: wall-clock timer for request duration.
        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_images()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_user_ids(builder.user_ids_option())
            .maybe_api_key_ids(builder.api_key_ids_option())
            .maybe_models(builder.models_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for moderations.
    ///
    /// Time range, filters, grouping, and pagination come from the supplied
    /// [`UsageBuilder`].
    pub async fn moderations(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_MODERATIONS;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        // Shadowing: wall-clock timer for request duration.
        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_moderations()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_user_ids(builder.user_ids_option())
            .maybe_api_key_ids(builder.api_key_ids_option())
            .maybe_models(builder.models_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for vector stores.
    ///
    /// Like code interpreter sessions, this endpoint accepts only project
    /// ids, grouping, and paging filters.
    pub async fn vector_stores(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_VECTOR_STORES;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        // Shadowing: wall-clock timer for request duration.
        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_vector_stores()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get cost data.
    ///
    /// Accepts only project ids, grouping, and paging filters in addition to
    /// the time range.
    pub async fn costs(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_COSTS;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        // Shadowing: wall-clock timer for request duration.
        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_costs()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }
}