openai_ergonomic/
client.rs

1//! Client wrapper for ergonomic `OpenAI` API access.
2//!
3//! This module provides a high-level client that wraps the base `OpenAI` client
4//! with ergonomic builders and response handling.
5
6// Allow this lint at module level for interceptor helper methods
7// that require many parameters for comprehensive context passing
8#![allow(clippy::too_many_arguments)]
9
10use crate::interceptor::{
11    AfterResponseContext, BeforeRequestContext, ErrorContext, InterceptorChain,
12};
13use crate::semantic_conventions::operation_names;
14use crate::{
15    builders::{
16        assistants::{AssistantBuilder, MessageBuilder, RunBuilder},
17        audio::{
18            SpeechBuilder, TranscriptionBuilder, TranscriptionRequest, TranslationBuilder,
19            TranslationRequest,
20        },
21        completions::CompletionsBuilder,
22        embeddings::EmbeddingsBuilder,
23        files::{FileDeleteBuilder, FileListBuilder, FileRetrievalBuilder, FileUploadBuilder},
24        images::{
25            ImageEditBuilder, ImageEditRequest, ImageGenerationBuilder, ImageVariationBuilder,
26            ImageVariationRequest,
27        },
28        models::{ModelDeleteBuilder, ModelRetrievalBuilder},
29        moderations::ModerationBuilder,
30        threads::ThreadRequestBuilder,
31        uploads::UploadBuilder,
32        usage::UsageBuilder,
33        Builder, ChatCompletionBuilder, ResponsesBuilder,
34    },
35    config::Config,
36    errors::Result,
37    responses::ChatCompletionResponseWrapper,
38    Error, UploadPurpose,
39};
40use openai_client_base::apis::Error as ApiError;
41use openai_client_base::{
42    apis::{
43        assistants_api, audio_api, batch_api, chat_api, completions_api,
44        configuration::Configuration, embeddings_api, files_api, fine_tuning_api, images_api,
45        models_api, moderations_api, uploads_api, usage_api, vector_stores_api,
46    },
47    models::{
48        AssistantObject, Batch, CreateBatchRequest, CreateChatCompletionRequest,
49        CreateCompletionResponse, CreateEmbeddingResponse, CreateFineTuningJobRequest,
50        CreateModerationResponse, CreateTranscription200Response, CreateTranslation200Response,
51        DeleteAssistantResponse, DeleteFileResponse, DeleteModelResponse,
52        DeleteVectorStoreFileResponse, DeleteVectorStoreResponse, FineTuningJob, ImagesResponse,
53        ListAssistantsResponse, ListBatchesResponse, ListFilesResponse,
54        ListFineTuningJobCheckpointsResponse, ListFineTuningJobEventsResponse,
55        ListMessagesResponse, ListModelsResponse, ListPaginatedFineTuningJobsResponse,
56        ListRunStepsResponse, ListRunsResponse, ListVectorStoreFilesResponse,
57        ListVectorStoresResponse, MessageObject, Model, OpenAiFile, RunObject, RunStepObject,
58        SubmitToolOutputsRunRequestToolOutputsInner, ThreadObject, Upload, UsageResponse,
59        VectorStoreFileObject, VectorStoreObject, VectorStoreSearchResultsPage,
60    },
61};
62use reqwest_middleware::ClientWithMiddleware as HttpClient;
63use std::sync::Arc;
64use std::time::Instant;
65use tokio::time::Duration;
66
// Helper macro to generate interceptor helper methods for sub-clients.
//
// Expands to an `impl<T: Default + Send + Sync>` block on the given type that
// mirrors the helper methods defined directly on `Client<T>`, but reaches the
// interceptor chain through the sub-client's `client` field (the expanded type
// is therefore assumed to hold a reference to `Client<T>` in a field named
// `client` — the body relies on `self.client.interceptors`).
macro_rules! impl_interceptor_helpers {
    ($client_type:ty) => {
        impl<T: Default + Send + Sync> $client_type {
            /// Helper to call `before_request` hooks
            ///
            /// Runs the chain's `before_request` hooks; if any hook fails,
            /// `on_error` hooks are notified before the error is returned.
            async fn call_before_request(
                &self,
                operation: &str,
                model: &str,
                request_json: &str,
                state: &mut T,
            ) -> Result<()> {
                // Skip all context construction when no interceptors are installed.
                if !self.client.interceptors.is_empty() {
                    let mut ctx = BeforeRequestContext {
                        operation,
                        model,
                        request_json,
                        state,
                    };
                    if let Err(e) = self.client.interceptors.before_request(&mut ctx).await {
                        // Give on_error hooks a chance to observe the failure
                        // before it is propagated to the caller.
                        let error_ctx = ErrorContext {
                            operation,
                            model: Some(model),
                            request_json: Some(request_json),
                            error: &e,
                            state: Some(state),
                        };
                        self.client.interceptors.on_error(&error_ctx).await;
                        return Err(e);
                    }
                }
                Ok(())
            }

            /// Helper to handle API errors with interceptor hooks
            ///
            /// Maps the raw `openai-client-base` error into this crate's
            /// `Error` (via the module-level `map_api_error`) and notifies
            /// `on_error` hooks when interceptors are installed.
            async fn handle_api_error<E>(
                &self,
                error: openai_client_base::apis::Error<E>,
                operation: &str,
                model: &str,
                request_json: &str,
                state: &T,
            ) -> Error {
                let error = map_api_error(error);

                if !self.client.interceptors.is_empty() {
                    let error_ctx = ErrorContext {
                        operation,
                        model: Some(model),
                        request_json: Some(request_json),
                        error: &error,
                        state: Some(state),
                    };
                    self.client.interceptors.on_error(&error_ctx).await;
                }

                error
            }

            /// Helper to call `after_response` hooks
            ///
            /// Serializes the response to JSON for the hooks; hook failures
            /// are logged at `warn` level and never propagated to the caller.
            async fn call_after_response<R>(
                &self,
                response: &R,
                operation: &str,
                model: &str,
                request_json: &str,
                state: &T,
                duration: std::time::Duration,
                input_tokens: Option<i64>,
                output_tokens: Option<i64>,
            ) where
                R: serde::Serialize + Sync,
            {
                if !self.client.interceptors.is_empty() {
                    // A serialization failure degrades to an empty string
                    // rather than aborting the response path.
                    let response_json = serde_json::to_string(response).unwrap_or_default();
                    let ctx = AfterResponseContext {
                        operation,
                        model,
                        request_json,
                        response_json: &response_json,
                        duration,
                        input_tokens,
                        output_tokens,
                        state,
                    };
                    if let Err(e) = self.client.interceptors.after_response(&ctx).await {
                        tracing::warn!("Interceptor after_response failed: {}", e);
                    }
                }
            }
        }
    };
}
160
/// Builder for creating a `Client` with interceptors.
///
/// The builder pattern allows you to configure interceptors before the client
/// is created. Once built, the interceptors are immutable, eliminating the need
/// for runtime locking.
///
/// # Example
///
/// ```rust,ignore
/// let client = Client::from_env()?
///     .with_interceptor(Box::new(my_interceptor))
///     .build();
/// ```
pub struct ClientBuilder<T = ()> {
    /// Client configuration, shared with the built `Client`.
    config: Arc<Config>,
    /// HTTP client (possibly wrapped in middleware) used for all requests.
    http: HttpClient,
    /// `openai-client-base` configuration derived from `config` in `new()`.
    base_configuration: Configuration,
    /// Interceptor chain being assembled; becomes immutable at `build()`.
    interceptors: InterceptorChain<T>,
}
180
/// Main client for interacting with the `OpenAI` API.
///
/// The client provides ergonomic methods for all `OpenAI` API endpoints,
/// with built-in retry logic, rate limiting, error handling, and support
/// for middleware through interceptors.
///
/// Use `Client::from_env()` or `Client::new()` to create a builder, then call
/// `.build()` to create the client.
///
/// # Example
///
/// ```rust,ignore
/// # use openai_ergonomic::{Client, Config};
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::from_env()?.build();
/// // TODO: Add usage example once builders are implemented
/// # Ok(())
/// # }
/// ```
#[derive(Clone)]
pub struct Client<T = ()> {
    /// Client configuration (behind `Arc`, so cloning the client is cheap).
    config: Arc<Config>,
    /// HTTP client with any configured middleware attached.
    http: HttpClient,
    /// `openai-client-base` configuration used by the generated API calls.
    base_configuration: Configuration,
    /// Immutable interceptor chain; `Arc` so clones share the same hooks.
    interceptors: Arc<InterceptorChain<T>>,
}
208
209// Custom Debug implementation since InterceptorChain doesn't implement Debug
210impl<T> std::fmt::Debug for Client<T> {
211    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
212        f.debug_struct("Client")
213            .field("config", &self.config)
214            .field("http", &"<HttpClient>")
215            .field("base_configuration", &"<Configuration>")
216            .field("interceptors", &"<InterceptorChain>")
217            .finish()
218    }
219}
220
221// Implementation for ClientBuilder with default state type ()
222impl ClientBuilder {
223    /// Create a new client builder with the given configuration.
224    pub fn new(config: Config) -> Result<Self> {
225        // Check if we're using Azure OpenAI
226        let is_azure = config.is_azure();
227
228        // Use custom HTTP client if provided, otherwise build a default one
229        let http_client = if let Some(client) = config.http_client() {
230            client.clone()
231        } else {
232            let reqwest_client = reqwest::Client::builder()
233                .timeout(Duration::from_secs(120)) // Default timeout: 120 seconds
234                .user_agent(format!("openai-ergonomic/{}", env!("CARGO_PKG_VERSION")))
235                .build()
236                .map_err(Error::Http)?;
237
238            let mut client_builder = reqwest_middleware::ClientBuilder::new(reqwest_client);
239
240            // Add Azure authentication middleware if using Azure OpenAI
241            if is_azure {
242                let azure_middleware = crate::azure_middleware::AzureAuthMiddleware::new(
243                    config.api_key().to_string(),
244                    config.azure_api_version().map(String::from),
245                    config.azure_deployment().map(String::from),
246                );
247                client_builder = client_builder.with(azure_middleware);
248            }
249
250            client_builder.build()
251        };
252
253        // Create openai-client-base configuration
254        let mut base_configuration = Configuration::new();
255
256        // Set the custom HTTP client (with Azure middleware if configured)
257        base_configuration.client = http_client.clone();
258
259        // For Azure OpenAI, we don't use bearer token (handled by middleware)
260        // For standard OpenAI, use bearer token
261        if !is_azure {
262            base_configuration.bearer_access_token = Some(config.api_key().to_string());
263        }
264
265        if let Some(base_url) = config.base_url() {
266            base_configuration.base_path = base_url.to_string();
267        }
268
269        if let Some(org_id) = config.organization_id() {
270            base_configuration.user_agent = Some(format!(
271                "openai-ergonomic/{} org/{}",
272                env!("CARGO_PKG_VERSION"),
273                org_id
274            ));
275        }
276
277        Ok(Self {
278            config: Arc::new(config),
279            http: http_client,
280            base_configuration,
281            interceptors: InterceptorChain::new(),
282        })
283    }
284
285    /// Create a new client builder with default configuration from environment variables.
286    pub fn from_env() -> Result<Self> {
287        Self::new(Config::from_env()?)
288    }
289}
290
291// Implementation for ClientBuilder with any state type
292impl<T> ClientBuilder<T> {
293    /// Add an interceptor to the builder.
294    ///
295    /// Creates a new builder with the interceptor's state type. The interceptor provides
296    /// hooks into the request/response lifecycle for observability, logging, and custom
297    /// processing.
298    ///
299    /// Note: This method transforms the builder's type, so it can only be called once.
300    /// For multiple interceptors with the same state type, use a composite interceptor
301    /// or call this method multiple times (each will replace the previous chain).
302    ///
303    /// # Examples
304    ///
305    /// Simple interceptor (no state):
306    /// ```rust,ignore
307    /// use openai_ergonomic::{Client, Interceptor, BeforeRequestContext};
308    ///
309    /// struct LoggingInterceptor;
310    ///
311    /// #[async_trait::async_trait]
312    /// impl Interceptor for LoggingInterceptor {
313    ///     async fn before_request(&self, ctx: &mut BeforeRequestContext<'_>) -> Result<()> {
314    ///         println!("Calling {}", ctx.operation);
315    ///         Ok(())
316    ///     }
317    /// }
318    ///
319    /// let client = Client::from_env()?
320    ///     .with_interceptor(Box::new(LoggingInterceptor))
321    ///     .build();
322    /// ```
323    ///
324    /// Interceptor with custom state:
325    /// ```rust,ignore
326    /// use openai_ergonomic::{Client, LangfuseInterceptor, LangfuseState};
327    ///
328    /// let interceptor = LangfuseInterceptor::new(tracer, config);
329    /// let client: Client<LangfuseState<_>> = Client::from_env()?
330    ///     .with_interceptor(Box::new(interceptor))
331    ///     .build();
332    /// ```
333    #[must_use]
334    pub fn with_interceptor<U>(
335        self,
336        interceptor: Box<dyn crate::interceptor::Interceptor<U>>,
337    ) -> ClientBuilder<U> {
338        let mut new_chain = InterceptorChain::new();
339        new_chain.add(interceptor);
340
341        ClientBuilder {
342            config: self.config,
343            http: self.http,
344            base_configuration: self.base_configuration,
345            interceptors: new_chain,
346        }
347    }
348
349    /// Add an interceptor that uses the same state type.
350    ///
351    /// This allows chaining multiple interceptors with the same state type without
352    /// type transformation.
353    ///
354    /// # Example
355    ///
356    /// ```rust,ignore
357    /// let client = Client::from_env()?
358    ///     .add_interceptor(Box::new(logger))
359    ///     .add_interceptor(Box::new(metrics))
360    ///     .build();
361    /// ```
362    #[must_use]
363    pub fn add_interceptor(
364        mut self,
365        interceptor: Box<dyn crate::interceptor::Interceptor<T>>,
366    ) -> Self {
367        self.interceptors.add(interceptor);
368        self
369    }
370
371    /// Build the client with the configured interceptors.
372    ///
373    /// After building, the interceptors are immutable, eliminating runtime locking overhead.
374    #[must_use]
375    pub fn build(self) -> Client<T> {
376        Client {
377            config: self.config,
378            http: self.http,
379            base_configuration: self.base_configuration,
380            interceptors: Arc::new(self.interceptors),
381        }
382    }
383}
384
385// Implementation for Client
386impl Client {
387    /// Create a new client builder with the given configuration.
388    pub fn builder(config: Config) -> Result<ClientBuilder> {
389        ClientBuilder::new(config)
390    }
391
392    /// Create a new client builder with default configuration from environment variables.
393    pub fn from_env() -> Result<ClientBuilder> {
394        ClientBuilder::from_env()
395    }
396}
397
398impl<T> Client<T> {
399    /// Get a reference to the client configuration.
400    pub fn config(&self) -> &Config {
401        &self.config
402    }
403
404    /// Get a reference to the HTTP client.
405    pub fn http_client(&self) -> &HttpClient {
406        &self.http
407    }
408}
409
410// Interceptor helper methods
411impl<T: Default + Send + Sync> Client<T> {
412    /// Helper to call `before_request` hooks
413    async fn call_before_request(
414        &self,
415        operation: &str,
416        model: &str,
417        request_json: &str,
418        state: &mut T,
419    ) -> Result<()> {
420        if !self.interceptors.is_empty() {
421            let mut ctx = BeforeRequestContext {
422                operation,
423                model,
424                request_json,
425                state,
426            };
427            if let Err(e) = self.interceptors.before_request(&mut ctx).await {
428                let error_ctx = ErrorContext {
429                    operation,
430                    model: Some(model),
431                    request_json: Some(request_json),
432                    error: &e,
433                    state: Some(state),
434                };
435                self.interceptors.on_error(&error_ctx).await;
436                return Err(e);
437            }
438        }
439        Ok(())
440    }
441
442    /// Helper to handle API errors with interceptor hooks
443    async fn handle_api_error<E>(
444        &self,
445        error: openai_client_base::apis::Error<E>,
446        operation: &str,
447        model: &str,
448        request_json: &str,
449        state: &T,
450    ) -> Error {
451        let error = map_api_error(error);
452
453        if !self.interceptors.is_empty() {
454            let error_ctx = ErrorContext {
455                operation,
456                model: Some(model),
457                request_json: Some(request_json),
458                error: &error,
459                state: Some(state),
460            };
461            self.interceptors.on_error(&error_ctx).await;
462        }
463
464        error
465    }
466
467    /// Helper to call `after_response` hooks
468    async fn call_after_response<R>(
469        &self,
470        response: &R,
471        operation: &str,
472        model: &str,
473        request_json: &str,
474        state: &T,
475        duration: std::time::Duration,
476        input_tokens: Option<i64>,
477        output_tokens: Option<i64>,
478    ) where
479        R: serde::Serialize + Sync,
480    {
481        if !self.interceptors.is_empty() {
482            let response_json = serde_json::to_string(response).unwrap_or_default();
483            let ctx = AfterResponseContext {
484                operation,
485                model,
486                request_json,
487                response_json: &response_json,
488                duration,
489                input_tokens,
490                output_tokens,
491                state,
492            };
493            if let Err(e) = self.interceptors.after_response(&ctx).await {
494                tracing::warn!("Interceptor after_response failed: {}", e);
495            }
496        }
497    }
498}
499
500// Chat API methods
501impl<T: Default + Send + Sync + 'static> Client<T> {
502    /// Create a chat completion builder.
503    pub fn chat(&self) -> ChatCompletionBuilder {
504        let model = self.config.default_model().unwrap_or("gpt-4");
505        ChatCompletionBuilder::new(model)
506    }
507
508    /// Create a chat completion with a simple user message.
509    pub fn chat_simple(&self, message: impl Into<String>) -> ChatCompletionBuilder {
510        self.chat().user(message)
511    }
512
513    /// Create a chat completion with system and user messages.
514    pub fn chat_with_system(
515        &self,
516        system: impl Into<String>,
517        user: impl Into<String>,
518    ) -> ChatCompletionBuilder {
519        self.chat().system(system).user(user)
520    }
521
522    /// Execute a chat completion request.
523    pub async fn execute_chat(
524        &self,
525        request: CreateChatCompletionRequest,
526    ) -> Result<ChatCompletionResponseWrapper> {
527        let mut state = T::default();
528        let operation = operation_names::CHAT;
529        let model = request.model.clone();
530        let request_json = serde_json::to_string(&request).unwrap_or_default();
531
532        // Call before_request hook
533        self.call_before_request(operation, &model, &request_json, &mut state)
534            .await?;
535
536        let start_time = Instant::now();
537
538        // Make the API call
539        let response = match chat_api::create_chat_completion()
540            .configuration(&self.base_configuration)
541            .create_chat_completion_request(request)
542            .call()
543            .await
544        {
545            Ok(resp) => resp,
546            Err(e) => {
547                let error = self
548                    .handle_api_error(e, operation, &model, &request_json, &state)
549                    .await;
550                return Err(error);
551            }
552        };
553
554        let duration = start_time.elapsed();
555
556        // Call after_response hook
557        self.call_after_response(
558            &response,
559            operation,
560            &model,
561            &request_json,
562            &state,
563            duration,
564            response.usage.as_ref().map(|u| i64::from(u.prompt_tokens)),
565            response
566                .usage
567                .as_ref()
568                .map(|u| i64::from(u.completion_tokens)),
569        )
570        .await;
571
572        Ok(ChatCompletionResponseWrapper::new(response))
573    }
574
575    /// Execute a chat completion builder.
576    pub async fn send_chat(
577        &self,
578        builder: ChatCompletionBuilder,
579    ) -> Result<ChatCompletionResponseWrapper> {
580        let request = builder.build()?;
581        self.execute_chat(request).await
582    }
583
584    /// Send a chat completion request with streaming enabled.
585    ///
586    /// Returns a stream of chat completion chunks as they are generated.
587    /// This allows for real-time display of the model's response.
588    ///
589    /// # Example
590    ///
591    /// ```rust,no_run
592    /// use openai_ergonomic::Client;
593    /// use futures::StreamExt;
594    ///
595    /// #[tokio::main]
596    /// async fn main() -> Result<(), Box<dyn std::error::Error>> {
597    ///     let client = Client::from_env()?.build();
598    ///
599    ///     let builder = client.chat().user("Tell me a story");
600    ///     let mut stream = client.send_chat_stream(builder).await?;
601    ///
602    ///     while let Some(chunk) = stream.next().await {
603    ///         let chunk = chunk?;
604    ///         if let Some(content) = chunk.content() {
605    ///             print!("{}", content);
606    ///         }
607    ///     }
608    ///
609    ///     Ok(())
610    /// }
611    /// ```
612    pub async fn send_chat_stream(
613        &self,
614        mut builder: ChatCompletionBuilder,
615    ) -> Result<crate::streaming::BoxedChatStream> {
616        // Force streaming mode
617        builder = builder.stream(true);
618        let mut request = builder.build()?;
619        request.stream = Some(true);
620
621        self.execute_chat_stream(request, crate::semantic_conventions::operation_names::CHAT)
622            .await
623    }
624
625    /// Execute a chat completion request with streaming.
626    ///
627    /// This is the low-level method that performs the actual streaming request.
628    async fn execute_chat_stream(
629        &self,
630        request: CreateChatCompletionRequest,
631        operation: &str,
632    ) -> Result<crate::streaming::BoxedChatStream> {
633        let uri_str = format!("{}/chat/completions", self.config.api_base());
634
635        let mut req_builder = self
636            .http_client()
637            .request(reqwest::Method::POST, &uri_str)
638            .bearer_auth(self.config.api_key())
639            .json(&request);
640
641        // Add organization ID if present
642        if let Some(org_id) = self.config.organization_id() {
643            req_builder = req_builder.header("OpenAI-Organization", org_id);
644        }
645
646        // Add project ID if present
647        if let Some(project_id) = self.config.project() {
648            req_builder = req_builder.header("OpenAI-Project", project_id);
649        }
650
651        let req = req_builder.build()?;
652
653        // Serialize request for interceptors
654        let request_json = serde_json::to_string(&request).unwrap_or_else(|_| "{}".to_string());
655        let model = request.model.clone();
656
657        // Call before_request hook if interceptors are present
658        let mut state = T::default();
659        if !self.interceptors.is_empty() {
660            let mut ctx = crate::interceptor::BeforeRequestContext {
661                operation,
662                model: &model,
663                request_json: &request_json,
664                state: &mut state,
665            };
666            self.interceptors.before_request(&mut ctx).await?;
667        }
668
669        let response = self.http_client().execute(req).await?;
670
671        let status = response.status();
672        if !status.is_success() {
673            let error_text = response.text().await?;
674            return Err(Error::Api {
675                status: status.as_u16(),
676                message: error_text,
677                error_type: None,
678                error_code: None,
679            });
680        }
681
682        let stream = crate::streaming::ChatCompletionStream::new(response);
683
684        // Wrap with interceptors if present
685        if self.interceptors.is_empty() {
686            Ok(Box::pin(stream))
687        } else {
688            let intercepted = crate::streaming::InterceptedStream::new(
689                stream,
690                std::sync::Arc::clone(&self.interceptors),
691                operation.to_string(),
692                model,
693                request_json,
694                state,
695            );
696            Ok(Box::pin(intercepted))
697        }
698    }
699}
700
701// Responses API methods
702impl<T: Default + Send + Sync + 'static> Client<T> {
703    /// Create a responses builder for structured outputs.
704    pub fn responses(&self) -> ResponsesBuilder {
705        let model = self.config.default_model().unwrap_or("gpt-4");
706        ResponsesBuilder::new(model)
707    }
708
709    /// Create a simple responses request with a user message.
710    pub fn responses_simple(&self, message: impl Into<String>) -> ResponsesBuilder {
711        self.responses().user(message)
712    }
713
714    /// Execute a responses request.
715    pub async fn execute_responses(
716        &self,
717        request: CreateChatCompletionRequest,
718    ) -> Result<ChatCompletionResponseWrapper> {
719        // The Responses API uses the same underlying endpoint as chat
720        self.execute_chat(request).await
721    }
722
723    /// Execute a responses builder.
724    pub async fn send_responses(
725        &self,
726        builder: ResponsesBuilder,
727    ) -> Result<ChatCompletionResponseWrapper> {
728        let request = builder.build()?;
729        self.execute_responses(request).await
730    }
731
732    /// Send a responses request with streaming enabled.
733    ///
734    /// This enables real-time streaming of responses using Server-Sent Events (SSE).
735    /// The stream yields chunks as they arrive from the API.
736    pub async fn send_responses_stream(
737        &self,
738        mut builder: ResponsesBuilder,
739    ) -> Result<crate::streaming::BoxedChatStream> {
740        // Force streaming mode
741        builder = builder.stream(true);
742        let mut request = builder.build()?;
743        request.stream = Some(true);
744
745        // The Responses API uses the same streaming endpoint as chat
746        self.execute_chat_stream(
747            request,
748            crate::semantic_conventions::operation_names::RESPONSES,
749        )
750        .await
751    }
752}
753
754// TODO: Add methods for other API endpoints
impl<T: Default + Send + Sync> Client<T> {
    /// Get the assistants sub-client.
    #[must_use]
    pub fn assistants(&self) -> AssistantsClient<'_, T> {
        AssistantsClient { client: self }
    }

    /// Get the audio sub-client (speech, transcription, translation).
    #[must_use]
    pub fn audio(&self) -> AudioClient<'_, T> {
        AudioClient { client: self }
    }

    /// Get the embeddings sub-client.
    #[must_use]
    pub fn embeddings(&self) -> EmbeddingsClient<'_, T> {
        EmbeddingsClient { client: self }
    }

    /// Get the images sub-client.
    #[must_use]
    pub fn images(&self) -> ImagesClient<'_, T> {
        ImagesClient { client: self }
    }

    /// Get the files sub-client.
    #[must_use]
    pub fn files(&self) -> FilesClient<'_, T> {
        FilesClient { client: self }
    }

    /// Get the fine-tuning sub-client.
    #[must_use]
    pub fn fine_tuning(&self) -> FineTuningClient<'_, T> {
        FineTuningClient { client: self }
    }

    /// Get the batch sub-client.
    #[must_use]
    pub fn batch(&self) -> BatchClient<'_, T> {
        BatchClient { client: self }
    }

    /// Get the vector stores sub-client.
    #[must_use]
    pub fn vector_stores(&self) -> VectorStoresClient<'_, T> {
        VectorStoresClient { client: self }
    }

    /// Get the moderations sub-client.
    #[must_use]
    pub fn moderations(&self) -> ModerationsClient<'_, T> {
        ModerationsClient { client: self }
    }

    /// Get the threads sub-client.
    #[must_use]
    pub fn threads(&self) -> ThreadsClient<'_, T> {
        ThreadsClient { client: self }
    }

    /// Get the uploads sub-client.
    #[must_use]
    pub fn uploads(&self) -> UploadsClient<'_, T> {
        UploadsClient { client: self }
    }

    /// Get the models sub-client.
    #[must_use]
    pub fn models(&self) -> ModelsClient<'_, T> {
        ModelsClient { client: self }
    }

    /// Get the completions sub-client.
    #[must_use]
    pub fn completions(&self) -> CompletionsClient<'_, T> {
        CompletionsClient { client: self }
    }

    /// Get the usage sub-client.
    #[must_use]
    pub fn usage(&self) -> UsageClient<'_, T> {
        UsageClient { client: self }
    }
}
840
impl<T: Default + Send + Sync> AudioClient<'_, T> {
    /// Create a speech builder for text-to-speech generation.
    ///
    /// The builder is submitted with [`Self::create_speech`].
    #[must_use]
    pub fn speech(
        &self,
        model: impl Into<String>,
        input: impl Into<String>,
        voice: impl Into<String>,
    ) -> SpeechBuilder {
        SpeechBuilder::new(model, input, voice)
    }

    /// Submit a speech synthesis request and return binary audio data.
    ///
    /// Runs the interceptor `before_request`/`after_response` hooks around the
    /// API call. Because this endpoint returns raw audio rather than JSON, the
    /// `after_response` hook receives a synthetic `{"size": N}` payload.
    ///
    /// # Errors
    ///
    /// Returns an error if the builder fails validation, an interceptor
    /// rejects the request, the API call fails, or the response body cannot
    /// be read.
    pub async fn create_speech(&self, builder: SpeechBuilder) -> Result<Vec<u8>> {
        let request = builder.build()?;
        // Per-request interceptor state plus a serialized copy of the request
        // for hook context.
        let mut state = T::default();
        let operation = operation_names::AUDIO_SPEECH;
        let model = request.model.clone();
        let request_json = serde_json::to_string(&request).unwrap_or_default();

        // Call before_request hook; an interceptor may abort the request here.
        self.call_before_request(operation, &model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match audio_api::create_speech()
            .configuration(&self.client.base_configuration)
            .create_speech_request(request)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                // Route the failure through the error interceptors before
                // surfacing it to the caller.
                let error = self
                    .handle_api_error(e, operation, &model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let bytes = response.bytes().await.map_err(Error::Http)?;
        let duration = start_time.elapsed();

        // Call after_response hook (note: no JSON response for audio, so a
        // synthetic size-only payload is reported instead).
        let response_json = format!("{{\"size\": {}}}", bytes.len());
        self.call_after_response(
            &response_json,
            operation,
            &model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(bytes.to_vec())
    }

    /// Create a transcription builder for speech-to-text workflows.
    #[must_use]
    pub fn transcription(
        &self,
        file: impl AsRef<std::path::Path>,
        model: impl Into<String>,
    ) -> TranscriptionBuilder {
        TranscriptionBuilder::new(file, model)
    }

    /// Submit a transcription request.
    ///
    /// # Errors
    ///
    /// Returns an error if the builder fails validation, an interceptor
    /// rejects the request, or the API call fails.
    pub async fn create_transcription(
        &self,
        builder: TranscriptionBuilder,
    ) -> Result<CreateTranscription200Response> {
        let request = builder.build()?;
        let model_str = request.model.clone();
        let mut state = T::default();
        let operation = operation_names::AUDIO_TRANSCRIPTION;
        // TranscriptionRequest doesn't implement Serialize, so a minimal
        // hand-built JSON stand-in (model + file placeholder) is passed to the
        // interceptor hooks instead of the real payload.
        let request_json = format!(r#"{{"model":"{model_str}","file":"<audio_file>"}}"#);

        // Call before_request hook
        self.call_before_request(operation, &model_str, &request_json, &mut state)
            .await?;

        // Destructure so each field can be handed to the generated API builder
        // individually (the endpoint takes multipart form fields).
        let TranscriptionRequest {
            file,
            model,
            language,
            prompt,
            response_format,
            temperature,
            stream,
            chunking_strategy,
            timestamp_granularities,
            include,
        } = request;

        // The generated builder expects granularities as plain strings.
        let timestamp_strings = timestamp_granularities.as_ref().map(|values| {
            values
                .iter()
                .map(|granularity| granularity.as_str().to_string())
                .collect::<Vec<_>>()
        });

        let start_time = Instant::now();

        // Make the API call
        let response = match audio_api::create_transcription()
            .configuration(&self.client.base_configuration)
            .file(file)
            .model(&model)
            .maybe_language(language.as_deref())
            .maybe_prompt(prompt.as_deref())
            .maybe_response_format(response_format)
            .maybe_temperature(temperature)
            .maybe_stream(stream)
            .maybe_chunking_strategy(chunking_strategy)
            .maybe_timestamp_granularities(timestamp_strings)
            .maybe_include(include)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, &model_str, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            &model_str,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Create a translation builder for audio-to-English translation.
    #[must_use]
    pub fn translation(
        &self,
        file: impl AsRef<std::path::Path>,
        model: impl Into<String>,
    ) -> TranslationBuilder {
        TranslationBuilder::new(file, model)
    }

    /// Submit an audio translation request.
    ///
    /// # Errors
    ///
    /// Returns an error if the builder fails validation, an interceptor
    /// rejects the request, or the API call fails.
    pub async fn create_translation(
        &self,
        builder: TranslationBuilder,
    ) -> Result<CreateTranslation200Response> {
        let request = builder.build()?;
        let model_str = request.model.clone();

        // Prepare interceptor context. TranslationRequest is not Serialize,
        // so a minimal hand-built JSON stand-in is used for the hooks.
        let mut state = T::default();
        let operation = operation_names::AUDIO_TRANSLATION;
        let request_json = format!(r#"{{"model":"{model_str}","file":"<audio_file>"}}"#);

        // Call before_request hook
        self.call_before_request(operation, &model_str, &request_json, &mut state)
            .await?;

        let TranslationRequest {
            file,
            model,
            prompt,
            response_format,
            temperature,
        } = request;

        // The generated builder takes the response format as &str.
        let response_format_owned = response_format.map(|format| format.to_string());

        let start_time = Instant::now();

        // Make the API call
        let response = match audio_api::create_translation()
            .configuration(&self.client.base_configuration)
            .file(file)
            .model(&model)
            .maybe_prompt(prompt.as_deref())
            .maybe_response_format(response_format_owned.as_deref())
            .maybe_temperature(temperature)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, &model_str, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            &model_str,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }
}
1071
1072impl<T: Default + Send + Sync> EmbeddingsClient<'_, T> {
1073    /// Start a builder for creating embeddings requests with the given model.
1074    #[must_use]
1075    pub fn builder(&self, model: impl Into<String>) -> EmbeddingsBuilder {
1076        EmbeddingsBuilder::new(model)
1077    }
1078
1079    /// Convenience helper for embedding a single string input.
1080    #[must_use]
1081    pub fn text(&self, model: impl Into<String>, input: impl Into<String>) -> EmbeddingsBuilder {
1082        self.builder(model).input_text(input)
1083    }
1084
1085    /// Convenience helper for embedding a single tokenized input.
1086    #[must_use]
1087    pub fn tokens<I>(&self, model: impl Into<String>, tokens: I) -> EmbeddingsBuilder
1088    where
1089        I: IntoIterator<Item = i32>,
1090    {
1091        self.builder(model).input_tokens(tokens)
1092    }
1093
1094    /// Execute an embeddings request built with [`EmbeddingsBuilder`].
1095    pub async fn create(&self, builder: EmbeddingsBuilder) -> Result<CreateEmbeddingResponse> {
1096        let request = builder.build()?;
1097
1098        // Prepare interceptor context
1099        let mut state = T::default();
1100        let operation = operation_names::EMBEDDINGS;
1101        let model = request.model.clone();
1102        let request_json = serde_json::to_string(&request).unwrap_or_default();
1103
1104        // Call before_request hook
1105        self.call_before_request(operation, &model, &request_json, &mut state)
1106            .await?;
1107
1108        let start_time = Instant::now();
1109
1110        // Make the API call
1111        let response = match embeddings_api::create_embedding()
1112            .configuration(&self.client.base_configuration)
1113            .create_embedding_request(request)
1114            .call()
1115            .await
1116        {
1117            Ok(resp) => resp,
1118            Err(e) => {
1119                let error = self
1120                    .handle_api_error(e, operation, &model, &request_json, &state)
1121                    .await;
1122                return Err(error);
1123            }
1124        };
1125
1126        let duration = start_time.elapsed();
1127
1128        // Call after_response hook
1129        self.call_after_response(
1130            &response,
1131            operation,
1132            &model,
1133            &request_json,
1134            &state,
1135            duration,
1136            Some(i64::from(response.usage.prompt_tokens)),
1137            Some(i64::from(response.usage.total_tokens)),
1138        )
1139        .await;
1140
1141        Ok(response)
1142    }
1143}
1144
1145impl<T: Default + Send + Sync> ImagesClient<'_, T> {
1146    /// Create a builder for image generation requests.
1147    #[must_use]
1148    pub fn generate(&self, prompt: impl Into<String>) -> ImageGenerationBuilder {
1149        ImageGenerationBuilder::new(prompt)
1150    }
1151
1152    /// Execute an image generation request.
1153    pub async fn create(&self, builder: ImageGenerationBuilder) -> Result<ImagesResponse> {
1154        let request = builder.build()?;
1155
1156        // Prepare interceptor context
1157        let mut state = T::default();
1158        let operation = operation_names::IMAGE_GENERATION;
1159        let model = request
1160            .model
1161            .as_ref()
1162            .map_or_else(|| "dall-e-2".to_string(), ToString::to_string);
1163        let request_json = serde_json::to_string(&request).unwrap_or_default();
1164
1165        // Call before_request hook
1166        self.call_before_request(operation, &model, &request_json, &mut state)
1167            .await?;
1168
1169        let start_time = Instant::now();
1170
1171        // Make the API call
1172        let response = match images_api::create_image()
1173            .configuration(&self.client.base_configuration)
1174            .create_image_request(request)
1175            .call()
1176            .await
1177        {
1178            Ok(resp) => resp,
1179            Err(e) => {
1180                let error = self
1181                    .handle_api_error(e, operation, &model, &request_json, &state)
1182                    .await;
1183                return Err(error);
1184            }
1185        };
1186
1187        let duration = start_time.elapsed();
1188
1189        // Call after_response hook
1190        self.call_after_response(
1191            &response,
1192            operation,
1193            &model,
1194            &request_json,
1195            &state,
1196            duration,
1197            None,
1198            None,
1199        )
1200        .await;
1201
1202        Ok(response)
1203    }
1204
1205    /// Create an image edit builder using a base image and prompt.
1206    #[must_use]
1207    pub fn edit(
1208        &self,
1209        image: impl AsRef<std::path::Path>,
1210        prompt: impl Into<String>,
1211    ) -> ImageEditBuilder {
1212        ImageEditBuilder::new(image, prompt)
1213    }
1214
1215    /// Execute an image edit request.
1216    pub async fn create_edit(&self, builder: ImageEditBuilder) -> Result<ImagesResponse> {
1217        let request = builder.build()?;
1218        let model_str = request
1219            .model
1220            .as_ref()
1221            .map_or_else(|| "dall-e-2".to_string(), ToString::to_string);
1222
1223        // Prepare interceptor context
1224        let mut state = T::default();
1225        let operation = operation_names::IMAGE_EDIT;
1226        let request_json = format!(
1227            r#"{{"prompt":"{}","model":"{}"}}"#,
1228            request.prompt, model_str
1229        );
1230
1231        // Call before_request hook
1232        self.call_before_request(operation, &model_str, &request_json, &mut state)
1233            .await?;
1234
1235        let ImageEditRequest {
1236            image,
1237            prompt,
1238            mask,
1239            background,
1240            model,
1241            n,
1242            size,
1243            response_format,
1244            output_format,
1245            output_compression,
1246            user,
1247            input_fidelity,
1248            stream,
1249            partial_images,
1250            quality,
1251        } = request;
1252
1253        let start_time = Instant::now();
1254
1255        // Make the API call
1256        let response = match images_api::create_image_edit()
1257            .configuration(&self.client.base_configuration)
1258            .image(image)
1259            .prompt(&prompt)
1260            .maybe_mask(mask)
1261            .maybe_background(background.as_deref())
1262            .maybe_model(model.as_deref())
1263            .maybe_n(n)
1264            .maybe_size(size.as_deref())
1265            .maybe_response_format(response_format.as_deref())
1266            .maybe_output_format(output_format.as_deref())
1267            .maybe_output_compression(output_compression)
1268            .maybe_user(user.as_deref())
1269            .maybe_input_fidelity(input_fidelity)
1270            .maybe_stream(stream)
1271            .maybe_partial_images(partial_images)
1272            .maybe_quality(quality.as_deref())
1273            .call()
1274            .await
1275        {
1276            Ok(resp) => resp,
1277            Err(e) => {
1278                let error = self
1279                    .handle_api_error(e, operation, &model_str, &request_json, &state)
1280                    .await;
1281                return Err(error);
1282            }
1283        };
1284
1285        let duration = start_time.elapsed();
1286
1287        // Call after_response hook
1288        self.call_after_response(
1289            &response,
1290            operation,
1291            &model_str,
1292            &request_json,
1293            &state,
1294            duration,
1295            None,
1296            None,
1297        )
1298        .await;
1299
1300        Ok(response)
1301    }
1302
1303    /// Create an image variation builder.
1304    #[must_use]
1305    pub fn variation(&self, image: impl AsRef<std::path::Path>) -> ImageVariationBuilder {
1306        ImageVariationBuilder::new(image)
1307    }
1308
1309    /// Execute an image variation request.
1310    pub async fn create_variation(&self, builder: ImageVariationBuilder) -> Result<ImagesResponse> {
1311        let request = builder.build()?;
1312        let model_str = request
1313            .model
1314            .as_ref()
1315            .map_or_else(|| "dall-e-2".to_string(), ToString::to_string);
1316
1317        // Prepare interceptor context
1318        let mut state = T::default();
1319        let operation = operation_names::IMAGE_VARIATION;
1320        let request_json = format!(r#"{{"model":"{model_str}"}}"#);
1321
1322        // Call before_request hook
1323        self.call_before_request(operation, &model_str, &request_json, &mut state)
1324            .await?;
1325
1326        let ImageVariationRequest {
1327            image,
1328            model,
1329            n,
1330            response_format,
1331            size,
1332            user,
1333        } = request;
1334
1335        let start_time = Instant::now();
1336
1337        // Make the API call
1338        let response = match images_api::create_image_variation()
1339            .configuration(&self.client.base_configuration)
1340            .image(image)
1341            .maybe_model(model.as_deref())
1342            .maybe_n(n)
1343            .maybe_response_format(response_format.as_deref())
1344            .maybe_size(size.as_deref())
1345            .maybe_user(user.as_deref())
1346            .call()
1347            .await
1348        {
1349            Ok(resp) => resp,
1350            Err(e) => {
1351                let error = self
1352                    .handle_api_error(e, operation, &model_str, &request_json, &state)
1353                    .await;
1354                return Err(error);
1355            }
1356        };
1357
1358        let duration = start_time.elapsed();
1359
1360        // Call after_response hook
1361        self.call_after_response(
1362            &response,
1363            operation,
1364            &model_str,
1365            &request_json,
1366            &state,
1367            duration,
1368            None,
1369            None,
1370        )
1371        .await;
1372
1373        Ok(response)
1374    }
1375}
1376
1377impl<T: Default + Send + Sync> ThreadsClient<'_, T> {
1378    /// Start building a new thread request.
1379    #[must_use]
1380    pub fn builder(&self) -> ThreadRequestBuilder {
1381        ThreadRequestBuilder::new()
1382    }
1383
1384    /// Create a thread using the provided builder.
1385    pub async fn create(&self, builder: ThreadRequestBuilder) -> Result<ThreadObject> {
1386        let request = builder.build()?;
1387
1388        // Prepare interceptor context
1389        let mut state = T::default();
1390        let operation = operation_names::THREAD_CREATE;
1391        let model = "thread"; // No model for thread operations
1392        let request_json = serde_json::to_string(&request).unwrap_or_default();
1393
1394        // Call before_request hook
1395        self.call_before_request(operation, model, &request_json, &mut state)
1396            .await?;
1397
1398        let start_time = Instant::now();
1399
1400        // Make the API call
1401        let response = match assistants_api::create_thread()
1402            .configuration(&self.client.base_configuration)
1403            .maybe_create_thread_request(Some(request))
1404            .call()
1405            .await
1406        {
1407            Ok(resp) => resp,
1408            Err(e) => {
1409                let error = self
1410                    .handle_api_error(e, operation, model, &request_json, &state)
1411                    .await;
1412                return Err(error);
1413            }
1414        };
1415
1416        let duration = start_time.elapsed();
1417
1418        // Call after_response hook
1419        self.call_after_response(
1420            &response,
1421            operation,
1422            model,
1423            &request_json,
1424            &state,
1425            duration,
1426            None,
1427            None,
1428        )
1429        .await;
1430
1431        Ok(response)
1432    }
1433}
1434
1435impl<T: Default + Send + Sync> UploadsClient<'_, T> {
1436    /// Create a new upload builder for the given file metadata.
1437    #[must_use]
1438    pub fn builder(
1439        &self,
1440        filename: impl Into<String>,
1441        purpose: UploadPurpose,
1442        bytes: i32,
1443        mime_type: impl Into<String>,
1444    ) -> UploadBuilder {
1445        UploadBuilder::new(filename, purpose, bytes, mime_type)
1446    }
1447
1448    /// Create an upload session.
1449    pub async fn create(&self, builder: UploadBuilder) -> Result<Upload> {
1450        let request = builder.build()?;
1451
1452        // Prepare interceptor context
1453        let mut state = T::default();
1454        let operation = operation_names::UPLOAD_CREATE;
1455        let model = "upload"; // No model for upload operations
1456        let request_json = serde_json::to_string(&request).unwrap_or_default();
1457
1458        // Call before_request hook
1459        self.call_before_request(operation, model, &request_json, &mut state)
1460            .await?;
1461
1462        let start_time = Instant::now();
1463
1464        // Make the API call
1465        let response = match uploads_api::create_upload()
1466            .configuration(&self.client.base_configuration)
1467            .create_upload_request(request)
1468            .call()
1469            .await
1470        {
1471            Ok(resp) => resp,
1472            Err(e) => {
1473                let error = self
1474                    .handle_api_error(e, operation, model, &request_json, &state)
1475                    .await;
1476                return Err(error);
1477            }
1478        };
1479
1480        let duration = start_time.elapsed();
1481
1482        // Call after_response hook
1483        self.call_after_response(
1484            &response,
1485            operation,
1486            model,
1487            &request_json,
1488            &state,
1489            duration,
1490            None,
1491            None,
1492        )
1493        .await;
1494
1495        Ok(response)
1496    }
1497}
1498
1499impl<T: Default + Send + Sync> ModerationsClient<'_, T> {
1500    /// Create a moderation builder for checking text content.
1501    ///
1502    /// # Example
1503    ///
1504    /// ```rust,ignore
1505    /// use openai_ergonomic::Client;
1506    ///
1507    /// # async fn example() -> openai_ergonomic::Result<()> {
1508    /// let client = Client::from_env()?;
1509    /// let builder = client.moderations().builder("Text to check");
1510    /// let response = client.moderations().create(builder).await?;
1511    /// println!("Flagged: {}", response.results[0].flagged);
1512    /// # Ok(())
1513    /// # }
1514    /// ```
1515    #[must_use]
1516    pub fn builder(&self, input: impl Into<String>) -> ModerationBuilder {
1517        ModerationBuilder::new(input)
1518    }
1519
1520    /// Convenience method for moderating a single text input.
1521    ///
1522    /// # Example
1523    ///
1524    /// ```rust,ignore
1525    /// use openai_ergonomic::Client;
1526    ///
1527    /// # async fn example() -> openai_ergonomic::Result<()> {
1528    /// let client = Client::from_env()?;
1529    /// let builder = client.moderations().check("Hello world");
1530    /// let response = client.moderations().create(builder).await?;
1531    ///
1532    /// if response.results[0].flagged {
1533    ///     println!("Content was flagged for moderation");
1534    /// }
1535    /// # Ok(())
1536    /// # }
1537    /// ```
1538    #[must_use]
1539    pub fn check(&self, input: impl Into<String>) -> ModerationBuilder {
1540        ModerationBuilder::new(input)
1541    }
1542
1543    /// Execute a moderation request built with [`ModerationBuilder`].
1544    ///
1545    /// # Example
1546    ///
1547    /// ```rust,ignore
1548    /// use openai_ergonomic::Client;
1549    ///
1550    /// # async fn example() -> openai_ergonomic::Result<()> {
1551    /// let client = Client::from_env()?;
1552    ///
1553    /// let builder = client
1554    ///     .moderations()
1555    ///     .check("Is this content appropriate?")
1556    ///     .model("text-moderation-latest");
1557    ///
1558    /// let response = client.moderations().create(builder).await?;
1559    ///
1560    /// println!("Model: {}", response.model);
1561    /// for result in response.results {
1562    ///     println!("Flagged: {}", result.flagged);
1563    ///     println!("Hate: {}", result.categories.hate);
1564    ///     println!("Violence: {}", result.categories.violence);
1565    /// }
1566    /// # Ok(())
1567    /// # }
1568    /// ```
1569    ///
1570    /// # Errors
1571    ///
1572    /// Returns an error if the API request fails or the response cannot be parsed.
1573    pub async fn create(&self, builder: ModerationBuilder) -> Result<CreateModerationResponse> {
1574        let request = builder.build()?;
1575
1576        // Prepare interceptor context
1577        let mut state = T::default();
1578        let operation = operation_names::MODERATION;
1579        let model = request
1580            .model
1581            .as_ref()
1582            .map_or_else(|| "text-moderation-latest".to_string(), ToString::to_string);
1583        let request_json = serde_json::to_string(&request).unwrap_or_default();
1584
1585        // Call before_request hook
1586        self.call_before_request(operation, &model, &request_json, &mut state)
1587            .await?;
1588
1589        let start_time = Instant::now();
1590
1591        // Make the API call
1592        let response = match moderations_api::create_moderation()
1593            .configuration(&self.client.base_configuration)
1594            .create_moderation_request(request)
1595            .call()
1596            .await
1597        {
1598            Ok(resp) => resp,
1599            Err(e) => {
1600                let error = self
1601                    .handle_api_error(e, operation, &model, &request_json, &state)
1602                    .await;
1603                return Err(error);
1604            }
1605        };
1606
1607        let duration = start_time.elapsed();
1608
1609        // Call after_response hook
1610        self.call_after_response(
1611            &response,
1612            operation,
1613            &model,
1614            &request_json,
1615            &state,
1616            duration,
1617            None,
1618            None,
1619        )
1620        .await;
1621
1622        Ok(response)
1623    }
1624}
1625
1626impl<T: Default + Send + Sync> FilesClient<'_, T> {
1627    /// Upload a file to `OpenAI`.
1628    ///
1629    /// # Example
1630    ///
1631    /// ```rust,ignore
1632    /// use openai_ergonomic::Client;
1633    /// use openai_ergonomic::builders::files::FilePurpose;
1634    ///
1635    /// # async fn example() -> openai_ergonomic::Result<()> {
1636    /// let client = Client::from_env()?;
1637    /// let builder = client
1638    ///     .files()
1639    ///     .upload_text("training.jsonl", FilePurpose::FineTune, "training data");
1640    /// let file = client.files().create(builder).await?;
1641    /// println!("Uploaded file: {}", file.id);
1642    /// # Ok(())
1643    /// # }
1644    /// ```
1645    pub async fn upload(&self, builder: FileUploadBuilder) -> Result<OpenAiFile> {
1646        // Write content to a temporary file
1647        let temp_dir = std::env::temp_dir();
1648        let temp_file_path = temp_dir.join(builder.filename());
1649        std::fs::write(&temp_file_path, builder.content()).map_err(Error::File)?;
1650
1651        // Convert FilePurpose to openai_client_base::models::FilePurpose
1652        let purpose = match builder.purpose().to_string().as_str() {
1653            "fine-tune" => openai_client_base::models::FilePurpose::FineTune,
1654            "vision" => openai_client_base::models::FilePurpose::Vision,
1655            "batch" => openai_client_base::models::FilePurpose::Batch,
1656            _ => openai_client_base::models::FilePurpose::Assistants, // Default for "assistants" and unknown
1657        };
1658
1659        // Prepare interceptor context
1660        let mut state = T::default();
1661        let operation = operation_names::FILE_UPLOAD;
1662        let model = "file-upload"; // No model for file operations
1663        let request_json = format!(
1664            r#"{{"filename":"{}","purpose":"{}","size":{}}}"#,
1665            builder.filename(),
1666            builder.purpose(),
1667            builder.content().len()
1668        );
1669
1670        // Call before_request hook
1671        if let Err(e) = self
1672            .call_before_request(operation, model, &request_json, &mut state)
1673            .await
1674        {
1675            // Clean up temp file before returning
1676            let _ = std::fs::remove_file(&temp_file_path);
1677            return Err(e);
1678        }
1679
1680        let start_time = Instant::now();
1681
1682        // Make the API call
1683        let result = match files_api::create_file()
1684            .configuration(&self.client.base_configuration)
1685            .file(temp_file_path.clone())
1686            .purpose(purpose)
1687            .call()
1688            .await
1689        {
1690            Ok(resp) => resp,
1691            Err(e) => {
1692                // Clean up temp file
1693                let _ = std::fs::remove_file(&temp_file_path);
1694                let error = self
1695                    .handle_api_error(e, operation, model, &request_json, &state)
1696                    .await;
1697                return Err(error);
1698            }
1699        };
1700
1701        // Clean up temporary file
1702        let _ = std::fs::remove_file(temp_file_path);
1703
1704        let duration = start_time.elapsed();
1705
1706        // Call after_response hook
1707        self.call_after_response(
1708            &result,
1709            operation,
1710            model,
1711            &request_json,
1712            &state,
1713            duration,
1714            None,
1715            None,
1716        )
1717        .await;
1718
1719        Ok(result)
1720    }
1721
    /// Convenience method to upload a file (alias for upload).
    ///
    /// Delegates directly to [`Self::upload`]; see that method for the
    /// failure modes.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    /// use openai_ergonomic::builders::files::FilePurpose;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let builder = client
    ///     .files()
    ///     .upload_text("data.txt", FilePurpose::Assistants, "content");
    /// let file = client.files().create(builder).await?;
    /// println!("File ID: {}", file.id);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn create(&self, builder: FileUploadBuilder) -> Result<OpenAiFile> {
        self.upload(builder).await
    }
1743
    /// Create a file upload builder from text content.
    ///
    /// The text is stored as the file's bytes; submit the builder with
    /// [`Self::create`] or [`Self::upload`].
    #[must_use]
    pub fn upload_text(
        &self,
        filename: impl Into<String>,
        purpose: crate::builders::files::FilePurpose,
        text: impl Into<String>,
    ) -> FileUploadBuilder {
        FileUploadBuilder::from_text(filename, purpose, text)
    }
1754
    /// Create a file upload builder from bytes.
    ///
    /// Submit the builder with [`Self::create`] or [`Self::upload`].
    #[must_use]
    pub fn upload_bytes(
        &self,
        filename: impl Into<String>,
        purpose: crate::builders::files::FilePurpose,
        content: Vec<u8>,
    ) -> FileUploadBuilder {
        FileUploadBuilder::new(filename, purpose, content)
    }
1765
    /// Create a file upload builder from a file path.
    ///
    /// Reads the file's content eagerly; submit the builder with
    /// [`Self::create`] or [`Self::upload`].
    ///
    /// # Errors
    ///
    /// Returns an error if the file at `path` cannot be read.
    pub fn upload_from_path(
        &self,
        path: impl AsRef<std::path::Path>,
        purpose: crate::builders::files::FilePurpose,
    ) -> Result<FileUploadBuilder> {
        FileUploadBuilder::from_path(path, purpose).map_err(Error::File)
    }
1774
    /// List files.
    ///
    /// Runs the interceptor `before_request`/`after_response` hooks around
    /// the API call. File operations have no model, so a fixed placeholder is
    /// reported to the hooks.
    ///
    /// # Errors
    ///
    /// Returns an error if an interceptor rejects the request or the API call
    /// fails.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let builder = client.files().list_builder();
    /// let files = client.files().list(builder).await?;
    /// println!("Found {} files", files.data.len());
    /// # Ok(())
    /// # }
    /// ```
    pub async fn list(&self, builder: FileListBuilder) -> Result<ListFilesResponse> {
        // Extract query parameters from the builder up front; they are used
        // both for the hook payload and the API call.
        let purpose = builder.purpose_ref().map(ToString::to_string);
        let limit = builder.limit_ref();
        let order = builder.order_ref().map(ToString::to_string);

        // Prepare interceptor context. The summary payload substitutes
        // presumed server-side defaults when a value is unset (10000 / desc
        // — NOTE(review): confirm these match the current API defaults).
        let mut state = T::default();
        let operation = operation_names::FILE_LIST;
        let model = "files";
        let request_json = format!(
            r#"{{"purpose":"{}","limit":{},"order":"{}"}}"#,
            purpose.as_deref().unwrap_or(""),
            limit.unwrap_or(10000),
            order.as_deref().unwrap_or("desc")
        );

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match files_api::list_files()
            .configuration(&self.client.base_configuration)
            .maybe_purpose(purpose.as_deref())
            .maybe_limit(limit)
            .maybe_order(order.as_deref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }
1847
    /// Create a list files builder.
    ///
    /// Pass the returned [`FileListBuilder`] to [`Self::list`] to execute
    /// the request.
    #[must_use]
    pub fn list_builder(&self) -> FileListBuilder {
        FileListBuilder::new()
    }
1853
1854    /// Retrieve information about a specific file.
1855    ///
1856    /// # Example
1857    ///
1858    /// ```rust,ignore
1859    /// use openai_ergonomic::Client;
1860    ///
1861    /// # async fn example() -> openai_ergonomic::Result<()> {
1862    /// let client = Client::from_env()?;
1863    /// let file = client.files().retrieve("file-123").await?;
1864    /// println!("File: {} ({})", file.filename, file.id);
1865    /// # Ok(())
1866    /// # }
1867    /// ```
1868    pub async fn retrieve(&self, file_id: impl Into<String>) -> Result<OpenAiFile> {
1869        let file_id = file_id.into();
1870
1871        // Prepare interceptor context
1872        let mut state = T::default();
1873        let operation = operation_names::FILE_RETRIEVE;
1874        let model = "files";
1875        let request_json = format!(r#"{{"file_id":"{file_id}"}}"#);
1876
1877        // Call before_request hook
1878        self.call_before_request(operation, model, &request_json, &mut state)
1879            .await?;
1880
1881        let start_time = Instant::now();
1882
1883        // Make the API call
1884        let response = match files_api::retrieve_file()
1885            .configuration(&self.client.base_configuration)
1886            .file_id(&file_id)
1887            .call()
1888            .await
1889        {
1890            Ok(resp) => resp,
1891            Err(e) => {
1892                let error = self
1893                    .handle_api_error(e, operation, model, &request_json, &state)
1894                    .await;
1895                return Err(error);
1896            }
1897        };
1898
1899        let duration = start_time.elapsed();
1900
1901        // Call after_response hook
1902        self.call_after_response(
1903            &response,
1904            operation,
1905            model,
1906            &request_json,
1907            &state,
1908            duration,
1909            None,
1910            None,
1911        )
1912        .await;
1913
1914        Ok(response)
1915    }
1916
1917    /// Retrieve information about a file using a builder.
1918    pub async fn get(&self, builder: FileRetrievalBuilder) -> Result<OpenAiFile> {
1919        self.retrieve(builder.file_id()).await
1920    }
1921
1922    /// Download file content.
1923    ///
1924    /// # Example
1925    ///
1926    /// ```rust,ignore
1927    /// use openai_ergonomic::Client;
1928    ///
1929    /// # async fn example() -> openai_ergonomic::Result<()> {
1930    /// let client = Client::from_env()?;
1931    /// let content = client.files().download("file-123").await?;
1932    /// println!("Downloaded {} bytes", content.len());
1933    /// # Ok(())
1934    /// # }
1935    /// ```
1936    pub async fn download(&self, file_id: impl Into<String>) -> Result<String> {
1937        let file_id = file_id.into();
1938
1939        // Prepare interceptor context
1940        let mut state = T::default();
1941        let operation = operation_names::FILE_DOWNLOAD;
1942        let model = "files";
1943        let request_json = format!(r#"{{"file_id":"{file_id}"}}"#);
1944
1945        // Call before_request hook
1946        self.call_before_request(operation, model, &request_json, &mut state)
1947            .await?;
1948
1949        let start_time = Instant::now();
1950
1951        // Make the API call
1952        let response = match files_api::download_file()
1953            .configuration(&self.client.base_configuration)
1954            .file_id(&file_id)
1955            .call()
1956            .await
1957        {
1958            Ok(resp) => resp,
1959            Err(e) => {
1960                let error = self
1961                    .handle_api_error(e, operation, model, &request_json, &state)
1962                    .await;
1963                return Err(error);
1964            }
1965        };
1966
1967        let duration = start_time.elapsed();
1968
1969        // Call after_response hook
1970        let response_size = format!(r#"{{"size":{}}}"#, response.len());
1971        self.call_after_response(
1972            &response_size,
1973            operation,
1974            model,
1975            &request_json,
1976            &state,
1977            duration,
1978            None,
1979            None,
1980        )
1981        .await;
1982
1983        Ok(response)
1984    }
1985
1986    /// Download file content as bytes.
1987    pub async fn download_bytes(&self, file_id: impl Into<String>) -> Result<Vec<u8>> {
1988        let content = self.download(file_id).await?;
1989        Ok(content.into_bytes())
1990    }
1991
1992    /// Delete a file.
1993    ///
1994    /// # Example
1995    ///
1996    /// ```rust,ignore
1997    /// use openai_ergonomic::Client;
1998    ///
1999    /// # async fn example() -> openai_ergonomic::Result<()> {
2000    /// let client = Client::from_env()?;
2001    /// let response = client.files().delete("file-123").await?;
2002    /// println!("Deleted: {}", response.deleted);
2003    /// # Ok(())
2004    /// # }
2005    /// ```
2006    pub async fn delete(&self, file_id: impl Into<String>) -> Result<DeleteFileResponse> {
2007        let file_id = file_id.into();
2008
2009        // Prepare interceptor context
2010        let mut state = T::default();
2011        let operation = operation_names::FILE_DELETE;
2012        let model = "files";
2013        let request_json = format!(r#"{{"file_id":"{file_id}"}}"#);
2014
2015        // Call before_request hook
2016        self.call_before_request(operation, model, &request_json, &mut state)
2017            .await?;
2018
2019        let start_time = Instant::now();
2020
2021        // Make the API call
2022        let response = match files_api::delete_file()
2023            .configuration(&self.client.base_configuration)
2024            .file_id(&file_id)
2025            .call()
2026            .await
2027        {
2028            Ok(resp) => resp,
2029            Err(e) => {
2030                let error = self
2031                    .handle_api_error(e, operation, model, &request_json, &state)
2032                    .await;
2033                return Err(error);
2034            }
2035        };
2036
2037        let duration = start_time.elapsed();
2038
2039        // Call after_response hook
2040        self.call_after_response(
2041            &response,
2042            operation,
2043            model,
2044            &request_json,
2045            &state,
2046            duration,
2047            None,
2048            None,
2049        )
2050        .await;
2051
2052        Ok(response)
2053    }
2054
2055    /// Delete a file using a builder.
2056    pub async fn remove(&self, builder: FileDeleteBuilder) -> Result<DeleteFileResponse> {
2057        self.delete(builder.file_id()).await
2058    }
2059}
2060
2061impl<T: Default + Send + Sync> VectorStoresClient<'_, T> {
2062    /// Create a new vector store.
2063    ///
2064    /// # Example
2065    ///
2066    /// ```rust,ignore
2067    /// use openai_ergonomic::Client;
2068    /// use openai_ergonomic::builders::vector_stores::VectorStoreBuilder;
2069    ///
2070    /// # async fn example() -> openai_ergonomic::Result<()> {
2071    /// let client = Client::from_env()?;
2072    /// let builder = VectorStoreBuilder::new()
2073    ///     .name("My Knowledge Base")
2074    ///     .add_file("file-123");
2075    /// let vector_store = client.vector_stores().create(builder).await?;
2076    /// println!("Created vector store: {}", vector_store.id);
2077    /// # Ok(())
2078    /// # }
2079    /// ```
2080    pub async fn create(
2081        &self,
2082        builder: crate::builders::vector_stores::VectorStoreBuilder,
2083    ) -> Result<VectorStoreObject> {
2084        use openai_client_base::models::{CreateVectorStoreRequest, VectorStoreExpirationAfter};
2085
2086        let mut request = CreateVectorStoreRequest::new();
2087        request.name = builder.name_ref().map(String::from);
2088        request.file_ids = if builder.has_files() {
2089            Some(builder.file_ids_ref().to_vec())
2090        } else {
2091            None
2092        };
2093
2094        if let Some(expires_after) = builder.expires_after_ref() {
2095            use openai_client_base::models::vector_store_expiration_after::Anchor;
2096            request.expires_after = Some(Box::new(VectorStoreExpirationAfter::new(
2097                Anchor::LastActiveAt,
2098                expires_after.days,
2099            )));
2100        }
2101
2102        if !builder.metadata_ref().is_empty() {
2103            request.metadata = Some(Some(builder.metadata_ref().clone()));
2104        }
2105
2106        // Prepare interceptor context
2107        let mut state = T::default();
2108        let operation = operation_names::VECTOR_STORE_CREATE;
2109        let model = "vector-store";
2110        let request_json = serde_json::to_string(&request).unwrap_or_default();
2111
2112        // Call before_request hook
2113        self.call_before_request(operation, model, &request_json, &mut state)
2114            .await?;
2115
2116        let start_time = Instant::now();
2117
2118        // Make the API call
2119        let response = match vector_stores_api::create_vector_store()
2120            .configuration(&self.client.base_configuration)
2121            .create_vector_store_request(request)
2122            .call()
2123            .await
2124        {
2125            Ok(resp) => resp,
2126            Err(e) => {
2127                let error = self
2128                    .handle_api_error(e, operation, model, &request_json, &state)
2129                    .await;
2130                return Err(error);
2131            }
2132        };
2133
2134        let duration = start_time.elapsed();
2135
2136        // Call after_response hook
2137        self.call_after_response(
2138            &response,
2139            operation,
2140            model,
2141            &request_json,
2142            &state,
2143            duration,
2144            None,
2145            None,
2146        )
2147        .await;
2148
2149        Ok(response)
2150    }
2151
2152    /// List vector stores.
2153    ///
2154    /// # Example
2155    ///
2156    /// ```rust,ignore
2157    /// use openai_ergonomic::Client;
2158    ///
2159    /// # async fn example() -> openai_ergonomic::Result<()> {
2160    /// let client = Client::from_env()?;
2161    /// let response = client.vector_stores().list(Some(20), None, None, None).await?;
2162    /// println!("Found {} vector stores", response.data.len());
2163    /// # Ok(())
2164    /// # }
2165    /// ```
2166    pub async fn list(
2167        &self,
2168        limit: Option<i32>,
2169        order: Option<&str>,
2170        after: Option<&str>,
2171        before: Option<&str>,
2172    ) -> Result<ListVectorStoresResponse> {
2173        // Prepare interceptor context
2174        let mut state = T::default();
2175        let operation = operation_names::VECTOR_STORE_LIST;
2176        let model = "vector-store";
2177        let request_json = format!(
2178            r#"{{"limit":{},"order":"{}"}}"#,
2179            limit.unwrap_or(20),
2180            order.unwrap_or("desc")
2181        );
2182
2183        // Call before_request hook
2184        self.call_before_request(operation, model, &request_json, &mut state)
2185            .await?;
2186
2187        let start_time = Instant::now();
2188
2189        // Make the API call
2190        let response = match vector_stores_api::list_vector_stores()
2191            .configuration(&self.client.base_configuration)
2192            .maybe_limit(limit)
2193            .maybe_order(order)
2194            .maybe_after(after)
2195            .maybe_before(before)
2196            .call()
2197            .await
2198        {
2199            Ok(resp) => resp,
2200            Err(e) => {
2201                let error = self
2202                    .handle_api_error(e, operation, model, &request_json, &state)
2203                    .await;
2204                return Err(error);
2205            }
2206        };
2207
2208        let duration = start_time.elapsed();
2209
2210        // Call after_response hook
2211        self.call_after_response(
2212            &response,
2213            operation,
2214            model,
2215            &request_json,
2216            &state,
2217            duration,
2218            None,
2219            None,
2220        )
2221        .await;
2222
2223        Ok(response)
2224    }
2225
2226    /// Get a specific vector store by ID.
2227    ///
2228    /// # Example
2229    ///
2230    /// ```rust,ignore
2231    /// use openai_ergonomic::Client;
2232    ///
2233    /// # async fn example() -> openai_ergonomic::Result<()> {
2234    /// let client = Client::from_env()?;
2235    /// let vector_store = client.vector_stores().get("vs_123").await?;
2236    /// println!("Vector store: {}", vector_store.name);
2237    /// # Ok(())
2238    /// # }
2239    /// ```
2240    pub async fn get(&self, vector_store_id: impl Into<String>) -> Result<VectorStoreObject> {
2241        let id = vector_store_id.into();
2242
2243        // Prepare interceptor context
2244        let mut state = T::default();
2245        let operation = operation_names::VECTOR_STORE_RETRIEVE;
2246        let model = "vector-store";
2247        let request_json = format!(r#"{{"vector_store_id":"{id}"}}"#);
2248
2249        // Call before_request hook
2250        self.call_before_request(operation, model, &request_json, &mut state)
2251            .await?;
2252
2253        let start_time = Instant::now();
2254
2255        // Make the API call
2256        let response = match vector_stores_api::get_vector_store()
2257            .configuration(&self.client.base_configuration)
2258            .vector_store_id(&id)
2259            .call()
2260            .await
2261        {
2262            Ok(resp) => resp,
2263            Err(e) => {
2264                let error = self
2265                    .handle_api_error(e, operation, model, &request_json, &state)
2266                    .await;
2267                return Err(error);
2268            }
2269        };
2270
2271        let duration = start_time.elapsed();
2272
2273        // Call after_response hook
2274        self.call_after_response(
2275            &response,
2276            operation,
2277            model,
2278            &request_json,
2279            &state,
2280            duration,
2281            None,
2282            None,
2283        )
2284        .await;
2285
2286        Ok(response)
2287    }
2288
2289    /// Update a vector store.
2290    ///
2291    /// # Example
2292    ///
2293    /// ```rust,ignore
2294    /// use openai_ergonomic::Client;
2295    /// use openai_ergonomic::builders::vector_stores::VectorStoreBuilder;
2296    ///
2297    /// # async fn example() -> openai_ergonomic::Result<()> {
2298    /// let client = Client::from_env()?;
2299    /// let builder = VectorStoreBuilder::new()
2300    ///     .name("Updated Name")
2301    ///     .metadata("updated", "true");
2302    /// let vector_store = client.vector_stores().update("vs_123", builder).await?;
2303    /// println!("Updated: {}", vector_store.name);
2304    /// # Ok(())
2305    /// # }
2306    /// ```
2307    pub async fn update(
2308        &self,
2309        vector_store_id: impl Into<String>,
2310        builder: crate::builders::vector_stores::VectorStoreBuilder,
2311    ) -> Result<VectorStoreObject> {
2312        use openai_client_base::models::{UpdateVectorStoreRequest, VectorStoreExpirationAfter};
2313
2314        let id = vector_store_id.into();
2315        let mut request = UpdateVectorStoreRequest::new();
2316        request.name = builder.name_ref().map(String::from);
2317
2318        if let Some(expires_after) = builder.expires_after_ref() {
2319            use openai_client_base::models::vector_store_expiration_after::Anchor;
2320            request.expires_after = Some(Box::new(VectorStoreExpirationAfter::new(
2321                Anchor::LastActiveAt,
2322                expires_after.days,
2323            )));
2324        }
2325
2326        if !builder.metadata_ref().is_empty() {
2327            request.metadata = Some(Some(builder.metadata_ref().clone()));
2328        }
2329
2330        // Prepare interceptor context
2331        let mut state = T::default();
2332        let operation = operation_names::VECTOR_STORE_UPDATE;
2333        let model = "vector-store";
2334        let request_json = serde_json::to_string(&request).unwrap_or_default();
2335
2336        // Call before_request hook
2337        self.call_before_request(operation, model, &request_json, &mut state)
2338            .await?;
2339
2340        let start_time = Instant::now();
2341
2342        // Make the API call
2343        let response = match vector_stores_api::modify_vector_store()
2344            .configuration(&self.client.base_configuration)
2345            .vector_store_id(&id)
2346            .update_vector_store_request(request)
2347            .call()
2348            .await
2349        {
2350            Ok(resp) => resp,
2351            Err(e) => {
2352                let error = self
2353                    .handle_api_error(e, operation, model, &request_json, &state)
2354                    .await;
2355                return Err(error);
2356            }
2357        };
2358
2359        let duration = start_time.elapsed();
2360
2361        // Call after_response hook
2362        self.call_after_response(
2363            &response,
2364            operation,
2365            model,
2366            &request_json,
2367            &state,
2368            duration,
2369            None,
2370            None,
2371        )
2372        .await;
2373
2374        Ok(response)
2375    }
2376
2377    /// Delete a vector store.
2378    ///
2379    /// # Example
2380    ///
2381    /// ```rust,ignore
2382    /// use openai_ergonomic::Client;
2383    ///
2384    /// # async fn example() -> openai_ergonomic::Result<()> {
2385    /// let client = Client::from_env()?;
2386    /// let response = client.vector_stores().delete("vs_123").await?;
2387    /// println!("Deleted: {}", response.deleted);
2388    /// # Ok(())
2389    /// # }
2390    /// ```
2391    pub async fn delete(
2392        &self,
2393        vector_store_id: impl Into<String>,
2394    ) -> Result<DeleteVectorStoreResponse> {
2395        let id = vector_store_id.into();
2396
2397        // Prepare interceptor context
2398        let mut state = T::default();
2399        let operation = operation_names::VECTOR_STORE_DELETE;
2400        let model = "vector-store";
2401        let request_json = format!(r#"{{"vector_store_id":"{id}"}}"#);
2402
2403        // Call before_request hook
2404        self.call_before_request(operation, model, &request_json, &mut state)
2405            .await?;
2406
2407        let start_time = Instant::now();
2408
2409        // Make the API call
2410        let response = match vector_stores_api::delete_vector_store()
2411            .configuration(&self.client.base_configuration)
2412            .vector_store_id(&id)
2413            .call()
2414            .await
2415        {
2416            Ok(resp) => resp,
2417            Err(e) => {
2418                let error = self
2419                    .handle_api_error(e, operation, model, &request_json, &state)
2420                    .await;
2421                return Err(error);
2422            }
2423        };
2424
2425        let duration = start_time.elapsed();
2426
2427        // Call after_response hook
2428        self.call_after_response(
2429            &response,
2430            operation,
2431            model,
2432            &request_json,
2433            &state,
2434            duration,
2435            None,
2436            None,
2437        )
2438        .await;
2439
2440        Ok(response)
2441    }
2442
2443    /// Add a file to a vector store.
2444    ///
2445    /// # Example
2446    ///
2447    /// ```rust,ignore
2448    /// use openai_ergonomic::Client;
2449    ///
2450    /// # async fn example() -> openai_ergonomic::Result<()> {
2451    /// let client = Client::from_env()?;
2452    /// let file = client.vector_stores().add_file("vs_123", "file-456").await?;
2453    /// println!("Added file: {}", file.id);
2454    /// # Ok(())
2455    /// # }
2456    /// ```
2457    pub async fn add_file(
2458        &self,
2459        vector_store_id: impl Into<String>,
2460        file_id: impl Into<String>,
2461    ) -> Result<VectorStoreFileObject> {
2462        use openai_client_base::models::CreateVectorStoreFileRequest;
2463
2464        let vs_id = vector_store_id.into();
2465        let f_id = file_id.into();
2466        let request = CreateVectorStoreFileRequest::new(f_id.clone());
2467
2468        // Prepare interceptor context
2469        let mut state = T::default();
2470        let operation = operation_names::VECTOR_STORE_FILE_ADD;
2471        let model = "vector-store";
2472        let request_json = format!(r#"{{"vector_store_id":"{vs_id}","file_id":"{f_id}"}}"#);
2473
2474        // Call before_request hook
2475        self.call_before_request(operation, model, &request_json, &mut state)
2476            .await?;
2477
2478        let start_time = Instant::now();
2479
2480        // Make the API call
2481        let response = match vector_stores_api::create_vector_store_file()
2482            .configuration(&self.client.base_configuration)
2483            .vector_store_id(&vs_id)
2484            .create_vector_store_file_request(request)
2485            .call()
2486            .await
2487        {
2488            Ok(resp) => resp,
2489            Err(e) => {
2490                let error = self
2491                    .handle_api_error(e, operation, model, &request_json, &state)
2492                    .await;
2493                return Err(error);
2494            }
2495        };
2496
2497        let duration = start_time.elapsed();
2498
2499        // Call after_response hook
2500        self.call_after_response(
2501            &response,
2502            operation,
2503            model,
2504            &request_json,
2505            &state,
2506            duration,
2507            None,
2508            None,
2509        )
2510        .await;
2511
2512        Ok(response)
2513    }
2514
2515    /// List files in a vector store.
2516    ///
2517    /// # Example
2518    ///
2519    /// ```rust,ignore
2520    /// use openai_ergonomic::Client;
2521    ///
2522    /// # async fn example() -> openai_ergonomic::Result<()> {
2523    /// let client = Client::from_env()?;
2524    /// let response = client.vector_stores().list_files("vs_123", None, None, None, None, None).await?;
2525    /// println!("Found {} files", response.data.len());
2526    /// # Ok(())
2527    /// # }
2528    /// ```
2529    pub async fn list_files(
2530        &self,
2531        vector_store_id: impl Into<String>,
2532        limit: Option<i32>,
2533        order: Option<&str>,
2534        after: Option<&str>,
2535        before: Option<&str>,
2536        filter: Option<&str>,
2537    ) -> Result<ListVectorStoreFilesResponse> {
2538        let id = vector_store_id.into();
2539
2540        // Prepare interceptor context
2541        let mut state = T::default();
2542        let operation = operation_names::VECTOR_STORE_FILE_LIST;
2543        let model = "vector-store";
2544        let request_json = format!(r#"{{"vector_store_id":"{id}"}}"#);
2545
2546        // Call before_request hook
2547        self.call_before_request(operation, model, &request_json, &mut state)
2548            .await?;
2549
2550        let start_time = Instant::now();
2551
2552        // Make the API call
2553        let response = match vector_stores_api::list_vector_store_files()
2554            .configuration(&self.client.base_configuration)
2555            .vector_store_id(&id)
2556            .maybe_limit(limit)
2557            .maybe_order(order)
2558            .maybe_after(after)
2559            .maybe_before(before)
2560            .maybe_filter(filter)
2561            .call()
2562            .await
2563        {
2564            Ok(resp) => resp,
2565            Err(e) => {
2566                let error = self
2567                    .handle_api_error(e, operation, model, &request_json, &state)
2568                    .await;
2569                return Err(error);
2570            }
2571        };
2572
2573        let duration = start_time.elapsed();
2574
2575        // Call after_response hook
2576        self.call_after_response(
2577            &response,
2578            operation,
2579            model,
2580            &request_json,
2581            &state,
2582            duration,
2583            None,
2584            None,
2585        )
2586        .await;
2587
2588        Ok(response)
2589    }
2590
2591    /// Get a file from a vector store.
2592    ///
2593    /// # Example
2594    ///
2595    /// ```rust,ignore
2596    /// use openai_ergonomic::Client;
2597    ///
2598    /// # async fn example() -> openai_ergonomic::Result<()> {
2599    /// let client = Client::from_env()?;
2600    /// let file = client.vector_stores().get_file("vs_123", "file-456").await?;
2601    /// println!("File: {}", file.id);
2602    /// # Ok(())
2603    /// # }
2604    /// ```
2605    pub async fn get_file(
2606        &self,
2607        vector_store_id: impl Into<String>,
2608        file_id: impl Into<String>,
2609    ) -> Result<VectorStoreFileObject> {
2610        let vs_id = vector_store_id.into();
2611        let f_id = file_id.into();
2612
2613        // Prepare interceptor context
2614        let mut state = T::default();
2615        let operation = operation_names::VECTOR_STORE_FILE_RETRIEVE;
2616        let model = "vector-store";
2617        let request_json = format!(r#"{{"vector_store_id":"{vs_id}","file_id":"{f_id}"}}"#);
2618
2619        // Call before_request hook
2620        self.call_before_request(operation, model, &request_json, &mut state)
2621            .await?;
2622
2623        let start_time = Instant::now();
2624
2625        // Make the API call
2626        let response = match vector_stores_api::get_vector_store_file()
2627            .configuration(&self.client.base_configuration)
2628            .vector_store_id(&vs_id)
2629            .file_id(&f_id)
2630            .call()
2631            .await
2632        {
2633            Ok(resp) => resp,
2634            Err(e) => {
2635                let error = self
2636                    .handle_api_error(e, operation, model, &request_json, &state)
2637                    .await;
2638                return Err(error);
2639            }
2640        };
2641
2642        let duration = start_time.elapsed();
2643
2644        // Call after_response hook
2645        self.call_after_response(
2646            &response,
2647            operation,
2648            model,
2649            &request_json,
2650            &state,
2651            duration,
2652            None,
2653            None,
2654        )
2655        .await;
2656
2657        Ok(response)
2658    }
2659
2660    /// Delete a file from a vector store.
2661    ///
2662    /// # Example
2663    ///
2664    /// ```rust,ignore
2665    /// use openai_ergonomic::Client;
2666    ///
2667    /// # async fn example() -> openai_ergonomic::Result<()> {
2668    /// let client = Client::from_env()?;
2669    /// let response = client.vector_stores().delete_file("vs_123", "file-456").await?;
2670    /// println!("Deleted: {}", response.deleted);
2671    /// # Ok(())
2672    /// # }
2673    /// ```
2674    pub async fn delete_file(
2675        &self,
2676        vector_store_id: impl Into<String>,
2677        file_id: impl Into<String>,
2678    ) -> Result<DeleteVectorStoreFileResponse> {
2679        let vs_id = vector_store_id.into();
2680        let f_id = file_id.into();
2681
2682        // Prepare interceptor context
2683        let mut state = T::default();
2684        let operation = operation_names::VECTOR_STORE_FILE_DELETE;
2685        let model = "vector-store";
2686        let request_json = format!(r#"{{"vector_store_id":"{vs_id}","file_id":"{f_id}"}}"#);
2687
2688        // Call before_request hook
2689        self.call_before_request(operation, model, &request_json, &mut state)
2690            .await?;
2691
2692        let start_time = Instant::now();
2693
2694        // Make the API call
2695        let response = match vector_stores_api::delete_vector_store_file()
2696            .configuration(&self.client.base_configuration)
2697            .vector_store_id(&vs_id)
2698            .file_id(&f_id)
2699            .call()
2700            .await
2701        {
2702            Ok(resp) => resp,
2703            Err(e) => {
2704                let error = self
2705                    .handle_api_error(e, operation, model, &request_json, &state)
2706                    .await;
2707                return Err(error);
2708            }
2709        };
2710
2711        let duration = start_time.elapsed();
2712
2713        // Call after_response hook
2714        self.call_after_response(
2715            &response,
2716            operation,
2717            model,
2718            &request_json,
2719            &state,
2720            duration,
2721            None,
2722            None,
2723        )
2724        .await;
2725
2726        Ok(response)
2727    }
2728
2729    /// Search a vector store.
2730    ///
2731    /// # Example
2732    ///
2733    /// ```rust,ignore
2734    /// use openai_ergonomic::Client;
2735    /// use openai_ergonomic::builders::vector_stores::VectorStoreSearchBuilder;
2736    ///
2737    /// # async fn example() -> openai_ergonomic::Result<()> {
2738    /// let client = Client::from_env()?;
2739    /// let builder = VectorStoreSearchBuilder::new("vs_123", "machine learning concepts");
2740    /// let results = client.vector_stores().search(builder).await?;
2741    /// println!("Found {} results", results.data.len());
2742    /// # Ok(())
2743    /// # }
2744    /// ```
2745    pub async fn search(
2746        &self,
2747        builder: crate::builders::vector_stores::VectorStoreSearchBuilder,
2748    ) -> Result<VectorStoreSearchResultsPage> {
2749        use openai_client_base::models::{VectorStoreSearchRequest, VectorStoreSearchRequestQuery};
2750
2751        let query = VectorStoreSearchRequestQuery::new_text(builder.query().to_string());
2752        let mut request = VectorStoreSearchRequest::new(query);
2753
2754        if let Some(limit) = builder.limit_ref() {
2755            request.max_num_results = Some(limit);
2756        }
2757
2758        let vs_id = builder.vector_store_id().to_string();
2759
2760        // Prepare interceptor context
2761        let mut state = T::default();
2762        let operation = operation_names::VECTOR_STORE_SEARCH;
2763        let model = "vector-store";
2764        let request_json = format!(
2765            r#"{{"vector_store_id":"{}","query":"{}"}}"#,
2766            vs_id,
2767            builder.query()
2768        );
2769
2770        // Call before_request hook
2771        self.call_before_request(operation, model, &request_json, &mut state)
2772            .await?;
2773
2774        let start_time = Instant::now();
2775
2776        // Make the API call
2777        let response = match vector_stores_api::search_vector_store()
2778            .configuration(&self.client.base_configuration)
2779            .vector_store_id(&vs_id)
2780            .vector_store_search_request(request)
2781            .call()
2782            .await
2783        {
2784            Ok(resp) => resp,
2785            Err(e) => {
2786                let error = self
2787                    .handle_api_error(e, operation, model, &request_json, &state)
2788                    .await;
2789                return Err(error);
2790            }
2791        };
2792
2793        let duration = start_time.elapsed();
2794
2795        // Call after_response hook
2796        self.call_after_response(
2797            &response,
2798            operation,
2799            model,
2800            &request_json,
2801            &state,
2802            duration,
2803            None,
2804            None,
2805        )
2806        .await;
2807
2808        Ok(response)
2809    }
2810}
2811
impl<T: Default + Send + Sync> BatchClient<'_, T> {
    /// Create a new batch job.
    ///
    /// The completion window is hard-coded to 24 hours
    /// (`CompletionWindow::Variant24h`); the builder does not currently
    /// expose a way to choose another window.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    /// use openai_ergonomic::builders::batch::{BatchJobBuilder, BatchEndpoint};
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let builder = BatchJobBuilder::new("file-batch-input", BatchEndpoint::ChatCompletions);
    /// let batch = client.batch().create(builder).await?;
    /// println!("Created batch: {}", batch.id);
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if a `before_request` interceptor rejects the call
    /// or if the underlying API request fails.
    pub async fn create(&self, builder: crate::builders::batch::BatchJobBuilder) -> Result<Batch> {
        use openai_client_base::models::create_batch_request::{CompletionWindow, Endpoint};

        // Map our endpoint to the base client enum
        let endpoint = match builder.endpoint() {
            crate::builders::batch::BatchEndpoint::ChatCompletions => {
                Endpoint::SlashV1SlashChatSlashCompletions
            }
            crate::builders::batch::BatchEndpoint::Embeddings => Endpoint::SlashV1SlashEmbeddings,
            crate::builders::batch::BatchEndpoint::Completions => Endpoint::SlashV1SlashCompletions,
        };

        let mut request = CreateBatchRequest::new(
            builder.input_file_id().to_string(),
            endpoint,
            CompletionWindow::Variant24h,
        );

        if builder.has_metadata() {
            // The generated request type models metadata as a nullable field,
            // hence the nested `Option`.
            request.metadata = Some(Some(builder.metadata_ref().clone()));
        }

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::BATCH_CREATE;
        // Batch jobs have no single model, so a fixed label is used for
        // interceptor/telemetry context.
        let model = "batch";
        let request_json = serde_json::to_string(&request).unwrap_or_default();

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        // Start timing only after the before_request hook so interceptor
        // overhead is not counted in the request duration.
        let start_time = Instant::now();

        // Make the API call
        let response = match batch_api::create_batch()
            .configuration(&self.client.base_configuration)
            .create_batch_request(request)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                // Route the failure through the error hook before returning it.
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// List batch jobs.
    ///
    /// `after` is a pagination cursor (batch id) and `limit` caps the page
    /// size; both are optional.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let response = client.batch().list(None, Some(20)).await?;
    /// println!("Found {} batches", response.data.len());
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if a `before_request` interceptor rejects the call
    /// or if the underlying API request fails.
    pub async fn list(
        &self,
        after: Option<&str>,
        limit: Option<i32>,
    ) -> Result<ListBatchesResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::BATCH_LIST;
        let model = "batch";
        // NOTE(review): `{after:?}`/`{limit:?}` render Debug-style (e.g.
        // `Some("x")`), so this context string is JSON-like but not strict
        // JSON — confirm interceptors only treat it as opaque text.
        let request_json = format!("{{\"after\":{after:?},\"limit\":{limit:?}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match batch_api::list_batches()
            .configuration(&self.client.base_configuration)
            .maybe_after(after)
            .maybe_limit(limit)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get a specific batch job.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let batch = client.batch().get("batch_123").await?;
    /// println!("Batch status: {}", batch.status);
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if a `before_request` interceptor rejects the call
    /// or if the underlying API request fails.
    pub async fn get(&self, batch_id: impl Into<String>) -> Result<Batch> {
        let id = batch_id.into();

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::BATCH_RETRIEVE;
        let model = "batch";
        let request_json = format!("{{\"batch_id\":\"{id}\"}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match batch_api::retrieve_batch()
            .configuration(&self.client.base_configuration)
            .batch_id(&id)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Cancel a batch job.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let batch = client.batch().cancel("batch_123").await?;
    /// println!("Batch cancelled: {}", batch.status);
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if a `before_request` interceptor rejects the call
    /// or if the underlying API request fails.
    pub async fn cancel(&self, batch_id: impl Into<String>) -> Result<Batch> {
        let id = batch_id.into();

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::BATCH_CANCEL;
        let model = "batch";
        let request_json = format!("{{\"batch_id\":\"{id}\"}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match batch_api::cancel_batch()
            .configuration(&self.client.base_configuration)
            .batch_id(&id)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }
}
3089
impl<T: Default + Send + Sync> FineTuningClient<'_, T> {
    /// Create a new fine-tuning job.
    ///
    /// Optional validation file and model-name suffix from the builder are
    /// forwarded; hyperparameters are currently not forwarded (see the note
    /// in the body).
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    /// use openai_ergonomic::builders::fine_tuning::FineTuningJobBuilder;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let builder = FineTuningJobBuilder::new("gpt-3.5-turbo", "file-training-data");
    /// let job = client.fine_tuning().create_job(builder).await?;
    /// println!("Created job: {}", job.id);
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if a `before_request` interceptor rejects the call
    /// or if the underlying API request fails.
    pub async fn create_job(
        &self,
        builder: crate::builders::fine_tuning::FineTuningJobBuilder,
    ) -> Result<FineTuningJob> {
        let mut request = CreateFineTuningJobRequest::new(
            builder.model().to_string(),
            builder.training_file().to_string(),
        );

        if let Some(validation_file) = builder.validation_file_ref() {
            request.validation_file = Some(validation_file.to_string());
        }

        if let Some(suffix) = builder.suffix_ref() {
            request.suffix = Some(suffix.to_string());
        }

        // Note: Hyperparameters handling is limited due to base client API limitations
        // The generated API appears to have empty struct definitions for hyperparameters
        // For now, we skip hyperparameters configuration
        // TODO: Update when openai-client-base fixes hyperparameters types

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::FINE_TUNING_CREATE;
        // Unlike the other methods in this impl, the actual target model is
        // known here, so it is used for interceptor context.
        let model = builder.model();
        let request_json = serde_json::to_string(&request).unwrap_or_default();

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        // Start timing only after the before_request hook so interceptor
        // overhead is not counted in the request duration.
        let start_time = Instant::now();

        // Make the API call
        let response = match fine_tuning_api::create_fine_tuning_job()
            .configuration(&self.client.base_configuration)
            .create_fine_tuning_job_request(request)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                // Route the failure through the error hook before returning it.
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// List fine-tuning jobs.
    ///
    /// `after` is a pagination cursor (job id) and `limit` caps the page
    /// size; both are optional.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let response = client.fine_tuning().list_jobs(None, Some(20)).await?;
    /// println!("Found {} jobs", response.data.len());
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if a `before_request` interceptor rejects the call
    /// or if the underlying API request fails.
    pub async fn list_jobs(
        &self,
        after: Option<&str>,
        limit: Option<i32>,
    ) -> Result<ListPaginatedFineTuningJobsResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::FINE_TUNING_LIST;
        let model = "fine-tuning";
        // NOTE(review): `{after:?}`/`{limit:?}` render Debug-style (e.g.
        // `Some("x")`), so this context string is JSON-like but not strict
        // JSON — confirm interceptors only treat it as opaque text.
        let request_json = format!("{{\"after\":{after:?},\"limit\":{limit:?}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match fine_tuning_api::list_paginated_fine_tuning_jobs()
            .configuration(&self.client.base_configuration)
            .maybe_after(after)
            .maybe_limit(limit)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get a specific fine-tuning job.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let job = client.fine_tuning().get_job("ftjob-123").await?;
    /// println!("Job status: {}", job.status);
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if a `before_request` interceptor rejects the call
    /// or if the underlying API request fails.
    pub async fn get_job(&self, job_id: impl Into<String>) -> Result<FineTuningJob> {
        let id = job_id.into();

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::FINE_TUNING_RETRIEVE;
        let model = "fine-tuning";
        let request_json = format!("{{\"job_id\":\"{id}\"}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match fine_tuning_api::retrieve_fine_tuning_job()
            .configuration(&self.client.base_configuration)
            .fine_tuning_job_id(&id)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Cancel a fine-tuning job.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let job = client.fine_tuning().cancel_job("ftjob-123").await?;
    /// println!("Job cancelled: {}", job.status);
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if a `before_request` interceptor rejects the call
    /// or if the underlying API request fails.
    pub async fn cancel_job(&self, job_id: impl Into<String>) -> Result<FineTuningJob> {
        let id = job_id.into();

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::FINE_TUNING_CANCEL;
        let model = "fine-tuning";
        let request_json = format!("{{\"job_id\":\"{id}\"}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match fine_tuning_api::cancel_fine_tuning_job()
            .configuration(&self.client.base_configuration)
            .fine_tuning_job_id(&id)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// List events for a fine-tuning job.
    ///
    /// `after` is a pagination cursor (event id) and `limit` caps the page
    /// size; both are optional.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let events = client.fine_tuning().list_events("ftjob-123", None, Some(20)).await?;
    /// println!("Found {} events", events.data.len());
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if a `before_request` interceptor rejects the call
    /// or if the underlying API request fails.
    pub async fn list_events(
        &self,
        job_id: impl Into<String>,
        after: Option<&str>,
        limit: Option<i32>,
    ) -> Result<ListFineTuningJobEventsResponse> {
        let id = job_id.into();

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::FINE_TUNING_LIST_EVENTS;
        let model = "fine-tuning";
        let request_json =
            format!("{{\"job_id\":\"{id}\",\"after\":{after:?},\"limit\":{limit:?}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match fine_tuning_api::list_fine_tuning_events()
            .configuration(&self.client.base_configuration)
            .fine_tuning_job_id(&id)
            .maybe_after(after)
            .maybe_limit(limit)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// List checkpoints for a fine-tuning job.
    ///
    /// `after` is a pagination cursor (checkpoint id) and `limit` caps the
    /// page size; both are optional.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let checkpoints = client.fine_tuning().list_checkpoints("ftjob-123", None, Some(10)).await?;
    /// println!("Found {} checkpoints", checkpoints.data.len());
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if a `before_request` interceptor rejects the call
    /// or if the underlying API request fails.
    pub async fn list_checkpoints(
        &self,
        job_id: impl Into<String>,
        after: Option<&str>,
        limit: Option<i32>,
    ) -> Result<ListFineTuningJobCheckpointsResponse> {
        let id = job_id.into();

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::FINE_TUNING_LIST_CHECKPOINTS;
        let model = "fine-tuning";
        let request_json =
            format!("{{\"job_id\":\"{id}\",\"after\":{after:?},\"limit\":{limit:?}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match fine_tuning_api::list_fine_tuning_job_checkpoints()
            .configuration(&self.client.base_configuration)
            .fine_tuning_job_id(&id)
            .maybe_after(after)
            .maybe_limit(limit)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }
}
3509
3510fn map_api_error<T>(error: ApiError<T>) -> Error {
3511    match error {
3512        ApiError::Reqwest(err) => Error::Http(err),
3513        ApiError::ReqwestMiddleware(err) => {
3514            Error::Internal(format!("reqwest middleware error: {err}"))
3515        }
3516        ApiError::Serde(err) => Error::Json(err),
3517        ApiError::Io(err) => Error::File(err),
3518        ApiError::ResponseError(response) => Error::Api {
3519            status: response.status.as_u16(),
3520            message: response.content,
3521            error_type: None,
3522            error_code: None,
3523        },
3524    }
3525}
3526
#[cfg(test)]
mod tests {
    use super::*;
    use openai_client_base::apis::{Error as BaseError, ResponseContent};

    // --- error mapping ---

    /// HTTP response errors must surface as `Error::Api` with the status
    /// code and raw body preserved.
    #[test]
    fn map_api_error_converts_response() {
        let response = ResponseContent {
            status: reqwest::StatusCode::BAD_REQUEST,
            content: "bad request".to_string(),
            entity: Option::<()>::None,
        };

        let error = map_api_error(BaseError::ResponseError(response));
        match error {
            Error::Api {
                status, message, ..
            } => {
                assert_eq!(status, 400);
                assert!(message.contains("bad request"));
            }
            other => panic!("expected API error, got {other:?}"),
        }
    }

    // --- moderation builder ---

    /// A bare builder keeps the input and leaves the model unset.
    #[test]
    fn test_moderation_builder_creation() {
        use crate::builders::moderations::ModerationBuilder;

        let builder = ModerationBuilder::new("Test content");
        let request = builder.build().unwrap();

        assert_eq!(request.input, "Test content");
        assert!(request.model.is_none());
    }

    /// An explicit model choice survives into the built request.
    #[test]
    fn test_moderation_builder_with_model() {
        use crate::builders::moderations::ModerationBuilder;

        let builder = ModerationBuilder::new("Test content").model("text-moderation-stable");
        let request = builder.build().unwrap();

        assert_eq!(request.input, "Test content");
        assert_eq!(request.model, Some("text-moderation-stable".to_string()));
    }

    /// Array inputs collapse into a single newline-joined string.
    #[test]
    fn test_moderation_builder_array_input() {
        use crate::builders::moderations::ModerationBuilder;

        let inputs = vec!["First text".to_string(), "Second text".to_string()];
        let builder = ModerationBuilder::new_array(inputs);
        let request = builder.build().unwrap();

        // Array inputs are joined with newlines
        assert_eq!(request.input, "First text\nSecond text");
    }

    // --- file builders ---

    /// Raw-bytes construction exposes filename, content, and size accessors.
    #[test]
    fn test_file_upload_builder_creation() {
        use crate::builders::files::{FilePurpose, FileUploadBuilder};

        let content = b"test content".to_vec();
        let builder = FileUploadBuilder::new("test.txt", FilePurpose::Assistants, content.clone());

        assert_eq!(builder.filename(), "test.txt");
        assert_eq!(builder.content(), content.as_slice());
        assert_eq!(builder.content_size(), content.len());
        assert!(!builder.is_empty());
    }

    /// Text-based construction round-trips the content back as a string.
    #[test]
    fn test_file_upload_builder_from_text() {
        use crate::builders::files::{FilePurpose, FileUploadBuilder};

        let builder =
            FileUploadBuilder::from_text("hello.txt", FilePurpose::FineTune, "Hello, world!");

        assert_eq!(builder.filename(), "hello.txt");
        assert_eq!(
            builder.content_as_string(),
            Some("Hello, world!".to_string())
        );
        assert!(!builder.is_empty());
    }

    /// List-builder options (purpose, limit, order) are all recorded.
    #[test]
    fn test_file_list_builder() {
        use crate::builders::files::{FileListBuilder, FileOrder, FilePurpose};

        let builder = FileListBuilder::new()
            .purpose(FilePurpose::Assistants)
            .limit(10)
            .order(FileOrder::Desc);

        assert!(builder.purpose_ref().is_some());
        assert_eq!(builder.limit_ref(), Some(10));
        assert!(builder.order_ref().is_some());
    }

    /// Retrieval builder stores the target file id verbatim.
    #[test]
    fn test_file_retrieval_builder() {
        use crate::builders::files::FileRetrievalBuilder;

        let builder = FileRetrievalBuilder::new("file-123");
        assert_eq!(builder.file_id(), "file-123");
    }

    /// Delete builder stores the target file id verbatim.
    #[test]
    fn test_file_delete_builder() {
        use crate::builders::files::FileDeleteBuilder;

        let builder = FileDeleteBuilder::new("file-456");
        assert_eq!(builder.file_id(), "file-456");
    }

    /// `Display` for purposes must match the API's wire strings.
    #[test]
    fn test_file_purpose_display() {
        use crate::builders::files::FilePurpose;

        assert_eq!(FilePurpose::FineTune.to_string(), "fine-tune");
        assert_eq!(FilePurpose::Assistants.to_string(), "assistants");
        assert_eq!(FilePurpose::Vision.to_string(), "vision");
        assert_eq!(FilePurpose::Batch.to_string(), "batch");
    }

    // --- vector store builders ---

    /// Name, single file, and metadata entries are all tracked.
    #[test]
    fn test_vector_store_builder_basic() {
        use crate::builders::vector_stores::VectorStoreBuilder;

        let builder = VectorStoreBuilder::new()
            .name("Test Store")
            .add_file("file-1")
            .metadata("key", "value");

        assert_eq!(builder.name_ref(), Some("Test Store"));
        assert_eq!(builder.file_count(), 1);
        assert!(builder.has_files());
        assert_eq!(builder.metadata_ref().len(), 1);
    }

    /// Expiration-by-days is stored with the requested day count.
    #[test]
    fn test_vector_store_builder_with_expiration() {
        use crate::builders::vector_stores::VectorStoreBuilder;

        let builder = VectorStoreBuilder::new()
            .name("Temp Store")
            .expires_after_days(30);

        assert_eq!(builder.name_ref(), Some("Temp Store"));
        assert!(builder.expires_after_ref().is_some());
        assert_eq!(builder.expires_after_ref().unwrap().days, 30);
    }

    /// Bulk `file_ids` replaces the file list wholesale.
    #[test]
    fn test_vector_store_builder_multiple_files() {
        use crate::builders::vector_stores::VectorStoreBuilder;

        let files = vec!["file-1".to_string(), "file-2".to_string()];
        let builder = VectorStoreBuilder::new()
            .name("Multi-File Store")
            .file_ids(files.clone());

        assert_eq!(builder.file_ids_ref(), files.as_slice());
        assert_eq!(builder.file_count(), 2);
    }

    /// File-attachment builder keeps both ids verbatim.
    #[test]
    fn test_vector_store_file_builder() {
        use crate::builders::vector_stores::VectorStoreFileBuilder;

        let builder = VectorStoreFileBuilder::new("vs-123", "file-456");
        assert_eq!(builder.vector_store_id(), "vs-123");
        assert_eq!(builder.file_id(), "file-456");
    }

    /// Search builder records store id, query, limit, and filters.
    #[test]
    fn test_vector_store_search_builder() {
        use crate::builders::vector_stores::VectorStoreSearchBuilder;

        let builder = VectorStoreSearchBuilder::new("vs-123", "test query")
            .limit(10)
            .filter("category", "docs");

        assert_eq!(builder.vector_store_id(), "vs-123");
        assert_eq!(builder.query(), "test query");
        assert_eq!(builder.limit_ref(), Some(10));
        assert_eq!(builder.filter_ref().len(), 1);
    }

    /// A fresh search builder has no limit and no filters.
    #[test]
    fn test_vector_store_search_builder_default() {
        use crate::builders::vector_stores::VectorStoreSearchBuilder;

        let builder = VectorStoreSearchBuilder::new("vs-123", "query");
        assert!(builder.limit_ref().is_none());
        assert!(builder.filter_ref().is_empty());
    }
}
3727
3728// Placeholder client types for different API endpoints
3729// TODO: Implement these properly once the builders are ready
3730
/// Client for assistants API.
///
/// A cheap, `Copy`-able handle that borrows the main [`Client`] and exposes
/// assistant, run, and message operations. The `T` parameter (default `()`)
/// is the interceptor state type: each operation creates a `T::default()`
/// and threads it through the before/after interceptor hooks.
#[derive(Debug, Clone, Copy)]
pub struct AssistantsClient<'a, T = ()> {
    // Borrowed parent client; supplies the base API configuration
    // (`self.client.base_configuration`) used by every call.
    client: &'a Client<T>,
}
3736
3737impl<T: Default + Send + Sync> AssistantsClient<'_, T> {
3738    /// Create a new assistant.
3739    ///
3740    /// # Example
3741    ///
3742    /// ```rust,ignore
3743    /// use openai_ergonomic::Client;
3744    /// use openai_ergonomic::builders::assistants::AssistantBuilder;
3745    ///
3746    /// # async fn example() -> openai_ergonomic::Result<()> {
3747    /// let client = Client::from_env()?;
3748    /// let builder = AssistantBuilder::new("gpt-4")
3749    ///     .name("Math Tutor")
3750    ///     .instructions("You are a helpful math tutor.");
3751    /// let assistant = client.assistants().create(builder).await?;
3752    /// println!("Created assistant: {}", assistant.id);
3753    /// # Ok(())
3754    /// # }
3755    /// ```
3756    pub async fn create(&self, builder: AssistantBuilder) -> Result<AssistantObject> {
3757        let request = builder.build()?;
3758
3759        // Prepare interceptor context
3760        let mut state = T::default();
3761        let operation = operation_names::ASSISTANT_CREATE;
3762        let model = request.model.clone();
3763        let request_json = serde_json::to_string(&request).unwrap_or_default();
3764
3765        // Call before_request hook
3766        self.call_before_request(operation, &model, &request_json, &mut state)
3767            .await?;
3768
3769        let start_time = Instant::now();
3770
3771        // Make the API call
3772        let response = match assistants_api::create_assistant()
3773            .configuration(&self.client.base_configuration)
3774            .create_assistant_request(request)
3775            .call()
3776            .await
3777        {
3778            Ok(resp) => resp,
3779            Err(e) => {
3780                let error = self
3781                    .handle_api_error(e, operation, &model, &request_json, &state)
3782                    .await;
3783                return Err(error);
3784            }
3785        };
3786
3787        let duration = start_time.elapsed();
3788
3789        // Call after_response hook
3790        self.call_after_response(
3791            &response,
3792            operation,
3793            &model,
3794            &request_json,
3795            &state,
3796            duration,
3797            None,
3798            None,
3799        )
3800        .await;
3801
3802        Ok(response)
3803    }
3804
3805    /// List assistants with pagination.
3806    ///
3807    /// # Example
3808    ///
3809    /// ```rust,ignore
3810    /// use openai_ergonomic::Client;
3811    ///
3812    /// # async fn example() -> openai_ergonomic::Result<()> {
3813    /// let client = Client::from_env()?;
3814    /// let response = client.assistants().list(Some(20), None, None, None).await?;
3815    /// println!("Found {} assistants", response.data.len());
3816    /// # Ok(())
3817    /// # }
3818    /// ```
3819    pub async fn list(
3820        &self,
3821        limit: Option<i32>,
3822        order: Option<&str>,
3823        after: Option<&str>,
3824        before: Option<&str>,
3825    ) -> Result<ListAssistantsResponse> {
3826        // Prepare interceptor context
3827        let mut state = T::default();
3828        let operation = operation_names::ASSISTANT_LIST;
3829        let model = "assistants";
3830        let request_json = format!(
3831            "{{\"limit\":{limit:?},\"order\":{order:?},\"after\":{after:?},\"before\":{before:?}}}"
3832        );
3833
3834        // Call before_request hook
3835        self.call_before_request(operation, model, &request_json, &mut state)
3836            .await?;
3837
3838        let start_time = Instant::now();
3839
3840        // Make the API call
3841        let response = match assistants_api::list_assistants()
3842            .configuration(&self.client.base_configuration)
3843            .maybe_limit(limit)
3844            .maybe_order(order)
3845            .maybe_after(after)
3846            .maybe_before(before)
3847            .call()
3848            .await
3849        {
3850            Ok(resp) => resp,
3851            Err(e) => {
3852                let error = self
3853                    .handle_api_error(e, operation, model, &request_json, &state)
3854                    .await;
3855                return Err(error);
3856            }
3857        };
3858
3859        let duration = start_time.elapsed();
3860
3861        // Call after_response hook
3862        self.call_after_response(
3863            &response,
3864            operation,
3865            model,
3866            &request_json,
3867            &state,
3868            duration,
3869            None,
3870            None,
3871        )
3872        .await;
3873
3874        Ok(response)
3875    }
3876
3877    /// Get an assistant by ID.
3878    ///
3879    /// # Example
3880    ///
3881    /// ```rust,ignore
3882    /// use openai_ergonomic::Client;
3883    ///
3884    /// # async fn example() -> openai_ergonomic::Result<()> {
3885    /// let client = Client::from_env()?;
3886    /// let assistant = client.assistants().get("asst_123").await?;
3887    /// println!("Assistant: {}", assistant.name.unwrap_or_default());
3888    /// # Ok(())
3889    /// # }
3890    /// ```
3891    pub async fn get(&self, assistant_id: impl Into<String>) -> Result<AssistantObject> {
3892        let id = assistant_id.into();
3893
3894        // Prepare interceptor context
3895        let mut state = T::default();
3896        let operation = operation_names::ASSISTANT_RETRIEVE;
3897        let model = "assistants";
3898        let request_json = format!("{{\"assistant_id\":\"{id}\"}}");
3899
3900        // Call before_request hook
3901        self.call_before_request(operation, model, &request_json, &mut state)
3902            .await?;
3903
3904        let start_time = Instant::now();
3905
3906        // Make the API call
3907        let response = match assistants_api::get_assistant()
3908            .configuration(&self.client.base_configuration)
3909            .assistant_id(&id)
3910            .call()
3911            .await
3912        {
3913            Ok(resp) => resp,
3914            Err(e) => {
3915                let error = self
3916                    .handle_api_error(e, operation, model, &request_json, &state)
3917                    .await;
3918                return Err(error);
3919            }
3920        };
3921
3922        let duration = start_time.elapsed();
3923
3924        // Call after_response hook
3925        self.call_after_response(
3926            &response,
3927            operation,
3928            model,
3929            &request_json,
3930            &state,
3931            duration,
3932            None,
3933            None,
3934        )
3935        .await;
3936
3937        Ok(response)
3938    }
3939
    /// Update an assistant.
    ///
    /// Rebuilds the creation-style request produced by [`AssistantBuilder`]
    /// into a `ModifyAssistantRequest` and calls the modify endpoint.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    /// use openai_ergonomic::builders::assistants::AssistantBuilder;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let builder = AssistantBuilder::new("gpt-4")
    ///     .name("Updated Name")
    ///     .instructions("Updated instructions");
    /// let assistant = client.assistants().update("asst_123", builder).await?;
    /// println!("Updated: {}", assistant.id);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn update(
        &self,
        assistant_id: impl Into<String>,
        builder: AssistantBuilder,
    ) -> Result<AssistantObject> {
        use openai_client_base::models::ModifyAssistantRequest;

        let id = assistant_id.into();
        let request_data = builder.build()?;

        // Convert CreateAssistantRequest to ModifyAssistantRequest
        let mut request = ModifyAssistantRequest::new();
        request.model = Some(request_data.model);
        // Convert Box<CreateAssistantRequestName> to Option<String> by extracting text
        // NOTE(review): mapping the `Null` variant to the *outer* `None` sends
        // the field as "absent" (leave unchanged) rather than `Some(None)`
        // ("clear the field") — TODO confirm this is the intended semantics
        // for all three fields below.
        request.name = request_data.name.and_then(|n| match *n {
            openai_client_base::models::CreateAssistantRequestName::Text(text) => Some(Some(text)),
            openai_client_base::models::CreateAssistantRequestName::Null => None,
        });
        request.description = request_data.description.and_then(|d| match *d {
            openai_client_base::models::CreateAssistantRequestDescription::Text(text) => {
                Some(Some(text))
            }
            openai_client_base::models::CreateAssistantRequestDescription::Null => None,
        });
        request.instructions = request_data.instructions.and_then(|i| match *i {
            openai_client_base::models::CreateAssistantRequestInstructions::Text(text) => {
                Some(Some(text))
            }
            openai_client_base::models::CreateAssistantRequestInstructions::Null => None,
        });
        request.tools = request_data.tools;
        request.metadata = request_data.metadata;

        // Prepare interceptor context; fall back to a generic label when the
        // builder did not set a model.
        let mut state = T::default();
        let operation = operation_names::ASSISTANT_UPDATE;
        let model = request
            .model
            .as_ref()
            .map_or_else(|| "assistants".to_string(), Clone::clone);
        let request_json = serde_json::to_string(&request).unwrap_or_default();

        // Call before_request hook
        self.call_before_request(operation, &model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match assistants_api::modify_assistant()
            .configuration(&self.client.base_configuration)
            .assistant_id(&id)
            .modify_assistant_request(request)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, &model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            &model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }
4040
4041    /// Delete an assistant.
4042    ///
4043    /// # Example
4044    ///
4045    /// ```rust,ignore
4046    /// use openai_ergonomic::Client;
4047    ///
4048    /// # async fn example() -> openai_ergonomic::Result<()> {
4049    /// let client = Client::from_env()?;
4050    /// let response = client.assistants().delete("asst_123").await?;
4051    /// println!("Deleted: {}", response.deleted);
4052    /// # Ok(())
4053    /// # }
4054    /// ```
4055    pub async fn delete(&self, assistant_id: impl Into<String>) -> Result<DeleteAssistantResponse> {
4056        let id = assistant_id.into();
4057
4058        // Prepare interceptor context
4059        let mut state = T::default();
4060        let operation = operation_names::ASSISTANT_DELETE;
4061        let model = "assistants";
4062        let request_json = format!("{{\"assistant_id\":\"{id}\"}}");
4063
4064        // Call before_request hook
4065        self.call_before_request(operation, model, &request_json, &mut state)
4066            .await?;
4067
4068        let start_time = Instant::now();
4069
4070        // Make the API call
4071        let response = match assistants_api::delete_assistant()
4072            .configuration(&self.client.base_configuration)
4073            .assistant_id(&id)
4074            .call()
4075            .await
4076        {
4077            Ok(resp) => resp,
4078            Err(e) => {
4079                let error = self
4080                    .handle_api_error(e, operation, model, &request_json, &state)
4081                    .await;
4082                return Err(error);
4083            }
4084        };
4085
4086        let duration = start_time.elapsed();
4087
4088        // Call after_response hook
4089        self.call_after_response(
4090            &response,
4091            operation,
4092            model,
4093            &request_json,
4094            &state,
4095            duration,
4096            None,
4097            None,
4098        )
4099        .await;
4100
4101        Ok(response)
4102    }
4103
4104    /// Create a run on a thread.
4105    ///
4106    /// # Example
4107    ///
4108    /// ```rust,ignore
4109    /// use openai_ergonomic::Client;
4110    /// use openai_ergonomic::builders::assistants::RunBuilder;
4111    ///
4112    /// # async fn example() -> openai_ergonomic::Result<()> {
4113    /// let client = Client::from_env()?;
4114    /// let builder = RunBuilder::new("asst_123");
4115    /// let run = client.assistants().create_run("thread_123", builder).await?;
4116    /// println!("Run created: {}", run.id);
4117    /// # Ok(())
4118    /// # }
4119    /// ```
4120    pub async fn create_run(
4121        &self,
4122        thread_id: impl Into<String>,
4123        builder: RunBuilder,
4124    ) -> Result<RunObject> {
4125        let thread_id = thread_id.into();
4126        let request = builder.build()?;
4127
4128        // Prepare interceptor context
4129        let mut state = T::default();
4130        let operation = operation_names::RUN_CREATE;
4131        let model = request
4132            .model
4133            .as_ref()
4134            .map_or_else(|| "runs".to_string(), Clone::clone);
4135        let request_json = serde_json::to_string(&request).unwrap_or_default();
4136
4137        // Call before_request hook
4138        self.call_before_request(operation, &model, &request_json, &mut state)
4139            .await?;
4140
4141        let start_time = Instant::now();
4142
4143        // Make the API call
4144        let response = match assistants_api::create_run()
4145            .configuration(&self.client.base_configuration)
4146            .thread_id(&thread_id)
4147            .create_run_request(request)
4148            .call()
4149            .await
4150        {
4151            Ok(resp) => resp,
4152            Err(e) => {
4153                let error = self
4154                    .handle_api_error(e, operation, &model, &request_json, &state)
4155                    .await;
4156                return Err(error);
4157            }
4158        };
4159
4160        let duration = start_time.elapsed();
4161
4162        // Call after_response hook
4163        self.call_after_response(
4164            &response,
4165            operation,
4166            &model,
4167            &request_json,
4168            &state,
4169            duration,
4170            None,
4171            None,
4172        )
4173        .await;
4174
4175        Ok(response)
4176    }
4177
4178    /// List runs on a thread.
4179    ///
4180    /// # Example
4181    ///
4182    /// ```rust,ignore
4183    /// use openai_ergonomic::Client;
4184    ///
4185    /// # async fn example() -> openai_ergonomic::Result<()> {
4186    /// let client = Client::from_env()?;
4187    /// let response = client.assistants().list_runs("thread_123", None, None, None, None).await?;
4188    /// println!("Found {} runs", response.data.len());
4189    /// # Ok(())
4190    /// # }
4191    /// ```
4192    pub async fn list_runs(
4193        &self,
4194        thread_id: impl Into<String>,
4195        limit: Option<i32>,
4196        order: Option<&str>,
4197        after: Option<&str>,
4198        before: Option<&str>,
4199    ) -> Result<ListRunsResponse> {
4200        let thread_id = thread_id.into();
4201
4202        // Prepare interceptor context
4203        let mut state = T::default();
4204        let operation = operation_names::RUN_LIST;
4205        let model = "runs";
4206        let request_json = format!(
4207            "{{\"thread_id\":\"{thread_id}\",\"limit\":{limit:?},\"order\":{order:?},\"after\":{after:?},\"before\":{before:?}}}"
4208        );
4209
4210        // Call before_request hook
4211        self.call_before_request(operation, model, &request_json, &mut state)
4212            .await?;
4213
4214        let start_time = Instant::now();
4215
4216        // Make the API call
4217        let response = match assistants_api::list_runs()
4218            .configuration(&self.client.base_configuration)
4219            .thread_id(&thread_id)
4220            .maybe_limit(limit)
4221            .maybe_order(order)
4222            .maybe_after(after)
4223            .maybe_before(before)
4224            .call()
4225            .await
4226        {
4227            Ok(resp) => resp,
4228            Err(e) => {
4229                let error = self
4230                    .handle_api_error(e, operation, model, &request_json, &state)
4231                    .await;
4232                return Err(error);
4233            }
4234        };
4235
4236        let duration = start_time.elapsed();
4237
4238        // Call after_response hook
4239        self.call_after_response(
4240            &response,
4241            operation,
4242            model,
4243            &request_json,
4244            &state,
4245            duration,
4246            None,
4247            None,
4248        )
4249        .await;
4250
4251        Ok(response)
4252    }
4253
4254    /// Get a run.
4255    ///
4256    /// # Example
4257    ///
4258    /// ```rust,ignore
4259    /// use openai_ergonomic::Client;
4260    ///
4261    /// # async fn example() -> openai_ergonomic::Result<()> {
4262    /// let client = Client::from_env()?;
4263    /// let run = client.assistants().get_run("thread_123", "run_123").await?;
4264    /// println!("Run status: {}", run.status);
4265    /// # Ok(())
4266    /// # }
4267    /// ```
4268    pub async fn get_run(
4269        &self,
4270        thread_id: impl Into<String>,
4271        run_id: impl Into<String>,
4272    ) -> Result<RunObject> {
4273        let thread_id = thread_id.into();
4274        let run_id = run_id.into();
4275
4276        // Prepare interceptor context
4277        let mut state = T::default();
4278        let operation = operation_names::RUN_RETRIEVE;
4279        let model = "runs";
4280        let request_json = format!("{{\"thread_id\":\"{thread_id}\",\"run_id\":\"{run_id}\"}}");
4281
4282        // Call before_request hook
4283        self.call_before_request(operation, model, &request_json, &mut state)
4284            .await?;
4285
4286        let start_time = Instant::now();
4287
4288        // Make the API call
4289        let response = match assistants_api::get_run()
4290            .configuration(&self.client.base_configuration)
4291            .thread_id(&thread_id)
4292            .run_id(&run_id)
4293            .call()
4294            .await
4295        {
4296            Ok(resp) => resp,
4297            Err(e) => {
4298                let error = self
4299                    .handle_api_error(e, operation, model, &request_json, &state)
4300                    .await;
4301                return Err(error);
4302            }
4303        };
4304
4305        let duration = start_time.elapsed();
4306
4307        // Call after_response hook
4308        self.call_after_response(
4309            &response,
4310            operation,
4311            model,
4312            &request_json,
4313            &state,
4314            duration,
4315            None,
4316            None,
4317        )
4318        .await;
4319
4320        Ok(response)
4321    }
4322
4323    /// Cancel a run.
4324    ///
4325    /// # Example
4326    ///
4327    /// ```rust,ignore
4328    /// use openai_ergonomic::Client;
4329    ///
4330    /// # async fn example() -> openai_ergonomic::Result<()> {
4331    /// let client = Client::from_env()?;
4332    /// let run = client.assistants().cancel_run("thread_123", "run_123").await?;
4333    /// println!("Run cancelled: {}", run.status);
4334    /// # Ok(())
4335    /// # }
4336    /// ```
4337    pub async fn cancel_run(
4338        &self,
4339        thread_id: impl Into<String>,
4340        run_id: impl Into<String>,
4341    ) -> Result<RunObject> {
4342        let thread_id = thread_id.into();
4343        let run_id = run_id.into();
4344
4345        // Prepare interceptor context
4346        let mut state = T::default();
4347        let operation = operation_names::RUN_CANCEL;
4348        let model = "runs";
4349        let request_json = format!("{{\"thread_id\":\"{thread_id}\",\"run_id\":\"{run_id}\"}}");
4350
4351        // Call before_request hook
4352        self.call_before_request(operation, model, &request_json, &mut state)
4353            .await?;
4354
4355        let start_time = Instant::now();
4356
4357        // Make the API call
4358        let response = match assistants_api::cancel_run()
4359            .configuration(&self.client.base_configuration)
4360            .thread_id(&thread_id)
4361            .run_id(&run_id)
4362            .call()
4363            .await
4364        {
4365            Ok(resp) => resp,
4366            Err(e) => {
4367                let error = self
4368                    .handle_api_error(e, operation, model, &request_json, &state)
4369                    .await;
4370                return Err(error);
4371            }
4372        };
4373
4374        let duration = start_time.elapsed();
4375
4376        // Call after_response hook
4377        self.call_after_response(
4378            &response,
4379            operation,
4380            model,
4381            &request_json,
4382            &state,
4383            duration,
4384            None,
4385            None,
4386        )
4387        .await;
4388
4389        Ok(response)
4390    }
4391
4392    /// Submit tool outputs to a run.
4393    ///
4394    /// # Example
4395    ///
4396    /// ```rust,ignore
4397    /// use openai_ergonomic::Client;
4398    ///
4399    /// # async fn example() -> openai_ergonomic::Result<()> {
4400    /// let client = Client::from_env()?;
4401    /// let outputs = vec![
4402    ///     SubmitToolOutputsRunRequestToolOutputsInner::new("call_123", "output data")
4403    /// ];
4404    /// let run = client.assistants().submit_tool_outputs("thread_123", "run_123", outputs).await?;
4405    /// println!("Tool outputs submitted: {}", run.id);
4406    /// # Ok(())
4407    /// # }
4408    /// ```
4409    pub async fn submit_tool_outputs(
4410        &self,
4411        thread_id: impl Into<String>,
4412        run_id: impl Into<String>,
4413        tool_outputs: Vec<SubmitToolOutputsRunRequestToolOutputsInner>,
4414    ) -> Result<RunObject> {
4415        use openai_client_base::models::SubmitToolOutputsRunRequest;
4416
4417        let thread_id = thread_id.into();
4418        let run_id = run_id.into();
4419        let request = SubmitToolOutputsRunRequest::new(tool_outputs);
4420
4421        // Prepare interceptor context
4422        let mut state = T::default();
4423        let operation = operation_names::RUN_SUBMIT_TOOL_OUTPUTS;
4424        let model = "runs";
4425        let request_json = serde_json::to_string(&request).unwrap_or_default();
4426
4427        // Call before_request hook
4428        self.call_before_request(operation, model, &request_json, &mut state)
4429            .await?;
4430
4431        let start_time = Instant::now();
4432
4433        // Make the API call
4434        let response = match assistants_api::submit_tool_ouputs_to_run()
4435            .configuration(&self.client.base_configuration)
4436            .thread_id(&thread_id)
4437            .run_id(&run_id)
4438            .submit_tool_outputs_run_request(request)
4439            .call()
4440            .await
4441        {
4442            Ok(resp) => resp,
4443            Err(e) => {
4444                let error = self
4445                    .handle_api_error(e, operation, model, &request_json, &state)
4446                    .await;
4447                return Err(error);
4448            }
4449        };
4450
4451        let duration = start_time.elapsed();
4452
4453        // Call after_response hook
4454        self.call_after_response(
4455            &response,
4456            operation,
4457            model,
4458            &request_json,
4459            &state,
4460            duration,
4461            None,
4462            None,
4463        )
4464        .await;
4465
4466        Ok(response)
4467    }
4468
4469    /// Create a message on a thread.
4470    ///
4471    /// # Example
4472    ///
4473    /// ```rust,ignore
4474    /// use openai_ergonomic::Client;
4475    /// use openai_ergonomic::builders::assistants::MessageBuilder;
4476    ///
4477    /// # async fn example() -> openai_ergonomic::Result<()> {
4478    /// let client = Client::from_env()?;
4479    /// let builder = MessageBuilder::new("user", "Hello, assistant!");
4480    /// let message = client.assistants().create_message("thread_123", builder).await?;
4481    /// println!("Message created: {}", message.id);
4482    /// # Ok(())
4483    /// # }
4484    /// ```
4485    pub async fn create_message(
4486        &self,
4487        thread_id: impl Into<String>,
4488        builder: MessageBuilder,
4489    ) -> Result<MessageObject> {
4490        let thread_id = thread_id.into();
4491        let request = builder.build()?;
4492
4493        // Prepare interceptor context
4494        let mut state = T::default();
4495        let operation = operation_names::MESSAGE_CREATE;
4496        let model = "messages";
4497        let request_json = serde_json::to_string(&request).unwrap_or_default();
4498
4499        // Call before_request hook
4500        self.call_before_request(operation, model, &request_json, &mut state)
4501            .await?;
4502
4503        let start_time = Instant::now();
4504
4505        // Make the API call
4506        let response = match assistants_api::create_message()
4507            .configuration(&self.client.base_configuration)
4508            .thread_id(&thread_id)
4509            .create_message_request(request)
4510            .call()
4511            .await
4512        {
4513            Ok(resp) => resp,
4514            Err(e) => {
4515                let error = self
4516                    .handle_api_error(e, operation, model, &request_json, &state)
4517                    .await;
4518                return Err(error);
4519            }
4520        };
4521
4522        let duration = start_time.elapsed();
4523
4524        // Call after_response hook
4525        self.call_after_response(
4526            &response,
4527            operation,
4528            model,
4529            &request_json,
4530            &state,
4531            duration,
4532            None,
4533            None,
4534        )
4535        .await;
4536
4537        Ok(response)
4538    }
4539
4540    /// List messages on a thread.
4541    ///
4542    /// # Example
4543    ///
4544    /// ```rust,ignore
4545    /// use openai_ergonomic::Client;
4546    ///
4547    /// # async fn example() -> openai_ergonomic::Result<()> {
4548    /// let client = Client::from_env()?;
4549    /// let response = client.assistants().list_messages("thread_123", None, None, None, None, None).await?;
4550    /// println!("Found {} messages", response.data.len());
4551    /// # Ok(())
4552    /// # }
4553    /// ```
4554    pub async fn list_messages(
4555        &self,
4556        thread_id: impl Into<String>,
4557        limit: Option<i32>,
4558        order: Option<&str>,
4559        after: Option<&str>,
4560        before: Option<&str>,
4561        run_id: Option<&str>,
4562    ) -> Result<ListMessagesResponse> {
4563        let thread_id = thread_id.into();
4564
4565        // Prepare interceptor context
4566        let mut state = T::default();
4567        let operation = operation_names::MESSAGE_LIST;
4568        let model = "messages";
4569        let request_json = format!("{{\"thread_id\":\"{thread_id}\",\"limit\":{limit:?},\"order\":{order:?},\"after\":{after:?},\"before\":{before:?},\"run_id\":{run_id:?}}}");
4570
4571        // Call before_request hook
4572        self.call_before_request(operation, model, &request_json, &mut state)
4573            .await?;
4574
4575        let start_time = Instant::now();
4576
4577        // Make the API call
4578        let response = match assistants_api::list_messages()
4579            .configuration(&self.client.base_configuration)
4580            .thread_id(&thread_id)
4581            .maybe_limit(limit)
4582            .maybe_order(order)
4583            .maybe_after(after)
4584            .maybe_before(before)
4585            .maybe_run_id(run_id)
4586            .call()
4587            .await
4588        {
4589            Ok(resp) => resp,
4590            Err(e) => {
4591                let error = self
4592                    .handle_api_error(e, operation, model, &request_json, &state)
4593                    .await;
4594                return Err(error);
4595            }
4596        };
4597
4598        let duration = start_time.elapsed();
4599
4600        // Call after_response hook
4601        self.call_after_response(
4602            &response,
4603            operation,
4604            model,
4605            &request_json,
4606            &state,
4607            duration,
4608            None,
4609            None,
4610        )
4611        .await;
4612
4613        Ok(response)
4614    }
4615
4616    /// Get a message.
4617    ///
4618    /// # Example
4619    ///
4620    /// ```rust,ignore
4621    /// use openai_ergonomic::Client;
4622    ///
4623    /// # async fn example() -> openai_ergonomic::Result<()> {
4624    /// let client = Client::from_env()?;
4625    /// let message = client.assistants().get_message("thread_123", "msg_123").await?;
4626    /// println!("Message role: {}", message.role);
4627    /// # Ok(())
4628    /// # }
4629    /// ```
4630    pub async fn get_message(
4631        &self,
4632        thread_id: impl Into<String>,
4633        message_id: impl Into<String>,
4634    ) -> Result<MessageObject> {
4635        let thread_id = thread_id.into();
4636        let message_id = message_id.into();
4637
4638        // Prepare interceptor context
4639        let mut state = T::default();
4640        let operation = operation_names::MESSAGE_RETRIEVE;
4641        let model = "messages";
4642        let request_json =
4643            format!("{{\"thread_id\":\"{thread_id}\",\"message_id\":\"{message_id}\"}}");
4644
4645        // Call before_request hook
4646        self.call_before_request(operation, model, &request_json, &mut state)
4647            .await?;
4648
4649        let start_time = Instant::now();
4650
4651        // Make the API call
4652        let response = match assistants_api::get_message()
4653            .configuration(&self.client.base_configuration)
4654            .thread_id(&thread_id)
4655            .message_id(&message_id)
4656            .call()
4657            .await
4658        {
4659            Ok(resp) => resp,
4660            Err(e) => {
4661                let error = self
4662                    .handle_api_error(e, operation, model, &request_json, &state)
4663                    .await;
4664                return Err(error);
4665            }
4666        };
4667
4668        let duration = start_time.elapsed();
4669
4670        // Call after_response hook
4671        self.call_after_response(
4672            &response,
4673            operation,
4674            model,
4675            &request_json,
4676            &state,
4677            duration,
4678            None,
4679            None,
4680        )
4681        .await;
4682
4683        Ok(response)
4684    }
4685
4686    /// List run steps.
4687    ///
4688    /// # Example
4689    ///
4690    /// ```rust,ignore
4691    /// use openai_ergonomic::Client;
4692    ///
4693    /// # async fn example() -> openai_ergonomic::Result<()> {
4694    /// let client = Client::from_env()?;
4695    /// let response = client.assistants().list_run_steps("thread_123", "run_123", None, None, None, None, None).await?;
4696    /// println!("Found {} run steps", response.data.len());
4697    /// # Ok(())
4698    /// # }
4699    /// ```
4700    #[allow(clippy::too_many_arguments)]
4701    pub async fn list_run_steps(
4702        &self,
4703        thread_id: impl Into<String>,
4704        run_id: impl Into<String>,
4705        limit: Option<i32>,
4706        order: Option<&str>,
4707        after: Option<&str>,
4708        before: Option<&str>,
4709        include: Option<Vec<String>>,
4710    ) -> Result<ListRunStepsResponse> {
4711        let thread_id = thread_id.into();
4712        let run_id = run_id.into();
4713
4714        // Prepare interceptor context
4715        let mut state = T::default();
4716        let operation = operation_names::RUN_STEP_LIST;
4717        let model = "run_steps";
4718        let request_json = format!("{{\"thread_id\":\"{thread_id}\",\"run_id\":\"{run_id}\",\"limit\":{limit:?},\"order\":{order:?},\"after\":{after:?},\"before\":{before:?},\"include\":{include:?}}}");
4719
4720        // Call before_request hook
4721        self.call_before_request(operation, model, &request_json, &mut state)
4722            .await?;
4723
4724        let start_time = Instant::now();
4725
4726        // Make the API call
4727        let response = match assistants_api::list_run_steps()
4728            .configuration(&self.client.base_configuration)
4729            .thread_id(&thread_id)
4730            .run_id(&run_id)
4731            .maybe_limit(limit)
4732            .maybe_order(order)
4733            .maybe_after(after)
4734            .maybe_before(before)
4735            .maybe_include_left_square_bracket_right_square_bracket(include)
4736            .call()
4737            .await
4738        {
4739            Ok(resp) => resp,
4740            Err(e) => {
4741                let error = self
4742                    .handle_api_error(e, operation, model, &request_json, &state)
4743                    .await;
4744                return Err(error);
4745            }
4746        };
4747
4748        let duration = start_time.elapsed();
4749
4750        // Call after_response hook
4751        self.call_after_response(
4752            &response,
4753            operation,
4754            model,
4755            &request_json,
4756            &state,
4757            duration,
4758            None,
4759            None,
4760        )
4761        .await;
4762
4763        Ok(response)
4764    }
4765
4766    /// Get a run step.
4767    ///
4768    /// # Example
4769    ///
4770    /// ```rust,ignore
4771    /// use openai_ergonomic::Client;
4772    ///
4773    /// # async fn example() -> openai_ergonomic::Result<()> {
4774    /// let client = Client::from_env()?;
4775    /// let step = client.assistants().get_run_step("thread_123", "run_123", "step_123", None).await?;
4776    /// println!("Step type: {}", step.type_);
4777    /// # Ok(())
4778    /// # }
4779    /// ```
4780    pub async fn get_run_step(
4781        &self,
4782        thread_id: impl Into<String>,
4783        run_id: impl Into<String>,
4784        step_id: impl Into<String>,
4785        include: Option<Vec<String>>,
4786    ) -> Result<RunStepObject> {
4787        let thread_id = thread_id.into();
4788        let run_id = run_id.into();
4789        let step_id = step_id.into();
4790
4791        // Prepare interceptor context
4792        let mut state = T::default();
4793        let operation = operation_names::RUN_STEP_RETRIEVE;
4794        let model = "run_steps";
4795        let request_json = format!(
4796            "{{\"thread_id\":\"{thread_id}\",\"run_id\":\"{run_id}\",\"step_id\":\"{step_id}\",\"include\":{include:?}}}"
4797        );
4798
4799        // Call before_request hook
4800        self.call_before_request(operation, model, &request_json, &mut state)
4801            .await?;
4802
4803        let start_time = Instant::now();
4804
4805        // Make the API call
4806        let response = match assistants_api::get_run_step()
4807            .configuration(&self.client.base_configuration)
4808            .thread_id(&thread_id)
4809            .run_id(&run_id)
4810            .step_id(&step_id)
4811            .maybe_include_left_square_bracket_right_square_bracket(include)
4812            .call()
4813            .await
4814        {
4815            Ok(resp) => resp,
4816            Err(e) => {
4817                let error = self
4818                    .handle_api_error(e, operation, model, &request_json, &state)
4819                    .await;
4820                return Err(error);
4821            }
4822        };
4823
4824        let duration = start_time.elapsed();
4825
4826        // Call after_response hook
4827        self.call_after_response(
4828            &response,
4829            operation,
4830            model,
4831            &request_json,
4832            &state,
4833            duration,
4834            None,
4835            None,
4836        )
4837        .await;
4838
4839        Ok(response)
4840    }
4841}
4842
/// Client for audio API.
///
/// A lightweight borrowed handle onto the parent [`Client`]; `T` is the
/// per-request interceptor state type (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct AudioClient<'a, T = ()> {
    // Parent client supplying the base configuration and interceptor chain.
    client: &'a Client<T>,
}
4849
/// Client for embeddings API.
///
/// A lightweight borrowed handle onto the parent [`Client`]; `T` is the
/// per-request interceptor state type (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct EmbeddingsClient<'a, T = ()> {
    // Parent client supplying the base configuration and interceptor chain.
    client: &'a Client<T>,
}
4856
/// Client for images API.
///
/// A lightweight borrowed handle onto the parent [`Client`]; `T` is the
/// per-request interceptor state type (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct ImagesClient<'a, T = ()> {
    // Parent client supplying the base configuration and interceptor chain.
    client: &'a Client<T>,
}
4863
/// Client for files API.
///
/// A lightweight borrowed handle onto the parent [`Client`]; `T` is the
/// per-request interceptor state type (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct FilesClient<'a, T = ()> {
    // Parent client supplying the base configuration and interceptor chain.
    client: &'a Client<T>,
}
4870
/// Client for fine-tuning API.
///
/// A lightweight borrowed handle onto the parent [`Client`]; `T` is the
/// per-request interceptor state type (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct FineTuningClient<'a, T = ()> {
    // Parent client supplying the base configuration and interceptor chain.
    client: &'a Client<T>,
}
4877
/// Client for batch API.
///
/// A lightweight borrowed handle onto the parent [`Client`]; `T` is the
/// per-request interceptor state type (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct BatchClient<'a, T = ()> {
    // Parent client supplying the base configuration and interceptor chain.
    client: &'a Client<T>,
}
4884
/// Client for vector stores API.
///
/// A lightweight borrowed handle onto the parent [`Client`]; `T` is the
/// per-request interceptor state type (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct VectorStoresClient<'a, T = ()> {
    // Parent client supplying the base configuration and interceptor chain.
    client: &'a Client<T>,
}
4891
/// Client for moderations API.
///
/// A lightweight borrowed handle onto the parent [`Client`]; `T` is the
/// per-request interceptor state type (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct ModerationsClient<'a, T = ()> {
    // Parent client supplying the base configuration and interceptor chain.
    client: &'a Client<T>,
}
4898
/// Client for threads API.
///
/// A lightweight borrowed handle onto the parent [`Client`]; `T` is the
/// per-request interceptor state type (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct ThreadsClient<'a, T = ()> {
    // Parent client supplying the base configuration and interceptor chain.
    client: &'a Client<T>,
}
4905
/// Client for uploads API.
///
/// A lightweight borrowed handle onto the parent [`Client`]; `T` is the
/// per-request interceptor state type (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct UploadsClient<'a, T = ()> {
    // Parent client supplying the base configuration and interceptor chain.
    client: &'a Client<T>,
}
4912
/// Client for models API.
///
/// A lightweight borrowed handle onto the parent [`Client`]; `T` is the
/// per-request interceptor state type (defaults to `()`).
#[derive(Debug, Clone, Copy)]
pub struct ModelsClient<'a, T = ()> {
    // Parent client supplying the base configuration and interceptor chain.
    client: &'a Client<T>,
}
4918
/// Client for completions API.
///
/// A lightweight borrowed handle onto the parent [`Client`]; `T` is the
/// per-request interceptor state type (defaults to `()`).
#[derive(Debug, Clone, Copy)]
pub struct CompletionsClient<'a, T = ()> {
    // Parent client supplying the base configuration and interceptor chain.
    client: &'a Client<T>,
}
4924
/// Client for usage API.
///
/// A lightweight borrowed handle onto the parent [`Client`]; `T` is the
/// per-request interceptor state type (defaults to `()`).
#[derive(Debug, Clone, Copy)]
pub struct UsageClient<'a, T = ()> {
    // Parent client supplying the base configuration and interceptor chain.
    client: &'a Client<T>,
}
4930
// Apply interceptor helper methods to all sub-clients. The macro (defined
// elsewhere in this file) presumably generates the `call_before_request`,
// `call_after_response`, and `handle_api_error` methods used throughout the
// sub-client impls below — confirm against the macro definition.
impl_interceptor_helpers!(AssistantsClient<'_, T>);
impl_interceptor_helpers!(AudioClient<'_, T>);
impl_interceptor_helpers!(EmbeddingsClient<'_, T>);
impl_interceptor_helpers!(ImagesClient<'_, T>);
impl_interceptor_helpers!(FilesClient<'_, T>);
impl_interceptor_helpers!(FineTuningClient<'_, T>);
impl_interceptor_helpers!(BatchClient<'_, T>);
impl_interceptor_helpers!(VectorStoresClient<'_, T>);
impl_interceptor_helpers!(ModerationsClient<'_, T>);
impl_interceptor_helpers!(ThreadsClient<'_, T>);
impl_interceptor_helpers!(UploadsClient<'_, T>);
impl_interceptor_helpers!(ModelsClient<'_, T>);
impl_interceptor_helpers!(CompletionsClient<'_, T>);
impl_interceptor_helpers!(UsageClient<'_, T>);
4946
4947impl<T: Default + Send + Sync> ModelsClient<'_, T> {
4948    /// List all available models.
4949    ///
4950    /// # Example
4951    ///
4952    /// ```rust,ignore
4953    /// use openai_ergonomic::Client;
4954    ///
4955    /// # async fn example() -> openai_ergonomic::Result<()> {
4956    /// let client = Client::from_env()?;
4957    /// let models = client.models().list().await?;
4958    /// println!("Available models: {}", models.data.len());
4959    /// # Ok(())
4960    /// # }
4961    /// ```
4962    pub async fn list(&self) -> Result<ListModelsResponse> {
4963        // Prepare interceptor context
4964        let mut state = T::default();
4965        let operation = operation_names::MODEL_LIST;
4966        let model = "models";
4967        let request_json = "{}".to_string();
4968
4969        // Call before_request hook
4970        self.call_before_request(operation, model, &request_json, &mut state)
4971            .await?;
4972
4973        let start_time = Instant::now();
4974
4975        // Make the API call
4976        let response = match models_api::list_models()
4977            .configuration(&self.client.base_configuration)
4978            .call()
4979            .await
4980        {
4981            Ok(resp) => resp,
4982            Err(e) => {
4983                let error = self
4984                    .handle_api_error(e, operation, model, &request_json, &state)
4985                    .await;
4986                return Err(error);
4987            }
4988        };
4989
4990        let duration = start_time.elapsed();
4991
4992        // Call after_response hook
4993        self.call_after_response(
4994            &response,
4995            operation,
4996            model,
4997            &request_json,
4998            &state,
4999            duration,
5000            None,
5001            None,
5002        )
5003        .await;
5004
5005        Ok(response)
5006    }
5007
5008    /// Retrieve information about a specific model.
5009    ///
5010    /// # Example
5011    ///
5012    /// ```rust,ignore
5013    /// use openai_ergonomic::Client;
5014    ///
5015    /// # async fn example() -> openai_ergonomic::Result<()> {
5016    /// let client = Client::from_env()?;
5017    /// let model = client.models().get("gpt-4").await?;
5018    /// println!("Model ID: {}", model.id);
5019    /// # Ok(())
5020    /// # }
5021    /// ```
5022    pub async fn get(&self, model_id: impl Into<String>) -> Result<Model> {
5023        let id = model_id.into();
5024
5025        // Prepare interceptor context
5026        let mut state = T::default();
5027        let operation = operation_names::MODEL_RETRIEVE;
5028        let model = "models";
5029        let request_json = format!("{{\"model_id\":\"{id}\"}}");
5030
5031        // Call before_request hook
5032        self.call_before_request(operation, model, &request_json, &mut state)
5033            .await?;
5034
5035        let start_time = Instant::now();
5036
5037        // Make the API call
5038        let response = match models_api::retrieve_model()
5039            .configuration(&self.client.base_configuration)
5040            .model(&id)
5041            .call()
5042            .await
5043        {
5044            Ok(resp) => resp,
5045            Err(e) => {
5046                let error = self
5047                    .handle_api_error(e, operation, model, &request_json, &state)
5048                    .await;
5049                return Err(error);
5050            }
5051        };
5052
5053        let duration = start_time.elapsed();
5054
5055        // Call after_response hook
5056        self.call_after_response(
5057            &response,
5058            operation,
5059            model,
5060            &request_json,
5061            &state,
5062            duration,
5063            None,
5064            None,
5065        )
5066        .await;
5067
5068        Ok(response)
5069    }
5070
5071    /// Retrieve information about a model using a builder.
5072    pub async fn retrieve(&self, builder: ModelRetrievalBuilder) -> Result<Model> {
5073        self.get(builder.model_id()).await
5074    }
5075
5076    /// Delete a fine-tuned model.
5077    ///
5078    /// You must have the Owner role in your organization to delete a model.
5079    ///
5080    /// # Example
5081    ///
5082    /// ```rust,ignore
5083    /// use openai_ergonomic::Client;
5084    ///
5085    /// # async fn example() -> openai_ergonomic::Result<()> {
5086    /// let client = Client::from_env()?;
5087    /// let response = client.models().delete("ft:gpt-3.5-turbo:my-org:custom:id").await?;
5088    /// println!("Deleted: {}", response.deleted);
5089    /// # Ok(())
5090    /// # }
5091    /// ```
5092    pub async fn delete(&self, model_id: impl Into<String>) -> Result<DeleteModelResponse> {
5093        let id = model_id.into();
5094
5095        // Prepare interceptor context
5096        let mut state = T::default();
5097        let operation = operation_names::MODEL_DELETE;
5098        let model = "models";
5099        let request_json = format!("{{\"model_id\":\"{id}\"}}");
5100
5101        // Call before_request hook
5102        self.call_before_request(operation, model, &request_json, &mut state)
5103            .await?;
5104
5105        let start_time = Instant::now();
5106
5107        // Make the API call
5108        let response = match models_api::delete_model()
5109            .configuration(&self.client.base_configuration)
5110            .model(&id)
5111            .call()
5112            .await
5113        {
5114            Ok(resp) => resp,
5115            Err(e) => {
5116                let error = self
5117                    .handle_api_error(e, operation, model, &request_json, &state)
5118                    .await;
5119                return Err(error);
5120            }
5121        };
5122
5123        let duration = start_time.elapsed();
5124
5125        // Call after_response hook
5126        self.call_after_response(
5127            &response,
5128            operation,
5129            model,
5130            &request_json,
5131            &state,
5132            duration,
5133            None,
5134            None,
5135        )
5136        .await;
5137
5138        Ok(response)
5139    }
5140
5141    /// Delete a fine-tuned model using a builder.
5142    pub async fn remove(&self, builder: ModelDeleteBuilder) -> Result<DeleteModelResponse> {
5143        self.delete(builder.model_id()).await
5144    }
5145}
5146
5147impl<T: Default + Send + Sync> CompletionsClient<'_, T> {
5148    /// Create a completions builder for the specified model.
5149    ///
5150    /// # Example
5151    ///
5152    /// ```rust,ignore
5153    /// use openai_ergonomic::Client;
5154    ///
5155    /// # async fn example() -> openai_ergonomic::Result<()> {
5156    /// let client = Client::from_env()?;
5157    /// let builder = client.completions().builder("gpt-3.5-turbo-instruct");
5158    /// # Ok(())
5159    /// # }
5160    /// ```
5161    #[must_use]
5162    pub fn builder(&self, model: impl Into<String>) -> CompletionsBuilder {
5163        CompletionsBuilder::new(model)
5164    }
5165
5166    /// Execute a completion request.
5167    ///
5168    /// # Example
5169    ///
5170    /// ```rust,ignore
5171    /// use openai_ergonomic::Client;
5172    ///
5173    /// # async fn example() -> openai_ergonomic::Result<()> {
5174    /// let client = Client::from_env()?;
5175    /// let builder = client.completions()
5176    ///     .builder("gpt-3.5-turbo-instruct")
5177    ///     .prompt("Once upon a time")
5178    ///     .max_tokens(50);
5179    /// let response = client.completions().create(builder).await?;
5180    /// println!("Completion: {:?}", response.choices);
5181    /// # Ok(())
5182    /// # }
5183    /// ```
5184    pub async fn create(&self, builder: CompletionsBuilder) -> Result<CreateCompletionResponse> {
5185        let request = builder.build()?;
5186
5187        // Prepare interceptor context
5188        let mut state = T::default();
5189        let operation = operation_names::TEXT_COMPLETION;
5190        let model = request.model.clone();
5191        let request_json = serde_json::to_string(&request).unwrap_or_default();
5192
5193        // Call before_request hook
5194        self.call_before_request(operation, &model, &request_json, &mut state)
5195            .await?;
5196
5197        let start_time = Instant::now();
5198
5199        // Make the API call
5200        let response = match completions_api::create_completion()
5201            .configuration(&self.client.base_configuration)
5202            .create_completion_request(request)
5203            .call()
5204            .await
5205        {
5206            Ok(resp) => resp,
5207            Err(e) => {
5208                let error = self
5209                    .handle_api_error(e, operation, &model, &request_json, &state)
5210                    .await;
5211                return Err(error);
5212            }
5213        };
5214
5215        let duration = start_time.elapsed();
5216
5217        // Call after_response hook
5218        self.call_after_response(
5219            &response,
5220            operation,
5221            &model,
5222            &request_json,
5223            &state,
5224            duration,
5225            response.usage.as_ref().map(|u| i64::from(u.prompt_tokens)),
5226            response
5227                .usage
5228                .as_ref()
5229                .map(|u| i64::from(u.completion_tokens)),
5230        )
5231        .await;
5232
5233        Ok(response)
5234    }
5235}
5236
5237impl<T: Default + Send + Sync> UsageClient<'_, T> {
5238    /// Get usage data for audio speeches.
5239    ///
5240    /// # Example
5241    ///
5242    /// ```rust,ignore
5243    /// use openai_ergonomic::Client;
5244    /// use openai_ergonomic::builders::usage::UsageBuilder;
5245    ///
5246    /// # async fn example() -> openai_ergonomic::Result<()> {
5247    /// let client = Client::from_env()?;
5248    /// let builder = UsageBuilder::new(1704067200, None);
5249    /// let usage = client.usage().audio_speeches(builder).await?;
5250    /// println!("Usage: {:?}", usage);
5251    /// # Ok(())
5252    /// # }
5253    /// ```
5254    pub async fn audio_speeches(&self, builder: UsageBuilder) -> Result<UsageResponse> {
5255        // Prepare interceptor context
5256        let mut state = T::default();
5257        let operation = operation_names::USAGE_AUDIO_SPEECHES;
5258        let model = "usage";
5259        let start_time = builder.start_time();
5260        let request_json = format!("{{\"start_time\":{start_time}}}");
5261
5262        // Call before_request hook
5263        self.call_before_request(operation, model, &request_json, &mut state)
5264            .await?;
5265
5266        let start_time = Instant::now();
5267
5268        // Make the API call
5269        let response = match usage_api::usage_audio_speeches()
5270            .configuration(&self.client.base_configuration)
5271            .start_time(builder.start_time())
5272            .maybe_end_time(builder.end_time())
5273            .maybe_bucket_width(builder.bucket_width_str())
5274            .maybe_project_ids(builder.project_ids_option())
5275            .maybe_user_ids(builder.user_ids_option())
5276            .maybe_api_key_ids(builder.api_key_ids_option())
5277            .maybe_models(builder.models_option())
5278            .maybe_group_by(builder.group_by_option())
5279            .maybe_limit(builder.limit_ref())
5280            .maybe_page(builder.page_ref())
5281            .call()
5282            .await
5283        {
5284            Ok(resp) => resp,
5285            Err(e) => {
5286                let error = self
5287                    .handle_api_error(e, operation, model, &request_json, &state)
5288                    .await;
5289                return Err(error);
5290            }
5291        };
5292
5293        let duration = start_time.elapsed();
5294
5295        // Call after_response hook
5296        self.call_after_response(
5297            &response,
5298            operation,
5299            model,
5300            &request_json,
5301            &state,
5302            duration,
5303            None,
5304            None,
5305        )
5306        .await;
5307
5308        Ok(response)
5309    }
5310
5311    /// Get usage data for audio transcriptions.
5312    pub async fn audio_transcriptions(&self, builder: UsageBuilder) -> Result<UsageResponse> {
5313        // Prepare interceptor context
5314        let mut state = T::default();
5315        let operation = operation_names::USAGE_AUDIO_TRANSCRIPTIONS;
5316        let model = "usage";
5317        let start_time = builder.start_time();
5318        let request_json = format!("{{\"start_time\":{start_time}}}");
5319
5320        // Call before_request hook
5321        self.call_before_request(operation, model, &request_json, &mut state)
5322            .await?;
5323
5324        let start_time = Instant::now();
5325
5326        // Make the API call
5327        let response = match usage_api::usage_audio_transcriptions()
5328            .configuration(&self.client.base_configuration)
5329            .start_time(builder.start_time())
5330            .maybe_end_time(builder.end_time())
5331            .maybe_bucket_width(builder.bucket_width_str())
5332            .maybe_project_ids(builder.project_ids_option())
5333            .maybe_user_ids(builder.user_ids_option())
5334            .maybe_api_key_ids(builder.api_key_ids_option())
5335            .maybe_models(builder.models_option())
5336            .maybe_group_by(builder.group_by_option())
5337            .maybe_limit(builder.limit_ref())
5338            .maybe_page(builder.page_ref())
5339            .call()
5340            .await
5341        {
5342            Ok(resp) => resp,
5343            Err(e) => {
5344                let error = self
5345                    .handle_api_error(e, operation, model, &request_json, &state)
5346                    .await;
5347                return Err(error);
5348            }
5349        };
5350
5351        let duration = start_time.elapsed();
5352
5353        // Call after_response hook
5354        self.call_after_response(
5355            &response,
5356            operation,
5357            model,
5358            &request_json,
5359            &state,
5360            duration,
5361            None,
5362            None,
5363        )
5364        .await;
5365
5366        Ok(response)
5367    }
5368
5369    /// Get usage data for code interpreter sessions.
5370    pub async fn code_interpreter_sessions(&self, builder: UsageBuilder) -> Result<UsageResponse> {
5371        // Prepare interceptor context
5372        let mut state = T::default();
5373        let operation = operation_names::USAGE_CODE_INTERPRETER;
5374        let model = "usage";
5375        let start_time = builder.start_time();
5376        let request_json = format!("{{\"start_time\":{start_time}}}");
5377
5378        // Call before_request hook
5379        self.call_before_request(operation, model, &request_json, &mut state)
5380            .await?;
5381
5382        let start_time = Instant::now();
5383
5384        // Make the API call
5385        let response = match usage_api::usage_code_interpreter_sessions()
5386            .configuration(&self.client.base_configuration)
5387            .start_time(builder.start_time())
5388            .maybe_end_time(builder.end_time())
5389            .maybe_bucket_width(builder.bucket_width_str())
5390            .maybe_project_ids(builder.project_ids_option())
5391            .maybe_group_by(builder.group_by_option())
5392            .maybe_limit(builder.limit_ref())
5393            .maybe_page(builder.page_ref())
5394            .call()
5395            .await
5396        {
5397            Ok(resp) => resp,
5398            Err(e) => {
5399                let error = self
5400                    .handle_api_error(e, operation, model, &request_json, &state)
5401                    .await;
5402                return Err(error);
5403            }
5404        };
5405
5406        let duration = start_time.elapsed();
5407
5408        // Call after_response hook
5409        self.call_after_response(
5410            &response,
5411            operation,
5412            model,
5413            &request_json,
5414            &state,
5415            duration,
5416            None,
5417            None,
5418        )
5419        .await;
5420
5421        Ok(response)
5422    }
5423
5424    /// Get usage data for completions.
5425    pub async fn completions(&self, builder: UsageBuilder) -> Result<UsageResponse> {
5426        // Prepare interceptor context
5427        let mut state = T::default();
5428        let operation = operation_names::USAGE_COMPLETIONS;
5429        let model = "usage";
5430        let start_time = builder.start_time();
5431        let request_json = format!("{{\"start_time\":{start_time}}}");
5432
5433        // Call before_request hook
5434        self.call_before_request(operation, model, &request_json, &mut state)
5435            .await?;
5436
5437        let start_time = Instant::now();
5438
5439        // Make the API call
5440        let response = match usage_api::usage_completions()
5441            .configuration(&self.client.base_configuration)
5442            .start_time(builder.start_time())
5443            .maybe_end_time(builder.end_time())
5444            .maybe_bucket_width(builder.bucket_width_str())
5445            .maybe_project_ids(builder.project_ids_option())
5446            .maybe_user_ids(builder.user_ids_option())
5447            .maybe_api_key_ids(builder.api_key_ids_option())
5448            .maybe_models(builder.models_option())
5449            .maybe_group_by(builder.group_by_option())
5450            .maybe_limit(builder.limit_ref())
5451            .maybe_page(builder.page_ref())
5452            .call()
5453            .await
5454        {
5455            Ok(resp) => resp,
5456            Err(e) => {
5457                let error = self
5458                    .handle_api_error(e, operation, model, &request_json, &state)
5459                    .await;
5460                return Err(error);
5461            }
5462        };
5463
5464        let duration = start_time.elapsed();
5465
5466        // Call after_response hook
5467        self.call_after_response(
5468            &response,
5469            operation,
5470            model,
5471            &request_json,
5472            &state,
5473            duration,
5474            None,
5475            None,
5476        )
5477        .await;
5478
5479        Ok(response)
5480    }
5481
5482    /// Get usage data for embeddings.
5483    pub async fn embeddings(&self, builder: UsageBuilder) -> Result<UsageResponse> {
5484        // Prepare interceptor context
5485        let mut state = T::default();
5486        let operation = operation_names::USAGE_EMBEDDINGS;
5487        let model = "usage";
5488        let start_time = builder.start_time();
5489        let request_json = format!("{{\"start_time\":{start_time}}}");
5490
5491        // Call before_request hook
5492        self.call_before_request(operation, model, &request_json, &mut state)
5493            .await?;
5494
5495        let start_time = Instant::now();
5496
5497        // Make the API call
5498        let response = match usage_api::usage_embeddings()
5499            .configuration(&self.client.base_configuration)
5500            .start_time(builder.start_time())
5501            .maybe_end_time(builder.end_time())
5502            .maybe_bucket_width(builder.bucket_width_str())
5503            .maybe_project_ids(builder.project_ids_option())
5504            .maybe_user_ids(builder.user_ids_option())
5505            .maybe_api_key_ids(builder.api_key_ids_option())
5506            .maybe_models(builder.models_option())
5507            .maybe_group_by(builder.group_by_option())
5508            .maybe_limit(builder.limit_ref())
5509            .maybe_page(builder.page_ref())
5510            .call()
5511            .await
5512        {
5513            Ok(resp) => resp,
5514            Err(e) => {
5515                let error = self
5516                    .handle_api_error(e, operation, model, &request_json, &state)
5517                    .await;
5518                return Err(error);
5519            }
5520        };
5521
5522        let duration = start_time.elapsed();
5523
5524        // Call after_response hook
5525        self.call_after_response(
5526            &response,
5527            operation,
5528            model,
5529            &request_json,
5530            &state,
5531            duration,
5532            None,
5533            None,
5534        )
5535        .await;
5536
5537        Ok(response)
5538    }
5539
5540    /// Get usage data for images.
5541    pub async fn images(&self, builder: UsageBuilder) -> Result<UsageResponse> {
5542        // Prepare interceptor context
5543        let mut state = T::default();
5544        let operation = operation_names::USAGE_IMAGES;
5545        let model = "usage";
5546        let start_time = builder.start_time();
5547        let request_json = format!("{{\"start_time\":{start_time}}}");
5548
5549        // Call before_request hook
5550        self.call_before_request(operation, model, &request_json, &mut state)
5551            .await?;
5552
5553        let start_time = Instant::now();
5554
5555        // Make the API call
5556        let response = match usage_api::usage_images()
5557            .configuration(&self.client.base_configuration)
5558            .start_time(builder.start_time())
5559            .maybe_end_time(builder.end_time())
5560            .maybe_bucket_width(builder.bucket_width_str())
5561            .maybe_project_ids(builder.project_ids_option())
5562            .maybe_user_ids(builder.user_ids_option())
5563            .maybe_api_key_ids(builder.api_key_ids_option())
5564            .maybe_models(builder.models_option())
5565            .maybe_group_by(builder.group_by_option())
5566            .maybe_limit(builder.limit_ref())
5567            .maybe_page(builder.page_ref())
5568            .call()
5569            .await
5570        {
5571            Ok(resp) => resp,
5572            Err(e) => {
5573                let error = self
5574                    .handle_api_error(e, operation, model, &request_json, &state)
5575                    .await;
5576                return Err(error);
5577            }
5578        };
5579
5580        let duration = start_time.elapsed();
5581
5582        // Call after_response hook
5583        self.call_after_response(
5584            &response,
5585            operation,
5586            model,
5587            &request_json,
5588            &state,
5589            duration,
5590            None,
5591            None,
5592        )
5593        .await;
5594
5595        Ok(response)
5596    }
5597
5598    /// Get usage data for moderations.
5599    pub async fn moderations(&self, builder: UsageBuilder) -> Result<UsageResponse> {
5600        // Prepare interceptor context
5601        let mut state = T::default();
5602        let operation = operation_names::USAGE_MODERATIONS;
5603        let model = "usage";
5604        let start_time = builder.start_time();
5605        let request_json = format!("{{\"start_time\":{start_time}}}");
5606
5607        // Call before_request hook
5608        self.call_before_request(operation, model, &request_json, &mut state)
5609            .await?;
5610
5611        let start_time = Instant::now();
5612
5613        // Make the API call
5614        let response = match usage_api::usage_moderations()
5615            .configuration(&self.client.base_configuration)
5616            .start_time(builder.start_time())
5617            .maybe_end_time(builder.end_time())
5618            .maybe_bucket_width(builder.bucket_width_str())
5619            .maybe_project_ids(builder.project_ids_option())
5620            .maybe_user_ids(builder.user_ids_option())
5621            .maybe_api_key_ids(builder.api_key_ids_option())
5622            .maybe_models(builder.models_option())
5623            .maybe_group_by(builder.group_by_option())
5624            .maybe_limit(builder.limit_ref())
5625            .maybe_page(builder.page_ref())
5626            .call()
5627            .await
5628        {
5629            Ok(resp) => resp,
5630            Err(e) => {
5631                let error = self
5632                    .handle_api_error(e, operation, model, &request_json, &state)
5633                    .await;
5634                return Err(error);
5635            }
5636        };
5637
5638        let duration = start_time.elapsed();
5639
5640        // Call after_response hook
5641        self.call_after_response(
5642            &response,
5643            operation,
5644            model,
5645            &request_json,
5646            &state,
5647            duration,
5648            None,
5649            None,
5650        )
5651        .await;
5652
5653        Ok(response)
5654    }
5655
5656    /// Get usage data for vector stores.
5657    pub async fn vector_stores(&self, builder: UsageBuilder) -> Result<UsageResponse> {
5658        // Prepare interceptor context
5659        let mut state = T::default();
5660        let operation = operation_names::USAGE_VECTOR_STORES;
5661        let model = "usage";
5662        let start_time = builder.start_time();
5663        let request_json = format!("{{\"start_time\":{start_time}}}");
5664
5665        // Call before_request hook
5666        self.call_before_request(operation, model, &request_json, &mut state)
5667            .await?;
5668
5669        let start_time = Instant::now();
5670
5671        // Make the API call
5672        let response = match usage_api::usage_vector_stores()
5673            .configuration(&self.client.base_configuration)
5674            .start_time(builder.start_time())
5675            .maybe_end_time(builder.end_time())
5676            .maybe_bucket_width(builder.bucket_width_str())
5677            .maybe_project_ids(builder.project_ids_option())
5678            .maybe_group_by(builder.group_by_option())
5679            .maybe_limit(builder.limit_ref())
5680            .maybe_page(builder.page_ref())
5681            .call()
5682            .await
5683        {
5684            Ok(resp) => resp,
5685            Err(e) => {
5686                let error = self
5687                    .handle_api_error(e, operation, model, &request_json, &state)
5688                    .await;
5689                return Err(error);
5690            }
5691        };
5692
5693        let duration = start_time.elapsed();
5694
5695        // Call after_response hook
5696        self.call_after_response(
5697            &response,
5698            operation,
5699            model,
5700            &request_json,
5701            &state,
5702            duration,
5703            None,
5704            None,
5705        )
5706        .await;
5707
5708        Ok(response)
5709    }
5710
5711    /// Get cost data.
5712    pub async fn costs(&self, builder: UsageBuilder) -> Result<UsageResponse> {
5713        // Prepare interceptor context
5714        let mut state = T::default();
5715        let operation = operation_names::USAGE_COSTS;
5716        let model = "usage";
5717        let start_time = builder.start_time();
5718        let request_json = format!("{{\"start_time\":{start_time}}}");
5719
5720        // Call before_request hook
5721        self.call_before_request(operation, model, &request_json, &mut state)
5722            .await?;
5723
5724        let start_time = Instant::now();
5725
5726        // Make the API call
5727        let response = match usage_api::usage_costs()
5728            .configuration(&self.client.base_configuration)
5729            .start_time(builder.start_time())
5730            .maybe_end_time(builder.end_time())
5731            .maybe_bucket_width(builder.bucket_width_str())
5732            .maybe_project_ids(builder.project_ids_option())
5733            .maybe_group_by(builder.group_by_option())
5734            .maybe_limit(builder.limit_ref())
5735            .maybe_page(builder.page_ref())
5736            .call()
5737            .await
5738        {
5739            Ok(resp) => resp,
5740            Err(e) => {
5741                let error = self
5742                    .handle_api_error(e, operation, model, &request_json, &state)
5743                    .await;
5744                return Err(error);
5745            }
5746        };
5747
5748        let duration = start_time.elapsed();
5749
5750        // Call after_response hook
5751        self.call_after_response(
5752            &response,
5753            operation,
5754            model,
5755            &request_json,
5756            &state,
5757            duration,
5758            None,
5759            None,
5760        )
5761        .await;
5762
5763        Ok(response)
5764    }
5765}