openai_ergonomic/
client.rs

1//! Client wrapper for ergonomic `OpenAI` API access.
2//!
3//! This module provides a high-level client that wraps the base `OpenAI` client
4//! with ergonomic builders and response handling.
5
6// Allow this lint at module level for interceptor helper methods
7// that require many parameters for comprehensive context passing
8#![allow(clippy::too_many_arguments)]
9
10use crate::interceptor::{
11    AfterResponseContext, BeforeRequestContext, ErrorContext, InterceptorChain,
12};
13use crate::semantic_conventions::operation_names;
14use crate::{
15    builders::{
16        assistants::{AssistantBuilder, MessageBuilder, RunBuilder},
17        audio::{
18            SpeechBuilder, TranscriptionBuilder, TranscriptionRequest, TranslationBuilder,
19            TranslationRequest,
20        },
21        completions::CompletionsBuilder,
22        embeddings::EmbeddingsBuilder,
23        files::{FileDeleteBuilder, FileListBuilder, FileRetrievalBuilder, FileUploadBuilder},
24        images::{
25            ImageEditBuilder, ImageEditRequest, ImageGenerationBuilder, ImageVariationBuilder,
26            ImageVariationRequest,
27        },
28        models::{ModelDeleteBuilder, ModelRetrievalBuilder},
29        moderations::ModerationBuilder,
30        threads::ThreadRequestBuilder,
31        uploads::UploadBuilder,
32        usage::UsageBuilder,
33        Builder, ChatCompletionBuilder, ResponsesBuilder,
34    },
35    config::Config,
36    errors::Result,
37    responses::ChatCompletionResponseWrapper,
38    Error, UploadPurpose,
39};
40use openai_client_base::apis::Error as ApiError;
41use openai_client_base::{
42    apis::{
43        assistants_api, audio_api, batch_api, chat_api, completions_api,
44        configuration::Configuration, embeddings_api, files_api, fine_tuning_api, images_api,
45        models_api, moderations_api, uploads_api, usage_api, vector_stores_api,
46    },
47    models::{
48        AssistantObject, Batch, CreateBatchRequest, CreateChatCompletionRequest,
49        CreateCompletionResponse, CreateEmbeddingResponse, CreateFineTuningJobRequest,
50        CreateModerationResponse, CreateTranscription200Response, CreateTranslation200Response,
51        DeleteAssistantResponse, DeleteFileResponse, DeleteModelResponse,
52        DeleteVectorStoreFileResponse, DeleteVectorStoreResponse, FineTuningJob, ImagesResponse,
53        ListAssistantsResponse, ListBatchesResponse, ListFilesResponse,
54        ListFineTuningJobCheckpointsResponse, ListFineTuningJobEventsResponse,
55        ListMessagesResponse, ListModelsResponse, ListPaginatedFineTuningJobsResponse,
56        ListRunStepsResponse, ListRunsResponse, ListVectorStoreFilesResponse,
57        ListVectorStoresResponse, MessageObject, Model, OpenAiFile, RunObject, RunStepObject,
58        SubmitToolOutputsRunRequestToolOutputsInner, ThreadObject, Upload, UsageResponse,
59        VectorStoreFileObject, VectorStoreObject, VectorStoreSearchResultsPage,
60    },
61};
62use reqwest_middleware::ClientWithMiddleware as HttpClient;
63use std::sync::Arc;
64use std::time::Instant;
65use tokio::time::Duration;
66
// Helper macro to generate interceptor helper methods for sub-clients.
//
// Each sub-client (e.g. `AudioClient`) holds a `client` field borrowing the
// parent `Client<T>`; this macro stamps out the same three lifecycle helpers
// that `Client<T>` itself defines, forwarding to `self.client.interceptors`.
// NOTE(review): the bodies duplicate `Client::{call_before_request,
// handle_api_error, call_after_response}` below — keep the two in sync.
macro_rules! impl_interceptor_helpers {
    ($client_type:ty) => {
        impl<T: Default + Send + Sync> $client_type {
            /// Helper to call `before_request` hooks.
            ///
            /// No-op when the interceptor chain is empty. If a hook fails,
            /// `on_error` hooks are notified and the error is returned so the
            /// caller aborts the request before it is sent.
            async fn call_before_request(
                &self,
                operation: &str,
                model: &str,
                request_json: &str,
                state: &mut T,
            ) -> Result<()> {
                if !self.client.interceptors.is_empty() {
                    let mut ctx = BeforeRequestContext {
                        operation,
                        model,
                        request_json,
                        state,
                    };
                    if let Err(e) = self.client.interceptors.before_request(&mut ctx).await {
                        let error_ctx = ErrorContext {
                            operation,
                            model: Some(model),
                            request_json: Some(request_json),
                            error: &e,
                            state: Some(state),
                        };
                        self.client.interceptors.on_error(&error_ctx).await;
                        return Err(e);
                    }
                }
                Ok(())
            }

            /// Helper to handle API errors with interceptor hooks.
            ///
            /// Maps the generated-client error into this crate's `Error`,
            /// notifies `on_error` hooks (if any interceptors are registered),
            /// and returns the mapped error for the caller to propagate.
            async fn handle_api_error<E>(
                &self,
                error: openai_client_base::apis::Error<E>,
                operation: &str,
                model: &str,
                request_json: &str,
                state: &T,
            ) -> Error {
                let error = map_api_error(error);

                if !self.client.interceptors.is_empty() {
                    let error_ctx = ErrorContext {
                        operation,
                        model: Some(model),
                        request_json: Some(request_json),
                        error: &error,
                        state: Some(state),
                    };
                    self.client.interceptors.on_error(&error_ctx).await;
                }

                error
            }

            /// Helper to call `after_response` hooks.
            ///
            /// Serializes the response to JSON for the hook context. Hook
            /// failures are logged at `warn` level but never propagated, so a
            /// misbehaving interceptor cannot fail a successful API call.
            async fn call_after_response<R>(
                &self,
                response: &R,
                operation: &str,
                model: &str,
                request_json: &str,
                state: &T,
                duration: std::time::Duration,
                input_tokens: Option<i64>,
                output_tokens: Option<i64>,
            ) where
                R: serde::Serialize + Sync,
            {
                if !self.client.interceptors.is_empty() {
                    let response_json = serde_json::to_string(response).unwrap_or_default();
                    let ctx = AfterResponseContext {
                        operation,
                        model,
                        request_json,
                        response_json: &response_json,
                        duration,
                        input_tokens,
                        output_tokens,
                        state,
                    };
                    if let Err(e) = self.client.interceptors.after_response(&ctx).await {
                        tracing::warn!("Interceptor after_response failed: {}", e);
                    }
                }
            }
        }
    };
}
160
/// Builder for creating a `Client` with interceptors.
///
/// The builder pattern allows you to configure interceptors before the client
/// is created. Once built, the interceptors are immutable, eliminating the need
/// for runtime locking.
///
/// # Example
///
/// ```rust,ignore
/// let client = Client::from_env()?
///     .with_interceptor(Box::new(my_interceptor))
///     .build();
/// ```
pub struct ClientBuilder<T = ()> {
    /// Shared client configuration (API key, base URL, defaults).
    config: Arc<Config>,
    /// Middleware-wrapped HTTP client the built `Client` will use.
    http: HttpClient,
    /// Generated-client (`openai-client-base`) configuration derived from `config`.
    base_configuration: Configuration,
    /// Interceptors to install on the built `Client`; mutable until `build()`.
    interceptors: InterceptorChain<T>,
}
180
/// Main client for interacting with the `OpenAI` API.
///
/// The client provides ergonomic methods for all `OpenAI` API endpoints,
/// with built-in retry logic, rate limiting, error handling, and support
/// for middleware through interceptors.
///
/// Use `Client::from_env()` or `Client::builder()` to create a builder, then
/// call `.build()` to create the client.
///
/// Cloning is cheap: the configuration and interceptor chain are shared
/// behind `Arc`s.
///
/// # Example
///
/// ```rust,ignore
/// # use openai_ergonomic::{Client, Config};
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::from_env()?.build();
/// let response = client.send_chat(client.chat_simple("Hello!")).await?;
/// # Ok(())
/// # }
/// ```
#[derive(Clone)]
pub struct Client<T = ()> {
    /// Shared client configuration (API key, base URL, defaults).
    config: Arc<Config>,
    /// Middleware-wrapped HTTP client used for all requests.
    http: HttpClient,
    /// Generated-client (`openai-client-base`) configuration derived from `config`.
    base_configuration: Configuration,
    /// Immutable interceptor chain, shared across clones.
    interceptors: Arc<InterceptorChain<T>>,
}
208
209// Custom Debug implementation since InterceptorChain doesn't implement Debug
210impl<T> std::fmt::Debug for Client<T> {
211    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
212        f.debug_struct("Client")
213            .field("config", &self.config)
214            .field("http", &"<HttpClient>")
215            .field("base_configuration", &"<Configuration>")
216            .field("interceptors", &"<InterceptorChain>")
217            .finish()
218    }
219}
220
// Implementation for ClientBuilder with default state type ()
impl ClientBuilder {
    /// Create a new client builder with the given configuration.
    ///
    /// Wires up the HTTP stack and the generated `openai-client-base`
    /// `Configuration`:
    /// - uses the caller-supplied HTTP client when `config.http_client()` is
    ///   set, otherwise builds a default `reqwest` client (120s timeout,
    ///   `openai-ergonomic/<version>` user agent);
    /// - for Azure `OpenAI`, installs `AzureAuthMiddleware` instead of setting
    ///   a bearer token (auth is handled per-request by the middleware);
    /// - for standard `OpenAI`, sets the bearer access token from the API key;
    /// - applies any custom base URL and embeds the organization id in the
    ///   user agent string.
    ///
    /// # Errors
    ///
    /// Returns an error if the default `reqwest` client cannot be built.
    pub fn new(config: Config) -> Result<Self> {
        // Check if we're using Azure OpenAI
        let is_azure = config.is_azure();

        // Use custom HTTP client if provided, otherwise build a default one
        let http_client = if let Some(client) = config.http_client() {
            client.clone()
        } else {
            let reqwest_client = reqwest::Client::builder()
                .timeout(Duration::from_secs(120)) // Default timeout: 120 seconds
                .user_agent(format!("openai-ergonomic/{}", env!("CARGO_PKG_VERSION")))
                .build()
                .map_err(Error::Http)?;

            let mut client_builder = reqwest_middleware::ClientBuilder::new(reqwest_client);

            // Add Azure authentication middleware if using Azure OpenAI
            if is_azure {
                let azure_middleware = crate::azure_middleware::AzureAuthMiddleware::new(
                    config.api_key().to_string(),
                    config.azure_api_version().map(String::from),
                    config.azure_deployment().map(String::from),
                );
                client_builder = client_builder.with(azure_middleware);
            }

            client_builder.build()
        };

        // Create openai-client-base configuration
        let mut base_configuration = Configuration::new();

        // Set the custom HTTP client (with Azure middleware if configured)
        base_configuration.client = http_client.clone();

        // For Azure OpenAI, we don't use bearer token (handled by middleware)
        // For standard OpenAI, use bearer token
        if !is_azure {
            base_configuration.bearer_access_token = Some(config.api_key().to_string());
        }

        if let Some(base_url) = config.base_url() {
            base_configuration.base_path = base_url.to_string();
        }

        if let Some(org_id) = config.organization_id() {
            // NOTE(review): the organization id is embedded in the user-agent
            // string rather than sent as a dedicated header — confirm this is
            // how the base configuration expects to receive it.
            base_configuration.user_agent = Some(format!(
                "openai-ergonomic/{} org/{}",
                env!("CARGO_PKG_VERSION"),
                org_id
            ));
        }

        Ok(Self {
            config: Arc::new(config),
            http: http_client,
            base_configuration,
            interceptors: InterceptorChain::new(),
        })
    }

    /// Create a new client builder with default configuration from environment variables.
    ///
    /// # Errors
    ///
    /// Returns an error if the environment configuration cannot be loaded or
    /// the HTTP client cannot be built.
    pub fn from_env() -> Result<Self> {
        Self::new(Config::from_env()?)
    }
}
290
291// Implementation for ClientBuilder with any state type
292impl<T> ClientBuilder<T> {
293    /// Add an interceptor to the builder.
294    ///
295    /// Creates a new builder with the interceptor's state type. The interceptor provides
296    /// hooks into the request/response lifecycle for observability, logging, and custom
297    /// processing.
298    ///
299    /// Note: This method transforms the builder's type, so it can only be called once.
300    /// For multiple interceptors with the same state type, use a composite interceptor
301    /// or call this method multiple times (each will replace the previous chain).
302    ///
303    /// # Examples
304    ///
305    /// Simple interceptor (no state):
306    /// ```rust,ignore
307    /// use openai_ergonomic::{Client, Interceptor, BeforeRequestContext};
308    ///
309    /// struct LoggingInterceptor;
310    ///
311    /// #[async_trait::async_trait]
312    /// impl Interceptor for LoggingInterceptor {
313    ///     async fn before_request(&self, ctx: &mut BeforeRequestContext<'_>) -> Result<()> {
314    ///         println!("Calling {}", ctx.operation);
315    ///         Ok(())
316    ///     }
317    /// }
318    ///
319    /// let client = Client::from_env()?
320    ///     .with_interceptor(Box::new(LoggingInterceptor))
321    ///     .build();
322    /// ```
323    ///
324    /// Interceptor with custom state:
325    /// ```rust,ignore
326    /// use openai_ergonomic::{Client, LangfuseInterceptor, LangfuseState};
327    ///
328    /// let interceptor = LangfuseInterceptor::new(tracer, config);
329    /// let client: Client<LangfuseState<_>> = Client::from_env()?
330    ///     .with_interceptor(Box::new(interceptor))
331    ///     .build();
332    /// ```
333    #[must_use]
334    pub fn with_interceptor<U>(
335        self,
336        interceptor: Box<dyn crate::interceptor::Interceptor<U>>,
337    ) -> ClientBuilder<U> {
338        let mut new_chain = InterceptorChain::new();
339        new_chain.add(interceptor);
340
341        ClientBuilder {
342            config: self.config,
343            http: self.http,
344            base_configuration: self.base_configuration,
345            interceptors: new_chain,
346        }
347    }
348
349    /// Add an interceptor that uses the same state type.
350    ///
351    /// This allows chaining multiple interceptors with the same state type without
352    /// type transformation.
353    ///
354    /// # Example
355    ///
356    /// ```rust,ignore
357    /// let client = Client::from_env()?
358    ///     .add_interceptor(Box::new(logger))
359    ///     .add_interceptor(Box::new(metrics))
360    ///     .build();
361    /// ```
362    #[must_use]
363    pub fn add_interceptor(
364        mut self,
365        interceptor: Box<dyn crate::interceptor::Interceptor<T>>,
366    ) -> Self {
367        self.interceptors.add(interceptor);
368        self
369    }
370
371    /// Build the client with the configured interceptors.
372    ///
373    /// After building, the interceptors are immutable, eliminating runtime locking overhead.
374    #[must_use]
375    pub fn build(self) -> Client<T> {
376        Client {
377            config: self.config,
378            http: self.http,
379            base_configuration: self.base_configuration,
380            interceptors: Arc::new(self.interceptors),
381        }
382    }
383}
384
385// Implementation for Client
386impl Client {
387    /// Create a new client builder with the given configuration.
388    pub fn builder(config: Config) -> Result<ClientBuilder> {
389        ClientBuilder::new(config)
390    }
391
392    /// Create a new client builder with default configuration from environment variables.
393    pub fn from_env() -> Result<ClientBuilder> {
394        ClientBuilder::from_env()
395    }
396}
397
398impl<T> Client<T> {
399    /// Get a reference to the client configuration.
400    pub fn config(&self) -> &Config {
401        &self.config
402    }
403
404    /// Get a reference to the HTTP client.
405    pub fn http_client(&self) -> &HttpClient {
406        &self.http
407    }
408}
409
// Interceptor helper methods.
// NOTE(review): these bodies are duplicated by the `impl_interceptor_helpers!`
// macro above for sub-clients — keep the two in sync when editing.
impl<T: Default + Send + Sync> Client<T> {
    /// Helper to call `before_request` hooks.
    ///
    /// No-op when the interceptor chain is empty. If a hook fails, `on_error`
    /// hooks are notified and the error is returned so the caller aborts the
    /// request before it is sent.
    async fn call_before_request(
        &self,
        operation: &str,
        model: &str,
        request_json: &str,
        state: &mut T,
    ) -> Result<()> {
        if !self.interceptors.is_empty() {
            let mut ctx = BeforeRequestContext {
                operation,
                model,
                request_json,
                state,
            };
            if let Err(e) = self.interceptors.before_request(&mut ctx).await {
                let error_ctx = ErrorContext {
                    operation,
                    model: Some(model),
                    request_json: Some(request_json),
                    error: &e,
                    state: Some(state),
                };
                self.interceptors.on_error(&error_ctx).await;
                return Err(e);
            }
        }
        Ok(())
    }

    /// Helper to handle API errors with interceptor hooks.
    ///
    /// Maps the generated-client error into this crate's `Error`, notifies
    /// `on_error` hooks (if any interceptors are registered), and returns the
    /// mapped error for the caller to propagate.
    async fn handle_api_error<E>(
        &self,
        error: openai_client_base::apis::Error<E>,
        operation: &str,
        model: &str,
        request_json: &str,
        state: &T,
    ) -> Error {
        let error = map_api_error(error);

        if !self.interceptors.is_empty() {
            let error_ctx = ErrorContext {
                operation,
                model: Some(model),
                request_json: Some(request_json),
                error: &error,
                state: Some(state),
            };
            self.interceptors.on_error(&error_ctx).await;
        }

        error
    }

    /// Helper to call `after_response` hooks.
    ///
    /// Serializes the response to JSON for the hook context. Hook failures are
    /// logged at `warn` level but never propagated, so a misbehaving
    /// interceptor cannot fail a successful API call.
    async fn call_after_response<R>(
        &self,
        response: &R,
        operation: &str,
        model: &str,
        request_json: &str,
        state: &T,
        duration: std::time::Duration,
        input_tokens: Option<i64>,
        output_tokens: Option<i64>,
    ) where
        R: serde::Serialize + Sync,
    {
        if !self.interceptors.is_empty() {
            let response_json = serde_json::to_string(response).unwrap_or_default();
            let ctx = AfterResponseContext {
                operation,
                model,
                request_json,
                response_json: &response_json,
                duration,
                input_tokens,
                output_tokens,
                state,
            };
            if let Err(e) = self.interceptors.after_response(&ctx).await {
                tracing::warn!("Interceptor after_response failed: {}", e);
            }
        }
    }
}
499
// Chat API methods
impl<T: Default + Send + Sync> Client<T> {
    /// Create a chat completion builder.
    ///
    /// Uses the configured default model, falling back to "gpt-4" when none
    /// is set.
    pub fn chat(&self) -> ChatCompletionBuilder {
        let model = self.config.default_model().unwrap_or("gpt-4");
        ChatCompletionBuilder::new(model)
    }

    /// Create a chat completion with a simple user message.
    pub fn chat_simple(&self, message: impl Into<String>) -> ChatCompletionBuilder {
        self.chat().user(message)
    }

    /// Create a chat completion with system and user messages.
    pub fn chat_with_system(
        &self,
        system: impl Into<String>,
        user: impl Into<String>,
    ) -> ChatCompletionBuilder {
        self.chat().system(system).user(user)
    }

    /// Execute a chat completion request.
    ///
    /// Runs the full interceptor lifecycle around the API call:
    /// `before_request` hooks first (a hook error aborts the request), then
    /// the call itself, then either `on_error` or `after_response` hooks.
    /// Prompt/completion token counts from the response's `usage` field are
    /// forwarded to `after_response`.
    ///
    /// # Errors
    ///
    /// Returns an error if a `before_request` interceptor fails or the
    /// underlying API call fails.
    pub async fn execute_chat(
        &self,
        request: CreateChatCompletionRequest,
    ) -> Result<ChatCompletionResponseWrapper> {
        let mut state = T::default();
        let operation = operation_names::CHAT;
        let model = request.model.clone();
        let request_json = serde_json::to_string(&request).unwrap_or_default();

        // Call before_request hook
        self.call_before_request(operation, &model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match chat_api::create_chat_completion()
            .configuration(&self.base_configuration)
            .create_chat_completion_request(request)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, &model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            &model,
            &request_json,
            &state,
            duration,
            response.usage.as_ref().map(|u| i64::from(u.prompt_tokens)),
            response
                .usage
                .as_ref()
                .map(|u| i64::from(u.completion_tokens)),
        )
        .await;

        Ok(ChatCompletionResponseWrapper::new(response))
    }

    /// Build and execute a chat completion builder.
    ///
    /// # Errors
    ///
    /// Returns an error if the builder fails validation or the request fails.
    pub async fn send_chat(
        &self,
        builder: ChatCompletionBuilder,
    ) -> Result<ChatCompletionResponseWrapper> {
        let request = builder.build()?;
        self.execute_chat(request).await
    }
}
584
585// Responses API methods
586impl<T: Default + Send + Sync> Client<T> {
587    /// Create a responses builder for structured outputs.
588    pub fn responses(&self) -> ResponsesBuilder {
589        let model = self.config.default_model().unwrap_or("gpt-4");
590        ResponsesBuilder::new(model)
591    }
592
593    /// Create a simple responses request with a user message.
594    pub fn responses_simple(&self, message: impl Into<String>) -> ResponsesBuilder {
595        self.responses().user(message)
596    }
597
598    /// Execute a responses request.
599    pub async fn execute_responses(
600        &self,
601        request: CreateChatCompletionRequest,
602    ) -> Result<ChatCompletionResponseWrapper> {
603        // The Responses API uses the same underlying endpoint as chat
604        self.execute_chat(request).await
605    }
606
607    /// Execute a responses builder.
608    pub async fn send_responses(
609        &self,
610        builder: ResponsesBuilder,
611    ) -> Result<ChatCompletionResponseWrapper> {
612        let request = builder.build()?;
613        self.execute_responses(request).await
614    }
615}
616
617// TODO: Add methods for other API endpoints
618impl<T: Default + Send + Sync> Client<T> {
619    /// Get assistants client (placeholder).
620    #[must_use]
621    pub fn assistants(&self) -> AssistantsClient<'_, T> {
622        AssistantsClient { client: self }
623    }
624
625    /// Get audio client (placeholder).
626    #[must_use]
627    pub fn audio(&self) -> AudioClient<'_, T> {
628        AudioClient { client: self }
629    }
630
631    /// Get embeddings client (placeholder).
632    #[must_use]
633    pub fn embeddings(&self) -> EmbeddingsClient<'_, T> {
634        EmbeddingsClient { client: self }
635    }
636
637    /// Get images client (placeholder).
638    #[must_use]
639    pub fn images(&self) -> ImagesClient<'_, T> {
640        ImagesClient { client: self }
641    }
642
643    /// Get files client (placeholder).
644    #[must_use]
645    pub fn files(&self) -> FilesClient<'_, T> {
646        FilesClient { client: self }
647    }
648
649    /// Get fine-tuning client (placeholder).
650    #[must_use]
651    pub fn fine_tuning(&self) -> FineTuningClient<'_, T> {
652        FineTuningClient { client: self }
653    }
654
655    /// Get batch client (placeholder).
656    #[must_use]
657    pub fn batch(&self) -> BatchClient<'_, T> {
658        BatchClient { client: self }
659    }
660
661    /// Get vector stores client (placeholder).
662    #[must_use]
663    pub fn vector_stores(&self) -> VectorStoresClient<'_, T> {
664        VectorStoresClient { client: self }
665    }
666
667    /// Get moderations client (placeholder).
668    #[must_use]
669    pub fn moderations(&self) -> ModerationsClient<'_, T> {
670        ModerationsClient { client: self }
671    }
672
673    /// Get threads client (placeholder).
674    #[must_use]
675    pub fn threads(&self) -> ThreadsClient<'_, T> {
676        ThreadsClient { client: self }
677    }
678
679    /// Get uploads client (placeholder).
680    #[must_use]
681    pub fn uploads(&self) -> UploadsClient<'_, T> {
682        UploadsClient { client: self }
683    }
684
685    /// Get models client.
686    #[must_use]
687    pub fn models(&self) -> ModelsClient<'_, T> {
688        ModelsClient { client: self }
689    }
690
691    /// Get completions client.
692    #[must_use]
693    pub fn completions(&self) -> CompletionsClient<'_, T> {
694        CompletionsClient { client: self }
695    }
696
697    /// Get usage client.
698    #[must_use]
699    pub fn usage(&self) -> UsageClient<'_, T> {
700        UsageClient { client: self }
701    }
702}
703
impl<T: Default + Send + Sync> AudioClient<'_, T> {
    /// Create a speech builder for text-to-speech generation.
    #[must_use]
    pub fn speech(
        &self,
        model: impl Into<String>,
        input: impl Into<String>,
        voice: impl Into<String>,
    ) -> SpeechBuilder {
        SpeechBuilder::new(model, input, voice)
    }

    /// Submit a speech synthesis request and return binary audio data.
    ///
    /// Runs the interceptor lifecycle around the call. Because the endpoint
    /// returns raw audio bytes rather than JSON, `after_response` hooks
    /// receive a small synthetic payload describing the byte count.
    ///
    /// # Errors
    ///
    /// Returns an error if the builder fails validation, a `before_request`
    /// interceptor fails, the API call fails, or the response body cannot be
    /// read.
    pub async fn create_speech(&self, builder: SpeechBuilder) -> Result<Vec<u8>> {
        let request = builder.build()?;
        let mut state = T::default();
        let operation = operation_names::AUDIO_SPEECH;
        let model = request.model.clone();
        let request_json = serde_json::to_string(&request).unwrap_or_default();

        // Call before_request hook
        self.call_before_request(operation, &model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match audio_api::create_speech()
            .configuration(&self.client.base_configuration)
            .create_speech_request(request)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, &model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let bytes = response.bytes().await.map_err(Error::Http)?;
        let duration = start_time.elapsed();

        // Call after_response hook (note: no JSON response for audio)
        // NOTE(review): `response_json` is handed to the hook as the
        // serializable response, so hooks see it re-serialized as a quoted
        // JSON string value — confirm this is the intended shape.
        let response_json = format!("{{\"size\": {}}}", bytes.len());
        self.call_after_response(
            &response_json,
            operation,
            &model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(bytes.to_vec())
    }

    /// Create a transcription builder for speech-to-text workflows.
    #[must_use]
    pub fn transcription(
        &self,
        file: impl AsRef<std::path::Path>,
        model: impl Into<String>,
    ) -> TranscriptionBuilder {
        TranscriptionBuilder::new(file, model)
    }

    /// Submit a transcription request.
    ///
    /// Runs the interceptor lifecycle around the multipart upload. The hook
    /// request payload is a hand-built JSON stub (the multipart request type
    /// is not serializable), with the audio file elided.
    ///
    /// # Errors
    ///
    /// Returns an error if the builder fails validation, a `before_request`
    /// interceptor fails, or the API call fails.
    pub async fn create_transcription(
        &self,
        builder: TranscriptionBuilder,
    ) -> Result<CreateTranscription200Response> {
        let request = builder.build()?;
        let model_str = request.model.clone();
        let mut state = T::default();
        let operation = operation_names::AUDIO_TRANSCRIPTION;
        // TranscriptionRequest doesn't implement Serialize, so we'll create a simple JSON representation
        let request_json = format!(r#"{{"model":"{model_str}","file":"<audio_file>"}}"#);

        // Call before_request hook
        self.call_before_request(operation, &model_str, &request_json, &mut state)
            .await?;

        // Destructure so each field can be forwarded to the generated API
        // builder individually.
        let TranscriptionRequest {
            file,
            model,
            language,
            prompt,
            response_format,
            temperature,
            stream,
            chunking_strategy,
            timestamp_granularities,
            include,
        } = request;

        // The generated client takes granularities as plain strings.
        let timestamp_strings = timestamp_granularities.as_ref().map(|values| {
            values
                .iter()
                .map(|granularity| granularity.as_str().to_string())
                .collect::<Vec<_>>()
        });

        let start_time = Instant::now();

        // Make the API call
        let response = match audio_api::create_transcription()
            .configuration(&self.client.base_configuration)
            .file(file)
            .model(&model)
            .maybe_language(language.as_deref())
            .maybe_prompt(prompt.as_deref())
            .maybe_response_format(response_format)
            .maybe_temperature(temperature)
            .maybe_stream(stream)
            .maybe_chunking_strategy(chunking_strategy)
            .maybe_timestamp_granularities(timestamp_strings)
            .maybe_include(include)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, &model_str, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            &model_str,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Create a translation builder for audio-to-English translation.
    #[must_use]
    pub fn translation(
        &self,
        file: impl AsRef<std::path::Path>,
        model: impl Into<String>,
    ) -> TranslationBuilder {
        TranslationBuilder::new(file, model)
    }

    /// Submit an audio translation request.
    ///
    /// Same interceptor lifecycle and JSON-stub request payload as
    /// [`Self::create_transcription`].
    ///
    /// # Errors
    ///
    /// Returns an error if the builder fails validation, a `before_request`
    /// interceptor fails, or the API call fails.
    pub async fn create_translation(
        &self,
        builder: TranslationBuilder,
    ) -> Result<CreateTranslation200Response> {
        let request = builder.build()?;
        let model_str = request.model.clone();

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::AUDIO_TRANSLATION;
        let request_json = format!(r#"{{"model":"{model_str}","file":"<audio_file>"}}"#);

        // Call before_request hook
        self.call_before_request(operation, &model_str, &request_json, &mut state)
            .await?;

        // Destructure so each field can be forwarded individually.
        let TranslationRequest {
            file,
            model,
            prompt,
            response_format,
            temperature,
        } = request;

        // The generated client takes the response format as a string.
        let response_format_owned = response_format.map(|format| format.to_string());

        let start_time = Instant::now();

        // Make the API call
        let response = match audio_api::create_translation()
            .configuration(&self.client.base_configuration)
            .file(file)
            .model(&model)
            .maybe_prompt(prompt.as_deref())
            .maybe_response_format(response_format_owned.as_deref())
            .maybe_temperature(temperature)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, &model_str, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            &model_str,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }
}
934
935impl<T: Default + Send + Sync> EmbeddingsClient<'_, T> {
936    /// Start a builder for creating embeddings requests with the given model.
937    #[must_use]
938    pub fn builder(&self, model: impl Into<String>) -> EmbeddingsBuilder {
939        EmbeddingsBuilder::new(model)
940    }
941
942    /// Convenience helper for embedding a single string input.
943    #[must_use]
944    pub fn text(&self, model: impl Into<String>, input: impl Into<String>) -> EmbeddingsBuilder {
945        self.builder(model).input_text(input)
946    }
947
948    /// Convenience helper for embedding a single tokenized input.
949    #[must_use]
950    pub fn tokens<I>(&self, model: impl Into<String>, tokens: I) -> EmbeddingsBuilder
951    where
952        I: IntoIterator<Item = i32>,
953    {
954        self.builder(model).input_tokens(tokens)
955    }
956
957    /// Execute an embeddings request built with [`EmbeddingsBuilder`].
958    pub async fn create(&self, builder: EmbeddingsBuilder) -> Result<CreateEmbeddingResponse> {
959        let request = builder.build()?;
960
961        // Prepare interceptor context
962        let mut state = T::default();
963        let operation = operation_names::EMBEDDINGS;
964        let model = request.model.clone();
965        let request_json = serde_json::to_string(&request).unwrap_or_default();
966
967        // Call before_request hook
968        self.call_before_request(operation, &model, &request_json, &mut state)
969            .await?;
970
971        let start_time = Instant::now();
972
973        // Make the API call
974        let response = match embeddings_api::create_embedding()
975            .configuration(&self.client.base_configuration)
976            .create_embedding_request(request)
977            .call()
978            .await
979        {
980            Ok(resp) => resp,
981            Err(e) => {
982                let error = self
983                    .handle_api_error(e, operation, &model, &request_json, &state)
984                    .await;
985                return Err(error);
986            }
987        };
988
989        let duration = start_time.elapsed();
990
991        // Call after_response hook
992        self.call_after_response(
993            &response,
994            operation,
995            &model,
996            &request_json,
997            &state,
998            duration,
999            Some(i64::from(response.usage.prompt_tokens)),
1000            Some(i64::from(response.usage.total_tokens)),
1001        )
1002        .await;
1003
1004        Ok(response)
1005    }
1006}
1007
impl<T: Default + Send + Sync> ImagesClient<'_, T> {
    /// Create a builder for image generation requests.
    ///
    /// The returned [`ImageGenerationBuilder`] is submitted via [`Self::create`].
    #[must_use]
    pub fn generate(&self, prompt: impl Into<String>) -> ImageGenerationBuilder {
        ImageGenerationBuilder::new(prompt)
    }

    /// Execute an image generation request.
    ///
    /// Runs the interceptor `before_request`/`after_response` hooks around the
    /// underlying API call.
    ///
    /// # Errors
    ///
    /// Returns an error if the builder fails validation, an interceptor
    /// rejects the request, or the API call fails.
    pub async fn create(&self, builder: ImageGenerationBuilder) -> Result<ImagesResponse> {
        let request = builder.build()?;

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::IMAGE_GENERATION;
        // Model label for interceptor context; falls back to "dall-e-2" when
        // the builder did not set one (presumably the API default — confirm).
        let model = request
            .model
            .as_ref()
            .map_or_else(|| "dall-e-2".to_string(), ToString::to_string);
        let request_json = serde_json::to_string(&request).unwrap_or_default();

        // Call before_request hook
        self.call_before_request(operation, &model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match images_api::create_image()
            .configuration(&self.client.base_configuration)
            .create_image_request(request)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                // Let interceptors observe/translate the failure first.
                let error = self
                    .handle_api_error(e, operation, &model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook (no token counts apply to image endpoints).
        self.call_after_response(
            &response,
            operation,
            &model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Create an image edit builder using a base image and prompt.
    #[must_use]
    pub fn edit(
        &self,
        image: impl AsRef<std::path::Path>,
        prompt: impl Into<String>,
    ) -> ImageEditBuilder {
        ImageEditBuilder::new(image, prompt)
    }

    /// Execute an image edit request.
    ///
    /// Runs the interceptor `before_request`/`after_response` hooks around the
    /// underlying API call.
    ///
    /// # Errors
    ///
    /// Returns an error if the builder fails validation, an interceptor
    /// rejects the request, or the API call fails.
    pub async fn create_edit(&self, builder: ImageEditBuilder) -> Result<ImagesResponse> {
        let request = builder.build()?;
        // Model label for interceptor context (same "dall-e-2" fallback as above).
        let model_str = request
            .model
            .as_ref()
            .map_or_else(|| "dall-e-2".to_string(), ToString::to_string);

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::IMAGE_EDIT;
        // Compact hand-built summary: the request includes the image input, so
        // the full payload is not serialized for interceptor context.
        let request_json = format!(
            r#"{{"prompt":"{}","model":"{}"}}"#,
            request.prompt, model_str
        );

        // Call before_request hook
        self.call_before_request(operation, &model_str, &request_json, &mut state)
            .await?;

        // Destructure so each field can be handed to the generated builder.
        let ImageEditRequest {
            image,
            prompt,
            mask,
            background,
            model,
            n,
            size,
            response_format,
            output_format,
            output_compression,
            user,
            input_fidelity,
            stream,
            partial_images,
            quality,
        } = request;

        let start_time = Instant::now();

        // Make the API call
        let response = match images_api::create_image_edit()
            .configuration(&self.client.base_configuration)
            .image(image)
            .prompt(&prompt)
            .maybe_mask(mask)
            .maybe_background(background.as_deref())
            .maybe_model(model.as_deref())
            .maybe_n(n)
            .maybe_size(size.as_deref())
            .maybe_response_format(response_format.as_deref())
            .maybe_output_format(output_format.as_deref())
            .maybe_output_compression(output_compression)
            .maybe_user(user.as_deref())
            .maybe_input_fidelity(input_fidelity)
            .maybe_stream(stream)
            .maybe_partial_images(partial_images)
            .maybe_quality(quality.as_deref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                // Let interceptors observe/translate the failure first.
                let error = self
                    .handle_api_error(e, operation, &model_str, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook (no token counts apply to image endpoints).
        self.call_after_response(
            &response,
            operation,
            &model_str,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Create an image variation builder.
    #[must_use]
    pub fn variation(&self, image: impl AsRef<std::path::Path>) -> ImageVariationBuilder {
        ImageVariationBuilder::new(image)
    }

    /// Execute an image variation request.
    ///
    /// Runs the interceptor `before_request`/`after_response` hooks around the
    /// underlying API call.
    ///
    /// # Errors
    ///
    /// Returns an error if the builder fails validation, an interceptor
    /// rejects the request, or the API call fails.
    pub async fn create_variation(&self, builder: ImageVariationBuilder) -> Result<ImagesResponse> {
        let request = builder.build()?;
        // Model label for interceptor context (same "dall-e-2" fallback as above).
        let model_str = request
            .model
            .as_ref()
            .map_or_else(|| "dall-e-2".to_string(), ToString::to_string);

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::IMAGE_VARIATION;
        // Compact summary only; the request includes the image input.
        let request_json = format!(r#"{{"model":"{model_str}"}}"#);

        // Call before_request hook
        self.call_before_request(operation, &model_str, &request_json, &mut state)
            .await?;

        // Destructure so each field can be handed to the generated builder.
        let ImageVariationRequest {
            image,
            model,
            n,
            response_format,
            size,
            user,
        } = request;

        let start_time = Instant::now();

        // Make the API call
        let response = match images_api::create_image_variation()
            .configuration(&self.client.base_configuration)
            .image(image)
            .maybe_model(model.as_deref())
            .maybe_n(n)
            .maybe_response_format(response_format.as_deref())
            .maybe_size(size.as_deref())
            .maybe_user(user.as_deref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                // Let interceptors observe/translate the failure first.
                let error = self
                    .handle_api_error(e, operation, &model_str, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook (no token counts apply to image endpoints).
        self.call_after_response(
            &response,
            operation,
            &model_str,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }
}
1239
1240impl<T: Default + Send + Sync> ThreadsClient<'_, T> {
1241    /// Start building a new thread request.
1242    #[must_use]
1243    pub fn builder(&self) -> ThreadRequestBuilder {
1244        ThreadRequestBuilder::new()
1245    }
1246
1247    /// Create a thread using the provided builder.
1248    pub async fn create(&self, builder: ThreadRequestBuilder) -> Result<ThreadObject> {
1249        let request = builder.build()?;
1250
1251        // Prepare interceptor context
1252        let mut state = T::default();
1253        let operation = operation_names::THREAD_CREATE;
1254        let model = "thread"; // No model for thread operations
1255        let request_json = serde_json::to_string(&request).unwrap_or_default();
1256
1257        // Call before_request hook
1258        self.call_before_request(operation, model, &request_json, &mut state)
1259            .await?;
1260
1261        let start_time = Instant::now();
1262
1263        // Make the API call
1264        let response = match assistants_api::create_thread()
1265            .configuration(&self.client.base_configuration)
1266            .maybe_create_thread_request(Some(request))
1267            .call()
1268            .await
1269        {
1270            Ok(resp) => resp,
1271            Err(e) => {
1272                let error = self
1273                    .handle_api_error(e, operation, model, &request_json, &state)
1274                    .await;
1275                return Err(error);
1276            }
1277        };
1278
1279        let duration = start_time.elapsed();
1280
1281        // Call after_response hook
1282        self.call_after_response(
1283            &response,
1284            operation,
1285            model,
1286            &request_json,
1287            &state,
1288            duration,
1289            None,
1290            None,
1291        )
1292        .await;
1293
1294        Ok(response)
1295    }
1296}
1297
1298impl<T: Default + Send + Sync> UploadsClient<'_, T> {
1299    /// Create a new upload builder for the given file metadata.
1300    #[must_use]
1301    pub fn builder(
1302        &self,
1303        filename: impl Into<String>,
1304        purpose: UploadPurpose,
1305        bytes: i32,
1306        mime_type: impl Into<String>,
1307    ) -> UploadBuilder {
1308        UploadBuilder::new(filename, purpose, bytes, mime_type)
1309    }
1310
1311    /// Create an upload session.
1312    pub async fn create(&self, builder: UploadBuilder) -> Result<Upload> {
1313        let request = builder.build()?;
1314
1315        // Prepare interceptor context
1316        let mut state = T::default();
1317        let operation = operation_names::UPLOAD_CREATE;
1318        let model = "upload"; // No model for upload operations
1319        let request_json = serde_json::to_string(&request).unwrap_or_default();
1320
1321        // Call before_request hook
1322        self.call_before_request(operation, model, &request_json, &mut state)
1323            .await?;
1324
1325        let start_time = Instant::now();
1326
1327        // Make the API call
1328        let response = match uploads_api::create_upload()
1329            .configuration(&self.client.base_configuration)
1330            .create_upload_request(request)
1331            .call()
1332            .await
1333        {
1334            Ok(resp) => resp,
1335            Err(e) => {
1336                let error = self
1337                    .handle_api_error(e, operation, model, &request_json, &state)
1338                    .await;
1339                return Err(error);
1340            }
1341        };
1342
1343        let duration = start_time.elapsed();
1344
1345        // Call after_response hook
1346        self.call_after_response(
1347            &response,
1348            operation,
1349            model,
1350            &request_json,
1351            &state,
1352            duration,
1353            None,
1354            None,
1355        )
1356        .await;
1357
1358        Ok(response)
1359    }
1360}
1361
1362impl<T: Default + Send + Sync> ModerationsClient<'_, T> {
1363    /// Create a moderation builder for checking text content.
1364    ///
1365    /// # Example
1366    ///
1367    /// ```rust,ignore
1368    /// use openai_ergonomic::Client;
1369    ///
1370    /// # async fn example() -> openai_ergonomic::Result<()> {
1371    /// let client = Client::from_env()?;
1372    /// let builder = client.moderations().builder("Text to check");
1373    /// let response = client.moderations().create(builder).await?;
1374    /// println!("Flagged: {}", response.results[0].flagged);
1375    /// # Ok(())
1376    /// # }
1377    /// ```
1378    #[must_use]
1379    pub fn builder(&self, input: impl Into<String>) -> ModerationBuilder {
1380        ModerationBuilder::new(input)
1381    }
1382
1383    /// Convenience method for moderating a single text input.
1384    ///
1385    /// # Example
1386    ///
1387    /// ```rust,ignore
1388    /// use openai_ergonomic::Client;
1389    ///
1390    /// # async fn example() -> openai_ergonomic::Result<()> {
1391    /// let client = Client::from_env()?;
1392    /// let builder = client.moderations().check("Hello world");
1393    /// let response = client.moderations().create(builder).await?;
1394    ///
1395    /// if response.results[0].flagged {
1396    ///     println!("Content was flagged for moderation");
1397    /// }
1398    /// # Ok(())
1399    /// # }
1400    /// ```
1401    #[must_use]
1402    pub fn check(&self, input: impl Into<String>) -> ModerationBuilder {
1403        ModerationBuilder::new(input)
1404    }
1405
1406    /// Execute a moderation request built with [`ModerationBuilder`].
1407    ///
1408    /// # Example
1409    ///
1410    /// ```rust,ignore
1411    /// use openai_ergonomic::Client;
1412    ///
1413    /// # async fn example() -> openai_ergonomic::Result<()> {
1414    /// let client = Client::from_env()?;
1415    ///
1416    /// let builder = client
1417    ///     .moderations()
1418    ///     .check("Is this content appropriate?")
1419    ///     .model("text-moderation-latest");
1420    ///
1421    /// let response = client.moderations().create(builder).await?;
1422    ///
1423    /// println!("Model: {}", response.model);
1424    /// for result in response.results {
1425    ///     println!("Flagged: {}", result.flagged);
1426    ///     println!("Hate: {}", result.categories.hate);
1427    ///     println!("Violence: {}", result.categories.violence);
1428    /// }
1429    /// # Ok(())
1430    /// # }
1431    /// ```
1432    ///
1433    /// # Errors
1434    ///
1435    /// Returns an error if the API request fails or the response cannot be parsed.
1436    pub async fn create(&self, builder: ModerationBuilder) -> Result<CreateModerationResponse> {
1437        let request = builder.build()?;
1438
1439        // Prepare interceptor context
1440        let mut state = T::default();
1441        let operation = operation_names::MODERATION;
1442        let model = request
1443            .model
1444            .as_ref()
1445            .map_or_else(|| "text-moderation-latest".to_string(), ToString::to_string);
1446        let request_json = serde_json::to_string(&request).unwrap_or_default();
1447
1448        // Call before_request hook
1449        self.call_before_request(operation, &model, &request_json, &mut state)
1450            .await?;
1451
1452        let start_time = Instant::now();
1453
1454        // Make the API call
1455        let response = match moderations_api::create_moderation()
1456            .configuration(&self.client.base_configuration)
1457            .create_moderation_request(request)
1458            .call()
1459            .await
1460        {
1461            Ok(resp) => resp,
1462            Err(e) => {
1463                let error = self
1464                    .handle_api_error(e, operation, &model, &request_json, &state)
1465                    .await;
1466                return Err(error);
1467            }
1468        };
1469
1470        let duration = start_time.elapsed();
1471
1472        // Call after_response hook
1473        self.call_after_response(
1474            &response,
1475            operation,
1476            &model,
1477            &request_json,
1478            &state,
1479            duration,
1480            None,
1481            None,
1482        )
1483        .await;
1484
1485        Ok(response)
1486    }
1487}
1488
1489impl<T: Default + Send + Sync> FilesClient<'_, T> {
1490    /// Upload a file to `OpenAI`.
1491    ///
1492    /// # Example
1493    ///
1494    /// ```rust,ignore
1495    /// use openai_ergonomic::Client;
1496    /// use openai_ergonomic::builders::files::FilePurpose;
1497    ///
1498    /// # async fn example() -> openai_ergonomic::Result<()> {
1499    /// let client = Client::from_env()?;
1500    /// let builder = client
1501    ///     .files()
1502    ///     .upload_text("training.jsonl", FilePurpose::FineTune, "training data");
1503    /// let file = client.files().create(builder).await?;
1504    /// println!("Uploaded file: {}", file.id);
1505    /// # Ok(())
1506    /// # }
1507    /// ```
1508    pub async fn upload(&self, builder: FileUploadBuilder) -> Result<OpenAiFile> {
1509        // Write content to a temporary file
1510        let temp_dir = std::env::temp_dir();
1511        let temp_file_path = temp_dir.join(builder.filename());
1512        std::fs::write(&temp_file_path, builder.content()).map_err(Error::File)?;
1513
1514        // Convert FilePurpose to openai_client_base::models::FilePurpose
1515        let purpose = match builder.purpose().to_string().as_str() {
1516            "fine-tune" => openai_client_base::models::FilePurpose::FineTune,
1517            "vision" => openai_client_base::models::FilePurpose::Vision,
1518            "batch" => openai_client_base::models::FilePurpose::Batch,
1519            _ => openai_client_base::models::FilePurpose::Assistants, // Default for "assistants" and unknown
1520        };
1521
1522        // Prepare interceptor context
1523        let mut state = T::default();
1524        let operation = operation_names::FILE_UPLOAD;
1525        let model = "file-upload"; // No model for file operations
1526        let request_json = format!(
1527            r#"{{"filename":"{}","purpose":"{}","size":{}}}"#,
1528            builder.filename(),
1529            builder.purpose(),
1530            builder.content().len()
1531        );
1532
1533        // Call before_request hook
1534        if let Err(e) = self
1535            .call_before_request(operation, model, &request_json, &mut state)
1536            .await
1537        {
1538            // Clean up temp file before returning
1539            let _ = std::fs::remove_file(&temp_file_path);
1540            return Err(e);
1541        }
1542
1543        let start_time = Instant::now();
1544
1545        // Make the API call
1546        let result = match files_api::create_file()
1547            .configuration(&self.client.base_configuration)
1548            .file(temp_file_path.clone())
1549            .purpose(purpose)
1550            .call()
1551            .await
1552        {
1553            Ok(resp) => resp,
1554            Err(e) => {
1555                // Clean up temp file
1556                let _ = std::fs::remove_file(&temp_file_path);
1557                let error = self
1558                    .handle_api_error(e, operation, model, &request_json, &state)
1559                    .await;
1560                return Err(error);
1561            }
1562        };
1563
1564        // Clean up temporary file
1565        let _ = std::fs::remove_file(temp_file_path);
1566
1567        let duration = start_time.elapsed();
1568
1569        // Call after_response hook
1570        self.call_after_response(
1571            &result,
1572            operation,
1573            model,
1574            &request_json,
1575            &state,
1576            duration,
1577            None,
1578            None,
1579        )
1580        .await;
1581
1582        Ok(result)
1583    }
1584
1585    /// Convenience method to upload a file (alias for upload).
1586    ///
1587    /// # Example
1588    ///
1589    /// ```rust,ignore
1590    /// use openai_ergonomic::Client;
1591    /// use openai_ergonomic::builders::files::FilePurpose;
1592    ///
1593    /// # async fn example() -> openai_ergonomic::Result<()> {
1594    /// let client = Client::from_env()?;
1595    /// let builder = client
1596    ///     .files()
1597    ///     .upload_text("data.txt", FilePurpose::Assistants, "content");
1598    /// let file = client.files().create(builder).await?;
1599    /// println!("File ID: {}", file.id);
1600    /// # Ok(())
1601    /// # }
1602    /// ```
1603    pub async fn create(&self, builder: FileUploadBuilder) -> Result<OpenAiFile> {
1604        self.upload(builder).await
1605    }
1606
1607    /// Create a file upload builder from text content.
1608    #[must_use]
1609    pub fn upload_text(
1610        &self,
1611        filename: impl Into<String>,
1612        purpose: crate::builders::files::FilePurpose,
1613        text: impl Into<String>,
1614    ) -> FileUploadBuilder {
1615        FileUploadBuilder::from_text(filename, purpose, text)
1616    }
1617
1618    /// Create a file upload builder from bytes.
1619    #[must_use]
1620    pub fn upload_bytes(
1621        &self,
1622        filename: impl Into<String>,
1623        purpose: crate::builders::files::FilePurpose,
1624        content: Vec<u8>,
1625    ) -> FileUploadBuilder {
1626        FileUploadBuilder::new(filename, purpose, content)
1627    }
1628
1629    /// Create a file upload builder from a file path.
1630    pub fn upload_from_path(
1631        &self,
1632        path: impl AsRef<std::path::Path>,
1633        purpose: crate::builders::files::FilePurpose,
1634    ) -> Result<FileUploadBuilder> {
1635        FileUploadBuilder::from_path(path, purpose).map_err(Error::File)
1636    }
1637
1638    /// List files.
1639    ///
1640    /// # Example
1641    ///
1642    /// ```rust,ignore
1643    /// use openai_ergonomic::Client;
1644    ///
1645    /// # async fn example() -> openai_ergonomic::Result<()> {
1646    /// let client = Client::from_env()?;
1647    /// let builder = client.files().list_builder();
1648    /// let files = client.files().list(builder).await?;
1649    /// println!("Found {} files", files.data.len());
1650    /// # Ok(())
1651    /// # }
1652    /// ```
1653    pub async fn list(&self, builder: FileListBuilder) -> Result<ListFilesResponse> {
1654        let purpose = builder.purpose_ref().map(ToString::to_string);
1655        let limit = builder.limit_ref();
1656        let order = builder.order_ref().map(ToString::to_string);
1657
1658        // Prepare interceptor context
1659        let mut state = T::default();
1660        let operation = operation_names::FILE_LIST;
1661        let model = "files";
1662        let request_json = format!(
1663            r#"{{"purpose":"{}","limit":{},"order":"{}"}}"#,
1664            purpose.as_deref().unwrap_or(""),
1665            limit.unwrap_or(10000),
1666            order.as_deref().unwrap_or("desc")
1667        );
1668
1669        // Call before_request hook
1670        self.call_before_request(operation, model, &request_json, &mut state)
1671            .await?;
1672
1673        let start_time = Instant::now();
1674
1675        // Make the API call
1676        let response = match files_api::list_files()
1677            .configuration(&self.client.base_configuration)
1678            .maybe_purpose(purpose.as_deref())
1679            .maybe_limit(limit)
1680            .maybe_order(order.as_deref())
1681            .call()
1682            .await
1683        {
1684            Ok(resp) => resp,
1685            Err(e) => {
1686                let error = self
1687                    .handle_api_error(e, operation, model, &request_json, &state)
1688                    .await;
1689                return Err(error);
1690            }
1691        };
1692
1693        let duration = start_time.elapsed();
1694
1695        // Call after_response hook
1696        self.call_after_response(
1697            &response,
1698            operation,
1699            model,
1700            &request_json,
1701            &state,
1702            duration,
1703            None,
1704            None,
1705        )
1706        .await;
1707
1708        Ok(response)
1709    }
1710
1711    /// Create a list files builder.
1712    #[must_use]
1713    pub fn list_builder(&self) -> FileListBuilder {
1714        FileListBuilder::new()
1715    }
1716
1717    /// Retrieve information about a specific file.
1718    ///
1719    /// # Example
1720    ///
1721    /// ```rust,ignore
1722    /// use openai_ergonomic::Client;
1723    ///
1724    /// # async fn example() -> openai_ergonomic::Result<()> {
1725    /// let client = Client::from_env()?;
1726    /// let file = client.files().retrieve("file-123").await?;
1727    /// println!("File: {} ({})", file.filename, file.id);
1728    /// # Ok(())
1729    /// # }
1730    /// ```
1731    pub async fn retrieve(&self, file_id: impl Into<String>) -> Result<OpenAiFile> {
1732        let file_id = file_id.into();
1733
1734        // Prepare interceptor context
1735        let mut state = T::default();
1736        let operation = operation_names::FILE_RETRIEVE;
1737        let model = "files";
1738        let request_json = format!(r#"{{"file_id":"{file_id}"}}"#);
1739
1740        // Call before_request hook
1741        self.call_before_request(operation, model, &request_json, &mut state)
1742            .await?;
1743
1744        let start_time = Instant::now();
1745
1746        // Make the API call
1747        let response = match files_api::retrieve_file()
1748            .configuration(&self.client.base_configuration)
1749            .file_id(&file_id)
1750            .call()
1751            .await
1752        {
1753            Ok(resp) => resp,
1754            Err(e) => {
1755                let error = self
1756                    .handle_api_error(e, operation, model, &request_json, &state)
1757                    .await;
1758                return Err(error);
1759            }
1760        };
1761
1762        let duration = start_time.elapsed();
1763
1764        // Call after_response hook
1765        self.call_after_response(
1766            &response,
1767            operation,
1768            model,
1769            &request_json,
1770            &state,
1771            duration,
1772            None,
1773            None,
1774        )
1775        .await;
1776
1777        Ok(response)
1778    }
1779
1780    /// Retrieve information about a file using a builder.
1781    pub async fn get(&self, builder: FileRetrievalBuilder) -> Result<OpenAiFile> {
1782        self.retrieve(builder.file_id()).await
1783    }
1784
1785    /// Download file content.
1786    ///
1787    /// # Example
1788    ///
1789    /// ```rust,ignore
1790    /// use openai_ergonomic::Client;
1791    ///
1792    /// # async fn example() -> openai_ergonomic::Result<()> {
1793    /// let client = Client::from_env()?;
1794    /// let content = client.files().download("file-123").await?;
1795    /// println!("Downloaded {} bytes", content.len());
1796    /// # Ok(())
1797    /// # }
1798    /// ```
1799    pub async fn download(&self, file_id: impl Into<String>) -> Result<String> {
1800        let file_id = file_id.into();
1801
1802        // Prepare interceptor context
1803        let mut state = T::default();
1804        let operation = operation_names::FILE_DOWNLOAD;
1805        let model = "files";
1806        let request_json = format!(r#"{{"file_id":"{file_id}"}}"#);
1807
1808        // Call before_request hook
1809        self.call_before_request(operation, model, &request_json, &mut state)
1810            .await?;
1811
1812        let start_time = Instant::now();
1813
1814        // Make the API call
1815        let response = match files_api::download_file()
1816            .configuration(&self.client.base_configuration)
1817            .file_id(&file_id)
1818            .call()
1819            .await
1820        {
1821            Ok(resp) => resp,
1822            Err(e) => {
1823                let error = self
1824                    .handle_api_error(e, operation, model, &request_json, &state)
1825                    .await;
1826                return Err(error);
1827            }
1828        };
1829
1830        let duration = start_time.elapsed();
1831
1832        // Call after_response hook
1833        let response_size = format!(r#"{{"size":{}}}"#, response.len());
1834        self.call_after_response(
1835            &response_size,
1836            operation,
1837            model,
1838            &request_json,
1839            &state,
1840            duration,
1841            None,
1842            None,
1843        )
1844        .await;
1845
1846        Ok(response)
1847    }
1848
1849    /// Download file content as bytes.
1850    pub async fn download_bytes(&self, file_id: impl Into<String>) -> Result<Vec<u8>> {
1851        let content = self.download(file_id).await?;
1852        Ok(content.into_bytes())
1853    }
1854
1855    /// Delete a file.
1856    ///
1857    /// # Example
1858    ///
1859    /// ```rust,ignore
1860    /// use openai_ergonomic::Client;
1861    ///
1862    /// # async fn example() -> openai_ergonomic::Result<()> {
1863    /// let client = Client::from_env()?;
1864    /// let response = client.files().delete("file-123").await?;
1865    /// println!("Deleted: {}", response.deleted);
1866    /// # Ok(())
1867    /// # }
1868    /// ```
1869    pub async fn delete(&self, file_id: impl Into<String>) -> Result<DeleteFileResponse> {
1870        let file_id = file_id.into();
1871
1872        // Prepare interceptor context
1873        let mut state = T::default();
1874        let operation = operation_names::FILE_DELETE;
1875        let model = "files";
1876        let request_json = format!(r#"{{"file_id":"{file_id}"}}"#);
1877
1878        // Call before_request hook
1879        self.call_before_request(operation, model, &request_json, &mut state)
1880            .await?;
1881
1882        let start_time = Instant::now();
1883
1884        // Make the API call
1885        let response = match files_api::delete_file()
1886            .configuration(&self.client.base_configuration)
1887            .file_id(&file_id)
1888            .call()
1889            .await
1890        {
1891            Ok(resp) => resp,
1892            Err(e) => {
1893                let error = self
1894                    .handle_api_error(e, operation, model, &request_json, &state)
1895                    .await;
1896                return Err(error);
1897            }
1898        };
1899
1900        let duration = start_time.elapsed();
1901
1902        // Call after_response hook
1903        self.call_after_response(
1904            &response,
1905            operation,
1906            model,
1907            &request_json,
1908            &state,
1909            duration,
1910            None,
1911            None,
1912        )
1913        .await;
1914
1915        Ok(response)
1916    }
1917
1918    /// Delete a file using a builder.
1919    pub async fn remove(&self, builder: FileDeleteBuilder) -> Result<DeleteFileResponse> {
1920        self.delete(builder.file_id()).await
1921    }
1922}
1923
1924impl<T: Default + Send + Sync> VectorStoresClient<'_, T> {
1925    /// Create a new vector store.
1926    ///
1927    /// # Example
1928    ///
1929    /// ```rust,ignore
1930    /// use openai_ergonomic::Client;
1931    /// use openai_ergonomic::builders::vector_stores::VectorStoreBuilder;
1932    ///
1933    /// # async fn example() -> openai_ergonomic::Result<()> {
1934    /// let client = Client::from_env()?;
1935    /// let builder = VectorStoreBuilder::new()
1936    ///     .name("My Knowledge Base")
1937    ///     .add_file("file-123");
1938    /// let vector_store = client.vector_stores().create(builder).await?;
1939    /// println!("Created vector store: {}", vector_store.id);
1940    /// # Ok(())
1941    /// # }
1942    /// ```
1943    pub async fn create(
1944        &self,
1945        builder: crate::builders::vector_stores::VectorStoreBuilder,
1946    ) -> Result<VectorStoreObject> {
1947        use openai_client_base::models::{CreateVectorStoreRequest, VectorStoreExpirationAfter};
1948
1949        let mut request = CreateVectorStoreRequest::new();
1950        request.name = builder.name_ref().map(String::from);
1951        request.file_ids = if builder.has_files() {
1952            Some(builder.file_ids_ref().to_vec())
1953        } else {
1954            None
1955        };
1956
1957        if let Some(expires_after) = builder.expires_after_ref() {
1958            use openai_client_base::models::vector_store_expiration_after::Anchor;
1959            request.expires_after = Some(Box::new(VectorStoreExpirationAfter::new(
1960                Anchor::LastActiveAt,
1961                expires_after.days,
1962            )));
1963        }
1964
1965        if !builder.metadata_ref().is_empty() {
1966            request.metadata = Some(Some(builder.metadata_ref().clone()));
1967        }
1968
1969        // Prepare interceptor context
1970        let mut state = T::default();
1971        let operation = operation_names::VECTOR_STORE_CREATE;
1972        let model = "vector-store";
1973        let request_json = serde_json::to_string(&request).unwrap_or_default();
1974
1975        // Call before_request hook
1976        self.call_before_request(operation, model, &request_json, &mut state)
1977            .await?;
1978
1979        let start_time = Instant::now();
1980
1981        // Make the API call
1982        let response = match vector_stores_api::create_vector_store()
1983            .configuration(&self.client.base_configuration)
1984            .create_vector_store_request(request)
1985            .call()
1986            .await
1987        {
1988            Ok(resp) => resp,
1989            Err(e) => {
1990                let error = self
1991                    .handle_api_error(e, operation, model, &request_json, &state)
1992                    .await;
1993                return Err(error);
1994            }
1995        };
1996
1997        let duration = start_time.elapsed();
1998
1999        // Call after_response hook
2000        self.call_after_response(
2001            &response,
2002            operation,
2003            model,
2004            &request_json,
2005            &state,
2006            duration,
2007            None,
2008            None,
2009        )
2010        .await;
2011
2012        Ok(response)
2013    }
2014
2015    /// List vector stores.
2016    ///
2017    /// # Example
2018    ///
2019    /// ```rust,ignore
2020    /// use openai_ergonomic::Client;
2021    ///
2022    /// # async fn example() -> openai_ergonomic::Result<()> {
2023    /// let client = Client::from_env()?;
2024    /// let response = client.vector_stores().list(Some(20), None, None, None).await?;
2025    /// println!("Found {} vector stores", response.data.len());
2026    /// # Ok(())
2027    /// # }
2028    /// ```
2029    pub async fn list(
2030        &self,
2031        limit: Option<i32>,
2032        order: Option<&str>,
2033        after: Option<&str>,
2034        before: Option<&str>,
2035    ) -> Result<ListVectorStoresResponse> {
2036        // Prepare interceptor context
2037        let mut state = T::default();
2038        let operation = operation_names::VECTOR_STORE_LIST;
2039        let model = "vector-store";
2040        let request_json = format!(
2041            r#"{{"limit":{},"order":"{}"}}"#,
2042            limit.unwrap_or(20),
2043            order.unwrap_or("desc")
2044        );
2045
2046        // Call before_request hook
2047        self.call_before_request(operation, model, &request_json, &mut state)
2048            .await?;
2049
2050        let start_time = Instant::now();
2051
2052        // Make the API call
2053        let response = match vector_stores_api::list_vector_stores()
2054            .configuration(&self.client.base_configuration)
2055            .maybe_limit(limit)
2056            .maybe_order(order)
2057            .maybe_after(after)
2058            .maybe_before(before)
2059            .call()
2060            .await
2061        {
2062            Ok(resp) => resp,
2063            Err(e) => {
2064                let error = self
2065                    .handle_api_error(e, operation, model, &request_json, &state)
2066                    .await;
2067                return Err(error);
2068            }
2069        };
2070
2071        let duration = start_time.elapsed();
2072
2073        // Call after_response hook
2074        self.call_after_response(
2075            &response,
2076            operation,
2077            model,
2078            &request_json,
2079            &state,
2080            duration,
2081            None,
2082            None,
2083        )
2084        .await;
2085
2086        Ok(response)
2087    }
2088
2089    /// Get a specific vector store by ID.
2090    ///
2091    /// # Example
2092    ///
2093    /// ```rust,ignore
2094    /// use openai_ergonomic::Client;
2095    ///
2096    /// # async fn example() -> openai_ergonomic::Result<()> {
2097    /// let client = Client::from_env()?;
2098    /// let vector_store = client.vector_stores().get("vs_123").await?;
2099    /// println!("Vector store: {}", vector_store.name);
2100    /// # Ok(())
2101    /// # }
2102    /// ```
2103    pub async fn get(&self, vector_store_id: impl Into<String>) -> Result<VectorStoreObject> {
2104        let id = vector_store_id.into();
2105
2106        // Prepare interceptor context
2107        let mut state = T::default();
2108        let operation = operation_names::VECTOR_STORE_RETRIEVE;
2109        let model = "vector-store";
2110        let request_json = format!(r#"{{"vector_store_id":"{id}"}}"#);
2111
2112        // Call before_request hook
2113        self.call_before_request(operation, model, &request_json, &mut state)
2114            .await?;
2115
2116        let start_time = Instant::now();
2117
2118        // Make the API call
2119        let response = match vector_stores_api::get_vector_store()
2120            .configuration(&self.client.base_configuration)
2121            .vector_store_id(&id)
2122            .call()
2123            .await
2124        {
2125            Ok(resp) => resp,
2126            Err(e) => {
2127                let error = self
2128                    .handle_api_error(e, operation, model, &request_json, &state)
2129                    .await;
2130                return Err(error);
2131            }
2132        };
2133
2134        let duration = start_time.elapsed();
2135
2136        // Call after_response hook
2137        self.call_after_response(
2138            &response,
2139            operation,
2140            model,
2141            &request_json,
2142            &state,
2143            duration,
2144            None,
2145            None,
2146        )
2147        .await;
2148
2149        Ok(response)
2150    }
2151
2152    /// Update a vector store.
2153    ///
2154    /// # Example
2155    ///
2156    /// ```rust,ignore
2157    /// use openai_ergonomic::Client;
2158    /// use openai_ergonomic::builders::vector_stores::VectorStoreBuilder;
2159    ///
2160    /// # async fn example() -> openai_ergonomic::Result<()> {
2161    /// let client = Client::from_env()?;
2162    /// let builder = VectorStoreBuilder::new()
2163    ///     .name("Updated Name")
2164    ///     .metadata("updated", "true");
2165    /// let vector_store = client.vector_stores().update("vs_123", builder).await?;
2166    /// println!("Updated: {}", vector_store.name);
2167    /// # Ok(())
2168    /// # }
2169    /// ```
2170    pub async fn update(
2171        &self,
2172        vector_store_id: impl Into<String>,
2173        builder: crate::builders::vector_stores::VectorStoreBuilder,
2174    ) -> Result<VectorStoreObject> {
2175        use openai_client_base::models::{UpdateVectorStoreRequest, VectorStoreExpirationAfter};
2176
2177        let id = vector_store_id.into();
2178        let mut request = UpdateVectorStoreRequest::new();
2179        request.name = builder.name_ref().map(String::from);
2180
2181        if let Some(expires_after) = builder.expires_after_ref() {
2182            use openai_client_base::models::vector_store_expiration_after::Anchor;
2183            request.expires_after = Some(Box::new(VectorStoreExpirationAfter::new(
2184                Anchor::LastActiveAt,
2185                expires_after.days,
2186            )));
2187        }
2188
2189        if !builder.metadata_ref().is_empty() {
2190            request.metadata = Some(Some(builder.metadata_ref().clone()));
2191        }
2192
2193        // Prepare interceptor context
2194        let mut state = T::default();
2195        let operation = operation_names::VECTOR_STORE_UPDATE;
2196        let model = "vector-store";
2197        let request_json = serde_json::to_string(&request).unwrap_or_default();
2198
2199        // Call before_request hook
2200        self.call_before_request(operation, model, &request_json, &mut state)
2201            .await?;
2202
2203        let start_time = Instant::now();
2204
2205        // Make the API call
2206        let response = match vector_stores_api::modify_vector_store()
2207            .configuration(&self.client.base_configuration)
2208            .vector_store_id(&id)
2209            .update_vector_store_request(request)
2210            .call()
2211            .await
2212        {
2213            Ok(resp) => resp,
2214            Err(e) => {
2215                let error = self
2216                    .handle_api_error(e, operation, model, &request_json, &state)
2217                    .await;
2218                return Err(error);
2219            }
2220        };
2221
2222        let duration = start_time.elapsed();
2223
2224        // Call after_response hook
2225        self.call_after_response(
2226            &response,
2227            operation,
2228            model,
2229            &request_json,
2230            &state,
2231            duration,
2232            None,
2233            None,
2234        )
2235        .await;
2236
2237        Ok(response)
2238    }
2239
2240    /// Delete a vector store.
2241    ///
2242    /// # Example
2243    ///
2244    /// ```rust,ignore
2245    /// use openai_ergonomic::Client;
2246    ///
2247    /// # async fn example() -> openai_ergonomic::Result<()> {
2248    /// let client = Client::from_env()?;
2249    /// let response = client.vector_stores().delete("vs_123").await?;
2250    /// println!("Deleted: {}", response.deleted);
2251    /// # Ok(())
2252    /// # }
2253    /// ```
2254    pub async fn delete(
2255        &self,
2256        vector_store_id: impl Into<String>,
2257    ) -> Result<DeleteVectorStoreResponse> {
2258        let id = vector_store_id.into();
2259
2260        // Prepare interceptor context
2261        let mut state = T::default();
2262        let operation = operation_names::VECTOR_STORE_DELETE;
2263        let model = "vector-store";
2264        let request_json = format!(r#"{{"vector_store_id":"{id}"}}"#);
2265
2266        // Call before_request hook
2267        self.call_before_request(operation, model, &request_json, &mut state)
2268            .await?;
2269
2270        let start_time = Instant::now();
2271
2272        // Make the API call
2273        let response = match vector_stores_api::delete_vector_store()
2274            .configuration(&self.client.base_configuration)
2275            .vector_store_id(&id)
2276            .call()
2277            .await
2278        {
2279            Ok(resp) => resp,
2280            Err(e) => {
2281                let error = self
2282                    .handle_api_error(e, operation, model, &request_json, &state)
2283                    .await;
2284                return Err(error);
2285            }
2286        };
2287
2288        let duration = start_time.elapsed();
2289
2290        // Call after_response hook
2291        self.call_after_response(
2292            &response,
2293            operation,
2294            model,
2295            &request_json,
2296            &state,
2297            duration,
2298            None,
2299            None,
2300        )
2301        .await;
2302
2303        Ok(response)
2304    }
2305
2306    /// Add a file to a vector store.
2307    ///
2308    /// # Example
2309    ///
2310    /// ```rust,ignore
2311    /// use openai_ergonomic::Client;
2312    ///
2313    /// # async fn example() -> openai_ergonomic::Result<()> {
2314    /// let client = Client::from_env()?;
2315    /// let file = client.vector_stores().add_file("vs_123", "file-456").await?;
2316    /// println!("Added file: {}", file.id);
2317    /// # Ok(())
2318    /// # }
2319    /// ```
2320    pub async fn add_file(
2321        &self,
2322        vector_store_id: impl Into<String>,
2323        file_id: impl Into<String>,
2324    ) -> Result<VectorStoreFileObject> {
2325        use openai_client_base::models::CreateVectorStoreFileRequest;
2326
2327        let vs_id = vector_store_id.into();
2328        let f_id = file_id.into();
2329        let request = CreateVectorStoreFileRequest::new(f_id.clone());
2330
2331        // Prepare interceptor context
2332        let mut state = T::default();
2333        let operation = operation_names::VECTOR_STORE_FILE_ADD;
2334        let model = "vector-store";
2335        let request_json = format!(r#"{{"vector_store_id":"{vs_id}","file_id":"{f_id}"}}"#);
2336
2337        // Call before_request hook
2338        self.call_before_request(operation, model, &request_json, &mut state)
2339            .await?;
2340
2341        let start_time = Instant::now();
2342
2343        // Make the API call
2344        let response = match vector_stores_api::create_vector_store_file()
2345            .configuration(&self.client.base_configuration)
2346            .vector_store_id(&vs_id)
2347            .create_vector_store_file_request(request)
2348            .call()
2349            .await
2350        {
2351            Ok(resp) => resp,
2352            Err(e) => {
2353                let error = self
2354                    .handle_api_error(e, operation, model, &request_json, &state)
2355                    .await;
2356                return Err(error);
2357            }
2358        };
2359
2360        let duration = start_time.elapsed();
2361
2362        // Call after_response hook
2363        self.call_after_response(
2364            &response,
2365            operation,
2366            model,
2367            &request_json,
2368            &state,
2369            duration,
2370            None,
2371            None,
2372        )
2373        .await;
2374
2375        Ok(response)
2376    }
2377
2378    /// List files in a vector store.
2379    ///
2380    /// # Example
2381    ///
2382    /// ```rust,ignore
2383    /// use openai_ergonomic::Client;
2384    ///
2385    /// # async fn example() -> openai_ergonomic::Result<()> {
2386    /// let client = Client::from_env()?;
2387    /// let response = client.vector_stores().list_files("vs_123", None, None, None, None, None).await?;
2388    /// println!("Found {} files", response.data.len());
2389    /// # Ok(())
2390    /// # }
2391    /// ```
2392    pub async fn list_files(
2393        &self,
2394        vector_store_id: impl Into<String>,
2395        limit: Option<i32>,
2396        order: Option<&str>,
2397        after: Option<&str>,
2398        before: Option<&str>,
2399        filter: Option<&str>,
2400    ) -> Result<ListVectorStoreFilesResponse> {
2401        let id = vector_store_id.into();
2402
2403        // Prepare interceptor context
2404        let mut state = T::default();
2405        let operation = operation_names::VECTOR_STORE_FILE_LIST;
2406        let model = "vector-store";
2407        let request_json = format!(r#"{{"vector_store_id":"{id}"}}"#);
2408
2409        // Call before_request hook
2410        self.call_before_request(operation, model, &request_json, &mut state)
2411            .await?;
2412
2413        let start_time = Instant::now();
2414
2415        // Make the API call
2416        let response = match vector_stores_api::list_vector_store_files()
2417            .configuration(&self.client.base_configuration)
2418            .vector_store_id(&id)
2419            .maybe_limit(limit)
2420            .maybe_order(order)
2421            .maybe_after(after)
2422            .maybe_before(before)
2423            .maybe_filter(filter)
2424            .call()
2425            .await
2426        {
2427            Ok(resp) => resp,
2428            Err(e) => {
2429                let error = self
2430                    .handle_api_error(e, operation, model, &request_json, &state)
2431                    .await;
2432                return Err(error);
2433            }
2434        };
2435
2436        let duration = start_time.elapsed();
2437
2438        // Call after_response hook
2439        self.call_after_response(
2440            &response,
2441            operation,
2442            model,
2443            &request_json,
2444            &state,
2445            duration,
2446            None,
2447            None,
2448        )
2449        .await;
2450
2451        Ok(response)
2452    }
2453
2454    /// Get a file from a vector store.
2455    ///
2456    /// # Example
2457    ///
2458    /// ```rust,ignore
2459    /// use openai_ergonomic::Client;
2460    ///
2461    /// # async fn example() -> openai_ergonomic::Result<()> {
2462    /// let client = Client::from_env()?;
2463    /// let file = client.vector_stores().get_file("vs_123", "file-456").await?;
2464    /// println!("File: {}", file.id);
2465    /// # Ok(())
2466    /// # }
2467    /// ```
2468    pub async fn get_file(
2469        &self,
2470        vector_store_id: impl Into<String>,
2471        file_id: impl Into<String>,
2472    ) -> Result<VectorStoreFileObject> {
2473        let vs_id = vector_store_id.into();
2474        let f_id = file_id.into();
2475
2476        // Prepare interceptor context
2477        let mut state = T::default();
2478        let operation = operation_names::VECTOR_STORE_FILE_RETRIEVE;
2479        let model = "vector-store";
2480        let request_json = format!(r#"{{"vector_store_id":"{vs_id}","file_id":"{f_id}"}}"#);
2481
2482        // Call before_request hook
2483        self.call_before_request(operation, model, &request_json, &mut state)
2484            .await?;
2485
2486        let start_time = Instant::now();
2487
2488        // Make the API call
2489        let response = match vector_stores_api::get_vector_store_file()
2490            .configuration(&self.client.base_configuration)
2491            .vector_store_id(&vs_id)
2492            .file_id(&f_id)
2493            .call()
2494            .await
2495        {
2496            Ok(resp) => resp,
2497            Err(e) => {
2498                let error = self
2499                    .handle_api_error(e, operation, model, &request_json, &state)
2500                    .await;
2501                return Err(error);
2502            }
2503        };
2504
2505        let duration = start_time.elapsed();
2506
2507        // Call after_response hook
2508        self.call_after_response(
2509            &response,
2510            operation,
2511            model,
2512            &request_json,
2513            &state,
2514            duration,
2515            None,
2516            None,
2517        )
2518        .await;
2519
2520        Ok(response)
2521    }
2522
2523    /// Delete a file from a vector store.
2524    ///
2525    /// # Example
2526    ///
2527    /// ```rust,ignore
2528    /// use openai_ergonomic::Client;
2529    ///
2530    /// # async fn example() -> openai_ergonomic::Result<()> {
2531    /// let client = Client::from_env()?;
2532    /// let response = client.vector_stores().delete_file("vs_123", "file-456").await?;
2533    /// println!("Deleted: {}", response.deleted);
2534    /// # Ok(())
2535    /// # }
2536    /// ```
2537    pub async fn delete_file(
2538        &self,
2539        vector_store_id: impl Into<String>,
2540        file_id: impl Into<String>,
2541    ) -> Result<DeleteVectorStoreFileResponse> {
2542        let vs_id = vector_store_id.into();
2543        let f_id = file_id.into();
2544
2545        // Prepare interceptor context
2546        let mut state = T::default();
2547        let operation = operation_names::VECTOR_STORE_FILE_DELETE;
2548        let model = "vector-store";
2549        let request_json = format!(r#"{{"vector_store_id":"{vs_id}","file_id":"{f_id}"}}"#);
2550
2551        // Call before_request hook
2552        self.call_before_request(operation, model, &request_json, &mut state)
2553            .await?;
2554
2555        let start_time = Instant::now();
2556
2557        // Make the API call
2558        let response = match vector_stores_api::delete_vector_store_file()
2559            .configuration(&self.client.base_configuration)
2560            .vector_store_id(&vs_id)
2561            .file_id(&f_id)
2562            .call()
2563            .await
2564        {
2565            Ok(resp) => resp,
2566            Err(e) => {
2567                let error = self
2568                    .handle_api_error(e, operation, model, &request_json, &state)
2569                    .await;
2570                return Err(error);
2571            }
2572        };
2573
2574        let duration = start_time.elapsed();
2575
2576        // Call after_response hook
2577        self.call_after_response(
2578            &response,
2579            operation,
2580            model,
2581            &request_json,
2582            &state,
2583            duration,
2584            None,
2585            None,
2586        )
2587        .await;
2588
2589        Ok(response)
2590    }
2591
2592    /// Search a vector store.
2593    ///
2594    /// # Example
2595    ///
2596    /// ```rust,ignore
2597    /// use openai_ergonomic::Client;
2598    /// use openai_ergonomic::builders::vector_stores::VectorStoreSearchBuilder;
2599    ///
2600    /// # async fn example() -> openai_ergonomic::Result<()> {
2601    /// let client = Client::from_env()?;
2602    /// let builder = VectorStoreSearchBuilder::new("vs_123", "machine learning concepts");
2603    /// let results = client.vector_stores().search(builder).await?;
2604    /// println!("Found {} results", results.data.len());
2605    /// # Ok(())
2606    /// # }
2607    /// ```
2608    pub async fn search(
2609        &self,
2610        builder: crate::builders::vector_stores::VectorStoreSearchBuilder,
2611    ) -> Result<VectorStoreSearchResultsPage> {
2612        use openai_client_base::models::{VectorStoreSearchRequest, VectorStoreSearchRequestQuery};
2613
2614        let query = VectorStoreSearchRequestQuery::new_text(builder.query().to_string());
2615        let mut request = VectorStoreSearchRequest::new(query);
2616
2617        if let Some(limit) = builder.limit_ref() {
2618            request.max_num_results = Some(limit);
2619        }
2620
2621        let vs_id = builder.vector_store_id().to_string();
2622
2623        // Prepare interceptor context
2624        let mut state = T::default();
2625        let operation = operation_names::VECTOR_STORE_SEARCH;
2626        let model = "vector-store";
2627        let request_json = format!(
2628            r#"{{"vector_store_id":"{}","query":"{}"}}"#,
2629            vs_id,
2630            builder.query()
2631        );
2632
2633        // Call before_request hook
2634        self.call_before_request(operation, model, &request_json, &mut state)
2635            .await?;
2636
2637        let start_time = Instant::now();
2638
2639        // Make the API call
2640        let response = match vector_stores_api::search_vector_store()
2641            .configuration(&self.client.base_configuration)
2642            .vector_store_id(&vs_id)
2643            .vector_store_search_request(request)
2644            .call()
2645            .await
2646        {
2647            Ok(resp) => resp,
2648            Err(e) => {
2649                let error = self
2650                    .handle_api_error(e, operation, model, &request_json, &state)
2651                    .await;
2652                return Err(error);
2653            }
2654        };
2655
2656        let duration = start_time.elapsed();
2657
2658        // Call after_response hook
2659        self.call_after_response(
2660            &response,
2661            operation,
2662            model,
2663            &request_json,
2664            &state,
2665            duration,
2666            None,
2667            None,
2668        )
2669        .await;
2670
2671        Ok(response)
2672    }
2673}
2674
2675impl<T: Default + Send + Sync> BatchClient<'_, T> {
2676    /// Create a new batch job.
2677    ///
2678    /// # Example
2679    ///
2680    /// ```rust,ignore
2681    /// use openai_ergonomic::Client;
2682    /// use openai_ergonomic::builders::batch::{BatchJobBuilder, BatchEndpoint};
2683    ///
2684    /// # async fn example() -> openai_ergonomic::Result<()> {
2685    /// let client = Client::from_env()?;
2686    /// let builder = BatchJobBuilder::new("file-batch-input", BatchEndpoint::ChatCompletions);
2687    /// let batch = client.batch().create(builder).await?;
2688    /// println!("Created batch: {}", batch.id);
2689    /// # Ok(())
2690    /// # }
2691    /// ```
2692    pub async fn create(&self, builder: crate::builders::batch::BatchJobBuilder) -> Result<Batch> {
2693        use openai_client_base::models::create_batch_request::{CompletionWindow, Endpoint};
2694
2695        // Map our endpoint to the base client enum
2696        let endpoint = match builder.endpoint() {
2697            crate::builders::batch::BatchEndpoint::ChatCompletions => {
2698                Endpoint::SlashV1SlashChatSlashCompletions
2699            }
2700            crate::builders::batch::BatchEndpoint::Embeddings => Endpoint::SlashV1SlashEmbeddings,
2701            crate::builders::batch::BatchEndpoint::Completions => Endpoint::SlashV1SlashCompletions,
2702        };
2703
2704        let mut request = CreateBatchRequest::new(
2705            builder.input_file_id().to_string(),
2706            endpoint,
2707            CompletionWindow::Variant24h,
2708        );
2709
2710        if builder.has_metadata() {
2711            request.metadata = Some(Some(builder.metadata_ref().clone()));
2712        }
2713
2714        // Prepare interceptor context
2715        let mut state = T::default();
2716        let operation = operation_names::BATCH_CREATE;
2717        let model = "batch";
2718        let request_json = serde_json::to_string(&request).unwrap_or_default();
2719
2720        // Call before_request hook
2721        self.call_before_request(operation, model, &request_json, &mut state)
2722            .await?;
2723
2724        let start_time = Instant::now();
2725
2726        // Make the API call
2727        let response = match batch_api::create_batch()
2728            .configuration(&self.client.base_configuration)
2729            .create_batch_request(request)
2730            .call()
2731            .await
2732        {
2733            Ok(resp) => resp,
2734            Err(e) => {
2735                let error = self
2736                    .handle_api_error(e, operation, model, &request_json, &state)
2737                    .await;
2738                return Err(error);
2739            }
2740        };
2741
2742        let duration = start_time.elapsed();
2743
2744        // Call after_response hook
2745        self.call_after_response(
2746            &response,
2747            operation,
2748            model,
2749            &request_json,
2750            &state,
2751            duration,
2752            None,
2753            None,
2754        )
2755        .await;
2756
2757        Ok(response)
2758    }
2759
2760    /// List batch jobs.
2761    ///
2762    /// # Example
2763    ///
2764    /// ```rust,ignore
2765    /// use openai_ergonomic::Client;
2766    ///
2767    /// # async fn example() -> openai_ergonomic::Result<()> {
2768    /// let client = Client::from_env()?;
2769    /// let response = client.batch().list(None, Some(20)).await?;
2770    /// println!("Found {} batches", response.data.len());
2771    /// # Ok(())
2772    /// # }
2773    /// ```
2774    pub async fn list(
2775        &self,
2776        after: Option<&str>,
2777        limit: Option<i32>,
2778    ) -> Result<ListBatchesResponse> {
2779        // Prepare interceptor context
2780        let mut state = T::default();
2781        let operation = operation_names::BATCH_LIST;
2782        let model = "batch";
2783        let request_json = format!("{{\"after\":{after:?},\"limit\":{limit:?}}}");
2784
2785        // Call before_request hook
2786        self.call_before_request(operation, model, &request_json, &mut state)
2787            .await?;
2788
2789        let start_time = Instant::now();
2790
2791        // Make the API call
2792        let response = match batch_api::list_batches()
2793            .configuration(&self.client.base_configuration)
2794            .maybe_after(after)
2795            .maybe_limit(limit)
2796            .call()
2797            .await
2798        {
2799            Ok(resp) => resp,
2800            Err(e) => {
2801                let error = self
2802                    .handle_api_error(e, operation, model, &request_json, &state)
2803                    .await;
2804                return Err(error);
2805            }
2806        };
2807
2808        let duration = start_time.elapsed();
2809
2810        // Call after_response hook
2811        self.call_after_response(
2812            &response,
2813            operation,
2814            model,
2815            &request_json,
2816            &state,
2817            duration,
2818            None,
2819            None,
2820        )
2821        .await;
2822
2823        Ok(response)
2824    }
2825
2826    /// Get a specific batch job.
2827    ///
2828    /// # Example
2829    ///
2830    /// ```rust,ignore
2831    /// use openai_ergonomic::Client;
2832    ///
2833    /// # async fn example() -> openai_ergonomic::Result<()> {
2834    /// let client = Client::from_env()?;
2835    /// let batch = client.batch().get("batch_123").await?;
2836    /// println!("Batch status: {}", batch.status);
2837    /// # Ok(())
2838    /// # }
2839    /// ```
2840    pub async fn get(&self, batch_id: impl Into<String>) -> Result<Batch> {
2841        let id = batch_id.into();
2842
2843        // Prepare interceptor context
2844        let mut state = T::default();
2845        let operation = operation_names::BATCH_RETRIEVE;
2846        let model = "batch";
2847        let request_json = format!("{{\"batch_id\":\"{id}\"}}");
2848
2849        // Call before_request hook
2850        self.call_before_request(operation, model, &request_json, &mut state)
2851            .await?;
2852
2853        let start_time = Instant::now();
2854
2855        // Make the API call
2856        let response = match batch_api::retrieve_batch()
2857            .configuration(&self.client.base_configuration)
2858            .batch_id(&id)
2859            .call()
2860            .await
2861        {
2862            Ok(resp) => resp,
2863            Err(e) => {
2864                let error = self
2865                    .handle_api_error(e, operation, model, &request_json, &state)
2866                    .await;
2867                return Err(error);
2868            }
2869        };
2870
2871        let duration = start_time.elapsed();
2872
2873        // Call after_response hook
2874        self.call_after_response(
2875            &response,
2876            operation,
2877            model,
2878            &request_json,
2879            &state,
2880            duration,
2881            None,
2882            None,
2883        )
2884        .await;
2885
2886        Ok(response)
2887    }
2888
2889    /// Cancel a batch job.
2890    ///
2891    /// # Example
2892    ///
2893    /// ```rust,ignore
2894    /// use openai_ergonomic::Client;
2895    ///
2896    /// # async fn example() -> openai_ergonomic::Result<()> {
2897    /// let client = Client::from_env()?;
2898    /// let batch = client.batch().cancel("batch_123").await?;
2899    /// println!("Batch cancelled: {}", batch.status);
2900    /// # Ok(())
2901    /// # }
2902    /// ```
2903    pub async fn cancel(&self, batch_id: impl Into<String>) -> Result<Batch> {
2904        let id = batch_id.into();
2905
2906        // Prepare interceptor context
2907        let mut state = T::default();
2908        let operation = operation_names::BATCH_CANCEL;
2909        let model = "batch";
2910        let request_json = format!("{{\"batch_id\":\"{id}\"}}");
2911
2912        // Call before_request hook
2913        self.call_before_request(operation, model, &request_json, &mut state)
2914            .await?;
2915
2916        let start_time = Instant::now();
2917
2918        // Make the API call
2919        let response = match batch_api::cancel_batch()
2920            .configuration(&self.client.base_configuration)
2921            .batch_id(&id)
2922            .call()
2923            .await
2924        {
2925            Ok(resp) => resp,
2926            Err(e) => {
2927                let error = self
2928                    .handle_api_error(e, operation, model, &request_json, &state)
2929                    .await;
2930                return Err(error);
2931            }
2932        };
2933
2934        let duration = start_time.elapsed();
2935
2936        // Call after_response hook
2937        self.call_after_response(
2938            &response,
2939            operation,
2940            model,
2941            &request_json,
2942            &state,
2943            duration,
2944            None,
2945            None,
2946        )
2947        .await;
2948
2949        Ok(response)
2950    }
2951}
2952
2953impl<T: Default + Send + Sync> FineTuningClient<'_, T> {
2954    /// Create a new fine-tuning job.
2955    ///
2956    /// # Example
2957    ///
2958    /// ```rust,ignore
2959    /// use openai_ergonomic::Client;
2960    /// use openai_ergonomic::builders::fine_tuning::FineTuningJobBuilder;
2961    ///
2962    /// # async fn example() -> openai_ergonomic::Result<()> {
2963    /// let client = Client::from_env()?;
2964    /// let builder = FineTuningJobBuilder::new("gpt-3.5-turbo", "file-training-data");
2965    /// let job = client.fine_tuning().create_job(builder).await?;
2966    /// println!("Created job: {}", job.id);
2967    /// # Ok(())
2968    /// # }
2969    /// ```
2970    pub async fn create_job(
2971        &self,
2972        builder: crate::builders::fine_tuning::FineTuningJobBuilder,
2973    ) -> Result<FineTuningJob> {
2974        let mut request = CreateFineTuningJobRequest::new(
2975            builder.model().to_string(),
2976            builder.training_file().to_string(),
2977        );
2978
2979        if let Some(validation_file) = builder.validation_file_ref() {
2980            request.validation_file = Some(validation_file.to_string());
2981        }
2982
2983        if let Some(suffix) = builder.suffix_ref() {
2984            request.suffix = Some(suffix.to_string());
2985        }
2986
2987        // Note: Hyperparameters handling is limited due to base client API limitations
2988        // The generated API appears to have empty struct definitions for hyperparameters
2989        // For now, we skip hyperparameters configuration
2990        // TODO: Update when openai-client-base fixes hyperparameters types
2991
2992        // Prepare interceptor context
2993        let mut state = T::default();
2994        let operation = operation_names::FINE_TUNING_CREATE;
2995        let model = builder.model();
2996        let request_json = serde_json::to_string(&request).unwrap_or_default();
2997
2998        // Call before_request hook
2999        self.call_before_request(operation, model, &request_json, &mut state)
3000            .await?;
3001
3002        let start_time = Instant::now();
3003
3004        // Make the API call
3005        let response = match fine_tuning_api::create_fine_tuning_job()
3006            .configuration(&self.client.base_configuration)
3007            .create_fine_tuning_job_request(request)
3008            .call()
3009            .await
3010        {
3011            Ok(resp) => resp,
3012            Err(e) => {
3013                let error = self
3014                    .handle_api_error(e, operation, model, &request_json, &state)
3015                    .await;
3016                return Err(error);
3017            }
3018        };
3019
3020        let duration = start_time.elapsed();
3021
3022        // Call after_response hook
3023        self.call_after_response(
3024            &response,
3025            operation,
3026            model,
3027            &request_json,
3028            &state,
3029            duration,
3030            None,
3031            None,
3032        )
3033        .await;
3034
3035        Ok(response)
3036    }
3037
3038    /// List fine-tuning jobs.
3039    ///
3040    /// # Example
3041    ///
3042    /// ```rust,ignore
3043    /// use openai_ergonomic::Client;
3044    ///
3045    /// # async fn example() -> openai_ergonomic::Result<()> {
3046    /// let client = Client::from_env()?;
3047    /// let response = client.fine_tuning().list_jobs(None, Some(20)).await?;
3048    /// println!("Found {} jobs", response.data.len());
3049    /// # Ok(())
3050    /// # }
3051    /// ```
3052    pub async fn list_jobs(
3053        &self,
3054        after: Option<&str>,
3055        limit: Option<i32>,
3056    ) -> Result<ListPaginatedFineTuningJobsResponse> {
3057        // Prepare interceptor context
3058        let mut state = T::default();
3059        let operation = operation_names::FINE_TUNING_LIST;
3060        let model = "fine-tuning";
3061        let request_json = format!("{{\"after\":{after:?},\"limit\":{limit:?}}}");
3062
3063        // Call before_request hook
3064        self.call_before_request(operation, model, &request_json, &mut state)
3065            .await?;
3066
3067        let start_time = Instant::now();
3068
3069        // Make the API call
3070        let response = match fine_tuning_api::list_paginated_fine_tuning_jobs()
3071            .configuration(&self.client.base_configuration)
3072            .maybe_after(after)
3073            .maybe_limit(limit)
3074            .call()
3075            .await
3076        {
3077            Ok(resp) => resp,
3078            Err(e) => {
3079                let error = self
3080                    .handle_api_error(e, operation, model, &request_json, &state)
3081                    .await;
3082                return Err(error);
3083            }
3084        };
3085
3086        let duration = start_time.elapsed();
3087
3088        // Call after_response hook
3089        self.call_after_response(
3090            &response,
3091            operation,
3092            model,
3093            &request_json,
3094            &state,
3095            duration,
3096            None,
3097            None,
3098        )
3099        .await;
3100
3101        Ok(response)
3102    }
3103
3104    /// Get a specific fine-tuning job.
3105    ///
3106    /// # Example
3107    ///
3108    /// ```rust,ignore
3109    /// use openai_ergonomic::Client;
3110    ///
3111    /// # async fn example() -> openai_ergonomic::Result<()> {
3112    /// let client = Client::from_env()?;
3113    /// let job = client.fine_tuning().get_job("ftjob-123").await?;
3114    /// println!("Job status: {}", job.status);
3115    /// # Ok(())
3116    /// # }
3117    /// ```
3118    pub async fn get_job(&self, job_id: impl Into<String>) -> Result<FineTuningJob> {
3119        let id = job_id.into();
3120
3121        // Prepare interceptor context
3122        let mut state = T::default();
3123        let operation = operation_names::FINE_TUNING_RETRIEVE;
3124        let model = "fine-tuning";
3125        let request_json = format!("{{\"job_id\":\"{id}\"}}");
3126
3127        // Call before_request hook
3128        self.call_before_request(operation, model, &request_json, &mut state)
3129            .await?;
3130
3131        let start_time = Instant::now();
3132
3133        // Make the API call
3134        let response = match fine_tuning_api::retrieve_fine_tuning_job()
3135            .configuration(&self.client.base_configuration)
3136            .fine_tuning_job_id(&id)
3137            .call()
3138            .await
3139        {
3140            Ok(resp) => resp,
3141            Err(e) => {
3142                let error = self
3143                    .handle_api_error(e, operation, model, &request_json, &state)
3144                    .await;
3145                return Err(error);
3146            }
3147        };
3148
3149        let duration = start_time.elapsed();
3150
3151        // Call after_response hook
3152        self.call_after_response(
3153            &response,
3154            operation,
3155            model,
3156            &request_json,
3157            &state,
3158            duration,
3159            None,
3160            None,
3161        )
3162        .await;
3163
3164        Ok(response)
3165    }
3166
3167    /// Cancel a fine-tuning job.
3168    ///
3169    /// # Example
3170    ///
3171    /// ```rust,ignore
3172    /// use openai_ergonomic::Client;
3173    ///
3174    /// # async fn example() -> openai_ergonomic::Result<()> {
3175    /// let client = Client::from_env()?;
3176    /// let job = client.fine_tuning().cancel_job("ftjob-123").await?;
3177    /// println!("Job cancelled: {}", job.status);
3178    /// # Ok(())
3179    /// # }
3180    /// ```
3181    pub async fn cancel_job(&self, job_id: impl Into<String>) -> Result<FineTuningJob> {
3182        let id = job_id.into();
3183
3184        // Prepare interceptor context
3185        let mut state = T::default();
3186        let operation = operation_names::FINE_TUNING_CANCEL;
3187        let model = "fine-tuning";
3188        let request_json = format!("{{\"job_id\":\"{id}\"}}");
3189
3190        // Call before_request hook
3191        self.call_before_request(operation, model, &request_json, &mut state)
3192            .await?;
3193
3194        let start_time = Instant::now();
3195
3196        // Make the API call
3197        let response = match fine_tuning_api::cancel_fine_tuning_job()
3198            .configuration(&self.client.base_configuration)
3199            .fine_tuning_job_id(&id)
3200            .call()
3201            .await
3202        {
3203            Ok(resp) => resp,
3204            Err(e) => {
3205                let error = self
3206                    .handle_api_error(e, operation, model, &request_json, &state)
3207                    .await;
3208                return Err(error);
3209            }
3210        };
3211
3212        let duration = start_time.elapsed();
3213
3214        // Call after_response hook
3215        self.call_after_response(
3216            &response,
3217            operation,
3218            model,
3219            &request_json,
3220            &state,
3221            duration,
3222            None,
3223            None,
3224        )
3225        .await;
3226
3227        Ok(response)
3228    }
3229
3230    /// List events for a fine-tuning job.
3231    ///
3232    /// # Example
3233    ///
3234    /// ```rust,ignore
3235    /// use openai_ergonomic::Client;
3236    ///
3237    /// # async fn example() -> openai_ergonomic::Result<()> {
3238    /// let client = Client::from_env()?;
3239    /// let events = client.fine_tuning().list_events("ftjob-123", None, Some(20)).await?;
3240    /// println!("Found {} events", events.data.len());
3241    /// # Ok(())
3242    /// # }
3243    /// ```
3244    pub async fn list_events(
3245        &self,
3246        job_id: impl Into<String>,
3247        after: Option<&str>,
3248        limit: Option<i32>,
3249    ) -> Result<ListFineTuningJobEventsResponse> {
3250        let id = job_id.into();
3251
3252        // Prepare interceptor context
3253        let mut state = T::default();
3254        let operation = operation_names::FINE_TUNING_LIST_EVENTS;
3255        let model = "fine-tuning";
3256        let request_json =
3257            format!("{{\"job_id\":\"{id}\",\"after\":{after:?},\"limit\":{limit:?}}}");
3258
3259        // Call before_request hook
3260        self.call_before_request(operation, model, &request_json, &mut state)
3261            .await?;
3262
3263        let start_time = Instant::now();
3264
3265        // Make the API call
3266        let response = match fine_tuning_api::list_fine_tuning_events()
3267            .configuration(&self.client.base_configuration)
3268            .fine_tuning_job_id(&id)
3269            .maybe_after(after)
3270            .maybe_limit(limit)
3271            .call()
3272            .await
3273        {
3274            Ok(resp) => resp,
3275            Err(e) => {
3276                let error = self
3277                    .handle_api_error(e, operation, model, &request_json, &state)
3278                    .await;
3279                return Err(error);
3280            }
3281        };
3282
3283        let duration = start_time.elapsed();
3284
3285        // Call after_response hook
3286        self.call_after_response(
3287            &response,
3288            operation,
3289            model,
3290            &request_json,
3291            &state,
3292            duration,
3293            None,
3294            None,
3295        )
3296        .await;
3297
3298        Ok(response)
3299    }
3300
3301    /// List checkpoints for a fine-tuning job.
3302    ///
3303    /// # Example
3304    ///
3305    /// ```rust,ignore
3306    /// use openai_ergonomic::Client;
3307    ///
3308    /// # async fn example() -> openai_ergonomic::Result<()> {
3309    /// let client = Client::from_env()?;
3310    /// let checkpoints = client.fine_tuning().list_checkpoints("ftjob-123", None, Some(10)).await?;
3311    /// println!("Found {} checkpoints", checkpoints.data.len());
3312    /// # Ok(())
3313    /// # }
3314    /// ```
3315    pub async fn list_checkpoints(
3316        &self,
3317        job_id: impl Into<String>,
3318        after: Option<&str>,
3319        limit: Option<i32>,
3320    ) -> Result<ListFineTuningJobCheckpointsResponse> {
3321        let id = job_id.into();
3322
3323        // Prepare interceptor context
3324        let mut state = T::default();
3325        let operation = operation_names::FINE_TUNING_LIST_CHECKPOINTS;
3326        let model = "fine-tuning";
3327        let request_json =
3328            format!("{{\"job_id\":\"{id}\",\"after\":{after:?},\"limit\":{limit:?}}}");
3329
3330        // Call before_request hook
3331        self.call_before_request(operation, model, &request_json, &mut state)
3332            .await?;
3333
3334        let start_time = Instant::now();
3335
3336        // Make the API call
3337        let response = match fine_tuning_api::list_fine_tuning_job_checkpoints()
3338            .configuration(&self.client.base_configuration)
3339            .fine_tuning_job_id(&id)
3340            .maybe_after(after)
3341            .maybe_limit(limit)
3342            .call()
3343            .await
3344        {
3345            Ok(resp) => resp,
3346            Err(e) => {
3347                let error = self
3348                    .handle_api_error(e, operation, model, &request_json, &state)
3349                    .await;
3350                return Err(error);
3351            }
3352        };
3353
3354        let duration = start_time.elapsed();
3355
3356        // Call after_response hook
3357        self.call_after_response(
3358            &response,
3359            operation,
3360            model,
3361            &request_json,
3362            &state,
3363            duration,
3364            None,
3365            None,
3366        )
3367        .await;
3368
3369        Ok(response)
3370    }
3371}
3372
3373fn map_api_error<T>(error: ApiError<T>) -> Error {
3374    match error {
3375        ApiError::Reqwest(err) => Error::Http(err),
3376        ApiError::ReqwestMiddleware(err) => {
3377            Error::Internal(format!("reqwest middleware error: {err}"))
3378        }
3379        ApiError::Serde(err) => Error::Json(err),
3380        ApiError::Io(err) => Error::File(err),
3381        ApiError::ResponseError(response) => Error::Api {
3382            status: response.status.as_u16(),
3383            message: response.content,
3384            error_type: None,
3385            error_code: None,
3386        },
3387    }
3388}
3389
#[cfg(test)]
/// Unit tests for the error mapper and the various request builders.
mod tests {
    use super::*;
    use openai_client_base::apis::{Error as BaseError, ResponseContent};

    // Verify HTTP error responses are translated into `Error::Api` with the
    // status code and body preserved.
    #[test]
    fn map_api_error_converts_response() {
        let response = ResponseContent {
            status: reqwest::StatusCode::BAD_REQUEST,
            content: "bad request".to_string(),
            entity: Option::<()>::None,
        };

        let error = map_api_error(BaseError::ResponseError(response));
        match error {
            Error::Api {
                status, message, ..
            } => {
                assert_eq!(status, 400);
                assert!(message.contains("bad request"));
            }
            other => panic!("expected API error, got {other:?}"),
        }
    }

    // --- Moderation builder tests ---

    #[test]
    fn test_moderation_builder_creation() {
        use crate::builders::moderations::ModerationBuilder;

        let builder = ModerationBuilder::new("Test content");
        let request = builder.build().unwrap();

        assert_eq!(request.input, "Test content");
        assert!(request.model.is_none());
    }

    #[test]
    fn test_moderation_builder_with_model() {
        use crate::builders::moderations::ModerationBuilder;

        let builder = ModerationBuilder::new("Test content").model("text-moderation-stable");
        let request = builder.build().unwrap();

        assert_eq!(request.input, "Test content");
        assert_eq!(request.model, Some("text-moderation-stable".to_string()));
    }

    #[test]
    fn test_moderation_builder_array_input() {
        use crate::builders::moderations::ModerationBuilder;

        let inputs = vec!["First text".to_string(), "Second text".to_string()];
        let builder = ModerationBuilder::new_array(inputs);
        let request = builder.build().unwrap();

        // Array inputs are joined with newlines
        assert_eq!(request.input, "First text\nSecond text");
    }

    // --- File builder tests ---

    #[test]
    fn test_file_upload_builder_creation() {
        use crate::builders::files::{FilePurpose, FileUploadBuilder};

        let content = b"test content".to_vec();
        let builder = FileUploadBuilder::new("test.txt", FilePurpose::Assistants, content.clone());

        assert_eq!(builder.filename(), "test.txt");
        assert_eq!(builder.content(), content.as_slice());
        assert_eq!(builder.content_size(), content.len());
        assert!(!builder.is_empty());
    }

    #[test]
    fn test_file_upload_builder_from_text() {
        use crate::builders::files::{FilePurpose, FileUploadBuilder};

        let builder =
            FileUploadBuilder::from_text("hello.txt", FilePurpose::FineTune, "Hello, world!");

        assert_eq!(builder.filename(), "hello.txt");
        assert_eq!(
            builder.content_as_string(),
            Some("Hello, world!".to_string())
        );
        assert!(!builder.is_empty());
    }

    #[test]
    fn test_file_list_builder() {
        use crate::builders::files::{FileListBuilder, FileOrder, FilePurpose};

        let builder = FileListBuilder::new()
            .purpose(FilePurpose::Assistants)
            .limit(10)
            .order(FileOrder::Desc);

        assert!(builder.purpose_ref().is_some());
        assert_eq!(builder.limit_ref(), Some(10));
        assert!(builder.order_ref().is_some());
    }

    #[test]
    fn test_file_retrieval_builder() {
        use crate::builders::files::FileRetrievalBuilder;

        let builder = FileRetrievalBuilder::new("file-123");
        assert_eq!(builder.file_id(), "file-123");
    }

    #[test]
    fn test_file_delete_builder() {
        use crate::builders::files::FileDeleteBuilder;

        let builder = FileDeleteBuilder::new("file-456");
        assert_eq!(builder.file_id(), "file-456");
    }

    // Confirms each purpose variant renders its exact API string.
    #[test]
    fn test_file_purpose_display() {
        use crate::builders::files::FilePurpose;

        assert_eq!(FilePurpose::FineTune.to_string(), "fine-tune");
        assert_eq!(FilePurpose::Assistants.to_string(), "assistants");
        assert_eq!(FilePurpose::Vision.to_string(), "vision");
        assert_eq!(FilePurpose::Batch.to_string(), "batch");
    }

    // --- Vector store builder tests ---

    #[test]
    fn test_vector_store_builder_basic() {
        use crate::builders::vector_stores::VectorStoreBuilder;

        let builder = VectorStoreBuilder::new()
            .name("Test Store")
            .add_file("file-1")
            .metadata("key", "value");

        assert_eq!(builder.name_ref(), Some("Test Store"));
        assert_eq!(builder.file_count(), 1);
        assert!(builder.has_files());
        assert_eq!(builder.metadata_ref().len(), 1);
    }

    #[test]
    fn test_vector_store_builder_with_expiration() {
        use crate::builders::vector_stores::VectorStoreBuilder;

        let builder = VectorStoreBuilder::new()
            .name("Temp Store")
            .expires_after_days(30);

        assert_eq!(builder.name_ref(), Some("Temp Store"));
        assert!(builder.expires_after_ref().is_some());
        assert_eq!(builder.expires_after_ref().unwrap().days, 30);
    }

    #[test]
    fn test_vector_store_builder_multiple_files() {
        use crate::builders::vector_stores::VectorStoreBuilder;

        let files = vec!["file-1".to_string(), "file-2".to_string()];
        let builder = VectorStoreBuilder::new()
            .name("Multi-File Store")
            .file_ids(files.clone());

        assert_eq!(builder.file_ids_ref(), files.as_slice());
        assert_eq!(builder.file_count(), 2);
    }

    #[test]
    fn test_vector_store_file_builder() {
        use crate::builders::vector_stores::VectorStoreFileBuilder;

        let builder = VectorStoreFileBuilder::new("vs-123", "file-456");
        assert_eq!(builder.vector_store_id(), "vs-123");
        assert_eq!(builder.file_id(), "file-456");
    }

    #[test]
    fn test_vector_store_search_builder() {
        use crate::builders::vector_stores::VectorStoreSearchBuilder;

        let builder = VectorStoreSearchBuilder::new("vs-123", "test query")
            .limit(10)
            .filter("category", "docs");

        assert_eq!(builder.vector_store_id(), "vs-123");
        assert_eq!(builder.query(), "test query");
        assert_eq!(builder.limit_ref(), Some(10));
        assert_eq!(builder.filter_ref().len(), 1);
    }

    // A freshly constructed search builder starts with no limit and no filters.
    #[test]
    fn test_vector_store_search_builder_default() {
        use crate::builders::vector_stores::VectorStoreSearchBuilder;

        let builder = VectorStoreSearchBuilder::new("vs-123", "query");
        assert!(builder.limit_ref().is_none());
        assert!(builder.filter_ref().is_empty());
    }
}
3590
// Endpoint-scoped client types for the different API areas; each wraps a
// reference to the main `Client`.
// TODO: Some of these still need their builder integrations fleshed out.
3593
/// Client for assistants API.
#[derive(Debug, Clone, Copy)]
pub struct AssistantsClient<'a, T = ()> {
    // Parent client providing base configuration and interceptor hooks.
    client: &'a Client<T>,
}
3599
3600impl<T: Default + Send + Sync> AssistantsClient<'_, T> {
3601    /// Create a new assistant.
3602    ///
3603    /// # Example
3604    ///
3605    /// ```rust,ignore
3606    /// use openai_ergonomic::Client;
3607    /// use openai_ergonomic::builders::assistants::AssistantBuilder;
3608    ///
3609    /// # async fn example() -> openai_ergonomic::Result<()> {
3610    /// let client = Client::from_env()?;
3611    /// let builder = AssistantBuilder::new("gpt-4")
3612    ///     .name("Math Tutor")
3613    ///     .instructions("You are a helpful math tutor.");
3614    /// let assistant = client.assistants().create(builder).await?;
3615    /// println!("Created assistant: {}", assistant.id);
3616    /// # Ok(())
3617    /// # }
3618    /// ```
3619    pub async fn create(&self, builder: AssistantBuilder) -> Result<AssistantObject> {
3620        let request = builder.build()?;
3621
3622        // Prepare interceptor context
3623        let mut state = T::default();
3624        let operation = operation_names::ASSISTANT_CREATE;
3625        let model = request.model.clone();
3626        let request_json = serde_json::to_string(&request).unwrap_or_default();
3627
3628        // Call before_request hook
3629        self.call_before_request(operation, &model, &request_json, &mut state)
3630            .await?;
3631
3632        let start_time = Instant::now();
3633
3634        // Make the API call
3635        let response = match assistants_api::create_assistant()
3636            .configuration(&self.client.base_configuration)
3637            .create_assistant_request(request)
3638            .call()
3639            .await
3640        {
3641            Ok(resp) => resp,
3642            Err(e) => {
3643                let error = self
3644                    .handle_api_error(e, operation, &model, &request_json, &state)
3645                    .await;
3646                return Err(error);
3647            }
3648        };
3649
3650        let duration = start_time.elapsed();
3651
3652        // Call after_response hook
3653        self.call_after_response(
3654            &response,
3655            operation,
3656            &model,
3657            &request_json,
3658            &state,
3659            duration,
3660            None,
3661            None,
3662        )
3663        .await;
3664
3665        Ok(response)
3666    }
3667
3668    /// List assistants with pagination.
3669    ///
3670    /// # Example
3671    ///
3672    /// ```rust,ignore
3673    /// use openai_ergonomic::Client;
3674    ///
3675    /// # async fn example() -> openai_ergonomic::Result<()> {
3676    /// let client = Client::from_env()?;
3677    /// let response = client.assistants().list(Some(20), None, None, None).await?;
3678    /// println!("Found {} assistants", response.data.len());
3679    /// # Ok(())
3680    /// # }
3681    /// ```
3682    pub async fn list(
3683        &self,
3684        limit: Option<i32>,
3685        order: Option<&str>,
3686        after: Option<&str>,
3687        before: Option<&str>,
3688    ) -> Result<ListAssistantsResponse> {
3689        // Prepare interceptor context
3690        let mut state = T::default();
3691        let operation = operation_names::ASSISTANT_LIST;
3692        let model = "assistants";
3693        let request_json = format!(
3694            "{{\"limit\":{limit:?},\"order\":{order:?},\"after\":{after:?},\"before\":{before:?}}}"
3695        );
3696
3697        // Call before_request hook
3698        self.call_before_request(operation, model, &request_json, &mut state)
3699            .await?;
3700
3701        let start_time = Instant::now();
3702
3703        // Make the API call
3704        let response = match assistants_api::list_assistants()
3705            .configuration(&self.client.base_configuration)
3706            .maybe_limit(limit)
3707            .maybe_order(order)
3708            .maybe_after(after)
3709            .maybe_before(before)
3710            .call()
3711            .await
3712        {
3713            Ok(resp) => resp,
3714            Err(e) => {
3715                let error = self
3716                    .handle_api_error(e, operation, model, &request_json, &state)
3717                    .await;
3718                return Err(error);
3719            }
3720        };
3721
3722        let duration = start_time.elapsed();
3723
3724        // Call after_response hook
3725        self.call_after_response(
3726            &response,
3727            operation,
3728            model,
3729            &request_json,
3730            &state,
3731            duration,
3732            None,
3733            None,
3734        )
3735        .await;
3736
3737        Ok(response)
3738    }
3739
3740    /// Get an assistant by ID.
3741    ///
3742    /// # Example
3743    ///
3744    /// ```rust,ignore
3745    /// use openai_ergonomic::Client;
3746    ///
3747    /// # async fn example() -> openai_ergonomic::Result<()> {
3748    /// let client = Client::from_env()?;
3749    /// let assistant = client.assistants().get("asst_123").await?;
3750    /// println!("Assistant: {}", assistant.name.unwrap_or_default());
3751    /// # Ok(())
3752    /// # }
3753    /// ```
3754    pub async fn get(&self, assistant_id: impl Into<String>) -> Result<AssistantObject> {
3755        let id = assistant_id.into();
3756
3757        // Prepare interceptor context
3758        let mut state = T::default();
3759        let operation = operation_names::ASSISTANT_RETRIEVE;
3760        let model = "assistants";
3761        let request_json = format!("{{\"assistant_id\":\"{id}\"}}");
3762
3763        // Call before_request hook
3764        self.call_before_request(operation, model, &request_json, &mut state)
3765            .await?;
3766
3767        let start_time = Instant::now();
3768
3769        // Make the API call
3770        let response = match assistants_api::get_assistant()
3771            .configuration(&self.client.base_configuration)
3772            .assistant_id(&id)
3773            .call()
3774            .await
3775        {
3776            Ok(resp) => resp,
3777            Err(e) => {
3778                let error = self
3779                    .handle_api_error(e, operation, model, &request_json, &state)
3780                    .await;
3781                return Err(error);
3782            }
3783        };
3784
3785        let duration = start_time.elapsed();
3786
3787        // Call after_response hook
3788        self.call_after_response(
3789            &response,
3790            operation,
3791            model,
3792            &request_json,
3793            &state,
3794            duration,
3795            None,
3796            None,
3797        )
3798        .await;
3799
3800        Ok(response)
3801    }
3802
    /// Update an assistant.
    ///
    /// Builds a `CreateAssistantRequest` from the builder, converts it into a
    /// `ModifyAssistantRequest`, and POSTs it to the modify-assistant endpoint,
    /// running the interceptor before/after/error hooks around the call.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    /// use openai_ergonomic::builders::assistants::AssistantBuilder;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let builder = AssistantBuilder::new("gpt-4")
    ///     .name("Updated Name")
    ///     .instructions("Updated instructions");
    /// let assistant = client.assistants().update("asst_123", builder).await?;
    /// println!("Updated: {}", assistant.id);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn update(
        &self,
        assistant_id: impl Into<String>,
        builder: AssistantBuilder,
    ) -> Result<AssistantObject> {
        use openai_client_base::models::ModifyAssistantRequest;

        let id = assistant_id.into();
        // The builder produces a *create* request; its fields are copied over
        // to a modify request below.
        let request_data = builder.build()?;

        // Convert CreateAssistantRequest to ModifyAssistantRequest
        let mut request = ModifyAssistantRequest::new();
        request.model = Some(request_data.model);
        // Convert Box<CreateAssistantRequestName> to Option<String> by extracting text
        // NOTE(review): the `Null` variant maps to `None` (field absent), not
        // `Some(None)` (explicit null) — verify this matches the intended
        // "clear this field" semantics of the modify endpoint.
        request.name = request_data.name.and_then(|n| match *n {
            openai_client_base::models::CreateAssistantRequestName::Text(text) => Some(Some(text)),
            openai_client_base::models::CreateAssistantRequestName::Null => None,
        });
        // Same Text-vs-Null unwrapping for the description field.
        request.description = request_data.description.and_then(|d| match *d {
            openai_client_base::models::CreateAssistantRequestDescription::Text(text) => {
                Some(Some(text))
            }
            openai_client_base::models::CreateAssistantRequestDescription::Null => None,
        });
        // ...and for the instructions field.
        request.instructions = request_data.instructions.and_then(|i| match *i {
            openai_client_base::models::CreateAssistantRequestInstructions::Text(text) => {
                Some(Some(text))
            }
            openai_client_base::models::CreateAssistantRequestInstructions::Null => None,
        });
        request.tools = request_data.tools;
        request.metadata = request_data.metadata;

        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::ASSISTANT_UPDATE;
        // Use the request's model for telemetry when present, otherwise a
        // generic "assistants" label.
        let model = request
            .model
            .as_ref()
            .map_or_else(|| "assistants".to_string(), Clone::clone);
        // Serialization failure degrades to an empty JSON snapshot rather
        // than failing the call.
        let request_json = serde_json::to_string(&request).unwrap_or_default();

        // Call before_request hook
        self.call_before_request(operation, &model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match assistants_api::modify_assistant()
            .configuration(&self.client.base_configuration)
            .assistant_id(&id)
            .modify_assistant_request(request)
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, &model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            &model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }
3903
3904    /// Delete an assistant.
3905    ///
3906    /// # Example
3907    ///
3908    /// ```rust,ignore
3909    /// use openai_ergonomic::Client;
3910    ///
3911    /// # async fn example() -> openai_ergonomic::Result<()> {
3912    /// let client = Client::from_env()?;
3913    /// let response = client.assistants().delete("asst_123").await?;
3914    /// println!("Deleted: {}", response.deleted);
3915    /// # Ok(())
3916    /// # }
3917    /// ```
3918    pub async fn delete(&self, assistant_id: impl Into<String>) -> Result<DeleteAssistantResponse> {
3919        let id = assistant_id.into();
3920
3921        // Prepare interceptor context
3922        let mut state = T::default();
3923        let operation = operation_names::ASSISTANT_DELETE;
3924        let model = "assistants";
3925        let request_json = format!("{{\"assistant_id\":\"{id}\"}}");
3926
3927        // Call before_request hook
3928        self.call_before_request(operation, model, &request_json, &mut state)
3929            .await?;
3930
3931        let start_time = Instant::now();
3932
3933        // Make the API call
3934        let response = match assistants_api::delete_assistant()
3935            .configuration(&self.client.base_configuration)
3936            .assistant_id(&id)
3937            .call()
3938            .await
3939        {
3940            Ok(resp) => resp,
3941            Err(e) => {
3942                let error = self
3943                    .handle_api_error(e, operation, model, &request_json, &state)
3944                    .await;
3945                return Err(error);
3946            }
3947        };
3948
3949        let duration = start_time.elapsed();
3950
3951        // Call after_response hook
3952        self.call_after_response(
3953            &response,
3954            operation,
3955            model,
3956            &request_json,
3957            &state,
3958            duration,
3959            None,
3960            None,
3961        )
3962        .await;
3963
3964        Ok(response)
3965    }
3966
3967    /// Create a run on a thread.
3968    ///
3969    /// # Example
3970    ///
3971    /// ```rust,ignore
3972    /// use openai_ergonomic::Client;
3973    /// use openai_ergonomic::builders::assistants::RunBuilder;
3974    ///
3975    /// # async fn example() -> openai_ergonomic::Result<()> {
3976    /// let client = Client::from_env()?;
3977    /// let builder = RunBuilder::new("asst_123");
3978    /// let run = client.assistants().create_run("thread_123", builder).await?;
3979    /// println!("Run created: {}", run.id);
3980    /// # Ok(())
3981    /// # }
3982    /// ```
3983    pub async fn create_run(
3984        &self,
3985        thread_id: impl Into<String>,
3986        builder: RunBuilder,
3987    ) -> Result<RunObject> {
3988        let thread_id = thread_id.into();
3989        let request = builder.build()?;
3990
3991        // Prepare interceptor context
3992        let mut state = T::default();
3993        let operation = operation_names::RUN_CREATE;
3994        let model = request
3995            .model
3996            .as_ref()
3997            .map_or_else(|| "runs".to_string(), Clone::clone);
3998        let request_json = serde_json::to_string(&request).unwrap_or_default();
3999
4000        // Call before_request hook
4001        self.call_before_request(operation, &model, &request_json, &mut state)
4002            .await?;
4003
4004        let start_time = Instant::now();
4005
4006        // Make the API call
4007        let response = match assistants_api::create_run()
4008            .configuration(&self.client.base_configuration)
4009            .thread_id(&thread_id)
4010            .create_run_request(request)
4011            .call()
4012            .await
4013        {
4014            Ok(resp) => resp,
4015            Err(e) => {
4016                let error = self
4017                    .handle_api_error(e, operation, &model, &request_json, &state)
4018                    .await;
4019                return Err(error);
4020            }
4021        };
4022
4023        let duration = start_time.elapsed();
4024
4025        // Call after_response hook
4026        self.call_after_response(
4027            &response,
4028            operation,
4029            &model,
4030            &request_json,
4031            &state,
4032            duration,
4033            None,
4034            None,
4035        )
4036        .await;
4037
4038        Ok(response)
4039    }
4040
4041    /// List runs on a thread.
4042    ///
4043    /// # Example
4044    ///
4045    /// ```rust,ignore
4046    /// use openai_ergonomic::Client;
4047    ///
4048    /// # async fn example() -> openai_ergonomic::Result<()> {
4049    /// let client = Client::from_env()?;
4050    /// let response = client.assistants().list_runs("thread_123", None, None, None, None).await?;
4051    /// println!("Found {} runs", response.data.len());
4052    /// # Ok(())
4053    /// # }
4054    /// ```
4055    pub async fn list_runs(
4056        &self,
4057        thread_id: impl Into<String>,
4058        limit: Option<i32>,
4059        order: Option<&str>,
4060        after: Option<&str>,
4061        before: Option<&str>,
4062    ) -> Result<ListRunsResponse> {
4063        let thread_id = thread_id.into();
4064
4065        // Prepare interceptor context
4066        let mut state = T::default();
4067        let operation = operation_names::RUN_LIST;
4068        let model = "runs";
4069        let request_json = format!(
4070            "{{\"thread_id\":\"{thread_id}\",\"limit\":{limit:?},\"order\":{order:?},\"after\":{after:?},\"before\":{before:?}}}"
4071        );
4072
4073        // Call before_request hook
4074        self.call_before_request(operation, model, &request_json, &mut state)
4075            .await?;
4076
4077        let start_time = Instant::now();
4078
4079        // Make the API call
4080        let response = match assistants_api::list_runs()
4081            .configuration(&self.client.base_configuration)
4082            .thread_id(&thread_id)
4083            .maybe_limit(limit)
4084            .maybe_order(order)
4085            .maybe_after(after)
4086            .maybe_before(before)
4087            .call()
4088            .await
4089        {
4090            Ok(resp) => resp,
4091            Err(e) => {
4092                let error = self
4093                    .handle_api_error(e, operation, model, &request_json, &state)
4094                    .await;
4095                return Err(error);
4096            }
4097        };
4098
4099        let duration = start_time.elapsed();
4100
4101        // Call after_response hook
4102        self.call_after_response(
4103            &response,
4104            operation,
4105            model,
4106            &request_json,
4107            &state,
4108            duration,
4109            None,
4110            None,
4111        )
4112        .await;
4113
4114        Ok(response)
4115    }
4116
4117    /// Get a run.
4118    ///
4119    /// # Example
4120    ///
4121    /// ```rust,ignore
4122    /// use openai_ergonomic::Client;
4123    ///
4124    /// # async fn example() -> openai_ergonomic::Result<()> {
4125    /// let client = Client::from_env()?;
4126    /// let run = client.assistants().get_run("thread_123", "run_123").await?;
4127    /// println!("Run status: {}", run.status);
4128    /// # Ok(())
4129    /// # }
4130    /// ```
4131    pub async fn get_run(
4132        &self,
4133        thread_id: impl Into<String>,
4134        run_id: impl Into<String>,
4135    ) -> Result<RunObject> {
4136        let thread_id = thread_id.into();
4137        let run_id = run_id.into();
4138
4139        // Prepare interceptor context
4140        let mut state = T::default();
4141        let operation = operation_names::RUN_RETRIEVE;
4142        let model = "runs";
4143        let request_json = format!("{{\"thread_id\":\"{thread_id}\",\"run_id\":\"{run_id}\"}}");
4144
4145        // Call before_request hook
4146        self.call_before_request(operation, model, &request_json, &mut state)
4147            .await?;
4148
4149        let start_time = Instant::now();
4150
4151        // Make the API call
4152        let response = match assistants_api::get_run()
4153            .configuration(&self.client.base_configuration)
4154            .thread_id(&thread_id)
4155            .run_id(&run_id)
4156            .call()
4157            .await
4158        {
4159            Ok(resp) => resp,
4160            Err(e) => {
4161                let error = self
4162                    .handle_api_error(e, operation, model, &request_json, &state)
4163                    .await;
4164                return Err(error);
4165            }
4166        };
4167
4168        let duration = start_time.elapsed();
4169
4170        // Call after_response hook
4171        self.call_after_response(
4172            &response,
4173            operation,
4174            model,
4175            &request_json,
4176            &state,
4177            duration,
4178            None,
4179            None,
4180        )
4181        .await;
4182
4183        Ok(response)
4184    }
4185
4186    /// Cancel a run.
4187    ///
4188    /// # Example
4189    ///
4190    /// ```rust,ignore
4191    /// use openai_ergonomic::Client;
4192    ///
4193    /// # async fn example() -> openai_ergonomic::Result<()> {
4194    /// let client = Client::from_env()?;
4195    /// let run = client.assistants().cancel_run("thread_123", "run_123").await?;
4196    /// println!("Run cancelled: {}", run.status);
4197    /// # Ok(())
4198    /// # }
4199    /// ```
4200    pub async fn cancel_run(
4201        &self,
4202        thread_id: impl Into<String>,
4203        run_id: impl Into<String>,
4204    ) -> Result<RunObject> {
4205        let thread_id = thread_id.into();
4206        let run_id = run_id.into();
4207
4208        // Prepare interceptor context
4209        let mut state = T::default();
4210        let operation = operation_names::RUN_CANCEL;
4211        let model = "runs";
4212        let request_json = format!("{{\"thread_id\":\"{thread_id}\",\"run_id\":\"{run_id}\"}}");
4213
4214        // Call before_request hook
4215        self.call_before_request(operation, model, &request_json, &mut state)
4216            .await?;
4217
4218        let start_time = Instant::now();
4219
4220        // Make the API call
4221        let response = match assistants_api::cancel_run()
4222            .configuration(&self.client.base_configuration)
4223            .thread_id(&thread_id)
4224            .run_id(&run_id)
4225            .call()
4226            .await
4227        {
4228            Ok(resp) => resp,
4229            Err(e) => {
4230                let error = self
4231                    .handle_api_error(e, operation, model, &request_json, &state)
4232                    .await;
4233                return Err(error);
4234            }
4235        };
4236
4237        let duration = start_time.elapsed();
4238
4239        // Call after_response hook
4240        self.call_after_response(
4241            &response,
4242            operation,
4243            model,
4244            &request_json,
4245            &state,
4246            duration,
4247            None,
4248            None,
4249        )
4250        .await;
4251
4252        Ok(response)
4253    }
4254
4255    /// Submit tool outputs to a run.
4256    ///
4257    /// # Example
4258    ///
4259    /// ```rust,ignore
4260    /// use openai_ergonomic::Client;
4261    ///
4262    /// # async fn example() -> openai_ergonomic::Result<()> {
4263    /// let client = Client::from_env()?;
4264    /// let outputs = vec![
4265    ///     SubmitToolOutputsRunRequestToolOutputsInner::new("call_123", "output data")
4266    /// ];
4267    /// let run = client.assistants().submit_tool_outputs("thread_123", "run_123", outputs).await?;
4268    /// println!("Tool outputs submitted: {}", run.id);
4269    /// # Ok(())
4270    /// # }
4271    /// ```
4272    pub async fn submit_tool_outputs(
4273        &self,
4274        thread_id: impl Into<String>,
4275        run_id: impl Into<String>,
4276        tool_outputs: Vec<SubmitToolOutputsRunRequestToolOutputsInner>,
4277    ) -> Result<RunObject> {
4278        use openai_client_base::models::SubmitToolOutputsRunRequest;
4279
4280        let thread_id = thread_id.into();
4281        let run_id = run_id.into();
4282        let request = SubmitToolOutputsRunRequest::new(tool_outputs);
4283
4284        // Prepare interceptor context
4285        let mut state = T::default();
4286        let operation = operation_names::RUN_SUBMIT_TOOL_OUTPUTS;
4287        let model = "runs";
4288        let request_json = serde_json::to_string(&request).unwrap_or_default();
4289
4290        // Call before_request hook
4291        self.call_before_request(operation, model, &request_json, &mut state)
4292            .await?;
4293
4294        let start_time = Instant::now();
4295
4296        // Make the API call
4297        let response = match assistants_api::submit_tool_ouputs_to_run()
4298            .configuration(&self.client.base_configuration)
4299            .thread_id(&thread_id)
4300            .run_id(&run_id)
4301            .submit_tool_outputs_run_request(request)
4302            .call()
4303            .await
4304        {
4305            Ok(resp) => resp,
4306            Err(e) => {
4307                let error = self
4308                    .handle_api_error(e, operation, model, &request_json, &state)
4309                    .await;
4310                return Err(error);
4311            }
4312        };
4313
4314        let duration = start_time.elapsed();
4315
4316        // Call after_response hook
4317        self.call_after_response(
4318            &response,
4319            operation,
4320            model,
4321            &request_json,
4322            &state,
4323            duration,
4324            None,
4325            None,
4326        )
4327        .await;
4328
4329        Ok(response)
4330    }
4331
4332    /// Create a message on a thread.
4333    ///
4334    /// # Example
4335    ///
4336    /// ```rust,ignore
4337    /// use openai_ergonomic::Client;
4338    /// use openai_ergonomic::builders::assistants::MessageBuilder;
4339    ///
4340    /// # async fn example() -> openai_ergonomic::Result<()> {
4341    /// let client = Client::from_env()?;
4342    /// let builder = MessageBuilder::new("user", "Hello, assistant!");
4343    /// let message = client.assistants().create_message("thread_123", builder).await?;
4344    /// println!("Message created: {}", message.id);
4345    /// # Ok(())
4346    /// # }
4347    /// ```
4348    pub async fn create_message(
4349        &self,
4350        thread_id: impl Into<String>,
4351        builder: MessageBuilder,
4352    ) -> Result<MessageObject> {
4353        let thread_id = thread_id.into();
4354        let request = builder.build()?;
4355
4356        // Prepare interceptor context
4357        let mut state = T::default();
4358        let operation = operation_names::MESSAGE_CREATE;
4359        let model = "messages";
4360        let request_json = serde_json::to_string(&request).unwrap_or_default();
4361
4362        // Call before_request hook
4363        self.call_before_request(operation, model, &request_json, &mut state)
4364            .await?;
4365
4366        let start_time = Instant::now();
4367
4368        // Make the API call
4369        let response = match assistants_api::create_message()
4370            .configuration(&self.client.base_configuration)
4371            .thread_id(&thread_id)
4372            .create_message_request(request)
4373            .call()
4374            .await
4375        {
4376            Ok(resp) => resp,
4377            Err(e) => {
4378                let error = self
4379                    .handle_api_error(e, operation, model, &request_json, &state)
4380                    .await;
4381                return Err(error);
4382            }
4383        };
4384
4385        let duration = start_time.elapsed();
4386
4387        // Call after_response hook
4388        self.call_after_response(
4389            &response,
4390            operation,
4391            model,
4392            &request_json,
4393            &state,
4394            duration,
4395            None,
4396            None,
4397        )
4398        .await;
4399
4400        Ok(response)
4401    }
4402
4403    /// List messages on a thread.
4404    ///
4405    /// # Example
4406    ///
4407    /// ```rust,ignore
4408    /// use openai_ergonomic::Client;
4409    ///
4410    /// # async fn example() -> openai_ergonomic::Result<()> {
4411    /// let client = Client::from_env()?;
4412    /// let response = client.assistants().list_messages("thread_123", None, None, None, None, None).await?;
4413    /// println!("Found {} messages", response.data.len());
4414    /// # Ok(())
4415    /// # }
4416    /// ```
4417    pub async fn list_messages(
4418        &self,
4419        thread_id: impl Into<String>,
4420        limit: Option<i32>,
4421        order: Option<&str>,
4422        after: Option<&str>,
4423        before: Option<&str>,
4424        run_id: Option<&str>,
4425    ) -> Result<ListMessagesResponse> {
4426        let thread_id = thread_id.into();
4427
4428        // Prepare interceptor context
4429        let mut state = T::default();
4430        let operation = operation_names::MESSAGE_LIST;
4431        let model = "messages";
4432        let request_json = format!("{{\"thread_id\":\"{thread_id}\",\"limit\":{limit:?},\"order\":{order:?},\"after\":{after:?},\"before\":{before:?},\"run_id\":{run_id:?}}}");
4433
4434        // Call before_request hook
4435        self.call_before_request(operation, model, &request_json, &mut state)
4436            .await?;
4437
4438        let start_time = Instant::now();
4439
4440        // Make the API call
4441        let response = match assistants_api::list_messages()
4442            .configuration(&self.client.base_configuration)
4443            .thread_id(&thread_id)
4444            .maybe_limit(limit)
4445            .maybe_order(order)
4446            .maybe_after(after)
4447            .maybe_before(before)
4448            .maybe_run_id(run_id)
4449            .call()
4450            .await
4451        {
4452            Ok(resp) => resp,
4453            Err(e) => {
4454                let error = self
4455                    .handle_api_error(e, operation, model, &request_json, &state)
4456                    .await;
4457                return Err(error);
4458            }
4459        };
4460
4461        let duration = start_time.elapsed();
4462
4463        // Call after_response hook
4464        self.call_after_response(
4465            &response,
4466            operation,
4467            model,
4468            &request_json,
4469            &state,
4470            duration,
4471            None,
4472            None,
4473        )
4474        .await;
4475
4476        Ok(response)
4477    }
4478
4479    /// Get a message.
4480    ///
4481    /// # Example
4482    ///
4483    /// ```rust,ignore
4484    /// use openai_ergonomic::Client;
4485    ///
4486    /// # async fn example() -> openai_ergonomic::Result<()> {
4487    /// let client = Client::from_env()?;
4488    /// let message = client.assistants().get_message("thread_123", "msg_123").await?;
4489    /// println!("Message role: {}", message.role);
4490    /// # Ok(())
4491    /// # }
4492    /// ```
4493    pub async fn get_message(
4494        &self,
4495        thread_id: impl Into<String>,
4496        message_id: impl Into<String>,
4497    ) -> Result<MessageObject> {
4498        let thread_id = thread_id.into();
4499        let message_id = message_id.into();
4500
4501        // Prepare interceptor context
4502        let mut state = T::default();
4503        let operation = operation_names::MESSAGE_RETRIEVE;
4504        let model = "messages";
4505        let request_json =
4506            format!("{{\"thread_id\":\"{thread_id}\",\"message_id\":\"{message_id}\"}}");
4507
4508        // Call before_request hook
4509        self.call_before_request(operation, model, &request_json, &mut state)
4510            .await?;
4511
4512        let start_time = Instant::now();
4513
4514        // Make the API call
4515        let response = match assistants_api::get_message()
4516            .configuration(&self.client.base_configuration)
4517            .thread_id(&thread_id)
4518            .message_id(&message_id)
4519            .call()
4520            .await
4521        {
4522            Ok(resp) => resp,
4523            Err(e) => {
4524                let error = self
4525                    .handle_api_error(e, operation, model, &request_json, &state)
4526                    .await;
4527                return Err(error);
4528            }
4529        };
4530
4531        let duration = start_time.elapsed();
4532
4533        // Call after_response hook
4534        self.call_after_response(
4535            &response,
4536            operation,
4537            model,
4538            &request_json,
4539            &state,
4540            duration,
4541            None,
4542            None,
4543        )
4544        .await;
4545
4546        Ok(response)
4547    }
4548
4549    /// List run steps.
4550    ///
4551    /// # Example
4552    ///
4553    /// ```rust,ignore
4554    /// use openai_ergonomic::Client;
4555    ///
4556    /// # async fn example() -> openai_ergonomic::Result<()> {
4557    /// let client = Client::from_env()?;
4558    /// let response = client.assistants().list_run_steps("thread_123", "run_123", None, None, None, None, None).await?;
4559    /// println!("Found {} run steps", response.data.len());
4560    /// # Ok(())
4561    /// # }
4562    /// ```
4563    #[allow(clippy::too_many_arguments)]
4564    pub async fn list_run_steps(
4565        &self,
4566        thread_id: impl Into<String>,
4567        run_id: impl Into<String>,
4568        limit: Option<i32>,
4569        order: Option<&str>,
4570        after: Option<&str>,
4571        before: Option<&str>,
4572        include: Option<Vec<String>>,
4573    ) -> Result<ListRunStepsResponse> {
4574        let thread_id = thread_id.into();
4575        let run_id = run_id.into();
4576
4577        // Prepare interceptor context
4578        let mut state = T::default();
4579        let operation = operation_names::RUN_STEP_LIST;
4580        let model = "run_steps";
4581        let request_json = format!("{{\"thread_id\":\"{thread_id}\",\"run_id\":\"{run_id}\",\"limit\":{limit:?},\"order\":{order:?},\"after\":{after:?},\"before\":{before:?},\"include\":{include:?}}}");
4582
4583        // Call before_request hook
4584        self.call_before_request(operation, model, &request_json, &mut state)
4585            .await?;
4586
4587        let start_time = Instant::now();
4588
4589        // Make the API call
4590        let response = match assistants_api::list_run_steps()
4591            .configuration(&self.client.base_configuration)
4592            .thread_id(&thread_id)
4593            .run_id(&run_id)
4594            .maybe_limit(limit)
4595            .maybe_order(order)
4596            .maybe_after(after)
4597            .maybe_before(before)
4598            .maybe_include_left_square_bracket_right_square_bracket(include)
4599            .call()
4600            .await
4601        {
4602            Ok(resp) => resp,
4603            Err(e) => {
4604                let error = self
4605                    .handle_api_error(e, operation, model, &request_json, &state)
4606                    .await;
4607                return Err(error);
4608            }
4609        };
4610
4611        let duration = start_time.elapsed();
4612
4613        // Call after_response hook
4614        self.call_after_response(
4615            &response,
4616            operation,
4617            model,
4618            &request_json,
4619            &state,
4620            duration,
4621            None,
4622            None,
4623        )
4624        .await;
4625
4626        Ok(response)
4627    }
4628
4629    /// Get a run step.
4630    ///
4631    /// # Example
4632    ///
4633    /// ```rust,ignore
4634    /// use openai_ergonomic::Client;
4635    ///
4636    /// # async fn example() -> openai_ergonomic::Result<()> {
4637    /// let client = Client::from_env()?;
4638    /// let step = client.assistants().get_run_step("thread_123", "run_123", "step_123", None).await?;
4639    /// println!("Step type: {}", step.type_);
4640    /// # Ok(())
4641    /// # }
4642    /// ```
4643    pub async fn get_run_step(
4644        &self,
4645        thread_id: impl Into<String>,
4646        run_id: impl Into<String>,
4647        step_id: impl Into<String>,
4648        include: Option<Vec<String>>,
4649    ) -> Result<RunStepObject> {
4650        let thread_id = thread_id.into();
4651        let run_id = run_id.into();
4652        let step_id = step_id.into();
4653
4654        // Prepare interceptor context
4655        let mut state = T::default();
4656        let operation = operation_names::RUN_STEP_RETRIEVE;
4657        let model = "run_steps";
4658        let request_json = format!(
4659            "{{\"thread_id\":\"{thread_id}\",\"run_id\":\"{run_id}\",\"step_id\":\"{step_id}\",\"include\":{include:?}}}"
4660        );
4661
4662        // Call before_request hook
4663        self.call_before_request(operation, model, &request_json, &mut state)
4664            .await?;
4665
4666        let start_time = Instant::now();
4667
4668        // Make the API call
4669        let response = match assistants_api::get_run_step()
4670            .configuration(&self.client.base_configuration)
4671            .thread_id(&thread_id)
4672            .run_id(&run_id)
4673            .step_id(&step_id)
4674            .maybe_include_left_square_bracket_right_square_bracket(include)
4675            .call()
4676            .await
4677        {
4678            Ok(resp) => resp,
4679            Err(e) => {
4680                let error = self
4681                    .handle_api_error(e, operation, model, &request_json, &state)
4682                    .await;
4683                return Err(error);
4684            }
4685        };
4686
4687        let duration = start_time.elapsed();
4688
4689        // Call after_response hook
4690        self.call_after_response(
4691            &response,
4692            operation,
4693            model,
4694            &request_json,
4695            &state,
4696            duration,
4697            None,
4698            None,
4699        )
4700        .await;
4701
4702        Ok(response)
4703    }
4704}
4705
/// Client for audio API.
///
/// Borrows the parent [`Client`]; `T` is the interceptor state type
/// (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct AudioClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Client for embeddings API.
///
/// Borrows the parent [`Client`]; `T` is the interceptor state type
/// (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct EmbeddingsClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Client for images API.
///
/// Borrows the parent [`Client`]; `T` is the interceptor state type
/// (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct ImagesClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Client for files API.
///
/// Borrows the parent [`Client`]; `T` is the interceptor state type
/// (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct FilesClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Client for fine-tuning API.
///
/// Borrows the parent [`Client`]; `T` is the interceptor state type
/// (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct FineTuningClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Client for batch API.
///
/// Borrows the parent [`Client`]; `T` is the interceptor state type
/// (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct BatchClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Client for vector stores API.
///
/// Borrows the parent [`Client`]; `T` is the interceptor state type
/// (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct VectorStoresClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Client for moderations API.
///
/// Borrows the parent [`Client`]; `T` is the interceptor state type
/// (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct ModerationsClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Client for threads API.
///
/// Borrows the parent [`Client`]; `T` is the interceptor state type
/// (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct ThreadsClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Client for uploads API.
///
/// Borrows the parent [`Client`]; `T` is the interceptor state type
/// (defaults to `()`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub struct UploadsClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Client for models API.
///
/// Borrows the parent [`Client`]; `T` is the interceptor state type
/// (defaults to `()`).
#[derive(Debug, Clone, Copy)]
pub struct ModelsClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Client for completions API.
///
/// Borrows the parent [`Client`]; `T` is the interceptor state type
/// (defaults to `()`).
#[derive(Debug, Clone, Copy)]
pub struct CompletionsClient<'a, T = ()> {
    client: &'a Client<T>,
}

/// Client for usage API.
///
/// Borrows the parent [`Client`]; `T` is the interceptor state type
/// (defaults to `()`).
#[derive(Debug, Clone, Copy)]
pub struct UsageClient<'a, T = ()> {
    client: &'a Client<T>,
}
4793
// Apply interceptor helper methods to all sub-clients.
//
// NOTE(review): the macro is defined earlier in this file; judging by the
// method bodies using these clients it appears to generate the
// `call_before_request`, `call_after_response`, and `handle_api_error`
// helpers — confirm against the macro definition before relying on this list.
impl_interceptor_helpers!(AssistantsClient<'_, T>);
impl_interceptor_helpers!(AudioClient<'_, T>);
impl_interceptor_helpers!(EmbeddingsClient<'_, T>);
impl_interceptor_helpers!(ImagesClient<'_, T>);
impl_interceptor_helpers!(FilesClient<'_, T>);
impl_interceptor_helpers!(FineTuningClient<'_, T>);
impl_interceptor_helpers!(BatchClient<'_, T>);
impl_interceptor_helpers!(VectorStoresClient<'_, T>);
impl_interceptor_helpers!(ModerationsClient<'_, T>);
impl_interceptor_helpers!(ThreadsClient<'_, T>);
impl_interceptor_helpers!(UploadsClient<'_, T>);
impl_interceptor_helpers!(ModelsClient<'_, T>);
impl_interceptor_helpers!(CompletionsClient<'_, T>);
impl_interceptor_helpers!(UsageClient<'_, T>);
4809
4810impl<T: Default + Send + Sync> ModelsClient<'_, T> {
4811    /// List all available models.
4812    ///
4813    /// # Example
4814    ///
4815    /// ```rust,ignore
4816    /// use openai_ergonomic::Client;
4817    ///
4818    /// # async fn example() -> openai_ergonomic::Result<()> {
4819    /// let client = Client::from_env()?;
4820    /// let models = client.models().list().await?;
4821    /// println!("Available models: {}", models.data.len());
4822    /// # Ok(())
4823    /// # }
4824    /// ```
4825    pub async fn list(&self) -> Result<ListModelsResponse> {
4826        // Prepare interceptor context
4827        let mut state = T::default();
4828        let operation = operation_names::MODEL_LIST;
4829        let model = "models";
4830        let request_json = "{}".to_string();
4831
4832        // Call before_request hook
4833        self.call_before_request(operation, model, &request_json, &mut state)
4834            .await?;
4835
4836        let start_time = Instant::now();
4837
4838        // Make the API call
4839        let response = match models_api::list_models()
4840            .configuration(&self.client.base_configuration)
4841            .call()
4842            .await
4843        {
4844            Ok(resp) => resp,
4845            Err(e) => {
4846                let error = self
4847                    .handle_api_error(e, operation, model, &request_json, &state)
4848                    .await;
4849                return Err(error);
4850            }
4851        };
4852
4853        let duration = start_time.elapsed();
4854
4855        // Call after_response hook
4856        self.call_after_response(
4857            &response,
4858            operation,
4859            model,
4860            &request_json,
4861            &state,
4862            duration,
4863            None,
4864            None,
4865        )
4866        .await;
4867
4868        Ok(response)
4869    }
4870
4871    /// Retrieve information about a specific model.
4872    ///
4873    /// # Example
4874    ///
4875    /// ```rust,ignore
4876    /// use openai_ergonomic::Client;
4877    ///
4878    /// # async fn example() -> openai_ergonomic::Result<()> {
4879    /// let client = Client::from_env()?;
4880    /// let model = client.models().get("gpt-4").await?;
4881    /// println!("Model ID: {}", model.id);
4882    /// # Ok(())
4883    /// # }
4884    /// ```
4885    pub async fn get(&self, model_id: impl Into<String>) -> Result<Model> {
4886        let id = model_id.into();
4887
4888        // Prepare interceptor context
4889        let mut state = T::default();
4890        let operation = operation_names::MODEL_RETRIEVE;
4891        let model = "models";
4892        let request_json = format!("{{\"model_id\":\"{id}\"}}");
4893
4894        // Call before_request hook
4895        self.call_before_request(operation, model, &request_json, &mut state)
4896            .await?;
4897
4898        let start_time = Instant::now();
4899
4900        // Make the API call
4901        let response = match models_api::retrieve_model()
4902            .configuration(&self.client.base_configuration)
4903            .model(&id)
4904            .call()
4905            .await
4906        {
4907            Ok(resp) => resp,
4908            Err(e) => {
4909                let error = self
4910                    .handle_api_error(e, operation, model, &request_json, &state)
4911                    .await;
4912                return Err(error);
4913            }
4914        };
4915
4916        let duration = start_time.elapsed();
4917
4918        // Call after_response hook
4919        self.call_after_response(
4920            &response,
4921            operation,
4922            model,
4923            &request_json,
4924            &state,
4925            duration,
4926            None,
4927            None,
4928        )
4929        .await;
4930
4931        Ok(response)
4932    }
4933
4934    /// Retrieve information about a model using a builder.
4935    pub async fn retrieve(&self, builder: ModelRetrievalBuilder) -> Result<Model> {
4936        self.get(builder.model_id()).await
4937    }
4938
4939    /// Delete a fine-tuned model.
4940    ///
4941    /// You must have the Owner role in your organization to delete a model.
4942    ///
4943    /// # Example
4944    ///
4945    /// ```rust,ignore
4946    /// use openai_ergonomic::Client;
4947    ///
4948    /// # async fn example() -> openai_ergonomic::Result<()> {
4949    /// let client = Client::from_env()?;
4950    /// let response = client.models().delete("ft:gpt-3.5-turbo:my-org:custom:id").await?;
4951    /// println!("Deleted: {}", response.deleted);
4952    /// # Ok(())
4953    /// # }
4954    /// ```
4955    pub async fn delete(&self, model_id: impl Into<String>) -> Result<DeleteModelResponse> {
4956        let id = model_id.into();
4957
4958        // Prepare interceptor context
4959        let mut state = T::default();
4960        let operation = operation_names::MODEL_DELETE;
4961        let model = "models";
4962        let request_json = format!("{{\"model_id\":\"{id}\"}}");
4963
4964        // Call before_request hook
4965        self.call_before_request(operation, model, &request_json, &mut state)
4966            .await?;
4967
4968        let start_time = Instant::now();
4969
4970        // Make the API call
4971        let response = match models_api::delete_model()
4972            .configuration(&self.client.base_configuration)
4973            .model(&id)
4974            .call()
4975            .await
4976        {
4977            Ok(resp) => resp,
4978            Err(e) => {
4979                let error = self
4980                    .handle_api_error(e, operation, model, &request_json, &state)
4981                    .await;
4982                return Err(error);
4983            }
4984        };
4985
4986        let duration = start_time.elapsed();
4987
4988        // Call after_response hook
4989        self.call_after_response(
4990            &response,
4991            operation,
4992            model,
4993            &request_json,
4994            &state,
4995            duration,
4996            None,
4997            None,
4998        )
4999        .await;
5000
5001        Ok(response)
5002    }
5003
5004    /// Delete a fine-tuned model using a builder.
5005    pub async fn remove(&self, builder: ModelDeleteBuilder) -> Result<DeleteModelResponse> {
5006        self.delete(builder.model_id()).await
5007    }
5008}
5009
5010impl<T: Default + Send + Sync> CompletionsClient<'_, T> {
5011    /// Create a completions builder for the specified model.
5012    ///
5013    /// # Example
5014    ///
5015    /// ```rust,ignore
5016    /// use openai_ergonomic::Client;
5017    ///
5018    /// # async fn example() -> openai_ergonomic::Result<()> {
5019    /// let client = Client::from_env()?;
5020    /// let builder = client.completions().builder("gpt-3.5-turbo-instruct");
5021    /// # Ok(())
5022    /// # }
5023    /// ```
5024    #[must_use]
5025    pub fn builder(&self, model: impl Into<String>) -> CompletionsBuilder {
5026        CompletionsBuilder::new(model)
5027    }
5028
5029    /// Execute a completion request.
5030    ///
5031    /// # Example
5032    ///
5033    /// ```rust,ignore
5034    /// use openai_ergonomic::Client;
5035    ///
5036    /// # async fn example() -> openai_ergonomic::Result<()> {
5037    /// let client = Client::from_env()?;
5038    /// let builder = client.completions()
5039    ///     .builder("gpt-3.5-turbo-instruct")
5040    ///     .prompt("Once upon a time")
5041    ///     .max_tokens(50);
5042    /// let response = client.completions().create(builder).await?;
5043    /// println!("Completion: {:?}", response.choices);
5044    /// # Ok(())
5045    /// # }
5046    /// ```
5047    pub async fn create(&self, builder: CompletionsBuilder) -> Result<CreateCompletionResponse> {
5048        let request = builder.build()?;
5049
5050        // Prepare interceptor context
5051        let mut state = T::default();
5052        let operation = operation_names::TEXT_COMPLETION;
5053        let model = request.model.clone();
5054        let request_json = serde_json::to_string(&request).unwrap_or_default();
5055
5056        // Call before_request hook
5057        self.call_before_request(operation, &model, &request_json, &mut state)
5058            .await?;
5059
5060        let start_time = Instant::now();
5061
5062        // Make the API call
5063        let response = match completions_api::create_completion()
5064            .configuration(&self.client.base_configuration)
5065            .create_completion_request(request)
5066            .call()
5067            .await
5068        {
5069            Ok(resp) => resp,
5070            Err(e) => {
5071                let error = self
5072                    .handle_api_error(e, operation, &model, &request_json, &state)
5073                    .await;
5074                return Err(error);
5075            }
5076        };
5077
5078        let duration = start_time.elapsed();
5079
5080        // Call after_response hook
5081        self.call_after_response(
5082            &response,
5083            operation,
5084            &model,
5085            &request_json,
5086            &state,
5087            duration,
5088            response.usage.as_ref().map(|u| i64::from(u.prompt_tokens)),
5089            response
5090                .usage
5091                .as_ref()
5092                .map(|u| i64::from(u.completion_tokens)),
5093        )
5094        .await;
5095
5096        Ok(response)
5097    }
5098}
5099
// Organization usage/costs endpoints. Every method follows the same
// interceptor pattern: before_request hook -> generated API call ->
// after_response hook on success, or handle_api_error on failure.
// Note: some endpoints (code interpreter sessions, vector stores, costs)
// take only project-level filters; the rest support the full filter set.
impl<T: Default + Send + Sync> UsageClient<'_, T> {
    /// Get usage data for audio speeches.
    ///
    /// Supports the full filter set: project/user/API-key IDs, models,
    /// `group_by`, `limit`, and pagination, in addition to the time window.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use openai_ergonomic::Client;
    /// use openai_ergonomic::builders::usage::UsageBuilder;
    ///
    /// # async fn example() -> openai_ergonomic::Result<()> {
    /// let client = Client::from_env()?;
    /// let builder = UsageBuilder::new(1704067200, None);
    /// let usage = client.usage().audio_speeches(builder).await?;
    /// println!("Usage: {:?}", usage);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn audio_speeches(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_AUDIO_SPEECHES;
        let model = "usage";
        let start_time = builder.start_time();
        // NOTE(review): the interceptor snapshot only records `start_time`;
        // other filters set on the builder are not reflected in this JSON.
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        // Shadows the Unix timestamp above with a monotonic clock used only
        // for measuring request duration.
        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_audio_speeches()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_user_ids(builder.user_ids_option())
            .maybe_api_key_ids(builder.api_key_ids_option())
            .maybe_models(builder.models_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook (usage endpoints have no token accounting)
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for audio transcriptions.
    ///
    /// Supports the full filter set (project/user/API-key IDs, models,
    /// `group_by`, `limit`, pagination).
    pub async fn audio_transcriptions(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_AUDIO_TRANSCRIPTIONS;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_audio_transcriptions()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_user_ids(builder.user_ids_option())
            .maybe_api_key_ids(builder.api_key_ids_option())
            .maybe_models(builder.models_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for code interpreter sessions.
    ///
    /// Only project-level filters apply here: no user/API-key/model filters
    /// are part of this endpoint's call.
    pub async fn code_interpreter_sessions(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_CODE_INTERPRETER;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_code_interpreter_sessions()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for completions.
    ///
    /// Supports the full filter set (project/user/API-key IDs, models,
    /// `group_by`, `limit`, pagination).
    pub async fn completions(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_COMPLETIONS;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_completions()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_user_ids(builder.user_ids_option())
            .maybe_api_key_ids(builder.api_key_ids_option())
            .maybe_models(builder.models_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for embeddings.
    ///
    /// Supports the full filter set (project/user/API-key IDs, models,
    /// `group_by`, `limit`, pagination).
    pub async fn embeddings(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_EMBEDDINGS;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_embeddings()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_user_ids(builder.user_ids_option())
            .maybe_api_key_ids(builder.api_key_ids_option())
            .maybe_models(builder.models_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for images.
    ///
    /// Supports the full filter set (project/user/API-key IDs, models,
    /// `group_by`, `limit`, pagination).
    pub async fn images(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_IMAGES;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_images()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_user_ids(builder.user_ids_option())
            .maybe_api_key_ids(builder.api_key_ids_option())
            .maybe_models(builder.models_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for moderations.
    ///
    /// Supports the full filter set (project/user/API-key IDs, models,
    /// `group_by`, `limit`, pagination).
    pub async fn moderations(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_MODERATIONS;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_moderations()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_user_ids(builder.user_ids_option())
            .maybe_api_key_ids(builder.api_key_ids_option())
            .maybe_models(builder.models_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get usage data for vector stores.
    ///
    /// Only project-level filters apply here: no user/API-key/model filters
    /// are part of this endpoint's call.
    pub async fn vector_stores(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_VECTOR_STORES;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_vector_stores()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }

    /// Get cost data.
    ///
    /// Only project-level filters apply here: no user/API-key/model filters
    /// are part of this endpoint's call.
    pub async fn costs(&self, builder: UsageBuilder) -> Result<UsageResponse> {
        // Prepare interceptor context
        let mut state = T::default();
        let operation = operation_names::USAGE_COSTS;
        let model = "usage";
        let start_time = builder.start_time();
        let request_json = format!("{{\"start_time\":{start_time}}}");

        // Call before_request hook
        self.call_before_request(operation, model, &request_json, &mut state)
            .await?;

        let start_time = Instant::now();

        // Make the API call
        let response = match usage_api::usage_costs()
            .configuration(&self.client.base_configuration)
            .start_time(builder.start_time())
            .maybe_end_time(builder.end_time())
            .maybe_bucket_width(builder.bucket_width_str())
            .maybe_project_ids(builder.project_ids_option())
            .maybe_group_by(builder.group_by_option())
            .maybe_limit(builder.limit_ref())
            .maybe_page(builder.page_ref())
            .call()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                let error = self
                    .handle_api_error(e, operation, model, &request_json, &state)
                    .await;
                return Err(error);
            }
        };

        let duration = start_time.elapsed();

        // Call after_response hook
        self.call_after_response(
            &response,
            operation,
            model,
            &request_json,
            &state,
            duration,
            None,
            None,
        )
        .await;

        Ok(response)
    }
}