async_openai/completion.rs

use crate::{
    client::Client,
    config::Config,
    error::OpenAIError,
    types::{CompletionResponseStream, CreateCompletionRequest, CreateCompletionResponse},
    RequestOptions,
};

/// Given a prompt, the model will return one or more predicted completions,
/// and can also return the probabilities of alternative tokens at each position.
/// We recommend most users use our Chat completions API.
/// [Learn more](https://platform.openai.com/docs/deprecations/2023-07-06-gpt-and-embeddings)
///
/// Related guide: [Legacy Completions](https://platform.openai.com/docs/guides/gpt/completions-api)
pub struct Completions<'c, C: Config> {
    client: &'c Client<C>,
    pub(crate) request_options: RequestOptions,
}

impl<'c, C: Config> Completions<'c, C> {
    pub fn new(client: &'c Client<C>) -> Self {
        Self {
            client,
            request_options: RequestOptions::new(),
        }
    }

    /// Creates a completion for the provided prompt and parameters
    ///
    /// You must ensure that the serialized `request` has `"stream": false`.
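    ///
    /// A minimal usage sketch (assumes a `tokio` runtime; the model name and
    /// parameter values are illustrative):
    ///
    /// ```no_run
    /// use async_openai::{types::CreateCompletionRequestArgs, Client};
    ///
    /// # #[tokio::main]
    /// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let client = Client::new();
    /// let request = CreateCompletionRequestArgs::default()
    ///     .model("gpt-3.5-turbo-instruct") // illustrative model name
    ///     .prompt("Tell me a joke about the universe")
    ///     .max_tokens(40_u32)
    ///     .build()?;
    ///
    /// // Non-streaming call: the full response is returned at once.
    /// let response = client.completions().create(request).await?;
    /// println!("{}", response.choices[0].text);
    /// # Ok(())
    /// # }
    /// ```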
    #[crate::byot(
        T0 = serde::Serialize,
        R = serde::de::DeserializeOwned
    )]
    pub async fn create(
        &self,
        request: CreateCompletionRequest,
    ) -> Result<CreateCompletionResponse, OpenAIError> {
        #[cfg(not(feature = "byot"))]
        {
            if matches!(request.stream, Some(true)) {
                return Err(OpenAIError::InvalidArgument(
                    "When stream is true, use Completions::create_stream".into(),
                ));
            }
        }
        self.client
            .post("/completions", request, &self.request_options)
            .await
    }

    /// Creates a streaming completion for the provided prompt and parameters
    ///
    /// Stream back partial progress. Tokens will be sent as data-only
    /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format)
    /// as they become available, with the stream terminated by a data: \[DONE\] message.
    ///
    /// [CompletionResponseStream] is a parsed SSE stream until a \[DONE\] is received from the server.
    ///
    /// You must ensure that the serialized `request` has `"stream": true`.
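    ///
    /// A minimal streaming sketch (assumes a `tokio` runtime and the `futures`
    /// crate for `StreamExt`; the model name and parameter values are
    /// illustrative):
    ///
    /// ```no_run
    /// use async_openai::{types::CreateCompletionRequestArgs, Client};
    /// use futures::StreamExt;
    ///
    /// # #[tokio::main]
    /// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let client = Client::new();
    /// let request = CreateCompletionRequestArgs::default()
    ///     .model("gpt-3.5-turbo-instruct") // illustrative model name
    ///     .prompt("Write a haiku about borrow checking")
    ///     .max_tokens(60_u32)
    ///     .stream(true)
    ///     .build()?;
    ///
    /// // Each item is one parsed SSE chunk; the stream ends after [DONE].
    /// let mut stream = client.completions().create_stream(request).await?;
    /// while let Some(chunk) = stream.next().await {
    ///     match chunk {
    ///         Ok(response) => {
    ///             response.choices.iter().for_each(|c| print!("{}", c.text));
    ///         }
    ///         Err(e) => eprintln!("stream error: {e}"),
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```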
    #[crate::byot(
        T0 = serde::Serialize,
        R = serde::de::DeserializeOwned,
        stream = "true",
        where_clause = "R: std::marker::Send + 'static"
    )]
    #[allow(unused_mut)]
    pub async fn create_stream(
        &self,
        mut request: CreateCompletionRequest,
    ) -> Result<CompletionResponseStream, OpenAIError> {
        #[cfg(not(feature = "byot"))]
        {
            if matches!(request.stream, Some(false)) {
                return Err(OpenAIError::InvalidArgument(
                    "When stream is false, use Completions::create".into(),
                ));
            }

            request.stream = Some(true);
        }
        Ok(self
            .client
            .post_stream("/completions", request, &self.request_options)
            .await)
    }
}