async_openai/completion.rs

use crate::{
    client::Client,
    config::Config,
    error::OpenAIError,
    types::{CompletionResponseStream, CreateCompletionRequest, CreateCompletionResponse},
};

/// Given a prompt, the model will return one or more predicted completions,
/// and can also return the probabilities of alternative tokens at each position.
/// We recommend most users use our Chat completions API.
/// [Learn more](https://platform.openai.com/docs/deprecations/2023-07-06-gpt-and-embeddings)
///
/// Related guide: [Legacy Completions](https://platform.openai.com/docs/guides/gpt/completions-api)
pub struct Completions<'c, C: Config> {
    client: &'c Client<C>,
}

impl<'c, C: Config> Completions<'c, C> {
    pub fn new(client: &'c Client<C>) -> Self {
        Self { client }
    }

    /// Creates a completion for the provided prompt and parameters.
    ///
    /// You must ensure that `stream: false` is set in the serialized `request`.
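    ///
    /// A minimal usage sketch (an illustration, not taken from the crate's own docs):
    /// it assumes the `CreateCompletionRequestArgs` builder and a `Client` configured
    /// from the environment (e.g. `OPENAI_API_KEY`), and is marked `no_run` so it is
    /// compiled but never executed as a doc test.
    ///
    /// ```no_run
    /// # use async_openai::{types::CreateCompletionRequestArgs, Client};
    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let client = Client::new();
    ///
    /// let request = CreateCompletionRequestArgs::default()
    ///     .model("gpt-3.5-turbo-instruct")
    ///     .prompt("Write a one-sentence description of Rust")
    ///     .max_tokens(40_u32)
    ///     .build()?;
    ///
    /// let response = client.completions().create(request).await?;
    /// println!("{}", response.choices[0].text);
    /// # Ok(())
    /// # }
    /// ```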
    #[crate::byot(
        T0 = serde::Serialize,
        R = serde::de::DeserializeOwned
    )]
    pub async fn create(
        &self,
        request: CreateCompletionRequest,
    ) -> Result<CreateCompletionResponse, OpenAIError> {
        // When the `byot` ("bring your own types") feature is disabled, the typed
        // request is validated: streaming requests must go through `create_stream`.
        #[cfg(not(feature = "byot"))]
        {
            if request.stream.is_some() && request.stream.unwrap() {
                return Err(OpenAIError::InvalidArgument(
                    "When stream is true, use Completion::create_stream".into(),
                ));
            }
        }
        self.client.post("/completions", request).await
    }

    /// Creates a completion request for the provided prompt and parameters.
    ///
    /// Streams back partial progress. Tokens will be sent as data-only
    /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format)
    /// as they become available, with the stream terminated by a data: \[DONE\] message.
    ///
    /// [CompletionResponseStream] is a parsed SSE stream until a \[DONE\] is received from the server.
    ///
    /// You must ensure that `stream: true` is set in the serialized `request`.
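    ///
    /// A minimal streaming sketch (an illustration, not taken from the crate's own docs):
    /// it assumes the `CreateCompletionRequestArgs` builder, a `Client` configured from
    /// the environment, and `futures::StreamExt` to drive the stream; marked `no_run`
    /// so it is compiled but never executed as a doc test.
    ///
    /// ```no_run
    /// # use async_openai::{types::CreateCompletionRequestArgs, Client};
    /// # use futures::StreamExt;
    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let client = Client::new();
    ///
    /// let request = CreateCompletionRequestArgs::default()
    ///     .model("gpt-3.5-turbo-instruct")
    ///     .prompt("Write a one-sentence description of Rust")
    ///     .max_tokens(40_u32)
    ///     .stream(true)
    ///     .build()?;
    ///
    /// let mut stream = client.completions().create_stream(request).await?;
    /// while let Some(chunk) = stream.next().await {
    ///     match chunk {
    ///         Ok(response) => response.choices.iter().for_each(|c| print!("{}", c.text)),
    ///         Err(err) => eprintln!("{err}"),
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```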
    #[crate::byot(
        T0 = serde::Serialize,
        R = serde::de::DeserializeOwned,
        stream = "true",
        where_clause = "R: std::marker::Send + 'static"
    )]
    #[allow(unused_mut)]
    pub async fn create_stream(
        &self,
        mut request: CreateCompletionRequest,
    ) -> Result<CompletionResponseStream, OpenAIError> {
        // When the `byot` feature is disabled, reject an explicit `stream: false` and
        // force `stream: true` on the typed request before opening the SSE stream.
        #[cfg(not(feature = "byot"))]
        {
            if request.stream.is_some() && !request.stream.unwrap() {
                return Err(OpenAIError::InvalidArgument(
                    "When stream is false, use Completion::create".into(),
                ));
            }

            request.stream = Some(true);
        }
        Ok(self.client.post_stream("/completions", request).await)
    }
}