async_openai/completion.rs

use crate::{
    client::Client,
    config::Config,
    error::OpenAIError,
    types::completions::{
        CompletionResponseStream, CreateCompletionRequest, CreateCompletionResponse,
    },
    RequestOptions,
};

/// Given a prompt, the model will return one or more predicted completions,
/// and can also return the probabilities of alternative tokens at each position.
/// We recommend most users use our Chat completions API.
/// [Learn more](https://platform.openai.com/docs/deprecations/2023-07-06-gpt-and-embeddings)
///
/// Related guide: [Legacy Completions](https://platform.openai.com/docs/guides/gpt/completions-api)
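///
/// A minimal sketch of obtaining this API group; it assumes the `Client::completions()`
/// accessor is available in this crate version:
///
/// ```no_run
/// use async_openai::Client;
///
/// // Client::new() reads configuration from the environment (e.g. OPENAI_API_KEY);
/// // `completions()` is assumed to be the accessor for this API group.
/// let client = Client::new();
/// let completions = client.completions();
/// ```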
pub struct Completions<'c, C: Config> {
    client: &'c Client<C>,
    pub(crate) request_options: RequestOptions,
}

impl<'c, C: Config> Completions<'c, C> {
    pub fn new(client: &'c Client<C>) -> Self {
        Self {
            client,
            request_options: RequestOptions::new(),
        }
    }

    /// Creates a completion for the provided prompt and parameters.
    ///
    /// You must ensure that `"stream": false` is set in the serialized `request`.
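    ///
    /// A minimal usage sketch; it assumes the `Client::completions()` accessor and a
    /// `CreateCompletionRequestArgs` builder under `types::completions` exist in this
    /// crate version:
    ///
    /// ```no_run
    /// use async_openai::{types::completions::CreateCompletionRequestArgs, Client};
    ///
    /// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
    /// let client = Client::new();
    /// // Build a non-streaming request; `stream` is left unset so `create` accepts it.
    /// // (`CreateCompletionRequestArgs` and its module path are assumed here.)
    /// let request = CreateCompletionRequestArgs::default()
    ///     .model("gpt-3.5-turbo-instruct")
    ///     .prompt("Say this is a test")
    ///     .build()?;
    ///
    /// let response = client.completions().create(request).await?;
    /// println!("{}", response.choices[0].text);
    /// # Ok(())
    /// # }
    /// ```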
    #[crate::byot(
        T0 = serde::Serialize,
        R = serde::de::DeserializeOwned
    )]
    pub async fn create(
        &self,
        request: CreateCompletionRequest,
    ) -> Result<CreateCompletionResponse, OpenAIError> {
        #[cfg(not(feature = "byot"))]
        {
            if request.stream == Some(true) {
                return Err(OpenAIError::InvalidArgument(
                    "When stream is true, use Completions::create_stream".into(),
                ));
            }
        }
        self.client
            .post("/completions", request, &self.request_options)
            .await
    }

    /// Creates a completion request for the provided prompt and parameters.
    ///
    /// Streams back partial progress: tokens are sent as data-only
    /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format)
    /// as they become available, with the stream terminated by a `data: [DONE]` message.
    ///
    /// [CompletionResponseStream] is a parsed SSE stream that yields items until a `[DONE]` message is received from the server.
    ///
    /// You must ensure that `"stream": true` is set in the serialized `request`.
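    ///
    /// A minimal streaming sketch; it assumes the `Client::completions()` accessor and a
    /// `CreateCompletionRequestArgs` builder under `types::completions` exist in this
    /// crate version, and uses `futures::StreamExt` to drain the stream:
    ///
    /// ```no_run
    /// use async_openai::{types::completions::CreateCompletionRequestArgs, Client};
    /// use futures::StreamExt;
    ///
    /// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
    /// let client = Client::new();
    /// // Setting `stream(true)` here is optional in non-byot builds: create_stream sets it.
    /// // (`CreateCompletionRequestArgs` and its module path are assumed here.)
    /// let request = CreateCompletionRequestArgs::default()
    ///     .model("gpt-3.5-turbo-instruct")
    ///     .prompt("Write a one-line haiku about streams")
    ///     .stream(true)
    ///     .build()?;
    ///
    /// let mut stream = client.completions().create_stream(request).await?;
    /// // Each item is one parsed SSE chunk; print token text as it arrives.
    /// while let Some(result) = stream.next().await {
    ///     match result {
    ///         Ok(response) => print!("{}", response.choices[0].text),
    ///         Err(e) => eprintln!("stream error: {e}"),
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```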
    #[crate::byot(
        T0 = serde::Serialize,
        R = serde::de::DeserializeOwned,
        stream = "true",
        where_clause = "R: std::marker::Send + 'static"
    )]
    #[allow(unused_mut)]
    pub async fn create_stream(
        &self,
        mut request: CreateCompletionRequest,
    ) -> Result<CompletionResponseStream, OpenAIError> {
        #[cfg(not(feature = "byot"))]
        {
            if request.stream == Some(false) {
                return Err(OpenAIError::InvalidArgument(
                    "When stream is false, use Completions::create".into(),
                ));
            }

            request.stream = Some(true);
        }
        Ok(self
            .client
            .post_stream("/completions", request, &self.request_options)
            .await)
    }
}