async_openai_wasm/
completion.rs

1use crate::{
2    client::Client,
3    config::Config,
4    error::OpenAIError,
5    types::{CompletionResponseStream, CreateCompletionRequest, CreateCompletionResponse},
6};
7
/// Given a prompt, the model will return one or more predicted completions,
/// and can also return the probabilities of alternative tokens at each position.
/// We recommend most users use our Chat completions API.
/// [Learn more](https://platform.openai.com/docs/deprecations/2023-07-06-gpt-and-embeddings)
///
/// Related guide: [Legacy Completions](https://platform.openai.com/docs/guides/gpt/completions-api)
pub struct Completions<'c, C: Config> {
    /// Borrowed API client used to issue the HTTP requests; the group
    /// lives no longer than the client it references (`'c`).
    client: &'c Client<C>,
}
17
18impl<'c, C: Config> Completions<'c, C> {
19    pub fn new(client: &'c Client<C>) -> Self {
20        Self { client }
21    }
22
23    /// Creates a completion for the provided prompt and parameters
24    pub async fn create(
25        &self,
26        request: CreateCompletionRequest,
27    ) -> Result<CreateCompletionResponse, OpenAIError> {
28        if request.stream.is_some() && request.stream.unwrap() {
29            return Err(OpenAIError::InvalidArgument(
30                "When stream is true, use Completion::create_stream".into(),
31            ));
32        }
33        self.client.post("/completions", request).await
34    }
35
36    /// Creates a completion request for the provided prompt and parameters
37    ///
38    /// Stream back partial progress. Tokens will be sent as data-only
39    /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format)
40    /// as they become available, with the stream terminated by a data: \[DONE\] message.
41    ///
42    /// [CompletionResponseStream] is a parsed SSE stream until a \[DONE\] is received from server.
43    pub async fn create_stream(
44        &self,
45        mut request: CreateCompletionRequest,
46    ) -> Result<CompletionResponseStream, OpenAIError> {
47        if request.stream.is_some() && !request.stream.unwrap() {
48            return Err(OpenAIError::InvalidArgument(
49                "When stream is false, use Completion::create".into(),
50            ));
51        }
52
53        request.stream = Some(true);
54
55        Ok(self.client.post_stream("/completions", request).await)
56    }
57}