1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
mod conversation_items;
mod conversations;
pub use conversation_items::*;
pub use conversations::*;
use crate::config::Config;
use crate::error::OpenAIError;
use crate::spec::responses::{
CompactResource, CompactResponseRequest, CreateResponse, DeleteResponse, Response,
ResponseItemList, ResponseStream, TokenCountsBody, TokenCountsResource,
};
use crate::{Client, RequestOptions};
/// Given text input or a list of context items, the model will generate a response.
///
/// Related guide: [Responses API](https://platform.openai.com/docs/guides/responses)
pub struct Responses<'c, C: Config> {
    // Borrowed handle to the underlying HTTP client; all requests in this
    // group are issued through it.
    client: &'c Client<C>,
    // Request options (e.g. extra query parameters) applied to every call made
    // through this group. `retrieve_stream` clones and extends these per-call.
    pub(crate) request_options: RequestOptions,
}
impl<'c, C: Config> Responses<'c, C> {
    /// Constructs a new Responses client backed by the given HTTP client,
    /// with default (empty) request options.
    pub fn new(client: &'c Client<C>) -> Self {
        Self {
            client,
            request_options: RequestOptions::new(),
        }
    }

    /// Creates a model response. Provide [text](https://platform.openai.com/docs/guides/text) or
    /// [image](https://platform.openai.com/docs/guides/images) inputs to generate
    /// [text](https://platform.openai.com/docs/guides/text) or
    /// [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have the model call
    /// your own [custom code](https://platform.openai.com/docs/guides/function-calling) or use
    /// built-in [tools](https://platform.openai.com/docs/guides/tools) like
    /// [web search](https://platform.openai.com/docs/guides/tools-web-search)
    /// or [file search](https://platform.openai.com/docs/guides/tools-file-search) to use your own data
    /// as input for the model's response.
    ///
    /// Issues `POST /responses`.
    #[crate::byot(
        T0 = serde::Serialize,
        R = serde::de::DeserializeOwned
    )]
    pub async fn create(&self, request: CreateResponse) -> Result<Response, OpenAIError> {
        self.client
            .post("/responses", request, &self.request_options)
            .await
    }

    /// Creates a model response for the given input with streaming.
    ///
    /// Response events will be sent as server-sent events as they become available.
    ///
    /// # Errors
    ///
    /// Returns [`OpenAIError::InvalidArgument`] if the request explicitly sets
    /// `stream: Some(false)` (use [`Responses::create`] for non-streaming calls).
    #[cfg(not(target_family = "wasm"))]
    #[crate::byot(
        T0 = serde::Serialize,
        R = serde::de::DeserializeOwned,
        stream = "true",
        where_clause = "R: std::marker::Send + 'static"
    )]
    // `mut` is only used in the non-byot branch below; silence the warning for
    // the byot expansion where the request is passed through untouched.
    #[allow(unused_mut)]
    pub async fn create_stream(
        &self,
        mut request: CreateResponse,
    ) -> Result<ResponseStream, OpenAIError> {
        // With the typed (non-byot) API we can inspect the request: reject an
        // explicit `stream: false` and force streaming on. Under the `byot`
        // feature the request is an opaque serializable type, so this check is
        // compiled out.
        #[cfg(not(feature = "byot"))]
        {
            if matches!(request.stream, Some(false)) {
                return Err(OpenAIError::InvalidArgument(
                    "When stream is false, use Responses::create".into(),
                ));
            }
            request.stream = Some(true);
        }
        Ok(self
            .client
            .post_stream("/responses", request, &self.request_options)
            .await)
    }

    /// Retrieves a model response with the given ID.
    ///
    /// Issues `GET /responses/{response_id}`.
    #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
    pub async fn retrieve(&self, response_id: &str) -> Result<Response, OpenAIError> {
        self.client
            .get(
                &format!("/responses/{}", response_id),
                &self.request_options,
            )
            .await
    }

    /// Retrieves a model response with the given ID with streaming.
    ///
    /// Response events will be sent as server-sent events as they become available.
    #[cfg(not(target_family = "wasm"))]
    #[crate::byot(
        T0 = std::fmt::Display,
        R = serde::de::DeserializeOwned,
        stream = "true",
        where_clause = "R: std::marker::Send + 'static"
    )]
    pub async fn retrieve_stream(&self, response_id: &str) -> Result<ResponseStream, OpenAIError> {
        // Streaming retrieval is requested via a `?stream=true` query
        // parameter (there is no request body on a GET), so clone the stored
        // options and extend them for this call only.
        let mut request_options = self.request_options.clone();
        request_options.with_query(&[("stream", "true")])?;
        Ok(self
            .client
            .get_stream(&format!("/responses/{}", response_id), &request_options)
            .await)
    }

    /// Deletes a model response with the given ID.
    ///
    /// Issues `DELETE /responses/{response_id}`.
    #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
    pub async fn delete(&self, response_id: &str) -> Result<DeleteResponse, OpenAIError> {
        self.client
            .delete(
                &format!("/responses/{}", response_id),
                &self.request_options,
            )
            .await
    }

    /// Cancels a model response with the given ID. Only responses created with the
    /// `background` parameter set to `true` can be cancelled.
    /// [Learn more](https://platform.openai.com/docs/guides/background).
    #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
    pub async fn cancel(&self, response_id: &str) -> Result<Response, OpenAIError> {
        self.client
            .post(
                &format!("/responses/{}/cancel", response_id),
                // The cancel endpoint takes no parameters; send an empty JSON
                // object as the body.
                serde_json::json!({}),
                &self.request_options,
            )
            .await
    }

    /// Returns a list of input items for a given response.
    ///
    /// Issues `GET /responses/{response_id}/input_items`.
    #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
    pub async fn list_input_items(
        &self,
        response_id: &str,
    ) -> Result<ResponseItemList, OpenAIError> {
        self.client
            .get(
                &format!("/responses/{}/input_items", response_id),
                &self.request_options,
            )
            .await
    }

    /// Gets input token counts for a prospective request without creating a
    /// response.
    ///
    /// Issues `POST /responses/input_tokens`.
    #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)]
    pub async fn get_input_token_counts(
        &self,
        request: TokenCountsBody,
    ) -> Result<TokenCountsResource, OpenAIError> {
        self.client
            .post("/responses/input_tokens", request, &self.request_options)
            .await
    }

    /// Compact a conversation.
    ///
    /// Learn when and how to compact long-running conversations in the
    /// [conversation state guide](https://platform.openai.com/docs/guides/conversation-state#managing-the-context-window).
    ///
    /// Issues `POST /responses/compact`.
    #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)]
    pub async fn compact(
        &self,
        request: CompactResponseRequest,
    ) -> Result<CompactResource, OpenAIError> {
        self.client
            .post("/responses/compact", request, &self.request_options)
            .await
    }
}