// objectiveai_api/chat/completions/upstream/openrouter/request/chat_completion_create_params.rs

use indexmap::IndexMap;
use serde::{Deserialize, Serialize};

/// Request body for a chat completion sent upstream to OpenRouter.
/// Optional fields are omitted from the serialized payload when unset.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatCompletionCreateParams {
    pub messages: Vec<objectiveai::chat::completions::request::Message>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub provider: Option<super::Provider>,

    // Model and sampling parameters, populated from the ensemble LLM's
    // base configuration in both constructors below.
    pub model: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub frequency_penalty: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub logit_bias: Option<IndexMap<String, i64>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_completion_tokens: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub presence_penalty: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stop: Option<objectiveai::ensemble_llm::Stop>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_p: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<objectiveai::ensemble_llm::Reasoning>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub repetition_penalty: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_a: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_k: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verbosity: Option<objectiveai::ensemble_llm::Verbosity>,

    // Logprob, output-format, and tooling parameters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub logprobs: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_logprobs: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_format: Option<objectiveai::chat::completions::request::ResponseFormat>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub seed: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<objectiveai::chat::completions::request::ToolChoice>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<objectiveai::chat::completions::request::Tool>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prediction: Option<objectiveai::chat::completions::request::Prediction>,

    // Streaming and usage accounting are always enabled by the constructors.
    pub stream: bool,
    pub stream_options: super::StreamOptions,
    pub usage: super::Usage,
}
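
// With every optional field unset, the `skip_serializing_if` attributes
// leave only the required fields in the payload. A hypothetical example
// (the message and model values here are illustrative, and the exact
// shapes of `stream_options` and `usage` depend on those types' own
// serde derives):
//
//     {
//         "messages": [...],
//         "model": "openai/gpt-4o",
//         "stream": true,
//         "stream_options": { "include_usage": true },
//         "usage": { "include": true }
//     }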

impl ChatCompletionCreateParams {
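    /// Builds upstream request params for a plain chat completion, merging
    /// the ensemble LLM's base configuration (model, provider preferences,
    /// sampling settings) with the caller's request (messages, tools,
    /// response format).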
    pub fn new_for_chat(
        ensemble_llm: &objectiveai::ensemble_llm::EnsembleLlm,
        request: &objectiveai::chat::completions::request::ChatCompletionCreateParams,
    ) -> Self {
        Self {
            messages: super::prompt::new_for_chat(
                ensemble_llm.base.prefix_messages.as_deref(),
                &request.messages,
                ensemble_llm.base.suffix_messages.as_deref(),
            ),
            provider: super::provider::Provider::new(
                request.provider,
                ensemble_llm.base.provider.as_ref(),
            ),
            model: ensemble_llm.base.model.clone(),
            frequency_penalty: ensemble_llm.base.frequency_penalty,
            logit_bias: ensemble_llm.base.logit_bias.clone(),
            max_completion_tokens: ensemble_llm.base.max_completion_tokens,
            presence_penalty: ensemble_llm.base.presence_penalty,
            stop: ensemble_llm.base.stop.clone(),
            temperature: ensemble_llm.base.temperature,
            top_p: ensemble_llm.base.top_p,
            max_tokens: ensemble_llm.base.max_tokens,
            min_p: ensemble_llm.base.min_p,
            reasoning: ensemble_llm.base.reasoning,
            repetition_penalty: ensemble_llm.base.repetition_penalty,
            top_a: ensemble_llm.base.top_a,
            top_k: ensemble_llm.base.top_k,
            verbosity: ensemble_llm.base.verbosity,
            // `logprobs` is derived from the request: enabled only when the
            // caller asked for at least one top logprob.
            logprobs: request.top_logprobs.map(|top_logprobs| top_logprobs > 0),
            top_logprobs: request.top_logprobs,
            response_format: request.response_format.clone(),
            seed: request.seed,
            tool_choice: request.tool_choice.clone(),
            tools: request.tools.clone(),
            parallel_tool_calls: request.parallel_tool_calls,
            prediction: request.prediction.clone(),
            stream: true,
            stream_options: super::StreamOptions {
                include_usage: Some(true),
            },
            usage: super::Usage { include: true },
        }
    }

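    /// Builds upstream request params for a vector completion. The prompt,
    /// response format, tool choice, and tool list are synthesized from the
    /// vector prefix indices, the ensemble LLM's output mode, and its
    /// synthetic-reasoning setting.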
    pub fn new_for_vector(
        vector_pfx_indices: &[(String, usize)],
        ensemble_llm: &objectiveai::ensemble_llm::EnsembleLlm,
        request: &objectiveai::vector::completions::request::VectorCompletionCreateParams,
    ) -> Self {
        Self {
            messages: super::prompt::new_for_vector(
                &request.responses,
                vector_pfx_indices,
                ensemble_llm.base.output_mode,
                ensemble_llm.base.prefix_messages.as_deref(),
                &request.messages,
                ensemble_llm.base.suffix_messages.as_deref(),
            ),
            provider: super::provider::Provider::new(
                request.provider,
                ensemble_llm.base.provider.as_ref(),
            ),
            model: ensemble_llm.base.model.clone(),
            frequency_penalty: ensemble_llm.base.frequency_penalty,
            logit_bias: ensemble_llm.base.logit_bias.clone(),
            max_completion_tokens: ensemble_llm.base.max_completion_tokens,
            presence_penalty: ensemble_llm.base.presence_penalty,
            stop: ensemble_llm.base.stop.clone(),
            temperature: ensemble_llm.base.temperature,
            top_p: ensemble_llm.base.top_p,
            max_tokens: ensemble_llm.base.max_tokens,
            min_p: ensemble_llm.base.min_p,
            reasoning: ensemble_llm.base.reasoning,
            repetition_penalty: ensemble_llm.base.repetition_penalty,
            top_a: ensemble_llm.base.top_a,
            top_k: ensemble_llm.base.top_k,
            verbosity: ensemble_llm.base.verbosity,
            // Unlike the chat path, logprobs are configured by the ensemble
            // LLM rather than the caller's request.
            logprobs: ensemble_llm
                .base
                .top_logprobs
                .map(|top_logprobs| top_logprobs > 0),
            top_logprobs: ensemble_llm.base.top_logprobs,
            response_format: super::response_format::new_for_vector(
                vector_pfx_indices,
                ensemble_llm.base.output_mode,
                ensemble_llm.base.synthetic_reasoning,
            ),
            seed: request.seed,
            tool_choice: super::tool_choice::new_for_vector(
                ensemble_llm.base.output_mode,
                request.tools.as_deref(),
            ),
            tools: super::tools::new_for_vector(
                vector_pfx_indices,
                ensemble_llm.base.output_mode,
                ensemble_llm.base.synthetic_reasoning,
                request.tools.as_deref(),
            ),
            parallel_tool_calls: None,
            prediction: None,
            stream: true,
            stream_options: super::StreamOptions {
                include_usage: Some(true),
            },
            usage: super::Usage { include: true },
        }
    }
}
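
// A minimal usage sketch, assuming a reqwest-style async client; the
// endpoint URL and client wiring are illustrative, not this module's
// actual transport:
//
//     let params = ChatCompletionCreateParams::new_for_chat(&ensemble_llm, &request);
//     let response = client
//         .post("https://openrouter.ai/api/v1/chat/completions")
//         .json(&params)
//         .send()
//         .await?;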