api_openai/request_templates.rs

//! Pre-configured request templates for common chat completion use cases.

mod private
{
  use crate::components::chat_shared::{ ChatCompletionRequest, ChatCompletionRequestMessage };

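  /// Reusable request template with sensible defaults for common chat completion use cases.
  ///
  /// Illustrative usage (the example is marked `ignore` because the exact import path
  /// depends on how the crate re-exports this type through `mod_interface!`):
  ///
  /// ```ignore
  /// let request = RequestTemplate::code_generation( "gpt-4" )
  ///   .with_max_tokens( 4096 )
  ///   .build( "Write a binary search in Rust" );
  /// assert_eq!( request.max_tokens, Some( 4096 ) );
  /// assert_eq!( request.messages.len(), 2 );
  /// ```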
  #[ derive( Debug, Clone ) ]
  pub struct RequestTemplate
  {
    model : String,
    max_tokens : Option< i32 >,
    temperature : Option< f32 >,
    system_prompt : Option< String >,
  }

  impl RequestTemplate
  {
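    /// General-purpose conversational template: 1024 max tokens, temperature 0.7, and a
    /// "helpful assistant" system prompt.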
    #[ must_use ]
    #[ inline ]
    pub fn chat( model : impl Into< String > ) -> Self
    {
      Self
      {
        model : model.into(),
        max_tokens : Some( 1024 ),
        temperature : Some( 0.7 ),
        system_prompt : Some( String::from( "You are a helpful assistant." ) ),
      }
    }

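    /// Template tuned for code generation: 2048 max tokens and a lower temperature (0.3)
    /// for more focused, deterministic output.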
    #[ must_use ]
    #[ inline ]
    pub fn code_generation( model : impl Into< String > ) -> Self
    {
      Self
      {
        model : model.into(),
        max_tokens : Some( 2048 ),
        temperature : Some( 0.3 ),
        system_prompt : Some( String::from( "You are an expert programmer. Generate clean, efficient code." ) ),
      }
    }

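    /// Template tuned for creative writing: 2048 max tokens and a higher temperature (0.9)
    /// for more varied output.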
    #[ must_use ]
    #[ inline ]
    pub fn creative_writing( model : impl Into< String > ) -> Self
    {
      Self
      {
        model : model.into(),
        max_tokens : Some( 2048 ),
        temperature : Some( 0.9 ),
        system_prompt : Some( String::from( "You are a creative writer with a vivid imagination." ) ),
      }
    }

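    /// Template tuned for factual question answering: 512 max tokens and a low
    /// temperature (0.2) to keep answers concise and consistent.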
    #[ must_use ]
    #[ inline ]
    pub fn factual_qa( model : impl Into< String > ) -> Self
    {
      Self
      {
        model : model.into(),
        max_tokens : Some( 512 ),
        temperature : Some( 0.2 ),
        system_prompt : Some( String::from( "You provide accurate, factual answers based on knowledge." ) ),
      }
    }

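    /// Template tuned for summarization: 500 max tokens and a low temperature (0.3).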
    #[ must_use ]
    #[ inline ]
    pub fn summarization( model : impl Into< String > ) -> Self
    {
      Self
      {
        model : model.into(),
        max_tokens : Some( 500 ),
        temperature : Some( 0.3 ),
        system_prompt : Some( String::from( "You summarize content concisely while preserving key information." ) ),
      }
    }

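    /// Replaces the default system prompt with a custom one.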
    #[ must_use ]
    #[ inline ]
    pub fn with_prompt( mut self, prompt : impl Into< String > ) -> Self
    {
      self.system_prompt = Some( prompt.into() );
      self
    }

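    /// Overrides the sampling temperature.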
    #[ must_use ]
    #[ inline ]
    pub fn with_temperature( mut self, temperature : f32 ) -> Self
    {
      self.temperature = Some( temperature );
      self
    }

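    /// Overrides the maximum number of tokens to generate.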
    #[ must_use ]
    #[ inline ]
    pub fn with_max_tokens( mut self, max_tokens : i32 ) -> Self
    {
      self.max_tokens = Some( max_tokens );
      self
    }

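    /// Consumes the template and builds a `ChatCompletionRequest` whose message list
    /// contains the system prompt (if any) followed by the given user message. The
    /// request's own `system_prompt` field is left as `None`, since the prompt is
    /// delivered through the message list instead.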
    #[ must_use ]
    #[ inline ]
    pub fn build( self, user_message : impl Into< String > ) -> ChatCompletionRequest
    {
      use crate::components::chat_shared::ChatCompletionRequestMessageContent;

      let mut messages = Vec::new();

      if let Some( system_prompt ) = self.system_prompt
      {
        messages.push( ChatCompletionRequestMessage
        {
          role : String::from( "system" ),
          content : Some( ChatCompletionRequestMessageContent::Text( system_prompt ) ),
          name : None,
          tool_calls : None,
          tool_call_id : None,
        } );
      }

      messages.push( ChatCompletionRequestMessage
      {
        role : String::from( "user" ),
        content : Some( ChatCompletionRequestMessageContent::Text( user_message.into() ) ),
        name : None,
        tool_calls : None,
        tool_call_id : None,
      } );

      ChatCompletionRequest
      {
        model : self.model,
        messages,
        temperature : self.temperature,
        top_p : None,
        max_tokens : self.max_tokens,
        n : None,
        stop : None,
        stream : None,
        system_prompt : None,
        user : None,
        tools : None,
        tool_choice : None,
        response_format : None,
        seed : None,
        logit_bias : None,
        logprobs : None,
        top_logprobs : None,
      }
    }
  }

  #[ cfg( test ) ]
  mod tests
  {
    use super::*;

    #[ test ]
    fn test_chat_template()
    {
      let template = RequestTemplate::chat( "gpt-4" );
      let request = template.build( "Hello" );

      assert_eq!( request.model, "gpt-4" );
      assert_eq!( request.messages.len(), 2 );
      assert_eq!( request.messages[ 0 ].role, "system" );
      assert_eq!( request.messages[ 1 ].role, "user" );
      assert_eq!( request.temperature, Some( 0.7 ) );
      assert_eq!( request.max_tokens, Some( 1024 ) );
    }

    #[ test ]
    fn test_code_generation_template()
    {
      let template = RequestTemplate::code_generation( "gpt-4" );
      let request = template.build( "Write a function" );

      assert_eq!( request.model, "gpt-4" );
      assert_eq!( request.temperature, Some( 0.3 ) );
      assert_eq!( request.max_tokens, Some( 2048 ) );
    }

    #[ test ]
    fn test_creative_writing_template()
    {
      let template = RequestTemplate::creative_writing( "gpt-4" );
      let request = template.build( "Write a story" );

      assert_eq!( request.model, "gpt-4" );
      assert_eq!( request.temperature, Some( 0.9 ) );
      assert_eq!( request.max_tokens, Some( 2048 ) );
    }

    #[ test ]
    fn test_factual_qa_template()
    {
      let template = RequestTemplate::factual_qa( "gpt-4" );
      let request = template.build( "What is 2+2?" );

      assert_eq!( request.model, "gpt-4" );
      assert_eq!( request.temperature, Some( 0.2 ) );
      assert_eq!( request.max_tokens, Some( 512 ) );
    }

    #[ test ]
    fn test_summarization_template()
    {
      let template = RequestTemplate::summarization( "gpt-4" );
      let request = template.build( "Summarize this text" );

      assert_eq!( request.model, "gpt-4" );
      assert_eq!( request.temperature, Some( 0.3 ) );
      assert_eq!( request.max_tokens, Some( 500 ) );
    }

    #[ test ]
    fn test_with_prompt()
    {
      let template = RequestTemplate::chat( "gpt-4" )
        .with_prompt( "Custom prompt" );
      let request = template.build( "Hello" );

      if let Some( content ) = request.messages[ 0 ].content.as_ref()
      {
        match content
        {
          crate::components::chat_shared::ChatCompletionRequestMessageContent::Text( text ) =>
          {
            assert!( text.contains( "Custom prompt" ) );
          },
          crate::components::chat_shared::ChatCompletionRequestMessageContent::Parts( _ ) => panic!( "Expected text content" ),
        }
      }
      else
      {
        panic!( "Expected content to be present" );
      }
    }

    #[ test ]
    fn test_with_temperature()
    {
      let template = RequestTemplate::chat( "gpt-4" )
        .with_temperature( 0.5 );
      let request = template.build( "Hello" );

      assert_eq!( request.temperature, Some( 0.5 ) );
    }

    #[ test ]
    fn test_with_max_tokens()
    {
      let template = RequestTemplate::chat( "gpt-4" )
        .with_max_tokens( 500 );
      let request = template.build( "Hello" );

      assert_eq!( request.max_tokens, Some( 500 ) );
    }
  }
}

crate::mod_interface!
{
  exposed use
  {
    RequestTemplate,
  };
}