pub struct ChatCompletionBuilder { /* private fields */ }
Builder for chat completion requests.
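A minimal end-to-end sketch (not taken from the crate's own docs; it assumes a Client configured from the environment and the import paths shown, as in the repository examples below):

use openai_ergonomic::Client;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Build a client from environment variables (e.g. OPENAI_API_KEY).
    let client = Client::from_env()?.build();
    // client.chat() returns a ChatCompletionBuilder; the model is assumed
    // to come from the client configuration here.
    let builder = client
        .chat()
        .system("You are a helpful assistant.")
        .user("Say hello in one sentence.")
        .temperature(0.7);
    let response = client.send_chat(builder).await?;
    println!("{:?}", response.content());
    Ok(())
}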
Implementations
impl ChatCompletionBuilder
pub fn new(model: impl Into<String>) -> Self
Create a new chat completion builder with the specified model.
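A hedged sketch of constructing the builder directly with an explicit model, mirroring Example 4 of examples/langfuse.rs below; it assumes a client built as in that example, and the import path of ChatCompletionBuilder is an assumption:

// Target a specific model instead of the client's default.
let request = ChatCompletionBuilder::new("gpt-4o-mini")
    .user("What is the capital of France?")
    .build()?;
let response = client.execute_chat(request).await?;
println!("{:?}", response.content());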
Examples found in repository
examples/langfuse.rs (line 104)
31async fn main() -> Result<(), Box<dyn std::error::Error>> {
32 // Initialize tracing for logging
33 tracing_subscriber::fmt()
34 .with_env_filter(
35 tracing_subscriber::EnvFilter::from_default_env()
36 .add_directive("openai_ergonomic=debug".parse()?),
37 )
38 .init();
39
40 // 1. Build Langfuse exporter from environment variables
41 let exporter = ExporterBuilder::from_env()?.build()?;
42
43 // 2. Create tracer provider with batch processor
44 let provider = SdkTracerProvider::builder()
45 .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
46 .build();
47
48 // Set as global provider
49 global::set_tracer_provider(provider.clone());
50
51 // 3. Get tracer and create interceptor
52 let tracer = provider.tracer("openai-ergonomic");
53 let langfuse_interceptor =
54 std::sync::Arc::new(LangfuseInterceptor::new(tracer, LangfuseConfig::new()));
55
56 // 4. Create the OpenAI client and add the Langfuse interceptor
57 // Keep a reference to the interceptor so we can update context later
58 let client = Client::from_env()?
59 .with_interceptor(Box::new(langfuse_interceptor.clone()))
60 .build();
61
62 println!(" OpenAI client initialized with Langfuse observability");
63 println!(" Traces will be sent to Langfuse for monitoring\n");
64
65 // Example 1: Simple chat completion
66 println!("Example 1: Simple chat completion");
67 println!("---------------------------------");
68 let chat_builder = client
69 .chat_simple("What is the capital of France? Answer in one word.")
70 .build()?;
71 let response = client.execute_chat(chat_builder).await?;
72 println!("Response: {:?}\n", response.content());
73
74 // Example 2: Chat completion with builder pattern
75 println!("Example 2: Chat with builder pattern");
76 println!("-------------------------------------");
77 let chat_builder = client
78 .chat()
79 .system("You are a helpful assistant that speaks like a pirate.")
80 .user("Tell me about the ocean in 2 sentences.")
81 .temperature(0.7)
82 .max_tokens(100)
83 .build()?;
84 let response = client.execute_chat(chat_builder).await?;
85 println!("Response: {:?}\n", response.content());
86
87 // Example 3: Multiple messages in a conversation
88 println!("Example 3: Conversation");
89 println!("-----------------------");
90 let chat_builder = client
91 .chat()
92 .system("You are a math tutor.")
93 .user("What is 2 + 2?")
94 .assistant("2 + 2 equals 4.")
95 .user("And what about 3 + 3?")
96 .build()?;
97 let response = client.execute_chat(chat_builder).await?;
98 println!("Response: {:?}\n", response.content());
99
100 // Example 4: Error handling (intentionally trigger an error)
101 println!("Example 4: Error handling");
102 println!("-------------------------");
103 // Create a builder with a non-existent model
104 let chat_builder = ChatCompletionBuilder::new("non-existent-model")
105 .user("This should fail")
106 .build()?;
107 let result = client.execute_chat(chat_builder).await;
108
109 match result {
110 Ok(_) => println!("Unexpected success"),
111 Err(e) => println!("Expected error captured: {e}\n"),
112 }
113
114 // Example 5: Embeddings
115 println!("Example 5: Embeddings");
116 println!("--------------------");
117 let embeddings_builder = client.embeddings().text(
118 "text-embedding-ada-002",
119 "The quick brown fox jumps over the lazy dog",
120 );
121 let embeddings = client.embeddings().create(embeddings_builder).await?;
122 println!("Generated {} embedding(s)\n", embeddings.data.len());
123
124 // Example 6: Using custom metadata via interceptor context
125 println!("Example 6: Custom metadata via interceptor context");
126 println!("---------------------------------------------------");
127
128 // Set session and user IDs on the interceptor's context
129 langfuse_interceptor.set_session_id("demo-session-123");
130 langfuse_interceptor.set_user_id("demo-user-456");
131 langfuse_interceptor.add_tags(vec!["example".to_string(), "demo".to_string()]);
132
133 let chat_builder = client
134 .chat_simple("Say 'Hello from custom session!'")
135 .build()?;
136 let response = client.execute_chat(chat_builder).await?;
137 println!("Response with custom metadata: {:?}\n", response.content());
138
139 // Clear context for subsequent calls
140 langfuse_interceptor.clear_context();
141
142 println!(" All examples completed!");
143 println!(" Check your Langfuse dashboard to see the traces");
144 println!(" - Look for traces with operation name 'chat'");
145 println!(" - Each trace includes request/response details, token usage, and timing");
146 println!(" - Example 6 will have custom session_id, user_id, and tags");
147
148 // Shutdown the tracer provider to flush all spans
149 println!("\n⏳ Flushing spans to Langfuse...");
150 provider.shutdown()?;
151
152 Ok(())
153}
pub fn system(self, content: impl Into<String>) -> Self
Add a system message to the conversation.
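A minimal sketch, assuming a configured Client as in the examples below. The system message is conventionally the first in the conversation and steers the assistant's behavior for every following turn:

let builder = client
    .chat()
    .system("You are a concise assistant. Answer in one sentence.")
    .user("What is Rust?");
let response = client.send_chat(builder).await?;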
Examples found in repository
examples/vision_chat.rs (line 89)
72async fn demonstrate_basic_image_analysis(
73 client: &Client,
74) -> Result<(), Box<dyn std::error::Error>> {
75 println!(" Example 1: Basic Image Analysis");
76 println!("----------------------------------");
77
78 let image_url = SAMPLE_IMAGE_URLS[0];
79 let question = "What do you see in this image? Please describe it in detail.";
80
81 println!("Image URL: {image_url}");
82 println!("Question: {question}");
83 print!("Assistant: ");
84 io::stdout().flush()?;
85
86 // Use the convenient user_with_image_url method
87 let chat_builder = client
88 .chat()
89 .system("You are a helpful AI assistant that can analyze images. Provide detailed, accurate descriptions of what you see.")
90 .user_with_image_url(question, image_url)
91 .temperature(0.3);
92
93 let response = client.send_chat(chat_builder).await?;
94
95 if let Some(content) = response.content() {
96 println!("{content}");
97
98 // Show usage information
99 if let Some(usage) = response.usage() {
100 println!("\n Token usage:");
101 println!(" Prompt tokens: {}", usage.prompt_tokens);
102 println!(" Completion tokens: {}", usage.completion_tokens);
103 println!(" Total tokens: {}", usage.total_tokens);
104 }
105 } else {
106 println!("No response content received");
107 }
108
109 println!();
110 Ok(())
111}
112
113/// Demonstrate analysis of multiple images in a single message.
114async fn demonstrate_multiple_images(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
115 println!(" Example 2: Multiple Image Analysis");
116 println!("---------------------------------------");
117
118 let question = "Compare these two images. What are the differences and similarities?";
119
120 println!("Question: {question}");
121 println!("Image 1: {}", SAMPLE_IMAGE_URLS[0]);
122 println!("Image 2: {}", SAMPLE_IMAGE_URLS[1]);
123 print!("Assistant: ");
124 io::stdout().flush()?;
125
126 // Create message parts manually for multiple images
127 let parts = vec![
128 text_part(question),
129 image_url_part_with_detail(SAMPLE_IMAGE_URLS[0], Detail::Auto),
130 image_url_part_with_detail(SAMPLE_IMAGE_URLS[1], Detail::Auto),
131 ];
132
133 let chat_builder = client
134 .chat()
135 .system("You are an expert at comparing and analyzing images. Provide thoughtful comparisons focusing on visual elements, composition, and content.")
136 .user_with_parts(parts)
137 .temperature(0.4);
138
139 let response = client.send_chat(chat_builder).await?;
140
141 if let Some(content) = response.content() {
142 println!("{content}");
143 } else {
144 println!("No response content received");
145 }
146
147 println!();
148 Ok(())
149}
150
151/// Demonstrate different detail levels for image analysis.
152async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153 println!(" Example 3: Different Detail Levels");
154 println!("------------------------------------");
155
156 let image_url = SAMPLE_IMAGE_URLS[0];
157 let question = "Analyze this image";
158
159 // Test different detail levels
160 let detail_levels = vec![
161 (Detail::Low, "Low detail (faster, less detailed)"),
162 (Detail::High, "High detail (slower, more detailed)"),
163 (Detail::Auto, "Auto detail (balanced)"),
164 ];
165
166 for (detail, description) in detail_levels {
167 println!("\n{description}:");
168 print!("Assistant: ");
169 io::stdout().flush()?;
170
171 let chat_builder = client
172 .chat()
173 .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174 .user_with_image_url_and_detail(question, image_url, detail)
175 .temperature(0.2)
176 .max_completion_tokens(100); // Limit response length for comparison
177
178 let response = client.send_chat(chat_builder).await?;
179
180 if let Some(content) = response.content() {
181 println!("{content}");
182 }
183 }
184
185 println!();
186 Ok(())
187}
188
189/// Demonstrate base64 image encoding and analysis.
190async fn demonstrate_base64_image(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
191 println!(" Example 4: Base64 Image Analysis");
192 println!("-----------------------------------");
193
194 let question = "What is this image? It's very small, what can you tell about it?";
195
196 println!("Question: {question}");
197 println!("Image: Small test image encoded as base64");
198 print!("Assistant: ");
199 io::stdout().flush()?;
200
201 // Create message parts with base64 image
202 let parts = vec![
203 text_part(question),
204 image_base64_part_with_detail(SAMPLE_BASE64_IMAGE, "image/png", Detail::High),
205 ];
206
207 let chat_builder = client
208 .chat()
209 .system("You are analyzing images provided in base64 format. Even if an image is very small or simple, try to provide what information you can.")
210 .user_with_parts(parts)
211 .temperature(0.3);
212
213 let response = client.send_chat(chat_builder).await?;
214
215 if let Some(content) = response.content() {
216 println!("{content}");
217 } else {
218 println!("No response content received");
219 }
220
221 println!();
222 Ok(())
223}
224
225/// Demonstrate conversation context with images.
226async fn demonstrate_conversation_with_images(
227 client: &Client,
228) -> Result<(), Box<dyn std::error::Error>> {
229 println!(" Example 5: Conversation Context with Images");
230 println!("----------------------------------------------");
231
232 let image_url = SAMPLE_IMAGE_URLS[0];
233
234 // First message: Analyze the image
235 println!("Step 1: Initial image analysis");
236 print!("Assistant: ");
237 io::stdout().flush()?;
238
239 let mut chat_builder = client
240 .chat()
241 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
242 .user_with_image_url("What's the main subject of this image?", image_url)
243 .temperature(0.3);
244
245 let response1 = client.send_chat(chat_builder).await?;
246 let first_response = response1.content().unwrap_or("No response").to_string();
247 println!("{first_response}");
248
249 // Second message: Follow-up question (without re-uploading the image)
250 println!("\nStep 2: Follow-up question");
251 print!("Assistant: ");
252 io::stdout().flush()?;
253
254 chat_builder = client
255 .chat()
256 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
257 .user_with_image_url("What's the main subject of this image?", image_url)
258 .assistant(&first_response)
259 .user("What colors are most prominent in the image we just discussed?")
260 .temperature(0.3);
261
262 let response2 = client.send_chat(chat_builder).await?;
263
264 if let Some(content) = response2.content() {
265 println!("{content}");
266 }
267
268 // Third message: Ask for creative interpretation
269 println!("\nStep 3: Creative interpretation");
270 print!("Assistant: ");
271 io::stdout().flush()?;
272
273 let second_response = response2.content().unwrap_or("No response").to_string();
274
275 chat_builder = client
276 .chat()
277 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
278 .user_with_image_url("What's the main subject of this image?", image_url)
279 .assistant(&first_response)
280 .user("What colors are most prominent in the image we just discussed?")
281 .assistant(second_response)
282 .user("Based on our discussion, write a short poem inspired by this image.")
283 .temperature(0.7);
284
285 let response3 = client.send_chat(chat_builder).await?;
286
287 if let Some(content) = response3.content() {
288 println!("{content}");
289 }
290
291 println!();
292 Ok(())
293}
More examples
examples/moderations.rs (line 254)
228async fn response_filtering(client: &Client) -> Result<()> {
229 // Filter AI responses before showing to users
230
231 println!("Generating and moderating AI responses:");
232
233 // Generate response
234 let prompt = "Tell me about technology";
235 let builder = client.chat().user(prompt).max_completion_tokens(100);
236 let response = client.send_chat(builder).await?;
237
238 if let Some(content) = response.content() {
239 println!("Generated response: '{}'", content);
240
241 // Moderate the response
242 let moderation_result = simulate_moderation(content);
243
244 if moderation_result.flagged {
245 println!(
246 " Response flagged! Categories: {:?}",
247 moderation_result.categories
248 );
249 println!("Action: Response blocked or regenerated");
250
251 // Regenerate with more strict instructions
252 let safe_builder = client
253 .chat()
254 .system("Provide helpful, safe, and appropriate responses only.")
255 .user(prompt)
256 .max_completion_tokens(100);
257 let safe_response = client.send_chat(safe_builder).await?;
258
259 if let Some(safe_content) = safe_response.content() {
260 println!("Regenerated safe response: '{}'", safe_content);
261 }
262 } else {
263 println!(" Response passed moderation");
264 }
265 }
266
267 Ok(())
268}
examples/chat_comprehensive.rs (line 204)
184async fn demonstrate_basic_chat(
185 client: &Client,
186 conversation: &mut ConversationManager,
187) -> Result<(), Box<dyn std::error::Error>> {
188 println!(" Example 1: Basic Chat Completion");
189 println!("----------------------------------");
190
191 let user_message = "Hello! Can you explain what you can help me with?";
192 conversation.add_user_message(user_message.to_string());
193
194 println!("User: {user_message}");
195 print!("Assistant: ");
196 io::stdout().flush()?;
197
198 // Build the chat request with conversation history
199 let messages = conversation.get_conversation_for_api();
200 let mut chat_builder = client.chat();
201
202 for (role, content) in messages {
203 match role.as_str() {
204 "system" => chat_builder = chat_builder.system(content),
205 "user" => chat_builder = chat_builder.user(content),
206 "assistant" => chat_builder = chat_builder.assistant(content),
207 _ => {} // Ignore unknown roles
208 }
209 }
210
211 // Send the request
212 let response = client.send_chat(chat_builder.temperature(0.7)).await?;
213
214 if let Some(content) = response.content() {
215 println!("{content}");
216 conversation.add_assistant_message(content.to_string(), None);
217
218 // Track token usage if available
219 if let Some(usage) = response.usage() {
220 conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
221 }
222 } else {
223 println!("No response content received");
224 }
225
226 println!();
227 Ok(())
228}
229
230/// Demonstrate multi-turn conversation.
231async fn demonstrate_multi_turn_chat(
232 client: &Client,
233 conversation: &mut ConversationManager,
234) -> Result<(), Box<dyn std::error::Error>> {
235 println!(" Example 2: Multi-turn Conversation");
236 println!("------------------------------------");
237
238 let questions = vec![
239 "What's the capital of France?",
240 "What's the population of that city?",
241 "Can you tell me an interesting fact about it?",
242 ];
243
244 for question in questions {
245 conversation.add_user_message(question.to_string());
246
247 println!("User: {question}");
248 print!("Assistant: ");
249 io::stdout().flush()?;
250
251 // Build chat request with full conversation history
252 let messages = conversation.get_conversation_for_api();
253 let mut chat_builder = client.chat();
254
255 for (role, content) in messages {
256 match role.as_str() {
257 "system" => chat_builder = chat_builder.system(content),
258 "user" => chat_builder = chat_builder.user(content),
259 "assistant" => chat_builder = chat_builder.assistant(content),
260 _ => {}
261 }
262 }
263
264 let response = client.send_chat(chat_builder.temperature(0.3)).await?;
265
266 if let Some(content) = response.content() {
267 println!("{content}");
268 conversation.add_assistant_message(content.to_string(), None);
269
270 // Track token usage
271 if let Some(usage) = response.usage() {
272 conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
273 }
274 }
275
276 println!();
277 // Small delay between questions for readability
278 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
279 }
280
281 Ok(())
282}
283
284/// Demonstrate streaming chat response.
285async fn demonstrate_streaming_chat(
286 _client: &Client,
287 conversation: &mut ConversationManager,
288) -> Result<(), Box<dyn std::error::Error>> {
289 println!(" Example 3: Streaming Chat Response");
290 println!("------------------------------------");
291
292 // Add user message for streaming example
293 let streaming_question = "Can you write a short poem about programming?";
294 conversation.add_user_message(streaming_question.to_string());
295
296 println!("User: {streaming_question}");
297 println!("Assistant (streaming): ");
298
299 // Note: Streaming is not yet fully implemented in the client
300 // This is a placeholder showing the intended API
301 println!(" Streaming functionality is being implemented...");
302 println!("Future implementation will show real-time token-by-token responses");
303
304 // Simulate what streaming would look like
305 let simulated_response = "Programming flows like poetry in motion,\nEach function a verse, each loop a devotion.\nVariables dance through memory's halls,\nWhile algorithms answer logic's calls.";
306
307 // Simulate typing effect
308 for char in simulated_response.chars() {
309 print!("{char}");
310 io::stdout().flush()?;
311 tokio::time::sleep(tokio::time::Duration::from_millis(30)).await;
312 }
313 println!("\n");
314
315 // Add the response to conversation history
316 conversation.add_assistant_message(simulated_response.to_string(), None);
317
318 Ok(())
319}
320
321/// Demonstrate token usage tracking.
322async fn demonstrate_token_tracking(
323 client: &Client,
324 conversation: &mut ConversationManager,
325) -> Result<(), Box<dyn std::error::Error>> {
326 println!(" Example 4: Token Usage Tracking");
327 println!("---------------------------------");
328
329 let efficiency_question = "In one sentence, what is machine learning?";
330 conversation.add_user_message(efficiency_question.to_string());
331
332 println!("User: {efficiency_question}");
333 print!("Assistant: ");
334 io::stdout().flush()?;
335
336 // Build chat request
337 let messages = conversation.get_conversation_for_api();
338 let mut chat_builder = client.chat().max_completion_tokens(50); // Limit tokens for demo
339
340 for (role, content) in messages {
341 match role.as_str() {
342 "system" => chat_builder = chat_builder.system(content),
343 "user" => chat_builder = chat_builder.user(content),
344 "assistant" => chat_builder = chat_builder.assistant(content),
345 _ => {}
346 }
347 }
348
349 let response = client.send_chat(chat_builder).await?;
350
351 if let Some(content) = response.content() {
352 println!("{content}");
353
354 // Display detailed token usage
355 if let Some(usage) = response.usage() {
356 println!("\n Token Usage Breakdown:");
357 println!(" Prompt tokens: {}", usage.prompt_tokens);
358 println!(" Completion tokens: {}", usage.completion_tokens);
359 println!(" Total tokens: {}", usage.total_tokens);
360
361 conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
362
363 conversation.add_assistant_message(content.to_string(), Some(usage.completion_tokens));
364 } else {
365 conversation.add_assistant_message(content.to_string(), None);
366 }
367 }
368
369 println!();
370 Ok(())
371}
examples/azure_comprehensive.rs (line 63)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 tracing_subscriber::fmt::init();
11
12 println!("=== Azure OpenAI Comprehensive API Test ===\n");
13
14 let client = Client::from_env()?.build();
15
16 // Test 1: Simple chat completion
17 println!("1. Testing simple chat completion...");
18 let builder = client.chat_simple("What is 2+2? Answer in one word.");
19 match client.send_chat(builder).await {
20 Ok(response) => {
21 if let Some(content) = response.content() {
22 println!(" ✓ Chat completion: {content}");
23 }
24 }
25 Err(e) => println!(" ✗ Chat completion failed: {e}"),
26 }
27
28 // Test 2: Chat with system message
29 println!("\n2. Testing chat with system message...");
30 let builder = client.chat_with_system(
31 "You are a helpful assistant that responds in one sentence.",
32 "What is Rust?",
33 );
34 match client.send_chat(builder).await {
35 Ok(response) => {
36 if let Some(content) = response.content() {
37 println!(" ✓ System message chat: {content}");
38 }
39 }
40 Err(e) => println!(" ✗ System message chat failed: {e}"),
41 }
42
43 // Test 3: Chat with temperature
44 println!("\n3. Testing chat with custom parameters...");
45 let builder = client
46 .chat()
47 .user("Say 'test' in a creative way")
48 .temperature(0.7)
49 .max_tokens(50);
50 match client.send_chat(builder).await {
51 Ok(response) => {
52 if let Some(content) = response.content() {
53 println!(" ✓ Custom parameters: {content}");
54 }
55 }
56 Err(e) => println!(" ✗ Custom parameters failed: {e}"),
57 }
58
59 // Test 4: Multiple messages conversation
60 println!("\n4. Testing multi-message conversation...");
61 let builder = client
62 .chat()
63 .system("You are a helpful assistant")
64 .user("My name is Alice")
65 .assistant("Hello Alice! Nice to meet you.")
66 .user("What's my name?");
67 match client.send_chat(builder).await {
68 Ok(response) => {
69 if let Some(content) = response.content() {
70 println!(" ✓ Multi-message: {content}");
71 }
72 }
73 Err(e) => println!(" ✗ Multi-message failed: {e}"),
74 }
75
76 // Test 5: Chat with max_tokens limit
77 println!("\n5. Testing with max_tokens limit...");
78 let builder = client.chat().user("Explain quantum physics").max_tokens(20);
79 match client.send_chat(builder).await {
80 Ok(response) => {
81 if let Some(content) = response.content() {
82 println!(" ✓ Limited tokens: {content}");
83 println!(" (Note: response is truncated due to max_tokens=20)");
84 }
85 }
86 Err(e) => println!(" ✗ Max tokens test failed: {e}"),
87 }
88
89 // Test 6: Using responses API
90 println!("\n6. Testing responses API...");
91 let builder = client.responses().user("What is the capital of France?");
92 match client.send_responses(builder).await {
93 Ok(response) => {
94 if let Some(content) = response.content() {
95 println!(" ✓ Responses API: {content}");
96 }
97 }
98 Err(e) => println!(" ✗ Responses API failed: {e}"),
99 }
100
101 println!("\n=== Test Summary ===");
102 println!("Azure OpenAI integration tested across multiple endpoints!");
103 println!("\nNote: Some advanced features like embeddings, streaming, and");
104 println!("tool calling may require specific Azure OpenAI deployments.");
105
106 Ok(())
107}
examples/langfuse.rs (line 79): identical to the full langfuse.rs listing shown above under new.
pub fn user(self, content: impl Into<String>) -> Self
Add a user message to the conversation.
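A minimal sketch, assuming a configured Client. Alternating user and assistant calls replay a multi-turn history, since chat completions are stateless and the full conversation is resent on each request:

let builder = client
    .chat()
    .user("My name is Alice")
    .assistant("Hello Alice! Nice to meet you.")
    .user("What's my name?");
let response = client.send_chat(builder).await?;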
Examples found in repository
examples/tool_calling.rs (line 131)
128async fn simple_tool_call(client: &Client) -> Result<()> {
129 let builder = client
130 .chat()
131 .user("What's the weather like in San Francisco?")
132 .tools(vec![get_weather_tool()]);
133 let response = client.send_chat(builder).await?;
134
135 // Check for tool calls
136 let tool_calls = response.tool_calls();
137 if !tool_calls.is_empty() {
138 for tool_call in tool_calls {
139 println!("Tool called: {}", tool_call.function_name());
140 println!("Arguments: {}", tool_call.function_arguments());
141
142 // Execute the function
143 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
144 let result = execute_weather_function(params)?;
145 println!("Function result: {}", result);
146 }
147 }
148
149 Ok(())
150}
151
152async fn multiple_tools(client: &Client) -> Result<()> {
153 let builder = client
154 .chat()
155 .user("What's the weather in NYC and what time is it there?")
156 .tools(vec![get_weather_tool(), get_time_tool()]);
157 let response = client.send_chat(builder).await?;
158
159 for tool_call in response.tool_calls() {
160 match tool_call.function_name() {
161 "get_weather" => {
162 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
163 let result = execute_weather_function(params)?;
164 println!("Weather result: {}", result);
165 }
166 "get_current_time" => {
167 let params: serde_json::Value =
168 serde_json::from_str(tool_call.function_arguments())?;
169 if let Some(timezone) = params["timezone"].as_str() {
170 let result = execute_time_function(timezone);
171 println!("Time result: {}", result);
172 }
173 }
174 _ => println!("Unknown tool: {}", tool_call.function_name()),
175 }
176 }
177
178 Ok(())
179}
180
181async fn tool_choice_control(client: &Client) -> Result<()> {
182 // Force specific tool
183 println!("Forcing weather tool:");
184 let builder = client
185 .chat()
186 .user("Tell me about Paris")
187 .tools(vec![get_weather_tool(), get_time_tool()])
188 .tool_choice(ToolChoiceHelper::specific("get_weather"));
189 let response = client.send_chat(builder).await?;
190
191 for tool_call in response.tool_calls() {
192 println!("Forced tool: {}", tool_call.function_name());
193 }
194
195 // Disable tools
196 println!("\nDisabling tools:");
197 let builder = client
198 .chat()
199 .user("What's the weather?")
200 .tools(vec![get_weather_tool()])
201 .tool_choice(ToolChoiceHelper::none());
202 let response = client.send_chat(builder).await?;
203
204 if let Some(content) = response.content() {
205 println!("Response without tools: {}", content);
206 }
207
208 Ok(())
209}
210
211async fn conversation_with_tools(client: &Client) -> Result<()> {
212 // This is a simplified version that demonstrates the concept
213 // without getting into the complexities of message history management
214
215 println!("=== Conversation with Tools (Simplified) ===");
216
217 // First request with tool call
218 let builder = client
219 .chat()
220 .user("What's the weather in Tokyo?")
221 .tools(vec![get_weather_tool()]);
222 let response = client.send_chat(builder).await?;
223
224 // Check for tool calls and simulate responses
225 for tool_call in response.tool_calls() {
226 println!("Tool called: {}", tool_call.function_name());
227 println!("Arguments: {}", tool_call.function_arguments());
228
229 // In a real implementation, you would:
230 // 1. Parse the arguments
231 // 2. Execute the actual function
232 // 3. Create tool messages with results
233 // 4. Send another request with the tool results
234
235 println!("Simulated weather result: Sunny, 24°C");
236 }
237
238 println!("Note: Full conversation with tool results requires complex message handling");
239 println!("This simplified version demonstrates tool calling detection");
240
241 Ok(())
242}
243
244fn streaming_with_tools(_client: &Client) {
245 println!("Streaming response with tools:");
246
247 // Note: Streaming with tool calls is more complex and requires
248 // proper handling of partial tool call chunks. For now, this is
249 // a placeholder showing the concept.
250
251 println!("This would demonstrate streaming tool calls if streaming API was available");
252 println!("In streaming mode, tool calls would arrive as chunks that need to be assembled");
253}
254
255async fn parallel_tool_calls(client: &Client) -> Result<()> {
256 let builder = client
257 .chat()
258 .user("Check the weather in Tokyo, London, and New York")
259 .tools(vec![get_weather_tool()]);
260 let response = client.send_chat(builder).await?;
261
262 // Modern models can call multiple tools in parallel
263 let tool_calls = response.tool_calls();
264 println!("Parallel tool calls: {}", tool_calls.len());
265
266 // Collect arguments first to avoid lifetime issues
267 let args_vec: Vec<String> = tool_calls
268 .iter()
269 .map(|tc| tc.function_arguments().to_string())
270 .collect();
271
272 // Execute all in parallel using tokio
273 let mut handles = Vec::new();
274 for args in args_vec {
275 let handle = tokio::spawn(async move {
276 let params: WeatherParams = serde_json::from_str(&args)?;
277 execute_weather_function(params)
278 });
279 handles.push(handle);
280 }
281
282 // Wait for all results
283 for (i, handle) in handles.into_iter().enumerate() {
284 match handle.await {
285 Ok(Ok(result)) => println!("Location {}: {}", i + 1, result),
286 Ok(Err(e)) => println!("Location {} error: {}", i + 1, e),
287 Err(e) => println!("Task {} panicked: {}", i + 1, e),
288 }
289 }
290
291 Ok(())
292}
More examples
examples/retry_patterns.rs (line 369)
356async fn fallback_chain(client: &Client) -> Result<()> {
357 // Define fallback chain
358 let strategies = vec![
359 ("GPT-4o", "gpt-4o", 1024),
360 ("GPT-4o-mini", "gpt-4o-mini", 512),
361 ("GPT-3.5", "gpt-3.5-turbo", 256),
362 ];
363
364 let prompt = "Explain quantum computing";
365
366 for (name, _model, max_tokens) in strategies {
367 println!("Trying {} (max_tokens: {})", name, max_tokens);
368
369 let builder = client.chat().user(prompt).max_completion_tokens(max_tokens);
370 match client.send_chat(builder).await {
371 Ok(response) => {
372 println!("Success with {}", name);
373 if let Some(content) = response.content() {
374 println!("Response: {}...", &content[..content.len().min(100)]);
375 }
376 return Ok(());
377 }
378 Err(e) => {
379 println!("Failed with {}: {}", name, e);
380 }
381 }
382 }
383
384 println!("All fallback strategies exhausted");
385 Ok(())
386}
examples/models.rs (line 186)
159async fn model_selection_by_task(client: &Client) -> Result<()> {
160 // Task-specific model recommendations
161 let task_models = vec![
162 ("Simple Q&A", "gpt-3.5-turbo", "Fast and cost-effective"),
163 ("Complex reasoning", "gpt-4o", "Best reasoning capabilities"),
164 ("Code generation", "gpt-4o", "Excellent code understanding"),
165 ("Vision tasks", "gpt-4o", "Native vision support"),
166 (
167 "Quick responses",
168 "gpt-4o-mini",
169 "Low latency, good quality",
170 ),
171 (
172 "Bulk processing",
173 "gpt-3.5-turbo",
174 "Best cost/performance ratio",
175 ),
176 ];
177
178 for (task, model, reason) in task_models {
179 println!("Task: {}", task);
180 println!(" Recommended: {}", model);
181 println!(" Reason: {}", reason);
182
183 // Demo the model
184 let builder = client
185 .chat()
186 .user(format!("Say 'Hello from {}'", model))
187 .max_completion_tokens(10);
188 let response = client.send_chat(builder).await?;
189
190 if let Some(content) = response.content() {
191 println!(" Response: {}\n", content);
192 }
193 }
194
195 Ok(())
196}
197
198async fn cost_optimization(client: &Client) -> Result<()> {
199 let models = get_model_registry();
200 let test_prompt = "Explain the theory of relativity in one sentence";
201 let estimated_input_tokens = 15;
202 let estimated_output_tokens = 50;
203
204 println!("Cost comparison for same task:");
205 println!("Prompt: '{}'\n", test_prompt);
206
207 let mut costs = Vec::new();
208
209 for (name, info) in &models {
210 if !info.deprecated {
211 let input_cost = (f64::from(estimated_input_tokens) / 1000.0) * info.cost_per_1k_input;
212 let output_cost =
213 (f64::from(estimated_output_tokens) / 1000.0) * info.cost_per_1k_output;
214 let total_cost = input_cost + output_cost;
215
216 costs.push((name.clone(), total_cost));
217 }
218 }
219
220 costs.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
221
222 println!("{:<20} {:>15}", "Model", "Estimated Cost");
223 println!("{:-<35}", "");
224 for (model, cost) in costs {
225 println!("{:<20} ${:>14.6}", model, cost);
226 }
227
228 // Demonstrate cheapest vs best
229 println!("\nRunning with cheapest model (gpt-3.5-turbo):");
230 let builder = client.chat().user(test_prompt);
231 let cheap_response = client.send_chat(builder).await?;
232
233 if let Some(content) = cheap_response.content() {
234 println!("Response: {}", content);
235 }
236
237 Ok(())
238}
239
240async fn performance_testing(client: &Client) -> Result<()> {
241 use std::time::Instant;
242
243 let models_to_test = vec!["gpt-4o-mini", "gpt-3.5-turbo"];
244 let test_prompt = "Write a haiku about programming";
245
246 println!("Performance comparison:");
247 println!("{:<20} {:>10} {:>15}", "Model", "Latency", "Tokens/sec");
248 println!("{:-<45}", "");
249
250 for model in models_to_test {
251 let start = Instant::now();
252
253 let builder = client.chat().user(test_prompt);
254 let response = client.send_chat(builder).await?;
255
256 let elapsed = start.elapsed();
257
258 if let Some(usage) = response.usage() {
259 let total_tokens = f64::from(usage.total_tokens);
260 let tokens_per_sec = total_tokens / elapsed.as_secs_f64();
261
262 println!("{:<20} {:>10.2?} {:>15.1}", model, elapsed, tokens_per_sec);
263 }
264 }
265
266 Ok(())
267}
268
269async fn model_migration(client: &Client) -> Result<()> {
270 // Handle deprecated model migration
271 let deprecated_mappings = HashMap::from([
272 ("text-davinci-003", "gpt-3.5-turbo"),
273 ("gpt-4-32k", "gpt-4o"),
274 ("gpt-4-vision-preview", "gpt-4o"),
275 ]);
276
277 let requested_model = "text-davinci-003"; // Deprecated model
278
279 if let Some(replacement) = deprecated_mappings.get(requested_model) {
280 println!(
281 "Warning: {} is deprecated. Using {} instead.",
282 requested_model, replacement
283 );
284
285 let builder = client.chat().user("Hello from migrated model");
286 let response = client.send_chat(builder).await?;
287
288 if let Some(content) = response.content() {
289 println!("Response from {}: {}", replacement, content);
290 }
291 }
292
293 Ok(())
294}
295
296async fn dynamic_model_selection(client: &Client) -> Result<()> {
297 // Select model based on runtime conditions
298
299 #[derive(Debug)]
300 struct RequestContext {
301 urgency: Urgency,
302 complexity: Complexity,
303 budget: Budget,
304 needs_vision: bool,
305 }
306
307 #[derive(Debug)]
308 enum Urgency {
309 Low,
310 Medium,
311 High,
312 }
313
314 #[derive(Debug)]
315 enum Complexity {
316 Simple,
317 Moderate,
318 Complex,
319 }
320
321 #[derive(Debug)]
322 enum Budget {
323 Tight,
324 Normal,
325 Flexible,
326 }
327
328 const fn select_model(ctx: &RequestContext) -> &'static str {
329 match (&ctx.urgency, &ctx.complexity, &ctx.budget) {
330 // High urgency + simple = fast cheap model, or tight budget = cheapest
331 (Urgency::High, Complexity::Simple, _) | (_, _, Budget::Tight) => "gpt-3.5-turbo",
332
333 // Complex + flexible budget = best model
334 (_, Complexity::Complex, Budget::Flexible) => "gpt-4o",
335
336 // Vision required
337 _ if ctx.needs_vision => "gpt-4o",
338
339 // Default balanced choice
340 _ => "gpt-4o-mini",
341 }
342 }
343
344 // Example contexts
345 let contexts = [
346 RequestContext {
347 urgency: Urgency::High,
348 complexity: Complexity::Simple,
349 budget: Budget::Tight,
350 needs_vision: false,
351 },
352 RequestContext {
353 urgency: Urgency::Low,
354 complexity: Complexity::Complex,
355 budget: Budget::Flexible,
356 needs_vision: false,
357 },
358 RequestContext {
359 urgency: Urgency::Medium,
360 complexity: Complexity::Moderate,
361 budget: Budget::Normal,
362 needs_vision: true,
363 },
364 ];
365
366 for (i, ctx) in contexts.iter().enumerate() {
367 let model = select_model(ctx);
368 println!("Context {}: {:?}", i + 1, ctx);
369 println!(" Selected model: {}", model);
370
371 let builder = client
372 .chat()
373 .user(format!("Hello from dynamically selected {}", model))
374 .max_completion_tokens(20);
375 let response = client.send_chat(builder).await?;
376
377 if let Some(content) = response.content() {
378 println!(" Response: {}\n", content);
379 }
380 }
381
382 Ok(())
383}
examples/moderations.rs (line 235)
228async fn response_filtering(client: &Client) -> Result<()> {
229 // Filter AI responses before showing to users
230
231 println!("Generating and moderating AI responses:");
232
233 // Generate response
234 let prompt = "Tell me about technology";
235 let builder = client.chat().user(prompt).max_completion_tokens(100);
236 let response = client.send_chat(builder).await?;
237
238 if let Some(content) = response.content() {
239 println!("Generated response: '{}'", content);
240
241 // Moderate the response
242 let moderation_result = simulate_moderation(content);
243
244 if moderation_result.flagged {
245 println!(
246 " Response flagged! Categories: {:?}",
247 moderation_result.categories
248 );
249 println!("Action: Response blocked or regenerated");
250
251 // Regenerate with more strict instructions
252 let safe_builder = client
253 .chat()
254 .system("Provide helpful, safe, and appropriate responses only.")
255 .user(prompt)
256 .max_completion_tokens(100);
257 let safe_response = client.send_chat(safe_builder).await?;
258
259 if let Some(safe_content) = safe_response.content() {
260 println!("Regenerated safe response: '{}'", safe_content);
261 }
262 } else {
263 println!(" Response passed moderation");
264 }
265 }
266
267 Ok(())
268}
269
270fn policy_enforcement(_client: &Client) {
271 // Enforce content policies
272 let policy = ModerationPolicy {
273 thresholds: HashMap::from([
274 ("harassment".to_string(), 0.5),
275 ("violence".to_string(), 0.6),
276 ("sexual".to_string(), 0.4),
277 ]),
278 auto_reject_categories: vec![
279 "harassment/threatening".to_string(),
280 "violence/graphic".to_string(),
281 ],
282 require_human_review: vec!["self-harm".to_string()],
283 };
284
285 let test_cases = vec![
286 "Normal conversation about work",
287 "Slightly aggressive language here",
288 "Content requiring review",
289 ];
290
291 for content in test_cases {
292 println!("Checking: '{}'", content);
293
294 let result = simulate_moderation(content);
295 let action = apply_policy(&result, &policy);
296
297 match action {
298 PolicyAction::Approve => println!(" Approved"),
299 PolicyAction::Reject(reason) => println!(" Rejected: {}", reason),
300 PolicyAction::Review(reason) => println!(" Human review needed: {}", reason),
301 }
302 }
303}
304
305async fn moderation_pipeline(client: &Client) -> Result<()> {
306 // Complete moderation pipeline
307
308 type FilterFn = Box<dyn Fn(&str) -> bool + Send + Sync>;
309
310 struct ModerationPipeline {
311 pre_filters: Vec<FilterFn>,
312 post_filters: Vec<FilterFn>,
313 }
314
315 let pipeline = ModerationPipeline {
316 pre_filters: vec![
317 Box::new(|text| text.len() < 10000), // Length check
318 Box::new(|text| !text.is_empty()), // Non-empty check
319 ],
320 post_filters: vec![
321 Box::new(|text| !text.contains("blockedword")), // Custom word filter
322 ],
323 };
324
325 println!("Running moderation pipeline:");
326
327 let user_input = "Please help me with this technical question about Rust programming.";
328
329 // Step 1: Pre-filters
330 println!("1. Pre-filters:");
331 for (i, filter) in pipeline.pre_filters.iter().enumerate() {
332 if filter(user_input) {
333 println!(" Pre-filter {} passed", i + 1);
334 } else {
335 println!(" Pre-filter {} failed", i + 1);
336 return Ok(());
337 }
338 }
339
340 // Step 2: API moderation
341 println!("2. API moderation:");
342 let moderation_result = simulate_moderation(user_input);
343 if moderation_result.flagged {
344 println!(" Content flagged by API");
345 return Ok(());
346 }
347 println!(" Passed API moderation");
348
349 // Step 3: Generate response
350 println!("3. Generating response:");
351 let builder = client.chat().user(user_input).max_completion_tokens(50);
352 let response = client.send_chat(builder).await?;
353
354 if let Some(content) = response.content() {
355 println!(" Generated: '{}'", content);
356
357 // Step 4: Post-filters
358 println!("4. Post-filters:");
359 for (i, filter) in pipeline.post_filters.iter().enumerate() {
360 if filter(content) {
361 println!(" Post-filter {} passed", i + 1);
362 } else {
363 println!(" Post-filter {} failed", i + 1);
364 return Ok(());
365 }
366 }
367
368 // Step 5: Response moderation
369 println!("5. Response moderation:");
370 let response_moderation = simulate_moderation(content);
371 if response_moderation.flagged {
372 println!(" Response flagged");
373 } else {
374 println!(" Response approved");
375 println!("\nFinal output: '{}'", content);
376 }
377 }
378
379 Ok(())
380}
examples/error_handling.rs (line 85)
78async fn pattern_matching_errors() {
79 let Ok(client_builder) = Client::from_env() else {
80 return;
81 };
82 let client = client_builder.build();
83
84 // Simulate various errors by using invalid parameters
85 let builder = client.chat().user("test");
86 let result = client.send_chat(builder).await;
87
88 match result {
89 Ok(_) => println!("Unexpected success"),
90 Err(e) => match e {
91 Error::Api { message, .. } => {
92 println!("API Error: {}", message);
93 }
94 Error::RateLimit(message) => {
95 println!("Rate limited: {}", message);
96 }
97 Error::Authentication(message) => {
98 println!("Authentication failed: {}", message);
99 }
100 Error::Http(source) => {
101 println!("Network error: {}", source);
102 }
103 Error::Json(source) => {
104 println!("Serialization error: {}", source);
105 }
106 Error::Stream(message) => {
107 println!("Stream error: {}", message);
108 }
109 Error::InvalidRequest(message) => {
110 println!("Invalid request: {}", message);
111 }
112 Error::Config(message) => {
113 println!("Configuration error: {}", message);
114 }
115 _ => {
116 println!("Other error: {}", e);
117 }
118 },
119 }
120}
121
122async fn rate_limit_handling() {
123 const MAX_RETRIES: u32 = 3;
124
125 let Ok(client_builder) = Client::from_env() else {
126 return;
127 };
128 let client = client_builder.build();
129
130 // Retry logic for rate limiting
131 let mut retries = 0;
132
133 loop {
134 match client.send_chat(client.chat_simple("Hello")).await {
135 Ok(response) => {
136 if let Some(content) = response.content() {
137 println!("Success: {}", content);
138 } else {
139 println!("Success: (no content)");
140 }
141 break;
142 }
143 Err(Error::RateLimit(_message)) => {
144 if retries >= MAX_RETRIES {
145 println!("Max retries exceeded");
146 break;
147 }
148
149 let wait_time = Duration::from_secs(1);
150 println!("Rate limited. Waiting {:?} before retry...", wait_time);
151 sleep(wait_time).await;
152 retries += 1;
153 }
154 Err(e) => {
155 println!("Other error: {}", e);
156 break;
157 }
158 }
159 }
160}
161
162async fn token_limit_handling() {
163 let Ok(client_builder) = Client::from_env() else {
164 return;
165 };
166 let client = client_builder.build();
167
168 // Generate a very long prompt that might exceed token limits
169 let long_text = "Lorem ipsum ".repeat(10000);
170
171 match client.send_chat(client.chat_simple(&long_text)).await {
172 Ok(_) => println!("Processed long text successfully"),
173 Err(Error::InvalidRequest(message)) if message.contains("token") => {
174 println!("Token limit issue: {}", message);
175
176 // Retry with truncated text
177 let truncated = &long_text[..1000];
178 println!("Retrying with truncated text...");
179
180 match client.send_chat(client.chat_simple(truncated)).await {
181 Ok(response) => {
182 if let Some(content) = response.content() {
183 println!("Success with truncated: {}", content);
184 } else {
185 println!("Success with truncated: (no content)");
186 }
187 }
188 Err(e) => println!("Still failed: {}", e),
189 }
190 }
191 Err(e) => println!("Other error: {}", e),
192 }
193}
194
195async fn auth_error_handling() -> Result<()> {
196 // Try with invalid API key
197 let config = Config::builder().api_key("invalid-api-key").build();
198 let invalid_client = Client::builder(config)?.build();
199
200 match invalid_client
201 .send_chat(invalid_client.chat_simple("Hello"))
202 .await
203 {
204 Ok(_) => println!("Unexpected success"),
205 Err(Error::Authentication(message)) => {
206 println!("Authentication failed as expected: {}", message);
207
208 // Suggest remediation
209 println!("Suggestions:");
210 println!("1. Check your OPENAI_API_KEY environment variable");
211 println!("2. Verify API key at https://platform.openai.com/api-keys");
212 println!("3. Ensure your API key has necessary permissions");
213 }
214 Err(e) => println!("Unexpected error type: {}", e),
215 }
216
217 Ok(())
218}
219
220async fn network_error_handling() -> Result<()> {
221 use openai_ergonomic::Config;
222 use reqwest_middleware::ClientBuilder;
223
224 // Create a reqwest client with very short timeout to simulate network issues
225 let reqwest_client = reqwest::Client::builder()
226 .timeout(Duration::from_secs(1))
227 .build()
228 .expect("Failed to build reqwest client");
229
230 let http_client = ClientBuilder::new(reqwest_client).build();
231
232 let config = Config::builder()
233 .api_key("test-key")
234 .http_client(http_client)
235 .build();
236
237 let client = Client::builder(config)?.build();
238
239 match client.send_chat(client.chat_simple("Hello")).await {
240 Ok(_) => println!("Unexpected success"),
241 Err(Error::Http(source)) => {
242 println!("Network error as expected: {}", source);
243
244 // Implement exponential backoff
245 let mut backoff = Duration::from_millis(100);
246 for attempt in 1..=3 {
247 println!("Retry attempt {} after {:?}", attempt, backoff);
248 sleep(backoff).await;
249 backoff *= 2;
250
251 // In real scenario, retry with proper timeout
252 // match client.send_chat(client.chat_simple("Hello")).await { ... }
253 }
254 }
255 Err(e) => println!("Other error: {}", e),
256 }
257
258 Ok(())
259}
260
261async fn custom_error_context() -> Result<()> {
262 let client = Client::from_env()?.build();
263
264 // Wrap errors with custom context
265 let result = client
266 .send_chat(client.chat_simple("Analyze this data"))
267 .await
268 .map_err(|e| {
269 eprintln!("Context: Failed during data analysis task");
270 eprintln!("Timestamp: {:?}", std::time::SystemTime::now());
271 eprintln!("Original error: {}", e);
272 e
273 })?;
274
275 if let Some(content) = result.content() {
276 println!("Result: {}", content);
277 } else {
278 println!("Result: (no content)");
279 }
280 Ok(())
281}
282
283async fn error_recovery_strategies() -> Result<()> {
284 let client = Client::from_env()?.build();
285
286 // Strategy 1: Fallback to simpler model
287 let result = try_with_fallback(&client, "gpt-4o", "gpt-3.5-turbo").await?;
288 println!("Fallback strategy result: {}", result);
289
290 // Strategy 2: Circuit breaker pattern
291 let circuit_breaker = CircuitBreaker::new();
292 if circuit_breaker.is_open() {
293 println!("Circuit breaker is open, skipping API calls");
294 return Ok(());
295 }
296
297 match client.send_chat(client.chat_simple("Test")).await {
298 Ok(response) => {
299 circuit_breaker.record_success();
300 if let Some(content) = response.content() {
301 println!("Circuit breaker success: {}", content);
302 } else {
303 println!("Circuit breaker success: (no content)");
304 }
305 }
306 Err(e) => {
307 circuit_breaker.record_failure();
308 println!("Circuit breaker failure: {}", e);
309 }
310 }
311
312 // Strategy 3: Request hedging (parallel requests with first success wins)
313 let hedge_result = hedged_request(&client).await?;
314 println!("Hedged request result: {}", hedge_result);
315
316 Ok(())
317}
318
319async fn try_with_fallback(client: &Client, primary: &str, _fallback: &str) -> Result<String> {
320 // Try primary model first
321 let builder = client.chat().user("Hello");
322 match client.send_chat(builder).await {
323 Ok(response) => Ok(response.content().unwrap_or("").to_string()),
324 Err(e) => {
325 println!("Primary model failed ({}): {}, trying fallback", primary, e);
326
327 // Try fallback model
328 let fallback_builder = client.chat().user("Hello");
329 client
330 .send_chat(fallback_builder)
331 .await
332 .map(|r| r.content().unwrap_or("").to_string())
333 }
334 }
335}
examples/chat_comprehensive.rs (line 205): lines 184-371 of this listing are identical to the chat_comprehensive.rs listing shown above under system; only the continuation is reproduced here.
372
373/// Demonstrate error handling patterns.
374async fn demonstrate_error_handling(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
375 println!(" Example 5: Error Handling Patterns");
376 println!("------------------------------------");
377
378 println!("Testing various error scenarios...\n");
379
380 // Test 1: Invalid model
381 println!("Test 1: Invalid model name");
382 let invalid_model_builder = client.chat()
383 .user("Hello")
384 // Note: We can't easily test invalid model without modifying the builder
385 // This shows the pattern for handling errors
386 .temperature(0.7);
387
388 match client.send_chat(invalid_model_builder).await {
389 Ok(_) => println!(" Request succeeded (model validation not yet implemented)"),
390 Err(e) => match &e {
391 Error::Api {
392 status, message, ..
393 } => {
394 println!(" API Error ({status}): {message}");
395 }
396 Error::Http(reqwest_err) => {
397 println!(" HTTP Error: {reqwest_err}");
398 }
399 Error::InvalidRequest(msg) => {
400 println!(" Invalid Request: {msg}");
401 }
402 _ => {
403 println!(" Unexpected Error: {e}");
404 }
405 },
406 }
407
408 // Test 2: Empty message validation
409 println!("\nTest 2: Empty message validation");
410 let empty_builder = client.chat(); // No messages added
411
412 match client.send_chat(empty_builder).await {
413 Ok(_) => println!(" Empty request unexpectedly succeeded"),
414 Err(Error::InvalidRequest(msg)) => {
415 println!(" Validation caught empty request: {msg}");
416 }
417 Err(e) => {
418 println!(" Unexpected error type: {e}");
419 }
420 }
421
422 // Test 3: Configuration errors
423 println!("\nTest 3: Configuration validation");
424 println!(" Client configuration is valid (created successfully)");
425
426 println!("\n Error handling patterns demonstrated:");
427 println!(" • API error classification");
428 println!(" • Request validation");
429 println!(" • Network error handling");
430 println!(" • Configuration validation");
431
432 println!();
433 Ok(())
434}
Sourcepub fn user_with_image_url(
self,
text: impl Into<String>,
image_url: impl Into<String>,
) -> Self
pub fn user_with_image_url( self, text: impl Into<String>, image_url: impl Into<String>, ) -> Self
Add a user message with both text and an image URL.
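A minimal sketch of typical usage (assuming an async context with a `client` built as in the repository examples below; the URL is a placeholder):

let chat_builder = client
    .chat()
    .system("You are a helpful assistant that can analyze images.")
    .user_with_image_url("What do you see in this image?", "https://example.com/photo.jpg")
    .temperature(0.3);
let response = client.send_chat(chat_builder).await?;
println!("{}", response.content().unwrap_or("No response content received"));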
Examples found in repository?
examples/vision_chat.rs (line 90)
72async fn demonstrate_basic_image_analysis(
73 client: &Client,
74) -> Result<(), Box<dyn std::error::Error>> {
75 println!(" Example 1: Basic Image Analysis");
76 println!("----------------------------------");
77
78 let image_url = SAMPLE_IMAGE_URLS[0];
79 let question = "What do you see in this image? Please describe it in detail.";
80
81 println!("Image URL: {image_url}");
82 println!("Question: {question}");
83 print!("Assistant: ");
84 io::stdout().flush()?;
85
86 // Use the convenient user_with_image_url method
87 let chat_builder = client
88 .chat()
89 .system("You are a helpful AI assistant that can analyze images. Provide detailed, accurate descriptions of what you see.")
90 .user_with_image_url(question, image_url)
91 .temperature(0.3);
92
93 let response = client.send_chat(chat_builder).await?;
94
95 if let Some(content) = response.content() {
96 println!("{content}");
97
98 // Show usage information
99 if let Some(usage) = response.usage() {
100 println!("\n Token usage:");
101 println!(" Prompt tokens: {}", usage.prompt_tokens);
102 println!(" Completion tokens: {}", usage.completion_tokens);
103 println!(" Total tokens: {}", usage.total_tokens);
104 }
105 } else {
106 println!("No response content received");
107 }
108
109 println!();
110 Ok(())
111}
112
113/// Demonstrate analysis of multiple images in a single message.
114async fn demonstrate_multiple_images(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
115 println!(" Example 2: Multiple Image Analysis");
116 println!("---------------------------------------");
117
118 let question = "Compare these two images. What are the differences and similarities?";
119
120 println!("Question: {question}");
121 println!("Image 1: {}", SAMPLE_IMAGE_URLS[0]);
122 println!("Image 2: {}", SAMPLE_IMAGE_URLS[1]);
123 print!("Assistant: ");
124 io::stdout().flush()?;
125
126 // Create message parts manually for multiple images
127 let parts = vec![
128 text_part(question),
129 image_url_part_with_detail(SAMPLE_IMAGE_URLS[0], Detail::Auto),
130 image_url_part_with_detail(SAMPLE_IMAGE_URLS[1], Detail::Auto),
131 ];
132
133 let chat_builder = client
134 .chat()
135 .system("You are an expert at comparing and analyzing images. Provide thoughtful comparisons focusing on visual elements, composition, and content.")
136 .user_with_parts(parts)
137 .temperature(0.4);
138
139 let response = client.send_chat(chat_builder).await?;
140
141 if let Some(content) = response.content() {
142 println!("{content}");
143 } else {
144 println!("No response content received");
145 }
146
147 println!();
148 Ok(())
149}
150
151/// Demonstrate different detail levels for image analysis.
152async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153 println!(" Example 3: Different Detail Levels");
154 println!("------------------------------------");
155
156 let image_url = SAMPLE_IMAGE_URLS[0];
157 let question = "Analyze this image";
158
159 // Test different detail levels
160 let detail_levels = vec![
161 (Detail::Low, "Low detail (faster, less detailed)"),
162 (Detail::High, "High detail (slower, more detailed)"),
163 (Detail::Auto, "Auto detail (balanced)"),
164 ];
165
166 for (detail, description) in detail_levels {
167 println!("\n{description}:");
168 print!("Assistant: ");
169 io::stdout().flush()?;
170
171 let chat_builder = client
172 .chat()
173 .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174 .user_with_image_url_and_detail(question, image_url, detail)
175 .temperature(0.2)
176 .max_completion_tokens(100); // Limit response length for comparison
177
178 let response = client.send_chat(chat_builder).await?;
179
180 if let Some(content) = response.content() {
181 println!("{content}");
182 }
183 }
184
185 println!();
186 Ok(())
187}
188
189/// Demonstrate base64 image encoding and analysis.
190async fn demonstrate_base64_image(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
191 println!(" Example 4: Base64 Image Analysis");
192 println!("-----------------------------------");
193
194 let question = "What is this image? It's very small, what can you tell about it?";
195
196 println!("Question: {question}");
197 println!("Image: Small test image encoded as base64");
198 print!("Assistant: ");
199 io::stdout().flush()?;
200
201 // Create message parts with base64 image
202 let parts = vec![
203 text_part(question),
204 image_base64_part_with_detail(SAMPLE_BASE64_IMAGE, "image/png", Detail::High),
205 ];
206
207 let chat_builder = client
208 .chat()
209 .system("You are analyzing images provided in base64 format. Even if an image is very small or simple, try to provide what information you can.")
210 .user_with_parts(parts)
211 .temperature(0.3);
212
213 let response = client.send_chat(chat_builder).await?;
214
215 if let Some(content) = response.content() {
216 println!("{content}");
217 } else {
218 println!("No response content received");
219 }
220
221 println!();
222 Ok(())
223}
224
225/// Demonstrate conversation context with images.
226async fn demonstrate_conversation_with_images(
227 client: &Client,
228) -> Result<(), Box<dyn std::error::Error>> {
229 println!(" Example 5: Conversation Context with Images");
230 println!("----------------------------------------------");
231
232 let image_url = SAMPLE_IMAGE_URLS[0];
233
234 // First message: Analyze the image
235 println!("Step 1: Initial image analysis");
236 print!("Assistant: ");
237 io::stdout().flush()?;
238
239 let mut chat_builder = client
240 .chat()
241 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
242 .user_with_image_url("What's the main subject of this image?", image_url)
243 .temperature(0.3);
244
245 let response1 = client.send_chat(chat_builder).await?;
246 let first_response = response1.content().unwrap_or("No response").to_string();
247 println!("{first_response}");
248
249 // Second message: Follow-up question (without re-uploading the image)
250 println!("\nStep 2: Follow-up question");
251 print!("Assistant: ");
252 io::stdout().flush()?;
253
254 chat_builder = client
255 .chat()
256 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
257 .user_with_image_url("What's the main subject of this image?", image_url)
258 .assistant(&first_response)
259 .user("What colors are most prominent in the image we just discussed?")
260 .temperature(0.3);
261
262 let response2 = client.send_chat(chat_builder).await?;
263
264 if let Some(content) = response2.content() {
265 println!("{content}");
266 }
267
268 // Third message: Ask for creative interpretation
269 println!("\nStep 3: Creative interpretation");
270 print!("Assistant: ");
271 io::stdout().flush()?;
272
273 let second_response = response2.content().unwrap_or("No response").to_string();
274
275 chat_builder = client
276 .chat()
277 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
278 .user_with_image_url("What's the main subject of this image?", image_url)
279 .assistant(&first_response)
280 .user("What colors are most prominent in the image we just discussed?")
281 .assistant(second_response)
282 .user("Based on our discussion, write a short poem inspired by this image.")
283 .temperature(0.7);
284
285 let response3 = client.send_chat(chat_builder).await?;
286
287 if let Some(content) = response3.content() {
288 println!("{content}");
289 }
290
291 println!();
292 Ok(())
293}
294
295/// Demonstrate error handling patterns for vision requests.
296async fn demonstrate_error_handling(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
297 println!(" Example 6: Error Handling Patterns");
298 println!("------------------------------------");
299
300 println!("Testing various error scenarios...\n");
301
302 // Test 1: Invalid image URL
303 println!("Test 1: Invalid image URL");
304 let invalid_url = "https://this-domain-does-not-exist-12345.com/image.jpg";
305
306 let invalid_builder = client
307 .chat()
308 .user_with_image_url("What do you see?", invalid_url)
309 .temperature(0.3);
310
311 match client.send_chat(invalid_builder).await {
312 Ok(_) => println!(" Invalid URL request unexpectedly succeeded"),
313 Err(e) => match &e {
314 Error::Api {
315 status, message, ..
316 } => {
317 println!(" API properly rejected invalid URL ({status}): {message}");
318 }
319 Error::Http(reqwest_err) => {
320 println!(" HTTP error caught: {reqwest_err}");
321 }
322 Error::InvalidRequest(msg) => {
323 println!(" Validation caught invalid URL: {msg}");
324 }
325 _ => {
326 println!("ℹ Other error type: {e}");
327 }
328 },
329 }
330
331 // Test 2: Empty message with image
332 println!("\nTest 2: Empty text with image");
333 let empty_text_builder = client
334 .chat()
335 .user_with_image_url("", SAMPLE_IMAGE_URLS[0])
336 .temperature(0.3);
337
338 match client.send_chat(empty_text_builder).await {
339 Ok(response) => {
340 if let Some(content) = response.content() {
341 println!(
342 " API handled empty text gracefully: {}",
343 content.chars().take(50).collect::<String>()
344 );
345 }
346 }
347 Err(e) => {
348 println!("ℹ Empty text error: {e}");
349 }
350 }
351
352 // Test 3: Malformed base64 data
353 println!("\nTest 3: Malformed base64 image data");
354 let malformed_base64 = "this-is-not-valid-base64!@#$%";
355 let malformed_parts = vec![
356 text_part("What is this?"),
357 image_base64_part_with_detail(malformed_base64, "image/png", Detail::Auto),
358 ];
359
360 let malformed_builder = client.chat().user_with_parts(malformed_parts);
361
362 match client.send_chat(malformed_builder).await {
363 Ok(_) => println!(" Malformed base64 unexpectedly succeeded"),
364 Err(e) => match &e {
365 Error::Api {
366 status, message, ..
367 } => {
368 println!(" API properly rejected malformed base64 ({status}): {message}");
369 }
370 _ => {
371 println!("ℹ Other error for malformed base64: {e}");
372 }
373 },
374 }
375
376 println!("\n Error handling patterns demonstrated:");
377 println!(" • Invalid image URL handling");
378 println!(" • Empty text with image handling");
379 println!(" • Malformed base64 data validation");
380 println!(" • API error classification");
381 println!(" • Network error handling");
382
383 println!();
384 Ok(())
385}
Sourcepub fn user_with_image_url_and_detail(
self,
text: impl Into<String>,
image_url: impl Into<String>,
detail: Detail,
) -> Self
pub fn user_with_image_url_and_detail( self, text: impl Into<String>, image_url: impl Into<String>, detail: Detail, ) -> Self
Add a user message with both text and an image URL with specified detail level.
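A minimal sketch, assuming the same setup as the example below. The detail level trades thoroughness against latency and token cost (`Detail::Low` is faster, `Detail::High` more thorough, `Detail::Auto` balanced):

let chat_builder = client
    .chat()
    .user_with_image_url_and_detail("Analyze this image", "https://example.com/photo.jpg", Detail::Low)
    .max_completion_tokens(100); // keep responses short for comparison
let response = client.send_chat(chat_builder).await?;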
Examples found in repository?
examples/vision_chat.rs (line 174)
152async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153 println!(" Example 3: Different Detail Levels");
154 println!("------------------------------------");
155
156 let image_url = SAMPLE_IMAGE_URLS[0];
157 let question = "Analyze this image";
158
159 // Test different detail levels
160 let detail_levels = vec![
161 (Detail::Low, "Low detail (faster, less detailed)"),
162 (Detail::High, "High detail (slower, more detailed)"),
163 (Detail::Auto, "Auto detail (balanced)"),
164 ];
165
166 for (detail, description) in detail_levels {
167 println!("\n{description}:");
168 print!("Assistant: ");
169 io::stdout().flush()?;
170
171 let chat_builder = client
172 .chat()
173 .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174 .user_with_image_url_and_detail(question, image_url, detail)
175 .temperature(0.2)
176 .max_completion_tokens(100); // Limit response length for comparison
177
178 let response = client.send_chat(chat_builder).await?;
179
180 if let Some(content) = response.content() {
181 println!("{content}");
182 }
183 }
184
185 println!();
186 Ok(())
187}
Sourcepub fn user_with_parts(
self,
parts: Vec<ChatCompletionRequestUserMessageContentPart>,
) -> Self
pub fn user_with_parts( self, parts: Vec<ChatCompletionRequestUserMessageContentPart>, ) -> Self
Add a user message with multiple content parts (text and/or images).
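A minimal sketch combining one text part with two image parts, assuming the `text_part` and `image_url_part_with_detail` helpers used in vision_chat.rs below; the URLs are placeholders:

let parts = vec![
    text_part("Compare these two images."),
    image_url_part_with_detail("https://example.com/a.jpg", Detail::Auto),
    image_url_part_with_detail("https://example.com/b.jpg", Detail::Auto),
];
let chat_builder = client.chat().user_with_parts(parts);
let response = client.send_chat(chat_builder).await?;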
Examples found in repository?
examples/vision_chat.rs (line 136)
114async fn demonstrate_multiple_images(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
115 println!(" Example 2: Multiple Image Analysis");
116 println!("---------------------------------------");
117
118 let question = "Compare these two images. What are the differences and similarities?";
119
120 println!("Question: {question}");
121 println!("Image 1: {}", SAMPLE_IMAGE_URLS[0]);
122 println!("Image 2: {}", SAMPLE_IMAGE_URLS[1]);
123 print!("Assistant: ");
124 io::stdout().flush()?;
125
126 // Create message parts manually for multiple images
127 let parts = vec![
128 text_part(question),
129 image_url_part_with_detail(SAMPLE_IMAGE_URLS[0], Detail::Auto),
130 image_url_part_with_detail(SAMPLE_IMAGE_URLS[1], Detail::Auto),
131 ];
132
133 let chat_builder = client
134 .chat()
135 .system("You are an expert at comparing and analyzing images. Provide thoughtful comparisons focusing on visual elements, composition, and content.")
136 .user_with_parts(parts)
137 .temperature(0.4);
138
139 let response = client.send_chat(chat_builder).await?;
140
141 if let Some(content) = response.content() {
142 println!("{content}");
143 } else {
144 println!("No response content received");
145 }
146
147 println!();
148 Ok(())
149}
150
151/// Demonstrate different detail levels for image analysis.
152async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153 println!(" Example 3: Different Detail Levels");
154 println!("------------------------------------");
155
156 let image_url = SAMPLE_IMAGE_URLS[0];
157 let question = "Analyze this image";
158
159 // Test different detail levels
160 let detail_levels = vec![
161 (Detail::Low, "Low detail (faster, less detailed)"),
162 (Detail::High, "High detail (slower, more detailed)"),
163 (Detail::Auto, "Auto detail (balanced)"),
164 ];
165
166 for (detail, description) in detail_levels {
167 println!("\n{description}:");
168 print!("Assistant: ");
169 io::stdout().flush()?;
170
171 let chat_builder = client
172 .chat()
173 .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174 .user_with_image_url_and_detail(question, image_url, detail)
175 .temperature(0.2)
176 .max_completion_tokens(100); // Limit response length for comparison
177
178 let response = client.send_chat(chat_builder).await?;
179
180 if let Some(content) = response.content() {
181 println!("{content}");
182 }
183 }
184
185 println!();
186 Ok(())
187}
188
189/// Demonstrate base64 image encoding and analysis.
190async fn demonstrate_base64_image(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
191 println!(" Example 4: Base64 Image Analysis");
192 println!("-----------------------------------");
193
194 let question = "What is this image? It's very small, what can you tell about it?";
195
196 println!("Question: {question}");
197 println!("Image: Small test image encoded as base64");
198 print!("Assistant: ");
199 io::stdout().flush()?;
200
201 // Create message parts with base64 image
202 let parts = vec![
203 text_part(question),
204 image_base64_part_with_detail(SAMPLE_BASE64_IMAGE, "image/png", Detail::High),
205 ];
206
207 let chat_builder = client
208 .chat()
209 .system("You are analyzing images provided in base64 format. Even if an image is very small or simple, try to provide what information you can.")
210 .user_with_parts(parts)
211 .temperature(0.3);
212
213 let response = client.send_chat(chat_builder).await?;
214
215 if let Some(content) = response.content() {
216 println!("{content}");
217 } else {
218 println!("No response content received");
219 }
220
221 println!();
222 Ok(())
223}
224
225/// Demonstrate conversation context with images.
226async fn demonstrate_conversation_with_images(
227 client: &Client,
228) -> Result<(), Box<dyn std::error::Error>> {
229 println!(" Example 5: Conversation Context with Images");
230 println!("----------------------------------------------");
231
232 let image_url = SAMPLE_IMAGE_URLS[0];
233
234 // First message: Analyze the image
235 println!("Step 1: Initial image analysis");
236 print!("Assistant: ");
237 io::stdout().flush()?;
238
239 let mut chat_builder = client
240 .chat()
241 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
242 .user_with_image_url("What's the main subject of this image?", image_url)
243 .temperature(0.3);
244
245 let response1 = client.send_chat(chat_builder).await?;
246 let first_response = response1.content().unwrap_or("No response").to_string();
247 println!("{first_response}");
248
249 // Second message: Follow-up question (without re-uploading the image)
250 println!("\nStep 2: Follow-up question");
251 print!("Assistant: ");
252 io::stdout().flush()?;
253
254 chat_builder = client
255 .chat()
256 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
257 .user_with_image_url("What's the main subject of this image?", image_url)
258 .assistant(&first_response)
259 .user("What colors are most prominent in the image we just discussed?")
260 .temperature(0.3);
261
262 let response2 = client.send_chat(chat_builder).await?;
263
264 if let Some(content) = response2.content() {
265 println!("{content}");
266 }
267
268 // Third message: Ask for creative interpretation
269 println!("\nStep 3: Creative interpretation");
270 print!("Assistant: ");
271 io::stdout().flush()?;
272
273 let second_response = response2.content().unwrap_or("No response").to_string();
274
275 chat_builder = client
276 .chat()
277 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
278 .user_with_image_url("What's the main subject of this image?", image_url)
279 .assistant(&first_response)
280 .user("What colors are most prominent in the image we just discussed?")
281 .assistant(second_response)
282 .user("Based on our discussion, write a short poem inspired by this image.")
283 .temperature(0.7);
284
285 let response3 = client.send_chat(chat_builder).await?;
286
287 if let Some(content) = response3.content() {
288 println!("{content}");
289 }
290
291 println!();
292 Ok(())
293}
294
295/// Demonstrate error handling patterns for vision requests.
296async fn demonstrate_error_handling(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
297 println!(" Example 6: Error Handling Patterns");
298 println!("------------------------------------");
299
300 println!("Testing various error scenarios...\n");
301
302 // Test 1: Invalid image URL
303 println!("Test 1: Invalid image URL");
304 let invalid_url = "https://this-domain-does-not-exist-12345.com/image.jpg";
305
306 let invalid_builder = client
307 .chat()
308 .user_with_image_url("What do you see?", invalid_url)
309 .temperature(0.3);
310
311 match client.send_chat(invalid_builder).await {
312 Ok(_) => println!(" Invalid URL request unexpectedly succeeded"),
313 Err(e) => match &e {
314 Error::Api {
315 status, message, ..
316 } => {
317 println!(" API properly rejected invalid URL ({status}): {message}");
318 }
319 Error::Http(reqwest_err) => {
320 println!(" HTTP error caught: {reqwest_err}");
321 }
322 Error::InvalidRequest(msg) => {
323 println!(" Validation caught invalid URL: {msg}");
324 }
325 _ => {
326 println!("ℹ Other error type: {e}");
327 }
328 },
329 }
330
331 // Test 2: Empty message with image
332 println!("\nTest 2: Empty text with image");
333 let empty_text_builder = client
334 .chat()
335 .user_with_image_url("", SAMPLE_IMAGE_URLS[0])
336 .temperature(0.3);
337
338 match client.send_chat(empty_text_builder).await {
339 Ok(response) => {
340 if let Some(content) = response.content() {
341 println!(
342 " API handled empty text gracefully: {}",
343 content.chars().take(50).collect::<String>()
344 );
345 }
346 }
347 Err(e) => {
348 println!("ℹ Empty text error: {e}");
349 }
350 }
351
352 // Test 3: Malformed base64 data
353 println!("\nTest 3: Malformed base64 image data");
354 let malformed_base64 = "this-is-not-valid-base64!@#$%";
355 let malformed_parts = vec![
356 text_part("What is this?"),
357 image_base64_part_with_detail(malformed_base64, "image/png", Detail::Auto),
358 ];
359
360 let malformed_builder = client.chat().user_with_parts(malformed_parts);
361
362 match client.send_chat(malformed_builder).await {
363 Ok(_) => println!(" Malformed base64 unexpectedly succeeded"),
364 Err(e) => match &e {
365 Error::Api {
366 status, message, ..
367 } => {
368 println!(" API properly rejected malformed base64 ({status}): {message}");
369 }
370 _ => {
371 println!("ℹ Other error for malformed base64: {e}");
372 }
373 },
374 }
375
376 println!("\n Error handling patterns demonstrated:");
377 println!(" • Invalid image URL handling");
378 println!(" • Empty text with image handling");
379 println!(" • Malformed base64 data validation");
380 println!(" • API error classification");
381 println!(" • Network error handling");
382
383 println!();
384 Ok(())
385}
Sourcepub fn assistant(self, content: impl Into<String>) -> Self
pub fn assistant(self, content: impl Into<String>) -> Self
Add an assistant message to the conversation.
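A minimal sketch, assuming a `client` as in the examples below. Replaying an earlier assistant turn gives the model the context it needs for the follow-up question:

let chat_builder = client
    .chat()
    .system("You are a helpful assistant")
    .user("My name is Alice")
    .assistant("Hello Alice! Nice to meet you.")
    .user("What's my name?");
let response = client.send_chat(chat_builder).await?;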
Examples found in repository?
examples/chat_comprehensive.rs (line 206)
184async fn demonstrate_basic_chat(
185 client: &Client,
186 conversation: &mut ConversationManager,
187) -> Result<(), Box<dyn std::error::Error>> {
188 println!(" Example 1: Basic Chat Completion");
189 println!("----------------------------------");
190
191 let user_message = "Hello! Can you explain what you can help me with?";
192 conversation.add_user_message(user_message.to_string());
193
194 println!("User: {user_message}");
195 print!("Assistant: ");
196 io::stdout().flush()?;
197
198 // Build the chat request with conversation history
199 let messages = conversation.get_conversation_for_api();
200 let mut chat_builder = client.chat();
201
202 for (role, content) in messages {
203 match role.as_str() {
204 "system" => chat_builder = chat_builder.system(content),
205 "user" => chat_builder = chat_builder.user(content),
206 "assistant" => chat_builder = chat_builder.assistant(content),
207 _ => {} // Ignore unknown roles
208 }
209 }
210
211 // Send the request
212 let response = client.send_chat(chat_builder.temperature(0.7)).await?;
213
214 if let Some(content) = response.content() {
215 println!("{content}");
216 conversation.add_assistant_message(content.to_string(), None);
217
218 // Track token usage if available
219 if let Some(usage) = response.usage() {
220 conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
221 }
222 } else {
223 println!("No response content received");
224 }
225
226 println!();
227 Ok(())
228}
229
230/// Demonstrate multi-turn conversation.
231async fn demonstrate_multi_turn_chat(
232 client: &Client,
233 conversation: &mut ConversationManager,
234) -> Result<(), Box<dyn std::error::Error>> {
235 println!(" Example 2: Multi-turn Conversation");
236 println!("------------------------------------");
237
238 let questions = vec![
239 "What's the capital of France?",
240 "What's the population of that city?",
241 "Can you tell me an interesting fact about it?",
242 ];
243
244 for question in questions {
245 conversation.add_user_message(question.to_string());
246
247 println!("User: {question}");
248 print!("Assistant: ");
249 io::stdout().flush()?;
250
251 // Build chat request with full conversation history
252 let messages = conversation.get_conversation_for_api();
253 let mut chat_builder = client.chat();
254
255 for (role, content) in messages {
256 match role.as_str() {
257 "system" => chat_builder = chat_builder.system(content),
258 "user" => chat_builder = chat_builder.user(content),
259 "assistant" => chat_builder = chat_builder.assistant(content),
260 _ => {}
261 }
262 }
263
264 let response = client.send_chat(chat_builder.temperature(0.3)).await?;
265
266 if let Some(content) = response.content() {
267 println!("{content}");
268 conversation.add_assistant_message(content.to_string(), None);
269
270 // Track token usage
271 if let Some(usage) = response.usage() {
272 conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
273 }
274 }
275
276 println!();
277 // Small delay between questions for readability
278 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
279 }
280
281 Ok(())
282}
283
284/// Demonstrate streaming chat response.
285async fn demonstrate_streaming_chat(
286 _client: &Client,
287 conversation: &mut ConversationManager,
288) -> Result<(), Box<dyn std::error::Error>> {
289 println!(" Example 3: Streaming Chat Response");
290 println!("------------------------------------");
291
292 // Add user message for streaming example
293 let streaming_question = "Can you write a short poem about programming?";
294 conversation.add_user_message(streaming_question.to_string());
295
296 println!("User: {streaming_question}");
297 println!("Assistant (streaming): ");
298
299 // Note: Streaming is not yet fully implemented in the client
300 // This is a placeholder showing the intended API
301 println!(" Streaming functionality is being implemented...");
302 println!("Future implementation will show real-time token-by-token responses");
303
304 // Simulate what streaming would look like
305 let simulated_response = "Programming flows like poetry in motion,\nEach function a verse, each loop a devotion.\nVariables dance through memory's halls,\nWhile algorithms answer logic's calls.";
306
307 // Simulate typing effect
308 for char in simulated_response.chars() {
309 print!("{char}");
310 io::stdout().flush()?;
311 tokio::time::sleep(tokio::time::Duration::from_millis(30)).await;
312 }
313 println!("\n");
314
315 // Add the response to conversation history
316 conversation.add_assistant_message(simulated_response.to_string(), None);
317
318 Ok(())
319}
320
321/// Demonstrate token usage tracking.
322async fn demonstrate_token_tracking(
323 client: &Client,
324 conversation: &mut ConversationManager,
325) -> Result<(), Box<dyn std::error::Error>> {
326 println!(" Example 4: Token Usage Tracking");
327 println!("---------------------------------");
328
329 let efficiency_question = "In one sentence, what is machine learning?";
330 conversation.add_user_message(efficiency_question.to_string());
331
332 println!("User: {efficiency_question}");
333 print!("Assistant: ");
334 io::stdout().flush()?;
335
336 // Build chat request
337 let messages = conversation.get_conversation_for_api();
338 let mut chat_builder = client.chat().max_completion_tokens(50); // Limit tokens for demo
339
340 for (role, content) in messages {
341 match role.as_str() {
342 "system" => chat_builder = chat_builder.system(content),
343 "user" => chat_builder = chat_builder.user(content),
344 "assistant" => chat_builder = chat_builder.assistant(content),
345 _ => {}
346 }
347 }
348
349 let response = client.send_chat(chat_builder).await?;
350
351 if let Some(content) = response.content() {
352 println!("{content}");
353
354 // Display detailed token usage
355 if let Some(usage) = response.usage() {
356 println!("\n Token Usage Breakdown:");
357 println!(" Prompt tokens: {}", usage.prompt_tokens);
358 println!(" Completion tokens: {}", usage.completion_tokens);
359 println!(" Total tokens: {}", usage.total_tokens);
360
361 conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
362
363 conversation.add_assistant_message(content.to_string(), Some(usage.completion_tokens));
364 } else {
365 conversation.add_assistant_message(content.to_string(), None);
366 }
367 }
368
369 println!();
370 Ok(())
371}
More examples
examples/vision_chat.rs (line 258)
226async fn demonstrate_conversation_with_images(
227 client: &Client,
228) -> Result<(), Box<dyn std::error::Error>> {
229 println!(" Example 5: Conversation Context with Images");
230 println!("----------------------------------------------");
231
232 let image_url = SAMPLE_IMAGE_URLS[0];
233
234 // First message: Analyze the image
235 println!("Step 1: Initial image analysis");
236 print!("Assistant: ");
237 io::stdout().flush()?;
238
239 let mut chat_builder = client
240 .chat()
241 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
242 .user_with_image_url("What's the main subject of this image?", image_url)
243 .temperature(0.3);
244
245 let response1 = client.send_chat(chat_builder).await?;
246 let first_response = response1.content().unwrap_or("No response").to_string();
247 println!("{first_response}");
248
249 // Second message: Follow-up question (without re-uploading the image)
250 println!("\nStep 2: Follow-up question");
251 print!("Assistant: ");
252 io::stdout().flush()?;
253
254 chat_builder = client
255 .chat()
256 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
257 .user_with_image_url("What's the main subject of this image?", image_url)
258 .assistant(&first_response)
259 .user("What colors are most prominent in the image we just discussed?")
260 .temperature(0.3);
261
262 let response2 = client.send_chat(chat_builder).await?;
263
264 if let Some(content) = response2.content() {
265 println!("{content}");
266 }
267
268 // Third message: Ask for creative interpretation
269 println!("\nStep 3: Creative interpretation");
270 print!("Assistant: ");
271 io::stdout().flush()?;
272
273 let second_response = response2.content().unwrap_or("No response").to_string();
274
275 chat_builder = client
276 .chat()
277 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
278 .user_with_image_url("What's the main subject of this image?", image_url)
279 .assistant(&first_response)
280 .user("What colors are most prominent in the image we just discussed?")
281 .assistant(second_response)
282 .user("Based on our discussion, write a short poem inspired by this image.")
283 .temperature(0.7);
284
285 let response3 = client.send_chat(chat_builder).await?;
286
287 if let Some(content) = response3.content() {
288 println!("{content}");
289 }
290
291 println!();
292 Ok(())
293}
examples/azure_comprehensive.rs (line 65)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 tracing_subscriber::fmt::init();
11
12 println!("=== Azure OpenAI Comprehensive API Test ===\n");
13
14 let client = Client::from_env()?.build();
15
16 // Test 1: Simple chat completion
17 println!("1. Testing simple chat completion...");
18 let builder = client.chat_simple("What is 2+2? Answer in one word.");
19 match client.send_chat(builder).await {
20 Ok(response) => {
21 if let Some(content) = response.content() {
22 println!(" ✓ Chat completion: {content}");
23 }
24 }
25 Err(e) => println!(" ✗ Chat completion failed: {e}"),
26 }
27
28 // Test 2: Chat with system message
29 println!("\n2. Testing chat with system message...");
30 let builder = client.chat_with_system(
31 "You are a helpful assistant that responds in one sentence.",
32 "What is Rust?",
33 );
34 match client.send_chat(builder).await {
35 Ok(response) => {
36 if let Some(content) = response.content() {
37 println!(" ✓ System message chat: {content}");
38 }
39 }
40 Err(e) => println!(" ✗ System message chat failed: {e}"),
41 }
42
43 // Test 3: Chat with temperature
44 println!("\n3. Testing chat with custom parameters...");
45 let builder = client
46 .chat()
47 .user("Say 'test' in a creative way")
48 .temperature(0.7)
49 .max_tokens(50);
50 match client.send_chat(builder).await {
51 Ok(response) => {
52 if let Some(content) = response.content() {
53 println!(" ✓ Custom parameters: {content}");
54 }
55 }
56 Err(e) => println!(" ✗ Custom parameters failed: {e}"),
57 }
58
59 // Test 4: Multiple messages conversation
60 println!("\n4. Testing multi-message conversation...");
61 let builder = client
62 .chat()
63 .system("You are a helpful assistant")
64 .user("My name is Alice")
65 .assistant("Hello Alice! Nice to meet you.")
66 .user("What's my name?");
67 match client.send_chat(builder).await {
68 Ok(response) => {
69 if let Some(content) = response.content() {
70 println!(" ✓ Multi-message: {content}");
71 }
72 }
73 Err(e) => println!(" ✗ Multi-message failed: {e}"),
74 }
75
76 // Test 5: Chat with max_tokens limit
77 println!("\n5. Testing with max_tokens limit...");
78 let builder = client.chat().user("Explain quantum physics").max_tokens(20);
79 match client.send_chat(builder).await {
80 Ok(response) => {
81 if let Some(content) = response.content() {
82 println!(" ✓ Limited tokens: {content}");
83 println!(" (Note: response is truncated due to max_tokens=20)");
84 }
85 }
86 Err(e) => println!(" ✗ Max tokens test failed: {e}"),
87 }
88
89 // Test 6: Using responses API
90 println!("\n6. Testing responses API...");
91 let builder = client.responses().user("What is the capital of France?");
92 match client.send_responses(builder).await {
93 Ok(response) => {
94 if let Some(content) = response.content() {
95 println!(" ✓ Responses API: {content}");
96 }
97 }
98 Err(e) => println!(" ✗ Responses API failed: {e}"),
99 }
100
101 println!("\n=== Test Summary ===");
102 println!("Azure OpenAI integration tested across multiple endpoints!");
103 println!("\nNote: Some advanced features like embeddings, streaming, and");
104 println!("tool calling may require specific Azure OpenAI deployments.");
105
106 Ok(())
107}
examples/langfuse.rs (line 94)
31async fn main() -> Result<(), Box<dyn std::error::Error>> {
32 // Initialize tracing for logging
33 tracing_subscriber::fmt()
34 .with_env_filter(
35 tracing_subscriber::EnvFilter::from_default_env()
36 .add_directive("openai_ergonomic=debug".parse()?),
37 )
38 .init();
39
40 // 1. Build Langfuse exporter from environment variables
41 let exporter = ExporterBuilder::from_env()?.build()?;
42
43 // 2. Create tracer provider with batch processor
44 let provider = SdkTracerProvider::builder()
45 .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
46 .build();
47
48 // Set as global provider
49 global::set_tracer_provider(provider.clone());
50
51 // 3. Get tracer and create interceptor
52 let tracer = provider.tracer("openai-ergonomic");
53 let langfuse_interceptor =
54 std::sync::Arc::new(LangfuseInterceptor::new(tracer, LangfuseConfig::new()));
55
56 // 4. Create the OpenAI client and add the Langfuse interceptor
57 // Keep a reference to the interceptor so we can update context later
58 let client = Client::from_env()?
59 .with_interceptor(Box::new(langfuse_interceptor.clone()))
60 .build();
61
62 println!(" OpenAI client initialized with Langfuse observability");
63 println!(" Traces will be sent to Langfuse for monitoring\n");
64
65 // Example 1: Simple chat completion
66 println!("Example 1: Simple chat completion");
67 println!("---------------------------------");
68 let chat_builder = client
69 .chat_simple("What is the capital of France? Answer in one word.")
70 .build()?;
71 let response = client.execute_chat(chat_builder).await?;
72 println!("Response: {:?}\n", response.content());
73
74 // Example 2: Chat completion with builder pattern
75 println!("Example 2: Chat with builder pattern");
76 println!("-------------------------------------");
77 let chat_builder = client
78 .chat()
79 .system("You are a helpful assistant that speaks like a pirate.")
80 .user("Tell me about the ocean in 2 sentences.")
81 .temperature(0.7)
82 .max_tokens(100)
83 .build()?;
84 let response = client.execute_chat(chat_builder).await?;
85 println!("Response: {:?}\n", response.content());
86
87 // Example 3: Multiple messages in a conversation
88 println!("Example 3: Conversation");
89 println!("-----------------------");
90 let chat_builder = client
91 .chat()
92 .system("You are a math tutor.")
93 .user("What is 2 + 2?")
94 .assistant("2 + 2 equals 4.")
95 .user("And what about 3 + 3?")
96 .build()?;
97 let response = client.execute_chat(chat_builder).await?;
98 println!("Response: {:?}\n", response.content());
99
100 // Example 4: Error handling (intentionally trigger an error)
101 println!("Example 4: Error handling");
102 println!("-------------------------");
103 // Create a builder with a non-existent model
104 let chat_builder = ChatCompletionBuilder::new("non-existent-model")
105 .user("This should fail")
106 .build()?;
107 let result = client.execute_chat(chat_builder).await;
108
109 match result {
110 Ok(_) => println!("Unexpected success"),
111 Err(e) => println!("Expected error captured: {e}\n"),
112 }
113
114 // Example 5: Embeddings
115 println!("Example 5: Embeddings");
116 println!("--------------------");
117 let embeddings_builder = client.embeddings().text(
118 "text-embedding-ada-002",
119 "The quick brown fox jumps over the lazy dog",
120 );
121 let embeddings = client.embeddings().create(embeddings_builder).await?;
122 println!("Generated {} embedding(s)\n", embeddings.data.len());
123
124 // Example 6: Using custom metadata via interceptor context
125 println!("Example 6: Custom metadata via interceptor context");
126 println!("---------------------------------------------------");
127
128 // Set session and user IDs on the interceptor's context
129 langfuse_interceptor.set_session_id("demo-session-123");
130 langfuse_interceptor.set_user_id("demo-user-456");
131 langfuse_interceptor.add_tags(vec!["example".to_string(), "demo".to_string()]);
132
133 let chat_builder = client
134 .chat_simple("Say 'Hello from custom session!'")
135 .build()?;
136 let response = client.execute_chat(chat_builder).await?;
137 println!("Response with custom metadata: {:?}\n", response.content());
138
139 // Clear context for subsequent calls
140 langfuse_interceptor.clear_context();
141
142 println!(" All examples completed!");
143 println!(" Check your Langfuse dashboard to see the traces");
144 println!(" - Look for traces with operation name 'chat'");
145 println!(" - Each trace includes request/response details, token usage, and timing");
146 println!(" - Example 6 will have custom session_id, user_id, and tags");
147
148 // Shutdown the tracer provider to flush all spans
149 println!("\n⏳ Flushing spans to Langfuse...");
150 provider.shutdown()?;
151
152 Ok(())
153}
Sourcepub fn temperature(self, temperature: f64) -> Self
pub fn temperature(self, temperature: f64) -> Self
Set the temperature for the completion.
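A minimal sketch, assuming a `client` as in the examples below. Lower values (e.g. 0.2) bias toward focused, repeatable output; higher values (e.g. 0.7) allow more varied responses:

let chat_builder = client
    .chat()
    .user("Write a short poem about programming.")
    .temperature(0.7);
let response = client.send_chat(chat_builder).await?;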
Examples found in repository?
examples/vision_chat.rs (line 91)
72async fn demonstrate_basic_image_analysis(
73 client: &Client,
74) -> Result<(), Box<dyn std::error::Error>> {
75 println!(" Example 1: Basic Image Analysis");
76 println!("----------------------------------");
77
78 let image_url = SAMPLE_IMAGE_URLS[0];
79 let question = "What do you see in this image? Please describe it in detail.";
80
81 println!("Image URL: {image_url}");
82 println!("Question: {question}");
83 print!("Assistant: ");
84 io::stdout().flush()?;
85
86 // Use the convenient user_with_image_url method
87 let chat_builder = client
88 .chat()
89 .system("You are a helpful AI assistant that can analyze images. Provide detailed, accurate descriptions of what you see.")
90 .user_with_image_url(question, image_url)
91 .temperature(0.3);
92
93 let response = client.send_chat(chat_builder).await?;
94
95 if let Some(content) = response.content() {
96 println!("{content}");
97
98 // Show usage information
99 if let Some(usage) = response.usage() {
100 println!("\n Token usage:");
101 println!(" Prompt tokens: {}", usage.prompt_tokens);
102 println!(" Completion tokens: {}", usage.completion_tokens);
103 println!(" Total tokens: {}", usage.total_tokens);
104 }
105 } else {
106 println!("No response content received");
107 }
108
109 println!();
110 Ok(())
111}
112
113/// Demonstrate analysis of multiple images in a single message.
114async fn demonstrate_multiple_images(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
115 println!(" Example 2: Multiple Image Analysis");
116 println!("---------------------------------------");
117
118 let question = "Compare these two images. What are the differences and similarities?";
119
120 println!("Question: {question}");
121 println!("Image 1: {}", SAMPLE_IMAGE_URLS[0]);
122 println!("Image 2: {}", SAMPLE_IMAGE_URLS[1]);
123 print!("Assistant: ");
124 io::stdout().flush()?;
125
126 // Create message parts manually for multiple images
127 let parts = vec![
128 text_part(question),
129 image_url_part_with_detail(SAMPLE_IMAGE_URLS[0], Detail::Auto),
130 image_url_part_with_detail(SAMPLE_IMAGE_URLS[1], Detail::Auto),
131 ];
132
133 let chat_builder = client
134 .chat()
135 .system("You are an expert at comparing and analyzing images. Provide thoughtful comparisons focusing on visual elements, composition, and content.")
136 .user_with_parts(parts)
137 .temperature(0.4);
138
139 let response = client.send_chat(chat_builder).await?;
140
141 if let Some(content) = response.content() {
142 println!("{content}");
143 } else {
144 println!("No response content received");
145 }
146
147 println!();
148 Ok(())
149}
150
151/// Demonstrate different detail levels for image analysis.
152async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153 println!(" Example 3: Different Detail Levels");
154 println!("------------------------------------");
155
156 let image_url = SAMPLE_IMAGE_URLS[0];
157 let question = "Analyze this image";
158
159 // Test different detail levels
160 let detail_levels = vec![
161 (Detail::Low, "Low detail (faster, less detailed)"),
162 (Detail::High, "High detail (slower, more detailed)"),
163 (Detail::Auto, "Auto detail (balanced)"),
164 ];
165
166 for (detail, description) in detail_levels {
167 println!("\n{description}:");
168 print!("Assistant: ");
169 io::stdout().flush()?;
170
171 let chat_builder = client
172 .chat()
173 .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174 .user_with_image_url_and_detail(question, image_url, detail)
175 .temperature(0.2)
176 .max_completion_tokens(100); // Limit response length for comparison
177
178 let response = client.send_chat(chat_builder).await?;
179
180 if let Some(content) = response.content() {
181 println!("{content}");
182 }
183 }
184
185 println!();
186 Ok(())
187}
188
189/// Demonstrate base64 image encoding and analysis.
190async fn demonstrate_base64_image(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
191 println!(" Example 4: Base64 Image Analysis");
192 println!("-----------------------------------");
193
194 let question = "What is this image? It's very small, what can you tell about it?";
195
196 println!("Question: {question}");
197 println!("Image: Small test image encoded as base64");
198 print!("Assistant: ");
199 io::stdout().flush()?;
200
201 // Create message parts with base64 image
202 let parts = vec![
203 text_part(question),
204 image_base64_part_with_detail(SAMPLE_BASE64_IMAGE, "image/png", Detail::High),
205 ];
206
207 let chat_builder = client
208 .chat()
209 .system("You are analyzing images provided in base64 format. Even if an image is very small or simple, try to provide what information you can.")
210 .user_with_parts(parts)
211 .temperature(0.3);
212
213 let response = client.send_chat(chat_builder).await?;
214
215 if let Some(content) = response.content() {
216 println!("{content}");
217 } else {
218 println!("No response content received");
219 }
220
221 println!();
222 Ok(())
223}
224
225/// Demonstrate conversation context with images.
226async fn demonstrate_conversation_with_images(
227 client: &Client,
228) -> Result<(), Box<dyn std::error::Error>> {
229 println!(" Example 5: Conversation Context with Images");
230 println!("----------------------------------------------");
231
232 let image_url = SAMPLE_IMAGE_URLS[0];
233
234 // First message: Analyze the image
235 println!("Step 1: Initial image analysis");
236 print!("Assistant: ");
237 io::stdout().flush()?;
238
239 let mut chat_builder = client
240 .chat()
241 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
242 .user_with_image_url("What's the main subject of this image?", image_url)
243 .temperature(0.3);
244
245 let response1 = client.send_chat(chat_builder).await?;
246 let first_response = response1.content().unwrap_or("No response").to_string();
247 println!("{first_response}");
248
249 // Second message: Follow-up question (without re-uploading the image)
250 println!("\nStep 2: Follow-up question");
251 print!("Assistant: ");
252 io::stdout().flush()?;
253
254 chat_builder = client
255 .chat()
256 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
257 .user_with_image_url("What's the main subject of this image?", image_url)
258 .assistant(&first_response)
259 .user("What colors are most prominent in the image we just discussed?")
260 .temperature(0.3);
261
262 let response2 = client.send_chat(chat_builder).await?;
263
264 if let Some(content) = response2.content() {
265 println!("{content}");
266 }
267
268 // Third message: Ask for creative interpretation
269 println!("\nStep 3: Creative interpretation");
270 print!("Assistant: ");
271 io::stdout().flush()?;
272
273 let second_response = response2.content().unwrap_or("No response").to_string();
274
275 chat_builder = client
276 .chat()
277 .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
278 .user_with_image_url("What's the main subject of this image?", image_url)
279 .assistant(&first_response)
280 .user("What colors are most prominent in the image we just discussed?")
281 .assistant(second_response)
282 .user("Based on our discussion, write a short poem inspired by this image.")
283 .temperature(0.7);
284
285 let response3 = client.send_chat(chat_builder).await?;
286
287 if let Some(content) = response3.content() {
288 println!("{content}");
289 }
290
291 println!();
292 Ok(())
293}
294
295/// Demonstrate error handling patterns for vision requests.
296async fn demonstrate_error_handling(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
297 println!(" Example 6: Error Handling Patterns");
298 println!("------------------------------------");
299
300 println!("Testing various error scenarios...\n");
301
302 // Test 1: Invalid image URL
303 println!("Test 1: Invalid image URL");
304 let invalid_url = "https://this-domain-does-not-exist-12345.com/image.jpg";
305
306 let invalid_builder = client
307 .chat()
308 .user_with_image_url("What do you see?", invalid_url)
309 .temperature(0.3);
310
311 match client.send_chat(invalid_builder).await {
312 Ok(_) => println!(" Invalid URL request unexpectedly succeeded"),
313 Err(e) => match &e {
314 Error::Api {
315 status, message, ..
316 } => {
317 println!(" API properly rejected invalid URL ({status}): {message}");
318 }
319 Error::Http(reqwest_err) => {
320 println!(" HTTP error caught: {reqwest_err}");
321 }
322 Error::InvalidRequest(msg) => {
323 println!(" Validation caught invalid URL: {msg}");
324 }
325 _ => {
326 println!("ℹ Other error type: {e}");
327 }
328 },
329 }
330
331 // Test 2: Empty message with image
332 println!("\nTest 2: Empty text with image");
333 let empty_text_builder = client
334 .chat()
335 .user_with_image_url("", SAMPLE_IMAGE_URLS[0])
336 .temperature(0.3);
337
338 match client.send_chat(empty_text_builder).await {
339 Ok(response) => {
340 if let Some(content) = response.content() {
341 println!(
342 " API handled empty text gracefully: {}",
343 content.chars().take(50).collect::<String>()
344 );
345 }
346 }
347 Err(e) => {
348 println!("ℹ Empty text error: {e}");
349 }
350 }
351
352 // Test 3: Malformed base64 data
353 println!("\nTest 3: Malformed base64 image data");
354 let malformed_base64 = "this-is-not-valid-base64!@#$%";
355 let malformed_parts = vec![
356 text_part("What is this?"),
357 image_base64_part_with_detail(malformed_base64, "image/png", Detail::Auto),
358 ];
359
360 let malformed_builder = client.chat().user_with_parts(malformed_parts);
361
362 match client.send_chat(malformed_builder).await {
363 Ok(_) => println!(" Malformed base64 unexpectedly succeeded"),
364 Err(e) => match &e {
365 Error::Api {
366 status, message, ..
367 } => {
368 println!(" API properly rejected malformed base64 ({status}): {message}");
369 }
370 _ => {
371 println!("ℹ Other error for malformed base64: {e}");
372 }
373 },
374 }
375
376 println!("\n Error handling patterns demonstrated:");
377 println!(" • Invalid image URL handling");
378 println!(" • Empty text with image handling");
379 println!(" • Malformed base64 data validation");
380 println!(" • API error classification");
381 println!(" • Network error handling");
382
383 println!();
384 Ok(())
385}
More examples
examples/chat_comprehensive.rs (line 212)
184async fn demonstrate_basic_chat(
185 client: &Client,
186 conversation: &mut ConversationManager,
187) -> Result<(), Box<dyn std::error::Error>> {
188 println!(" Example 1: Basic Chat Completion");
189 println!("----------------------------------");
190
191 let user_message = "Hello! Can you explain what you can help me with?";
192 conversation.add_user_message(user_message.to_string());
193
194 println!("User: {user_message}");
195 print!("Assistant: ");
196 io::stdout().flush()?;
197
198 // Build the chat request with conversation history
199 let messages = conversation.get_conversation_for_api();
200 let mut chat_builder = client.chat();
201
202 for (role, content) in messages {
203 match role.as_str() {
204 "system" => chat_builder = chat_builder.system(content),
205 "user" => chat_builder = chat_builder.user(content),
206 "assistant" => chat_builder = chat_builder.assistant(content),
207 _ => {} // Ignore unknown roles
208 }
209 }
210
211 // Send the request
212 let response = client.send_chat(chat_builder.temperature(0.7)).await?;
213
214 if let Some(content) = response.content() {
215 println!("{content}");
216 conversation.add_assistant_message(content.to_string(), None);
217
218 // Track token usage if available
219 if let Some(usage) = response.usage() {
220 conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
221 }
222 } else {
223 println!("No response content received");
224 }
225
226 println!();
227 Ok(())
228}
229
230/// Demonstrate multi-turn conversation.
231async fn demonstrate_multi_turn_chat(
232 client: &Client,
233 conversation: &mut ConversationManager,
234) -> Result<(), Box<dyn std::error::Error>> {
235 println!(" Example 2: Multi-turn Conversation");
236 println!("------------------------------------");
237
238 let questions = vec![
239 "What's the capital of France?",
240 "What's the population of that city?",
241 "Can you tell me an interesting fact about it?",
242 ];
243
244 for question in questions {
245 conversation.add_user_message(question.to_string());
246
247 println!("User: {question}");
248 print!("Assistant: ");
249 io::stdout().flush()?;
250
251 // Build chat request with full conversation history
252 let messages = conversation.get_conversation_for_api();
253 let mut chat_builder = client.chat();
254
255 for (role, content) in messages {
256 match role.as_str() {
257 "system" => chat_builder = chat_builder.system(content),
258 "user" => chat_builder = chat_builder.user(content),
259 "assistant" => chat_builder = chat_builder.assistant(content),
260 _ => {}
261 }
262 }
263
264 let response = client.send_chat(chat_builder.temperature(0.3)).await?;
265
266 if let Some(content) = response.content() {
267 println!("{content}");
268 conversation.add_assistant_message(content.to_string(), None);
269
270 // Track token usage
271 if let Some(usage) = response.usage() {
272 conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
273 }
274 }
275
276 println!();
277 // Small delay between questions for readability
278 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
279 }
280
281 Ok(())
282}
283
284/// Demonstrate streaming chat response.
285async fn demonstrate_streaming_chat(
286 _client: &Client,
287 conversation: &mut ConversationManager,
288) -> Result<(), Box<dyn std::error::Error>> {
289 println!(" Example 3: Streaming Chat Response");
290 println!("------------------------------------");
291
292 // Add user message for streaming example
293 let streaming_question = "Can you write a short poem about programming?";
294 conversation.add_user_message(streaming_question.to_string());
295
296 println!("User: {streaming_question}");
297 println!("Assistant (streaming): ");
298
299 // Note: Streaming is not yet fully implemented in the client
300 // This is a placeholder showing the intended API
301 println!(" Streaming functionality is being implemented...");
302 println!("Future implementation will show real-time token-by-token responses");
303
304 // Simulate what streaming would look like
305 let simulated_response = "Programming flows like poetry in motion,\nEach function a verse, each loop a devotion.\nVariables dance through memory's halls,\nWhile algorithms answer logic's calls.";
306
307 // Simulate typing effect
308 for char in simulated_response.chars() {
309 print!("{char}");
310 io::stdout().flush()?;
311 tokio::time::sleep(tokio::time::Duration::from_millis(30)).await;
312 }
313 println!("\n");
314
315 // Add the response to conversation history
316 conversation.add_assistant_message(simulated_response.to_string(), None);
317
318 Ok(())
319}
320
321/// Demonstrate token usage tracking.
322async fn demonstrate_token_tracking(
323 client: &Client,
324 conversation: &mut ConversationManager,
325) -> Result<(), Box<dyn std::error::Error>> {
326 println!(" Example 4: Token Usage Tracking");
327 println!("---------------------------------");
328
329 let efficiency_question = "In one sentence, what is machine learning?";
330 conversation.add_user_message(efficiency_question.to_string());
331
332 println!("User: {efficiency_question}");
333 print!("Assistant: ");
334 io::stdout().flush()?;
335
336 // Build chat request
337 let messages = conversation.get_conversation_for_api();
338 let mut chat_builder = client.chat().max_completion_tokens(50); // Limit tokens for demo
339
340 for (role, content) in messages {
341 match role.as_str() {
342 "system" => chat_builder = chat_builder.system(content),
343 "user" => chat_builder = chat_builder.user(content),
344 "assistant" => chat_builder = chat_builder.assistant(content),
345 _ => {}
346 }
347 }
348
349 let response = client.send_chat(chat_builder).await?;
350
351 if let Some(content) = response.content() {
352 println!("{content}");
353
354 // Display detailed token usage
355 if let Some(usage) = response.usage() {
356 println!("\n Token Usage Breakdown:");
357 println!(" Prompt tokens: {}", usage.prompt_tokens);
358 println!(" Completion tokens: {}", usage.completion_tokens);
359 println!(" Total tokens: {}", usage.total_tokens);
360
361 conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
362
363 conversation.add_assistant_message(content.to_string(), Some(usage.completion_tokens));
364 } else {
365 conversation.add_assistant_message(content.to_string(), None);
366 }
367 }
368
369 println!();
370 Ok(())
371}
372
373/// Demonstrate error handling patterns.
374async fn demonstrate_error_handling(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
375 println!(" Example 5: Error Handling Patterns");
376 println!("------------------------------------");
377
378 println!("Testing various error scenarios...\n");
379
380 // Test 1: Invalid model
381 println!("Test 1: Invalid model name");
382 let invalid_model_builder = client.chat()
383 .user("Hello")
384 // Note: We can't easily test invalid model without modifying the builder
385 // This shows the pattern for handling errors
386 .temperature(0.7);
387
388 match client.send_chat(invalid_model_builder).await {
389 Ok(_) => println!(" Request succeeded (model validation not yet implemented)"),
390 Err(e) => match &e {
391 Error::Api {
392 status, message, ..
393 } => {
394 println!(" API Error ({status}): {message}");
395 }
396 Error::Http(reqwest_err) => {
397 println!(" HTTP Error: {reqwest_err}");
398 }
399 Error::InvalidRequest(msg) => {
400 println!(" Invalid Request: {msg}");
401 }
402 _ => {
403 println!(" Unexpected Error: {e}");
404 }
405 },
406 }
407
408 // Test 2: Empty message validation
409 println!("\nTest 2: Empty message validation");
410 let empty_builder = client.chat(); // No messages added
411
412 match client.send_chat(empty_builder).await {
413 Ok(_) => println!(" Empty request unexpectedly succeeded"),
414 Err(Error::InvalidRequest(msg)) => {
415 println!(" Validation caught empty request: {msg}");
416 }
417 Err(e) => {
418 println!(" Unexpected error type: {e}");
419 }
420 }
421
422 // Test 3: Configuration errors
423 println!("\nTest 3: Configuration validation");
424 println!(" Client configuration is valid (created successfully)");
425
426 println!("\n Error handling patterns demonstrated:");
427 println!(" • API error classification");
428 println!(" • Request validation");
429 println!(" • Network error handling");
430 println!(" • Configuration validation");
431
432 println!();
433 Ok(())
433}
examples/azure_comprehensive.rs (line 48)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 tracing_subscriber::fmt::init();
11
12 println!("=== Azure OpenAI Comprehensive API Test ===\n");
13
14 let client = Client::from_env()?.build();
15
16 // Test 1: Simple chat completion
17 println!("1. Testing simple chat completion...");
18 let builder = client.chat_simple("What is 2+2? Answer in one word.");
19 match client.send_chat(builder).await {
20 Ok(response) => {
21 if let Some(content) = response.content() {
22 println!(" ✓ Chat completion: {content}");
23 }
24 }
25 Err(e) => println!(" ✗ Chat completion failed: {e}"),
26 }
27
28 // Test 2: Chat with system message
29 println!("\n2. Testing chat with system message...");
30 let builder = client.chat_with_system(
31 "You are a helpful assistant that responds in one sentence.",
32 "What is Rust?",
33 );
34 match client.send_chat(builder).await {
35 Ok(response) => {
36 if let Some(content) = response.content() {
37 println!(" ✓ System message chat: {content}");
38 }
39 }
40 Err(e) => println!(" ✗ System message chat failed: {e}"),
41 }
42
43 // Test 3: Chat with temperature
44 println!("\n3. Testing chat with custom parameters...");
45 let builder = client
46 .chat()
47 .user("Say 'test' in a creative way")
48 .temperature(0.7)
49 .max_tokens(50);
50 match client.send_chat(builder).await {
51 Ok(response) => {
52 if let Some(content) = response.content() {
53 println!(" ✓ Custom parameters: {content}");
54 }
55 }
56 Err(e) => println!(" ✗ Custom parameters failed: {e}"),
57 }
58
59 // Test 4: Multiple messages conversation
60 println!("\n4. Testing multi-message conversation...");
61 let builder = client
62 .chat()
63 .system("You are a helpful assistant")
64 .user("My name is Alice")
65 .assistant("Hello Alice! Nice to meet you.")
66 .user("What's my name?");
67 match client.send_chat(builder).await {
68 Ok(response) => {
69 if let Some(content) = response.content() {
70 println!(" ✓ Multi-message: {content}");
71 }
72 }
73 Err(e) => println!(" ✗ Multi-message failed: {e}"),
74 }
75
76 // Test 5: Chat with max_tokens limit
77 println!("\n5. Testing with max_tokens limit...");
78 let builder = client.chat().user("Explain quantum physics").max_tokens(20);
79 match client.send_chat(builder).await {
80 Ok(response) => {
81 if let Some(content) = response.content() {
82 println!(" ✓ Limited tokens: {content}");
83 println!(" (Note: response is truncated due to max_tokens=20)");
84 }
85 }
86 Err(e) => println!(" ✗ Max tokens test failed: {e}"),
87 }
88
89 // Test 6: Using responses API
90 println!("\n6. Testing responses API...");
91 let builder = client.responses().user("What is the capital of France?");
92 match client.send_responses(builder).await {
93 Ok(response) => {
94 if let Some(content) = response.content() {
95 println!(" ✓ Responses API: {content}");
96 }
97 }
98 Err(e) => println!(" ✗ Responses API failed: {e}"),
99 }
100
101 println!("\n=== Test Summary ===");
102 println!("Azure OpenAI integration tested across multiple endpoints!");
103 println!("\nNote: Some advanced features like embeddings, streaming, and");
104 println!("tool calling may require specific Azure OpenAI deployments.");
105
106 Ok(())
107}
examples/langfuse.rs (line 81)
31async fn main() -> Result<(), Box<dyn std::error::Error>> {
32 // Initialize tracing for logging
33 tracing_subscriber::fmt()
34 .with_env_filter(
35 tracing_subscriber::EnvFilter::from_default_env()
36 .add_directive("openai_ergonomic=debug".parse()?),
37 )
38 .init();
39
40 // 1. Build Langfuse exporter from environment variables
41 let exporter = ExporterBuilder::from_env()?.build()?;
42
43 // 2. Create tracer provider with batch processor
44 let provider = SdkTracerProvider::builder()
45 .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
46 .build();
47
48 // Set as global provider
49 global::set_tracer_provider(provider.clone());
50
51 // 3. Get tracer and create interceptor
52 let tracer = provider.tracer("openai-ergonomic");
53 let langfuse_interceptor =
54 std::sync::Arc::new(LangfuseInterceptor::new(tracer, LangfuseConfig::new()));
55
56 // 4. Create the OpenAI client and add the Langfuse interceptor
57 // Keep a reference to the interceptor so we can update context later
58 let client = Client::from_env()?
59 .with_interceptor(Box::new(langfuse_interceptor.clone()))
60 .build();
61
62 println!(" OpenAI client initialized with Langfuse observability");
63 println!(" Traces will be sent to Langfuse for monitoring\n");
64
65 // Example 1: Simple chat completion
66 println!("Example 1: Simple chat completion");
67 println!("---------------------------------");
68 let chat_builder = client
69 .chat_simple("What is the capital of France? Answer in one word.")
70 .build()?;
71 let response = client.execute_chat(chat_builder).await?;
72 println!("Response: {:?}\n", response.content());
73
74 // Example 2: Chat completion with builder pattern
75 println!("Example 2: Chat with builder pattern");
76 println!("-------------------------------------");
77 let chat_builder = client
78 .chat()
79 .system("You are a helpful assistant that speaks like a pirate.")
80 .user("Tell me about the ocean in 2 sentences.")
81 .temperature(0.7)
82 .max_tokens(100)
83 .build()?;
84 let response = client.execute_chat(chat_builder).await?;
85 println!("Response: {:?}\n", response.content());
86
87 // Example 3: Multiple messages in a conversation
88 println!("Example 3: Conversation");
89 println!("-----------------------");
90 let chat_builder = client
91 .chat()
92 .system("You are a math tutor.")
93 .user("What is 2 + 2?")
94 .assistant("2 + 2 equals 4.")
95 .user("And what about 3 + 3?")
96 .build()?;
97 let response = client.execute_chat(chat_builder).await?;
98 println!("Response: {:?}\n", response.content());
99
100 // Example 4: Error handling (intentionally trigger an error)
101 println!("Example 4: Error handling");
102 println!("-------------------------");
103 // Create a builder with a non-existent model
104 let chat_builder = ChatCompletionBuilder::new("non-existent-model")
105 .user("This should fail")
106 .build()?;
107 let result = client.execute_chat(chat_builder).await;
108
109 match result {
110 Ok(_) => println!("Unexpected success"),
111 Err(e) => println!("Expected error captured: {e}\n"),
112 }
113
114 // Example 5: Embeddings
115 println!("Example 5: Embeddings");
116 println!("--------------------");
117 let embeddings_builder = client.embeddings().text(
118 "text-embedding-ada-002",
119 "The quick brown fox jumps over the lazy dog",
120 );
121 let embeddings = client.embeddings().create(embeddings_builder).await?;
122 println!("Generated {} embedding(s)\n", embeddings.data.len());
123
124 // Example 6: Using custom metadata via interceptor context
125 println!("Example 6: Custom metadata via interceptor context");
126 println!("---------------------------------------------------");
127
128 // Set session and user IDs on the interceptor's context
129 langfuse_interceptor.set_session_id("demo-session-123");
130 langfuse_interceptor.set_user_id("demo-user-456");
131 langfuse_interceptor.add_tags(vec!["example".to_string(), "demo".to_string()]);
132
133 let chat_builder = client
134 .chat_simple("Say 'Hello from custom session!'")
135 .build()?;
136 let response = client.execute_chat(chat_builder).await?;
137 println!("Response with custom metadata: {:?}\n", response.content());
138
139 // Clear context for subsequent calls
140 langfuse_interceptor.clear_context();
141
142 println!(" All examples completed!");
143 println!(" Check your Langfuse dashboard to see the traces");
144 println!(" - Look for traces with operation name 'chat'");
145 println!(" - Each trace includes request/response details, token usage, and timing");
146 println!(" - Example 6 will have custom session_id, user_id, and tags");
147
148 // Shutdown the tracer provider to flush all spans
149 println!("\n⏳ Flushing spans to Langfuse...");
150 provider.shutdown()?;
151
152 Ok(())
153}
Sourcepub fn max_tokens(self, max_tokens: i32) -> Self
pub fn max_tokens(self, max_tokens: i32) -> Self
Set the maximum number of tokens to generate.
Examples found in repository?
examples/azure_comprehensive.rs (line 49)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 tracing_subscriber::fmt::init();
11
12 println!("=== Azure OpenAI Comprehensive API Test ===\n");
13
14 let client = Client::from_env()?.build();
15
16 // Test 1: Simple chat completion
17 println!("1. Testing simple chat completion...");
18 let builder = client.chat_simple("What is 2+2? Answer in one word.");
19 match client.send_chat(builder).await {
20 Ok(response) => {
21 if let Some(content) = response.content() {
22 println!(" ✓ Chat completion: {content}");
23 }
24 }
25 Err(e) => println!(" ✗ Chat completion failed: {e}"),
26 }
27
28 // Test 2: Chat with system message
29 println!("\n2. Testing chat with system message...");
30 let builder = client.chat_with_system(
31 "You are a helpful assistant that responds in one sentence.",
32 "What is Rust?",
33 );
34 match client.send_chat(builder).await {
35 Ok(response) => {
36 if let Some(content) = response.content() {
37 println!(" ✓ System message chat: {content}");
38 }
39 }
40 Err(e) => println!(" ✗ System message chat failed: {e}"),
41 }
42
43 // Test 3: Chat with temperature
44 println!("\n3. Testing chat with custom parameters...");
45 let builder = client
46 .chat()
47 .user("Say 'test' in a creative way")
48 .temperature(0.7)
49 .max_tokens(50);
50 match client.send_chat(builder).await {
51 Ok(response) => {
52 if let Some(content) = response.content() {
53 println!(" ✓ Custom parameters: {content}");
54 }
55 }
56 Err(e) => println!(" ✗ Custom parameters failed: {e}"),
57 }
58
59 // Test 4: Multiple messages conversation
60 println!("\n4. Testing multi-message conversation...");
61 let builder = client
62 .chat()
63 .system("You are a helpful assistant")
64 .user("My name is Alice")
65 .assistant("Hello Alice! Nice to meet you.")
66 .user("What's my name?");
67 match client.send_chat(builder).await {
68 Ok(response) => {
69 if let Some(content) = response.content() {
70 println!(" ✓ Multi-message: {content}");
71 }
72 }
73 Err(e) => println!(" ✗ Multi-message failed: {e}"),
74 }
75
76 // Test 5: Chat with max_tokens limit
77 println!("\n5. Testing with max_tokens limit...");
78 let builder = client.chat().user("Explain quantum physics").max_tokens(20);
79 match client.send_chat(builder).await {
80 Ok(response) => {
81 if let Some(content) = response.content() {
82 println!(" ✓ Limited tokens: {content}");
83 println!(" (Note: response is truncated due to max_tokens=20)");
84 }
85 }
86 Err(e) => println!(" ✗ Max tokens test failed: {e}"),
87 }
88
89 // Test 6: Using responses API
90 println!("\n6. Testing responses API...");
91 let builder = client.responses().user("What is the capital of France?");
92 match client.send_responses(builder).await {
93 Ok(response) => {
94 if let Some(content) = response.content() {
95 println!(" ✓ Responses API: {content}");
96 }
97 }
98 Err(e) => println!(" ✗ Responses API failed: {e}"),
99 }
100
101 println!("\n=== Test Summary ===");
102 println!("Azure OpenAI integration tested across multiple endpoints!");
103 println!("\nNote: Some advanced features like embeddings, streaming, and");
104 println!("tool calling may require specific Azure OpenAI deployments.");
105
106 Ok(())
107}
More examples
examples/langfuse.rs (line 82)
31async fn main() -> Result<(), Box<dyn std::error::Error>> {
32 // Initialize tracing for logging
33 tracing_subscriber::fmt()
34 .with_env_filter(
35 tracing_subscriber::EnvFilter::from_default_env()
36 .add_directive("openai_ergonomic=debug".parse()?),
37 )
38 .init();
39
40 // 1. Build Langfuse exporter from environment variables
41 let exporter = ExporterBuilder::from_env()?.build()?;
42
43 // 2. Create tracer provider with batch processor
44 let provider = SdkTracerProvider::builder()
45 .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
46 .build();
47
48 // Set as global provider
49 global::set_tracer_provider(provider.clone());
50
51 // 3. Get tracer and create interceptor
52 let tracer = provider.tracer("openai-ergonomic");
53 let langfuse_interceptor =
54 std::sync::Arc::new(LangfuseInterceptor::new(tracer, LangfuseConfig::new()));
55
56 // 4. Create the OpenAI client and add the Langfuse interceptor
57 // Keep a reference to the interceptor so we can update context later
58 let client = Client::from_env()?
59 .with_interceptor(Box::new(langfuse_interceptor.clone()))
60 .build();
61
62 println!(" OpenAI client initialized with Langfuse observability");
63 println!(" Traces will be sent to Langfuse for monitoring\n");
64
65 // Example 1: Simple chat completion
66 println!("Example 1: Simple chat completion");
67 println!("---------------------------------");
68 let chat_builder = client
69 .chat_simple("What is the capital of France? Answer in one word.")
70 .build()?;
71 let response = client.execute_chat(chat_builder).await?;
72 println!("Response: {:?}\n", response.content());
73
74 // Example 2: Chat completion with builder pattern
75 println!("Example 2: Chat with builder pattern");
76 println!("-------------------------------------");
77 let chat_builder = client
78 .chat()
79 .system("You are a helpful assistant that speaks like a pirate.")
80 .user("Tell me about the ocean in 2 sentences.")
81 .temperature(0.7)
82 .max_tokens(100)
83 .build()?;
84 let response = client.execute_chat(chat_builder).await?;
85 println!("Response: {:?}\n", response.content());
86
87 // Example 3: Multiple messages in a conversation
88 println!("Example 3: Conversation");
89 println!("-----------------------");
90 let chat_builder = client
91 .chat()
92 .system("You are a math tutor.")
93 .user("What is 2 + 2?")
94 .assistant("2 + 2 equals 4.")
95 .user("And what about 3 + 3?")
96 .build()?;
97 let response = client.execute_chat(chat_builder).await?;
98 println!("Response: {:?}\n", response.content());
99
100 // Example 4: Error handling (intentionally trigger an error)
101 println!("Example 4: Error handling");
102 println!("-------------------------");
103 // Create a builder with a non-existent model
104 let chat_builder = ChatCompletionBuilder::new("non-existent-model")
105 .user("This should fail")
106 .build()?;
107 let result = client.execute_chat(chat_builder).await;
108
109 match result {
110 Ok(_) => println!("Unexpected success"),
111 Err(e) => println!("Expected error captured: {e}\n"),
112 }
113
114 // Example 5: Embeddings
115 println!("Example 5: Embeddings");
116 println!("--------------------");
117 let embeddings_builder = client.embeddings().text(
118 "text-embedding-ada-002",
119 "The quick brown fox jumps over the lazy dog",
120 );
121 let embeddings = client.embeddings().create(embeddings_builder).await?;
122 println!("Generated {} embedding(s)\n", embeddings.data.len());
123
124 // Example 6: Using custom metadata via interceptor context
125 println!("Example 6: Custom metadata via interceptor context");
126 println!("---------------------------------------------------");
127
128 // Set session and user IDs on the interceptor's context
129 langfuse_interceptor.set_session_id("demo-session-123");
130 langfuse_interceptor.set_user_id("demo-user-456");
131 langfuse_interceptor.add_tags(vec!["example".to_string(), "demo".to_string()]);
132
133 let chat_builder = client
134 .chat_simple("Say 'Hello from custom session!'")
135 .build()?;
136 let response = client.execute_chat(chat_builder).await?;
137 println!("Response with custom metadata: {:?}\n", response.content());
138
139 // Clear context for subsequent calls
140 langfuse_interceptor.clear_context();
141
142 println!(" All examples completed!");
143 println!(" Check your Langfuse dashboard to see the traces");
144 println!(" - Look for traces with operation name 'chat'");
145 println!(" - Each trace includes request/response details, token usage, and timing");
146 println!(" - Example 6 will have custom session_id, user_id, and tags");
147
148 // Shutdown the tracer provider to flush all spans
149 println!("\n⏳ Flushing spans to Langfuse...");
150 provider.shutdown()?;
151
152 Ok(())
153}
Sourcepub fn max_completion_tokens(self, max_completion_tokens: i32) -> Self
pub fn max_completion_tokens(self, max_completion_tokens: i32) -> Self
Set the maximum number of completion tokens (for newer models, which use this in place of max_tokens).
Examples found in repository?
examples/retry_patterns.rs (line 369)
356async fn fallback_chain(client: &Client) -> Result<()> {
357 // Define fallback chain
358 let strategies = vec![
359 ("GPT-4o", "gpt-4o", 1024),
360 ("GPT-4o-mini", "gpt-4o-mini", 512),
361 ("GPT-3.5", "gpt-3.5-turbo", 256),
362 ];
363
364 let prompt = "Explain quantum computing";
365
366 for (name, _model, max_tokens) in strategies {
367 println!("Trying {} (max_tokens: {})", name, max_tokens);
368
369 let builder = client.chat().user(prompt).max_completion_tokens(max_tokens);
370 match client.send_chat(builder).await {
371 Ok(response) => {
372 println!("Success with {}", name);
373 if let Some(content) = response.content() {
374 println!("Response: {}...", &content[..content.len().min(100)]);
375 }
376 return Ok(());
377 }
378 Err(e) => {
379 println!("Failed with {}: {}", name, e);
380 }
381 }
382 }
383
384 println!("All fallback strategies exhausted");
385 Ok(())
386}More examples
examples/models.rs (line 187)
159async fn model_selection_by_task(client: &Client) -> Result<()> {
160 // Task-specific model recommendations
161 let task_models = vec![
162 ("Simple Q&A", "gpt-3.5-turbo", "Fast and cost-effective"),
163 ("Complex reasoning", "gpt-4o", "Best reasoning capabilities"),
164 ("Code generation", "gpt-4o", "Excellent code understanding"),
165 ("Vision tasks", "gpt-4o", "Native vision support"),
166 (
167 "Quick responses",
168 "gpt-4o-mini",
169 "Low latency, good quality",
170 ),
171 (
172 "Bulk processing",
173 "gpt-3.5-turbo",
174 "Best cost/performance ratio",
175 ),
176 ];
177
178 for (task, model, reason) in task_models {
179 println!("Task: {}", task);
180 println!(" Recommended: {}", model);
181 println!(" Reason: {}", reason);
182
183 // Demo the model
184 let builder = client
185 .chat()
186 .user(format!("Say 'Hello from {}'", model))
187 .max_completion_tokens(10);
188 let response = client.send_chat(builder).await?;
189
190 if let Some(content) = response.content() {
191 println!(" Response: {}\n", content);
192 }
193 }
194
195 Ok(())
196}
197
198async fn cost_optimization(client: &Client) -> Result<()> {
199 let models = get_model_registry();
200 let test_prompt = "Explain the theory of relativity in one sentence";
201 let estimated_input_tokens = 15;
202 let estimated_output_tokens = 50;
203
204 println!("Cost comparison for same task:");
205 println!("Prompt: '{}'\n", test_prompt);
206
207 let mut costs = Vec::new();
208
209 for (name, info) in &models {
210 if !info.deprecated {
211 let input_cost = (f64::from(estimated_input_tokens) / 1000.0) * info.cost_per_1k_input;
212 let output_cost =
213 (f64::from(estimated_output_tokens) / 1000.0) * info.cost_per_1k_output;
214 let total_cost = input_cost + output_cost;
215
216 costs.push((name.clone(), total_cost));
217 }
218 }
219
220 costs.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
221
222 println!("{:<20} {:>15}", "Model", "Estimated Cost");
223 println!("{:-<35}", "");
224 for (model, cost) in costs {
225 println!("{:<20} ${:>14.6}", model, cost);
226 }
227
228 // Demonstrate cheapest vs best
229 println!("\nRunning with cheapest model (gpt-3.5-turbo):");
230 let builder = client.chat().user(test_prompt);
231 let cheap_response = client.send_chat(builder).await?;
232
233 if let Some(content) = cheap_response.content() {
234 println!("Response: {}", content);
235 }
236
237 Ok(())
238}
239
240async fn performance_testing(client: &Client) -> Result<()> {
241 use std::time::Instant;
242
243 let models_to_test = vec!["gpt-4o-mini", "gpt-3.5-turbo"];
244 let test_prompt = "Write a haiku about programming";
245
246 println!("Performance comparison:");
247 println!("{:<20} {:>10} {:>15}", "Model", "Latency", "Tokens/sec");
248 println!("{:-<45}", "");
249
250 for model in models_to_test {
251 let start = Instant::now();
252
253 let builder = client.chat().user(test_prompt);
254 let response = client.send_chat(builder).await?;
255
256 let elapsed = start.elapsed();
257
258 if let Some(usage) = response.usage() {
259 let total_tokens = f64::from(usage.total_tokens);
260 let tokens_per_sec = total_tokens / elapsed.as_secs_f64();
261
262 println!("{:<20} {:>10.2?} {:>15.1}", model, elapsed, tokens_per_sec);
263 }
264 }
265
266 Ok(())
267}
268
269async fn model_migration(client: &Client) -> Result<()> {
270 // Handle deprecated model migration
271 let deprecated_mappings = HashMap::from([
272 ("text-davinci-003", "gpt-3.5-turbo"),
273 ("gpt-4-32k", "gpt-4o"),
274 ("gpt-4-vision-preview", "gpt-4o"),
275 ]);
276
277 let requested_model = "text-davinci-003"; // Deprecated model
278
279 if let Some(replacement) = deprecated_mappings.get(requested_model) {
280 println!(
281 "Warning: {} is deprecated. Using {} instead.",
282 requested_model, replacement
283 );
284
285 let builder = client.chat().user("Hello from migrated model");
286 let response = client.send_chat(builder).await?;
287
288 if let Some(content) = response.content() {
289 println!("Response from {}: {}", replacement, content);
290 }
291 }
292
293 Ok(())
294}
295
296async fn dynamic_model_selection(client: &Client) -> Result<()> {
297 // Select model based on runtime conditions
298
299 #[derive(Debug)]
300 struct RequestContext {
301 urgency: Urgency,
302 complexity: Complexity,
303 budget: Budget,
304 needs_vision: bool,
305 }
306
307 #[derive(Debug)]
308 enum Urgency {
309 Low,
310 Medium,
311 High,
312 }
313
314 #[derive(Debug)]
315 enum Complexity {
316 Simple,
317 Moderate,
318 Complex,
319 }
320
321 #[derive(Debug)]
322 enum Budget {
323 Tight,
324 Normal,
325 Flexible,
326 }
327
328 const fn select_model(ctx: &RequestContext) -> &'static str {
329 match (&ctx.urgency, &ctx.complexity, &ctx.budget) {
330 // High urgency + simple = fast cheap model, or tight budget = cheapest
331 (Urgency::High, Complexity::Simple, _) | (_, _, Budget::Tight) => "gpt-3.5-turbo",
332
333 // Complex + flexible budget = best model
334 (_, Complexity::Complex, Budget::Flexible) => "gpt-4o",
335
336 // Vision required
337 _ if ctx.needs_vision => "gpt-4o",
338
339 // Default balanced choice
340 _ => "gpt-4o-mini",
341 }
342 }
343
344 // Example contexts
345 let contexts = [
346 RequestContext {
347 urgency: Urgency::High,
348 complexity: Complexity::Simple,
349 budget: Budget::Tight,
350 needs_vision: false,
351 },
352 RequestContext {
353 urgency: Urgency::Low,
354 complexity: Complexity::Complex,
355 budget: Budget::Flexible,
356 needs_vision: false,
357 },
358 RequestContext {
359 urgency: Urgency::Medium,
360 complexity: Complexity::Moderate,
361 budget: Budget::Normal,
362 needs_vision: true,
363 },
364 ];
365
366 for (i, ctx) in contexts.iter().enumerate() {
367 let model = select_model(ctx);
368 println!("Context {}: {:?}", i + 1, ctx);
369 println!(" Selected model: {}", model);
370
371 let builder = client
372 .chat()
373 .user(format!("Hello from dynamically selected {}", model))
374 .max_completion_tokens(20);
375 let response = client.send_chat(builder).await?;
376
377 if let Some(content) = response.content() {
378 println!(" Response: {}\n", content);
379 }
380 }
381
382 Ok(())
383}
examples/vision_chat.rs (line 176)
152async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153 println!(" Example 3: Different Detail Levels");
154 println!("------------------------------------");
155
156 let image_url = SAMPLE_IMAGE_URLS[0];
157 let question = "Analyze this image";
158
159 // Test different detail levels
160 let detail_levels = vec![
161 (Detail::Low, "Low detail (faster, less detailed)"),
162 (Detail::High, "High detail (slower, more detailed)"),
163 (Detail::Auto, "Auto detail (balanced)"),
164 ];
165
166 for (detail, description) in detail_levels {
167 println!("\n{description}:");
168 print!("Assistant: ");
169 io::stdout().flush()?;
170
171 let chat_builder = client
172 .chat()
173 .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174 .user_with_image_url_and_detail(question, image_url, detail)
175 .temperature(0.2)
176 .max_completion_tokens(100); // Limit response length for comparison
177
178 let response = client.send_chat(chat_builder).await?;
179
180 if let Some(content) = response.content() {
181 println!("{content}");
182 }
183 }
184
185 println!();
186 Ok(())
187}
examples/moderations.rs (line 235)
228async fn response_filtering(client: &Client) -> Result<()> {
229 // Filter AI responses before showing to users
230
231 println!("Generating and moderating AI responses:");
232
233 // Generate response
234 let prompt = "Tell me about technology";
235 let builder = client.chat().user(prompt).max_completion_tokens(100);
236 let response = client.send_chat(builder).await?;
237
238 if let Some(content) = response.content() {
239 println!("Generated response: '{}'", content);
240
241 // Moderate the response
242 let moderation_result = simulate_moderation(content);
243
244 if moderation_result.flagged {
245 println!(
246 " Response flagged! Categories: {:?}",
247 moderation_result.categories
248 );
249 println!("Action: Response blocked or regenerated");
250
251 // Regenerate with more strict instructions
252 let safe_builder = client
253 .chat()
254 .system("Provide helpful, safe, and appropriate responses only.")
255 .user(prompt)
256 .max_completion_tokens(100);
257 let safe_response = client.send_chat(safe_builder).await?;
258
259 if let Some(safe_content) = safe_response.content() {
260 println!("Regenerated safe response: '{}'", safe_content);
261 }
262 } else {
263 println!(" Response passed moderation");
264 }
265 }
266
267 Ok(())
268}
269
270fn policy_enforcement(_client: &Client) {
271 // Enforce content policies
272 let policy = ModerationPolicy {
273 thresholds: HashMap::from([
274 ("harassment".to_string(), 0.5),
275 ("violence".to_string(), 0.6),
276 ("sexual".to_string(), 0.4),
277 ]),
278 auto_reject_categories: vec![
279 "harassment/threatening".to_string(),
280 "violence/graphic".to_string(),
281 ],
282 require_human_review: vec!["self-harm".to_string()],
283 };
284
285 let test_cases = vec![
286 "Normal conversation about work",
287 "Slightly aggressive language here",
288 "Content requiring review",
289 ];
290
291 for content in test_cases {
292 println!("Checking: '{}'", content);
293
294 let result = simulate_moderation(content);
295 let action = apply_policy(&result, &policy);
296
297 match action {
298 PolicyAction::Approve => println!(" Approved"),
299 PolicyAction::Reject(reason) => println!(" Rejected: {}", reason),
300 PolicyAction::Review(reason) => println!(" Human review needed: {}", reason),
301 }
302 }
303}
304
305async fn moderation_pipeline(client: &Client) -> Result<()> {
306 // Complete moderation pipeline
307
308 type FilterFn = Box<dyn Fn(&str) -> bool + Send + Sync>;
309
310 struct ModerationPipeline {
311 pre_filters: Vec<FilterFn>,
312 post_filters: Vec<FilterFn>,
313 }
314
315 let pipeline = ModerationPipeline {
316 pre_filters: vec![
317 Box::new(|text| text.len() < 10000), // Length check
318 Box::new(|text| !text.is_empty()), // Non-empty check
319 ],
320 post_filters: vec![
321 Box::new(|text| !text.contains("blockedword")), // Custom word filter
322 ],
323 };
324
325 println!("Running moderation pipeline:");
326
327 let user_input = "Please help me with this technical question about Rust programming.";
328
329 // Step 1: Pre-filters
330 println!("1. Pre-filters:");
331 for (i, filter) in pipeline.pre_filters.iter().enumerate() {
332 if filter(user_input) {
333 println!(" Pre-filter {} passed", i + 1);
334 } else {
335 println!(" Pre-filter {} failed", i + 1);
336 return Ok(());
337 }
338 }
339
340 // Step 2: API moderation
341 println!("2. API moderation:");
342 let moderation_result = simulate_moderation(user_input);
343 if moderation_result.flagged {
344 println!(" Content flagged by API");
345 return Ok(());
346 }
347 println!(" Passed API moderation");
348
349 // Step 3: Generate response
350 println!("3. Generating response:");
351 let builder = client.chat().user(user_input).max_completion_tokens(50);
352 let response = client.send_chat(builder).await?;
353
354 if let Some(content) = response.content() {
355 println!(" Generated: '{}'", content);
356
357 // Step 4: Post-filters
358 println!("4. Post-filters:");
359 for (i, filter) in pipeline.post_filters.iter().enumerate() {
360 if filter(content) {
361 println!(" Post-filter {} passed", i + 1);
362 } else {
363 println!(" Post-filter {} failed", i + 1);
364 return Ok(());
365 }
366 }
367
368 // Step 5: Response moderation
369 println!("5. Response moderation:");
370 let response_moderation = simulate_moderation(content);
371 if response_moderation.flagged {
372 println!(" Response flagged");
373 } else {
374 println!(" Response approved");
375 println!("\nFinal output: '{}'", content);
376 }
377 }
378
379 Ok(())
380}
examples/chat_comprehensive.rs (line 338)
322async fn demonstrate_token_tracking(
323 client: &Client,
324 conversation: &mut ConversationManager,
325) -> Result<(), Box<dyn std::error::Error>> {
326 println!(" Example 4: Token Usage Tracking");
327 println!("---------------------------------");
328
329 let efficiency_question = "In one sentence, what is machine learning?";
330 conversation.add_user_message(efficiency_question.to_string());
331
332 println!("User: {efficiency_question}");
333 print!("Assistant: ");
334 io::stdout().flush()?;
335
336 // Build chat request
337 let messages = conversation.get_conversation_for_api();
338 let mut chat_builder = client.chat().max_completion_tokens(50); // Limit tokens for demo
339
340 for (role, content) in messages {
341 match role.as_str() {
342 "system" => chat_builder = chat_builder.system(content),
343 "user" => chat_builder = chat_builder.user(content),
344 "assistant" => chat_builder = chat_builder.assistant(content),
345 _ => {}
346 }
347 }
348
349 let response = client.send_chat(chat_builder).await?;
350
351 if let Some(content) = response.content() {
352 println!("{content}");
353
354 // Display detailed token usage
355 if let Some(usage) = response.usage() {
356 println!("\n Token Usage Breakdown:");
357 println!(" Prompt tokens: {}", usage.prompt_tokens);
358 println!(" Completion tokens: {}", usage.completion_tokens);
359 println!(" Total tokens: {}", usage.total_tokens);
360
361 conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
362
363 conversation.add_assistant_message(content.to_string(), Some(usage.completion_tokens));
364 } else {
365 conversation.add_assistant_message(content.to_string(), None);
366 }
367 }
368
369 println!();
370 Ok(())
371}
Sourcepub fn tools(self, tools: Vec<ChatCompletionTool>) -> Self
pub fn tools(self, tools: Vec<ChatCompletionTool>) -> Self
Add tools that the model can use.
Examples found in repository?
examples/tool_calling.rs (line 132)
128async fn simple_tool_call(client: &Client) -> Result<()> {
129 let builder = client
130 .chat()
131 .user("What's the weather like in San Francisco?")
132 .tools(vec![get_weather_tool()]);
133 let response = client.send_chat(builder).await?;
134
135 // Check for tool calls
136 let tool_calls = response.tool_calls();
137 if !tool_calls.is_empty() {
138 for tool_call in tool_calls {
139 println!("Tool called: {}", tool_call.function_name());
140 println!("Arguments: {}", tool_call.function_arguments());
141
142 // Execute the function
143 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
144 let result = execute_weather_function(params)?;
145 println!("Function result: {}", result);
146 }
147 }
148
149 Ok(())
150}
151
152async fn multiple_tools(client: &Client) -> Result<()> {
153 let builder = client
154 .chat()
155 .user("What's the weather in NYC and what time is it there?")
156 .tools(vec![get_weather_tool(), get_time_tool()]);
157 let response = client.send_chat(builder).await?;
158
159 for tool_call in response.tool_calls() {
160 match tool_call.function_name() {
161 "get_weather" => {
162 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
163 let result = execute_weather_function(params)?;
164 println!("Weather result: {}", result);
165 }
166 "get_current_time" => {
167 let params: serde_json::Value =
168 serde_json::from_str(tool_call.function_arguments())?;
169 if let Some(timezone) = params["timezone"].as_str() {
170 let result = execute_time_function(timezone);
171 println!("Time result: {}", result);
172 }
173 }
174 _ => println!("Unknown tool: {}", tool_call.function_name()),
175 }
176 }
177
178 Ok(())
179}
180
181async fn tool_choice_control(client: &Client) -> Result<()> {
182 // Force specific tool
183 println!("Forcing weather tool:");
184 let builder = client
185 .chat()
186 .user("Tell me about Paris")
187 .tools(vec![get_weather_tool(), get_time_tool()])
188 .tool_choice(ToolChoiceHelper::specific("get_weather"));
189 let response = client.send_chat(builder).await?;
190
191 for tool_call in response.tool_calls() {
192 println!("Forced tool: {}", tool_call.function_name());
193 }
194
195 // Disable tools
196 println!("\nDisabling tools:");
197 let builder = client
198 .chat()
199 .user("What's the weather?")
200 .tools(vec![get_weather_tool()])
201 .tool_choice(ToolChoiceHelper::none());
202 let response = client.send_chat(builder).await?;
203
204 if let Some(content) = response.content() {
205 println!("Response without tools: {}", content);
206 }
207
208 Ok(())
209}
210
211async fn conversation_with_tools(client: &Client) -> Result<()> {
212 // This is a simplified version that demonstrates the concept
213 // without getting into the complexities of message history management
214
215 println!("=== Conversation with Tools (Simplified) ===");
216
217 // First request with tool call
218 let builder = client
219 .chat()
220 .user("What's the weather in Tokyo?")
221 .tools(vec![get_weather_tool()]);
222 let response = client.send_chat(builder).await?;
223
224 // Check for tool calls and simulate responses
225 for tool_call in response.tool_calls() {
226 println!("Tool called: {}", tool_call.function_name());
227 println!("Arguments: {}", tool_call.function_arguments());
228
229 // In a real implementation, you would:
230 // 1. Parse the arguments
231 // 2. Execute the actual function
232 // 3. Create tool messages with results
233 // 4. Send another request with the tool results
234
235 println!("Simulated weather result: Sunny, 24°C");
236 }
237
238 println!("Note: Full conversation with tool results requires complex message handling");
239 println!("This simplified version demonstrates tool calling detection");
240
241 Ok(())
242}
243
244fn streaming_with_tools(_client: &Client) {
245 println!("Streaming response with tools:");
246
247 // Note: Streaming with tool calls is more complex and requires
248 // proper handling of partial tool call chunks. For now, this is
249 // a placeholder showing the concept.
250
251 println!("This would demonstrate streaming tool calls if streaming API was available");
252 println!("In streaming mode, tool calls would arrive as chunks that need to be assembled");
253}
254
255async fn parallel_tool_calls(client: &Client) -> Result<()> {
256 let builder = client
257 .chat()
258 .user("Check the weather in Tokyo, London, and New York")
259 .tools(vec![get_weather_tool()]);
260 let response = client.send_chat(builder).await?;
261
262 // Modern models can call multiple tools in parallel
263 let tool_calls = response.tool_calls();
264 println!("Parallel tool calls: {}", tool_calls.len());
265
266 // Collect arguments first to avoid lifetime issues
267 let args_vec: Vec<String> = tool_calls
268 .iter()
269 .map(|tc| tc.function_arguments().to_string())
270 .collect();
271
272 // Execute all in parallel using tokio
273 let mut handles = Vec::new();
274 for args in args_vec {
275 let handle = tokio::spawn(async move {
276 let params: WeatherParams = serde_json::from_str(&args)?;
277 execute_weather_function(params)
278 });
279 handles.push(handle);
280 }
281
282 // Wait for all results
283 for (i, handle) in handles.into_iter().enumerate() {
284 match handle.await {
285 Ok(Ok(result)) => println!("Location {}: {}", i + 1, result),
286 Ok(Err(e)) => println!("Location {} error: {}", i + 1, e),
287 Err(e) => println!("Task {} panicked: {}", i + 1, e),
288 }
289 }
290
291 Ok(())
292}
More examples
examples/tool_calling_simple.rs (line 51)
42async fn main() -> Result<()> {
43 println!("=== Tool Calling Example ===");
44
45 let client = Client::from_env()?.build();
46
47 // Simple tool call
48 let builder = client
49 .chat()
50 .user("What's the weather like in San Francisco?")
51 .tools(vec![get_weather_tool()]);
52 let response = client.send_chat(builder).await?;
53
54 // Check for tool calls
55 let tool_calls = response.tool_calls();
56 if !tool_calls.is_empty() {
57 for tool_call in tool_calls {
58 println!("Tool called: {}", tool_call.function_name());
59 println!("Arguments: {}", tool_call.function_arguments());
60
61 // Execute the function
62 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
63            let result = execute_weather_function(&params);
64 println!("Function result: {}", result);
65 }
66 } else if let Some(content) = response.content() {
67 println!("Response: {}", content);
68 }
69
70 // Forced tool choice
71 println!("\n=== Forced Tool Choice ===");
72 let builder = client
73 .chat()
74 .user("Tell me about Paris")
75 .tools(vec![get_weather_tool()])
76 .tool_choice(ToolChoiceHelper::specific("get_weather"));
77 let response = client.send_chat(builder).await?;
78
79 for tool_call in response.tool_calls() {
80 println!("Forced tool: {}", tool_call.function_name());
81 }
82
83 // No tools
84 println!("\n=== No Tools Mode ===");
85 let builder = client
86 .chat()
87 .user("What's the weather?")
88 .tools(vec![get_weather_tool()])
89 .tool_choice(ToolChoiceHelper::none());
90 let response = client.send_chat(builder).await?;
91
92 if let Some(content) = response.content() {
93 println!("Response without tools: {}", content);
94 }
95
96 Ok(())
97}
Sourcepub fn tool_choice(self, tool_choice: ChatCompletionToolChoiceOption) -> Self
pub fn tool_choice(self, tool_choice: ChatCompletionToolChoiceOption) -> Self
Set the tool choice option.
Examples found in repository?
examples/tool_calling.rs (line 188)
181async fn tool_choice_control(client: &Client) -> Result<()> {
182 // Force specific tool
183 println!("Forcing weather tool:");
184 let builder = client
185 .chat()
186 .user("Tell me about Paris")
187 .tools(vec![get_weather_tool(), get_time_tool()])
188 .tool_choice(ToolChoiceHelper::specific("get_weather"));
189 let response = client.send_chat(builder).await?;
190
191 for tool_call in response.tool_calls() {
192 println!("Forced tool: {}", tool_call.function_name());
193 }
194
195 // Disable tools
196 println!("\nDisabling tools:");
197 let builder = client
198 .chat()
199 .user("What's the weather?")
200 .tools(vec![get_weather_tool()])
201 .tool_choice(ToolChoiceHelper::none());
202 let response = client.send_chat(builder).await?;
203
204 if let Some(content) = response.content() {
205 println!("Response without tools: {}", content);
206 }
207
208 Ok(())
209}
More examples
examples/tool_calling_simple.rs (line 76)
42async fn main() -> Result<()> {
43 println!("=== Tool Calling Example ===");
44
45 let client = Client::from_env()?.build();
46
47 // Simple tool call
48 let builder = client
49 .chat()
50 .user("What's the weather like in San Francisco?")
51 .tools(vec![get_weather_tool()]);
52 let response = client.send_chat(builder).await?;
53
54 // Check for tool calls
55 let tool_calls = response.tool_calls();
56 if !tool_calls.is_empty() {
57 for tool_call in tool_calls {
58 println!("Tool called: {}", tool_call.function_name());
59 println!("Arguments: {}", tool_call.function_arguments());
60
61 // Execute the function
62 let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
63            let result = execute_weather_function(&params);
64 println!("Function result: {}", result);
65 }
66 } else if let Some(content) = response.content() {
67 println!("Response: {}", content);
68 }
69
70 // Forced tool choice
71 println!("\n=== Forced Tool Choice ===");
72 let builder = client
73 .chat()
74 .user("Tell me about Paris")
75 .tools(vec![get_weather_tool()])
76 .tool_choice(ToolChoiceHelper::specific("get_weather"));
77 let response = client.send_chat(builder).await?;
78
79 for tool_call in response.tool_calls() {
80 println!("Forced tool: {}", tool_call.function_name());
81 }
82
83 // No tools
84 println!("\n=== No Tools Mode ===");
85 let builder = client
86 .chat()
87 .user("What's the weather?")
88 .tools(vec![get_weather_tool()])
89 .tool_choice(ToolChoiceHelper::none());
90 let response = client.send_chat(builder).await?;
91
92 if let Some(content) = response.content() {
93 println!("Response without tools: {}", content);
94 }
95
96 Ok(())
97}
Sourcepub fn response_format(
self,
format: CreateChatCompletionRequestAllOfResponseFormat,
) -> Self
pub fn response_format(self, format: CreateChatCompletionRequestAllOfResponseFormat) -> Self
Set the response format.
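The format value comes from the underlying generated client types. Below is a minimal sketch of wiring it into the builder; json_object_format() is a hypothetical stand-in for however your version of the generated crate constructs a JSON-mode CreateChatCompletionRequestAllOfResponseFormat, and everything else uses only methods shown on this page:

// `json_object_format()` is a hypothetical helper: the concrete
// constructor for the generated response-format type lives in the
// underlying generated client crate and varies by version.
fn json_object_format() -> CreateChatCompletionRequestAllOfResponseFormat {
    todo!("construct the generated response-format type (e.g. JSON mode) here")
}

let builder = client
    .chat()
    .system("Reply with a single JSON object only.")
    .user("List three primary colors as JSON.")
    .response_format(json_object_format());
let response = client.send_chat(builder).await?;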
Sourcepub fn presence_penalty(self, presence_penalty: f64) -> Self
pub fn presence_penalty(self, presence_penalty: f64) -> Self
Set the presence penalty.
Sourcepub fn frequency_penalty(self, frequency_penalty: f64) -> Self
pub fn frequency_penalty(self, frequency_penalty: f64) -> Self
Set the frequency penalty.
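Both penalties follow the OpenAI convention of ranging from -2.0 to 2.0; positive values discourage tokens that already appear in the generated text. A small sketch combining the two setters (the values are illustrative, not recommendations):

// Positive presence_penalty nudges the model toward new topics;
// positive frequency_penalty damps verbatim repetition.
// Values here are illustrative only.
let builder = client
    .chat()
    .user("Brainstorm ten distinct startup ideas.")
    .presence_penalty(0.6)
    .frequency_penalty(0.4);
let response = client.send_chat(builder).await?;
if let Some(content) = response.content() {
    println!("{content}");
}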
Trait Implementations§
Source§impl Builder<CreateChatCompletionRequest> for ChatCompletionBuilder
impl Builder<CreateChatCompletionRequest> for ChatCompletionBuilder
Source§fn build(self) -> Result<CreateChatCompletionRequest>
fn build(self) -> Result<CreateChatCompletionRequest>
Build the final request type.
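build finalizes the accumulated messages and parameters into the raw request type, which can then be passed to Client::execute_chat (as the langfuse example above does) or inspected before sending. A minimal sketch using only methods shown on this page (the model name is illustrative):

// Build the request without sending it, then execute it explicitly.
let request: CreateChatCompletionRequest = ChatCompletionBuilder::new("gpt-4o-mini")
    .system("You are terse.")
    .user("Reply with one word: ready?")
    .temperature(0.0)
    .build()?;
let response = client.execute_chat(request).await?;
println!("{:?}", response.content());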
Source§impl Clone for ChatCompletionBuilder
impl Clone for ChatCompletionBuilder
Source§fn clone(&self) -> ChatCompletionBuilder
fn clone(&self) -> ChatCompletionBuilder
Returns a duplicate of the value. Read more
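Since every setter consumes and returns the builder, Clone makes it easy to keep a partially configured builder around as a template. A small sketch using only methods shown on this page:

// Clone a shared base configuration, then diverge per request.
let base = client
    .chat()
    .system("You are a helpful assistant.")
    .temperature(0.2);

let casual = base.clone().user("Say hello.");
let formal = base.user("Compose a formal greeting."); // consumes `base`
let casual_reply = client.send_chat(casual).await?;
let formal_reply = client.send_chat(formal).await?;
println!("{:?} / {:?}", casual_reply.content(), formal_reply.content());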
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Auto Trait Implementations§
impl Freeze for ChatCompletionBuilder
impl RefUnwindSafe for ChatCompletionBuilder
impl Send for ChatCompletionBuilder
impl Sync for ChatCompletionBuilder
impl Unpin for ChatCompletionBuilder
impl UnwindSafe for ChatCompletionBuilder
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where T: ?Sized,
impl<T> BorrowMut<T> for T
where T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more