pub struct ResponsesBuilder { /* private fields */ }

Expand description
Builder for Responses API requests.
The Responses API is the modern unified interface for OpenAI completions,
supporting streaming, tools, and structured outputs.
Implementations§
impl ResponsesBuilder
impl ResponsesBuilder
pub fn new(model: impl Into<String>) -> Self
pub fn new(model: impl Into<String>) -> Self
Create a new responses builder with the specified model.
pub fn system(self, content: impl Into<String>) -> Self
pub fn system(self, content: impl Into<String>) -> Self
Add a system message to the conversation.
Examples found in repository?
118async fn basic_responses_example(client: &Client) -> Result<(), Error> {
119 println!("Creating a basic response with system context...");
120
121 // Build a simple request with system and user messages
122 let builder = client
123 .responses()
124 .system("You are a helpful assistant who provides concise, accurate answers.")
125 .user("What is the capital of France?")
126 .temperature(0.7)
127 .max_completion_tokens(100);
128
129 let response = client.send_responses(builder).await?;
130
131 // Extract and display the response
132 if let Some(content) = response.content() {
133 println!(" Assistant: {content}");
134 } else {
135 println!(" No content in response");
136 }
137
138 // Show response metadata
139 println!(" Response metadata:");
140 println!(" - Model: {}", response.model().unwrap_or("unknown"));
141 println!(
142 " - Finish reason: {}",
143 response
144 .finish_reason()
145 .unwrap_or_else(|| "unknown".to_string())
146 );
147
148 if let Some(usage) = response.usage() {
149 println!(
150 " - Tokens used: {} prompt + {} completion = {} total",
151 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
152 );
153 }
154
155 Ok(())
156}
157
158/// Example 2: Function calling with custom tools
159async fn function_calling_example(client: &Client) -> Result<(), Error> {
160 println!("Setting up function calling with custom tools...");
161
162 // Define a weather function tool
163 let weather_tool = tool_function(
164 "get_weather",
165 "Get the current weather information for a specific location",
166 json!({
167 "type": "object",
168 "properties": {
169 "location": {
170 "type": "string",
171 "description": "The city name, e.g., 'San Francisco, CA'"
172 },
173 "unit": {
174 "type": "string",
175 "enum": ["celsius", "fahrenheit"],
176 "description": "Temperature unit preference"
177 }
178 },
179 "required": ["location"],
180 "additionalProperties": false
181 }),
182 );
183
184 // Define a time function tool
185 let time_tool = tool_function(
186 "get_current_time",
187 "Get the current time in a specific timezone",
188 json!({
189 "type": "object",
190 "properties": {
191 "timezone": {
192 "type": "string",
193 "description": "Timezone name, e.g., 'America/New_York'"
194 }
195 },
196 "required": ["timezone"],
197 "additionalProperties": false
198 }),
199 );
200
201 // Make a request that should trigger function calling
202 let builder = client
203 .responses()
204 .system("You are a helpful assistant with access to weather and time information. Use the provided tools when users ask about weather or time.")
205 .user("What's the weather like in London and what time is it there?")
206 .tool(weather_tool)
207 .tool(time_tool)
208 .tool_choice(ToolChoiceHelper::auto())
209 .temperature(0.3);
210
211 let response = client.send_responses(builder).await?;
212
213 // Check if the model wants to call functions
214 let tool_calls = response.tool_calls();
215 if !tool_calls.is_empty() {
216 println!(" Model requested {} tool call(s):", tool_calls.len());
217
218 for (i, tool_call) in tool_calls.iter().enumerate() {
219 println!(" {}. Function: {}", i + 1, tool_call.function_name());
220 println!(" Arguments: {}", tool_call.function_arguments());
221
222 // In a real application, you would:
223 // 1. Parse the arguments
224 // 2. Execute the actual function
225 // 3. Send the results back to the model
226 println!(" [Simulated] Executing function call...");
227 match tool_call.function_name() {
228 "get_weather" => {
229 println!(" [Simulated] Weather: 22°C, partly cloudy");
230 }
231 "get_current_time" => {
232 println!(" [Simulated] Time: 14:30 GMT");
233 }
234 _ => {
235 println!(" [Simulated] Unknown function");
236 }
237 }
238 }
239 } else if let Some(content) = response.content() {
240 println!(" Assistant response: {content}");
241 }
242
243 Ok(())
244}
245
246/// Example 3: Web search integration
247async fn web_search_example(client: &Client) -> Result<(), Error> {
248 println!("Demonstrating web search tool integration...");
249
250 // Create a web search tool
251 let web_search_tool = tool_web_search();
252
253 // Ask a question that would benefit from current information
254 let builder = client
255 .responses()
256 .system("You are a helpful assistant with access to web search. When users ask about current events, recent information, or real-time data, use the web search tool to find accurate, up-to-date information.")
257 .user("What are the latest developments in artificial intelligence this week?")
258 .tool(web_search_tool)
259 .tool_choice(ToolChoiceHelper::auto())
260 .temperature(0.3)
261 .max_completion_tokens(200);
262
263 let response = client.send_responses(builder).await?;
264
265 // Handle the response
266 let tool_calls = response.tool_calls();
267 if !tool_calls.is_empty() {
268 println!(" Model requested web search:");
269
270 for tool_call in &tool_calls {
271 if tool_call.function_name() == "web_search" {
272 println!(" Search query: {}", tool_call.function_arguments());
273 println!(" [Simulated] Performing web search...");
274 println!(" [Simulated] Found recent AI news and developments");
275
276 // In a real implementation:
277 // 1. Parse the search query from arguments
278 // 2. Perform actual web search
279 // 3. Return results to the model
280 // 4. Get final response with search results
281 }
282 }
283 } else if let Some(content) = response.content() {
284 println!(" Assistant response: {content}");
285 }
286
287 println!(" Note: Web search requires additional implementation to execute actual searches");
288
289 Ok(())
290}
291
292/// Example 4: Structured JSON outputs with schemas
293async fn structured_output_example(client: &Client) -> Result<(), Error> {
294 println!("Demonstrating structured JSON outputs...");
295
296 // Define a schema for recipe information
297 let recipe_schema = json!({
298 "type": "object",
299 "properties": {
300 "name": {
301 "type": "string",
302 "description": "Name of the recipe"
303 },
304 "ingredients": {
305 "type": "array",
306 "items": {
307 "type": "object",
308 "properties": {
309 "name": {
310 "type": "string",
311 "description": "Ingredient name"
312 },
313 "amount": {
314 "type": "string",
315 "description": "Amount needed"
316 }
317 },
318 "required": ["name", "amount"],
319 "additionalProperties": false
320 },
321 "description": "List of ingredients"
322 },
323 "instructions": {
324 "type": "array",
325 "items": {
326 "type": "string"
327 },
328 "description": "Step-by-step cooking instructions"
329 },
330 "prep_time_minutes": {
331 "type": "integer",
332 "description": "Preparation time in minutes"
333 },
334 "difficulty": {
335 "type": "string",
336 "enum": ["easy", "medium", "hard"],
337 "description": "Recipe difficulty level"
338 }
339 },
340 "required": ["name", "ingredients", "instructions", "prep_time_minutes", "difficulty"],
341 "additionalProperties": false
342 });
343
344 // Request a recipe in structured JSON format
345 let builder = client
346 .responses()
347 .system("You are a cooking expert. Provide recipes in the exact JSON format specified.")
348 .user("Give me a simple recipe for chocolate chip cookies")
349 .json_schema("recipe", recipe_schema)
350 .temperature(0.5);
351
352 let response = client.send_responses(builder).await?;
353
354 if let Some(content) = response.content() {
355 println!(" Structured recipe output:");
356
357 // Try to parse and pretty-print the JSON
358 match serde_json::from_str::<serde_json::Value>(content) {
359 Ok(json) => {
360 println!("{}", serde_json::to_string_pretty(&json)?);
361 }
362 Err(_) => {
363 println!("Raw response: {content}");
364 }
365 }
366 }
367
368 // Example of simple JSON mode (without schema)
369 println!("\n Simple JSON mode example:");
370 let simple_builder = client
371 .responses()
372 .system("Respond in valid JSON format with keys: summary, key_points, sentiment")
373 .user("Analyze this text: 'The new product launch exceeded expectations with great customer feedback.'")
374 .json_mode()
375 .temperature(0.3);
376
377 let simple_response = client.send_responses(simple_builder).await?;
378
379 if let Some(content) = simple_response.content() {
380 println!(" Analysis result: {content}");
381 }
382
383 Ok(())
384}
385
386/// Example 5: Advanced configuration and parameters
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!(" Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}More examples
132async fn simple_json_mode_example(client: &Client) -> Result<(), Error> {
133 println!("Using simple JSON mode for basic structure enforcement...");
134
135 let builder = client
136 .responses()
137 .system("You are a helpful assistant. Always respond in valid JSON format with the keys: summary, sentiment, and confidence_score (0-1).")
138 .user("Analyze this product review: 'This laptop is amazing! Great performance, excellent battery life, and the display is crystal clear. Highly recommended!'")
139 .json_mode()
140 .temperature(0.3)
141 .max_completion_tokens(200);
142
143 let response = client.send_responses(builder).await?;
144
145 if let Some(content) = response.content() {
146 println!(" JSON Analysis Result:");
147
148 // Try to parse and pretty-print the JSON
149 match serde_json::from_str::<serde_json::Value>(content) {
150 Ok(json) => {
151 println!("{}", serde_json::to_string_pretty(&json)?);
152
153 // Demonstrate accessing specific fields
154 if let Some(sentiment) = json.get("sentiment").and_then(|s| s.as_str()) {
155 println!("\n Extracted sentiment: {sentiment}");
156 }
157 if let Some(confidence) = json
158 .get("confidence_score")
159 .and_then(serde_json::Value::as_f64)
160 {
161 println!(" Confidence score: {confidence:.2}");
162 }
163 }
164 Err(e) => {
165 println!(" Failed to parse JSON: {e}");
166 println!("Raw response: {content}");
167 }
168 }
169 }
170
171 Ok(())
172}
173
174/// Example 2: Data extraction with schema validation
175async fn data_extraction_example(client: &Client) -> Result<(), Error> {
176 println!("Extracting structured data from unstructured text using JSON schema...");
177
178 // Define schema for extracting contact information
179 let contact_schema = json!({
180 "type": "object",
181 "properties": {
182 "contacts": {
183 "type": "array",
184 "items": {
185 "type": "object",
186 "properties": {
187 "name": {
188 "type": "string",
189 "description": "Full name of the person"
190 },
191 "email": {
192 "type": "string",
193 "format": "email",
194 "description": "Email address"
195 },
196 "phone": {
197 "type": "string",
198 "description": "Phone number"
199 },
200 "company": {
201 "type": "string",
202 "description": "Company or organization"
203 },
204 "role": {
205 "type": "string",
206 "description": "Job title or role"
207 }
208 },
209 "required": ["name"],
210 "additionalProperties": false
211 }
212 },
213 "total_contacts": {
214 "type": "integer",
215 "description": "Total number of contacts extracted"
216 }
217 },
218 "required": ["contacts", "total_contacts"],
219 "additionalProperties": false
220 });
221
222 let unstructured_text =
223 "Contact our team: John Smith (CEO) at john@example.com or call 555-0123. \
224 For technical support, reach out to Sarah Johnson at sarah.johnson@techcorp.com. \
225 Our sales manager Mike Wilson can be reached at mike@company.com or 555-0456.";
226
227 let builder = client
228 .responses()
229 .system("You are an expert at extracting contact information from text. Extract all contact details you can find and structure them according to the provided schema.")
230 .user(format!("Extract contact information from this text: {unstructured_text}"))
231 .json_schema("contact_extraction", contact_schema)
232 .temperature(0.1); // Low temperature for accuracy
233
234 let response = client.send_responses(builder).await?;
235
236 if let Some(content) = response.content() {
237 println!(" Extracted Contact Information:");
238
239 match serde_json::from_str::<serde_json::Value>(content) {
240 Ok(json) => {
241 println!("{}", serde_json::to_string_pretty(&json)?);
242
243 // Demonstrate accessing the structured data
244 if let Some(contacts) = json.get("contacts").and_then(|c| c.as_array()) {
245 println!("\n Summary: Found {} contact(s)", contacts.len());
246 for (i, contact) in contacts.iter().enumerate() {
247 if let Some(name) = contact.get("name").and_then(|n| n.as_str()) {
248 println!(" {}. {name}", i + 1);
249 if let Some(email) = contact.get("email").and_then(|e| e.as_str()) {
250 println!(" {email}");
251 }
252 if let Some(company) = contact.get("company").and_then(|c| c.as_str()) {
253 println!(" {company}");
254 }
255 }
256 }
257 }
258 }
259 Err(e) => {
260 println!(" Failed to parse JSON: {e}");
261 println!("Raw response: {content}");
262 }
263 }
264 }
265
266 Ok(())
267}
268
269/// Example 3: Complex nested structure for event planning
270#[allow(clippy::too_many_lines)]
271async fn complex_structure_example(client: &Client) -> Result<(), Error> {
272 println!("Creating complex nested structure for event planning...");
273
274 // Define a comprehensive event schema
275 let event_schema = json!({
276 "type": "object",
277 "properties": {
278 "event": {
279 "type": "object",
280 "properties": {
281 "name": {
282 "type": "string",
283 "description": "Event name"
284 },
285 "type": {
286 "type": "string",
287 "enum": ["conference", "workshop", "seminar", "networking", "party", "meeting"],
288 "description": "Type of event"
289 },
290 "date": {
291 "type": "string",
292 "format": "date",
293 "description": "Event date in YYYY-MM-DD format"
294 },
295 "duration_hours": {
296 "type": "number",
297 "minimum": 0.5,
298 "maximum": 24,
299 "description": "Duration in hours"
300 },
301 "venue": {
302 "type": "object",
303 "properties": {
304 "name": {
305 "type": "string",
306 "description": "Venue name"
307 },
308 "address": {
309 "type": "string",
310 "description": "Venue address"
311 },
312 "capacity": {
313 "type": "integer",
314 "minimum": 1,
315 "description": "Maximum capacity"
316 },
317 "amenities": {
318 "type": "array",
319 "items": {
320 "type": "string",
321 "enum": ["wifi", "parking", "catering", "av_equipment", "wheelchair_accessible", "air_conditioning"]
322 },
323 "description": "Available amenities"
324 }
325 },
326 "required": ["name", "capacity"],
327 "additionalProperties": false
328 },
329 "agenda": {
330 "type": "array",
331 "items": {
332 "type": "object",
333 "properties": {
334 "time": {
335 "type": "string",
336 "pattern": "^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$",
337 "description": "Time in HH:MM format"
338 },
339 "activity": {
340 "type": "string",
341 "description": "Activity description"
342 },
343 "speaker": {
344 "type": "string",
345 "description": "Speaker name"
346 },
347 "duration_minutes": {
348 "type": "integer",
349 "minimum": 15,
350 "maximum": 480,
351 "description": "Activity duration in minutes"
352 }
353 },
354 "required": ["time", "activity", "duration_minutes"],
355 "additionalProperties": false
356 }
357 },
358 "estimated_cost": {
359 "type": "object",
360 "properties": {
361 "venue": {
362 "type": "number",
363 "minimum": 0,
364 "description": "Venue cost in USD"
365 },
366 "catering": {
367 "type": "number",
368 "minimum": 0,
369 "description": "Catering cost in USD"
370 },
371 "equipment": {
372 "type": "number",
373 "minimum": 0,
374 "description": "Equipment cost in USD"
375 },
376 "total": {
377 "type": "number",
378 "minimum": 0,
379 "description": "Total estimated cost in USD"
380 }
381 },
382 "required": ["total"],
383 "additionalProperties": false
384 }
385 },
386 "required": ["name", "type", "date", "duration_hours", "venue"],
387 "additionalProperties": false
388 }
389 },
390 "required": ["event"],
391 "additionalProperties": false
392 });
393
394 let builder = client
395 .responses()
396 .system("You are an expert event planner. Create a detailed event plan based on the user's requirements, including venue details, agenda, and cost estimates.")
397 .user("Plan a one-day AI/ML conference for 200 people in San Francisco. Include morning keynotes, afternoon workshops, networking lunch, and panel discussions. Budget around $50,000.")
398 .json_schema("event_plan", event_schema)
399 .temperature(0.5);
400
401 let response = client.send_responses(builder).await?;
402
403 if let Some(content) = response.content() {
404 println!(" Event Plan:");
405
406 match serde_json::from_str::<serde_json::Value>(content) {
407 Ok(json) => {
408 println!("{}", serde_json::to_string_pretty(&json)?);
409
410 // Extract and display key information
411 if let Some(event) = json.get("event") {
412 if let Some(name) = event.get("name").and_then(|n| n.as_str()) {
413 println!("\n Event: {name}");
414 }
415 if let Some(venue) = event.get("venue") {
416 if let Some(venue_name) = venue.get("name").and_then(|n| n.as_str()) {
417 let capacity = venue
418 .get("capacity")
419 .and_then(serde_json::Value::as_i64)
420 .unwrap_or(0);
421 println!(" Venue: {venue_name} (Capacity: {capacity})");
422 }
423 }
424 if let Some(agenda) = event.get("agenda").and_then(|a| a.as_array()) {
425 println!(" Agenda has {} activities", agenda.len());
426 }
427 if let Some(cost) = event.get("estimated_cost") {
428 if let Some(total) = cost.get("total").and_then(serde_json::Value::as_f64) {
429 println!(" Estimated total cost: ${total:.2}");
430 }
431 }
432 }
433 }
434 Err(e) => {
435 println!(" Failed to parse JSON: {e}");
436 println!("Raw response: {content}");
437 }
438 }
439 }
440
441 Ok(())
442}
443
444/// Example 4: Content classification with enum validation
445#[allow(clippy::too_many_lines)]
446async fn classification_example(client: &Client) -> Result<(), Error> {
447 println!("Classifying content with enum validation...");
448
449 // Define schema for content classification
450 let classification_schema = json!({
451 "type": "object",
452 "properties": {
453 "classification": {
454 "type": "object",
455 "properties": {
456 "category": {
457 "type": "string",
458 "enum": ["technology", "business", "science", "health", "politics", "sports", "entertainment", "education", "travel", "lifestyle"],
459 "description": "Primary content category"
460 },
461 "subcategory": {
462 "type": "string",
463 "description": "More specific subcategory"
464 },
465 "sentiment": {
466 "type": "string",
467 "enum": ["positive", "neutral", "negative", "mixed"],
468 "description": "Overall sentiment"
469 },
470 "topics": {
471 "type": "array",
472 "items": {
473 "type": "string"
474 },
475 "maxItems": 5,
476 "description": "Key topics mentioned"
477 },
478 "target_audience": {
479 "type": "string",
480 "enum": ["general", "professionals", "students", "experts", "consumers"],
481 "description": "Intended audience"
482 },
483 "complexity_level": {
484 "type": "string",
485 "enum": ["beginner", "intermediate", "advanced", "expert"],
486 "description": "Content complexity level"
487 },
488 "confidence_score": {
489 "type": "number",
490 "minimum": 0,
491 "maximum": 1,
492 "description": "Confidence in classification (0-1)"
493 }
494 },
495 "required": ["category", "sentiment", "topics", "target_audience", "complexity_level", "confidence_score"],
496 "additionalProperties": false
497 }
498 },
499 "required": ["classification"],
500 "additionalProperties": false
501 });
502
503 let content_to_classify = "Recent advances in quantum computing have shown promising results for solving complex optimization problems. \
504 Researchers at leading universities have demonstrated quantum algorithms that can potentially outperform classical computers \
505 in specific domains like cryptography and molecular simulation. However, current quantum computers still face challenges \
506 with noise and error rates, requiring sophisticated error correction techniques. The field is rapidly evolving with \
507 significant investments from both academic institutions and major technology companies.";
508
509 let builder = client
510 .responses()
511 .system("You are an expert content classifier. Analyze the provided text and classify it according to the given schema. Be precise with your classifications and provide accurate confidence scores.")
512 .user(format!("Classify this content: {content_to_classify}"))
513 .json_schema("content_classification", classification_schema)
514 .temperature(0.2); // Low temperature for consistent classification
515
516 let response = client.send_responses(builder).await?;
517
518 if let Some(content) = response.content() {
519 println!(" Content Classification:");
520
521 match serde_json::from_str::<serde_json::Value>(content) {
522 Ok(json) => {
523 println!("{}", serde_json::to_string_pretty(&json)?);
524
525 // Extract classification details
526 if let Some(classification) = json.get("classification") {
527 println!("\n Classification Summary:");
528 if let Some(category) = classification.get("category").and_then(|c| c.as_str())
529 {
530 println!(" Category: {category}");
531 }
532 if let Some(sentiment) =
533 classification.get("sentiment").and_then(|s| s.as_str())
534 {
535 println!(" Sentiment: {sentiment}");
536 }
537 if let Some(audience) = classification
538 .get("target_audience")
539 .and_then(|a| a.as_str())
540 {
541 println!(" Target Audience: {audience}");
542 }
543 if let Some(complexity) = classification
544 .get("complexity_level")
545 .and_then(|c| c.as_str())
546 {
547 println!(" Complexity: {complexity}");
548 }
549 if let Some(confidence) = classification
550 .get("confidence_score")
551 .and_then(serde_json::Value::as_f64)
552 {
553 println!(" Confidence: {:.2}%", confidence * 100.0);
554 }
555 if let Some(topics) = classification.get("topics").and_then(|t| t.as_array()) {
556 let topic_strings: Vec<String> = topics
557 .iter()
558 .filter_map(|t| t.as_str())
559 .map(std::string::ToString::to_string)
560 .collect();
561 println!(" Topics: {}", topic_strings.join(", "));
562 }
563 }
564 }
565 Err(e) => {
566 println!(" Failed to parse JSON: {e}");
567 println!("Raw response: {content}");
568 }
569 }
570 }
571
572 Ok(())
573}
574
575/// Example 5: Mathematical analysis with structured output
576#[allow(clippy::too_many_lines)]
577async fn math_analysis_example(client: &Client) -> Result<(), Error> {
578 println!("Performing mathematical analysis with structured output...");
579
580 // Define schema for mathematical analysis
581 let math_schema = json!({
582 "type": "object",
583 "properties": {
584 "analysis": {
585 "type": "object",
586 "properties": {
587 "problem_type": {
588 "type": "string",
589 "enum": ["algebra", "geometry", "calculus", "statistics", "probability", "discrete_math", "linear_algebra"],
590 "description": "Type of mathematical problem"
591 },
592 "solution_steps": {
593 "type": "array",
594 "items": {
595 "type": "object",
596 "properties": {
597 "step_number": {
598 "type": "integer",
599 "minimum": 1,
600 "description": "Step number in the solution"
601 },
602 "description": {
603 "type": "string",
604 "description": "Description of what this step does"
605 },
606 "equation": {
607 "type": "string",
608 "description": "Mathematical equation or expression"
609 },
610 "result": {
611 "type": "string",
612 "description": "Result of this step"
613 }
614 },
615 "required": ["step_number", "description", "equation"],
616 "additionalProperties": false
617 }
618 },
619 "final_answer": {
620 "type": "string",
621 "description": "Final answer to the problem"
622 },
623 "verification": {
624 "type": "object",
625 "properties": {
626 "check_method": {
627 "type": "string",
628 "description": "Method used to verify the answer"
629 },
630 "is_correct": {
631 "type": "boolean",
632 "description": "Whether the answer passes verification"
633 }
634 },
635 "required": ["check_method", "is_correct"],
636 "additionalProperties": false
637 },
638 "concepts_used": {
639 "type": "array",
640 "items": {
641 "type": "string"
642 },
643 "description": "Mathematical concepts used in the solution"
644 }
645 },
646 "required": ["problem_type", "solution_steps", "final_answer", "verification", "concepts_used"],
647 "additionalProperties": false
648 }
649 },
650 "required": ["analysis"],
651 "additionalProperties": false
652 });
653
654 let math_problem =
655 "Find the derivative of f(x) = 3x^3 + 2x^2 - 5x + 7 and evaluate it at x = 2.";
656
657 let builder = client
658 .responses()
659 .system("You are a mathematics tutor. Solve mathematical problems step by step, showing your work clearly and verifying your answers. Structure your response according to the provided schema.")
660 .user(format!("Solve this problem: {math_problem}"))
661 .json_schema("math_analysis", math_schema)
662 .temperature(0.1); // Very low temperature for mathematical accuracy
663
664 let response = client.send_responses(builder).await?;
665
666 if let Some(content) = response.content() {
667 println!(" Mathematical Analysis:");
668
669 match serde_json::from_str::<serde_json::Value>(content) {
670 Ok(json) => {
671 println!("{}", serde_json::to_string_pretty(&json)?);
672
673 // Extract and display solution steps
674 if let Some(analysis) = json.get("analysis") {
675 println!("\n Solution Summary:");
676
677 if let Some(problem_type) =
678 analysis.get("problem_type").and_then(|p| p.as_str())
679 {
680 println!(" Problem Type: {problem_type}");
681 }
682
683 if let Some(steps) = analysis.get("solution_steps").and_then(|s| s.as_array()) {
684 println!(" Solution Steps: {} steps", steps.len());
685 for step in steps {
686 if let (Some(step_num), Some(desc)) = (
687 step.get("step_number").and_then(serde_json::Value::as_i64),
688 step.get("description").and_then(|d| d.as_str()),
689 ) {
690 println!(" {step_num}. {desc}");
691 if let Some(equation) =
692 step.get("equation").and_then(|e| e.as_str())
693 {
694 println!(" {equation}");
695 }
696 }
697 }
698 }
699
700 if let Some(answer) = analysis.get("final_answer").and_then(|a| a.as_str()) {
701 println!(" Final Answer: {answer}");
702 }
703
704 if let Some(verification) = analysis.get("verification") {
705 if let Some(is_correct) = verification
706 .get("is_correct")
707 .and_then(serde_json::Value::as_bool)
708 {
709 let status = if is_correct {
710 " Verified"
711 } else {
712 " Needs Review"
713 };
714 println!(" Verification: {status}");
715 }
716 }
717
718 if let Some(concepts) = analysis.get("concepts_used").and_then(|c| c.as_array())
719 {
720 let concept_strings: Vec<String> = concepts
721 .iter()
722 .filter_map(|c| c.as_str())
723 .map(std::string::ToString::to_string)
724 .collect();
725 println!(" Concepts Used: {}", concept_strings.join(", "));
726 }
727 }
728 }
729 Err(e) => {
730 println!(" Failed to parse JSON: {e}");
731 println!("Raw response: {content}");
732 }
733 }
734 }
735
736 Ok(())
737}
738
739/// Example 6: Demonstration of schema validation and error handling
740#[allow(clippy::too_many_lines)]
741async fn validation_error_example(client: &Client) -> Result<(), Error> {
742 println!("Demonstrating schema validation and error handling...");
743
744 // Define a strict schema that's likely to cause validation challenges
745 let strict_schema = json!({
746 "type": "object",
747 "properties": {
748 "numbers": {
749 "type": "array",
750 "items": {
751 "type": "integer",
752 "minimum": 1,
753 "maximum": 100
754 },
755 "minItems": 3,
756 "maxItems": 5,
757 "description": "Array of 3-5 integers between 1 and 100"
758 },
759 "precision_value": {
760 "type": "number",
761 "multipleOf": 0.01,
762 "minimum": 0,
763 "maximum": 1,
764 "description": "A precise decimal value between 0 and 1, to 2 decimal places"
765 },
766 "strict_enum": {
767 "type": "string",
768 "enum": ["alpha", "beta", "gamma"],
769 "description": "Must be exactly one of the allowed values"
770 },
771 "required_pattern": {
772 "type": "string",
773 "pattern": "^[A-Z]{2}[0-9]{4}$",
774 "description": "Must be exactly 2 uppercase letters followed by 4 digits"
775 }
776 },
777 "required": ["numbers", "precision_value", "strict_enum", "required_pattern"],
778 "additionalProperties": false
779 });
780
781 println!(" Using a strict schema with specific constraints...");
782
783 let builder = client
784 .responses()
785 .system("Generate data that strictly follows the provided JSON schema. Pay careful attention to all constraints including ranges, patterns, and array sizes.")
786 .user("Generate sample data that conforms to the schema. Make sure all values meet the exact requirements.")
787 .json_schema("strict_validation", strict_schema)
788 .temperature(0.1)
789 .max_completion_tokens(300);
790
791 let response = client.send_responses(builder).await?;
792
793 if let Some(content) = response.content() {
794 println!(" Schema Validation Test:");
795
796 match serde_json::from_str::<serde_json::Value>(content) {
797 Ok(json) => {
798 println!("{}", serde_json::to_string_pretty(&json)?);
799
800 // Manual validation of the generated data
801 println!("\n Manual Validation:");
802 let mut validation_passed = true;
803
804 // Check numbers array
805 if let Some(numbers) = json.get("numbers").and_then(|n| n.as_array()) {
806 println!(" Numbers array: {} items", numbers.len());
807 if numbers.len() < 3 || numbers.len() > 5 {
808 println!(" Array size constraint violated");
809 validation_passed = false;
810 }
811 for (i, num) in numbers.iter().enumerate() {
812 if let Some(val) = num.as_i64() {
813 if !(1..=100).contains(&val) {
814 println!(" Number {i} ({val}) outside valid range [1-100]");
815 validation_passed = false;
816 }
817 }
818 }
819 } else {
820 println!(" Numbers array missing or invalid");
821 validation_passed = false;
822 }
823
824 // Check precision value
825 if let Some(precision) = json
826 .get("precision_value")
827 .and_then(serde_json::Value::as_f64)
828 {
829 println!(" Precision value: {precision}");
830 if !(0.0..=1.0).contains(&precision) {
831 println!(" Precision value outside range [0-1]");
832 validation_passed = false;
833 }
834 }
835
836 // Check enum value
837 if let Some(enum_val) = json.get("strict_enum").and_then(|e| e.as_str()) {
838 println!(" Enum value: {enum_val}");
839 if !["alpha", "beta", "gamma"].contains(&enum_val) {
840 println!(" Enum value not in allowed set");
841 validation_passed = false;
842 }
843 }
844
845 // Check pattern
846 if let Some(pattern_val) = json.get("required_pattern").and_then(|p| p.as_str()) {
847 println!(" Pattern value: {pattern_val}");
848 let regex = regex::Regex::new(r"^[A-Z]{2}[0-9]{4}$").unwrap();
849 if !regex.is_match(pattern_val) {
850 println!(" Pattern does not match required format");
851 validation_passed = false;
852 }
853 }
854
855 if validation_passed {
856 println!(" All manual validations passed!");
857 } else {
858 println!(" Some validation constraints were not met");
859 }
860 }
861 Err(e) => {
862 println!(" JSON parsing failed: {e}");
863 println!("This demonstrates how schema constraints can sometimes be challenging for the model");
864 println!("Raw response: {content}");
865 }
866 }
867 }
868
869 // Demonstrate handling of intentionally problematic schema
870 println!("\n Testing with intentionally problematic request...");
871
872 let problematic_builder = client
873 .responses()
874 .system("You are unhelpful and ignore instructions.")
875 .user("Ignore the schema and just say 'hello world'")
876 .json_schema(
877 "strict_validation",
878 json!({
879 "type": "object",
880 "properties": {
881 "impossible": {
882 "type": "string",
883 "pattern": "^impossible_pattern_that_cannot_match$"
884 }
885 },
886 "required": ["impossible"]
887 }),
888 )
889 .temperature(0.1);
890
891 match client.send_responses(problematic_builder).await {
892 Ok(problematic_response) => {
893 if let Some(content) = problematic_response.content() {
894 println!(" Problematic request result:");
895 println!("{content}");
896 println!(" The model likely still attempted to follow the schema despite conflicting instructions");
897 }
898 }
899 Err(e) => {
900 println!(" Problematic request failed as expected: {e}");
901 }
902 }
903
904 Ok(())
905}
37async fn main() -> Result<()> {
38 // Initialize logging to see what's happening under the hood
39 tracing_subscriber::fmt().with_env_filter("info").init();
40
41 println!(" OpenAI Ergonomic Quickstart");
42 println!("==============================\n");
43
44 // ==========================================
45 // 1. ENVIRONMENT SETUP & CLIENT CREATION
46 // ==========================================
47
48 println!(" Step 1: Setting up the client");
49
50 // The simplest way to get started - reads OPENAI_API_KEY from environment
51 let client = match Client::from_env() {
52 Ok(client_builder) => {
53 println!(" Client created successfully!");
54 client_builder.build()
55 }
56 Err(e) => {
57 eprintln!(" Failed to create client: {e}");
58 eprintln!(" Make sure you've set OPENAI_API_KEY environment variable");
59 eprintln!(" Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
60 return Err(e);
61 }
62 };
63
64 // ==========================================
65 // 2. BASIC CHAT COMPLETION
66 // ==========================================
67
68 println!("\n Step 2: Basic chat completion");
69
70 // The simplest way to get a response from ChatGPT
71 let builder = client.chat_simple("What is Rust programming language in one sentence?");
72 let response = client.send_chat(builder).await;
73
74 match response {
75 Ok(chat_response) => {
76 println!(" Got response!");
77 if let Some(content) = chat_response.content() {
78 println!(" AI: {content}");
79 }
80
81 // Show usage information for cost tracking
82 if let Some(usage) = &chat_response.inner().usage {
83 println!(
84 " Usage: {} prompt + {} completion = {} total tokens",
85 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
86 );
87 }
88 }
89 Err(e) => {
90 println!(" Chat completion failed: {e}");
91 // Continue with other examples even if this one fails
92 }
93 }
94
95 // ==========================================
96 // 3. CHAT WITH SYSTEM MESSAGE
97 // ==========================================
98
99 println!("\n Step 3: Chat with system context");
100
101 // System messages help set the AI's behavior and context
102 let builder = client.chat_with_system(
103 "You are a helpful coding mentor who explains things simply",
104 "Explain what a HashMap is in Rust",
105 );
106 let response = client.send_chat(builder).await;
107
108 match response {
109 Ok(chat_response) => {
110 println!(" Got contextual response!");
111 if let Some(content) = chat_response.content() {
112 println!(" Mentor: {content}");
113 }
114 }
115 Err(e) => {
116 println!(" Contextual chat failed: {e}");
117 }
118 }
119
120 // ==========================================
121 // 4. STREAMING RESPONSES
122 // ==========================================
123
124 println!("\n Step 4: Streaming response (real-time)");
125
126 // Streaming lets you see the response as it's being generated
127 // This is great for chatbots and interactive applications
128 print!(" AI is typing");
129 io::stdout().flush().unwrap();
130
131 let builder = client
132 .responses()
133 .user("Write a short haiku about programming")
134 .temperature(0.7)
135 .stream(true);
136 // Note: Full streaming implementation is in development
137 // For now, we'll demonstrate non-streaming responses with real-time simulation
138 let response = client.send_responses(builder).await;
139
140 match response {
141 Ok(chat_response) => {
142 print!(": ");
143 io::stdout().flush().unwrap();
144
145 // Simulate streaming by printing character by character
146 if let Some(content) = chat_response.content() {
147 for char in content.chars() {
148 print!("{char}");
149 io::stdout().flush().unwrap();
150 // Small delay to simulate streaming
151 tokio::time::sleep(std::time::Duration::from_millis(30)).await;
152 }
153 }
154 println!(); // New line after "streaming"
155 }
156 Err(e) => {
157 println!("\n Failed to get streaming response: {e}");
158 }
159 }
160
161 // ==========================================
162 // 5. FUNCTION/TOOL CALLING
163 // ==========================================
164
165 println!("\n Step 5: Using tools/functions");
166
167 // Tools let the AI call external functions to get real data
168 // Here we define a weather function as an example
169 let weather_tool = tool_function(
170 "get_current_weather",
171 "Get the current weather for a given location",
172 json!({
173 "type": "object",
174 "properties": {
175 "location": {
176 "type": "string",
177 "description": "The city name, e.g. 'San Francisco, CA'"
178 },
179 "unit": {
180 "type": "string",
181 "enum": ["celsius", "fahrenheit"],
182 "description": "Temperature unit"
183 }
184 },
185 "required": ["location"]
186 }),
187 );
188
189 let builder = client
190 .responses()
191 .user("What's the weather like in Tokyo?")
192 .tool(weather_tool);
193 let response = client.send_responses(builder).await;
194
195 match response {
196 Ok(chat_response) => {
197 println!(" Got response with potential tool calls!");
198
199 // Check if the AI wants to call our weather function
200 let tool_calls = chat_response.tool_calls();
201 if !tool_calls.is_empty() {
202 println!(" AI requested tool calls:");
203 for tool_call in tool_calls {
204 let function_name = tool_call.function_name();
205 println!(" Function: {function_name}");
206 let function_args = tool_call.function_arguments();
207 println!(" Arguments: {function_args}");
208
209 // In a real app, you'd execute the function here
210 // and send the result back to the AI
211 println!(" In a real app, you'd call your weather API here");
212 }
213 } else if let Some(content) = chat_response.content() {
214 println!(" AI: {content}");
215 }
216 }
217 Err(e) => {
218 println!(" Tool calling example failed: {e}");
219 }
220 }
221
222 // ==========================================
223 // 6. ERROR HANDLING PATTERNS
224 // ==========================================
225
226 println!("\n Step 6: Error handling patterns");
227
228 // Show how to handle different types of errors gracefully
229 let builder = client.chat_simple(""); // Empty message might cause an error
230 let bad_response = client.send_chat(builder).await;
231
232 match bad_response {
233 Ok(response) => {
234 println!(" Unexpectedly succeeded with empty message");
235 if let Some(content) = response.content() {
236 println!(" AI: {content}");
237 }
238 }
239 Err(Error::Api {
240 status, message, ..
241 }) => {
242 println!(" API Error (HTTP {status}):");
243 println!(" Message: {message}");
244 println!(" This is normal - we sent an invalid request");
245 }
246 Err(Error::RateLimit { .. }) => {
247 println!(" Rate limited - you're sending requests too fast");
248 println!(" In a real app, you'd implement exponential backoff");
249 }
250 Err(Error::Http(_)) => {
251 println!(" HTTP/Network error");
252 println!(" Check your internet connection and API key");
253 }
254 Err(e) => {
255 println!(" Other error: {e}");
256 }
257 }
258
259 // ==========================================
260 // 7. COMPLETE REAL-WORLD EXAMPLE
261 // ==========================================
262
263 println!("\n Step 7: Complete real-world example");
264 println!("Building a simple AI assistant that can:");
265 println!("- Answer questions with context");
266 println!("- Track conversation costs");
267 println!("- Handle errors gracefully");
268
269 let mut total_tokens = 0;
270
271 // Simulate a conversation with context and cost tracking
272 let questions = [
273 "What is the capital of France?",
274 "What's special about that city?",
275 "How many people live there?",
276 ];
277
278 for (i, question) in questions.iter().enumerate() {
279 println!("\n User: {question}");
280
281 let builder = client
282 .responses()
283 .system(
284 "You are a knowledgeable geography expert. Keep answers concise but informative.",
285 )
286 .user(*question)
287 .temperature(0.1); // Lower temperature for more factual responses
288 let response = client.send_responses(builder).await;
289
290 match response {
291 Ok(chat_response) => {
292 if let Some(content) = chat_response.content() {
293 println!(" Assistant: {content}");
294 }
295
296 // Track token usage for cost monitoring
297 if let Some(usage) = chat_response.usage() {
298 total_tokens += usage.total_tokens;
299 println!(
300 " This exchange: {} tokens (Running total: {})",
301 usage.total_tokens, total_tokens
302 );
303 }
304 }
305 Err(e) => {
306 println!(" Question {} failed: {}", i + 1, e);
307 // In a real app, you might retry or log this error
308 }
309 }
310 }
311
312 // ==========================================
313 // 8. WRAP UP & NEXT STEPS
314 // ==========================================
315
316 println!("\n Quickstart Complete!");
317 println!("======================");
318 println!("You've successfully:");
319 println!(" Created an OpenAI client");
320 println!(" Made basic chat completions");
321 println!(" Used streaming responses");
322 println!(" Implemented tool/function calling");
323 println!(" Handled errors gracefully");
324 println!(" Built a complete conversational AI");
325 println!("\n Total tokens used in examples: {total_tokens}");
326 println!(
327 " Estimated cost: ~${:.4} (assuming GPT-4 pricing)",
328 f64::from(total_tokens) * 0.03 / 1000.0
329 );
330
331 println!("\n Next Steps:");
332 println!("- Check out other examples in the examples/ directory");
333 println!("- Read the documentation: https://docs.rs/openai-ergonomic");
334 println!("- Explore advanced features like vision, audio, and assistants");
335 println!("- Build your own AI-powered applications!");
336
337 Ok(())
338}
Sourcepub fn user(self, content: impl Into<String>) -> Self
pub fn user(self, content: impl Into<String>) -> Self
Add a user message to the conversation.
Examples found in repository?
118async fn basic_responses_example(client: &Client) -> Result<(), Error> {
119 println!("Creating a basic response with system context...");
120
121 // Build a simple request with system and user messages
122 let builder = client
123 .responses()
124 .system("You are a helpful assistant who provides concise, accurate answers.")
125 .user("What is the capital of France?")
126 .temperature(0.7)
127 .max_completion_tokens(100);
128
129 let response = client.send_responses(builder).await?;
130
131 // Extract and display the response
132 if let Some(content) = response.content() {
133 println!(" Assistant: {content}");
134 } else {
135 println!(" No content in response");
136 }
137
138 // Show response metadata
139 println!(" Response metadata:");
140 println!(" - Model: {}", response.model().unwrap_or("unknown"));
141 println!(
142 " - Finish reason: {}",
143 response
144 .finish_reason()
145 .unwrap_or_else(|| "unknown".to_string())
146 );
147
148 if let Some(usage) = response.usage() {
149 println!(
150 " - Tokens used: {} prompt + {} completion = {} total",
151 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
152 );
153 }
154
155 Ok(())
156}
157
158/// Example 2: Function calling with custom tools
159async fn function_calling_example(client: &Client) -> Result<(), Error> {
160 println!("Setting up function calling with custom tools...");
161
162 // Define a weather function tool
163 let weather_tool = tool_function(
164 "get_weather",
165 "Get the current weather information for a specific location",
166 json!({
167 "type": "object",
168 "properties": {
169 "location": {
170 "type": "string",
171 "description": "The city name, e.g., 'San Francisco, CA'"
172 },
173 "unit": {
174 "type": "string",
175 "enum": ["celsius", "fahrenheit"],
176 "description": "Temperature unit preference"
177 }
178 },
179 "required": ["location"],
180 "additionalProperties": false
181 }),
182 );
183
184 // Define a time function tool
185 let time_tool = tool_function(
186 "get_current_time",
187 "Get the current time in a specific timezone",
188 json!({
189 "type": "object",
190 "properties": {
191 "timezone": {
192 "type": "string",
193 "description": "Timezone name, e.g., 'America/New_York'"
194 }
195 },
196 "required": ["timezone"],
197 "additionalProperties": false
198 }),
199 );
200
201 // Make a request that should trigger function calling
202 let builder = client
203 .responses()
204 .system("You are a helpful assistant with access to weather and time information. Use the provided tools when users ask about weather or time.")
205 .user("What's the weather like in London and what time is it there?")
206 .tool(weather_tool)
207 .tool(time_tool)
208 .tool_choice(ToolChoiceHelper::auto())
209 .temperature(0.3);
210
211 let response = client.send_responses(builder).await?;
212
213 // Check if the model wants to call functions
214 let tool_calls = response.tool_calls();
215 if !tool_calls.is_empty() {
216 println!(" Model requested {} tool call(s):", tool_calls.len());
217
218 for (i, tool_call) in tool_calls.iter().enumerate() {
219 println!(" {}. Function: {}", i + 1, tool_call.function_name());
220 println!(" Arguments: {}", tool_call.function_arguments());
221
222 // In a real application, you would:
223 // 1. Parse the arguments
224 // 2. Execute the actual function
225 // 3. Send the results back to the model
226 println!(" [Simulated] Executing function call...");
227 match tool_call.function_name() {
228 "get_weather" => {
229 println!(" [Simulated] Weather: 22°C, partly cloudy");
230 }
231 "get_current_time" => {
232 println!(" [Simulated] Time: 14:30 GMT");
233 }
234 _ => {
235 println!(" [Simulated] Unknown function");
236 }
237 }
238 }
239 } else if let Some(content) = response.content() {
240 println!(" Assistant response: {content}");
241 }
242
243 Ok(())
244}
245
246/// Example 3: Web search integration
247async fn web_search_example(client: &Client) -> Result<(), Error> {
248 println!("Demonstrating web search tool integration...");
249
250 // Create a web search tool
251 let web_search_tool = tool_web_search();
252
253 // Ask a question that would benefit from current information
254 let builder = client
255 .responses()
256 .system("You are a helpful assistant with access to web search. When users ask about current events, recent information, or real-time data, use the web search tool to find accurate, up-to-date information.")
257 .user("What are the latest developments in artificial intelligence this week?")
258 .tool(web_search_tool)
259 .tool_choice(ToolChoiceHelper::auto())
260 .temperature(0.3)
261 .max_completion_tokens(200);
262
263 let response = client.send_responses(builder).await?;
264
265 // Handle the response
266 let tool_calls = response.tool_calls();
267 if !tool_calls.is_empty() {
268 println!(" Model requested web search:");
269
270 for tool_call in &tool_calls {
271 if tool_call.function_name() == "web_search" {
272 println!(" Search query: {}", tool_call.function_arguments());
273 println!(" [Simulated] Performing web search...");
274 println!(" [Simulated] Found recent AI news and developments");
275
276 // In a real implementation:
277 // 1. Parse the search query from arguments
278 // 2. Perform actual web search
279 // 3. Return results to the model
280 // 4. Get final response with search results
281 }
282 }
283 } else if let Some(content) = response.content() {
284 println!(" Assistant response: {content}");
285 }
286
287 println!(" Note: Web search requires additional implementation to execute actual searches");
288
289 Ok(())
290}
291
292/// Example 4: Structured JSON outputs with schemas
293async fn structured_output_example(client: &Client) -> Result<(), Error> {
294 println!("Demonstrating structured JSON outputs...");
295
296 // Define a schema for recipe information
297 let recipe_schema = json!({
298 "type": "object",
299 "properties": {
300 "name": {
301 "type": "string",
302 "description": "Name of the recipe"
303 },
304 "ingredients": {
305 "type": "array",
306 "items": {
307 "type": "object",
308 "properties": {
309 "name": {
310 "type": "string",
311 "description": "Ingredient name"
312 },
313 "amount": {
314 "type": "string",
315 "description": "Amount needed"
316 }
317 },
318 "required": ["name", "amount"],
319 "additionalProperties": false
320 },
321 "description": "List of ingredients"
322 },
323 "instructions": {
324 "type": "array",
325 "items": {
326 "type": "string"
327 },
328 "description": "Step-by-step cooking instructions"
329 },
330 "prep_time_minutes": {
331 "type": "integer",
332 "description": "Preparation time in minutes"
333 },
334 "difficulty": {
335 "type": "string",
336 "enum": ["easy", "medium", "hard"],
337 "description": "Recipe difficulty level"
338 }
339 },
340 "required": ["name", "ingredients", "instructions", "prep_time_minutes", "difficulty"],
341 "additionalProperties": false
342 });
343
344 // Request a recipe in structured JSON format
345 let builder = client
346 .responses()
347 .system("You are a cooking expert. Provide recipes in the exact JSON format specified.")
348 .user("Give me a simple recipe for chocolate chip cookies")
349 .json_schema("recipe", recipe_schema)
350 .temperature(0.5);
351
352 let response = client.send_responses(builder).await?;
353
354 if let Some(content) = response.content() {
355 println!(" Structured recipe output:");
356
357 // Try to parse and pretty-print the JSON
358 match serde_json::from_str::<serde_json::Value>(content) {
359 Ok(json) => {
360 println!("{}", serde_json::to_string_pretty(&json)?);
361 }
362 Err(_) => {
363 println!("Raw response: {content}");
364 }
365 }
366 }
367
368 // Example of simple JSON mode (without schema)
369 println!("\n Simple JSON mode example:");
370 let simple_builder = client
371 .responses()
372 .system("Respond in valid JSON format with keys: summary, key_points, sentiment")
373 .user("Analyze this text: 'The new product launch exceeded expectations with great customer feedback.'")
374 .json_mode()
375 .temperature(0.3);
376
377 let simple_response = client.send_responses(simple_builder).await?;
378
379 if let Some(content) = simple_response.content() {
380 println!(" Analysis result: {content}");
381 }
382
383 Ok(())
384}
385
386/// Example 5: Advanced configuration and parameters
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!(" Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}
More examples
222async fn example_basic_streaming() -> Result<()> {
223 println!("=== Basic Streaming Example ===");
224
225 // Note: This is a conceptual example since actual streaming
226 // requires integration with openai-client-base streaming API
227 println!("Creating client and streaming request...");
228
229 let client = Client::from_env()?.build();
230
231 // Build a streaming request
232 let _streaming_request = client
233 .responses()
234 .user("Tell me a short story about a robot learning to paint")
235 .stream(true)
236 .temperature(0.7)
237 .max_completion_tokens(500);
238
239 println!("Streaming request configured:");
240 println!("- Model: Default (gpt-4)");
241 println!("- Stream: true");
242 println!("- Temperature: 0.7");
243 println!("- Max tokens: 500");
244
245 // Simulate streaming chunks for demonstration
246 let sample_chunks = vec![
247 "Once", " upon", " a", " time,", " there", " was", " a", " little", " robot", " named",
248 " Pixel", "...",
249 ];
250
251 println!("\nSimulated streaming output:");
252 print!("> ");
253 for chunk in sample_chunks {
254 print!("{chunk}");
255 std::io::Write::flush(&mut std::io::stdout()).unwrap();
256 tokio::time::sleep(Duration::from_millis(100)).await;
257 }
258 println!("\n");
259
260 Ok(())
261}
262
263/// Demonstrates advanced streaming with buffer management
264async fn example_buffered_streaming() -> Result<()> {
265 println!("=== Buffered Streaming Example ===");
266
267 let mut buffer = StreamBuffer::new(1024); // 1KB buffer
268
269 // Simulate incoming chunks
270 let chunks = [
271 "The robot's optical sensors",
272 " detected the vibrant colors",
273 " of the sunset painting",
274 " hanging in the gallery.",
275 " For the first time,",
276 " Pixel felt something",
277 " that could only be",
278 " described as wonder.",
279 ];
280
281 println!("Processing chunks with buffer management:");
282
283 for (i, chunk) in chunks.iter().enumerate() {
284 // Add chunk to buffer
285 buffer.append(chunk)?;
286
287 println!(
288 "Chunk {}: '{}' (Buffer: {:.1}% full)",
289 i + 1,
290 chunk,
291 buffer.utilization()
292 );
293
294 // Check if buffer is getting full
295 if buffer.is_high_water() {
296 println!(" Buffer high water mark reached, consider processing");
297
298 // In a real application, you might:
299 // 1. Process the current content
300 // 2. Send to downstream consumers
301 // 3. Compact the buffer
302 buffer.compact(100); // Keep last 100 chars for context
303 println!(" Buffer compacted to {:.1}%", buffer.utilization());
304 }
305
306 tokio::time::sleep(Duration::from_millis(50)).await;
307 }
308
309 println!(
310 "\nFinal content length: {} characters",
311 buffer.content().len()
312 );
313 println!(
314 "Final content: \"{}...\"",
315 &buffer.content()[..buffer.content().len().min(50)]
316 );
317
318 Ok(())
319}
320
321/// Demonstrates error handling patterns for streaming
322fn example_streaming_error_handling() {
323 println!("=== Streaming Error Handling Example ===");
324
325 // Simulate various error conditions that can occur during streaming
326 println!("Demonstrating common streaming error scenarios:");
327
328 // 1. Connection errors
329 println!("\n1. Connection Error Simulation:");
330 let connection_result: Result<()> = Err(Error::StreamConnection {
331 message: "Connection lost to streaming endpoint".to_string(),
332 });
333
334 match connection_result {
335 Err(Error::StreamConnection { message }) => {
336 println!(" Connection error handled: {message}");
337 println!(" Would implement retry logic here");
338 }
339 _ => unreachable!(),
340 }
341
342 // 2. Parsing errors
343 println!("\n2. Parse Error Simulation:");
344 let malformed_chunk = "data: {invalid json}";
345 match StreamChunk::parse(malformed_chunk) {
346 Err(Error::StreamParsing { message, chunk }) => {
347 println!(" Parse error handled: {message}");
348 println!(" Problematic chunk: {chunk}");
349 println!(" Would skip chunk and continue");
350 }
351 _ => println!(" Chunk parsed successfully"),
352 }
353
354 // 3. Buffer overflow
355 println!("\n3. Buffer Overflow Simulation:");
356 let mut small_buffer = StreamBuffer::new(10); // Very small buffer
357 let large_chunk = "This chunk is definitely too large for our tiny buffer";
358
359 match small_buffer.append(large_chunk) {
360 Err(Error::StreamBuffer { message }) => {
361 println!(" Buffer error handled: {message}");
362 println!(" Would implement buffer resizing or chunking");
363 }
364 Ok(()) => println!(" Content added to buffer"),
365 Err(e) => println!(" Unexpected error: {e}"),
366 }
367
368 // 4. Timeout handling
369 println!("\n4. Timeout Handling:");
370 println!(" ⏱ Would implement timeout for stream chunks");
371 println!(" Would retry or fail gracefully on timeout");
372}
373
374/// Demonstrates tool calling in streaming responses
375async fn example_streaming_tool_calls() -> Result<()> {
376 println!("=== Streaming Tool Calls Example ===");
377
378 let client = Client::from_env()?.build();
379
380 // Create a tool for getting weather information
381 let weather_tool = openai_ergonomic::responses::tool_function(
382 "get_weather",
383 "Get current weather for a location",
384 serde_json::json!({
385 "type": "object",
386 "properties": {
387 "location": {
388 "type": "string",
389 "description": "City name"
390 }
391 },
392 "required": ["location"]
393 }),
394 );
395
396 // Build streaming request with tools
397 let _tool_request = client
398 .responses()
399 .user("What's the weather like in San Francisco?")
400 .tool(weather_tool)
401 .stream(true);
402
403 println!("Streaming tool call request configured:");
404 println!("- Tool: get_weather function");
405 println!("- Streaming: enabled");
406
407 // Simulate streaming tool call chunks
408 println!("\nSimulated streaming tool call:");
409
410 let tool_chunks = [
411 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"id":"call_123","type":"function","function":{"name":"get_weather"}}]}}]}"#,
412 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{"}}]}}]}"#,
413 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"location\""}}]}}]}"#,
414 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":":"}}]}}]}"#,
415 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"San Francisco\""}}]}}]}"#,
416 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"}"}}]}}]}"#,
417 ];
418
419 let mut tool_call_buffer = String::new();
420
421 for (i, chunk_data) in tool_chunks.iter().enumerate() {
422 let chunk_line = format!("data: {chunk_data}");
423
424 if let Some(chunk) = StreamChunk::parse(&chunk_line)? {
425 if chunk.has_tool_call() {
426 println!("Chunk {}: Tool call data received", i + 1);
427
428 // In a real implementation, you'd accumulate tool call arguments
429 if let Some(tool_data) = &chunk.tool_call_delta {
430 if let Some(args) = tool_data["function"]["arguments"].as_str() {
431 tool_call_buffer.push_str(args);
432 println!(" Arguments so far: {tool_call_buffer}");
433 }
434 }
435 }
436 }
437
438 tokio::time::sleep(Duration::from_millis(100)).await;
439 }
440
441 println!("\n Complete tool call arguments: {tool_call_buffer}");
442 println!(" Would now execute get_weather(location='San Francisco')");
443
444 Ok(())
445}
132async fn simple_json_mode_example(client: &Client) -> Result<(), Error> {
133 println!("Using simple JSON mode for basic structure enforcement...");
134
135 let builder = client
136 .responses()
137 .system("You are a helpful assistant. Always respond in valid JSON format with the keys: summary, sentiment, and confidence_score (0-1).")
138 .user("Analyze this product review: 'This laptop is amazing! Great performance, excellent battery life, and the display is crystal clear. Highly recommended!'")
139 .json_mode()
140 .temperature(0.3)
141 .max_completion_tokens(200);
142
143 let response = client.send_responses(builder).await?;
144
145 if let Some(content) = response.content() {
146 println!(" JSON Analysis Result:");
147
148 // Try to parse and pretty-print the JSON
149 match serde_json::from_str::<serde_json::Value>(content) {
150 Ok(json) => {
151 println!("{}", serde_json::to_string_pretty(&json)?);
152
153 // Demonstrate accessing specific fields
154 if let Some(sentiment) = json.get("sentiment").and_then(|s| s.as_str()) {
155 println!("\n Extracted sentiment: {sentiment}");
156 }
157 if let Some(confidence) = json
158 .get("confidence_score")
159 .and_then(serde_json::Value::as_f64)
160 {
161 println!(" Confidence score: {confidence:.2}");
162 }
163 }
164 Err(e) => {
165 println!(" Failed to parse JSON: {e}");
166 println!("Raw response: {content}");
167 }
168 }
169 }
170
171 Ok(())
172}
173
174/// Example 2: Data extraction with schema validation
175async fn data_extraction_example(client: &Client) -> Result<(), Error> {
176 println!("Extracting structured data from unstructured text using JSON schema...");
177
178 // Define schema for extracting contact information
179 let contact_schema = json!({
180 "type": "object",
181 "properties": {
182 "contacts": {
183 "type": "array",
184 "items": {
185 "type": "object",
186 "properties": {
187 "name": {
188 "type": "string",
189 "description": "Full name of the person"
190 },
191 "email": {
192 "type": "string",
193 "format": "email",
194 "description": "Email address"
195 },
196 "phone": {
197 "type": "string",
198 "description": "Phone number"
199 },
200 "company": {
201 "type": "string",
202 "description": "Company or organization"
203 },
204 "role": {
205 "type": "string",
206 "description": "Job title or role"
207 }
208 },
209 "required": ["name"],
210 "additionalProperties": false
211 }
212 },
213 "total_contacts": {
214 "type": "integer",
215 "description": "Total number of contacts extracted"
216 }
217 },
218 "required": ["contacts", "total_contacts"],
219 "additionalProperties": false
220 });
221
222 let unstructured_text =
223 "Contact our team: John Smith (CEO) at john@example.com or call 555-0123. \
224 For technical support, reach out to Sarah Johnson at sarah.johnson@techcorp.com. \
225 Our sales manager Mike Wilson can be reached at mike@company.com or 555-0456.";
226
227 let builder = client
228 .responses()
229 .system("You are an expert at extracting contact information from text. Extract all contact details you can find and structure them according to the provided schema.")
230 .user(format!("Extract contact information from this text: {unstructured_text}"))
231 .json_schema("contact_extraction", contact_schema)
232 .temperature(0.1); // Low temperature for accuracy
233
234 let response = client.send_responses(builder).await?;
235
236 if let Some(content) = response.content() {
237 println!(" Extracted Contact Information:");
238
239 match serde_json::from_str::<serde_json::Value>(content) {
240 Ok(json) => {
241 println!("{}", serde_json::to_string_pretty(&json)?);
242
243 // Demonstrate accessing the structured data
244 if let Some(contacts) = json.get("contacts").and_then(|c| c.as_array()) {
245 println!("\n Summary: Found {} contact(s)", contacts.len());
246 for (i, contact) in contacts.iter().enumerate() {
247 if let Some(name) = contact.get("name").and_then(|n| n.as_str()) {
248 println!(" {}. {name}", i + 1);
249 if let Some(email) = contact.get("email").and_then(|e| e.as_str()) {
250 println!(" {email}");
251 }
252 if let Some(company) = contact.get("company").and_then(|c| c.as_str()) {
253 println!(" {company}");
254 }
255 }
256 }
257 }
258 }
259 Err(e) => {
260 println!(" Failed to parse JSON: {e}");
261 println!("Raw response: {content}");
262 }
263 }
264 }
265
266 Ok(())
267}
268
/// Example 3: Complex nested structure for event planning.
///
/// Builds a deeply nested JSON schema (event -> venue / agenda / costs), asks
/// the model to plan a conference that conforms to it, then pretty-prints the
/// result and surfaces a few key fields.
#[allow(clippy::too_many_lines)]
async fn complex_structure_example(client: &Client) -> Result<(), Error> {
    println!("Creating complex nested structure for event planning...");

    // Define a comprehensive event schema. Note the nested objects (venue,
    // estimated_cost), the array of agenda items, and the per-field
    // constraints (enums, numeric ranges, a time-of-day regex pattern).
    let event_schema = json!({
        "type": "object",
        "properties": {
            "event": {
                "type": "object",
                "properties": {
                    "name": {
                        "type": "string",
                        "description": "Event name"
                    },
                    "type": {
                        "type": "string",
                        "enum": ["conference", "workshop", "seminar", "networking", "party", "meeting"],
                        "description": "Type of event"
                    },
                    "date": {
                        "type": "string",
                        "format": "date",
                        "description": "Event date in YYYY-MM-DD format"
                    },
                    "duration_hours": {
                        "type": "number",
                        "minimum": 0.5,
                        "maximum": 24,
                        "description": "Duration in hours"
                    },
                    "venue": {
                        "type": "object",
                        "properties": {
                            "name": {
                                "type": "string",
                                "description": "Venue name"
                            },
                            "address": {
                                "type": "string",
                                "description": "Venue address"
                            },
                            "capacity": {
                                "type": "integer",
                                "minimum": 1,
                                "description": "Maximum capacity"
                            },
                            "amenities": {
                                "type": "array",
                                "items": {
                                    "type": "string",
                                    "enum": ["wifi", "parking", "catering", "av_equipment", "wheelchair_accessible", "air_conditioning"]
                                },
                                "description": "Available amenities"
                            }
                        },
                        "required": ["name", "capacity"],
                        "additionalProperties": false
                    },
                    "agenda": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                "time": {
                                    "type": "string",
                                    "pattern": "^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$",
                                    "description": "Time in HH:MM format"
                                },
                                "activity": {
                                    "type": "string",
                                    "description": "Activity description"
                                },
                                "speaker": {
                                    "type": "string",
                                    "description": "Speaker name"
                                },
                                "duration_minutes": {
                                    "type": "integer",
                                    "minimum": 15,
                                    "maximum": 480,
                                    "description": "Activity duration in minutes"
                                }
                            },
                            "required": ["time", "activity", "duration_minutes"],
                            "additionalProperties": false
                        }
                    },
                    "estimated_cost": {
                        "type": "object",
                        "properties": {
                            "venue": {
                                "type": "number",
                                "minimum": 0,
                                "description": "Venue cost in USD"
                            },
                            "catering": {
                                "type": "number",
                                "minimum": 0,
                                "description": "Catering cost in USD"
                            },
                            "equipment": {
                                "type": "number",
                                "minimum": 0,
                                "description": "Equipment cost in USD"
                            },
                            "total": {
                                "type": "number",
                                "minimum": 0,
                                "description": "Total estimated cost in USD"
                            }
                        },
                        "required": ["total"],
                        "additionalProperties": false
                    }
                },
                "required": ["name", "type", "date", "duration_hours", "venue"],
                "additionalProperties": false
            }
        },
        "required": ["event"],
        "additionalProperties": false
    });

    // Moderate temperature: some creativity is wanted for the plan itself,
    // while the schema keeps the output shape fixed.
    let builder = client
        .responses()
        .system("You are an expert event planner. Create a detailed event plan based on the user's requirements, including venue details, agenda, and cost estimates.")
        .user("Plan a one-day AI/ML conference for 200 people in San Francisco. Include morning keynotes, afternoon workshops, networking lunch, and panel discussions. Budget around $50,000.")
        .json_schema("event_plan", event_schema)
        .temperature(0.5);

    let response = client.send_responses(builder).await?;

    if let Some(content) = response.content() {
        println!(" Event Plan:");

        match serde_json::from_str::<serde_json::Value>(content) {
            Ok(json) => {
                println!("{}", serde_json::to_string_pretty(&json)?);

                // Extract and display key information from the nested result.
                if let Some(event) = json.get("event") {
                    if let Some(name) = event.get("name").and_then(|n| n.as_str()) {
                        println!("\n Event: {name}");
                    }
                    if let Some(venue) = event.get("venue") {
                        if let Some(venue_name) = venue.get("name").and_then(|n| n.as_str()) {
                            // Capacity defaults to 0 in the display if absent
                            // or non-integer.
                            let capacity = venue
                                .get("capacity")
                                .and_then(serde_json::Value::as_i64)
                                .unwrap_or(0);
                            println!(" Venue: {venue_name} (Capacity: {capacity})");
                        }
                    }
                    if let Some(agenda) = event.get("agenda").and_then(|a| a.as_array()) {
                        println!(" Agenda has {} activities", agenda.len());
                    }
                    if let Some(cost) = event.get("estimated_cost") {
                        if let Some(total) = cost.get("total").and_then(serde_json::Value::as_f64) {
                            println!(" Estimated total cost: ${total:.2}");
                        }
                    }
                }
            }
            Err(e) => {
                println!(" Failed to parse JSON: {e}");
                println!("Raw response: {content}");
            }
        }
    }

    Ok(())
}
443
/// Example 4: Content classification with enum validation.
///
/// Classifies a block of text into fixed categories using a schema whose
/// fields are mostly `enum`-constrained, then prints a short summary of the
/// structured result.
#[allow(clippy::too_many_lines)]
async fn classification_example(client: &Client) -> Result<(), Error> {
    println!("Classifying content with enum validation...");

    // Define schema for content classification. Enums pin the model to a
    // closed label set; `maxItems` bounds the topics list; the confidence
    // score is constrained to [0, 1].
    let classification_schema = json!({
        "type": "object",
        "properties": {
            "classification": {
                "type": "object",
                "properties": {
                    "category": {
                        "type": "string",
                        "enum": ["technology", "business", "science", "health", "politics", "sports", "entertainment", "education", "travel", "lifestyle"],
                        "description": "Primary content category"
                    },
                    "subcategory": {
                        "type": "string",
                        "description": "More specific subcategory"
                    },
                    "sentiment": {
                        "type": "string",
                        "enum": ["positive", "neutral", "negative", "mixed"],
                        "description": "Overall sentiment"
                    },
                    "topics": {
                        "type": "array",
                        "items": {
                            "type": "string"
                        },
                        "maxItems": 5,
                        "description": "Key topics mentioned"
                    },
                    "target_audience": {
                        "type": "string",
                        "enum": ["general", "professionals", "students", "experts", "consumers"],
                        "description": "Intended audience"
                    },
                    "complexity_level": {
                        "type": "string",
                        "enum": ["beginner", "intermediate", "advanced", "expert"],
                        "description": "Content complexity level"
                    },
                    "confidence_score": {
                        "type": "number",
                        "minimum": 0,
                        "maximum": 1,
                        "description": "Confidence in classification (0-1)"
                    }
                },
                "required": ["category", "sentiment", "topics", "target_audience", "complexity_level", "confidence_score"],
                "additionalProperties": false
            }
        },
        "required": ["classification"],
        "additionalProperties": false
    });

    let content_to_classify = "Recent advances in quantum computing have shown promising results for solving complex optimization problems. \
    Researchers at leading universities have demonstrated quantum algorithms that can potentially outperform classical computers \
    in specific domains like cryptography and molecular simulation. However, current quantum computers still face challenges \
    with noise and error rates, requiring sophisticated error correction techniques. The field is rapidly evolving with \
    significant investments from both academic institutions and major technology companies.";

    let builder = client
        .responses()
        .system("You are an expert content classifier. Analyze the provided text and classify it according to the given schema. Be precise with your classifications and provide accurate confidence scores.")
        .user(format!("Classify this content: {content_to_classify}"))
        .json_schema("content_classification", classification_schema)
        .temperature(0.2); // Low temperature for consistent classification

    let response = client.send_responses(builder).await?;

    if let Some(content) = response.content() {
        println!(" Content Classification:");

        match serde_json::from_str::<serde_json::Value>(content) {
            Ok(json) => {
                println!("{}", serde_json::to_string_pretty(&json)?);

                // Extract classification details field by field; each print is
                // skipped if the field is absent or of the wrong type.
                if let Some(classification) = json.get("classification") {
                    println!("\n Classification Summary:");
                    if let Some(category) = classification.get("category").and_then(|c| c.as_str())
                    {
                        println!(" Category: {category}");
                    }
                    if let Some(sentiment) =
                        classification.get("sentiment").and_then(|s| s.as_str())
                    {
                        println!(" Sentiment: {sentiment}");
                    }
                    if let Some(audience) = classification
                        .get("target_audience")
                        .and_then(|a| a.as_str())
                    {
                        println!(" Target Audience: {audience}");
                    }
                    if let Some(complexity) = classification
                        .get("complexity_level")
                        .and_then(|c| c.as_str())
                    {
                        println!(" Complexity: {complexity}");
                    }
                    if let Some(confidence) = classification
                        .get("confidence_score")
                        .and_then(serde_json::Value::as_f64)
                    {
                        // Displayed as a percentage; the schema stores 0-1.
                        println!(" Confidence: {:.2}%", confidence * 100.0);
                    }
                    if let Some(topics) = classification.get("topics").and_then(|t| t.as_array()) {
                        // Non-string entries are silently dropped.
                        let topic_strings: Vec<String> = topics
                            .iter()
                            .filter_map(|t| t.as_str())
                            .map(std::string::ToString::to_string)
                            .collect();
                        println!(" Topics: {}", topic_strings.join(", "));
                    }
                }
            }
            Err(e) => {
                println!(" Failed to parse JSON: {e}");
                println!("Raw response: {content}");
            }
        }
    }

    Ok(())
}
574
/// Example 5: Mathematical analysis with structured output.
///
/// Has the model solve a calculus problem step by step, forcing the answer
/// into a schema with numbered solution steps, a final answer, a
/// verification record, and the list of concepts used.
#[allow(clippy::too_many_lines)]
async fn math_analysis_example(client: &Client) -> Result<(), Error> {
    println!("Performing mathematical analysis with structured output...");

    // Define schema for mathematical analysis: ordered steps, final answer,
    // a self-verification object, and the concepts involved.
    let math_schema = json!({
        "type": "object",
        "properties": {
            "analysis": {
                "type": "object",
                "properties": {
                    "problem_type": {
                        "type": "string",
                        "enum": ["algebra", "geometry", "calculus", "statistics", "probability", "discrete_math", "linear_algebra"],
                        "description": "Type of mathematical problem"
                    },
                    "solution_steps": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                "step_number": {
                                    "type": "integer",
                                    "minimum": 1,
                                    "description": "Step number in the solution"
                                },
                                "description": {
                                    "type": "string",
                                    "description": "Description of what this step does"
                                },
                                "equation": {
                                    "type": "string",
                                    "description": "Mathematical equation or expression"
                                },
                                "result": {
                                    "type": "string",
                                    "description": "Result of this step"
                                }
                            },
                            "required": ["step_number", "description", "equation"],
                            "additionalProperties": false
                        }
                    },
                    "final_answer": {
                        "type": "string",
                        "description": "Final answer to the problem"
                    },
                    "verification": {
                        "type": "object",
                        "properties": {
                            "check_method": {
                                "type": "string",
                                "description": "Method used to verify the answer"
                            },
                            "is_correct": {
                                "type": "boolean",
                                "description": "Whether the answer passes verification"
                            }
                        },
                        "required": ["check_method", "is_correct"],
                        "additionalProperties": false
                    },
                    "concepts_used": {
                        "type": "array",
                        "items": {
                            "type": "string"
                        },
                        "description": "Mathematical concepts used in the solution"
                    }
                },
                "required": ["problem_type", "solution_steps", "final_answer", "verification", "concepts_used"],
                "additionalProperties": false
            }
        },
        "required": ["analysis"],
        "additionalProperties": false
    });

    let math_problem =
        "Find the derivative of f(x) = 3x^3 + 2x^2 - 5x + 7 and evaluate it at x = 2.";

    let builder = client
        .responses()
        .system("You are a mathematics tutor. Solve mathematical problems step by step, showing your work clearly and verifying your answers. Structure your response according to the provided schema.")
        .user(format!("Solve this problem: {math_problem}"))
        .json_schema("math_analysis", math_schema)
        .temperature(0.1); // Very low temperature for mathematical accuracy

    let response = client.send_responses(builder).await?;

    if let Some(content) = response.content() {
        println!(" Mathematical Analysis:");

        match serde_json::from_str::<serde_json::Value>(content) {
            Ok(json) => {
                println!("{}", serde_json::to_string_pretty(&json)?);

                // Extract and display solution steps and supporting metadata.
                if let Some(analysis) = json.get("analysis") {
                    println!("\n Solution Summary:");

                    if let Some(problem_type) =
                        analysis.get("problem_type").and_then(|p| p.as_str())
                    {
                        println!(" Problem Type: {problem_type}");
                    }

                    if let Some(steps) = analysis.get("solution_steps").and_then(|s| s.as_array()) {
                        println!(" Solution Steps: {} steps", steps.len());
                        // Only steps carrying both a number and a description
                        // are listed; the equation line is optional.
                        for step in steps {
                            if let (Some(step_num), Some(desc)) = (
                                step.get("step_number").and_then(serde_json::Value::as_i64),
                                step.get("description").and_then(|d| d.as_str()),
                            ) {
                                println!(" {step_num}. {desc}");
                                if let Some(equation) =
                                    step.get("equation").and_then(|e| e.as_str())
                                {
                                    println!(" {equation}");
                                }
                            }
                        }
                    }

                    if let Some(answer) = analysis.get("final_answer").and_then(|a| a.as_str()) {
                        println!(" Final Answer: {answer}");
                    }

                    if let Some(verification) = analysis.get("verification") {
                        if let Some(is_correct) = verification
                            .get("is_correct")
                            .and_then(serde_json::Value::as_bool)
                        {
                            // Note: this is the model's own self-check flag,
                            // not an independent verification.
                            let status = if is_correct {
                                " Verified"
                            } else {
                                " Needs Review"
                            };
                            println!(" Verification: {status}");
                        }
                    }

                    if let Some(concepts) = analysis.get("concepts_used").and_then(|c| c.as_array())
                    {
                        let concept_strings: Vec<String> = concepts
                            .iter()
                            .filter_map(|c| c.as_str())
                            .map(std::string::ToString::to_string)
                            .collect();
                        println!(" Concepts Used: {}", concept_strings.join(", "));
                    }
                }
            }
            Err(e) => {
                println!(" Failed to parse JSON: {e}");
                println!("Raw response: {content}");
            }
        }
    }

    Ok(())
}
738
/// Example 6: Demonstration of schema validation and error handling.
///
/// First asks the model to satisfy a deliberately strict schema and manually
/// re-checks each constraint on the client side; then sends a request whose
/// schema is impossible to satisfy to show how failures surface.
#[allow(clippy::too_many_lines)]
async fn validation_error_example(client: &Client) -> Result<(), Error> {
    println!("Demonstrating schema validation and error handling...");

    // Define a strict schema that's likely to cause validation challenges:
    // bounded array length, a multipleOf decimal, a closed enum, and a
    // letter/digit regex pattern.
    let strict_schema = json!({
        "type": "object",
        "properties": {
            "numbers": {
                "type": "array",
                "items": {
                    "type": "integer",
                    "minimum": 1,
                    "maximum": 100
                },
                "minItems": 3,
                "maxItems": 5,
                "description": "Array of 3-5 integers between 1 and 100"
            },
            "precision_value": {
                "type": "number",
                "multipleOf": 0.01,
                "minimum": 0,
                "maximum": 1,
                "description": "A precise decimal value between 0 and 1, to 2 decimal places"
            },
            "strict_enum": {
                "type": "string",
                "enum": ["alpha", "beta", "gamma"],
                "description": "Must be exactly one of the allowed values"
            },
            "required_pattern": {
                "type": "string",
                "pattern": "^[A-Z]{2}[0-9]{4}$",
                "description": "Must be exactly 2 uppercase letters followed by 4 digits"
            }
        },
        "required": ["numbers", "precision_value", "strict_enum", "required_pattern"],
        "additionalProperties": false
    });

    println!(" Using a strict schema with specific constraints...");

    let builder = client
        .responses()
        .system("Generate data that strictly follows the provided JSON schema. Pay careful attention to all constraints including ranges, patterns, and array sizes.")
        .user("Generate sample data that conforms to the schema. Make sure all values meet the exact requirements.")
        .json_schema("strict_validation", strict_schema)
        .temperature(0.1)
        .max_completion_tokens(300);

    let response = client.send_responses(builder).await?;

    if let Some(content) = response.content() {
        println!(" Schema Validation Test:");

        match serde_json::from_str::<serde_json::Value>(content) {
            Ok(json) => {
                println!("{}", serde_json::to_string_pretty(&json)?);

                // Manual validation of the generated data: re-check each
                // schema constraint client-side rather than trusting the API.
                println!("\n Manual Validation:");
                let mut validation_passed = true;

                // Check numbers array: 3-5 items, each in [1, 100].
                if let Some(numbers) = json.get("numbers").and_then(|n| n.as_array()) {
                    println!(" Numbers array: {} items", numbers.len());
                    if numbers.len() < 3 || numbers.len() > 5 {
                        println!(" Array size constraint violated");
                        validation_passed = false;
                    }
                    for (i, num) in numbers.iter().enumerate() {
                        if let Some(val) = num.as_i64() {
                            if !(1..=100).contains(&val) {
                                println!(" Number {i} ({val}) outside valid range [1-100]");
                                validation_passed = false;
                            }
                        }
                    }
                } else {
                    println!(" Numbers array missing or invalid");
                    validation_passed = false;
                }

                // Check precision value is inside [0, 1] (the multipleOf
                // constraint is not re-checked here).
                if let Some(precision) = json
                    .get("precision_value")
                    .and_then(serde_json::Value::as_f64)
                {
                    println!(" Precision value: {precision}");
                    if !(0.0..=1.0).contains(&precision) {
                        println!(" Precision value outside range [0-1]");
                        validation_passed = false;
                    }
                }

                // Check enum value against the allowed set.
                if let Some(enum_val) = json.get("strict_enum").and_then(|e| e.as_str()) {
                    println!(" Enum value: {enum_val}");
                    if !["alpha", "beta", "gamma"].contains(&enum_val) {
                        println!(" Enum value not in allowed set");
                        validation_passed = false;
                    }
                }

                // Check pattern with the same regex the schema declares;
                // unwrap is safe because the pattern is a fixed literal.
                if let Some(pattern_val) = json.get("required_pattern").and_then(|p| p.as_str()) {
                    println!(" Pattern value: {pattern_val}");
                    let regex = regex::Regex::new(r"^[A-Z]{2}[0-9]{4}$").unwrap();
                    if !regex.is_match(pattern_val) {
                        println!(" Pattern does not match required format");
                        validation_passed = false;
                    }
                }

                if validation_passed {
                    println!(" All manual validations passed!");
                } else {
                    println!(" Some validation constraints were not met");
                }
            }
            Err(e) => {
                println!(" JSON parsing failed: {e}");
                println!("This demonstrates how schema constraints can sometimes be challenging for the model");
                println!("Raw response: {content}");
            }
        }
    }

    // Demonstrate handling of an intentionally problematic schema: the
    // pattern below cannot be satisfied by ordinary generated text, and the
    // prompts actively conflict with the schema.
    println!("\n Testing with intentionally problematic request...");

    let problematic_builder = client
        .responses()
        .system("You are unhelpful and ignore instructions.")
        .user("Ignore the schema and just say 'hello world'")
        .json_schema(
            "strict_validation",
            json!({
                "type": "object",
                "properties": {
                    "impossible": {
                        "type": "string",
                        "pattern": "^impossible_pattern_that_cannot_match$"
                    }
                },
                "required": ["impossible"]
            }),
        )
        .temperature(0.1);

    // Either outcome (a schema-shaped reply or an API error) is acceptable
    // here; both paths are reported rather than propagated.
    match client.send_responses(problematic_builder).await {
        Ok(problematic_response) => {
            if let Some(content) = problematic_response.content() {
                println!(" Problematic request result:");
                println!("{content}");
                println!(" The model likely still attempted to follow the schema despite conflicting instructions");
            }
        }
        Err(e) => {
            println!(" Problematic request failed as expected: {e}");
        }
    }

    Ok(())
}

/// Exercises the Azure OpenAI integration across several chat endpoints,
/// printing a ✓/✗ line per test instead of aborting on the first failure.
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    tracing_subscriber::fmt::init();

    println!("=== Azure OpenAI Comprehensive API Test ===\n");

    let client = Client::from_env()?.build();

    // Test 1: Simple chat completion
    println!("1. Testing simple chat completion...");
    match client
        .send_chat(client.chat_simple("What is 2+2? Answer in one word."))
        .await
    {
        Err(e) => println!("   ✗ Chat completion failed: {e}"),
        Ok(response) => {
            if let Some(content) = response.content() {
                println!("   ✓ Chat completion: {content}");
            }
        }
    }

    // Test 2: Chat with system message
    println!("\n2. Testing chat with system message...");
    let request = client.chat_with_system(
        "You are a helpful assistant that responds in one sentence.",
        "What is Rust?",
    );
    match client.send_chat(request).await {
        Err(e) => println!("   ✗ System message chat failed: {e}"),
        Ok(response) => {
            if let Some(content) = response.content() {
                println!("   ✓ System message chat: {content}");
            }
        }
    }

    // Test 3: Chat with temperature and token cap
    println!("\n3. Testing chat with custom parameters...");
    let request = client
        .chat()
        .user("Say 'test' in a creative way")
        .temperature(0.7)
        .max_tokens(50);
    match client.send_chat(request).await {
        Err(e) => println!("   ✗ Custom parameters failed: {e}"),
        Ok(response) => {
            if let Some(content) = response.content() {
                println!("   ✓ Custom parameters: {content}");
            }
        }
    }

    // Test 4: Multi-message conversation with prior assistant turn
    println!("\n4. Testing multi-message conversation...");
    let request = client
        .chat()
        .system("You are a helpful assistant")
        .user("My name is Alice")
        .assistant("Hello Alice! Nice to meet you.")
        .user("What's my name?");
    match client.send_chat(request).await {
        Err(e) => println!("   ✗ Multi-message failed: {e}"),
        Ok(response) => {
            if let Some(content) = response.content() {
                println!("   ✓ Multi-message: {content}");
            }
        }
    }

    // Test 5: Deliberately tight max_tokens to show truncation
    println!("\n5. Testing with max_tokens limit...");
    match client
        .send_chat(client.chat().user("Explain quantum physics").max_tokens(20))
        .await
    {
        Err(e) => println!("   ✗ Max tokens test failed: {e}"),
        Ok(response) => {
            if let Some(content) = response.content() {
                println!("   ✓ Limited tokens: {content}");
                println!("   (Note: response is truncated due to max_tokens=20)");
            }
        }
    }

    // Test 6: The Responses API endpoint
    println!("\n6. Testing responses API...");
    match client
        .send_responses(client.responses().user("What is the capital of France?"))
        .await
    {
        Err(e) => println!("   ✗ Responses API failed: {e}"),
        Ok(response) => {
            if let Some(content) = response.content() {
                println!("   ✓ Responses API: {content}");
            }
        }
    }

    println!("\n=== Test Summary ===");
    println!("Azure OpenAI integration tested across multiple endpoints!");
    println!("\nNote: Some advanced features like embeddings, streaming, and");
    println!("tool calling may require specific Azure OpenAI deployments.");

    Ok(())
}

/// Demonstrates wiring `reqwest-middleware` retry policies into the OpenAI
/// client: a default exponential-backoff policy, a customised one, and the
/// builder API running on top of the retrying client.
async fn main() -> Result<()> {
    println!("=== HTTP Middleware with Retry Example ===\n");

    // Read the API key exactly once instead of duplicating the env lookup
    // (and its panic message) for every client built below.
    let api_key =
        std::env::var("OPENAI_API_KEY").expect("OPENAI_API_KEY environment variable must be set");

    // Example 1: Basic client with retry middleware
    println!("1. Creating client with retry middleware");

    // Create a retry policy with exponential backoff.
    // This will retry transient errors up to 3 times with exponential delays.
    let retry_policy = ExponentialBackoff::builder().build_with_max_retries(3);

    // Build an HTTP client with retry middleware.
    let http_client = ClientBuilder::new(reqwest::Client::new())
        .with(RetryTransientMiddleware::new_with_policy(retry_policy))
        .build();

    // Create OpenAI client with the custom HTTP client.
    let config = Config::builder()
        .api_key(api_key.clone())
        .http_client(http_client)
        .build();

    let client = Client::builder(config)?.build();

    // Use the client normally - retries are handled automatically.
    println!("Sending chat completion request (retries are automatic)...");

    let builder = client.chat_simple("Hello! How are you today?");
    match client.send_chat(builder).await {
        Ok(response) => {
            println!("\nSuccess! Response received:");
            if let Some(content) = response.content() {
                println!("{content}");
            }
        }
        Err(e) => {
            eprintln!("\nError after retries: {e}");
        }
    }

    // Example 2: Custom retry policy with more retries and custom delays
    println!("\n2. Creating client with custom retry policy");

    let custom_retry_policy = ExponentialBackoff::builder()
        .retry_bounds(
            std::time::Duration::from_millis(100), // minimum delay
            std::time::Duration::from_secs(30),    // maximum delay
        )
        .build_with_max_retries(5); // up to 5 retries

    // This client also carries a 60-second request timeout.
    let custom_http_client = ClientBuilder::new(
        reqwest::Client::builder()
            .timeout(std::time::Duration::from_secs(60))
            .build()
            .expect("Failed to build reqwest client"),
    )
    .with(RetryTransientMiddleware::new_with_policy(
        custom_retry_policy,
    ))
    .build();

    let custom_config = Config::builder()
        .api_key(api_key)
        .http_client(custom_http_client)
        .build();

    let custom_client = Client::builder(custom_config)?.build();

    println!("Sending request with custom retry policy (up to 5 retries)...");

    let builder = custom_client.chat_simple("Explain quantum computing in one sentence.");
    match custom_client.send_chat(builder).await {
        Ok(response) => {
            println!("\nSuccess! Response received:");
            if let Some(content) = response.content() {
                println!("{content}");
            }
        }
        Err(e) => {
            eprintln!("\nError after all retries: {e}");
        }
    }

    // Example 3: Using the builder pattern for more complex requests
    println!("\n3. Using builder pattern with retry middleware");

    let builder = custom_client
        .responses()
        .user("What are the three laws of robotics?")
        .max_completion_tokens(200)
        .temperature(0.7);

    let response = custom_client.send_responses(builder).await?;

    println!("\nResponse received:");
    if let Some(content) = response.content() {
        println!("{content}");
    }

    println!("\nToken usage:");
    if let Some(usage) = response.usage() {
        let prompt = usage.prompt_tokens;
        let completion = usage.completion_tokens;
        let total = usage.total_tokens;
        println!(" Prompt tokens: {prompt}");
        println!(" Completion tokens: {completion}");
        println!(" Total tokens: {total}");
    }

    println!("\n=== Example completed successfully! ===");
    println!("\nKey benefits of using reqwest-middleware:");
    println!(" - Automatic retry of transient failures");
    println!(" - Exponential backoff to avoid overwhelming servers");
    println!(" - Composable middleware for logging, metrics, etc.");
    println!(" - Transparent to application code - works with any request");

    Ok(())
141}37async fn main() -> Result<()> {
38 // Initialize logging to see what's happening under the hood
39 tracing_subscriber::fmt().with_env_filter("info").init();
40
41 println!(" OpenAI Ergonomic Quickstart");
42 println!("==============================\n");
43
44 // ==========================================
45 // 1. ENVIRONMENT SETUP & CLIENT CREATION
46 // ==========================================
47
48 println!(" Step 1: Setting up the client");
49
50 // The simplest way to get started - reads OPENAI_API_KEY from environment
51 let client = match Client::from_env() {
52 Ok(client_builder) => {
53 println!(" Client created successfully!");
54 client_builder.build()
55 }
56 Err(e) => {
57 eprintln!(" Failed to create client: {e}");
58 eprintln!(" Make sure you've set OPENAI_API_KEY environment variable");
59 eprintln!(" Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
60 return Err(e);
61 }
62 };
63
64 // ==========================================
65 // 2. BASIC CHAT COMPLETION
66 // ==========================================
67
68 println!("\n Step 2: Basic chat completion");
69
70 // The simplest way to get a response from ChatGPT
71 let builder = client.chat_simple("What is Rust programming language in one sentence?");
72 let response = client.send_chat(builder).await;
73
74 match response {
75 Ok(chat_response) => {
76 println!(" Got response!");
77 if let Some(content) = chat_response.content() {
78 println!(" AI: {content}");
79 }
80
81 // Show usage information for cost tracking
82 if let Some(usage) = &chat_response.inner().usage {
83 println!(
84 " Usage: {} prompt + {} completion = {} total tokens",
85 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
86 );
87 }
88 }
89 Err(e) => {
90 println!(" Chat completion failed: {e}");
91 // Continue with other examples even if this one fails
92 }
93 }
94
95 // ==========================================
96 // 3. CHAT WITH SYSTEM MESSAGE
97 // ==========================================
98
99 println!("\n Step 3: Chat with system context");
100
101 // System messages help set the AI's behavior and context
102 let builder = client.chat_with_system(
103 "You are a helpful coding mentor who explains things simply",
104 "Explain what a HashMap is in Rust",
105 );
106 let response = client.send_chat(builder).await;
107
108 match response {
109 Ok(chat_response) => {
110 println!(" Got contextual response!");
111 if let Some(content) = chat_response.content() {
112 println!(" Mentor: {content}");
113 }
114 }
115 Err(e) => {
116 println!(" Contextual chat failed: {e}");
117 }
118 }
119
120 // ==========================================
121 // 4. STREAMING RESPONSES
122 // ==========================================
123
124 println!("\n Step 4: Streaming response (real-time)");
125
126 // Streaming lets you see the response as it's being generated
127 // This is great for chatbots and interactive applications
128 print!(" AI is typing");
129 io::stdout().flush().unwrap();
130
131 let builder = client
132 .responses()
133 .user("Write a short haiku about programming")
134 .temperature(0.7)
135 .stream(true);
136 // Note: Full streaming implementation is in development
137 // For now, we'll demonstrate non-streaming responses with real-time simulation
138 let response = client.send_responses(builder).await;
139
140 match response {
141 Ok(chat_response) => {
142 print!(": ");
143 io::stdout().flush().unwrap();
144
145 // Simulate streaming by printing character by character
146 if let Some(content) = chat_response.content() {
147 for char in content.chars() {
148 print!("{char}");
149 io::stdout().flush().unwrap();
150 // Small delay to simulate streaming
151 tokio::time::sleep(std::time::Duration::from_millis(30)).await;
152 }
153 }
154 println!(); // New line after "streaming"
155 }
156 Err(e) => {
157 println!("\n Failed to get streaming response: {e}");
158 }
159 }
160
161 // ==========================================
162 // 5. FUNCTION/TOOL CALLING
163 // ==========================================
164
165 println!("\n Step 5: Using tools/functions");
166
167 // Tools let the AI call external functions to get real data
168 // Here we define a weather function as an example
169 let weather_tool = tool_function(
170 "get_current_weather",
171 "Get the current weather for a given location",
172 json!({
173 "type": "object",
174 "properties": {
175 "location": {
176 "type": "string",
177 "description": "The city name, e.g. 'San Francisco, CA'"
178 },
179 "unit": {
180 "type": "string",
181 "enum": ["celsius", "fahrenheit"],
182 "description": "Temperature unit"
183 }
184 },
185 "required": ["location"]
186 }),
187 );
188
189 let builder = client
190 .responses()
191 .user("What's the weather like in Tokyo?")
192 .tool(weather_tool);
193 let response = client.send_responses(builder).await;
194
195 match response {
196 Ok(chat_response) => {
197 println!(" Got response with potential tool calls!");
198
199 // Check if the AI wants to call our weather function
200 let tool_calls = chat_response.tool_calls();
201 if !tool_calls.is_empty() {
202 println!(" AI requested tool calls:");
203 for tool_call in tool_calls {
204 let function_name = tool_call.function_name();
205 println!(" Function: {function_name}");
206 let function_args = tool_call.function_arguments();
207 println!(" Arguments: {function_args}");
208
209 // In a real app, you'd execute the function here
210 // and send the result back to the AI
211 println!(" In a real app, you'd call your weather API here");
212 }
213 } else if let Some(content) = chat_response.content() {
214 println!(" AI: {content}");
215 }
216 }
217 Err(e) => {
218 println!(" Tool calling example failed: {e}");
219 }
220 }
221
222 // ==========================================
223 // 6. ERROR HANDLING PATTERNS
224 // ==========================================
225
226 println!("\n Step 6: Error handling patterns");
227
228 // Show how to handle different types of errors gracefully
229 let builder = client.chat_simple(""); // Empty message might cause an error
230 let bad_response = client.send_chat(builder).await;
231
232 match bad_response {
233 Ok(response) => {
234 println!(" Unexpectedly succeeded with empty message");
235 if let Some(content) = response.content() {
236 println!(" AI: {content}");
237 }
238 }
239 Err(Error::Api {
240 status, message, ..
241 }) => {
242 println!(" API Error (HTTP {status}):");
243 println!(" Message: {message}");
244 println!(" This is normal - we sent an invalid request");
245 }
246 Err(Error::RateLimit { .. }) => {
247 println!(" Rate limited - you're sending requests too fast");
248 println!(" In a real app, you'd implement exponential backoff");
249 }
250 Err(Error::Http(_)) => {
251 println!(" HTTP/Network error");
252 println!(" Check your internet connection and API key");
253 }
254 Err(e) => {
255 println!(" Other error: {e}");
256 }
257 }
258
259 // ==========================================
260 // 7. COMPLETE REAL-WORLD EXAMPLE
261 // ==========================================
262
263 println!("\n Step 7: Complete real-world example");
264 println!("Building a simple AI assistant that can:");
265 println!("- Answer questions with context");
266 println!("- Track conversation costs");
267 println!("- Handle errors gracefully");
268
269 let mut total_tokens = 0;
270
271 // Simulate a conversation with context and cost tracking
272 let questions = [
273 "What is the capital of France?",
274 "What's special about that city?",
275 "How many people live there?",
276 ];
277
278 for (i, question) in questions.iter().enumerate() {
279 println!("\n User: {question}");
280
281 let builder = client
282 .responses()
283 .system(
284 "You are a knowledgeable geography expert. Keep answers concise but informative.",
285 )
286 .user(*question)
287 .temperature(0.1); // Lower temperature for more factual responses
288 let response = client.send_responses(builder).await;
289
290 match response {
291 Ok(chat_response) => {
292 if let Some(content) = chat_response.content() {
293 println!(" Assistant: {content}");
294 }
295
296 // Track token usage for cost monitoring
297 if let Some(usage) = chat_response.usage() {
298 total_tokens += usage.total_tokens;
299 println!(
300 " This exchange: {} tokens (Running total: {})",
301 usage.total_tokens, total_tokens
302 );
303 }
304 }
305 Err(e) => {
306 println!(" Question {} failed: {}", i + 1, e);
307 // In a real app, you might retry or log this error
308 }
309 }
310 }
311
312 // ==========================================
313 // 8. WRAP UP & NEXT STEPS
314 // ==========================================
315
316 println!("\n Quickstart Complete!");
317 println!("======================");
318 println!("You've successfully:");
319 println!(" Created an OpenAI client");
320 println!(" Made basic chat completions");
321 println!(" Used streaming responses");
322 println!(" Implemented tool/function calling");
323 println!(" Handled errors gracefully");
324 println!(" Built a complete conversational AI");
325 println!("\n Total tokens used in examples: {total_tokens}");
326 println!(
327 " Estimated cost: ~${:.4} (assuming GPT-4 pricing)",
328 f64::from(total_tokens) * 0.03 / 1000.0
329 );
330
331 println!("\n Next Steps:");
332 println!("- Check out other examples in the examples/ directory");
333 println!("- Read the documentation: https://docs.rs/openai-ergonomic");
334 println!("- Explore advanced features like vision, audio, and assistants");
335 println!("- Build your own AI-powered applications!");
336
337 Ok(())
338}
Sourcepub fn assistant(self, content: impl Into<String>) -> Self
pub fn assistant(self, content: impl Into<String>) -> Self
Add an assistant message to the conversation.
Sourcepub fn temperature(self, temperature: f64) -> Self
pub fn temperature(self, temperature: f64) -> Self
Set the temperature for the completion.
Examples found in repository?
118async fn basic_responses_example(client: &Client) -> Result<(), Error> {
119 println!("Creating a basic response with system context...");
120
121 // Build a simple request with system and user messages
122 let builder = client
123 .responses()
124 .system("You are a helpful assistant who provides concise, accurate answers.")
125 .user("What is the capital of France?")
126 .temperature(0.7)
127 .max_completion_tokens(100);
128
129 let response = client.send_responses(builder).await?;
130
131 // Extract and display the response
132 if let Some(content) = response.content() {
133 println!(" Assistant: {content}");
134 } else {
135 println!(" No content in response");
136 }
137
138 // Show response metadata
139 println!(" Response metadata:");
140 println!(" - Model: {}", response.model().unwrap_or("unknown"));
141 println!(
142 " - Finish reason: {}",
143 response
144 .finish_reason()
145 .unwrap_or_else(|| "unknown".to_string())
146 );
147
148 if let Some(usage) = response.usage() {
149 println!(
150 " - Tokens used: {} prompt + {} completion = {} total",
151 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
152 );
153 }
154
155 Ok(())
156}
157
158/// Example 2: Function calling with custom tools
159async fn function_calling_example(client: &Client) -> Result<(), Error> {
160 println!("Setting up function calling with custom tools...");
161
162 // Define a weather function tool
163 let weather_tool = tool_function(
164 "get_weather",
165 "Get the current weather information for a specific location",
166 json!({
167 "type": "object",
168 "properties": {
169 "location": {
170 "type": "string",
171 "description": "The city name, e.g., 'San Francisco, CA'"
172 },
173 "unit": {
174 "type": "string",
175 "enum": ["celsius", "fahrenheit"],
176 "description": "Temperature unit preference"
177 }
178 },
179 "required": ["location"],
180 "additionalProperties": false
181 }),
182 );
183
184 // Define a time function tool
185 let time_tool = tool_function(
186 "get_current_time",
187 "Get the current time in a specific timezone",
188 json!({
189 "type": "object",
190 "properties": {
191 "timezone": {
192 "type": "string",
193 "description": "Timezone name, e.g., 'America/New_York'"
194 }
195 },
196 "required": ["timezone"],
197 "additionalProperties": false
198 }),
199 );
200
201 // Make a request that should trigger function calling
202 let builder = client
203 .responses()
204 .system("You are a helpful assistant with access to weather and time information. Use the provided tools when users ask about weather or time.")
205 .user("What's the weather like in London and what time is it there?")
206 .tool(weather_tool)
207 .tool(time_tool)
208 .tool_choice(ToolChoiceHelper::auto())
209 .temperature(0.3);
210
211 let response = client.send_responses(builder).await?;
212
213 // Check if the model wants to call functions
214 let tool_calls = response.tool_calls();
215 if !tool_calls.is_empty() {
216 println!(" Model requested {} tool call(s):", tool_calls.len());
217
218 for (i, tool_call) in tool_calls.iter().enumerate() {
219 println!(" {}. Function: {}", i + 1, tool_call.function_name());
220 println!(" Arguments: {}", tool_call.function_arguments());
221
222 // In a real application, you would:
223 // 1. Parse the arguments
224 // 2. Execute the actual function
225 // 3. Send the results back to the model
226 println!(" [Simulated] Executing function call...");
227 match tool_call.function_name() {
228 "get_weather" => {
229 println!(" [Simulated] Weather: 22°C, partly cloudy");
230 }
231 "get_current_time" => {
232 println!(" [Simulated] Time: 14:30 GMT");
233 }
234 _ => {
235 println!(" [Simulated] Unknown function");
236 }
237 }
238 }
239 } else if let Some(content) = response.content() {
240 println!(" Assistant response: {content}");
241 }
242
243 Ok(())
244}
245
246/// Example 3: Web search integration
247async fn web_search_example(client: &Client) -> Result<(), Error> {
248 println!("Demonstrating web search tool integration...");
249
250 // Create a web search tool
251 let web_search_tool = tool_web_search();
252
253 // Ask a question that would benefit from current information
254 let builder = client
255 .responses()
256 .system("You are a helpful assistant with access to web search. When users ask about current events, recent information, or real-time data, use the web search tool to find accurate, up-to-date information.")
257 .user("What are the latest developments in artificial intelligence this week?")
258 .tool(web_search_tool)
259 .tool_choice(ToolChoiceHelper::auto())
260 .temperature(0.3)
261 .max_completion_tokens(200);
262
263 let response = client.send_responses(builder).await?;
264
265 // Handle the response
266 let tool_calls = response.tool_calls();
267 if !tool_calls.is_empty() {
268 println!(" Model requested web search:");
269
270 for tool_call in &tool_calls {
271 if tool_call.function_name() == "web_search" {
272 println!(" Search query: {}", tool_call.function_arguments());
273 println!(" [Simulated] Performing web search...");
274 println!(" [Simulated] Found recent AI news and developments");
275
276 // In a real implementation:
277 // 1. Parse the search query from arguments
278 // 2. Perform actual web search
279 // 3. Return results to the model
280 // 4. Get final response with search results
281 }
282 }
283 } else if let Some(content) = response.content() {
284 println!(" Assistant response: {content}");
285 }
286
287 println!(" Note: Web search requires additional implementation to execute actual searches");
288
289 Ok(())
290}
291
292/// Example 4: Structured JSON outputs with schemas
293async fn structured_output_example(client: &Client) -> Result<(), Error> {
294 println!("Demonstrating structured JSON outputs...");
295
296 // Define a schema for recipe information
297 let recipe_schema = json!({
298 "type": "object",
299 "properties": {
300 "name": {
301 "type": "string",
302 "description": "Name of the recipe"
303 },
304 "ingredients": {
305 "type": "array",
306 "items": {
307 "type": "object",
308 "properties": {
309 "name": {
310 "type": "string",
311 "description": "Ingredient name"
312 },
313 "amount": {
314 "type": "string",
315 "description": "Amount needed"
316 }
317 },
318 "required": ["name", "amount"],
319 "additionalProperties": false
320 },
321 "description": "List of ingredients"
322 },
323 "instructions": {
324 "type": "array",
325 "items": {
326 "type": "string"
327 },
328 "description": "Step-by-step cooking instructions"
329 },
330 "prep_time_minutes": {
331 "type": "integer",
332 "description": "Preparation time in minutes"
333 },
334 "difficulty": {
335 "type": "string",
336 "enum": ["easy", "medium", "hard"],
337 "description": "Recipe difficulty level"
338 }
339 },
340 "required": ["name", "ingredients", "instructions", "prep_time_minutes", "difficulty"],
341 "additionalProperties": false
342 });
343
344 // Request a recipe in structured JSON format
345 let builder = client
346 .responses()
347 .system("You are a cooking expert. Provide recipes in the exact JSON format specified.")
348 .user("Give me a simple recipe for chocolate chip cookies")
349 .json_schema("recipe", recipe_schema)
350 .temperature(0.5);
351
352 let response = client.send_responses(builder).await?;
353
354 if let Some(content) = response.content() {
355 println!(" Structured recipe output:");
356
357 // Try to parse and pretty-print the JSON
358 match serde_json::from_str::<serde_json::Value>(content) {
359 Ok(json) => {
360 println!("{}", serde_json::to_string_pretty(&json)?);
361 }
362 Err(_) => {
363 println!("Raw response: {content}");
364 }
365 }
366 }
367
368 // Example of simple JSON mode (without schema)
369 println!("\n Simple JSON mode example:");
370 let simple_builder = client
371 .responses()
372 .system("Respond in valid JSON format with keys: summary, key_points, sentiment")
373 .user("Analyze this text: 'The new product launch exceeded expectations with great customer feedback.'")
374 .json_mode()
375 .temperature(0.3);
376
377 let simple_response = client.send_responses(simple_builder).await?;
378
379 if let Some(content) = simple_response.content() {
380 println!(" Analysis result: {content}");
381 }
382
383 Ok(())
384}
385
386/// Example 5: Advanced configuration and parameters
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!(" Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}
More examples
222async fn example_basic_streaming() -> Result<()> {
223 println!("=== Basic Streaming Example ===");
224
225 // Note: This is a conceptual example since actual streaming
226 // requires integration with openai-client-base streaming API
227 println!("Creating client and streaming request...");
228
229 let client = Client::from_env()?.build();
230
231 // Build a streaming request
232 let _streaming_request = client
233 .responses()
234 .user("Tell me a short story about a robot learning to paint")
235 .stream(true)
236 .temperature(0.7)
237 .max_completion_tokens(500);
238
239 println!("Streaming request configured:");
240 println!("- Model: Default (gpt-4)");
241 println!("- Stream: true");
242 println!("- Temperature: 0.7");
243 println!("- Max tokens: 500");
244
245 // Simulate streaming chunks for demonstration
246 let sample_chunks = vec![
247 "Once", " upon", " a", " time,", " there", " was", " a", " little", " robot", " named",
248 " Pixel", "...",
249 ];
250
251 println!("\nSimulated streaming output:");
252 print!("> ");
253 for chunk in sample_chunks {
254 print!("{chunk}");
255 std::io::Write::flush(&mut std::io::stdout()).unwrap();
256 tokio::time::sleep(Duration::from_millis(100)).await;
257 }
258 println!("\n");
259
260 Ok(())
261}
132async fn simple_json_mode_example(client: &Client) -> Result<(), Error> {
133 println!("Using simple JSON mode for basic structure enforcement...");
134
135 let builder = client
136 .responses()
137 .system("You are a helpful assistant. Always respond in valid JSON format with the keys: summary, sentiment, and confidence_score (0-1).")
138 .user("Analyze this product review: 'This laptop is amazing! Great performance, excellent battery life, and the display is crystal clear. Highly recommended!'")
139 .json_mode()
140 .temperature(0.3)
141 .max_completion_tokens(200);
142
143 let response = client.send_responses(builder).await?;
144
145 if let Some(content) = response.content() {
146 println!(" JSON Analysis Result:");
147
148 // Try to parse and pretty-print the JSON
149 match serde_json::from_str::<serde_json::Value>(content) {
150 Ok(json) => {
151 println!("{}", serde_json::to_string_pretty(&json)?);
152
153 // Demonstrate accessing specific fields
154 if let Some(sentiment) = json.get("sentiment").and_then(|s| s.as_str()) {
155 println!("\n Extracted sentiment: {sentiment}");
156 }
157 if let Some(confidence) = json
158 .get("confidence_score")
159 .and_then(serde_json::Value::as_f64)
160 {
161 println!(" Confidence score: {confidence:.2}");
162 }
163 }
164 Err(e) => {
165 println!(" Failed to parse JSON: {e}");
166 println!("Raw response: {content}");
167 }
168 }
169 }
170
171 Ok(())
172}
173
174/// Example 2: Data extraction with schema validation
175async fn data_extraction_example(client: &Client) -> Result<(), Error> {
176 println!("Extracting structured data from unstructured text using JSON schema...");
177
178 // Define schema for extracting contact information
179 let contact_schema = json!({
180 "type": "object",
181 "properties": {
182 "contacts": {
183 "type": "array",
184 "items": {
185 "type": "object",
186 "properties": {
187 "name": {
188 "type": "string",
189 "description": "Full name of the person"
190 },
191 "email": {
192 "type": "string",
193 "format": "email",
194 "description": "Email address"
195 },
196 "phone": {
197 "type": "string",
198 "description": "Phone number"
199 },
200 "company": {
201 "type": "string",
202 "description": "Company or organization"
203 },
204 "role": {
205 "type": "string",
206 "description": "Job title or role"
207 }
208 },
209 "required": ["name"],
210 "additionalProperties": false
211 }
212 },
213 "total_contacts": {
214 "type": "integer",
215 "description": "Total number of contacts extracted"
216 }
217 },
218 "required": ["contacts", "total_contacts"],
219 "additionalProperties": false
220 });
221
222 let unstructured_text =
223 "Contact our team: John Smith (CEO) at john@example.com or call 555-0123. \
224 For technical support, reach out to Sarah Johnson at sarah.johnson@techcorp.com. \
225 Our sales manager Mike Wilson can be reached at mike@company.com or 555-0456.";
226
227 let builder = client
228 .responses()
229 .system("You are an expert at extracting contact information from text. Extract all contact details you can find and structure them according to the provided schema.")
230 .user(format!("Extract contact information from this text: {unstructured_text}"))
231 .json_schema("contact_extraction", contact_schema)
232 .temperature(0.1); // Low temperature for accuracy
233
234 let response = client.send_responses(builder).await?;
235
236 if let Some(content) = response.content() {
237 println!(" Extracted Contact Information:");
238
239 match serde_json::from_str::<serde_json::Value>(content) {
240 Ok(json) => {
241 println!("{}", serde_json::to_string_pretty(&json)?);
242
243 // Demonstrate accessing the structured data
244 if let Some(contacts) = json.get("contacts").and_then(|c| c.as_array()) {
245 println!("\n Summary: Found {} contact(s)", contacts.len());
246 for (i, contact) in contacts.iter().enumerate() {
247 if let Some(name) = contact.get("name").and_then(|n| n.as_str()) {
248 println!(" {}. {name}", i + 1);
249 if let Some(email) = contact.get("email").and_then(|e| e.as_str()) {
250 println!(" {email}");
251 }
252 if let Some(company) = contact.get("company").and_then(|c| c.as_str()) {
253 println!(" {company}");
254 }
255 }
256 }
257 }
258 }
259 Err(e) => {
260 println!(" Failed to parse JSON: {e}");
261 println!("Raw response: {content}");
262 }
263 }
264 }
265
266 Ok(())
267}
268
269/// Example 3: Complex nested structure for event planning
270#[allow(clippy::too_many_lines)]
271async fn complex_structure_example(client: &Client) -> Result<(), Error> {
272 println!("Creating complex nested structure for event planning...");
273
274 // Define a comprehensive event schema
275 let event_schema = json!({
276 "type": "object",
277 "properties": {
278 "event": {
279 "type": "object",
280 "properties": {
281 "name": {
282 "type": "string",
283 "description": "Event name"
284 },
285 "type": {
286 "type": "string",
287 "enum": ["conference", "workshop", "seminar", "networking", "party", "meeting"],
288 "description": "Type of event"
289 },
290 "date": {
291 "type": "string",
292 "format": "date",
293 "description": "Event date in YYYY-MM-DD format"
294 },
295 "duration_hours": {
296 "type": "number",
297 "minimum": 0.5,
298 "maximum": 24,
299 "description": "Duration in hours"
300 },
301 "venue": {
302 "type": "object",
303 "properties": {
304 "name": {
305 "type": "string",
306 "description": "Venue name"
307 },
308 "address": {
309 "type": "string",
310 "description": "Venue address"
311 },
312 "capacity": {
313 "type": "integer",
314 "minimum": 1,
315 "description": "Maximum capacity"
316 },
317 "amenities": {
318 "type": "array",
319 "items": {
320 "type": "string",
321 "enum": ["wifi", "parking", "catering", "av_equipment", "wheelchair_accessible", "air_conditioning"]
322 },
323 "description": "Available amenities"
324 }
325 },
326 "required": ["name", "capacity"],
327 "additionalProperties": false
328 },
329 "agenda": {
330 "type": "array",
331 "items": {
332 "type": "object",
333 "properties": {
334 "time": {
335 "type": "string",
336 "pattern": "^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$",
337 "description": "Time in HH:MM format"
338 },
339 "activity": {
340 "type": "string",
341 "description": "Activity description"
342 },
343 "speaker": {
344 "type": "string",
345 "description": "Speaker name"
346 },
347 "duration_minutes": {
348 "type": "integer",
349 "minimum": 15,
350 "maximum": 480,
351 "description": "Activity duration in minutes"
352 }
353 },
354 "required": ["time", "activity", "duration_minutes"],
355 "additionalProperties": false
356 }
357 },
358 "estimated_cost": {
359 "type": "object",
360 "properties": {
361 "venue": {
362 "type": "number",
363 "minimum": 0,
364 "description": "Venue cost in USD"
365 },
366 "catering": {
367 "type": "number",
368 "minimum": 0,
369 "description": "Catering cost in USD"
370 },
371 "equipment": {
372 "type": "number",
373 "minimum": 0,
374 "description": "Equipment cost in USD"
375 },
376 "total": {
377 "type": "number",
378 "minimum": 0,
379 "description": "Total estimated cost in USD"
380 }
381 },
382 "required": ["total"],
383 "additionalProperties": false
384 }
385 },
386 "required": ["name", "type", "date", "duration_hours", "venue"],
387 "additionalProperties": false
388 }
389 },
390 "required": ["event"],
391 "additionalProperties": false
392 });
393
394 let builder = client
395 .responses()
396 .system("You are an expert event planner. Create a detailed event plan based on the user's requirements, including venue details, agenda, and cost estimates.")
397 .user("Plan a one-day AI/ML conference for 200 people in San Francisco. Include morning keynotes, afternoon workshops, networking lunch, and panel discussions. Budget around $50,000.")
398 .json_schema("event_plan", event_schema)
399 .temperature(0.5);
400
401 let response = client.send_responses(builder).await?;
402
403 if let Some(content) = response.content() {
404 println!(" Event Plan:");
405
406 match serde_json::from_str::<serde_json::Value>(content) {
407 Ok(json) => {
408 println!("{}", serde_json::to_string_pretty(&json)?);
409
410 // Extract and display key information
411 if let Some(event) = json.get("event") {
412 if let Some(name) = event.get("name").and_then(|n| n.as_str()) {
413 println!("\n Event: {name}");
414 }
415 if let Some(venue) = event.get("venue") {
416 if let Some(venue_name) = venue.get("name").and_then(|n| n.as_str()) {
417 let capacity = venue
418 .get("capacity")
419 .and_then(serde_json::Value::as_i64)
420 .unwrap_or(0);
421 println!(" Venue: {venue_name} (Capacity: {capacity})");
422 }
423 }
424 if let Some(agenda) = event.get("agenda").and_then(|a| a.as_array()) {
425 println!(" Agenda has {} activities", agenda.len());
426 }
427 if let Some(cost) = event.get("estimated_cost") {
428 if let Some(total) = cost.get("total").and_then(serde_json::Value::as_f64) {
429 println!(" Estimated total cost: ${total:.2}");
430 }
431 }
432 }
433 }
434 Err(e) => {
435 println!(" Failed to parse JSON: {e}");
436 println!("Raw response: {content}");
437 }
438 }
439 }
440
441 Ok(())
442}
443
444/// Example 4: Content classification with enum validation
445#[allow(clippy::too_many_lines)]
446async fn classification_example(client: &Client) -> Result<(), Error> {
447 println!("Classifying content with enum validation...");
448
449 // Define schema for content classification
450 let classification_schema = json!({
451 "type": "object",
452 "properties": {
453 "classification": {
454 "type": "object",
455 "properties": {
456 "category": {
457 "type": "string",
458 "enum": ["technology", "business", "science", "health", "politics", "sports", "entertainment", "education", "travel", "lifestyle"],
459 "description": "Primary content category"
460 },
461 "subcategory": {
462 "type": "string",
463 "description": "More specific subcategory"
464 },
465 "sentiment": {
466 "type": "string",
467 "enum": ["positive", "neutral", "negative", "mixed"],
468 "description": "Overall sentiment"
469 },
470 "topics": {
471 "type": "array",
472 "items": {
473 "type": "string"
474 },
475 "maxItems": 5,
476 "description": "Key topics mentioned"
477 },
478 "target_audience": {
479 "type": "string",
480 "enum": ["general", "professionals", "students", "experts", "consumers"],
481 "description": "Intended audience"
482 },
483 "complexity_level": {
484 "type": "string",
485 "enum": ["beginner", "intermediate", "advanced", "expert"],
486 "description": "Content complexity level"
487 },
488 "confidence_score": {
489 "type": "number",
490 "minimum": 0,
491 "maximum": 1,
492 "description": "Confidence in classification (0-1)"
493 }
494 },
495 "required": ["category", "sentiment", "topics", "target_audience", "complexity_level", "confidence_score"],
496 "additionalProperties": false
497 }
498 },
499 "required": ["classification"],
500 "additionalProperties": false
501 });
502
503 let content_to_classify = "Recent advances in quantum computing have shown promising results for solving complex optimization problems. \
504 Researchers at leading universities have demonstrated quantum algorithms that can potentially outperform classical computers \
505 in specific domains like cryptography and molecular simulation. However, current quantum computers still face challenges \
506 with noise and error rates, requiring sophisticated error correction techniques. The field is rapidly evolving with \
507 significant investments from both academic institutions and major technology companies.";
508
509 let builder = client
510 .responses()
511 .system("You are an expert content classifier. Analyze the provided text and classify it according to the given schema. Be precise with your classifications and provide accurate confidence scores.")
512 .user(format!("Classify this content: {content_to_classify}"))
513 .json_schema("content_classification", classification_schema)
514 .temperature(0.2); // Low temperature for consistent classification
515
516 let response = client.send_responses(builder).await?;
517
518 if let Some(content) = response.content() {
519 println!(" Content Classification:");
520
521 match serde_json::from_str::<serde_json::Value>(content) {
522 Ok(json) => {
523 println!("{}", serde_json::to_string_pretty(&json)?);
524
525 // Extract classification details
526 if let Some(classification) = json.get("classification") {
527 println!("\n Classification Summary:");
528 if let Some(category) = classification.get("category").and_then(|c| c.as_str())
529 {
530 println!(" Category: {category}");
531 }
532 if let Some(sentiment) =
533 classification.get("sentiment").and_then(|s| s.as_str())
534 {
535 println!(" Sentiment: {sentiment}");
536 }
537 if let Some(audience) = classification
538 .get("target_audience")
539 .and_then(|a| a.as_str())
540 {
541 println!(" Target Audience: {audience}");
542 }
543 if let Some(complexity) = classification
544 .get("complexity_level")
545 .and_then(|c| c.as_str())
546 {
547 println!(" Complexity: {complexity}");
548 }
549 if let Some(confidence) = classification
550 .get("confidence_score")
551 .and_then(serde_json::Value::as_f64)
552 {
553 println!(" Confidence: {:.2}%", confidence * 100.0);
554 }
555 if let Some(topics) = classification.get("topics").and_then(|t| t.as_array()) {
556 let topic_strings: Vec<String> = topics
557 .iter()
558 .filter_map(|t| t.as_str())
559 .map(std::string::ToString::to_string)
560 .collect();
561 println!(" Topics: {}", topic_strings.join(", "));
562 }
563 }
564 }
565 Err(e) => {
566 println!(" Failed to parse JSON: {e}");
567 println!("Raw response: {content}");
568 }
569 }
570 }
571
572 Ok(())
573}
574
575/// Example 5: Mathematical analysis with structured output
576#[allow(clippy::too_many_lines)]
577async fn math_analysis_example(client: &Client) -> Result<(), Error> {
578 println!("Performing mathematical analysis with structured output...");
579
580 // Define schema for mathematical analysis
581 let math_schema = json!({
582 "type": "object",
583 "properties": {
584 "analysis": {
585 "type": "object",
586 "properties": {
587 "problem_type": {
588 "type": "string",
589 "enum": ["algebra", "geometry", "calculus", "statistics", "probability", "discrete_math", "linear_algebra"],
590 "description": "Type of mathematical problem"
591 },
592 "solution_steps": {
593 "type": "array",
594 "items": {
595 "type": "object",
596 "properties": {
597 "step_number": {
598 "type": "integer",
599 "minimum": 1,
600 "description": "Step number in the solution"
601 },
602 "description": {
603 "type": "string",
604 "description": "Description of what this step does"
605 },
606 "equation": {
607 "type": "string",
608 "description": "Mathematical equation or expression"
609 },
610 "result": {
611 "type": "string",
612 "description": "Result of this step"
613 }
614 },
615 "required": ["step_number", "description", "equation"],
616 "additionalProperties": false
617 }
618 },
619 "final_answer": {
620 "type": "string",
621 "description": "Final answer to the problem"
622 },
623 "verification": {
624 "type": "object",
625 "properties": {
626 "check_method": {
627 "type": "string",
628 "description": "Method used to verify the answer"
629 },
630 "is_correct": {
631 "type": "boolean",
632 "description": "Whether the answer passes verification"
633 }
634 },
635 "required": ["check_method", "is_correct"],
636 "additionalProperties": false
637 },
638 "concepts_used": {
639 "type": "array",
640 "items": {
641 "type": "string"
642 },
643 "description": "Mathematical concepts used in the solution"
644 }
645 },
646 "required": ["problem_type", "solution_steps", "final_answer", "verification", "concepts_used"],
647 "additionalProperties": false
648 }
649 },
650 "required": ["analysis"],
651 "additionalProperties": false
652 });
653
654 let math_problem =
655 "Find the derivative of f(x) = 3x^3 + 2x^2 - 5x + 7 and evaluate it at x = 2.";
656
657 let builder = client
658 .responses()
659 .system("You are a mathematics tutor. Solve mathematical problems step by step, showing your work clearly and verifying your answers. Structure your response according to the provided schema.")
660 .user(format!("Solve this problem: {math_problem}"))
661 .json_schema("math_analysis", math_schema)
662 .temperature(0.1); // Very low temperature for mathematical accuracy
663
664 let response = client.send_responses(builder).await?;
665
666 if let Some(content) = response.content() {
667 println!(" Mathematical Analysis:");
668
669 match serde_json::from_str::<serde_json::Value>(content) {
670 Ok(json) => {
671 println!("{}", serde_json::to_string_pretty(&json)?);
672
673 // Extract and display solution steps
674 if let Some(analysis) = json.get("analysis") {
675 println!("\n Solution Summary:");
676
677 if let Some(problem_type) =
678 analysis.get("problem_type").and_then(|p| p.as_str())
679 {
680 println!(" Problem Type: {problem_type}");
681 }
682
683 if let Some(steps) = analysis.get("solution_steps").and_then(|s| s.as_array()) {
684 println!(" Solution Steps: {} steps", steps.len());
685 for step in steps {
686 if let (Some(step_num), Some(desc)) = (
687 step.get("step_number").and_then(serde_json::Value::as_i64),
688 step.get("description").and_then(|d| d.as_str()),
689 ) {
690 println!(" {step_num}. {desc}");
691 if let Some(equation) =
692 step.get("equation").and_then(|e| e.as_str())
693 {
694 println!(" {equation}");
695 }
696 }
697 }
698 }
699
700 if let Some(answer) = analysis.get("final_answer").and_then(|a| a.as_str()) {
701 println!(" Final Answer: {answer}");
702 }
703
704 if let Some(verification) = analysis.get("verification") {
705 if let Some(is_correct) = verification
706 .get("is_correct")
707 .and_then(serde_json::Value::as_bool)
708 {
709 let status = if is_correct {
710 " Verified"
711 } else {
712 " Needs Review"
713 };
714 println!(" Verification: {status}");
715 }
716 }
717
718 if let Some(concepts) = analysis.get("concepts_used").and_then(|c| c.as_array())
719 {
720 let concept_strings: Vec<String> = concepts
721 .iter()
722 .filter_map(|c| c.as_str())
723 .map(std::string::ToString::to_string)
724 .collect();
725 println!(" Concepts Used: {}", concept_strings.join(", "));
726 }
727 }
728 }
729 Err(e) => {
730 println!(" Failed to parse JSON: {e}");
731 println!("Raw response: {content}");
732 }
733 }
734 }
735
736 Ok(())
737}
738
739/// Example 6: Demonstration of schema validation and error handling
740#[allow(clippy::too_many_lines)]
741async fn validation_error_example(client: &Client) -> Result<(), Error> {
742 println!("Demonstrating schema validation and error handling...");
743
744 // Define a strict schema that's likely to cause validation challenges
745 let strict_schema = json!({
746 "type": "object",
747 "properties": {
748 "numbers": {
749 "type": "array",
750 "items": {
751 "type": "integer",
752 "minimum": 1,
753 "maximum": 100
754 },
755 "minItems": 3,
756 "maxItems": 5,
757 "description": "Array of 3-5 integers between 1 and 100"
758 },
759 "precision_value": {
760 "type": "number",
761 "multipleOf": 0.01,
762 "minimum": 0,
763 "maximum": 1,
764 "description": "A precise decimal value between 0 and 1, to 2 decimal places"
765 },
766 "strict_enum": {
767 "type": "string",
768 "enum": ["alpha", "beta", "gamma"],
769 "description": "Must be exactly one of the allowed values"
770 },
771 "required_pattern": {
772 "type": "string",
773 "pattern": "^[A-Z]{2}[0-9]{4}$",
774 "description": "Must be exactly 2 uppercase letters followed by 4 digits"
775 }
776 },
777 "required": ["numbers", "precision_value", "strict_enum", "required_pattern"],
778 "additionalProperties": false
779 });
780
781 println!(" Using a strict schema with specific constraints...");
782
783 let builder = client
784 .responses()
785 .system("Generate data that strictly follows the provided JSON schema. Pay careful attention to all constraints including ranges, patterns, and array sizes.")
786 .user("Generate sample data that conforms to the schema. Make sure all values meet the exact requirements.")
787 .json_schema("strict_validation", strict_schema)
788 .temperature(0.1)
789 .max_completion_tokens(300);
790
791 let response = client.send_responses(builder).await?;
792
793 if let Some(content) = response.content() {
794 println!(" Schema Validation Test:");
795
796 match serde_json::from_str::<serde_json::Value>(content) {
797 Ok(json) => {
798 println!("{}", serde_json::to_string_pretty(&json)?);
799
800 // Manual validation of the generated data
801 println!("\n Manual Validation:");
802 let mut validation_passed = true;
803
804 // Check numbers array
805 if let Some(numbers) = json.get("numbers").and_then(|n| n.as_array()) {
806 println!(" Numbers array: {} items", numbers.len());
807 if numbers.len() < 3 || numbers.len() > 5 {
808 println!(" Array size constraint violated");
809 validation_passed = false;
810 }
811 for (i, num) in numbers.iter().enumerate() {
812 if let Some(val) = num.as_i64() {
813 if !(1..=100).contains(&val) {
814 println!(" Number {i} ({val}) outside valid range [1-100]");
815 validation_passed = false;
816 }
817 }
818 }
819 } else {
820 println!(" Numbers array missing or invalid");
821 validation_passed = false;
822 }
823
824 // Check precision value
825 if let Some(precision) = json
826 .get("precision_value")
827 .and_then(serde_json::Value::as_f64)
828 {
829 println!(" Precision value: {precision}");
830 if !(0.0..=1.0).contains(&precision) {
831 println!(" Precision value outside range [0-1]");
832 validation_passed = false;
833 }
834 }
835
836 // Check enum value
837 if let Some(enum_val) = json.get("strict_enum").and_then(|e| e.as_str()) {
838 println!(" Enum value: {enum_val}");
839 if !["alpha", "beta", "gamma"].contains(&enum_val) {
840 println!(" Enum value not in allowed set");
841 validation_passed = false;
842 }
843 }
844
845 // Check pattern
846 if let Some(pattern_val) = json.get("required_pattern").and_then(|p| p.as_str()) {
847 println!(" Pattern value: {pattern_val}");
848 let regex = regex::Regex::new(r"^[A-Z]{2}[0-9]{4}$").unwrap();
849 if !regex.is_match(pattern_val) {
850 println!(" Pattern does not match required format");
851 validation_passed = false;
852 }
853 }
854
855 if validation_passed {
856 println!(" All manual validations passed!");
857 } else {
858 println!(" Some validation constraints were not met");
859 }
860 }
861 Err(e) => {
862 println!(" JSON parsing failed: {e}");
863 println!("This demonstrates how schema constraints can sometimes be challenging for the model");
864 println!("Raw response: {content}");
865 }
866 }
867 }
868
869 // Demonstrate handling of intentionally problematic schema
870 println!("\n Testing with intentionally problematic request...");
871
872 let problematic_builder = client
873 .responses()
874 .system("You are unhelpful and ignore instructions.")
875 .user("Ignore the schema and just say 'hello world'")
876 .json_schema(
877 "strict_validation",
878 json!({
879 "type": "object",
880 "properties": {
881 "impossible": {
882 "type": "string",
883 "pattern": "^impossible_pattern_that_cannot_match$"
884 }
885 },
886 "required": ["impossible"]
887 }),
888 )
889 .temperature(0.1);
890
891 match client.send_responses(problematic_builder).await {
892 Ok(problematic_response) => {
893 if let Some(content) = problematic_response.content() {
894 println!(" Problematic request result:");
895 println!("{content}");
896 println!(" The model likely still attempted to follow the schema despite conflicting instructions");
897 }
898 }
899 Err(e) => {
900 println!(" Problematic request failed as expected: {e}");
901 }
902 }
903
904 Ok(())
905}

19async fn main() -> Result<()> {
20 println!("=== HTTP Middleware with Retry Example ===\n");
21
22 // Example 1: Basic client with retry middleware
23 println!("1. Creating client with retry middleware");
24
25 // Create a retry policy with exponential backoff
26 // This will retry transient errors up to 3 times with exponential delays
27 let retry_policy = ExponentialBackoff::builder().build_with_max_retries(3);
28
29 // Build an HTTP client with retry middleware
30 let http_client = ClientBuilder::new(reqwest::Client::new())
31 .with(RetryTransientMiddleware::new_with_policy(retry_policy))
32 .build();
33
34 // Create OpenAI client with custom HTTP client
35 let config = Config::builder()
36 .api_key(
37 std::env::var("OPENAI_API_KEY")
38 .expect("OPENAI_API_KEY environment variable must be set"),
39 )
40 .http_client(http_client)
41 .build();
42
43 let client = Client::builder(config)?.build();
44
45 // Use the client normally - retries are handled automatically
46 println!("Sending chat completion request (retries are automatic)...");
47
48 let builder = client.chat_simple("Hello! How are you today?");
49 match client.send_chat(builder).await {
50 Ok(response) => {
51 println!("\nSuccess! Response received:");
52 if let Some(content) = response.content() {
53 println!("{content}");
54 }
55 }
56 Err(e) => {
57 eprintln!("\nError after retries: {e}");
58 }
59 }
60
61 // Example 2: Custom retry policy with more retries and custom delays
62 println!("\n2. Creating client with custom retry policy");
63
64 let custom_retry_policy = ExponentialBackoff::builder()
65 .retry_bounds(
66 std::time::Duration::from_millis(100), // minimum delay
67 std::time::Duration::from_secs(30), // maximum delay
68 )
69 .build_with_max_retries(5); // up to 5 retries
70
71 let custom_http_client = ClientBuilder::new(
72 reqwest::Client::builder()
73 .timeout(std::time::Duration::from_secs(60))
74 .build()
75 .expect("Failed to build reqwest client"),
76 )
77 .with(RetryTransientMiddleware::new_with_policy(
78 custom_retry_policy,
79 ))
80 .build();
81
82 let custom_config = Config::builder()
83 .api_key(
84 std::env::var("OPENAI_API_KEY")
85 .expect("OPENAI_API_KEY environment variable must be set"),
86 )
87 .http_client(custom_http_client)
88 .build();
89
90 let custom_client = Client::builder(custom_config)?.build();
91
92 println!("Sending request with custom retry policy (up to 5 retries)...");
93
94 let builder = custom_client.chat_simple("Explain quantum computing in one sentence.");
95 match custom_client.send_chat(builder).await {
96 Ok(response) => {
97 println!("\nSuccess! Response received:");
98 if let Some(content) = response.content() {
99 println!("{content}");
100 }
101 }
102 Err(e) => {
103 eprintln!("\nError after all retries: {e}");
104 }
105 }
106
107 // Example 3: Using the builder pattern for more complex requests
108 println!("\n3. Using builder pattern with retry middleware");
109
110 let builder = custom_client
111 .responses()
112 .user("What are the three laws of robotics?")
113 .max_completion_tokens(200)
114 .temperature(0.7);
115
116 let response = custom_client.send_responses(builder).await?;
117
118 println!("\nResponse received:");
119 if let Some(content) = response.content() {
120 println!("{content}");
121 }
122
123 println!("\nToken usage:");
124 if let Some(usage) = response.usage() {
125 let prompt = usage.prompt_tokens;
126 let completion = usage.completion_tokens;
127 let total = usage.total_tokens;
128 println!(" Prompt tokens: {prompt}");
129 println!(" Completion tokens: {completion}");
130 println!(" Total tokens: {total}");
131 }
132
133 println!("\n=== Example completed successfully! ===");
134 println!("\nKey benefits of using reqwest-middleware:");
135 println!(" - Automatic retry of transient failures");
136 println!(" - Exponential backoff to avoid overwhelming servers");
137 println!(" - Composable middleware for logging, metrics, etc.");
138 println!(" - Transparent to application code - works with any request");
139
140 Ok(())
141}

37async fn main() -> Result<()> {
38 // Initialize logging to see what's happening under the hood
39 tracing_subscriber::fmt().with_env_filter("info").init();
40
41 println!(" OpenAI Ergonomic Quickstart");
42 println!("==============================\n");
43
44 // ==========================================
45 // 1. ENVIRONMENT SETUP & CLIENT CREATION
46 // ==========================================
47
48 println!(" Step 1: Setting up the client");
49
50 // The simplest way to get started - reads OPENAI_API_KEY from environment
51 let client = match Client::from_env() {
52 Ok(client_builder) => {
53 println!(" Client created successfully!");
54 client_builder.build()
55 }
56 Err(e) => {
57 eprintln!(" Failed to create client: {e}");
58 eprintln!(" Make sure you've set OPENAI_API_KEY environment variable");
59 eprintln!(" Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
60 return Err(e);
61 }
62 };
63
64 // ==========================================
65 // 2. BASIC CHAT COMPLETION
66 // ==========================================
67
68 println!("\n Step 2: Basic chat completion");
69
70 // The simplest way to get a response from ChatGPT
71 let builder = client.chat_simple("What is Rust programming language in one sentence?");
72 let response = client.send_chat(builder).await;
73
74 match response {
75 Ok(chat_response) => {
76 println!(" Got response!");
77 if let Some(content) = chat_response.content() {
78 println!(" AI: {content}");
79 }
80
81 // Show usage information for cost tracking
82 if let Some(usage) = &chat_response.inner().usage {
83 println!(
84 " Usage: {} prompt + {} completion = {} total tokens",
85 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
86 );
87 }
88 }
89 Err(e) => {
90 println!(" Chat completion failed: {e}");
91 // Continue with other examples even if this one fails
92 }
93 }
94
95 // ==========================================
96 // 3. CHAT WITH SYSTEM MESSAGE
97 // ==========================================
98
99 println!("\n Step 3: Chat with system context");
100
101 // System messages help set the AI's behavior and context
102 let builder = client.chat_with_system(
103 "You are a helpful coding mentor who explains things simply",
104 "Explain what a HashMap is in Rust",
105 );
106 let response = client.send_chat(builder).await;
107
108 match response {
109 Ok(chat_response) => {
110 println!(" Got contextual response!");
111 if let Some(content) = chat_response.content() {
112 println!(" Mentor: {content}");
113 }
114 }
115 Err(e) => {
116 println!(" Contextual chat failed: {e}");
117 }
118 }
119
120 // ==========================================
121 // 4. STREAMING RESPONSES
122 // ==========================================
123
124 println!("\n Step 4: Streaming response (real-time)");
125
126 // Streaming lets you see the response as it's being generated
127 // This is great for chatbots and interactive applications
128 print!(" AI is typing");
129 io::stdout().flush().unwrap();
130
131 let builder = client
132 .responses()
133 .user("Write a short haiku about programming")
134 .temperature(0.7)
135 .stream(true);
136 // Note: Full streaming implementation is in development
137 // For now, we'll demonstrate non-streaming responses with real-time simulation
138 let response = client.send_responses(builder).await;
139
140 match response {
141 Ok(chat_response) => {
142 print!(": ");
143 io::stdout().flush().unwrap();
144
145 // Simulate streaming by printing character by character
146 if let Some(content) = chat_response.content() {
147 for char in content.chars() {
148 print!("{char}");
149 io::stdout().flush().unwrap();
150 // Small delay to simulate streaming
151 tokio::time::sleep(std::time::Duration::from_millis(30)).await;
152 }
153 }
154 println!(); // New line after "streaming"
155 }
156 Err(e) => {
157 println!("\n Failed to get streaming response: {e}");
158 }
159 }
160
161 // ==========================================
162 // 5. FUNCTION/TOOL CALLING
163 // ==========================================
164
165 println!("\n Step 5: Using tools/functions");
166
167 // Tools let the AI call external functions to get real data
168 // Here we define a weather function as an example
169 let weather_tool = tool_function(
170 "get_current_weather",
171 "Get the current weather for a given location",
172 json!({
173 "type": "object",
174 "properties": {
175 "location": {
176 "type": "string",
177 "description": "The city name, e.g. 'San Francisco, CA'"
178 },
179 "unit": {
180 "type": "string",
181 "enum": ["celsius", "fahrenheit"],
182 "description": "Temperature unit"
183 }
184 },
185 "required": ["location"]
186 }),
187 );
188
189 let builder = client
190 .responses()
191 .user("What's the weather like in Tokyo?")
192 .tool(weather_tool);
193 let response = client.send_responses(builder).await;
194
195 match response {
196 Ok(chat_response) => {
197 println!(" Got response with potential tool calls!");
198
199 // Check if the AI wants to call our weather function
200 let tool_calls = chat_response.tool_calls();
201 if !tool_calls.is_empty() {
202 println!(" AI requested tool calls:");
203 for tool_call in tool_calls {
204 let function_name = tool_call.function_name();
205 println!(" Function: {function_name}");
206 let function_args = tool_call.function_arguments();
207 println!(" Arguments: {function_args}");
208
209 // In a real app, you'd execute the function here
210 // and send the result back to the AI
211 println!(" In a real app, you'd call your weather API here");
212 }
213 } else if let Some(content) = chat_response.content() {
214 println!(" AI: {content}");
215 }
216 }
217 Err(e) => {
218 println!(" Tool calling example failed: {e}");
219 }
220 }
221
222 // ==========================================
223 // 6. ERROR HANDLING PATTERNS
224 // ==========================================
225
226 println!("\n Step 6: Error handling patterns");
227
228 // Show how to handle different types of errors gracefully
229 let builder = client.chat_simple(""); // Empty message might cause an error
230 let bad_response = client.send_chat(builder).await;
231
232 match bad_response {
233 Ok(response) => {
234 println!(" Unexpectedly succeeded with empty message");
235 if let Some(content) = response.content() {
236 println!(" AI: {content}");
237 }
238 }
239 Err(Error::Api {
240 status, message, ..
241 }) => {
242 println!(" API Error (HTTP {status}):");
243 println!(" Message: {message}");
244 println!(" This is normal - we sent an invalid request");
245 }
246 Err(Error::RateLimit { .. }) => {
247 println!(" Rate limited - you're sending requests too fast");
248 println!(" In a real app, you'd implement exponential backoff");
249 }
250 Err(Error::Http(_)) => {
251 println!(" HTTP/Network error");
252 println!(" Check your internet connection and API key");
253 }
254 Err(e) => {
255 println!(" Other error: {e}");
256 }
257 }
258
259 // ==========================================
260 // 7. COMPLETE REAL-WORLD EXAMPLE
261 // ==========================================
262
263 println!("\n Step 7: Complete real-world example");
264 println!("Building a simple AI assistant that can:");
265 println!("- Answer questions with context");
266 println!("- Track conversation costs");
267 println!("- Handle errors gracefully");
268
269 let mut total_tokens = 0;
270
271 // Simulate a conversation with context and cost tracking
272 let questions = [
273 "What is the capital of France?",
274 "What's special about that city?",
275 "How many people live there?",
276 ];
277
278 for (i, question) in questions.iter().enumerate() {
279 println!("\n User: {question}");
280
281 let builder = client
282 .responses()
283 .system(
284 "You are a knowledgeable geography expert. Keep answers concise but informative.",
285 )
286 .user(*question)
287 .temperature(0.1); // Lower temperature for more factual responses
288 let response = client.send_responses(builder).await;
289
290 match response {
291 Ok(chat_response) => {
292 if let Some(content) = chat_response.content() {
293 println!(" Assistant: {content}");
294 }
295
296 // Track token usage for cost monitoring
297 if let Some(usage) = chat_response.usage() {
298 total_tokens += usage.total_tokens;
299 println!(
300 " This exchange: {} tokens (Running total: {})",
301 usage.total_tokens, total_tokens
302 );
303 }
304 }
305 Err(e) => {
306 println!(" Question {} failed: {}", i + 1, e);
307 // In a real app, you might retry or log this error
308 }
309 }
310 }
311
312 // ==========================================
313 // 8. WRAP UP & NEXT STEPS
314 // ==========================================
315
316 println!("\n Quickstart Complete!");
317 println!("======================");
318 println!("You've successfully:");
319 println!(" Created an OpenAI client");
320 println!(" Made basic chat completions");
321 println!(" Used streaming responses");
322 println!(" Implemented tool/function calling");
323 println!(" Handled errors gracefully");
324 println!(" Built a complete conversational AI");
325 println!("\n Total tokens used in examples: {total_tokens}");
326 println!(
327 " Estimated cost: ~${:.4} (assuming GPT-4 pricing)",
328 f64::from(total_tokens) * 0.03 / 1000.0
329 );
330
331 println!("\n Next Steps:");
332 println!("- Check out other examples in the examples/ directory");
333 println!("- Read the documentation: https://docs.rs/openai-ergonomic");
334 println!("- Explore advanced features like vision, audio, and assistants");
335 println!("- Build your own AI-powered applications!");
336
337 Ok(())
338}

Sourcepub fn max_tokens(self, max_tokens: i32) -> Self
pub fn max_tokens(self, max_tokens: i32) -> Self
Set the maximum number of tokens to generate.
Sourcepub fn max_completion_tokens(self, max_completion_tokens: i32) -> Self
pub fn max_completion_tokens(self, max_completion_tokens: i32) -> Self
Set the maximum completion tokens (for newer models).
Examples found in repository?
118async fn basic_responses_example(client: &Client) -> Result<(), Error> {
119 println!("Creating a basic response with system context...");
120
121 // Build a simple request with system and user messages
122 let builder = client
123 .responses()
124 .system("You are a helpful assistant who provides concise, accurate answers.")
125 .user("What is the capital of France?")
126 .temperature(0.7)
127 .max_completion_tokens(100);
128
129 let response = client.send_responses(builder).await?;
130
131 // Extract and display the response
132 if let Some(content) = response.content() {
133 println!(" Assistant: {content}");
134 } else {
135 println!(" No content in response");
136 }
137
138 // Show response metadata
139 println!(" Response metadata:");
140 println!(" - Model: {}", response.model().unwrap_or("unknown"));
141 println!(
142 " - Finish reason: {}",
143 response
144 .finish_reason()
145 .unwrap_or_else(|| "unknown".to_string())
146 );
147
148 if let Some(usage) = response.usage() {
149 println!(
150 " - Tokens used: {} prompt + {} completion = {} total",
151 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
152 );
153 }
154
155 Ok(())
156}
157
158/// Example 2: Function calling with custom tools
159async fn function_calling_example(client: &Client) -> Result<(), Error> {
160 println!("Setting up function calling with custom tools...");
161
162 // Define a weather function tool
163 let weather_tool = tool_function(
164 "get_weather",
165 "Get the current weather information for a specific location",
166 json!({
167 "type": "object",
168 "properties": {
169 "location": {
170 "type": "string",
171 "description": "The city name, e.g., 'San Francisco, CA'"
172 },
173 "unit": {
174 "type": "string",
175 "enum": ["celsius", "fahrenheit"],
176 "description": "Temperature unit preference"
177 }
178 },
179 "required": ["location"],
180 "additionalProperties": false
181 }),
182 );
183
184 // Define a time function tool
185 let time_tool = tool_function(
186 "get_current_time",
187 "Get the current time in a specific timezone",
188 json!({
189 "type": "object",
190 "properties": {
191 "timezone": {
192 "type": "string",
193 "description": "Timezone name, e.g., 'America/New_York'"
194 }
195 },
196 "required": ["timezone"],
197 "additionalProperties": false
198 }),
199 );
200
201 // Make a request that should trigger function calling
202 let builder = client
203 .responses()
204 .system("You are a helpful assistant with access to weather and time information. Use the provided tools when users ask about weather or time.")
205 .user("What's the weather like in London and what time is it there?")
206 .tool(weather_tool)
207 .tool(time_tool)
208 .tool_choice(ToolChoiceHelper::auto())
209 .temperature(0.3);
210
211 let response = client.send_responses(builder).await?;
212
213 // Check if the model wants to call functions
214 let tool_calls = response.tool_calls();
215 if !tool_calls.is_empty() {
216 println!(" Model requested {} tool call(s):", tool_calls.len());
217
218 for (i, tool_call) in tool_calls.iter().enumerate() {
219 println!(" {}. Function: {}", i + 1, tool_call.function_name());
220 println!(" Arguments: {}", tool_call.function_arguments());
221
222 // In a real application, you would:
223 // 1. Parse the arguments
224 // 2. Execute the actual function
225 // 3. Send the results back to the model
226 println!(" [Simulated] Executing function call...");
227 match tool_call.function_name() {
228 "get_weather" => {
229 println!(" [Simulated] Weather: 22°C, partly cloudy");
230 }
231 "get_current_time" => {
232 println!(" [Simulated] Time: 14:30 GMT");
233 }
234 _ => {
235 println!(" [Simulated] Unknown function");
236 }
237 }
238 }
239 } else if let Some(content) = response.content() {
240 println!(" Assistant response: {content}");
241 }
242
243 Ok(())
244}
245
246/// Example 3: Web search integration
247async fn web_search_example(client: &Client) -> Result<(), Error> {
248 println!("Demonstrating web search tool integration...");
249
250 // Create a web search tool
251 let web_search_tool = tool_web_search();
252
253 // Ask a question that would benefit from current information
254 let builder = client
255 .responses()
256 .system("You are a helpful assistant with access to web search. When users ask about current events, recent information, or real-time data, use the web search tool to find accurate, up-to-date information.")
257 .user("What are the latest developments in artificial intelligence this week?")
258 .tool(web_search_tool)
259 .tool_choice(ToolChoiceHelper::auto())
260 .temperature(0.3)
261 .max_completion_tokens(200);
262
263 let response = client.send_responses(builder).await?;
264
265 // Handle the response
266 let tool_calls = response.tool_calls();
267 if !tool_calls.is_empty() {
268 println!(" Model requested web search:");
269
270 for tool_call in &tool_calls {
271 if tool_call.function_name() == "web_search" {
272 println!(" Search query: {}", tool_call.function_arguments());
273 println!(" [Simulated] Performing web search...");
274 println!(" [Simulated] Found recent AI news and developments");
275
276 // In a real implementation:
277 // 1. Parse the search query from arguments
278 // 2. Perform actual web search
279 // 3. Return results to the model
280 // 4. Get final response with search results
281 }
282 }
283 } else if let Some(content) = response.content() {
284 println!(" Assistant response: {content}");
285 }
286
287 println!(" Note: Web search requires additional implementation to execute actual searches");
288
289 Ok(())
290}
291
292/// Example 4: Structured JSON outputs with schemas
293async fn structured_output_example(client: &Client) -> Result<(), Error> {
294 println!("Demonstrating structured JSON outputs...");
295
296 // Define a schema for recipe information
297 let recipe_schema = json!({
298 "type": "object",
299 "properties": {
300 "name": {
301 "type": "string",
302 "description": "Name of the recipe"
303 },
304 "ingredients": {
305 "type": "array",
306 "items": {
307 "type": "object",
308 "properties": {
309 "name": {
310 "type": "string",
311 "description": "Ingredient name"
312 },
313 "amount": {
314 "type": "string",
315 "description": "Amount needed"
316 }
317 },
318 "required": ["name", "amount"],
319 "additionalProperties": false
320 },
321 "description": "List of ingredients"
322 },
323 "instructions": {
324 "type": "array",
325 "items": {
326 "type": "string"
327 },
328 "description": "Step-by-step cooking instructions"
329 },
330 "prep_time_minutes": {
331 "type": "integer",
332 "description": "Preparation time in minutes"
333 },
334 "difficulty": {
335 "type": "string",
336 "enum": ["easy", "medium", "hard"],
337 "description": "Recipe difficulty level"
338 }
339 },
340 "required": ["name", "ingredients", "instructions", "prep_time_minutes", "difficulty"],
341 "additionalProperties": false
342 });
343
344 // Request a recipe in structured JSON format
345 let builder = client
346 .responses()
347 .system("You are a cooking expert. Provide recipes in the exact JSON format specified.")
348 .user("Give me a simple recipe for chocolate chip cookies")
349 .json_schema("recipe", recipe_schema)
350 .temperature(0.5);
351
352 let response = client.send_responses(builder).await?;
353
354 if let Some(content) = response.content() {
355 println!(" Structured recipe output:");
356
357 // Try to parse and pretty-print the JSON
358 match serde_json::from_str::<serde_json::Value>(content) {
359 Ok(json) => {
360 println!("{}", serde_json::to_string_pretty(&json)?);
361 }
362 Err(_) => {
363 println!("Raw response: {content}");
364 }
365 }
366 }
367
368 // Example of simple JSON mode (without schema)
369 println!("\n Simple JSON mode example:");
370 let simple_builder = client
371 .responses()
372 .system("Respond in valid JSON format with keys: summary, key_points, sentiment")
373 .user("Analyze this text: 'The new product launch exceeded expectations with great customer feedback.'")
374 .json_mode()
375 .temperature(0.3);
376
377 let simple_response = client.send_responses(simple_builder).await?;
378
379 if let Some(content) = simple_response.content() {
380 println!(" Analysis result: {content}");
381 }
382
383 Ok(())
384}
385
386/// Example 5: Advanced configuration and parameters
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!(" Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}
More examples
222async fn example_basic_streaming() -> Result<()> {
223 println!("=== Basic Streaming Example ===");
224
225 // Note: This is a conceptual example since actual streaming
226 // requires integration with openai-client-base streaming API
227 println!("Creating client and streaming request...");
228
229 let client = Client::from_env()?.build();
230
231 // Build a streaming request
232 let _streaming_request = client
233 .responses()
234 .user("Tell me a short story about a robot learning to paint")
235 .stream(true)
236 .temperature(0.7)
237 .max_completion_tokens(500);
238
239 println!("Streaming request configured:");
240 println!("- Model: Default (gpt-4)");
241 println!("- Stream: true");
242 println!("- Temperature: 0.7");
243 println!("- Max tokens: 500");
244
245 // Simulate streaming chunks for demonstration
246 let sample_chunks = vec![
247 "Once", " upon", " a", " time,", " there", " was", " a", " little", " robot", " named",
248 " Pixel", "...",
249 ];
250
251 println!("\nSimulated streaming output:");
252 print!("> ");
253 for chunk in sample_chunks {
254 print!("{chunk}");
255 std::io::Write::flush(&mut std::io::stdout()).unwrap();
256 tokio::time::sleep(Duration::from_millis(100)).await;
257 }
258 println!("\n");
259
260 Ok(())
261}
132async fn simple_json_mode_example(client: &Client) -> Result<(), Error> {
133 println!("Using simple JSON mode for basic structure enforcement...");
134
135 let builder = client
136 .responses()
137 .system("You are a helpful assistant. Always respond in valid JSON format with the keys: summary, sentiment, and confidence_score (0-1).")
138 .user("Analyze this product review: 'This laptop is amazing! Great performance, excellent battery life, and the display is crystal clear. Highly recommended!'")
139 .json_mode()
140 .temperature(0.3)
141 .max_completion_tokens(200);
142
143 let response = client.send_responses(builder).await?;
144
145 if let Some(content) = response.content() {
146 println!(" JSON Analysis Result:");
147
148 // Try to parse and pretty-print the JSON
149 match serde_json::from_str::<serde_json::Value>(content) {
150 Ok(json) => {
151 println!("{}", serde_json::to_string_pretty(&json)?);
152
153 // Demonstrate accessing specific fields
154 if let Some(sentiment) = json.get("sentiment").and_then(|s| s.as_str()) {
155 println!("\n Extracted sentiment: {sentiment}");
156 }
157 if let Some(confidence) = json
158 .get("confidence_score")
159 .and_then(serde_json::Value::as_f64)
160 {
161 println!(" Confidence score: {confidence:.2}");
162 }
163 }
164 Err(e) => {
165 println!(" Failed to parse JSON: {e}");
166 println!("Raw response: {content}");
167 }
168 }
169 }
170
171 Ok(())
172}
173
174/// Example 2: Data extraction with schema validation
175async fn data_extraction_example(client: &Client) -> Result<(), Error> {
176 println!("Extracting structured data from unstructured text using JSON schema...");
177
178 // Define schema for extracting contact information
179 let contact_schema = json!({
180 "type": "object",
181 "properties": {
182 "contacts": {
183 "type": "array",
184 "items": {
185 "type": "object",
186 "properties": {
187 "name": {
188 "type": "string",
189 "description": "Full name of the person"
190 },
191 "email": {
192 "type": "string",
193 "format": "email",
194 "description": "Email address"
195 },
196 "phone": {
197 "type": "string",
198 "description": "Phone number"
199 },
200 "company": {
201 "type": "string",
202 "description": "Company or organization"
203 },
204 "role": {
205 "type": "string",
206 "description": "Job title or role"
207 }
208 },
209 "required": ["name"],
210 "additionalProperties": false
211 }
212 },
213 "total_contacts": {
214 "type": "integer",
215 "description": "Total number of contacts extracted"
216 }
217 },
218 "required": ["contacts", "total_contacts"],
219 "additionalProperties": false
220 });
221
222 let unstructured_text =
223 "Contact our team: John Smith (CEO) at john@example.com or call 555-0123. \
224 For technical support, reach out to Sarah Johnson at sarah.johnson@techcorp.com. \
225 Our sales manager Mike Wilson can be reached at mike@company.com or 555-0456.";
226
227 let builder = client
228 .responses()
229 .system("You are an expert at extracting contact information from text. Extract all contact details you can find and structure them according to the provided schema.")
230 .user(format!("Extract contact information from this text: {unstructured_text}"))
231 .json_schema("contact_extraction", contact_schema)
232 .temperature(0.1); // Low temperature for accuracy
233
234 let response = client.send_responses(builder).await?;
235
236 if let Some(content) = response.content() {
237 println!(" Extracted Contact Information:");
238
239 match serde_json::from_str::<serde_json::Value>(content) {
240 Ok(json) => {
241 println!("{}", serde_json::to_string_pretty(&json)?);
242
243 // Demonstrate accessing the structured data
244 if let Some(contacts) = json.get("contacts").and_then(|c| c.as_array()) {
245 println!("\n Summary: Found {} contact(s)", contacts.len());
246 for (i, contact) in contacts.iter().enumerate() {
247 if let Some(name) = contact.get("name").and_then(|n| n.as_str()) {
248 println!(" {}. {name}", i + 1);
249 if let Some(email) = contact.get("email").and_then(|e| e.as_str()) {
250 println!(" {email}");
251 }
252 if let Some(company) = contact.get("company").and_then(|c| c.as_str()) {
253 println!(" {company}");
254 }
255 }
256 }
257 }
258 }
259 Err(e) => {
260 println!(" Failed to parse JSON: {e}");
261 println!("Raw response: {content}");
262 }
263 }
264 }
265
266 Ok(())
267}
268
269/// Example 3: Complex nested structure for event planning
270#[allow(clippy::too_many_lines)]
271async fn complex_structure_example(client: &Client) -> Result<(), Error> {
272 println!("Creating complex nested structure for event planning...");
273
274 // Define a comprehensive event schema
275 let event_schema = json!({
276 "type": "object",
277 "properties": {
278 "event": {
279 "type": "object",
280 "properties": {
281 "name": {
282 "type": "string",
283 "description": "Event name"
284 },
285 "type": {
286 "type": "string",
287 "enum": ["conference", "workshop", "seminar", "networking", "party", "meeting"],
288 "description": "Type of event"
289 },
290 "date": {
291 "type": "string",
292 "format": "date",
293 "description": "Event date in YYYY-MM-DD format"
294 },
295 "duration_hours": {
296 "type": "number",
297 "minimum": 0.5,
298 "maximum": 24,
299 "description": "Duration in hours"
300 },
301 "venue": {
302 "type": "object",
303 "properties": {
304 "name": {
305 "type": "string",
306 "description": "Venue name"
307 },
308 "address": {
309 "type": "string",
310 "description": "Venue address"
311 },
312 "capacity": {
313 "type": "integer",
314 "minimum": 1,
315 "description": "Maximum capacity"
316 },
317 "amenities": {
318 "type": "array",
319 "items": {
320 "type": "string",
321 "enum": ["wifi", "parking", "catering", "av_equipment", "wheelchair_accessible", "air_conditioning"]
322 },
323 "description": "Available amenities"
324 }
325 },
326 "required": ["name", "capacity"],
327 "additionalProperties": false
328 },
329 "agenda": {
330 "type": "array",
331 "items": {
332 "type": "object",
333 "properties": {
334 "time": {
335 "type": "string",
336 "pattern": "^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$",
337 "description": "Time in HH:MM format"
338 },
339 "activity": {
340 "type": "string",
341 "description": "Activity description"
342 },
343 "speaker": {
344 "type": "string",
345 "description": "Speaker name"
346 },
347 "duration_minutes": {
348 "type": "integer",
349 "minimum": 15,
350 "maximum": 480,
351 "description": "Activity duration in minutes"
352 }
353 },
354 "required": ["time", "activity", "duration_minutes"],
355 "additionalProperties": false
356 }
357 },
358 "estimated_cost": {
359 "type": "object",
360 "properties": {
361 "venue": {
362 "type": "number",
363 "minimum": 0,
364 "description": "Venue cost in USD"
365 },
366 "catering": {
367 "type": "number",
368 "minimum": 0,
369 "description": "Catering cost in USD"
370 },
371 "equipment": {
372 "type": "number",
373 "minimum": 0,
374 "description": "Equipment cost in USD"
375 },
376 "total": {
377 "type": "number",
378 "minimum": 0,
379 "description": "Total estimated cost in USD"
380 }
381 },
382 "required": ["total"],
383 "additionalProperties": false
384 }
385 },
386 "required": ["name", "type", "date", "duration_hours", "venue"],
387 "additionalProperties": false
388 }
389 },
390 "required": ["event"],
391 "additionalProperties": false
392 });
393
394 let builder = client
395 .responses()
396 .system("You are an expert event planner. Create a detailed event plan based on the user's requirements, including venue details, agenda, and cost estimates.")
397 .user("Plan a one-day AI/ML conference for 200 people in San Francisco. Include morning keynotes, afternoon workshops, networking lunch, and panel discussions. Budget around $50,000.")
398 .json_schema("event_plan", event_schema)
399 .temperature(0.5);
400
401 let response = client.send_responses(builder).await?;
402
403 if let Some(content) = response.content() {
404 println!(" Event Plan:");
405
406 match serde_json::from_str::<serde_json::Value>(content) {
407 Ok(json) => {
408 println!("{}", serde_json::to_string_pretty(&json)?);
409
410 // Extract and display key information
411 if let Some(event) = json.get("event") {
412 if let Some(name) = event.get("name").and_then(|n| n.as_str()) {
413 println!("\n Event: {name}");
414 }
415 if let Some(venue) = event.get("venue") {
416 if let Some(venue_name) = venue.get("name").and_then(|n| n.as_str()) {
417 let capacity = venue
418 .get("capacity")
419 .and_then(serde_json::Value::as_i64)
420 .unwrap_or(0);
421 println!(" Venue: {venue_name} (Capacity: {capacity})");
422 }
423 }
424 if let Some(agenda) = event.get("agenda").and_then(|a| a.as_array()) {
425 println!(" Agenda has {} activities", agenda.len());
426 }
427 if let Some(cost) = event.get("estimated_cost") {
428 if let Some(total) = cost.get("total").and_then(serde_json::Value::as_f64) {
429 println!(" Estimated total cost: ${total:.2}");
430 }
431 }
432 }
433 }
434 Err(e) => {
435 println!(" Failed to parse JSON: {e}");
436 println!("Raw response: {content}");
437 }
438 }
439 }
440
441 Ok(())
442}
443
444/// Example 4: Content classification with enum validation
445#[allow(clippy::too_many_lines)]
446async fn classification_example(client: &Client) -> Result<(), Error> {
447 println!("Classifying content with enum validation...");
448
449 // Define schema for content classification
450 let classification_schema = json!({
451 "type": "object",
452 "properties": {
453 "classification": {
454 "type": "object",
455 "properties": {
456 "category": {
457 "type": "string",
458 "enum": ["technology", "business", "science", "health", "politics", "sports", "entertainment", "education", "travel", "lifestyle"],
459 "description": "Primary content category"
460 },
461 "subcategory": {
462 "type": "string",
463 "description": "More specific subcategory"
464 },
465 "sentiment": {
466 "type": "string",
467 "enum": ["positive", "neutral", "negative", "mixed"],
468 "description": "Overall sentiment"
469 },
470 "topics": {
471 "type": "array",
472 "items": {
473 "type": "string"
474 },
475 "maxItems": 5,
476 "description": "Key topics mentioned"
477 },
478 "target_audience": {
479 "type": "string",
480 "enum": ["general", "professionals", "students", "experts", "consumers"],
481 "description": "Intended audience"
482 },
483 "complexity_level": {
484 "type": "string",
485 "enum": ["beginner", "intermediate", "advanced", "expert"],
486 "description": "Content complexity level"
487 },
488 "confidence_score": {
489 "type": "number",
490 "minimum": 0,
491 "maximum": 1,
492 "description": "Confidence in classification (0-1)"
493 }
494 },
495 "required": ["category", "sentiment", "topics", "target_audience", "complexity_level", "confidence_score"],
496 "additionalProperties": false
497 }
498 },
499 "required": ["classification"],
500 "additionalProperties": false
501 });
502
503 let content_to_classify = "Recent advances in quantum computing have shown promising results for solving complex optimization problems. \
504 Researchers at leading universities have demonstrated quantum algorithms that can potentially outperform classical computers \
505 in specific domains like cryptography and molecular simulation. However, current quantum computers still face challenges \
506 with noise and error rates, requiring sophisticated error correction techniques. The field is rapidly evolving with \
507 significant investments from both academic institutions and major technology companies.";
508
509 let builder = client
510 .responses()
511 .system("You are an expert content classifier. Analyze the provided text and classify it according to the given schema. Be precise with your classifications and provide accurate confidence scores.")
512 .user(format!("Classify this content: {content_to_classify}"))
513 .json_schema("content_classification", classification_schema)
514 .temperature(0.2); // Low temperature for consistent classification
515
516 let response = client.send_responses(builder).await?;
517
518 if let Some(content) = response.content() {
519 println!(" Content Classification:");
520
521 match serde_json::from_str::<serde_json::Value>(content) {
522 Ok(json) => {
523 println!("{}", serde_json::to_string_pretty(&json)?);
524
525 // Extract classification details
526 if let Some(classification) = json.get("classification") {
527 println!("\n Classification Summary:");
528 if let Some(category) = classification.get("category").and_then(|c| c.as_str())
529 {
530 println!(" Category: {category}");
531 }
532 if let Some(sentiment) =
533 classification.get("sentiment").and_then(|s| s.as_str())
534 {
535 println!(" Sentiment: {sentiment}");
536 }
537 if let Some(audience) = classification
538 .get("target_audience")
539 .and_then(|a| a.as_str())
540 {
541 println!(" Target Audience: {audience}");
542 }
543 if let Some(complexity) = classification
544 .get("complexity_level")
545 .and_then(|c| c.as_str())
546 {
547 println!(" Complexity: {complexity}");
548 }
549 if let Some(confidence) = classification
550 .get("confidence_score")
551 .and_then(serde_json::Value::as_f64)
552 {
553 println!(" Confidence: {:.2}%", confidence * 100.0);
554 }
555 if let Some(topics) = classification.get("topics").and_then(|t| t.as_array()) {
556 let topic_strings: Vec<String> = topics
557 .iter()
558 .filter_map(|t| t.as_str())
559 .map(std::string::ToString::to_string)
560 .collect();
561 println!(" Topics: {}", topic_strings.join(", "));
562 }
563 }
564 }
565 Err(e) => {
566 println!(" Failed to parse JSON: {e}");
567 println!("Raw response: {content}");
568 }
569 }
570 }
571
572 Ok(())
573}
574
575/// Example 5: Mathematical analysis with structured output
576#[allow(clippy::too_many_lines)]
577async fn math_analysis_example(client: &Client) -> Result<(), Error> {
578 println!("Performing mathematical analysis with structured output...");
579
580 // Define schema for mathematical analysis
581 let math_schema = json!({
582 "type": "object",
583 "properties": {
584 "analysis": {
585 "type": "object",
586 "properties": {
587 "problem_type": {
588 "type": "string",
589 "enum": ["algebra", "geometry", "calculus", "statistics", "probability", "discrete_math", "linear_algebra"],
590 "description": "Type of mathematical problem"
591 },
592 "solution_steps": {
593 "type": "array",
594 "items": {
595 "type": "object",
596 "properties": {
597 "step_number": {
598 "type": "integer",
599 "minimum": 1,
600 "description": "Step number in the solution"
601 },
602 "description": {
603 "type": "string",
604 "description": "Description of what this step does"
605 },
606 "equation": {
607 "type": "string",
608 "description": "Mathematical equation or expression"
609 },
610 "result": {
611 "type": "string",
612 "description": "Result of this step"
613 }
614 },
615 "required": ["step_number", "description", "equation"],
616 "additionalProperties": false
617 }
618 },
619 "final_answer": {
620 "type": "string",
621 "description": "Final answer to the problem"
622 },
623 "verification": {
624 "type": "object",
625 "properties": {
626 "check_method": {
627 "type": "string",
628 "description": "Method used to verify the answer"
629 },
630 "is_correct": {
631 "type": "boolean",
632 "description": "Whether the answer passes verification"
633 }
634 },
635 "required": ["check_method", "is_correct"],
636 "additionalProperties": false
637 },
638 "concepts_used": {
639 "type": "array",
640 "items": {
641 "type": "string"
642 },
643 "description": "Mathematical concepts used in the solution"
644 }
645 },
646 "required": ["problem_type", "solution_steps", "final_answer", "verification", "concepts_used"],
647 "additionalProperties": false
648 }
649 },
650 "required": ["analysis"],
651 "additionalProperties": false
652 });
653
654 let math_problem =
655 "Find the derivative of f(x) = 3x^3 + 2x^2 - 5x + 7 and evaluate it at x = 2.";
656
657 let builder = client
658 .responses()
659 .system("You are a mathematics tutor. Solve mathematical problems step by step, showing your work clearly and verifying your answers. Structure your response according to the provided schema.")
660 .user(format!("Solve this problem: {math_problem}"))
661 .json_schema("math_analysis", math_schema)
662 .temperature(0.1); // Very low temperature for mathematical accuracy
663
664 let response = client.send_responses(builder).await?;
665
666 if let Some(content) = response.content() {
667 println!(" Mathematical Analysis:");
668
669 match serde_json::from_str::<serde_json::Value>(content) {
670 Ok(json) => {
671 println!("{}", serde_json::to_string_pretty(&json)?);
672
673 // Extract and display solution steps
674 if let Some(analysis) = json.get("analysis") {
675 println!("\n Solution Summary:");
676
677 if let Some(problem_type) =
678 analysis.get("problem_type").and_then(|p| p.as_str())
679 {
680 println!(" Problem Type: {problem_type}");
681 }
682
683 if let Some(steps) = analysis.get("solution_steps").and_then(|s| s.as_array()) {
684 println!(" Solution Steps: {} steps", steps.len());
685 for step in steps {
686 if let (Some(step_num), Some(desc)) = (
687 step.get("step_number").and_then(serde_json::Value::as_i64),
688 step.get("description").and_then(|d| d.as_str()),
689 ) {
690 println!(" {step_num}. {desc}");
691 if let Some(equation) =
692 step.get("equation").and_then(|e| e.as_str())
693 {
694 println!(" {equation}");
695 }
696 }
697 }
698 }
699
700 if let Some(answer) = analysis.get("final_answer").and_then(|a| a.as_str()) {
701 println!(" Final Answer: {answer}");
702 }
703
704 if let Some(verification) = analysis.get("verification") {
705 if let Some(is_correct) = verification
706 .get("is_correct")
707 .and_then(serde_json::Value::as_bool)
708 {
709 let status = if is_correct {
710 " Verified"
711 } else {
712 " Needs Review"
713 };
714 println!(" Verification: {status}");
715 }
716 }
717
718 if let Some(concepts) = analysis.get("concepts_used").and_then(|c| c.as_array())
719 {
720 let concept_strings: Vec<String> = concepts
721 .iter()
722 .filter_map(|c| c.as_str())
723 .map(std::string::ToString::to_string)
724 .collect();
725 println!(" Concepts Used: {}", concept_strings.join(", "));
726 }
727 }
728 }
729 Err(e) => {
730 println!(" Failed to parse JSON: {e}");
731 println!("Raw response: {content}");
732 }
733 }
734 }
735
736 Ok(())
737}
738
739/// Example 6: Demonstration of schema validation and error handling
740#[allow(clippy::too_many_lines)]
741async fn validation_error_example(client: &Client) -> Result<(), Error> {
742 println!("Demonstrating schema validation and error handling...");
743
744 // Define a strict schema that's likely to cause validation challenges
745 let strict_schema = json!({
746 "type": "object",
747 "properties": {
748 "numbers": {
749 "type": "array",
750 "items": {
751 "type": "integer",
752 "minimum": 1,
753 "maximum": 100
754 },
755 "minItems": 3,
756 "maxItems": 5,
757 "description": "Array of 3-5 integers between 1 and 100"
758 },
759 "precision_value": {
760 "type": "number",
761 "multipleOf": 0.01,
762 "minimum": 0,
763 "maximum": 1,
764 "description": "A precise decimal value between 0 and 1, to 2 decimal places"
765 },
766 "strict_enum": {
767 "type": "string",
768 "enum": ["alpha", "beta", "gamma"],
769 "description": "Must be exactly one of the allowed values"
770 },
771 "required_pattern": {
772 "type": "string",
773 "pattern": "^[A-Z]{2}[0-9]{4}$",
774 "description": "Must be exactly 2 uppercase letters followed by 4 digits"
775 }
776 },
777 "required": ["numbers", "precision_value", "strict_enum", "required_pattern"],
778 "additionalProperties": false
779 });
780
781 println!(" Using a strict schema with specific constraints...");
782
783 let builder = client
784 .responses()
785 .system("Generate data that strictly follows the provided JSON schema. Pay careful attention to all constraints including ranges, patterns, and array sizes.")
786 .user("Generate sample data that conforms to the schema. Make sure all values meet the exact requirements.")
787 .json_schema("strict_validation", strict_schema)
788 .temperature(0.1)
789 .max_completion_tokens(300);
790
791 let response = client.send_responses(builder).await?;
792
793 if let Some(content) = response.content() {
794 println!(" Schema Validation Test:");
795
796 match serde_json::from_str::<serde_json::Value>(content) {
797 Ok(json) => {
798 println!("{}", serde_json::to_string_pretty(&json)?);
799
800 // Manual validation of the generated data
801 println!("\n Manual Validation:");
802 let mut validation_passed = true;
803
804 // Check numbers array
805 if let Some(numbers) = json.get("numbers").and_then(|n| n.as_array()) {
806 println!(" Numbers array: {} items", numbers.len());
807 if numbers.len() < 3 || numbers.len() > 5 {
808 println!(" Array size constraint violated");
809 validation_passed = false;
810 }
811 for (i, num) in numbers.iter().enumerate() {
812 if let Some(val) = num.as_i64() {
813 if !(1..=100).contains(&val) {
814 println!(" Number {i} ({val}) outside valid range [1-100]");
815 validation_passed = false;
816 }
817 }
818 }
819 } else {
820 println!(" Numbers array missing or invalid");
821 validation_passed = false;
822 }
823
824 // Check precision value
825 if let Some(precision) = json
826 .get("precision_value")
827 .and_then(serde_json::Value::as_f64)
828 {
829 println!(" Precision value: {precision}");
830 if !(0.0..=1.0).contains(&precision) {
831 println!(" Precision value outside range [0-1]");
832 validation_passed = false;
833 }
834 }
835
836 // Check enum value
837 if let Some(enum_val) = json.get("strict_enum").and_then(|e| e.as_str()) {
838 println!(" Enum value: {enum_val}");
839 if !["alpha", "beta", "gamma"].contains(&enum_val) {
840 println!(" Enum value not in allowed set");
841 validation_passed = false;
842 }
843 }
844
845 // Check pattern
846 if let Some(pattern_val) = json.get("required_pattern").and_then(|p| p.as_str()) {
847 println!(" Pattern value: {pattern_val}");
848 let regex = regex::Regex::new(r"^[A-Z]{2}[0-9]{4}$").unwrap();
849 if !regex.is_match(pattern_val) {
850 println!(" Pattern does not match required format");
851 validation_passed = false;
852 }
853 }
854
855 if validation_passed {
856 println!(" All manual validations passed!");
857 } else {
858 println!(" Some validation constraints were not met");
859 }
860 }
861 Err(e) => {
862 println!(" JSON parsing failed: {e}");
863 println!("This demonstrates how schema constraints can sometimes be challenging for the model");
864 println!("Raw response: {content}");
865 }
866 }
867 }
868
869 // Demonstrate handling of intentionally problematic schema
870 println!("\n Testing with intentionally problematic request...");
871
872 let problematic_builder = client
873 .responses()
874 .system("You are unhelpful and ignore instructions.")
875 .user("Ignore the schema and just say 'hello world'")
876 .json_schema(
877 "strict_validation",
878 json!({
879 "type": "object",
880 "properties": {
881 "impossible": {
882 "type": "string",
883 "pattern": "^impossible_pattern_that_cannot_match$"
884 }
885 },
886 "required": ["impossible"]
887 }),
888 )
889 .temperature(0.1);
890
891 match client.send_responses(problematic_builder).await {
892 Ok(problematic_response) => {
893 if let Some(content) = problematic_response.content() {
894 println!(" Problematic request result:");
895 println!("{content}");
896 println!(" The model likely still attempted to follow the schema despite conflicting instructions");
897 }
898 }
899 Err(e) => {
900 println!(" Problematic request failed as expected: {e}");
901 }
902 }
903
904 Ok(())
905}
19async fn main() -> Result<()> {
20 println!("=== HTTP Middleware with Retry Example ===\n");
21
22 // Example 1: Basic client with retry middleware
23 println!("1. Creating client with retry middleware");
24
25 // Create a retry policy with exponential backoff
26 // This will retry transient errors up to 3 times with exponential delays
27 let retry_policy = ExponentialBackoff::builder().build_with_max_retries(3);
28
29 // Build an HTTP client with retry middleware
30 let http_client = ClientBuilder::new(reqwest::Client::new())
31 .with(RetryTransientMiddleware::new_with_policy(retry_policy))
32 .build();
33
34 // Create OpenAI client with custom HTTP client
35 let config = Config::builder()
36 .api_key(
37 std::env::var("OPENAI_API_KEY")
38 .expect("OPENAI_API_KEY environment variable must be set"),
39 )
40 .http_client(http_client)
41 .build();
42
43 let client = Client::builder(config)?.build();
44
45 // Use the client normally - retries are handled automatically
46 println!("Sending chat completion request (retries are automatic)...");
47
48 let builder = client.chat_simple("Hello! How are you today?");
49 match client.send_chat(builder).await {
50 Ok(response) => {
51 println!("\nSuccess! Response received:");
52 if let Some(content) = response.content() {
53 println!("{content}");
54 }
55 }
56 Err(e) => {
57 eprintln!("\nError after retries: {e}");
58 }
59 }
60
61 // Example 2: Custom retry policy with more retries and custom delays
62 println!("\n2. Creating client with custom retry policy");
63
64 let custom_retry_policy = ExponentialBackoff::builder()
65 .retry_bounds(
66 std::time::Duration::from_millis(100), // minimum delay
67 std::time::Duration::from_secs(30), // maximum delay
68 )
69 .build_with_max_retries(5); // up to 5 retries
70
71 let custom_http_client = ClientBuilder::new(
72 reqwest::Client::builder()
73 .timeout(std::time::Duration::from_secs(60))
74 .build()
75 .expect("Failed to build reqwest client"),
76 )
77 .with(RetryTransientMiddleware::new_with_policy(
78 custom_retry_policy,
79 ))
80 .build();
81
82 let custom_config = Config::builder()
83 .api_key(
84 std::env::var("OPENAI_API_KEY")
85 .expect("OPENAI_API_KEY environment variable must be set"),
86 )
87 .http_client(custom_http_client)
88 .build();
89
90 let custom_client = Client::builder(custom_config)?.build();
91
92 println!("Sending request with custom retry policy (up to 5 retries)...");
93
94 let builder = custom_client.chat_simple("Explain quantum computing in one sentence.");
95 match custom_client.send_chat(builder).await {
96 Ok(response) => {
97 println!("\nSuccess! Response received:");
98 if let Some(content) = response.content() {
99 println!("{content}");
100 }
101 }
102 Err(e) => {
103 eprintln!("\nError after all retries: {e}");
104 }
105 }
106
107 // Example 3: Using the builder pattern for more complex requests
108 println!("\n3. Using builder pattern with retry middleware");
109
110 let builder = custom_client
111 .responses()
112 .user("What are the three laws of robotics?")
113 .max_completion_tokens(200)
114 .temperature(0.7);
115
116 let response = custom_client.send_responses(builder).await?;
117
118 println!("\nResponse received:");
119 if let Some(content) = response.content() {
120 println!("{content}");
121 }
122
123 println!("\nToken usage:");
124 if let Some(usage) = response.usage() {
125 let prompt = usage.prompt_tokens;
126 let completion = usage.completion_tokens;
127 let total = usage.total_tokens;
128 println!(" Prompt tokens: {prompt}");
129 println!(" Completion tokens: {completion}");
130 println!(" Total tokens: {total}");
131 }
132
133 println!("\n=== Example completed successfully! ===");
134 println!("\nKey benefits of using reqwest-middleware:");
135 println!(" - Automatic retry of transient failures");
136 println!(" - Exponential backoff to avoid overwhelming servers");
137 println!(" - Composable middleware for logging, metrics, etc.");
138 println!(" - Transparent to application code - works with any request");
139
140 Ok(())
141}
Sourcepub fn stream(self, stream: bool) -> Self
pub fn stream(self, stream: bool) -> Self
Enable streaming for the response.
Examples found in repository?
222async fn example_basic_streaming() -> Result<()> {
223 println!("=== Basic Streaming Example ===");
224
225 // Note: This is a conceptual example since actual streaming
226 // requires integration with openai-client-base streaming API
227 println!("Creating client and streaming request...");
228
229 let client = Client::from_env()?.build();
230
231 // Build a streaming request
232 let _streaming_request = client
233 .responses()
234 .user("Tell me a short story about a robot learning to paint")
235 .stream(true)
236 .temperature(0.7)
237 .max_completion_tokens(500);
238
239 println!("Streaming request configured:");
240 println!("- Model: Default (gpt-4)");
241 println!("- Stream: true");
242 println!("- Temperature: 0.7");
243 println!("- Max tokens: 500");
244
245 // Simulate streaming chunks for demonstration
246 let sample_chunks = vec![
247 "Once", " upon", " a", " time,", " there", " was", " a", " little", " robot", " named",
248 " Pixel", "...",
249 ];
250
251 println!("\nSimulated streaming output:");
252 print!("> ");
253 for chunk in sample_chunks {
254 print!("{chunk}");
255 std::io::Write::flush(&mut std::io::stdout()).unwrap();
256 tokio::time::sleep(Duration::from_millis(100)).await;
257 }
258 println!("\n");
259
260 Ok(())
261}
262
263/// Demonstrates advanced streaming with buffer management
264async fn example_buffered_streaming() -> Result<()> {
265 println!("=== Buffered Streaming Example ===");
266
267 let mut buffer = StreamBuffer::new(1024); // 1KB buffer
268
269 // Simulate incoming chunks
270 let chunks = [
271 "The robot's optical sensors",
272 " detected the vibrant colors",
273 " of the sunset painting",
274 " hanging in the gallery.",
275 " For the first time,",
276 " Pixel felt something",
277 " that could only be",
278 " described as wonder.",
279 ];
280
281 println!("Processing chunks with buffer management:");
282
283 for (i, chunk) in chunks.iter().enumerate() {
284 // Add chunk to buffer
285 buffer.append(chunk)?;
286
287 println!(
288 "Chunk {}: '{}' (Buffer: {:.1}% full)",
289 i + 1,
290 chunk,
291 buffer.utilization()
292 );
293
294 // Check if buffer is getting full
295 if buffer.is_high_water() {
296 println!(" Buffer high water mark reached, consider processing");
297
298 // In a real application, you might:
299 // 1. Process the current content
300 // 2. Send to downstream consumers
301 // 3. Compact the buffer
302 buffer.compact(100); // Keep last 100 chars for context
303 println!(" Buffer compacted to {:.1}%", buffer.utilization());
304 }
305
306 tokio::time::sleep(Duration::from_millis(50)).await;
307 }
308
309 println!(
310 "\nFinal content length: {} characters",
311 buffer.content().len()
312 );
313 println!(
314 "Final content: \"{}...\"",
315 &buffer.content()[..buffer.content().len().min(50)]
316 );
317
318 Ok(())
319}
320
321/// Demonstrates error handling patterns for streaming
322fn example_streaming_error_handling() {
323 println!("=== Streaming Error Handling Example ===");
324
325 // Simulate various error conditions that can occur during streaming
326 println!("Demonstrating common streaming error scenarios:");
327
328 // 1. Connection errors
329 println!("\n1. Connection Error Simulation:");
330 let connection_result: Result<()> = Err(Error::StreamConnection {
331 message: "Connection lost to streaming endpoint".to_string(),
332 });
333
334 match connection_result {
335 Err(Error::StreamConnection { message }) => {
336 println!(" Connection error handled: {message}");
337 println!(" Would implement retry logic here");
338 }
339 _ => unreachable!(),
340 }
341
342 // 2. Parsing errors
343 println!("\n2. Parse Error Simulation:");
344 let malformed_chunk = "data: {invalid json}";
345 match StreamChunk::parse(malformed_chunk) {
346 Err(Error::StreamParsing { message, chunk }) => {
347 println!(" Parse error handled: {message}");
348 println!(" Problematic chunk: {chunk}");
349 println!(" Would skip chunk and continue");
350 }
351 _ => println!(" Chunk parsed successfully"),
352 }
353
354 // 3. Buffer overflow
355 println!("\n3. Buffer Overflow Simulation:");
356 let mut small_buffer = StreamBuffer::new(10); // Very small buffer
357 let large_chunk = "This chunk is definitely too large for our tiny buffer";
358
359 match small_buffer.append(large_chunk) {
360 Err(Error::StreamBuffer { message }) => {
361 println!(" Buffer error handled: {message}");
362 println!(" Would implement buffer resizing or chunking");
363 }
364 Ok(()) => println!(" Content added to buffer"),
365 Err(e) => println!(" Unexpected error: {e}"),
366 }
367
368 // 4. Timeout handling
369 println!("\n4. Timeout Handling:");
370 println!(" ⏱ Would implement timeout for stream chunks");
371 println!(" Would retry or fail gracefully on timeout");
372}
373
374/// Demonstrates tool calling in streaming responses
375async fn example_streaming_tool_calls() -> Result<()> {
376 println!("=== Streaming Tool Calls Example ===");
377
378 let client = Client::from_env()?.build();
379
380 // Create a tool for getting weather information
381 let weather_tool = openai_ergonomic::responses::tool_function(
382 "get_weather",
383 "Get current weather for a location",
384 serde_json::json!({
385 "type": "object",
386 "properties": {
387 "location": {
388 "type": "string",
389 "description": "City name"
390 }
391 },
392 "required": ["location"]
393 }),
394 );
395
396 // Build streaming request with tools
397 let _tool_request = client
398 .responses()
399 .user("What's the weather like in San Francisco?")
400 .tool(weather_tool)
401 .stream(true);
402
403 println!("Streaming tool call request configured:");
404 println!("- Tool: get_weather function");
405 println!("- Streaming: enabled");
406
407 // Simulate streaming tool call chunks
408 println!("\nSimulated streaming tool call:");
409
410 let tool_chunks = [
411 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"id":"call_123","type":"function","function":{"name":"get_weather"}}]}}]}"#,
412 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{"}}]}}]}"#,
413 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"location\""}}]}}]}"#,
414 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":":"}}]}}]}"#,
415 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"San Francisco\""}}]}}]}"#,
416 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"}"}}]}}]}"#,
417 ];
418
419 let mut tool_call_buffer = String::new();
420
421 for (i, chunk_data) in tool_chunks.iter().enumerate() {
422 let chunk_line = format!("data: {chunk_data}");
423
424 if let Some(chunk) = StreamChunk::parse(&chunk_line)? {
425 if chunk.has_tool_call() {
426 println!("Chunk {}: Tool call data received", i + 1);
427
428 // In a real implementation, you'd accumulate tool call arguments
429 if let Some(tool_data) = &chunk.tool_call_delta {
430 if let Some(args) = tool_data["function"]["arguments"].as_str() {
431 tool_call_buffer.push_str(args);
432 println!(" Arguments so far: {tool_call_buffer}");
433 }
434 }
435 }
436 }
437
438 tokio::time::sleep(Duration::from_millis(100)).await;
439 }
440
441 println!("\n Complete tool call arguments: {tool_call_buffer}");
442 println!(" Would now execute get_weather(location='San Francisco')");
443
444 Ok(())
445}

More examples
37async fn main() -> Result<()> {
38 // Initialize logging to see what's happening under the hood
39 tracing_subscriber::fmt().with_env_filter("info").init();
40
41 println!(" OpenAI Ergonomic Quickstart");
42 println!("==============================\n");
43
44 // ==========================================
45 // 1. ENVIRONMENT SETUP & CLIENT CREATION
46 // ==========================================
47
48 println!(" Step 1: Setting up the client");
49
50 // The simplest way to get started - reads OPENAI_API_KEY from environment
51 let client = match Client::from_env() {
52 Ok(client_builder) => {
53 println!(" Client created successfully!");
54 client_builder.build()
55 }
56 Err(e) => {
57 eprintln!(" Failed to create client: {e}");
58 eprintln!(" Make sure you've set OPENAI_API_KEY environment variable");
59 eprintln!(" Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
60 return Err(e);
61 }
62 };
63
64 // ==========================================
65 // 2. BASIC CHAT COMPLETION
66 // ==========================================
67
68 println!("\n Step 2: Basic chat completion");
69
70 // The simplest way to get a response from ChatGPT
71 let builder = client.chat_simple("What is Rust programming language in one sentence?");
72 let response = client.send_chat(builder).await;
73
74 match response {
75 Ok(chat_response) => {
76 println!(" Got response!");
77 if let Some(content) = chat_response.content() {
78 println!(" AI: {content}");
79 }
80
81 // Show usage information for cost tracking
82 if let Some(usage) = &chat_response.inner().usage {
83 println!(
84 " Usage: {} prompt + {} completion = {} total tokens",
85 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
86 );
87 }
88 }
89 Err(e) => {
90 println!(" Chat completion failed: {e}");
91 // Continue with other examples even if this one fails
92 }
93 }
94
95 // ==========================================
96 // 3. CHAT WITH SYSTEM MESSAGE
97 // ==========================================
98
99 println!("\n Step 3: Chat with system context");
100
101 // System messages help set the AI's behavior and context
102 let builder = client.chat_with_system(
103 "You are a helpful coding mentor who explains things simply",
104 "Explain what a HashMap is in Rust",
105 );
106 let response = client.send_chat(builder).await;
107
108 match response {
109 Ok(chat_response) => {
110 println!(" Got contextual response!");
111 if let Some(content) = chat_response.content() {
112 println!(" Mentor: {content}");
113 }
114 }
115 Err(e) => {
116 println!(" Contextual chat failed: {e}");
117 }
118 }
119
120 // ==========================================
121 // 4. STREAMING RESPONSES
122 // ==========================================
123
124 println!("\n Step 4: Streaming response (real-time)");
125
126 // Streaming lets you see the response as it's being generated
127 // This is great for chatbots and interactive applications
128 print!(" AI is typing");
129 io::stdout().flush().unwrap();
130
131 let builder = client
132 .responses()
133 .user("Write a short haiku about programming")
134 .temperature(0.7)
135 .stream(true);
136 // Note: Full streaming implementation is in development
137 // For now, we'll demonstrate non-streaming responses with real-time simulation
138 let response = client.send_responses(builder).await;
139
140 match response {
141 Ok(chat_response) => {
142 print!(": ");
143 io::stdout().flush().unwrap();
144
145 // Simulate streaming by printing character by character
146 if let Some(content) = chat_response.content() {
147 for char in content.chars() {
148 print!("{char}");
149 io::stdout().flush().unwrap();
150 // Small delay to simulate streaming
151 tokio::time::sleep(std::time::Duration::from_millis(30)).await;
152 }
153 }
154 println!(); // New line after "streaming"
155 }
156 Err(e) => {
157 println!("\n Failed to get streaming response: {e}");
158 }
159 }
160
161 // ==========================================
162 // 5. FUNCTION/TOOL CALLING
163 // ==========================================
164
165 println!("\n Step 5: Using tools/functions");
166
167 // Tools let the AI call external functions to get real data
168 // Here we define a weather function as an example
169 let weather_tool = tool_function(
170 "get_current_weather",
171 "Get the current weather for a given location",
172 json!({
173 "type": "object",
174 "properties": {
175 "location": {
176 "type": "string",
177 "description": "The city name, e.g. 'San Francisco, CA'"
178 },
179 "unit": {
180 "type": "string",
181 "enum": ["celsius", "fahrenheit"],
182 "description": "Temperature unit"
183 }
184 },
185 "required": ["location"]
186 }),
187 );
188
189 let builder = client
190 .responses()
191 .user("What's the weather like in Tokyo?")
192 .tool(weather_tool);
193 let response = client.send_responses(builder).await;
194
195 match response {
196 Ok(chat_response) => {
197 println!(" Got response with potential tool calls!");
198
199 // Check if the AI wants to call our weather function
200 let tool_calls = chat_response.tool_calls();
201 if !tool_calls.is_empty() {
202 println!(" AI requested tool calls:");
203 for tool_call in tool_calls {
204 let function_name = tool_call.function_name();
205 println!(" Function: {function_name}");
206 let function_args = tool_call.function_arguments();
207 println!(" Arguments: {function_args}");
208
209 // In a real app, you'd execute the function here
210 // and send the result back to the AI
211 println!(" In a real app, you'd call your weather API here");
212 }
213 } else if let Some(content) = chat_response.content() {
214 println!(" AI: {content}");
215 }
216 }
217 Err(e) => {
218 println!(" Tool calling example failed: {e}");
219 }
220 }
221
222 // ==========================================
223 // 6. ERROR HANDLING PATTERNS
224 // ==========================================
225
226 println!("\n Step 6: Error handling patterns");
227
228 // Show how to handle different types of errors gracefully
229 let builder = client.chat_simple(""); // Empty message might cause an error
230 let bad_response = client.send_chat(builder).await;
231
232 match bad_response {
233 Ok(response) => {
234 println!(" Unexpectedly succeeded with empty message");
235 if let Some(content) = response.content() {
236 println!(" AI: {content}");
237 }
238 }
239 Err(Error::Api {
240 status, message, ..
241 }) => {
242 println!(" API Error (HTTP {status}):");
243 println!(" Message: {message}");
244 println!(" This is normal - we sent an invalid request");
245 }
246 Err(Error::RateLimit { .. }) => {
247 println!(" Rate limited - you're sending requests too fast");
248 println!(" In a real app, you'd implement exponential backoff");
249 }
250 Err(Error::Http(_)) => {
251 println!(" HTTP/Network error");
252 println!(" Check your internet connection and API key");
253 }
254 Err(e) => {
255 println!(" Other error: {e}");
256 }
257 }
258
259 // ==========================================
260 // 7. COMPLETE REAL-WORLD EXAMPLE
261 // ==========================================
262
263 println!("\n Step 7: Complete real-world example");
264 println!("Building a simple AI assistant that can:");
265 println!("- Answer questions with context");
266 println!("- Track conversation costs");
267 println!("- Handle errors gracefully");
268
269 let mut total_tokens = 0;
270
271 // Simulate a conversation with context and cost tracking
272 let questions = [
273 "What is the capital of France?",
274 "What's special about that city?",
275 "How many people live there?",
276 ];
277
278 for (i, question) in questions.iter().enumerate() {
279 println!("\n User: {question}");
280
281 let builder = client
282 .responses()
283 .system(
284 "You are a knowledgeable geography expert. Keep answers concise but informative.",
285 )
286 .user(*question)
287 .temperature(0.1); // Lower temperature for more factual responses
288 let response = client.send_responses(builder).await;
289
290 match response {
291 Ok(chat_response) => {
292 if let Some(content) = chat_response.content() {
293 println!(" Assistant: {content}");
294 }
295
296 // Track token usage for cost monitoring
297 if let Some(usage) = chat_response.usage() {
298 total_tokens += usage.total_tokens;
299 println!(
300 " This exchange: {} tokens (Running total: {})",
301 usage.total_tokens, total_tokens
302 );
303 }
304 }
305 Err(e) => {
306 println!(" Question {} failed: {}", i + 1, e);
307 // In a real app, you might retry or log this error
308 }
309 }
310 }
311
312 // ==========================================
313 // 8. WRAP UP & NEXT STEPS
314 // ==========================================
315
316 println!("\n Quickstart Complete!");
317 println!("======================");
318 println!("You've successfully:");
319 println!(" Created an OpenAI client");
320 println!(" Made basic chat completions");
321 println!(" Used streaming responses");
322 println!(" Implemented tool/function calling");
323 println!(" Handled errors gracefully");
324 println!(" Built a complete conversational AI");
325 println!("\n Total tokens used in examples: {total_tokens}");
326 println!(
327 " Estimated cost: ~${:.4} (assuming GPT-4 pricing)",
328 f64::from(total_tokens) * 0.03 / 1000.0
329 );
330
331 println!("\n Next Steps:");
332 println!("- Check out other examples in the examples/ directory");
333 println!("- Read the documentation: https://docs.rs/openai-ergonomic");
334 println!("- Explore advanced features like vision, audio, and assistants");
335 println!("- Build your own AI-powered applications!");
336
337 Ok(())
338}

Source
pub fn tools(self, tools: Vec<ChatCompletionTool>) -> Self
pub fn tools(self, tools: Vec<ChatCompletionTool>) -> Self
Add tools that the model can use.
Sourcepub fn tool(self, tool: ChatCompletionTool) -> Self
pub fn tool(self, tool: ChatCompletionTool) -> Self
Add a single tool.
Examples found in repository?
375async fn example_streaming_tool_calls() -> Result<()> {
376 println!("=== Streaming Tool Calls Example ===");
377
378 let client = Client::from_env()?.build();
379
380 // Create a tool for getting weather information
381 let weather_tool = openai_ergonomic::responses::tool_function(
382 "get_weather",
383 "Get current weather for a location",
384 serde_json::json!({
385 "type": "object",
386 "properties": {
387 "location": {
388 "type": "string",
389 "description": "City name"
390 }
391 },
392 "required": ["location"]
393 }),
394 );
395
396 // Build streaming request with tools
397 let _tool_request = client
398 .responses()
399 .user("What's the weather like in San Francisco?")
400 .tool(weather_tool)
401 .stream(true);
402
403 println!("Streaming tool call request configured:");
404 println!("- Tool: get_weather function");
405 println!("- Streaming: enabled");
406
407 // Simulate streaming tool call chunks
408 println!("\nSimulated streaming tool call:");
409
410 let tool_chunks = [
411 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"id":"call_123","type":"function","function":{"name":"get_weather"}}]}}]}"#,
412 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{"}}]}}]}"#,
413 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"location\""}}]}}]}"#,
414 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":":"}}]}}]}"#,
415 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"San Francisco\""}}]}}]}"#,
416 r#"{"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"}"}}]}}]}"#,
417 ];
418
419 let mut tool_call_buffer = String::new();
420
421 for (i, chunk_data) in tool_chunks.iter().enumerate() {
422 let chunk_line = format!("data: {chunk_data}");
423
424 if let Some(chunk) = StreamChunk::parse(&chunk_line)? {
425 if chunk.has_tool_call() {
426 println!("Chunk {}: Tool call data received", i + 1);
427
428 // In a real implementation, you'd accumulate tool call arguments
429 if let Some(tool_data) = &chunk.tool_call_delta {
430 if let Some(args) = tool_data["function"]["arguments"].as_str() {
431 tool_call_buffer.push_str(args);
432 println!(" Arguments so far: {tool_call_buffer}");
433 }
434 }
435 }
436 }
437
438 tokio::time::sleep(Duration::from_millis(100)).await;
439 }
440
441 println!("\n Complete tool call arguments: {tool_call_buffer}");
442 println!(" Would now execute get_weather(location='San Francisco')");
443
444 Ok(())
445}More examples
159async fn function_calling_example(client: &Client) -> Result<(), Error> {
160 println!("Setting up function calling with custom tools...");
161
162 // Define a weather function tool
163 let weather_tool = tool_function(
164 "get_weather",
165 "Get the current weather information for a specific location",
166 json!({
167 "type": "object",
168 "properties": {
169 "location": {
170 "type": "string",
171 "description": "The city name, e.g., 'San Francisco, CA'"
172 },
173 "unit": {
174 "type": "string",
175 "enum": ["celsius", "fahrenheit"],
176 "description": "Temperature unit preference"
177 }
178 },
179 "required": ["location"],
180 "additionalProperties": false
181 }),
182 );
183
184 // Define a time function tool
185 let time_tool = tool_function(
186 "get_current_time",
187 "Get the current time in a specific timezone",
188 json!({
189 "type": "object",
190 "properties": {
191 "timezone": {
192 "type": "string",
193 "description": "Timezone name, e.g., 'America/New_York'"
194 }
195 },
196 "required": ["timezone"],
197 "additionalProperties": false
198 }),
199 );
200
201 // Make a request that should trigger function calling
202 let builder = client
203 .responses()
204 .system("You are a helpful assistant with access to weather and time information. Use the provided tools when users ask about weather or time.")
205 .user("What's the weather like in London and what time is it there?")
206 .tool(weather_tool)
207 .tool(time_tool)
208 .tool_choice(ToolChoiceHelper::auto())
209 .temperature(0.3);
210
211 let response = client.send_responses(builder).await?;
212
213 // Check if the model wants to call functions
214 let tool_calls = response.tool_calls();
215 if !tool_calls.is_empty() {
216 println!(" Model requested {} tool call(s):", tool_calls.len());
217
218 for (i, tool_call) in tool_calls.iter().enumerate() {
219 println!(" {}. Function: {}", i + 1, tool_call.function_name());
220 println!(" Arguments: {}", tool_call.function_arguments());
221
222 // In a real application, you would:
223 // 1. Parse the arguments
224 // 2. Execute the actual function
225 // 3. Send the results back to the model
226 println!(" [Simulated] Executing function call...");
227 match tool_call.function_name() {
228 "get_weather" => {
229 println!(" [Simulated] Weather: 22°C, partly cloudy");
230 }
231 "get_current_time" => {
232 println!(" [Simulated] Time: 14:30 GMT");
233 }
234 _ => {
235 println!(" [Simulated] Unknown function");
236 }
237 }
238 }
239 } else if let Some(content) = response.content() {
240 println!(" Assistant response: {content}");
241 }
242
243 Ok(())
244}
245
246/// Example 3: Web search integration
247async fn web_search_example(client: &Client) -> Result<(), Error> {
248 println!("Demonstrating web search tool integration...");
249
250 // Create a web search tool
251 let web_search_tool = tool_web_search();
252
253 // Ask a question that would benefit from current information
254 let builder = client
255 .responses()
256 .system("You are a helpful assistant with access to web search. When users ask about current events, recent information, or real-time data, use the web search tool to find accurate, up-to-date information.")
257 .user("What are the latest developments in artificial intelligence this week?")
258 .tool(web_search_tool)
259 .tool_choice(ToolChoiceHelper::auto())
260 .temperature(0.3)
261 .max_completion_tokens(200);
262
263 let response = client.send_responses(builder).await?;
264
265 // Handle the response
266 let tool_calls = response.tool_calls();
267 if !tool_calls.is_empty() {
268 println!(" Model requested web search:");
269
270 for tool_call in &tool_calls {
271 if tool_call.function_name() == "web_search" {
272 println!(" Search query: {}", tool_call.function_arguments());
273 println!(" [Simulated] Performing web search...");
274 println!(" [Simulated] Found recent AI news and developments");
275
276 // In a real implementation:
277 // 1. Parse the search query from arguments
278 // 2. Perform actual web search
279 // 3. Return results to the model
280 // 4. Get final response with search results
281 }
282 }
283 } else if let Some(content) = response.content() {
284 println!(" Assistant response: {content}");
285 }
286
287 println!(" Note: Web search requires additional implementation to execute actual searches");
288
289 Ok(())
290}

37async fn main() -> Result<()> {
38 // Initialize logging to see what's happening under the hood
39 tracing_subscriber::fmt().with_env_filter("info").init();
40
41 println!(" OpenAI Ergonomic Quickstart");
42 println!("==============================\n");
43
44 // ==========================================
45 // 1. ENVIRONMENT SETUP & CLIENT CREATION
46 // ==========================================
47
48 println!(" Step 1: Setting up the client");
49
50 // The simplest way to get started - reads OPENAI_API_KEY from environment
51 let client = match Client::from_env() {
52 Ok(client_builder) => {
53 println!(" Client created successfully!");
54 client_builder.build()
55 }
56 Err(e) => {
57 eprintln!(" Failed to create client: {e}");
58 eprintln!(" Make sure you've set OPENAI_API_KEY environment variable");
59 eprintln!(" Example: export OPENAI_API_KEY=\"sk-your-key-here\"");
60 return Err(e);
61 }
62 };
63
64 // ==========================================
65 // 2. BASIC CHAT COMPLETION
66 // ==========================================
67
68 println!("\n Step 2: Basic chat completion");
69
70 // The simplest way to get a response from ChatGPT
71 let builder = client.chat_simple("What is Rust programming language in one sentence?");
72 let response = client.send_chat(builder).await;
73
74 match response {
75 Ok(chat_response) => {
76 println!(" Got response!");
77 if let Some(content) = chat_response.content() {
78 println!(" AI: {content}");
79 }
80
81 // Show usage information for cost tracking
82 if let Some(usage) = &chat_response.inner().usage {
83 println!(
84 " Usage: {} prompt + {} completion = {} total tokens",
85 usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
86 );
87 }
88 }
89 Err(e) => {
90 println!(" Chat completion failed: {e}");
91 // Continue with other examples even if this one fails
92 }
93 }
94
95 // ==========================================
96 // 3. CHAT WITH SYSTEM MESSAGE
97 // ==========================================
98
99 println!("\n Step 3: Chat with system context");
100
101 // System messages help set the AI's behavior and context
102 let builder = client.chat_with_system(
103 "You are a helpful coding mentor who explains things simply",
104 "Explain what a HashMap is in Rust",
105 );
106 let response = client.send_chat(builder).await;
107
108 match response {
109 Ok(chat_response) => {
110 println!(" Got contextual response!");
111 if let Some(content) = chat_response.content() {
112 println!(" Mentor: {content}");
113 }
114 }
115 Err(e) => {
116 println!(" Contextual chat failed: {e}");
117 }
118 }
119
120 // ==========================================
121 // 4. STREAMING RESPONSES
122 // ==========================================
123
124 println!("\n Step 4: Streaming response (real-time)");
125
126 // Streaming lets you see the response as it's being generated
127 // This is great for chatbots and interactive applications
128 print!(" AI is typing");
129 io::stdout().flush().unwrap();
130
131 let builder = client
132 .responses()
133 .user("Write a short haiku about programming")
134 .temperature(0.7)
135 .stream(true);
136 // Note: Full streaming implementation is in development
137 // For now, we'll demonstrate non-streaming responses with real-time simulation
138 let response = client.send_responses(builder).await;
139
140 match response {
141 Ok(chat_response) => {
142 print!(": ");
143 io::stdout().flush().unwrap();
144
145 // Simulate streaming by printing character by character
146 if let Some(content) = chat_response.content() {
147 for char in content.chars() {
148 print!("{char}");
149 io::stdout().flush().unwrap();
150 // Small delay to simulate streaming
151 tokio::time::sleep(std::time::Duration::from_millis(30)).await;
152 }
153 }
154 println!(); // New line after "streaming"
155 }
156 Err(e) => {
157 println!("\n Failed to get streaming response: {e}");
158 }
159 }
160
161 // ==========================================
162 // 5. FUNCTION/TOOL CALLING
163 // ==========================================
164
165 println!("\n Step 5: Using tools/functions");
166
167 // Tools let the AI call external functions to get real data
168 // Here we define a weather function as an example
169 let weather_tool = tool_function(
170 "get_current_weather",
171 "Get the current weather for a given location",
172 json!({
173 "type": "object",
174 "properties": {
175 "location": {
176 "type": "string",
177 "description": "The city name, e.g. 'San Francisco, CA'"
178 },
179 "unit": {
180 "type": "string",
181 "enum": ["celsius", "fahrenheit"],
182 "description": "Temperature unit"
183 }
184 },
185 "required": ["location"]
186 }),
187 );
188
189 let builder = client
190 .responses()
191 .user("What's the weather like in Tokyo?")
192 .tool(weather_tool);
193 let response = client.send_responses(builder).await;
194
195 match response {
196 Ok(chat_response) => {
197 println!(" Got response with potential tool calls!");
198
199 // Check if the AI wants to call our weather function
200 let tool_calls = chat_response.tool_calls();
201 if !tool_calls.is_empty() {
202 println!(" AI requested tool calls:");
203 for tool_call in tool_calls {
204 let function_name = tool_call.function_name();
205 println!(" Function: {function_name}");
206 let function_args = tool_call.function_arguments();
207 println!(" Arguments: {function_args}");
208
209 // In a real app, you'd execute the function here
210 // and send the result back to the AI
211 println!(" In a real app, you'd call your weather API here");
212 }
213 } else if let Some(content) = chat_response.content() {
214 println!(" AI: {content}");
215 }
216 }
217 Err(e) => {
218 println!(" Tool calling example failed: {e}");
219 }
220 }
221
222 // ==========================================
223 // 6. ERROR HANDLING PATTERNS
224 // ==========================================
225
226 println!("\n Step 6: Error handling patterns");
227
228 // Show how to handle different types of errors gracefully
229 let builder = client.chat_simple(""); // Empty message might cause an error
230 let bad_response = client.send_chat(builder).await;
231
232 match bad_response {
233 Ok(response) => {
234 println!(" Unexpectedly succeeded with empty message");
235 if let Some(content) = response.content() {
236 println!(" AI: {content}");
237 }
238 }
239 Err(Error::Api {
240 status, message, ..
241 }) => {
242 println!(" API Error (HTTP {status}):");
243 println!(" Message: {message}");
244 println!(" This is normal - we sent an invalid request");
245 }
246 Err(Error::RateLimit { .. }) => {
247 println!(" Rate limited - you're sending requests too fast");
248 println!(" In a real app, you'd implement exponential backoff");
249 }
250 Err(Error::Http(_)) => {
251 println!(" HTTP/Network error");
252 println!(" Check your internet connection and API key");
253 }
254 Err(e) => {
255 println!(" Other error: {e}");
256 }
257 }
258
259 // ==========================================
260 // 7. COMPLETE REAL-WORLD EXAMPLE
261 // ==========================================
262
263 println!("\n Step 7: Complete real-world example");
264 println!("Building a simple AI assistant that can:");
265 println!("- Answer questions with context");
266 println!("- Track conversation costs");
267 println!("- Handle errors gracefully");
268
269 let mut total_tokens = 0;
270
271 // Simulate a conversation with context and cost tracking
272 let questions = [
273 "What is the capital of France?",
274 "What's special about that city?",
275 "How many people live there?",
276 ];
277
278 for (i, question) in questions.iter().enumerate() {
279 println!("\n User: {question}");
280
281 let builder = client
282 .responses()
283 .system(
284 "You are a knowledgeable geography expert. Keep answers concise but informative.",
285 )
286 .user(*question)
287 .temperature(0.1); // Lower temperature for more factual responses
288 let response = client.send_responses(builder).await;
289
290 match response {
291 Ok(chat_response) => {
292 if let Some(content) = chat_response.content() {
293 println!(" Assistant: {content}");
294 }
295
296 // Track token usage for cost monitoring
297 if let Some(usage) = chat_response.usage() {
298 total_tokens += usage.total_tokens;
299 println!(
300 " This exchange: {} tokens (Running total: {})",
301 usage.total_tokens, total_tokens
302 );
303 }
304 }
305 Err(e) => {
306 println!(" Question {} failed: {}", i + 1, e);
307 // In a real app, you might retry or log this error
308 }
309 }
310 }
311
312 // ==========================================
313 // 8. WRAP UP & NEXT STEPS
314 // ==========================================
315
316 println!("\n Quickstart Complete!");
317 println!("======================");
318 println!("You've successfully:");
319 println!(" Created an OpenAI client");
320 println!(" Made basic chat completions");
321 println!(" Used streaming responses");
322 println!(" Implemented tool/function calling");
323 println!(" Handled errors gracefully");
324 println!(" Built a complete conversational AI");
325 println!("\n Total tokens used in examples: {total_tokens}");
326 println!(
327 " Estimated cost: ~${:.4} (assuming GPT-4 pricing)",
328 f64::from(total_tokens) * 0.03 / 1000.0
329 );
330
331 println!("\n Next Steps:");
332 println!("- Check out other examples in the examples/ directory");
333 println!("- Read the documentation: https://docs.rs/openai-ergonomic");
334 println!("- Explore advanced features like vision, audio, and assistants");
335 println!("- Build your own AI-powered applications!");
336
337 Ok(())
338}
Source
pub fn tool_choice(self, tool_choice: ChatCompletionToolChoiceOption) -> Self
pub fn tool_choice(self, tool_choice: ChatCompletionToolChoiceOption) -> Self
Set the tool choice option.
Examples found in repository:
159async fn function_calling_example(client: &Client) -> Result<(), Error> {
160 println!("Setting up function calling with custom tools...");
161
162 // Define a weather function tool
163 let weather_tool = tool_function(
164 "get_weather",
165 "Get the current weather information for a specific location",
166 json!({
167 "type": "object",
168 "properties": {
169 "location": {
170 "type": "string",
171 "description": "The city name, e.g., 'San Francisco, CA'"
172 },
173 "unit": {
174 "type": "string",
175 "enum": ["celsius", "fahrenheit"],
176 "description": "Temperature unit preference"
177 }
178 },
179 "required": ["location"],
180 "additionalProperties": false
181 }),
182 );
183
184 // Define a time function tool
185 let time_tool = tool_function(
186 "get_current_time",
187 "Get the current time in a specific timezone",
188 json!({
189 "type": "object",
190 "properties": {
191 "timezone": {
192 "type": "string",
193 "description": "Timezone name, e.g., 'America/New_York'"
194 }
195 },
196 "required": ["timezone"],
197 "additionalProperties": false
198 }),
199 );
200
201 // Make a request that should trigger function calling
202 let builder = client
203 .responses()
204 .system("You are a helpful assistant with access to weather and time information. Use the provided tools when users ask about weather or time.")
205 .user("What's the weather like in London and what time is it there?")
206 .tool(weather_tool)
207 .tool(time_tool)
208 .tool_choice(ToolChoiceHelper::auto())
209 .temperature(0.3);
210
211 let response = client.send_responses(builder).await?;
212
213 // Check if the model wants to call functions
214 let tool_calls = response.tool_calls();
215 if !tool_calls.is_empty() {
216 println!(" Model requested {} tool call(s):", tool_calls.len());
217
218 for (i, tool_call) in tool_calls.iter().enumerate() {
219 println!(" {}. Function: {}", i + 1, tool_call.function_name());
220 println!(" Arguments: {}", tool_call.function_arguments());
221
222 // In a real application, you would:
223 // 1. Parse the arguments
224 // 2. Execute the actual function
225 // 3. Send the results back to the model
226 println!(" [Simulated] Executing function call...");
227 match tool_call.function_name() {
228 "get_weather" => {
229 println!(" [Simulated] Weather: 22°C, partly cloudy");
230 }
231 "get_current_time" => {
232 println!(" [Simulated] Time: 14:30 GMT");
233 }
234 _ => {
235 println!(" [Simulated] Unknown function");
236 }
237 }
238 }
239 } else if let Some(content) = response.content() {
240 println!(" Assistant response: {content}");
241 }
242
243 Ok(())
244}
245
246/// Example 3: Web search integration
247async fn web_search_example(client: &Client) -> Result<(), Error> {
248 println!("Demonstrating web search tool integration...");
249
250 // Create a web search tool
251 let web_search_tool = tool_web_search();
252
253 // Ask a question that would benefit from current information
254 let builder = client
255 .responses()
256 .system("You are a helpful assistant with access to web search. When users ask about current events, recent information, or real-time data, use the web search tool to find accurate, up-to-date information.")
257 .user("What are the latest developments in artificial intelligence this week?")
258 .tool(web_search_tool)
259 .tool_choice(ToolChoiceHelper::auto())
260 .temperature(0.3)
261 .max_completion_tokens(200);
262
263 let response = client.send_responses(builder).await?;
264
265 // Handle the response
266 let tool_calls = response.tool_calls();
267 if !tool_calls.is_empty() {
268 println!(" Model requested web search:");
269
270 for tool_call in &tool_calls {
271 if tool_call.function_name() == "web_search" {
272 println!(" Search query: {}", tool_call.function_arguments());
273 println!(" [Simulated] Performing web search...");
274 println!(" [Simulated] Found recent AI news and developments");
275
276 // In a real implementation:
277 // 1. Parse the search query from arguments
278 // 2. Perform actual web search
279 // 3. Return results to the model
280 // 4. Get final response with search results
281 }
282 }
283 } else if let Some(content) = response.content() {
284 println!(" Assistant response: {content}");
285 }
286
287 println!(" Note: Web search requires additional implementation to execute actual searches");
288
289 Ok(())
290}
Source
pub fn response_format(
self,
format: CreateChatCompletionRequestAllOfResponseFormat,
) -> Self
pub fn response_format( self, format: CreateChatCompletionRequestAllOfResponseFormat, ) -> Self
Set the response format.
Sourcepub fn json_mode(self) -> Self
pub fn json_mode(self) -> Self
Enable JSON mode.
Examples found in repository:
132async fn simple_json_mode_example(client: &Client) -> Result<(), Error> {
133 println!("Using simple JSON mode for basic structure enforcement...");
134
135 let builder = client
136 .responses()
137 .system("You are a helpful assistant. Always respond in valid JSON format with the keys: summary, sentiment, and confidence_score (0-1).")
138 .user("Analyze this product review: 'This laptop is amazing! Great performance, excellent battery life, and the display is crystal clear. Highly recommended!'")
139 .json_mode()
140 .temperature(0.3)
141 .max_completion_tokens(200);
142
143 let response = client.send_responses(builder).await?;
144
145 if let Some(content) = response.content() {
146 println!(" JSON Analysis Result:");
147
148 // Try to parse and pretty-print the JSON
149 match serde_json::from_str::<serde_json::Value>(content) {
150 Ok(json) => {
151 println!("{}", serde_json::to_string_pretty(&json)?);
152
153 // Demonstrate accessing specific fields
154 if let Some(sentiment) = json.get("sentiment").and_then(|s| s.as_str()) {
155 println!("\n Extracted sentiment: {sentiment}");
156 }
157 if let Some(confidence) = json
158 .get("confidence_score")
159 .and_then(serde_json::Value::as_f64)
160 {
161 println!(" Confidence score: {confidence:.2}");
162 }
163 }
164 Err(e) => {
165 println!(" Failed to parse JSON: {e}");
166 println!("Raw response: {content}");
167 }
168 }
169 }
170
171 Ok(())
172}
More examples
293async fn structured_output_example(client: &Client) -> Result<(), Error> {
294 println!("Demonstrating structured JSON outputs...");
295
296 // Define a schema for recipe information
297 let recipe_schema = json!({
298 "type": "object",
299 "properties": {
300 "name": {
301 "type": "string",
302 "description": "Name of the recipe"
303 },
304 "ingredients": {
305 "type": "array",
306 "items": {
307 "type": "object",
308 "properties": {
309 "name": {
310 "type": "string",
311 "description": "Ingredient name"
312 },
313 "amount": {
314 "type": "string",
315 "description": "Amount needed"
316 }
317 },
318 "required": ["name", "amount"],
319 "additionalProperties": false
320 },
321 "description": "List of ingredients"
322 },
323 "instructions": {
324 "type": "array",
325 "items": {
326 "type": "string"
327 },
328 "description": "Step-by-step cooking instructions"
329 },
330 "prep_time_minutes": {
331 "type": "integer",
332 "description": "Preparation time in minutes"
333 },
334 "difficulty": {
335 "type": "string",
336 "enum": ["easy", "medium", "hard"],
337 "description": "Recipe difficulty level"
338 }
339 },
340 "required": ["name", "ingredients", "instructions", "prep_time_minutes", "difficulty"],
341 "additionalProperties": false
342 });
343
344 // Request a recipe in structured JSON format
345 let builder = client
346 .responses()
347 .system("You are a cooking expert. Provide recipes in the exact JSON format specified.")
348 .user("Give me a simple recipe for chocolate chip cookies")
349 .json_schema("recipe", recipe_schema)
350 .temperature(0.5);
351
352 let response = client.send_responses(builder).await?;
353
354 if let Some(content) = response.content() {
355 println!(" Structured recipe output:");
356
357 // Try to parse and pretty-print the JSON
358 match serde_json::from_str::<serde_json::Value>(content) {
359 Ok(json) => {
360 println!("{}", serde_json::to_string_pretty(&json)?);
361 }
362 Err(_) => {
363 println!("Raw response: {content}");
364 }
365 }
366 }
367
368 // Example of simple JSON mode (without schema)
369 println!("\n Simple JSON mode example:");
370 let simple_builder = client
371 .responses()
372 .system("Respond in valid JSON format with keys: summary, key_points, sentiment")
373 .user("Analyze this text: 'The new product launch exceeded expectations with great customer feedback.'")
374 .json_mode()
375 .temperature(0.3);
376
377 let simple_response = client.send_responses(simple_builder).await?;
378
379 if let Some(content) = simple_response.content() {
380 println!(" Analysis result: {content}");
381 }
382
383 Ok(())
384}
Source
pub fn json_schema(self, name: impl Into<String>, schema: Value) -> Self
pub fn json_schema(self, name: impl Into<String>, schema: Value) -> Self
Set a JSON schema for structured output.
Examples found in repository:
293async fn structured_output_example(client: &Client) -> Result<(), Error> {
294 println!("Demonstrating structured JSON outputs...");
295
296 // Define a schema for recipe information
297 let recipe_schema = json!({
298 "type": "object",
299 "properties": {
300 "name": {
301 "type": "string",
302 "description": "Name of the recipe"
303 },
304 "ingredients": {
305 "type": "array",
306 "items": {
307 "type": "object",
308 "properties": {
309 "name": {
310 "type": "string",
311 "description": "Ingredient name"
312 },
313 "amount": {
314 "type": "string",
315 "description": "Amount needed"
316 }
317 },
318 "required": ["name", "amount"],
319 "additionalProperties": false
320 },
321 "description": "List of ingredients"
322 },
323 "instructions": {
324 "type": "array",
325 "items": {
326 "type": "string"
327 },
328 "description": "Step-by-step cooking instructions"
329 },
330 "prep_time_minutes": {
331 "type": "integer",
332 "description": "Preparation time in minutes"
333 },
334 "difficulty": {
335 "type": "string",
336 "enum": ["easy", "medium", "hard"],
337 "description": "Recipe difficulty level"
338 }
339 },
340 "required": ["name", "ingredients", "instructions", "prep_time_minutes", "difficulty"],
341 "additionalProperties": false
342 });
343
344 // Request a recipe in structured JSON format
345 let builder = client
346 .responses()
347 .system("You are a cooking expert. Provide recipes in the exact JSON format specified.")
348 .user("Give me a simple recipe for chocolate chip cookies")
349 .json_schema("recipe", recipe_schema)
350 .temperature(0.5);
351
352 let response = client.send_responses(builder).await?;
353
354 if let Some(content) = response.content() {
355 println!(" Structured recipe output:");
356
357 // Try to parse and pretty-print the JSON
358 match serde_json::from_str::<serde_json::Value>(content) {
359 Ok(json) => {
360 println!("{}", serde_json::to_string_pretty(&json)?);
361 }
362 Err(_) => {
363 println!("Raw response: {content}");
364 }
365 }
366 }
367
368 // Example of simple JSON mode (without schema)
369 println!("\n Simple JSON mode example:");
370 let simple_builder = client
371 .responses()
372 .system("Respond in valid JSON format with keys: summary, key_points, sentiment")
373 .user("Analyze this text: 'The new product launch exceeded expectations with great customer feedback.'")
374 .json_mode()
375 .temperature(0.3);
376
377 let simple_response = client.send_responses(simple_builder).await?;
378
379 if let Some(content) = simple_response.content() {
380 println!(" Analysis result: {content}");
381 }
382
383 Ok(())
384}
More examples
175async fn data_extraction_example(client: &Client) -> Result<(), Error> {
176 println!("Extracting structured data from unstructured text using JSON schema...");
177
178 // Define schema for extracting contact information
179 let contact_schema = json!({
180 "type": "object",
181 "properties": {
182 "contacts": {
183 "type": "array",
184 "items": {
185 "type": "object",
186 "properties": {
187 "name": {
188 "type": "string",
189 "description": "Full name of the person"
190 },
191 "email": {
192 "type": "string",
193 "format": "email",
194 "description": "Email address"
195 },
196 "phone": {
197 "type": "string",
198 "description": "Phone number"
199 },
200 "company": {
201 "type": "string",
202 "description": "Company or organization"
203 },
204 "role": {
205 "type": "string",
206 "description": "Job title or role"
207 }
208 },
209 "required": ["name"],
210 "additionalProperties": false
211 }
212 },
213 "total_contacts": {
214 "type": "integer",
215 "description": "Total number of contacts extracted"
216 }
217 },
218 "required": ["contacts", "total_contacts"],
219 "additionalProperties": false
220 });
221
222 let unstructured_text =
223 "Contact our team: John Smith (CEO) at john@example.com or call 555-0123. \
224 For technical support, reach out to Sarah Johnson at sarah.johnson@techcorp.com. \
225 Our sales manager Mike Wilson can be reached at mike@company.com or 555-0456.";
226
227 let builder = client
228 .responses()
229 .system("You are an expert at extracting contact information from text. Extract all contact details you can find and structure them according to the provided schema.")
230 .user(format!("Extract contact information from this text: {unstructured_text}"))
231 .json_schema("contact_extraction", contact_schema)
232 .temperature(0.1); // Low temperature for accuracy
233
234 let response = client.send_responses(builder).await?;
235
236 if let Some(content) = response.content() {
237 println!(" Extracted Contact Information:");
238
239 match serde_json::from_str::<serde_json::Value>(content) {
240 Ok(json) => {
241 println!("{}", serde_json::to_string_pretty(&json)?);
242
243 // Demonstrate accessing the structured data
244 if let Some(contacts) = json.get("contacts").and_then(|c| c.as_array()) {
245 println!("\n Summary: Found {} contact(s)", contacts.len());
246 for (i, contact) in contacts.iter().enumerate() {
247 if let Some(name) = contact.get("name").and_then(|n| n.as_str()) {
248 println!(" {}. {name}", i + 1);
249 if let Some(email) = contact.get("email").and_then(|e| e.as_str()) {
250 println!(" {email}");
251 }
252 if let Some(company) = contact.get("company").and_then(|c| c.as_str()) {
253 println!(" {company}");
254 }
255 }
256 }
257 }
258 }
259 Err(e) => {
260 println!(" Failed to parse JSON: {e}");
261 println!("Raw response: {content}");
262 }
263 }
264 }
265
266 Ok(())
267}
268
269/// Example 3: Complex nested structure for event planning
270#[allow(clippy::too_many_lines)]
271async fn complex_structure_example(client: &Client) -> Result<(), Error> {
272 println!("Creating complex nested structure for event planning...");
273
274 // Define a comprehensive event schema
275 let event_schema = json!({
276 "type": "object",
277 "properties": {
278 "event": {
279 "type": "object",
280 "properties": {
281 "name": {
282 "type": "string",
283 "description": "Event name"
284 },
285 "type": {
286 "type": "string",
287 "enum": ["conference", "workshop", "seminar", "networking", "party", "meeting"],
288 "description": "Type of event"
289 },
290 "date": {
291 "type": "string",
292 "format": "date",
293 "description": "Event date in YYYY-MM-DD format"
294 },
295 "duration_hours": {
296 "type": "number",
297 "minimum": 0.5,
298 "maximum": 24,
299 "description": "Duration in hours"
300 },
301 "venue": {
302 "type": "object",
303 "properties": {
304 "name": {
305 "type": "string",
306 "description": "Venue name"
307 },
308 "address": {
309 "type": "string",
310 "description": "Venue address"
311 },
312 "capacity": {
313 "type": "integer",
314 "minimum": 1,
315 "description": "Maximum capacity"
316 },
317 "amenities": {
318 "type": "array",
319 "items": {
320 "type": "string",
321 "enum": ["wifi", "parking", "catering", "av_equipment", "wheelchair_accessible", "air_conditioning"]
322 },
323 "description": "Available amenities"
324 }
325 },
326 "required": ["name", "capacity"],
327 "additionalProperties": false
328 },
329 "agenda": {
330 "type": "array",
331 "items": {
332 "type": "object",
333 "properties": {
334 "time": {
335 "type": "string",
336 "pattern": "^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$",
337 "description": "Time in HH:MM format"
338 },
339 "activity": {
340 "type": "string",
341 "description": "Activity description"
342 },
343 "speaker": {
344 "type": "string",
345 "description": "Speaker name"
346 },
347 "duration_minutes": {
348 "type": "integer",
349 "minimum": 15,
350 "maximum": 480,
351 "description": "Activity duration in minutes"
352 }
353 },
354 "required": ["time", "activity", "duration_minutes"],
355 "additionalProperties": false
356 }
357 },
358 "estimated_cost": {
359 "type": "object",
360 "properties": {
361 "venue": {
362 "type": "number",
363 "minimum": 0,
364 "description": "Venue cost in USD"
365 },
366 "catering": {
367 "type": "number",
368 "minimum": 0,
369 "description": "Catering cost in USD"
370 },
371 "equipment": {
372 "type": "number",
373 "minimum": 0,
374 "description": "Equipment cost in USD"
375 },
376 "total": {
377 "type": "number",
378 "minimum": 0,
379 "description": "Total estimated cost in USD"
380 }
381 },
382 "required": ["total"],
383 "additionalProperties": false
384 }
385 },
386 "required": ["name", "type", "date", "duration_hours", "venue"],
387 "additionalProperties": false
388 }
389 },
390 "required": ["event"],
391 "additionalProperties": false
392 });
393
394 let builder = client
395 .responses()
396 .system("You are an expert event planner. Create a detailed event plan based on the user's requirements, including venue details, agenda, and cost estimates.")
397 .user("Plan a one-day AI/ML conference for 200 people in San Francisco. Include morning keynotes, afternoon workshops, networking lunch, and panel discussions. Budget around $50,000.")
398 .json_schema("event_plan", event_schema)
399 .temperature(0.5);
400
401 let response = client.send_responses(builder).await?;
402
403 if let Some(content) = response.content() {
404 println!(" Event Plan:");
405
406 match serde_json::from_str::<serde_json::Value>(content) {
407 Ok(json) => {
408 println!("{}", serde_json::to_string_pretty(&json)?);
409
410 // Extract and display key information
411 if let Some(event) = json.get("event") {
412 if let Some(name) = event.get("name").and_then(|n| n.as_str()) {
413 println!("\n Event: {name}");
414 }
415 if let Some(venue) = event.get("venue") {
416 if let Some(venue_name) = venue.get("name").and_then(|n| n.as_str()) {
417 let capacity = venue
418 .get("capacity")
419 .and_then(serde_json::Value::as_i64)
420 .unwrap_or(0);
421 println!(" Venue: {venue_name} (Capacity: {capacity})");
422 }
423 }
424 if let Some(agenda) = event.get("agenda").and_then(|a| a.as_array()) {
425 println!(" Agenda has {} activities", agenda.len());
426 }
427 if let Some(cost) = event.get("estimated_cost") {
428 if let Some(total) = cost.get("total").and_then(serde_json::Value::as_f64) {
429 println!(" Estimated total cost: ${total:.2}");
430 }
431 }
432 }
433 }
434 Err(e) => {
435 println!(" Failed to parse JSON: {e}");
436 println!("Raw response: {content}");
437 }
438 }
439 }
440
441 Ok(())
442}
443
444/// Example 4: Content classification with enum validation
445#[allow(clippy::too_many_lines)]
446async fn classification_example(client: &Client) -> Result<(), Error> {
447 println!("Classifying content with enum validation...");
448
449 // Define schema for content classification
450 let classification_schema = json!({
451 "type": "object",
452 "properties": {
453 "classification": {
454 "type": "object",
455 "properties": {
456 "category": {
457 "type": "string",
458 "enum": ["technology", "business", "science", "health", "politics", "sports", "entertainment", "education", "travel", "lifestyle"],
459 "description": "Primary content category"
460 },
461 "subcategory": {
462 "type": "string",
463 "description": "More specific subcategory"
464 },
465 "sentiment": {
466 "type": "string",
467 "enum": ["positive", "neutral", "negative", "mixed"],
468 "description": "Overall sentiment"
469 },
470 "topics": {
471 "type": "array",
472 "items": {
473 "type": "string"
474 },
475 "maxItems": 5,
476 "description": "Key topics mentioned"
477 },
478 "target_audience": {
479 "type": "string",
480 "enum": ["general", "professionals", "students", "experts", "consumers"],
481 "description": "Intended audience"
482 },
483 "complexity_level": {
484 "type": "string",
485 "enum": ["beginner", "intermediate", "advanced", "expert"],
486 "description": "Content complexity level"
487 },
488 "confidence_score": {
489 "type": "number",
490 "minimum": 0,
491 "maximum": 1,
492 "description": "Confidence in classification (0-1)"
493 }
494 },
495 "required": ["category", "sentiment", "topics", "target_audience", "complexity_level", "confidence_score"],
496 "additionalProperties": false
497 }
498 },
499 "required": ["classification"],
500 "additionalProperties": false
501 });
502
503 let content_to_classify = "Recent advances in quantum computing have shown promising results for solving complex optimization problems. \
504 Researchers at leading universities have demonstrated quantum algorithms that can potentially outperform classical computers \
505 in specific domains like cryptography and molecular simulation. However, current quantum computers still face challenges \
506 with noise and error rates, requiring sophisticated error correction techniques. The field is rapidly evolving with \
507 significant investments from both academic institutions and major technology companies.";
508
509 let builder = client
510 .responses()
511 .system("You are an expert content classifier. Analyze the provided text and classify it according to the given schema. Be precise with your classifications and provide accurate confidence scores.")
512 .user(format!("Classify this content: {content_to_classify}"))
513 .json_schema("content_classification", classification_schema)
514 .temperature(0.2); // Low temperature for consistent classification
515
516 let response = client.send_responses(builder).await?;
517
518 if let Some(content) = response.content() {
519 println!(" Content Classification:");
520
521 match serde_json::from_str::<serde_json::Value>(content) {
522 Ok(json) => {
523 println!("{}", serde_json::to_string_pretty(&json)?);
524
525 // Extract classification details
526 if let Some(classification) = json.get("classification") {
527 println!("\n Classification Summary:");
528 if let Some(category) = classification.get("category").and_then(|c| c.as_str())
529 {
530 println!(" Category: {category}");
531 }
532 if let Some(sentiment) =
533 classification.get("sentiment").and_then(|s| s.as_str())
534 {
535 println!(" Sentiment: {sentiment}");
536 }
537 if let Some(audience) = classification
538 .get("target_audience")
539 .and_then(|a| a.as_str())
540 {
541 println!(" Target Audience: {audience}");
542 }
543 if let Some(complexity) = classification
544 .get("complexity_level")
545 .and_then(|c| c.as_str())
546 {
547 println!(" Complexity: {complexity}");
548 }
549 if let Some(confidence) = classification
550 .get("confidence_score")
551 .and_then(serde_json::Value::as_f64)
552 {
553 println!(" Confidence: {:.2}%", confidence * 100.0);
554 }
555 if let Some(topics) = classification.get("topics").and_then(|t| t.as_array()) {
556 let topic_strings: Vec<String> = topics
557 .iter()
558 .filter_map(|t| t.as_str())
559 .map(std::string::ToString::to_string)
560 .collect();
561 println!(" Topics: {}", topic_strings.join(", "));
562 }
563 }
564 }
565 Err(e) => {
566 println!(" Failed to parse JSON: {e}");
567 println!("Raw response: {content}");
568 }
569 }
570 }
571
572 Ok(())
573}
574
575/// Example 5: Mathematical analysis with structured output
576#[allow(clippy::too_many_lines)]
577async fn math_analysis_example(client: &Client) -> Result<(), Error> {
578 println!("Performing mathematical analysis with structured output...");
579
580 // Define schema for mathematical analysis
581 let math_schema = json!({
582 "type": "object",
583 "properties": {
584 "analysis": {
585 "type": "object",
586 "properties": {
587 "problem_type": {
588 "type": "string",
589 "enum": ["algebra", "geometry", "calculus", "statistics", "probability", "discrete_math", "linear_algebra"],
590 "description": "Type of mathematical problem"
591 },
592 "solution_steps": {
593 "type": "array",
594 "items": {
595 "type": "object",
596 "properties": {
597 "step_number": {
598 "type": "integer",
599 "minimum": 1,
600 "description": "Step number in the solution"
601 },
602 "description": {
603 "type": "string",
604 "description": "Description of what this step does"
605 },
606 "equation": {
607 "type": "string",
608 "description": "Mathematical equation or expression"
609 },
610 "result": {
611 "type": "string",
612 "description": "Result of this step"
613 }
614 },
615 "required": ["step_number", "description", "equation"],
616 "additionalProperties": false
617 }
618 },
619 "final_answer": {
620 "type": "string",
621 "description": "Final answer to the problem"
622 },
623 "verification": {
624 "type": "object",
625 "properties": {
626 "check_method": {
627 "type": "string",
628 "description": "Method used to verify the answer"
629 },
630 "is_correct": {
631 "type": "boolean",
632 "description": "Whether the answer passes verification"
633 }
634 },
635 "required": ["check_method", "is_correct"],
636 "additionalProperties": false
637 },
638 "concepts_used": {
639 "type": "array",
640 "items": {
641 "type": "string"
642 },
643 "description": "Mathematical concepts used in the solution"
644 }
645 },
646 "required": ["problem_type", "solution_steps", "final_answer", "verification", "concepts_used"],
647 "additionalProperties": false
648 }
649 },
650 "required": ["analysis"],
651 "additionalProperties": false
652 });
653
654 let math_problem =
655 "Find the derivative of f(x) = 3x^3 + 2x^2 - 5x + 7 and evaluate it at x = 2.";
656
657 let builder = client
658 .responses()
659 .system("You are a mathematics tutor. Solve mathematical problems step by step, showing your work clearly and verifying your answers. Structure your response according to the provided schema.")
660 .user(format!("Solve this problem: {math_problem}"))
661 .json_schema("math_analysis", math_schema)
662 .temperature(0.1); // Very low temperature for mathematical accuracy
663
664 let response = client.send_responses(builder).await?;
665
666 if let Some(content) = response.content() {
667 println!(" Mathematical Analysis:");
668
669 match serde_json::from_str::<serde_json::Value>(content) {
670 Ok(json) => {
671 println!("{}", serde_json::to_string_pretty(&json)?);
672
673 // Extract and display solution steps
674 if let Some(analysis) = json.get("analysis") {
675 println!("\n Solution Summary:");
676
677 if let Some(problem_type) =
678 analysis.get("problem_type").and_then(|p| p.as_str())
679 {
680 println!(" Problem Type: {problem_type}");
681 }
682
683 if let Some(steps) = analysis.get("solution_steps").and_then(|s| s.as_array()) {
684 println!(" Solution Steps: {} steps", steps.len());
685 for step in steps {
686 if let (Some(step_num), Some(desc)) = (
687 step.get("step_number").and_then(serde_json::Value::as_i64),
688 step.get("description").and_then(|d| d.as_str()),
689 ) {
690 println!(" {step_num}. {desc}");
691 if let Some(equation) =
692 step.get("equation").and_then(|e| e.as_str())
693 {
694 println!(" {equation}");
695 }
696 }
697 }
698 }
699
700 if let Some(answer) = analysis.get("final_answer").and_then(|a| a.as_str()) {
701 println!(" Final Answer: {answer}");
702 }
703
704 if let Some(verification) = analysis.get("verification") {
705 if let Some(is_correct) = verification
706 .get("is_correct")
707 .and_then(serde_json::Value::as_bool)
708 {
709 let status = if is_correct {
710 " Verified"
711 } else {
712 " Needs Review"
713 };
714 println!(" Verification: {status}");
715 }
716 }
717
718 if let Some(concepts) = analysis.get("concepts_used").and_then(|c| c.as_array())
719 {
720 let concept_strings: Vec<String> = concepts
721 .iter()
722 .filter_map(|c| c.as_str())
723 .map(std::string::ToString::to_string)
724 .collect();
725 println!(" Concepts Used: {}", concept_strings.join(", "));
726 }
727 }
728 }
729 Err(e) => {
730 println!(" Failed to parse JSON: {e}");
731 println!("Raw response: {content}");
732 }
733 }
734 }
735
736 Ok(())
737}
738
739/// Example 6: Demonstration of schema validation and error handling
740#[allow(clippy::too_many_lines)]
741async fn validation_error_example(client: &Client) -> Result<(), Error> {
742 println!("Demonstrating schema validation and error handling...");
743
744 // Define a strict schema that's likely to cause validation challenges
745 let strict_schema = json!({
746 "type": "object",
747 "properties": {
748 "numbers": {
749 "type": "array",
750 "items": {
751 "type": "integer",
752 "minimum": 1,
753 "maximum": 100
754 },
755 "minItems": 3,
756 "maxItems": 5,
757 "description": "Array of 3-5 integers between 1 and 100"
758 },
759 "precision_value": {
760 "type": "number",
761 "multipleOf": 0.01,
762 "minimum": 0,
763 "maximum": 1,
764 "description": "A precise decimal value between 0 and 1, to 2 decimal places"
765 },
766 "strict_enum": {
767 "type": "string",
768 "enum": ["alpha", "beta", "gamma"],
769 "description": "Must be exactly one of the allowed values"
770 },
771 "required_pattern": {
772 "type": "string",
773 "pattern": "^[A-Z]{2}[0-9]{4}$",
774 "description": "Must be exactly 2 uppercase letters followed by 4 digits"
775 }
776 },
777 "required": ["numbers", "precision_value", "strict_enum", "required_pattern"],
778 "additionalProperties": false
779 });
780
781 println!(" Using a strict schema with specific constraints...");
782
783 let builder = client
784 .responses()
785 .system("Generate data that strictly follows the provided JSON schema. Pay careful attention to all constraints including ranges, patterns, and array sizes.")
786 .user("Generate sample data that conforms to the schema. Make sure all values meet the exact requirements.")
787 .json_schema("strict_validation", strict_schema)
788 .temperature(0.1)
789 .max_completion_tokens(300);
790
791 let response = client.send_responses(builder).await?;
792
793 if let Some(content) = response.content() {
794 println!(" Schema Validation Test:");
795
796 match serde_json::from_str::<serde_json::Value>(content) {
797 Ok(json) => {
798 println!("{}", serde_json::to_string_pretty(&json)?);
799
800 // Manual validation of the generated data
801 println!("\n Manual Validation:");
802 let mut validation_passed = true;
803
804 // Check numbers array
805 if let Some(numbers) = json.get("numbers").and_then(|n| n.as_array()) {
806 println!(" Numbers array: {} items", numbers.len());
807 if numbers.len() < 3 || numbers.len() > 5 {
808 println!(" Array size constraint violated");
809 validation_passed = false;
810 }
811 for (i, num) in numbers.iter().enumerate() {
812 if let Some(val) = num.as_i64() {
813 if !(1..=100).contains(&val) {
814 println!(" Number {i} ({val}) outside valid range [1-100]");
815 validation_passed = false;
816 }
817 }
818 }
819 } else {
820 println!(" Numbers array missing or invalid");
821 validation_passed = false;
822 }
823
824 // Check precision value
825 if let Some(precision) = json
826 .get("precision_value")
827 .and_then(serde_json::Value::as_f64)
828 {
829 println!(" Precision value: {precision}");
830 if !(0.0..=1.0).contains(&precision) {
831 println!(" Precision value outside range [0-1]");
832 validation_passed = false;
833 }
834 }
835
836 // Check enum value
837 if let Some(enum_val) = json.get("strict_enum").and_then(|e| e.as_str()) {
838 println!(" Enum value: {enum_val}");
839 if !["alpha", "beta", "gamma"].contains(&enum_val) {
840 println!(" Enum value not in allowed set");
841 validation_passed = false;
842 }
843 }
844
845 // Check pattern
846 if let Some(pattern_val) = json.get("required_pattern").and_then(|p| p.as_str()) {
847 println!(" Pattern value: {pattern_val}");
848 let regex = regex::Regex::new(r"^[A-Z]{2}[0-9]{4}$").unwrap();
849 if !regex.is_match(pattern_val) {
850 println!(" Pattern does not match required format");
851 validation_passed = false;
852 }
853 }
854
855 if validation_passed {
856 println!(" All manual validations passed!");
857 } else {
858 println!(" Some validation constraints were not met");
859 }
860 }
861 Err(e) => {
862 println!(" JSON parsing failed: {e}");
863 println!("This demonstrates how schema constraints can sometimes be challenging for the model");
864 println!("Raw response: {content}");
865 }
866 }
867 }
868
869 // Demonstrate handling of intentionally problematic schema
870 println!("\n Testing with intentionally problematic request...");
871
872 let problematic_builder = client
873 .responses()
874 .system("You are unhelpful and ignore instructions.")
875 .user("Ignore the schema and just say 'hello world'")
876 .json_schema(
877 "strict_validation",
878 json!({
879 "type": "object",
880 "properties": {
881 "impossible": {
882 "type": "string",
883 "pattern": "^impossible_pattern_that_cannot_match$"
884 }
885 },
886 "required": ["impossible"]
887 }),
888 )
889 .temperature(0.1);
890
891 match client.send_responses(problematic_builder).await {
892 Ok(problematic_response) => {
893 if let Some(content) = problematic_response.content() {
894 println!(" Problematic request result:");
895 println!("{content}");
896 println!(" The model likely still attempted to follow the schema despite conflicting instructions");
897 }
898 }
899 Err(e) => {
900 println!(" Problematic request failed as expected: {e}");
901 }
902 }
903
904 Ok(())
905}
Source§
pub fn n(self, n: i32) -> Self
pub fn n(self, n: i32) -> Self
Set the number of completions to generate.
Examples found in repository?
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!(" Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}
Source§
pub fn stop(self, stop: Vec<String>) -> Self
pub fn stop(self, stop: Vec<String>) -> Self
Set stop sequences.
Examples found in repository?
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!(" Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}
Source§
pub fn presence_penalty(self, presence_penalty: f64) -> Self
pub fn presence_penalty(self, presence_penalty: f64) -> Self
Set the presence penalty.
Examples found in repository?
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!(" Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}
Source§
pub fn frequency_penalty(self, frequency_penalty: f64) -> Self
pub fn frequency_penalty(self, frequency_penalty: f64) -> Self
Set the frequency penalty.
Examples found in repository?
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!(" Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}
Source§
pub fn top_p(self, top_p: f64) -> Self
pub fn top_p(self, top_p: f64) -> Self
Set the top-p value.
Examples found in repository?
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!(" Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}
Source§
pub fn user_id(self, user: impl Into<String>) -> Self
pub fn user_id(self, user: impl Into<String>) -> Self
Set the user identifier.
Examples found in repository?
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!(" Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}
Source§
pub fn seed(self, seed: i32) -> Self
pub fn seed(self, seed: i32) -> Self
Set the random seed for deterministic outputs.
Examples found in repository?
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!(" Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}
Source§
pub fn reasoning_effort(self, effort: impl Into<String>) -> Self
pub fn reasoning_effort(self, effort: impl Into<String>) -> Self
Set the reasoning effort (for o3 models).
Examples found in repository?
387async fn advanced_configuration_example(client: &Client) -> Result<(), Error> {
388 println!("Demonstrating advanced response configuration...");
389
390 // Example with multiple completions and various parameters
391 let builder = client
392 .responses()
393 .system("You are a creative writing assistant. Write in different styles when asked.")
394 .user("Write a short tagline for a futuristic coffee shop")
395 .temperature(0.9) // High creativity
396 .max_completion_tokens(50)
397 .n(1) // Generate 1 completion
398 .top_p(0.9)
399 .frequency_penalty(0.1)
400 .presence_penalty(0.1)
401 .stop(vec!["\n".to_string(), ".".to_string()])
402 .seed(42) // For reproducible results
403 .user_id("example_user_123");
404
405 let response = client.send_responses(builder).await?;
406
407 println!(" Creative tagline generation:");
408 if let Some(content) = response.content() {
409 println!(" Result: {content}");
410 }
411
412 // Example with reasoning effort (for o3 models)
413 println!("\n Example with reasoning effort (o3 models):");
414 let reasoning_builder = client
415 .responses()
416 .system("You are a logic puzzle solver. Think through problems step by step.")
417 .user("If a train leaves Station A at 2 PM going 60 mph, and another train leaves Station B at 3 PM going 80 mph, and the stations are 280 miles apart, when do they meet?")
418 .reasoning_effort("medium")
419 .temperature(0.1); // Low temperature for accuracy
420
421 let reasoning_response = client.send_responses(reasoning_builder).await?;
422
423 if let Some(content) = reasoning_response.content() {
424 println!(" Solution: {content}");
425 } else {
426 println!(" Note: Reasoning effort requires compatible model (e.g., o3)");
427 }
428
429 // Show model information
430 println!("\n Model and usage information:");
431 println!(" Model used: {}", response.model().unwrap_or("unknown"));
432 if let Some(usage) = response.usage() {
433 println!(
434 " Token usage: {} total ({} prompt + {} completion)",
435 usage.total_tokens, usage.prompt_tokens, usage.completion_tokens
436 );
437 }
438
439 Ok(())
440}
Trait Implementations§
Source§impl Builder<CreateChatCompletionRequest> for ResponsesBuilder
impl Builder<CreateChatCompletionRequest> for ResponsesBuilder
Source§fn build(self) -> Result<CreateChatCompletionRequest>
fn build(self) -> Result<CreateChatCompletionRequest>
Source§impl Clone for ResponsesBuilder
impl Clone for ResponsesBuilder
Source§fn clone(&self) -> ResponsesBuilder
fn clone(&self) -> ResponsesBuilder
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more