Struct ChatCompletionBuilder

pub struct ChatCompletionBuilder { /* private fields */ }

Builder for chat completion requests.

Implementations

impl ChatCompletionBuilder

pub fn new(model: impl Into<String>) -> Self

Create a new chat completion builder with the specified model.
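A minimal usage sketch (not one of the crate's shipped examples; the model name is illustrative, and `client` is assumed to come from `Client::from_env()?.build()` as in the repository examples below):

// Hypothetical sketch: construct a builder with an explicit model, then execute it.
let chat_builder = ChatCompletionBuilder::new("gpt-4o-mini")
    .system("You are a helpful assistant.")
    .user("Say hello in one word.")
    .build()?;
let response = client.execute_chat(chat_builder).await?;
println!("{:?}", response.content());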

Examples found in repository
examples/langfuse.rs (line 104)
31 async fn main() -> Result<(), Box<dyn std::error::Error>> {
32    // Initialize tracing for logging
33    tracing_subscriber::fmt()
34        .with_env_filter(
35            tracing_subscriber::EnvFilter::from_default_env()
36                .add_directive("openai_ergonomic=debug".parse()?),
37        )
38        .init();
39
40    // 1. Build Langfuse exporter from environment variables
41    let exporter = ExporterBuilder::from_env()?.build()?;
42
43    // 2. Create tracer provider with batch processor
44    let provider = SdkTracerProvider::builder()
45        .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
46        .build();
47
48    // Set as global provider
49    global::set_tracer_provider(provider.clone());
50
51    // 3. Get tracer and create interceptor
52    let tracer = provider.tracer("openai-ergonomic");
53    let langfuse_interceptor =
54        std::sync::Arc::new(LangfuseInterceptor::new(tracer, LangfuseConfig::new()));
55
56    // 4. Create the OpenAI client and add the Langfuse interceptor
57    // Keep a reference to the interceptor so we can update context later
58    let client = Client::from_env()?
59        .with_interceptor(Box::new(langfuse_interceptor.clone()))
60        .build();
61
62    println!(" OpenAI client initialized with Langfuse observability");
63    println!(" Traces will be sent to Langfuse for monitoring\n");
64
65    // Example 1: Simple chat completion
66    println!("Example 1: Simple chat completion");
67    println!("---------------------------------");
68    let chat_builder = client
69        .chat_simple("What is the capital of France? Answer in one word.")
70        .build()?;
71    let response = client.execute_chat(chat_builder).await?;
72    println!("Response: {:?}\n", response.content());
73
74    // Example 2: Chat completion with builder pattern
75    println!("Example 2: Chat with builder pattern");
76    println!("-------------------------------------");
77    let chat_builder = client
78        .chat()
79        .system("You are a helpful assistant that speaks like a pirate.")
80        .user("Tell me about the ocean in 2 sentences.")
81        .temperature(0.7)
82        .max_tokens(100)
83        .build()?;
84    let response = client.execute_chat(chat_builder).await?;
85    println!("Response: {:?}\n", response.content());
86
87    // Example 3: Multiple messages in a conversation
88    println!("Example 3: Conversation");
89    println!("-----------------------");
90    let chat_builder = client
91        .chat()
92        .system("You are a math tutor.")
93        .user("What is 2 + 2?")
94        .assistant("2 + 2 equals 4.")
95        .user("And what about 3 + 3?")
96        .build()?;
97    let response = client.execute_chat(chat_builder).await?;
98    println!("Response: {:?}\n", response.content());
99
100    // Example 4: Error handling (intentionally trigger an error)
101    println!("Example 4: Error handling");
102    println!("-------------------------");
103    // Create a builder with a non-existent model
104    let chat_builder = ChatCompletionBuilder::new("non-existent-model")
105        .user("This should fail")
106        .build()?;
107    let result = client.execute_chat(chat_builder).await;
108
109    match result {
110        Ok(_) => println!("Unexpected success"),
111        Err(e) => println!("Expected error captured: {e}\n"),
112    }
113
114    // Example 5: Embeddings
115    println!("Example 5: Embeddings");
116    println!("--------------------");
117    let embeddings_builder = client.embeddings().text(
118        "text-embedding-ada-002",
119        "The quick brown fox jumps over the lazy dog",
120    );
121    let embeddings = client.embeddings().create(embeddings_builder).await?;
122    println!("Generated {} embedding(s)\n", embeddings.data.len());
123
124    // Example 6: Using custom metadata via interceptor context
125    println!("Example 6: Custom metadata via interceptor context");
126    println!("---------------------------------------------------");
127
128    // Set session and user IDs on the interceptor's context
129    langfuse_interceptor.set_session_id("demo-session-123");
130    langfuse_interceptor.set_user_id("demo-user-456");
131    langfuse_interceptor.add_tags(vec!["example".to_string(), "demo".to_string()]);
132
133    let chat_builder = client
134        .chat_simple("Say 'Hello from custom session!'")
135        .build()?;
136    let response = client.execute_chat(chat_builder).await?;
137    println!("Response with custom metadata: {:?}\n", response.content());
138
139    // Clear context for subsequent calls
140    langfuse_interceptor.clear_context();
141
142    println!(" All examples completed!");
143    println!(" Check your Langfuse dashboard to see the traces");
144    println!("   - Look for traces with operation name 'chat'");
145    println!("   - Each trace includes request/response details, token usage, and timing");
146    println!("   - Example 6 will have custom session_id, user_id, and tags");
147
148    // Shutdown the tracer provider to flush all spans
149    println!("\n⏳ Flushing spans to Langfuse...");
150    provider.shutdown()?;
151
152    Ok(())
153 }

pub fn system(self, content: impl Into<String>) -> Self

Add a system message to the conversation.
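A minimal sketch (illustrative, not taken from the repository; `client` is assumed from `Client::from_env()?.build()`). The system message is typically added first so that it steers all subsequent turns:

// Hypothetical sketch: the system message sets the assistant's behavior.
let builder = client
    .chat()
    .system("You are a concise assistant.")
    .user("Summarize Rust's ownership model in one sentence.");
let response = client.send_chat(builder).await?;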

Examples found in repository
examples/chat_streaming.rs (line 117)
111 async fn streaming_with_system(client: &Client) -> Result<()> {
112    println!("System: You are a helpful assistant that speaks like a pirate");
113    println!("Question: Tell me about the weather");
114
115    let builder = client
116        .chat()
117        .system("You are a helpful assistant that speaks like a pirate")
118        .user("Tell me about the weather")
119        .max_tokens(100);
120
121    let mut stream = client.send_chat_stream(builder).await?;
122
123    print!("Response: ");
124    while let Some(chunk) = stream.next().await {
125        let chunk = chunk?;
126        if let Some(content) = chunk.content() {
127            print!("{}", content);
128        }
129    }
130    println!();
131
132    Ok(())
133 }
More examples
examples/vision_chat.rs (line 89)
72 async fn demonstrate_basic_image_analysis(
73    client: &Client,
74 ) -> Result<(), Box<dyn std::error::Error>> {
75    println!("  Example 1: Basic Image Analysis");
76    println!("----------------------------------");
77
78    let image_url = SAMPLE_IMAGE_URLS[0];
79    let question = "What do you see in this image? Please describe it in detail.";
80
81    println!("Image URL: {image_url}");
82    println!("Question: {question}");
83    print!("Assistant: ");
84    io::stdout().flush()?;
85
86    // Use the convenient user_with_image_url method
87    let chat_builder = client
88        .chat()
89        .system("You are a helpful AI assistant that can analyze images. Provide detailed, accurate descriptions of what you see.")
90        .user_with_image_url(question, image_url)
91        .temperature(0.3);
92
93    let response = client.send_chat(chat_builder).await?;
94
95    if let Some(content) = response.content() {
96        println!("{content}");
97
98        // Show usage information
99        if let Some(usage) = response.usage() {
100            println!("\n Token usage:");
101            println!("  Prompt tokens: {}", usage.prompt_tokens);
102            println!("  Completion tokens: {}", usage.completion_tokens);
103            println!("  Total tokens: {}", usage.total_tokens);
104        }
105    } else {
106        println!("No response content received");
107    }
108
109    println!();
110    Ok(())
111 }
112
113 /// Demonstrate analysis of multiple images in a single message.
114 async fn demonstrate_multiple_images(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
115    println!(" Example 2: Multiple Image Analysis");
116    println!("---------------------------------------");
117
118    let question = "Compare these two images. What are the differences and similarities?";
119
120    println!("Question: {question}");
121    println!("Image 1: {}", SAMPLE_IMAGE_URLS[0]);
122    println!("Image 2: {}", SAMPLE_IMAGE_URLS[1]);
123    print!("Assistant: ");
124    io::stdout().flush()?;
125
126    // Create message parts manually for multiple images
127    let parts = vec![
128        text_part(question),
129        image_url_part_with_detail(SAMPLE_IMAGE_URLS[0], Detail::Auto),
130        image_url_part_with_detail(SAMPLE_IMAGE_URLS[1], Detail::Auto),
131    ];
132
133    let chat_builder = client
134        .chat()
135        .system("You are an expert at comparing and analyzing images. Provide thoughtful comparisons focusing on visual elements, composition, and content.")
136        .user_with_parts(parts)
137        .temperature(0.4);
138
139    let response = client.send_chat(chat_builder).await?;
140
141    if let Some(content) = response.content() {
142        println!("{content}");
143    } else {
144        println!("No response content received");
145    }
146
147    println!();
148    Ok(())
149 }
150
151 /// Demonstrate different detail levels for image analysis.
152 async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153    println!(" Example 3: Different Detail Levels");
154    println!("------------------------------------");
155
156    let image_url = SAMPLE_IMAGE_URLS[0];
157    let question = "Analyze this image";
158
159    // Test different detail levels
160    let detail_levels = vec![
161        (Detail::Low, "Low detail (faster, less detailed)"),
162        (Detail::High, "High detail (slower, more detailed)"),
163        (Detail::Auto, "Auto detail (balanced)"),
164    ];
165
166    for (detail, description) in detail_levels {
167        println!("\n{description}:");
168        print!("Assistant: ");
169        io::stdout().flush()?;
170
171        let chat_builder = client
172            .chat()
173            .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174            .user_with_image_url_and_detail(question, image_url, detail)
175            .temperature(0.2)
176            .max_completion_tokens(100); // Limit response length for comparison
177
178        let response = client.send_chat(chat_builder).await?;
179
180        if let Some(content) = response.content() {
181            println!("{content}");
182        }
183    }
184
185    println!();
186    Ok(())
187 }
188
189 /// Demonstrate base64 image encoding and analysis.
190 async fn demonstrate_base64_image(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
191    println!(" Example 4: Base64 Image Analysis");
192    println!("-----------------------------------");
193
194    let question = "What is this image? It's very small, what can you tell about it?";
195
196    println!("Question: {question}");
197    println!("Image: Small test image encoded as base64");
198    print!("Assistant: ");
199    io::stdout().flush()?;
200
201    // Create message parts with base64 image
202    let parts = vec![
203        text_part(question),
204        image_base64_part_with_detail(SAMPLE_BASE64_IMAGE, "image/png", Detail::High),
205    ];
206
207    let chat_builder = client
208        .chat()
209        .system("You are analyzing images provided in base64 format. Even if an image is very small or simple, try to provide what information you can.")
210        .user_with_parts(parts)
211        .temperature(0.3);
212
213    let response = client.send_chat(chat_builder).await?;
214
215    if let Some(content) = response.content() {
216        println!("{content}");
217    } else {
218        println!("No response content received");
219    }
220
221    println!();
222    Ok(())
223 }
224
225 /// Demonstrate conversation context with images.
226 async fn demonstrate_conversation_with_images(
227    client: &Client,
228 ) -> Result<(), Box<dyn std::error::Error>> {
229    println!(" Example 5: Conversation Context with Images");
230    println!("----------------------------------------------");
231
232    let image_url = SAMPLE_IMAGE_URLS[0];
233
234    // First message: Analyze the image
235    println!("Step 1: Initial image analysis");
236    print!("Assistant: ");
237    io::stdout().flush()?;
238
239    let mut chat_builder = client
240        .chat()
241        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
242        .user_with_image_url("What's the main subject of this image?", image_url)
243        .temperature(0.3);
244
245    let response1 = client.send_chat(chat_builder).await?;
246    let first_response = response1.content().unwrap_or("No response").to_string();
247    println!("{first_response}");
248
249    // Second message: Follow-up question (without re-uploading the image)
250    println!("\nStep 2: Follow-up question");
251    print!("Assistant: ");
252    io::stdout().flush()?;
253
254    chat_builder = client
255        .chat()
256        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
257        .user_with_image_url("What's the main subject of this image?", image_url)
258        .assistant(&first_response)
259        .user("What colors are most prominent in the image we just discussed?")
260        .temperature(0.3);
261
262    let response2 = client.send_chat(chat_builder).await?;
263
264    if let Some(content) = response2.content() {
265        println!("{content}");
266    }
267
268    // Third message: Ask for creative interpretation
269    println!("\nStep 3: Creative interpretation");
270    print!("Assistant: ");
271    io::stdout().flush()?;
272
273    let second_response = response2.content().unwrap_or("No response").to_string();
274
275    chat_builder = client
276        .chat()
277        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
278        .user_with_image_url("What's the main subject of this image?", image_url)
279        .assistant(&first_response)
280        .user("What colors are most prominent in the image we just discussed?")
281        .assistant(second_response)
282        .user("Based on our discussion, write a short poem inspired by this image.")
283        .temperature(0.7);
284
285    let response3 = client.send_chat(chat_builder).await?;
286
287    if let Some(content) = response3.content() {
288        println!("{content}");
289    }
290
291    println!();
292    Ok(())
293 }
examples/moderations.rs (line 254)
228 async fn response_filtering(client: &Client) -> Result<()> {
229    // Filter AI responses before showing to users
230
231    println!("Generating and moderating AI responses:");
232
233    // Generate response
234    let prompt = "Tell me about technology";
235    let builder = client.chat().user(prompt).max_completion_tokens(100);
236    let response = client.send_chat(builder).await?;
237
238    if let Some(content) = response.content() {
239        println!("Generated response: '{}'", content);
240
241        // Moderate the response
242        let moderation_result = simulate_moderation(content);
243
244        if moderation_result.flagged {
245            println!(
246                "  Response flagged! Categories: {:?}",
247                moderation_result.categories
248            );
249            println!("Action: Response blocked or regenerated");
250
251            // Regenerate with more strict instructions
252            let safe_builder = client
253                .chat()
254                .system("Provide helpful, safe, and appropriate responses only.")
255                .user(prompt)
256                .max_completion_tokens(100);
257            let safe_response = client.send_chat(safe_builder).await?;
258
259            if let Some(safe_content) = safe_response.content() {
260                println!("Regenerated safe response: '{}'", safe_content);
261            }
262        } else {
263            println!(" Response passed moderation");
264        }
265    }
266
267    Ok(())
268 }
examples/chat_comprehensive.rs (line 204)
184 async fn demonstrate_basic_chat(
185    client: &Client,
186    conversation: &mut ConversationManager,
187 ) -> Result<(), Box<dyn std::error::Error>> {
188    println!(" Example 1: Basic Chat Completion");
189    println!("----------------------------------");
190
191    let user_message = "Hello! Can you explain what you can help me with?";
192    conversation.add_user_message(user_message.to_string());
193
194    println!("User: {user_message}");
195    print!("Assistant: ");
196    io::stdout().flush()?;
197
198    // Build the chat request with conversation history
199    let messages = conversation.get_conversation_for_api();
200    let mut chat_builder = client.chat();
201
202    for (role, content) in messages {
203        match role.as_str() {
204            "system" => chat_builder = chat_builder.system(content),
205            "user" => chat_builder = chat_builder.user(content),
206            "assistant" => chat_builder = chat_builder.assistant(content),
207            _ => {} // Ignore unknown roles
208        }
209    }
210
211    // Send the request
212    let response = client.send_chat(chat_builder.temperature(0.7)).await?;
213
214    if let Some(content) = response.content() {
215        println!("{content}");
216        conversation.add_assistant_message(content.to_string(), None);
217
218        // Track token usage if available
219        if let Some(usage) = response.usage() {
220            conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
221        }
222    } else {
223        println!("No response content received");
224    }
225
226    println!();
227    Ok(())
228 }
229
230 /// Demonstrate multi-turn conversation.
231 async fn demonstrate_multi_turn_chat(
232    client: &Client,
233    conversation: &mut ConversationManager,
234 ) -> Result<(), Box<dyn std::error::Error>> {
235    println!(" Example 2: Multi-turn Conversation");
236    println!("------------------------------------");
237
238    let questions = vec![
239        "What's the capital of France?",
240        "What's the population of that city?",
241        "Can you tell me an interesting fact about it?",
242    ];
243
244    for question in questions {
245        conversation.add_user_message(question.to_string());
246
247        println!("User: {question}");
248        print!("Assistant: ");
249        io::stdout().flush()?;
250
251        // Build chat request with full conversation history
252        let messages = conversation.get_conversation_for_api();
253        let mut chat_builder = client.chat();
254
255        for (role, content) in messages {
256            match role.as_str() {
257                "system" => chat_builder = chat_builder.system(content),
258                "user" => chat_builder = chat_builder.user(content),
259                "assistant" => chat_builder = chat_builder.assistant(content),
260                _ => {}
261            }
262        }
263
264        let response = client.send_chat(chat_builder.temperature(0.3)).await?;
265
266        if let Some(content) = response.content() {
267            println!("{content}");
268            conversation.add_assistant_message(content.to_string(), None);
269
270            // Track token usage
271            if let Some(usage) = response.usage() {
272                conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
273            }
274        }
275
276        println!();
277        // Small delay between questions for readability
278        tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
279    }
280
281    Ok(())
282 }
283
284 /// Demonstrate streaming chat response.
285 async fn demonstrate_streaming_chat(
286    _client: &Client,
287    conversation: &mut ConversationManager,
288 ) -> Result<(), Box<dyn std::error::Error>> {
289    println!(" Example 3: Streaming Chat Response");
290    println!("------------------------------------");
291
292    // Add user message for streaming example
293    let streaming_question = "Can you write a short poem about programming?";
294    conversation.add_user_message(streaming_question.to_string());
295
296    println!("User: {streaming_question}");
297    println!("Assistant (streaming): ");
298
299    // Note: Streaming is not yet fully implemented in the client
300    // This is a placeholder showing the intended API
301    println!(" Streaming functionality is being implemented...");
302    println!("Future implementation will show real-time token-by-token responses");
303
304    // Simulate what streaming would look like
305    let simulated_response = "Programming flows like poetry in motion,\nEach function a verse, each loop a devotion.\nVariables dance through memory's halls,\nWhile algorithms answer logic's calls.";
306
307    // Simulate typing effect
308    for char in simulated_response.chars() {
309        print!("{char}");
310        io::stdout().flush()?;
311        tokio::time::sleep(tokio::time::Duration::from_millis(30)).await;
312    }
313    println!("\n");
314
315    // Add the response to conversation history
316    conversation.add_assistant_message(simulated_response.to_string(), None);
317
318    Ok(())
319 }
320
321 /// Demonstrate token usage tracking.
322 async fn demonstrate_token_tracking(
323    client: &Client,
324    conversation: &mut ConversationManager,
325 ) -> Result<(), Box<dyn std::error::Error>> {
326    println!(" Example 4: Token Usage Tracking");
327    println!("---------------------------------");
328
329    let efficiency_question = "In one sentence, what is machine learning?";
330    conversation.add_user_message(efficiency_question.to_string());
331
332    println!("User: {efficiency_question}");
333    print!("Assistant: ");
334    io::stdout().flush()?;
335
336    // Build chat request
337    let messages = conversation.get_conversation_for_api();
338    let mut chat_builder = client.chat().max_completion_tokens(50); // Limit tokens for demo
339
340    for (role, content) in messages {
341        match role.as_str() {
342            "system" => chat_builder = chat_builder.system(content),
343            "user" => chat_builder = chat_builder.user(content),
344            "assistant" => chat_builder = chat_builder.assistant(content),
345            _ => {}
346        }
347    }
348
349    let response = client.send_chat(chat_builder).await?;
350
351    if let Some(content) = response.content() {
352        println!("{content}");
353
354        // Display detailed token usage
355        if let Some(usage) = response.usage() {
356            println!("\n Token Usage Breakdown:");
357            println!("  Prompt tokens: {}", usage.prompt_tokens);
358            println!("  Completion tokens: {}", usage.completion_tokens);
359            println!("  Total tokens: {}", usage.total_tokens);
360
361            conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
362
363            conversation.add_assistant_message(content.to_string(), Some(usage.completion_tokens));
364        } else {
365            conversation.add_assistant_message(content.to_string(), None);
366        }
367    }
368
369    println!();
370    Ok(())
371 }
examples/azure_comprehensive.rs (line 63)
9 async fn main() -> Result<(), Box<dyn std::error::Error>> {
10    tracing_subscriber::fmt::init();
11
12    println!("=== Azure OpenAI Comprehensive API Test ===\n");
13
14    let client = Client::from_env()?.build();
15
16    // Test 1: Simple chat completion
17    println!("1. Testing simple chat completion...");
18    let builder = client.chat_simple("What is 2+2? Answer in one word.");
19    match client.send_chat(builder).await {
20        Ok(response) => {
21            if let Some(content) = response.content() {
22                println!("   ✓ Chat completion: {content}");
23            }
24        }
25        Err(e) => println!("   ✗ Chat completion failed: {e}"),
26    }
27
28    // Test 2: Chat with system message
29    println!("\n2. Testing chat with system message...");
30    let builder = client.chat_with_system(
31        "You are a helpful assistant that responds in one sentence.",
32        "What is Rust?",
33    );
34    match client.send_chat(builder).await {
35        Ok(response) => {
36            if let Some(content) = response.content() {
37                println!("   ✓ System message chat: {content}");
38            }
39        }
40        Err(e) => println!("   ✗ System message chat failed: {e}"),
41    }
42
43    // Test 3: Chat with temperature
44    println!("\n3. Testing chat with custom parameters...");
45    let builder = client
46        .chat()
47        .user("Say 'test' in a creative way")
48        .temperature(0.7)
49        .max_tokens(50);
50    match client.send_chat(builder).await {
51        Ok(response) => {
52            if let Some(content) = response.content() {
53                println!("   ✓ Custom parameters: {content}");
54            }
55        }
56        Err(e) => println!("   ✗ Custom parameters failed: {e}"),
57    }
58
59    // Test 4: Multiple messages conversation
60    println!("\n4. Testing multi-message conversation...");
61    let builder = client
62        .chat()
63        .system("You are a helpful assistant")
64        .user("My name is Alice")
65        .assistant("Hello Alice! Nice to meet you.")
66        .user("What's my name?");
67    match client.send_chat(builder).await {
68        Ok(response) => {
69            if let Some(content) = response.content() {
70                println!("   ✓ Multi-message: {content}");
71            }
72        }
73        Err(e) => println!("   ✗ Multi-message failed: {e}"),
74    }
75
76    // Test 5: Chat with max_tokens limit
77    println!("\n5. Testing with max_tokens limit...");
78    let builder = client.chat().user("Explain quantum physics").max_tokens(20);
79    match client.send_chat(builder).await {
80        Ok(response) => {
81            if let Some(content) = response.content() {
82                println!("   ✓ Limited tokens: {content}");
83                println!("   (Note: response is truncated due to max_tokens=20)");
84            }
85        }
86        Err(e) => println!("   ✗ Max tokens test failed: {e}"),
87    }
88
89    // Test 6: Using responses API
90    println!("\n6. Testing responses API...");
91    let builder = client.responses().user("What is the capital of France?");
92    match client.send_responses(builder).await {
93        Ok(response) => {
94            if let Some(content) = response.content() {
95                println!("   ✓ Responses API: {content}");
96            }
97        }
98        Err(e) => println!("   ✗ Responses API failed: {e}"),
99    }
100
101    println!("\n=== Test Summary ===");
102    println!("Azure OpenAI integration tested across multiple endpoints!");
103    println!("\nNote: Some advanced features like embeddings, streaming, and");
104    println!("tool calling may require specific Azure OpenAI deployments.");
105
106    Ok(())
107 }
examples/tool_calling_multiturn.rs (line 251)
228 async fn main() -> Result<()> {
229    println!("=== Multi-turn Tool Calling Example ===\n");
230
231    // Initialize client
232    let client = Client::from_env()?.build();
233
234    // Create storage for the memory tool
235    let storage = Arc::new(Mutex::new(HashMap::new()));
236
237    // Define available tools
238    let tools = vec![get_calculator_tool(), get_memory_tool()];
239
240    println!("Available tools:");
241    println!("  - calculator: Perform arithmetic operations");
242    println!("  - memory: Store and retrieve values");
243    println!();
244
245    // Example 1: Single tool call
246    println!("Example 1: Single Tool Call");
247    println!("User: What is 15 + 27?");
248    {
249        let chat_builder = client
250            .chat()
251            .system("You are a helpful assistant with access to a calculator and memory storage.")
252            .user("What is 15 + 27?");
253
254        let result = handle_tool_loop(&client, chat_builder, &tools, &storage).await?;
255        println!("Assistant: {}", result);
256    }
257
258    // Example 2: Multiple sequential tool calls
259    println!("\n\nExample 2: Multiple Sequential Tool Calls");
260    println!("User: Calculate 10 * 5 and store the result in memory as 'product'");
261    {
262        let chat_builder = client
263            .chat()
264            .system("You are a helpful assistant with access to a calculator and memory storage.")
265            .user("Calculate 10 * 5 and store the result in memory as 'product'");
266
267        let result = handle_tool_loop(&client, chat_builder, &tools, &storage).await?;
268        println!("Assistant: {}", result);
269    }
270
271    // Example 3: Retrieve from memory
272    println!("\n\nExample 3: Retrieve from Memory");
273    println!("User: What did I store in 'product'?");
274    {
275        let chat_builder = client
276            .chat()
277            .system("You are a helpful assistant with access to a calculator and memory storage.")
278            .user("What did I store in 'product'?");
279
280        let result = handle_tool_loop(&client, chat_builder, &tools, &storage).await?;
281        println!("Assistant: {}", result);
282    }
283
284    // Example 4: Complex multi-step task
285    println!("\n\nExample 4: Complex Multi-step Task");
286    println!("User: Calculate 100 / 4, multiply that by 3, and tell me the final result");
287    {
288        let chat_builder = client
289            .chat()
290            .system("You are a helpful assistant with access to a calculator and memory storage.")
291            .user("Calculate 100 / 4, multiply that by 3, and tell me the final result");
292
293        let result = handle_tool_loop(&client, chat_builder, &tools, &storage).await?;
294        println!("Assistant: {}", result);
295    }
296
297    // Example 5: Conversation with history
298    println!("\n\nExample 5: Conversation with History");
299    {
300        let mut chat_builder = client
301            .chat()
302            .system("You are a helpful assistant with access to a calculator and memory storage.");
303
304        // First question
305        println!("User: What is 8 + 7?");
306        chat_builder = chat_builder.user("What is 8 + 7?");
307        let result = handle_tool_loop(&client, chat_builder.clone(), &tools, &storage).await?;
308        println!("Assistant: {}", result);
309
310        // Add assistant response to history
311        chat_builder = chat_builder.assistant(&result);
312
313        // Follow-up question that depends on previous context
314        println!("\nUser: Now multiply that by 3");
315        chat_builder = chat_builder.user("Now multiply that by 3");
316        let result = handle_tool_loop(&client, chat_builder.clone(), &tools, &storage).await?;
317        println!("Assistant: {}", result);
318    }
319
320    println!("\n\n=== All examples completed successfully ===");
321    println!("\nKey Takeaway:");
322    println!("  When implementing multi-turn tool calling, ALWAYS use");
323    println!("  assistant_with_tool_calls() to maintain proper conversation");
324    println!("  history. This is essential for the model to understand the");
325    println!("  tool results and continue the conversation correctly.");
326
327    Ok(())
328 }

pub fn user(self, content: impl Into<String>) -> Self

Add a user message to the conversation.
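A minimal sketch (illustrative; `client` is assumed from `Client::from_env()?.build()`). Interleaving user() and assistant() calls builds up multi-turn conversation history:

// Hypothetical sketch: each user() call appends one user turn.
let builder = client
    .chat()
    .user("What is 2 + 2?")
    .assistant("2 + 2 equals 4.")
    .user("And what about 3 + 3?");
let response = client.send_chat(builder).await?;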

Examples found in repository
examples/chat_streaming.rs (line 52)
49 async fn basic_streaming(client: &Client) -> Result<()> {
50    println!("Question: Tell me a short joke");
51
52    let builder = client.chat().user("Tell me a short joke");
53
54    let mut stream = client.send_chat_stream(builder).await?;
55
56    print!("Response: ");
57    while let Some(chunk) = stream.next().await {
58        let chunk = chunk?;
59        if let Some(content) = chunk.content() {
60            print!("{}", content);
61        }
62    }
63    println!();
64
65    Ok(())
66 }
67
68 async fn streaming_with_parameters(client: &Client) -> Result<()> {
69    println!("Question: Write a creative tagline for a bakery");
70
71    let builder = client
72        .chat()
73        .user("Write a creative tagline for a bakery")
74        .temperature(0.9)
75        .max_tokens(50);
76
77    let mut stream = client.send_chat_stream(builder).await?;
78
79    print!("Response: ");
80    while let Some(chunk) = stream.next().await {
81        let chunk = chunk?;
82        if let Some(content) = chunk.content() {
83            print!("{}", content);
84        }
85    }
86    println!();
87
88    Ok(())
89 }
90
91 async fn collect_content(client: &Client) -> Result<()> {
92    println!("Question: What is the capital of France?");
93
94    let builder = client.chat().user("What is the capital of France?");
95
96    let mut stream = client.send_chat_stream(builder).await?;
97
98    // Manually collect all content
99    let mut content = String::new();
100    while let Some(chunk) = stream.next().await {
101        let chunk = chunk?;
102        if let Some(text) = chunk.content() {
103            content.push_str(text);
104        }
105    }
106    println!("Full response: {}", content);
107
108    Ok(())
109 }
110
111 async fn streaming_with_system(client: &Client) -> Result<()> {
112    println!("System: You are a helpful assistant that speaks like a pirate");
113    println!("Question: Tell me about the weather");
114
115    let builder = client
116        .chat()
117        .system("You are a helpful assistant that speaks like a pirate")
118        .user("Tell me about the weather")
119        .max_tokens(100);
120
121    let mut stream = client.send_chat_stream(builder).await?;
122
123    print!("Response: ");
124    while let Some(chunk) = stream.next().await {
125        let chunk = chunk?;
126        if let Some(content) = chunk.content() {
127            print!("{}", content);
128        }
129    }
130    println!();
131
132    Ok(())
133 }
134
135 async fn multiple_turns(client: &Client) -> Result<()> {
136    println!("Building a conversation with multiple turns...\n");
137
138    // First turn
139    println!("User: What is 2+2?");
140    let builder = client.chat().user("What is 2+2?");
141
142    let mut stream = client.send_chat_stream(builder).await?;
143
144    print!("Assistant: ");
145    let mut first_response = String::new();
146    while let Some(chunk) = stream.next().await {
147        let chunk = chunk?;
148        if let Some(content) = chunk.content() {
149            print!("{}", content);
150            first_response.push_str(content);
151        }
152    }
153    println!();
154
155    // Second turn - continuing the conversation
156    println!("\nUser: Now multiply that by 3");
157    let builder = client
158        .chat()
159        .user("What is 2+2?")
160        .assistant(&first_response)
161        .user("Now multiply that by 3");
162
163    let mut stream = client.send_chat_stream(builder).await?;
164
165    print!("Assistant: ");
166    while let Some(chunk) = stream.next().await {
167        let chunk = chunk?;
168        if let Some(content) = chunk.content() {
169            print!("{}", content);
170        }
171    }
172    println!();
173
174    Ok(())
175 }
More examples
examples/langfuse_streaming.rs (line 97)
94 async fn basic_streaming(client: &Client<LangfuseState<Span>>) -> Result<()> {
95    println!("Question: Tell me a short joke");
96
97    let builder = client.chat().user("Tell me a short joke");
98
99    let mut stream = client.send_chat_stream(builder).await?;
100
101    print!("Response: ");
102    let mut chunk_count = 0;
103    while let Some(chunk) = stream.next().await {
104        let chunk = chunk?;
105        if let Some(content) = chunk.content() {
106            print!("{}", content);
107            chunk_count += 1;
108        }
109    }
110    println!(
111        "\n(Received {} chunks, all traced to Langfuse)",
112        chunk_count
113    );
114
115    Ok(())
116 }
117
118 async fn streaming_with_parameters(client: &Client<LangfuseState<Span>>) -> Result<()> {
119    println!("Question: Write a creative tagline for a bakery");
120
121    let builder = client
122        .chat()
123        .user("Write a creative tagline for a bakery")
124        .temperature(0.9)
125        .max_tokens(50);
126
127    let mut stream = client.send_chat_stream(builder).await?;
128
129    print!("Response: ");
130    let mut chunk_count = 0;
131    while let Some(chunk) = stream.next().await {
132        let chunk = chunk?;
133        if let Some(content) = chunk.content() {
134            print!("{}", content);
135            chunk_count += 1;
136        }
137    }
138    println!(
139        "\n(Received {} chunks, all traced to Langfuse)",
140        chunk_count
141    );
142
143    Ok(())
144 }
145
146 async fn collect_content(client: &Client<LangfuseState<Span>>) -> Result<()> {
147    println!("Question: What is the capital of France?");
148
149    let builder = client.chat().user("What is the capital of France?");
150
151    let mut stream = client.send_chat_stream(builder).await?;
152
153    // Manually collect content (interceptor hooks are still called for each chunk)
154    let mut content = String::new();
155    while let Some(chunk) = stream.next().await {
156        let chunk = chunk?;
157        if let Some(text) = chunk.content() {
158            content.push_str(text);
159        }
160    }
161    println!("Full response: {}", content);
162    println!("(All chunks were traced to Langfuse during collection)");
163
164    Ok(())
165 }
examples/tool_calling.rs (line 131)
128 async fn simple_tool_call(client: &Client) -> Result<()> {
129    let builder = client
130        .chat()
131        .user("What's the weather like in San Francisco?")
132        .tools(vec![get_weather_tool()]);
133    let response = client.send_chat(builder).await?;
134
135    // Check for tool calls
136    let tool_calls = response.tool_calls();
137    if !tool_calls.is_empty() {
138        for tool_call in tool_calls {
139            println!("Tool called: {}", tool_call.function_name());
140            println!("Arguments: {}", tool_call.function_arguments());
141
142            // Execute the function
143            let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
144            let result = execute_weather_function(params)?;
145            println!("Function result: {}", result);
146        }
147    }
148
149    Ok(())
150 }
151
152 async fn multiple_tools(client: &Client) -> Result<()> {
153    let builder = client
154        .chat()
155        .user("What's the weather in NYC and what time is it there?")
156        .tools(vec![get_weather_tool(), get_time_tool()]);
157    let response = client.send_chat(builder).await?;
158
159    for tool_call in response.tool_calls() {
160        match tool_call.function_name() {
161            "get_weather" => {
162                let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
163                let result = execute_weather_function(params)?;
164                println!("Weather result: {}", result);
165            }
166            "get_current_time" => {
167                let params: serde_json::Value =
168                    serde_json::from_str(tool_call.function_arguments())?;
169                if let Some(timezone) = params["timezone"].as_str() {
170                    let result = execute_time_function(timezone);
171                    println!("Time result: {}", result);
172                }
173            }
174            _ => println!("Unknown tool: {}", tool_call.function_name()),
175        }
176    }
177
178    Ok(())
179 }
180
181 async fn tool_choice_control(client: &Client) -> Result<()> {
182    // Force specific tool
183    println!("Forcing weather tool:");
184    let builder = client
185        .chat()
186        .user("Tell me about Paris")
187        .tools(vec![get_weather_tool(), get_time_tool()])
188        .tool_choice(ToolChoiceHelper::specific("get_weather"));
189    let response = client.send_chat(builder).await?;
190
191    for tool_call in response.tool_calls() {
192        println!("Forced tool: {}", tool_call.function_name());
193    }
194
195    // Disable tools
196    println!("\nDisabling tools:");
197    let builder = client
198        .chat()
199        .user("What's the weather?")
200        .tools(vec![get_weather_tool()])
201        .tool_choice(ToolChoiceHelper::none());
202    let response = client.send_chat(builder).await?;
203
204    if let Some(content) = response.content() {
205        println!("Response without tools: {}", content);
206    }
207
208    Ok(())
209 }
210
211 async fn conversation_with_tools(client: &Client) -> Result<()> {
212    // This example demonstrates proper multi-turn tool calling with full message history
213
214    println!("=== Conversation with Tools (Full Implementation) ===");
215
216    // Initialize the conversation
217    let mut builder = client
218        .chat()
219        .user("What's the weather in Tokyo?")
220        .tools(vec![get_weather_tool()]);
221
222    // First request - the model will call the tool
223    let response = client.send_chat(builder.clone()).await?;
224
225    // Check for tool calls
226    let tool_calls = response.tool_calls();
227    if !tool_calls.is_empty() {
228        println!("Step 1: Model requests tool call");
229        for tool_call in &tool_calls {
230            println!("  Tool: {}", tool_call.function_name());
231            println!("  Args: {}", tool_call.function_arguments());
232        }
233
234        // IMPORTANT: Add the assistant's response (with tool calls) to the history
235        // This is the key step for maintaining proper conversation context!
236        builder = builder.assistant_with_tool_calls(
237            response.content().unwrap_or(""),
238            tool_calls.iter().map(|tc| (*tc).clone()).collect(),
239        );
240
241        // Execute the tools and add results
242        println!("\nStep 2: Execute tools and add results to conversation");
243        for tool_call in tool_calls {
244            let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
245            let result = execute_weather_function(params)?;
246            println!("  Tool result: {}", result);
247
248            // Add the tool result to the conversation history
249            builder = builder.tool(tool_call.id(), result);
250        }
251
252        // Send the follow-up request with tool results
253        println!("\nStep 3: Send follow-up request with tool results");
254        let final_response = client
255            .send_chat(builder.tools(vec![get_weather_tool()]))
256            .await?;
257
258        if let Some(content) = final_response.content() {
259            println!("  Final assistant response: {}", content);
260        }
261    }
262
263    println!("\nNote: This demonstrates the complete tool calling loop with proper");
264    println!("message history management using assistant_with_tool_calls()");
265
266    Ok(())
267 }
268
269 fn streaming_with_tools(_client: &Client) {
270    println!("Streaming response with tools:");
271
272    // Note: Streaming with tool calls is more complex and requires
273    // proper handling of partial tool call chunks. For now, this is
274    // a placeholder showing the concept.
275
276    println!("This would demonstrate streaming tool calls if streaming API was available");
277    println!("In streaming mode, tool calls would arrive as chunks that need to be assembled");
278 }
279
280 async fn parallel_tool_calls(client: &Client) -> Result<()> {
281    let builder = client
282        .chat()
283        .user("Check the weather in Tokyo, London, and New York")
284        .tools(vec![get_weather_tool()]);
285    let response = client.send_chat(builder).await?;
286
287    // Modern models can call multiple tools in parallel
288    let tool_calls = response.tool_calls();
289    println!("Parallel tool calls: {}", tool_calls.len());
290
291    // Collect arguments first to avoid lifetime issues
292    let args_vec: Vec<String> = tool_calls
293        .iter()
294        .map(|tc| tc.function_arguments().to_string())
295        .collect();
296
297    // Execute all in parallel using tokio
298    let mut handles = Vec::new();
299    for args in args_vec {
300        let handle = tokio::spawn(async move {
301            let params: WeatherParams = serde_json::from_str(&args)?;
302            execute_weather_function(params)
303        });
304        handles.push(handle);
305    }
306
307    // Wait for all results
308    for (i, handle) in handles.into_iter().enumerate() {
309        match handle.await {
310            Ok(Ok(result)) => println!("Location {}: {}", i + 1, result),
311            Ok(Err(e)) => println!("Location {} error: {}", i + 1, e),
312            Err(e) => println!("Task {} panicked: {}", i + 1, e),
313        }
314    }
315
316    Ok(())
317 }
examples/retry_patterns.rs (line 369)
356 async fn fallback_chain(client: &Client) -> Result<()> {
357    // Define fallback chain
358    let strategies = vec![
359        ("GPT-4o", "gpt-4o", 1024),
360        ("GPT-4o-mini", "gpt-4o-mini", 512),
361        ("GPT-3.5", "gpt-3.5-turbo", 256),
362    ];
363
364    let prompt = "Explain quantum computing";
365
366    for (name, _model, max_tokens) in strategies {
367        println!("Trying {} (max_tokens: {})", name, max_tokens);
368
369        let builder = client.chat().user(prompt).max_completion_tokens(max_tokens);
370        match client.send_chat(builder).await {
371            Ok(response) => {
372                println!("Success with {}", name);
373                if let Some(content) = response.content() {
374                    println!("Response: {}...", &content[..content.len().min(100)]);
375                }
376                return Ok(());
377            }
378            Err(e) => {
379                println!("Failed with {}: {}", name, e);
380            }
381        }
382    }
383
384    println!("All fallback strategies exhausted");
385    Ok(())
386 }
examples/models.rs (line 186)
159 async fn model_selection_by_task(client: &Client) -> Result<()> {
160    // Task-specific model recommendations
161    let task_models = vec![
162        ("Simple Q&A", "gpt-3.5-turbo", "Fast and cost-effective"),
163        ("Complex reasoning", "gpt-4o", "Best reasoning capabilities"),
164        ("Code generation", "gpt-4o", "Excellent code understanding"),
165        ("Vision tasks", "gpt-4o", "Native vision support"),
166        (
167            "Quick responses",
168            "gpt-4o-mini",
169            "Low latency, good quality",
170        ),
171        (
172            "Bulk processing",
173            "gpt-3.5-turbo",
174            "Best cost/performance ratio",
175        ),
176    ];
177
178    for (task, model, reason) in task_models {
179        println!("Task: {}", task);
180        println!("  Recommended: {}", model);
181        println!("  Reason: {}", reason);
182
183        // Demo the model
184        let builder = client
185            .chat()
186            .user(format!("Say 'Hello from {}'", model))
187            .max_completion_tokens(10);
188        let response = client.send_chat(builder).await?;
189
190        if let Some(content) = response.content() {
191            println!("  Response: {}\n", content);
192        }
193    }
194
195    Ok(())
196 }
197
198 async fn cost_optimization(client: &Client) -> Result<()> {
199    let models = get_model_registry();
200    let test_prompt = "Explain the theory of relativity in one sentence";
201    let estimated_input_tokens = 15;
202    let estimated_output_tokens = 50;
203
204    println!("Cost comparison for same task:");
205    println!("Prompt: '{}'\n", test_prompt);
206
207    let mut costs = Vec::new();
208
209    for (name, info) in &models {
210        if !info.deprecated {
211            let input_cost = (f64::from(estimated_input_tokens) / 1000.0) * info.cost_per_1k_input;
212            let output_cost =
213                (f64::from(estimated_output_tokens) / 1000.0) * info.cost_per_1k_output;
214            let total_cost = input_cost + output_cost;
215
216            costs.push((name.clone(), total_cost));
217        }
218    }
219
220    costs.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
221
222    println!("{:<20} {:>15}", "Model", "Estimated Cost");
223    println!("{:-<35}", "");
224    for (model, cost) in costs {
225        println!("{:<20} ${:>14.6}", model, cost);
226    }
227
228    // Demonstrate cheapest vs best
229    println!("\nRunning with cheapest model (gpt-3.5-turbo):");
230    let builder = client.chat().user(test_prompt);
231    let cheap_response = client.send_chat(builder).await?;
232
233    if let Some(content) = cheap_response.content() {
234        println!("Response: {}", content);
235    }
236
237    Ok(())
238 }
239
240 async fn performance_testing(client: &Client) -> Result<()> {
241    use std::time::Instant;
242
243    let models_to_test = vec!["gpt-4o-mini", "gpt-3.5-turbo"];
244    let test_prompt = "Write a haiku about programming";
245
246    println!("Performance comparison:");
247    println!("{:<20} {:>10} {:>15}", "Model", "Latency", "Tokens/sec");
248    println!("{:-<45}", "");
249
250    for model in models_to_test {
251        let start = Instant::now();
252
253        let builder = client.chat().user(test_prompt);
254        let response = client.send_chat(builder).await?;
255
256        let elapsed = start.elapsed();
257
258        if let Some(usage) = response.usage() {
259            let total_tokens = f64::from(usage.total_tokens);
260            let tokens_per_sec = total_tokens / elapsed.as_secs_f64();
261
262            println!("{:<20} {:>10.2?} {:>15.1}", model, elapsed, tokens_per_sec);
263        }
264    }
265
266    Ok(())
267 }
268
269 async fn model_migration(client: &Client) -> Result<()> {
270    // Handle deprecated model migration
271    let deprecated_mappings = HashMap::from([
272        ("text-davinci-003", "gpt-3.5-turbo"),
273        ("gpt-4-32k", "gpt-4o"),
274        ("gpt-4-vision-preview", "gpt-4o"),
275    ]);
276
277    let requested_model = "text-davinci-003"; // Deprecated model
278
279    if let Some(replacement) = deprecated_mappings.get(requested_model) {
280        println!(
281            "Warning: {} is deprecated. Using {} instead.",
282            requested_model, replacement
283        );
284
285        let builder = client.chat().user("Hello from migrated model");
286        let response = client.send_chat(builder).await?;
287
288        if let Some(content) = response.content() {
289            println!("Response from {}: {}", replacement, content);
290        }
291    }
292
293    Ok(())
294 }
295
296 async fn dynamic_model_selection(client: &Client) -> Result<()> {
297    // Select model based on runtime conditions
298
299    #[derive(Debug)]
300    struct RequestContext {
301        urgency: Urgency,
302        complexity: Complexity,
303        budget: Budget,
304        needs_vision: bool,
305    }
306
307    #[derive(Debug)]
308    enum Urgency {
309        Low,
310        Medium,
311        High,
312    }
313
314    #[derive(Debug)]
315    enum Complexity {
316        Simple,
317        Moderate,
318        Complex,
319    }
320
321    #[derive(Debug)]
322    enum Budget {
323        Tight,
324        Normal,
325        Flexible,
326    }
327
328    const fn select_model(ctx: &RequestContext) -> &'static str {
329        match (&ctx.urgency, &ctx.complexity, &ctx.budget) {
330            // High urgency + simple = fast cheap model, or tight budget = cheapest
331            (Urgency::High, Complexity::Simple, _) | (_, _, Budget::Tight) => "gpt-3.5-turbo",
332
333            // Complex + flexible budget = best model
334            (_, Complexity::Complex, Budget::Flexible) => "gpt-4o",
335
336            // Vision required
337            _ if ctx.needs_vision => "gpt-4o",
338
339            // Default balanced choice
340            _ => "gpt-4o-mini",
341        }
342    }
343
344    // Example contexts
345    let contexts = [
346        RequestContext {
347            urgency: Urgency::High,
348            complexity: Complexity::Simple,
349            budget: Budget::Tight,
350            needs_vision: false,
351        },
352        RequestContext {
353            urgency: Urgency::Low,
354            complexity: Complexity::Complex,
355            budget: Budget::Flexible,
356            needs_vision: false,
357        },
358        RequestContext {
359            urgency: Urgency::Medium,
360            complexity: Complexity::Moderate,
361            budget: Budget::Normal,
362            needs_vision: true,
363        },
364    ];
365
366    for (i, ctx) in contexts.iter().enumerate() {
367        let model = select_model(ctx);
368        println!("Context {}: {:?}", i + 1, ctx);
369        println!("  Selected model: {}", model);
370
371        let builder = client
372            .chat()
373            .user(format!("Hello from dynamically selected {}", model))
374            .max_completion_tokens(20);
375        let response = client.send_chat(builder).await?;
376
377        if let Some(content) = response.content() {
378            println!("  Response: {}\n", content);
379        }
380    }
381
382    Ok(())
383 }
examples/moderations.rs (line 235)
228 async fn response_filtering(client: &Client) -> Result<()> {
229    // Filter AI responses before showing to users
230
231    println!("Generating and moderating AI responses:");
232
233    // Generate response
234    let prompt = "Tell me about technology";
235    let builder = client.chat().user(prompt).max_completion_tokens(100);
236    let response = client.send_chat(builder).await?;
237
238    if let Some(content) = response.content() {
239        println!("Generated response: '{}'", content);
240
241        // Moderate the response
242        let moderation_result = simulate_moderation(content);
243
244        if moderation_result.flagged {
245            println!(
246                "  Response flagged! Categories: {:?}",
247                moderation_result.categories
248            );
249            println!("Action: Response blocked or regenerated");
250
251            // Regenerate with more strict instructions
252            let safe_builder = client
253                .chat()
254                .system("Provide helpful, safe, and appropriate responses only.")
255                .user(prompt)
256                .max_completion_tokens(100);
257            let safe_response = client.send_chat(safe_builder).await?;
258
259            if let Some(safe_content) = safe_response.content() {
260                println!("Regenerated safe response: '{}'", safe_content);
261            }
262        } else {
263            println!(" Response passed moderation");
264        }
265    }
266
267    Ok(())
268 }
269
270 fn policy_enforcement(_client: &Client) {
271    // Enforce content policies
272    let policy = ModerationPolicy {
273        thresholds: HashMap::from([
274            ("harassment".to_string(), 0.5),
275            ("violence".to_string(), 0.6),
276            ("sexual".to_string(), 0.4),
277        ]),
278        auto_reject_categories: vec![
279            "harassment/threatening".to_string(),
280            "violence/graphic".to_string(),
281        ],
282        require_human_review: vec!["self-harm".to_string()],
283    };
284
285    let test_cases = vec![
286        "Normal conversation about work",
287        "Slightly aggressive language here",
288        "Content requiring review",
289    ];
290
291    for content in test_cases {
292        println!("Checking: '{}'", content);
293
294        let result = simulate_moderation(content);
295        let action = apply_policy(&result, &policy);
296
297        match action {
298            PolicyAction::Approve => println!("   Approved"),
299            PolicyAction::Reject(reason) => println!("   Rejected: {}", reason),
300            PolicyAction::Review(reason) => println!("   Human review needed: {}", reason),
301        }
302    }
303}
304
305async fn moderation_pipeline(client: &Client) -> Result<()> {
306    // Complete moderation pipeline
307
308    type FilterFn = Box<dyn Fn(&str) -> bool + Send + Sync>;
309
310    struct ModerationPipeline {
311        pre_filters: Vec<FilterFn>,
312        post_filters: Vec<FilterFn>,
313    }
314
315    let pipeline = ModerationPipeline {
316        pre_filters: vec![
317            Box::new(|text| text.len() < 10000), // Length check
318            Box::new(|text| !text.is_empty()),   // Non-empty check
319        ],
320        post_filters: vec![
321            Box::new(|text| !text.contains("blockedword")), // Custom word filter
322        ],
323    };
324
325    println!("Running moderation pipeline:");
326
327    let user_input = "Please help me with this technical question about Rust programming.";
328
329    // Step 1: Pre-filters
330    println!("1. Pre-filters:");
331    for (i, filter) in pipeline.pre_filters.iter().enumerate() {
332        if filter(user_input) {
333            println!("   Pre-filter {} passed", i + 1);
334        } else {
335            println!("   Pre-filter {} failed", i + 1);
336            return Ok(());
337        }
338    }
339
340    // Step 2: API moderation
341    println!("2. API moderation:");
342    let moderation_result = simulate_moderation(user_input);
343    if moderation_result.flagged {
344        println!("   Content flagged by API");
345        return Ok(());
346    }
347    println!("   Passed API moderation");
348
349    // Step 3: Generate response
350    println!("3. Generating response:");
351    let builder = client.chat().user(user_input).max_completion_tokens(50);
352    let response = client.send_chat(builder).await?;
353
354    if let Some(content) = response.content() {
355        println!("  Generated: '{}'", content);
356
357        // Step 4: Post-filters
358        println!("4. Post-filters:");
359        for (i, filter) in pipeline.post_filters.iter().enumerate() {
360            if filter(content) {
361                println!("   Post-filter {} passed", i + 1);
362            } else {
363                println!("   Post-filter {} failed", i + 1);
364                return Ok(());
365            }
366        }
367
368        // Step 5: Response moderation
369        println!("5. Response moderation:");
370        let response_moderation = simulate_moderation(content);
371        if response_moderation.flagged {
372            println!("   Response flagged");
373        } else {
374            println!("   Response approved");
375            println!("\nFinal output: '{}'", content);
376        }
377    }
378
379    Ok(())
380}
Source

pub fn user_with_image_url(self, text: impl Into<String>, image_url: impl Into<String>) -> Self

Add a user message with both text and an image URL.
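For a quick orientation before the full repository examples below, here is a minimal sketch, assuming a configured Client inside an async function; the image URL is a placeholder, not one of the repository's sample images:

// Minimal sketch: ask a vision-capable model about a single image.
let builder = client
    .chat()
    .user_with_image_url("Describe this image.", "https://example.com/photo.jpg")
    .temperature(0.3);
let response = client.send_chat(builder).await?;
if let Some(content) = response.content() {
    println!("{content}");
}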

Examples found in repository?
examples/vision_chat.rs (line 90)
72async fn demonstrate_basic_image_analysis(
73    client: &Client,
74) -> Result<(), Box<dyn std::error::Error>> {
75    println!("  Example 1: Basic Image Analysis");
76    println!("----------------------------------");
77
78    let image_url = SAMPLE_IMAGE_URLS[0];
79    let question = "What do you see in this image? Please describe it in detail.";
80
81    println!("Image URL: {image_url}");
82    println!("Question: {question}");
83    print!("Assistant: ");
84    io::stdout().flush()?;
85
86    // Use the convenient user_with_image_url method
87    let chat_builder = client
88        .chat()
89        .system("You are a helpful AI assistant that can analyze images. Provide detailed, accurate descriptions of what you see.")
90        .user_with_image_url(question, image_url)
91        .temperature(0.3);
92
93    let response = client.send_chat(chat_builder).await?;
94
95    if let Some(content) = response.content() {
96        println!("{content}");
97
98        // Show usage information
99        if let Some(usage) = response.usage() {
100            println!("\n Token usage:");
101            println!("  Prompt tokens: {}", usage.prompt_tokens);
102            println!("  Completion tokens: {}", usage.completion_tokens);
103            println!("  Total tokens: {}", usage.total_tokens);
104        }
105    } else {
106        println!("No response content received");
107    }
108
109    println!();
110    Ok(())
111}
112
113/// Demonstrate analysis of multiple images in a single message.
114async fn demonstrate_multiple_images(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
115    println!(" Example 2: Multiple Image Analysis");
116    println!("---------------------------------------");
117
118    let question = "Compare these two images. What are the differences and similarities?";
119
120    println!("Question: {question}");
121    println!("Image 1: {}", SAMPLE_IMAGE_URLS[0]);
122    println!("Image 2: {}", SAMPLE_IMAGE_URLS[1]);
123    print!("Assistant: ");
124    io::stdout().flush()?;
125
126    // Create message parts manually for multiple images
127    let parts = vec![
128        text_part(question),
129        image_url_part_with_detail(SAMPLE_IMAGE_URLS[0], Detail::Auto),
130        image_url_part_with_detail(SAMPLE_IMAGE_URLS[1], Detail::Auto),
131    ];
132
133    let chat_builder = client
134        .chat()
135        .system("You are an expert at comparing and analyzing images. Provide thoughtful comparisons focusing on visual elements, composition, and content.")
136        .user_with_parts(parts)
137        .temperature(0.4);
138
139    let response = client.send_chat(chat_builder).await?;
140
141    if let Some(content) = response.content() {
142        println!("{content}");
143    } else {
144        println!("No response content received");
145    }
146
147    println!();
148    Ok(())
149}
150
151/// Demonstrate different detail levels for image analysis.
152async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153    println!(" Example 3: Different Detail Levels");
154    println!("------------------------------------");
155
156    let image_url = SAMPLE_IMAGE_URLS[0];
157    let question = "Analyze this image";
158
159    // Test different detail levels
160    let detail_levels = vec![
161        (Detail::Low, "Low detail (faster, less detailed)"),
162        (Detail::High, "High detail (slower, more detailed)"),
163        (Detail::Auto, "Auto detail (balanced)"),
164    ];
165
166    for (detail, description) in detail_levels {
167        println!("\n{description}:");
168        print!("Assistant: ");
169        io::stdout().flush()?;
170
171        let chat_builder = client
172            .chat()
173            .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174            .user_with_image_url_and_detail(question, image_url, detail)
175            .temperature(0.2)
176            .max_completion_tokens(100); // Limit response length for comparison
177
178        let response = client.send_chat(chat_builder).await?;
179
180        if let Some(content) = response.content() {
181            println!("{content}");
182        }
183    }
184
185    println!();
186    Ok(())
187}
188
189/// Demonstrate base64 image encoding and analysis.
190async fn demonstrate_base64_image(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
191    println!(" Example 4: Base64 Image Analysis");
192    println!("-----------------------------------");
193
194    let question = "What is this image? It's very small, what can you tell about it?";
195
196    println!("Question: {question}");
197    println!("Image: Small test image encoded as base64");
198    print!("Assistant: ");
199    io::stdout().flush()?;
200
201    // Create message parts with base64 image
202    let parts = vec![
203        text_part(question),
204        image_base64_part_with_detail(SAMPLE_BASE64_IMAGE, "image/png", Detail::High),
205    ];
206
207    let chat_builder = client
208        .chat()
209        .system("You are analyzing images provided in base64 format. Even if an image is very small or simple, try to provide what information you can.")
210        .user_with_parts(parts)
211        .temperature(0.3);
212
213    let response = client.send_chat(chat_builder).await?;
214
215    if let Some(content) = response.content() {
216        println!("{content}");
217    } else {
218        println!("No response content received");
219    }
220
221    println!();
222    Ok(())
223}
224
225/// Demonstrate conversation context with images.
226async fn demonstrate_conversation_with_images(
227    client: &Client,
228) -> Result<(), Box<dyn std::error::Error>> {
229    println!(" Example 5: Conversation Context with Images");
230    println!("----------------------------------------------");
231
232    let image_url = SAMPLE_IMAGE_URLS[0];
233
234    // First message: Analyze the image
235    println!("Step 1: Initial image analysis");
236    print!("Assistant: ");
237    io::stdout().flush()?;
238
239    let mut chat_builder = client
240        .chat()
241        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
242        .user_with_image_url("What's the main subject of this image?", image_url)
243        .temperature(0.3);
244
245    let response1 = client.send_chat(chat_builder).await?;
246    let first_response = response1.content().unwrap_or("No response").to_string();
247    println!("{first_response}");
248
249    // Second message: Follow-up question (without re-uploading the image)
250    println!("\nStep 2: Follow-up question");
251    print!("Assistant: ");
252    io::stdout().flush()?;
253
254    chat_builder = client
255        .chat()
256        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
257        .user_with_image_url("What's the main subject of this image?", image_url)
258        .assistant(&first_response)
259        .user("What colors are most prominent in the image we just discussed?")
260        .temperature(0.3);
261
262    let response2 = client.send_chat(chat_builder).await?;
263
264    if let Some(content) = response2.content() {
265        println!("{content}");
266    }
267
268    // Third message: Ask for creative interpretation
269    println!("\nStep 3: Creative interpretation");
270    print!("Assistant: ");
271    io::stdout().flush()?;
272
273    let second_response = response2.content().unwrap_or("No response").to_string();
274
275    chat_builder = client
276        .chat()
277        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
278        .user_with_image_url("What's the main subject of this image?", image_url)
279        .assistant(&first_response)
280        .user("What colors are most prominent in the image we just discussed?")
281        .assistant(second_response)
282        .user("Based on our discussion, write a short poem inspired by this image.")
283        .temperature(0.7);
284
285    let response3 = client.send_chat(chat_builder).await?;
286
287    if let Some(content) = response3.content() {
288        println!("{content}");
289    }
290
291    println!();
292    Ok(())
293}
294
295/// Demonstrate error handling patterns for vision requests.
296async fn demonstrate_error_handling(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
297    println!("  Example 6: Error Handling Patterns");
298    println!("------------------------------------");
299
300    println!("Testing various error scenarios...\n");
301
302    // Test 1: Invalid image URL
303    println!("Test 1: Invalid image URL");
304    let invalid_url = "https://this-domain-does-not-exist-12345.com/image.jpg";
305
306    let invalid_builder = client
307        .chat()
308        .user_with_image_url("What do you see?", invalid_url)
309        .temperature(0.3);
310
311    match client.send_chat(invalid_builder).await {
312        Ok(_) => println!(" Invalid URL request unexpectedly succeeded"),
313        Err(e) => match &e {
314            Error::Api {
315                status, message, ..
316            } => {
317                println!(" API properly rejected invalid URL ({status}): {message}");
318            }
319            Error::Http(reqwest_err) => {
320                println!(" HTTP error caught: {reqwest_err}");
321            }
322            Error::InvalidRequest(msg) => {
323                println!(" Validation caught invalid URL: {msg}");
324            }
325            _ => {
326                println!("ℹ  Other error type: {e}");
327            }
328        },
329    }
330
331    // Test 2: Empty message with image
332    println!("\nTest 2: Empty text with image");
333    let empty_text_builder = client
334        .chat()
335        .user_with_image_url("", SAMPLE_IMAGE_URLS[0])
336        .temperature(0.3);
337
338    match client.send_chat(empty_text_builder).await {
339        Ok(response) => {
340            if let Some(content) = response.content() {
341                println!(
342                    " API handled empty text gracefully: {}",
343                    content.chars().take(50).collect::<String>()
344                );
345            }
346        }
347        Err(e) => {
348            println!("ℹ  Empty text error: {e}");
349        }
350    }
351
352    // Test 3: Malformed base64 data
353    println!("\nTest 3: Malformed base64 image data");
354    let malformed_base64 = "this-is-not-valid-base64!@#$%";
355    let malformed_parts = vec![
356        text_part("What is this?"),
357        image_base64_part_with_detail(malformed_base64, "image/png", Detail::Auto),
358    ];
359
360    let malformed_builder = client.chat().user_with_parts(malformed_parts);
361
362    match client.send_chat(malformed_builder).await {
363        Ok(_) => println!(" Malformed base64 unexpectedly succeeded"),
364        Err(e) => match &e {
365            Error::Api {
366                status, message, ..
367            } => {
368                println!(" API properly rejected malformed base64 ({status}): {message}");
369            }
370            _ => {
371                println!("ℹ  Other error for malformed base64: {e}");
372            }
373        },
374    }
375
376    println!("\n  Error handling patterns demonstrated:");
377    println!("  • Invalid image URL handling");
378    println!("  • Empty text with image handling");
379    println!("  • Malformed base64 data validation");
380    println!("  • API error classification");
381    println!("  • Network error handling");
382
383    println!();
384    Ok(())
385}
Source

pub fn user_with_image_url_and_detail(self, text: impl Into<String>, image_url: impl Into<String>, detail: Detail) -> Self

Add a user message with text and an image URL at the specified detail level.
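A minimal sketch, assuming a configured Client and a placeholder URL; Detail::High trades latency and tokens for a closer look at the image:

// Minimal sketch: request a high-detail read of an image.
let builder = client
    .chat()
    .user_with_image_url_and_detail(
        "What text appears in this image?",
        "https://example.com/sign.jpg", // placeholder URL
        Detail::High,
    )
    .max_completion_tokens(100);
let response = client.send_chat(builder).await?;
println!("{:?}", response.content());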

Examples found in repository?
examples/vision_chat.rs (line 174)
152async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153    println!(" Example 3: Different Detail Levels");
154    println!("------------------------------------");
155
156    let image_url = SAMPLE_IMAGE_URLS[0];
157    let question = "Analyze this image";
158
159    // Test different detail levels
160    let detail_levels = vec![
161        (Detail::Low, "Low detail (faster, less detailed)"),
162        (Detail::High, "High detail (slower, more detailed)"),
163        (Detail::Auto, "Auto detail (balanced)"),
164    ];
165
166    for (detail, description) in detail_levels {
167        println!("\n{description}:");
168        print!("Assistant: ");
169        io::stdout().flush()?;
170
171        let chat_builder = client
172            .chat()
173            .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174            .user_with_image_url_and_detail(question, image_url, detail)
175            .temperature(0.2)
176            .max_completion_tokens(100); // Limit response length for comparison
177
178        let response = client.send_chat(chat_builder).await?;
179
180        if let Some(content) = response.content() {
181            println!("{content}");
182        }
183    }
184
185    println!();
186    Ok(())
187}
Source

pub fn user_with_parts(self, parts: Vec<ChatCompletionRequestUserMessageContentPart>) -> Self

Add a user message with multiple content parts (text and/or images).
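A minimal sketch, assuming the text_part and image_url_part_with_detail helpers are in scope as in the repository examples below, with placeholder URLs:

// Minimal sketch: one user message carrying text plus two images.
let parts = vec![
    text_part("Compare these two images."),
    image_url_part_with_detail("https://example.com/a.jpg", Detail::Auto),
    image_url_part_with_detail("https://example.com/b.jpg", Detail::Auto),
];
let builder = client.chat().user_with_parts(parts);
let response = client.send_chat(builder).await?;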

Examples found in repository?
examples/vision_chat.rs (line 136)
114async fn demonstrate_multiple_images(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
115    println!(" Example 2: Multiple Image Analysis");
116    println!("---------------------------------------");
117
118    let question = "Compare these two images. What are the differences and similarities?";
119
120    println!("Question: {question}");
121    println!("Image 1: {}", SAMPLE_IMAGE_URLS[0]);
122    println!("Image 2: {}", SAMPLE_IMAGE_URLS[1]);
123    print!("Assistant: ");
124    io::stdout().flush()?;
125
126    // Create message parts manually for multiple images
127    let parts = vec![
128        text_part(question),
129        image_url_part_with_detail(SAMPLE_IMAGE_URLS[0], Detail::Auto),
130        image_url_part_with_detail(SAMPLE_IMAGE_URLS[1], Detail::Auto),
131    ];
132
133    let chat_builder = client
134        .chat()
135        .system("You are an expert at comparing and analyzing images. Provide thoughtful comparisons focusing on visual elements, composition, and content.")
136        .user_with_parts(parts)
137        .temperature(0.4);
138
139    let response = client.send_chat(chat_builder).await?;
140
141    if let Some(content) = response.content() {
142        println!("{content}");
143    } else {
144        println!("No response content received");
145    }
146
147    println!();
148    Ok(())
149}
150
151/// Demonstrate different detail levels for image analysis.
152async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153    println!(" Example 3: Different Detail Levels");
154    println!("------------------------------------");
155
156    let image_url = SAMPLE_IMAGE_URLS[0];
157    let question = "Analyze this image";
158
159    // Test different detail levels
160    let detail_levels = vec![
161        (Detail::Low, "Low detail (faster, less detailed)"),
162        (Detail::High, "High detail (slower, more detailed)"),
163        (Detail::Auto, "Auto detail (balanced)"),
164    ];
165
166    for (detail, description) in detail_levels {
167        println!("\n{description}:");
168        print!("Assistant: ");
169        io::stdout().flush()?;
170
171        let chat_builder = client
172            .chat()
173            .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174            .user_with_image_url_and_detail(question, image_url, detail)
175            .temperature(0.2)
176            .max_completion_tokens(100); // Limit response length for comparison
177
178        let response = client.send_chat(chat_builder).await?;
179
180        if let Some(content) = response.content() {
181            println!("{content}");
182        }
183    }
184
185    println!();
186    Ok(())
187}
188
189/// Demonstrate base64 image encoding and analysis.
190async fn demonstrate_base64_image(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
191    println!(" Example 4: Base64 Image Analysis");
192    println!("-----------------------------------");
193
194    let question = "What is this image? It's very small, what can you tell about it?";
195
196    println!("Question: {question}");
197    println!("Image: Small test image encoded as base64");
198    print!("Assistant: ");
199    io::stdout().flush()?;
200
201    // Create message parts with base64 image
202    let parts = vec![
203        text_part(question),
204        image_base64_part_with_detail(SAMPLE_BASE64_IMAGE, "image/png", Detail::High),
205    ];
206
207    let chat_builder = client
208        .chat()
209        .system("You are analyzing images provided in base64 format. Even if an image is very small or simple, try to provide what information you can.")
210        .user_with_parts(parts)
211        .temperature(0.3);
212
213    let response = client.send_chat(chat_builder).await?;
214
215    if let Some(content) = response.content() {
216        println!("{content}");
217    } else {
218        println!("No response content received");
219    }
220
221    println!();
222    Ok(())
223}
224
225/// Demonstrate conversation context with images.
226async fn demonstrate_conversation_with_images(
227    client: &Client,
228) -> Result<(), Box<dyn std::error::Error>> {
229    println!(" Example 5: Conversation Context with Images");
230    println!("----------------------------------------------");
231
232    let image_url = SAMPLE_IMAGE_URLS[0];
233
234    // First message: Analyze the image
235    println!("Step 1: Initial image analysis");
236    print!("Assistant: ");
237    io::stdout().flush()?;
238
239    let mut chat_builder = client
240        .chat()
241        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
242        .user_with_image_url("What's the main subject of this image?", image_url)
243        .temperature(0.3);
244
245    let response1 = client.send_chat(chat_builder).await?;
246    let first_response = response1.content().unwrap_or("No response").to_string();
247    println!("{first_response}");
248
249    // Second message: Follow-up question (without re-uploading the image)
250    println!("\nStep 2: Follow-up question");
251    print!("Assistant: ");
252    io::stdout().flush()?;
253
254    chat_builder = client
255        .chat()
256        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
257        .user_with_image_url("What's the main subject of this image?", image_url)
258        .assistant(&first_response)
259        .user("What colors are most prominent in the image we just discussed?")
260        .temperature(0.3);
261
262    let response2 = client.send_chat(chat_builder).await?;
263
264    if let Some(content) = response2.content() {
265        println!("{content}");
266    }
267
268    // Third message: Ask for creative interpretation
269    println!("\nStep 3: Creative interpretation");
270    print!("Assistant: ");
271    io::stdout().flush()?;
272
273    let second_response = response2.content().unwrap_or("No response").to_string();
274
275    chat_builder = client
276        .chat()
277        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
278        .user_with_image_url("What's the main subject of this image?", image_url)
279        .assistant(&first_response)
280        .user("What colors are most prominent in the image we just discussed?")
281        .assistant(second_response)
282        .user("Based on our discussion, write a short poem inspired by this image.")
283        .temperature(0.7);
284
285    let response3 = client.send_chat(chat_builder).await?;
286
287    if let Some(content) = response3.content() {
288        println!("{content}");
289    }
290
291    println!();
292    Ok(())
293}
294
295/// Demonstrate error handling patterns for vision requests.
296async fn demonstrate_error_handling(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
297    println!("  Example 6: Error Handling Patterns");
298    println!("------------------------------------");
299
300    println!("Testing various error scenarios...\n");
301
302    // Test 1: Invalid image URL
303    println!("Test 1: Invalid image URL");
304    let invalid_url = "https://this-domain-does-not-exist-12345.com/image.jpg";
305
306    let invalid_builder = client
307        .chat()
308        .user_with_image_url("What do you see?", invalid_url)
309        .temperature(0.3);
310
311    match client.send_chat(invalid_builder).await {
312        Ok(_) => println!(" Invalid URL request unexpectedly succeeded"),
313        Err(e) => match &e {
314            Error::Api {
315                status, message, ..
316            } => {
317                println!(" API properly rejected invalid URL ({status}): {message}");
318            }
319            Error::Http(reqwest_err) => {
320                println!(" HTTP error caught: {reqwest_err}");
321            }
322            Error::InvalidRequest(msg) => {
323                println!(" Validation caught invalid URL: {msg}");
324            }
325            _ => {
326                println!("ℹ  Other error type: {e}");
327            }
328        },
329    }
330
331    // Test 2: Empty message with image
332    println!("\nTest 2: Empty text with image");
333    let empty_text_builder = client
334        .chat()
335        .user_with_image_url("", SAMPLE_IMAGE_URLS[0])
336        .temperature(0.3);
337
338    match client.send_chat(empty_text_builder).await {
339        Ok(response) => {
340            if let Some(content) = response.content() {
341                println!(
342                    " API handled empty text gracefully: {}",
343                    content.chars().take(50).collect::<String>()
344                );
345            }
346        }
347        Err(e) => {
348            println!("ℹ  Empty text error: {e}");
349        }
350    }
351
352    // Test 3: Malformed base64 data
353    println!("\nTest 3: Malformed base64 image data");
354    let malformed_base64 = "this-is-not-valid-base64!@#$%";
355    let malformed_parts = vec![
356        text_part("What is this?"),
357        image_base64_part_with_detail(malformed_base64, "image/png", Detail::Auto),
358    ];
359
360    let malformed_builder = client.chat().user_with_parts(malformed_parts);
361
362    match client.send_chat(malformed_builder).await {
363        Ok(_) => println!(" Malformed base64 unexpectedly succeeded"),
364        Err(e) => match &e {
365            Error::Api {
366                status, message, ..
367            } => {
368                println!(" API properly rejected malformed base64 ({status}): {message}");
369            }
370            _ => {
371                println!("ℹ  Other error for malformed base64: {e}");
372            }
373        },
374    }
375
376    println!("\n  Error handling patterns demonstrated:");
377    println!("  • Invalid image URL handling");
378    println!("  • Empty text with image handling");
379    println!("  • Malformed base64 data validation");
380    println!("  • API error classification");
381    println!("  • Network error handling");
382
383    println!();
384    Ok(())
385}
Source

pub fn assistant(self, content: impl Into<String>) -> Self

Add an assistant message to the conversation.
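Because chat requests are stateless, earlier assistant turns must be replayed on every request. A minimal sketch, assuming a configured Client:

// Minimal sketch: replay a prior assistant reply so the model
// can resolve "that" in the follow-up question.
let builder = client
    .chat()
    .user("What is 2 + 2?")
    .assistant("2 + 2 equals 4.")
    .user("Now multiply that by 3.");
let response = client.send_chat(builder).await?;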

Examples found in repository?
examples/chat_streaming.rs (line 160)
135async fn multiple_turns(client: &Client) -> Result<()> {
136    println!("Building a conversation with multiple turns...\n");
137
138    // First turn
139    println!("User: What is 2+2?");
140    let builder = client.chat().user("What is 2+2?");
141
142    let mut stream = client.send_chat_stream(builder).await?;
143
144    print!("Assistant: ");
145    let mut first_response = String::new();
146    while let Some(chunk) = stream.next().await {
147        let chunk = chunk?;
148        if let Some(content) = chunk.content() {
149            print!("{}", content);
150            first_response.push_str(content);
151        }
152    }
153    println!();
154
155    // Second turn - continuing the conversation
156    println!("\nUser: Now multiply that by 3");
157    let builder = client
158        .chat()
159        .user("What is 2+2?")
160        .assistant(&first_response)
161        .user("Now multiply that by 3");
162
163    let mut stream = client.send_chat_stream(builder).await?;
164
165    print!("Assistant: ");
166    while let Some(chunk) = stream.next().await {
167        let chunk = chunk?;
168        if let Some(content) = chunk.content() {
169            print!("{}", content);
170        }
171    }
172    println!();
173
174    Ok(())
175}
More examples
examples/chat_comprehensive.rs (line 206)
184async fn demonstrate_basic_chat(
185    client: &Client,
186    conversation: &mut ConversationManager,
187) -> Result<(), Box<dyn std::error::Error>> {
188    println!(" Example 1: Basic Chat Completion");
189    println!("----------------------------------");
190
191    let user_message = "Hello! Can you explain what you can help me with?";
192    conversation.add_user_message(user_message.to_string());
193
194    println!("User: {user_message}");
195    print!("Assistant: ");
196    io::stdout().flush()?;
197
198    // Build the chat request with conversation history
199    let messages = conversation.get_conversation_for_api();
200    let mut chat_builder = client.chat();
201
202    for (role, content) in messages {
203        match role.as_str() {
204            "system" => chat_builder = chat_builder.system(content),
205            "user" => chat_builder = chat_builder.user(content),
206            "assistant" => chat_builder = chat_builder.assistant(content),
207            _ => {} // Ignore unknown roles
208        }
209    }
210
211    // Send the request
212    let response = client.send_chat(chat_builder.temperature(0.7)).await?;
213
214    if let Some(content) = response.content() {
215        println!("{content}");
216        conversation.add_assistant_message(content.to_string(), None);
217
218        // Track token usage if available
219        if let Some(usage) = response.usage() {
220            conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
221        }
222    } else {
223        println!("No response content received");
224    }
225
226    println!();
227    Ok(())
228}
229
230/// Demonstrate multi-turn conversation.
231async fn demonstrate_multi_turn_chat(
232    client: &Client,
233    conversation: &mut ConversationManager,
234) -> Result<(), Box<dyn std::error::Error>> {
235    println!(" Example 2: Multi-turn Conversation");
236    println!("------------------------------------");
237
238    let questions = vec![
239        "What's the capital of France?",
240        "What's the population of that city?",
241        "Can you tell me an interesting fact about it?",
242    ];
243
244    for question in questions {
245        conversation.add_user_message(question.to_string());
246
247        println!("User: {question}");
248        print!("Assistant: ");
249        io::stdout().flush()?;
250
251        // Build chat request with full conversation history
252        let messages = conversation.get_conversation_for_api();
253        let mut chat_builder = client.chat();
254
255        for (role, content) in messages {
256            match role.as_str() {
257                "system" => chat_builder = chat_builder.system(content),
258                "user" => chat_builder = chat_builder.user(content),
259                "assistant" => chat_builder = chat_builder.assistant(content),
260                _ => {}
261            }
262        }
263
264        let response = client.send_chat(chat_builder.temperature(0.3)).await?;
265
266        if let Some(content) = response.content() {
267            println!("{content}");
268            conversation.add_assistant_message(content.to_string(), None);
269
270            // Track token usage
271            if let Some(usage) = response.usage() {
272                conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
273            }
274        }
275
276        println!();
277        // Small delay between questions for readability
278        tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
279    }
280
281    Ok(())
282}
283
284/// Demonstrate streaming chat response.
285async fn demonstrate_streaming_chat(
286    _client: &Client,
287    conversation: &mut ConversationManager,
288) -> Result<(), Box<dyn std::error::Error>> {
289    println!(" Example 3: Streaming Chat Response");
290    println!("------------------------------------");
291
292    // Add user message for streaming example
293    let streaming_question = "Can you write a short poem about programming?";
294    conversation.add_user_message(streaming_question.to_string());
295
296    println!("User: {streaming_question}");
297    println!("Assistant (streaming): ");
298
299    // Note: Streaming is not yet fully implemented in the client
300    // This is a placeholder showing the intended API
301    println!(" Streaming functionality is being implemented...");
302    println!("Future implementation will show real-time token-by-token responses");
303
304    // Simulate what streaming would look like
305    let simulated_response = "Programming flows like poetry in motion,\nEach function a verse, each loop a devotion.\nVariables dance through memory's halls,\nWhile algorithms answer logic's calls.";
306
307    // Simulate typing effect
308    for char in simulated_response.chars() {
309        print!("{char}");
310        io::stdout().flush()?;
311        tokio::time::sleep(tokio::time::Duration::from_millis(30)).await;
312    }
313    println!("\n");
314
315    // Add the response to conversation history
316    conversation.add_assistant_message(simulated_response.to_string(), None);
317
318    Ok(())
319}
320
321/// Demonstrate token usage tracking.
322async fn demonstrate_token_tracking(
323    client: &Client,
324    conversation: &mut ConversationManager,
325) -> Result<(), Box<dyn std::error::Error>> {
326    println!(" Example 4: Token Usage Tracking");
327    println!("---------------------------------");
328
329    let efficiency_question = "In one sentence, what is machine learning?";
330    conversation.add_user_message(efficiency_question.to_string());
331
332    println!("User: {efficiency_question}");
333    print!("Assistant: ");
334    io::stdout().flush()?;
335
336    // Build chat request
337    let messages = conversation.get_conversation_for_api();
338    let mut chat_builder = client.chat().max_completion_tokens(50); // Limit tokens for demo
339
340    for (role, content) in messages {
341        match role.as_str() {
342            "system" => chat_builder = chat_builder.system(content),
343            "user" => chat_builder = chat_builder.user(content),
344            "assistant" => chat_builder = chat_builder.assistant(content),
345            _ => {}
346        }
347    }
348
349    let response = client.send_chat(chat_builder).await?;
350
351    if let Some(content) = response.content() {
352        println!("{content}");
353
354        // Display detailed token usage
355        if let Some(usage) = response.usage() {
356            println!("\n Token Usage Breakdown:");
357            println!("  Prompt tokens: {}", usage.prompt_tokens);
358            println!("  Completion tokens: {}", usage.completion_tokens);
359            println!("  Total tokens: {}", usage.total_tokens);
360
361            conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
362
363            conversation.add_assistant_message(content.to_string(), Some(usage.completion_tokens));
364        } else {
365            conversation.add_assistant_message(content.to_string(), None);
366        }
367    }
368
369    println!();
370    Ok(())
371}
examples/vision_chat.rs (line 258)
226async fn demonstrate_conversation_with_images(
227    client: &Client,
228) -> Result<(), Box<dyn std::error::Error>> {
229    println!(" Example 5: Conversation Context with Images");
230    println!("----------------------------------------------");
231
232    let image_url = SAMPLE_IMAGE_URLS[0];
233
234    // First message: Analyze the image
235    println!("Step 1: Initial image analysis");
236    print!("Assistant: ");
237    io::stdout().flush()?;
238
239    let mut chat_builder = client
240        .chat()
241        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
242        .user_with_image_url("What's the main subject of this image?", image_url)
243        .temperature(0.3);
244
245    let response1 = client.send_chat(chat_builder).await?;
246    let first_response = response1.content().unwrap_or("No response").to_string();
247    println!("{first_response}");
248
249    // Second message: Follow-up question (without re-uploading the image)
250    println!("\nStep 2: Follow-up question");
251    print!("Assistant: ");
252    io::stdout().flush()?;
253
254    chat_builder = client
255        .chat()
256        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
257        .user_with_image_url("What's the main subject of this image?", image_url)
258        .assistant(&first_response)
259        .user("What colors are most prominent in the image we just discussed?")
260        .temperature(0.3);
261
262    let response2 = client.send_chat(chat_builder).await?;
263
264    if let Some(content) = response2.content() {
265        println!("{content}");
266    }
267
268    // Third message: Ask for creative interpretation
269    println!("\nStep 3: Creative interpretation");
270    print!("Assistant: ");
271    io::stdout().flush()?;
272
273    let second_response = response2.content().unwrap_or("No response").to_string();
274
275    chat_builder = client
276        .chat()
277        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
278        .user_with_image_url("What's the main subject of this image?", image_url)
279        .assistant(&first_response)
280        .user("What colors are most prominent in the image we just discussed?")
281        .assistant(second_response)
282        .user("Based on our discussion, write a short poem inspired by this image.")
283        .temperature(0.7);
284
285    let response3 = client.send_chat(chat_builder).await?;
286
287    if let Some(content) = response3.content() {
288        println!("{content}");
289    }
290
291    println!();
292    Ok(())
293}
examples/azure_comprehensive.rs (line 65)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10    tracing_subscriber::fmt::init();
11
12    println!("=== Azure OpenAI Comprehensive API Test ===\n");
13
14    let client = Client::from_env()?.build();
15
16    // Test 1: Simple chat completion
17    println!("1. Testing simple chat completion...");
18    let builder = client.chat_simple("What is 2+2? Answer in one word.");
19    match client.send_chat(builder).await {
20        Ok(response) => {
21            if let Some(content) = response.content() {
22                println!("   ✓ Chat completion: {content}");
23            }
24        }
25        Err(e) => println!("   ✗ Chat completion failed: {e}"),
26    }
27
28    // Test 2: Chat with system message
29    println!("\n2. Testing chat with system message...");
30    let builder = client.chat_with_system(
31        "You are a helpful assistant that responds in one sentence.",
32        "What is Rust?",
33    );
34    match client.send_chat(builder).await {
35        Ok(response) => {
36            if let Some(content) = response.content() {
37                println!("   ✓ System message chat: {content}");
38            }
39        }
40        Err(e) => println!("   ✗ System message chat failed: {e}"),
41    }
42
43    // Test 3: Chat with temperature
44    println!("\n3. Testing chat with custom parameters...");
45    let builder = client
46        .chat()
47        .user("Say 'test' in a creative way")
48        .temperature(0.7)
49        .max_tokens(50);
50    match client.send_chat(builder).await {
51        Ok(response) => {
52            if let Some(content) = response.content() {
53                println!("   ✓ Custom parameters: {content}");
54            }
55        }
56        Err(e) => println!("   ✗ Custom parameters failed: {e}"),
57    }
58
59    // Test 4: Multiple messages conversation
60    println!("\n4. Testing multi-message conversation...");
61    let builder = client
62        .chat()
63        .system("You are a helpful assistant")
64        .user("My name is Alice")
65        .assistant("Hello Alice! Nice to meet you.")
66        .user("What's my name?");
67    match client.send_chat(builder).await {
68        Ok(response) => {
69            if let Some(content) = response.content() {
70                println!("   ✓ Multi-message: {content}");
71            }
72        }
73        Err(e) => println!("   ✗ Multi-message failed: {e}"),
74    }
75
76    // Test 5: Chat with max_tokens limit
77    println!("\n5. Testing with max_tokens limit...");
78    let builder = client.chat().user("Explain quantum physics").max_tokens(20);
79    match client.send_chat(builder).await {
80        Ok(response) => {
81            if let Some(content) = response.content() {
82                println!("   ✓ Limited tokens: {content}");
83                println!("   (Note: response is truncated due to max_tokens=20)");
84            }
85        }
86        Err(e) => println!("   ✗ Max tokens test failed: {e}"),
87    }
88
89    // Test 6: Using responses API
90    println!("\n6. Testing responses API...");
91    let builder = client.responses().user("What is the capital of France?");
92    match client.send_responses(builder).await {
93        Ok(response) => {
94            if let Some(content) = response.content() {
95                println!("   ✓ Responses API: {content}");
96            }
97        }
98        Err(e) => println!("   ✗ Responses API failed: {e}"),
99    }
100
101    println!("\n=== Test Summary ===");
102    println!("Azure OpenAI integration tested across multiple endpoints!");
103    println!("\nNote: Some advanced features like embeddings, streaming, and");
104    println!("tool calling may require specific Azure OpenAI deployments.");
105
106    Ok(())
107}
examples/tool_calling_multiturn.rs (line 311)
228async fn main() -> Result<()> {
229    println!("=== Multi-turn Tool Calling Example ===\n");
230
231    // Initialize client
232    let client = Client::from_env()?.build();
233
234    // Create storage for the memory tool
235    let storage = Arc::new(Mutex::new(HashMap::new()));
236
237    // Define available tools
238    let tools = vec![get_calculator_tool(), get_memory_tool()];
239
240    println!("Available tools:");
241    println!("  - calculator: Perform arithmetic operations");
242    println!("  - memory: Store and retrieve values");
243    println!();
244
245    // Example 1: Single tool call
246    println!("Example 1: Single Tool Call");
247    println!("User: What is 15 + 27?");
248    {
249        let chat_builder = client
250            .chat()
251            .system("You are a helpful assistant with access to a calculator and memory storage.")
252            .user("What is 15 + 27?");
253
254        let result = handle_tool_loop(&client, chat_builder, &tools, &storage).await?;
255        println!("Assistant: {}", result);
256    }
257
258    // Example 2: Multiple sequential tool calls
259    println!("\n\nExample 2: Multiple Sequential Tool Calls");
260    println!("User: Calculate 10 * 5 and store the result in memory as 'product'");
261    {
262        let chat_builder = client
263            .chat()
264            .system("You are a helpful assistant with access to a calculator and memory storage.")
265            .user("Calculate 10 * 5 and store the result in memory as 'product'");
266
267        let result = handle_tool_loop(&client, chat_builder, &tools, &storage).await?;
268        println!("Assistant: {}", result);
269    }
270
271    // Example 3: Retrieve from memory
272    println!("\n\nExample 3: Retrieve from Memory");
273    println!("User: What did I store in 'product'?");
274    {
275        let chat_builder = client
276            .chat()
277            .system("You are a helpful assistant with access to a calculator and memory storage.")
278            .user("What did I store in 'product'?");
279
280        let result = handle_tool_loop(&client, chat_builder, &tools, &storage).await?;
281        println!("Assistant: {}", result);
282    }
283
284    // Example 4: Complex multi-step task
285    println!("\n\nExample 4: Complex Multi-step Task");
286    println!("User: Calculate 100 / 4, multiply that by 3, and tell me the final result");
287    {
288        let chat_builder = client
289            .chat()
290            .system("You are a helpful assistant with access to a calculator and memory storage.")
291            .user("Calculate 100 / 4, multiply that by 3, and tell me the final result");
292
293        let result = handle_tool_loop(&client, chat_builder, &tools, &storage).await?;
294        println!("Assistant: {}", result);
295    }
296
297    // Example 5: Conversation with history
298    println!("\n\nExample 5: Conversation with History");
299    {
300        let mut chat_builder = client
301            .chat()
302            .system("You are a helpful assistant with access to a calculator and memory storage.");
303
304        // First question
305        println!("User: What is 8 + 7?");
306        chat_builder = chat_builder.user("What is 8 + 7?");
307        let result = handle_tool_loop(&client, chat_builder.clone(), &tools, &storage).await?;
308        println!("Assistant: {}", result);
309
310        // Add assistant response to history
311        chat_builder = chat_builder.assistant(&result);
312
313        // Follow-up question that depends on previous context
314        println!("\nUser: Now multiply that by 3");
315        chat_builder = chat_builder.user("Now multiply that by 3");
316        let result = handle_tool_loop(&client, chat_builder.clone(), &tools, &storage).await?;
317        println!("Assistant: {}", result);
318    }
319
320    println!("\n\n=== All examples completed successfully ===");
321    println!("\nKey Takeaway:");
322    println!("  When implementing multi-turn tool calling, ALWAYS use");
323    println!("  assistant_with_tool_calls() to maintain proper conversation");
324    println!("  history. This is essential for the model to understand the");
325    println!("  tool results and continue the conversation correctly.");
326
327    Ok(())
328}
examples/langfuse.rs (line 94)
31async fn main() -> Result<(), Box<dyn std::error::Error>> {
32    // Initialize tracing for logging
33    tracing_subscriber::fmt()
34        .with_env_filter(
35            tracing_subscriber::EnvFilter::from_default_env()
36                .add_directive("openai_ergonomic=debug".parse()?),
37        )
38        .init();
39
40    // 1. Build Langfuse exporter from environment variables
41    let exporter = ExporterBuilder::from_env()?.build()?;
42
43    // 2. Create tracer provider with batch processor
44    let provider = SdkTracerProvider::builder()
45        .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
46        .build();
47
48    // Set as global provider
49    global::set_tracer_provider(provider.clone());
50
51    // 3. Get tracer and create interceptor
52    let tracer = provider.tracer("openai-ergonomic");
53    let langfuse_interceptor =
54        std::sync::Arc::new(LangfuseInterceptor::new(tracer, LangfuseConfig::new()));
55
56    // 4. Create the OpenAI client and add the Langfuse interceptor
57    // Keep a reference to the interceptor so we can update context later
58    let client = Client::from_env()?
59        .with_interceptor(Box::new(langfuse_interceptor.clone()))
60        .build();
61
62    println!(" OpenAI client initialized with Langfuse observability");
63    println!(" Traces will be sent to Langfuse for monitoring\n");
64
65    // Example 1: Simple chat completion
66    println!("Example 1: Simple chat completion");
67    println!("---------------------------------");
68    let chat_builder = client
69        .chat_simple("What is the capital of France? Answer in one word.")
70        .build()?;
71    let response = client.execute_chat(chat_builder).await?;
72    println!("Response: {:?}\n", response.content());
73
74    // Example 2: Chat completion with builder pattern
75    println!("Example 2: Chat with builder pattern");
76    println!("-------------------------------------");
77    let chat_builder = client
78        .chat()
79        .system("You are a helpful assistant that speaks like a pirate.")
80        .user("Tell me about the ocean in 2 sentences.")
81        .temperature(0.7)
82        .max_tokens(100)
83        .build()?;
84    let response = client.execute_chat(chat_builder).await?;
85    println!("Response: {:?}\n", response.content());
86
87    // Example 3: Multiple messages in a conversation
88    println!("Example 3: Conversation");
89    println!("-----------------------");
90    let chat_builder = client
91        .chat()
92        .system("You are a math tutor.")
93        .user("What is 2 + 2?")
94        .assistant("2 + 2 equals 4.")
95        .user("And what about 3 + 3?")
96        .build()?;
97    let response = client.execute_chat(chat_builder).await?;
98    println!("Response: {:?}\n", response.content());
99
100    // Example 4: Error handling (intentionally trigger an error)
101    println!("Example 4: Error handling");
102    println!("-------------------------");
103    // Create a builder with a non-existent model
104    let chat_builder = ChatCompletionBuilder::new("non-existent-model")
105        .user("This should fail")
106        .build()?;
107    let result = client.execute_chat(chat_builder).await;
108
109    match result {
110        Ok(_) => println!("Unexpected success"),
111        Err(e) => println!("Expected error captured: {e}\n"),
112    }
113
114    // Example 5: Embeddings
115    println!("Example 5: Embeddings");
116    println!("--------------------");
117    let embeddings_builder = client.embeddings().text(
118        "text-embedding-ada-002",
119        "The quick brown fox jumps over the lazy dog",
120    );
121    let embeddings = client.embeddings().create(embeddings_builder).await?;
122    println!("Generated {} embedding(s)\n", embeddings.data.len());
123
124    // Example 6: Using custom metadata via interceptor context
125    println!("Example 6: Custom metadata via interceptor context");
126    println!("---------------------------------------------------");
127
128    // Set session and user IDs on the interceptor's context
129    langfuse_interceptor.set_session_id("demo-session-123");
130    langfuse_interceptor.set_user_id("demo-user-456");
131    langfuse_interceptor.add_tags(vec!["example".to_string(), "demo".to_string()]);
132
133    let chat_builder = client
134        .chat_simple("Say 'Hello from custom session!'")
135        .build()?;
136    let response = client.execute_chat(chat_builder).await?;
137    println!("Response with custom metadata: {:?}\n", response.content());
138
139    // Clear context for subsequent calls
140    langfuse_interceptor.clear_context();
141
142    println!(" All examples completed!");
143    println!(" Check your Langfuse dashboard to see the traces");
144    println!("   - Look for traces with operation name 'chat'");
145    println!("   - Each trace includes request/response details, token usage, and timing");
146    println!("   - Example 6 will have custom session_id, user_id, and tags");
147
148    // Shutdown the tracer provider to flush all spans
149    println!("\n⏳ Flushing spans to Langfuse...");
150    provider.shutdown()?;
151
152    Ok(())
153}
Source

pub fn assistant_with_tool_calls(self, content: impl Into<String>, tool_calls: Vec<ChatCompletionMessageToolCallsInner>) -> Self

Add an assistant message with tool calls to the conversation.

This is used when the assistant wants to call tools. Each tool call is a ChatCompletionMessageToolCallsInner carrying the tool call id, function name, and function arguments; in practice these are cloned from the tool calls of a previous response, as the examples below show.
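A minimal sketch of the history-preserving step, assuming response came from a prior send_chat call and builder is the mutable builder for the ongoing conversation:

// Minimal sketch: record the assistant's tool-call turn before
// appending tool results with builder.tool(...).
let tool_calls = response.tool_calls();
builder = builder.assistant_with_tool_calls(
    response.content().unwrap_or(""),
    tool_calls.iter().map(|tc| (*tc).clone()).collect(),
);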

Examples found in repository?
examples/tool_calling.rs (lines 236-239)
211async fn conversation_with_tools(client: &Client) -> Result<()> {
212    // This example demonstrates proper multi-turn tool calling with full message history
213
214    println!("=== Conversation with Tools (Full Implementation) ===");
215
216    // Initialize the conversation
217    let mut builder = client
218        .chat()
219        .user("What's the weather in Tokyo?")
220        .tools(vec![get_weather_tool()]);
221
222    // First request - the model will call the tool
223    let response = client.send_chat(builder.clone()).await?;
224
225    // Check for tool calls
226    let tool_calls = response.tool_calls();
227    if !tool_calls.is_empty() {
228        println!("Step 1: Model requests tool call");
229        for tool_call in &tool_calls {
230            println!("  Tool: {}", tool_call.function_name());
231            println!("  Args: {}", tool_call.function_arguments());
232        }
233
234        // IMPORTANT: Add the assistant's response (with tool calls) to the history
235        // This is the key step for maintaining proper conversation context!
236        builder = builder.assistant_with_tool_calls(
237            response.content().unwrap_or(""),
238            tool_calls.iter().map(|tc| (*tc).clone()).collect(),
239        );
240
241        // Execute the tools and add results
242        println!("\nStep 2: Execute tools and add results to conversation");
243        for tool_call in tool_calls {
244            let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
245            let result = execute_weather_function(params)?;
246            println!("  Tool result: {}", result);
247
248            // Add the tool result to the conversation history
249            builder = builder.tool(tool_call.id(), result);
250        }
251
252        // Send the follow-up request with tool results
253        println!("\nStep 3: Send follow-up request with tool results");
254        let final_response = client
255            .send_chat(builder.tools(vec![get_weather_tool()]))
256            .await?;
257
258        if let Some(content) = final_response.content() {
259            println!("  Final assistant response: {}", content);
260        }
261    }
262
263    println!("\nNote: This demonstrates the complete tool calling loop with proper");
264    println!("message history management using assistant_with_tool_calls()");
265
266    Ok(())
267}
More examples
examples/tool_calling_multiturn.rs (lines 196-199)
160async fn handle_tool_loop(
161    client: &Client,
162    mut chat_builder: openai_ergonomic::builders::chat::ChatCompletionBuilder,
163    tools: &[openai_client_base::models::ChatCompletionTool],
164    storage: &Arc<Mutex<HashMap<String, String>>>,
165) -> Result<String> {
166    const MAX_ITERATIONS: usize = 10; // Prevent infinite loops
167    let mut iteration = 0;
168
169    loop {
170        iteration += 1;
171        if iteration > MAX_ITERATIONS {
172            return Err(std::io::Error::other("Max iterations reached in tool loop").into());
173        }
174
175        println!("\n  [Iteration {}]", iteration);
176
177        // Send request with tools
178        let request = chat_builder.clone().tools(tools.to_vec());
179        let response = client.send_chat(request).await?;
180
181        // Check if there are tool calls
182        let tool_calls = response.tool_calls();
183        if tool_calls.is_empty() {
184            // No more tool calls, return the final response
185            if let Some(content) = response.content() {
186                return Ok(content.to_string());
187            }
188            return Err(std::io::Error::other("No content in final response").into());
189        }
190
191        // Process tool calls
192        println!("  Tool calls: {}", tool_calls.len());
193
194        // IMPORTANT: Add assistant message with tool calls to history
195        // This is the key step that maintains proper conversation context!
196        chat_builder = chat_builder.assistant_with_tool_calls(
197            response.content().unwrap_or(""),
198            tool_calls.iter().map(|tc| (*tc).clone()).collect(),
199        );
200
201        // Execute each tool call and add results to history
202        for tool_call in tool_calls {
203            let tool_name = tool_call.function_name();
204            let tool_args = tool_call.function_arguments();
205            let tool_id = tool_call.id();
206
207            println!("    → {}: {}", tool_name, tool_args);
208
209            let result = match execute_tool(tool_name, tool_args, storage) {
210                Ok(result) => {
211                    println!("    ✓ Result: {}", result);
212                    result
213                }
214                Err(e) => {
215                    let error_msg = format!("Error: {}", e);
216                    eprintln!("    ✗ {}", error_msg);
217                    error_msg
218                }
219            };
220
221            // Add tool result to the conversation
222            chat_builder = chat_builder.tool(tool_id, result);
223        }
224    }
225}
Source

pub fn tool(self, tool_call_id: impl Into<String>, content: impl Into<String>) -> Self

Add a tool result message to the conversation.

This is used to provide the result of a tool call back to the model. The tool_call_id must match the id of the tool call in the preceding assistant message, so the model can pair each result with the call that requested it.
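
A minimal sketch of the full round trip, using only methods shown on this page; weather_tool() and run_tool() are hypothetical helpers standing in for your tool definition and local execution:

// Sketch only: weather_tool() and run_tool() are placeholder helpers.
let mut builder = client
    .chat()
    .user("What's the weather in Paris?")
    .tools(vec![weather_tool()]);

let response = client.send_chat(builder.clone()).await?;
let tool_calls = response.tool_calls();

// First, echo the assistant's tool-call message back into the history.
builder = builder.assistant_with_tool_calls(
    response.content().unwrap_or(""),
    tool_calls.iter().map(|tc| (*tc).clone()).collect(),
);

// Then answer each call with tool(), keyed by the original call id.
for tool_call in tool_calls {
    let result = run_tool(tool_call.function_name(), tool_call.function_arguments())?;
    builder = builder.tool(tool_call.id(), result);
}

// The follow-up request now carries the tool results in its history.
let final_response = client.send_chat(builder.tools(vec![weather_tool()])).await?;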

Examples found in repository?
examples/tool_calling.rs (line 249)
211async fn conversation_with_tools(client: &Client) -> Result<()> {
212    // This example demonstrates proper multi-turn tool calling with full message history
213
214    println!("=== Conversation with Tools (Full Implementation) ===");
215
216    // Initialize the conversation
217    let mut builder = client
218        .chat()
219        .user("What's the weather in Tokyo?")
220        .tools(vec![get_weather_tool()]);
221
222    // First request - the model will call the tool
223    let response = client.send_chat(builder.clone()).await?;
224
225    // Check for tool calls
226    let tool_calls = response.tool_calls();
227    if !tool_calls.is_empty() {
228        println!("Step 1: Model requests tool call");
229        for tool_call in &tool_calls {
230            println!("  Tool: {}", tool_call.function_name());
231            println!("  Args: {}", tool_call.function_arguments());
232        }
233
234        // IMPORTANT: Add the assistant's response (with tool calls) to the history
235        // This is the key step for maintaining proper conversation context!
236        builder = builder.assistant_with_tool_calls(
237            response.content().unwrap_or(""),
238            tool_calls.iter().map(|tc| (*tc).clone()).collect(),
239        );
240
241        // Execute the tools and add results
242        println!("\nStep 2: Execute tools and add results to conversation");
243        for tool_call in tool_calls {
244            let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
245            let result = execute_weather_function(params)?;
246            println!("  Tool result: {}", result);
247
248            // Add the tool result to the conversation history
249            builder = builder.tool(tool_call.id(), result);
250        }
251
252        // Send the follow-up request with tool results
253        println!("\nStep 3: Send follow-up request with tool results");
254        let final_response = client
255            .send_chat(builder.tools(vec![get_weather_tool()]))
256            .await?;
257
258        if let Some(content) = final_response.content() {
259            println!("  Final assistant response: {}", content);
260        }
261    }
262
263    println!("\nNote: This demonstrates the complete tool calling loop with proper");
264    println!("message history management using assistant_with_tool_calls()");
265
266    Ok(())
267}
More examples
examples/tool_calling_multiturn.rs (line 222)
160async fn handle_tool_loop(
161    client: &Client,
162    mut chat_builder: openai_ergonomic::builders::chat::ChatCompletionBuilder,
163    tools: &[openai_client_base::models::ChatCompletionTool],
164    storage: &Arc<Mutex<HashMap<String, String>>>,
165) -> Result<String> {
166    const MAX_ITERATIONS: usize = 10; // Prevent infinite loops
167    let mut iteration = 0;
168
169    loop {
170        iteration += 1;
171        if iteration > MAX_ITERATIONS {
172            return Err(std::io::Error::other("Max iterations reached in tool loop").into());
173        }
174
175        println!("\n  [Iteration {}]", iteration);
176
177        // Send request with tools
178        let request = chat_builder.clone().tools(tools.to_vec());
179        let response = client.send_chat(request).await?;
180
181        // Check if there are tool calls
182        let tool_calls = response.tool_calls();
183        if tool_calls.is_empty() {
184            // No more tool calls, return the final response
185            if let Some(content) = response.content() {
186                return Ok(content.to_string());
187            }
188            return Err(std::io::Error::other("No content in final response").into());
189        }
190
191        // Process tool calls
192        println!("  Tool calls: {}", tool_calls.len());
193
194        // IMPORTANT: Add assistant message with tool calls to history
195        // This is the key step that maintains proper conversation context!
196        chat_builder = chat_builder.assistant_with_tool_calls(
197            response.content().unwrap_or(""),
198            tool_calls.iter().map(|tc| (*tc).clone()).collect(),
199        );
200
201        // Execute each tool call and add results to history
202        for tool_call in tool_calls {
203            let tool_name = tool_call.function_name();
204            let tool_args = tool_call.function_arguments();
205            let tool_id = tool_call.id();
206
207            println!("    → {}: {}", tool_name, tool_args);
208
209            let result = match execute_tool(tool_name, tool_args, storage) {
210                Ok(result) => {
211                    println!("    ✓ Result: {}", result);
212                    result
213                }
214                Err(e) => {
215                    let error_msg = format!("Error: {}", e);
216                    eprintln!("    ✗ {}", error_msg);
217                    error_msg
218                }
219            };
220
221            // Add tool result to the conversation
222            chat_builder = chat_builder.tool(tool_id, result);
223        }
224    }
225}
Source

pub fn temperature(self, temperature: f64) -> Self

Set the sampling temperature for the completion. Higher values (e.g. 0.9) produce more varied, creative output; lower values (e.g. 0.2) make responses more focused and deterministic.
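
A quick illustrative sketch; the exact values are conventional rather than prescriptive (low for factual answers, high for creative ones):

// Low temperature: focused, near-deterministic output.
let factual = client
    .chat()
    .user("List the primary colors.")
    .temperature(0.1);
let response = client.send_chat(factual).await?;

// High temperature: more varied, creative output.
let creative = client
    .chat()
    .user("Invent a name for a coffee shop on Mars.")
    .temperature(1.2);
let response = client.send_chat(creative).await?;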

Examples found in repository?
examples/chat_streaming.rs (line 74)
68async fn streaming_with_parameters(client: &Client) -> Result<()> {
69    println!("Question: Write a creative tagline for a bakery");
70
71    let builder = client
72        .chat()
73        .user("Write a creative tagline for a bakery")
74        .temperature(0.9)
75        .max_tokens(50);
76
77    let mut stream = client.send_chat_stream(builder).await?;
78
79    print!("Response: ");
80    while let Some(chunk) = stream.next().await {
81        let chunk = chunk?;
82        if let Some(content) = chunk.content() {
83            print!("{}", content);
84        }
85    }
86    println!();
87
88    Ok(())
89}
More examples
examples/langfuse_streaming.rs (line 124)
118async fn streaming_with_parameters(client: &Client<LangfuseState<Span>>) -> Result<()> {
119    println!("Question: Write a creative tagline for a bakery");
120
121    let builder = client
122        .chat()
123        .user("Write a creative tagline for a bakery")
124        .temperature(0.9)
125        .max_tokens(50);
126
127    let mut stream = client.send_chat_stream(builder).await?;
128
129    print!("Response: ");
130    let mut chunk_count = 0;
131    while let Some(chunk) = stream.next().await {
132        let chunk = chunk?;
133        if let Some(content) = chunk.content() {
134            print!("{}", content);
135            chunk_count += 1;
136        }
137    }
138    println!(
139        "\n(Received {} chunks, all traced to Langfuse)",
140        chunk_count
141    );
142
143    Ok(())
144}
examples/vision_chat.rs (line 91)
72async fn demonstrate_basic_image_analysis(
73    client: &Client,
74) -> Result<(), Box<dyn std::error::Error>> {
75    println!("  Example 1: Basic Image Analysis");
76    println!("----------------------------------");
77
78    let image_url = SAMPLE_IMAGE_URLS[0];
79    let question = "What do you see in this image? Please describe it in detail.";
80
81    println!("Image URL: {image_url}");
82    println!("Question: {question}");
83    print!("Assistant: ");
84    io::stdout().flush()?;
85
86    // Use the convenient user_with_image_url method
87    let chat_builder = client
88        .chat()
89        .system("You are a helpful AI assistant that can analyze images. Provide detailed, accurate descriptions of what you see.")
90        .user_with_image_url(question, image_url)
91        .temperature(0.3);
92
93    let response = client.send_chat(chat_builder).await?;
94
95    if let Some(content) = response.content() {
96        println!("{content}");
97
98        // Show usage information
99        if let Some(usage) = response.usage() {
100            println!("\n Token usage:");
101            println!("  Prompt tokens: {}", usage.prompt_tokens);
102            println!("  Completion tokens: {}", usage.completion_tokens);
103            println!("  Total tokens: {}", usage.total_tokens);
104        }
105    } else {
106        println!("No response content received");
107    }
108
109    println!();
110    Ok(())
111}
112
113/// Demonstrate analysis of multiple images in a single message.
114async fn demonstrate_multiple_images(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
115    println!(" Example 2: Multiple Image Analysis");
116    println!("---------------------------------------");
117
118    let question = "Compare these two images. What are the differences and similarities?";
119
120    println!("Question: {question}");
121    println!("Image 1: {}", SAMPLE_IMAGE_URLS[0]);
122    println!("Image 2: {}", SAMPLE_IMAGE_URLS[1]);
123    print!("Assistant: ");
124    io::stdout().flush()?;
125
126    // Create message parts manually for multiple images
127    let parts = vec![
128        text_part(question),
129        image_url_part_with_detail(SAMPLE_IMAGE_URLS[0], Detail::Auto),
130        image_url_part_with_detail(SAMPLE_IMAGE_URLS[1], Detail::Auto),
131    ];
132
133    let chat_builder = client
134        .chat()
135        .system("You are an expert at comparing and analyzing images. Provide thoughtful comparisons focusing on visual elements, composition, and content.")
136        .user_with_parts(parts)
137        .temperature(0.4);
138
139    let response = client.send_chat(chat_builder).await?;
140
141    if let Some(content) = response.content() {
142        println!("{content}");
143    } else {
144        println!("No response content received");
145    }
146
147    println!();
148    Ok(())
149}
150
151/// Demonstrate different detail levels for image analysis.
152async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153    println!(" Example 3: Different Detail Levels");
154    println!("------------------------------------");
155
156    let image_url = SAMPLE_IMAGE_URLS[0];
157    let question = "Analyze this image";
158
159    // Test different detail levels
160    let detail_levels = vec![
161        (Detail::Low, "Low detail (faster, less detailed)"),
162        (Detail::High, "High detail (slower, more detailed)"),
163        (Detail::Auto, "Auto detail (balanced)"),
164    ];
165
166    for (detail, description) in detail_levels {
167        println!("\n{description}:");
168        print!("Assistant: ");
169        io::stdout().flush()?;
170
171        let chat_builder = client
172            .chat()
173            .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174            .user_with_image_url_and_detail(question, image_url, detail)
175            .temperature(0.2)
176            .max_completion_tokens(100); // Limit response length for comparison
177
178        let response = client.send_chat(chat_builder).await?;
179
180        if let Some(content) = response.content() {
181            println!("{content}");
182        }
183    }
184
185    println!();
186    Ok(())
187}
188
189/// Demonstrate base64 image encoding and analysis.
190async fn demonstrate_base64_image(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
191    println!(" Example 4: Base64 Image Analysis");
192    println!("-----------------------------------");
193
194    let question = "What is this image? It's very small, what can you tell about it?";
195
196    println!("Question: {question}");
197    println!("Image: Small test image encoded as base64");
198    print!("Assistant: ");
199    io::stdout().flush()?;
200
201    // Create message parts with base64 image
202    let parts = vec![
203        text_part(question),
204        image_base64_part_with_detail(SAMPLE_BASE64_IMAGE, "image/png", Detail::High),
205    ];
206
207    let chat_builder = client
208        .chat()
209        .system("You are analyzing images provided in base64 format. Even if an image is very small or simple, try to provide what information you can.")
210        .user_with_parts(parts)
211        .temperature(0.3);
212
213    let response = client.send_chat(chat_builder).await?;
214
215    if let Some(content) = response.content() {
216        println!("{content}");
217    } else {
218        println!("No response content received");
219    }
220
221    println!();
222    Ok(())
223}
224
225/// Demonstrate conversation context with images.
226async fn demonstrate_conversation_with_images(
227    client: &Client,
228) -> Result<(), Box<dyn std::error::Error>> {
229    println!(" Example 5: Conversation Context with Images");
230    println!("----------------------------------------------");
231
232    let image_url = SAMPLE_IMAGE_URLS[0];
233
234    // First message: Analyze the image
235    println!("Step 1: Initial image analysis");
236    print!("Assistant: ");
237    io::stdout().flush()?;
238
239    let mut chat_builder = client
240        .chat()
241        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
242        .user_with_image_url("What's the main subject of this image?", image_url)
243        .temperature(0.3);
244
245    let response1 = client.send_chat(chat_builder).await?;
246    let first_response = response1.content().unwrap_or("No response").to_string();
247    println!("{first_response}");
248
249    // Second message: Follow-up question (without re-uploading the image)
250    println!("\nStep 2: Follow-up question");
251    print!("Assistant: ");
252    io::stdout().flush()?;
253
254    chat_builder = client
255        .chat()
256        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
257        .user_with_image_url("What's the main subject of this image?", image_url)
258        .assistant(&first_response)
259        .user("What colors are most prominent in the image we just discussed?")
260        .temperature(0.3);
261
262    let response2 = client.send_chat(chat_builder).await?;
263
264    if let Some(content) = response2.content() {
265        println!("{content}");
266    }
267
268    // Third message: Ask for creative interpretation
269    println!("\nStep 3: Creative interpretation");
270    print!("Assistant: ");
271    io::stdout().flush()?;
272
273    let second_response = response2.content().unwrap_or("No response").to_string();
274
275    chat_builder = client
276        .chat()
277        .system("You are having a conversation about images. Remember details from previous messages to maintain context.")
278        .user_with_image_url("What's the main subject of this image?", image_url)
279        .assistant(&first_response)
280        .user("What colors are most prominent in the image we just discussed?")
281        .assistant(second_response)
282        .user("Based on our discussion, write a short poem inspired by this image.")
283        .temperature(0.7);
284
285    let response3 = client.send_chat(chat_builder).await?;
286
287    if let Some(content) = response3.content() {
288        println!("{content}");
289    }
290
291    println!();
292    Ok(())
293}
294
295/// Demonstrate error handling patterns for vision requests.
296async fn demonstrate_error_handling(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
297    println!("  Example 6: Error Handling Patterns");
298    println!("------------------------------------");
299
300    println!("Testing various error scenarios...\n");
301
302    // Test 1: Invalid image URL
303    println!("Test 1: Invalid image URL");
304    let invalid_url = "https://this-domain-does-not-exist-12345.com/image.jpg";
305
306    let invalid_builder = client
307        .chat()
308        .user_with_image_url("What do you see?", invalid_url)
309        .temperature(0.3);
310
311    match client.send_chat(invalid_builder).await {
312        Ok(_) => println!(" Invalid URL request unexpectedly succeeded"),
313        Err(e) => match &e {
314            Error::Api {
315                status, message, ..
316            } => {
317                println!(" API properly rejected invalid URL ({status}): {message}");
318            }
319            Error::Http(reqwest_err) => {
320                println!(" HTTP error caught: {reqwest_err}");
321            }
322            Error::InvalidRequest(msg) => {
323                println!(" Validation caught invalid URL: {msg}");
324            }
325            _ => {
326                println!("ℹ  Other error type: {e}");
327            }
328        },
329    }
330
331    // Test 2: Empty message with image
332    println!("\nTest 2: Empty text with image");
333    let empty_text_builder = client
334        .chat()
335        .user_with_image_url("", SAMPLE_IMAGE_URLS[0])
336        .temperature(0.3);
337
338    match client.send_chat(empty_text_builder).await {
339        Ok(response) => {
340            if let Some(content) = response.content() {
341                println!(
342                    " API handled empty text gracefully: {}",
343                    content.chars().take(50).collect::<String>()
344                );
345            }
346        }
347        Err(e) => {
348            println!("ℹ  Empty text error: {e}");
349        }
350    }
351
352    // Test 3: Malformed base64 data
353    println!("\nTest 3: Malformed base64 image data");
354    let malformed_base64 = "this-is-not-valid-base64!@#$%";
355    let malformed_parts = vec![
356        text_part("What is this?"),
357        image_base64_part_with_detail(malformed_base64, "image/png", Detail::Auto),
358    ];
359
360    let malformed_builder = client.chat().user_with_parts(malformed_parts);
361
362    match client.send_chat(malformed_builder).await {
363        Ok(_) => println!(" Malformed base64 unexpectedly succeeded"),
364        Err(e) => match &e {
365            Error::Api {
366                status, message, ..
367            } => {
368                println!(" API properly rejected malformed base64 ({status}): {message}");
369            }
370            _ => {
371                println!("ℹ  Other error for malformed base64: {e}");
372            }
373        },
374    }
375
376    println!("\n  Error handling patterns demonstrated:");
377    println!("  • Invalid image URL handling");
378    println!("  • Empty text with image handling");
379    println!("  • Malformed base64 data validation");
380    println!("  • API error classification");
381    println!("  • Network error handling");
382
383    println!();
384    Ok(())
385}
examples/chat_comprehensive.rs (line 212)
184async fn demonstrate_basic_chat(
185    client: &Client,
186    conversation: &mut ConversationManager,
187) -> Result<(), Box<dyn std::error::Error>> {
188    println!(" Example 1: Basic Chat Completion");
189    println!("----------------------------------");
190
191    let user_message = "Hello! Can you explain what you can help me with?";
192    conversation.add_user_message(user_message.to_string());
193
194    println!("User: {user_message}");
195    print!("Assistant: ");
196    io::stdout().flush()?;
197
198    // Build the chat request with conversation history
199    let messages = conversation.get_conversation_for_api();
200    let mut chat_builder = client.chat();
201
202    for (role, content) in messages {
203        match role.as_str() {
204            "system" => chat_builder = chat_builder.system(content),
205            "user" => chat_builder = chat_builder.user(content),
206            "assistant" => chat_builder = chat_builder.assistant(content),
207            _ => {} // Ignore unknown roles
208        }
209    }
210
211    // Send the request
212    let response = client.send_chat(chat_builder.temperature(0.7)).await?;
213
214    if let Some(content) = response.content() {
215        println!("{content}");
216        conversation.add_assistant_message(content.to_string(), None);
217
218        // Track token usage if available
219        if let Some(usage) = response.usage() {
220            conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
221        }
222    } else {
223        println!("No response content received");
224    }
225
226    println!();
227    Ok(())
228}
229
230/// Demonstrate multi-turn conversation.
231async fn demonstrate_multi_turn_chat(
232    client: &Client,
233    conversation: &mut ConversationManager,
234) -> Result<(), Box<dyn std::error::Error>> {
235    println!(" Example 2: Multi-turn Conversation");
236    println!("------------------------------------");
237
238    let questions = vec![
239        "What's the capital of France?",
240        "What's the population of that city?",
241        "Can you tell me an interesting fact about it?",
242    ];
243
244    for question in questions {
245        conversation.add_user_message(question.to_string());
246
247        println!("User: {question}");
248        print!("Assistant: ");
249        io::stdout().flush()?;
250
251        // Build chat request with full conversation history
252        let messages = conversation.get_conversation_for_api();
253        let mut chat_builder = client.chat();
254
255        for (role, content) in messages {
256            match role.as_str() {
257                "system" => chat_builder = chat_builder.system(content),
258                "user" => chat_builder = chat_builder.user(content),
259                "assistant" => chat_builder = chat_builder.assistant(content),
260                _ => {}
261            }
262        }
263
264        let response = client.send_chat(chat_builder.temperature(0.3)).await?;
265
266        if let Some(content) = response.content() {
267            println!("{content}");
268            conversation.add_assistant_message(content.to_string(), None);
269
270            // Track token usage
271            if let Some(usage) = response.usage() {
272                conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
273            }
274        }
275
276        println!();
277        // Small delay between questions for readability
278        tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
279    }
280
281    Ok(())
282}
283
284/// Demonstrate streaming chat response.
285async fn demonstrate_streaming_chat(
286    _client: &Client,
287    conversation: &mut ConversationManager,
288) -> Result<(), Box<dyn std::error::Error>> {
289    println!(" Example 3: Streaming Chat Response");
290    println!("------------------------------------");
291
292    // Add user message for streaming example
293    let streaming_question = "Can you write a short poem about programming?";
294    conversation.add_user_message(streaming_question.to_string());
295
296    println!("User: {streaming_question}");
297    println!("Assistant (streaming): ");
298
299    // Note: Streaming is not yet fully implemented in the client
300    // This is a placeholder showing the intended API
301    println!(" Streaming functionality is being implemented...");
302    println!("Future implementation will show real-time token-by-token responses");
303
304    // Simulate what streaming would look like
305    let simulated_response = "Programming flows like poetry in motion,\nEach function a verse, each loop a devotion.\nVariables dance through memory's halls,\nWhile algorithms answer logic's calls.";
306
307    // Simulate typing effect
308    for char in simulated_response.chars() {
309        print!("{char}");
310        io::stdout().flush()?;
311        tokio::time::sleep(tokio::time::Duration::from_millis(30)).await;
312    }
313    println!("\n");
314
315    // Add the response to conversation history
316    conversation.add_assistant_message(simulated_response.to_string(), None);
317
318    Ok(())
319}
320
321/// Demonstrate token usage tracking.
322async fn demonstrate_token_tracking(
323    client: &Client,
324    conversation: &mut ConversationManager,
325) -> Result<(), Box<dyn std::error::Error>> {
326    println!(" Example 4: Token Usage Tracking");
327    println!("---------------------------------");
328
329    let efficiency_question = "In one sentence, what is machine learning?";
330    conversation.add_user_message(efficiency_question.to_string());
331
332    println!("User: {efficiency_question}");
333    print!("Assistant: ");
334    io::stdout().flush()?;
335
336    // Build chat request
337    let messages = conversation.get_conversation_for_api();
338    let mut chat_builder = client.chat().max_completion_tokens(50); // Limit tokens for demo
339
340    for (role, content) in messages {
341        match role.as_str() {
342            "system" => chat_builder = chat_builder.system(content),
343            "user" => chat_builder = chat_builder.user(content),
344            "assistant" => chat_builder = chat_builder.assistant(content),
345            _ => {}
346        }
347    }
348
349    let response = client.send_chat(chat_builder).await?;
350
351    if let Some(content) = response.content() {
352        println!("{content}");
353
354        // Display detailed token usage
355        if let Some(usage) = response.usage() {
356            println!("\n Token Usage Breakdown:");
357            println!("  Prompt tokens: {}", usage.prompt_tokens);
358            println!("  Completion tokens: {}", usage.completion_tokens);
359            println!("  Total tokens: {}", usage.total_tokens);
360
361            conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
362
363            conversation.add_assistant_message(content.to_string(), Some(usage.completion_tokens));
364        } else {
365            conversation.add_assistant_message(content.to_string(), None);
366        }
367    }
368
369    println!();
370    Ok(())
371}
372
373/// Demonstrate error handling patterns.
374async fn demonstrate_error_handling(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
375    println!("  Example 5: Error Handling Patterns");
376    println!("------------------------------------");
377
378    println!("Testing various error scenarios...\n");
379
380    // Test 1: Invalid model
381    println!("Test 1: Invalid model name");
382    let invalid_model_builder = client.chat()
383        .user("Hello")
384        // Note: We can't easily test invalid model without modifying the builder
385        // This shows the pattern for handling errors
386        .temperature(0.7);
387
388    match client.send_chat(invalid_model_builder).await {
389        Ok(_) => println!(" Request succeeded (model validation not yet implemented)"),
390        Err(e) => match &e {
391            Error::Api {
392                status, message, ..
393            } => {
394                println!(" API Error ({status}): {message}");
395            }
396            Error::Http(reqwest_err) => {
397                println!(" HTTP Error: {reqwest_err}");
398            }
399            Error::InvalidRequest(msg) => {
400                println!(" Invalid Request: {msg}");
401            }
402            _ => {
403                println!(" Unexpected Error: {e}");
404            }
405        },
406    }
407
408    // Test 2: Empty message validation
409    println!("\nTest 2: Empty message validation");
410    let empty_builder = client.chat(); // No messages added
411
412    match client.send_chat(empty_builder).await {
413        Ok(_) => println!(" Empty request unexpectedly succeeded"),
414        Err(Error::InvalidRequest(msg)) => {
415            println!(" Validation caught empty request: {msg}");
416        }
417        Err(e) => {
418            println!(" Unexpected error type: {e}");
419        }
420    }
421
422    // Test 3: Configuration errors
423    println!("\nTest 3: Configuration validation");
424    println!(" Client configuration is valid (created successfully)");
425
426    println!("\n  Error handling patterns demonstrated:");
427    println!("  • API error classification");
428    println!("  • Request validation");
429    println!("  • Network error handling");
430    println!("  • Configuration validation");
431
432    println!();
433    Ok(())
434}
examples/azure_comprehensive.rs (line 48)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10    tracing_subscriber::fmt::init();
11
12    println!("=== Azure OpenAI Comprehensive API Test ===\n");
13
14    let client = Client::from_env()?.build();
15
16    // Test 1: Simple chat completion
17    println!("1. Testing simple chat completion...");
18    let builder = client.chat_simple("What is 2+2? Answer in one word.");
19    match client.send_chat(builder).await {
20        Ok(response) => {
21            if let Some(content) = response.content() {
22                println!("   ✓ Chat completion: {content}");
23            }
24        }
25        Err(e) => println!("   ✗ Chat completion failed: {e}"),
26    }
27
28    // Test 2: Chat with system message
29    println!("\n2. Testing chat with system message...");
30    let builder = client.chat_with_system(
31        "You are a helpful assistant that responds in one sentence.",
32        "What is Rust?",
33    );
34    match client.send_chat(builder).await {
35        Ok(response) => {
36            if let Some(content) = response.content() {
37                println!("   ✓ System message chat: {content}");
38            }
39        }
40        Err(e) => println!("   ✗ System message chat failed: {e}"),
41    }
42
43    // Test 3: Chat with temperature
44    println!("\n3. Testing chat with custom parameters...");
45    let builder = client
46        .chat()
47        .user("Say 'test' in a creative way")
48        .temperature(0.7)
49        .max_tokens(50);
50    match client.send_chat(builder).await {
51        Ok(response) => {
52            if let Some(content) = response.content() {
53                println!("   ✓ Custom parameters: {content}");
54            }
55        }
56        Err(e) => println!("   ✗ Custom parameters failed: {e}"),
57    }
58
59    // Test 4: Multiple messages conversation
60    println!("\n4. Testing multi-message conversation...");
61    let builder = client
62        .chat()
63        .system("You are a helpful assistant")
64        .user("My name is Alice")
65        .assistant("Hello Alice! Nice to meet you.")
66        .user("What's my name?");
67    match client.send_chat(builder).await {
68        Ok(response) => {
69            if let Some(content) = response.content() {
70                println!("   ✓ Multi-message: {content}");
71            }
72        }
73        Err(e) => println!("   ✗ Multi-message failed: {e}"),
74    }
75
76    // Test 5: Chat with max_tokens limit
77    println!("\n5. Testing with max_tokens limit...");
78    let builder = client.chat().user("Explain quantum physics").max_tokens(20);
79    match client.send_chat(builder).await {
80        Ok(response) => {
81            if let Some(content) = response.content() {
82                println!("   ✓ Limited tokens: {content}");
83                println!("   (Note: response is truncated due to max_tokens=20)");
84            }
85        }
86        Err(e) => println!("   ✗ Max tokens test failed: {e}"),
87    }
88
89    // Test 6: Using responses API
90    println!("\n6. Testing responses API...");
91    let builder = client.responses().user("What is the capital of France?");
92    match client.send_responses(builder).await {
93        Ok(response) => {
94            if let Some(content) = response.content() {
95                println!("   ✓ Responses API: {content}");
96            }
97        }
98        Err(e) => println!("   ✗ Responses API failed: {e}"),
99    }
100
101    println!("\n=== Test Summary ===");
102    println!("Azure OpenAI integration tested across multiple endpoints!");
103    println!("\nNote: Some advanced features like embeddings, streaming, and");
104    println!("tool calling may require specific Azure OpenAI deployments.");
105
106    Ok(())
107}
examples/langfuse.rs (line 81)
31async fn main() -> Result<(), Box<dyn std::error::Error>> {
32    // Initialize tracing for logging
33    tracing_subscriber::fmt()
34        .with_env_filter(
35            tracing_subscriber::EnvFilter::from_default_env()
36                .add_directive("openai_ergonomic=debug".parse()?),
37        )
38        .init();
39
40    // 1. Build Langfuse exporter from environment variables
41    let exporter = ExporterBuilder::from_env()?.build()?;
42
43    // 2. Create tracer provider with batch processor
44    let provider = SdkTracerProvider::builder()
45        .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
46        .build();
47
48    // Set as global provider
49    global::set_tracer_provider(provider.clone());
50
51    // 3. Get tracer and create interceptor
52    let tracer = provider.tracer("openai-ergonomic");
53    let langfuse_interceptor =
54        std::sync::Arc::new(LangfuseInterceptor::new(tracer, LangfuseConfig::new()));
55
56    // 4. Create the OpenAI client and add the Langfuse interceptor
57    // Keep a reference to the interceptor so we can update context later
58    let client = Client::from_env()?
59        .with_interceptor(Box::new(langfuse_interceptor.clone()))
60        .build();
61
62    println!(" OpenAI client initialized with Langfuse observability");
63    println!(" Traces will be sent to Langfuse for monitoring\n");
64
65    // Example 1: Simple chat completion
66    println!("Example 1: Simple chat completion");
67    println!("---------------------------------");
68    let chat_builder = client
69        .chat_simple("What is the capital of France? Answer in one word.")
70        .build()?;
71    let response = client.execute_chat(chat_builder).await?;
72    println!("Response: {:?}\n", response.content());
73
74    // Example 2: Chat completion with builder pattern
75    println!("Example 2: Chat with builder pattern");
76    println!("-------------------------------------");
77    let chat_builder = client
78        .chat()
79        .system("You are a helpful assistant that speaks like a pirate.")
80        .user("Tell me about the ocean in 2 sentences.")
81        .temperature(0.7)
82        .max_tokens(100)
83        .build()?;
84    let response = client.execute_chat(chat_builder).await?;
85    println!("Response: {:?}\n", response.content());
86
87    // Example 3: Multiple messages in a conversation
88    println!("Example 3: Conversation");
89    println!("-----------------------");
90    let chat_builder = client
91        .chat()
92        .system("You are a math tutor.")
93        .user("What is 2 + 2?")
94        .assistant("2 + 2 equals 4.")
95        .user("And what about 3 + 3?")
96        .build()?;
97    let response = client.execute_chat(chat_builder).await?;
98    println!("Response: {:?}\n", response.content());
99
100    // Example 4: Error handling (intentionally trigger an error)
101    println!("Example 4: Error handling");
102    println!("-------------------------");
103    // Create a builder with a non-existent model
104    let chat_builder = ChatCompletionBuilder::new("non-existent-model")
105        .user("This should fail")
106        .build()?;
107    let result = client.execute_chat(chat_builder).await;
108
109    match result {
110        Ok(_) => println!("Unexpected success"),
111        Err(e) => println!("Expected error captured: {e}\n"),
112    }
113
114    // Example 5: Embeddings
115    println!("Example 5: Embeddings");
116    println!("--------------------");
117    let embeddings_builder = client.embeddings().text(
118        "text-embedding-ada-002",
119        "The quick brown fox jumps over the lazy dog",
120    );
121    let embeddings = client.embeddings().create(embeddings_builder).await?;
122    println!("Generated {} embedding(s)\n", embeddings.data.len());
123
124    // Example 6: Using custom metadata via interceptor context
125    println!("Example 6: Custom metadata via interceptor context");
126    println!("---------------------------------------------------");
127
128    // Set session and user IDs on the interceptor's context
129    langfuse_interceptor.set_session_id("demo-session-123");
130    langfuse_interceptor.set_user_id("demo-user-456");
131    langfuse_interceptor.add_tags(vec!["example".to_string(), "demo".to_string()]);
132
133    let chat_builder = client
134        .chat_simple("Say 'Hello from custom session!'")
135        .build()?;
136    let response = client.execute_chat(chat_builder).await?;
137    println!("Response with custom metadata: {:?}\n", response.content());
138
139    // Clear context for subsequent calls
140    langfuse_interceptor.clear_context();
141
142    println!(" All examples completed!");
143    println!(" Check your Langfuse dashboard to see the traces");
144    println!("   - Look for traces with operation name 'chat'");
145    println!("   - Each trace includes request/response details, token usage, and timing");
146    println!("   - Example 6 will have custom session_id, user_id, and tags");
147
148    // Shutdown the tracer provider to flush all spans
149    println!("\n⏳ Flushing spans to Langfuse...");
150    provider.shutdown()?;
151
152    Ok(())
153}
Source

pub fn max_tokens(self, max_tokens: i32) -> Self

Set the maximum number of tokens to generate in the completion. If the limit is reached, the response is truncated mid-thought. For newer models, see also max_completion_tokens.
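
A short sketch of capping output length; since the completion is cut off at the limit, the completion_tokens reported in response.usage() should not exceed it:

// Cap the completion at roughly one short sentence.
let builder = client
    .chat()
    .user("Summarize the Rust borrow checker.")
    .max_tokens(40);

let response = client.send_chat(builder).await?;
if let Some(usage) = response.usage() {
    // completion_tokens is bounded by max_tokens (40 here).
    println!("completion tokens: {}", usage.completion_tokens);
}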

Examples found in repository?
examples/chat_streaming.rs (line 75)
68async fn streaming_with_parameters(client: &Client) -> Result<()> {
69    println!("Question: Write a creative tagline for a bakery");
70
71    let builder = client
72        .chat()
73        .user("Write a creative tagline for a bakery")
74        .temperature(0.9)
75        .max_tokens(50);
76
77    let mut stream = client.send_chat_stream(builder).await?;
78
79    print!("Response: ");
80    while let Some(chunk) = stream.next().await {
81        let chunk = chunk?;
82        if let Some(content) = chunk.content() {
83            print!("{}", content);
84        }
85    }
86    println!();
87
88    Ok(())
89}
90
91async fn collect_content(client: &Client) -> Result<()> {
92    println!("Question: What is the capital of France?");
93
94    let builder = client.chat().user("What is the capital of France?");
95
96    let mut stream = client.send_chat_stream(builder).await?;
97
98    // Manually collect all content
99    let mut content = String::new();
100    while let Some(chunk) = stream.next().await {
101        let chunk = chunk?;
102        if let Some(text) = chunk.content() {
103            content.push_str(text);
104        }
105    }
106    println!("Full response: {}", content);
107
108    Ok(())
109}
110
111async fn streaming_with_system(client: &Client) -> Result<()> {
112    println!("System: You are a helpful assistant that speaks like a pirate");
113    println!("Question: Tell me about the weather");
114
115    let builder = client
116        .chat()
117        .system("You are a helpful assistant that speaks like a pirate")
118        .user("Tell me about the weather")
119        .max_tokens(100);
120
121    let mut stream = client.send_chat_stream(builder).await?;
122
123    print!("Response: ");
124    while let Some(chunk) = stream.next().await {
125        let chunk = chunk?;
126        if let Some(content) = chunk.content() {
127            print!("{}", content);
128        }
129    }
130    println!();
131
132    Ok(())
133}
More examples
examples/langfuse_streaming.rs (line 125)
118async fn streaming_with_parameters(client: &Client<LangfuseState<Span>>) -> Result<()> {
119    println!("Question: Write a creative tagline for a bakery");
120
121    let builder = client
122        .chat()
123        .user("Write a creative tagline for a bakery")
124        .temperature(0.9)
125        .max_tokens(50);
126
127    let mut stream = client.send_chat_stream(builder).await?;
128
129    print!("Response: ");
130    let mut chunk_count = 0;
131    while let Some(chunk) = stream.next().await {
132        let chunk = chunk?;
133        if let Some(content) = chunk.content() {
134            print!("{}", content);
135            chunk_count += 1;
136        }
137    }
138    println!(
139        "\n(Received {} chunks, all traced to Langfuse)",
140        chunk_count
141    );
142
143    Ok(())
144}
examples/azure_comprehensive.rs (line 49)
9async fn main() -> Result<(), Box<dyn std::error::Error>> {
10    tracing_subscriber::fmt::init();
11
12    println!("=== Azure OpenAI Comprehensive API Test ===\n");
13
14    let client = Client::from_env()?.build();
15
16    // Test 1: Simple chat completion
17    println!("1. Testing simple chat completion...");
18    let builder = client.chat_simple("What is 2+2? Answer in one word.");
19    match client.send_chat(builder).await {
20        Ok(response) => {
21            if let Some(content) = response.content() {
22                println!("   ✓ Chat completion: {content}");
23            }
24        }
25        Err(e) => println!("   ✗ Chat completion failed: {e}"),
26    }
27
28    // Test 2: Chat with system message
29    println!("\n2. Testing chat with system message...");
30    let builder = client.chat_with_system(
31        "You are a helpful assistant that responds in one sentence.",
32        "What is Rust?",
33    );
34    match client.send_chat(builder).await {
35        Ok(response) => {
36            if let Some(content) = response.content() {
37                println!("   ✓ System message chat: {content}");
38            }
39        }
40        Err(e) => println!("   ✗ System message chat failed: {e}"),
41    }
42
43    // Test 3: Chat with temperature
44    println!("\n3. Testing chat with custom parameters...");
45    let builder = client
46        .chat()
47        .user("Say 'test' in a creative way")
48        .temperature(0.7)
49        .max_tokens(50);
50    match client.send_chat(builder).await {
51        Ok(response) => {
52            if let Some(content) = response.content() {
53                println!("   ✓ Custom parameters: {content}");
54            }
55        }
56        Err(e) => println!("   ✗ Custom parameters failed: {e}"),
57    }
58
59    // Test 4: Multiple messages conversation
60    println!("\n4. Testing multi-message conversation...");
61    let builder = client
62        .chat()
63        .system("You are a helpful assistant")
64        .user("My name is Alice")
65        .assistant("Hello Alice! Nice to meet you.")
66        .user("What's my name?");
67    match client.send_chat(builder).await {
68        Ok(response) => {
69            if let Some(content) = response.content() {
70                println!("   ✓ Multi-message: {content}");
71            }
72        }
73        Err(e) => println!("   ✗ Multi-message failed: {e}"),
74    }
75
76    // Test 5: Chat with max_tokens limit
77    println!("\n5. Testing with max_tokens limit...");
78    let builder = client.chat().user("Explain quantum physics").max_tokens(20);
79    match client.send_chat(builder).await {
80        Ok(response) => {
81            if let Some(content) = response.content() {
82                println!("   ✓ Limited tokens: {content}");
83                println!("   (Note: response is truncated due to max_tokens=20)");
84            }
85        }
86        Err(e) => println!("   ✗ Max tokens test failed: {e}"),
87    }
88
89    // Test 6: Using responses API
90    println!("\n6. Testing responses API...");
91    let builder = client.responses().user("What is the capital of France?");
92    match client.send_responses(builder).await {
93        Ok(response) => {
94            if let Some(content) = response.content() {
95                println!("   ✓ Responses API: {content}");
96            }
97        }
98        Err(e) => println!("   ✗ Responses API failed: {e}"),
99    }
100
101    println!("\n=== Test Summary ===");
102    println!("Azure OpenAI integration tested across multiple endpoints!");
103    println!("\nNote: Some advanced features like embeddings, streaming, and");
104    println!("tool calling may require specific Azure OpenAI deployments.");
105
106    Ok(())
107}
examples/langfuse.rs (line 82)
31async fn main() -> Result<(), Box<dyn std::error::Error>> {
32    // Initialize tracing for logging
33    tracing_subscriber::fmt()
34        .with_env_filter(
35            tracing_subscriber::EnvFilter::from_default_env()
36                .add_directive("openai_ergonomic=debug".parse()?),
37        )
38        .init();
39
40    // 1. Build Langfuse exporter from environment variables
41    let exporter = ExporterBuilder::from_env()?.build()?;
42
43    // 2. Create tracer provider with batch processor
44    let provider = SdkTracerProvider::builder()
45        .with_span_processor(BatchSpanProcessor::builder(exporter, Tokio).build())
46        .build();
47
48    // Set as global provider
49    global::set_tracer_provider(provider.clone());
50
51    // 3. Get tracer and create interceptor
52    let tracer = provider.tracer("openai-ergonomic");
53    let langfuse_interceptor =
54        std::sync::Arc::new(LangfuseInterceptor::new(tracer, LangfuseConfig::new()));
55
56    // 4. Create the OpenAI client and add the Langfuse interceptor
57    // Keep a reference to the interceptor so we can update context later
58    let client = Client::from_env()?
59        .with_interceptor(Box::new(langfuse_interceptor.clone()))
60        .build();
61
62    println!(" OpenAI client initialized with Langfuse observability");
63    println!(" Traces will be sent to Langfuse for monitoring\n");
64
65    // Example 1: Simple chat completion
66    println!("Example 1: Simple chat completion");
67    println!("---------------------------------");
68    let chat_builder = client
69        .chat_simple("What is the capital of France? Answer in one word.")
70        .build()?;
71    let response = client.execute_chat(chat_builder).await?;
72    println!("Response: {:?}\n", response.content());
73
74    // Example 2: Chat completion with builder pattern
75    println!("Example 2: Chat with builder pattern");
76    println!("-------------------------------------");
77    let chat_builder = client
78        .chat()
79        .system("You are a helpful assistant that speaks like a pirate.")
80        .user("Tell me about the ocean in 2 sentences.")
81        .temperature(0.7)
82        .max_tokens(100)
83        .build()?;
84    let response = client.execute_chat(chat_builder).await?;
85    println!("Response: {:?}\n", response.content());
86
87    // Example 3: Multiple messages in a conversation
88    println!("Example 3: Conversation");
89    println!("-----------------------");
90    let chat_builder = client
91        .chat()
92        .system("You are a math tutor.")
93        .user("What is 2 + 2?")
94        .assistant("2 + 2 equals 4.")
95        .user("And what about 3 + 3?")
96        .build()?;
97    let response = client.execute_chat(chat_builder).await?;
98    println!("Response: {:?}\n", response.content());
99
100    // Example 4: Error handling (intentionally trigger an error)
101    println!("Example 4: Error handling");
102    println!("-------------------------");
103    // Create a builder with a non-existent model
104    let chat_builder = ChatCompletionBuilder::new("non-existent-model")
105        .user("This should fail")
106        .build()?;
107    let result = client.execute_chat(chat_builder).await;
108
109    match result {
110        Ok(_) => println!("Unexpected success"),
111        Err(e) => println!("Expected error captured: {e}\n"),
112    }
113
114    // Example 5: Embeddings
115    println!("Example 5: Embeddings");
116    println!("--------------------");
117    let embeddings_builder = client.embeddings().text(
118        "text-embedding-ada-002",
119        "The quick brown fox jumps over the lazy dog",
120    );
121    let embeddings = client.embeddings().create(embeddings_builder).await?;
122    println!("Generated {} embedding(s)\n", embeddings.data.len());
123
124    // Example 6: Using custom metadata via interceptor context
125    println!("Example 6: Custom metadata via interceptor context");
126    println!("---------------------------------------------------");
127
128    // Set session and user IDs on the interceptor's context
129    langfuse_interceptor.set_session_id("demo-session-123");
130    langfuse_interceptor.set_user_id("demo-user-456");
131    langfuse_interceptor.add_tags(vec!["example".to_string(), "demo".to_string()]);
132
133    let chat_builder = client
134        .chat_simple("Say 'Hello from custom session!'")
135        .build()?;
136    let response = client.execute_chat(chat_builder).await?;
137    println!("Response with custom metadata: {:?}\n", response.content());
138
139    // Clear context for subsequent calls
140    langfuse_interceptor.clear_context();
141
142    println!(" All examples completed!");
143    println!(" Check your Langfuse dashboard to see the traces");
144    println!("   - Look for traces with operation name 'chat'");
145    println!("   - Each trace includes request/response details, token usage, and timing");
146    println!("   - Example 6 will have custom session_id, user_id, and tags");
147
148    // Shutdown the tracer provider to flush all spans
149    println!("\n⏳ Flushing spans to Langfuse...");
150    provider.shutdown()?;
151
152    Ok(())
153}
Source

pub fn max_completion_tokens(self, max_completion_tokens: i32) -> Self

Set the maximum number of completion tokens to generate (the parameter newer models use in place of max_tokens).
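
A minimal sketch, functionally analogous to max_tokens above but expressed with the parameter newer models expect:

// Cap the completion using the newer parameter.
let builder = client
    .chat()
    .user("Give a one-line definition of an iterator.")
    .max_completion_tokens(30);

let response = client.send_chat(builder).await?;
if let Some(content) = response.content() {
    println!("{content}");
}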

Examples found in repository?
examples/retry_patterns.rs (line 369)
356async fn fallback_chain(client: &Client) -> Result<()> {
357    // Define fallback chain
358    let strategies = vec![
359        ("GPT-4o", "gpt-4o", 1024),
360        ("GPT-4o-mini", "gpt-4o-mini", 512),
361        ("GPT-3.5", "gpt-3.5-turbo", 256),
362    ];
363
364    let prompt = "Explain quantum computing";
365
366    for (name, _model, max_tokens) in strategies {
367        println!("Trying {} (max_tokens: {})", name, max_tokens);
368
369        let builder = client.chat().user(prompt).max_completion_tokens(max_tokens);
370        match client.send_chat(builder).await {
371            Ok(response) => {
372                println!("Success with {}", name);
373                if let Some(content) = response.content() {
374                    println!("Response: {}...", &content[..content.len().min(100)]);
375                }
376                return Ok(());
377            }
378            Err(e) => {
379                println!("Failed with {}: {}", name, e);
380            }
381        }
382    }
383
384    println!("All fallback strategies exhausted");
385    Ok(())
386}
More examples
examples/models.rs (line 187)
159async fn model_selection_by_task(client: &Client) -> Result<()> {
160    // Task-specific model recommendations
161    let task_models = vec![
162        ("Simple Q&A", "gpt-3.5-turbo", "Fast and cost-effective"),
163        ("Complex reasoning", "gpt-4o", "Best reasoning capabilities"),
164        ("Code generation", "gpt-4o", "Excellent code understanding"),
165        ("Vision tasks", "gpt-4o", "Native vision support"),
166        (
167            "Quick responses",
168            "gpt-4o-mini",
169            "Low latency, good quality",
170        ),
171        (
172            "Bulk processing",
173            "gpt-3.5-turbo",
174            "Best cost/performance ratio",
175        ),
176    ];
177
178    for (task, model, reason) in task_models {
179        println!("Task: {}", task);
180        println!("  Recommended: {}", model);
181        println!("  Reason: {}", reason);
182
183        // Demo the model
184        let builder = client
185            .chat()
186            .user(format!("Say 'Hello from {}'", model))
187            .max_completion_tokens(10);
188        let response = client.send_chat(builder).await?;
189
190        if let Some(content) = response.content() {
191            println!("  Response: {}\n", content);
192        }
193    }
194
195    Ok(())
196}
197
198async fn cost_optimization(client: &Client) -> Result<()> {
199    let models = get_model_registry();
200    let test_prompt = "Explain the theory of relativity in one sentence";
201    let estimated_input_tokens = 15;
202    let estimated_output_tokens = 50;
203
204    println!("Cost comparison for same task:");
205    println!("Prompt: '{}'\n", test_prompt);
206
207    let mut costs = Vec::new();
208
209    for (name, info) in &models {
210        if !info.deprecated {
211            let input_cost = (f64::from(estimated_input_tokens) / 1000.0) * info.cost_per_1k_input;
212            let output_cost =
213                (f64::from(estimated_output_tokens) / 1000.0) * info.cost_per_1k_output;
214            let total_cost = input_cost + output_cost;
215
216            costs.push((name.clone(), total_cost));
217        }
218    }
219
220    costs.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
221
222    println!("{:<20} {:>15}", "Model", "Estimated Cost");
223    println!("{:-<35}", "");
224    for (model, cost) in costs {
225        println!("{:<20} ${:>14.6}", model, cost);
226    }
227
228    // Demonstrate cheapest vs best
229    println!("\nRunning with cheapest model (gpt-3.5-turbo):");
230    let builder = client.chat().user(test_prompt);
231    let cheap_response = client.send_chat(builder).await?;
232
233    if let Some(content) = cheap_response.content() {
234        println!("Response: {}", content);
235    }
236
237    Ok(())
238}
239
240async fn performance_testing(client: &Client) -> Result<()> {
241    use std::time::Instant;
242
243    let models_to_test = vec!["gpt-4o-mini", "gpt-3.5-turbo"];
244    let test_prompt = "Write a haiku about programming";
245
246    println!("Performance comparison:");
247    println!("{:<20} {:>10} {:>15}", "Model", "Latency", "Tokens/sec");
248    println!("{:-<45}", "");
249
250    for model in models_to_test {
251        let start = Instant::now();
252
253        let builder = client.chat().user(test_prompt);
254        let response = client.send_chat(builder).await?;
255
256        let elapsed = start.elapsed();
257
258        if let Some(usage) = response.usage() {
259            let total_tokens = f64::from(usage.total_tokens);
260            let tokens_per_sec = total_tokens / elapsed.as_secs_f64();
261
262            println!("{:<20} {:>10.2?} {:>15.1}", model, elapsed, tokens_per_sec);
263        }
264    }
265
266    Ok(())
267}
268
269async fn model_migration(client: &Client) -> Result<()> {
270    // Handle deprecated model migration
271    let deprecated_mappings = HashMap::from([
272        ("text-davinci-003", "gpt-3.5-turbo"),
273        ("gpt-4-32k", "gpt-4o"),
274        ("gpt-4-vision-preview", "gpt-4o"),
275    ]);
276
277    let requested_model = "text-davinci-003"; // Deprecated model
278
279    if let Some(replacement) = deprecated_mappings.get(requested_model) {
280        println!(
281            "Warning: {} is deprecated. Using {} instead.",
282            requested_model, replacement
283        );
284
285        let builder = client.chat().user("Hello from migrated model");
286        let response = client.send_chat(builder).await?;
287
288        if let Some(content) = response.content() {
289            println!("Response from {}: {}", replacement, content);
290        }
291    }
292
293    Ok(())
294}
295
296async fn dynamic_model_selection(client: &Client) -> Result<()> {
297    // Select model based on runtime conditions
298
299    #[derive(Debug)]
300    struct RequestContext {
301        urgency: Urgency,
302        complexity: Complexity,
303        budget: Budget,
304        needs_vision: bool,
305    }
306
307    #[derive(Debug)]
308    enum Urgency {
309        Low,
310        Medium,
311        High,
312    }
313
314    #[derive(Debug)]
315    enum Complexity {
316        Simple,
317        Moderate,
318        Complex,
319    }
320
321    #[derive(Debug)]
322    enum Budget {
323        Tight,
324        Normal,
325        Flexible,
326    }
327
328    const fn select_model(ctx: &RequestContext) -> &'static str {
329        match (&ctx.urgency, &ctx.complexity, &ctx.budget) {
330            // High urgency + simple = fast cheap model, or tight budget = cheapest
331            (Urgency::High, Complexity::Simple, _) | (_, _, Budget::Tight) => "gpt-3.5-turbo",
332
333            // Complex + flexible budget = best model
334            (_, Complexity::Complex, Budget::Flexible) => "gpt-4o",
335
336            // Vision required
337            _ if ctx.needs_vision => "gpt-4o",
338
339            // Default balanced choice
340            _ => "gpt-4o-mini",
341        }
342    }
343
344    // Example contexts
345    let contexts = [
346        RequestContext {
347            urgency: Urgency::High,
348            complexity: Complexity::Simple,
349            budget: Budget::Tight,
350            needs_vision: false,
351        },
352        RequestContext {
353            urgency: Urgency::Low,
354            complexity: Complexity::Complex,
355            budget: Budget::Flexible,
356            needs_vision: false,
357        },
358        RequestContext {
359            urgency: Urgency::Medium,
360            complexity: Complexity::Moderate,
361            budget: Budget::Normal,
362            needs_vision: true,
363        },
364    ];
365
366    for (i, ctx) in contexts.iter().enumerate() {
367        let model = select_model(ctx);
368        println!("Context {}: {:?}", i + 1, ctx);
369        println!("  Selected model: {}", model);
370
371        let builder = client
372            .chat()
373            .user(format!("Hello from dynamically selected {}", model))
374            .max_completion_tokens(20); // note: the selected `model` is not applied; the client's default model is used
375        let response = client.send_chat(builder).await?;
376
377        if let Some(content) = response.content() {
378            println!("  Response: {}\n", content);
379        }
380    }
381
382    Ok(())
383}
examples/vision_chat.rs (line 176)
152async fn demonstrate_detail_levels(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
153    println!(" Example 3: Different Detail Levels");
154    println!("------------------------------------");
155
156    let image_url = SAMPLE_IMAGE_URLS[0];
157    let question = "Analyze this image";
158
159    // Test different detail levels
160    let detail_levels = vec![
161        (Detail::Low, "Low detail (faster, less detailed)"),
162        (Detail::High, "High detail (slower, more detailed)"),
163        (Detail::Auto, "Auto detail (balanced)"),
164    ];
165
166    for (detail, description) in detail_levels {
167        println!("\n{description}:");
168        print!("Assistant: ");
169        io::stdout().flush()?;
170
171        let chat_builder = client
172            .chat()
173            .system("Analyze the image and describe what you see. Adjust your response detail based on the image quality provided.")
174            .user_with_image_url_and_detail(question, image_url, detail)
175            .temperature(0.2)
176            .max_completion_tokens(100); // Limit response length for comparison
177
178        let response = client.send_chat(chat_builder).await?;
179
180        if let Some(content) = response.content() {
181            println!("{content}");
182        }
183    }
184
185    println!();
186    Ok(())
187}
examples/moderations.rs (line 235)
228async fn response_filtering(client: &Client) -> Result<()> {
229    // Filter AI responses before showing to users
230
231    println!("Generating and moderating AI responses:");
232
233    // Generate response
234    let prompt = "Tell me about technology";
235    let builder = client.chat().user(prompt).max_completion_tokens(100);
236    let response = client.send_chat(builder).await?;
237
238    if let Some(content) = response.content() {
239        println!("Generated response: '{}'", content);
240
241        // Moderate the response
242        let moderation_result = simulate_moderation(content);
243
244        if moderation_result.flagged {
245            println!(
246                "  Response flagged! Categories: {:?}",
247                moderation_result.categories
248            );
249            println!("Action: Response blocked or regenerated");
250
251            // Regenerate with more strict instructions
252            let safe_builder = client
253                .chat()
254                .system("Provide helpful, safe, and appropriate responses only.")
255                .user(prompt)
256                .max_completion_tokens(100);
257            let safe_response = client.send_chat(safe_builder).await?;
258
259            if let Some(safe_content) = safe_response.content() {
260                println!("Regenerated safe response: '{}'", safe_content);
261            }
262        } else {
263            println!(" Response passed moderation");
264        }
265    }
266
267    Ok(())
268}
269
270fn policy_enforcement(_client: &Client) {
271    // Enforce content policies
272    let policy = ModerationPolicy {
273        thresholds: HashMap::from([
274            ("harassment".to_string(), 0.5),
275            ("violence".to_string(), 0.6),
276            ("sexual".to_string(), 0.4),
277        ]),
278        auto_reject_categories: vec![
279            "harassment/threatening".to_string(),
280            "violence/graphic".to_string(),
281        ],
282        require_human_review: vec!["self-harm".to_string()],
283    };
284
285    let test_cases = vec![
286        "Normal conversation about work",
287        "Slightly aggressive language here",
288        "Content requiring review",
289    ];
290
291    for content in test_cases {
292        println!("Checking: '{}'", content);
293
294        let result = simulate_moderation(content);
295        let action = apply_policy(&result, &policy);
296
297        match action {
298            PolicyAction::Approve => println!("   Approved"),
299            PolicyAction::Reject(reason) => println!("   Rejected: {}", reason),
300            PolicyAction::Review(reason) => println!("   Human review needed: {}", reason),
301        }
302    }
303}
304
305async fn moderation_pipeline(client: &Client) -> Result<()> {
306    // Complete moderation pipeline
307
308    type FilterFn = Box<dyn Fn(&str) -> bool + Send + Sync>;
309
310    struct ModerationPipeline {
311        pre_filters: Vec<FilterFn>,
312        post_filters: Vec<FilterFn>,
313    }
314
315    let pipeline = ModerationPipeline {
316        pre_filters: vec![
317            Box::new(|text| text.len() < 10000), // Length check
318            Box::new(|text| !text.is_empty()),   // Non-empty check
319        ],
320        post_filters: vec![
321            Box::new(|text| !text.contains("blockedword")), // Custom word filter
322        ],
323    };
324
325    println!("Running moderation pipeline:");
326
327    let user_input = "Please help me with this technical question about Rust programming.";
328
329    // Step 1: Pre-filters
330    println!("1. Pre-filters:");
331    for (i, filter) in pipeline.pre_filters.iter().enumerate() {
332        if filter(user_input) {
333            println!("   Pre-filter {} passed", i + 1);
334        } else {
335            println!("   Pre-filter {} failed", i + 1);
336            return Ok(());
337        }
338    }
339
340    // Step 2: API moderation
341    println!("2. API moderation:");
342    let moderation_result = simulate_moderation(user_input);
343    if moderation_result.flagged {
344        println!("   Content flagged by API");
345        return Ok(());
346    }
347    println!("   Passed API moderation");
348
349    // Step 3: Generate response
350    println!("3. Generating response:");
351    let builder = client.chat().user(user_input).max_completion_tokens(50);
352    let response = client.send_chat(builder).await?;
353
354    if let Some(content) = response.content() {
355        println!("  Generated: '{}'", content);
356
357        // Step 4: Post-filters
358        println!("4. Post-filters:");
359        for (i, filter) in pipeline.post_filters.iter().enumerate() {
360            if filter(content) {
361                println!("   Post-filter {} passed", i + 1);
362            } else {
363                println!("   Post-filter {} failed", i + 1);
364                return Ok(());
365            }
366        }
367
368        // Step 5: Response moderation
369        println!("5. Response moderation:");
370        let response_moderation = simulate_moderation(content);
371        if response_moderation.flagged {
372            println!("   Response flagged");
373        } else {
374            println!("   Response approved");
375            println!("\nFinal output: '{}'", content);
376        }
377    }
378
379    Ok(())
380}
examples/chat_comprehensive.rs (line 338)
322async fn demonstrate_token_tracking(
323    client: &Client,
324    conversation: &mut ConversationManager,
325) -> Result<(), Box<dyn std::error::Error>> {
326    println!(" Example 4: Token Usage Tracking");
327    println!("---------------------------------");
328
329    let efficiency_question = "In one sentence, what is machine learning?";
330    conversation.add_user_message(efficiency_question.to_string());
331
332    println!("User: {efficiency_question}");
333    print!("Assistant: ");
334    io::stdout().flush()?;
335
336    // Build chat request
337    let messages = conversation.get_conversation_for_api();
338    let mut chat_builder = client.chat().max_completion_tokens(50); // Limit tokens for demo
339
340    for (role, content) in messages {
341        match role.as_str() {
342            "system" => chat_builder = chat_builder.system(content),
343            "user" => chat_builder = chat_builder.user(content),
344            "assistant" => chat_builder = chat_builder.assistant(content),
345            _ => {}
346        }
347    }
348
349    let response = client.send_chat(chat_builder).await?;
350
351    if let Some(content) = response.content() {
352        println!("{content}");
353
354        // Display detailed token usage
355        if let Some(usage) = response.usage() {
356            println!("\n Token Usage Breakdown:");
357            println!("  Prompt tokens: {}", usage.prompt_tokens);
358            println!("  Completion tokens: {}", usage.completion_tokens);
359            println!("  Total tokens: {}", usage.total_tokens);
360
361            conversation.update_token_usage(usage.prompt_tokens, usage.completion_tokens);
362
363            conversation.add_assistant_message(content.to_string(), Some(usage.completion_tokens));
364        } else {
365            conversation.add_assistant_message(content.to_string(), None);
366        }
367    }
368
369    println!();
370    Ok(())
371}
Source

pub fn stream(self, stream: bool) -> Self

Enable or disable streaming for the completion.
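
No repository example is scraped for this method, so here is a minimal hand-written sketch (not from the repository). It only shows setting the flag; how the resulting stream is consumed depends on the client API and is omitted:

use openai_ergonomic::builders::chat::ChatCompletionBuilder;

// Request streamed deltas instead of a single response payload.
let builder = ChatCompletionBuilder::new("gpt-4o-mini")
    .user("Stream me a short poem")
    .stream(true);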

Source

pub fn tools(self, tools: Vec<ChatCompletionTool>) -> Self

Add tools that the model can use.

Examples found in repository?
examples/tool_calling.rs (line 132)
128async fn simple_tool_call(client: &Client) -> Result<()> {
129    let builder = client
130        .chat()
131        .user("What's the weather like in San Francisco?")
132        .tools(vec![get_weather_tool()]);
133    let response = client.send_chat(builder).await?;
134
135    // Check for tool calls
136    let tool_calls = response.tool_calls();
137    if !tool_calls.is_empty() {
138        for tool_call in tool_calls {
139            println!("Tool called: {}", tool_call.function_name());
140            println!("Arguments: {}", tool_call.function_arguments());
141
142            // Execute the function
143            let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
144            let result = execute_weather_function(params)?;
145            println!("Function result: {}", result);
146        }
147    }
148
149    Ok(())
150}
151
152async fn multiple_tools(client: &Client) -> Result<()> {
153    let builder = client
154        .chat()
155        .user("What's the weather in NYC and what time is it there?")
156        .tools(vec![get_weather_tool(), get_time_tool()]);
157    let response = client.send_chat(builder).await?;
158
159    for tool_call in response.tool_calls() {
160        match tool_call.function_name() {
161            "get_weather" => {
162                let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
163                let result = execute_weather_function(params)?;
164                println!("Weather result: {}", result);
165            }
166            "get_current_time" => {
167                let params: serde_json::Value =
168                    serde_json::from_str(tool_call.function_arguments())?;
169                if let Some(timezone) = params["timezone"].as_str() {
170                    let result = execute_time_function(timezone);
171                    println!("Time result: {}", result);
172                }
173            }
174            _ => println!("Unknown tool: {}", tool_call.function_name()),
175        }
176    }
177
178    Ok(())
179}
180
181async fn tool_choice_control(client: &Client) -> Result<()> {
182    // Force specific tool
183    println!("Forcing weather tool:");
184    let builder = client
185        .chat()
186        .user("Tell me about Paris")
187        .tools(vec![get_weather_tool(), get_time_tool()])
188        .tool_choice(ToolChoiceHelper::specific("get_weather"));
189    let response = client.send_chat(builder).await?;
190
191    for tool_call in response.tool_calls() {
192        println!("Forced tool: {}", tool_call.function_name());
193    }
194
195    // Disable tools
196    println!("\nDisabling tools:");
197    let builder = client
198        .chat()
199        .user("What's the weather?")
200        .tools(vec![get_weather_tool()])
201        .tool_choice(ToolChoiceHelper::none());
202    let response = client.send_chat(builder).await?;
203
204    if let Some(content) = response.content() {
205        println!("Response without tools: {}", content);
206    }
207
208    Ok(())
209}
210
211async fn conversation_with_tools(client: &Client) -> Result<()> {
212    // This example demonstrates proper multi-turn tool calling with full message history
213
214    println!("=== Conversation with Tools (Full Implementation) ===");
215
216    // Initialize the conversation
217    let mut builder = client
218        .chat()
219        .user("What's the weather in Tokyo?")
220        .tools(vec![get_weather_tool()]);
221
222    // First request - the model will call the tool
223    let response = client.send_chat(builder.clone()).await?;
224
225    // Check for tool calls
226    let tool_calls = response.tool_calls();
227    if !tool_calls.is_empty() {
228        println!("Step 1: Model requests tool call");
229        for tool_call in &tool_calls {
230            println!("  Tool: {}", tool_call.function_name());
231            println!("  Args: {}", tool_call.function_arguments());
232        }
233
234        // IMPORTANT: Add the assistant's response (with tool calls) to the history
235        // This is the key step for maintaining proper conversation context!
236        builder = builder.assistant_with_tool_calls(
237            response.content().unwrap_or(""),
238            tool_calls.iter().map(|tc| (*tc).clone()).collect(),
239        );
240
241        // Execute the tools and add results
242        println!("\nStep 2: Execute tools and add results to conversation");
243        for tool_call in tool_calls {
244            let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
245            let result = execute_weather_function(params)?;
246            println!("  Tool result: {}", result);
247
248            // Add the tool result to the conversation history
249            builder = builder.tool(tool_call.id(), result);
250        }
251
252        // Send the follow-up request with tool results
253        println!("\nStep 3: Send follow-up request with tool results");
254        let final_response = client
255            .send_chat(builder.tools(vec![get_weather_tool()]))
256            .await?;
257
258        if let Some(content) = final_response.content() {
259            println!("  Final assistant response: {}", content);
260        }
261    }
262
263    println!("\nNote: This demonstrates the complete tool calling loop with proper");
264    println!("message history management using assistant_with_tool_calls()");
265
266    Ok(())
267}
268
269fn streaming_with_tools(_client: &Client) {
270    println!("Streaming response with tools:");
271
272    // Note: Streaming with tool calls is more complex and requires
273    // proper handling of partial tool call chunks. For now, this is
274    // a placeholder showing the concept.
275
276    println!("This would demonstrate streaming tool calls if streaming API was available");
277    println!("In streaming mode, tool calls would arrive as chunks that need to be assembled");
278}
279
280async fn parallel_tool_calls(client: &Client) -> Result<()> {
281    let builder = client
282        .chat()
283        .user("Check the weather in Tokyo, London, and New York")
284        .tools(vec![get_weather_tool()]);
285    let response = client.send_chat(builder).await?;
286
287    // Modern models can call multiple tools in parallel
288    let tool_calls = response.tool_calls();
289    println!("Parallel tool calls: {}", tool_calls.len());
290
291    // Collect arguments first to avoid lifetime issues
292    let args_vec: Vec<String> = tool_calls
293        .iter()
294        .map(|tc| tc.function_arguments().to_string())
295        .collect();
296
297    // Execute all in parallel using tokio
298    let mut handles = Vec::new();
299    for args in args_vec {
300        let handle = tokio::spawn(async move {
301            let params: WeatherParams = serde_json::from_str(&args)?;
302            execute_weather_function(params)
303        });
304        handles.push(handle);
305    }
306
307    // Wait for all results
308    for (i, handle) in handles.into_iter().enumerate() {
309        match handle.await {
310            Ok(Ok(result)) => println!("Location {}: {}", i + 1, result),
311            Ok(Err(e)) => println!("Location {} error: {}", i + 1, e),
312            Err(e) => println!("Task {} panicked: {}", i + 1, e),
313        }
314    }
315
316    Ok(())
317}
More examples
examples/tool_calling_simple.rs (line 51)
42async fn main() -> Result<()> {
43    println!("=== Tool Calling Example ===");
44
45    let client = Client::from_env()?.build();
46
47    // Simple tool call
48    let builder = client
49        .chat()
50        .user("What's the weather like in San Francisco?")
51        .tools(vec![get_weather_tool()]);
52    let response = client.send_chat(builder).await?;
53
54    // Check for tool calls
55    let tool_calls = response.tool_calls();
56    if !tool_calls.is_empty() {
57        for tool_call in tool_calls {
58            println!("Tool called: {}", tool_call.function_name());
59            println!("Arguments: {}", tool_call.function_arguments());
60
61            // Execute the function
62            let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
63            let result = execute_weather_function(&params);
64            println!("Function result: {}", result);
65        }
66    } else if let Some(content) = response.content() {
67        println!("Response: {}", content);
68    }
69
70    // Forced tool choice
71    println!("\n=== Forced Tool Choice ===");
72    let builder = client
73        .chat()
74        .user("Tell me about Paris")
75        .tools(vec![get_weather_tool()])
76        .tool_choice(ToolChoiceHelper::specific("get_weather"));
77    let response = client.send_chat(builder).await?;
78
79    for tool_call in response.tool_calls() {
80        println!("Forced tool: {}", tool_call.function_name());
81    }
82
83    // No tools
84    println!("\n=== No Tools Mode ===");
85    let builder = client
86        .chat()
87        .user("What's the weather?")
88        .tools(vec![get_weather_tool()])
89        .tool_choice(ToolChoiceHelper::none());
90    let response = client.send_chat(builder).await?;
91
92    if let Some(content) = response.content() {
93        println!("Response without tools: {}", content);
94    }
95
96    Ok(())
97}
examples/tool_calling_multiturn.rs (line 178)
160async fn handle_tool_loop(
161    client: &Client,
162    mut chat_builder: openai_ergonomic::builders::chat::ChatCompletionBuilder,
163    tools: &[openai_client_base::models::ChatCompletionTool],
164    storage: &Arc<Mutex<HashMap<String, String>>>,
165) -> Result<String> {
166    const MAX_ITERATIONS: usize = 10; // Prevent infinite loops
167    let mut iteration = 0;
168
169    loop {
170        iteration += 1;
171        if iteration > MAX_ITERATIONS {
172            return Err(std::io::Error::other("Max iterations reached in tool loop").into());
173        }
174
175        println!("\n  [Iteration {}]", iteration);
176
177        // Send request with tools
178        let request = chat_builder.clone().tools(tools.to_vec());
179        let response = client.send_chat(request).await?;
180
181        // Check if there are tool calls
182        let tool_calls = response.tool_calls();
183        if tool_calls.is_empty() {
184            // No more tool calls, return the final response
185            if let Some(content) = response.content() {
186                return Ok(content.to_string());
187            }
188            return Err(std::io::Error::other("No content in final response").into());
189        }
190
191        // Process tool calls
192        println!("  Tool calls: {}", tool_calls.len());
193
194        // IMPORTANT: Add assistant message with tool calls to history
195        // This is the key step that maintains proper conversation context!
196        chat_builder = chat_builder.assistant_with_tool_calls(
197            response.content().unwrap_or(""),
198            tool_calls.iter().map(|tc| (*tc).clone()).collect(),
199        );
200
201        // Execute each tool call and add results to history
202        for tool_call in tool_calls {
203            let tool_name = tool_call.function_name();
204            let tool_args = tool_call.function_arguments();
205            let tool_id = tool_call.id();
206
207            println!("    → {}: {}", tool_name, tool_args);
208
209            let result = match execute_tool(tool_name, tool_args, storage) {
210                Ok(result) => {
211                    println!("    ✓ Result: {}", result);
212                    result
213                }
214                Err(e) => {
215                    let error_msg = format!("Error: {}", e);
216                    eprintln!("    ✗ {}", error_msg);
217                    error_msg
218                }
219            };
220
221            // Add tool result to the conversation
222            chat_builder = chat_builder.tool(tool_id, result);
223        }
224    }
225}
Source

pub fn tool_choice(self, tool_choice: ChatCompletionToolChoiceOption) -> Self

Set the tool choice option.

Examples found in repository?
examples/tool_calling.rs (line 188)
181async fn tool_choice_control(client: &Client) -> Result<()> {
182    // Force specific tool
183    println!("Forcing weather tool:");
184    let builder = client
185        .chat()
186        .user("Tell me about Paris")
187        .tools(vec![get_weather_tool(), get_time_tool()])
188        .tool_choice(ToolChoiceHelper::specific("get_weather"));
189    let response = client.send_chat(builder).await?;
190
191    for tool_call in response.tool_calls() {
192        println!("Forced tool: {}", tool_call.function_name());
193    }
194
195    // Disable tools
196    println!("\nDisabling tools:");
197    let builder = client
198        .chat()
199        .user("What's the weather?")
200        .tools(vec![get_weather_tool()])
201        .tool_choice(ToolChoiceHelper::none());
202    let response = client.send_chat(builder).await?;
203
204    if let Some(content) = response.content() {
205        println!("Response without tools: {}", content);
206    }
207
208    Ok(())
209}
More examples
examples/tool_calling_simple.rs (line 76)
42async fn main() -> Result<()> {
43    println!("=== Tool Calling Example ===");
44
45    let client = Client::from_env()?.build();
46
47    // Simple tool call
48    let builder = client
49        .chat()
50        .user("What's the weather like in San Francisco?")
51        .tools(vec![get_weather_tool()]);
52    let response = client.send_chat(builder).await?;
53
54    // Check for tool calls
55    let tool_calls = response.tool_calls();
56    if !tool_calls.is_empty() {
57        for tool_call in tool_calls {
58            println!("Tool called: {}", tool_call.function_name());
59            println!("Arguments: {}", tool_call.function_arguments());
60
61            // Execute the function
62            let params: WeatherParams = serde_json::from_str(tool_call.function_arguments())?;
63            let result = execute_weather_function(&params);
64            println!("Function result: {}", result);
65        }
66    } else if let Some(content) = response.content() {
67        println!("Response: {}", content);
68    }
69
70    // Forced tool choice
71    println!("\n=== Forced Tool Choice ===");
72    let builder = client
73        .chat()
74        .user("Tell me about Paris")
75        .tools(vec![get_weather_tool()])
76        .tool_choice(ToolChoiceHelper::specific("get_weather"));
77    let response = client.send_chat(builder).await?;
78
79    for tool_call in response.tool_calls() {
80        println!("Forced tool: {}", tool_call.function_name());
81    }
82
83    // No tools
84    println!("\n=== No Tools Mode ===");
85    let builder = client
86        .chat()
87        .user("What's the weather?")
88        .tools(vec![get_weather_tool()])
89        .tool_choice(ToolChoiceHelper::none());
90    let response = client.send_chat(builder).await?;
91
92    if let Some(content) = response.content() {
93        println!("Response without tools: {}", content);
94    }
95
96    Ok(())
97}
Source

pub fn response_format( self, format: CreateChatCompletionRequestAllOfResponseFormat, ) -> Self

Set the response format.
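
No example is scraped for this method. A hedged sketch follows: constructing the format value is out of scope here, and the import path assumes the type is exported from openai_client_base::models like the other request types on this page, so this hypothetical helper takes a pre-built value as a parameter:

use openai_client_base::models::CreateChatCompletionRequestAllOfResponseFormat;
use openai_ergonomic::builders::chat::ChatCompletionBuilder;

// Hypothetical helper (not repository code): attach a pre-built response
// format, e.g. a JSON-mode format, to a chat request.
fn with_format(format: CreateChatCompletionRequestAllOfResponseFormat) -> ChatCompletionBuilder {
    ChatCompletionBuilder::new("gpt-4o-mini")
        .user("Reply with a JSON object")
        .response_format(format)
}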

Source

pub fn n(self, n: i32) -> Self

Set the number of completions to generate.

Source

pub fn stop(self, stop: Vec<String>) -> Self

Set stop sequences.

Source

pub fn presence_penalty(self, presence_penalty: f64) -> Self

Set the presence penalty.

Source

pub fn frequency_penalty(self, frequency_penalty: f64) -> Self

Set the frequency penalty.

Source

pub fn top_p(self, top_p: f64) -> Self

Set the top-p (nucleus sampling) value.

Source

pub fn user_id(self, user: impl Into<String>) -> Self

Set the user identifier.

Source

pub fn seed(self, seed: i32) -> Self

Set the random seed for deterministic outputs.
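
The setters above all compose on one builder; a combined sketch (values are illustrative, not recommendations):

use openai_ergonomic::builders::chat::ChatCompletionBuilder;

// Combine the sampling and identification knobs documented above.
let builder = ChatCompletionBuilder::new("gpt-4o-mini")
    .user("Suggest a name for a Rust crate")
    .n(3)                            // generate three candidate completions
    .stop(vec!["\n\n".to_string()])  // stop at the first blank line
    .presence_penalty(0.5)           // discourage revisiting topics
    .frequency_penalty(0.2)          // discourage repeating tokens
    .top_p(0.9)                      // nucleus sampling cutoff
    .user_id("user-1234")            // hypothetical end-user identifier
    .seed(42);                       // best-effort reproducibility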

Trait Implementations§

Source§

impl Builder<CreateChatCompletionRequest> for ChatCompletionBuilder

Source§

fn build(self) -> Result<CreateChatCompletionRequest>

Build the final request type.
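
A minimal sketch, assuming the Builder trait is importable from the crate root and a fallible context is available for the ? operator:

use openai_ergonomic::Builder;
use openai_ergonomic::builders::chat::ChatCompletionBuilder;

// Validate the builder and convert it into the underlying request type.
let request = ChatCompletionBuilder::new("gpt-4o-mini")
    .user("Hello")
    .build()?; // Result<CreateChatCompletionRequest>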
Source§

impl Clone for ChatCompletionBuilder

Source§

fn clone(&self) -> ChatCompletionBuilder

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl Debug for ChatCompletionBuilder

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T> FutureExt for T

Source§

fn with_context(self, otel_cx: Context) -> WithContext<Self>

Attaches the provided Context to this type, returning a WithContext wrapper. Read more
Source§

fn with_current_context(self) -> WithContext<Self>

Attaches the current Context to this type, returning a WithContext wrapper. Read more
Source§

impl<T> Instrument for T

Source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
Source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> PolicyExt for T
where T: ?Sized,

Source§

fn and<P, B, E>(self, other: P) -> And<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow only if self and other return Action::Follow. Read more
Source§

fn or<P, B, E>(self, other: P) -> Or<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow if either self or other returns Action::Follow. Read more
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V

Source§

impl<T> WithSubscriber for T

Source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

impl<T> ErasedDestructor for T
where T: 'static,