completions/
completions.rs

#![allow(clippy::uninlined_format_args)]
#![allow(clippy::doc_markdown)]
#![allow(clippy::useless_vec)]
//! Completions API example.
//!
//! This example demonstrates:
//! - Creating text completions
//! - Using different parameters
//! - Handling completion responses
//! - Best practices for the legacy Completions API
//!
//! Note: The Completions API is legacy. For new applications, use the Chat API instead.
//!
//! Run with: `cargo run --example completions`

use openai_ergonomic::{Client, Result};

#[tokio::main]
async fn main() -> Result<()> {
    println!("=== Completions API Examples ===\n");

    // Initialize client
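    // `from_env()` is assumed to read credentials from the environment
    // (typically the OPENAI_API_KEY variable); adjust if your setup differs.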
    let client = Client::from_env()?.build();

    // Example 1: Basic completion
    println!("1. Basic Text Completion:");
    basic_completion(&client).await?;

    // Example 2: Completion with parameters
    println!("\n2. Completion with Parameters:");
    completion_with_parameters(&client).await?;

    // Example 3: Multiple completions
    println!("\n3. Multiple Completions:");
    multiple_completions(&client).await?;

    // Example 4: Completion with stop sequences
    println!("\n4. Completion with Stop Sequences:");
    completion_with_stop(&client).await?;

    // Example 5: Completion with suffix (insert mode)
    println!("\n5. Completion with Suffix (Insert Mode):");
    completion_with_suffix(&client).await?;

    // Example 6: Completion with echo
    println!("\n6. Completion with Echo:");
    completion_with_echo(&client).await?;

    println!("\n=== All examples completed successfully ===");

    Ok(())
}

async fn basic_completion(client: &Client) -> Result<()> {
    let builder = client
        .completions()
        .builder("gpt-3.5-turbo-instruct")
        .prompt("Write a tagline for an ice cream shop")
        .max_tokens(60);

    let response = client.completions().create(builder).await?;

    println!("Prompt: Write a tagline for an ice cream shop");
    if let Some(choice) = response.choices.first() {
        println!("Completion: {}", choice.text);
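        // `finish_reason` is typically "stop" (a stop condition was hit) or
        // "length" (the `max_tokens` limit was reached).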
        println!("Finish reason: {:?}", choice.finish_reason);
    }

    if let Some(usage) = response.usage {
        println!(
            "Tokens used: {} prompt + {} completion = {} total",
            usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
        );
    }

    Ok(())
}

async fn completion_with_parameters(client: &Client) -> Result<()> {
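    // Sampling parameters follow standard OpenAI semantics: `temperature`
    // scales randomness, `top_p` applies nucleus sampling, `frequency_penalty`
    // discourages repeated tokens, and `presence_penalty` nudges the model
    // toward new topics. Adjusting temperature or top_p (not both) is the
    // usual recommendation.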
    let builder = client
        .completions()
        .builder("gpt-3.5-turbo-instruct")
        .prompt("Explain quantum computing in simple terms:")
        .max_tokens(100)
        .temperature(0.7)
        .top_p(0.9)
        .frequency_penalty(0.5)
        .presence_penalty(0.0);

    let response = client.completions().create(builder).await?;

    println!("Parameters:");
    println!("  Temperature: 0.7");
    println!("  Top P: 0.9");
    println!("  Frequency Penalty: 0.5");
    println!("  Presence Penalty: 0.0");
    println!();

    if let Some(choice) = response.choices.first() {
        println!("Completion: {}", choice.text);
    }

    Ok(())
}

async fn multiple_completions(client: &Client) -> Result<()> {
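    // Requesting `n` > 1 returns several independent choices for the same
    // prompt; completion tokens are consumed for every choice.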
    let builder = client
        .completions()
        .builder("gpt-3.5-turbo-instruct")
        .prompt("Brainstorm three names for a pet cat:")
        .max_tokens(50)
        .n(3) // Generate 3 different completions
        .temperature(0.9); // Higher temperature for more variety

    let response = client.completions().create(builder).await?;

    println!("Generated {} completions:", response.choices.len());
    for (i, choice) in response.choices.iter().enumerate() {
        println!("  {}. {}", i + 1, choice.text.trim());
    }

    Ok(())
}

async fn completion_with_stop(client: &Client) -> Result<()> {
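    // Generation stops as soon as a stop sequence would be produced; the
    // matched sequence itself is not included in the returned text.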
    let builder = client
        .completions()
        .builder("gpt-3.5-turbo-instruct")
        .prompt("List three programming languages:\n1.")
        .max_tokens(100)
        .temperature(0.0)
        .add_stop("\n4.") // Stop before a fourth item would start
        .add_stop("\n\n"); // Also stop at a double newline

    let response = client.completions().create(builder).await?;

    println!("Prompt: List three programming languages:");
    if let Some(choice) = response.choices.first() {
        println!("Completion:\n1.{}", choice.text);
        println!("Stopped because: {:?}", choice.finish_reason);
    }

    Ok(())
}

async fn completion_with_suffix(client: &Client) -> Result<()> {
    // Insert mode: provide text before and after the insertion point
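    // Note: the `suffix` parameter is only accepted by certain completion
    // models (per OpenAI's API reference, gpt-3.5-turbo-instruct supports it).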
    let builder = client
        .completions()
        .builder("gpt-3.5-turbo-instruct")
        .prompt("def hello_world():\n    print(\"Hello, ")
        .suffix("\")\n    return True")
        .max_tokens(10)
        .temperature(0.0);

    let response = client.completions().create(builder).await?;

    println!("Insert mode example:");
    println!("Before: def hello_world():\\n    print(\"Hello, ");
    if let Some(choice) = response.choices.first() {
        println!("Inserted: {}", choice.text);
    }
    println!("After: \")\\n    return True");

    Ok(())
}

async fn completion_with_echo(client: &Client) -> Result<()> {
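    // Echo is mainly useful for debugging: the prompt is returned as part of
    // `choice.text`, so the output below shows prompt + completion together.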
    let builder = client
        .completions()
        .builder("gpt-3.5-turbo-instruct")
        .prompt("The capital of France is")
        .max_tokens(10)
        .echo(true) // Echo back the prompt
        .temperature(0.0);

    let response = client.completions().create(builder).await?;

    println!("Echo enabled:");
    if let Some(choice) = response.choices.first() {
        println!("Full text (prompt + completion): {}", choice.text);
    }

    Ok(())
}