
# OpenAI Tools

API wrapper for the OpenAI API.

## Installation

To start using `openai-tools`, add it to your project's dependencies in the `Cargo.toml` file:

```bash
cargo add openai-tools
```

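If you prefer to declare the dependency manually, add it to `Cargo.toml` yourself (the version below is a placeholder; use the latest release published on crates.io):

```toml
[dependencies]
# Placeholder version; check crates.io for the latest openai-tools release.
openai-tools = "0.1"
```
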
An API key is required to access the OpenAI API. Set it in the `.env` file:

```env
OPENAI_API_KEY = "xxxxxxxxxxxxxxxxxxxxxxxxxxx"
```

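The examples below assume `OPENAI_API_KEY` is visible to the process at runtime. If your setup does not load `.env` automatically, one option (an assumption about your environment, not part of `openai-tools`) is the `dotenvy` crate:

```rust
// Minimal sketch: load .env into the process environment before creating any client.
// dotenvy is assumed here; it is not a dependency of openai-tools.
fn main() {
    dotenvy::dotenv().ok(); // ignore the error if no .env file is present
    assert!(std::env::var("OPENAI_API_KEY").is_ok(), "OPENAI_API_KEY must be set");
}
```
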
Then, import the necessary modules in your code:

```rust
use openai_tools::chat::ChatCompletion;
use openai_tools::responses::Responses;
use openai_tools::embedding::Embedding;
use openai_tools::realtime::RealtimeClient;
use openai_tools::conversations::Conversations;
use openai_tools::models::Models;
use openai_tools::files::Files;
use openai_tools::moderations::Moderations;
use openai_tools::images::Images;
use openai_tools::audio::Audio;
use openai_tools::batch::Batches;
use openai_tools::fine_tuning::FineTuning;
```

## Features

| Feature | Chat | Responses | Conversations | Embedding | Realtime | Models | Files | Moderations | Images | Audio | Batch | Fine-tuning |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Basic | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Structured Output | ✅ | ✅ | - | - | - | - | - | - | - | - | - | - |
| Function Calling | ✅ | ✅ | - | - | ✅ | - | - | - | - | - | - | - |
| Image Input | ✅ | ✅ | - | - | - | - | - | - | - | - | - | - |
| Audio Input/Output | - | - | - | - | ✅ | - | - | - | - | ✅ | - | - |
| VAD | - | - | - | - | ✅ | - | - | - | - | - | - | - |
| WebSocket | - | - | - | - | ✅ | - | - | - | - | - | - | - |
| Multipart Upload | - | - | - | - | - | - | ✅ | - | ✅ | ✅ | - | - |

## Chat Completions API

```rust
use openai_tools::chat::request::ChatCompletion;
use openai_tools::common::message::Message;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let messages = vec![Message::from_string("user", "Hello!")];

    let mut chat = ChatCompletion::new();
    let response = chat
        .model_id("gpt-4o-mini")
        .messages(messages)
        .temperature(0.7)
        .chat()
        .await?;

    println!("{}", response.choices[0].message.content);
    Ok(())
}
```

## Responses API

```rust
use openai_tools::responses::request::Responses;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = Responses::new();
    let response = client
        .model_id("gpt-4o")
        .str_message("What is the capital of France?")
        .complete()
        .await?;

    println!("{}", response.output_text());
    Ok(())
}
```

## Conversations API

Manage long-running conversations with the Responses API:

```rust
use openai_tools::conversations::request::Conversations;
use openai_tools::conversations::response::InputItem;
use std::collections::HashMap;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let conversations = Conversations::new()?;

    // Create a conversation with optional metadata.
    let mut metadata = HashMap::new();
    metadata.insert("user_id".to_string(), "user123".to_string());
    let conv = conversations.create(Some(metadata), None).await?;
    println!("Created conversation: {}", conv.id);

    // Add items to the conversation.
    let items = vec![InputItem::user_message("Hello!")];
    conversations.create_items(&conv.id, items).await?;

    // List the conversation's items.
    let items = conversations.list_items(&conv.id, Some(10), None, None, None).await?;
    for item in &items.data {
        println!("Item: {} ({})", item.id, item.item_type);
    }

    // Clean up.
    conversations.delete(&conv.id).await?;
    Ok(())
}
```

## Realtime API

Real-time audio and text communication with GPT-4o models over WebSocket:

```rust
use openai_tools::realtime::{RealtimeClient, Modality, Voice};
use openai_tools::realtime::events::server::ServerEvent;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Configure the session before connecting.
    let mut client = RealtimeClient::new();
    client
        .model("gpt-4o-realtime-preview")
        .modalities(vec![Modality::Text, Modality::Audio])
        .voice(Voice::Alloy)
        .instructions("You are a helpful assistant.");

    // Open the WebSocket session and send a text message.
    let mut session = client.connect().await?;
    session.send_text("Hello!").await?;
    session.create_response(None).await?;

    // Stream server events until the response is complete.
    while let Some(event) = session.recv().await? {
        match event {
            ServerEvent::ResponseTextDelta(e) => print!("{}", e.delta),
            ServerEvent::ResponseDone(_) => break,
            _ => {}
        }
    }

    session.close().await?;
    Ok(())
}
```

## Models API

List and retrieve available models:

```rust
use openai_tools::models::request::Models;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let models = Models::new()?;

    let response = models.list().await?;
    for model in &response.data {
        println!("{}: owned by {}", model.id, model.owned_by);
    }

    let model = models.retrieve("gpt-4o-mini").await?;
    println!("Model: {}", model.id);
    Ok(())
}
```

## Files API

Upload, manage, and retrieve files:

```rust
use openai_tools::files::request::Files;
use openai_tools::files::response::FilePurpose;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let files = Files::new()?;

    // Upload a local file for fine-tuning.
    let file = files.upload_path("training.jsonl", FilePurpose::FineTune).await?;
    println!("Uploaded: {}", file.id);

    // List uploaded files.
    let response = files.list(None).await?;
    for file in &response.data {
        println!("{}: {} bytes", file.filename, file.bytes);
    }

    // Delete the uploaded file.
    files.delete(&file.id).await?;
    Ok(())
}
```

## Moderations API

Check content for policy violations:

```rust
use openai_tools::moderations::request::Moderations;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let moderations = Moderations::new()?;

    // Moderate a single text input.
    let response = moderations.moderate_text("Hello, world!", None).await?;
    if response.results[0].flagged {
        println!("Content was flagged!");
    } else {
        println!("Content is safe.");
    }

    // Moderate multiple texts in one request.
    let texts = vec!["Text 1".to_string(), "Text 2".to_string()];
    let response = moderations.moderate_texts(texts, None).await?;
    println!("Checked {} inputs.", response.results.len());
    Ok(())
}
```

## Images API (DALL-E)

Generate images with DALL-E:

```rust
use openai_tools::images::request::{Images, GenerateOptions, ImageModel, ImageSize, ImageQuality};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let images = Images::new()?;

    let options = GenerateOptions {
        model: Some(ImageModel::DallE3),
        size: Some(ImageSize::Size1024x1024),
        quality: Some(ImageQuality::Hd),
        ..Default::default()
    };
    let response = images.generate("A sunset over mountains", options).await?;
    println!("Image URL: {:?}", response.data[0].url);
    Ok(())
}
```

## Audio API

Text-to-speech and transcription:

```rust
use openai_tools::audio::request::{Audio, TtsOptions, TranscribeOptions};
use openai_tools::audio::response::{TtsModel, Voice};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let audio = Audio::new()?;

    // Text-to-speech: synthesize speech and save it as an MP3 file.
    let options = TtsOptions {
        model: TtsModel::Tts1Hd,
        voice: Voice::Nova,
        ..Default::default()
    };
    let bytes = audio.text_to_speech("Hello!", options).await?;
    std::fs::write("hello.mp3", bytes)?;

    // Transcription: convert an audio file back into text.
    let options = TranscribeOptions {
        language: Some("en".to_string()),
        ..Default::default()
    };
    let response = audio.transcribe("audio.mp3", options).await?;
    println!("Transcript: {}", response.text);
    Ok(())
}
```

## Batch API

Process large volumes of requests asynchronously at a 50% cost discount:

```rust
use openai_tools::batch::request::{Batches, CreateBatchRequest, BatchEndpoint};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let batches = Batches::new()?;

    // List existing batches.
    let response = batches.list(Some(20), None).await?;
    for batch in &response.data {
        println!("Batch: {} - {:?}", batch.id, batch.status);
    }

    // Create a new batch from a previously uploaded input file.
    let request = CreateBatchRequest::new("file-abc123", BatchEndpoint::ChatCompletions);
    let batch = batches.create(request).await?;
    println!("Created batch: {}", batch.id);
    Ok(())
}
```

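The `file-abc123` id above refers to a previously uploaded batch input file. In OpenAI's Batch API that file is JSONL, one self-contained request per line with a `custom_id`, `method`, `url`, and `body`; the ids and prompts below are purely illustrative:

```jsonl
{"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-4o-mini", "messages": [{"role": "user", "content": "Hello!"}]}}
{"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-4o-mini", "messages": [{"role": "user", "content": "Summarize the Batch API."}]}}
```

The OpenAI API expects such input files to be uploaded with the `batch` purpose before the batch is created.
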
## Fine-tuning API

Customize models with your training data:

```rust
use openai_tools::fine_tuning::request::{FineTuning, CreateFineTuningJobRequest};
use openai_tools::fine_tuning::response::Hyperparameters;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let fine_tuning = FineTuning::new()?;

    // List existing fine-tuning jobs.
    let response = fine_tuning.list(Some(10), None).await?;
    for job in &response.data {
        println!("Job: {} - {:?}", job.id, job.status);
    }

    // Create a new supervised fine-tuning job.
    let hyperparams = Hyperparameters {
        n_epochs: Some(3),
        ..Default::default()
    };
    let request = CreateFineTuningJobRequest::new("gpt-4o-mini-2024-07-18", "file-abc123")
        .with_suffix("my-model")
        .with_supervised_method(Some(hyperparams));
    let job = fine_tuning.create(request).await?;
    println!("Created job: {}", job.id);
    Ok(())
}
```

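The training file (`file-abc123` above) for supervised chat fine-tuning is JSONL in OpenAI's chat format, one training example per line; a minimal illustration:

```jsonl
{"messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is the capital of France?"}, {"role": "assistant", "content": "Paris."}]}
```
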
## Embedding API

```rust
use openai_tools::embedding::request::Embedding;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut embedding = Embedding::new();
    let response = embedding
        .model("text-embedding-3-small")
        .input_text("Hello, world!")
        .embed()
        .await?;

    println!("Embedding dimensions: {}", response.data[0].embedding.as_1d().unwrap().len());
    Ok(())
}
```

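Embeddings are typically compared with cosine similarity. A minimal sketch, assuming two vectors have already been extracted as `Vec<f64>` (for example via `as_1d()` as above):

```rust
/// Cosine similarity between two equal-length embedding vectors.
fn cosine_similarity(a: &[f64], b: &[f64]) -> f64 {
    assert_eq!(a.len(), b.len(), "embeddings must have the same dimension");
    let dot: f64 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let norm_a = a.iter().map(|x| x * x).sum::<f64>().sqrt();
    let norm_b = b.iter().map(|x| x * x).sum::<f64>().sqrt();
    dot / (norm_a * norm_b)
}
```
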
## License

MIT License