# openai4rs

简体中文 | English
An async Rust crate built on tokio and reqwest for interacting with large-model providers that follow the OpenAI specification.
## ✨ Features

- 🗨️ Chat
- 📝 Completions
- 🤖 Models management
- 🔄 HTTP request control
  - ✅ Configurable retry count
  - ✅ Configurable request timeout
  - ✅ Configurable connection timeout
  - ✅ HTTP proxy support
  - ✅ Custom User-Agent
## 🚀 Quick Start

### Installation

Add the dependencies to your Cargo.toml:

```toml
[dependencies]
openai4rs = "0.1.4"
tokio = { version = "1.45.1", features = ["full"] }
futures = "0.3.31"
```

Or use the cargo command:

```bash
cargo add openai4rs
```
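The tool-calling example further below builds its parameter schema with the serde_json crate, so add it as well if you want to follow along:

```bash
cargo add serde_json
```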
### Basic Usage

```rust
use openai4rs::{OpenAI, chat_request, user};

#[tokio::main]
async fn main() {
    let client = OpenAI::new("your_api_key", "your_base_url");
    let messages = vec![user!("Hello, world!")];
    let response = client
        .chat()
        .create(chat_request("gpt-3.5-turbo", &messages))
        .await
        .unwrap();
    println!("{:#?}", response);
}
```
## 📚 Usage Guide

### 🗨️ Chat

#### Non-streaming chat

The simplest way to chat, fetching the complete response in a single call:

```rust
use openai4rs::{OpenAI, chat_request, user};

#[tokio::main]
async fn main() {
    let client = OpenAI::new("your_api_key", "your_base_url");
    let messages = vec![user!("Hello, please introduce yourself")];
    let chat_completion = client
        .chat()
        .create(chat_request("your_model_name", &messages))
        .await
        .unwrap();
    println!("{:#?}", chat_completion);
}
```
#### Streaming chat

Receive the response in real time, suited to scenarios where output should appear incrementally:

```rust
use futures::StreamExt;
use openai4rs::{OpenAI, chat_request, user};

#[tokio::main]
async fn main() {
    let client = OpenAI::new("your_api_key", "your_base_url");
    let messages = vec![user!("Please write a story about artificial intelligence")];
    let mut stream = client
        .chat()
        .create_stream(chat_request("your_model_name", &messages))
        .await
        .unwrap();
    while let Some(result) = stream.next().await {
        let chunk = result.unwrap();
        for choice in chunk.choices.iter() {
            if let Some(content) = &choice.delta.content {
                print!("{}", content);
            }
        }
    }
}
```
### 🔧 Tool Calling

Let the model call external tools to extend its capabilities:

```rust
use futures::StreamExt;
use openai4rs::{ChatCompletionToolParam, OpenAI, ToolChoice, chat_request, user};

#[tokio::main]
async fn main() {
    let client = OpenAI::new("your_api_key", "your_base_url");
    // The JSON schema for the tool's parameters is built with serde_json.
    let tools = vec![ChatCompletionToolParam::function(
        "get_current_time",
        "Get the current time",
        serde_json::json!({
            "type": "object",
            "properties": {},
            "description": "Get the current date and time"
        }),
    )];
    let messages = vec![user!("What time is it right now?")];
    let mut stream = client
        .chat()
        .create_stream(
            chat_request("your_model_name", &messages)
                .tools(tools)
                .tool_choice(ToolChoice::Auto),
        )
        .await
        .unwrap();
    while let Some(result) = stream.next().await {
        match result {
            Ok(chunk) => {
                println!("Received chunk: {:#?}", chunk);
            }
            Err(err) => {
                eprintln!("Error: {:#?}", err);
            }
        }
    }
}
```
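The stream above only prints the raw chunks; completing a tool call also requires running the tool and sending its result back to the model. The sketch below illustrates that flow non-streaming. It is only a sketch: the `tool_calls` field and its `function.name` / `function.arguments` members are assumed to mirror the OpenAI response shape, and the final step is left as a comment because the exact constructor for tool-result messages depends on the crate's API. Check the openai4rs docs for the real names.

```rust
use openai4rs::{ChatCompletionToolParam, OpenAI, ToolChoice, chat_request, user};

#[tokio::main]
async fn main() {
    let client = OpenAI::new("your_api_key", "your_base_url");
    let tools = vec![ChatCompletionToolParam::function(
        "get_current_time",
        "Get the current time",
        serde_json::json!({ "type": "object", "properties": {} }),
    )];
    let messages = vec![user!("What time is it right now?")];
    let chat_completion = client
        .chat()
        .create(
            chat_request("your_model_name", &messages)
                .tools(tools)
                .tool_choice(ToolChoice::Auto),
        )
        .await
        .unwrap();

    // ASSUMPTION: `tool_calls` on the assistant message mirrors the OpenAI
    // response shape (id, function.name, function.arguments). Verify the
    // actual field names against the openai4rs documentation.
    let message = &chat_completion.choices[0].message;
    if let Some(tool_calls) = &message.tool_calls {
        for call in tool_calls {
            println!(
                "Model requested `{}` with arguments: {}",
                call.function.name, call.function.arguments
            );
            // Execute the tool yourself, append its output to `messages` as a
            // tool-role message (per the OpenAI protocol), then call create()
            // again so the model can produce its final answer.
        }
    }
}
```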
### 🧠 Reasoning Mode

Whether a provider returns the field as `reasoning` or `reasoning_content`, both are mapped to the `reasoning` field.

For models with reasoning support (such as Qwen's qwq-32b):

```rust
use futures::StreamExt;
use openai4rs::{OpenAI, chat_request, user};

#[tokio::main]
async fn main() {
    let client = OpenAI::new("your_api_key", "your_base_url");
    let messages = vec![user!(
        "Please solve this math problem: if two sides of a triangle are 3 and 4 and the third side is 5, what kind of triangle is it?"
    )];
    let mut stream = client
        .chat()
        .create_stream(chat_request("qwq-32b", &messages))
        .await
        .unwrap();
    while let Some(result) = stream.next().await {
        let chunk = result.unwrap();
        for choice in chunk.choices.iter() {
            if choice.delta.is_reasoning() {
                println!("🤔 Reasoning:\n{}", choice.delta.get_reasoning_str());
            }
            if let Some(content) = &choice.delta.content {
                if !content.is_empty() {
                    println!("💡 Answer:\n{}", content);
                }
            }
        }
    }
}
```
### 🔗 Merging Responses and Message Mapping

#### Merging streamed output (via the overloaded + operator)

Merge a streamed response into one complete reply:

```rust
use futures::stream::StreamExt;
use openai4rs::{OpenAI, StreamChoice, chat_request, user};

#[tokio::main]
async fn main() {
    let client = OpenAI::new("your_api_key", "your_base_url");
    let messages = vec![user!("Please explain Rust's ownership model in detail")];
    let mut stream = client
        .chat()
        .create_stream(chat_request("your_model_name", &messages))
        .await
        .unwrap();
    let mut merged_choice: Option<StreamChoice> = None;
    while let Some(result) = stream.next().await {
        let chat_completion_chunk = result.unwrap();
        let choice = chat_completion_chunk.choices[0].clone();
        merged_choice = Some(match merged_choice {
            Some(l) => l + choice,
            None => choice,
        });
    }
    println!("{:#?}", merged_choice.unwrap());
}
```
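Each chunk carries only an incremental delta, so folding the chunks together with `+` in arrival order reconstructs the complete reply; that is why the simple `Option` + `match` accumulator above is all that is needed.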
#### Mapping responses into the message chain

```rust
use futures::stream::StreamExt;
use openai4rs::{OpenAI, StreamChoice, chat_request, user};

#[tokio::main]
async fn main() {
    let client = OpenAI::new("your_api_key", "your_base_url");
    let mut messages = vec![user!("Please explain Rust's ownership model in detail")];
    let mut stream = client
        .chat()
        .create_stream(chat_request("your_model_name", &messages))
        .await
        .unwrap();
    let mut merged_choice: Option<StreamChoice> = None;
    while let Some(result) = stream.next().await {
        let chat_completion_chunk = result.unwrap();
        let choice = chat_completion_chunk.choices[0].clone();
        merged_choice = Some(match merged_choice {
            Some(l) => l + choice,
            None => choice,
        });
    }
    // Convert the merged delta into a message and extend the conversation.
    messages.push(merged_choice.unwrap().delta.into());
    messages.push(user!("OK, thank you"));
    let chat_completion = client
        .chat()
        .create(chat_request("your_model_name", &messages))
        .await
        .unwrap();
    messages.push(chat_completion.choices[0].message.clone().into());
}
```
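Because both stream deltas and complete messages convert into history entries via `Into`, a multi-turn chat loop needs no extra bookkeeping. A minimal sketch using only the calls shown above, assuming `user!` accepts any string expression (the stdin prompt handling is just illustrative):

```rust
use std::io::{self, Write};

use openai4rs::{OpenAI, chat_request, user};

#[tokio::main]
async fn main() {
    let client = OpenAI::new("your_api_key", "your_base_url");
    let mut messages = Vec::new();
    loop {
        print!("> ");
        io::stdout().flush().unwrap();
        let mut line = String::new();
        // EOF (Ctrl-D) ends the conversation.
        if io::stdin().read_line(&mut line).unwrap() == 0 {
            break;
        }
        messages.push(user!(line.trim()));
        let chat_completion = client
            .chat()
            .create(chat_request("your_model_name", &messages))
            .await
            .unwrap();
        let reply = chat_completion.choices[0].message.clone();
        println!("{:#?}", reply);
        // Keep the assistant reply in the history, exactly as in the example above.
        messages.push(reply.into());
    }
}
```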
### 📝 Completions

#### Non-streaming completion

```rust
use openai4rs::{OpenAI, completions_request};

#[tokio::main]
async fn main() {
    let client = OpenAI::new("your_api_key", "your_base_url");
    let completion = client
        .completions()
        .create(completions_request(
            "your_model_name",
            "Complete this sentence: the future of artificial intelligence",
        ))
        .await
        .unwrap();
    println!("Completion: {:#?}", completion);
}
```
#### Streaming completion

```rust
use futures::StreamExt;
use openai4rs::{OpenAI, completions_request};

#[tokio::main]
async fn main() {
    let client = OpenAI::new("your_api_key", "your_base_url");
    let mut stream = client
        .completions()
        .create_stream(completions_request(
            "your_model_name",
            "Write a quicksort algorithm:",
        ))
        .await
        .unwrap();
    while let Some(result) = stream.next().await {
        match result {
            Ok(completion) => {
                println!("Chunk: {:#?}", completion);
            }
            Err(err) => {
                eprintln!("Error: {}", err);
            }
        }
    }
}
```
### 🤖 Models

#### List all available models

```rust
use openai4rs::{OpenAI, models_request};

#[tokio::main]
async fn main() {
    let client = OpenAI::new("your_api_key", "your_base_url");
    let models = client
        .models()
        .list(models_request())
        .await
        .unwrap();
    println!("Available models:");
    for model in models.data.iter() {
        println!("- {}: {}", model.id, model.created);
    }
}
```
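Providers often expose long model lists; since `list()` returns plain data, narrowing it down is ordinary iterator work. A minimal sketch filtering on the `id` field shown above (the "qwen" substring is only an example):

```rust
use openai4rs::{OpenAI, models_request};

#[tokio::main]
async fn main() {
    let client = OpenAI::new("your_api_key", "your_base_url");
    let models = client.models().list(models_request()).await.unwrap();
    // Keep only models whose id contains the given substring.
    for model in models.data.iter().filter(|m| m.id.contains("qwen")) {
        println!("- {}", model.id);
    }
}
```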
## 🔧 Configuration

### Client configuration

```rust
use openai4rs::{Config, OpenAI};

#[tokio::main]
async fn main() {
    // Create a client from an API key and base URL...
    let client = OpenAI::new("your_api_key", "https://api.openai.com/v1");
    // ...or from environment variables.
    let client = OpenAI::from_env().unwrap();

    // Or build a full Config up front.
    let mut config = Config::new(
        "your_api_key".to_string(),
        "https://api.openai.com/v1".to_string(),
    );
    config
        .set_retry_count(3)
        .set_timeout_seconds(120)
        .set_connect_timeout_seconds(5)
        .set_proxy(Some("http://localhost:8080".to_string()))
        .set_user_agent(Some("MyApp/1.0".to_string()));
    let client = OpenAI::with_config(config);

    // Update the configuration at runtime.
    client
        .update_config(|config| {
            config.set_timeout_seconds(180).set_retry_count(2);
        })
        .await;

    // Read and change the base URL and API key dynamically.
    let base_url = client.get_base_url().await;
    let api_key = client.get_api_key().await;
    client
        .set_base_url("https://api.custom-provider.com/v1".to_string())
        .await;
    client.set_api_key("new-api-key".to_string()).await;
}
```
### Request parameters

```rust
use openai4rs::{chat_request, user};

let messages = vec![user!("Hello")];
let request = chat_request("gpt-3.5-turbo", &messages)
    .temperature(0.7)
    .max_completion_tokens(1000)
    .top_p(0.9)
    .frequency_penalty(0.1)
    .presence_penalty(0.1);
```
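These builder methods map to the standard OpenAI sampling parameters: temperature controls randomness, top_p sets the nucleus-sampling mass, max_completion_tokens caps the length of the generated reply, and the frequency and presence penalties discourage repeating tokens that already appeared.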
## 📖 More Examples

See the examples directory for more usage examples.
## 📄 License

This project is licensed under the Apache-2.0 License.
## 🔗 Related Links