use anyhow::Result;
use async_openai::types::{
ChatCompletionRequestMessage, ChatCompletionRequestSystemMessage,
ChatCompletionRequestSystemMessageContent, ChatCompletionRequestUserMessage,
};
use base64::{engine::general_purpose, Engine as _};
use opencrates::{
core::OpenCrates,
providers::{
enhanced_openai::{
AudioTranscriptionRequest, EmbeddingRequest, EnhancedOpenAIConfig,
EnhancedOpenAIProvider, ImageGenerationRequest, SpeechRequest,
},
GenerationRequest, LLMProvider,
},
utils::config::OpenCratesConfig,
utils::templates::CrateSpec,
};
use serde_json::json;
use std::fs::File;
use std::io::Write;
use std::path::Path;
use tokio::fs;
use tracing::{error, info};
use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};
#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::registry()
        .with(EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")))
        .with(fmt::layer())
        .init();
println!("OpenCrates async-openai Integration Examples");
println!("===============================================");
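    // Shared OpenCrates context plus the enhanced async-openai provider used by all examples below.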
let config = OpenCratesConfig::default();
let opencrates = OpenCrates::new_with_config(config).await?;
let enhanced_provider = EnhancedOpenAIProvider::new().await?;
run_chat_examples(&enhanced_provider).await?;
run_embedding_examples(&enhanced_provider).await?;
run_image_examples(&enhanced_provider).await?;
run_audio_examples(&enhanced_provider).await?;
run_structured_output_examples(&enhanced_provider).await?;
run_function_calling_examples(&enhanced_provider).await?;
run_code_generation_examples(&enhanced_provider, &opencrates).await?;
println!("\n✅ All examples completed successfully!");
Ok(())
}
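/// Demonstrates a basic chat completion and a streaming chat completion against the enhanced provider.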
async fn run_chat_examples(provider: &EnhancedOpenAIProvider) -> Result<()> {
println!("\nChat Completion Examples");
println!("---------------------------");
let messages = vec![
ChatCompletionRequestMessage::System(ChatCompletionRequestSystemMessage {
content: ChatCompletionRequestSystemMessageContent::Text(
"You are a helpful Rust programming assistant.".to_string(),
),
name: None,
}),
        ChatCompletionRequestMessage::User(ChatCompletionRequestUserMessage {
            content: async_openai::types::ChatCompletionRequestUserMessageContent::Text(
                "Generate a simple hello world function in Rust.".to_string(),
            ),
            name: None,
        }),
];
let response = provider
.chat_completion(
messages.clone(),
Some("gpt-4o".to_string()),
Some(500),
Some(0.7),
false,
)
.await?;
println!(" Chat Response:");
println!("{}", response.content);
println!("Tokens used: {}", response.usage.total_tokens);
println!("\n🌊 Streaming Chat Example:");
    match provider
        .chat_completion(messages.clone(), None, Some(100), None, true)
        .await
    {
        Ok(streaming_response) => {
            info!("Successfully received streaming response");
            println!("Streamed content: {}", streaming_response.preview);
        }
        Err(e) => error!("Failed to receive streaming response: {}", e),
    }
Ok(())
}
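/// Generates embeddings for several Rust-related phrases and compares two of them with cosine similarity.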
async fn run_embedding_examples(provider: &EnhancedOpenAIProvider) -> Result<()> {
println!("\nEmbedding Examples");
println!("---------------------");
let embedding_request = EmbeddingRequest {
input: vec![
"Rust programming language".to_string(),
"Memory safety without garbage collection".to_string(),
"Zero-cost abstractions in systems programming".to_string(),
"Async/await concurrency model".to_string(),
],
model: Some("text-embedding-3-large".to_string()),
};
let embedding_response = provider.generate_embeddings(embedding_request).await?;
println!(
"Generated {} embeddings",
embedding_response.embeddings.len()
);
println!(
"Embedding dimensions: {}",
embedding_response.embeddings[0].len()
);
println!("Tokens used: {}", embedding_response.usage.total_tokens);
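    // Compare the first two embeddings; semantically related phrases should score closer to 1.0.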
if embedding_response.embeddings.len() >= 2 {
let similarity = cosine_similarity(
&embedding_response.embeddings[0],
&embedding_response.embeddings[1],
);
println!(
" Similarity between first two embeddings: {:.4}",
similarity
);
}
Ok(())
}
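/// Generates one image returned as a URL and one returned as base64 data, which is saved to disk.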
async fn run_image_examples(provider: &EnhancedOpenAIProvider) -> Result<()> {
println!("\n Image Generation Examples");
println!("----------------------------");
let image_request = ImageGenerationRequest {
prompt:
"A futuristic Rust programming IDE with holographic code displays, cyberpunk aesthetic"
.to_string(),
n: Some(1),
size: Some("1024x1024".to_string()),
quality: Some("standard".to_string()),
response_format: Some("url".to_string()),
};
let image_response = provider.generate_image(image_request).await?;
if let Some(first_image) = image_response.images.first() {
if let Some(url) = &first_image.url {
println!(" Generated image URL: {}", url);
}
if let Some(revised_prompt) = &first_image.revised_prompt {
println!(" Revised prompt: {}", revised_prompt);
}
}
let base64_request = ImageGenerationRequest {
prompt: "Rust crab mascot wearing a space helmet, cartoon style".to_string(),
n: Some(1),
size: Some("512x512".to_string()),
quality: Some("standard".to_string()),
response_format: Some("b64_json".to_string()),
};
let base64_response = provider.generate_image(base64_request).await?;
if let Some(first_image) = base64_response.images.first() {
if let Some(b64_data) = &first_image.b64_json {
println!(" Generated base64 image (length: {} chars)", b64_data.len());
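            // Decode the base64 payload and write it to disk as a PNG.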
let image_data = general_purpose::STANDARD.decode(b64_data)?;
let mut file = File::create("generated_rust_crab.png")?;
file.write_all(&image_data)?;
println!(" Saved image to: generated_rust_crab.png");
}
}
Ok(())
}
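/// Generates speech audio from text and, if a local sample file exists, runs audio transcription on it.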
async fn run_audio_examples(provider: &EnhancedOpenAIProvider) -> Result<()> {
println!("\n Audio Processing Examples");
println!("----------------------------");
let speech_request = SpeechRequest {
input: "Welcome to OpenCrates, the AI-powered Rust development companion!".to_string(),
model: Some("tts-1-hd".to_string()),
voice: Some("alloy".to_string()),
};
match provider.generate_speech(speech_request).await {
Ok(audio_data) => {
fs::write("welcome_message.mp3", audio_data).await?;
println!(" Generated speech audio: welcome_message.mp3");
}
Err(e) => {
println!("⚠ Speech generation skipped: {}", e);
}
}
if Path::new("test_audio.mp3").exists() {
let transcription_request = AudioTranscriptionRequest {
file_path: "test_audio.mp3".to_string(),
model: Some("whisper-1".to_string()),
language: Some("en".to_string()),
response_format: Some("json".to_string()),
};
match provider.transcribe_audio(transcription_request).await {
Ok(transcription_response) => {
println!(" Transcription: {}", transcription_response.text);
}
Err(e) => {
println!("⚠ Transcription skipped: {}", e);
}
}
} else {
println!("⚠ Audio transcription skipped: test_audio.mp3 not found");
}
Ok(())
}
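/// Requests a JSON response that conforms to a crate-specification schema (structured output).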
async fn run_structured_output_examples(provider: &EnhancedOpenAIProvider) -> Result<()> {
println!("\n Structured Output Examples");
println!("-----------------------------");
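    // JSON Schema the model's reply must follow: crate name, description, features, dependencies, and complexity.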
let schema = json!({
"type": "object",
"properties": {
"crate_name": {
"type": "string",
"description": "Name of the Rust crate"
},
"description": {
"type": "string",
"description": "Brief description of the crate"
},
"features": {
"type": "array",
"items": {
"type": "string"
},
"description": "List of key features"
},
"dependencies": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {"type": "string"},
"version": {"type": "string"},
"features": {
"type": "array",
"items": {"type": "string"}
}
},
"required": ["name", "version"]
}
},
"estimated_complexity": {
"type": "string",
"enum": ["simple", "moderate", "complex"]
}
},
"required": ["crate_name", "description", "features", "dependencies", "estimated_complexity"],
"additionalProperties": false
});
let messages = vec![
ChatCompletionRequestMessage::System(ChatCompletionRequestSystemMessage {
            content: ChatCompletionRequestSystemMessageContent::Text(
                "You are a Rust crate planning assistant. Generate structured crate specifications."
                    .to_string(),
            ),
name: None,
}),
        ChatCompletionRequestMessage::User(ChatCompletionRequestUserMessage {
            content: async_openai::types::ChatCompletionRequestUserMessageContent::Text(
                "Plan a crate named 'fast-http' with basic features.".to_string(),
            ),
            name: None,
        }),
];
match provider
.structured_completion(messages, schema, "crate_specification".to_string())
.await
{
Ok(structured_response) => {
println!(" Structured Crate Specification:");
println!("{}", serde_json::to_string_pretty(&structured_response)?);
}
Err(e) => {
println!("⚠ Structured output skipped: {}", e);
}
}
Ok(())
}
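/// Exposes two crate-analysis tools to the model and inspects whether it responds with tool calls.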
async fn run_function_calling_examples(provider: &EnhancedOpenAIProvider) -> Result<()> {
println!("\n Function Calling Examples");
println!("----------------------------");
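    // Tool definitions in the OpenAI function-calling format; the model chooses whether to invoke them.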
let tools = vec![
json!({
"type": "function",
"function": {
"name": "analyze_crate_dependencies",
"description": "Analyze dependencies of a Rust crate",
"parameters": {
"type": "object",
"properties": {
"crate_name": {
"type": "string",
"description": "Name of the crate to analyze"
},
"include_dev_deps": {
"type": "boolean",
"description": "Whether to include dev dependencies"
}
},
"required": ["crate_name"]
}
}
}),
json!({
"type": "function",
"function": {
"name": "get_crate_info",
"description": "Get information about a Rust crate from crates.io",
"parameters": {
"type": "object",
"properties": {
"crate_name": {
"type": "string",
"description": "Name of the crate"
}
},
"required": ["crate_name"]
}
}
}),
];
let messages = vec![
ChatCompletionRequestMessage::System(ChatCompletionRequestSystemMessage {
            content: ChatCompletionRequestSystemMessageContent::Text(
                "You are a Rust development assistant with access to crate analysis tools. Use the available functions when needed."
                    .to_string(),
            ),
name: None,
}),
        ChatCompletionRequestMessage::User(ChatCompletionRequestUserMessage {
            content: async_openai::types::ChatCompletionRequestUserMessageContent::Text(
                "What are the dependencies of the 'serde' crate?".to_string(),
            ),
            name: None,
        }),
];
    match provider.function_calling(messages, tools).await {
        Ok(function_response) => {
            if let Some(tool_calls) = &function_response.preview.tool_calls {
                println!("Function calls: {:?}", tool_calls);
            } else {
                println!("{}", function_response.preview);
            }
        }
        Err(e) => error!("Function call failed: {}", e),
    }
Ok(())
}
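/// Runs an end-to-end crate generation request through the provider, then finishes with a health check.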
async fn run_code_generation_examples(
provider: &EnhancedOpenAIProvider,
opencrates: &OpenCrates,
) -> Result<()> {
println!("\n OpenCrates Code Generation Examples");
println!("--------------------------------------");
let generation_request = GenerationRequest {
spec: CrateSpec::default(),
prompt: Some("Create a comprehensive async HTTP client library for Rust with the following features:
- Connection pooling
- Automatic retries with exponential backoff
- Request/response middleware support
- Customizable timeouts and connection settings
- Gzip and Brotli decompression
- SSL/TLS verification options
- Async/await support throughout".to_string()),
context: Some("This is for a production-ready HTTP client that will be used in enterprise applications. Focus on performance, reliability, and ease of use.".to_string()),
max_tokens: Some(4000),
temperature: Some(0.7),
model: Some("gpt-4o".to_string()),
};
    match provider.generate(&generation_request).await {
        Ok(response) => {
            println!("\n--- Generated Crate Code ---");
            println!("{}", response.preview);
            println!("--- End Generated Crate Code ---");
            if let Some(finish_reason) = response.finish_reason {
                println!(" Finish Reason: {}", finish_reason);
            }
            println!(" Tokens: {}", response.metrics.total_tokens);
        }
        Err(e) => error!("Crate generation failed: {}", e),
    }
match provider.health_check().await {
Ok(healthy) => {
println!(
"\n💚 Provider health check: {}",
if healthy { "HEALTHY" } else { "UNHEALTHY" }
);
}
Err(e) => {
println!("⚠ Health check failed: {}", e);
}
}
Ok(())
}
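/// Cosine similarity of two vectors: dot(a, b) / (|a| * |b|), or 0.0 if either has zero magnitude.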
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
let dot_product: f32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
let magnitude_a: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
let magnitude_b: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();
if magnitude_a == 0.0 || magnitude_b == 0.0 {
0.0
} else {
dot_product / (magnitude_a * magnitude_b)
}
}