use chat_gpt_lib_rs::OpenAIClient;
use chat_gpt_lib_rs::api_resources::moderations::{
CreateModerationRequest, ModerationsInput, create_moderation,
};
use chat_gpt_lib_rs::error::OpenAIError;
/// Example binary: submit one piece of text to the OpenAI moderations
/// endpoint, then print the flagged verdict plus every per-category
/// boolean and confidence score for each result returned.
#[tokio::main]
async fn main() -> Result<(), OpenAIError> {
    // Best-effort load of a local `.env` file (for OPENAI_API_KEY);
    // a missing file is not an error for this example.
    dotenvy::dotenv().ok();

    // `None` → the client resolves its API key from the environment.
    let client = OpenAIClient::new(None)?;

    let text_to_check = String::from("I hate you and want to harm you.");
    println!("Text to moderate: {}", text_to_check);

    println!("Sending a moderation request...");
    // Build the request inline: moderate a single string, letting the
    // API pick its default moderation model.
    let moderation_response = create_moderation(
        &client,
        &CreateModerationRequest {
            input: ModerationsInput::String(text_to_check),
            model: None,
        },
    )
    .await?;

    // The API may return several results; report each one in turn.
    for (idx, entry) in moderation_response.results.iter().enumerate() {
        let cats = &entry.categories;
        let scores = &entry.category_scores;

        println!("\n== Moderation Result {} ==", idx);
        println!("Flagged: {}", entry.flagged);
        println!("Categories:");
        println!(" hate: {}", cats.hate);
        println!(" hate/threatening: {}", cats.hate_threatening);
        println!(" self-harm: {}", cats.self_harm);
        println!(" sexual: {}", cats.sexual);
        println!(" sexual/minors: {}", cats.sexual_minors);
        println!(" violence: {}", cats.violence);
        println!(" violence/graphic: {}", cats.violence_graphic);
        println!("Scores:");
        println!(" hate: {}", scores.hate);
        println!(" hate/threatening: {}", scores.hate_threatening);
        println!(" self-harm: {}", scores.self_harm);
        println!(" sexual: {}", scores.sexual);
        println!(" sexual/minors: {}", scores.sexual_minors);
        println!(" violence: {}", scores.violence);
        println!(" violence/graphic: {}", scores.violence_graphic);
    }

    Ok(())
}